text stringlengths 957 885k |
|---|
<reponame>dichodaemon/chrysophylax<gh_stars>0
import pandas as pd
from functools import reduce
def max_in_window(parms, data):
    """Rolling maximum of ``parms.input_column`` over ``parms.window_size`` rows."""
    series = data[parms.input_column]
    return series.rolling(window=parms.window_size).max()
def min_in_window(parms, data):
    """Rolling minimum of ``parms.input_column`` over ``parms.window_size`` rows."""
    series = data[parms.input_column]
    return series.rolling(window=parms.window_size).min()
def moving_average(parms, data):
    """Simple moving average of ``parms.input_column`` over ``parms.window_size`` rows."""
    series = data[parms.input_column]
    return series.rolling(window=parms.window_size).mean()
def true_range(parms, data):
    """Return Wilder's true range for each bar.

    True range is the largest of: high-low, high-previous close, and
    previous close-low (the first bar has no previous close, so only
    high-low contributes there).

    Fix vs. the original: the three intermediate series are kept local
    instead of being written into ``data`` as temporary ``hl``/``hpdc``/
    ``pdcl`` columns, which polluted the caller's DataFrame.
    """
    high_low = data["high"] - data["low"]
    high_prev_close = data["high"] - data.close.shift()
    prev_close_low = data.close.shift() - data["low"]
    return pd.concat([high_low, high_prev_close, prev_close_low], axis=1).max(axis=1)
def average_true_range(parms, data):
    """Rolling mean of the precomputed ``true_range`` column (ATR)."""
    tr = data["true_range"]
    return tr.rolling(window=parms.window_size).mean()
def efficiency_ratio(parms, data):
    """Kaufman efficiency ratio: net price movement over total path length.

    Values near 1 mean the close moved in a straight line over the
    window; values near 0 mean it churned.
    """
    window = parms.window_size
    close = data["close"]
    net_move = (close - close.shift(window)).abs()
    step_moves = (close - close.shift()).abs()
    path_length = step_moves.rolling(window).sum()
    return net_move / path_length
def simple_turtle_signals(parms, data):
    """Populate turtle-breakout trigger expressions and stop deltas in-place.

    Entry/exit columns hold string expressions (evaluated later against a
    live ticker); the two stop columns hold ATR-scaled price deltas.
    """
    entry_high = "high_max_{}".format(parms.entry)
    entry_low = "low_min_{}".format(parms.entry)
    exit_high = "high_max_{}".format(parms.exit)
    exit_low = "low_min_{}".format(parms.exit)
    atr = data["atr_{}".format(parms.atr_window)]
    data["stop_loss_delta"] = atr * parms.stop_loss_multiplier
    data["trailing_stop_delta"] = atr * parms.trailing_stop_multiplier
    # Longs buy breakouts above the entry high and exit below the exit low;
    # shorts mirror that.
    for side, enter, leave in (("long", entry_high, exit_low),
                               ("short", entry_low, exit_high)):
        op_in, op_out = (">", "<") if side == "long" else ("<", ">")
        data["{}_setup".format(side)] = "True"
        data["{}_entry".format(side)] = "ticker.price {} trigger.{}".format(op_in, enter)
        data["{}_exit".format(side)] = "ticker.price {} trigger.{}".format(op_out, leave)
def turtle_soup_signals(parms, data):
    """Compute "turtle soup" long-entry signals in-place.

    A setup fires when the rolling low makes a new low versus the previous
    bar and no setup fired during the preceding ``parms.wait`` bars. For
    setup bars the long entry is armed at ``min + ATR * entry_multiplier``.
    Short and long-exit triggers are left disabled.

    Fixes vs. the original:
    - chained assignment (``data[col][mask] = ...``) replaced with ``.loc``;
      the old form raises SettingWithCopyWarning and silently stops working
      under pandas copy-on-write.
    - the shifted boolean setup series is cast to float before ``rolling``;
      rolling over an object-dtype series raises on modern pandas. NaN
      positions still compare unequal to 0, matching the old ``== False``.
    """
    min_column = "low_min_{}".format(parms.entry_window)
    atr_column = "atr_{}".format(parms.atr_window)
    new_low = data[min_column] < data.shift()[min_column]
    recent_setups = new_low.shift().astype(float).rolling(parms.wait).max()
    data["setup"] = new_low & (recent_setups == 0)
    data["long_entry_value"] = float("nan")
    data["long_entry_type"] = "disabled"
    selected = data["setup"] == True
    data.loc[selected, "long_entry_value"] = (
        data.loc[selected, min_column]
        + data.loc[selected, atr_column] * parms.entry_multiplier)
    data.loc[selected, "long_entry_type"] = "price_gt"
    data["long_exit_value"] = float("nan")
    data["long_exit_type"] = "disabled"
    data["short_entry_value"] = float("nan")
    data["short_entry_type"] = "disabled"
    data["short_exit_value"] = float("nan")
    data["short_exit_type"] = "disabled"
    # Stop loss is set wide enough to cover both the entry and exit bands.
    data["stop_loss_delta"] = data[atr_column] * \
        (parms.entry_multiplier + parms.exit_multiplier)
    data["trailing_stop_delta"] = data[atr_column] \
        * parms.trailing_stop_multiplier
def buy_and_hold_signals(parms, data):
    """Flag buy-and-hold entry/exit rows in-place.

    Sets ``entry_long`` True on rows whose index date equals
    ``parms.start_date`` and ``exit_long`` True on rows matching
    ``parms.end_date``; all other rows are False.

    NOTE(review): the original body referenced an undefined ``row`` and
    misspelled ``data`` as ``date``, so it raised NameError on every call.
    This is a reconstruction of the apparent intent (per-row flags over a
    DatetimeIndex) — confirm against the strategy runner.
    """
    index_dates = [ts.date() for ts in data.index]
    data["entry_long"] = [d == parms.start_date for d in index_dates]
    data["exit_long"] = [d == parms.end_date for d in index_dates]
|
# Importing the required libraries
from surprise import Reader, Dataset
from surprise import SVD, accuracy, SVDpp, SlopeOne, BaselineOnly, CoClustering
import datetime
import requests, zipfile, io
from os import path
import pandas as pd
import tqdm as tqdm
from numpy import *
from sklearn.model_selection import train_test_split
import time
import pickle
# Loading the mapping data which is to map each movie Id
# in the ratings with it's title and genre
# the resulted data structure is a dictionary where the
# movie id is the key, the genre and titles are values
def load_mapping_data():
    """Load the movieId -> [title, genres] mapping from movies.csv.

    Reads 'ml-latest-small/movies.csv' in chunks (to bound memory on the
    full 27M-row dataset) and returns a dict keyed by integer movie id.

    Fixes vs. the original: removed the dead ``df_chunk.shape[0]``
    statement and the intermediate list-of-lists build; rows are zipped
    straight into the result dict.
    """
    movie_data = {}
    chunk_size = 500000
    df_dtype = {
        "movieId": int,
        "title": str,
        "genres": str
    }
    cols = list(df_dtype.keys())
    for df_chunk in tqdm.tqdm(pd.read_csv('ml-latest-small/movies.csv', usecols=cols, dtype=df_dtype, chunksize=chunk_size)):
        rows = zip(df_chunk["movieId"].tolist(),
                   df_chunk["title"].tolist(),
                   df_chunk["genres"].tolist())
        for movie_id, title, genres in rows:
            movie_data[movie_id] = [title, genres]
        del df_chunk  # release the chunk before reading the next one
    return movie_data
# Loading the rating data which is around 27M records it takes around 2 minutes
# the resulted data structure us a dictionary where the
# user id is the key and all their raings are values for example for user 1 :
# 1 = {
# [movieId,rating,timestamp],
# [movieId,rating,timestamp],
# [movieId,rating,timestamp],
# }
def load_data():
    """Load ratings.csv into per-user rating lists.

    Returns ``(rating_data, unique_user_id)`` where ``rating_data`` maps
    each userId to a list of ``[userId, movieId, rating, timestamp]``
    records and ``unique_user_id`` lists each user exactly once.

    Bug fix: the original extended ``unique_user_id`` with a per-chunk
    set, so a user whose ratings spanned a chunk boundary appeared more
    than once and was later split twice by ``spilt_data`` (duplicating
    their ratings in train/test). A single set is now kept across chunks;
    note the returned list's ordering is therefore unspecified.
    """
    rating_data = {}
    seen_user_ids = set()
    chunk_size = 50000
    df_dtype = {
        "userId": int,
        "movieId": int,
        "rating": float,
        "timestamp": int,
    }
    cols = list(df_dtype.keys())
    for df_chunk in tqdm.tqdm(pd.read_csv('ml-latest-small/ratings.csv', usecols=cols, dtype=df_dtype, chunksize=chunk_size)):
        rows = zip(df_chunk["userId"].tolist(), df_chunk["movieId"].tolist(),
                   df_chunk["rating"].tolist(), df_chunk["timestamp"].tolist())
        for user_id, movie_id, rating, timestamp in rows:
            seen_user_ids.add(user_id)
            rating_data.setdefault(user_id, []).append(
                [user_id, movie_id, rating, timestamp])
        del df_chunk  # release the chunk before reading the next one
    return rating_data, list(seen_user_ids)
# Split the data into training and testing
# this processes isn't being done for the whole dataset instead it's being done
# for each user id, for each user we split their ratings 80 training and 20 testing
# the resulted training and testing datasets are including the whole original dataset
def spilt_data(rating_data, unique_user_id):
    """Split each user's ratings 80/20 into train/test lists.

    The split is per-user so every user appears in both sets; a user with
    a single rating has that rating reused on both sides. Prints the
    elapsed seconds and returns ``(training_data, testing_data)``.

    (The function name's typo is kept for caller compatibility.)

    Bug fix: the original captured the end timestamp *before* the loop,
    so the printed duration was always 0.
    """
    training_data = []
    testing_data = []
    t0 = time.time()
    for u in unique_user_id:
        ratings = rating_data[u]
        if len(ratings) == 1:
            # A single rating cannot be split; reuse it on both sides.
            x_test = ratings
            x_train = ratings
        else:
            x_train, x_test = train_test_split(ratings, test_size=0.2)
        training_data.extend(x_train)
        testing_data.extend(x_test)
    total = time.time() - t0
    print(int(total))
    return training_data, testing_data
def get_movie_title(movie_id, movie_data):
    """Return the title for ``movie_id``, or None when the id is unknown.

    Idiom fix: a single ``dict.get`` lookup replaces the
    ``in d.keys()`` check plus second lookup.
    """
    entry = movie_data.get(movie_id)
    return entry[0] if entry is not None else None
def get_movie_genre(movie_id, movie_data):
    """Return the genre string for ``movie_id``, or None when unknown.

    Idiom fix: a single ``dict.get`` lookup replaces the
    ``in d.keys()`` check plus second lookup.
    """
    entry = movie_data.get(movie_id)
    return entry[1] if entry is not None else None
# def get_train_test_data():
# rating_data, unique_user_id = load_data()
# training_data, testing_data = spilt_data(rating_data, unique_user_id)
# training_dataframe = pd.DataFrame.from_records(training_data)
# training_dataframe.columns = ["userId","movieId","rating","timestamp"]
# testing_dataframe = pd.DataFrame.from_records(testing_data)
# testing_dataframe.columns= ["userId","movieId","rating","timestamp"]
# return training_dataframe, testing_dataframe
def get_train_test_data(new_sample=False):
    """Return ``(training_dataframe, testing_dataframe)``.

    With ``new_sample=True``, rebuilds the split from ratings.csv and
    caches both frames as pickles; otherwise loads the cached pickles
    from the current directory.

    Fixes vs. the original: file handles are managed with ``with`` so
    they are closed even on error, and the ``file`` local no longer
    shadows a builtin name.
    """
    if new_sample:
        rating_data, unique_user_id = load_data()
        training_data, testing_data = spilt_data(rating_data, unique_user_id)
        training_dataframe = pd.DataFrame.from_records(training_data)
        training_dataframe.columns = ["userId", "movieId", "rating", "timestamp"]
        testing_dataframe = pd.DataFrame.from_records(testing_data)
        testing_dataframe.columns = ["userId", "movieId", "rating", "timestamp"]
        # Cache the split so later runs can skip the expensive rebuild.
        with open('training_dataframe.txt', 'wb') as fh:
            pickle.dump(training_dataframe, fh)
        with open('testing_dataframe.txt', 'wb') as fh:
            pickle.dump(testing_dataframe, fh)
    else:
        with open('training_dataframe.txt', 'rb') as fh:
            training_dataframe = pickle.load(fh)
        with open('testing_dataframe.txt', 'rb') as fh:
            testing_dataframe = pickle.load(fh)
    return training_dataframe, testing_dataframe
if __name__ == "__main__":
    # download http://files.grouplens.org/datasets/movielens/ml-latest-small.zip with 1M records File
    # all files should be placed inside ml-latest folder
    if not path.exists('ml-latest-small'):
        # First run: fetch the MovieLens archive and unpack it next to the script.
        print("Downloading Files for first time use: ")
        download_file = requests.get('http://files.grouplens.org/datasets/movielens/ml-latest-small.zip')
        zipped_file = zipfile.ZipFile(io.BytesIO(download_file.content)) # having First.csv zipped file.
        zipped_file.extractall()
    # End-to-end demo: load ratings, build the train/test split, then
    # look up title and genre for movie id 1.
    print("Data Loading and Processing, Estimated Time 2 minutes :")
    rating_data, unique_user_id = load_data()
    print("Training and Testing DataSets Construction, Estimated Time 40 seconds :")
    training_data, testing_data = spilt_data(rating_data, unique_user_id)
    print("Mapping Data Processing :")
    movie_data = load_mapping_data()
    print("Movie name with id = 1 :")
    print(get_movie_title(1, movie_data))
    print("Movie genre with id = 1 :")
    print(get_movie_genre(1, movie_data))
|
<reponame>namph-sgn/Deep-learning-BLSTM<filename>flask_app/access_gcp_data.py
import feedparser
import pandas as pd
import numpy as np
from google.cloud import storage
from io import StringIO
def get_new_data():
    """Fetch the latest Ho Chi Minh City PM2.5 AQI readings.

    Parses the DOS AirNow RSS feed and returns a DataFrame indexed by
    (site_id, time) with the hourly AQI value, its category label, a
    1-based numeric category code, and a zeroed 'Continous length' column.
    """
    def categorize_AQI(AQI_data):
        """
        Input: Series of AQI_values
        Output: Series of AQI category
        7 categories [Good, Moderate, Unhealthy for Sensitive, Unhealthy, Very Unhealthy, Hazardous, Out of AQI]
        range of categories [0-50, 51-100, 101-150, 151-200, 201-300, 301-500, >500]
        """
        limits = [-1, 50, 100, 150, 200, 300, 500, np.inf]
        names = ["Good", "Moderate", "Unhealthy for Sensitive",
                 "Unhealthy", "Very Unhealthy", "Hazardous", "Beyond AQI"]
        return pd.cut(AQI_data, bins=limits, labels=names)

    feed_url = "http://dosairnowdata.org/dos/RSS/HoChiMinhCity/HoChiMinhCity-PM2.5.xml"
    parsed_feed = feedparser.parse(feed_url)
    raw = pd.DataFrame.from_dict(parsed_feed, orient='index')
    readings = pd.DataFrame.from_dict(raw.loc['entries', :].values[0])
    readings = readings[['title', 'aqi']]
    readings.rename(columns={'title': 'time', 'aqi': 'AQI_h'}, inplace=True)
    readings = readings.astype({'time': 'datetime64[ns]', 'AQI_h': 'float'})
    # Station 49 is the HCMC consulate monitor this feed reports for.
    readings['site_id'] = 49
    readings.set_index(['site_id', 'time'], inplace=True)
    readings['AQI_h_label'] = categorize_AQI(readings['AQI_h'])
    readings['AQI_h_I'] = readings['AQI_h_label'].cat.codes + 1
    readings['Continous length'] = 0
    return readings
def get_data_from_bucket_as_dataframe(filename="past_data.csv"):
    """Read blob ``filename`` from the model bucket into a DataFrame.

    Returns None when the blob does not exist.

    Idiom fix: ``if not blob.exists()`` replaces ``== False``.
    """
    bucket_name = "deep_learning_model_bucket"
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    blob = bucket.blob(filename)
    if not blob.exists():
        return None
    csv_buffer = StringIO(blob.download_as_text())
    return pd.read_csv(csv_buffer, sep=",", header=0, index_col=False)
def concat_past_and_new_data():
    """Append the newly fetched AQI rows to the stored history.

    Returns only the new data when no history blob exists yet; otherwise
    returns the history plus any new rows strictly after its last hour.

    Bug fix: the original called ``astype``/``set_index`` on ``past_data``
    *before* checking it for None, so a missing bucket file crashed with
    AttributeError instead of falling back to the new data.
    """
    idx = pd.IndexSlice
    past_data = get_data_from_bucket_as_dataframe(filename="past_data.csv")
    new_data = get_new_data()
    if past_data is None:
        return new_data
    past_data = past_data.astype({'time': 'datetime64[ns]', 'AQI_h': 'float'})
    past_data.set_index(['site_id', 'time'], inplace=True)
    # Only take new rows starting one hour after the stored history ends.
    max_time_past = past_data.index.get_level_values(
        1).max() + pd.Timedelta(hours=1)
    max_time_new = new_data.index.get_level_values(1).max()
    past_data = pd.concat(
        [past_data, new_data.loc[idx[:, max_time_past:max_time_new], :]])
    return past_data
def concat_past_and_new_prediction(new_prediction):
    """Append ``new_prediction`` rows to the stored prediction history.

    Returns only ``new_prediction`` when no history blob exists;
    otherwise appends rows newer than the stored history's last hour.

    Fix vs. the original: removed a leftover debug ``print`` that dumped
    the entire past DataFrame to stdout on every call.
    """
    idx = pd.IndexSlice
    past_data = get_data_from_bucket_as_dataframe(filename="past_prediction.csv")
    if past_data is None:
        return new_prediction
    past_data = past_data.astype({'time': 'datetime64[ns]', 'AQI_h': 'float'})
    past_data.set_index(['time'], inplace=True)
    max_time_past = past_data.index.max()
    max_time_new = new_prediction.index.max()
    if max_time_past != max_time_new:
        max_time_past = max_time_past + pd.Timedelta(hours=1)
    past_data = pd.concat(
        [past_data, new_prediction.loc[idx[max_time_past:max_time_new], :]])
    return past_data
def delete_past_data_from_bucket(delete_file_name="past_data.csv"):
    """Delete ``delete_file_name`` from the model bucket if present.

    Returns "Deleted" when a blob was removed, None otherwise.
    """
    client = storage.Client()
    blob = client.bucket("deep_learning_model_bucket").blob(delete_file_name)
    if blob.exists():
        blob.delete()
        return "Deleted"
def create_new_file_in_bucket(upload_file=None):
    """Upload a local file to the model bucket under the same name.

    Defaults to 'test.csv' when no filename is given; returns "Created".
    """
    if upload_file is None:
        upload_file = 'test.csv'
    client = storage.Client()
    blob = client.bucket("deep_learning_model_bucket").blob(upload_file)
    result = blob.upload_from_filename(upload_file)
    print(result)
    return "Created"
|
<gh_stars>1-10
import boto3
import os
import json
import logging
from sqs import Sqs
from sitewise_asset import SitewiseAsset
from sitewise_assets_cache import SitewiseAssetsCache
from association_converter import AssociationConverter
from sitewise_integration_points import SitewiseIntegrationPoints
# Root logger level comes from the LOG_LEVEL env var (default INFO).
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.environ.get("LOG_LEVEL", "INFO").upper()))
# Module-level clients/caches so warm Lambda invocations reuse them.
assets_cache = SitewiseAssetsCache(os.environ['DYNAMO_ASSETS_TABLE_NAME'], os.environ['AWS_REGION'])
integration_points_cache = SitewiseIntegrationPoints(os.environ['DYNAMO_INTEGRATION_POINTS_TABLE_NAME'], os.environ['AWS_REGION'])
sitewise = SitewiseAsset()
sqs = Sqs(os.environ['ASSETS_TO_UPDATE_QUEUE_URL'], int(os.environ.get("BATCH_SIZE", 10)), os.environ["AWS_REGION"])
association_converter = AssociationConverter(assets_cache, sitewise)
def get_cache_ids(event, integration_points):
    """Derive child/parent asset-cache keys for a lifecycle event.

    Returns a dict with 'child' and 'parent' keys, or None when the event
    is not a lifecycle event, the entity type is unknown, or (for
    customer_updated) no matching parent operator exists in
    ``integration_points``.
    """
    if event['type'] != 'lifecycle':
        return None
    entity_type = event['reading']['et']
    entity_id = event['reading']['id']
    if entity_type == 'operator_updated':
        # Operators hang directly off the fixed root asset.
        return {'child': f"operator-{entity_id}", 'parent': 'root-urban.io'}
    if entity_type == 'customer_updated':
        owner_ids = event['metadata']['ref']['o']
        parents = [point for point in integration_points if point['Id'] in owner_ids]
        if parents:
            return {'child': f"customer-{entity_id}",
                    'parent': f"operator-{parents[0]['Id']}"}
        return None
    if entity_type == 'location_updated':
        return {'child': f"location-{entity_id}",
                'parent': f"customer-{event['metadata']['ref']['c']}"}
    if entity_type == 'category_updated':
        return {'child': f"category-{entity_id}",
                'parent': f"location-{event['metadata']['ref']['l']}"}
    if entity_type == 'device_updated':
        return {'child': f"device-{entity_id}",
                'parent': f"category-{event['reading']['device_category']}-{event['metadata']['ref']['l']}"}
    return None
def process_event(event, integration_points):
    """Associate a child asset with its parent for one lifecycle event.

    No-op when the event yields no usable cache ids or the child asset
    is not cached; logs an error when the parent asset is missing.

    Fix vs. the original: ``logger.warn`` (deprecated alias) replaced
    with ``logger.warning``.
    """
    cache_ids = get_cache_ids(event, integration_points)
    if not (cache_ids and cache_ids.get('child') and cache_ids.get('parent')):
        return
    child_asset = assets_cache.get(cache_ids.get('child'))
    parent_asset = assets_cache.get(cache_ids.get('parent'))
    if child_asset is None:
        logger.warning(f"Asset with id={cache_ids.get('child')} isn't found.")
        return
    if parent_asset is None:
        logger.error(f"No parent asset with id={cache_ids.get('parent')} found for {cache_ids.get('child')}")
        return
    association_converter.associate_asset(parent_asset, child_asset)
def handler(event, context):
    """Lambda entry point: process each queued SQS record.

    Every record body is parsed as a lifecycle event, associated via
    process_event, and forwarded to the update queue in one batch.
    """
    logger.debug('event is {}'.format(event))
    integration_points = integration_points_cache.get_all()
    assets_to_update = []
    try:
        for record in event['Records']:
            # Batch by 10
            lifecycle_event = json.loads(record["body"])
            logger.info(f"Message: {lifecycle_event}")
            process_event(lifecycle_event, integration_points)
            assets_to_update.append(lifecycle_event)
        sqs.send_messages(assets_to_update)
    except Exception as e:
        # Send some context about this error to Lambda Logs
        logger.error(e)
        # throw exception, do not handle. Lambda will make message visible again.
        raise e
|
<gh_stars>1-10
# ------------------------------------------------------------------------------
# Program: The LDAR Simulator (LDAR-Sim)
# File: OGI company
# Purpose: Company managing OGI agents.
#
# Copyright (C) 2018-2020 <NAME>, <NAME>, <NAME>, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as published
# by the Free Software Foundation, version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
# You should have received a copy of the MIT License
# along with this program. If not, see <https://opensource.org/licenses/MIT>.
#
# ------------------------------------------------------------------------------
from OGI_crew import OGI_crew
import numpy as np
class OGI_company:
    """Company managing OGI (optical gas imaging) survey crews.

    Holds the shared simulation state, precomputed weather-based
    deployment windows, per-site survey counters, and the crew agents.
    """

    def __init__(self, state, parameters, config, timeseries):
        """
        Initialize a company to manage the OGI crews (e.g. a contracting company).
        """
        self.name = 'OGI'
        self.state = state
        self.parameters = parameters
        self.config = config
        self.timeseries = timeseries
        self.crews = []
        # Precompute, per site and timestep, whether weather permits a survey.
        self.deployment_days = self.state['weather'].deployment_days(
            method_name=self.name,
            start_date=self.state['t'].start_date,
            start_work_hour=8,  # Start hour in day
            consider_weather=parameters['consider_weather'])
        # Company-level daily timeseries (availability, cost, redundancy, coverage).
        self.timeseries['OGI_prop_sites_avail'] = []
        self.timeseries['OGI_cost'] = np.zeros(self.parameters['timesteps'])
        self.timeseries['OGI_redund_tags'] = np.zeros(self.parameters['timesteps'])
        self.timeseries['OGI_sites_visited'] = np.zeros(self.parameters['timesteps'])

        # Additional variable(s) for each site
        for site in self.state['sites']:
            site.update({'OGI_t_since_last_LDAR': 0})
            site.update({'OGI_surveys_conducted': 0})
            site.update({'attempted_today_OGI?': False})
            site.update({'surveys_done_this_year_OGI': 0})
            site.update({'OGI_missed_leaks': 0})

        # Initialize 2D matrices to store deployment day (DD) counts and MCBs
        self.DD_map = np.zeros(
            (len(self.state['weather'].longitude),
             len(self.state['weather'].latitude)))
        self.MCB_map = np.zeros(
            (len(self.state['weather'].longitude),
             len(self.state['weather'].latitude)))

        # Initialize the individual OGI crews (the agents)
        for i in range(config['n_crews']):
            self.crews.append(OGI_crew(state, parameters, config,
                                       timeseries, self.deployment_days, id=i + 1))

        return

    def find_leaks(self):
        """
        The OGI company tells all the crews to get to work.

        Also advances per-site daily counters, resets annual survey
        counts on January 1st, and records the proportion of sites
        whose weather allowed deployment today.
        """
        for i in self.crews:
            i.work_a_day()

        # Update method-specific site variables each day
        for site in self.state['sites']:
            site['OGI_t_since_last_LDAR'] += 1
            site['attempted_today_OGI?'] = False

        # Reset each site's annual survey counter on January 1st.
        if self.state['t'].current_date.day == 1 and self.state['t'].current_date.month == 1:
            for site in self.state['sites']:
                site['surveys_done_this_year_OGI'] = 0

        # Calculate proportion sites available
        available_sites = 0
        for site in self.state['sites']:
            if self.deployment_days[site['lon_index'],
                                    site['lat_index'],
                                    self.state['t'].current_timestep]:
                available_sites += 1

        prop_avail = available_sites / len(self.state['sites'])
        self.timeseries['OGI_prop_sites_avail'].append(prop_avail)

        return

    def site_reports(self):
        """
        Writes site-level deployment days (DDs) and maximum condition blackouts
        (MCBs) for each site.
        """
        for site in self.state['sites']:
            site['OGI_prop_DDs'] = self.DD_map[site['lon_index'], site['lat_index']]
            site['OGI_MCB'] = self.MCB_map[site['lon_index'], site['lat_index']]

        return
|
#!/usr/bin/env python
# encoding: utf-8
#
# virtualenv-burrito.py — manages the Virtualenv Burrito environment
#
__version__ = "2.0.5"
import sys
import os
import csv
import urllib
import urllib2
import shutil
import glob
import tempfile
# Prefer hashlib (2.5+); fall back to the deprecated sha module on older Pythons.
try:
    import hashlib
    sha1 = hashlib.sha1
except ImportError:  # Python < 2.5
    import sha
    sha1 = sha.new
# Prefer subprocess for shelling out; fall back to os.system on Python < 2.4.
try:
    import subprocess
    sh = lambda cmd: subprocess.call(cmd, shell=True)
except ImportError:  # Python < 2.4
    sh = os.system
# Install locations and the canonical upstream version manifest.
NAME = os.path.basename(__file__)
VENVBURRITO = os.path.join(os.environ['HOME'], ".venvburrito")
VENVBURRITO_LIB = os.path.join(VENVBURRITO, "lib")
VERSIONS_URL = "https://raw.github.com/brainsik/virtualenv-burrito/master/versions.csv"
def get_installed_version(name):
    """Return the currently installed version of `name` as a string.

    Scans VENVBURRITO_LIB/python for `name`-X.Y.Z[-pyN.N].egg files and
    returns the highest version found, or None when no egg is present.

    Robustness fix: the version tuple is materialized with ``list(...)``;
    behavior is unchanged on Python 2, but a bare ``map`` object cannot
    be compared by ``max()`` on Python 3.
    """
    pkg = os.path.join(VENVBURRITO_LIB, "python", name)
    versions = []
    for egg in glob.glob("%s-*.egg" % pkg):
        versions.append(list(map(int, egg.split('-')[1].split('.'))))
    if versions:
        return ".".join(map(str, max(versions)))
def download(url, digest):
    """Returns a filename containing the contents of the URL.
    Downloads and checks the SHA1 of the data matches the given hex digest.

    Exits the process (SystemExit(1)) on download failure or digest
    mismatch; the mismatched file is removed before exiting.
    """
    name = url.split('/')[-1]
    print " Downloading", name, "…"
    try:
        filename = urllib.urlretrieve(url)[0]
    except Exception, e:
        sys.stderr.write("\nERROR - Unable to download %s: %s %s\n"
                         % (url, type(e), str(e)))
        raise SystemExit(1)
    # Verify the payload's SHA1 before trusting it.
    filehash = sha1()
    f = open(filename, 'rb')
    filehash.update(f.read())
    f.close()
    if filehash.hexdigest() == digest:
        return filename
    # Hash mismatch: warn the user, clean up the download, and abort.
    print ("\nThe file %s didn't look like we expected.\n"
           "It may have been moved or tampered with. You should tell me:"
           " @brainsik." % name)
    try:
        os.remove(filename)
    except OSError:
        pass
    raise SystemExit(1)
def drop_startup_sh():
    """Write $VENVBURRITO/startup.sh, the snippet users source from their
    shell profile to configure PATH/PYTHONPATH and virtualenvwrapper."""
    # create the startup script
    script = """
export WORKON_HOME="$HOME/.virtualenvs"
export PIP_VIRTUALENV_BASE="$WORKON_HOME"
export PIP_RESPECT_VIRTUALENV=true
venvb_py_path="$HOME/.venvburrito/lib/python"
if [ -z "$PYTHONPATH" ]; then
export PYTHONPATH="$venvb_py_path"
elif ! echo $PYTHONPATH | grep -q "$venvb_py_path"; then
export PYTHONPATH="$venvb_py_path:$PYTHONPATH"
fi
venvb_bin_path="$HOME/.venvburrito/bin"
if ! echo $PATH | grep -q "$venvb_bin_path"; then
export PATH="$venvb_bin_path:$PATH"
fi
. $HOME/.venvburrito/bin/virtualenvwrapper.sh
if ! [ -e $HOME/.venvburrito/.firstrun ]; then
echo
echo "To create a virtualenv, run:"
echo "mkvirtualenv <cool-name>"
touch $HOME/.venvburrito/.firstrun
fi
"""
    startup_sh = open(os.path.join(VENVBURRITO, "startup.sh"), 'w')
    startup_sh.write(script)
    startup_sh.close()
def selfupdate(src):
    """Copy src to our destination and exec the new script.

    Does not return: the current process is replaced (os.execl) by the
    freshly installed script, re-invoked as `upgrade selfupdated`.
    """
    dst = os.path.join(VENVBURRITO, "bin", "virtualenv-burrito")
    shutil.copyfile(src, dst)
    os.remove(src)
    os.chmod(dst, 0755)
    print " Restarting!\n"
    sys.stdout.flush()
    os.execl(dst, "virtualenv-burrito", "upgrade", "selfupdated")
def fix_bin_virtualenv():
    """Untie the virtualenv script from a specific version of Python.

    Rewrites bin/virtualenv so its shebang line is `#!/usr/bin/env python`
    while keeping the rest of the script intact.

    Bug fix: the original held a read handle open while re-opening the
    same file with 'w' (which truncates it immediately), so the rest of
    the script could be lost or corrupted depending on read buffering.
    Read everything first, then rewrite.
    """
    bin_virtualenv = os.path.join(VENVBURRITO, "bin", "virtualenv")
    fi = open(bin_virtualenv, 'r')
    fi.readline()  # skip the hash bang
    body = fi.read()
    fi.close()
    fo = open(bin_virtualenv, 'w')
    fo.write("#!/usr/bin/env python\n")
    fo.write(body)
    fo.close()
def upgrade_package(filename, name, version):
    """Install Python package in tarball `filename`.

    Unpacks into a temp dir and runs the package's setup.py; distribute
    is egg-installed to avoid touching the system site-packages.
    """
    # Remember the cwd; it may already have been deleted under us.
    try:
        owd = os.getcwd()
    except OSError:
        owd = None

    realname = "%s-%s" % (name, version)
    print " Installing", realname

    # Make sure setup.py sees the burrito-private package directory.
    os.environ['PYTHONPATH'] = os.path.join(VENVBURRITO_LIB, "python")
    tmp = tempfile.mkdtemp(prefix='venvburrito.')
    try:
        os.chdir(tmp)
        sh("tar xfz %s" % filename)
        os.chdir(os.path.join(tmp, realname))
        if name == 'distribute':
            # build and install the egg to avoid patching the system
            sh("%s setup.py bdist_egg" % sys.executable)
            egg = glob.glob(os.path.join(os.getcwd(), "dist", "*egg"))[0]
            sh("%s setup.py easy_install --exclude-scripts --install-dir %s %s >/dev/null"
               % (sys.executable, os.path.join(VENVBURRITO_LIB, "python"), egg))
        else:
            sh("%s setup.py install --home %s --install-scripts %s --no-compile >/dev/null"
               % (sys.executable, VENVBURRITO, os.path.join(VENVBURRITO, "bin")))
        # These two ship a bin/virtualenv whose shebang must stay generic.
        if name in ['virtualenv', 'virtualenvwrapper']:
            fix_bin_virtualenv()
    finally:
        os.chdir(owd or VENVBURRITO)
        shutil.rmtree(tmp)
def check_versions(selfcheck=True):
    """Return packages which can be upgraded.

    Fetches the upstream CSV manifest and compares each
    (name, version, url, digest) row against what is installed. Rows for
    this script itself are keyed '_virtualenv-burrito' and only
    considered when `selfcheck` is true. Exits on network failure.
    """
    try:
        fp = urllib2.urlopen(VERSIONS_URL)
    except Exception, e:
        sys.stderr.write("\nERROR - Couldn't open versions file at %s: %s %s\n"
                         % (VERSIONS_URL, type(e), str(e)))
        raise SystemExit(1)

    reader = csv.reader(fp)
    has_update = []
    for name, version, url, digest in reader:
        if name == '_virtualenv-burrito':
            # Manifest row describing this script itself.
            if not selfcheck:
                continue
            name = NAME
            current = __version__
        else:
            current = get_installed_version(name)
        if not current or version != current:
            print "+ %s will upgrade (%s -> %s)" % (name, current, version)
            has_update.append((name, version, url, digest))
            if name == NAME:
                # A self-update restarts the process, so stop collecting.
                break
    return has_update
def handle_upgrade(selfupdated=False, firstrun=False):
    """Handles the upgrade command.

    `selfupdated` is set on the re-exec after a self-update (skips the
    self-version check); `firstrun` is set on initial install so
    startup.sh gets written.
    """
    if os.path.exists(VENVBURRITO_LIB):
        if not os.path.isdir(os.path.join(VENVBURRITO_LIB, "python")):
            # v1 layout detected: wipe it and recreate the v2 layout.
            print "! Removing old v1 packages and doing fresh v2 install"
            shutil.rmtree(VENVBURRITO_LIB)
            os.mkdir(VENVBURRITO_LIB)
            os.mkdir(os.path.join(VENVBURRITO_LIB, "python"))

    has_update = check_versions(selfupdated == False)

    # update other packages
    for update in has_update:
        name, version, url, digest = update
        filename = download(url, digest)
        try:
            if name == NAME:
                print "* Upgrading ourself …"
                selfupdate(filename)  # calls os.exec
            else:
                print "* Upgrading %s …" % name
                upgrade_package(filename, name, version)
        finally:
            # Always clean up the downloaded tarball.
            if filename and os.path.exists(filename):
                os.remove(filename)

    # startup.sh needs to be created after selfupdate AND on install
    if selfupdated or firstrun:
        drop_startup_sh()

    if selfupdated:
        print "\nTo finish the upgrade, run this:"
        print "source %s/startup.sh" % VENVBURRITO
    elif not has_update:
        print "Everything is up to date."
        return
    else:
        print "\nFin."
def usage(returncode=1):
    """Print the usage line and exit with `returncode`."""
    print "Use like this:\n\t%s upgrade" % NAME
    raise SystemExit(returncode)
def main(argv):
    """Dispatch the command line: `upgrade`/`update` (with optional
    `selfupdated`/`no-selfcheck`/`firstrun` modifier) or help."""
    if len(argv) < 2:
        usage()

    if argv[1] in ['help', '--help', '-h', '-?']:
        usage(returncode=0)

    if argv[1] in ['upgrade', 'update']:
        if len(argv) > 2:
            if argv[2] in ['selfupdated', 'no-selfcheck']:
                handle_upgrade(selfupdated=True)
            elif argv[2] == 'firstrun':
                handle_upgrade(firstrun=True)
            else:
                usage()
        else:
            handle_upgrade()
    else:
        usage()

if __name__ == '__main__':
    main(sys.argv)
|
<gh_stars>10-100
from neopixel import *
import atexit
import colorsys
# LED strip configuration:
LED_COUNT = 64       # Number of LED pixels.
LED_PIN = 18         # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5          # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False   # True to invert the signal (when using NPN transistor level shift)

# Driver for the 8x8 WS2812 matrix; begin() must run before any pixel call.
ws2812 = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
ws2812.begin()

"""
Store the rotation of UnicornHat, defaults to
0 which places 0,0 on the top left with the B+
HDMI port facing downwards
"""
_rotation = 0

"""
Store a map of pixel indexes for
translating x, y coordinates.
"""
# NOTE(review): this name shadows the builtin `map`; the lookup functions
# below depend on it, so it is left unchanged here.
map = [
    [7 , 6 , 5 , 4 , 3 , 2 , 1 , 0 ],
    [8 , 9 , 10, 11, 12, 13, 14, 15],
    [23, 22, 21, 20, 19, 18, 17, 16],
    [24, 25, 26, 27, 28, 29, 30, 31],
    [39, 38, 37, 36, 35, 34, 33, 32],
    [40, 41, 42, 43, 44, 45, 46, 47],
    [55, 54, 53, 52, 51, 50, 49, 48],
    [56, 57, 58, 59, 60, 61, 62, 63]
]
def _clean_shutdown():
    """Registered at exit to ensure ws2812 cleans up after itself
    and all pixels are turned off.
    """
    # off() clears the buffer and pushes it to the hardware.
    off()
def rotation(r=0):
    """Set the display rotation.

    Accepts 0, 90, 180 or 270 (degrees). Returns True on success and
    raises ValueError for any other value.
    """
    global _rotation
    if r not in (0, 90, 180, 270):
        raise ValueError('Rotation must be 0, 90, 180 or 270 degrees')
    _rotation = r
    return True
def brightness(b=0.2):
    """Set the display brightness between 0.0 and 1.0
    0.2 is highly recommended, UnicornHat can get painfully bright!"""
    if not 0 <= b <= 1:
        raise ValueError('Brightness must be between 0.0 and 1.0')
    ws2812.setBrightness(int(b * 255.0))
def get_brightness():
    """Get the display brightness value
    Returns a float between 0.0 and 1.0
    """
    # Hardware query is disabled; this always reports 0
    # (was: ws2812.getBrightness()).
    return 0
def clear():
    """Zero every pixel in the buffer (does not push to the hardware)."""
    for index in range(64):
        ws2812.setPixelColorRGB(index, 0, 0, 0)
def off():
    """Clear the buffer and immediately update UnicornHat
    Turns off all pixels."""
    clear()
    # Push the cleared buffer to the LEDs right away.
    show()
def get_index_from_xy(x, y):
    """Translate display coordinates into a WS2812 buffer index.

    Validates 0 <= x, y <= 7, flips the y axis, applies the configured
    rotation, and resolves the result through the wiring map.
    """
    if not 0 <= x <= 7:
        raise ValueError('X position must be between 0 and 7')
    if not 0 <= y <= 7:
        raise ValueError('Y position must be between 0 and 7')
    y = 7 - y
    # _rotation is constrained to these four values by rotation().
    row, col = {
        0: (x, y),
        90: (y, 7 - x),
        180: (7 - x, 7 - y),
        270: (7 - y, x),
    }[_rotation]
    return map[row][col]
def set_pixel_hsv(x, y, h, s, v):
    """Set a single pixel from an HSV colour triple."""
    index = get_index_from_xy(x, y)
    if index is not None:
        rgb = colorsys.hsv_to_rgb(h, s, v)
        r, g, b = [int(channel * 255) for channel in rgb]
        ws2812.setPixelColorRGB(index, r, g, b)
def set_pixel(x, y, r, g, b):
    """Set a single pixel to an RGB colour."""
    index = get_index_from_xy(x, y)
    if index is None:
        return
    ws2812.setPixelColorRGB(index, r, g, b)
def get_pixel(x, y):
    """Return the (r, g, b) ints of a single pixel (None if unmapped)."""
    index = get_index_from_xy(x, y)
    if index is not None:
        colour = ws2812.getPixelColorRGB(index)
        return int(colour.r), int(colour.g), int(colour.b)
def set_pixels(pixels):
    """Write an 8x8 grid of (r, g, b) tuples (indexed [y][x]) to the buffer."""
    for y in range(8):
        row = pixels[y]
        for x in range(8):
            r, g, b = row[x]
            set_pixel(x, y, r, g, b)
def get_pixels():
    """Get the RGB value of all pixels as an 8x8 2d array of (r, g, b) tuples."""
    return [[get_pixel(x, y) for x in range(8)] for y in range(8)]
def show():
    """Update UnicornHat with the contents of the display buffer"""
    ws2812.show()

# Make sure all pixels are switched off when the process exits.
atexit.register(_clean_shutdown)
|
<filename>apps/breakfast/Sensorbed/version2/MIB_UART_ID.py
#!/usr/bin/env python
import socket, asyncore, asynchat, struct, array, signal, fcntl, os, time, tos_MIBUART
__all__ = ['NSLUListener', 'ServerToMoteListener', 'UserChannelListener', 'ReprogramListener']
# Listening address and per-service ports.
HOST = '0.0.0.0'
REPROG_PORT = 16462
NSLU_PORT = 16461
STREAM_PORT = 16463
MAX_TOS_LEN = 135
# Shared state, keyed by gateway MAC address.
nslus = {}  # nslus[mac] = queued (cmd, param) tuples awaiting send
user_stream = {} # user_stream[mac] = {'to user':[], 'to mote': []}
ui = None
status = {}  # status[ip][mac] = [0,0]; reset on a successful operation
mac2id = {}
# Load the MAC -> node id table from the 'map' file (one "mac node" per line).
f = file('map')
for line in f.readlines():
    (mac, node) = line.split()
    mac2id[mac] = node
f.close()
class NSLUListener(asyncore.dispatcher):
    """Accepts NSLU gateway connections and hands each off to an NSLUHandler."""
    def __init__(self):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((HOST, NSLU_PORT))
        self.listen(1)
    def handle_accept(self):
        (conn, addr) = self.accept()
        NSLUHandler(conn, addr)
class NSLUHandler(asyncore.dispatcher):
    """Per-connection handler for one NSLU gateway socket.

    Reads status/result packets from the gateway and pushes queued
    commands (erase/reset/reprogram) back out to it.
    """
    # Single-byte wire opcodes for the simple commands.
    cmdMap = {'erase':2, 'reset':3}
    def __init__(self, conn, addr): # addr = (IP, client port)
        self.addr = addr
        self.mac = None        # gateway MAC, learned from the first status packet
        self.writing = ''      # unsent remainder of an in-flight payload
        self.chanListen = None # per-mote user channel listener
        print 'New NSLU: ', self.addr
        asyncore.dispatcher.__init__(self, conn)
    def handle_read(self):
        """Consume one packet: type 0 = status/identify, type 1 = op result."""
        #print "DEBUG NSLUHandler.handle_read"
        packetType = self.recv(1)
        if packetType == '': return
        packetType = ord(packetType)
        # periodic status message (packetLen = 0) or data from mote
        if packetType == 0:
            (mac) = struct.unpack('!8s', self.recv(8))[0]
            if not self.mac and mac in mac2id:
                # First identification on this connection: set up queues.
                self.mac = mac
                print 'New mac:', self.mac, self.addr
                nslus[self.mac] = []
                user_stream[self.mac] = {'to mote':[]}
                self.chanListen = UserChannelListener(self.mac)
            elif mac != self.mac:
                # The reported MAC changed; rebind the shared queues.
                del nslus[mac]
                del user_stream[mac]
                self.mac = mac
                nslus[self.mac] = []
                user_stream[self.mac] = {'to mote':[]}
            if self.addr[0] not in status:
                status[self.addr[0]] = {}
            if self.mac not in status[self.addr[0]]:
                status[self.addr[0]][self.mac] = [0,0]
        elif packetType == 1:
            error = struct.unpack('B', self.recv(1))[0]
            if error == 1:
                print 'Client:', self.addr, self.mac, 'operation successful'
                status[self.addr[0]][self.mac] = [0,0]
            else:
                print 'Client:', self.addr, self.mac, 'operation failed'
    def handle_error(self):
        print 'unhandled error'
    def writable(self):
        # Only request write events when a payload or command is pending.
        return self.writing or (self.mac and (self.mac in nslus) and nslus[self.mac])
    def handle_write(self):
        #print 'DEBUG NSLUHandler handle_write'
        if self.writing:
            # Continue sending a partially-written payload.
            s = self.send(self.writing)
            print 'DEBUG: send', s
            self.writing = self.writing[s:]
        else:
            if self.mac and (self.mac in nslus) and nslus[self.mac]:
                (cmd, param) = nslus[self.mac][0]
                print 'cmd', cmd
                if cmd in NSLUHandler.cmdMap:
                    self.send(chr(NSLUHandler.cmdMap[cmd]))
                elif cmd in ['reprogram', 'reprogram-quick']:
                    # Header = opcode 1 + payload length, then the image bytes;
                    # any unsent remainder is kept in self.writing.
                    print 'Sending program %d bytes to %s (%s)'%(len(param), self.addr, self.mac)
                    print param
                    self.writing = param
                    self.send(struct.pack('!BI', 1, len(param)))
                    s = self.send(param)
                    print 'DEBUG: send', s
                    self.writing = self.writing[s:]
                del nslus[self.mac][0]
    def handle_close(self):
        print 'Remove NSLU:', self.addr
        self.close()
        if self.mac:
            del nslus[self.mac]
            del user_stream[self.mac]
            # Drop this mote's status; remove the host entry when it empties.
            if len(status[self.addr[0]]) >= 2:
                del status[self.addr[0]][self.mac]
            else:
                del status[self.addr[0]]
            self.chanListen.close()
class ServerToMoteListener(asyncore.dispatcher):
    """Accepts mote data-stream connections and hands each to a
    ServerToMoteHandler."""
    def __init__(self):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((HOST, STREAM_PORT))
        self.listen(1)
    def handle_accept(self):
        (conn, addr) = self.accept()
        #print 'connection accepted'
        ServerToMoteHandler(conn, addr)
class ServerToMoteHandler(asyncore.dispatcher):
    """Logs mote console output line-by-line and relays queued user data
    back to the mote."""
    def __init__(self, conn, addr):
        self.addr = addr
        self.mac = None      # mote MAC, received as the first 8 bytes
        self.id = 0          # node id resolved through mac2id
        self._hdlc= tos_MIBUART.HDLC()
        self.start = 0       # 0 until the MAC handshake has been consumed
        self.incoming = ''   # buffer holding a trailing partial line
        self.inprocessing = ''
        asyncore.dispatcher.__init__(self, conn)
    def handle_connect(self):
        print 'DEBUG: connected to', self.addr, self.port
    def writable(self):
        # Writable only when user data is queued for this mote.
        return (self.mac and (self.mac in user_stream) and \
                ('to mote' in user_stream[self.mac]) and user_stream[self.mac]['to mote'])
    def printfHook(self, packet):
        """Log TinyOS printf packets (AM type 100) and swallow them;
        other packets pass through unchanged."""
        if packet == None:
            return
        if packet.type == 100:
            s = "".join([chr(i) for i in packet.data]).strip('\0')
            lines = s.split('\r')
            for line in lines:
                if line: print >>logfile, '%.2f ID:%s'%(time.time(), self.id),\
                   "PRINTF: ", line
            packet = None # No further processing for the printf packet
        return packet
    def handle_read(self):
        #print "DEBUG ServerToMoteHandler handle_read"
        if self.start == 0:
            # The first 8 bytes of the stream identify the mote.
            (mac) = struct.unpack('!8s', self.recv(8))
            self.mac = mac[0]
            self.id = mac2id[self.mac]
            self.start = 1
        else:
            #(p, op) = self.am.read()
            data = self.recv(1024)
            if data == '':
                self.close()
                print 'DEBUG: network closed by ', self.addr
                return
            self.incoming += data
            #print data
            #if self.id == '25':
            #    print 'ID:', self.id, len(self.incoming)
            #    print [ord(self.incoming[i]) for i in range(len(self.incoming))]
            #    lines = self.incoming.split('\r\n')
            #lines = self.incoming.split('\r\n')
            lines = self.incoming.split('\n')
            # Serialize log writes across all handlers sharing the file.
            fcntl.lockf(logfile.fileno(), fcntl.LOCK_EX)
            for i in range(len(lines)-1):
                print >>logfile, time.time(), self.id, lines[i]
            logfile.flush()
            fcntl.lockf(logfile.fileno(), fcntl.LOCK_UN)
            # Keep the trailing partial line for the next read.
            self.incoming = lines.pop(-1)
    def handle_write(self):
        #print 'DEBUG ServerToMoteHandler handle_write'
        data = user_stream[self.mac]['to mote'].pop()
        self.send(data)
    def handle_close(self):
        print 'DEBUG: connection closed by', self.addr
        self.close()
class UserChannelListener(asyncore.dispatcher):
    """Per-mote listener giving a user direct stream access to one mote.

    Listens on a port derived from the node id ('17' zero-padded to a
    3-digit id, e.g. id 25 -> port 17025). Only one user channel is kept
    open at a time; a new accept closes the previous handler.
    """
    def __init__(self, mac):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.mac = mac
        self.chanHan = None      # the single active UserChannelHandler
        if mac in mac2id:
            nodeid = mac2id[mac]
            UPORT = '17'
            # node id be 3-digit: zero-pad between the '17' prefix and id.
            for i in range(3-len(nodeid)):
                UPORT = UPORT + '0'
            UPORT = UPORT + nodeid
            try:
                self.bind((HOST, int(UPORT)))
                self.listen(1)
            except socket.error, msg:
                print 'ERROR: binding or listening'
                print 'ERROR: close connection'
                self.close()
    def handle_error(self):
        print 'ERROR: unknown error raised'
        self.close()
        if self.chanHan != None: self.chanHan.close()
    def handle_accept(self):
        (conn, addr) = self.accept()
        # Only one user at a time: displace any existing channel.
        if self.chanHan != None: self.chanHan.close()
        self.chanHan = UserChannelHandler(conn, addr, self.mac)
    def handle_close(self):
        self.close()
        if self.chanHan != None: self.chanHan.close()
        # Drop the mote->user queue so no stale data is delivered later.
        if 'to user' in user_stream[self.mac]:
            del user_stream[self.mac]['to user']
class UserChannelHandler(asyncore.dispatcher):
    """Bridges one user connection to one mote via the user_stream queues.

    Data read from the user is queued under 'to mote'; data queued under
    'to user' is written back to the user.
    """
    def __init__(self, conn, addr, mac):
        self.mac = mac
        self.addr = addr
        # (Re)create the mote->user queue for this session.
        user_stream[self.mac]['to user'] = []
        asyncore.dispatcher.__init__(self, conn)
    def writable(self):
        return (self.mac in user_stream) and ('to user' in user_stream[self.mac]) and user_stream[self.mac]['to user']
    def handle_read(self):
        #print "DEBUG UserChannelHandler handle_read"
        # stream from user to mote
        # Only buffer one pending payload at a time (simple backpressure).
        if len(user_stream[self.mac]['to mote']) == 0:
            user_stream[self.mac]['to mote'].append(self.recv(MAX_TOS_LEN))
    def handle_write(self):
        #print "DEBUG UserChannelHandler handle_write"
        self.send(user_stream[self.mac]['to user'].pop())
    def handle_close(self):
        if 'to user' in user_stream[self.mac]: del user_stream[self.mac]['to user']
        #print 'DEBUG: user at', self.addr[0],'closed the channel'
        self.close()
class ReprogramListener(asyncore.dispatcher):
    """Accepts UI connections on REPROG_PORT for reprogram/control commands."""
    def __init__(self):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((HOST, REPROG_PORT))
        self.listen(1)
    def handle_accept(self):
        (conn, addr) = self.accept()
        # Handler enforces the single-UI policy itself.
        ReprogramHandler(conn, addr)
class ReprogramHandler(asynchat.async_chat):
    """Line-oriented command channel for the (single) management UI.

    Commands: status/s, erase, reset, reprogram, reprogram-quick. Lines
    starting with ':' are treated as firmware image lines (Intel-hex
    style) and accumulated in self.file until a reprogram command arrives.
    Only one UI may be connected at a time (module-global `ui`).
    """
    def __init__(self, conn, addr):
        global ui
        if ui:
            # Enforce the single-UI policy: reject the newcomer.
            print 'Error: only one UI is supported'
            conn.send('Error: only one UI is supported\n')
            conn.close()
            return
        self.addr = addr
        print 'New UI:', self.addr
        self.set_terminator('\n')
        asynchat.async_chat.__init__(self, conn)
        self.buffer = []   # chunks of the current line
        self.file = []     # accumulated image lines for reprogram
        ui = self
    def collect_incoming_data(self, data):
        self.buffer.append(data)
    def found_terminator(self):
        global ui
        print 'Cmd(%s): %s'%(self.addr, self.buffer)
        if self.buffer == []: return
        self.buffer = ''.join(self.buffer)
        if self.buffer[0] == ':':
            # Image payload line: stash it and keep reading.
            self.file.append(self.buffer)
            self.buffer = []
            return
        v = self.buffer.split()
        cmd = v[0]
        params = v[1:]
        if cmd in ['s', 'status']:
            self.push('%s\n'%nslus.keys())
        elif cmd in ['erase', 'reset', 'reprogram', 'reprogram-quick']:
            c = params[0]
            print 'params', params
            if c not in nslus:
                # Allow addressing by node id: translate to a MAC via the
                # 'map' file (lines of "<mac> <node>").
                f = file('map')
                for line in f.readlines():
                    (mac, node) = line.split()
                    if c == node:
                        c = mac
                        break
                f.close()
            if c in nslus:
                # Queue the command for NSLUHandler.handle_write to send.
                if cmd == 'reprogram':
                    nslus[c].append((cmd, '\n'.join(self.file) + '\n'))
                elif cmd == 'reprogram-quick':
                    print 'DEBUG: reprogram-quick', c
                    nslus[c].append((cmd, '\n'.join(self.file) + '\n'))
                    self.close_when_done()
                else:
                    nslus[c].append((cmd, None))
            else:
                self.push('ERROR No such client.\n')
                ui = None
        else:
            self.push('ERROR Available commands are: status, erase, reset, reprogram\n')
        self.buffer = []
        # NOTE(review): closes the UI connection after every completed
        # command (pending push()ed replies may be dropped) -- confirm
        # this one-command-per-connection behaviour is intended.
        self.close()
    def handle_close(self):
        global ui
        print 'Remove UI:', self.addr
        self.close()
        ui = None
def sigalrm_handler(signum, frame):
    # Periodic (every 5 s, self-rearming via signal.alarm) dump of the
    # per-client read/write counters to the 'status' file, written under
    # an exclusive lock so external readers see a consistent snapshot.
    #print 'status:', status
    #print 'nslus:', nslus
    #print 'mac2id:', mac2id
    #print 'user stream:', user_stream
    f = file('status', 'w')
    fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
    for ip in sorted(status.keys()):
        for (mac, [reads, writes]) in status[ip].items():
            print >>f, ip, mac, mac2id.get(mac,0), reads, writes
    fcntl.lockf(f.fileno(), fcntl.LOCK_UN)
    f.close()
    #logfile.flush()
    # Re-arm the alarm for the next snapshot.
    signal.alarm(5)
if __name__ == '__main__':
    # Install the periodic status-dump handler and arm the first alarm.
    signal.signal(signal.SIGALRM, sigalrm_handler)
    signal.alarm(5)
    logfilename = 'logs/current'
    if not os.path.isdir('./logs'):
        os.mkdir('./logs')
    # Preserve a non-empty log left over from a previous (crashed) run.
    if os.path.exists(logfilename) and os.path.getsize(logfilename) != 0:
        incompleteFileName = 'logs/%.4f.incomplete'%(time.time())
        os.rename(logfilename, incompleteFileName)
    # Module-global logfile shared by all handlers (guarded with fcntl).
    logfile = file(logfilename, 'w')
    # Instantiate the three listeners; they register with asyncore.
    NSLUListener()
    ServerToMoteListener()
    ReprogramListener()
    asyncore.loop()
|
# -*- coding: utf-8 -*-
"""Functions for downloading and analysing data on MPs."""
# Imports ---------------------------------------------------------------------
import numpy as np
import pandas as pd
from . import combine
from . import constants
from . import core
from . import elections
from . import filter
from . import members
from . import utils
# Raw MPs queries -------------------------------------------------------------
def fetch_mps_raw():
    """Fetch key details for all MPs.

    Thin wrapper around members.fetch_members_raw, fixed to the House of
    Commons.
    """
    commons = constants.PDP_ID_HOUSE_OF_COMMONS
    return members.fetch_members_raw(house=commons)
def fetch_commons_memberships_raw():
    """Fetch Commons memberships for all MPs.

    Runs a SPARQL query against the Parliamentary Data Platform and
    returns the raw result set, one row per seat incumbency.
    """
    # NOTE: the doubled braces {{ }} are literal braces escaped for
    # str.format below; only {0} (the Commons house id) is substituted.
    commons_memberships_query = """
        PREFIX : <https://id.parliament.uk/schema/>
        PREFIX d: <https://id.parliament.uk/>
        SELECT DISTINCT
            ?person_id
            ?mnis_id
            ?given_name
            ?family_name
            ?display_name
            ?constituency_id
            ?constituency_name
            ?constituency_ons_id
            ?seat_incumbency_id
            ?seat_incumbency_start_date
            ?seat_incumbency_end_date
        WHERE {{
            # House constraint for the House of Commons
            BIND(d:{0} AS ?house)
            ?person_id :memberMnisId ?mnis_id;
                :personGivenName ?given_name ;
                :personFamilyName ?family_name ;
                <http://example.com/F31CBD81AD8343898B49DC65743F0BDF> ?display_name ;
                :memberHasParliamentaryIncumbency ?seat_incumbency_id .
            ?seat_incumbency_id a :SeatIncumbency ;
                :seatIncumbencyHasHouseSeat ?seat ;
                :parliamentaryIncumbencyStartDate ?seat_incumbency_start_date .
            OPTIONAL {{ ?seat_incumbency_id :parliamentaryIncumbencyEndDate ?seat_incumbency_end_date . }}
            ?seat :houseSeatHasHouse ?house ;
                :houseSeatHasConstituencyGroup ?constituency_id .
            ?constituency_id :constituencyGroupName ?constituency_name ;
                :constituencyGroupStartDate ?constituencyStartDate .
            OPTIONAL {{ ?constituency_id :constituencyGroupOnsCode ?constituency_ons_id . }}
        }}
    """.format(constants.PDP_ID_HOUSE_OF_COMMONS)
    return core.sparql_select(commons_memberships_query)
def fetch_mps_party_memberships_raw():
    """Fetch party memberships for all MPs.

    Delegates to members.fetch_party_memberships_raw for the Commons.
    """
    commons = constants.PDP_ID_HOUSE_OF_COMMONS
    return members.fetch_party_memberships_raw(house=commons)
def fetch_mps_government_roles_raw():
    """Fetch government roles for all MPs.

    Delegates to members.fetch_government_roles_raw for the Commons.
    """
    commons = constants.PDP_ID_HOUSE_OF_COMMONS
    return members.fetch_government_roles_raw(house=commons)
def fetch_mps_opposition_roles_raw():
    """Fetch opposition roles for all MPs.

    Delegates to members.fetch_opposition_roles_raw for the Commons.
    """
    commons = constants.PDP_ID_HOUSE_OF_COMMONS
    return members.fetch_opposition_roles_raw(house=commons)
def fetch_mps_committee_memberships_raw():
    """Fetch committee memberships for all MPs.

    Delegates to members.fetch_committee_memberships_raw for the Commons.
    """
    commons = constants.PDP_ID_HOUSE_OF_COMMONS
    return members.fetch_committee_memberships_raw(house=commons)
# Main MPs API ----------------------------------------------------------------
def fetch_mps(from_date=np.NaN,
              to_date=np.NaN,
              on_date=np.NaN):
    """Fetch key details for all MPs, one row per MP.

    from_date and to_date filter MPs by the dates of their Commons
    memberships; filtering is inclusive (an MP is returned if any part of
    a membership overlaps the period). on_date is a convenience that sets
    both bounds to the same date and takes priority over them.

    Parameters
    ----------
    from_date : str or date or NaN, optional
        An ISO 8601 date string (e.g. '2000-12-31') or a datetime.date.
        The default NaN excludes no records on this basis.
    to_date : str or date or NaN, optional
        As from_date.
    on_date : str or date or NaN, optional
        As from_date; overrides from_date and to_date when set.

    Returns
    -------
    out : DataFrame
        A pandas dataframe of key details for each MP, one row per MP.
    """
    # on_date takes priority over the explicit bounds
    if not pd.isna(on_date):
        from_date = to_date = on_date

    mps = fetch_mps_raw()

    # Restrict to MPs with a Commons membership overlapping the period
    if not (pd.isna(from_date) and pd.isna(to_date)):
        commons_memberships = fetch_commons_memberships()
        matching = filter.filter_dates(
            commons_memberships,
            start_col='seat_incumbency_start_date',
            end_col='seat_incumbency_end_date',
            from_date=from_date,
            to_date=to_date)
        mps = mps[mps['person_id'].isin(matching['person_id'])]

    mps = mps.sort_values(by=['family_name'])
    return mps.reset_index(drop=True)
def fetch_commons_memberships(from_date=np.NaN,
                              to_date=np.NaN,
                              on_date=np.NaN):
    """Fetch Commons memberships for all MPs.

    Downloads Commons memberships from the data platform, one row per
    membership, and normalises membership end dates: an end date that
    falls after a dissolution but on or before the following general
    election is truncated to the dissolution date.

    from_date and to_date filter the memberships returned; filtering is
    inclusive (a membership matches if any part of it overlaps the
    period). on_date is a convenience that sets both bounds and takes
    priority over them. A membership with a NaN end date is still open.

    Parameters
    ----------
    from_date : str or date or NaN, optional
        An ISO 8601 date string (e.g. '2000-12-31') or a datetime.date.
        The default NaN excludes no records on this basis.
    to_date : str or date or NaN, optional
        As from_date.
    on_date : str or date or NaN, optional
        As from_date; overrides from_date and to_date when set.

    Returns
    -------
    out : DataFrame
        A pandas dataframe of Commons memberships, one row per membership.
    """
    # on_date takes priority over the explicit bounds
    if not pd.isna(on_date):
        from_date = on_date
        to_date = on_date

    commons_memberships = fetch_commons_memberships_raw()

    # If a membership's end date falls inside a (dissolution, election]
    # window, truncate it to the dissolution date.
    end_dates = commons_memberships['seat_incumbency_end_date'].values
    general_elections = elections.get_general_elections().values
    general_elections_count = len(general_elections)
    for i in range(len(end_dates)):
        date = end_dates[i]
        if pd.isna(date): continue  # membership still open
        for j in range(general_elections_count):
            dissolution = general_elections[j, 1]
            election = general_elections[j, 2]
            if date > dissolution and date <= election:
                end_dates[i] = dissolution
                # BUGFIX: was `continue`, which kept comparing the stale
                # `date` against later elections after the adjustment; a
                # date matches at most one window, so stop scanning here.
                break
    commons_memberships['seat_incumbency_end_date'] = end_dates

    # Filter on dates if requested
    if not pd.isna(from_date) or not pd.isna(to_date):
        commons_memberships = filter.filter_dates(
            commons_memberships,
            start_col='seat_incumbency_start_date',
            end_col='seat_incumbency_end_date',
            from_date=from_date,
            to_date=to_date)

    # Tidy up and return
    commons_memberships.sort_values(
        by=['family_name',
            'seat_incumbency_start_date'],
        inplace=True)
    commons_memberships.reset_index(drop=True, inplace=True)
    return commons_memberships
def fetch_mps_party_memberships(from_date=np.NaN,
                                to_date=np.NaN,
                                on_date=np.NaN,
                                while_mp=True,
                                collapse=False):
    """Fetch party memberships for all MPs, one row per membership.

    from_date and to_date filter the memberships returned; filtering is
    inclusive (a membership matches if any part of it overlaps the
    period). on_date is a convenience that sets both bounds and takes
    priority over them. while_mp restricts memberships to periods when
    the individual was serving as an MP. collapse merges consecutive
    memberships of the same party into a single row of continuous
    membership; party membership ids are then not returned. A membership
    with a NaN end date is still open.

    Parameters
    ----------
    from_date : str or date or NaN, optional
        An ISO 8601 date string (e.g. '2000-12-31') or a datetime.date.
        The default NaN excludes no records on this basis.
    to_date : str or date or NaN, optional
        As from_date.
    on_date : str or date or NaN, optional
        As from_date; overrides from_date and to_date when set.
    while_mp : bool, optional
        Keep only memberships held while serving as an MP. Default True.
    collapse : bool, optional
        Merge consecutive same-party memberships into one row. Setting
        this to True drops party membership ids. Default False.

    Returns
    -------
    out : DataFrame
        A pandas dataframe of party memberships, one row per membership.
    """
    # on_date takes priority over the explicit bounds
    if not pd.isna(on_date):
        from_date = to_date = on_date

    party_memberships = fetch_mps_party_memberships_raw()

    # Date-range filter (inclusive overlap)
    if not (pd.isna(from_date) and pd.isna(to_date)):
        party_memberships = filter.filter_dates(
            party_memberships,
            start_col='party_membership_start_date',
            end_col='party_membership_end_date',
            from_date=from_date,
            to_date=to_date)

    # Restrict to periods overlapping a Commons membership
    if while_mp:
        commons_memberships = fetch_commons_memberships()
        party_memberships = filter.filter_memberships(
            tm=party_memberships,
            fm=commons_memberships,
            tm_id_col='party_membership_id',
            tm_start_col='party_membership_start_date',
            tm_end_col='party_membership_end_date',
            fm_start_col='seat_incumbency_start_date',
            fm_end_col='seat_incumbency_end_date',
            join_col='person_id')

    # Collapse continuous same-party spells if requested
    if collapse:
        return combine.combine_party_memberships(party_memberships)

    party_memberships = party_memberships.sort_values(
        by=['family_name', 'party_membership_start_date'])
    return party_memberships.reset_index(drop=True)
def fetch_mps_government_roles(from_date=np.NaN,
                               to_date=np.NaN,
                               on_date=np.NaN,
                               while_mp=True):
    """Fetch government roles for all MPs, one row per role.

    from_date and to_date filter the roles returned; filtering is
    inclusive (a role matches if any part of it overlaps the period).
    on_date is a convenience that sets both bounds and takes priority
    over them. while_mp restricts roles to periods when the individual
    was serving as an MP. A role with a NaN end date is still open.

    Parameters
    ----------
    from_date : str or date or NaN, optional
        An ISO 8601 date string (e.g. '2000-12-31') or a datetime.date.
        The default NaN excludes no records on this basis.
    to_date : str or date or NaN, optional
        As from_date.
    on_date : str or date or NaN, optional
        As from_date; overrides from_date and to_date when set.
    while_mp : bool, optional
        Keep only roles held while serving as an MP. Default True.

    Returns
    -------
    out : DataFrame
        A dataframe of government roles for each MP, one row per role.
    """
    # on_date takes priority over the explicit bounds
    if not pd.isna(on_date):
        from_date = to_date = on_date

    government_roles = fetch_mps_government_roles_raw()

    # Date-range filter (inclusive overlap)
    if not (pd.isna(from_date) and pd.isna(to_date)):
        government_roles = filter.filter_dates(
            government_roles,
            start_col='government_incumbency_start_date',
            end_col='government_incumbency_end_date',
            from_date=from_date,
            to_date=to_date)

    # Restrict to periods overlapping a Commons membership
    if while_mp:
        commons_memberships = fetch_commons_memberships()
        government_roles = filter.filter_memberships(
            tm=government_roles,
            fm=commons_memberships,
            tm_id_col='government_incumbency_id',
            tm_start_col='government_incumbency_start_date',
            tm_end_col='government_incumbency_end_date',
            fm_start_col='seat_incumbency_start_date',
            fm_end_col='seat_incumbency_end_date',
            join_col='person_id')

    government_roles = government_roles.sort_values(
        by=['family_name', 'government_incumbency_start_date'])
    return government_roles.reset_index(drop=True)
def fetch_mps_opposition_roles(from_date=np.NaN,
                               to_date=np.NaN,
                               on_date=np.NaN,
                               while_mp=True):
    """Fetch opposition roles for all MPs, one row per role.

    from_date and to_date filter the roles returned; filtering is
    inclusive (a role matches if any part of it overlaps the period).
    on_date is a convenience that sets both bounds and takes priority
    over them. while_mp restricts roles to periods when the individual
    was serving as an MP. A role with a NaN end date is still open.

    Parameters
    ----------
    from_date : str or date or NaN, optional
        An ISO 8601 date string (e.g. '2000-12-31') or a datetime.date.
        The default NaN excludes no records on this basis.
    to_date : str or date or NaN, optional
        As from_date.
    on_date : str or date or NaN, optional
        As from_date; overrides from_date and to_date when set.
    while_mp : bool, optional
        Keep only roles held while serving as an MP. Default True.

    Returns
    -------
    out : DataFrame
        A dataframe of opposition roles for each MP, one row per role.
    """
    # on_date takes priority over the explicit bounds
    if not pd.isna(on_date):
        from_date = to_date = on_date

    opposition_roles = fetch_mps_opposition_roles_raw()

    # Date-range filter (inclusive overlap)
    if not (pd.isna(from_date) and pd.isna(to_date)):
        opposition_roles = filter.filter_dates(
            opposition_roles,
            start_col='opposition_incumbency_start_date',
            end_col='opposition_incumbency_end_date',
            from_date=from_date,
            to_date=to_date)

    # Restrict to periods overlapping a Commons membership
    if while_mp:
        commons_memberships = fetch_commons_memberships()
        opposition_roles = filter.filter_memberships(
            tm=opposition_roles,
            fm=commons_memberships,
            tm_id_col='opposition_incumbency_id',
            tm_start_col='opposition_incumbency_start_date',
            tm_end_col='opposition_incumbency_end_date',
            fm_start_col='seat_incumbency_start_date',
            fm_end_col='seat_incumbency_end_date',
            join_col='person_id')

    opposition_roles = opposition_roles.sort_values(
        by=['family_name', 'opposition_incumbency_start_date'])
    return opposition_roles.reset_index(drop=True)
def fetch_mps_committee_memberships(from_date=np.NaN,
                                    to_date=np.NaN,
                                    on_date=np.NaN,
                                    while_mp=True):
    """Fetch committee memberships for all MPs, one row per membership.

    from_date and to_date filter the memberships returned; filtering is
    inclusive (a membership matches if any part of it overlaps the
    period). on_date is a convenience that sets both bounds and takes
    priority over them. while_mp restricts memberships to periods when
    the individual was serving as an MP. A membership with a NaN end
    date is still open.

    Parameters
    ----------
    from_date : str or date or NaN, optional
        An ISO 8601 date string (e.g. '2000-12-31') or a datetime.date.
        The default NaN excludes no records on this basis.
    to_date : str or date or NaN, optional
        As from_date.
    on_date : str or date or NaN, optional
        As from_date; overrides from_date and to_date when set.
    while_mp : bool, optional
        Keep only memberships held while serving as an MP. Default True.

    Returns
    -------
    out : DataFrame
        A dataframe of committee memberships, one row per membership.
    """
    # on_date takes priority over the explicit bounds
    if not pd.isna(on_date):
        from_date = to_date = on_date

    committee_memberships = fetch_mps_committee_memberships_raw()

    # Date-range filter (inclusive overlap)
    if not (pd.isna(from_date) and pd.isna(to_date)):
        committee_memberships = filter.filter_dates(
            committee_memberships,
            start_col='committee_membership_start_date',
            end_col='committee_membership_end_date',
            from_date=from_date,
            to_date=to_date)

    # Restrict to periods overlapping a Commons membership
    if while_mp:
        commons_memberships = fetch_commons_memberships()
        committee_memberships = filter.filter_memberships(
            tm=committee_memberships,
            fm=commons_memberships,
            tm_id_col='committee_membership_id',
            tm_start_col='committee_membership_start_date',
            tm_end_col='committee_membership_end_date',
            fm_start_col='seat_incumbency_start_date',
            fm_end_col='seat_incumbency_end_date',
            join_col='person_id')

    committee_memberships = committee_memberships.sort_values(
        by=['family_name', 'committee_membership_start_date'])
    return committee_memberships.reset_index(drop=True)
|
<reponame>xerion3800/fhempy<filename>FHEM/bindings/python/tests/mocked/test_utils.py
import functools
import pytest
from fhempy.lib import utils
def test_local_ip():
    """The detected local IP must not be the loopback address."""
    local_addr = utils.get_local_ip()
    assert local_addr != "127.0.0.1"
def test_encrypt_decrypt():
    """Round-trip a string through encrypt_string/decrypt_string.

    Returns the decrypted string so test_run_blocking can reuse this
    function as a blocking callable with a checkable result.
    """
    plaintext = "This is a test string"
    unique_id = "a3e36c8ec8622a0de0e11191dc430a34"
    ciphertext = utils.encrypt_string(plaintext, unique_id)
    roundtrip = utils.decrypt_string(ciphertext, unique_id)
    assert roundtrip == plaintext
    return roundtrip
@pytest.mark.asyncio
async def test_run_blocking():
    """run_blocking must execute the callable and return its result."""
    result = await utils.run_blocking(functools.partial(test_encrypt_decrypt))
    assert result == "This is a test string"
@pytest.mark.asyncio
async def test_run_blocking_task():
    """run_blocking_task must schedule the callable and run it to completion."""
    marker = None

    def flip_marker():
        nonlocal marker
        marker = "bar"

    pending = utils.run_blocking_task(functools.partial(flip_marker))
    await pending
    assert marker == "bar"
@pytest.mark.asyncio
async def test_handle_define_attr(mocker):
    """handle_define_attr must register the attr list and apply values.

    Patches fhempy's FHEM bridge so no live FHEM is needed:
    setDevAttrList verifies the registered attribute spec string, and
    AttrVal simulates a pre-existing value for attr3 only.
    """
    async def setDevAttrList(hashname, attrlist):
        # Called by handle_define_attr with the full attribute spec.
        assert hashname == "test"
        assert str(attrlist) == "attr1 attr2 attr3 attr4 attr5:on,off,test"
    async def AttrVal(hashname, attr, default):
        # Pretend attr3 already has a value set in FHEM; others unset.
        if attr == "attr3":
            return "test33"
        return ""
    mocker.patch("fhempy.lib.fhem.setDevAttrList", setDevAttrList)
    mocker.patch("fhempy.lib.fhem.AttrVal", AttrVal)
    class TestClass:
        async def set_attr(self, hash):
            # attr4's "function" hook: runs with the default applied.
            assert self._attr_attr4 == "test4"
    testinstance = TestClass()
    attr_conf = {
        "attr1": {},
        "attr2": {"default": 1, "format": "int"},
        "attr3": {"default": "test3"},
        "attr4": {"default": "test4", "function": "set_attr"},
        "attr5": {"default": "test5", "options": "on,off,test"},
    }
    hash = {"NAME": "test"}
    await utils.handle_define_attr(attr_conf, testinstance, hash)
    # Defaults applied as _attr_<name> attributes, except attr3, which
    # takes the value reported by the (mocked) FHEM AttrVal.
    assert testinstance._attr_attr1 == ""
    assert testinstance._attr_attr2 == 1
    assert testinstance._attr_attr3 == "test33"
    assert testinstance._attr_attr4 == "test4"
    assert testinstance._attr_attr5 == "test5"
@pytest.mark.asyncio
async def test_handle_attr():
    """handle_attr must set, convert, delete and dispatch attributes."""
    class TestClass:
        async def set_attr(self, hash):
            # attr4's "function" hook: runs with the new value applied.
            assert self._attr_attr4 == "asdf"
    testinstance = TestClass()
    attr_conf = {
        "attr1": {},
        "attr2": {"default": 1, "format": "int"},
        "attr3": {"default": "test3"},
        "attr4": {"default": "test4", "function": "set_attr"},
        "attr5": {"default": "test5", "options": "on,off,test"},
    }
    hash = {"NAME": "test"}
    # set new value
    await utils.handle_attr(
        attr_conf, testinstance, hash, ["set", "test", "attr1", "NEWVALUE"], {}
    )
    assert testinstance._attr_attr1 == "NEWVALUE"
    # set integer: the string "2" is coerced via format "int"
    await utils.handle_attr(
        attr_conf, testinstance, hash, ["set", "test", "attr2", "2"], {}
    )
    assert testinstance._attr_attr2 == 2
    # del attribute: value falls back to the configured default (1)
    await utils.handle_attr(
        attr_conf, testinstance, hash, ["del", "test", "attr2", ""], {}
    )
    assert testinstance._attr_attr2 == 1
    # setting attr4 triggers its "function" hook (set_attr above)
    await utils.handle_attr(
        attr_conf, testinstance, hash, ["set", "test", "attr4", "asdf"], {}
    )
    assert testinstance._attr_attr4 == "asdf"
def test_flatten_json():
    """flatten_json must join nested keys with underscores."""
    nested = {"asdf": {"nested": {"nested2": "x"}}}
    # Assert the exact mapping. The previous per-element loop would have
    # passed vacuously had flatten_json returned an empty dict.
    assert utils.flatten_json(nested) == {"asdf_nested_nested2": "x"}
def test_convert2format():
    """convert2format must coerce values according to the 'format' key."""
    # int conversion (the original test repeated this stanza verbatim
    # and omitted the type check on the first copy)
    newval = utils.convert2format("3", {"format": "int"})
    assert newval == 3
    assert isinstance(newval, int)
    # str conversion
    newval = utils.convert2format("3", {"format": "str"})
    assert newval == "3"
    assert isinstance(newval, str)
    # float conversion
    newval = utils.convert2format("3", {"format": "float"})
    assert newval == 3.0
    assert isinstance(newval, float)
    # no format key: value passes through unchanged
    assert utils.convert2format("3", {}) == "3"
    assert utils.convert2format(3, {}) == 3
@pytest.mark.asyncio
async def test_handle_set():
    """Exercise utils.handle_set: usage text, argument validation, and dispatch
    to set_<cmd> methods with positional and hash-provided parameters."""
    # Command table: positional arg order, hash args, per-param
    # defaults/optionality/format, and UI option strings.
    set_list_conf = {
        "mode": {
            "args": ["mode"],
            "argsh": ["mode"],
            "params": {"mode": {"optional": False}},
            "options": "eco,comfort",
        },
        "desiredTemp": {"args": ["temperature"], "options": "slider,10,1,30"},
        "holidayMode": {
            "args": ["temperature", "till", "end"],
            "params": {"till": {"default": "31.12.2030"}, "end": {"default": "23:59"}},
        },
        "on": {
            "args": ["seconds"],
            "params": {"seconds": {"optional": True, "format": "int"}},
        },
        "off": {},
    }
    newstate = None
    # Fake device: each set_<cmd> records what handle_set passed to it.
    class TestClass:
        def __init__(self):
            pass
        async def set_on(self, hash, params):
            nonlocal newstate
            if len(params) == 0:
                newstate = "on"
            else:
                newstate = params
        async def set_off(self, hash, params):
            # The device hash must be forwarded to the handler.
            assert hash["NAME"] == "testhash"
            nonlocal newstate
            newstate = "off"
        async def set_mode(self, hash, params):
            nonlocal newstate
            newstate = params
        async def set_holidayMode(self, hash, params):
            nonlocal newstate
            newstate = params
    testhash = {"NAME": "testhash", "FHEMPYTYPE": "testtype"}
    testinstance = TestClass()
    # No command given -> usage string listing all commands with options.
    retval = await utils.handle_set(
        set_list_conf, testinstance, testhash, ["testhash"], {}
    )
    assert (
        retval
        == "Unknown argument ?, choose one of mode:eco,comfort desiredTemp:slider,10,1,30 holidayMode on off:noArg"
    )
    # Explicit "?" -> same usage string.
    retval = await utils.handle_set(
        set_list_conf, testinstance, testhash, ["testhash", "?"], {}
    )
    assert (
        retval
        == "Unknown argument ?, choose one of mode:eco,comfort desiredTemp:slider,10,1,30 holidayMode on off:noArg"
    )
    # Required parameter missing -> error text, handler not called.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "mode"],
        {},
    )
    assert retval == "Required argument mode missing."
    # Command not in the table -> rejection message.
    retval = await utils.handle_set(
        set_list_conf, testinstance, testhash, ["testhash", "nopossiblecommand"], {}
    )
    assert retval == "Command not available for this device."
    # More positional args than declared -> rejection message.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "off", "toomanyarguments"],
        {},
    )
    assert retval == "Too many parameters provided: toomanyarguments"
    # Parameterless command dispatches to set_off.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "off"],
        {},
    )
    assert newstate == "off"
    # Positional value is mapped onto the declared arg name.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "mode", "eco"],
        {},
    )
    assert newstate == {"mode": "eco"}
    newstate = None
    # The same parameter may arrive via the hash dict (argsh) instead.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "mode"],
        {"mode": "eco"},
    )
    assert newstate == {"mode": "eco"}
    newstate = None
    # Values outside "options" are still passed through to the handler.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "mode", "nonexistent"],
        {},
    )
    assert retval == None
    assert newstate == {"mode": "nonexistent"}
    newstate = None
    # Omitted trailing parameters are filled from the declared defaults.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "holidayMode", "21"],
        {},
    )
    assert retval == None
    assert newstate == {"till": "31.12.2030", "end": "23:59", "temperature": "21"}
    newstate = None
    # Explicit values override the defaults.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "holidayMode", "21", "31.12.2040", "23:38"],
        {},
    )
    assert retval == None
    assert newstate == {"till": "31.12.2040", "end": "23:38", "temperature": "21"}
    newstate = None
    # Optional parameter omitted -> handler sees empty params.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "on"],
        {},
    )
    assert retval == None
    assert newstate == "on"
    newstate = None
    # "format": "int" converts the provided string "300" to int 300.
    retval = await utils.handle_set(
        set_list_conf,
        testinstance,
        testhash,
        ["testhash", "on", "300"],
        {},
    )
    assert retval == None
    assert newstate == {"seconds": 300}
|
<filename>playlistgrabber.py
#!/usr/bin/python3
"""YouTube Playlist Backup Script in Python3.
Save a YouTube playlist's video titles into a textfile.
"""
from apiclient.discovery import build
import argparse
import codecs
from datetime import datetime
from math import ceil
from os import linesep
from sys import getfilesystemencoding
RESULTS_PER_REQUEST = 50 # can only be between 1 and 50
OS_ENCODING = getfilesystemencoding()
class PlaylistGrabber:
    """Fetch a YouTube playlist page by page via the Data API and write the
    numbered video titles to a timestamped text file."""
    # Page token for the next playlistItems page; empty string = first page.
    nextPageToken = ""
    def __init__(self, apiKey, playlistId, youtubeObj):
        # apiKey: Google Developer API key sent with every request.
        # playlistId: ID of the playlist to back up.
        # youtubeObj: API client service object (build("youtube", "v3", ...)).
        self.apiKey = apiKey
        self.playlistId = playlistId
        self.youtubeObj = youtubeObj
    def parse_autogenerated_video_description(self, description):
        """Parse the auto-generated video's description for the track ID.
        Return the properly formatted Artist(, Artist ...) - Title string."""
        descriptionLines = description.split('\n\n')
        # the first line with the '·' (\u00B7 - MIDDLE DOT) character contains
        # both the title and all featured artists
        lineWithID = [line for line in descriptionLines if '\u00B7' in line][0]
        lineWithID = lineWithID.split('\u00B7')
        # Expected layout: "Title · Artist · Artist ..." — title first,
        # artists after; presumably matches YouTube's auto-generated format.
        title = lineWithID[0]
        artists = lineWithID[1:]
        # trim whitespace
        title = title.strip(' ')
        artists = [x.strip(' ') for x in artists]
        return ', '.join(artists) + ' - ' + title
    def forge_request(self):
        """Forge a request to the YouTube API and return it. When the request
        is executed, the response will be the requested data in JSON format.
        """
        # pageToken advances pagination; self.nextPageToken is updated by
        # iterate_playlist() between requests.
        playlistItemsListRequest = self.youtubeObj.playlistItems().list(
            playlistId=self.playlistId,
            part="snippet",
            maxResults=RESULTS_PER_REQUEST,
            key=self.apiKey,
            pageToken=self.nextPageToken
        )
        return playlistItemsListRequest
    def remove_disallowed_filename_chars(self, filename):
        """Remove characters that can't be inside filenames on most systems.
        Used this as a basis: https://stackoverflow.com/a/15908244
        """
        final = ""
        for char in filename:
            # Drop reserved characters and ASCII control chars (ord <= 31).
            if char not in "<>:\"/\|?*" and ord(char) > 31:
                final += char
        # A name consisting only of periods is invalid on most filesystems.
        if final.replace(".", "") == "":
            raise SystemError("The playlist\'s name is all periods")
        return final
    def get_date_str(self):
        """Return the current date in a concise YYYYMMDD string form."""
        now = datetime.now()
        return str(now.year) + ("{:02d}{:02d}".format(now.month, now.day))
    def get_clean_playlist_name(self):
        """Send a request for the playlist's title specifically and sanitize it."""
        playlistNameRequest = self.youtubeObj.playlists().list(
            id=self.playlistId,
            part="snippet",
            key=self.apiKey
        )
        playlistNameResponse = playlistNameRequest.execute()
        playlistName = playlistNameResponse["items"][0]["snippet"]["title"]
        return self.remove_disallowed_filename_chars(playlistName)
    def iterate_playlist(self, playlistItemsListResponse):
        """Forge a request as many times as necessary to append each element
        of the playlist to a list. Return the completed list.
        """
        totalResults = playlistItemsListResponse["pageInfo"]["totalResults"]
        # Number of pages still to fetch at RESULTS_PER_REQUEST items each.
        # NOTE(review): an empty playlist yields requestsLeft == 0, which falls
        # into the else-branch and would KeyError on "nextPageToken" — confirm.
        requestsLeft = ceil(totalResults / RESULTS_PER_REQUEST)
        itemCounter = 0
        playlistItems = []
        while True:
            for item in playlistItemsListResponse["items"]:
                itemCounter += 1
                if item["snippet"]["description"].endswith('Auto-generated by YouTube.'):
                    # YouTube auto-generated track, video title does not contain artist(s),
                    # so extract artist and title info from the video description
                    videoTitle = self.parse_autogenerated_video_description(item["snippet"]["description"])
                else:
                    # regular user-uploaded track, title should contain the artist(s) as well
                    videoTitle = item["snippet"]["title"]
                currentLine = str(itemCounter) + ". " + videoTitle
                playlistItems.append(currentLine)
                # Using OS-specific encoding to avoid encoding exceptions
                # when printing to console
                currentLine = (currentLine.encode(OS_ENCODING, errors="replace")) \
                    .decode(OS_ENCODING)
                print(currentLine)
            # last page, we're done
            if requestsLeft == 1:
                break
            else:
                # Advance to the next page and fetch it synchronously.
                self.nextPageToken = playlistItemsListResponse["nextPageToken"]
                requestsLeft -= 1
                playlistItemsListResponse = self.forge_request().execute()
        return playlistItems
    def list_to_file(self, playlistItems):
        """Write the list containing the playlist's items to a textfile."""
        # Filename pattern: <sanitized playlist name>_<YYYYMMDD>.txt
        filename = self.get_clean_playlist_name() + "_" + self.get_date_str() + ".txt"
        f = codecs.open(filename, mode="w", encoding="utf-8")
        for item in playlistItems:
            f.write(item + linesep)
        f.close()
    def run(self):
        """Fetch the first page, iterate the playlist, and write the file."""
        initialResponse = self.forge_request().execute()
        self.list_to_file(self.iterate_playlist(initialResponse))
def cmdline_parse():
    """Parse command line arguments: the API key and the playlist ID."""
    arg_parser = argparse.ArgumentParser(
        description="Save a YouTube playlist's contents into a textfile.")
    arg_parser.add_argument("key", help="Your Google Developer API key")
    arg_parser.add_argument("id", help="The ID of the YouTube playlist")
    return arg_parser.parse_args()
def main():
    """Entry point: parse args, build the API client, run the grabber."""
    args = cmdline_parse()
    youtube = build("youtube", "v3", developerKey=args.key)
    grabber = PlaylistGrabber(args.key, args.id, youtube)
    grabber.run()


if __name__ == "__main__":
    main()
|
<filename>resnet_model.py
from keras import backend as K
from keras.models import Model
from keras.layers import Activation
from keras.layers import Add
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import Input
from keras.layers import MaxPooling2D
from keras.layers import ZeroPadding2D
from keras.utils import get_file
# NOTE(review): get_source_inputs lives in keras.utils for Keras >= 2.2;
# older versions expose it from keras.engine.topology — confirm version.
from keras.utils import get_source_inputs
from keras_applications.imagenet_utils import _obtain_input_shape
# Registry of downloadable pretrained weights. Each entry pairs a model
# name / dataset / include_top combination with its release-asset URL,
# local cache filename and md5 checksum (consumed by find_weights /
# load_model_weights via keras.utils.get_file).
# BUG FIX: the resnet18 no-top entry's 'name' was 'resnet18_imagenet_1000.h5',
# colliding with the with-top cache file; it now matches its URL basename.
weights_collection = [
    # ResNet18
    {
        'model': 'resnet18',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet18_imagenet_1000.h5',
        'name': 'resnet18_imagenet_1000.h5',
        'md5': '64da73012bb70e16c901316c201d9803',
    },
    {
        'model': 'resnet18',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet18_imagenet_1000_no_top.h5',
        'name': 'resnet18_imagenet_1000_no_top.h5',
        'md5': '318e3ac0cd98d51e917526c9f62f0b50',
    },
    # ResNet34
    {
        'model': 'resnet34',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet34_imagenet_1000.h5',
        'name': 'resnet34_imagenet_1000.h5',
        'md5': '2ac8277412f65e5d047f255bcbd10383',
    },
    {
        'model': 'resnet34',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet34_imagenet_1000_no_top.h5',
        'name': 'resnet34_imagenet_1000_no_top.h5',
        'md5': '8caaa0ad39d927cb8ba5385bf945d582',
    },
    # ResNet50
    {
        'model': 'resnet50',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet50_imagenet_1000.h5',
        'name': 'resnet50_imagenet_1000.h5',
        'md5': 'd0feba4fc650e68ac8c19166ee1ba87f',
    },
    {
        'model': 'resnet50',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet50_imagenet_1000_no_top.h5',
        'name': 'resnet50_imagenet_1000_no_top.h5',
        'md5': 'db3b217156506944570ac220086f09b6',
    },
    {
        'model': 'resnet50',
        'dataset': 'imagenet11k-places365ch',
        'classes': 11586,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet50_places365_11586.h5',
        'name': 'resnet50_places365_11586.h5',
        'md5': 'bb8963db145bc9906452b3d9c9917275',
    },
    {
        'model': 'resnet50',
        'dataset': 'imagenet11k-places365ch',
        'classes': 11586,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet50_imagenet_11586_no_top.h5',
        'name': 'resnet50_imagenet_11586_no_top.h5',
        'md5': 'd8bf4e7ea082d9d43e37644da217324a',
    },
    # ResNet101
    {
        'model': 'resnet101',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet101_imagenet_1000.h5',
        'name': 'resnet101_imagenet_1000.h5',
        'md5': '9489ed2d5d0037538134c880167622ad',
    },
    {
        'model': 'resnet101',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet101_imagenet_1000_no_top.h5',
        'name': 'resnet101_imagenet_1000_no_top.h5',
        'md5': '1016e7663980d5597a4e224d915c342d',
    },
    # ResNet152
    {
        'model': 'resnet152',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet152_imagenet_1000.h5',
        'name': 'resnet152_imagenet_1000.h5',
        'md5': '1efffbcc0708fb0d46a9d096ae14f905',
    },
    {
        'model': 'resnet152',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet152_imagenet_1000_no_top.h5',
        'name': 'resnet152_imagenet_1000_no_top.h5',
        'md5': '5867b94098df4640918941115db93734',
    },
    {
        'model': 'resnet152',
        'dataset': 'imagenet11k',
        'classes': 11221,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet152_imagenet11k_11221.h5',
        'name': 'resnet152_imagenet11k_11221.h5',
        'md5': '24791790f6ef32f274430ce4a2ffee5d',
    },
    {
        'model': 'resnet152',
        'dataset': 'imagenet11k',
        'classes': 11221,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnet152_imagenet11k_11221_no_top.h5',
        'name': 'resnet152_imagenet11k_11221_no_top.h5',
        'md5': '25ab66dec217cb774a27d0f3659cafb3',
    },
    # ResNeXt50
    {
        'model': 'resnext50',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnext50_imagenet_1000.h5',
        'name': 'resnext50_imagenet_1000.h5',
        'md5': '7c5c40381efb044a8dea5287ab2c83db',
    },
    {
        'model': 'resnext50',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnext50_imagenet_1000_no_top.h5',
        'name': 'resnext50_imagenet_1000_no_top.h5',
        'md5': '7ade5c8aac9194af79b1724229bdaa50',
    },
    # ResNeXt101
    {
        'model': 'resnext101',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': True,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnext101_imagenet_1000.h5',
        'name': 'resnext101_imagenet_1000.h5',
        'md5': '432536e85ee811568a0851c328182735',
    },
    {
        'model': 'resnext101',
        'dataset': 'imagenet',
        'classes': 1000,
        'include_top': False,
        'url': 'https://github.com/qubvel/classification_models/releases/download/0.0.1/resnext101_imagenet_1000_no_top.h5',
        'name': 'resnext101_imagenet_1000_no_top.h5',
        'md5': '91fe0126320e49f6ee607a0719828c7e',
    },
]
def find_weights(weights_collection, model_name, dataset, include_top):
    """Return all entries of *weights_collection* matching the given model
    name, dataset and include_top flag (possibly an empty list)."""
    return [
        entry for entry in weights_collection
        if entry['model'] == model_name
        and entry['dataset'] == dataset
        and entry['include_top'] == include_top
    ]
def load_model_weights(weights_collection, model, dataset, classes, include_top):
    """Find matching pretrained weights, download them via get_file and load
    them into *model*. Raises ValueError when no entry matches or when the
    requested class count differs from the weights' classifier size."""
    matches = find_weights(weights_collection, model.name, dataset, include_top)
    # Guard clause: no registered weights for this combination.
    if not matches:
        raise ValueError('There is no weights for such configuration: ' +
                         'model = {}, dataset = {}, '.format(model.name, dataset) +
                         'classes = {}, include_top = {}.'.format(classes, include_top))
    weights = matches[0]
    # With a classification head attached, class counts must agree.
    if include_top and weights['classes'] != classes:
        raise ValueError('If using `weights` and `include_top`'
                         ' as true, `classes` should be {}'.format(weights['classes']))
    weights_path = get_file(weights['name'],
                            weights['url'],
                            cache_subdir='models',
                            md5_hash=weights['md5'])
    model.load_weights(weights_path)
def get_conv_params(**params):
    """Return the default Conv2D kwargs used throughout the model, with any
    keyword overrides merged in."""
    base = {
        'kernel_initializer': 'glorot_uniform',
        'use_bias': False,
        'padding': 'valid',
    }
    return {**base, **params}
def get_bn_params(**params):
    """Return the default BatchNormalization kwargs (channels_last axis),
    with any keyword overrides merged in."""
    base = dict(axis=3, momentum=0.99, epsilon=2e-5, center=True, scale=True)
    base.update(params)
    return base
def handle_block_names(stage, block):
    """Build the (conv, bn, relu, shortcut) layer-name prefixes for the
    1-indexed stage/unit pair."""
    prefix = 'stage{}_unit{}_'.format(stage + 1, block + 1)
    return prefix + 'conv', prefix + 'bn', prefix + 'relu', prefix + 'sc'
def basic_identity_block(filters, stage, block):
    """Basic residual block (two 3x3 convs) with an identity shortcut.

    # Arguments
        filters: integer, number of filters in each 3x3 conv of the block
        stage: integer, current stage label, used for generating layer names
        block: integer, current block label, used for generating layer names

    # Returns
        A callable mapping an input tensor to the block's output tensor.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
        # BN + ReLU are applied before each conv (pre-activation ordering).
        x = BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
        x = Activation('relu', name=relu_name + '1')(x)
        # Explicit padding because the shared conv params use padding='valid'.
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), name=conv_name + '1', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), name=conv_name + '2', **conv_params)(x)
        # Identity shortcut: add the unmodified input.
        x = Add()([x, input_tensor])
        return x
    return layer
def basic_conv_block(filters, stage, block, strides=(2, 2)):
    """Basic residual block (two 3x3 convs) with a projection shortcut.

    Unlike basic_identity_block, the shortcut is a strided 1x1 conv, so this
    block can change spatial resolution and filter count.

    # Arguments
        filters: integer, number of filters in each 3x3 conv of the block
        stage: integer, current stage label, used for generating layer names
        block: integer, current block label, used for generating layer names
        strides: tuple, strides of the first conv and the shortcut conv

    # Returns
        A callable mapping an input tensor to the block's output tensor.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
        x = BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
        x = Activation('relu', name=relu_name + '1')(x)
        # Shortcut branches off AFTER the first BN+ReLU (pre-activation style).
        shortcut = x
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), strides=strides, name=conv_name + '1', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), name=conv_name + '2', **conv_params)(x)
        # Projection shortcut: strided 1x1 conv matches shape for the Add.
        shortcut = Conv2D(filters, (1, 1), name=sc_name, strides=strides, **conv_params)(shortcut)
        x = Add()([x, shortcut])
        return x
    return layer
def conv_block(filters, stage, block, strides=(2, 2)):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1, 4x expansion) with a
    projection shortcut.

    # Arguments
        filters: integer, base filter count; the final 1x1 conv uses filters*4
        stage: integer, current stage label, used for generating layer names
        block: integer, current block label, used for generating layer names
        strides: tuple, strides of the middle 3x3 conv and the shortcut conv

    # Returns
        A callable mapping an input tensor to the block's output tensor.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
        x = BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
        x = Activation('relu', name=relu_name + '1')(x)
        # Shortcut branches off AFTER the first BN+ReLU (pre-activation style).
        shortcut = x
        # Bottleneck: reduce with 1x1, process with strided 3x3, expand 4x.
        x = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), strides=strides, name=conv_name + '2', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '3', **bn_params)(x)
        x = Activation('relu', name=relu_name + '3')(x)
        x = Conv2D(filters*4, (1, 1), name=conv_name + '3', **conv_params)(x)
        # Projection shortcut: strided 1x1 conv to filters*4 matches shape.
        shortcut = Conv2D(filters*4, (1, 1), name=sc_name, strides=strides, **conv_params)(shortcut)
        x = Add()([x, shortcut])
        return x
    return layer
def identity_block(filters, stage, block):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1, 4x expansion) with an
    identity shortcut; input must already have filters*4 channels.

    # Arguments
        filters: integer, base filter count; the final 1x1 conv uses filters*4
        stage: integer, current stage label, used for generating layer names
        block: integer, current block label, used for generating layer names

    # Returns
        A callable mapping an input tensor to the block's output tensor.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)
        # Pre-activation ordering: BN + ReLU before each conv.
        x = BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
        x = Activation('relu', name=relu_name + '1')(x)
        x = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), name=conv_name + '2', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '3', **bn_params)(x)
        x = Activation('relu', name=relu_name + '3')(x)
        x = Conv2D(filters*4, (1, 1), name=conv_name + '3', **conv_params)(x)
        # Identity shortcut: add the unmodified input.
        x = Add()([x, input_tensor])
        return x
    return layer
def build_resnet(
        repetitions=(2, 2, 2, 2),
        include_top=True,
        input_tensor=None,
        input_shape=None,
        classes=1000,
        block_type='usual',
        class_detector_top=False):
    """Build a pre-activation-style ResNet.

    # Arguments
        repetitions: tuple, number of residual blocks in each of the stages
        include_top: bool, whether to append the softmax classification head
        input_tensor: optional Keras tensor to use as the image input
        input_shape: optional shape tuple (channels_last)
        classes: integer, output classes for the classification head
        block_type: 'basic' selects the two-conv blocks (ResNet18/34);
            any other value selects the bottleneck blocks (ResNet50/101/152)
        class_detector_top: bool, append a single-unit sigmoid detector head

    # Returns
        A Keras `Model`.
    """
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format='channels_last',
                                      require_flatten=include_top)
    if input_tensor is None:
        img_input = Input(shape=input_shape, name='data')
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # get parameters for model layers
    no_scale_bn_params = get_bn_params(scale=False)
    bn_params = get_bn_params()
    conv_params = get_conv_params()
    init_filters = 64
    # Select the residual block builders.
    # BUG FIX: the original referenced undefined `usual_conv_block` /
    # `usual_identity_block` (NameError on the default path); the bottleneck
    # builders in this module are `conv_block` / `identity_block`. Distinct
    # local names avoid shadowing those module-level functions.
    if block_type == 'basic':
        residual_conv_block = basic_conv_block
        residual_identity_block = basic_identity_block
    else:
        residual_conv_block = conv_block
        residual_identity_block = identity_block
    # resnet bottom: BN on raw input, 7x7/2 conv, BN+ReLU, 3x3/2 maxpool
    x = BatchNormalization(name='bn_data', **no_scale_bn_params)(img_input)
    x = ZeroPadding2D(padding=(3, 3))(x)
    x = Conv2D(init_filters, (7, 7), strides=(2, 2), name='conv0', **conv_params)(x)
    x = BatchNormalization(name='bn0', **bn_params)(x)
    x = Activation('relu', name='relu0')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name='pooling0')(x)
    # resnet body: filters double each stage, first block of a stage downsamples
    for stage, rep in enumerate(repetitions):
        for block in range(rep):
            filters = init_filters * (2**stage)
            # first block of first stage without strides because we have maxpooling before
            if block == 0 and stage == 0:
                x = residual_conv_block(filters, stage, block, strides=(1, 1))(x)
            elif block == 0:
                x = residual_conv_block(filters, stage, block, strides=(2, 2))(x)
            else:
                x = residual_identity_block(filters, stage, block)(x)
    x = BatchNormalization(name='bn1', **bn_params)(x)
    x = Activation('relu', name='relu1')(x)
    # resnet top
    if include_top:
        x = GlobalAveragePooling2D(name='pool1')(x)
        x = Dense(classes, name='fc1')(x)
        x = Activation('softmax', name='softmax')(x)
    # NOTE(review): enabling both include_top and class_detector_top would
    # create two layers named 'fc1' — confirm the tops are mutually exclusive.
    if class_detector_top:
        x = GlobalMaxPooling2D()(x)
        x = Dense(1, name='fc1')(x)
        x = Activation('sigmoid')(x)
    # Ensure that the model takes into account any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x)
    return model
def ResNet18(input_shape, input_tensor=None, weights=None, classes=1000, include_top=True):
    """ResNet-18: basic two-conv residual blocks, 2-2-2-2 repetitions.
    `weights` names a dataset from weights_collection to load."""
    model = build_resnet(
        repetitions=(2, 2, 2, 2),
        block_type='basic',
        input_tensor=input_tensor,
        input_shape=input_shape,
        classes=classes,
        include_top=include_top,
    )
    model.name = 'resnet18'
    if weights:
        load_model_weights(weights_collection, model, weights, classes, include_top)
    return model
def ResNet34(input_shape, input_tensor=None, weights=None, classes=1000, include_top=True, class_detector_top=False):
    """ResNet-34: basic two-conv residual blocks, 3-4-6-3 repetitions.
    `weights` names a dataset from weights_collection to load."""
    model = build_resnet(
        repetitions=(3, 4, 6, 3),
        block_type='basic',
        class_detector_top=class_detector_top,
        input_tensor=input_tensor,
        input_shape=input_shape,
        classes=classes,
        include_top=include_top,
    )
    model.name = 'resnet34'
    if weights:
        load_model_weights(weights_collection, model, weights, classes, include_top)
    return model
def ResNet50(input_shape, input_tensor=None, weights=None, classes=1000, include_top=True):
    """ResNet-50: bottleneck residual blocks, 3-4-6-3 repetitions.
    `weights` names a dataset from weights_collection to load."""
    model = build_resnet(
        repetitions=(3, 4, 6, 3),
        input_tensor=input_tensor,
        input_shape=input_shape,
        classes=classes,
        include_top=include_top,
    )
    model.name = 'resnet50'
    if weights:
        load_model_weights(weights_collection, model, weights, classes, include_top)
    return model
def ResNet101(input_shape, input_tensor=None, weights=None, classes=1000, include_top=True):
    """ResNet-101: bottleneck residual blocks, 3-4-23-3 repetitions.
    `weights` names a dataset from weights_collection to load."""
    model = build_resnet(
        repetitions=(3, 4, 23, 3),
        input_tensor=input_tensor,
        input_shape=input_shape,
        classes=classes,
        include_top=include_top,
    )
    model.name = 'resnet101'
    if weights:
        load_model_weights(weights_collection, model, weights, classes, include_top)
    return model
def ResNet152(input_shape, input_tensor=None, weights=None, classes=1000, include_top=True):
    """ResNet-152: bottleneck residual blocks, 3-8-36-3 repetitions.
    `weights` names a dataset from weights_collection to load."""
    model = build_resnet(
        repetitions=(3, 8, 36, 3),
        input_tensor=input_tensor,
        input_shape=input_shape,
        classes=classes,
        include_top=include_top,
    )
    model.name = 'resnet152'
    if weights:
        load_model_weights(weights_collection, model, weights, classes, include_top)
    return model
|
from typing import Any, Dict, List
import pydantic
import pytest
from modelkit.core.errors import ItemValidationException, ReturnValueValidationException
from modelkit.core.model import AsyncModel, Model
from modelkit.core.settings import LibrarySettings
from modelkit.utils.pydantic import construct_recursive
@pytest.mark.parametrize(
    "service_settings",
    [
        LibrarySettings(),
        LibrarySettings(enable_validation=False),
    ],
)
def test_validate_item_spec_pydantic(service_settings):
    """Pydantic item specs raise ItemValidationException only when the
    library's validation is enabled; otherwise items pass through."""
    class ItemModel(pydantic.BaseModel):
        x: int
    class SomeValidatedModel(Model[ItemModel, Any]):
        def _predict(self, item):
            return item
    valid_test_item = {"x": 10}
    m = SomeValidatedModel(service_settings=service_settings)
    assert m(valid_test_item) == valid_test_item
    if service_settings.enable_validation:
        # Items violating the ItemModel schema must be rejected.
        with pytest.raises(ItemValidationException):
            m({"ok": 1})
        with pytest.raises(ItemValidationException):
            m({"x": "something", "blabli": 10})
    else:
        # With validation off, the same items are accepted unchecked.
        m({"ok": 1})
        m({"x": "something", "blabli": 10})
    # Batch prediction round-trips valid items unchanged.
    assert m.predict_batch([valid_test_item] * 2) == [valid_test_item] * 2
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "service_settings",
    [
        LibrarySettings(),
        LibrarySettings(enable_validation=False),
    ],
)
async def test_validate_item_spec_pydantic_async(service_settings):
    """Same contract as the sync test, but through AsyncModel's awaitable
    __call__ and predict_batch."""
    class ItemModel(pydantic.BaseModel):
        x: int
    class AsyncSomeValidatedModel(AsyncModel[ItemModel, Any]):
        async def _predict(self, item):
            return item
    valid_test_item = {"x": 10}
    m = AsyncSomeValidatedModel(service_settings=service_settings)
    res = await m(valid_test_item)
    assert res == valid_test_item
    if service_settings.enable_validation:
        # Items violating the ItemModel schema must be rejected.
        with pytest.raises(ItemValidationException):
            await m({"ok": 1})
        with pytest.raises(ItemValidationException):
            await m({"x": "something", "blabli": 10})
    else:
        # With validation off, the same items are accepted unchecked.
        await m({"ok": 1})
        await m({"x": "something", "blabli": 10})
    res_list = await m.predict_batch([valid_test_item] * 2)
    assert res_list == [valid_test_item] * 2
@pytest.mark.parametrize(
    "service_settings",
    [
        LibrarySettings(),
        LibrarySettings(enable_validation=False),
    ],
)
def test_validate_item_spec_pydantic_default(service_settings):
    """Defaults declared on the pydantic item/return types are applied, and
    missing required fields fail differently with validation on vs. off."""
    class ItemType(pydantic.BaseModel):
        x: int
        y: str = "ok"
    class ReturnType(pydantic.BaseModel):
        result: int
        something_else: str = "ok"
    class TypedModel(Model[ItemType, ReturnType]):
        def _predict(self, item, **kwargs):
            return {"result": item.x + len(item.y)}
    m = TypedModel(service_settings=service_settings)
    res = m({"x": 10, "y": "okokokokok"})
    assert res.result == 20
    assert res.something_else == "ok"
    # Omitted y falls back to its default "ok" (len 2).
    res = m({"x": 10})
    assert res.result == 12
    assert res.something_else == "ok"
    if service_settings.enable_validation:
        # Missing required field x is caught by validation.
        with pytest.raises(ItemValidationException):
            m({})
    else:
        # Without validation, _predict receives a raw dict and fails on
        # attribute access instead.
        with pytest.raises(AttributeError):
            m({})
@pytest.mark.parametrize(
    "service_settings",
    [
        LibrarySettings(),
        LibrarySettings(enable_validation=False),
    ],
)
def test_validate_item_spec_typing(service_settings):
    """Plain typing annotations (Dict[str, int]) are enforced like pydantic
    specs when validation is enabled."""
    class SomeValidatedModel(Model[Dict[str, int], Any]):
        def _predict(self, item):
            return item
    valid_test_item = {"x": 10}
    m = SomeValidatedModel(service_settings=service_settings)
    assert m(valid_test_item) == valid_test_item
    if service_settings.enable_validation:
        # Non-dict items must be rejected, singly and in batches.
        with pytest.raises(ItemValidationException):
            m.predict_batch(["ok"])
        with pytest.raises(ItemValidationException):
            m("x")
        with pytest.raises(ItemValidationException):
            m.predict_batch([1, 2, 1])
    else:
        # With validation off, the same inputs pass through unchecked.
        m.predict_batch(["ok"])
        m("x")
        m.predict_batch([1, 2, 1])
    assert m.predict_batch([valid_test_item] * 2) == [valid_test_item] * 2
@pytest.mark.parametrize(
    "service_settings",
    [
        LibrarySettings(),
        LibrarySettings(enable_validation=False),
    ],
)
def test_validate_return_spec(service_settings):
    """The RETURN type spec is validated too: a prediction that does not
    match raises ReturnValueValidationException when validation is on."""
    class ItemModel(pydantic.BaseModel):
        x: int
    class SomeValidatedModel(Model[Any, ItemModel]):
        def _predict(self, item):
            return item
    m = SomeValidatedModel(service_settings=service_settings)
    # A conforming return value is coerced into the pydantic model.
    ret = m({"x": 10})
    assert ret.x == 10
    if m.service_settings.enable_validation:
        with pytest.raises(ReturnValueValidationException):
            m({"x": "something", "blabli": 10})
    else:
        m.predict({"x": "something", "blabli": 10})
@pytest.mark.parametrize(
    "service_settings",
    [
        LibrarySettings(),
        LibrarySettings(enable_validation=False),
    ],
)
def test_validate_list_items(service_settings):
    """predict_batch calls _predict once per item; single calls add one."""
    class ItemModel(pydantic.BaseModel):
        x: str
        y: str = "ok"
    class SomeValidatedModel(Model[ItemModel, Any]):
        def __init__(self, *args, **kwargs):
            # Counts how many times _predict ran.
            self.counter = 0
            super().__init__(*args, **kwargs)
        def _predict(self, item):
            self.counter += 1
            return item
    m = SomeValidatedModel(service_settings=service_settings)
    m.predict_batch([{"x": 10, "y": "ko"}] * 10)
    assert m.counter == 10
    m({"x": 10, "y": "ko"})
    assert m.counter == 11
@pytest.mark.parametrize(
    "service_settings",
    [
        LibrarySettings(),
        LibrarySettings(enable_validation=False),
    ],
)
def test_validate_none(service_settings):
    """An unparameterized Model applies no item/return validation: any value
    passes straight through _predict."""
    class SomeValidatedModel(Model):
        def _predict(self, item):
            return item
    m = SomeValidatedModel(service_settings=service_settings)
    assert m({"x": 10}) == {"x": 10}
    assert m(1) == 1
def test_construct_recursive():
    """construct_recursive builds nested pydantic models without validation:
    nested submodels and list elements are constructed recursively, and
    ill-typed leaves (str where int is declared) are kept as-is."""
    class Item(pydantic.BaseModel):
        class SubItem(pydantic.BaseModel):
            class SubSubItem(pydantic.BaseModel):
                a: str
            z: SubSubItem
        class ListItem(pydantic.BaseModel):
            content: int
        x: int
        y: SubItem
        d: Dict[str, int]
        z: List[ListItem]
    item_data = {
        "x": 1,
        "y": {"z": {"a": "ok"}},
        "d": {"ok": 1},
        # "content" is declared int but given a str — construct (no validation)
        # must preserve it unchanged.
        "z": [{"content": "content"}],
    }
    item_construct_recursive = construct_recursive(Item, **item_data)
    assert item_construct_recursive.y.z.a == "ok"
    assert item_construct_recursive.z[0].content == "content"
|
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
from os import path
import time
import types
import oslotest.base as oslotest
import six
import six.moves.urllib.parse as urlparse
class BaseTestCase(oslotest.BaseTestCase):
    """Common oslotest-based base class providing shared fixture IDs."""
    def setUp(self):
        super(BaseTestCase, self).setUp()
        # Canned identifiers reused by dependent test cases.
        self.order_id = 'order1234'
        self.external_project_id = 'keystone1234'
    def tearDown(self):
        super(BaseTestCase, self).tearDown()
def construct_new_test_function(original_func, name, build_params):
    """Builds a new test function based on parameterized data.

    :param original_func: The original test function that is used as a template
    :param name: The fullname of the new test function
    :param build_params: A dictionary or list containing args or kwargs
        for the new test
    :return: A new function object
    """
    # Clone the template: same code object, globals and defaults, new name
    # (six accessors keep this working on both Python 2 and 3).
    new_func = types.FunctionType(
        six.get_function_code(original_func),
        six.get_function_globals(original_func),
        name=name,
        argdefs=six.get_function_defaults(original_func)
    )
    # Support either an arg list or kwarg dict for our data
    build_args = build_params if isinstance(build_params, list) else []
    build_kwargs = build_params if isinstance(build_params, dict) else {}
    # Build a test wrapper to execute with our kwargs
    def test_wrapper(func, test_args, test_kwargs):
        @functools.wraps(func)
        def wrapper(self):
            return func(self, *test_args, **test_kwargs)
        return wrapper
    return test_wrapper(new_func, build_args, build_kwargs)
def process_parameterized_function(name, func_obj, build_data):
    """Build lists of functions to add and remove to a test case.

    For each subtest dataset, clones *func_obj* under the name
    '<name>_<subtest_name>' and marks the template name for removal.
    """
    to_remove = []
    to_add = []
    for subtest_name, params in build_data.items():
        # Build new test function under a dataset-qualified name.
        generated_name = '{0}_{1}'.format(name, subtest_name)
        generated_func = construct_new_test_function(func_obj, generated_name, params)
        to_add.append((generated_name, generated_func))
        # The template itself must be removed from the class.
        to_remove.append(name)
    return to_remove, to_add
def parameterized_test_case(cls):
"""Class decorator to process parameterized tests
This allows for parameterization to be used for potentially any
unittest compatible runner; including testr and py.test.
"""
tests_to_remove = []
tests_to_add = []
for key, val in vars(cls).items():
# Only process tests with build data on them
if key.startswith('test_') and val.__dict__.get('build_data'):
to_remove, to_add = process_parameterized_function(
name=key,
func_obj=val,
build_data=val.__dict__.get('build_data')
)
tests_to_remove.extend(to_remove)
tests_to_add.extend(to_add)
# Add all new test functions
[setattr(cls, name, func) for name, func in tests_to_add]
# Remove all old test function templates (if they still exist)
[delattr(cls, key) for key in tests_to_remove if hasattr(cls, key)]
return cls
def parameterized_dataset(build_data):
    """Simple decorator to mark a test method for processing."""
    def decorator(func):
        # Stash the parameterization data where the class decorator
        # (parameterized_test_case) expects to find it.
        func.build_data = build_data
        return func
    return decorator
def create_timestamp_w_tz_and_offset(timezone=None, days=0, hours=0, minutes=0,
                                     seconds=0):
    """Creates a timestamp with a timezone and offset from now.

    :param timezone: Timezone used in creation of timestamp; defaults to
        the local UTC offset reported by time.strftime("%z")
    :param days: The offset in days
    :param hours: The offset in hours
    :param minutes: The offset in minutes
    :param seconds: The offset in seconds
    :return: a timestamp string (datetime repr followed by the timezone)
    """
    if timezone is None:
        timezone = time.strftime("%z")
    # Compute the offset once, then shift "now" by it.
    offset = datetime.timedelta(days=days, hours=hours,
                                minutes=minutes, seconds=seconds)
    stamp_time = datetime.datetime.today() + offset
    return '{time}{timezone}'.format(time=stamp_time, timezone=timezone)
def get_limit_and_offset_from_ref(ref):
    """Extract the 'limit' and 'offset' query parameters from a URL.

    Raises KeyError when either parameter is absent (same as the
    original behaviour).
    """
    query = urlparse.urlparse(ref).query
    params = dict(urlparse.parse_qsl(query))
    return params['limit'], params['offset']
def get_tomorrow_timestamp():
    """Return tomorrow (now + 1 day) as an ISO-8601 formatted string.

    Fixes an AttributeError in the original: this module imports the
    ``datetime`` *module*, so ``datetime.today()`` does not exist — the
    ``today`` classmethod lives on the ``datetime.datetime`` class.
    """
    tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)
    return tomorrow.isoformat()
def string_to_datetime(datetimestring, date_formats=None):
    """Parse a datetime string, trying several accepted formats in order.

    :param datetimestring: the string to parse
    :param date_formats: optional list of strptime format strings to try
    :return: the parsed datetime.datetime
    :raises ValueError: if no format matches.  The original fell through
        to a bare ``raise`` in the loop's ``else`` clause with no active
        exception, which surfaced as an unhelpful
        ``RuntimeError: No active exception to re-raise``.
    """
    date_formats = date_formats or [
        '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S.%fZ',
        '%Y-%m-%dT%H:%M:%S.%f', "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S"]
    for dateformat in date_formats:
        try:
            return datetime.datetime.strptime(datetimestring, dateformat)
        except ValueError:
            # Try the next accepted format.
            continue
    raise ValueError("datetime string {0!r} does not match any known "
                     "format".format(datetimestring))
def get_id_from_ref(ref):
    """Returns id from reference."""
    # An empty or missing reference has no id component.
    if not ref:
        return None
    # The id is the final path segment of the reference URL.
    return path.split(ref)[1]
|
"""Defines basic light string data and functions."""
import os
import sys
import atexit
import inspect
import time
import logging
from typing import Any, Optional, Sequence, Union, overload
from nptyping import NDArray
import numpy as np
from LightBerries.LightBerryExceptions import LightStringException
from LightBerries.RpiWS281xPatch import rpi_ws281x
from LightBerries.LightPixels import Pixel, PixelColors
# Package-wide logger shared by all LightBerries modules.
LOGGER = logging.getLogger("LightBerries")
class LightString(Sequence[np.int_]):
    """Defines basic LED array data and functions."""
    def __init__(
        self,
        ledCount: Optional[int] = None,
        pixelStrip: rpi_ws281x.PixelStrip = None,
        simulate: bool = False,
    ) -> None:
        """Creates a pixel array using the rpipixelStrip library and Pixels.
        Args:
            ledCount: the number of LEDs desired in the LightString
            pixelStrip: the ws281x object that actually controls the LED signaling
            simulate: dont use GPIO
        Raises:
            Warning: if something unexpected could happen
            SystemExit: if exiting
            KeyboardInterrupt: if user quits
            LightStringException: if something bad happens
        """
        # cant run GPIO stuff without root, tell the user if they forgot
        # linux check is just for debugging with fake GPIO on windows
        if sys.platform == "linux" and not os.getuid() == 0: # pylint: disable = no-member
            raise LightStringException(
                "GPIO functionality requires root privilege. Please run command again as root"
            )
        # catch error cases first
        if ledCount is None and pixelStrip is None and simulate is False:
            raise LightStringException(
                "Cannot create LightString object without ledCount or " + "pixelStrip object being specified"
            )
        # catch error cases first
        # if ledCount is not None and pixelStrip is not None:
        # raise Warning(
        # "ledCount is overridden when pixelStrip is and ledcount "
        # + "are both passed to LightString constructor"
        # )
        try:
            self.simulate = simulate
            # use passed led count if it is valid
            # NOTE(review): when simulate=True and both ledCount and
            # pixelStrip are None, _ledCount is never assigned and the
            # validation block below raises AttributeError — confirm
            # whether that combination should be rejected earlier.
            if ledCount is not None:
                self._ledCount = ledCount
            # used passed pixel strip if it is not none
            # (the strip's own pixel count overrides any passed ledCount)
            if pixelStrip is not None:
                self.pixelStrip = pixelStrip
                self.pixelStrip.begin()
                self._ledCount = self.pixelStrip.numPixels()
            LOGGER.debug(
                "%s.%s Created WS281X object",
                self.__class__.__name__,
                inspect.stack()[0][3],
            )
        except SystemExit: # pylint:disable=try-except-raise
            raise
        except KeyboardInterrupt: # pylint:disable=try-except-raise
            raise
        except Exception as ex:
            # Wrap any unexpected failure, preserving the original traceback.
            LOGGER.exception(
                "%s.%s Exception: %s",
                self.__class__.__name__,
                inspect.stack()[0][3],
                ex,
            )
            raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
        try:
            # validate led count
            if not isinstance(self._ledCount, int):
                raise LightStringException(
                    f'Cannot create LightString object with LED count "{self._ledCount}"',
                )
            # if led count is good, create our pixel sequence
            # (one RGB triplet per LED, initialized to the default Pixel)
            self.rgbArray: NDArray[(3, Any), np.int32] = np.zeros((self._ledCount, 3))
            self.rgbArray[:] = np.array([Pixel().array for i in range(self._ledCount)])
            LOGGER.debug(
                "%s.%s Created Numpy Light array",
                self.__class__.__name__,
                inspect.stack()[0][3],
            )
        except SystemExit: # pylint:disable=try-except-raise
            raise
        except KeyboardInterrupt: # pylint:disable=try-except-raise
            raise
        except Exception as ex:
            LOGGER.exception(
                "%s.%s Exception: %s",
                self.__class__.__name__,
                inspect.stack()[0][3],
                ex,
            )
            raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
        # try to force cleanup of underlying c objects when user exits
        atexit.register(self.__del__)
    def __del__(
        self,
    ) -> None:
        """Properly disposes of the rpipixelStrip object.
        Prevents memory leaks (hopefully) that were happening in the rpi.PixelStrip module.
        Raises:
            SystemExit: if exiting
            KeyboardInterrupt: if user quits
            LightStringException: if something bad happens
        """
        # check if pixel strip has been created
        # NOTE(review): self.pixelStrip is only assigned when a strip was
        # passed to __init__; if construction failed early or simulate-only
        # mode was used, this isinstance() raises AttributeError — verify.
        if isinstance(self.pixelStrip, rpi_ws281x.PixelStrip):
            # turn off leds
            self.off()
            # cleanup c memory usage
            try:
                self.pixelStrip._cleanup()
            except SystemExit: # pylint:disable=try-except-raise
                raise
            except KeyboardInterrupt: # pylint:disable=try-except-raise
                raise
            except Exception as ex:
                LOGGER.exception("Failed to clean up WS281X object: %s", str(ex))
                raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
    def __len__(
        self,
    ) -> int:
        """Return length of the light string (the number of LEDs).
        Returns:
            the number of LEDs in the array
        """
        if self.rgbArray is not None:
            return len(self.rgbArray)
        else:
            return 0
    @overload
    def __getitem__( # noqa D105
        self,
        idx: int,
    ) -> NDArray[(3,), np.int32]:
        ... # pylint: disable=pointless-statement
    @overload
    def __getitem__( # noqa D105 # pylint: disable=function-redefined
        self,
        s: slice,
    ) -> NDArray[(3, Any), np.int32]:
        ... # pylint: disable=pointless-statement
    def __getitem__( # pylint: disable=function-redefined
        self, key: Union[int, slice]
    ) -> Union[NDArray[(3,), np.int32], NDArray[(3, Any), np.int32]]:
        """Return a LED index or slice from LED array.
        Args:
            key: an index of a single LED, or a slice specifying a range of LEDs
        Returns:
            the LED value or values as requested
        Raises:
            SystemExit: if exiting
            KeyboardInterrupt: if user quits
            LightStringException: if something bad happens
        """
        try:
            if isinstance(self.rgbArray, np.ndarray):
                # NOTE(review): rows of a plain numpy array have no
                # ``.array`` attribute — as written this appears to raise
                # AttributeError and get re-wrapped below; confirm whether
                # rgbArray is expected to hold Pixel-like elements here.
                return self.rgbArray[key].array
            else:
                raise LightStringException("Cannot index into uninitialized LightString object")
        except SystemExit: # pylint:disable=try-except-raise
            raise
        except KeyboardInterrupt: # pylint:disable=try-except-raise
            raise
        except Exception as ex:
            LOGGER.exception('Failed to get key "%s" from %s: %s', key, self.rgbArray, ex)
            raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
    def __setitem__(
        self,
        key: Union[int, slice],
        value: Union[NDArray[(3,), np.int32], NDArray[(3, Any), np.int32]],
    ) -> None:
        """Set LED value(s) in the array.
        Args:
            key: the index or slice specifying one or more LED indices
            value: the RGB value or values to assign to the given LED indices
        Raises:
            SystemExit: if exiting
            KeyboardInterrupt: if user quits
            LightStringException: if something bad happens
        """
        try:
            if isinstance(self.rgbArray, np.ndarray):
                # Slices accept an ndarray or a sequence of Pixel-coercible
                # values; single indices accept an ndarray or a Pixel.
                if isinstance(key, slice):
                    if isinstance(value, np.ndarray):
                        self.rgbArray.__setitem__(key, value)
                    elif isinstance(value, Sequence):
                        self.rgbArray.__setitem__(key, [Pixel(v).array for v in value])
                    else:
                        raise LightStringException(
                            "Cannot assign multiple indices of LightString using a single value"
                        )
                else:
                    if isinstance(value, np.ndarray):
                        self.rgbArray.__setitem__(key, value)
                    elif isinstance(value, Pixel):
                        self.rgbArray.__setitem__(key, Pixel(value).array)
                    else:
                        raise LightStringException(
                            "Cannot assign single index of LightString using multiple values"
                        )
            else:
                raise LightStringException("Cannot index into uninitialized LightString object")
        except SystemExit: # pylint:disable=try-except-raise
            raise
        except KeyboardInterrupt: # pylint:disable=try-except-raise
            raise
        except Exception as ex:
            LOGGER.exception("Failed to set light %s to value %s: %s", key, value, ex)
            raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
    def __enter__(
        self,
    ) -> "LightString":
        """Get an instance of this object object.
        Returns:
            an instance of LightString
        """
        return self
    def __exit__(
        self,
        *args,
    ) -> None:
        """Cleanup the instance of this object.
        Args:
            args: ignored
        """
        # Delegates cleanup to __del__ (turns LEDs off, frees C memory).
        self.__del__()
    def off(
        self,
    ) -> None:
        """Turn all of the LEDs in the LightString off.
        Raises:
            SystemExit: if exiting
            KeyboardInterrupt: if user quits
            LightStringException: if something bad happens
        """
        for index in range(len(self.rgbArray)):
            try:
                self[index] = PixelColors.OFF.array
            except SystemExit: # pylint:disable=try-except-raise
                raise
            except KeyboardInterrupt: # pylint:disable=try-except-raise
                raise
            except Exception as ex:
                # NOTE(review): logging ``LightString(0)`` constructs a brand
                # new LightString just for the message (and would itself raise
                # on non-root linux) — presumably the intended argument was
                # the value being assigned; verify.
                LOGGER.exception(
                    "Failed to set pixel %s in WS281X to value %s: %s",
                    index,
                    LightString(0),
                    ex,
                )
                raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
        self.refresh()
    def refresh(
        self,
    ) -> None:
        """Update the ws281x signal using the numpy array.
        Raises:
            SystemExit: if exiting
            KeyboardInterrupt: if user quits
            LightStringException: if something bad happens
        """
        try:
            # define callback for map method (fast iterator)
            if self.simulate is False:
                def SetPixel(irgb):
                    try:
                        i = irgb[0]
                        rgb = irgb[1]
                        # Pack R, G, B bytes into the 24-bit int the ws281x
                        # driver expects (R high byte, B low byte).
                        value = (int(rgb[0]) << 16) + (int(rgb[1]) << 8) + int(rgb[2])
                        self.pixelStrip.setPixelColor(i, value)
                    except SystemExit: # pylint:disable=try-except-raise
                        raise
                    except KeyboardInterrupt: # pylint:disable=try-except-raise
                        raise
                    except Exception as ex:
                        LOGGER.exception(
                            "Failed to set pixel %d in WS281X to value %d: %s",
                            i,
                            value,
                            str(ex),
                        )
                        raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
            # copy this class's array into the ws281x array
            if self.simulate is False:
                # list() forces the lazy map to run over every pixel.
                list(
                    map(
                        SetPixel,
                        enumerate(self.rgbArray),
                    )
                )
                # send the signal out
                self.pixelStrip.show()
        except SystemExit: # pylint:disable=try-except-raise
            raise
        except KeyboardInterrupt: # pylint:disable=try-except-raise
            raise
        except Exception as ex:
            LOGGER.exception('Function call "show" in WS281X object failed: %s', str(ex))
            raise LightStringException(str(ex)).with_traceback(ex.__traceback__)
if __name__ == "__main__":
    # Minimal hardware smoke test: build a 100-LED strip on the PWM pin,
    # light one LED red for a second, then clean up via the context manager.
    LOGGER.info("Running LightString")
    # the number of pixels in the light string
    PIXEL_COUNT = 100
    # GPIO pin to use for PWM signal
    GPIO_PWM_PIN = 18
    # DMA channel
    DMA_CHANNEL = 5
    # frequency to run the PWM signal at
    PWM_FREQUENCY = 800000
    # None selects the rpi_ws281x defaults for gamma table and strip type.
    GAMMA = None
    LED_STRIP_TYPE = None
    INVERT = False
    PWM_CHANNEL = 0
    # The with-block guarantees __exit__ -> __del__ runs (LEDs off, C cleanup).
    with LightString(
        pixelStrip=rpi_ws281x.PixelStrip(
            num=PIXEL_COUNT,
            pin=GPIO_PWM_PIN,
            dma=DMA_CHANNEL,
            freq_hz=PWM_FREQUENCY,
            channel=PWM_CHANNEL,
            invert=INVERT,
            gamma=GAMMA,
            strip_type=LED_STRIP_TYPE,
        ),
    ) as liteStr:
        # Push the initial (all-zero) buffer out to the strip.
        liteStr.refresh()
        p = Pixel((255, 0, 0))  # NOTE(review): unused — looks like leftover; verify
        liteStr[4] = PixelColors.RED
        liteStr.refresh()
        time.sleep(1)
|
<filename>contributions/applications/experiment4/train.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import os
import pandas as pd
import tensorflow as tf
import numpy as np
from dltk.core.metrics import dice
from dltk.networks.segmentation.unet import residual_unet_3d
from dltk.io.abstract_reader import Reader
from reader import read_fn
# Run one validation pass after every N training steps.
EVAL_EVERY_N_STEPS = 100
# Number of batches evaluated per validation pass.
EVAL_STEPS = 1
# Two-class (binary) segmentation output.
NUM_CLASSES = 2
# Single input image channel (the summary name below suggests a T2 image).
NUM_CHANNELS = 1
# NUM_FEATURES_IN_SUMMARIES = min(4, NUM_CHANNELS)
BATCH_SIZE = 16
SHUFFLE_CACHE_SIZE = 64
# Total training steps across all train/eval cycles.
MAX_STEPS = 50000
def model_fn(features, labels, mode, params):
    """Model function to construct a tf.estimator.EstimatorSpec. It creates a
    network given input features (e.g. from a dltk.io.abstract_reader) and
    training targets (labels). Further, loss, optimiser, evaluation ops and
    custom tensorboard summary ops can be added. For additional information,
    please refer to https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#model_fn.
    Args:
        features (tf.Tensor): Tensor of input features to train from. Required
            rank and dimensions are determined by the subsequent ops
            (i.e. the network).
        labels (tf.Tensor): Tensor of training targets or labels. Required rank
            and dimensions are determined by the network output.
        mode (str): One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT
        params (dict, optional): A dictionary to parameterise the model_fn
            (e.g. learning_rate)
    Returns:
        tf.estimator.EstimatorSpec: A custom EstimatorSpec for this experiment
    """
    print("Setting up U-Net")
    # 1. create a model and its outputs
    # Strides keep the first (depth) axis at full resolution and halve the
    # in-plane dimensions at each scale.
    net_output_ops = residual_unet_3d(
        inputs=features['x'],
        num_classes=NUM_CLASSES,
        num_res_units=2,
        filters=(16, 32, 64, 128),
        strides=((1, 1, 1), (1, 2, 2), (1, 2, 2), (1, 2, 2)),
        mode=mode,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4))
    # 1.1 Generate predictions only (for `ModeKeys.PREDICT`)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=net_output_ops,
            export_outputs={'out': tf.estimator.export.PredictOutput(net_output_ops)})
    # 2. set up a loss function
    # print(labels['y'])
    # Per-voxel cross-entropy over integer class labels, averaged to a scalar.
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=net_output_ops['logits'],
        labels=labels['y'])
    loss = tf.reduce_mean(ce)
    # 3. define a training op and ops for updating moving averages
    # (i.e. for batch normalisation)
    global_step = tf.train.get_global_step()
    optimiser = tf.train.MomentumOptimizer(
        learning_rate=params["learning_rate"],
        momentum=0.9)
    # UPDATE_OPS must run before the minimize step so batch-norm statistics
    # are refreshed each iteration.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimiser.minimize(loss, global_step=global_step)
    # 4.1 (optional) create custom image summaries for tensorboard
    # Takes the first example's first depth slice of input, label, prediction.
    my_image_summaries = {'feat_t2': features['x'][0, 0, :, :, 0],
                          'labels': tf.cast(labels['y'], tf.float32)[0, 0, :, :],
                          'predictions': tf.cast(net_output_ops['y_'], tf.float32)[0, 0, :, :]}
    expected_output_size = [1, 64, 64, 1] # [B, W, H, C]
    [tf.summary.image(name, tf.reshape(image, expected_output_size))
     for name, image in my_image_summaries.items()]
    # 4.2 (optional) create custom metric summaries for tensorboard
    # Dice score per class, computed in Python via py_func.
    dice_tensor = tf.py_func(dice, [net_output_ops['y_'],
                                    labels['y'],
                                    tf.constant(NUM_CLASSES)], tf.float32)
    [tf.summary.scalar('dsc_l{}'.format(i), dice_tensor[i])
     for i in range(NUM_CLASSES)]
    # 5. Return EstimatorSpec object
    return tf.estimator.EstimatorSpec(mode=mode,
                                      predictions=net_output_ops,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=None)
def train(args):
    """Run the training loop for the residual 3D U-Net segmentation model.

    Seeds numpy/TF for reproducibility, reads file references from the
    training CSV, builds DLTK reader input pipelines, alternates training
    with (optional) validation, and finally exports a SavedModel.

    Args:
        args: argparse namespace providing ``train_csv``, ``model_path``
            and ``run_validation``.
    """
    np.random.seed(42)
    tf.set_random_seed(42)
    print('Setting up...')
    # Parse csv files for file names.
    # Fix: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
    # in pandas 1.0; ``.values`` returns the identical ndarray and works on
    # every pandas version.
    all_filenames = pd.read_csv(
        args.train_csv,
        dtype=object,
        keep_default_na=False,
        na_values=[]).values
    # Hard-coded split: rows 1-9 for training, rows 10-11 for validation.
    # NOTE(review): row 0 is never used — confirm this is intentional.
    train_filenames = all_filenames[1:10]
    val_filenames = all_filenames[10:12]
    # Set up a data reader to handle the file i/o.
    reader_params = {'n_examples': 16,
                     'example_size': [1, 64, 64],
                     'extract_examples': True}
    reader_example_shapes = {'features': {'x': reader_params['example_size'] + [NUM_CHANNELS, ]},
                             'labels': {'y': reader_params['example_size']}}
    reader = Reader(read_fn,
                    {'features': {'x': tf.float32},
                     'labels': {'y': tf.int32}})
    # Get input functions and queue initialisation hooks for training and
    # validation data
    train_input_fn, train_qinit_hook = reader.get_inputs(
        file_references=train_filenames,
        mode=tf.estimator.ModeKeys.TRAIN,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)
    val_input_fn, val_qinit_hook = reader.get_inputs(
        file_references=val_filenames,
        mode=tf.estimator.ModeKeys.EVAL,
        example_shapes=reader_example_shapes,
        batch_size=BATCH_SIZE,
        shuffle_cache_size=SHUFFLE_CACHE_SIZE,
        params=reader_params)
    # Instantiate the neural network estimator
    nn = tf.estimator.Estimator(
        model_fn=model_fn,
        model_dir=args.model_path,
        params={"learning_rate": 0.001},
        config=tf.estimator.RunConfig())
    # Hooks for validation summaries
    val_summary_hook = tf.contrib.training.SummaryAtEndHook(
        os.path.join(args.model_path, 'eval'))
    step_cnt_hook = tf.train.StepCounterHook(
        every_n_steps=EVAL_EVERY_N_STEPS,
        output_dir=args.model_path)
    print('Starting training...')
    try:
        # Interleave EVAL_EVERY_N_STEPS of training with one validation pass.
        for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
            nn.train(
                input_fn=train_input_fn,
                hooks=[train_qinit_hook, step_cnt_hook],
                steps=EVAL_EVERY_N_STEPS)
            if args.run_validation:
                results_val = nn.evaluate(
                    input_fn=val_input_fn,
                    hooks=[val_qinit_hook, val_summary_hook],
                    steps=EVAL_STEPS)
                print('Step = {}; val loss = {:.5f};'.format(
                    results_val['global_step'], results_val['loss']))
    except KeyboardInterrupt:
        # Ctrl-C ends training gracefully; the model is still exported below.
        pass
    print('Stopping now.')
    export_dir = nn.export_savedmodel(
        export_dir_base=args.model_path,
        serving_input_receiver_fn=reader.serving_input_receiver_fn(reader_example_shapes))
    print('Model saved to {}.'.format(export_dir))
if __name__ == '__main__':
    # Set up argument parser
    parser = argparse.ArgumentParser(description="Contribution: dHCP GM segmentation training script")
    # NOTE(review): default=True with no action/type means any value passed
    # on the command line (even the string "False") is truthy — confirm
    # this flag behaves as intended.
    parser.add_argument('--run_validation', default=True)
    parser.add_argument('--restart', default=False, action='store_true')
    parser.add_argument('--verbose', default=False, action='store_true')
    parser.add_argument('--cuda_devices', '-c', default='7')
    parser.add_argument('--model_path', '-p', default='/home/sb17/DLTK/contributions/applications/experiment4/experiment4_model_cgm/')
    parser.add_argument('--train_csv', default='/home/sb17/DLTK/contributions/applications/experiment4/experiment_3.csv')
    args = parser.parse_args()
    # Map the verbosity flag onto both TF's C++ log filter and its Python logger.
    if args.verbose:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
        tf.logging.set_verbosity(tf.logging.INFO)
    else:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        tf.logging.set_verbosity(tf.logging.ERROR)
    # GPU allocation options
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_devices
    # Handle restarting and resuming training
    if args.restart:
        print('Restarting training from scratch.')
        os.system('rm -rf {}'.format(args.model_path))
    if not os.path.isdir(args.model_path):
        os.system('mkdir -p {}'.format(args.model_path))
    else:
        print('Resuming training on model_path {}'.format(args.model_path))
    # Call training
    train(args)
|
<reponame>yclin99/CS251A_final_gem5
# Copyright 2004-2006 The Regents of The University of Michigan
# Copyright 2010-20013 Advanced Micro Devices, Inc.
# Copyright 2013 <NAME> and <NAME>
# Copyright 2017-2020 ARM Limited
# Copyright 2021 Google, Inc.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import importlib
import os.path
import sys
import importer
from code_formatter import code_formatter
# Command line: which Python module declares the SimObject, and where to
# write the generated C++ param header.
parser = argparse.ArgumentParser()
parser.add_argument('modpath', help='module the simobject belongs to')
parser.add_argument('param_hh', help='parameter header file to generate')
args = parser.parse_args()
# The SimObject's name is the output header's basename without extension.
basename = os.path.basename(args.param_hh)
sim_object_name = os.path.splitext(basename)[0]
# Install gem5's custom import machinery, then pull the SimObject class
# out of the module that declares it.
importer.install()
module = importlib.import_module(args.modpath)
sim_object = getattr(module, sim_object_name)
from m5.objects.SimObject import SimObject
from m5.params import Enum
code = code_formatter()
# The 'local' attribute restricts us to the params declared in
# the object itself, not including inherited params (which
# will also be inherited from the base class's param struct
# here). Sort the params based on their key
params = list(map(lambda k_v: k_v[1],
                  sorted(sim_object._params.local.items())))
ports = sim_object._ports.local
# Resolve every param's declared ptype up front so a missing or
# unresolvable type fails here with a useful diagnostic.
try:
    ptypes = []
    for p in params:
        ptypes.append(p.ptype)
except Exception:
    # Fix: the original used a list comprehension, whose loop variable does
    # not leak on Python 3 — referencing ``p`` here raised NameError instead
    # of printing the offending param.  A plain loop keeps ``p`` bound.
    # (Also narrowed the bare ``except:`` so SystemExit/KeyboardInterrupt
    # propagate untouched; the error itself is still re-raised.)
    print(sim_object, p, p.ptype_str)
    print(params)
    raise
# Module-level guard so the nested-template warning prints at most once per
# run, however many CxxClass instances are parsed.
warned_about_nested_templates = False
class CxxClass(object):
    """Parsed form of a C++ class signature: namespaces, name, template args.

    Understands signatures like ``gem5::Foo<Bar, 3>`` and can re-emit a
    forward declaration through :meth:`declare`.
    """
    def __init__(self, sig, template_params=None):
        """Parse *sig* into namespaces, class name and template arguments.

        :param sig: C++ class signature, e.g. ``gem5::Foo<T, 3>``
        :param template_params: declared template parameters, e.g.
            ``['typename T', 'int N']``.  (A ``None`` sentinel replaces the
            original mutable-default ``[]``.)
        """
        # Fix: the original read this module-level flag and then assigned to
        # it without a ``global`` statement, making the name local to
        # __init__ and raising UnboundLocalError the first time a nested
        # template argument was actually encountered.
        global warned_about_nested_templates
        if template_params is None:
            template_params = []
        # Split the signature into its constituent parts. This could
        # potentially be done with regular expressions, but
        # it's simple enough to pick appart a class signature
        # manually.
        parts = sig.split('<', 1)
        base = parts[0]
        t_args = []
        if len(parts) > 1:
            # The signature had template arguments.
            text = parts[1].rstrip(' \t\n>')
            arg = ''
            # Keep track of nesting to avoid splitting on ","s embedded
            # in the arguments themselves.
            depth = 0
            for c in text:
                if c == '<':
                    depth = depth + 1
                    if depth > 0 and not warned_about_nested_templates:
                        warned_about_nested_templates = True
                        print('Nested template argument in cxx_class.'
                              ' This feature is largely untested and '
                              ' may not work.')
                elif c == '>':
                    depth = depth - 1
                elif c == ',' and depth == 0:
                    t_args.append(arg.strip())
                    arg = ''
                else:
                    arg = arg + c
            if arg:
                t_args.append(arg.strip())
        # Split the non-template part on :: boundaries.
        class_path = base.split('::')
        # The namespaces are everything except the last part of the class path.
        self.namespaces = class_path[:-1]
        # And the class name is the last part.
        self.name = class_path[-1]
        self.template_params = template_params
        self.template_arguments = []
        # Iterate through the template arguments and their values. This
        # will likely break if parameter packs are used.
        for arg, param in zip(t_args, template_params):
            type_keys = ('class', 'typename')
            # If a parameter is a type, parse it recursively. Otherwise
            # assume it's a constant, and store it verbatim.
            if any(param.strip().startswith(kw) for kw in type_keys):
                self.template_arguments.append(CxxClass(arg))
            else:
                self.template_arguments.append(arg)
    def declare(self, code):
        """Emit forward declarations for this class and its type arguments."""
        # First declare any template argument types.
        for arg in self.template_arguments:
            if isinstance(arg, CxxClass):
                arg.declare(code)
        # Re-open the target namespace.
        for ns in self.namespaces:
            code('namespace $ns {')
        # If this is a class template...
        if self.template_params:
            code('template <${{", ".join(self.template_params)}}>')
        # The actual class declaration.
        code('class ${{self.name}};')
        # Close the target namespaces.
        for ns in reversed(self.namespaces):
            code('} // namespace $ns')
# Emit the include guard / header prologue.
code('''\
#ifndef __PARAMS__${sim_object}__
#define __PARAMS__${sim_object}__
''')
# The base SimObject has a couple of params that get
# automatically set from Python without being declared through
# the normal Param mechanism; we slip them in here (needed
# predecls now, actual declarations below)
if sim_object == SimObject:
    code('''#include <string>''')
cxx_class = CxxClass(sim_object._value_dict['cxx_class'],
                     sim_object._value_dict['cxx_template_params'])
# A forward class declaration is sufficient since we are just
# declaring a pointer.
cxx_class.declare(code)
# Each param/port contributes whatever forward declarations it needs.
for param in params:
    param.cxx_predecls(code)
for port in ports.values():
    port.cxx_predecls(code)
code()
# Pull in the parent's param struct header so ours can inherit from it.
if sim_object._base:
    code('#include "params/${{sim_object._base.type}}.hh"')
    code()
# Enum-typed params need their generated enum headers.
for ptype in ptypes:
    if issubclass(ptype, Enum):
        code('#include "enums/${{ptype.__name__}}.hh"')
        code()
code('namespace gem5')
code('{')
code('')
# now generate the actual param struct
code("struct ${sim_object}Params")
if sim_object._base:
    code(" : public ${{sim_object._base.type}}Params")
code("{")
# Concrete (non-abstract) SimObjects get a create() factory method.
if not hasattr(sim_object, 'abstract') or not sim_object.abstract:
    if 'type' in sim_object.__dict__:
        code("    ${{sim_object.cxx_type}} create() const;")
code.indent()
if sim_object == SimObject:
    code('''
    SimObjectParams() {}
    virtual ~SimObjectParams() {}
    std::string name;
    ''')
# One member declaration per declared param and port.
for param in params:
    param.cxx_decl(code)
for port in ports.values():
    port.cxx_decl(code)
code.dedent()
code('};')
code()
code('} // namespace gem5')
code()
code('#endif // __PARAMS__${sim_object}__')
# Write the rendered header to the requested path.
code.write(args.param_hh)
|
<filename>invana_engine/gremlin/schema.py<gh_stars>1-10
from .base import GremlinOperationBase, CRUDOperationsBase
from gremlin_python.process.strategies import *
from gremlin_python.process.traversal import Order
class SchemaOps(GremlinOperationBase):
    """Read and create vertex/edge schema via Gremlin/JanusGraph queries.

    Read operations group elements by label and collect their distinct
    property keys; create operations drive the JanusGraph management API
    through Groovy script strings.
    """
    def get_all_vertices_schema(self):
        """Return [{label, propertyKeys}] for every vertex label in the graph."""
        _ = self.gremlin_client.execute_query(
            "g.V().group().by(label).by(properties().label().dedup().fold())",
            serialize_elements=False
        )
        schema_data = []
        # Result is a list of {label: [property keys]} maps; flatten it.
        for schema in _:
            for k, v in schema.items():
                schema_data.append(
                    {
                        "label": k,
                        "propertyKeys": v
                    }
                )
        return schema_data
    def get_vertex_label_schema(self, label: str, namespace: str = None):
        """Return {label, propertyKeys} for one vertex label.

        ``namespace`` is currently unused.
        """
        # TODO - fix performance
        # NOTE(review): queries the whole graph then picks one label; also
        # ``_[0]`` raises IndexError when the graph has no vertices — verify.
        _ = self.gremlin_client.execute_query(
            "g.V().group().by(label).by(properties().label().dedup().fold())",
            serialize_elements=False
        )
        return {"label": label, "propertyKeys": _[0].get(label, [])}
    def get_all_edges_schema(self):
        """Return [{label, propertyKeys}] for every edge label in the graph."""
        _ = self.gremlin_client.execute_query(
            "g.E().group().by(label).by(properties().label().dedup().fold())",
            serialize_elements=False
        )
        schema_data = []
        # Same flattening as get_all_vertices_schema, over edges.
        for schema in _:
            for k, v in schema.items():
                schema_data.append(
                    {
                        "label": k,
                        "propertyKeys": v
                    }
                )
        return schema_data
    def get_edge_label_schema(self, label: str, namespace: str = None):
        """Return {label, propertyKeys} for one edge label.

        ``namespace`` is currently unused.
        """
        # TODO - fix performance
        _ = self.gremlin_client.execute_query(
            "g.E().group().by(label).by(properties().label().dedup().fold())",
            serialize_elements=False
        )
        return {"label": label, "propertyKeys": _[0].get(label, [])}
    def create_vertex_label_schema(self, label: str, namespace: str = None):
        """Create a vertex label via the JanusGraph management API.

        Returns {"status": bool, "message": str}; never raises.
        """
        try:
            _ = self.gremlin_client.execute_query(
                f"""
                mgmt = graph.openManagement()
                person = mgmt.makeVertexLabel('{label}').make()
                mgmt.commit()
                """,
                # "person = graph.addVertex(label, '" + label + "')",
                serialize_elements=False
            )
            return {"status": True, "message": "ok"}
        except Exception as e:
            return {"status": False, "message": e.__str__()}
    def create_edge_label_schema(self, label: str, multiplicity: str = None, namespace: str = None):
        """Create an edge label, optionally with a multiplicity setting.

        Returns {"status": bool, "message": str}; never raises.
        """
        # https://docs.janusgraph.org/basics/schema/#edge-label-multiplicity
        # The makeEdgeLabel(...) call is left open so the optional
        # cardinality step can be chained before .make().
        query = f"""
        mgmt = graph.openManagement()
        person = mgmt.makeEdgeLabel('{label}')"""
        if multiplicity:
            query += f".cardinality(Cardinality.{multiplicity.upper()})"
        query += ".make()"
        query += f"""
        mgmt.commit()
        """
        try:
            _ = self.gremlin_client.execute_query(
                query,
                serialize_elements=False
            )
            return {"status": True, "message": "ok"}
        except Exception as e:
            return {"status": False, "message": e.__str__()}
    def create_vertex_property_schema(self,
                                      label: str,
                                      property_key: str,
                                      data_type: str,
                                      cardinality: str):
        """
        :param label:
        :param property_key:
        :param data_type:
        :param cardinality: SINGLE, LIST , SET
        :return:
        """
        # NOTE(review): unlike create_edge_label_schema, this template ends
        # with a newline+indent before the chained .dataType()/.make() calls
        # are appended — confirm the Groovy executor accepts the chain split
        # across lines.
        query = f"""
        mgmt = graph.openManagement()
        {property_key}_prop = mgmt.makePropertyKey('{property_key}')
        """
        if data_type:
            query += f".dataType({data_type}.class)"
        if cardinality:
            query += f".cardinality(Cardinality.{cardinality.upper()})"
        query += ".make()"
        query += f"""
        {label}_label = mgmt.getVertexLabel("{label}")
        mgmt.addProperties({label}_label, {property_key}_prop)
        mgmt.commit()
        """
        # NOTE(review): leftover debug print.
        print("====", query)
        try:
            _ = self.gremlin_client.execute_query(
                query,
                serialize_elements=False
            )
            # NOTE(review): leftover debug print.
            print("====", _)
            return {"status": True, "message": "ok"}
        except Exception as e:
            return {"status": False, "message": e.__str__()}
    def create_edge_property_schema(self,
                                    label: str,
                                    property_key: str,
                                    data_type: str,
                                    cardinality: str):
        """
        :param label:
        :param property_key:
        :param data_type:
        :param cardinality: SINGLE, LIST , SET
        :return:
        """
        # Mirrors create_vertex_property_schema but targets an edge label.
        query = f"""
        mgmt = graph.openManagement()
        {property_key}_prop = mgmt.makePropertyKey('{property_key}')
        """
        if data_type:
            query += f".dataType({data_type}.class)"
        if cardinality:
            query += f".cardinality(Cardinality.{cardinality.upper()})"
        query += ".make()"
        query += f"""
        {label}_label = mgmt.getEdgeLabel("{label}")
        mgmt.addProperties({label}_label, {property_key}_prop)
        mgmt.commit()
        """
        try:
            _ = self.gremlin_client.execute_query(
                query,
                serialize_elements=False
            )
            # NOTE(review): leftover debug print.
            print("====", _)
            return {"status": True, "message": "ok"}
        except Exception as e:
            return {"status": False, "message": e.__str__()}
|
import argparse
import sys
import json
import asyncio
import enum
import re
import base64
from typing import Optional
from dataclasses import dataclass
import httpx
from pure_protobuf.dataclasses_ import field, optional_field, message
from pure_protobuf.types import int32
# - Protobuf schemas
# Converted from https://onlinesequencer.net/sequence.proto and
# https://onlinesequencer.net/note_type.proto
# C0 = 0, CS0 = 1, ..., B8 = 107
# The twelve semitone names within one octave ("S" stands for sharp).
_NOTE_LETTERS = "C CS D DS E F FS G GS A AS B".split()
# One enum member per semitone across octaves 0-8, numbered from 0.
NoteType = enum.IntEnum(
    "NoteType",
    [f"{letter}{octave}" for octave in range(9) for letter in _NOTE_LETTERS],
    start=0,
)
@message
@dataclass
class Note:
    """A single note event (field numbers match onlinesequencer's sequence.proto)."""
    type: NoteType = field(1, default=NoteType.C0)  # pitch, C0..B8
    time: float = field(2, default=0.0)  # onset; scaled by 60/bpm/4 in _get_notes
    length: float = field(3, default=0.0)  # duration, same unit as time
    instrument: int32 = field(4, default=0)
    volume: float = field(5, default=0.0)
@message
@dataclass
class Marker:
    """A timeline marker (field numbers match onlinesequencer's sequence.proto)."""
    time: float = field(1, default=0.0)
    setting: int32 = field(2, default=0)  # which setting the marker targets — semantics unverified
    instrument: int32 = field(3, default=0)
    value: float = field(4, default=0.0)
    blend: bool = field(5, default=False)  # presumably interpolate toward next marker — TODO confirm
@message
@dataclass
class InstrumentSettings:
    """Per-instrument mixer/effect settings (field numbers match sequence.proto)."""
    volume: float = field(1, default=0.0)
    delay: bool = field(2, default=False)
    reverb: bool = field(3, default=False)
    pan: float = field(4, default=0.0)
    enable_eq: bool = field(5, default=False)
    eq_low: float = field(6, default=0.0)
    eq_mid: float = field(7, default=0.0)
    eq_high: float = field(8, default=0.0)
    detune: float = field(9, default=0.0)
@message
@dataclass
class InstrumentSettingsPair:
    """Key/value entry standing in for a protobuf map<int32, InstrumentSettings>."""
    key: Optional[int32] = optional_field(1)  # instrument id
    value: Optional[InstrumentSettings] = optional_field(2)
@message
@dataclass
class SequenceSettings:
    """Global song settings (field numbers match sequence.proto)."""
    bpm: int32 = field(1, default=0)
    time_signature: int32 = field(2, default=0)
    # Maps aren't implemented in pure_protobuf yet but we can still parse them
    # thanks to
    # https://developers.google.com/protocol-buffers/docs/proto3#backwards_compatibility
    instruments: list[InstrumentSettingsPair] = field(3, default_factory=list)
    # Storing volume as (1 - volume) so it defaults to volume=1.
    one_minus_volume: float = field(4, default=0.0)
@message
@dataclass
class Sequence:
    """Top-level decoded song: settings plus all notes and markers."""
    settings: SequenceSettings = field(1, default_factory=SequenceSettings)
    notes: list[Note] = field(2, default_factory=list)
    markers: list[Marker] = field(3, default_factory=list)
# - Helpers
def _extract_data(text):
"""Extracts the base64 encoded string from the site's JavaScript"""
# This is more fragile than a nuclear bomb
return base64.b64decode(re.search(r"var data = '([^']*)';", text)[1])
def _int_or_float(num):
if num % 1 == 0:
return round(num)
return num
def _get_notes(song):
"""Converts a song into a list of notes"""
bpm = song.settings.bpm
all_volume = 1 - song.settings.one_minus_volume
# Convert to a dict for constant instrument settings retrieval
instrument_settings = {
kv.key: kv.value
for kv in song.settings.instruments
if kv.key is not None and kv.value is not None
}
return [
{
"instrument": note.instrument,
"type": note.type.name.replace("S", "#"),
"time": _int_or_float(note.time * (60/bpm/4)),
"length": _int_or_float(note.length * (60/bpm/4)),
"volume": _int_or_float(
note.volume
* all_volume
* (
instrument_settings[note.instrument].volume
if note.instrument in instrument_settings
else 1
)
),
}
for note in song.notes
]
# - "Public" API
async def get_note_infos(url):
    """Fetch an Online Sequencer song page and return its notes as dicts.

    The HTTP fetch runs on the event loop; the CPU-bound protobuf decode is
    offloaded to a worker thread so the loop stays responsive.
    """
    async with httpx.AsyncClient() as client:
        response = await client.get(url)
    page_text = response.text

    def _decode():
        return _get_notes(Sequence.loads(_extract_data(page_text)))

    return await asyncio.to_thread(_decode)
# - Command line
# CLI: a single required positional argument naming the song page.
parser = argparse.ArgumentParser(description="Gets all notes from an Online Sequencer song.")
parser.add_argument("url", help="link to the song to extract note infos from")
if __name__ == "__main__":
    args = parser.parse_args()
    # Blocks until the page is fetched and decoded, then prints compact JSON
    # (no whitespace) to stdout.
    note_infos = asyncio.run(get_note_infos(args.url))
    json.dump(note_infos, sys.stdout, separators=[",",":"])
|
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import DatabaseError
from sqlalchemy.sql import func
from socket import inet_aton, inet_ntoa
from struct import unpack, pack, error as struct_error
from passlib.hash import bcrypt_sha256
import datetime
import hashlib
import json
def sha512(string):
    """Return the hex SHA-512 digest of *string*.

    Accepts str or bytes: ``hashlib`` requires bytes, so text is UTF-8
    encoded first (the original raised TypeError for str under Python 3,
    where str is no longer a byte string).
    """
    if isinstance(string, str):
        string = string.encode('utf-8')
    return hashlib.sha512(string).hexdigest()
def ip2long(ip):
    """Convert a dotted-quad IPv4 string to a *signed* 32-bit integer
    (network byte order), matching how Solves/Tracking store addresses."""
    packed = inet_aton(ip)
    return unpack('!i', packed)[0]
def long2ip(ip_int):
    """Convert a 32-bit integer (signed or unsigned) back to dotted-quad."""
    try:
        packed = pack('!i', ip_int)
    except struct_error:
        # Backwards compatibility with old CTFd databases that stored
        # unsigned values (> 2**31 - 1).
        packed = pack('!I', ip_int)
    return inet_ntoa(packed)
# Module-wide database handle shared by every model below; bound to the
# Flask application elsewhere.
db = SQLAlchemy()
class Pages(db.Model):
    """A CMS page: raw HTML served at *route*."""
    id = db.Column(db.Integer, primary_key=True)
    route = db.Column(db.String(80), unique=True)
    html = db.Column(db.Text)

    def __init__(self, route, html):
        self.route = route
        self.html = html

    def __repr__(self):
        # The previous repr was copy-pasted from Tags and referenced
        # self.tag / self.chal, which Pages does not define, so repr()
        # raised AttributeError. Show the route instead.
        return "<Pages {0}>".format(self.route)
class Containers(db.Model):
    """A named container definition; *buildfile* holds the build file text."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    buildfile = db.Column(db.Text)

    def __init__(self, name, buildfile):
        self.name = name
        self.buildfile = buildfile

    def __repr__(self):
        # Fix: "(0)" was a typo for the "{0}" placeholder, so the id literal
        # "(0)" appeared in every repr and self.id was never shown.
        return "<Container ID:{0} {1}>".format(self.id, self.name)
class Challenges(db.Model):
    """A scoreboard challenge; *flags* holds a JSON-encoded list."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    description = db.Column(db.Text)
    value = db.Column(db.Integer)
    category = db.Column(db.String(80))
    flags = db.Column(db.Text)
    hidden = db.Column(db.Boolean)  # not set by __init__; NULL until toggled
    def __init__(self, name, description, value, category, flags):
        self.name = name
        self.description = description
        self.value = value
        self.category = category
        # Serialized on write; readers must json.loads() this column.
        self.flags = json.dumps(flags)
    def __repr__(self):
        return '<chal %r>' % self.name
class Awards(db.Model):
    """Points granted to a team outside of challenge solves (see Teams.score)."""
    id = db.Column(db.Integer, primary_key=True)
    teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))
    name = db.Column(db.String(80))
    description = db.Column(db.Text)
    date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    value = db.Column(db.Integer)
    category = db.Column(db.String(80))
    icon = db.Column(db.Text)
    def __init__(self, teamid, name, value):
        # description/category/icon are optional; set after construction.
        self.teamid = teamid
        self.name = name
        self.value = value
    def __repr__(self):
        return '<award %r>' % self.name
class Tags(db.Model):
    """A free-form text label attached to a challenge."""
    id = db.Column(db.Integer, primary_key=True)
    chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
    tag = db.Column(db.String(80))
    def __init__(self, chal, tag):
        self.chal = chal
        self.tag = tag
    def __repr__(self):
        return "<Tag {0} for challenge {1}>".format(self.tag, self.chal)
class Files(db.Model):
    """A downloadable file attached to a challenge; *location* is its path."""
    id = db.Column(db.Integer, primary_key=True)
    chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
    location = db.Column(db.Text)
    def __init__(self, chal, location):
        self.chal = chal
        self.location = location
    def __repr__(self):
        return "<File {0} for challenge {1}>".format(self.location, self.chal)
class Keys(db.Model):
    """An accepted flag for a challenge."""
    id = db.Column(db.Integer, primary_key=True)
    chal = db.Column(db.Integer, db.ForeignKey('challenges.id'))
    key_type = db.Column(db.Integer)  # presumably selects the comparison rule — confirm in checker code
    flag = db.Column(db.Text)
    def __init__(self, chal, flag, key_type):
        self.chal = chal
        self.flag = flag
        self.key_type = key_type
    def __repr__(self):
        # NOTE: repr exposes the flag itself — avoid logging these objects.
        return self.flag
class Teams(db.Model):
    """A registered team/account; computes its own score and scoreboard place."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), unique=True)
    email = db.Column(db.String(128), unique=True)
    password = db.Column(db.String(128))
    website = db.Column(db.String(128))
    affiliation = db.Column(db.String(128))
    country = db.Column(db.String(32))
    bracket = db.Column(db.String(32))
    banned = db.Column(db.Boolean, default=False)
    verified = db.Column(db.Boolean, default=False)
    admin = db.Column(db.Boolean, default=False)
    joined = db.Column(db.DateTime, default=datetime.datetime.utcnow)

    def __init__(self, name, email, password):
        self.name = name
        self.email = email
        # Store only the bcrypt-sha256 hash, never the plaintext. (This line
        # had been mangled to a literal `<PASSWORD>` placeholder, which was a
        # syntax error; bcrypt_sha256 is already imported at module top.)
        self.password = bcrypt_sha256.encrypt(str(password))

    def __repr__(self):
        return '<team %r>' % self.name

    def score(self):
        """Sum of solved challenge values plus award values; 0 if no solves."""
        score = db.func.sum(Challenges.value).label('score')
        team = db.session.query(Solves.teamid, score).join(Teams).join(Challenges).filter(Teams.banned == False, Teams.id==self.id).group_by(Solves.teamid).first()
        award_score = db.func.sum(Awards.value).label('award_score')
        award = db.session.query(award_score).filter_by(teamid=self.id).first()
        if team:
            return int(team.score or 0) + int(award.award_score or 0)
        else:
            return 0

    def place(self):
        """Return this team's rank as an ordinal string ('1st', '2nd', ...), or 0 if unranked."""
        score = db.func.sum(Challenges.value).label('score')
        quickest = db.func.max(Solves.date).label('quickest')
        teams = db.session.query(Solves.teamid).join(Teams).join(Challenges).filter(Teams.banned == False).group_by(Solves.teamid).order_by(score.desc(), quickest).all()
        # Ordinal-suffix trick: http://codegolf.stackexchange.com/a/4712
        try:
            i = teams.index((self.id,)) + 1
            k = i % 10
            # Floor division is required: under Python 3 true division,
            # `i / 10` is a float, so 11-13 wrongly got 'st'/'nd'/'rd'
            # instead of 'th'.
            return "%d%s" % (i, "tsnrhtdd"[(i // 10 % 10 != 1) * (k < 4) * k::4])
        except ValueError:
            return 0
class Solves(db.Model):
    """A correct flag submission; unique per (challenge, team)."""
    __table_args__ = (db.UniqueConstraint('chalid', 'teamid'), {})
    id = db.Column(db.Integer, primary_key=True)
    chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))
    teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))
    ip = db.Column(db.Integer)  # stored as a signed int via ip2long()
    flag = db.Column(db.Text)
    date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    # lazy='joined': eagerly join team/chal so scoreboard queries avoid N+1.
    team = db.relationship('Teams', foreign_keys="Solves.teamid", lazy='joined')
    chal = db.relationship('Challenges', foreign_keys="Solves.chalid", lazy='joined')
    # value = db.Column(db.Integer)
    def __init__(self, chalid, teamid, ip, flag):
        self.ip = ip2long(ip)
        self.chalid = chalid
        self.teamid = teamid
        self.flag = flag
        # self.value = value
    def __repr__(self):
        return '<solves %r>' % self.chal
class WrongKeys(db.Model):
    """A record of an incorrect flag submission."""
    id = db.Column(db.Integer, primary_key=True)
    chalid = db.Column(db.Integer, db.ForeignKey('challenges.id'))
    teamid = db.Column(db.Integer, db.ForeignKey('teams.id'))
    date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    flag = db.Column(db.Text)
    chal = db.relationship('Challenges', foreign_keys="WrongKeys.chalid", lazy='joined')
    def __init__(self, teamid, chalid, flag):
        self.teamid = teamid
        self.chalid = chalid
        self.flag = flag
    def __repr__(self):
        return '<wrong %r>' % self.flag
class Tracking(db.Model):
    """An IP-address sighting for a team."""
    id = db.Column(db.Integer, primary_key=True)
    ip = db.Column(db.BigInteger)  # BigInteger also fits legacy unsigned values
    team = db.Column(db.Integer, db.ForeignKey('teams.id'))
    date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    def __init__(self, ip, team):
        self.ip = ip2long(ip)
        self.team = team
    def __repr__(self):
        return '<ip %r>' % self.team
class Config(db.Model):
    """A key/value row for global application configuration."""
    id = db.Column(db.Integer, primary_key=True)
    key = db.Column(db.Text)
    value = db.Column(db.Text)
    def __init__(self, key, value):
        self.key = key
        self.value = value
|
<reponame>erhan-/pokefarm
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Data/Capture/CaptureProbability.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Inventory import ItemId_pb2 as POGOProtos_dot_Inventory_dot_ItemId__pb2
# Generated descriptor wiring for the CaptureProbability message. This module
# is protoc output: change the .proto source and regenerate rather than
# editing anything below.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='POGOProtos/Data/Capture/CaptureProbability.proto',
  package='POGOProtos.Data.Capture',
  syntax='proto3',
  serialized_pb=_b('\n0POGOProtos/Data/Capture/CaptureProbability.proto\x12\x17POGOProtos.Data.Capture\x1a!POGOProtos/Inventory/ItemId.proto\"\x88\x01\n\x12\x43\x61ptureProbability\x12\x33\n\rpokeball_type\x18\x01 \x03(\x0e\x32\x1c.POGOProtos.Inventory.ItemId\x12\x1b\n\x13\x63\x61pture_probability\x18\x02 \x03(\x02\x12 \n\x18reticle_difficulty_scale\x18\x0c \x01(\x01\x62\x06proto3')
  ,
  dependencies=[POGOProtos_dot_Inventory_dot_ItemId__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Message descriptor: repeated ItemId pokeball_type (1), repeated float
# capture_probability (2), double reticle_difficulty_scale (12).
_CAPTUREPROBABILITY = _descriptor.Descriptor(
  name='CaptureProbability',
  full_name='POGOProtos.Data.Capture.CaptureProbability',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='pokeball_type', full_name='POGOProtos.Data.Capture.CaptureProbability.pokeball_type', index=0,
      number=1, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='capture_probability', full_name='POGOProtos.Data.Capture.CaptureProbability.capture_probability', index=1,
      number=2, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='reticle_difficulty_scale', full_name='POGOProtos.Data.Capture.CaptureProbability.reticle_difficulty_scale', index=2,
      number=12, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=113,
  serialized_end=249,
)
# Resolve the cross-file enum reference, then build and register the
# concrete message class.
_CAPTUREPROBABILITY.fields_by_name['pokeball_type'].enum_type = POGOProtos_dot_Inventory_dot_ItemId__pb2._ITEMID
DESCRIPTOR.message_types_by_name['CaptureProbability'] = _CAPTUREPROBABILITY
CaptureProbability = _reflection.GeneratedProtocolMessageType('CaptureProbability', (_message.Message,), dict(
  DESCRIPTOR = _CAPTUREPROBABILITY,
  __module__ = 'POGOProtos.Data.Capture.CaptureProbability_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Data.Capture.CaptureProbability)
  ))
_sym_db.RegisterMessage(CaptureProbability)
# @@protoc_insertion_point(module_scope)
|
<reponame>hschwane/offline_production
#!/usr/bin/env python
"""
Run MuonGun with a small target surface surrounding DeepCore, and
plot the generated tracks to illustrate the part of the detector
volume that goes un-simulated.
"""
from argparse import ArgumentParser
from os.path import expandvars
parser = ArgumentParser()
parser.add_argument("-g", "--gcd", default=expandvars('$I3_TESTDATA/GCD/GeoCalibDetectorStatus_IC86.55697_corrected_V2.i3.gz'))
parser.add_argument("outfile", help="save plot to file")
args = parser.parse_args()
from icecube import icetray, dataclasses, dataio
from icecube import phys_services, simclasses, MuonGun
from I3Tray import I3Tray
from os.path import expandvars
tray = I3Tray()
# Fixed seed so the generated bundles (and the plot) are reproducible.
tray.context['I3RandomService'] = phys_services.I3GSLRandomService(1337)
from icecube.MuonGun.segments import GenerateBundles
# Outer cylinder spans the full detector; the inner one is the small target
# surface around DeepCore whose surroundings go un-simulated.
outer = MuonGun.Cylinder(1600, 800)
inner = MuonGun.Cylinder(300, 150, dataclasses.I3Position(0,0,-350))
spectrum = MuonGun.OffsetPowerLaw(5, 1e3, 1e1, 1e4)
model = MuonGun.load_model('GaisserH4a_atmod12_SIBYLL')
generator = MuonGun.EnergyDependentSurfaceInjector(outer, model.flux, spectrum, model.radius,
    MuonGun.ConstantSurfaceScalingFunction(inner))
tray.AddSegment(GenerateBundles, 'BundleGen', NEvents=1000, Generator=generator,
    GCDFile=expandvars('$I3_TESTDATA/GCD/GeoCalibDetectorStatus_IC86.55697_corrected_V2.i3.gz'))
class Harvest(icetray.I3ConditionalModule):
    """Collect the most energetic track of each event, then plot top and side
    views of the generation surfaces with the harvested tracks as arrows."""
    def __init__(self, context):
        super(Harvest, self).__init__(context)
        self.AddOutBox("OutBox")
    def Configure(self):
        # Accumulated across DAQ frames; consumed in Finish().
        self.tracks = []
    def DAQ(self, frame):
        self.tracks.append(dataclasses.get_most_energetic_track(frame['I3MCTree']))
        self.PushFrame(frame)
    @staticmethod
    def cylinder_patch(cyl, view='side', **kwargs):
        # Matplotlib outline of a cylinder: x/z rectangle for 'side',
        # x/y circle for 'top' (any other view returns None).
        from matplotlib.patches import Circle, Rectangle
        if view == 'side':
            return Rectangle((cyl.center[0]-cyl.radius, cyl.center[2]-cyl.length/2.), cyl.radius*2, cyl.length, **kwargs)
        elif view == 'top':
            return Circle((cyl.center.x, cyl.center.y), radius=cyl.radius, **kwargs)
    def Finish(self):
        import matplotlib
        # 'agg' backend: render straight to file, no display required.
        matplotlib.use('agg')
        import pylab
        fig = pylab.figure(figsize=(10, 4))
        fig.subplots_adjust(wspace=0.3, bottom=0.15)
        # Left panel: top (x/y) view of the first 50 tracks.
        ax = pylab.subplot(1,2,1)
        for track in self.tracks[:50]:
            # Arrow length: distance along the track to the inner surface.
            l = inner.intersection(track.pos, track.dir).first
            pylab.arrow(track.pos.x, track.pos.y, l*track.dir.x, l*track.dir.y, edgecolor='k', head_width=10, width=1e-2)
            pylab.scatter([track.pos.x], [track.pos.y])
        ax.add_artist(self.cylinder_patch(outer, 'top', facecolor="None", edgecolor='r'))
        ax.add_artist(self.cylinder_patch(inner, 'top', facecolor="None", edgecolor='r'))
        ax.set_aspect('equal')
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')
        ax.set_ylim((-1000, 1000))
        ax.set_xlim((-1000, 1000))
        # Right panel: side (x/z) view of tracks near the y=0 plane.
        ax = pylab.subplot(1,2,2)
        for track in self.tracks:
            if abs(track.pos.y) < 20:
                l = inner.intersection(track.pos, track.dir).first
                pylab.arrow(track.pos.x, track.pos.z, l*track.dir.x, l*track.dir.z, edgecolor='k', head_width=10, width=1e-2)
                pylab.scatter([track.pos.x], [track.pos.z])
        ax.add_artist(self.cylinder_patch(outer, 'side', facecolor="None", edgecolor='r'))
        ax.add_artist(self.cylinder_patch(inner, 'side', facecolor="None", edgecolor='r'))
        ax.set_aspect('equal')
        ax.set_xlabel('x [m]')
        ax.set_ylabel('z [m]')
        ax.set_ylim((-1000, 1000))
        ax.set_xlim((-1000, 1000))
        pylab.savefig(args.outfile)
tray.AddModule(Harvest)
# Runs the chain; Harvest.Finish() writes the plot to args.outfile.
tray.Execute()
|
<reponame>pranasziaukas/advent-of-code-2021
import unittest
from monad_unit import Monad
class FooTest(unittest.TestCase):
    """Exercises Monad against a concrete 14-block MONAD program.

    The program is the standard AoC 2021 day 24 shape: fourteen identical
    18-instruction blocks that differ only in three literals per block —
    the z divisor, the x offset, and the y offset.
    """

    # (z divisor, x offset, y offset) for each of the 14 input blocks,
    # in program order.
    BLOCK_PARAMS = [
        (1, 12, 4),
        (1, 11, 11),
        (1, 13, 5),
        (1, 11, 11),
        (1, 14, 14),
        (26, -10, 7),
        (1, 11, 11),
        (26, -9, 4),
        (26, -3, 6),
        (1, 13, 5),
        (26, -5, 9),
        (26, -10, 12),
        (26, -4, 14),
        (26, -5, 14),
    ]

    def setUp(self):
        # Expand the parameter table into the full flat instruction list;
        # the result is identical to writing all 252 lines out literally.
        instructions = []
        for z_div, x_off, y_off in self.BLOCK_PARAMS:
            instructions += [
                "inp w",
                "mul x 0",
                "add x z",
                "mod x 26",
                f"div z {z_div}",
                f"add x {x_off}",
                "eql x w",
                "eql x 0",
                "mul y 0",
                "add y 25",
                "mul y x",
                "add y 1",
                "mul z y",
                "mul y 0",
                "add y w",
                f"add y {y_off}",
                "mul y x",
                "add z y",
            ]
        self.monad = Monad(instructions)

    def test_maximum(self):
        self.assertEqual(92915979999498, self.monad.maximize())

    def test_minimum(self):
        self.assertEqual(21611513911181, self.monad.minimize())
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
import os
import sys
from collections import deque
from logging import getLogger
from tatau_core.models import TaskAssignment
from tatau_core.nn.tatau.model import Model
from tatau_core.nn.tatau.progress import TrainProgress
from tatau_core.utils import configure_logging
from tatau_core.utils.ipfs import IPFS, Downloader
from .session import Session, SessionValue
configure_logging()  # import-time side effect: install logging handlers
logger = getLogger('tatau_core.trainer')
class TrainSession(Session):
    """Worker session that trains a model for one TaskAssignment.

    process_assignment() runs in the coordinating process (downloads inputs,
    uploads results); main() runs in the training subprocess. The two sides
    exchange data through the SessionValue descriptors below, persisted by
    the Session base class.
    """
    # Values shared between the coordinating process and the subprocess.
    train_history = SessionValue()
    init_weights_path = SessionValue()
    chunk_dirs = SessionValue()
    train_weights_path = SessionValue()
    def __init__(self, uuid=None):
        super(TrainSession, self).__init__(module=__name__, uuid=uuid)
    def process_assignment(self, assignment: TaskAssignment, *args, **kwargs):
        """Download model code, weights, and dataset chunks; run training via
        _run(); upload the resulting weights to IPFS and fill train_result."""
        logger.info('Train Task: {}'.format(assignment))
        train_result = assignment.train_result
        assert train_result
        logger.info('Train data: {}'.format(assignment.train_data))
        downloader = Downloader(assignment.task_declaration_id)
        downloader.add_to_download_list(assignment.train_data.model_code_ipfs, 'model.py')
        initial_weight_file_name = None
        if assignment.train_data.weights_ipfs is not None:
            # Name the weights file per iteration so iterations don't collide.
            initial_weight_file_name = 'initial_weight_{}'.format(assignment.train_data.current_iteration)
            downloader.add_to_download_list(assignment.train_data.weights_ipfs, initial_weight_file_name)
        else:
            logger.info('Initial weights are not set')
        batch_size = assignment.train_data.batch_size
        epochs = assignment.train_data.epochs
        chunk_dirs = deque()
        for index, chunk_ipfs in enumerate(assignment.train_data.train_chunks_ipfs):
            dir_name = 'chunk_{}'.format(index)
            downloader.add_to_download_list(chunk_ipfs, dir_name)
            chunk_dirs.append(downloader.resolve_path(dir_name))
        downloader.download_all()
        logger.info('Dataset downloaded')
        self.model_path = downloader.resolve_path('model.py')
        self.init_weights_path = None if initial_weight_file_name is None \
            else downloader.resolve_path(initial_weight_file_name)
        self.chunk_dirs = chunk_dirs
        logger.info('Start training')
        # Spawns the subprocess that executes main() below.
        self._run(batch_size, epochs, assignment.train_data.current_iteration)
        train_result.train_history = self.train_history
        # NOTE(review): assumes Keras-style history keys 'loss'/'acc' —
        # confirm against Model.train()'s return value.
        train_result.loss = train_result.train_history['loss'][-1]
        train_result.accuracy = train_result.train_history['acc'][-1]
        ipfs = IPFS()
        ipfs_file = ipfs.add_file(self.train_weights_path)
        logger.info('Result weights_ipfs are uploaded')
        train_result.weights_ipfs = ipfs_file.multihash
    def main(self):
        """Subprocess entry point; argv[2:5] carry batch size, epochs, iteration."""
        logger.info('Start training')
        batch_size = int(sys.argv[2])
        nb_epochs = int(sys.argv[3])
        current_iteration = int(sys.argv[4])
        model = Model.load_model(path=self.model_path)
        init_weights_path = self.init_weights_path
        if init_weights_path is not None:
            model.load_weights(init_weights_path)
        else:
            logger.info('Initial weights are not set')
        progress = TrainProgress()
        train_history = model.train(
            chunk_dirs=self.chunk_dirs,
            batch_size=batch_size, nb_epochs=nb_epochs,
            train_progress=progress, current_iteration=current_iteration
        )
        train_weights_path = os.path.join(self.base_dir, 'result_weights')
        model.save_weights(train_weights_path)
        # Publish results back to the parent via SessionValue storage.
        self.train_weights_path = train_weights_path
        self.train_history = train_history
if __name__ == '__main__':
    logger.info("Start trainer")
    # run() comes from the Session base class (not defined in this module).
    TrainSession.run()
|
#!/usr/bin/python
import httplib, urllib
import webbrowser
import requests
import time
import base64
import json
from finally_importer import *
from finally_helpers import *
class FinallySubimporter:
    """Abstract base for importer steps; subclasses must override importLibrary()."""
    def importLibrary(self):
        # NotImplementedError is the conventional signal for an unimplemented
        # abstract method; the original raised ValueError, which callers
        # could confuse with a genuine bad-input condition.
        raise NotImplementedError("FinallySubimporter no-op must override importLibrary")
class FinallySpotifyImporter(FinallySubimporter):
    """Top-level Spotify importer: OAuth code -> OAuth token -> library JSON."""
    oauthCodeFolderPath = None
    oauthCodeFilePath = None
    logger = None
    def __init__(self, logger):
        self.oauthCodeFolderPath = os.path.join(os.getcwd(), "oauth")
        self.oauthCodeFilePath = os.path.join(self.oauthCodeFolderPath, "spotify.txt")
        self.logger = logger
    def importLibrary(self):
        # Guard-clause style: bail out (or retry) as soon as a step fails.
        codeImporter = FinallySpotifyOAuthCodeImporter(self.logger, self.oauthCodeFilePath)
        oauthCode = codeImporter.importLibrary()
        if oauthCode is None:
            self.logger.error("FinallySpotifyOAuthCodeImporter failed to get oauthCode")
            return None
        self.logger.log("\nFinallySpotifyLibraryImporter oauthCode = " + str(oauthCode))
        oauthToken = FinallySpotifyOAuthTokenImporter(self.logger, oauthCode).importLibrary()
        if oauthToken is None:
            # A saved-but-stale code is the usual cause: discard it and retry.
            self.logger.log("\nFinallySpotifyLibraryImporter deleting existing oauth code and trying again")
            codeImporter.deleteExistingOAuthCode()
            return self.importLibrary()
        self.logger.log("\nFinallySpotifyLibraryImporter oauthToken = " + str(oauthToken))
        authedLibrary = FinallySpotifyLibraryImporter(self.logger, oauthToken).importLibrary()
        if authedLibrary is None:
            self.logger.error("FinallySpotifyLibraryImporter failed to get authedLibrary")
            return None
        self.logger.log("\nFinallySpotifyLibraryImporter authedLibrary complete!")
        return authedLibrary
class FinallySpotifyOAuthCodeImporter(FinallySubimporter):
    """Obtains a Spotify OAuth authorization code.

    Reads a code previously saved to *oauthCodePath*; if absent, opens the
    Spotify consent page in a browser and polls the file a few times
    (presumably a local server at the redirect URI writes it — confirm).
    """
    oauthCodePath = None
    logger = None
    def __init__(self, logger, oauthCodePath):
        self.oauthCodePath = oauthCodePath
        self.logger = logger
    def deleteExistingOAuthCode(self):
        os.remove(self.oauthCodePath)
    def spotifyAPIURL(self):
        return "accounts.spotify.com"
    def spotifyEndpointPath(self):
        return "/authorize/"
    def spotifyOAuthClientID(self):
        return "<KEY>"
    def spotifyOAuthRedirectURI(self):
        # Must match a redirect URI registered for the client application.
        return "http://127.0.0.1:5000/spotify"
    def spotifyOAuthScope(self):
        return "user-library-read"
    def spotifyOAuthResponseType(self):
        return "code"
    def spotifyEndpointHeaders(self):
        return {"Content-Type" : "application/x-www-form-urlencoded"}
    def spotifyEndpointParams(self):
        params = {"client_id" : self.spotifyOAuthClientID(), "response_type" : self.spotifyOAuthResponseType(), "redirect_uri" : self.spotifyOAuthRedirectURI(), "scope" : self.spotifyOAuthScope()}
        return urllib.urlencode(params)
    def importLibrary(self):
        return self.attemptToImportLibrary(3)
    def attemptToImportLibrary(self, attempts):
        # Poll for the saved code, launching the browser consent flow and
        # sleeping between attempts until retries are exhausted.
        savedToken = self.getSavedOAuthCode()
        if savedToken is None:
            if attempts <= 0:
                self.logger.error("FinallySpotifyOAuthCodeImporter attemptToImportLibrary No more attempts!")
                return None
            else:
                self.sendSpotifyOAuthCodeEndpoint()
                time.sleep(2)
                return self.attemptToImportLibrary(attempts-1)
        else:
            return savedToken
    def getSavedOAuthCode(self):
        # Returns the saved code, or None when the file is absent/unreadable.
        try:
            with open(self.oauthCodePath) as contents:
                return contents.read()
        except Exception, e:
            self.logger.log("\nFinallySpotifyOAuthCodeImporter getSavedOAuthCode except = " + str(e))
            return None
    def sendSpotifyOAuthCodeEndpoint(self):
        # Let requests resolve the fully-encoded /authorize URL, then open it
        # in the user's browser for consent.
        spotifyURL = self.spotifyAPIURL()
        spotifyEndpoint = self.spotifyEndpointPath()
        spotifyParams = self.spotifyEndpointParams()
        requestURLResponse = requests.get('https://'+spotifyURL+spotifyEndpoint, params=spotifyParams)
        requestURL = requestURLResponse.url
        self.logger.log("\nOpening " + requestURL + "...")
        webbrowser.open(requestURL, new=2)
class FinallySpotifyOAuthTokenImporter(FinallySubimporter):
    """Exchanges an OAuth authorization code for an access token."""
    oauthCode = None
    logger = None
    def __init__(self, logger, oauthCode):
        self.oauthCode = oauthCode
        self.logger = logger
    def spotifyAPIURL(self):
        return "accounts.spotify.com"
    def spotifyEndpointPath(self):
        return "/api/token"
    def spotifyTokenGrantType(self):
        return "authorization_code"
    def spotifyOAuthRedirectURI(self):
        return "http://127.0.0.1:5000/spotify"
    def spotifyBase64EncodedClientCreds(self):
        # base64("client_id:client_secret") for HTTP Basic authentication.
        return "MmJiZDhkZGQ1ODFkNDRlZWJiZDBkN2YwYTQyYzMzZDI6YjM3NGJhMTY4ZjFjNDNmYThkMjhkY2Q1MjY0Mzc2MjQ="
    def spotifyOAuthClientID(self):
        return "<KEY>"
    def spotifyOAuthClientSecret(self):
        return "<KEY>"
    def spotifyEndpointHeaders(self):
        # NOTE(review): authorizationHeaderString is built but never sent;
        # the credentials go in the POST body instead (params below).
        authorizationHeaderString = "Basic " + self.spotifyBase64EncodedClientCreds()
        return {"Content-Type" : "application/x-www-form-urlencoded"}
    def spotifyEndpointParams(self):
        params = {"grant_type" : self.spotifyTokenGrantType(), "code" : self.oauthCode, "redirect_uri" : self.spotifyOAuthRedirectURI(), "client_id" : self.spotifyOAuthClientID(), "client_secret" : self.spotifyOAuthClientSecret()}
        return urllib.urlencode(params)
    def importLibrary(self):
        return self.sendSpotifyTokenEndpoint()
    def sendSpotifyTokenEndpoint(self):
        # POST the code exchange; returns the access token, or None on any
        # error reported in the JSON response.
        spotifyURL = self.spotifyAPIURL()
        spotifyEndpoint = self.spotifyEndpointPath()
        spotifyParams = self.spotifyEndpointParams()
        spotifyHeaders = self.spotifyEndpointHeaders()
        self.logger.log("\nFinallySpotifyOAuthTokenImporter sendSpotifyTokenEndpoint Sending token endpoint w spotifyURL = " + str(spotifyURL) + " endpoint = " + str(spotifyEndpoint) + " params = " + str(spotifyParams) + " headers = " + str(spotifyHeaders))
        spotifyAPIConnection = httplib.HTTPSConnection(spotifyURL)
        spotifyAPIConnection.request("POST", spotifyEndpoint, spotifyParams, spotifyHeaders)
        spotifyAPIResponse = spotifyAPIConnection.getresponse()
        self.logger.log("\nspotifyAPIResponse =" + str(spotifyAPIResponse.status) + ", " + str(spotifyAPIResponse.reason))
        spotifyData = spotifyAPIResponse.read()
        spotifyAPIConnection.close()
        self.logger.log("\nFinallySpotifyOAuthTokenImporter sendSpotifyTokenEndpoint = " + str(spotifyData))
        jsonLoad = json.loads(spotifyData)
        try:
            error = jsonLoad["error"]
            self.logger.log("\nToken request error = " + str(error))
            return None
        except Exception, e:
            self.logger.log("\nToken request has no JSON error")
        try:
            return jsonLoad["access_token"]
        except Exception, e:
            self.logger.log("\nToken request has no access_token??")
            return None
class FinallySpotifyLibraryImporter(FinallySubimporter):
oauthToken = None
attemptToRecurse = None
logger = None
def __init__(self, logger, oauthToken, attemptToRecurse=True):
self.oauthToken = oauthToken
self.attemptToRecurse = attemptToRecurse
self.logger = logger
def spotifyAPIURL(self):
return "api.spotify.com"
def spotifyEndpointPath(self):
return "/v1/me/tracks/"
def spotifyEndpointPathWithParams(self, limit, offset):
return self.spotifyEndpointPath() + "?limit=" + str(limit) + "&offset=" + str(offset)
def spotifyEndpointHeaders(self):
authHeaderValue = "Bearer " + self.oauthToken
return {"Accept" : "application/json", "Content-Type" : "application/x-www-form-urlencoded", "Authorization" : authHeaderValue}
def singleSendSpotifyTracksEndpoint(self, u, e, h):
print "\nFinallySpotifyLibraryImporter Sending tracks endpoint w spotifyURL = " + str(u) + " endpoint = " + str(e) + " headers = " + str(h)
spotifyAPIConnection = httplib.HTTPSConnection(u)
spotifyAPIConnection.request("GET", e, {}, h)
spotifyAPIResponse = spotifyAPIConnection.getresponse()
spotifyData = spotifyAPIResponse.read()
spotifyAPIConnection.close()
jsonLoad = None
try:
jsonLoad = json.loads(spotifyData)
self.logger.log("\nFinallySpotifyLibraryImporter singleSendSpotifyTracksEndpoint done = " + str(len(jsonLoad)))
except Exception, e:
self.logger.log("\nFinallySpotifyLibraryImporter error with json " + str(spotifyData) + " = " + str(e))
return None
try:
error = jsonLoad["error"]["status"]
self.logger.log("\nFinallySpotifyLibraryImporter error = " + str(error))
return None
except Exception, e:
return jsonLoad
def sendSpotifyTracksEndpoint(self):
spotifyURL = self.spotifyAPIURL()
spotifyParams = self.spotifyParams()
spotifyEndpoint = self.spotifyEndpointPath()
spotifyHeaders = self.spotifyEndpointHeaders()
return self.singleSendSpotifyTracksEndpoint(spotifyURL, spotifyEndpoint, spotifyHeaders, spotifyParams)
def extractEndpointFromNextURL(self, nextURL):
offsetValueBeginIndex = nextURL.find("offset=")
offsetValueLength = len("offset=")
offsetValueEndIndex = nextURL.find("&", offsetValueBeginIndex)
if offsetValueEndIndex < 0:
offsetValueEndIndex = len(nextURL)-1
limitValueBeginIndex = nextURL.find("limit=")
limitValueLength = len("limit=")
limitValueEndIndex = nextURL.find("&", limitValueBeginIndex)
if limitValueEndIndex < 0:
limitValueEndIndex = len(nextURL)-1
offsetValue = nextURL[offsetValueBeginIndex+offsetValueLength:offsetValueEndIndex+1]
limitValue = nextURL[limitValueBeginIndex+limitValueLength:limitValueEndIndex+1]
#offsetStr = str(offsetValue)
#limitStr = str(limitValue)
#params = urllib.urlencode({"offset" : offsetStr, "limit" : limitStr})
return self.spotifyEndpointPathWithParams(limitValue, offsetValue)
def unrollRecursiveResults(self, results):
if results is None:
return None
if len(results) is 1:
return results[0]
else:
baseResult = results[0]
baseItemsArray = baseResult["items"]
for i in range(1, len(results)):
subresult = results[i]
try:
subresultItems = subresult["items"]
for subresultItem in subresultItems:
baseItemsArray.append(subresultItem)
except Exception, e:
self.logger.error("Unable to grab the items from subresult = " + str(subresult))
self.logger.error("unrollRecursiveResults e " + str(e))
baseResult["items"] = baseItemsArray
return baseResult
def recursivelySendSpotifyTracksEndpoint(self, endpoint, foundResults=[]):
spotifyURL = self.spotifyAPIURL()
spotifyHeaders = self.spotifyEndpointHeaders()
jsonResponse = self.singleSendSpotifyTracksEndpoint(spotifyURL, endpoint, spotifyHeaders)
if jsonResponse is not None:
foundResults.append(jsonResponse)
try:
nextURL = jsonResponse['next']
if nextURL is None:
return foundResults
else:
self.logger.log("\nFinallySpotifyLibraryImporter found next URL = " + str(nextURL) + " so far have " + str(len(foundResults)))
nextEndpoint = self.extractEndpointFromNextURL(nextURL)
return self.recursivelySendSpotifyTracksEndpoint(nextEndpoint, foundResults)
except Exception, e:
self.logger.log("\nFinallySpotifyLibraryImporter NO NEXT URL FOUND in = " + str(jsonResponse.keys()) + "\n error = " + str(e))
return foundResults
else:
return foundResults
def importLibrary(self):
self.logger.log("\nFinallySpotifyLibraryImporter beginning import, attemptToRecurse = " + str(self.attemptToRecurse))
parsedJSONResponse = None
if self.attemptToRecurse is True:
recursiveResults = self.recursivelySendSpotifyTracksEndpoint(self.spotifyEndpointPathWithParams(50, 0))
parsedJSONResponse = self.unrollRecursiveResults(recursiveResults)
else:
parsedJSONResponse = self.sendSpotifyTracksEndpoint()
return json.dumps(parsedJSONResponse)
# Standalone entry point: build an importer with a fresh logger and print the
# imported library JSON to stdout (Python 2 print statement; file is Python 2).
if __name__ == "__main__":
    i = FinallySpotifyImporter(FinallyLogger())
    print "\n\n\n" + i.importLibrary()
<gh_stars>1-10
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import print_function
from __future__ import absolute_import
import os
import logging
import yaml
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base
from yardstick.common import constants as consts
import yardstick.common.openstack_utils as op_utils
from yardstick.common.task_template import TaskTemplate
LOG = logging.getLogger(__name__)
class GetServerXmlConf(base.Scenario):
    """Get a server's XML configuration file.

    Parameters
        server - instance of the server
            type: dict
            unit: N/A
            default: null
        pod_file - path to pod configuration file
            type: string
            unit: N/A
            default: null

    Outputs:
        server_xml - XML configuration of the server instance
            type: XML
            unit: N/A
    """

    __scenario_type__ = "GetServerXmlConf"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.options = self.scenario_cfg['options']
        # Target server dict: must carry its id and the compute host name
        # ("OS-EXT-SRV-ATTR:host") it is scheduled on.
        self.server = self.options.get("server")
        self.server_id = self.server["id"]
        self.server_host = self.server["OS-EXT-SRV-ATTR:host"]
        self.connection = None
        # The pod file lists the physical nodes with their SSH credentials.
        pod_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
                                self.options.get("pod_file"))
        with open(pod_file) as f:
            nodes = yaml.safe_load(TaskTemplate.render(f.read()))
        # Index node records by host name for _ssh_host lookups.
        self.nodes = {a['host_name']: a for a in nodes['nodes']}
        self.setup_done = False

    def _ssh_host(self, server_host):
        """Establish a ssh connection to the host node."""
        # NOTE(review): nodes.get(...) may return None for an unknown host,
        # which would raise AttributeError on node.get below -- confirm the
        # pod file always covers every compute host.
        node = self.nodes.get(server_host, None)
        user = str(node.get('user', 'ubuntu'))
        ssh_port = str(node.get("ssh_port", ssh.DEFAULT_PORT))
        ip = str(node.get('ip', None))
        pwd = node.get('password', None)
        key_fname = node.get('key_filename', '/root/.ssh/id_rsa')
        # Prefer password auth when a password is configured, else key auth.
        if pwd is not None:
            LOG.debug("Log in via pw, user:%s, host:%s, password:%s",
                      user, ip, pwd)
            self.connection = ssh.SSH(user, ip, password=pwd, port=ssh_port)
        else:
            LOG.debug("Log in via key, user:%s, host:%s, key_filename:%s",
                      user, ip, key_fname)
            self.connection = ssh.SSH(user, ip, key_filename=key_fname,
                                      port=ssh_port)
        self.connection.wait(timeout=600)

    def setup(self):
        """Scenario setup: open the SSH connection to the server's host."""
        self._ssh_host(self.server_host)
        self.setup_done = True

    def run(self, result):
        """Execute the benchmark: dump the libvirt domain XML over SSH."""
        if not self.setup_done:
            self.setup()
        # virsh identifies the libvirt domain by the Nova instance id.
        cmd = "sudo virsh dumpxml %s" % self.server_id
        LOG.debug("Dumping server's XML configration file: %s", cmd)
        status, stdout, stderr = self.connection.execute(cmd)
        if status:
            raise RuntimeError(stderr)
        LOG.info("Get server's XML configuration file successful!")
        values = [stdout]
        # Map the scenario's declared output names onto the XML dump.
        keys = self.scenario_cfg.get('output', '').split()
        return self._push_to_outputs(keys, values)
|
<filename>utils/tests/test_flare.py
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2018 Datadog, Inc.
import mock
import pytest
import os
import datetime
import tempfile
import shutil
import zipfile
import requests
from utils.flare import Flare
# Known file names/contents used to build the flare archive fixture below.
CONTENTS = {
    'marvel': 'Avengers: Ironman, Hulk, Spiderman, Thor, Captain American,...',
    'dc': 'Justice League: Superman, Batman, WonderWoman, Flash, Aquaman...',
}
# Case id echoed back by the fake "successful upload" response.
CASE_NO = 12345
@pytest.fixture
def requests_ok():
    """A 200 response whose JSON body carries the expected case id."""
    response = requests.Response()
    response.status_code = 200
    response._content = "{{\"case_id\": {}}}".format(CASE_NO).encode()
    return response
@pytest.fixture
def requests_ok_no_case():
    """A 200 response with no JSON payload, hence no case id."""
    response = requests.Response()
    response.status_code = 200
    return response
@pytest.fixture
def requests_nok():
    """A 400 response simulating a rejected flare upload."""
    response = requests.Response()
    response.status_code = 400
    return response
@pytest.fixture(scope="module")
def zip_contents():
    """Create a temp directory of known files to zip, removed on teardown."""
    location = tempfile.mkdtemp(suffix='flare')
    for name, text in CONTENTS.items():
        with open(os.path.join(location, name), 'w') as handle:
            handle.write(text)
    yield location  # provide the fixture value
    if os.path.exists(location):
        shutil.rmtree(location)
def test_flare_basic(zip_contents, requests_ok, requests_ok_no_case):
    """Happy path: archive naming, zip contents, upload, and cleanup."""
    my_flare = Flare(paths=[zip_contents])
    # NOTE(review): the expected timestamp is recomputed here, so this assert
    # can flake if Flare captured its timestamp in the previous second.
    expected_flare_path = "datadog-agent-{}.zip".format(
        datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
    assert os.path.basename(my_flare.get_archive_path()) == expected_flare_path
    flare_path = my_flare.create_archive()
    assert os.path.exists(flare_path)
    assert zipfile.is_zipfile(flare_path)
    # Every archived file must come from CONTENTS and be deflate-compressed.
    with zipfile.ZipFile(flare_path, 'r') as flare_zip:
        archive_contents = flare_zip.infolist()
        for content in archive_contents:
            assert os.path.basename(content.filename) in CONTENTS
            assert content.compress_type == zipfile.ZIP_DEFLATED
    # Successful upload with a case id in the response body.
    with mock.patch('requests.post', return_value=requests_ok):
        success, case = my_flare.submit()
        assert success
        assert case == CASE_NO
    # Successful upload with an empty body: no case id reported.
    with mock.patch('requests.post', return_value=requests_ok_no_case):
        success, case = my_flare.submit()
        assert success
        assert case is None
    my_flare.cleanup()
    assert not os.path.exists(flare_path)
def test_flare_400(zip_contents, requests_nok):
    """A 400 from the intake endpoint must report failure and no case id."""
    flare = Flare(paths=[zip_contents])
    flare.create_archive()
    with mock.patch('requests.post', return_value=requests_nok):
        ok, case = flare.submit()
        assert not ok
        assert case is None
    flare.cleanup()
    assert not os.path.exists(flare.get_archive_path())
def test_flare_proxy_timeout(zip_contents):
    """A proxy timeout during upload must be swallowed, not raised."""
    flare = Flare(paths=[zip_contents])
    flare.create_archive()
    with mock.patch('requests.post') as post_mock:
        post_mock.side_effect = requests.exceptions.Timeout('fake proxy timeout')
        ok, case = flare.submit()
        assert not ok
        assert case is None
    flare.cleanup()
    assert not os.path.exists(flare.get_archive_path())
def test_flare_too_large(zip_contents, requests_ok):
    """An archive over MAX_UPLOAD_SIZE must fail validation and submission.

    BUG FIX: ``requests_ok`` was referenced inside the body without being
    declared as a fixture parameter, so ``mock.patch`` received the fixture
    *function* (not a Response) as the post return value.
    """
    my_flare = Flare(paths=[zip_contents])
    my_flare.MAX_UPLOAD_SIZE = 1
    my_flare.create_archive()
    assert not my_flare._validate_size()
    with mock.patch('requests.post', return_value=requests_ok):
        success, case = my_flare.submit()
        assert not success
        assert case is None
    my_flare.cleanup()
    assert not os.path.exists(my_flare.get_archive_path())
|
<filename>PyMesh/third_party/libigl/python/tutorial/405_AsRigidAsPossible.py
#!/usr/bin/env python
#
# This file is part of libigl, a simple c++ geometry processing library.
#
# Copyright (C) 2017 <NAME> <<EMAIL>> and <NAME> <<EMAIL>>
#
# This Source Code Form is subject to the terms of the Mozilla Public License
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import sys, os
from math import sin, cos, pi
# Add the igl library to the modules search path
sys.path.insert(0, os.getcwd() + "/../")
import pyigl as igl
from shared import TUTORIAL_SHARED_PATH, check_dependencies
dependencies = ["glfw"]
check_dependencies(dependencies)

# Highlight colour (RGB in [0, 1]); unused below -- presumably kept from the
# tutorial template.
sea_green = igl.eigen.MatrixXd([[70. / 255., 252. / 255., 167. / 255.]])

V = igl.eigen.MatrixXd()    # rest-pose vertices
U = igl.eigen.MatrixXd()    # deformed vertices, updated every frame
F = igl.eigen.MatrixXi()    # triangle faces
S = igl.eigen.MatrixXd()    # per-vertex selection labels (>= 0 means constrained)
b = igl.eigen.MatrixXi()    # indices of the constrained vertices
mid = igl.eigen.MatrixXd()  # mesh centroid, scales the handle motion

anim_t = 0.0        # animation clock
anim_t_dir = 0.03   # clock increment per drawn frame
arap_data = igl.ARAPData()
def pre_draw(viewer):
    """Per-frame callback: animate the handle targets and re-solve ARAP."""
    global anim_t
    # Boundary conditions: one target position per constrained vertex.
    bc = igl.eigen.MatrixXd(b.size(), V.cols())
    for i in range(0, b.size()):
        # Start from the rest-pose position, then offset by selection group.
        bc.setRow(i, V.row(b[i]))
        if S[b[i]] == 0:
            # Group 0: swing in the x/y plane.
            r = mid[0] * 0.25
            bc[i, 0] += r * sin(0.5 * anim_t * 2. * pi)
            bc[i, 1] = bc[i, 1] - r + r * cos(pi + 0.5 * anim_t * 2. * pi)
        elif S[b[i]] == 1:
            # Group 1: bob in y, sway in z.
            r = mid[1] * 0.15
            bc[i, 1] = bc[i, 1] + r + r * cos(pi + 0.15 * anim_t * 2. * pi)
            bc[i, 2] -= r * sin(0.15 * anim_t * 2. * pi)
        elif S[b[i]] == 2:
            # Group 2: bob in z, sway in x.
            r = mid[1] * 0.15
            bc[i, 2] = bc[i, 2] + r + r * cos(pi + 0.35 * anim_t * 2. * pi)
            bc[i, 0] += r * sin(0.35 * anim_t * 2. * pi)
    # Solve the as-rigid-as-possible deformation into U and refresh the view.
    igl.arap_solve(bc, arap_data, U)
    viewer.data().set_vertices(U)
    viewer.data().compute_normals()
    if viewer.core.is_animating:
        anim_t += anim_t_dir
    return False
def key_down(viewer, key, mods):
    """Toggle the animation when the space bar is pressed."""
    if key != ord(' '):
        return False
    viewer.core.is_animating = not viewer.core.is_animating
    return True
# Load the mesh and the per-vertex selection used as deformation handles.
igl.readOFF(TUTORIAL_SHARED_PATH + "decimated-knight.off", V, F)
U = igl.eigen.MatrixXd(V)
igl.readDMAT(TUTORIAL_SHARED_PATH + "decimated-knight-selection.dmat", S)

# Vertices in selection: indices whose label is >= 0 become constraints.
b = igl.eigen.MatrixXd([[t[0] for t in [(i, S[i]) for i in range(0, V.rows())] if t[1] >= 0]]).transpose().castint()

# Centroid (midpoint of the bounding box), used to scale handle motion.
mid = 0.5 * (V.colwiseMaxCoeff() + V.colwiseMinCoeff())

# ARAP precomputation on the rest pose with the constrained vertex set.
arap_data.max_iter = 100
igl.arap_precomputation(V, F, V.cols(), b, arap_data)

# Set color based on selection.
C = igl.eigen.MatrixXd(F.rows(), 3)
purple = igl.eigen.MatrixXd([[80.0 / 255.0, 64.0 / 255.0, 255.0 / 255.0]])
gold = igl.eigen.MatrixXd([[255.0 / 255.0, 228.0 / 255.0, 58.0 / 255.0]])
for f in range(0, F.rows()):
    # A face is painted purple only when all three corners are selected.
    if S[F[f, 0]] >= 0 and S[F[f, 1]] >= 0 and S[F[f, 2]] >= 0:
        C.setRow(f, purple)
    else:
        C.setRow(f, gold)

# Plot the mesh with pseudocolors and hook up the animation callbacks.
viewer = igl.glfw.Viewer()
viewer.data().set_mesh(U, F)
viewer.data().set_colors(C)
viewer.callback_pre_draw = pre_draw
viewer.callback_key_down = key_down
viewer.core.is_animating = True
viewer.core.animation_max_fps = 30.
print("Press [space] to toggle animation")
viewer.launch()
|
<filename>backend/hackathon/views.py
from .serializers import BenchSerializer
from .models import Bench
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework import viewsets
from rest_framework.response import Response
from django.core import serializers
from .computation import CoordinateHandler
import os
import requests
import csv
import json
class ClosestEatery(APIView):
    """
    Handles getting the closest restaurant from the user after the user hits the go button.

    Params:
    -------
    APIView : Django superclass that points user requests to the right endpoint.
    """

    def get(self, request, format=None):
        """
        Returns the single closest restaurant to the user's coordinates.

        Params:
        -------
        request : JSON request from frontend; must carry "long" and "lat"
                  query parameters.
        """
        user_long = float(request.query_params["long"])
        user_lat = float(request.query_params["lat"])
        res = {}
        rad = 10000
        # Try finding results close first, and widen the radius (up to the
        # Places API maximum of 50,000 m) while nothing is found.
        while rad <= 50000:
            print("Search with radius = " + str(rad))
            res = self.findPlace(lat=user_lat, long=user_long, radius=rad)
            if len(res["results"]) != 0:
                break
            rad += 10000
        # BUG FIX: the original tested `len(res["status"]) == 'ZERO_RESULTS'`,
        # comparing an int to a string (always False), so the zero-results
        # branch was unreachable. Compare the status value itself.
        if res["status"] == 'ZERO_RESULTS':
            # No results were found at any radius.
            print("No eatery results were found.")
            return Response(None)
        # Find the closest of the fetched results.
        comp = CoordinateHandler()
        min_dist = -1          # -1 marks "no candidate seen yet"
        closest_eatery = None
        for result in res["results"]:
            loc = result["geometry"]["location"]
            dist = comp.distance(float(loc["lat"]), float(loc["lng"]), user_lat, user_long)
            if min_dist == -1 or min_dist > dist:
                min_dist = dist
                closest_eatery = result
        return Response(closest_eatery)

    def findPlace(self, lat, long, radius=10000):
        """
        Calls a nearbysearch request to Google's Places API.

        Params:
        -------
        lat : latitude to search at.
        long : longitude to search at.
        radius : radius to search within, in meters. Defaults to 10,000
                 (the API maximum is 50,000).
        """
        place_type = "restaurant"  # renamed from `type` to avoid shadowing the builtin
        opennow = ""  # can be changed to fetch only open restaurants
        APIKEY = os.getenv("GOOGLE_API_KEY")
        url = ("https://maps.googleapis.com/maps/api/place/nearbysearch/json"
               "?location={lat},{long}&radius={radius}&type={type}&opennow={opennow}&key={APIKEY}").format(
                   lat=lat, long=long, radius=radius, type=place_type,
                   opennow=opennow, APIKEY=APIKEY)
        response = requests.get(url)
        return json.loads(response.text)
class ClosestBench(APIView):
    """
    Handles getting the closest bench from the user after the user hits the go button.

    Params:
    -------
    APIView : Django superclass that points user requests to the right endpoint.
    """

    def get(self, request, format=None):
        """
        Returns the single closest west-facing bench to the user's coordinates.

        Params:
        -------
        request : JSON request from frontend.
        """
        handler = CoordinateHandler()
        user_long = float(request.query_params["long"])  # convert to float from string
        user_lat = float(request.query_params["lat"])
        # Linear scan over every bench in the DB, keeping the nearest one.
        nearest = None
        nearest_dist = -1  # -1 marks "no candidate seen yet"
        for candidate in Bench.objects.all().values():
            candidate_dist = handler.distance(float(candidate['latitude']), float(candidate['longitude']), user_lat, user_long)
            if nearest_dist == -1 or nearest_dist > candidate_dist:
                nearest_dist = candidate_dist
                nearest = candidate
        return Response(nearest)
class PopulateDatabase(APIView):
    """
    Handles populating the DB.

    Params:
    -------
    APIView : Django superclass that points user requests to the right endpoint.
    """

    def post(self, request, format=None):
        """
        Wipes then populates the DB with bench data.

        Params:
        -------
        request : JSON request from frontend.
        """
        Bench.objects.all().delete()
        print('Deleted all benches')
        new_benches = []
        with open('data/benches.csv') as csv_file:
            for row in csv.DictReader(csv_file):
                # Keep only west-facing benches (NW / W / SW orientation).
                if row["ORIENTATION"] not in ("NW", "W", "SW"):
                    continue
                new_benches.append(Bench(
                    pk=len(new_benches),
                    latitude=row['latitude'],
                    longitude=row['longitude'],
                    location_detail=row['LOCATION_DETAIL'],
                    orientation=row['ORIENTATION'],
                    life_cycle_status=row['LIFE_CYCLE_STATUS']
                ))
        print('Created all benches from csv file')
        Bench.objects.bulk_create(new_benches)
        serializer = BenchSerializer(Bench.objects.all(), many=True)
        return Response(serializer.data)
class BenchView(viewsets.ReadOnlyModelViewSet):
    """
    Handles benches in the DB.

    Params:
    -------
    viewsets.ReadOnlyModelViewSet : Django superclass that handles DB usage.
    """
    # Read-only list/detail endpoints over every bench record.
    serializer_class = BenchSerializer
    queryset = Bench.objects.all()
#!/usr/bin/python
# Based upon: https://raw.githubusercontent.com/Quihico/handy.stuff/master/language.py
# https://forum.kodi.tv/showthread.php?tid=268081&highlight=generate+.po+python+gettext
# Lookup table from lower-cased English text to Kodi string ids; filled in by
# the machine-generated section at the bottom of this file.
_strings = {}
if __name__ == "__main__":
    # running as standalone script: scan the sources for _('...') calls,
    # add missing entries to strings.po, then regenerate the table below.
    import os
    import re
    import subprocess
    import polib
    # print(f"PATH: {sys.path}")
    # print(f"executable: {sys.executable}")
    dir_path = os.getcwd()
    folder_name = os.path.basename(dir_path)
    print(f"current directory is : {dir_path}")
    # print(f"Directory name is : {folder_name}")
    string_file = "../language/resource.language.en_gb/strings.po"
    print(f"input file: {string_file}")
    po = polib.pofile(string_file, wrapwidth=500)
    try:
        # Grep the source tree for translation-call sites.
        command = ["grep", "-hnr", "_([\'\"]", "..\\.."]
        # print(f"grep command: {command}")
        r = subprocess.check_output(command, text=True)
        print(r)
        # print("End grep")
        strings = re.compile('_\(f?["\'](.*?)["\']\)', re.IGNORECASE).findall(r)
        translated = [m.msgid.lower().replace("'", "\\'") for m in po]
        missing = set([s for s in strings if s.lower() not in translated])
        # Allocate new ids from 30000-34999, skipping ids already used in the .po.
        ids_range = list(range(30000, 35000))
        # ids_reserved = [int(m.msgctxt[1:]) for m in po]
        ids_reserved = []
        for m in po:
            # print(f"msgctxt: {m.msgctxt}")
            if str(m.msgctxt).startswith("#"):
                ids_reserved.append(int(m.msgctxt[1:]))
        ids_available = [x for x in ids_range if x not in ids_reserved]
        # print(f"IDs Reserved: {ids_reserved}")
        print(f"Available IDs: {ids_available}")
        print(f"Missing: {missing}")
        if missing:
            print(f"WARNING: adding missing translation for '{missing}'")
            for text in missing:
                id = ids_available.pop(0)
                entry = polib.POEntry(msgid=text, msgstr='', msgctxt=f"#{id}")
                po.append(entry)
            po.save(string_file)
    except Exception as e:
        print(f"Exception: {e}")
    # Rewrite this very file: keep everything up to the "# GENERATED" marker,
    # then re-emit one _strings[...] assignment per .po entry.
    content = []
    with open(__file__, "r") as me:
        content = me.readlines()
    content = content[:content.index("# GENERATED\n") + 1]
    with open(__file__, "w", newline="\n") as f:
        f.writelines(content)
        for m in po:
            if m.msgctxt.startswith("#"):
                line = "_strings['{0}'] = {1}\n".format(m.msgid.lower().replace("'", "\\'"), m.msgctxt.replace("#", "").strip())
                f.write(line)
else:
    # running as Kodi module
    from resources.lib import STRDEBUG, ADDON, xbmc
def get_string(t):
string_id = _strings.get(t.lower())
if not string_id:
xbmc.log(f"[script.service.hue] LANGUAGE: missing translation for '{t.lower()}'")
return t
if STRDEBUG:
return f"STR:{string_id} {ADDON.getLocalizedString(string_id)}"
return ADDON.getLocalizedString(string_id)
# GENERATED
_strings['video actions'] = 32100
_strings['audio actions'] = 32102
_strings['start/resume'] = 32201
_strings['pause'] = 32202
_strings['stop'] = 32203
_strings['scene name:'] = 32510
_strings['scene id'] = 32511
_strings['select...'] = 32512
_strings['bridge'] = 30500
_strings['discover hue bridge'] = 30501
_strings['bridge ip'] = 30502
_strings['bridge user'] = 30503
_strings['enable schedule (24h format)'] = 30505
_strings['start time:'] = 30506
_strings['end time:'] = 30507
_strings['disable during daylight'] = 30508
_strings['activate during playback at sunset'] = 30509
_strings['general'] = 30510
_strings['schedule'] = 30511
_strings['scenes'] = 30512
_strings['play scene enabled'] = 30513
_strings['pause scene enabled'] = 30514
_strings['stop scene enabled'] = 30515
_strings['disable time check if any light already on'] = 30516
_strings['don\'t enable scene if any light is off'] = 30517
_strings['[b][i]warning: not supported on all hardware[/b][/i]'] = 30521
_strings['cpu & hue performance'] = 30522
_strings['ambilight'] = 30523
_strings['advanced'] = 32101
_strings['separate debug log'] = 32105
_strings['video activation'] = 32106
_strings['select lights'] = 6101
_strings['enabled'] = 30520
_strings['press connect button on hue bridge'] = 9001
_strings['create scene'] = 9007
_strings['delete scene'] = 9008
_strings['hue service'] = 30000
_strings['check your bridge and network'] = 30004
_strings['hue connected'] = 30006
_strings['press link button on bridge'] = 30007
_strings['bridge not found'] = 30008
_strings['waiting for 90 seconds...'] = 30009
_strings['user not found'] = 30010
_strings['complete!'] = 30011
_strings['cancelled'] = 30013
_strings['select hue lights...'] = 30015
_strings['found bridge: '] = 30017
_strings['discover bridge...'] = 30018
_strings['bridge connection failed'] = 30021
_strings['discovery started'] = 30022
_strings['bridge not configured'] = 30023
_strings['check hue bridge configuration'] = 30024
_strings['error: scene not deleted'] = 30025
_strings['scene created'] = 30026
_strings['are you sure you want to delete this scene:[cr][b]{scene[1]}[/b]'] = 30027
_strings['delete hue scene'] = 30028
_strings['enter scene name'] = 30030
_strings['transition time:'] = 30031
_strings['{} secs.'] = 30033
_strings['cancel'] = 30034
_strings['lights:'] = 30035
_strings['scene name:'] = 30036
_strings['save'] = 30037
_strings['create hue scene'] = 30038
_strings['scene not created.'] = 30002
_strings['set a fade time in seconds, or 0 for an instant transition.'] = 30039
_strings['scene deleted'] = 30040
_strings['you may now assign your scene to player actions.'] = 30041
_strings['fade time (seconds)'] = 30042
_strings['error'] = 30043
_strings['create new scene'] = 30044
_strings['scene successfully created!'] = 30045
_strings['adjust lights to desired state in the hue app to save as new scene.'] = 30046
_strings['connection lost. check settings. shutting down'] = 30047
_strings['connection lost. trying again in 2 minutes'] = 30048
_strings['scene name'] = 30049
_strings['n-upnp discovery...'] = 30050
_strings['upnp discovery...'] = 30051
_strings['searching for bridge...'] = 30005
_strings['invalid start or end time, schedule disabled'] = 30052
_strings['set brightness on start'] = 30056
_strings['force on'] = 30057
_strings['light names:'] = 30058
_strings['light gamut:'] = 30059
_strings['update interval (ms)'] = 30065
_strings['hue transition time (ms)'] = 30066
_strings['frame capture size'] = 30067
_strings['show hue bridge capacity errors'] = 30068
_strings['minimum duration (minutes)'] = 30800
_strings['enable for movies'] = 30801
_strings['enable for tv episodes'] = 30802
_strings['enable for music videos'] = 30803
_strings['enable for other videos (discs)'] = 30804
_strings['enable for live tv'] = 30805
_strings['saturation'] = 30809
_strings['minimum brightness'] = 30810
_strings['maximum brightness'] = 30811
_strings['disable connection message'] = 30812
_strings['average image processing time:'] = 30813
_strings['on playback stop'] = 30814
_strings['resume light state'] = 30815
_strings['resume transition time (secs.)'] = 30816
_strings['only colour lights are supported'] = 30071
_strings['unsupported hue bridge'] = 30072
_strings['disabled'] = 30055
_strings['play'] = 30060
_strings['hue status: '] = 30061
_strings['settings'] = 30062
_strings['disabled by daylight'] = 30063
_strings['error: scene not found'] = 30064
_strings['the following error occurred:'] = 30080
_strings['automatically report this error?'] = 30081
_strings['no lights selected for ambilight.'] = 30069
_strings['ok'] = 30070
_strings['hue bridge over capacity'] = 30075
_strings['network not ready'] = 30076
_strings['the hue bridge is over capacity. increase refresh rate or reduce the number of ambilights.'] = 30077
_strings['bridge not found[cr]check your bridge and network.'] = 30078
_strings['press link button on bridge. waiting for 90 seconds...'] = 30082
_strings['unknown'] = 30083
_strings['user found![cr]saving settings...'] = 30084
_strings['adjust lights to desired state in the hue app to save as new scene.[cr]set a fade time in seconds, or 0 for an instant transition.'] = 30085
_strings['user not found[cr]check your bridge and network.'] = 30086
_strings['scene successfully created![cr]you may now assign your scene to player actions.'] = 30087
_strings['do not show again'] = 30073
_strings['disable hue labs during playback'] = 30074
_strings['hue bridge v1 (round) is unsupported. hue bridge v2 (square) is required.'] = 30001
_strings['bridge api: {api_version}, update your bridge'] = 30003
_strings['unknown colour gamut for light {light}'] = 30012
_strings['report errors'] = 30016
_strings['never report errors'] = 30020
_strings['hue service error'] = 30032
_strings['connection error'] = 30029
_strings['error: lights incompatible with ambilight'] = 30014
|
<reponame>mhoangvslev/audio2score
import re
import numpy as np
from pathlib import Path
from itertools import cycle
# Classical tempo markings (lower-cased, spaces stripped) mapped to an
# approximate metronome value in BPM; used by SpineInfo.update to expand
# *CT tempo tokens into *MM markings.
classic_tempos = {
    "grave" : 32,
    "largoassai" : 40,
    "largo" : 50,
    "pocolargo" : 60,
    "adagio" : 71,
    "pocoadagio" : 76,
    "andante" : 92,
    "andantino" : 100,
    "menuetto" : 112,
    "moderato" : 114,
    "pocoallegretto" : 116,
    "allegretto" : 118,
    "allegromoderato" : 120,
    "pocoallegro" : 124,
    "allegro" : 130,
    "moltoallegro" : 134,
    "allegroassai" : 138,
    "vivace" : 140,
    "vivaceassai" : 150,
    "allegrovivace" : 160,
    "allegrovivaceassai" : 170,
    "pocopresto" : 180,
    "presto" : 186,
    "prestoassai" : 200,
}
class Labels(object):  # 38 symbols
    """Character-level label alphabet for score transcription.

    Maps each single-character symbol to an integer token id and back.
    Index 0 ("+") is the CTC blank; "<" and ">" are the seq2seq start/end
    delimiters.
    """

    def __init__(self):
        self.labels = [
            "+", # ctc blank
            "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
            "C", "D", "E", "F", "G", "A", "B", "c", "d", "e", "f", "g", "a", "b",
            "r", "#", "-", "=", ".", "[", "_", "]", ";", "\t", "\n",
            "<", ">", # seq2seq <sos> and <eos> delimiters
        ]
        # symbol -> id and id -> symbol lookup tables (dict comprehensions
        # replace the dict([(...)]) idiom).
        self.labels_map = {c: i for i, c in enumerate(self.labels)}
        self.labels_map_inv = dict(enumerate(self.labels))

    def ctclen(self, tokens):
        """Minimum CTC frame count: token count plus one blank per repeat."""
        count = len(tokens)
        count += sum(tokens[i - 1] == tokens[i] for i in range(1, count))
        return count

    def encode(self, chars):
        """Encode an iterable of symbols into a list of token ids.

        Raises KeyError for symbols outside the alphabet.
        """
        return [self.labels_map[c] for c in chars]

    def decode(self, tokens):
        """Decode token ids back to symbols, dropping unknown ids."""
        return list(filter(None, [self.labels_map_inv.get(t) for t in tokens]))
class LabelsMulti(object): # 148 symbols
    """Multi-character label alphabet: durations, pitch names and layout.

    Index 0 ("+") is the CTC blank; "<sos>"/"<eos>" are seq2seq delimiters.
    Token ids are the positions in ``labels``, so list order is significant.
    """
    def __init__(self, chorales=False):
        # NOTE(review): `chorales` is accepted but never read here.
        self.labels = [
            "+", # ctc blank
            "1","1.","2","2.","4","4.","8","8.","16","16.","32","32.","64","64.","3","6","12","24","48","96",
            "BBB#","CC","CC#","DD-","DD","DD#","EE-","EE","EE#","FF-","FF","FF#","GG-","GG","GG#","AA-","AA","AA#","BB-","BB","BB#",
            "C-","C","C#","D-","D","D#","E-","E","E#","F-","F","F#","G-","G","G#","A-","A","A#","B-","B","B#",
            "c-","c","c#","d-","d","d#","e-","e","e#","f-","f","f#","g-","g","g#","a-","a","a#","b-","b","b#",
            "cc-","cc","cc#","dd-","dd","dd#","ee-","ee","ee#","ff-","ff","ff#","gg-","gg","gg#","aa-","aa","aa#",
            "bb-","bb","bb#",
            "ccc-","ccc","ccc#","ddd-","ddd","ddd#","eee-","eee","eee#","fff-","fff","fff#","ggg-","ggg","ggg#","aaa-","aaa","aaa#",
            "bbb-","bbb","bbb#",
            "cccc-","cccc","cccc#","dddd-","dddd","dddd#","eeee-","eeee","eeee#","ffff-","ffff",
            "r", "=", ".", "[", "_", "]", ";", "\t", "\n",
            "<sos>", "<eos>", # seq2seq delimiters
        ]
        # symbol -> id and id -> symbol lookup tables
        self.labels_map = dict([(c, i) for (i, c) in enumerate(self.labels)])
        self.labels_map_inv = dict([(i, c) for (i, c) in enumerate(self.labels)])
    def ctclen(self, tokens):
        # No repeat-blank budgeting here, unlike the character-level Labels.
        return len(tokens)
    def encode(self, chars):
        """Tokenize a tab/newline-separated **kern-like sequence.

        Single-character items map directly; longer items are decomposed by
        regex into (tie-open, duration, pitch-or-rest, pause, tie-close)
        parts, each of which must be in the alphabet.
        """
        tokens = []
        for line in chars.splitlines():
            items = line.split('\t')
            for item in items:
                if len(item) == 1:
                    tokens.append(self.labels_map[item])
                else:
                    matchobj = re.fullmatch(r'(\[?)(\d+\.*)([a-gA-Gr]{1,4}[\-#]*)(;?)([\]_]?)', item)
                    if not matchobj:
                        raise Exception(f'Item {item} in {line} does not match')
                    for m in [matchobj[1], matchobj[2], matchobj[3], matchobj[4], matchobj[5]]:
                        if m:
                            tokens.append(self.labels_map[m])
                # Tab between items; the last one per line becomes a newline.
                tokens.append(self.labels_map['\t'])
            tokens[-1] = self.labels_map['\n']
        # Drop the trailing newline token.
        tokens.pop(-1)
        return tokens
    def decode(self, tokens):
        """Decode token ids back to symbols, dropping unknown ids."""
        return list(filter(None, [self.labels_map_inv.get(t) for t in tokens]))
class LabelsMulti2(object): # 9147 symbols
    """Composite label alphabet: one token per duration/pitch/tie combination.

    Token ids are positions in ``labels``, so construction order matters.
    """
    def __init__(self):
        durations = ["1","1.","2","2.","4","4.","8","8.","16","16.","32","32.","64","64.","3","6","12","24","48","96"]
        notes = ["BBB#","CC","CC#","DD-","DD","DD#","EE-","EE","EE#","FF-","FF","FF#","GG-","GG","GG#","AA-","AA","AA#","BB-","BB","BB#",
                 "C-","C","C#","D-","D","D#","E-","E","E#","F-","F","F#","G-","G","G#","A-","A","A#","B-","B","B#",
                 "c-","c","c#","d-","d","d#","e-","e","e#","f-","f","f#","g-","g","g#","a-","a","a#","b-","b","b#",
                 "cc-","cc","cc#","dd-","dd","dd#","ee-","ee","ee#","ff-","ff","ff#","gg-","gg","gg#","aa-","aa","aa#",
                 "bb-","bb","bb#",
                 "ccc-","ccc","ccc#","ddd-","ddd","ddd#","eee-","eee","eee#","fff-","fff","fff#","ggg-","ggg","ggg#","aaa-","aaa","aaa#",
                 "bbb-","bbb","bbb#",
                 "cccc-","cccc","cccc#","dddd-","dddd","dddd#","eeee-","eeee","eeee#"]
        # NOTE(review): `ties` is never read; the tie variants are spelled
        # out literally in the loop below.
        ties = ["[", "_", "]"]
        self.labels = ['+'] # ctc blank
        # Every duration x note combination, with tie open/continue/close
        # variants, plus one rest token per duration.
        for d in durations:
            for n in notes:
                self.labels.append(d + n)
                self.labels.append('[' + d + n)
                self.labels.append(d + n + '_')
                self.labels.append(d + n + ']')
            self.labels.append(d + 'r')
        self.labels.extend([
            "=", ".", "\t", "\n",
            "<sos>", "<eos>", # seq2seq delimiters
        ])
        # symbol -> id and id -> symbol lookup tables
        self.labels_map = dict([(c, i) for (i, c) in enumerate(self.labels)])
        self.labels_map_inv = dict([(i, c) for (i, c) in enumerate(self.labels)])
    def ctclen(self, tokens):
        # No repeat-blank budgeting for composite labels.
        return len(tokens)
    def encode(self, chars):
        """Tokenize a tab/newline-separated sequence of composite items."""
        tokens = []
        for line in chars.splitlines():
            items = line.split('\t')
            for item in items:
                tokens.append(self.labels_map[item])
                # Tab between items; the last one per line becomes a newline.
                tokens.append(self.labels_map['\t'])
            tokens[-1] = self.labels_map['\n']
        # Drop the trailing newline token.
        tokens.pop(-1)
        return tokens
    def decode(self, tokens):
        """Decode token ids back to symbols, dropping unknown ids."""
        return list(filter(None, [self.labels_map_inv.get(t) for t in tokens]))
class Humdrum(object):
    """Minimal Humdrum document: splits text into header, body and footer.

    The header runs through the last '**...' exclusive-interpretation line;
    the footer starts at the first '*-' terminator line.
    """

    def __init__(self, path=None, data=None):
        if path:
            data = path.read_text(encoding='iso-8859-1')
        lines = data.splitlines()
        start = 0
        end = 0
        for index, line in enumerate(lines):
            if line.startswith('**'):
                start = index + 1
            if line.startswith('*-'):
                end = index
                break
        self.header = lines[:start]
        self.footer = lines[end:]
        self.body = lines[start:end]
        # The last header line declares the spine types, tab-separated.
        self.spine_types = self.header[-1].split('\t')

    def save(self, path):
        """Write the document back to *path* in the original encoding."""
        return path.write_text(self.dump(), encoding='iso-8859-1')

    def dump(self):
        """Serialize header + body + footer back into one string."""
        return '\n'.join(self.header + self.body + self.footer)
class SpineInfo(object):
    """Tracks per-spine interpretation state (clef, key, meter, tempo, ...)."""

    def __init__(self, spine_types):
        self.spines = []
        for stype in spine_types:
            self.spines.append({'type' : stype,
                                'instrument' : '*',
                                'clef' : '*',
                                'keysig' : '*',
                                'tonality' : '*',
                                'timesig' : '*',
                                'metronome' : '*',
                                })

    def update(self, line):
        """Absorb one tab-separated '*'-interpretation line into the state."""
        for i, item in enumerate(line.split('\t')):
            if item.startswith('*k['):
                self.spines[i]['keysig'] = item
            elif item.startswith('*clef'):
                self.spines[i]['clef'] = item
            elif item.startswith('*I'):
                self.spines[i]['instrument'] = item
            elif item.startswith('*MM'):
                self.spines[i]['metronome'] = item
            elif item.startswith('*M'):
                self.spines[i]['timesig'] = item
            elif item.startswith('*CT'):
                # Translate a classical tempo word into a metronome marking.
                item = f'*MM{classic_tempos[item[3:]]}'
                self.spines[i]['metronome'] = item
            elif item.endswith(':'):
                self.spines[i]['tonality'] = item

    def override_instruments(self, instruments):
        """Assign *instruments* cyclically to the '**kern' spines."""
        pool = cycle(instruments)
        for spine in self.spines:
            if spine['type'] == '**kern':
                spine['instrument'] = f'*I{next(pool)}'

    def dump(self):
        """Return (header_lines, footer_lines) reflecting the current state."""
        header = []
        for v in ['type', 'instrument', 'clef', 'keysig', 'tonality', 'timesig', 'metronome']:
            header.append('\t'.join([x[v] for x in self.spines]))
        footer = ['\t'.join(['*-' for x in self.spines])]
        return header, footer

    def clone(self):
        """Return an independent copy of this SpineInfo.

        BUG FIX: the original used ``self.spines.copy()``, a *shallow* copy
        that shared the per-spine dicts, so any later ``update()`` on either
        object mutated both (corrupting the chunk headers in Kern.split).
        Copy each spine dict as well.
        """
        spineinfo = SpineInfo([s['type'] for s in self.spines])
        spineinfo.spines = [dict(s) for s in self.spines]
        return spineinfo
class Kern(Humdrum):
def __init__(self, path=None, data=None):
super(Kern, self).__init__(path, data)
self.spines = SpineInfo(self.spine_types)
self.first_line = 0
for i, line in enumerate(self.body):
if not line.startswith('*'):
self.first_line = i
break
self.spines.update(line)
def clean(self, remove_pauses=True):
spine_types = self.spine_types.copy()
newbody = []
for line in self.body[self.first_line:]:
if re.search(r'\*[+x^v]', line):
i = 0
remove_spine = False
for item in line.split('\t'):
if item.startswith(('*+', '*x')):
print('Unsupported variable spines')
return False
elif item == '*^':
spine_types.insert(i, '**kern')
i += 1
spine_types[i] = '**split'
elif item == '*v':
if remove_spine:
spine_types.pop(i)
i -= 1
remove_spine = False
else:
remove_spine = True
i += 1
continue
if line.startswith('!'):
newbody.append(line)
continue
# Remove unwanted symbols
newline = []
note_found = False
grace_note_found = False
for i, item in enumerate(line.split('\t')):
if spine_types[i] == '**split':
# Remove spline split
continue
if spine_types[i] == '**kern' and not item.startswith(('*', '=')):
item = item.split()[0] # Take the first note of the chord
item = re.sub(r'[pTtMmWwS$O:]', r'', item) # Remove ornaments
if remove_pauses:
item = re.sub(r';', r'', item) # Remove pauses
item = re.sub(r'[JKkL\\/]', r'', item) # Remove beaming and stems
item = re.sub(r'[(){}xXyY&]', r'', item) # Remove slurs, phrases, elisions and editorial marks
if re.search('[qQP]', item):
grace_note_found = True
elif re.search('[A-Ga-g]', item):
note_found = True
newline.append(item)
# Remove grace note lines unless they contain a non-grace note in the same time line
if grace_note_found and not note_found:
continue
if grace_note_found and note_found:
print(f'Unremovable grace notes {line}')
return False
if not all([x == '.' for x in newline]) and not all([x == '!' for x in newline]):
newbody.append('\t'.join(newline))
header, footer = self.spines.dump()
self.body = header[1:] + newbody
self.first_line = len(header) - 1
return True
    def split(self, chunk_sizes, stride=None):
        """Cut the piece into chunks of randomly chosen measure counts.

        chunk_sizes: candidate chunk lengths in measures, sampled uniformly
            with np.random.choice for each chunk.
        stride: measures to advance between chunk starts; defaults to the
            chunk size itself (non-overlapping chunks).

        Returns a list of Kern objects, each wrapped with the spine header
        valid at its starting measure. The final chunk absorbs any leftover
        tail shorter than min(chunk_sizes).
        """
        chunks = []
        spines = self.spines.clone()
        # Indices into self.body of the line following each barline.
        measures = [self.first_line]
        for i, line in enumerate(self.body[self.first_line:]):
            # Numbered barlines '=N...' and the final '==' delimit measures;
            # '-'-suffixed (invisible) barlines are excluded by [^-].
            if re.match(r'^=(\d+|=)[^-]*', line):
                measures.append(i + self.first_line + 1)
        i = 0
        while i < len(measures) - 1:
            chunk_size = min(np.random.choice(chunk_sizes), len(measures) - i - 1)
            m_begin = measures[i]
            m_end = measures[i + chunk_size]
            header, footer = spines.dump()
            i += stride if stride else chunk_size
            if len(measures) - i - 1 < min(chunk_sizes):
                # Not enough material left for another chunk: emit the
                # remainder of the piece and stop.
                body = self.body[m_begin:]
                chunk = Kern(data='\n'.join(header + body + footer))
                chunks.append(chunk)
                break
            body = self.body[m_begin:m_end]
            chunk = Kern(data='\n'.join(header + body + footer))
            chunks.append(chunk)
            # Replay interpretation records up to the next chunk start so the
            # cloned spine state matches that position in the piece.
            for line in self.body[m_begin:measures[i]]:
                if line.startswith('*'):
                    spines.update(line)
        return chunks
    def tosequence(self):
        """Flatten the kern body into a newline-joined token sequence.

        Keeps only **kern spines; numbered barlines are collapsed to a bare
        '=', interpretation and comment records are dropped, and undefined
        symbols are stripped. Returns None when the result still contains
        doubled sharps/flats/dots (considered invalid), otherwise the
        sequence string.
        """
        krn = []
        for line in self.body[self.first_line:]:
            newline = []
            if line.startswith('='):
                # Keep only measure-delimiting barlines, reduced to '='.
                if not re.match(r'^=(\d+|=)[^-]*', line):
                    continue
                newline.append('=')
            elif line.startswith(('*', '!')):
                continue
            else:
                line = re.sub(r'[^rA-Ga-g0-9.\[_\]#\-;\t]', r'', line) # Remove undefined symbols
                for i, item in enumerate(line.split('\t')):
                    if self.spine_types[i] == '**kern':
                        newline.append(item)
            krn.append('\t'.join(newline))
        krnseq = '\n'.join(krn)
        if re.search(r'(#|-|\.){2,}', krnseq):
            # Discard double sharps/flats/dots
            return None
        return krnseq
|
<filename>train/tasks/semantic/modules/data_analysis.py<gh_stars>0
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import datetime
import os
import time
import imp
import cv2
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from matplotlib import pyplot as plt
from torch.autograd import Variable
from common.avgmeter import *
from common.logger import Logger
from common.sync_batchnorm.batchnorm import convert_model
from common.warmupLR import *
from tasks.semantic.modules.ioueval import *
from tasks.semantic.modules.SalsaNext import *
from tasks.semantic.modules.SalsaNextAdf import *
from tasks.semantic.modules.Lovasz_Softmax import Lovasz_softmax
import tasks.semantic.modules.adf as adf
def keep_variance_fn(x):
    """Lower-bound a variance tensor by adding a small epsilon so it stays positive."""
    eps = 1e-3
    return eps + x
def one_hot_pred_from_label(y_pred, labels):
    """Build a one-hot tensor shaped/typed like *y_pred* with ones at the label positions.

    y_pred: (batch, n_classes) tensor, used only for shape/dtype/device.
    labels: (batch,) tensor of class indices.
    """
    target = torch.zeros_like(y_pred)
    template_ones = torch.ones_like(y_pred)
    batch_idx = torch.arange(labels.size(0))
    class_idx = [lbl for lbl in labels]
    target[batch_idx, class_idx] = template_ones[batch_idx, class_idx]
    return target
class SoftmaxHeteroscedasticLoss(torch.nn.Module):
    """Heteroscedastic (aleatoric-uncertainty) loss over ADF softmax outputs.

    `outputs` is expected to be the (mean, variance) pair produced by an
    ADF network head; `targets` are dense class-index maps. The number of
    classes is hard-coded to 20 here.
    """
    def __init__(self):
        super(SoftmaxHeteroscedasticLoss, self).__init__()
        # keep_variance_fn lower-bounds the variance inside the ADF softmax.
        self.adf_softmax = adf.Softmax(dim=1, keep_variance_fn=keep_variance_fn)
    def forward(self, outputs, targets, eps=1e-5):
        """Return the mean Gaussian NLL (up to constants) of targets under (mean, var)."""
        mean, var = self.adf_softmax(*outputs)
        # One-hot and permute to NCHW so targets align with mean/var layout.
        targets = torch.nn.functional.one_hot(targets, num_classes=20).permute(0,3,1,2).float()
        precision = 1 / (var + eps)
        # Precision-weighted squared error plus log-variance penalty.
        return torch.mean(0.5 * precision * (targets - mean) ** 2 + 0.5 * torch.log(var + eps))
def save_to_log(logdir, logfile, message):
    """Append *message* as a single line to <logdir>/<logfile>.

    The file is created if it does not exist. Uses a context manager so the
    handle is closed even when the write raises (the previous open/close
    pair leaked the handle on error), and os.path.join instead of manual
    '/' concatenation.
    """
    with open(os.path.join(logdir, logfile), "a") as f:
        f.write(message + '\n')
    return
def save_checkpoint(to_save, logdir, suffix=""):
# Save the weights
torch.save(to_save, logdir +
"/SalsaNext" + suffix)
class DataAnalysis():
    """Offline statistics and inspection helpers for the parsed LiDAR dataset.

    Builds the project's DataExtractor parser from the ARCH/DATA config
    dicts and offers routines to compute segment-angle statistics over the
    training split and to fetch raw point clouds for visualization.
    """
    def __init__(self, ARCH, DATA, datadir, logdir, path=None,uncertainty=False):
        # parameters
        self.ARCH = ARCH
        self.DATA = DATA
        self.datadir = datadir
        self.log = logdir
        self.path = path
        self.uncertainty = uncertainty
        # Timing meters kept for parity with the trainer; not used below.
        self.batch_time_t = AverageMeter()
        self.data_time_t = AverageMeter()
        self.batch_time_e = AverageMeter()
        self.epoch = 0
        # put logger where it belongs
        self.info = {"train_update": 0,
                     "train_loss": 0,
                     "train_acc": 0,
                     "train_iou": 0,
                     "valid_loss": 0,
                     "valid_acc": 0,
                     "valid_iou": 0,
                     "best_train_iou": 0,
                     "best_val_iou": 0}
        # get the data
        # NOTE(review): 'booger' is not imported in this module — presumably
        # injected by the surrounding framework; confirm before running
        # stand-alone.
        parserModule = imp.load_source("parserModule",
                                       booger.TRAIN_PATH + '/tasks/semantic/dataset/' +
                                       self.DATA["name"] + '/data_extractor.py')
        self.parser = parserModule.DataExtractor(root=self.datadir,
                                                 train_sequences=self.DATA["split"]["train"],
                                                 valid_sequences=self.DATA["split"]["valid"],
                                                 test_sequences=None,
                                                 labels=self.DATA["labels"],
                                                 color_map=self.DATA["color_map"],
                                                 learning_map=self.DATA["learning_map"],
                                                 learning_map_inv=self.DATA["learning_map_inv"],
                                                 sensor=self.ARCH["dataset"]["sensor"],
                                                 max_points=self.ARCH["dataset"]["max_points"],
                                                 batch_size=self.ARCH["train"]["batch_size"],
                                                 workers=16,
                                                 gt=True,
                                                 shuffle_train=True)
    def data_analysis(self):
        """Two-pass mean/std of the positive per-pixel segment angles over the train set.

        Averages the per-scan means (not a global pixel mean), printing a
        running value every 100 scans.
        """
        train_set = self.parser.train_dataset
        train_set_size = len(train_set)
        segment_angle_average = 0.0
        segment_angle_num = 0.0
        for i in range(train_set_size):
            # NOTE(review): unpacks 6 items per sample here, but datao3d/
            # getpointspose unpack 9 — confirm which dataset variant is used.
            proj_range, proj_segment_angle, proj_xyz, proj_remission, proj_mask, proj_labels = train_set[i]
            # Only strictly positive angles are considered valid.
            segment_angle_valid = proj_segment_angle[proj_segment_angle > 0]
            segment_angle_average += torch.sum(segment_angle_valid).item() / segment_angle_valid.shape[0]
            segment_angle_num += 1.0
            if(i % 100 == 0):
                print(segment_angle_average / segment_angle_num)
        segment_angle_average /= segment_angle_num
        print("average segment angle:", segment_angle_average)
        # Second pass: std around the mean computed above.
        segment_angle_std = 0.0
        segment_angle_num = 0.0
        for i in range(train_set_size):
            proj_range, proj_segment_angle, proj_xyz, proj_remission, proj_mask, proj_labels = train_set[i]
            segment_angle_valid = proj_segment_angle[proj_segment_angle > 0]
            segment_angle_std += torch.sum((segment_angle_valid - segment_angle_average) ** 2).item() / segment_angle_valid.shape[0]
            segment_angle_num += 1.0
            if(i % 100 == 0):
                print(segment_angle_std / segment_angle_num)
        segment_angle_std /= segment_angle_num
        # NOTE(review): numpy (np) is not imported at the top of this module
        # — confirm it is available before calling this method.
        segment_angle_std = np.sqrt(segment_angle_std)
        print("average segment std:", segment_angle_std)
    def datao3d(self):
        """Build an Open3D point cloud of the first two train scans (red / green).

        NOTE(review): 'o3d' (open3d) is not imported in this module — confirm
        it is available before calling.
        """
        train_set = self.parser.train_dataset
        train_set_size = len(train_set)
        proj_range, proj_segment_angle, proj_xyz, proj_remission, proj_mask, proj_labels, points0, scan_file0, pose0 = train_set[0]
        proj_range, proj_segment_angle, proj_xyz, proj_remission, proj_mask, proj_labels, points1, scan_file1, pose1 = train_set[1]
        print(scan_file0)
        print(pose0)
        print(scan_file1)
        print(pose1)
        # Color scan 0 red, scan 1 green, then merge into one cloud.
        N0, _ = points0.shape
        colors0 = np.tile([1,0,0], (N0, 1))
        N1, _ = points1.shape
        colors1 = np.tile([0,1,0], (N1, 1))
        points = np.vstack((points0, points1))
        colors = np.vstack((colors0, colors1))
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd.colors = o3d.utility.Vector3dVector(colors)
        return pcd
    def getpointspose(self, i):
        """Return (points, scan_file, sem_label) for sample *i* of the VALIDATION set.

        NOTE(review): despite the local name 'train_set', this reads
        parser.valid_dataset — confirm that is intentional.
        """
        train_set = self.parser.valid_dataset
        proj_range, proj_segment_angle, proj_xyz, proj_remission, proj_mask, proj_labels, points, scan_file, sem_label = train_set[i]
        return points, scan_file, sem_label
|
import tensorflow as tf
import os
import numpy as np
import pandas as pd
from skimage import io
from skimage.transform import resize
from skimage.filters import gaussian
# from deepflash import unet, preproc, utils
from df_resources import unet, preproc, utils, pixelshift37
from skimage.measure import label, regionprops, approximate_polygon
from skimage.draw import polygon, polygon_perimeter
from shapely.geometry import MultiPoint, Polygon
import math
import matplotlib.pyplot as plt
def pixel_shift_3d(np_img, dichroic_dictionary, json_file):
    """Apply per-channel chromatic pixel-shift correction to a 3D image stack."""
    # INPUT:
    # np_img: numpy image. shape = 3D (channels first)
    # dichroic_dictionary: dictionary where key=channel number, value=name of dichroic
    # json_file: path to the .json file containing the shift values for each dichroic generated by the shift calibration script
    # OUTPUT:
    # return: numpy image of shape (n_channels, height, width). height and width may be different than original image
    # NOTE(review): number_of_channels is computed but never used.
    number_of_channels = np_img.shape[0]
    shifted_list = []
    # instantiate PixelShifter class object from pixelshift37
    shift_obj = pixelshift37.PixelShifter(jsonfilepath=json_file)
    # perform shift on each channel
    for ch in range(np_img.shape[0]):
        ch_img = np_img[ch,:,:]
        ch_dichroic = dichroic_dictionary[ch]
        # Match this channel's dichroic name against the calibration entries;
        # NOTE(review): if several keys match, the last one wins — confirm
        # that key names are mutually exclusive.
        for key in shift_obj.shiftdict.keys():
            if key.split('_')[-1].lower() in ch_dichroic.lower():
                ch_img = shift_obj.shape_images(ch_img)
                shiftval = shift_obj.shiftdict[key]
                img = shift_obj.align_with_shift_matrix(ch_img,shiftval)
        # NOTE(review): if no key matches, 'img' holds the previous channel's
        # result (or is unbound on the first channel) — confirm every channel
        # has a calibration entry.
        shifted_list.append(img)
    # create numpy array where .shape = 3 from list of lists 'shifted_list'
    shifted_img = np.dstack(shifted_list)
    # rearranges shape of numpy array to look more like original image
    shifted_img = np.moveaxis(shifted_img, -1, 0)
    print("shifted img shape: ", shifted_img.shape)
    return(shifted_img)
def correct_shift_upsampling(img):
    """Clamp values that shift interpolation pushed above the sensor range.

    Mutates *img* in place and returns it. uint16 data is treated as 12-bit
    (capped at 4095); uint8 is capped at 255. Other dtypes are returned
    unchanged — the previous version implicitly returned None for them,
    which crashed callers doing `img = correct_shift_upsampling(img)`.
    """
    if np.issubdtype(img.dtype, np.dtype('uint16')):
        img[img > 4095] = 4095
    elif np.issubdtype(img.dtype, np.dtype('uint8')):
        # Note: a uint8 array can never exceed 255; kept for symmetry.
        img[img > 255] = 255
    return img
def convert_16_to_8(np_img):
    """Convert a 12-bit-range uint16 image to uint8.

    INPUT:
        np_img: numpy image, 2D or 3D. uint16 values are assumed to lie in
            the 12-bit range [0, 4095] (see correct_shift_upsampling).
    OUTPUT:
        uint8 version of np_img; uint8 input is returned unchanged. Other
        dtypes are unsupported and yield None (explicit now, implicit before).
    """
    if np.issubdtype(np_img.dtype, np.dtype('uint16')):
        # Scale the 12-bit range onto [0, 255] using float arithmetic
        # directly: the previous astype(np.int16) would wrap any value
        # above 32767 to a negative number before dividing.
        data = 255 * (np_img.astype(np.float64) / 4095)
        return data.astype(np.uint8)
    if np.issubdtype(np_img.dtype, np.dtype('uint8')):
        return np_img
    return None
def check_input(df, img_directory='./images'):
    """Print each file name, its declared channel order and its on-disk shape.

    Diagnostic helper: lets mismatches between df's 'ch_order' column and the
    actual image shapes be spotted by eye. df must have 'file_name' and
    'ch_order' columns; images are read from img_directory. The per-file
    comparison loop below is an unfinished stub.
    """
    #CHECK IMAGE SHAPE TO CHANNELS GIVEN
    fs = df.file_name.tolist()
    cs = df.ch_order.tolist()
    shs = [io.imread(os.path.join(img_directory, f)).shape for f in fs]
    # for i in range(len(fs)):
    print(fs, cs, shs)
def polygons_from_labels(labeled_arr, gene_name):
    """Approximate each labeled region with a polygon and tabulate its vertices.

    INPUT:
        labeled_arr: integer label image (e.g. output of skimage.measure.label).
        gene_name: value copied into the 'gene_name' column of every row.
    OUTPUT:
        DataFrame with columns cell_n, gene_name, row_pixels, col_pixels —
        one row per labeled region.
    """
    rows = []
    region_props = regionprops(labeled_arr)
    for n, prop in enumerate(region_props):
        # Reduce the region's pixel coordinates to a simplified polygon.
        p = approximate_polygon(region_props[n].coords, tolerance=2)
        # OR
        # p = region_props[0].coords
        r = p[:,0]
        c = p[:,1]
        pp = [(x, y) for x, y in zip(r.tolist(), c.tolist())]
        # Order the vertices by angle around the centroid so Polygon()
        # receives a well-formed ring.
        cent = (sum([q[0] for q in pp]) / len(pp), sum([q[1] for q in pp]) / len(pp))
        pp.sort(key=lambda q: math.atan2(q[1] - cent[1], q[0] - cent[0]))
        shapely_poly = Polygon(pp)
        rr, cc = shapely_poly.exterior.coords.xy
        rr = np.asarray(rr, dtype=np.int16)
        cc = np.asarray(cc, dtype=np.int16)
        rows.append({'cell_n': n, 'gene_name': gene_name,
                     'row_pixels': rr.tolist(), 'col_pixels': cc.tolist()})
    # Build the frame in one shot: DataFrame.append was removed in pandas 2.0
    # and grew the frame in O(n^2) anyway.
    return pd.DataFrame(rows, columns=['cell_n', 'gene_name', 'row_pixels', 'col_pixels'])
def overlap(arr1, arr2, threshold=0.5):
    """
    Values:
    0 = background for arr1
    2 = background for arr2
    1 = signal for both arr1 and arr2

    Returns (overlap_count, arr1_area, arr1_fraction, arr2_area, arr2_fraction),
    where the fractions are overlap divided by each mask's signal area.
    """
    mask1 = (arr1 > threshold).astype(int)
    mask2 = (arr2 > threshold).astype(int)
    # Relabel arr2 background as 2 so equality only matches shared signal.
    mask2 = np.where(mask2 == 0, 2, mask2)
    shared = np.sum(mask1 == mask2)
    area1 = np.sum(mask1 == 1)
    area2 = np.sum(mask2 == 1)
    return (shared, area1, shared / area1, area2, shared / area2)
def process_images(df,
                   dichroic={0:'dmqb', 1:'dm4', 2:'dm4', 3:'dmqb', 4:'dmqb'},
                   jsonfilepath='./df_resources/shiftset.json',
                   input_directory='./images',
                   output_directory='./processed',
                   model_path='./df_resources/full_model.h5',
                   minx=2040,
                   miny=2040,
                   GAUSS=0.1,
                   TILE_SHAPE=(540,540),
                   PADDING=(184,184),
                   SEED=0,
                   EL_SIZE=[600, 600], #micrometers
                   BATCH_NORM=True,
                   LAMBDA=50, #50
                   V_BAL=0.1, #0.1
                   SIGMA_BAL=10, #10
                   SIGMA_SEP=6 #6
                   ):
    """Preprocess each image listed in df and run U-Net segmentation on it.

    Per file: (optionally) pixel-shift-correct, convert to 8-bit, save a copy,
    split into channels, resize/smooth to (miny, minx), tile, and predict with
    the snapshot at model_path.

    NOTE(review): mutable default arguments (dichroic dict, EL_SIZE list) are
    shared across calls — harmless while they are only read, but fragile.
    NOTE(review): the model is rebuilt inside the loop for every file, and
    only the LAST file's prediction is returned — confirm that is intended.
    """
    print(df)
    fs = df.file_name.tolist()
    full_fs = [os.path.join(input_directory, f) for f in fs]
    for i in range(len(fs)):
        # One output subfolder per input file, named after the file stem.
        out_path=os.path.join(output_directory, fs[i].split('.')[0])
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        img=io.imread(full_fs[i])
        ### SLOW: uncomment before use!!!!!!
        # img = pixel_shift_3d(img, dichroic_dictionary=dichroic, json_file=jsonfilepath)
        # img = correct_shift_upsampling(img)
        img = convert_16_to_8(img)
        save_tif_path=os.path.join(out_path, fs[i])
        io.imsave(save_tif_path, img)
        # Split a multi-channel stack into single-channel images.
        if len(img.shape) == 3:
            image_list = [img[i] for i in range(img.shape[0])]
        else:
            image_list = [img]
        for n in range(len(image_list)):
            # Normalize all channels to the expected (miny, minx) extent,
            # with optional Gaussian smoothing after the resize.
            if image_list[n].shape != (miny, minx):
                reshaped_img = resize(image_list[n], (miny, minx), anti_aliasing=True)
                if GAUSS is not None:
                    reshaped_img = gaussian(reshaped_img, sigma=GAUSS)
                image_list[n] = reshaped_img
        # Add a trailing channel axis for the network input.
        image_list = [np.expand_dims(img, axis=2) for img in image_list]
        # NOTE(review): img_sizes is computed but never used.
        img_sizes = [i.shape for i in image_list]
        X_test = np.empty(((0,) + image_list[0].shape))
        X_test = np.append(X_test, np.array(image_list), axis=0)
        data_test = [{'rawdata': img, 'element_size_um': EL_SIZE} for img in X_test]
        test_generator = preproc.TileGenerator(data = data_test,
                                               instancelabels=None,
                                               tile_shape=TILE_SHAPE,
                                               padding=PADDING,
                                               n_classes=2,
                                               border_weight_sigma_px=SIGMA_SEP,
                                               border_weight_factor=LAMBDA,
                                               foreground_background_ratio=V_BAL)
        model = unet.Unet2D(snapshot=model_path,
                            n_channels=1,
                            n_classes=2,
                            n_levels=4,
                            batch_norm = BATCH_NORM,
                            upsample=False,
                            relu_alpha=0.1,
                            n_features=64, name='U-Net')
        prediction = model.predict(test_generator)
        print()
        # print(img.shape)
    return(prediction)
|
<reponame>efrenbg1/rmote.app<gh_stars>0
import socket
import ssl
import threading
import select
import queue
class mqtls:
    """Minimal client for the MqTLS broker protocol.

    Requests are single-line "MQSx..." frames with two-digit length prefixes,
    sent over a TLS socket and answered with one newline-terminated reply.
    Access is serialized with a lock so an instance can be shared between
    threads; on any send/receive failure each operation reconnects once and
    retries. With user/pw set the client authenticates on connect; with both
    None it runs in master mode (mpublish/mretrieve/muser/macls only).
    """

    def __init__(self, host="127.0.0.1", port=2443, user=None, pw=None):
        self._host = host
        self._port = port
        self._user = user
        self._pw = pw
        self._socket = None
        self._broker = None
        # NOTE(review): this queue is never used after construction — confirm
        # before removing.
        self._queue = queue.Queue()
        self._lock = threading.Lock()
        with self._lock:
            self.__connect()

    def __len2(self, string):
        """Length of *string* as a zero-padded two-digit string (protocol prefix)."""
        return str(len(string)).zfill(2)

    def __connect(self):
        """(Re)establish the TLS connection; authenticate when credentials are set."""
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.settimeout(10)
        # NOTE(review): ssl.wrap_socket is deprecated and removed in Python
        # 3.12 — migrate to ssl.SSLContext().wrap_socket() (mind certificate
        # verification defaults) when supported interpreters allow.
        self._broker = ssl.wrap_socket(self._socket)
        self._broker.connect((self._host, self._port))
        if not self._broker:
            raise Exception("Could not connect to broker!")
        if self._user and self._pw:
            rx = None
            try:
                self.__send("MQS0" + self.__len2(self._user) +
                            self._user + self.__len2(self._pw) + self._pw + "1")
                rx = self.__receive()
            except Exception:
                raise Exception("MqTLS: error in auth")
            if not rx:
                raise Exception("MqTLS: error in auth")
            rx = rx.decode("utf-8")
            if len(rx) < 4:
                raise Exception("MqTLS: error in auth")
            if rx[:4] != "MQS0":
                raise Exception("MqTLS: error in auth")

    def __send(self, data):
        """Write one newline-terminated frame to the broker."""
        self._broker.send(str.encode(data + '\n'))

    def __receive(self):
        """Read one newline-terminated frame (at most ~210 bytes) from the broker."""
        buff = b''
        ready = select.select([self._broker], [], [], 1)
        if ready[0]:
            rx = self._broker.recv(1)
        else:
            raise Exception(
                "Timed out while waiting for response! Is broker up?")
        while rx != b'\n':
            if rx == b'':
                raise Exception("Read invalid character!")
            buff += rx
            if len(buff) > 210:
                break
            rx = self._broker.recv(1)
        return buff

    def __transact(self, msg, operation):
        """Send *msg* and return the decoded reply, reconnecting and retrying once.

        Raises if no reply could be obtained; *operation* names the caller in
        the error message. This replaces six copies of the same
        lock/send/receive/retry block in the original implementation.
        """
        rx = None
        with self._lock:
            try:
                self.__send(msg)
                rx = self.__receive()
            except Exception:
                self.__connect()
                self.__send(msg)
                rx = self.__receive()
            finally:
                if rx is None:
                    raise Exception("MqTLS: error in " + operation)
        return rx.decode("utf-8")

    def __ack(self, msg, operation, ok_code):
        """Run a transaction and report whether the broker acknowledged with *ok_code*.

        Returns an explicit False on a non-matching reply (publish/mpublish
        previously fell through returning None — still falsy, now explicit).
        """
        rx = self.__transact(msg, operation)
        if len(rx) < 4:
            return False
        return rx[:4] == ok_code

    def __fetch(self, msg, operation):
        """Run a retrieve-style transaction; return the payload string or None."""
        rx = self.__transact(msg, operation)
        if len(rx) < 4:
            return None
        if rx[:4] == "MQS7":
            # Empty slot / nothing stored.
            return None
        if rx[:4] == "MQS2":
            # Payload is length-prefixed: two digits, then the message body.
            return rx[6:6 + int(rx[4:6])]
        return None

    def publish(self, topic, slot, message):
        """Store *message* in *slot* of *topic* as the authenticated user; True on ack."""
        if self._user is None:
            raise Exception(
                "MqTLS: error in publish, use mpublish in master mode")
        msg = "MQS1" + self.__len2(topic) + topic + \
            str(slot) + self.__len2(message) + message
        return self.__ack(msg, "publish", "MQS1")

    def retrieve(self, topic, slot):
        """Return the message stored in *slot* of *topic*, or None."""
        if self._user is None:
            raise Exception(
                "MqTLS: error in retrieve, use mretrieve in master mode")
        msg = "MQS7" + self.__len2(topic) + topic + str(slot)
        return self.__fetch(msg, "retrieve")

    def mpublish(self, topic, slot, message):
        """Master-mode publish; True on broker ack."""
        msg = "MQS6" + self.__len2(topic) + topic + \
            str(slot) + self.__len2(message) + message
        return self.__ack(msg, "mpublish", "MQS6")

    def mretrieve(self, topic, slot):
        """Master-mode retrieve; returns the payload or None."""
        msg = "MQS7" + self.__len2(topic) + topic + str(slot)
        return self.__fetch(msg, "mretrieve")

    def muser(self, user):
        """Ask the broker to refresh credentials for *user*; True on MQS8 ack."""
        return self.__ack("MQS8" + self.__len2(user) + user, "user update", "MQS8")

    def macls(self, user):
        """Ask the broker to refresh ACLs for *user*; True on MQS9 ack."""
        return self.__ack("MQS9" + self.__len2(user) + user, "acls update", "MQS9")
|
import itertools
import numpy as np
from challenge import Challenge
class ChallengeSolution(Challenge):
    """Seven-segment display decoding puzzle (Advent of Code 2021 day 8 style).

    Each input line holds ten scrambled digit patterns ('crossed') and four
    output patterns ('target'). Part 1 counts output digits identifiable by
    segment count alone; part 2 deduces the full wiring per line and decodes
    the four-digit outputs.
    """
    def __init__(self):
        # Initialise super
        super().__init__()
        # Define digit masks
        # Row d lists which of the 7 segments (a..g) are lit for digit d.
        self.digits = np.asarray([
            [True , True , True , False, True , True , True ], # 0
            [False, False, True , False, False, True , False], # 1
            [True , False, True , True , True , False, True ], # 2
            [True , False, True , True , False, True , True ], # 3
            [False, True , True , True , False, True , False], # 4
            [True , True , False, True , False, True , True ], # 5
            [True , True , False, True , True , True , True ], # 6
            [True , False, True , False, False, True , False], # 7
            [True , True , True , True , True , True , True ], # 8
            [True , True , True , True , False, True , True ], # 9
        ])
        # Length dict
        # 1 = len(2), 4 = len(4), 7 = len(3), 8 = len(7)
        self.fixed_lengths = {2, 3, 4, 7}
    ########################################################################
    #                              Load data                               #
    ########################################################################
    def load(self, path):
        """Parse the input file into (crossed, target) pairs of segment sets."""
        # Load data from path
        with open(path) as infile:
            data = infile.read().strip().split('\n')
        # Parse data
        for i, item in enumerate(data):
            crossed, target = item.split(' | ')
            data[i] = (
                [set(x) for x in crossed.split()],
                [set(x) for x in target .split()],
            )
        # Return data
        return data
    ########################################################################
    #                              Exercises                               #
    ########################################################################
    def part_1(self, data):
        """Count output digits whose segment count uniquely identifies them (1, 4, 7, 8)."""
        # Initialise result
        result = 0
        # Loop over data
        for crossed, target in data:
            result += sum([len(x) in self.fixed_lengths for x in target])
        # Return result
        return result
    def part_2(self, data):
        """Deduce each line's digit patterns by set algebra and sum the decoded outputs."""
        # Initialise result
        result = 0
        # Loop over all data
        for crossed, target in data:
            # Sort crossed by length so positions group digits by segment
            # count: [1, 7, 4, {2,3,5}, {0,6,9}, 8].
            crossed = list(sorted(crossed, key=lambda x: len(x)))
            # Define each number in crossed
            one = crossed[0]
            four = crossed[2]
            seven = crossed[1]
            eight = crossed[9]
            # Among the 5-segment digits only 3 contains both of 1's segments;
            # among the 6-segment digits only 6 misses one of them.
            three = [x for x in crossed[3:6] if len(x & one ) == 2][0]
            six = [x for x in crossed[6:9] if len(x & one ) == 1][0]
            zero = [x for x in crossed[6:9] if len(x | three) == 7 and x != six ][0]
            nine = [x for x in crossed[6:9] if len(x | three) != 7][0]
            two = [x for x in crossed[3:6] if len(x | nine ) == 7 and x != three][0]
            five = [x for x in crossed[3:6] if len(x | nine ) == 6 and x != three][0]
            # Define numbers
            crossed = [zero,one,two,three,four,five,six,seven,eight,nine]
            # Check where target equals crossed
            for i, x in enumerate(reversed(target)):
                result += pow(10, i) * crossed.index(x)
        # Return result
        return result
    def part_2_naive(self, data):
        """Brute-force variant kept for reference only.

        NOTE(review): the immediate `return 0` deliberately short-circuits
        this method — everything below is dead code retained as
        documentation of the permutation-search approach.
        """
        return 0
        # Get all possible permutations
        permutations = np.asarray([
            list(permutation) for permutation in itertools.permutations('abcdefg')
        ])
        result = 0
        from tqdm import tqdm
        # Loop over all data
        for crossed, target in tqdm(data):
            # Loop over all permutations
            for permutation in permutations:
                # Check if the observation represents at least one digit for all observations
                if all(
                    # Check if permutation is correct
                    (self.digits == np.isin(permutation, observation)).all(axis=-1).any()
                    # Loop over all observations
                    for observation in crossed
                ):
                    subresult = ''
                    for digit in target:
                        digit = np.argwhere((self.digits == np.isin(permutation, digit)).all(axis=-1))[0][0]
                        subresult += str(digit)
                    result += int(subresult)
                    # Stop checking for other permutations
                    break
        # Return result
        return result
|
import fiepipelib.encryption.public.publickey
import fiepipelib.locallymanagedtypes.data.abstractmanager
import typing
def FromJSONData(jsondata):
    """Build a RegisteredEntity from its JSON-dict representation."""
    assert isinstance(jsondata,dict)
    entity = RegisteredEntity()
    entity._fqdn = jsondata['fqdn']
    entity._publicKeys = []
    # Each serialized key is deserialized into a fresh key object in place.
    for key_data in jsondata['public_keys']:
        public_key = fiepipelib.encryption.public.publickey.legalentitypublickey()
        fiepipelib.encryption.public.publickey.FromJSONData(key_data, public_key)
        entity._publicKeys.append(public_key)
    entity._revocations = [revocation for revocation in jsondata['revocations']]
    return entity
def ToJSONData(entity):
    """Serialize a RegisteredEntity to a plain JSON-compatible dict."""
    assert isinstance(entity, RegisteredEntity)
    data = {}
    data['fqdn'] = entity._fqdn
    serialized_keys = []
    for public_key in entity._publicKeys:
        assert isinstance(public_key, fiepipelib.encryption.public.publickey.legalentitypublickey)
        serialized_keys.append(fiepipelib.encryption.public.publickey.ToJSONData(public_key))
    data['public_keys'] = serialized_keys
    data['revocations'] = [revocation for revocation in entity._revocations]
    return data
def FromParameters(fqdn, publicKeys=None, revocations=None):
    """Build a RegisteredEntity from explicit values.

    @param fqdn: fully qualified domain name of the entity.
    @param publicKeys: list of legalentitypublickey objects (fresh empty list if None).
    @param revocations: list of revoked signatures (fresh empty list if None).

    The previous mutable-default arguments ([]) were shared across calls, so
    keys/revocations appended to one entity leaked into every later entity
    built without explicit arguments; None sentinels fix that.
    """
    ret = RegisteredEntity()
    ret._fqdn = fqdn
    ret._publicKeys = [] if publicKeys is None else publicKeys
    ret._revocations = [] if revocations is None else revocations
    return ret
class localregistry(
    fiepipelib.locallymanagedtypes.data.abstractmanager.AbstractUserLocalTypeManager['RegisteredEntity']):
    """Local (per-user) persistence for RegisteredEntity records, keyed by FQDN.

    Serialization delegates to the module-level FromJSONData/ToJSONData.
    """
    def FromJSONData(self, data):
        return FromJSONData(data)
    def ToJSONData(self, item):
        return ToJSONData(item)
    def GetColumns(self):
        """Extend the base columns with the fqdn text column used as primary key."""
        ret = super().GetColumns()
        ret.append(("fqdn","text"))
        return ret
    def GetPrimaryKeyColumns(self):
        return ["fqdn"]
    def GetManagedTypeName(self):
        # Table/type name under which entities are stored.
        return "registered_legal_entity"
    def DeleteByFQDN(self, fqdn):
        """Remove all entities registered under *fqdn*."""
        self._Delete("fqdn",str(fqdn))
    def GetByFQDN(self, fqdn) -> typing.List['RegisteredEntity']:
        """Return the entities registered under *fqdn* (possibly empty)."""
        return self._Get([("fqdn",fqdn)])
class RegisteredEntity(object):
    """Configuration and functionality of a legal entity of which the local user is a member of some kind.

    Typically this is imported or pulled from a trusted location.
    When you import or use a registeredlegalentity, you are trusting that the source of that information is who
    they say they are.
    The registeredlegalentity contains all the public keys and revocation lists on which to base further digital trust.
    The registeredlegalentity should be updated regularly and securely, to maintain an up to date revocation list."""
    _fqdn: str = None
    _publicKeys: typing.List[fiepipelib.encryption.public.publickey.legalentitypublickey] = None
    _revocations: typing.List[bytes] = None
    def get_fqdn(self):
        """The well known fully qualified domain name by which this entity is known."""
        return self._fqdn
    def get_public_keys(self):
        """A list of the public keys of this entity."""
        return self._publicKeys
    def get_revocations(self):
        """A list of the key revocations for this entity."""
        return self._revocations
    def validate_public_key(self, publicKey):
        """Determines the validity of the given public key, as having been signed by this entity. Or not.
        @param publicKey: The key to validate
        """
        assert isinstance(publicKey, fiepipelib.encryption.public.publickey.abstractpublickey)
        return self.validate_message(publicKey._key, publicKey.GetSignatures())
    def validate_message(self, msg, signatures):
        """
        Determines the validity of the given message, as being from this entity, using the given signatures.
        @param msg: The message to validate
        @param signatures: a list of fiepipelib.signature.signature objects for the message.
        """
        assert isinstance(signatures,list)
        unrevokedSignatures = signatures.copy()
        #first we filter out any revoked signatures.
        for s in signatures:
            for r in self._revocations:
                # BUG FIX: this previously compared against the undefined name
                # 'signature' (a NameError whenever any revocation exists);
                # the intent is clearly the current signature 's'.
                # NOTE(review): method name follows the GetX convention used
                # by GetSigner()/GetAlgorithm() below — confirm against
                # fiepipelib.signature.signature.
                if r == s.GetSignature():
                    unrevokedSignatures.remove(s)
                    # Stop after removing once: a second matching revocation
                    # would otherwise raise ValueError from list.remove.
                    break
        # NOTE(review): fiepipelib.legalentity.authority and
        # fiepipelib.signature are not imported at the top of this module —
        # confirm they are reachable through the fiepipelib package import.
        signerName = fiepipelib.legalentity.authority.data.entity_authority.get_signer_name(self._fqdn)
        #now we validate with the right algorithm if we have it.
        #walk through the keys
        for pk in self._publicKeys:
            #only if they're enabled.
            if pk.isEnabled():
                assert isinstance(pk, fiepipelib.encryption.public.publickey.legalentitypublickey)
                #check each signature. It only takes one good one.
                algorithm = pk.GetAlgorithm()
                for s in unrevokedSignatures:
                    assert isinstance(s, fiepipelib.signature.signature)
                    #only bother trying if the signer is the right signer
                    if s.GetSigner() == signerName:
                        #only bother if its the right algorithm
                        if (algorithm == s.GetAlgorithm()):
                            #check validity move on if it's not valid.
                            if pk.verify(msg,s):
                                #if we match, we've validated!
                                return True
        #if we get here we don't have an algorithm that generated a match. It isn't valid.
        return False
|
import collections
import csv
import logging
import numpy as np
import torch.utils.data as main_data
import torchvision.transforms as transforms
from FedML.fedml_api.data_preprocessing.base import Cutout, DataLoader, LocalDataset
from .datasets import Landmarks
class LandmarksDataLoader(DataLoader):
    """Federated data loader for the Google Landmarks (GLD) splits.

    Per-client partitions come from CSV mapping files with user_id/image_id/
    class columns; images are read from data_dir by the Landmarks dataset.
    """
    # Normalization constants shared by the train and eval transforms.
    IMAGENET_MEAN = [0.5, 0.5, 0.5]
    IMAGENET_STD = [0.5, 0.5, 0.5]
    def __init__(self, data_dir, train_bs, test_bs, client_number, fed_train_map_file, fed_test_map_file):
        super().__init__(data_dir, train_bs, test_bs)
        self.client_number = client_number
        # BUG FIX: the original assignment ended with a stray trailing comma,
        # storing a 1-tuple; load_partition_data then handed that tuple to
        # _read_csv's open(), which raises TypeError.
        self.fed_train_map_file = fed_train_map_file
        self.fed_test_map_file = fed_test_map_file
    @staticmethod
    def _read_csv(path: str):
        """Reads a csv file, and returns the content inside a list of dictionaries.
        Args:
            path: The path to the csv file.
        Returns:
            A list of dictionaries. Each row in the csv file will be a list entry. The
            dictionary is keyed by the column names.
        """
        with open(path, 'r') as f:
            return list(csv.DictReader(f))
    @classmethod
    def _data_transforms(cls):
        """Build the (train, eval) torchvision pipelines: 224px crops, Cutout on train."""
        # IMAGENET_MEAN = [0.5071, 0.4865, 0.4409]
        # IMAGENET_STD = [0.2673, 0.2564, 0.2762]
        image_size = 224
        train_transform = transforms.Compose([
            # transforms.ToPILImage(),
            transforms.RandomResizedCrop(image_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(cls.IMAGENET_MEAN, cls.IMAGENET_STD),
        ])
        train_transform.transforms.append(Cutout(16))
        valid_transform = transforms.Compose([
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(cls.IMAGENET_MEAN, cls.IMAGENET_STD),
        ])
        return train_transform, valid_transform
    @classmethod
    def get_mapping_per_user(cls, fn):
        """Read a user->samples mapping CSV and derive per-client bookkeeping.

        Returns (data_files, data_local_num_dict, net_dataidx_map): the flat
        list of row dicts grouped by user, the per-client sample counts, and
        the per-client (start, end) index range into data_files.
        """
        mapping_table = cls._read_csv(fn)
        expected_cols = ['user_id', 'image_id', 'class']
        if not all(col in mapping_table[0].keys() for col in expected_cols):
            logging.error(f'{mapping_table} has wrong format.')
            raise ValueError(f'The mapping file must contain user_id, image_id and class columns. '
                             f'The existing columns are {",".join(mapping_table[0].keys())}')
        data_local_num_dict = dict()
        mapping_per_user = collections.defaultdict(list)
        data_files = list()
        net_dataidx_map = dict()
        sum_temp = 0
        for row in mapping_table:
            user_id = row['user_id']
            mapping_per_user[user_id].append(row)
        for user_id, data in mapping_per_user.items():
            num_local = len(mapping_per_user[user_id])
            # One client's rows occupy a contiguous [start, end) range in data_files.
            net_dataidx_map[int(user_id)] = (sum_temp, sum_temp + num_local)
            data_local_num_dict[int(user_id)] = num_local
            sum_temp += num_local
            data_files += mapping_per_user[user_id]
        assert sum_temp == len(data_files)
        return data_files, data_local_num_dict, net_dataidx_map
    # for centralized training
    def get_dataloader(self, train_files, test_files, dataidxs=None):
        """Build (train, test) DataLoaders; dataidxs restricts the train subset."""
        dl_obj = Landmarks
        transform_train, transform_test = self._data_transforms()
        train_ds = dl_obj(self.data_dir, train_files, dataidxs=dataidxs, transform=transform_train)
        test_ds = dl_obj(self.data_dir, test_files, dataidxs=None, transform=transform_test)
        train_dl = main_data.DataLoader(dataset=train_ds, batch_size=self.train_bs, shuffle=True, drop_last=False)
        test_dl = main_data.DataLoader(dataset=test_ds, batch_size=self.test_bs, shuffle=False, drop_last=False)
        return train_dl, test_dl
    # for local devices
    def get_dataloader_test(self, train_files, test_files, dataidxs_train=None, dataidxs_test=None):
        """Like get_dataloader, but also restricts the test subset via dataidxs_test."""
        dl_obj = Landmarks
        transform_train, transform_test = self._data_transforms()
        train_ds = dl_obj(self.data_dir, train_files, dataidxs=dataidxs_train, transform=transform_train)
        test_ds = dl_obj(self.data_dir, test_files, dataidxs=dataidxs_test, transform=transform_test)
        train_dl = main_data.DataLoader(dataset=train_ds, batch_size=self.train_bs, shuffle=True, drop_last=False)
        test_dl = main_data.DataLoader(dataset=test_ds, batch_size=self.test_bs, shuffle=False, drop_last=False)
        return train_dl, test_dl
    def load_partition_data(self):
        """Assemble global and per-client loaders plus counts into a LocalDataset."""
        train_files, data_local_num_dict, net_dataidx_map = self.get_mapping_per_user(self.fed_train_map_file)
        test_files = self._read_csv(self.fed_test_map_file)
        class_num = len(np.unique([item['class'] for item in train_files]))
        train_data_num = len(train_files)
        train_data_global, test_data_global = self.get_dataloader(train_files, test_files)
        test_data_num = len(test_files)
        # get local dataset (the original no-op self-assignment of
        # data_local_num_dict was removed)
        train_data_local_dict = dict()
        test_data_local_dict = dict()
        for client_idx in range(self.client_number):
            dataidxs = net_dataidx_map[client_idx]
            train_data_local, test_data_local = self.get_dataloader(train_files, test_files, dataidxs)
            train_data_local_dict[client_idx] = train_data_local
            test_data_local_dict[client_idx] = test_data_local
        return LocalDataset(train_data_num=train_data_num, test_data_num=test_data_num,
                            train_data_global=train_data_global, test_data_global=test_data_global,
                            local_data_num_dict=data_local_num_dict, train_data_local_dict=train_data_local_dict,
                            test_data_local_dict=test_data_local_dict, output_len=class_num)
if __name__ == '__main__':
    # Smoke test: build the federated GLD loader for the selected split and
    # print dataset sizes plus a few batches (global and per-client).
    main_data_dir = './cache/images'
    fed_g23k_train_map_file = '../../../data/gld/data_user_dict/gld23k_user_dict_train.csv'
    fed_g23k_test_map_file = '../../../data/gld/data_user_dict/gld23k_user_dict_test.csv'
    fed_g160k_train_map_file = '../../../data/gld/data_user_dict/gld160k_user_dict_train.csv'
    fed_g160k_map_file = '../../../data/gld/data_user_dict/gld160k_user_dict_test.csv'
    # noinspection DuplicatedCode
    dataset_name = 'g160k'
    # Client counts match the official GLD-23k / GLD-160k federated splits.
    if dataset_name == 'g23k':
        main_client_number = 233
        main_fed_train_map_file = fed_g23k_train_map_file
        main_fed_test_map_file = fed_g23k_test_map_file
    elif dataset_name == 'g160k':
        main_client_number = 1262
        main_fed_train_map_file = fed_g160k_train_map_file
        main_fed_test_map_file = fed_g160k_map_file
    else:
        raise NotImplementedError
    dl = LandmarksDataLoader(main_data_dir, 10, 10, main_client_number, main_fed_train_map_file, main_fed_test_map_file)
    ds = dl.load_partition_data()
    print(ds.train_data_num, ds.test_data_num, ds.output_len)
    print(ds.local_data_num_dict)
    # Print the first five global batches, then five per client.
    for _, (main_data, label) in zip(range(5), ds.train_data_global):
        print(main_data)
        print(label)
        print("=============================\n")
    for main_client_idx in range(main_client_number):
        for _, (main_data, label) in zip(range(5), ds.train_data_local_dict[main_client_idx]):
            print(main_data)
            print(label)
|
import matplotlib.pyplot as plt
# from matplotlib import patches
import numpy as np
import scipy as sp
loadFolder = 'theta9000'
monteMatrix = np.load('./' + loadFolder + '/monteArray.npy')
# calculate values necessary for future use of matrix
'''
while True:
print("Input choice to display alpha and gamma histograms, [Y/N]")
histChoice = input()
if histChoice == 'Y' or 'N' or 'y' or 'n':
break
else:
print("Improper input detected")
print("Input choice to display (s)catterplot, (h)istogram, or (n)either")
plotchoice = input()
'''
# TODO: Convert the following function into a comprehensive object;
# break separate graphing and output functions into member functions.
def carloAnalysis(monteMatrix,
                  # show determines if relevant plots are displayed or not
                  # plotAGs determines if alpha & gamma histograms are plotted
                  show=False, plotAGs=False,
                  # plotConfidence toggles a monte carlo vs sigma values plot
                  # jonesPlotType toggles s=scatter, h=2D histogram, c=contour
                  plotConfidence=False, jonesPlotType='s',
                  # saveFolder determines output destination folder for graphs
                  # imageType determines saved image format of graphs
                  saveFolder='NONE', imageType='png'):
    """Plot per-sideband alpha/gamma histograms and Jones scatterplots from a
    Monte-Carlo results matrix, and save a mu/sigma summary table.

    monteMatrix layout (as used below): slice [0, :, i] holds fitted
    mu/sigma parameters, [1, 1, :] holds the observed sideband orders, and
    rows [1:, col, i] hold the per-run Monte-Carlo samples for column
    ``col`` of sideband ``i``.
    NOTE(review): the ``plotAGs``, ``jonesPlotType`` and ``imageType``
    parameters are currently unused by the body -- confirm intent.
    """
    # calculate values necessary for future use of matrix
    AGwidth = int(len(np.reshape(monteMatrix[0, 2:, 0], -1))/4)
    excitations = ['0', '-45', '-90', '45']
    jones = ['xx', 'xy', 'yx', 'yy']
    observedSidebands = np.array(np.reshape(monteMatrix[1, 1, :], -1))
    # summary table seeded with a zero row (stripped again before saving)
    MuSigmaArray = np.zeros(AGwidth*4+1)
    # create initial figure
    # display results for each given sideband
    for i in range(len(observedSidebands)):
    # for i in range(1):
        arrayAppend = np.array(observedSidebands[i])
        # plot alpha histogram
        # plt.subplot(AGwidth, 3, 1)
        # create figure for subplots to go under
        fig = plt.figure()
        # pick the English ordinal suffix for the title
        # if not a X1, X2, X3 order band or if a X1X sideband eg 12th
        if ((int(observedSidebands[i]) % 10 >= 4) or
                (int(observedSidebands[i]) % 10 == 0) or
                (np.floor(int(observedSidebands[i]) / 10) % 10 == 1)):
            fig.suptitle(str(int(observedSidebands[i])) + 'th order sideband')
        else:
            # sidebands should always be even I believe but just in case
            suffix = ['st', 'nd', 'rd']
            fig.suptitle(str(int(observedSidebands[i])) +
                         suffix[int(observedSidebands[i]) % 10 - 1]
                         + ' order sideband')
        # convergence plot: sigma vs number of Monte-Carlo runs, only for the
        # first and last sideband and only when requested
        if (((i == 0) or (i == (len(observedSidebands)-1))) and
                plotConfidence):
            # create figure onto which plots will be created
            conPlot = fig
            # generating data points to be plotted
            # create array of all monte carl numbers cast to integers
            carlo = np.array((monteMatrix[1:, 0, i]).astype(int))
            # find even distance between indices excluding 0th
            # such that index*100 is final indice address
            index = ((len(carlo)-1)/100)
            # begin X array with 1st element, need 2 values for a sigma
            X = np.array(carlo[0+int(np.round(index))])
            # x axis is monte carlo number, sampled 100 times evenly
            # goes from 0 to 98
            for n in range(99):
                # create appendments onto X of evenly spaced monte values
                X = np.append(X, carlo[int(np.round((n+2)*index))])
            # y axis is sigma value for that number of monte carlo runs
            # assign sigma value of monte carlo 0
            # xyReal: 12, xyImag: 13, yyReal: 16 , yyImag: 17
            jonesIndice = [12, 13, 16, 17]
            for jI in range(int(len(jonesIndice))):
                # start Y array with standard deviation from 0th to X[0]
                Y = np.array(np.std(monteMatrix[1:X[0],
                                    jonesIndice[jI], i]))
                for n in range(99):
                    # fill in the other 99 elements from X[1] to x[99]
                    Y = np.append(Y, np.std(monteMatrix[1:X[n+1],
                                            jonesIndice[jI], i]))
                # add subplot to figure
                sbp = conPlot.add_subplot(int(np.round(len(jonesIndice)/2)),
                                          2, jI+1)
                sbp.plot(X[1:], Y[1:])
            conPlot.show()
            plt.show()
            # curve fitting the plot
            # Xnew = np.linespace(X.min(), X.max(), 300)
            # spl = sp.make_interp_spline(X, Y, Xnew)
            #
        # construct alpha histogram subplots (left column), with the fitted
        # normal pdf overlaid in red
        for j in range(AGwidth):
            alphaMu = monteMatrix[0, 2+(j*2), i]
            alphaSigma = monteMatrix[0, 3+(j*2), i]
            sbp = fig.add_subplot(AGwidth, 3, (3*j+1))
            sbp.set_ylabel(excitations[j])
            if j == 0:
                sbp.set_title('alphas')
            sbp.set_yticks([])
            aCount, aBins, aIgnored = sbp.hist(
                np.reshape(monteMatrix[1:, 2+j, i], -1),
                30, density=True)
            sbp.plot(aBins, 1/(alphaSigma * np.sqrt(2 * np.pi)) *
                     np.exp(- (aBins - alphaMu)**2 / (2 * alphaSigma**2)),
                     linewidth=2, color='r')
        # construct gamma histogram subplots (middle column)
        for j in range(AGwidth):
            gammaMu = monteMatrix[0, 10+(j*2), i]
            gammaSigma = monteMatrix[0, 11+(j*2), i]
            sbp = fig.add_subplot(AGwidth, 3, (3*j+2))
            sbp.set_ylabel(excitations[j])
            if j == 0:
                sbp.set_title('gammas')
            sbp.set_yticks([])
            aCount, aBins, aIgnored = sbp.hist(
                np.reshape(monteMatrix[1:, 6+j, i], -1),
                30, density=True)
            sbp.plot(aBins, 1/(gammaSigma * np.sqrt(2 * np.pi)) *
                     np.exp(- (aBins - gammaMu)**2 / (2 * gammaSigma**2)),
                     linewidth=2, color='r')
        # construct jones matrix xy axis scatterplot subplots (right column)
        for j in range(4):
            sbp = fig.add_subplot(4, 3, 3*j+3)
            sbp2 = sbp.twinx()
            sbp2.set_ylabel(jones[j])
            if j == 0:
                sbp2.set_title('Jones')
            sbp.set_ylabel('Imaginary')
            sbp.set_xlabel('Real')
            sbp.set_yticks([])
            sbp.set_xticks([])
            sbp2.set_yticks([])
            # do some magic here
            # real number mean & sigma
            jrMu = np.mean(monteMatrix[1:, 10+(2*j), i])
            arrayAppend = np.append(arrayAppend, jrMu)
            jrSigma = np.std(monteMatrix[1:, 10+(2*j), i])
            arrayAppend = np.append(arrayAppend, jrSigma)
            # imaginary number mean & sigma
            jiMu = np.mean(monteMatrix[1:, 11+(2*j), i])
            arrayAppend = np.append(arrayAppend, jiMu)
            jiSigma = np.std(monteMatrix[1:, 11+(2*j), i])
            arrayAppend = np.append(arrayAppend, jiSigma)
            '''
            # display mu and sigma values for given jones on sideband slice
            print(jones[j]+' jreal: $mu$:' + str(jrMu)
                  + ' $sigma$:' + str(jrSigma)
                  + ' jimag: $mu$ '+str(jiMu)
                  + ' $sigma$ ' + str(jiSigma))
            '''
            # '''
            # scatterplot method
            sbp.scatter(monteMatrix[1:, 10+(2*j), i],
                        monteMatrix[1:, 11+(2*j), i],
                        s=1,
                        marker='.')
            # single point plot of mean values
            sbp.scatter(jrMu, jiMu, c='r', marker="1")
            # save scatter plot for that order within folder
            # NOTE(review): this writes into the module-level loadFolder, not
            # the saveFolder parameter, and re-saves once per j -- confirm.
            fig.savefig(('./' + loadFolder + '/order_' +
                         str(int(observedSidebands[i]))
                         + '_scatterplot.png'))
            '''
            # 2D distribution would be ellipse with semimajor axis as Sigmas
            # confidence_ellipse(jrSigma, jiSigma, sbp)
            # twoDSigma = patches.Ellipse(xy=(jrMu, jiMu),
            #                             width=jrSigma, height=jiSigma,
            #                             edgecolor='r')
            # sbp.add_patch(twoDSigma)
            # 2d histogram method
            sbp.hist2d(monteMatrix[1:, 10+(2*j), i],
                       monteMatrix[1:, 11+(2*j), i],
                       20)
            # save histogram plot to folder
            fig.savefig('./' + loadFolder + '/order_' +
                        str(observedSidebands[i])
                        + '_histogram')
            '''
        # fit plot layout and display
        MuSigmaArray = np.vstack((MuSigmaArray, arrayAppend))
        fig.tight_layout()
        fig.subplots_adjust(top=0.88)
        if show is True:
            fig.show()
    if saveFolder != 'NONE':
        # save output text matrix of mu and sigma values (seed row dropped)
        np.savetxt('./' + saveFolder + '/MuSigmaArray', MuSigmaArray[1:])
    # plt.show()
carloAnalysis(monteMatrix, saveFolder=loadFolder, plotConfidence=True)
|
"""Overview:
The proxy / server is built as an extension of the asyncore.dispatcher class.
There are two instantiations of SimpleServer listening on the given ports
for new connections, one for HTTP and the other for STP (ScopeTransferProtocol).
They do dispatch a connection to the appropriate classes, HTTPScopeInterface
for HTTP and ScopeConnection for STP. The client is the application which uses
the HTTP interface to connect to scope, the host is the Opera instance
which exposes the scope interface as a STP connection.
There are also two queues, one for HTTP and one for STP to return a scope
message to the client. Getting a new scope message is performed as GET request
with the path /get-message. If the STP queue is not empty then
the first of that queue is returned, otherwise the request is put
in the HTTP waiting-queue. If a new message arrives on the STP sockets it works
the other way around: if the waiting-queue is not empty, the message is
returned to the first waiting connection, otherwise it's put on
the STP message queue.
In contrast to the previous Java version there is only one waiting connection
for scope messages, the messages are dispatched to the target service
on the client side. The target service is added to the response
as custom header 'X-Scope-Message-Service'.
STP/1 messages have a header and a payload. The header is translated
to custom header fields, the payload is the body of the response:
X-Scope-Message-Service for the service name
X-Scope-Message-Command for the command id
X-Scope-Message-Status for the status code
X-Scope-Message-Tag for the tag
The server is named Dragonkeeper to stay in the established name space.
The server supports only one host and one client. The main purpose is
developing Opera Dragonfly.
See also http://dragonfly.opera.com/app/scope-interface for more details.
"""
import re
import httpconnection
import os
from time import time
from common import CRLF, RESPONSE_BASIC, RESPONSE_OK_CONTENT
from common import NOT_FOUND, BAD_REQUEST, get_timestamp, Singleton
# from common import pretty_dragonfly_snapshot
from utils import MessageMap, pretty_print_XML, pretty_print
from stpwebsocket import STPWebSocket
from websocket13 import TestWebSocket13, TestWebSocket13HighLoad
# the two queues:
# HTTP connections parked on /get-message until a scope message arrives
connections_waiting = []
# scope messages queued until a client asks for them
scope_messages = []
# tag -> (service, command id, send time in ms); used for --timing output
command_times = {}
# XML templates for the /services response
SERVICE_LIST = """<services>%s</services>"""
SERVICE_ITEM = """<service name="%s"/>"""
XML_PRELUDE = """<?xml version="1.0"?>%s"""
# STP/1 TransportMessage field 0 value marking an error message
MSG_TYPE_ERROR = 4
class Scope(Singleton):
    """Access layer for HTTPScopeInterface instances to the scope connection"""
    # maps internal version tokens to the protocol names shown to clients
    version_map = {
        "stp-1": "STP/1",
        "stp-0": "STP/0",
    }
    def __init__(self):
        # no-op sender until a real scope connection registers itself
        self.send_command = self.empty_call
        self.services_enabled = {}
        self.version = 'stp-0'
        self._service_list = []
        self._connection = None
        # HTTP connection parked while an STP/1 connect is in flight
        self._http_connection = None
    def empty_call(self, msg):
        """Placeholder sender used while no scope connection is registered."""
        pass
    def set_connection(self, connection):
        """ to register the scope connection"""
        self._connection = connection
        # default to the STP/0 sender; set_STP_version() upgrades to STP/1
        self.send_command = connection.send_command_STP_0
    def get_scope_connection(self):
        """Return the registered scope connection (or None)."""
        return self._connection
    def set_service_list(self, list):
        """to register the service list"""
        self._service_list = list
    def return_service_list(self, http_connection):
        """to get the service list.
        in STP/1 the request of the service list does trigger to (re) connect
        the client. Only after the Connect command was performed successfully
        the service list is returned to the client. any state must be reset"""
        # empty the scope message queue
        while scope_messages:
            scope_messages.pop()
        if self.version == 'stp-0':
            http_connection.return_service_list(self._service_list)
        elif self.version == 'stp-1':
            if self._connection:
                # answer asynchronously once the host confirms the connect
                self._http_connection = http_connection
                self._connection.connect_client(self._connect_callback)
            else:
                http_connection.return_service_list(self._service_list)
        else:
            print "Unsupported version in scope.return_service_list(conn)"
    def set_STP_version(self, version):
        """to register the STP version.
        the version gets set as soon as the STP/1 token is received"""
        if version == "stp-1":
            self.version = "stp-1"
            self.send_command = self._connection.send_command_STP_1
        else:
            print "This stp version is not jet supported"
    def get_STP_version(self):
        """Return the client-facing protocol name, e.g. 'STP/1'."""
        return self.version_map[self.version]
    def reset(self):
        """Drop all per-connection state so a new host can register."""
        self._service_list = []
        self.send_command = self.empty_call
        self.services_enabled = {}
        self._connection = None
    def _connect_callback(self):
        # once the command/message map is known the pending /services request
        # can be answered; otherwise build the map first and re-enter here
        if MessageMap.has_map():
            self._http_connection.return_service_list(self._service_list)
            self._http_connection = None
        else:
            MessageMap(self._service_list, self._connection,
                   self._connect_callback, self._http_connection.context)
# module-level singleton shared by all HTTP connections
scope = Scope()
class HTTPScopeInterface(httpconnection.HTTPConnection):
    """To expose a HTTP interface of the scope interface.
    Documentation of the scope interface itself see:
    http://dragonfly.opera.com/app/scope-interface/
    The first part of the path is the command name, other parts are arguments.
    If there is no matching command, the path is served.
    GET methods:
    STP/0
        /services
            to get a list of available services
        /enable/<service name>
            to enable the given service
        /get-message
            to get a pending message or to wait for the next one
            header informations are added as custom header fields like:
            X-Scope-Message-Service for the service name
    STP/1
        /services
            to get a list of available services
        /get-message
            to get a pending message or to wait for the next one
            header informations are added as custom header fields like:
            X-Scope-Message-Service for the service name
            X-Scope-Message-Command for the command id
            X-Scope-Message-Status for the status code
            X-Scope-Message-Tag for the tag
            the response body is the message in JSON format
            (except timeout responses which are still sent as xml)
        /stp-1-channel
            create a web socket channel
    POST methods:
    STP/0:
        /post-command/<service name>
            request body: message
    STP/1:
        /post-command/<service-name>/<command-id>/<tag>
            request body: message in JSON format
    The STP/1 HTTP interface supports only JSON format for the messages.
    """
    # scope specific responses
    # RESPONSE_SERVICELIST % ( timestamp, content length content )
    # HTTP/1.1 200 OK
    # Date: %s
    # Server: Dragonkeeper/0.8
    # Cache-Control: no-cache
    # Content-Type: application/xml
    # Content-Length: %s
    #
    # %s
    RESPONSE_SERVICELIST = RESPONSE_OK_CONTENT % (
        '%s',
        'Cache-Control: no-cache' + CRLF,
        'application/xml',
        '%s',
        '%s',
    )
    # RESPONSE_OK_OK % ( timestamp )
    # HTTP/1.1 200 OK
    # Date: %s
    # Server: Dragonkeeper/0.8
    # Cache-Control: no-cache
    # Content-Type: application/xml
    # Content-Length: 5
    #
    # <ok/>
    RESPONSE_OK_OK = RESPONSE_OK_CONTENT % (
        '%s',
        'Cache-Control: no-cache' + CRLF,
        'application/xml',
        len("<ok/>"),
        "<ok/>",
    )
    # RESPONSE_TIMEOUT % ( timestamp )
    # HTTP/1.1 200 OK
    # Date: %s
    # Server: Dragonkeeper/0.8
    # Cache-Control: no-cache
    # Content-Type: application/xml
    # Content-Length: 10
    #
    # <timeout/>
    RESPONSE_TIMEOUT = RESPONSE_OK_CONTENT % (
        '%s',
        'Cache-Control: no-cache' + CRLF,
        'application/xml',
        len('<timeout/>'),
        '<timeout/>',
    )
    # SCOPE_MESSAGE_STP_0 % ( timestamp, service, message length, message )
    # HTTP/1.1 200 OK
    # Date: %s
    # Server: Dragonkeeper/0.8
    # Cache-Control: no-cache
    # X-Scope-Message-Service: %s
    # Content-Type: application/xml
    # Content-Length: %s
    #
    # %s
    SCOPE_MESSAGE_STP_0 = RESPONSE_OK_CONTENT % (
        '%s',
        'Cache-Control: no-cache' + CRLF + \
        'X-Scope-Message-Service: %s' + CRLF,
        'application/xml',
        '%s',
        '%s',
    )
    # SCOPE_MESSAGE_STP_1 % ( timestamp, service, command, status,
    #                          tag, message length, message )
    # HTTP/1.1 200 OK
    # Date: %s
    # Server: Dragonkeeper/0.8
    # Cache-Control: no-cache
    # X-Scope-Message-Service: %s
    # X-Scope-Message-Command: %s
    # X-Scope-Message-Status: %s
    # X-Scope-Message-Tag: %s
    # Content-Type: text/plain
    # Content-Length: %s
    #
    # %s
    SCOPE_MESSAGE_STP_1 = RESPONSE_OK_CONTENT % (
        '%s',
        'Cache-Control: no-cache' + CRLF + \
        'X-Scope-Message-Service: %s' + CRLF + \
        'X-Scope-Message-Command: %s' + CRLF + \
        'X-Scope-Message-Status: %s' + CRLF + \
        'X-Scope-Message-Tag: %s' + CRLF,
        'text/plain',
        '%s',
        '%s',
    )
    def __init__(self, conn, addr, context):
        """Wrap an accepted client socket; copy debug flags from context."""
        httpconnection.HTTPConnection.__init__(self, conn, addr, context)
        self.debug = context.debug
        self.debug_format = context.format
        self.debug_format_payload = context.format_payload
        self.verbose_debug = context.verbose_debug
        self.debug_only_errors = context.only_errors
        self.context = context
        # for backward compatibility
        self.scope_message = self.get_message
        self.send_command = self.post_command
        self.is_timing = context.is_timing
    # ============================================================
    # GET commands ( first part of the path )
    # ============================================================
    def services(self):
        """to get the service list"""
        if connections_waiting:
            print ">>> failed, connections_waiting is not empty"
        scope.return_service_list(self)
        self.timeout = 0
    def return_service_list(self, serviceList):
        """Write the XML service list response for *serviceList*."""
        content = SERVICE_LIST % "".join(
            [SERVICE_ITEM % service.encode('utf-8')
                for service in serviceList])
        self.out_buffer += self.RESPONSE_SERVICELIST % (
            get_timestamp(),
            len(content),
            content)
    def get_stp_version(self):
        """Return the current STP version (e.g. 'STP/1') as plain text."""
        content = scope.get_STP_version()
        self.out_buffer += RESPONSE_OK_CONTENT % (
            get_timestamp(),
            '',
            "text/plain",
            len(content),
            content)
        self.timeout = 0
    def enable(self):
        """to enable a scope service"""
        service = self.arguments[0]
        if scope.services_enabled[service]:
            print ">>> service is already enabled", service
        else:
            scope.send_command("*enable %s" % service)
            scope.services_enabled[service] = True
            # the stp-* pseudo services select the protocol version
            if service.startswith('stp-'):
                scope.set_STP_version(service)
        self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
        self.timeout = 0
    def get_message(self):
        """general call to get the next scope message"""
        # answer immediately from the queue, otherwise park this connection
        # until a message arrives (see return_scope_message_STP_*)
        if scope_messages:
            if scope.version == 'stp-1':
                self.return_scope_message_STP_1(scope_messages.pop(0), self)
            else:
                self.return_scope_message_STP_0(scope_messages.pop(0), self)
            self.timeout = 0
        else:
            connections_waiting.append(self)
    # TODO correct?
    def stp_1_channel(self):
        """Upgrade this connection to a websocket bridged to scope."""
        if self.headers.get("Upgrade") == "websocket":
            self.del_channel()
            self.timeout = 0
            STPWebSocket(self.socket,
                         self.headers,
                         self.in_buffer,
                         self.path,
                         self.context,
                         scope.get_scope_connection())
        else:
            self.out_buffer += BAD_REQUEST % get_timestamp()
            self.timeout = 0
    def test_web_sock_13(self):
        """Upgrade to a RFC-6455 test websocket (echo-style test helper)."""
        if self.headers.get("Upgrade") == "websocket":
            self.del_channel()
            self.timeout = 0
            TestWebSocket13(self.socket,
                            self.headers,
                            self.in_buffer,
                            self.path)
        else:
            self.out_buffer += BAD_REQUEST % get_timestamp()
            self.timeout = 0
    def test_web_sock_13_high_load(self):
        """Upgrade to the high-load variant of the test websocket."""
        if self.headers.get("Upgrade") == "websocket":
            self.del_channel()
            self.timeout = 0
            TestWebSocket13HighLoad(self.socket,
                                    self.headers,
                                    self.in_buffer,
                                    self.path)
        else:
            self.out_buffer += BAD_REQUEST % get_timestamp()
            self.timeout = 0
    # ============================================================
    # POST commands
    # ============================================================
    def post_command(self):
        """send a command to scope"""
        raw_data = self.raw_post_data
        is_ok = False
        if scope.version == "stp-1":
            args = self.arguments
            """
            message type: 1 = command, 2 = response, 3 = event, 4 = error
            message TransportMessage
            {
                required string service = 1;
                required uint32 commandID = 2;
                required uint32 format = 3;
                optional uint32 status = 4;
                optional uint32 tag = 5;
                required binary payload = 8;
            }
            /send-command/" + service + "/" + command_id + "/" + tag
            """
            # remember the send time per tag so the response can be timed
            if self.is_timing:
                command_times[args[2]] = (args[0], args[1], time() * 1000)
            scope.send_command({
                0: 1, # message type
                1: args[0],
                2: int(args[1]),
                3: 1,
                5: int(args[2]),
                8: self.raw_post_data,
            })
            is_ok = True
        else:
            service = self.arguments[0]
            if service in scope.services_enabled:
                if not raw_data.startswith("<?xml") and \
                    not raw_data.startswith("STP/1"):
                    raw_data = XML_PRELUDE % raw_data
                msg = "%s %s" % (service, raw_data.decode('UTF-8'))
                scope.send_command(msg)
                is_ok = True
            else:
                print "tried to send a command before %s was enabled" % service
        self.out_buffer += (is_ok and
                            self.RESPONSE_OK_OK or
                            BAD_REQUEST) % get_timestamp()
        self.timeout = 0
    def snapshot(self):
        """store a markup snapshot"""
        raw_data = self.raw_post_data
        if raw_data:
            # body is "<name>CRLF<markup>"; strip empty-attribute artifacts
            # and self-closing script tags before writing <name>.xml
            name, data = raw_data.split(CRLF, 1)
            f = open(name + ".xml", 'wb')
            # f.write(pretty_dragonfly_snapshot(data))
            data = data.replace("'=\"\"", "")
            data = re.sub(r'<script(?:[^/>]|/[^>])*/>[ \r\n]*', '', data)
            f.write(data.replace("'=\"\"", ""))
            f.close()
        self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
        self.timeout = 0
    def savefile(self):
        """save file"""
        raw_data = self.raw_post_data
        file_name = self.arguments[0]
        print file_name
        if not os.path.exists("screenshots"):
            os.mkdir("screenshots")
        if raw_data:
            f = open(os.path.join("screenshots", file_name), 'wb')
            f.write(raw_data)
            f.close()
        self.out_buffer += self.RESPONSE_OK_OK % get_timestamp()
        self.timeout = 0
    # ============================================================
    # STP 0
    # ============================================================
    def return_scope_message_STP_0(self, msg, sender):
        """ return a message to the client"""
        service, payload = msg
        if self.debug:
            pretty_print_XML("\nsend to client: %s" % service, payload, self.debug_format)
        self.out_buffer += self.SCOPE_MESSAGE_STP_0 % (
            get_timestamp(),
            service,
            len(payload),
            payload)
        self.timeout = 0
        # if this message was pushed by the STP side (not by our own GET),
        # trigger the write ourselves
        if not sender == self:
            self.handle_write()
    # ============================================================
    # STP 1
    # ============================================================
    def return_scope_message_STP_1(self, msg, sender):
        """ return a message to the client
        message TransportMessage
        {
            required string service = 1;
            required uint32 commandID = 2;
            required uint32 format = 3;
            optional uint32 status = 4;
            optional uint32 tag = 5;
            required binary payload = 8;
        }
        """
        if not msg[8]:
            # workaround, status 204 does not work
            msg[8] = ' '
        if self.debug and (not self.debug_only_errors or msg[4] == MSG_TYPE_ERROR):
            pretty_print("send to client:", msg,
                self.debug_format, self.debug_format_payload, self.verbose_debug)
        # --timing: print round-trip time for the command matching this tag
        if self.is_timing:
            tag = str(msg[5])
            if tag in command_times:
                item = command_times.pop(tag)
                print item[0],
                print MessageMap.get_cmd_name(item[0], item[1]),
                print time() * 1000 - item[2]
        self.out_buffer += self.SCOPE_MESSAGE_STP_1 % (
            get_timestamp(),
            msg[1], # service
            msg[2], # command
            msg[4], # status
            msg[5], # tag
            len(msg[8]),
            msg[8], # payload
            )
        self.timeout = 0
        if not sender == self:
            self.handle_write()
    def timeouthandler(self):
        """Answer an expired request: <timeout/> for parked /get-message
        connections, 404 for anything else."""
        if self in connections_waiting:
            connections_waiting.remove(self)
            if not self.command in ["get_message", "scope_message"]:
                print ">>> failed, wrong connection type in queue"
            self.out_buffer += self.RESPONSE_TIMEOUT % get_timestamp()
        else:
            self.out_buffer += NOT_FOUND % (get_timestamp(), 0, '')
        self.timeout = 0
    def flush(self):
        """Force a pending timeout response out immediately."""
        if self.timeout:
            self.timeouthandler()
    # ============================================================
    # Implementations of the asyncore.dispatcher class methods
    # ============================================================
    def writable(self):
        """asyncore hook; also fires the timeout when it has elapsed."""
        if self.timeout and time() > self.timeout and not self.out_buffer:
            self.timeouthandler()
        return bool(self.out_buffer)
    def handle_close(self):
        """asyncore hook; drop this connection from the waiting queue."""
        if self in connections_waiting:
            connections_waiting.remove(self)
        self.close()
|
import sys
import csv
import json
import re
import string
import copy
# Split record identifiers out of two backtick-delimited CSV exports.
#
# File 1: ITEM_BARCODE column -> one combined "_PAS.tsv" plus numbered
#         500-line chunk files.
# File 2: ITEM_BARCODE, MFHD_ID and BIB_ID columns -> a combined file plus
#         numbered 500-line chunks each; MFHD/BIB values are de-duplicated.
if len(sys.argv) < 3:
    print("Usage:\npython3 exclude.py <csv file of records to delineate> <csv file of records to delineate>")
    sys.exit(1)

# --- first input file: barcodes only ---
csvfile = open(sys.argv[1], 'r')
reader = csv.DictReader(csvfile, delimiter="`", quoting=csv.QUOTE_NONE)
PAS = open(sys.argv[1][:-4] + "_PAS.tsv", 'w')
PAS_count = 0
PAS_00X = open(sys.argv[1][:-4] + "_PAS_" + f'{PAS_count:03}' + ".tsv", 'w')
n = 0
for line in reader:
    out = str(line["ITEM_BARCODE"])
    PAS.write(out + "\t\n")
    PAS_00X.write(out + "\t\n")
    n += 1
    # roll over to a fresh chunk file every 500 rows
    if n % 500 == 0:
        n = 0
        PAS_00X.close()
        PAS_count += 1
        PAS_00X = open(sys.argv[1][:-4] + "_PAS_" + f'{PAS_count:03}' + ".tsv", 'w')
# close (and flush) the first file's handles before the names are reused
csvfile.close()
PAS.close()
PAS_00X.close()

# --- second input file: barcodes, holdings ids and bib ids ---
csvfile = open(sys.argv[2], 'r')
reader = csv.DictReader(csvfile, delimiter="`", quoting=csv.QUOTE_NONE)
PAS = open(sys.argv[2][:-4] + "_PAS.tsv", 'w')
PAS_count = 0
PAS_00X = open(sys.argv[2][:-4] + "_PAS_" + f'{PAS_count:03}' + ".tsv", 'w')
MFHD_count = 0
MFHD = open(sys.argv[2][:-4] + "_MFHD.tsv", 'w')
MFHD_00X = open(sys.argv[2][:-4] + "_MFHD_" + f'{MFHD_count:03}' + ".tsv", 'w')
# sets give O(1) de-duplication (the original lists made this O(n^2))
MFHD_list = set()
BIB_count = 0
BIB = open(sys.argv[2][:-4] + "_BIB.tsv", 'w')
BIB_00X = open(sys.argv[2][:-4] + "_BIB_" + f'{BIB_count:03}' + ".tsv", 'w')
BIB_list = set()
n0 = 0
n1 = 0
n2 = 0
for line in reader:
    out = str(line["ITEM_BARCODE"])
    PAS.write(out + "\n")
    PAS_00X.write(out + "\n")
    n0 += 1
    out = str(line["MFHD_ID"])
    if out not in MFHD_list:
        MFHD.write(out + "\n")
        MFHD_00X.write(out + "\n")
        MFHD_list.add(out)
        n1 += 1
        if n1 % 500 == 0:
            n1 = 0
            MFHD_00X.close()
            MFHD_count += 1
            MFHD_00X = open(sys.argv[2][:-4] + "_MFHD_" + f'{MFHD_count:03}' + ".tsv", 'w')
    out = str(line["BIB_ID"])
    if out not in BIB_list:
        BIB.write(out + "\n")
        BIB_00X.write(out + "\n")
        BIB_list.add(out)
        n2 += 1
        if n2 % 500 == 0:
            n2 = 0
            BIB_00X.close()
            BIB_count += 1
            BIB_00X = open(sys.argv[2][:-4] + "_BIB_" + f'{BIB_count:03}' + ".tsv", 'w')
    if n0 % 500 == 0:
        n0 = 0
        PAS_00X.close()
        PAS_count += 1
        PAS_00X = open(sys.argv[2][:-4] + "_PAS_" + f'{PAS_count:03}' + ".tsv", 'w')
# close everything so buffered rows are flushed deterministically
csvfile.close()
for _handle in (PAS, PAS_00X, MFHD, MFHD_00X, BIB, BIB_00X):
    _handle.close()
|
#!/usr/bin/env python
# take a large pcap and dump the data into a CSV so it can be analysed by something like R.
#
# This version we want to know what the source IP is, what the protocol is and based on those
# peices of info run a function to grab that data and write a line to a CSV file
#
# Ignore all traffic sourced from the self IP, pass self ip as on arg
# Ignore incoming response traffic
# Prereqs: pyshark, http://kiminewt.github.io/pyshark/
import pyshark, sys, getopt, csv
from datetime import datetime
from string import maketrans
# Functions
def readpcap(pfile):
    """Open *pfile* with pyshark without keeping parsed packets in memory.

    Bug fix: the original passed the expression ``"keep_packets"==False``
    (which evaluates to ``False``) as the second *positional* argument --
    that slot is ``display_filter`` in pyshark's FileCapture, so
    ``keep_packets`` was never set.  Pass it by keyword so large captures
    stream instead of accumulating in RAM.
    """
    return pyshark.FileCapture(pfile, keep_packets=False)
    #return pyshark.FileCapture(pfile)
def epochconv(tsstr):
# convert the frame time into iso via epoch, clumsy but works better for excel
# return list so we can have both in the CSV, epoch and friendly
retlist=[]
dtobj=datetime.fromtimestamp(float(tsstr))
retlist.append(str(dtobj).strip())
retlist.append(tsstr.strip())
return retlist
def appendcsv(rlist, cfile):
    """Append *rlist* to *cfile* as a single comma-joined CSV line.

    Fix: the original called ``outputfile.close()`` inside the ``with``
    block -- redundant, since the context manager already closes the file.
    """
    outputline = ",".join(map(str, rlist))
    with open(cfile, "a") as outputfile:
        outputfile.write(outputline + "\n")
    return
def tcpdecode(lyrlst):
    """Flatten a pyshark layer's field values into one '#'-joined string.

    Commas are replaced with spaces so the value can be embedded safely in
    the CSV line written by appendcsv().
    Fix: ``string.maketrans`` is Python-2-only (removed in Python 3);
    ``str.replace`` does the same single-character substitution portably.
    """
    tmpdict = lyrlst._all_fields
    wsdecode = "#".join(str(tmpdict[key]) for key in tmpdict)
    return wsdecode.replace(",", " ")
def udpdecode(lyrlst):
    """Flatten a pyshark layer's field values into one '#'-joined string.

    Commas are replaced with spaces so the value can be embedded safely in
    the CSV line written by appendcsv().
    Fix: ``string.maketrans`` is Python-2-only (removed in Python 3);
    ``str.replace`` does the same single-character substitution portably.
    """
    tmpdict = lyrlst._all_fields
    wsdecode = "#".join(str(tmpdict[key]) for key in tmpdict)
    return wsdecode.replace(",", " ")
def parseTCP(tpkt):
    """Build a CSV row (list) describing one TCP packet."""
    row = initrow()
    # a fourth layer means there is an application payload worth decoding
    if len(tpkt.layers) > 3:
        row[8] = str(tcpdecode(tpkt.layers[3]))
    row[3] = 6
    row[4] = str(tpkt.ip.src).strip()
    row[5] = int(tpkt.tcp.dstport)
    row[6] = int(tpkt.tcp.srcport)
    row[7] = str(tpkt.tcp.flags).strip()
    stamp = str(tpkt.frame_info.time_epoch)
    moment = datetime.fromtimestamp(float(stamp))
    row[0] = moment.strftime("%Y-%m-%d")
    row[1] = moment.strftime("%H:%M:%S.%f")
    row[2] = stamp
    return row
def parseICMP(ipkt):
    """Build a CSV row (list) describing one ICMP packet."""
    row = initrow()
    row[3] = 1
    row[4] = str(ipkt.ip.src).strip()
    # ICMP type/code reuse the dst-port / flag-code columns
    row[5] = int(ipkt.icmp.type)
    row[6] = int(ipkt.icmp.code)
    stamp = str(ipkt.frame_info.time_epoch)
    moment = datetime.fromtimestamp(float(stamp))
    row[0] = moment.strftime("%Y-%m-%d")
    row[1] = moment.strftime("%H:%M:%S.%f")
    row[2] = stamp
    return row
def parseUDP(upkt):
    """Build a CSV row (list) describing one UDP packet."""
    row = initrow()
    # a fourth layer means there is an application payload worth decoding
    if len(upkt.layers) > 3:
        row[8] = str(udpdecode(upkt.layers[3]))
    row[3] = 17
    row[4] = str(upkt.ip.src).strip()
    row[5] = int(upkt.udp.dstport)
    row[6] = int(upkt.udp.srcport)
    stamp = str(upkt.frame_info.time_epoch)
    moment = datetime.fromtimestamp(float(stamp))
    row[0] = moment.strftime("%Y-%m-%d")
    row[1] = moment.strftime("%H:%M:%S.%f")
    row[2] = stamp
    return row
def parseIPother(ipopkt):
    """Fallback row builder for IP protocols other than TCP/ICMP/UDP."""
    tmplist = initrow()
    print "running parseIP Other "
    # only the raw protocol number and source IP are recorded
    tmplist[3]= int(ipopkt.ip.proto)
    tmplist[4]= str(ipopkt.ip.src).strip()
    tsstr=str(ipopkt.frame_info.time_epoch)
    dtobj=datetime.fromtimestamp(float(tsstr))
    tmplist[0]= dtobj.strftime("%Y-%m-%d")
    tmplist[1]= dtobj.strftime("%H:%M:%S.%f")
    tmplist[2]= tsstr
    return tmplist
def protorouter(evalpkt):
    """Dispatch a packet to the parser matching its IP protocol number."""
    handlers = {6: parseTCP, 1: parseICMP, 17: parseUDP}
    parse = handlers.get(int(evalpkt.ip.proto), parseIPother)
    return parse(evalpkt)
#def initrow():
# # iso-tstamp Date, iso-tstamp Time, epoch-tstamp, proto, src-ip, dest port/type, flag/code
# rwlist = [str('iso-date'),str('iso-time'),str('epoch-tstamp'),int(6),str('1.2.3.4'),None,None]
# return rwlist
def initrow():
    """Return a fresh CSV row template.

    Columns: iso-date, iso-time, epoch-tstamp, proto, src-ip,
    dest port/type, flag/code, src port, payload decode.
    """
    return ['iso-date', 'iso-time', 'epoch-tstamp', 6, '1.2.3.4',
            None, None, None, None]
def parsefilterfile(filtercsv):
fltrlist=[]
try:
ffh = open(filtercsv,'r')
for line in ffh:
fltrlist.append(tuple(line.strip().split(',')))
except Exception , e:
print e.message
return fltrlist
def csvwrite(pcapdict,thiscsv):
    """Route every packet in *pcapdict* through protorouter and append the
    resulting row to *thiscsv*."""
    # uncomment if you want to watch processing in a cron log
    # NOTE(review): despite the comment above, this print IS active.
    print pcapdict
    for pkt in pcapdict:
        appendcsv(protorouter(pkt),thiscsv)
|
"""
================================================================
Continuous and analytical diffusion signal modelling with MAPMRI
================================================================
We show how to model the diffusion signal as a linear combination
of continuous functions from the MAPMRI basis [Ozarslan2013]_.
We also compute the analytical Orientation Distribution Function (ODF),
the Return To the Origin Probability (RTOP), the Return To the Axis
Probability (RTAP), and the Return To the Plane Probability (RTPP).
First import the necessary modules:
"""
from dipy.reconst.mapmri import MapmriModel
from dipy.viz import fvtk
from dipy.data import fetch_cenir_multib, read_cenir_multib, get_sphere
from dipy.core.gradients import gradient_table
import matplotlib.pyplot as plt
"""
Download and read the data for this tutorial.
MAPMRI requires multi-shell data, to properly fit the radial part of the basis.
The total size of the downloaded data is 1760 MBytes, however you only need to
fetch it once. Parameter ``with_raw`` of function ``fetch_cenir_multib`` is set
to ``False`` to only download eddy-current/motion corrected data.
"""
fetch_cenir_multib(with_raw=False)
"""
For this example we select only the shell with b-values equal to the one of the
Human Connectome Project (HCP).
"""
bvals = [1000, 2000, 3000]
img, gtab = read_cenir_multib(bvals)
data = img.get_data()
# restrict to a small slab (single coronal slice) to keep the example fast
data_small = data[40:65, 50:51, 35:60]
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data contains the voxel data and gtab contains a GradientTable
object (gradient information e.g. b-values). For example, to show the b-values
it is possible to write print(gtab.bvals).
Instantiate the MAPMRI Model.
radial_order is the radial order of the MAPMRI basis.
For details regarding the parameters see [Ozarslan2013]_.
"""
# radial_order controls the number of MAPMRI basis functions; lambd is the
# regularization weight and eap_cons toggles the positivity constraint
radial_order = 4
map_model = MapmriModel(gtab, radial_order=radial_order,
                        lambd=2e-1, eap_cons=False)
"""
Fit the MAPMRI model to the data
"""
mapfit = map_model.fit(data_small)
"""
Load an odf reconstruction sphere
"""
sphere = get_sphere('symmetric724')
"""
Compute the ODFs
"""
odf = mapfit.odf(sphere)
print('odf.shape (%d, %d, %d, %d)' % odf.shape)
"""
Display the ODFs
"""
# render the ODF field with fvtk and save a snapshot to odfs.png
r = fvtk.ren()
sfu = fvtk.sphere_funcs(odf, sphere, colormap='jet')
sfu.RotateX(-90)
fvtk.add(r, sfu)
fvtk.record(r, n_frames=1, out_path='odfs.png', size=(600, 600))
"""
.. figure:: odfs.png
:align: center
**Orientation distribution functions**.
With MAPMRI it is also possible to extract the Return To the Origin Probability
(RTOP), the Return To the Axis Probability (RTAP), and the Return To the Plane
Probability (RTPP). These ensemble average propagator (EAP) features directly
reflect microstructural properties of the underlying tissues [Ozarslan2013]_.
"""
# scalar EAP measures: return-to-origin/axis/plane probabilities
rtop = mapfit.rtop()
rtap = mapfit.rtap()
rtpp = mapfit.rtpp()
"""
Show the maps and save them in MAPMRI_maps.png.
"""
fig = plt.figure(figsize=(6, 6))
# cube/square roots rescale RTOP/RTAP to units of 1/length for display
ax1 = fig.add_subplot(2, 2, 1, title=r'$\sqrt[3]{RTOP}$')
ax1.set_axis_off()
ind = ax1.imshow((rtop[:, 0, :]**(1.0 / 3)).T,
                 interpolation='nearest', origin='lower', cmap=plt.cm.gray)
plt.colorbar(ind, shrink = 0.8)
ax2 = fig.add_subplot(2, 2, 2, title=r'$\sqrt{RTAP}$')
ax2.set_axis_off()
ind = ax2.imshow((rtap[:, 0, :]**0.5).T,
                 interpolation='nearest', origin='lower', cmap=plt.cm.gray)
plt.colorbar(ind, shrink = 0.8)
ax3 = fig.add_subplot(2, 2, 3, title=r'$RTPP$')
ax3.set_axis_off()
ind = ax3.imshow(rtpp[:, 0, :].T, interpolation='nearest',
                 origin='lower', cmap=plt.cm.gray)
plt.colorbar(ind, shrink = 0.8)
plt.savefig('MAPMRI_maps.png')
"""
.. figure:: MAPMRI_maps.png
:align: center
**RTOP, RTAP, and RTPP calculated using MAPMRI**.
.. [Ozarslan2013] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. include:: ../links_names.inc
"""
|
# -*- coding: utf-8 -*-
import numpy as np
import os
from keras.models import Sequential, Model
from keras.layers import Dense, Input, merge
from keras.layers import Reshape,LeakyReLU,ZeroPadding2D
from keras.layers.core import Activation, Dropout
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import Convolution2D, MaxPooling2D, Deconvolution2D
from keras.layers.core import Flatten
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import SGD, Adagrad
from PIL import Image
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.objectives import binary_crossentropy
import tensorflow as tf
from tqdm import tqdm
import scipy.misc as im
from deform_conv.layers import ConvOffset2D
#K.set_image_dim_ordering('th')
# Number of channels of the input/output images (RGB).
IN_CH = 3
# Spatial dimensions of the (square) images handled by the networks.
img_cols=256
img_rows=256
def convolution(inputs,filters,step,stride=2,Normal=True):
    """Encoder block: zero-pad, strided 4x4 conv, optional BN, LeakyReLU.

    step is only used to build unique layer names.
    """
    x = ZeroPadding2D(padding=(1,1))(inputs)
    x = Convolution2D(filters,4,4,subsample=(stride,stride),name='conv_%d'%step)(x)
    if Normal:
        x = BatchNormalization(name='CBat_%d'%step)(x)
    x = LeakyReLU(alpha=0.2,name='CLRelu_%d'%step)(x)
    return x
def convolution_offset_2D(inputs,filters,step,stride=2,Normal=True):
    """Encoder block with a deformable-convolution offset layer before the conv."""
    x = ZeroPadding2D(padding=(1,1))(inputs)
    # Learnable sampling offsets (deformable convolution).
    x = ConvOffset2D(filters,name='conv_%d_offset'%step)(x)
    x = Convolution2D(filters,4,4,subsample=(stride,stride),name='conv_%d'%step)(x)
    if Normal:
        x = BatchNormalization(name='CBat_%d'%step)(x)
    x = LeakyReLU(alpha=0.2,name='CLRelu_%d'%step)(x)
    return x
def deconvolution(inputs,filters,step,dropout):
    """Decoder block: 2x transposed conv + BN, tanh on the final step
    (step == 8), LeakyReLU otherwise, with optional dropout from the
    dropout schedule (indexed by step-1).
    """
    _, height, width, _ = inputs.get_shape().as_list()
    layer_id = 8 - step
    x = Deconvolution2D(filters,4,4,
                        output_shape=(None,2*height,2*width,filters),
                        subsample=(2,2),
                        border_mode='same',
                        name='Deconv_%d' % layer_id)(inputs)
    x = BatchNormalization(name='DBat_%d' % layer_id)(x)
    if step == 8:
        # Last decoder step: bound outputs to [-1, 1].
        x = Activation(activation='tanh')(x)
    else:
        x = LeakyReLU(alpha=0.2,name='DLRelu_%d' % layer_id)(x)
    if dropout[step-1] > 0:
        x = Dropout(dropout[step-1])(x)
    return x
def generator_model():
    """Build the U-Net generator.

    7 encoder blocks + a bottleneck + 7 decoder blocks with skip
    connections; maps a (256, 256, 3) image to a (256, 256, 3) image
    with a tanh output layer.

    Returns the uncompiled Keras Model.
    """
    # Removed: an exact duplicate of this function body that was kept as
    # a giant triple-quoted string (dead commented-out code).
    g_inputs = Input(shape=(img_cols,img_rows,3))
    encoder_filter = [64,128,256,512,512,512,512]
    Encoder = []
    #Number of encoder or decoder (same)
    nb_layer = len(encoder_filter)
    #reverse from encoder
    decoder_filter = encoder_filter[::-1]
    # Dropout schedule per decoder step (indexed by step-1); only the
    # first three decoder blocks use dropout.
    dropout = [0.5,0.5,0.5,0,0,0,0,0]
    # Build the encoder stack, keeping each output for skip connections.
    for i in range(nb_layer):
        if i == 0:
            encoder = convolution(g_inputs,encoder_filter[i],i+1)
        else:
            encoder = convolution(encoder,encoder_filter[i],i+1)
        Encoder.append(encoder)
    # Bottleneck layer.
    middle = convolution(Encoder[-1],512,8)
    # Build the decoder stack with U-Net skip connections (channel concat).
    for j in range(nb_layer):
        if j == 0:
            decoder = deconvolution(middle,decoder_filter[j],j+1,dropout)
        else:
            decoder = merge([decoder,Encoder[nb_layer-j]],mode='concat',concat_axis=-1)
            decoder = deconvolution(decoder,decoder_filter[j],j+1,dropout)
    # Final upsampling back to input resolution with 3 output channels.
    g_output = merge([decoder,Encoder[0]],mode='concat',concat_axis=-1)
    g_output = deconvolution(g_output,3,8,dropout)
    model = Model(g_inputs,g_output)
    return model
def discriminator_model():
    """Build the PatchGAN discriminator.

    Input is a (256, 256, 6) stack: the conditioning image concatenated
    (channel-wise) with either the real or the generated counterpart.
    Output is a (30, 30, 1) grid of per-patch sigmoid probabilities --
    the old docstring ("a (b, 1) logits") was incorrect.
    """
    # Removed: a dead, commented-out Sequential implementation.
    inputs = Input(shape=(img_cols,img_rows,IN_CH*2))
    d = ZeroPadding2D(padding=(1,1))(inputs)
    d = Convolution2D(64,4,4,subsample=(2,2))(d)
    d = LeakyReLU(alpha=0.2)(d)
    d = ZeroPadding2D(padding=(1,1))(d)
    d = Convolution2D(128,4,4,subsample=(2,2))(d)
    d = LeakyReLU(alpha=0.2)(d)
    d = ZeroPadding2D(padding=(1,1))(d)
    d = Convolution2D(256,4,4,subsample=(2,2))(d)
    d = LeakyReLU(alpha=0.2)(d)
    d = ZeroPadding2D(padding=(1,1))(d)
    d = Convolution2D(512,4,4,subsample=(1,1))(d)
    d = LeakyReLU(alpha=0.2)(d)
    d = ZeroPadding2D(padding=(1,1))(d)
    # Per-patch real/fake probability map.
    d = Convolution2D(1,4,4,subsample=(1,1),activation='sigmoid')(d)
    model = Model(inputs,d)
    return model
def generator_containing_discriminator(generator, discriminator):
    """Stack generator and (frozen) discriminator into one GAN model.

    Returns a Model mapping the conditioning image to
    [generated image, discriminator verdict].
    """
    source = Input((img_cols, img_rows,IN_CH))
    generated = generator(source)
    # The discriminator sees the conditioning image next to the fake.
    pair = merge([source, generated], mode='concat',concat_axis=-1)
    # Freeze the discriminator inside the combined model.
    discriminator.trainable = False
    verdict = discriminator(pair)
    return Model(source,[generated,verdict])
def generate_pic(generator,target,e):
    """Generate one sample from *target* (batch of 1) and save both the
    input and the generated image to disk, tagged with epoch *e*.
    """
    pic = generator.predict(target)
    # Drop the batch dimension before saving.
    pic = np.squeeze(pic,axis=0)
    target = np.squeeze(target,axis=0)
    im.imsave('target_%d.png' % e,target)
    im.imsave('pic_%d.png' % e,pic)
    # Removed: a dangling `np.mean` expression statement that had no effect.
def discriminator_on_generator_loss(y_true,y_pred):
    """Per-sample mean binary cross-entropy over the (30,30,1) patch map."""
    loss_map = K.binary_crossentropy(y_pred,y_true)
    return K.mean(loss_map, axis=(1,2,3))
def generator_l1_loss(y_true,y_pred):
    """Per-sample mean absolute (L1) error between generated and real images."""
    abs_error = K.abs(y_pred - y_true)
    return K.mean(abs_error,axis=(1,2,3))
def train(epochs,batchsize):
pic = np.load('/data4T1/liyh/data/SketchFace/CUHK/npy/s2f_A.npy')
target = np.load('/data4T1/liyh/data/SketchFace/CUHK/npy/s2f_B.npy')
target = np.stack((target,target,target),axis=3)
print(pic.shape)
print(target.shape)
pic = pic.astype('float32')
target = target.astype('float32')
pic = (pic - 127.5) / 127.5
target = (target - 127.5) / 127.5
batchCount = pic.shape[0] / batchsize
print 'Epochs',epochs
print 'Bathc_size',batchsize
print 'Batches per epoch',batchCount
generator = generator_model()
discriminator = discriminator_model()
gan = generator_containing_discriminator(generator,discriminator)
generator.compile(loss=generator_l1_loss, optimizer='RMSprop')
gan.compile(loss=[generator_l1_loss,discriminator_on_generator_loss] , optimizer='RMSprop')
discriminator.trainable = True
discriminator.compile(loss=discriminator_on_generator_loss, optimizer='RMSprop')
G_loss = []
D_loss = []
for e in xrange(1,epochs+1):
print '-'*15 , 'Epoch %d' % e , '-'*15
for _ in tqdm(xrange(batchCount)):
random_number = np.random.randint(1,pic.shape[0],size=batchsize)
batch_pic = pic[random_number]
batch_target = target[random_number]
batch_target2 = np.tile(batch_target,(2,1,1,1))
y_dis = np.zeros((2*batchsize,30,30,1))
y_dis[:batchsize] = 1.0
generated_pic = generator.predict(batch_target)
#Default is concat first dimention
concat_pic = np.concatenate((batch_pic,generated_pic))
dis_input = np.concatenate((concat_pic,batch_target2),axis=-1)
dloss = discriminator.train_on_batch(dis_input,y_dis)
random_number = np.random.randint(1,pic.shape[0],size=batchsize)
train_target = target[random_number]
batch_pic = pic[random_number]
y_gener = np.ones((batchsize,30,30,1))
discriminator.trainable = False
gloss = gan.train_on_batch(train_target,[batch_pic,y_gener])
discriminator.trainable = True
G_loss.append(gloss)
D_loss.append(dloss)
if e % 50 == 0 or e == 1:
generate_pic(generator,target[0:1],e)
generator.save('Model_para/pix2pix_g_epoch_%d.h5' % e)
discriminator.save('Model_para/pix2pix_d_epoch_%d.h5' % e)
gan.save('Model_para/pix2pix_gan_epoch_%d.h5' % e)
D_loss = np.array(D_loss)
G_loss = np.array(G_loss)
np.save('Model_para/dloss.npy',D_loss)
np.save('Model_para/gloss.npy',G_loss)
if __name__ == '__main__':
    # Train for 200 epochs with batch size 12, then rebuild the models
    # and reload the final checkpoints (e.g. for inference afterwards).
    train(200,12)
    g = generator_model()
    d = discriminator_model()
    gan = generator_containing_discriminator(g,d)
    g.load_weights('Model_para/pix2pix_g_epoch_200.h5')
    d.load_weights('Model_para/pix2pix_d_epoch_200.h5')
    gan.load_weights('Model_para/pix2pix_gan_epoch_200.h5')
|
###############################################################################
# @file pyVerifGUI/gui/editor/editor.py
# @package pyVerifGUI.gui.editor.editor
# @author <NAME>
# @copyright Copyright (c) 2020. Eidetic Communications Inc.
# All rights reserved
# @license Licensed under the BSD 3-Clause license.
# This license message must appear in all versions of this code including
# modified versions.
#
# @brief Definitions for basic editor with varying types of tabs
##############################################################################
from qtpy import QtWidgets, QtGui, QtCore
import os
from .highlighting import Highlighter
class Editor(QtWidgets.QWidget):
    """Container widget hosting editor tabs.

    Meant to be implemented as a tab under a QTabWidget.
    """
    def __init__(self, parent, config):
        super().__init__(parent)
        self.config = config
        self.old_filename = ""
        # Inner tab widget with user-closable tabs.
        self.layout = QtWidgets.QVBoxLayout(self)
        self.tab_widget = QtWidgets.QTabWidget(self)
        self.tab_widget.setTabsClosable(True)
        self.tab_widget.tabCloseRequested.connect(self.closeTab)
        self.tabs = []
        self.layout.addWidget(self.tab_widget)
        # Lazily-created permanent view-only tab (None until first viewFile).
        self.view_only_tab = None

    def viewFile(self, filename: str, line_num: int = -1):
        """Open *filename* in the permanent read-only "File Viewer" tab."""
        if self.view_only_tab is None:
            viewer = ViewFileTab(self)
            self.tabs.insert(0, viewer)
            self.tab_widget.insertTab(0, viewer, "File Viewer")
            # Flag that the viewer tab now exists.
            self.view_only_tab = True
        self.tabs[0].openFile(filename, line_num)

    def loadFile(self, filename: str, line_num: int = -1, always_new=True):
        """Open *filename* in a new editor tab.

        Places a marker at *line_num* if specified. With always_new=False,
        an already-open tab for the same file is re-used.
        """
        if not always_new:
            for existing in self.tabs:
                if existing.filename == filename:
                    return
        tab = EditorTab(self.tab_widget)
        tab.openFile(filename, line_num)
        self.tabs.append(tab)
        self.tab_widget.addTab(tab, filename)
        last = len(self.tabs) - 1
        self.tab_widget.setCurrentIndex(last)
        self.tab_widget.tabBar().setCurrentIndex(last)

    def closeTab(self, index: int):
        """Attempt to close the tab at *index*.

        Returns False only when the tab's close() refuses (e.g. the user
        cancelled the save prompt).
        """
        tab = self.tabs[index]
        self.tab_widget.setCurrentIndex(index)
        self.tab_widget.tabBar().setCurrentIndex(index)
        if not tab.close():
            return False
        self.tab_widget.removeTab(index)
        self.tabs.remove(tab)
        return True
class FileTab(QtWidgets.QWidget):
    """Base-class for other types of tabs.

    Owns the CodeEditor, the open file handle, and a status bar showing
    the cursor position and the file path.
    """
    def __init__(self, parent):
        super().__init__(parent)
        self.layout = QtWidgets.QGridLayout(self)
        self.setLayout(self.layout)
        self.editor = CodeEditor(self)
        self.editor.cursorPositionChanged.connect(self.printPosition)
        # Fixed-width font for code display.
        font = QtGui.QFont("monospace")
        font.setPixelSize(12)
        self.editor.document().setDefaultFont(font)
        # Bottom bar: cursor position on the left, filename centered.
        self.button_widget = QtWidgets.QWidget(self)
        self.button_layout = QtWidgets.QHBoxLayout(self.button_widget)
        self.position_text = QtWidgets.QLabel(self)
        self.file_text = QtWidgets.QLabel(self)
        self.file_text.setAlignment(QtCore.Qt.AlignCenter)
        self.button_layout.addWidget(self.position_text)
        self.button_layout.addWidget(self.file_text)
        self.button_layout.setStretch(1, 1)
        self.layout.addWidget(self.editor)
        self.layout.addWidget(self.button_widget)
        self.filename = ""
        # Open file handle; None until openFile() is called.
        self.file = None
        # Last requested cursor line; -1 means "none requested".
        self.line = -1

    def openFile(self, filename: str, cursor_position: int = -1):
        """Open a file and load it into the editor.

        cursor_position is a 1-based line number; when >= 0 the line is
        highlighted and scrolled into view.
        """
        # Close any previously opened file before replacing it.
        if self.file:
            self.file.close()
        self.line = cursor_position
        self.filename = filename
        # "r+" keeps the handle writable for EditorTab.save().
        self.file = open(self.filename, "r+")
        self.editor.setPlainText(self.file.read())
        self.file_text.setText(filename)
        # Pick syntax highlighting based on the file extension.
        suffix = filename.split('.')[-1]
        self.highlighter = Highlighter(self.editor.document(), suffix)
        # Only if we are explicitly given a cursor position do we add a marker
        if cursor_position >= 0:
            self.editor.highlightLine(cursor_position - 1)
            self.editor.setCursorPosition(cursor_position - 1, 0)
            self.editor.centerCursor()

    def close(self):
        """Performs cleanup actions and returns whether or not it is okay to close this tab"""
        # Bug fix: self.file is None until openFile() runs; closing the
        # tab before any file was loaded used to raise AttributeError.
        if self.file is not None:
            self.file.close()
        return True

    def printPosition(self):
        """Show the current (0-based) cursor line/column in the status bar."""
        cursor = self.editor.textCursor()
        line = cursor.blockNumber()
        column = cursor.columnNumber()
        self.position_text.setText(f"Line: {line}, Column: {column}")
class ViewFileTab(FileTab):
    """Read-only tab used as the permanent "File Viewer"."""
    def __init__(self, parent):
        super().__init__(parent)
        # Viewing only -- no edits allowed.
        self.editor.setReadOnly(True)
        # "Open" button promotes the viewed file into a real editor tab.
        self.open_button = QtWidgets.QPushButton(self.button_widget)
        self.open_button.setText("Open")
        self.open_button.clicked.connect(self.open)
        self.open_button.setEnabled(False)
        self.button_layout.addStretch()
        self.button_layout.addWidget(self.open_button)

    def openFile(self, filename: str, line_number: int = -1):
        """Load the file, then enable the "Open" button."""
        super().openFile(filename, line_number)
        self.open_button.setEnabled(True)

    def close(self):
        """The viewer tab is permanent; never allow closing it."""
        return False

    def open(self):
        """Open the currently viewed file in a full editor tab."""
        # NOTE(review): relies on the exact widget hierarchy
        # (tab -> stacked widget -> QTabWidget -> Editor); verify if the
        # layout changes.
        self.parent().parent().parent().loadFile(self.filename, self.line)
class EditorTab(FileTab):
    """Tab instance for opening and editing a file."""
    def __init__(self, parent):
        super().__init__(parent)
        # Save button, intended to be enabled only on unsaved changes.
        self.save_button = QtWidgets.QPushButton(self.button_widget)
        self.save_button.setText("Save")
        self.save_button.clicked.connect(self.save)
        self.save_button.setEnabled(False)
        self.button_layout.addStretch()
        self.button_layout.addWidget(self.save_button)
        # Necessary to manage save prompt and button
        self.editor.textChanged.connect(self.handleChanged)

    def openFile(self, filename: str, line_number: int = -1):
        """Loads file into open window"""
        super().openFile(filename, line_number)
        # Loading the text triggers textChanged; re-disable the button.
        self.save_button.setEnabled(False)

    def handleChanged(self):
        """On file change, enable the save button"""
        # XXX save button always ends up enabled, prompting when no change has been made
        self.save_button.setEnabled(True)

    def save(self):
        """Dump the editor contents back to the file on disk."""
        self.file.seek(0)
        self.file.write(self.editor.toPlainText())
        self.file.truncate()
        # Push the buffered data to disk so external tools see the save.
        self.file.flush()
        self.save_button.setEnabled(False)

    def close(self) -> bool:
        """If we have not saved (as indicated by the save button status), prompt user.

        Returns whether it is valid to remove the tab or not.
        """
        if self.save_button.isEnabled():
            prompt = SaveFileDialog()
            ok = prompt.exec_()
            # Necessary to check here because of "Discard" option
            if prompt.save:
                self.save()
        else:
            ok = True
        # Bug fix: only release the file handle when the tab actually
        # closes; previously a cancelled close still closed the file,
        # which broke any subsequent save().
        if ok:
            self.file.close()
        return ok
class SaveFileDialog(QtWidgets.QDialog):
    """Modal popup shown when a tab with unsaved changes is closed."""
    def __init__(self):
        super().__init__()
        # Set to True by onClick when the user chooses "Save".
        self.save = False
        self.setWindowTitle("Save File?")
        buttons = (QtWidgets.QDialogButtonBox.Save
                   | QtWidgets.QDialogButtonBox.Cancel
                   | QtWidgets.QDialogButtonBox.Discard)
        self.button_box = QtWidgets.QDialogButtonBox(buttons)
        self.button_box.clicked.connect(self.onClick)
        self.layout = QtWidgets.QHBoxLayout(self)
        self.layout.addWidget(self.button_box)

    def onClick(self, button: QtWidgets.QAbstractButton):
        """Map the clicked button's role onto accept/reject and self.save.

        Needed because the Discard button has no convenient dedicated
        signal to connect to.
        """
        role = self.button_box.buttonRole(button)
        if role == self.button_box.AcceptRole:
            # Save: remember the choice, then close as accepted.
            self.save = True
            self.accept()
        elif role == self.button_box.DestructiveRole:
            # Discard: close as accepted, without saving.
            self.accept()
        elif role == self.button_box.RejectRole:
            # Cancel: abort closing the tab.
            self.reject()
# CodeEditor to replace QScintill
# (pyside2 has no scintilla wrapper and distribution is a pain with QScintilla)
# "Borrows" from Qt Docs CodeEditor example
class CodeEditor(QtWidgets.QPlainTextEdit):
    """Code editor widget. Implements basic functionality only.

    A plain text edit with a painted line-number gutter and single-line
    highlighting, adapted from the Qt documentation's CodeEditor example.
    """
    def __init__(self, parent):
        super().__init__(parent)
        # Gutter widget; its paintEvent delegates back to this class.
        self.line_number_area = LineNumberArea(self)
        # Keep gutter width/position in sync with the document/viewport.
        self.blockCountChanged.connect(self.updateLineNumberAreaWidth)
        self.updateRequest.connect(self.updateLineNumberArea)
        self.updateLineNumberAreaWidth(0)

    def lineNumberAreaPaintEvent(self, event: QtGui.QPaintEvent):
        """Draws line numbers for every visible block inside the repaint rect."""
        painter = QtGui.QPainter(self.line_number_area)
        painter.fillRect(event.rect(), QtCore.Qt.lightGray)
        # Get geometry of the first visible block in viewport coordinates.
        block = self.firstVisibleBlock()
        block_num = block.blockNumber()
        top = round(
            self.blockBoundingGeometry(block).translated(
                self.contentOffset()).top())
        bottom = top + round(self.blockBoundingRect(block).height())
        # Update per line: walk blocks downward until past the dirty rect.
        while block.isValid() and top <= event.rect().bottom():
            if block.isVisible() and bottom >= event.rect().top():
                # Block numbers are 0-based; display 1-based line numbers.
                number = str(block_num + 1)
                painter.setPen(QtCore.Qt.black)
                painter.drawText(0, top, self.line_number_area.width(),
                                 self.fontMetrics().height(),
                                 QtCore.Qt.AlignRight, number)
            block = block.next()
            top = bottom
            bottom = top + round(self.blockBoundingRect(block).height())
            block_num += 1

    def lineNumberAreaWidth(self) -> int:
        """Specifies the width of the line number area"""
        # find the correct power of 10 (digit count of the largest line number)
        digits = 1
        max_ = max([1, self.blockCount()])
        while (max_ >= 10):
            max_ /= 10
            digits += 1
        # 3px padding plus the width of one digit per decimal place.
        return 3 + self.fontMetrics().horizontalAdvance("9") * digits

    def resizeEvent(self, event: QtGui.QResizeEvent):
        """Overrides resize event to keep the gutter glued to the left edge."""
        super(QtWidgets.QPlainTextEdit, self).resizeEvent(event)
        cr = self.contentsRect()
        self.line_number_area.setGeometry(
            QtCore.QRect(cr.left(), cr.top(), self.lineNumberAreaWidth(),
                         cr.height()))

    def updateLineNumberAreaWidth(self, newBlockCount: int):
        """Updates line number area based on line count"""
        # The signal supplies the block count, but the width is computed
        # from blockCount() directly.
        del newBlockCount
        self.setViewportMargins(self.lineNumberAreaWidth(), 0, 0, 0)

    def highlightLine(self, line: int):
        """Highlights a given (0-based) line with a light yellow background."""
        line_colour = QtGui.QColor(QtCore.Qt.yellow).lighter(160)
        selection = QtWidgets.QTextEdit.ExtraSelection()
        selection.format.setBackground(line_colour)
        # Extend the highlight across the full editor width.
        selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection,
                                     True)
        selection.cursor = QtGui.QTextCursor(
            self.document().findBlockByLineNumber(line))
        # Replaces any previous highlight (single-element selection list).
        self.setExtraSelections([selection])

    def updateLineNumberArea(self, rect: QtCore.QRect, dy: int):
        """Updates size of the line number area"""
        # Scroll the gutter vertically in lockstep with the editor;
        # otherwise repaint the affected region.
        if dy != 0:
            self.line_number_area.scroll(0, dy)
        else:
            self.line_number_area.scroll(0, rect.y(), rect)
        if rect.contains(self.viewport().rect()):
            self.updateLineNumberAreaWidth(0)

    def setCursorPosition(self, row: int, column: int):
        """Sets cursor position in document (0-based row and column)."""
        cursor = QtGui.QTextCursor(self.document().findBlockByLineNumber(row))
        cursor.setPosition(cursor.position() + column)
        self.setTextCursor(cursor)
class LineNumberArea(QtWidgets.QWidget):
    """Gutter widget that shows line numbers for a CodeEditor.

    Contains no business logic itself: all painting is delegated to the
    owning editor.
    """
    def __init__(self, editor):
        super().__init__(editor)
        # The CodeEditor that owns (and paints) this gutter.
        self.editor = editor

    def paintEvent(self, event: QtGui.QPaintEvent):
        """Delegate drawing of the line numbers to the editor."""
        self.editor.lineNumberAreaPaintEvent(event)
|
<gh_stars>10-100
import ipaddress
from functools import wraps
from selvpcclient.exceptions.base import ClientException
def _check_project_exists(client, project_id):
try:
client.projects.show(project_id)
except ClientException:
return False
return True
def _check_user_exists(client, user_id):
for user in client.users.list():
if user.id == user_id:
return True
return False
def _check_user_role(client, project_id, user_id):
roles = client.roles.get_project_roles(project_id)
for role in roles:
if role.user_id == user_id:
return True
return False
def _check_quotas_changes(client, after_quotas, project_id):
before_quotas = client.quotas.get_project_quotas(project_id)
before_quotas_json = before_quotas._info
for key in after_quotas:
for quota in after_quotas[key]:
item = [
item for item in before_quotas_json[key]
if (item["region"] == quota["region"] and
item["zone"] == quota["zone"] and
item["value"] == quota["value"])
]
if not item:
return True
return False
def _check_project_roles(client, roles):
to_add = []
try:
for role in roles:
if role not in [
r._info for r in
client.roles.get_project_roles(role["project_id"])
]:
to_add.append(role)
except ClientException:
raise ClientException(message="No such project")
return to_add
def _check_valid_quantity(objects):
for obj in objects:
if obj["quantity"] < 0:
return False
return True
def _check_valid_ip(floatingip):
# Python 3 compatibility hack
try:
unicode('')
except NameError:
unicode = str
try:
ipaddress.ip_address(unicode(floatingip))
except Exception:
return False
return True
def generate_result_msg(msg):
    """Join message parts and capitalize; default text when empty."""
    if not msg:
        return "Desirable state already in project"
    return " ".join(msg).capitalize()
def get_project_by_name(client, project_name):
    """Return the first project with the given name, or None."""
    return next(
        (project for project in client.projects.list()
         if project.name == project_name),
        None,
    )
def get_user_by_name(client, username):
    """Return the first user with the given name, or None."""
    return next(
        (user for user in client.users.list() if user.name == username),
        None,
    )
def get_floatingip_by_ip(client, floatingip):
    """Return the floating-IP object matching the given address, or None."""
    return next(
        (fip for fip in client.floatingips.list()
         if fip.floating_ip_address == floatingip),
        None,
    )
def compare_existed_and_needed_objects(before, after, force):
    """
    Compares two dicts

    :param boolean force: param for deleting "ACTIVE" status objects
        (if needed)
    :param dict before: objects that we have in project; each value looks
        like ``{"ACTIVE": n, "DOWN": m}`` (counts per state)
    :param dict after: objects that need to create; each value is the
        desired total quantity
    :return: objects that need to create and dict with quantity objects that
        have to be deleted
    :rtype: tuple(dict, dict)
    """
    # Set to False when the desired state cannot be reached without
    # force-deleting ACTIVE objects; then ({}, {}) is returned.
    possible_task = True
    # Start with the keys present in both dicts; entries are removed or
    # overwritten below as each key is classified.
    to_create = dict((key, before.get(key))
                     for key in before if key in after)
    to_delete = {}
    for n_key in after:
        if possible_task:
            if n_key not in before:
                # Entirely new object type: create the full quantity.
                to_create.update({n_key: after.get(n_key)})
            else:
                active = before.get(n_key)["ACTIVE"]
                down = before.get(n_key)["DOWN"]
                before_quantity = active + down
                after_quantity = after.get(n_key)
                if after_quantity == before_quantity:
                    # Already at the desired total: nothing to do.
                    to_create.pop(n_key)
                elif after_quantity < before_quantity:
                    # Need fewer objects than currently exist.
                    to_create.pop(n_key)
                    if not force:
                        # Without force, only DOWN objects may be deleted.
                        if (down >= after_quantity - active and
                                after_quantity >= active):
                            to_delete.update(
                                {n_key: down - (after_quantity - active)})
                        else:
                            # Would require deleting ACTIVE objects: abort.
                            possible_task = False
                    else:
                        to_delete.update(
                            {n_key: before_quantity - after_quantity})
                else:
                    # Need more objects: create only the difference.
                    to_create[n_key] = after_quantity - before_quantity
    if possible_task:
        return to_create, to_delete
    return {}, {}
def check_project_id(func):
    """
    Decorator checks 'project_id' param and if it's None than tries to find
    specific project by 'project_name'. If it's not found than raises
    an exception.

    The wrapped function is expected to take
    ``(module, cli, project_id, project_name, ...)`` as its leading
    positional arguments.

    :param func: function
    :return: decorated func
    """
    @wraps(func)
    def inner(*args, **kwargs):
        # Positional contract: (ansible module, API client, id, name, ...).
        module, cli, project_id, project_name = args[:4]
        show_list = kwargs.get("show_list")
        if show_list:
            # List-style calls don't need a resolved project id.
            return func(module, cli, project_id, project_name, *args[4:],
                        show_list=show_list)
        if not project_id:
            # Fall back to resolving the project by its name.
            project = get_project_by_name(cli, project_name)
            try:
                if not project:
                    raise ClientException(message="No such project")
                project_id = project.id
            except ClientException as exp:
                # fail_json reports the error through Ansible and exits.
                module.fail_json(msg=str(exp))
        return func(module, cli, project_id, project_name, *args[4:])
    return inner
def make_plural(word):
    """Append 's' unless the word already ends with one."""
    return word if word.endswith('s') else word + 's'
def clear_quotas(quotas):
    """Build an update payload that zeroes every listed quota resource."""
    return {
        "quotas": {
            item["resource"]: {"region": item["region"], "value": 0}
            for item in quotas
        }
    }
def abort_partial_response_task(module, client, resp, project_id=None,
                                is_quotas=False):
    """Delete all created objects and generate message.

    Rolls back a partially-completed task (deleting created objects, or
    zeroing the affected quotas when is_quotas is True), then fails the
    Ansible module with a 207 Multi-Status report.
    """
    if is_quotas:
        client.quotas.update(project_id, clear_quotas(resp.resources))
    else:
        for created in resp:
            created.delete()
    failure = {
        "error": "207 Multi-status",
        "details": resp.get_fail_info()
    }
    module.fail_json(result=failure, changed=False, msg="Task aborted")
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['from_file', 'image_grid', 'nchw', 'nhwc', 'normalize_to_uint8', 'normalize_to_unit_float', 'to_png']
import io
from typing import Union, BinaryIO, IO
import jax.numpy as jn
import numpy as np
from PIL import Image
from objax.typing import JaxArray
def from_file(file: Union[str, IO[BinaryIO]]) -> np.ndarray:
    """Read an image from a file, convert it RGB and return it as an array.

    Args:
        file: filename or python file handle of the input file.

    Return:
        3D numpy array (C, H, W) normalized with normalize_to_unit_float.
    """
    rgb = Image.open(file).convert('RGB')
    # HWC -> CHW before normalizing.
    chw = np.asarray(rgb).transpose((2, 0, 1))
    return normalize_to_unit_float(chw)
def image_grid(image: np.ndarray) -> np.ndarray:
    """Rearrange array of images (nh, nw, c, h, w) into image grid in a single image (c, nh * h, nw * w)."""
    nh, nw, c, h, w = image.shape
    # Interleave grid rows/columns with the pixel rows/columns, then flatten.
    return image.transpose([2, 0, 3, 1, 4]).reshape([c, h * nh, w * nw])
def nchw(x: Union[np.ndarray, JaxArray]) -> Union[np.ndarray, JaxArray]:
    """Converts an array in (N,H,W,C) format to (N,C,H,W) format."""
    # Move the last (channel) axis to third-from-last position.
    order = list(range(x.ndim))
    channel = order.pop()
    order.insert(len(order) - 2, channel)
    return x.transpose(order)
def nhwc(x: Union[np.ndarray, JaxArray]) -> Union[np.ndarray, JaxArray]:
    """Converts an array in (N,C,H,W) format to (N,H,W,C) format."""
    # Move the third-from-last (channel) axis to the end.
    channel = x.ndim - 3
    order = [axis for axis in range(x.ndim) if axis != channel] + [channel]
    return x.transpose(order)
def normalize_to_uint8(x: Union[np.ndarray, JaxArray]) -> Union[np.ndarray, JaxArray]:
    """Map a float image in [1/256-1, 1-1/256] to uint8 {0, 1, ..., 255}."""
    scaled = 128 * (x + (1 - 1 / 256))
    return scaled.clip(0, 255).round().astype('uint8')
def normalize_to_unit_float(x: Union[np.ndarray, JaxArray]) -> Union[np.ndarray, JaxArray]:
    """Map an uint8 image in {0, 1, ..., 255} to float interval [1/256-1, 1-1/256]."""
    # Exact inverse of normalize_to_uint8 (1/128 is a power of two).
    offset = 1 / 256 - 1
    return x * (1 / 128) + offset
def to_png(x: Union[np.ndarray, JaxArray]) -> bytes:
    """Converts numpy array in (C,H,W) format into PNG format."""
    if isinstance(x, jn.ndarray):
        x = np.array(x)
    # Float images are normalized to uint8 and moved to (H,W,C);
    # uint8 input is assumed to already be (H,W,C)-compatible below.
    if x.dtype in (np.float64, np.float32, np.float16):
        x = np.transpose(normalize_to_uint8(x), (1, 2, 0))
    elif x.dtype != np.uint8:
        raise ValueError('Unsupported array type, expecting float or uint8', x.dtype)
    # Replicate a single channel to RGB without copying the data.
    if x.shape[2] == 1:
        x = np.broadcast_to(x, x.shape[:2] + (3,))
    buffer = io.BytesIO()
    try:
        Image.fromarray(x).save(buffer, 'png')
        return buffer.getvalue()
    finally:
        buffer.close()
|
from datetime import datetime, timedelta
from math import ceil
from typing import List
from .task_rule_definition import MONTH_DAYS, MONTH_DAYS_LEAP, FrequencyEnum
class DatetimeManager:
    """Static helpers for schedule/date arithmetic used by task rules."""

    @staticmethod
    def is_time_between(
        start_time: datetime, end_time: datetime, current_time: datetime
    ) -> bool:
        """Return True iff current_time lies in [start_time, end_time]."""
        return start_time <= current_time <= end_time

    @staticmethod
    def calculate_next_time(current_time, delta) -> datetime:
        """Return current_time advanced by delta."""
        return current_time + delta

    @staticmethod
    def is_leap_year(year: int) -> bool:
        """Gregorian leap-year test (return annotation fixed: bool, not int)."""
        return (year % 4 == 0 and year % 100 != 0) or year % 400 == 0

    @staticmethod
    def get_sum_of_month_days(frequency: int, current_datetime: datetime) -> int:
        """Total days covered by *frequency* months starting at the current month.

        NOTE(review): indexes the month-days table with 1-based month
        numbers and does not wrap past December; this assumes
        MONTH_DAYS/MONTH_DAYS_LEAP are laid out for 1-based access --
        verify against task_rule_definition.
        """
        current_month: int = current_datetime.month
        total_days = 0
        month_list = DatetimeManager.get_list_of_month_days(current_datetime)
        if frequency == 1:
            return month_list[current_datetime.month]
        for month in range(current_month, current_month + frequency):
            total_days += month_list[month]
        return total_days

    @staticmethod
    def get_month_days_number(current_datetime: datetime):
        """Number of days in the current month, including leap years."""
        month_list = DatetimeManager.get_list_of_month_days(current_datetime)
        return month_list[current_datetime.month]

    @staticmethod
    def get_list_of_month_days(current_datetime: datetime) -> List[int]:
        """Return the month-length table for the datetime's year (leap-aware)."""
        if DatetimeManager.is_leap_year(current_datetime.year):
            return MONTH_DAYS_LEAP
        return MONTH_DAYS

    @staticmethod
    def week_of_month(dt):
        """Return the 1-based week-of-month index, with weeks starting on Sunday."""
        first_day = dt.replace(day=1)
        dom = dt.day
        # Shift the day-of-month by the weekday offset of the 1st, unless
        # the month already starts on Sunday (weekday() == 6).
        if first_day.weekday() == 6:
            adjusted_dom = dom
        else:
            adjusted_dom = dom + first_day.weekday()
        if adjusted_dom % 7 == 0 and first_day.weekday() != 6:
            value = adjusted_dom / 7.0 + 1
        elif first_day.weekday() == 6 and adjusted_dom % 7 == 0 and adjusted_dom == 7:
            value = 1
        else:
            value = int(ceil(adjusted_dom / 7.0))
        return int(value)

    @staticmethod
    def is_next_month(current: datetime, new_date: datetime, frequency: int) -> bool:
        """True iff new_date's month is exactly *frequency* months after current's.

        Bug fix: the month arithmetic now wraps across the year boundary
        (e.g. December + 1 -> January). The old comparison
        ``current.month + frequency == new_date.month`` could never match
        once the sum exceeded 12.
        """
        expected_month = (current.month + frequency - 1) % 12 + 1
        return expected_month == new_date.month

    @staticmethod
    def is_same_week(current: datetime, new_date: datetime) -> bool:
        """True iff both dates fall in the same week-of-month slot."""
        current_week = DatetimeManager.week_of_month(current)
        new_week = DatetimeManager.week_of_month(new_date)
        return current_week == new_week

    @staticmethod
    def get_delta_days(current: datetime, frequency: int) -> int:
        """Days until the date *frequency* months ahead in the same week slot.

        Months can have 4 to 6 weeks but not fewer than 4, so start at
        28 * frequency days and extend week by week until the candidate
        lands in the target month or the same week-of-month.
        """
        basic_days = 28 * frequency
        new_date = current + timedelta(days=basic_days)

        def is_next_month() -> bool:
            return DatetimeManager.is_next_month(current, new_date, frequency)

        def is_same_week() -> bool:
            return DatetimeManager.is_same_week(current, new_date)

        while not is_next_month() and not is_same_week():
            new_date = new_date + timedelta(days=7)
        # Days of difference between current and the adjusted new date.
        return (new_date - current).days

    @staticmethod
    def get_timedelta(delta_type, frequency, current_datetime=None):
        """Map a FrequencyEnum + frequency to a timedelta.

        MONTHLY and CUSTOM need current_datetime for month arithmetic and
        raise ValueError when it is missing. Unknown types return None.
        """
        if delta_type == FrequencyEnum.MINUTELY:
            return timedelta(minutes=frequency)
        if delta_type == FrequencyEnum.HOURLY:
            return timedelta(hours=frequency)
        if delta_type == FrequencyEnum.DAILY:
            return timedelta(days=frequency)
        if delta_type == FrequencyEnum.WEEKLY:
            return timedelta(days=frequency * 7)
        # Monthly on weekdays; CUSTOM shares the same month-based delta.
        if delta_type in (FrequencyEnum.MONTHLY, FrequencyEnum.CUSTOM):
            if current_datetime is None:
                raise ValueError("Month can't be None")
            return timedelta(
                days=DatetimeManager.get_delta_days(current_datetime, frequency)
            )
        return None
|
# Repository: https://gitlab.com/quantify-os/quantify-core
# Licensed according to the LICENCE file on the master branch
"""Module containing the pyqtgraph based plotting monitor."""
import warnings
import pyqtgraph.multiprocess as pgmp
from qcodes import validators as vals
from qcodes.instrument.base import Instrument
from qcodes.instrument.parameter import Parameter
from qcodes.utils.helpers import strip_attrs
from quantify_core.data.handling import get_datadir
from quantify_core.measurement.control import _DATASET_LOCKS_DIR
class PlotMonitor_pyqt(Instrument):
    """
    Pyqtgraph based plot monitor instrument.
    A plot monitor is intended to provide a real-time visualization of a dataset.
    The interaction with this virtual instrument are virtually instantaneous.
    All the heavier computations and plotting happens in a separate QtProcess.
    """
    def __init__(self, name: str):
        """
        Creates an instance of the Measurement Control.
        Parameters
        ----------
        name
            Name of this instrument instance
        """
        super().__init__(name=name)
        # pyqtgraph multiprocessing
        # We setup a remote process which creates a queue to which
        # "commands" will be sent
        self.proc = pgmp.QtProcess(processRequests=False)
        # quantify_core module(s) in the remote process
        timeout = 60
        self.remote_quantify = self.proc._import("quantify_core", timeout=timeout)
        self.remote_ppr = self.proc._import(
            "quantify_core.visualization.pyqt_plotmon_remote", timeout=timeout
        )
        # the interface to the remote object
        self.remote_plotmon = self.remote_ppr.RemotePlotmon(
            instr_name=self.name, dataset_locks_dir=_DATASET_LOCKS_DIR
        )
        # Bounded between 1 and 100 so the remote process cannot be asked to
        # accumulate an unbounded number of datasets.
        self.add_parameter(
            name="tuids_max_num",
            docstring=(
                "The maximum number of auto-accumulated datasets in "
                "`.tuids()`.\n"
                "Older dataset are discarded when `.tuids_append()` is "
                "called [directly or from `.update(tuid)`]"
            ),
            parameter_class=Parameter,
            vals=vals.Ints(min_value=1, max_value=100),
            set_cmd=self._set_tuids_max_num,
            get_cmd=self._get_tuids_max_num,
            # avoid set_cmd being called at __init__
            initial_cache_value=3,
        )
        self.add_parameter(
            name="tuids",
            docstring=(
                "The tuids of the auto-accumulated previous datasets when "
                "specified through `.tuids_append()`.\n"
                "Can also be set to any list `['tuid_one', 'tuid_two', ...]`\n"
                "Can be reset by setting to `[]`\n"
                "See also `tuids_extra`."
            ),
            parameter_class=Parameter,
            get_cmd=self._get_tuids,
            set_cmd=self._set_tuids,
            # avoid set_cmd being called at __init__
            initial_cache_value=[],
        )
        self.add_parameter(
            name="tuids_extra",
            docstring=(
                "Extra tuids whose datasets are never affected by "
                "`.tuids_append()` or `.tuids_max_num()`.\n"
                "As opposed to the `.tuids()`, these ones never vanish.\n"
                "Can be reset by setting to `[]`.\n"
                "Intended to perform realtime measurements and have a "
                "live comparison with previously measured datasets."
            ),
            parameter_class=Parameter,
            vals=vals.Lists(),
            set_cmd=self._set_tuids_extra,
            get_cmd=self._get_tuids_extra,
            # avoid set_cmd being called at __init__
            initial_cache_value=[],
        )
        # Jupyter notebook support
        self.main_QtPlot = QtPlotObjForJupyter(self.remote_plotmon, "main_QtPlot")
        self.secondary_QtPlot = QtPlotObjForJupyter(
            self.remote_plotmon, "secondary_QtPlot"
        )
    # Wrappers for the remote methods
    # We just put "commands" on a queue that will be consumed by the
    # remote_plotmon
    # the commands are just a tuple:
    # (
    #     <str: attr to be called in the remote process>,
    #     <tuple: a tuple with the arguments passed to the attr>
    # )
    # see `remote_plotmon._exec_queue`
    # For consistency we mirror the label of all methods and set_cmd/get_cmd's
    # with the remote_plotmon
    # NB: before implementing the queue, _callSync="off" could be used
    # to avoid waiting for a return
    # e.g. self.remote_plotmon.update(tuid, _callSync="off")
    def create_plot_monitor(self):
        """
        Creates the PyQtGraph plotting monitors.
        Can also be used to recreate these when plotting has crashed.
        """
        self.remote_plotmon.queue.put(("create_plot_monitor", tuple()))
        # Without queue it will be:
        # self.remote_plotmon.create_plot_monitor()
    def update(self, tuid: str = None):
        """
        Updates the curves/heatmaps of a specific dataset.
        If the dataset is not specified the latest dataset in `.tuids()` is used.
        If `.tuids()` is empty and `tuid` is provided
        then `.tuids_append(tuid)` will be called.
        NB: this is intended mainly for MC to avoid issues when the file
        was not yet created or is empty.
        """
        try:
            self.remote_plotmon.queue.put(("update", (tuid, get_datadir())))
        except Exception as e:
            # Best-effort update: plotting problems must not crash a running
            # measurement, so only a warning is emitted.
            warnings.warn(f"At update encountered: {e}", Warning)
    def tuids_append(self, tuid: str = None):
        """
        Appends a tuid to `.tuids()` and also discards older datasets
        according to `.tuids_max_num()`.
        The corresponding data will be plotted in the main window
        with blue circles.
        NB: do not call before the corresponding dataset file was created and filled
        with data
        """
        self.remote_plotmon.queue.put(("tuids_append", (tuid, get_datadir())))
    def _set_tuids_max_num(self, val):
        # Fire-and-forget: the remote process consumes the queue asynchronously.
        self.remote_plotmon.queue.put(("_set_tuids_max_num", (val,)))
    def _set_tuids(self, tuids: list):
        self.remote_plotmon.queue.put(("_set_tuids", (tuids, get_datadir())))
    def _set_tuids_extra(self, tuids: list):
        self.remote_plotmon.queue.put(("_set_tuids_extra", (tuids, get_datadir())))
    # Blocking calls
    # For this ones we wait to get the return
    def _get_tuids_max_num(self):
        # wait to finish the queue
        self.remote_plotmon._exec_queue()
        return self.remote_plotmon._get_tuids_max_num()
    def _get_tuids(self):
        # wait to finish the queue
        self.remote_plotmon._exec_queue()
        return self.remote_plotmon._get_tuids()
    def _get_tuids_extra(self):
        # wait to finish the queue
        self.remote_plotmon._exec_queue()
        return self.remote_plotmon._get_tuids_extra()
    # Workaround for test due to pickling issues of certain objects
    def _get_curves_config(self):
        # wait to finish the queue
        self.remote_plotmon._exec_queue()
        return self.remote_plotmon._get_curves_config()
    def _get_traces_config(self, which="main_QtPlot"):
        # wait to finish the queue
        self.remote_plotmon._exec_queue()
        return self.remote_plotmon._get_traces_config(which)
    def close(self) -> None:
        """
        (Modified from Instrument class)
        Irreversibly stop this instrument and free its resources.
        Subclasses should override this if they have other specific
        resources to close.
        """
        if hasattr(self, "connection") and hasattr(self.connection, "close"):
            self.connection.close()
        # Essential!!! Close the process
        self.proc.join()
        strip_attrs(self, whitelist=["_name"])
        self.remove_instance(self)
    def setGeometry_main(self, x: int, y: int, w: int, h: int):
        """Set the geometry of the main plotmon
        Parameters
        ----------
        x
            Horizontal position of the top-left corner of the window
        y
            Vertical position of the top-left corner of the window
        w
            Width of the window
        h
            Height of the window
        """
        # wait to finish the queue
        self.remote_plotmon._exec_queue()
        self.remote_plotmon._set_qt_plot_geometry(x, y, w, h, which="main_QtPlot")
    def setGeometry_secondary(self, x: int, y: int, w: int, h: int):
        """Set the geometry of the secondary plotmon
        Parameters
        ----------
        x
            Horizontal position of the top-left corner of the window
        y
            Vertical position of the top-left corner of the window
        w
            Width of the window
        h
            Height of the window
        """
        # wait to finish the queue
        self.remote_plotmon._exec_queue()
        self.remote_plotmon._set_qt_plot_geometry(x, y, w, h, which="secondary_QtPlot")
class QtPlotObjForJupyter:
    """
    A wrapper to be able to display a QtPlot window in Jupyter notebooks
    """

    def __init__(self, remote_plotmon, attr_name):
        # Keep a handle to the remote plot monitor and the name of the
        # remote attribute that holds the actual QtPlot object.
        self.remote_plotmon = remote_plotmon
        self.attr_name = attr_name

    def _repr_png_(self):
        # Flush any pending commands so the rendered plot is up to date.
        self.remote_plotmon._exec_queue()
        # Look the remote object up freshly on every render instead of
        # caching a reference to it.
        plot_obj = getattr(self.remote_plotmon, self.attr_name)
        return plot_obj._repr_png_()
|
def get_overflow(sample: dict, obj: dict, all_sub=False) -> list:
    """Returns a list of all fields which exist in obj, but not in sample.

    Parameters
    ----------
    sample : dict
        Schema-like dict whose keys are the allowed field names.
    obj : dict
        Object to inspect for unexpected fields.
    all_sub : bool
        When True, recurse into dict values using the matching
        ``sample[field]["embedded_dict"]`` sub-schema.
    """
    if not hasattr(obj, "__iter__"):
        # Non-iterable values cannot carry extra fields.
        return []
    fields = []
    for field in obj:
        if field not in sample:
            fields.append(field)
        elif all_sub and isinstance(obj[field], dict):
            # Recurse into nested dicts using the embedded sub-schema.
            fields.extend(
                get_overflow(sample[field]["embedded_dict"], obj[field], all_sub)
            )
    return fields


def check(sample: dict, obj: dict, parent=None, allow_overflow=False) -> (bool, list):
    """Performs type checking on obj against sample. Returns True or False if obj fits the sample. If obj does not fit, then return an array of errors.

    Parameters
    ----------
    sample : dict
        Schema: maps key -> {"required": bool, "allowed_types": [types], and
        optionally "embedded_dict", "list_element", "tuple_order"}.
    obj : dict
        The object to validate.
    parent : str, optional
        Dotted key path of the enclosing object, used for error messages.
    allow_overflow : bool
        When True, keys present in obj but absent from sample are tolerated.
    """
    # Check if there are other fields in obj, than in sample
    errors = []
    if not sample:
        errors.append(f"ERROR: Sampled object cannot be undefined.")
    if not obj:
        errors.append(f"ERROR: Supplied object cannot be undefined.")
    if not sample or not obj:
        return False, errors
    if parent:
        parent_key = parent + "."
    else:
        parent_key = ""
    overflows = get_overflow(sample, obj)
    if not allow_overflow and len(overflows) > 0:
        if len(overflows) == 1:
            errors.append(f"ERROR: Key '{parent_key + overflows[0]}' is not present in sample, but is present in supplied object.")
        else:
            errors.append(f"ERROR: Keys {[parent_key + o for o in overflows]} are not present in sample, but are present in supplied object.")
    for key in sample:
        if parent:
            parent_key = parent + "." + key
        else:
            parent_key = key
        key_req = sample[key]["required"]
        # Key is required and is absent.
        if key_req and key not in obj:
            errors.append(f"ERROR: Key '{parent_key}' is required, but was absent in supplied object.")
            continue
        elif key in obj:  # Key is present, required or not.
            # Exact-type membership is intentional here (no subclasses):
            # the schema lists the concrete types it accepts.
            if type(obj[key]) not in sample[key]["allowed_types"]:
                # BUG FIX: removed a stray doubled quote after {parent_key}.
                errors.append(f"ERROR: On key '{parent_key}', expected one of {[t.__name__ for t in sample[key]['allowed_types']]}, got {type(obj[key]).__name__}")
                continue
            else:  # Type for this key in obj is in allowed types
                # If the obj-type is dict (implied from above if-statement, it is allowed), then we try to recursively check.
                if type(obj[key]) == dict:
                    embedded = sample[key]["embedded_dict"]
                    succ, err = check(embedded, obj[key], parent_key, allow_overflow)
                    if not succ:
                        errors.extend(err)
                    continue
                elif type(obj[key]) == list:
                    # Validate every element against the "list_element" schema.
                    l_ele = sample[key]["list_element"]
                    for i, ele in enumerate(obj[key]):
                        if type(ele) not in l_ele["allowed_types"]:
                            errors.append(f"ERROR: On key '{parent_key}[{i}]', expected one of {[t.__name__ for t in l_ele['allowed_types']]}, got {type(ele).__name__}")
                        # ele has to match l_ele.
                        if type(ele) == dict:
                            succ, err = check(l_ele["embedded_dict"], ele, parent_key + f"[{i}]", allow_overflow)
                            if not succ:
                                errors.extend(err)
                elif type(obj[key]) == tuple:
                    order = sample[key]["tuple_order"]
                    if len(order) != len(obj[key]):
                        errors.append(f"ERROR: On key '{parent_key}', expected tuple of length {len(order)}, got tuple of length {len(obj[key])}.")
                        continue
                    for i in range(len(order)):
                        if type(obj[key][i]) != order[i]:
                            errors.append(f"ERROR: On key '{parent_key}', expected tuple with order ({','.join([t.__name__ for t in order])}), got tuple with order ({','.join([type(t).__name__ for t in obj[key]])}).")
                            break
                    for i in range(len(order)):
                        if type(obj[key][i]) == dict:
                            succ, err = check(sample[key]["embedded_dict"], obj[key][i], parent_key + f"[{i}]", allow_overflow)
                            if not succ:
                                errors.extend(err)
                        if type(obj[key][i]) == list:
                            # BUG FIX: the inner enumerate previously rebound the
                            # outer loop variable `i`; use a separate index.
                            l_ele = sample[key]["list_element"]
                            for idx, ele in enumerate(obj[key][i]):
                                if type(ele) not in l_ele["allowed_types"]:
                                    errors.append(f"ERROR: On key '{parent_key}[{idx}]', expected one of {[t.__name__ for t in l_ele['allowed_types']]}, got {type(ele).__name__}.")
                                # ele has to match l_ele.
                                if type(ele) == dict:
                                    succ, err = check(l_ele["embedded_dict"], ele, parent_key + f"[{idx}]", allow_overflow)
                                    if not succ:
                                        errors.extend(err)
    return len(errors) == 0, errors
|
#
# Utility classes for PyBaMM
#
# The code in this file is adapted from Pints
# (see https://github.com/pints-team/pints)
#
import importlib
import numpy as np
import os
import sys
import timeit
import pathlib
import pickle
import pybamm
import numbers
from collections import defaultdict
def root_dir():
    """Return the root directory of the PyBaMM install directory."""
    package_dir = pathlib.Path(pybamm.__path__[0])
    return str(package_dir.parent)
class FuzzyDict(dict):
    """A ``dict`` that suggests close key matches on failed lookups."""

    def levenshtein_ratio(self, s, t):
        """
        Calculates levenshtein distance between two strings s and t.
        Uses the formula from
        https://www.datacamp.com/community/tutorials/fuzzy-string-python

        Returns a similarity ratio in [0, 1]; 1 means identical strings.
        """
        total_len = len(s) + len(t)
        if total_len == 0:
            # Two empty strings are identical by definition.
            return 1.0
        rows = len(s) + 1
        cols = len(t) + 1
        distance = np.zeros((rows, cols), dtype=int)
        # Base cases: building one string from the empty string costs one
        # operation per character.
        # BUG FIX: these were previously filled inside a doubly-nested loop,
        # which never ran (and left `row`/`col` unbound) when either string
        # was empty.
        for i in range(1, rows):
            distance[i][0] = i
        for k in range(1, cols):
            distance[0][k] = k
        # Iterate over the matrix to compute the cost of deletions, insertions
        # and/or substitutions
        for col in range(1, cols):
            for row in range(1, rows):
                if s[row - 1] == t[col - 1]:
                    # If the characters are the same in the two strings in a
                    # given position [i,j] then the cost is 0
                    cost = 0
                else:
                    # In order to align the results with those of the Python
                    # Levenshtein package, the cost of a substitution is 2.
                    cost = 2
                distance[row][col] = min(
                    distance[row - 1][col] + 1,  # Cost of deletions
                    distance[row][col - 1] + 1,  # Cost of insertions
                    distance[row - 1][col - 1] + cost,  # Cost of substitutions
                )
        # Computation of the Levenshtein Distance Ratio
        ratio = (total_len - distance[rows - 1][cols - 1]) / total_len
        return ratio

    def get_best_matches(self, key):
        """Get best matches from keys"""
        key = key.lower()
        best_three = []
        lowest_score = 0
        for k in self.keys():
            score = self.levenshtein_ratio(k.lower(), key)
            # Start filling out the list
            if len(best_three) < 3:
                best_three.append((k, score))
                # Sort once the list has three elements, using scores
                if len(best_three) == 3:
                    best_three.sort(key=lambda x: x[1], reverse=True)
                    lowest_score = best_three[-1][1]
            # Once list is full, start checking new entries
            else:
                if score > lowest_score:
                    # Replace last element with new entry
                    best_three[-1] = (k, score)
                    # Sort and update lowest score
                    best_three.sort(key=lambda x: x[1], reverse=True)
                    lowest_score = best_three[-1][1]
        return [x[0] for x in best_three]

    def __getitem__(self, key):
        """Plain lookup, but a failed lookup reports the closest keys."""
        try:
            return super().__getitem__(key)
        except KeyError:
            best_matches = self.get_best_matches(key)
            raise KeyError(f"'{key}' not found. Best matches are {best_matches}")

    def search(self, key, print_values=False):
        """
        Search dictionary for keys containing 'key'. If print_values is True, then
        both the keys and values will be printed. Otherwise just the values will
        be printed. If no results are found, the best matches are printed.
        """
        key = key.lower()
        # Sort the keys so results are stored in alphabetical order
        keys = list(self.keys())
        keys.sort()
        results = {}
        # Check if any of the dict keys contain the key we are searching for
        for k in keys:
            if key in k.lower():
                results[k] = self[k]
        if results == {}:
            # If no results, return best matches
            best_matches = self.get_best_matches(key)
            print(
                f"No results for search using '{key}'. Best matches are {best_matches}"
            )
        elif print_values:
            # Else print results, including dict items
            print("\n".join("{}\t{}".format(k, v) for k, v in results.items()))
        else:
            # Just print keys
            print("\n".join("{}".format(k) for k in results.keys()))
class Timer(object):
    """
    Provides accurate timing.
    Example
    -------
    timer = pybamm.Timer()
    print(timer.time())
    """

    def __init__(self):
        # Record the wall-clock reference point at construction.
        self._start = timeit.default_timer()

    def reset(self):
        """
        Resets this timer's start time.
        """
        self._start = timeit.default_timer()

    def time(self):
        """
        Returns the time (float, in seconds) since this timer was created,
        or since meth:`reset()` was last called.
        """
        elapsed = timeit.default_timer() - self._start
        return TimerTime(elapsed)
class TimerTime:
    def __init__(self, value):
        """A string whose value prints in human-readable form"""
        # value is the duration in seconds (float).
        self.value = value

    def __str__(self):
        """
        Formats a (non-integer) number of seconds, returns a string like
        "5 weeks, 3 days, 1 hour, 4 minutes, 9 seconds", or "0.0019 seconds".
        """
        time = self.value
        if time < 1e-6:
            return "{:.3f} ns".format(time * 1e9)
        if time < 1e-3:
            return "{:.3f} us".format(time * 1e6)
        if time < 1:
            return "{:.3f} ms".format(time * 1e3)
        elif time < 60:
            return "{:.3f} s".format(time)
        # Above one minute: decompose into calendar-style units.
        output = []
        time = int(round(time))
        units = [(604800, "week"), (86400, "day"), (3600, "hour"), (60, "minute")]
        for k, name in units:
            f = time // k
            if f > 0 or output:
                output.append(str(f) + " " + (name if f == 1 else name + "s"))
            time -= f * k
        output.append("1 second" if time == 1 else str(time) + " seconds")
        return ", ".join(output)

    def __add__(self, other):
        if isinstance(other, numbers.Number):
            return TimerTime(self.value + other)
        else:
            return TimerTime(self.value + other.value)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        if isinstance(other, numbers.Number):
            return TimerTime(self.value - other)
        else:
            return TimerTime(self.value - other.value)

    def __rsub__(self, other):
        if isinstance(other, numbers.Number):
            return TimerTime(other - self.value)
        # BUG FIX: previously fell through and returned None; signalling
        # NotImplemented lets Python raise the proper TypeError.
        return NotImplemented

    def __mul__(self, other):
        if isinstance(other, numbers.Number):
            return TimerTime(self.value * other)
        else:
            return TimerTime(self.value * other.value)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        if isinstance(other, numbers.Number):
            return TimerTime(self.value / other)
        else:
            return TimerTime(self.value / other.value)

    def __rtruediv__(self, other):
        if isinstance(other, numbers.Number):
            return TimerTime(other / self.value)
        # BUG FIX: previously fell through and returned None (see __rsub__).
        return NotImplemented

    def __eq__(self, other):
        # BUG FIX: comparing against a plain number used to raise
        # AttributeError; now numbers compare against the stored seconds.
        if isinstance(other, numbers.Number):
            return self.value == other
        return self.value == other.value
def load_function(filename):
    """
    Load a python function from a file "function_name.py" called "function_name".
    The filename might either be an absolute path, in which case that specific file will
    be used, or the file will be searched for relative to PyBaMM root.
    Arguments
    ---------
    filename : str
        The name of the file containing the function of the same name.
    Returns
    -------
    function
        The python function loaded from the file.
    Raises
    ------
    ValueError
        If the filename does not end in ".py", the file cannot be found (or is
        found more than once in the PyBaMM tree), or the module does not define
        a function with the same name as the file.
    """
    if not filename.endswith(".py"):
        raise ValueError("Expected filename.py, but got {}".format(filename))
    # If it's an absolute path, find that exact file
    if os.path.isabs(filename):
        if not os.path.isfile(filename):
            raise ValueError(
                "{} is an absolute path, but the file is not found".format(filename)
            )
        valid_filename = filename
    # Else, search in the whole PyBaMM directory for matches
    else:
        search_path = pybamm.root_dir()
        head, tail = os.path.split(filename)
        matching_files = []
        for root, _, files in os.walk(search_path):
            for file in files:
                if file == tail:
                    full_path = os.path.join(root, file)
                    # Require the full relative path (not just the basename)
                    # to match, so "sub/dir/f.py" only matches that subtree.
                    if full_path.endswith(filename):
                        matching_files.append(full_path)
        if len(matching_files) == 0:
            raise ValueError(
                "{} cannot be found in the PyBaMM directory".format(filename)
            )
        elif len(matching_files) > 1:
            raise ValueError(
                "{} found multiple times in the PyBaMM directory."
                "Consider using absolute file path.".format(filename)
            )
        valid_filename = matching_files[0]
    # Now: we have some /path/to/valid/filename.py
    # Add "/path/to/vaid" to the python path, and load the module "filename".
    # Then, check "filename" module contains "filename" function. If it does, return
    # that function object, or raise an exception
    valid_path, valid_leaf = os.path.split(valid_filename)
    sys.path.append(valid_path)
    # Load the module, which must be the leaf of filename, minus the .py extension
    valid_module = valid_leaf.replace(".py", "")
    module_object = importlib.import_module(valid_module)
    # Check that a function of the same name exists in the loaded module
    if valid_module not in dir(module_object):
        raise ValueError(
            "No function {} found in module {}".format(valid_module, valid_module)
        )
    # Remove valid_path from sys_path to avoid clashes down the line
    sys.path.remove(valid_path)
    return getattr(module_object, valid_module)
def rmse(x, y):
    """Return the root-mean-square error between vectors ``x`` and ``y``.

    NaN entries are ignored (via ``np.nanmean``). A ``ValueError`` is raised
    when the two vectors differ in length.
    """
    if len(x) != len(y):
        raise ValueError("Vectors must have the same length")
    squared_residuals = (x - y) ** 2
    return np.sqrt(np.nanmean(squared_residuals))
def get_infinite_nested_dict():
    """
    Return a dictionary that allows infinite nesting without having to define level by
    level.
    See:
    https://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python/652226#652226
    Example
    -------
    >>> import pybamm
    >>> d = pybamm.get_infinite_nested_dict()
    >>> d["a"] = 1
    >>> d["a"]
    1
    >>> d["b"]["c"]["d"] = 2
    >>> d["b"]["c"] == {"d": 2}
    True
    """
    # Each missing key materializes another level of the same structure.
    return defaultdict(get_infinite_nested_dict)
def load(filename):
    """Load a previously pickled object from ``filename``."""
    with open(filename, "rb") as file_handle:
        return pickle.load(file_handle)
def get_parameters_filepath(path):
    """Returns path if it exists in current working dir,
    otherwise get it from package dir"""
    if os.path.exists(path):
        return path
    # Fall back to resolving the path inside the installed pybamm package.
    return os.path.join(pybamm.__path__[0], path)
|
<reponame>jamiejackherer/pyfilm-gui-no-glade<filename>pyfilm-gui-no-glade/main.py
#!/usr/bin/python3
#-*- coding:utf-8 -*-
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
# Static sample rows for the tree view: each tuple is
# (index, rank, result title, video link).
# NOTE(review): despite the variable name, these are movie-search results,
# not software records.
software_list = [("0", 1, "Watch Finding Dory 2016 CAM Busy Boyz mp4 - VoDLocker", "http://vodlocker.com/e87rys8iefuvhs"),
                 ("1", 2, "Watch Finding Dory 2016 CAM Busy Boyz mp4 - VoDLocker", "http://vodlocker.com/e87rys8iefuvhs" ),
                 ("2", 3, "Watch Finding Dory 2016 x264 AC3 CPG mkv - VoDLocker", "http://vodlocker.com/e87rys8iefuvhs"),
                 ("3", 4, "Watch Finding Dory 2016 CAM mp4 - VoDLocker", "http://vodlocker.com/e87rys8iefuvhs"),
                 ("4", 5, "Watch The Ellen Generes Show 2016 Finding Dory Week HDTV x264 ...", "http://vodlocker.com/e87rys8iefuvhs"),
                 ("5", 6, "Watch <NAME> Live 2016 Game Night The Cast Finding Dory ...", "http://vodlocker.com/e87rys8iefuvhs"),
                 ("6", 7, "Watch Finding Nemo 2003 720p Blu Ray x264 YIFY mp4 - VoDLocker", "http://vodlocker.com/e87rys8iefuvhs")]
class TreeViewFilterWindow(Gtk.Window):
    """Main application window: a filterable tree view of search results."""
    def __init__(self):
        Gtk.Window.__init__(self, title="{PyFilm}")
        self.set_border_width(10)
        #Setting up the self.grid in which the elements are to be positionned
        self.grid = Gtk.Grid()
        self.grid.set_column_homogeneous(True)
        self.grid.set_row_homogeneous(True)
        self.add(self.grid)
        self.entry = Gtk.Entry()
        self.entry.set_text("Hello World")
        self.grid.attach(self.entry, 0, 0, 1, 1)
        #Creating the ListStore model
        self.software_liststore = Gtk.ListStore(str, int, str, str)
        for software_ref in software_list:
            self.software_liststore.append(list(software_ref))
        self.current_filter_language = None
        #Creating the filter, feeding it with the liststore model
        self.language_filter = self.software_liststore.filter_new()
        #setting the filter function, note that we're not using the
        self.language_filter.set_visible_func(self.language_filter_func)
        #creating the treeview, making it use the filter as a model, and adding the columns
        self.treeview = Gtk.TreeView.new_with_model(self.language_filter)
        for i, column_title in enumerate(["Index", "Rank", "Title", "Link"]):
            renderer = Gtk.CellRendererText()
            # text=i binds the column's text to model column i
            column = Gtk.TreeViewColumn(column_title, renderer, text=i)
            self.treeview.append_column(column)
        #setting up the layout, putting the treeview in a scrollwindow, and the buttons in a row
        self.scrollable_treelist = Gtk.ScrolledWindow()
        self.scrollable_treelist.set_vexpand(True)
        self.grid.attach(self.scrollable_treelist, 0, 1, 5, 10)
        self.scrollable_treelist.add(self.treeview)
        # React to row selection changes
        self.select = self.treeview.get_selection()
        self.select.connect("changed", self.on_tree_selection_changed)
        #creating quit button, and setting up the event
        self.quit_button = Gtk.Button(label = "Quit")
        self.quit_button.connect("clicked", self.on_quit_button_clicked)
        self.grid.attach(self.quit_button, 0, 2, 1, 1)
        self.show_all()
    def language_filter_func(self, model, iter, data):
        """Tests if the language in the row is the one in the filter"""
        if self.current_filter_language is None or self.current_filter_language == "None":
            return True
        else:
            # Compare against model column 2 (the title column)
            return model[iter][2] == self.current_filter_language
    def on_selection_button_clicked(self, widget):
        """Called on any of the button clicks"""
        #we set the current language filter to the button's label
        self.current_filter_language = widget.get_label()
        print("%s language selected!" % self.current_filter_language)
        #we update the filter, which updates in turn the view
        self.language_filter.refilter()
    def on_tree_selection_changed(self, selection):
        """Print the currently selected row's first three columns."""
        model, treeiter = selection.get_selected()
        if treeiter != None:
            print("You selected", model[treeiter][0], model[treeiter][1], model[treeiter][2])
    def on_quit_button_clicked(self, button):
        """Quit the GTK main loop when the Quit button is pressed."""
        print("Quitting application")
        Gtk.main_quit()
# Build the window, wire window-close to the GTK quit handler, and hand
# control to the GTK main loop (blocks until the application quits).
win = TreeViewFilterWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
# -*- coding: utf-8 -*-
"""Main module."""
import json
from pathlib import Path
import logging
from typing import Tuple
import numpy as np
from .pyn5 import (
DatasetUINT8,
DatasetUINT16,
DatasetUINT32,
DatasetUINT64,
DatasetINT8,
DatasetINT16,
DatasetINT32,
DatasetINT64,
DatasetFLOAT32,
DatasetFLOAT64,
)
# Mapping from a numpy dtype to the concrete Dataset class exported by the
# compiled pyn5 extension; `open` uses this to pick the implementation.
dataset_types = {
    np.dtype("uint8"): DatasetUINT8,
    np.dtype("uint16"): DatasetUINT16,
    np.dtype("uint32"): DatasetUINT32,
    np.dtype("uint64"): DatasetUINT64,
    np.dtype("int8"): DatasetINT8,
    np.dtype("int16"): DatasetINT16,
    np.dtype("int32"): DatasetINT32,
    np.dtype("int64"): DatasetINT64,
    np.dtype("float32"): DatasetFLOAT32,
    np.dtype("float64"): DatasetFLOAT64,
}
def open(root_path: str, dataset: str, dtype: str = "", read_only=True):
    """
    Returns a Dataset of the corresponding dtype. Leave dtype blank to return
    the Dataset with dtype as shown in the attributes.json file
    """
    # Consult the dataset's attributes file, when present, for its dtype.
    attributes_path = Path(root_path, dataset, "attributes.json")
    if attributes_path.exists():
        with attributes_path.open("r") as attr_file:
            stored_attributes = json.load(attr_file)
        stored_dtype = stored_attributes.get("dataType", None)
        if stored_dtype is not None:
            if dtype == "":
                # No dtype requested: reopen with the stored dtype.
                return open(root_path, dataset, stored_dtype.upper(), read_only)
            elif dtype != stored_dtype.upper():
                # When in doubt the user-specified dtype wins; just warn.
                logging.warning(
                    "Given dtype {} does not match dtype ({}) in attributes.json".format(
                        dtype, stored_dtype.upper()
                    )
                )
    unsupported_dtype_msg = "Given dtype {} is not supported. Please choose from ({})".format(
        dtype,
        tuple(dataset_types),
    )
    if not dtype:
        raise ValueError(unsupported_dtype_msg)
    if isinstance(dtype, str):
        dtype = dtype.lower()
    numpy_dtype = np.dtype(dtype)
    try:
        dataset_class = dataset_types[numpy_dtype]
        return dataset_class(root_path, dataset, read_only)
    except KeyError:
        raise ValueError(unsupported_dtype_msg)
def read(dataset, bounds: Tuple[np.ndarray, np.ndarray], dtype: type = int):
    """
    Temporary hacky method until dataset.read_ndarray returns np.ndarray
    Note: passing in dtype is necessary since numpy arrays are float by default.
    dataset.get_data_type() could be implemented, but a better solution would
    be to have dataset.read_ndarray return a numpy array.
    """
    start = bounds[0].astype(int)
    stop = bounds[1].astype(int)
    extent = list(stop - start)
    # read_ndarray returns a flat buffer in Fortran-ish order; reshape with
    # reversed axes then transpose back to (x, y, z).
    flat = dataset.read_ndarray(list(start), extent)
    volume = np.array(flat).reshape(extent[::-1])
    return volume.transpose([2, 1, 0]).astype(dtype)
def write(
    dataset,
    input_bounds: Tuple[np.ndarray, np.ndarray],
    input_data: np.ndarray,
    dtype=int,
):
    """
    Temporary hacky method until dataset.write_ndarray is implemented in rust-n5
    and the PyO3 wrapper

    Writes ``input_data`` into ``dataset`` over the region ``input_bounds``
    (start, stop), block by block. Blocks only partially covered by the input
    are read back first so their untouched voxels are preserved.
    """
    input_data = input_data.astype(dtype)
    input_bounds = (input_bounds[0].astype(int), input_bounds[1].astype(int))
    # Index range of blocks touched by the input region (stop is exclusive,
    # rounded up so a partially-covered trailing block is included).
    start_block_index = input_bounds[0] // dataset.block_shape
    stop_block_index = (
        input_bounds[1] + dataset.block_shape - 1
    ) // dataset.block_shape
    for i in range(start_block_index[0], stop_block_index[0]):
        for j in range(start_block_index[1], stop_block_index[1]):
            for k in range(start_block_index[2], stop_block_index[2]):
                block_index = np.array([i, j, k], dtype=int)
                block_bounds = (
                    block_index * dataset.block_shape,
                    (block_index + 1) * dataset.block_shape,
                )
                if all(block_bounds[0] >= input_bounds[0]) and all(
                    block_bounds[1] <= input_bounds[1]
                ):
                    # Overwrite block data entirely
                    block_data = input_data[
                        tuple(
                            map(
                                slice,
                                block_bounds[0] - input_bounds[0],
                                block_bounds[1] - input_bounds[0],
                            )
                        )
                    ]
                else:
                    # Partial overlap: read the existing block and patch only
                    # the intersection with the input region.
                    block_data = read(dataset, block_bounds, dtype)
                    intersection_bounds = (
                        np.maximum(block_bounds[0], input_bounds[0]),
                        np.minimum(block_bounds[1], input_bounds[1]),
                    )
                    relative_block_bounds = tuple(
                        map(
                            slice,
                            intersection_bounds[0] - block_bounds[0],
                            intersection_bounds[1] - block_bounds[0],
                        )
                    )
                    relative_data_bounds = tuple(
                        map(
                            slice,
                            intersection_bounds[0] - input_bounds[0],
                            intersection_bounds[1] - input_bounds[0],
                        )
                    )
                    block_data[relative_block_bounds] = input_data[relative_data_bounds]
                # Blocks are stored flattened with reversed axis order.
                dataset.write_block(
                    block_index, block_data.transpose([2, 1, 0]).flatten()
                )
|
"""
CAS (Princeton) Authentication
Some code borrowed from
https://sp.princeton.edu/oit/sdp/CAS/Wiki%20Pages/Python.aspx
"""
import datetime
import re
import urllib.parse
import urllib.request
import uuid
from xml.etree import ElementTree
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
# CAS endpoints and mail domain for Princeton's federated login service.
CAS_EMAIL_DOMAIN = "princeton.edu"
CAS_URL= 'https://fed.princeton.edu/cas/'
CAS_LOGOUT_URL = 'https://fed.princeton.edu/cas/logout?service=%s'
CAS_SAML_VALIDATE_URL = 'https://fed.princeton.edu/cas/samlValidate?TARGET=%s'
# eligibility checking
# These are only defined when Django settings provide CAS credentials;
# get_user_category relies on them being present.
if hasattr(settings, 'CAS_USERNAME'):
    CAS_USERNAME = settings.CAS_USERNAME
    CAS_PASSWORD = settings.CAS_PASSWORD
    CAS_ELIGIBILITY_URL = settings.CAS_ELIGIBILITY_URL
    CAS_ELIGIBILITY_REALM = settings.CAS_ELIGIBILITY_REALM
# display tweaks
LOGIN_MESSAGE = "Log in with my NetID"
STATUS_UPDATES = False
def _get_service_url():
    """Build the absolute service URL CAS redirects back to after login."""
    # FIXME current URL
    from helios_auth import url_names
    from django.conf import settings
    from django.urls import reverse
    after_auth_path = reverse(url_names.AUTH_AFTER)
    return settings.SECURE_URL_HOST + after_auth_path
def get_auth_url(request, redirect_url):
    """Remember the post-login redirect in the session and return the CAS login URL."""
    request.session['cas_redirect_url'] = redirect_url
    service = urllib.parse.quote(_get_service_url())
    return CAS_URL + 'login?service=' + service
def get_user_category(user_id):
    """Query the eligibility service for *user_id*'s category.

    Uses HTTP basic auth with the CAS credentials configured in Django
    settings and returns the text of the XML response root element.
    """
    theurl = CAS_ELIGIBILITY_URL % user_id
    auth_handler = urllib.request.HTTPBasicAuthHandler()
    # BUG FIX: the password argument was a sanitized "<PASSWORD>" placeholder
    # (a syntax error); use the CAS_PASSWORD loaded from settings.
    auth_handler.add_password(
        realm=CAS_ELIGIBILITY_REALM, uri=theurl, user=CAS_USERNAME, passwd=CAS_PASSWORD
    )
    opener = urllib.request.build_opener(auth_handler)
    urllib.request.install_opener(opener)
    result = urllib.request.urlopen(theurl).read().strip()
    parsed_result = ElementTree.fromstring(result)
    return parsed_result.text
def get_saml_info(ticket):
    """
    Using SAML, get all of the information needed

    Posts a SAML 1.1 artifact-resolution request for *ticket* to the CAS
    samlValidate endpoint and extracts user attributes from the response.
    """
    import logging
    saml_request = """<?xml version='1.0' encoding='UTF-8'?>
            <soap-env:Envelope
            xmlns:soap-env='http://schemas.xmlsoap.org/soap/envelope/'>
            <soap-env:Header />
            <soap-env:Body>
            <samlp:Request xmlns:samlp="urn:oasis:names:tc:SAML:1.0:protocol"
            MajorVersion="1" MinorVersion="1"
            RequestID="%s"
            IssueInstant="%sZ">
            <samlp:AssertionArtifact>%s</samlp:AssertionArtifact>
            </samlp:Request>
            </soap-env:Body>
            </soap-env:Envelope>
""" % (uuid.uuid1(), datetime.datetime.utcnow().isoformat(), ticket)
    url = CAS_SAML_VALIDATE_URL % urllib.parse.quote(_get_service_url())
    # by virtue of having a body, this is a POST
    # BUG FIX: Request data must be bytes on Python 3, so encode the XML body.
    req = urllib.request.Request(url, saml_request.encode("utf-8"))
    raw_response = urllib.request.urlopen(req).read()
    logging.info("RESP:\n%s\n\n" % raw_response)
    response = ElementTree.fromstring(raw_response)
    # ugly path down the tree of attributes
    attributes = response.findall('{http://schemas.xmlsoap.org/soap/envelope/}Body/{urn:oasis:names:tc:SAML:1.0:protocol}Response/{urn:oasis:names:tc:SAML:1.0:assertion}Assertion/{urn:oasis:names:tc:SAML:1.0:assertion}AttributeStatement/{urn:oasis:names:tc:SAML:1.0:assertion}Attribute')
    values = {}
    for attribute in attributes:
        values[str(attribute.attrib['AttributeName'])] = attribute.findtext('{urn:oasis:names:tc:SAML:1.0:assertion}AttributeValue')
    # parse response for netid, display name, and employee type (category)
    return {'user_id': values.get('mail',None), 'name': values.get('displayName', None), 'category': values.get('employeeType',None)}
def get_user_info(user_id):
    """Look up display name and status for *user_id* via the DSML directory.

    Returns ``{'name': ..., 'category': ...}`` or ``None`` when the
    directory has no matching entry.
    """
    url = 'http://dsml.princeton.edu/'
    headers = {'SOAPAction': "#searchRequest", 'Content-Type': 'text/xml'}
    request_body = """<?xml version='1.0' encoding='UTF-8'?>
          <soap-env:Envelope
            xmlns:xsd='http://www.w3.org/2001/XMLSchema'
            xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
            xmlns:soap-env='http://schemas.xmlsoap.org/soap/envelope/'>
            <soap-env:Body>
                <batchRequest xmlns='urn:oasis:names:tc:DSML:2:0:core'
                    requestID='searching'>
                  <searchRequest
                      dn='o=Princeton University, c=US'
                      scope='wholeSubtree'
                      derefAliases='neverDerefAliases'
                      sizeLimit='200'>
                    <filter>
                      <equalityMatch name='uid'>
                        <value>%s</value>
                      </equalityMatch>
                    </filter>
                    <attributes>
                      <attribute name="displayName"/>
                      <attribute name="pustatus"/>
                    </attributes>
                  </searchRequest>
                </batchRequest>
            </soap-env:Body>
          </soap-env:Envelope>
    """ % user_id
    # BUG FIX: Request data must be bytes on Python 3, so encode the XML body.
    req = urllib.request.Request(url, request_body.encode("utf-8"), headers)
    response = urllib.request.urlopen(req).read()
    # parse the result
    from xml.dom.minidom import parseString
    response_doc = parseString(response)
    # get the value elements (a bit of a hack but no big deal)
    values = response_doc.getElementsByTagName('value')
    if len(values)>0:
        return {'name' : values[0].firstChild.wholeText, 'category' : values[1].firstChild.wholeText}
    else:
        return None
def get_user_info_special(ticket):
    """
    Validate a CAS ticket via the plain-text `validate` endpoint.

    :param ticket: the CAS service ticket from the login redirect
    :return: dict with user_id/name/info/token on success, None on failure
    """
    # fetch the information from the CAS server
    val_url = CAS_URL + "validate" + \
        '?service=' + urllib.parse.quote(_get_service_url()) + \
        '&ticket=' + urllib.parse.quote(ticket)
    # Bug fix: Python 3 urlopen yields bytes lines; decode so re.match and
    # strip operate on str (re.match(str_pattern, bytes) raises TypeError).
    r = [line.decode('utf-8') for line in
         urllib.request.urlopen(val_url).readlines()]  # returns 2 lines
    # success: first line is "yes", second is the netid
    if len(r) == 2 and re.match("yes", r[0]) is not None:
        netid = r[1].strip()
        category = get_user_category(netid)
        #try:
        #  user_info = get_user_info(netid)
        #except:
        #  user_info = None
        # for now, no need to wait for this request to finish
        user_info = None
        if user_info:
            info = {'name': user_info['name'], 'category': category}
        else:
            info = {'name': netid, 'category': category}
        return {'user_id': netid, 'name': info['name'], 'info': info, 'token': None}
    else:
        return None
def get_user_info_after_auth(request):
    """
    Extract the CAS ticket from the request and validate it.

    :param request: Django request carrying a 'ticket' GET parameter
    :return: validated user-info dict tagged with type 'cas', or None
    """
    ticket = request.GET.get('ticket', None)
    # if no ticket, this is a logout
    if not ticket:
        return None
    #user_info = get_saml_info(ticket)
    user_info = get_user_info_special(ticket)
    # Bug fix: validation can fail (expired/forged ticket) and return None;
    # the original then crashed on user_info['type'].
    if user_info is None:
        return None
    user_info['type'] = 'cas'
    return user_info
def do_logout(user):
    """
    Perform logout of CAS by redirecting to the CAS logout URL
    """
    # *user* is unused: CAS logout is purely a browser redirect; the CAS
    # server ends the SSO session identified by the browser's own cookie.
    return HttpResponseRedirect(CAS_LOGOUT_URL % _get_service_url())
def update_status(token, message):
    """
    simple update
    """
    # Intentionally a no-op: this auth backend exposes no status channel,
    # but the function is kept to satisfy the backend interface.
    pass
def send_message(user_id, name, user_info, subject, body):
    """
    send email, for now just to Princeton
    """
    # Qualify the address with the CAS domain unless it is already a full
    # email address.
    email = user_id if "@" in user_id else "%s@%s" % (user_id, CAS_EMAIL_DOMAIN)
    # The *name* argument is always overridden, preserving the original
    # behavior: prefer the directory name, fall back to the email address.
    name = user_info["name"] if 'name' in user_info else email
    recipient = "%s <%s>" % (name, email)
    send_mail(subject, body, settings.SERVER_EMAIL, [recipient], fail_silently=False)
#
# eligibility
#
def check_constraint(constraint, user):
    """Return True iff the user's recorded category equals the constraint year."""
    info = user.info
    # Users with no recorded category are never eligible.
    if 'category' not in info:
        return False
    return info['category'] == constraint['year']
def generate_constraint(category_id, user):
    """
    generate the proper basic data structure to express a constraint
    based on the category string
    """
    # *user* is accepted for interface compatibility but not consulted.
    constraint = {'year': category_id}
    return constraint
def list_categories(user):
    """Return the five class-year categories starting from the current year."""
    this_year = datetime.datetime.now().year
    categories = []
    for year in range(this_year, this_year + 5):
        categories.append({'id': str(year), 'name': 'Class of %s' % year})
    return categories
def eligibility_category_id(constraint):
    """Extract the category id (class year) stored in an eligibility constraint."""
    year = constraint['year']
    return year
def pretty_eligibility(constraint):
    """Render an eligibility constraint as a human-readable sentence."""
    year = constraint['year']
    return "Members of the Class of %s" % year
#
# Election Creation
#
def can_create_election(user_id, user_info):
    """Everyone may create an election; no restrictions apply to this backend."""
    return True
|
<filename>python-acoustics/acoustics/standards/iso_tr_25417_2007.py
"""
ISO/TR 25417 2007
=================
ISO/TR 25417:2007 specifies definitions of acoustical quantities and terms used
in noise measurement documents prepared by ISO Technical Committee TC 43,
Acoustics, Subcommittee SC 1, Noise, together with their symbols and units, with
the principal aim of harmonizing the terminology used [ISO25417]_.
.. [ISO25417] http://www.iso.org/iso/home/store/catalogue_tc/catalogue_detail.htm?csnumber=42915
.. inheritance-diagram:: acoustics.standards.iso_tr_25417_2007
"""
import numpy as np
REFERENCE_PRESSURE = 2.0e-5
"""
Reference value of the sound pressure :math:`p_0` is :math:`2 \cdot 10^{-5}` Pa.
"""
def sound_pressure_level(pressure, reference_pressure=REFERENCE_PRESSURE):
    """
    Sound pressure level :math:`L_p` in dB.
    :param pressure: Instantaneous sound pressure :math:`p`.
    :param reference_pressure: Reference value :math:`p_0`.
    .. math:: L_p = 10 \\log_{10}{ \\left( \\frac{p^2}{p_0^2} \\right)}
    See section 2.2.
    """
    squared_ratio = pressure**2.0 / reference_pressure**2.0
    return 10.0 * np.log10(squared_ratio)
def equivalent_sound_pressure_level(pressure, reference_pressure=REFERENCE_PRESSURE, axis=-1):
    """
    Time-averaged (equivalent-continuous) sound pressure level
    :math:`L_{p,T} = L_{p,eqT}` in dB.
    :param pressure: Instantaneous sound pressure :math:`p`.
    :param reference_pressure: Reference value :math:`p_0`.
    :param axis: Axis.
    .. math:: L_{p,T} = L_{p,eqT} = 10.0 \\log_{10}{ \\left( \\frac{\\frac{1}{T} \\int_{t_1}^{t_2} p^2 (t) \\mathrm{d} t }{p_0^2} \\right)}
    See section 2.3.
    """
    mean_square = (pressure**2.0).mean(axis=axis)
    return 10.0 * np.log10(mean_square / reference_pressure**2.0)
def max_sound_pressure_level(pressure, reference_pressure=REFERENCE_PRESSURE, axis=-1):
    """
    Maximum time-averaged sound pressure level :math:`L_{F,max}` in dB.
    :param pressure: Instantaneous sound pressure :math:`p`.
    :param reference_pressure: Reference value :math:`p_0`.
    :param axis: Axis.
    .. math:: \\mathrm{max}{(L_{p})}
    """
    levels = sound_pressure_level(pressure, reference_pressure=reference_pressure)
    return levels.max(axis=axis)
def peak_sound_pressure(pressure, axis=-1):
    """
    Peak sound pressure :math:`p_{peak}`: the greatest absolute sound
    pressure during the observed time interval.
    :param pressure: Instantaneous sound pressure :math:`p`.
    :param axis: Axis.
    .. math:: p_{peak} = \\mathrm{max}(|p|)
    """
    magnitude = np.abs(pressure)
    return magnitude.max(axis=axis)
def peak_sound_pressure_level(pressure, reference_pressure=REFERENCE_PRESSURE, axis=-1):
    """
    Peak sound pressure level :math:`L_{p,peak}` in dB.
    :param pressure: Instantaneous sound pressure :math:`p`.
    :param reference_pressure: Reference value :math:`p_0`.
    :param axis: Axis.
    .. math:: L_{p,peak} = 10.0 \\log \\frac{p_{peak}^2}{p_0^2}
    """
    peak = peak_sound_pressure(pressure, axis=axis)
    return 10.0 * np.log10(peak**2.0 / reference_pressure**2.0)
REFERENCE_SOUND_EXPOSURE = 4.0e-10
"""
Reference value of the sound exposure :math:`E_0` is :math:`4 \cdot 10^{-12} \\mathrm{Pa}^2\\mathrm{s}`.
"""
def sound_exposure(pressure, fs, axis=-1):
    """
    Sound exposure :math:`E_{T}`.
    :param pressure: Instantaneous sound pressure :math:`p`.
    :param fs: Sample frequency :math:`f_s`.
    :param axis: Axis.
    .. math:: E_T = \\int_{t_1}^{t_2} p^2 (t) \\mathrm{d}t
    """
    # Each squared sample contributes 1/fs seconds to the time integral.
    energy_per_sample = pressure**2.0 / fs
    return energy_per_sample.sum(axis=axis)
def sound_exposure_level(pressure, fs, reference_sound_exposure=REFERENCE_SOUND_EXPOSURE, axis=-1):
    """
    Sound exposure level :math:`L_{E,T}` in dB.
    :param pressure: Instantaneous sound pressure :math:`p`.
    :param fs: Sample frequency :math:`f_s`.
    :param reference_sound_exposure: Reference value :math:`E_{0}`.
    :param axis: Axis.
    .. math:: L_{E,T} = 10 \\log_{10}{ \\frac{E_T}{E_0} }
    """
    exposure = sound_exposure(pressure, fs, axis=axis)
    return 10.0 * np.log10(exposure / reference_sound_exposure)
REFERENCE_POWER = 1.0e-12
"""
Reference value of the sound power :math:`P_0` is 1 pW.
"""
def sound_power_level(power, reference_power=REFERENCE_POWER):
    """
    Sound power level :math:`L_{W}` in dB.
    :param power: Sound power :math:`P`.
    :param reference_power: Reference sound power :math:`P_0`.
    .. math:: 10 \\log_{10}{ \\frac{P}{P_0} }
    """
    power_ratio = power / reference_power
    return 10.0 * np.log10(power_ratio)
def sound_energy(power, axis=-1):
    """
    Sound energy :math:`J`.
    :param power: Sound power :math:`P`.
    :param axis: Axis.
    .. math:: J = \\int_{t_1}^{t_2} P(t) \\mathrm{d} t
    """
    # NOTE(review): a plain sum assumes samples are already scaled by the
    # sampling interval (contrast sound_exposure, which divides by fs) —
    # confirm against callers.
    total = power.sum(axis=axis)
    return total
REFERENCE_ENERGY = 1.0e-12
"""
Reference value of the sound energy :math:`J_0` is 1 pJ.
"""
def sound_energy_level(energy, reference_energy=REFERENCE_ENERGY):
    """
    Sound energy level :math:`L_{J}` in dB.
    :param energy: Sound energy :math:`J`.
    :param reference_energy: Reference sound energy :math:`J_0`.
    .. math:: L_{J} = 10 \\log_{10}{ \\frac{J}{J_0} }
    """
    # Bug fix: the level formula requires the factor 10 in front of log10,
    # as stated in this docstring and used by every other *_level function
    # in this module; the original returned a bare log10 ratio.
    return 10.0 * np.log10(energy / reference_energy)
def sound_intensity(pressure, velocity):
    """
    Sound intensity :math:`\\mathbf{i}`.
    :param pressure: Sound pressure :math:`p(t)`.
    :param velocity: Particle velocity :math:`\\mathbf{u}(t)`.
    .. math:: \\mathbf{i} = p(t) \\cdot \\mathbf{u}(t)
    """
    # Instantaneous intensity is the elementwise product of pressure and
    # particle velocity.
    intensity = pressure * velocity
    return intensity
REFERENCE_INTENSITY = 1.0e-12
"""
Reference value of the sound intensity :math:`I_0` is :math:`\\mathrm{1 pW/m^2}`.
"""
def time_averaged_sound_intensity(intensity, axis=-1):
    """
    Time-averaged sound intensity :math:`\\mathbf{I}_T`.
    :param intensity: Sound intensity :math:`\\mathbf{i}`.
    :param axis: Axis.
    .. math:: \\mathbf{I}_T = \\frac{1}{T} \\int_{t_1}^{t_2} \\mathbf{i}(t)
    """
    averaged = intensity.mean(axis=axis)
    return averaged
def time_averaged_sound_intensity_level(time_averaged_sound_intensity, reference_intensity=REFERENCE_INTENSITY, axis=-1):
    """
    Time-averaged sound intensity level :math:`L_{I,T}` in dB.
    :param time_averaged_sound_intensity: Time-averaged sound intensity :math:`\\mathbf{I}_T`.
    :param reference_intensity: Reference sound intensity :math:`I_0`.
    :param axis: Axis.
    .. math:: L_{I,T} = 10 \\log_{10} { \\frac{|\\mathbf{I}_T|}{I_0} }
    """
    magnitude = np.linalg.norm(time_averaged_sound_intensity, axis=axis)
    return 10.0 * np.log10(magnitude / reference_intensity)
def normal_time_averaged_sound_intensity(time_averaged_sound_intensity, unit_normal_vector):
    """
    Normal time-averaged sound intensity :math:`I_{n,T}`.
    :param time_averaged_sound_intensity: Time-averaged sound intensity :math:`\\mathbf{I}_T`.
    :param unit_normal_vector: Unit normal vector :math:`\\mathbf{n}`.
    .. math:: I_{n,T} = \\mathbf{I}_T \\cdot \\mathbf{n}
    """
    # Project the intensity vector onto the surface normal.
    projection = time_averaged_sound_intensity.dot(unit_normal_vector)
    return projection
def normal_time_averaged_sound_intensity_level(normal_time_averaged_sound_intensity, reference_intensity=REFERENCE_INTENSITY):
    """
    Normal time-averaged sound intensity level :math:`L_{In,T}` in dB.
    :param normal_time_averaged_sound_intensity: Normal time-averaged sound intensity :math:`I_{n,T}`.
    :param reference_intensity: Reference sound intensity :math:`I_0`.
    .. math:: L_{In,T} = 10 \\log_{10} { \\frac{|I_{n,T}|}{I_0}}
    """
    ratio = np.abs(normal_time_averaged_sound_intensity / reference_intensity)
    return 10.0 * np.log10(ratio)
|
<reponame>stochasticnetworkcontrol/snc
import pytest
import numpy as np
import tensorflow as tf
from copy import deepcopy
from tf_agents.drivers.dynamic_episode_driver import DynamicEpisodeDriver
from tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer
from tf_agents.specs.tensor_spec import BoundedTensorSpec
import snc.utils.snc_tools as snc
from snc.environments.rl_environment_wrapper import rl_env_from_snc_env
from snc.agents.rl.agents import create_ppo_agent
from snc.environments.scenarios import load_scenario
@pytest.mark.parametrize(
    'env_name,expected_action_spec_shape', [('single_server_queue', tf.TensorShape((2)))]
)
def test_ppo_agent_init(env_name, expected_action_spec_shape):
    """
    Tests agent set up and initialisation.

    Covers the single-resource-set case: the action spec is a single
    BoundedTensorSpec rather than a tuple.
    """
    # Set up environment using default parameters.
    # Environment parameters do not affect the test result here.
    tf_env, _ = rl_env_from_snc_env(load_scenario(env_name, job_gen_seed=10)[1],
                                    discount_factor=0.99, normalise_observations=False)
    # Instantiate and initialise a PPO agent for the environment.
    ppo_agent = create_ppo_agent(tf_env, num_epochs=10)
    ppo_agent.initialize()
    # Validate initialisation by checking relevant properties of the initialised agent.
    assert isinstance(ppo_agent.action_spec, BoundedTensorSpec)
    assert ppo_agent.action_spec.shape == expected_action_spec_shape
    assert ppo_agent.name == "PPO_Agent"
    assert ppo_agent.time_step_spec == tf_env.time_step_spec()
def test_ppo_agent_init_with_multiple_resource_sets():
    """
    Tests agent set up and initialisation with multiple action subspaces (multiple resource sets).
    """
    # Set the environment name for this case as the asserts are difficult to make as variables.
    env_name = 'double_reentrant_line_shared_res_homogeneous_cost'
    # Set up the environment parameters.
    # Environment parameters do not affect the test result here.
    tf_env, _ = rl_env_from_snc_env(load_scenario(env_name, job_gen_seed=10)[1],
                                    discount_factor=0.99, normalise_observations=False)
    # Instantiate and initialise a PPO agent for the environment.
    ppo_agent = create_ppo_agent(tf_env, num_epochs=10)
    ppo_agent.initialize()
    # Validate initialisation by checking some properties of the initialised agent.
    # With two resource sets the action spec is a 2-tuple of specs, one per set.
    assert isinstance(ppo_agent.action_spec, tuple)
    assert len(ppo_agent.action_spec) == 2
    assert isinstance(ppo_agent.action_spec[0], BoundedTensorSpec)
    assert isinstance(ppo_agent.action_spec[1], BoundedTensorSpec)
    assert ppo_agent.action_spec[0].shape == tf.TensorShape((3))
    assert ppo_agent.action_spec[1].shape == tf.TensorShape((3))
    assert ppo_agent.name == "PPO_Agent"
    assert ppo_agent.time_step_spec == tf_env.time_step_spec()
# Parameterise with environments which cover the cases of a single resource set and multiple
# resource sets.
@pytest.mark.parametrize(
    'env_name',
    ['single_server_queue', 'double_reentrant_line_shared_res_homogeneous_cost']
)
def test_ppo_agent_play(env_name):
    """
    Extension of the agent set up and initialisation test to include playing episodes.
    """
    # Set up environment using default parameters.
    # Environment parameters do not affect the test result here.
    tf_env, action_dims = rl_env_from_snc_env(load_scenario(env_name, job_gen_seed=10)[1],
                                              discount_factor=0.99, normalise_observations=False)
    # Instantiate and initialise a PPO agent for the environment.
    ppo_agent = create_ppo_agent(tf_env, num_epochs=10)
    ppo_agent.initialize()
    # Reset the environment
    tf_env.reset()
    # Play 5 time steps in the environment.
    for _ in range(5):
        # Since we do not have the state stored at this point we capture it from the environment
        # fresh each time step as a TimeStep object (a named tuple).
        time_step = tf_env.current_time_step()
        # Attain our agent's action.
        action_step = ppo_agent.collect_policy.action(time_step)
        # Ensure that the action is one-hot as expected.
        # Multiple resource sets yield a tuple of per-set actions; concatenate
        # them so a single binary check covers both cases.
        if isinstance(action_step.action, tuple):
            action = tf.concat(action_step.action, axis=-1)
        else:
            action = action_step.action
        # Ensure that the action is binary as expected.
        assert snc.is_binary(action)
        # Play the action out in the environment.
        tf_env.step(action_step.action)
# Parameterise with environments which cover the cases of a single resource set and multiple
# resource sets.
@pytest.mark.parametrize(
    'env_name',
    ['single_server_queue', 'double_reentrant_line_shared_res_homogeneous_cost']
)
def test_ppo_agent_learning(env_name):
    """
    Extension of the play test for an agent playing in the environment to include training.
    Note: This does not test that training improves the policy. It simply tests that the training
    loop runs effectively and changes the policy parameters.
    """
    # Set up environment using default parameters.
    # Environment parameters do not affect the test result here.
    # Episodes are capped at 25 steps to keep the test fast.
    tf_env, _ = rl_env_from_snc_env(
        load_scenario(env_name, job_gen_seed=10, override_env_params={'max_episode_length': 25})[1],
        discount_factor=0.99, normalise_observations=False
    )
    # Set up a training step counter.
    global_step = tf.compat.v1.train.get_or_create_global_step()
    # Instantiate a PPO agent
    ppo_agent = create_ppo_agent(tf_env, num_epochs=10, training_step_counter=global_step)
    # Instantiate a replay buffer.
    replay_buffer = TFUniformReplayBuffer(
        data_spec=ppo_agent.collect_data_spec,
        batch_size=tf_env.batch_size,
        max_length=1000)
    # Use a driver to handle data collection for the agent. This handles a lot of the backend
    # TensorFlow set up and solves previous errors with episodes of differing lengths.
    collect_driver = DynamicEpisodeDriver(
        tf_env,
        ppo_agent.collect_policy,
        observers=[replay_buffer.add_batch],
        num_episodes=2)
    # collect_driver.run = tf.function(collect_driver.run)
    # Get the initial states of the agent and environment before training.
    time_step = tf_env.reset()
    policy_state = ppo_agent.collect_policy.get_initial_state(tf_env.batch_size)
    # Take a copy of the variables in order to ensure that training does lead to parameter changes.
    initial_vars = deepcopy(ppo_agent.trainable_variables)
    assert len(initial_vars) > 0, "Agent has no trainable variables."
    # Set up a minimal training loop to simply test training mechanics work.
    for _ in range(5):
        # Collect experience.
        time_step, policy_state = collect_driver.run(
            time_step=time_step,
            policy_state=policy_state
        )
        # Now the replay buffer should have data in it so we can collect the data and train the
        # agent.
        experience = replay_buffer.gather_all()
        ppo_agent.train(experience)
        # Clear the replay buffer and return to play.
        replay_buffer.clear()
    # Check that training has had some effect: every trainable variable must
    # have moved away from its pre-training value.
    for v1, v2 in zip(initial_vars, ppo_agent.trainable_variables):
        assert not np.allclose(v1.numpy(), v2.numpy())
|
<reponame>hemprakash1994hp/detox
from __future__ import with_statement, print_function
import sys
import time
import eventlet
import py
import pytest
from eventlet.green.subprocess import Popen
from textwrap import dedent as d
from detox.proc import Detox
from detox.cli import main as detox_main, tox_prepare
pytest_plugins = "pytester"
def create_example1(tmpdir):
    """Populate *tmpdir* with a minimal sdist-building project (setup.py + tox.ini)."""
    tmpdir.join("setup.py").write(
        d(
            """
    from setuptools import setup
    def main():
        setup(
            name='example1',
            description='example1 project for testing detox',
            version='0.4',
            packages=['example1',],
        )
    if __name__ == '__main__':
        main()
    """
        )
    )
    # Single bare `py` testenv; detox will build and install the sdist above.
    tmpdir.join("tox.ini").write(
        d(
            """
            [testenv:py]
        """
        )
    )
    tmpdir.join("example1", "__init__.py").ensure()
def create_example2(tmpdir):
    """Populate *tmpdir* with a project that skips sdist creation entirely."""
    tmpdir.join("tox.ini").write(
        d(
            """
            [tox]
            skipsdist = True
            [testenv:py]
        """
        )
    )
    tmpdir.join("example2", "__init__.py").ensure()
def create_example3(tmpdir):
    """Populate *tmpdir* with two slow envs (py1/py2) for parallelism tests."""
    tmpdir.join("tox.ini").write(
        d(
            """
            [tox]
            skipsdist = True
            [testenv]
            commands = python -c 'import time; time.sleep(1)'
            [testenv:py1]
            [testenv:py2]
        """
        )
    )
    tmpdir.join("example3", "__init__.py").ensure()
def pytest_configure(config):
    """Register the custom markers used by this test suite."""
    marker_lines = (
        "example1: use example1 for setup",
        "example2: use example2 for setup",
        "timeout(N): stop test function after N seconds, throwing a Timeout.",
    )
    for line in marker_lines:
        config.addinivalue_line("markers", line)
@pytest.fixture
def exampledir(request, tmpdir):
    """
    Create and return the example project directory requested by the test.

    The test function is expected to carry an ``exampleN`` marker attribute;
    the matching ``create_exampleN`` helper populates a fresh subdirectory
    of *tmpdir*.
    """
    for x in dir(request.function):
        if x.startswith("example"):
            exampledir = tmpdir.mkdir(x)
            globals()["create_" + x](exampledir)
            print("%s created at %s" % (x, exampledir))
            break
    else:
        # Bug fix: `request.LookupError` is not an attribute of the fixture
        # request (the original raised AttributeError instead of the intended
        # error); raise the builtin LookupError with a corrected message.
        raise LookupError("test function has no example")
    return exampledir
@pytest.fixture
def detox(exampledir):
    """Return a Detox instance for *exampledir*; the cwd is restored afterwards."""
    old = exampledir.chdir()
    try:
        # tox_prepare reads tox.ini relative to the (temporarily changed) cwd.
        return Detox(tox_prepare([]))
    finally:
        old.chdir()
@pytest.fixture
def cmd(request, exampledir):
    """Return a Cmd helper bound to the example project directory."""
    return Cmd(exampledir, request)
class Cmd:
    """Helper to run detox either in-process (main) or as a subprocess (rundetox)."""
    def __init__(self, basedir, request):
        # basedir: project directory; a .cmdtmp subdir captures child output
        self.basedir = basedir
        self.tmpdir = basedir.mkdir(".cmdtmp")
        self.request = request
    def main(self, *args):
        """Invoke detox's main() in-process from basedir."""
        self.basedir.chdir()
        return detox_main(args)
    def rundetox(self, *args):
        """Run the installed `detox` console script as a subprocess from basedir."""
        self.basedir.chdir()
        script = py.path.local.sysfind("detox")
        assert script, "could not find 'detox' script"
        return self._run(script, *args)
    def _run(self, *cmdargs):
        """Execute *cmdargs*, capturing stdout/stderr to files; return a RunResult."""
        from _pytest.pytester import RunResult, getdecoded
        cmdargs = [str(x) for x in cmdargs]
        p1 = self.tmpdir.join("stdout")
        p2 = self.tmpdir.join("stderr")
        print("running", cmdargs, "curdir=", py.path.local())
        f1 = p1.open("wb")
        f2 = p2.open("wb")
        now = time.time()
        # close_fds is unsupported with redirected std handles on Windows
        popen = Popen(
            cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32")
        )
        ret = popen.wait()
        f1.close()
        f2.close()
        out = p1.read("rb")
        out = getdecoded(out).splitlines()
        err = p2.read("rb")
        err = getdecoded(err).splitlines()
        def dump_lines(lines, fp):
            # echo the captured output into the test log for debugging
            try:
                for line in lines:
                    print(line, file=fp)
            except UnicodeEncodeError:
                print("couldn't print to %s because of encoding" % (fp,))
        dump_lines(out, sys.stdout)
        dump_lines(err, sys.stderr)
        return RunResult(ret, out, err, time.time() - now)
@pytest.fixture(autouse=True)
def with_timeout(request):
    """Abort any test after its `timeout` marker value (default 5 s) via eventlet."""
    marker = request.node.get_closest_marker("timeout")
    limit = marker.args[0] if marker else 5.0
    with eventlet.Timeout(limit):
        yield
def test_hang(testdir):
    """Verify the autouse timeout fixture interrupts a hanging test."""
    # Copy this conftest into the temporary test dir so its fixtures apply.
    p = py.path.local(__file__).dirpath("conftest.py")
    p.copy(testdir.tmpdir.join(p.basename))
    testdir.makepyfile(
        """
        import pytest
        from eventlet.green import time
        @pytest.mark.timeout(0.01)
        def test_hang():
            time.sleep(3.0)
    """
    )
    result = testdir.runpytest()
    # The inner test must fail with a Timeout, not hang or pass.
    assert "failed to timeout" not in result.stdout.str()
    result.stdout.fnmatch_lines(["*Timeout: 0.01*"])
|
<reponame>ut-ras/r5-2019
"""
Holds stuff specific to representing this year's game field.
"""
from r5engine.object import SimulationObject, MASK_CIRCULAR, MASK_RECT
import r5engine.graphics as graphics
import r5engine.settings as settings
import r5engine.util as util
OBSTACLE_RADIUS = 0.75
OBSTACLE_COLOR = (128, 128, 128)
BLOCK_WIDTH = 1.5
BLOCK_HEIGHT = BLOCK_WIDTH
BLOCK_COLOR = (255, 255, 255)
MOTHERSHIP_WIDTH = 13.5
MOTHERSHIP_HEIGHT = 8.5
MOTHERSHIP_COLOR = (128, 128, 128)
OBJECT_SAFE_DISTANCE = 6
ROUND_OBJECT_COUNTS = (
(2, 5), # Block count, obstacle count
(4, 10),
(6, 15)
)
class Obstacle(SimulationObject):
    """
    A dowel/ping-pong-ball obstacle: circular collision mask, no heading.
    """
    def __init__(self, x, y):
        """
        Parameters
        ----------
        x: float
            horizontal position in units
        y: float
            vertical position in units
        """
        diameter_px = int(settings.PIXELS_PER_UNIT * OBSTACLE_RADIUS * 2)
        SimulationObject.__init__(self, x, y, 0, diameter_px, diameter_px,
                                  OBSTACLE_COLOR, MASK_CIRCULAR)
        # Disable autoscaling and convert dims back to units, preserving
        # ellipse precision when the object is rendered.
        self.autoscale = False
        self.dims[0] /= settings.PIXELS_PER_UNIT
        self.dims[1] /= settings.PIXELS_PER_UNIT
class Block(SimulationObject):
    """
    A lettered cube with a rectangular collision mask.
    """
    def __init__(self, x, y, theta=0):
        """
        Parameters
        ----------
        x: float
            horizontal position in units
        y: float
            vertical position in units
        theta: float
            heading in radians
        """
        SimulationObject.__init__(self, x, y, theta, BLOCK_WIDTH, BLOCK_HEIGHT,
                                  BLOCK_COLOR, MASK_RECT)
        # Letter label, assigned after placement by build_field.
        self.letter = ""

    def draw(self, display):
        """Draw the block and render its letter label just above it."""
        SimulationObject.draw(self, display)
        graphics.draw_set_color(0, 0, 0)
        label = ["Block " + self.letter]
        graphics.draw_text_field(display, label, self.pose[0],
                                 self.pose[1] - 1, align="center")
class Mothership(SimulationObject):
    """
    The mothership, drawn with a rectangular collision mask.
    TODO: the mothership is actually a composite rectangle.
    """
    def __init__(self, x, y, theta=0):
        """
        Parameters
        ----------
        x: float
            horizontal position in units
        y: float
            vertical position in units
        theta: float
            heading in radians
        """
        SimulationObject.__init__(self, x, y, theta, MOTHERSHIP_WIDTH,
                                  MOTHERSHIP_HEIGHT, MOTHERSHIP_COLOR, MASK_RECT)
        # Blocks currently loaded into the mothership.
        self.blocks = []

    def draw(self, display):
        """Draw the mothership and a text field listing its loaded blocks."""
        SimulationObject.draw(self, display)
        graphics.draw_set_color(0, 0, 0)
        lines = ["Mothership", "blocks=" + str(self.blocks)]
        graphics.draw_text_field(display, lines, self.pose[0], self.pose[1])
def build_field(round):
    """
    Create a random arrangement of the correct number of game elements
    according to the round.

    Parameters
    ----------
    round: int
        game round (0, 1, or 2)

    Returns
    -------
    list
        list of SimulationObjects to incorporate into the simulation
    """
    objects = []
    block_count, obstacle_count = ROUND_OBJECT_COUNTS[round]
    # Blocks are lettered 'A', 'B', ... in placement order.
    for index in range(block_count):
        block = util.place_safe(objects, lambda x, y: Block(x, y),
                                OBJECT_SAFE_DISTANCE)
        block.letter = chr(65 + index)
    # Obstacles need no per-object setup.
    for _ in range(obstacle_count):
        util.place_safe(objects, lambda x, y: Obstacle(x, y),
                        OBJECT_SAFE_DISTANCE)
    # Exactly one mothership per field.
    util.place_safe(objects, lambda x, y: Mothership(x, y),
                    OBJECT_SAFE_DISTANCE)
    return objects
|
from posixpath import realpath
from typing import NewType
from django.db.models.fields import CommaSeparatedIntegerField
from django.shortcuts import render, redirect
from django.utils.timezone import datetime
from django.http import HttpResponse
import re
from dmdd_pictures.forms import LogForm
from dmdd_pictures.models import PictureInfo, Gene
from dmdd_project.settings import STATIC_ROOT
from django.views.decorators.csrf import csrf_exempt
import os
def home(request):
    """Trivial landing view confirming the app is wired up."""
    greeting = "Hello, Django"
    return HttpResponse(greeting)
# def loadData(request):
# df = pd.read_csv('/Users/reidtaylor/Documents/Senior Thesis/DMDD Project/python_study/assignments.csv')
# for index, row in df.iterrows():
# gene1 = row['gene']
# identifier1 = row['identifier']
# assignee1 = row['assignee']
# finished1 = row['finished']
# message = Gene(
# gene = gene1,
# identifier = identifier1,
# assignee = assignee1,
# finished = finished1
# )
# message.save()
# return HttpResponse(f"Success, {message}")
def showPicture(request, id):
    """
    Render the picture-review page for the next unfinished gene assigned
    to reviewer *id*.

    :param request: incoming HTTP request
    :param id: assignee identifier used to filter Gene rows
    """
    # Unfinished assignments for this reviewer.
    # NOTE(review): if the queryset is empty, dog[0] raises IndexError (same
    # as the original) — confirm whether an "all done" page is needed.
    dog = Gene.objects.filter(assignee=id).exclude(finished=1)
    # Determine the genotype coding by probing which image file exists,
    # replacing the copy-pasted if/elif chain; probe order and the '+_+'
    # fallback match the original exactly.
    base = os.getcwd() + '/static/img/{}/images/DMDD{}-0875-'.format(
        dog[0].gene, dog[0].identifier)
    coding = '+_+'
    for candidate in ('-_-', '-_+', '+_-'):
        if os.path.isfile(base + candidate + '.jpeg'):
            coding = candidate
            break
    return render(
        request,
        'dmdd_pictures/show_picture.html',
        {
            'name': id,
            'date': datetime.now(),
            'form': 'd',
            'remaining': dog.count(),
            'total': Gene.objects.filter(assignee=id).count(),
            # NOTE(review): '<PASSWORD>' looks like a redacted placeholder;
            # the template normally receives a real CSRF token — confirm.
            'csrf_token': '<PASSWORD>',
            # NOTE(review): hard-coded dev host; consider settings.STATIC_URL.
            'STATIC_ROOT': 'http://127.0.0.1:8000/static',
            'gene1': dog[0].gene,
            'id1': dog[0].identifier,
            'slide1': '0750',
            'coding1': coding,
            'gene2': dog[0].gene,
            'id2': dog[0].identifier,
            'slide2': '0750',
            'coding2': coding,
        }
    )
@csrf_exempt
def log_message(request):
    """
    Accept a submitted picture-review form; on GET, render the entry page.
    """
    form = LogForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            message = form.save(commit=False)
            message.submission = datetime.now()
            message.save()
            print('success')
            # NOTE(review): redirect() requires a target URL/view name;
            # calling it with no arguments raises TypeError at runtime.
            # Supply the intended success destination.
            return redirect()
        else:
            # NOTE(review): this branch runs when the form is INVALID, yet it
            # reads form.cleaned_data, which may be missing the fields that
            # failed validation (KeyError risk) — confirm intent.
            message = PictureInfo(
                gene= form.cleaned_data['gene'],
                identifier= form.cleaned_data['identifier'],
                slide= form.cleaned_data['slide'],
                imgsrc= 'this is null data',
                mutated= bool(form.cleaned_data['mutated']),
                comment= form.cleaned_data['comment'],
                submission= datetime.now(),
                author= form.cleaned_data['author'],
                wildtype= form.cleaned_data['wildtype'],
                center= form.cleaned_data['center'],
                leftPresent= bool(form.cleaned_data['leftPresent']),
                leftBeginning= form.cleaned_data['leftBeginning'],
                rightPresent= bool(form.cleaned_data['rightPresent']),
                rightBeginning= form.cleaned_data['rightBeginning'],
            )
            message.save()
            # Mark the reviewed gene/identifier pair as finished.
            Gene.objects.filter(gene=form.cleaned_data['gene']).filter(identifier=form.cleaned_data['identifier']).update(finished=1)
            return render(request, "dmdd_pictures/show_picture.html", {"form":form})
    else:
        return render(request, "dmdd_pictures/begin.html", {"form":form})
<filename>src/Bubot_CoAP/layers/message_layer.py
import logging
import random
import time
import socket
from .. import utils
from .. import defines
from ..messages.request import Request
from ..transaction import Transaction
from ..utils import generate_random_token
# import asyncio
__author__ = '<NAME>'
logger = logging.getLogger(__name__)
class MessageLayer(object):
"""
Handles matching between messages (Message ID) and request/response (Token)
"""
def __init__(self, server, starting_mid):
"""
Set the layer internal structure.
:param starting_mid: the first mid used to send messages.
"""
# self.lock = asyncio.Lock()
self.server = server
self._transactions = {}
self._transactions_token = {}
self._transactions_sent = {}
self._transactions_sent_token = {}
if starting_mid is not None:
self._current_mid = starting_mid
else:
self._current_mid = random.randint(1, 1000)
    @staticmethod
    def fetch_token():
        # Generate a fresh 8-byte token for a new exchange (CoAP tokens are
        # 0-8 bytes long).
        return generate_random_token(8)
def fetch_mid(self):
"""
Gets the next valid MID.
:return: the mid to use
"""
current_mid = self._current_mid
self._current_mid += 1
self._current_mid %= 65535
return current_mid
    def purge_sent(self, k):
        # Drop a completed outgoing transaction and let the block layer
        # release any blockwise-transfer state tied to the same key.
        # Raises KeyError if *k* is not a known sent-transaction token key.
        del self._transactions_sent_token[k]
        self.server.block_layer.purge_sent(k)
def purge(self, timeout_time=defines.EXCHANGE_LIFETIME):
for k in list(self._transactions.keys()):
now = time.time()
transaction = self._transactions[k]
if transaction.timestamp + timeout_time < now:
logger.debug("Delete transaction")
del self._transactions[k]
for k in list(self._transactions_token.keys()):
now = time.time()
transaction = self._transactions_token[k]
if transaction.timestamp + timeout_time < now:
logger.debug("Delete transaction")
del self._transactions_token[k]
self.server.block_layer.purge(k)
async def receive_request(self, request):
"""
Handle duplicates and store received messages.
:type request: Request
:param request: the incoming request
:rtype : Transaction
:return: the edited transaction
"""
logger.info("receive_request - " + str(request))
try:
host, port = request.source
except AttributeError:
return
if request.multicast:
key_mid = request.mid
key_token = request.token # skip duplicated from net interfaces
if key_token in list(self._transactions_token.keys()):
# Duplicated multicast request
self._transactions_token[key_token].request.duplicated = True
return self._transactions_token[key_token]
else:
key_mid = utils.str_append_hash(host, port, request.mid)
key_token = utils.str_append_hash(host, port, request.token)
if key_mid in list(self._transactions.keys()):
# Duplicated
self._transactions[key_mid].request.duplicated = True
return self._transactions[key_mid]
request.timestamp = time.time()
transaction = Transaction(request=request, timestamp=request.timestamp)
# async with transaction.lock:
# self._transactions[key_mid] = transaction
# self._transactions_token[key_token] = transaction
## async with self.lock:
if key_token in self._transactions_token \
and self._transactions_token[key_token].response is not None: # вычитываем результат
transaction = self._transactions_token[key_token]
async with transaction.lock:
self._transactions_token[key_token].request = request
self._transactions[key_mid] = transaction
else:
transaction = Transaction(request=request, timestamp=request.timestamp)
async with transaction.lock:
self._transactions_token[key_token] = transaction
self._transactions[key_mid] = transaction
return transaction
    def receive_response(self, response):
        """
        Pair responses with requests.

        :type response: Response
        :param response: the received response
        :rtype : Transaction
        :return: a (transaction, send_ack) pair; (None, False) when the
            response cannot be matched to any sent request
        """
        logger.info("receive_response - " + str(response))
        try:
            host, port = response.source
        except AttributeError:
            return
        # all_coap_nodes = defines.ALL_COAP_NODES_IPV6 if socket.getaddrinfo(host, None)[0][0] == socket.AF_INET6 else defines.ALL_COAP_NODES
        # Lookup keys: by source MID, by source token, and by the token of
        # the original (possibly multicast) destination.
        key_mid = utils.str_append_hash(host, port, response.mid)
        # key_mid_multicast = utils.str_append_hash(all_coap_nodes, port, response.mid)
        key_token = utils.str_append_hash(host, port, response.token)
        key_token_multicast = utils.str_append_hash(response.destination[0], response.destination[1], response.token)
        if key_mid in list(self._transactions_sent.keys()):
            transaction = self._transactions_sent[key_mid]
            # MID matched but tokens differ: treat as unmatched per RFC 7252.
            if response.token != transaction.request.token:
                logger.warning("Tokens does not match - response message " + str(host) + ":" + str(port))
                return None, False
        elif key_token in self._transactions_sent_token:
            # Keyed by the token itself, so no extra token check is needed.
            transaction = self._transactions_sent_token[key_token]
        # elif key_mid_multicast in list(self._transactions_sent.keys()):
        #     transaction = self._transactions_sent[key_mid_multicast]
        elif key_token_multicast in self._transactions_sent_token:
            transaction = self._transactions_sent_token[key_token_multicast]
            if response.token != transaction.request.token:
                logger.warning("Tokens does not match - response message " + str(host) + ":" + str(port))
                return None, False
        else:
            logger.warning("Un-Matched incoming response message " + str(host) + ":" + str(port))
            return None, False
        # A confirmable response must be acknowledged by the caller.
        send_ack = False
        if response.type == defines.Types["CON"]:
            send_ack = True
        transaction.request.acknowledged = True
        transaction.completed = True
        transaction.response = response
        # Stop any pending retransmission timer for the paired request.
        if transaction.retransmit_stop is not None:
            transaction.retransmit_stop.set()
        return transaction, send_ack
    def receive_empty(self, message):
        """
        Pair an empty message (ACK/RST, or an implicit-ACK CON) with the
        transaction it refers to, by MID or token, including the multicast
        ("all CoAP nodes") variants of both keys.

        :type message: Message
        :param message: the received message
        :rtype : Transaction
        :return: the transaction to which the message belongs to, or None
            when no transaction matches
        """
        logger.info("receive_empty - " + str(message))
        try:
            host, port = message.source
        except AttributeError:
            # Message without a source address: cannot be paired.
            return
        # Pick the IPv4 or IPv6 "all CoAP nodes" multicast address depending
        # on the address family of the peer.
        all_coap_nodes = defines.ALL_COAP_NODES_IPV6 if socket.getaddrinfo(host, None)[0][
            0] == socket.AF_INET6 else defines.ALL_COAP_NODES
        key_mid = utils.str_append_hash(host, port, message.mid)
        key_mid_multicast = utils.str_append_hash(all_coap_nodes, port, message.mid)
        key_token = utils.str_append_hash(host, port, message.token)
        key_token_multicast = utils.str_append_hash(all_coap_nodes, port, message.token)
        # Lookup order: unicast MID, unicast token, multicast MID, multicast token.
        if key_mid in list(self._transactions.keys()):
            transaction = self._transactions[key_mid]
        elif key_token in self._transactions_token:
            transaction = self._transactions_token[key_token]
        elif key_mid_multicast in list(self._transactions.keys()):
            transaction = self._transactions[key_mid_multicast]
        elif key_token_multicast in self._transactions_token:
            transaction = self._transactions_token[key_token_multicast]
        else:
            logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port))
            return None
        if message.type == defines.Types["ACK"]:
            # The ACK acknowledges the request first, then the response.
            if not transaction.request.acknowledged:
                transaction.request.acknowledged = True
            elif (transaction.response is not None) and (not transaction.response.acknowledged):
                transaction.response.acknowledged = True
        elif message.type == defines.Types["RST"]:
            # RST rejects whichever side was still awaiting acknowledgement.
            if not transaction.request.acknowledged:
                transaction.request.rejected = True
            elif not transaction.response.acknowledged:
                transaction.response.rejected = True
        elif message.type == defines.Types["CON"]:
            # implicit ACK (might have been lost)
            logger.debug("Implicit ACK on received CON for waiting transaction")
            transaction.request.acknowledged = True
        else:
            logger.warning("Unhandled message type...")
        # Any signal from the peer means we can stop retransmitting.
        if transaction.retransmit_stop is not None:
            transaction.retransmit_stop.set()
        return transaction
def send_request(self, request):
"""
Create the transaction and fill it with the outgoing request.
:type request: Request
:param request: the request to send
:rtype : Transaction
:return: the created transaction
"""
assert isinstance(request, Request)
try:
host, port = request.destination
except AttributeError:
return
request.timestamp = time.time()
transaction = Transaction(request=request, timestamp=request.timestamp)
if transaction.request.type is None:
transaction.request.type = defines.Types["CON"]
if transaction.request.mid is None:
transaction.request.mid = self.fetch_mid()
if transaction.request.token is None:
transaction.request.token = self.fetch_token()
# logger.info("send_request - " + str(request))
if request.multicast:
key_token = utils.str_append_hash(request.source[0], request.source[1], request.token)
self._transactions_sent_token[key_token] = transaction
return self._transactions_sent_token[key_token]
else:
key_mid = utils.str_append_hash(host, port, request.mid)
self._transactions_sent[key_mid] = transaction
key_token = utils.str_append_hash(host, port, request.token)
self._transactions_sent_token[key_token] = transaction
return self._transactions_sent[key_mid]
    def send_response(self, transaction):
        """
        Set the type, the token and eventually the MID for the outgoing response.

        :type transaction: Transaction
        :param transaction: the transaction that owns the response
        :rtype : Transaction
        :return: the edited transaction, or None if the response has no
            destination address
        """
        logger.info("send_response - " + str(transaction.response))
        if transaction.response.type is None:
            if transaction.request.type == defines.Types["CON"] and not transaction.request.acknowledged:
                # Piggy-backed response: reuse the request MID on an ACK.
                transaction.response.type = defines.Types["ACK"]
                transaction.response.mid = transaction.request.mid
                transaction.response.acknowledged = True
                transaction.completed = True
            elif transaction.request.type == defines.Types["NON"]:
                transaction.response.type = defines.Types["NON"]
            else:
                # Separate response: the request was already ACKed.
                transaction.response.type = defines.Types["CON"]
        # The response always echoes the request token.
        transaction.response.token = transaction.request.token
        if transaction.response.mid is None:
            transaction.response.mid = self.fetch_mid()
        try:
            host, port = transaction.response.destination
        except AttributeError:
            return
        # Register by MID so an incoming ACK/RST can be paired later.
        key_mid = utils.str_append_hash(host, port, transaction.response.mid)
        self._transactions[key_mid] = transaction
        transaction.request.acknowledged = True
        return transaction
    def send_empty(self, transaction, related, message):
        """
        Manage ACK or RST related to a transaction. Sets if the transaction has been acknowledged or rejected.

        :param transaction: the transaction (may be None, in which case it is
            looked up from the message's destination/MID/token)
        :param related: if the ACK/RST message is related to the request or the response. Must be equal to
        transaction.request or to transaction.response or None
        :type message: Message
        :param message: the ACK or RST message to send
        :return: the filled-in message (returned unchanged if no transaction
            could be found), or None when the message has no destination
        """
        logger.info("send_empty - " + str(message))
        if transaction is None:
            # Look up the transaction from the message itself, MID first.
            try:
                host, port = message.destination
            except AttributeError:
                return
            key_mid = utils.str_append_hash(host, port, message.mid)
            key_token = utils.str_append_hash(host, port, message.token)
            if key_mid in self._transactions:
                transaction = self._transactions[key_mid]
                related = transaction.response
            elif key_token in self._transactions_token:
                transaction = self._transactions_token[key_token]
                related = transaction.response
            else:
                # Unknown peer message: hand it back untouched.
                return message
        if message.type == defines.Types["ACK"]:
            if transaction.request == related:
                # ACK our peer's request: echo its MID, swap the endpoints.
                transaction.request.acknowledged = True
                transaction.completed = True
                message.mid = transaction.request.mid
                message.code = 0
                message.destination = transaction.request.source
                message.source = transaction.request.destination
            elif transaction.response == related:
                transaction.response.acknowledged = True
                transaction.completed = True
                message.mid = transaction.response.mid
                message.code = 0
                message.token = transaction.response.token
                message.destination = transaction.response.source
                message.source = transaction.response.destination
        elif message.type == defines.Types["RST"]:
            if transaction.request == related:
                transaction.request.rejected = True
                # NOTE(review): writes the private `_mid` directly and then
                # falls back to a fresh MID — presumably to bypass a setter
                # on `mid`; confirm against the Message class.
                message._mid = transaction.request.mid
                if message.mid is None:
                    message.mid = self.fetch_mid()
                message.code = 0
                message.token = transaction.request.token
                # NOTE(review): unlike the ACK branch, message.source is not
                # set here — verify whether that is intentional.
                message.destination = transaction.request.source
            elif transaction.response == related:
                transaction.response.rejected = True
                transaction.completed = True
                message._mid = transaction.response.mid
                if message.mid is None:
                    message.mid = self.fetch_mid()
                message.code = 0
                message.token = transaction.response.token
                message.destination = transaction.response.source
        return message
|
# -*- coding: utf-8 -*-
import logging
import sys
import sets
import traceback
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.db.models import Q
from frontend.models import EmailMessage
from frontend.models import ImportLog
from frontend.models import OrgBookmark
from frontend.models import Profile
from frontend.models import SearchBookmark
from frontend.models import User
from frontend.views import bookmark_utils
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    args = ''
    help = ''' Send monthly emails based on bookmarks. With no arguments, sends
    an email to every user for each of their bookmarks, for the
    current month. With arguments, sends a test email to the specified
    user for the specified organisation.'''

    def add_arguments(self, parser):
        """Register CLI options for batch control and one-off test alerts."""
        parser.add_argument(
            '--recipient-email',
            help=('A single alert recipient to which the batch should be sent')
        )
        parser.add_argument(
            '--recipient-email-file',
            help=('The subset of alert recipients to which the batch should '
                  'be sent. One email per line.'))
        parser.add_argument(
            '--skip-email-file',
            help=('The subset of alert recipients to which the batch should '
                  'NOT be sent. One email per line.'))
        parser.add_argument(
            '--ccg',
            help=('If specified, a CCG code for which a test alert should be '
                  'sent to `recipient-email`')
        )
        parser.add_argument(
            '--practice',
            help=('If specified, a Practice code for which a test alert '
                  'should be sent to `recipient-email`'))
        parser.add_argument(
            '--search-name',
            help=('If specified, a name (could be anything) for a test search '
                  'alert about `url` which should be sent to '
                  '`recipient-email`'))
        parser.add_argument(
            '--url',
            help=('If specified, a URL for a test search '
                  'alert with name `search-name` which should be sent to '
                  '`recipient-email`'))
        parser.add_argument(
            '--max_errors',
            help='Max number of permitted errors before aborting the batch',
            # BUG FIX: without type=int a value supplied on the command line
            # arrives as a string, and EmailRetrier's `len(...) > max_errors`
            # comparison would raise TypeError on Python 3.
            type=int,
            default=3)

    def get_org_bookmarks(self, now_month, **options):
        """Get approved OrgBookmarks for active users who have not been sent a
        message tagged with `now_month`.

        With --recipient-email plus --ccg/--practice, returns a single
        unsaved test bookmark instead of querying the database.
        """
        # Exclude users already emailed a 'measures' alert for this month.
        query = (
            Q(approved=True, user__is_active=True) &
            ~Q(user__emailmessage__tags__contains=['measures', now_month]))
        if options['recipient_email'] and (
                options['ccg'] or options['practice']):
            # Test mode: build an in-memory bookmark for a dummy user.
            dummy_user = User(email=options['recipient_email'], id='dummyid')
            dummy_user.profile = Profile(key='dummykey')
            bookmarks = [OrgBookmark(
                user=dummy_user,
                pct_id=options['ccg'],
                practice_id=options['practice']
            )]
            logger.info("Created a single test org bookmark")
        elif options['recipient_email'] or options['recipient_email_file']:
            # Restrict the batch to an explicit list of recipients.
            recipients = []
            if options['recipient_email_file']:
                with open(options['recipient_email_file'], 'r') as f:
                    recipients = [x.strip() for x in f]
            else:
                recipients = [options['recipient_email']]
            query = query & Q(user__email__in=recipients)
            bookmarks = OrgBookmark.objects.filter(query)
            logger.info("Found %s matching org bookmarks" % bookmarks.count())
        else:
            bookmarks = OrgBookmark.objects.filter(query)
            if options['skip_email_file']:
                with open(options['skip_email_file'], 'r') as f:
                    skip = [x.strip() for x in f]
                bookmarks = bookmarks.exclude(user__email__in=skip)
            logger.info("Found %s matching org bookmarks" % bookmarks.count())
        return bookmarks

    def get_search_bookmarks(self, now_month, **options):
        """Get approved SearchBookmarks not yet alerted for `now_month`,
        or a single unsaved test bookmark when --recipient-email and --url
        are given."""
        query = (
            Q(approved=True, user__is_active=True) &
            ~Q(user__emailmessage__tags__contains=['analyse', now_month]))
        if options['recipient_email'] and options['url']:
            dummy_user = User(email=options['recipient_email'], id='dummyid')
            dummy_user.profile = Profile(key='dummykey')
            bookmarks = [SearchBookmark(
                user=dummy_user,
                url=options['url'],
                name=options['search_name']
            )]
            logger.info("Created a single test search bookmark")
        elif not options['recipient_email']:
            bookmarks = SearchBookmark.objects.filter(query)
            logger.info(
                "Found %s matching search bookmarks" % bookmarks.count())
        else:
            query = query & Q(user__email=options['recipient_email'])
            bookmarks = SearchBookmark.objects.filter(query)
            logger.info(
                "Found %s matching search bookmarks" % bookmarks.count())
        return bookmarks

    def validate_options(self, **options):
        """Reject incoherent test-mode option combinations early."""
        if ((options['url'] or options['ccg'] or options['practice']) and
                not options['recipient_email']):
            raise CommandError(
                "You must specify a test recipient email if you want to "
                "specify a test CCG, practice, or URL")
        if options['url'] and (options['practice'] or options['ccg']):
            raise CommandError(
                "You must specify either a URL, or one of a ccg or a practice"
            )

    def handle(self, *args, **options):
        """Send org and search bookmark alerts, tolerating up to
        `max_errors` individual send failures before aborting."""
        self.validate_options(**options)
        # Tag for this month's batch, derived from the latest prescribing import.
        now_month = ImportLog.objects.latest_in_category(
            'prescribing').current_at.strftime('%Y-%m-%d').lower()
        with EmailRetrier(options['max_errors']) as email_retrier:
            for org_bookmark in self.get_org_bookmarks(now_month, **options):
                def callback():
                    stats = bookmark_utils.InterestingMeasureFinder(
                        practice=org_bookmark.practice or options['practice'],
                        pct=org_bookmark.pct or options['ccg']
                    ).context_for_org_email()
                    msg = bookmark_utils.make_org_email(
                        org_bookmark, stats, tag=now_month)
                    msg = EmailMessage.objects.create_from_message(msg)
                    msg.send()
                    logger.info("Sent org bookmark alert to %s about %s" % (
                        msg.to, org_bookmark.id))
                email_retrier.try_email(callback)
            for search_bookmark in self.get_search_bookmarks(
                    now_month, **options):
                def callback():
                    recipient_id = search_bookmark.user.id
                    msg = bookmark_utils.make_search_email(
                        search_bookmark, tag=now_month)
                    msg = EmailMessage.objects.create_from_message(msg)
                    msg.send()
                    logger.info("Sent search bookmark alert to %s about %s" % (
                        recipient_id, search_bookmark.id))
                email_retrier.try_email(callback)
class BatchedEmailErrors(Exception):
    """Aggregate exception summarising one or more failed email sends.

    Takes a list of ``sys.exc_info()`` tuples and builds a single message
    from their distinct one-line tracebacks.
    """

    def __init__(self, exceptions):
        # BUG FIX: `sets.Set` is Python 2 only (the `sets` module was removed
        # in Python 3); the built-in `set` is the drop-in replacement.
        individual_messages = set()
        for exception in exceptions:
            individual_messages.add(
                "".join(traceback.format_exception_only(
                    exception[0], exception[1])).strip())
        if len(exceptions) > 1:
            msg = ("Encountered %s mail exceptions "
                   "(showing last traceback only): `%s`" % (
                       len(exceptions),
                       ", ".join(individual_messages)))
        else:
            msg = individual_messages.pop()
        super(BatchedEmailErrors, self).__init__(msg)
class EmailRetrier(object):
    """Context manager that runs email-send callbacks, collecting failures.

    Failures are recorded (not propagated) until more than `max_errors`
    have accumulated, at which point — and again on context exit if any
    failures occurred — a BatchedEmailErrors is raised carrying the last
    failure's traceback.
    """

    def __init__(self, max_errors=3):
        self.exceptions = []  # list of sys.exc_info() tuples
        self.max_errors = max_errors

    def try_email(self, callback):
        """Run `callback`, swallowing and recording any exception unless
        the error budget is exceeded."""
        try:
            callback()
        except Exception as e:
            self.exceptions.append(sys.exc_info())
            logger.exception(e)
            if len(self.exceptions) > self.max_errors:
                # BUG FIX: the original used the Python 2 three-argument
                # raise spelled as a tuple, which is a TypeError on Python 3.
                # `.with_traceback()` is the Python 3 equivalent.
                raise BatchedEmailErrors(self.exceptions).with_traceback(
                    self.exceptions[-1][2])

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.exceptions:
            # Surface any collected failures once the batch is finished.
            raise BatchedEmailErrors(self.exceptions).with_traceback(
                self.exceptions[-1][2])
|
<filename>python/number_theory.py<gh_stars>0
"""
Number theory functions.
"""
import sys
from functools import reduce
from itertools import count, islice
from math import sqrt, gcd
from operator import mul
def prod(seq):
    """Return the product of all items in *seq* (1 for an empty sequence)."""
    result = 1
    for item in seq:
        result *= item
    return result
def is_prime(n):
    """Return True if n is prime, by trial division up to sqrt(n)."""
    if n < 2 or n % 2 == 0:
        # 2 is the only even prime; everything below 2 is not prime.
        return n == 2
    return all(n % divisor for divisor in range(3, int(sqrt(n)) + 1, 2))
def primes_less_than(m):
    """Return the list of all primes strictly below m, in increasing order.

    Uses incremental trial division against the primes found so far,
    stopping once p*p exceeds the candidate.
    """
    found = []
    for candidate in range(2, m):
        composite = False
        for p in found:
            if p * p > candidate:
                break
            if candidate % p == 0:
                composite = True
                break
        if not composite:
            found.append(candidate)
    return found
def primes():
    """
    Yield the prime numbers 2, 3, 5, ... indefinitely.

    Each candidate is trial-divided by the primes found so far, stopping
    once p*p exceeds it.
    """
    known = []
    for candidate in count(2):
        composite = False
        for p in known:
            if p * p > candidate:
                break
            if candidate % p == 0:
                composite = True
                break
        if not composite:
            known.append(candidate)
            yield candidate
def nth(seq, n):
    """Return the n-th (1-based) element of iterable *seq*.

    Consumes the iterator; raises StopIteration if seq is too short.
    """
    remaining = islice(seq, n - 1, None)
    return next(remaining)
def even(n):
    """Return True if integer n is even."""
    return not n % 2
def odd(n):
    """Return True if integer n is odd."""
    return bool(n % 2)
def factor(n):
    """
    Factor an integer n, returning its prime factors in non-decreasing
    order (with multiplicity). Returns [] for any n < 2.
    """
    if n < 2:
        # BUG FIX: the original looped forever for n == 0 (0 % f == 0 for
        # every trial divisor); n == 1 and negatives already yielded [].
        return []
    f = 2
    # Odd trial divisors after 2, up to sqrt(n).
    fs = iter(range(3, int(sqrt(n))+1, 2))
    factors = []
    r = n
    try:
        while r > 1:
            while r%f==0:
                r = r//f
                factors.append(f)
            f = next(fs)
    except StopIteration:
        # All divisors up to sqrt(n) exhausted: any remainder > 1 is prime.
        if r > 1:
            factors.append(r)
    return factors
def test_factor():
    """Spot-check factor() against known factorizations."""
    cases = [
        (100, [2, 2, 5, 5]),
        (23, [23]),
        (871, [13, 67]),
        (40, [2, 2, 2, 5]),
        (2*3*5*7*11*13*17*19*23*29*31,
         [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]),
    ]
    for n, expected in cases:
        assert factor(n) == expected
def collatz_sequence(n):
    """
    Return the Collatz sequence starting at n, generated by iterating:
        a_k = a_{k-1} / 2      if a_{k-1} is even
        a_k = 3*a_{k-1} + 1    if a_{k-1} is odd
    n itself is included; the terminal 1 is excluded. The sequence is
    conjectured to always reach 1.
    """
    seq = []
    current = n
    while current > 1:
        seq.append(current)
        current = current // 2 if current % 2 == 0 else 3 * current + 1
    return seq
def test_collatz_sequence():
    """Print the Collatz sequences for 1..99 and the longest length below 1000."""
    for start in range(1, 100):
        print(collatz_sequence(start))
    longest = max(len(collatz_sequence(start)) for start in range(1000))
    print(longest)
def euler_phi(n):
    """Euler's totient: how many k in 1..n are coprime to n."""
    total = 0
    for k in range(1, n + 1):
        if gcd(n, k) == 1:
            total += 1
    return total
def order_g_mod_m(g, m):
    """Return the multiplicative order of g modulo m: the smallest
    exponent x >= 1 with g**x == 1 (mod m).

    NOTE(review): when no power of g hits 1 (g not coprime to m) this
    falls through and returns m - 1 — preserved from the original.
    """
    for exponent in range(1, m):
        if pow(g, exponent, m) == 1:
            break
    return exponent
def primitive_roots(m):
    """Return all primitive roots of m in increasing order, i.e. the
    elements g in 2..m-1 whose multiplicative order equals phi(m)."""
    target_order = euler_phi(m)
    roots = []
    for g in range(2, m):
        if order_g_mod_m(g, m) == target_order:
            roots.append(g)
    return roots
def is_primitive_root(a, m):
    """True iff a generates the multiplicative group modulo m."""
    phi = euler_phi(m)
    return order_g_mod_m(a, m) == phi
def primitive_root_permutations(m):
    """For every primitive root a of m, list the powers a^1..a^(m-1) mod m
    (each such list is a permutation-like cycle of the residues)."""
    phi = euler_phi(m)
    result = []
    for a in range(2, m):
        if order_g_mod_m(a, m) != phi:
            continue
        result.append([pow(a, x, m) for x in range(1, m)])
    return result
|
import datetime
import re
class TransactionEvent(object):
    '''Storage object for transaction Events. It seems more organized than keeping
    a bunch of lists of tuples of lists and dicts. There are some useful external methods
    such as is_dividend() to return a boolean if the transaction is a dividend payment.'''
    # Flag names recognised in the parsed command string.
    DIVIDEND_FLAG = 'div'
    BROKER_FLAG = 'b'
    DT_FLAG = 'dt'
    TIME_FLAG = 't'
    DATE_FLAG = 'date'
    DEFAULT_BROKER = 'robinhood'

    def __init__(self, ticker, amount, cost_basis, flags=None):
        """Store the transaction fields and resolve its timestamp.

        :param ticker: ticker symbol
        :param amount: number of shares
        :param cost_basis: cost basis of the transaction
        :param flags: list of (flag, value) tuples; date/time/dt flags are
            consumed into the datetime and removed from the list
        """
        self._ticker = ticker
        self._amount = amount
        self._cost_basis = cost_basis
        # BUG FIX: `flags=[]` was a mutable default argument shared between
        # calls; use None as the sentinel and build a fresh list instead.
        self.flag_list = flags if flags is not None else []  # list of tuples
        # set datetime value (consumes any date/time/dt flags)
        self._datetime = None
        self._set_datetime()

    def _set_datetime(self):
        """sets datetime value from flags and also deletes those flags from self.flag_list after it's done"""
        if len(self.flag_list) > 0:
            flags, vals = zip(*self.flag_list)  # split list of tuples into two lists
        else:
            # No flags at all: timestamp with the current UTC time.
            self._datetime = datetime.datetime.utcnow()
            return
        #----
        indexes_to_delete = []
        if (self.DATE_FLAG in flags) and (self.TIME_FLAG not in flags):
            # Date only: assume 08:00 as the time of day.
            idx = flags.index(self.DATE_FLAG)
            val = vals[idx]
            date = datetime.datetime.fromisoformat(val)
            time = datetime.time(hour=8, minute=0)
            self._datetime = datetime.datetime.combine(date, time)
            indexes_to_delete.append(idx)
        elif (self.TIME_FLAG in flags) and (self.DATE_FLAG not in flags):
            # Time only: assume today's (UTC) date.
            idx = flags.index(self.TIME_FLAG)
            val = vals[idx]
            date = datetime.datetime.utcnow().date()
            time = datetime.time.fromisoformat(val)
            self._datetime = datetime.datetime.combine(date, time)
            indexes_to_delete.append(idx)
        elif (self.DATE_FLAG in flags) and (self.TIME_FLAG in flags):
            idx_date = flags.index(self.DATE_FLAG)
            # BUG FIX: the original read `self.self.TIME_FLAG`, which raised
            # AttributeError whenever both date and time flags were supplied.
            idx_time = flags.index(self.TIME_FLAG)
            date_str = vals[idx_date]
            time_str = vals[idx_time]
            dt_str = f"{date_str} {time_str}"
            self._datetime = datetime.datetime.fromisoformat(dt_str)
            indexes_to_delete.append(idx_date)
            indexes_to_delete.append(idx_time)
        else:  # when neither date or time flags are present
            if self.DT_FLAG in flags:
                idx = flags.index(self.DT_FLAG)
                val = vals[idx]
                self._datetime = datetime.datetime.fromisoformat(val)
                indexes_to_delete.append(idx)
            else:
                # Leave unset for now. It will be handled server side on mysql.
                self._datetime = None
        # Delete the consumed flags from the flag list since they now have a
        # dedicated variable; reverse order keeps remaining indexes valid.
        for idx_pos in sorted(indexes_to_delete, reverse=True):
            del self.flag_list[idx_pos]

    def is_dividend(self):
        """Return True if this transaction carries the dividend flag."""
        if len(self.flags) < 1:
            return False
        flags, vals = zip(*self.flags)
        if self.DIVIDEND_FLAG in flags:
            return True
        else:
            return False

    def get_broker(self):
        """Return the broker named by the -b flag, or the default broker."""
        if len(self.flags) < 1:
            return self.DEFAULT_BROKER
        flags, vals = zip(*self.flags)
        if self.BROKER_FLAG in flags:
            idx = flags.index(self.BROKER_FLAG)
            broker = vals[idx]
        else:
            broker = 'robinhood'  # default broker for now
        return broker

    #-----Read-only methods
    @property
    def ticker(self):
        return self._ticker

    @property
    def amount(self):
        return self._amount

    @property
    def cost_basis(self):
        return self._cost_basis

    @property
    def datetime(self):
        return self._datetime

    @property
    def date(self):
        # None when no datetime was resolved.
        try:
            date = self.datetime.date()
        except AttributeError:
            date = None
        return date

    @property
    def time(self):
        # None when no datetime was resolved.
        try:
            time = self.datetime.time()
        except AttributeError:
            time = None
        return time

    @property
    def flags(self):
        return self.flag_list

    @property
    def broker(self):
        return self.get_broker()

    def __repr__(self):
        # NOTE(review): `{self.time:%H:%M:%S}` raises TypeError when the
        # datetime could not be resolved (self.time is None) — confirm
        # whether repr is ever needed in that state.
        return f"<TransactionEvent>: {self.ticker:>5} {float(self.amount):.7f} {float(self.cost_basis):.4f} {self.date} {self.time:%H:%M:%S} {self.flags}"
def command_parser(cmd_str): #aka get_ticker_dict_data
    """Processes a command str without the instruction word ('add','sell','sub','buy',etc).

    Each comma-separated segment is one transaction: an optional ticker
    (omitted tickers reuse the previous one), a share amount, a cost basis,
    and optional -flags. Returns a dict mapping ticker -> list of
    TransactionEvent. Raises ValueError if the first segment has no ticker.
    """
    #clean-up process
    cmd_str = cmd_str.lower().strip()
    raw_transactions = [transaction.strip() for transaction in cmd_str.split(',')] #each transaction is separated by a comma
    #patterns
    ticker_pattern = r"[a-zA-Z]+" #could use some alteration of r"(?<!-)[a-zA-Z]+" to ignore flags, but it is already taken care of
    #================MAIN LOOP
    last_ticker = ''
    transactions_dict = {} #dict to map ticker symbol to list of TransactionEvent objects
    # NOTE(review): flag_idx is maintained below but never read afterwards —
    # presumably a leftover of an earlier flag-per-ticker scheme; confirm
    # before removing.
    flag_idx=0
    for idx, trans_str in enumerate(raw_transactions):
        #Parse flags
        flags,trans_str = get_flags(trans_str) #returns flags and a new string without flags
        # Match the ticker symbol
        res = re.match(ticker_pattern,trans_str)
        if res:
            ticker = res.group()
            trans_str = re.sub(ticker,'',trans_str).strip() #remove ticker from string
            if ticker!=last_ticker: #reset flag index only when new ticker is different (to prevent cases when user enters same ticker explicitely i.e: add nrz 0.3 200, nrz 4 10.5)
                flag_idx =0
            else:
                flag_idx += 1
        else:
            # No ticker in this segment: inherit the previous one.
            if len(last_ticker) > 0:
                ticker = last_ticker
                flag_idx += 1
            else:
                print(f"Could not find valid ticker symbol in '{trans_str}' string.\n" + \
                      f"Full command string:\n'{cmd_str}'")
                raise ValueError
        #Determine the parameters of the transaction (amount, cost_basis, etc)
        trans_items = trans_str.split()
        num_shares = trans_items[0]
        cost_basis = trans_items[1]
        t = TransactionEvent(ticker=ticker,
                             amount=num_shares,
                             cost_basis=cost_basis,
                             flags=flags)
        try:
            transactions_dict[ticker].append(t)
        except KeyError:
            transactions_dict[ticker] = [t] #create list with first item
        last_ticker = ticker
    return transactions_dict
def command_engine(command_str):
    '''
    Parses a string containing stock transactions. To simplify things, there
    is no instruction word, just a list of comma-separated transactions.
    (Semi-colon separated instruction groups — add, sub, buy, sell — are no
    longer supported; such input will fail downstream.)

    Parameters
    ----------
    command_str: str
        The string to parse. It should contain comma-separated transactions.
        For example:
        aapl 3.5 148.5 -div -b robinhood, msft 10 220.8 -b robinhood, nrz 5 9.68

    Returns
    -------
    dict
        Dictionary mapping ticker symbols to lists of TransactionEvent
        objects, e.g. {'aapl': [<TransactionEvent>, ...], 'nrz': [...]}.
    '''
    # Thin wrapper around command_parser, kept as a separate entry point in
    # case the raw command string ever needs pre-processing (it used to
    # replace semi-colons with commas here).
    return command_parser(command_str)
def get_flags(flag_string):
    """
    Extract `-flag [value]` pairs from a command string.

    Returns a (flags, clean_string) tuple: `flags` is a list of
    (name, value) tuples where value may be '' for bare flags, and
    `clean_string` is the input with every flag occurrence removed.

    Sample string:
        s = 'python main.py -f hello -d -broker robinhood -t 2000-10-01 14:44:20.999 -D -R'
    result from re.findall(wp, s):
        [('f', 'hello'),
         ('d', ''),
         ('broker', 'robinhood'),
         ('t', '2000-10-01 14:44:20.999'),
         ('D', ''),
         ('R', '')]
    """
    word_pattern = r"[a-zA-Z]+"
    # Flexible time pattern (digits, colons and dots in any arrangement).
    time_pattern = r"[0-9\:\.]+"
    datetime_pattern = f"[0-9]+\-[0-9]+\-[0-9]+\s*{time_pattern}"
    # A flag name followed by an optional word / datetime / time value.
    wp = f"\s*-([a-zA-Z]+)\s*({word_pattern}|{datetime_pattern}|{time_pattern}|\s*)"
    extracted = re.findall(wp, flag_string)
    remainder = re.sub(wp, '', flag_string)
    return extracted, remainder
import os
import pandas as pd
import datetime
import csv
import re
import calendar
import numpy as np
import json
H5_FOLDER = os.path.join(os.getcwd(),'data/h5')
def parse(csv_file, session_id, file_type):
    """Validate a powercor interval CSV and cache it as an HDF5 store.

    Writes two tables keyed by `session_id`:
      - 'master': one row per half-hour reading (date, meter, usage,
        dayofweek, weekno)
      - 'grouped': the same data summed per calendar day

    Does nothing if the store already exists or validation fails.
    NOTE(review): `file_type` is accepted but only 'powercor' files are
    supported; validation is hard-coded to 'powercor'.
    """
    # Consistency fix: every other function in this module builds the store
    # path with os.path.join; the original concatenated strings here.
    store_path = os.path.join(H5_FOLDER, session_id + '.h5')
    # If this session/hash was already parsed, skip the work.
    if os.path.exists(store_path):
        return
    # validate csv (only powercor files are supported)
    if (validate(csv_file, 'powercor')):
        csv_data = pd.read_csv(csv_file, usecols=[1, 2, 4, 7], header=None,
                               names=['date', 'time', 'meter', 'usage'])
        csv_data['date'] = pd.to_datetime(csv_data['date'], format='%d/%m/%Y')
        csv_data['date'] = pd.DatetimeIndex(csv_data['date'])
        csv_data['dayofweek'] = csv_data['date'].map(lambda x: x.weekday())
        # Convert 'HH:MM' strings into timedeltas so they can be added to
        # the (midnight) dates to get full timestamps.
        csv_data['time'] = csv_data['time'].map(
            lambda x: datetime.timedelta(hours=datetime.datetime.strptime(x, '%H:%M').hour,
                                         minutes=datetime.datetime.strptime(x, '%H:%M').minute))
        csv_data['date'] = csv_data['date'] + csv_data['time']
        csv_data['weekno'] = csv_data['date'].map(lambda x: x.date().isocalendar()[1])
        del csv_data['time']
        # Per-day aggregate used by the summary/heatmap views.
        csv_grouped_data = csv_data.groupby(csv_data['date'].dt.date).sum().reset_index()
        csv_grouped_data['date'] = pd.DatetimeIndex(csv_grouped_data['date'])
        csv_grouped_data['dayofweek'] = csv_grouped_data['date'].map(lambda x: x.weekday())
        csv_grouped_data['weekno'] = csv_grouped_data['date'].map(lambda x: x.date().isocalendar()[1])
        hdf_store = pd.HDFStore(store_path)
        try:
            hdf_store.put('master', csv_data, format='table', append=False)
            hdf_store.put('grouped', csv_grouped_data, format='table', append=False)
        finally:
            # Robustness fix: the original leaked the store handle if a put
            # failed; always close it.
            hdf_store.close()
def validate(csv_file, file_type):
    """Cheaply validate a CSV by inspecting only its first row.

    For file_type 'powercor' the row must have exactly 12 columns, a
    dd/mm/yyyy date in column 1, an hh:mm time in column 2 and a float
    usage value in column 7. Returns True/False for 'powercor'; falls
    through (returning None) for unknown file types, as before.
    """
    # get the first row
    with open(csv_file, 'r') as csvfile:
        csv_file_reader = csv.reader(csvfile, delimiter=',')
        firstrow = next(csv_file_reader)
    if file_type == 'powercor':
        # check columns 1, 2, 4, 7 — first check for 12 columns
        if len(firstrow) != 12:
            return False
        # check date format for column 1
        if not bool(re.match(r'\d\d\/\d\d\/\d\d\d\d', firstrow[1])):
            return False
        # check time format for column 2
        if not bool(re.match(r'\d\d:\d\d', firstrow[2])):
            return False
        # meter/consumption column skipped — not currently used
        # check usage column parses as a float.
        # BUG FIX: the original used a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; float() only raises these two.
        try:
            float(firstrow[7])
        except (TypeError, ValueError):
            return False
        return True
def generateHeatmapData(session_id):
    """Build a plotly heatmap payload of daily usage for the session.

    x: day of month (1..31); y: "MonthName/Year" labels (newest first);
    z: one row of per-day usage sums per month. Months whose days are all
    zero usage are skipped entirely.
    """
    path = os.path.join(H5_FOLDER,session_id+'.h5')
    grouped = pd.read_hdf(path,key='grouped')
    uniqueyears = list(grouped['date'].dt.year.unique())
    monthyearbreakdowns = []
    yvalues = []
    xval = [x for x in range(1,32)]
    for year in uniqueyears:
        for month in range(1,13):
            value = []
            yvalue = str(calendar.month_name[month]) + "/" + str(year)
            for day in calendar.Calendar().itermonthdays(year,month):
                # itermonthdays pads with 0 for days that belong to the
                # neighbouring months; skip the padding.
                if day == 0:
                    continue
                # Sum of usage for this calendar day (0.0 if absent).
                dayval = grouped[(grouped['date'].dt.date == datetime.date(year,month,day))]['usage'].sum()
                value.append(dayval)
            # Only keep months that have at least one non-zero day.
            if (len(value) > 0 and not all (item == np.float64(0) for item in value)):
                monthyearbreakdowns.append(list(value))
                yvalues.append(yvalue)
    # Newest months first.
    monthyearbreakdowns.reverse()
    yvalues.reverse()
    data = {'x': xval,'y': yvalues, 'z': monthyearbreakdowns, 'type':'heatmap'}
    return data
def generateweekdayweekendData(session_id):
    """Bar-chart payload comparing mean daily usage on weekdays vs weekends.

    NOTE(review): `.mean()['usage']` averages every numeric column and then
    picks 'usage'; newer pandas deprecates DataFrame.mean over mixed frames —
    verify before upgrading pandas.
    """
    path = os.path.join(H5_FOLDER,session_id+'.h5')
    grouped = pd.read_hdf(path,key='grouped')
    # dayofweek: 0-4 = Mon-Fri, 5-6 = Sat/Sun.
    yvalues = [grouped[(grouped['dayofweek']>= 0) & (grouped['dayofweek'] <= 4)].mean()['usage'],grouped[(grouped['dayofweek'] >= 5) & (grouped['dayofweek'] <=6)].mean()['usage']]
    xvalues = ['Weekdays','Weekend']
    data = {'x': xvalues, 'y': yvalues, 'type': 'bar'}
    return data
def generatedaybreakdown(session_id):
    """Bar-chart payload of mean daily usage for each weekday (Mon..Sun)."""
    store_path = os.path.join(H5_FOLDER, session_id + '.h5')
    frame = pd.read_hdf(store_path, key='grouped')
    # One mean per weekday, 0 = Monday .. 6 = Sunday.
    means = [frame[frame['dayofweek'] == weekday]['usage'].mean()
             for weekday in range(0, 7)]
    return {'x': list(calendar.day_name), 'y': means, 'type': 'bar'}
def generatetimeofdaybreakdown(session_id):
    """Line-chart payloads of mean usage per month, split into morning
    (00:00-12:00), afternoon (12:30-18:00) and night (18:30-23:30).

    Months without a full set of daily readings are skipped.
    Returns a list of three plotly 'lines+markers' series.
    """
    path = os.path.join(H5_FOLDER,session_id+'.h5')
    master = pd.read_hdf(path,key='master')
    uniqueyears = list(master['date'].dt.year.unique())
    #scatter with morning (00:00 to 12:00)/afternoon (12:30 to 18:00)/night (18:30 to 23:30) over months
    morningvalues = []
    afternoonvalues = []
    nightvalues = []
    xvalues=[]
    for year in uniqueyears:
        uniquemonths = list(master[(master['date'].dt.year == year)]['date'].dt.month.unique())
        for month in uniquemonths:
            monthvalues = master[(master['date'].dt.year == year) & (master['date'].dt.month == month)]
            morning = monthvalues[(monthvalues['date'].dt.time >= datetime.time(0,0)) & (monthvalues['date'].dt.time <= datetime.time(12,0))]['usage'].mean()
            afternoon = monthvalues[(monthvalues['date'].dt.time >= datetime.time(12,30)) & (monthvalues['date'].dt.time <= datetime.time(18,0))]['usage'].mean()
            # BUG FIX: the first mask was built from `master` instead of
            # `monthvalues`, producing a boolean index of a different length
            # than the frame it was applied to.
            night = monthvalues[(monthvalues['date'].dt.time >= datetime.time(18,30)) & (monthvalues['date'].dt.time <= datetime.time(23,30))]['usage'].mean()
            xvalue = str(calendar.month_name[month]) + "/" + str(year)
            monthdays = calendar.monthrange(year,month)[1]
            # Only include complete months (a reading on every day).
            if not (len(monthvalues['date'].dt.date.unique()) != monthdays):
                morningvalues.append(morning)
                afternoonvalues.append(afternoon)
                nightvalues.append(night)
                xvalues.append(xvalue)
    datalist = []
    morninggraphdata = {'x': xvalues, 'y': morningvalues, 'mode': 'lines+markers', 'name':'morning' }
    afternoongraphdata = {'x': xvalues, 'y': afternoonvalues, 'mode': 'lines+markers', 'name':'afternoon' }
    nightgraphdata = {'x': xvalues, 'y': nightvalues, 'mode': 'lines+markers', 'name':'night' }
    datalist.append(morninggraphdata)
    datalist.append(afternoongraphdata)
    datalist.append(nightgraphdata)
    return datalist
def generatesummarydata(session_id):
    """Summary statistics for a session's daily usage.

    Returns a dict with total usage, number of collection days, mean daily
    usage, the flattened highest/lowest-usage day records, and the first
    and last dates in the store (rounding floats to 3 dp).
    """
    path = os.path.join(H5_FOLDER,session_id+'.h5')
    grouped = pd.read_hdf(path,key='grouped')
    summarydata = {}
    usagestats = dict(grouped['usage'].describe())
    # Sort descending so head(1) is the highest-usage day, tail(1) the lowest.
    sortedusage = grouped.sort_values(by='usage', ascending=False)
    highestusage = flattenusagedict(sortedusage.head(1).to_dict(orient='list'))
    lowestusage = flattenusagedict(sortedusage.tail(1).to_dict(orient='list'))
    usagesum = grouped['usage'].sum()
    summarydata['session_id'] = session_id
    summarydata['usagesum'] = round(float(usagesum),3)
    summarydata['collectiondays'] = usagestats['count']
    summarydata['usageaverage'] = round(float(usagestats['mean']),3)
    summarydata['highestusage'] = highestusage
    summarydata['lowestusage'] = lowestusage
    # First and last dates in store order (the store is written date-sorted).
    summarydata['startdate'] = list(grouped.head(1)['date'].to_dict().values())[0]
    summarydata['enddate'] = list(grouped.tail(1)['date'].to_dict().values())[0]
    return summarydata
def flattenusagedict(usagedict):
    """Collapse a single-row `to_dict(orient='list')` record into scalars,
    rounding 'usage' to 3 decimal places. Mutates and returns the dict."""
    for key in ('date', 'dayofweek', 'weekno'):
        usagedict[key] = usagedict[key][0]
    usagedict['usage'] = round(float(usagedict['usage'][0]), 3)
    return usagedict
#!/usr/bin/env python3
"""
A script to run on your phone (running in Termux under Android).
"""
# TODO local sync with SFTP to an isolated location on the computer? Some watcher would pick up the files.
# TODO sync to cloud
# - get last filename from scaleaway
# - send encrypted chunks (contain entire photos)
# - encrypted manifest of files
# - can we use minio or nextcloud? Then encryption wouldn't be necessary.
# - would be nice if VM drive would be encrypted
# TODO make it possible to specify the device by the hostname. Look it up first? How?
# TODO Getting the script args with Typer would be better, but I'd need to package it with the dependencies.
# Or vendor typer into this repo.
import argparse
from datetime import datetime
from pathlib import Path
import shlex
import shutil
import subprocess
from typing import List, Optional
MEDIA_DIR = Path('/storage/9C33-6BBD/DCIM/Camera/')
PC_PHOTOS_DIR = '/data/zdjęcia_i_filmiki/telefon/'
# if I'll add libraries I should use this https://github.com/ActiveState/appdirs
TRANSFER_LOGS_DIR = Path('~/.local/share/phone_media_transfer/').expanduser()
def _get_latest_synced_media_file(pc_ip: str) -> str:
    """Ask the PC over SSH for the newest synced media file; return its bare name."""
    command = shlex.split(f'ssh butla@{pc_ip} "~/bin/ostatnia_fota"')
    raw_output = subprocess.check_output(command)
    remote_path = raw_output.decode().strip()
    return Path(remote_path).name
def _transfer_photos(files_to_send: List[Path], pc_ip: str, target_path: str = PC_PHOTOS_DIR):
    """Rsync the given media files to the PC, recording the transferred list.

    Writes the file names (one per line, relative to MEDIA_DIR) into a
    timestamped log file and passes that to rsync via --files-from.
    Raises CalledProcessError if rsync fails.
    """
    # Robustness fix: the log directory does not exist on a fresh install,
    # which made the open() below fail.
    TRANSFER_LOGS_DIR.mkdir(parents=True, exist_ok=True)
    # TODO make this timezone aware
    media_list_filename = TRANSFER_LOGS_DIR / f'media_to_transfer_on_{datetime.now().isoformat()}.txt'
    with open(media_list_filename, 'w') as media_list_file:
        for path in files_to_send:
            # Taking only the name of the file, so that all paths are relative to the media directory,
            # so that rsync won't put the files in any subdirectories.
            # There shouldn't be any subdirectories there.
            media_list_file.write(path.name + '\n')

    print(f'Transferring the files listed in {media_list_filename} with rsync...')
    subprocess.run(
        [
            'rsync', '--update', '--no-owner', '--progress',
            '--files-from', str(media_list_filename),
            str(MEDIA_DIR), f'butla@{pc_ip}:{target_path}'
        ],
        check=True,
    )
def _get_files_to_send(
media_folder: Path,
older_than_file_name: str,
up_to_file: Optional[str] = None,
) -> List[Path]:
"""
Args:
media_folder: folder we'll take the media files from
older_than_file_name: this file name won't be included in the set
up_to_file: this file will be included in the set
"""
files_without_upper_boundary = (
path for path in media_folder.iterdir()
if path.name > older_than_file_name
)
if up_to_file:
files = (path for path in files_without_upper_boundary if path.name <= up_to_file)
else:
files = files_without_upper_boundary
return sorted(files)
def _parse_program_args() -> argparse.Namespace:
    """Define and evaluate the command-line interface of the transfer script."""
    parser = argparse.ArgumentParser(
        description='Transfers media files to another machine.',
    )
    # TODO make this a "source"
    # Can either be a local dir (if you're using simple-mtpfs) or an IP address.
    parser.add_argument(
        'pc_ip',
        type=str,
        help='IP of the machine we are transferring the photos to',
    )
    parser.add_argument(
        'files_after',
        nargs='?',
        type=str,
        default='',
        help='Take media files after this one.',
    )
    parser.add_argument(
        'up_to_file',
        nargs='?',
        type=str,
        default='',
        help='Take media files up to and including this one.',
    )
    # TODO maybe there should be a mapping of target locations for given hostnames
    parser.add_argument('--dest-dir', type=str, default=PC_PHOTOS_DIR)
    return parser.parse_args()
def send_over_wlan(
    pc_ip: str,
    destination_directory: str,
    last_synced_file: Optional[str] = None,
    up_to_file: Optional[str] = None,
):
    """Sync phone media to the PC over the network.

    When *last_synced_file* is empty, the newest photo already on the PC is
    looked up over SSH and used as the (exclusive) lower bound.
    """
    if not last_synced_file:
        print('Checking the last photo on the PC...')
        last_synced_file = _get_latest_synced_media_file(pc_ip)
        print('Last photo on the PC is', last_synced_file)
    upper_bound_label = up_to_file or 'latest'
    print('Syncing photos coming after', last_synced_file, 'up to', upper_bound_label)
    pending_files = _get_files_to_send(
        media_folder=MEDIA_DIR,
        older_than_file_name=last_synced_file,
        up_to_file=up_to_file,
    )
    _transfer_photos(pending_files, pc_ip, target_path=destination_directory)
    print('Success!')
def pull_over_cable_with_mtp():
    """Copy media from an MTP-mounted phone into ./transfer_target.

    Mount first with: simple-mtpfs -v --device 1 ~/Downloads/bla
    Keep that process in the background and stop it after the transfer.
    It might take a while before the files start getting read.
    """
    files = _get_files_to_send(
        media_folder=Path('/home/butla/Downloads/bla/Card/DCIM/Camera'),
        older_than_file_name='20210425',
    )
    print('there are', len(files), 'files')
    for media_file in files:
        shutil.copy(media_file, 'transfer_target')
        print('Copied', media_file)
if __name__ == '__main__':
    # Manual alternative flow: pull media over a USB/MTP mount instead of WLAN.
    # pull_over_cable_with_mtp()
    arg_parser = _parse_program_args()
    send_over_wlan(
        pc_ip=arg_parser.pc_ip,
        destination_directory=arg_parser.dest_dir,
        last_synced_file=arg_parser.files_after,
        up_to_file=arg_parser.up_to_file,
    )
|
#===============================================================================
# Imports
#===============================================================================
from ..logic import (
Mutex,
)
import itertools
from ..util import (
defaultdict,
Dict,
OrderedDict,
OrderedDefaultDict,
)
#===============================================================================
# Globals/Aliases
#===============================================================================
# Public type-line class names exported by this module (one per `dt` line kind).
Names = [
    'Bitfield',
    'Struct',
    'Union',
    'Enum',
    'Char',
    'Byte',
    'WideChar',
    'SignedShort',
    'UnsignedShort',
    'HResult',
    'SignedLong',
    'UnsignedLong',
    'SignedLongLong',
    'UnsignedLongLong',
    'Float',
    'Array',
    'CString',
    'WideCString',
    'FunctionPointer',
    'PointerToPointer',
    'VoidPointer',
    'DataPointer',
]
#===============================================================================
# Invalid Line Exceptions
#===============================================================================
class InvalidLine(Exception):
    """Raised when a `dt` output line cannot be parsed.

    Carries the whole offending line and/or just the offending fragment.
    """
    def __init__(self, line=None, part=None):
        self.line, self.part = line, part
# One InvalidLine subclass per parser below, so callers can catch parse
# failures of a specific line kind; none adds behaviour beyond the type.
class InvalidBitfieldLine(InvalidLine):
    pass
class InvalidStructLine(InvalidLine):
    pass
class InvalidUnionLine(InvalidLine):
    pass
class InvalidEnumLine(InvalidLine):
    pass
class InvalidCharLine(InvalidLine):
    pass
class InvalidByteLine(InvalidLine):
    pass
class InvalidWideCharLine(InvalidLine):
    pass
class InvalidHResultLine(InvalidLine):
    pass
class InvalidSignedShortLine(InvalidLine):
    pass
class InvalidUnsignedShortLine(InvalidLine):
    pass
class InvalidSignedLongLine(InvalidLine):
    pass
class InvalidUnsignedLongLine(InvalidLine):
    pass
class InvalidSignedLongLongLine(InvalidLine):
    pass
class InvalidUnsignedLongLongLine(InvalidLine):
    pass
class InvalidFloatLine(InvalidLine):
    pass
class InvalidArrayLine(InvalidLine):
    pass
class InvalidCStringLine(InvalidLine):
    pass
class InvalidWideCStringLine(InvalidLine):
    pass
class InvalidFunctionPointerLine(InvalidLine):
    pass
class InvalidPointerToPointerLine(InvalidLine):
    pass
class InvalidVoidPointerLine(InvalidLine):
    pass
class InvalidDataPointerLine(InvalidLine):
    pass
#===============================================================================
# Type Helpers
#===============================================================================
def extract_type(line):
    """Classify one `dt` type-description line and return the matching *Line
    instance (BitfieldLine, StructLine, ..., DataPointerLine).

    The classification flags are collected on a Mutex from ..logic, which
    presumably enforces that exactly one holds — confirm its semantics.
    """
    parts = line.split(' ')
    first = parts[0]
    last = parts[-1]  # (unused)
    m = Mutex()
    m.is_bitfield = (first == 'Bitfield')
    m.is_union = (first == 'union')
    m.is_struct = (first == 'struct')
    m.is_enum = (first == 'Enum')
    m.is_char = (first == 'Char')
    m.is_byte = (first == 'UChar')
    m.is_wide_char = (first == 'Wchar')
    m.is_short = (first == 'Int2B')
    m.is_ushort = (first == 'Uint2B')
    m.is_hresult = (first == 'HRESULT')
    m.is_long = (first == 'Int4B')
    m.is_ulong = (first == 'Uint4B')
    m.is_longlong = (first == 'Int8B')
    m.is_ulonglong = (first == 'Uint8B')
    m.is_float = (first == 'Float')
    m.is_array = (first[0] == '[')
    # NOTE(review): this matches ANY "Ptr64 to ..." line, not only function
    # pointers; the elif ordering below is what lets Char/Wchar/Void/Ptr64
    # pointers be classified first — confirm that is intended.
    m.is_function_pointer = (line.startswith('Ptr64 to '))
    m.is_pointer_to_pointer = (line.startswith('Ptr64 to Ptr64'))
    m.is_void_pointer = (line.startswith('Ptr64 to Void'))
    # NOTE(review): the first two clauses contradict each other
    # (startswith('Ptr64 to ') AND not startswith('Ptr64 to ')), so
    # is_data_pointer is ALWAYS False and DataPointerLine is never produced.
    # The second clause was likely meant to exclude something narrower —
    # needs a deliberate fix once the intent is confirmed.
    m.is_data_pointer = (
        line.startswith('Ptr64 to ') and
        not line.startswith('Ptr64 to ') and
        not line.startswith('Ptr64 to Ptr64') and
        not line.startswith('Ptr64 to Char') and
        not line.startswith('Ptr64 to Wchar') and
        not line.startswith('Ptr64 to Void')
    )
    m.is_cstring = (line.startswith('Ptr64 to Char'))
    m.is_wide_cstring = (line.startswith('Ptr64 to Wchar'))
    with m as m:
        if m.is_bitfield:
            t = BitfieldLine(line)
        elif m.is_union:
            t = UnionLine(line)
        elif m.is_struct:
            t = StructLine(line)
        elif m.is_enum:
            t = EnumLine(line)
        elif m.is_char:
            t = CharLine(line)
        elif m.is_byte:
            t = ByteLine(line)
        elif m.is_wide_char:
            t = WideCharLine(line)
        elif m.is_short:
            t = SignedShortLine(line)
        elif m.is_ushort:
            t = UnsignedShortLine(line)
        elif m.is_hresult:
            t = HResultLine(line)
        elif m.is_long:
            t = SignedLongLine(line)
        elif m.is_ulong:
            t = UnsignedLongLine(line)
        elif m.is_longlong:
            t = SignedLongLongLine(line)
        elif m.is_ulonglong:
            t = UnsignedLongLongLine(line)
        elif m.is_float:
            t = FloatLine(line)
        elif m.is_array:
            t = ArrayLine(line)
        elif m.is_cstring:
            t = CStringLine(line)
        elif m.is_wide_cstring:
            t = WideCStringLine(line)
        elif m.is_function_pointer:
            t = FunctionPointerLine(line)
        elif m.is_pointer_to_pointer:
            t = PointerToPointerLine(line)
        elif m.is_void_pointer:
            t = VoidPointerLine(line)
        elif m.is_data_pointer:
            t = DataPointerLine(line)
    # NOTE(review): if no flag matched, `t` is unbound and this raises
    # NameError rather than a descriptive InvalidLine error.
    return t
#===============================================================================
# Classes
#===============================================================================
class BaseLine(object):
    """Common base for all parsed `dt` line types.

    Subclasses may define a classmethod ``parse(line)`` returning a dict of
    attributes; when present, its result is applied onto the instance.
    """
    name = None
    line = None
    offset = None
    field_name = None
    is_numeric = False
    is_integer = False
    is_decimal = False
    is_pointer = False
    is_unnamed = False
    is_bitfield = False
    is_character = False
    is_composite = False
    __keys__ = []
    __default_keys__ = [
        'name',
        'line',
        'offset',
        'field_name',
        'size_in_bytes',
        'number_of_elements',
    ]
    def __init__(self, line):
        """Store *line* and apply the subclass's parsed fields, if any."""
        self.line = line
        try:
            fields = self.parse(line)
        except AttributeError:
            # Simple line types define no parse(); nothing more to do.
            fields = None
        if not fields:
            return
        for (key, value) in fields.items():
            setattr(self, key, value)
    def __repr__(self):
        """Show only attributes that are present and truthy."""
        shown = [
            key
            for key in self.__keys__ + self.__default_keys__
            if hasattr(self, key) and getattr(self, key)
        ]
        attrs = ', '.join(
            '%s=%r' % (key, getattr(self, key)) for key in shown
        )
        return '<%s %s>' % (self.__class__.__name__, attrs)
class BitfieldLine(BaseLine):
    """Parsed `dt` bitfield line: ``Bitfield Pos <n>, <m> Bit(s)``."""
    is_bitfield = True
    bit_position = None
    number_of_bits = None
    __keys__ = [
        'bit_position',
        'number_of_bits',
    ]
    @classmethod
    def parse(cls, line):
        """Return the bit position and width parsed from *line*.

        Raises:
            InvalidBitfieldLine: when *line* does not have the expected
                ``Bitfield Pos <n>, <m> Bit(s)`` shape.
        """
        parts = line.split(', ')
        # BUG FIX: a line without exactly one ", " separator used to escape
        # as a bare ValueError from tuple unpacking instead of the module's
        # dedicated exception type.
        if len(parts) != 2:
            raise InvalidBitfieldLine(line=line)
        (left, right) = parts
        prefix = 'Bitfield Pos '
        if not left.startswith(prefix):
            raise InvalidBitfieldLine(line=line)
        try:
            bit_position = int(left.replace(prefix, ''))
        except ValueError:
            raise InvalidBitfieldLine(part=left)
        if not right.endswith(' Bit') and not right.endswith(' Bits'):
            raise InvalidBitfieldLine(part=right)
        bit_part = right.split(' ')[0]
        try:
            number_of_bits = int(bit_part)
        except ValueError:
            raise InvalidBitfieldLine(part=bit_part)
        return {
            'bit_position': bit_position,
            'number_of_bits': number_of_bits,
        }
class StructLine(BaseLine):
    """Parsed `dt` header line: ``struct _NAME, N elements, 0xSIZE bytes``."""
    type_name = None
    struct_name = None
    is_composite = True
    size_in_bytes = None
    number_of_elements = None
    __keys__ = [
        'type_name',
        'struct_name',
    ]
    @classmethod
    def parse(cls, line):
        """Extract struct name, element count and (hex) byte size from *line*."""
        (left, center, right) = line.split(', ')
        if not left.startswith('struct '):
            raise InvalidStructLine(part=left)
        if not center.endswith((' element', ' elements')):
            raise InvalidStructLine(part=center)
        if not right.endswith((' byte', ' bytes')):
            raise InvalidStructLine(part=right)
        struct_name = left[len('struct '):]
        # A leading underscore marks the raw struct tag; strip it for the
        # friendly type name.
        type_name = struct_name[1:] if struct_name[0] == '_' else None
        element_part = center.split(' ')[0]
        try:
            number_of_elements = int(element_part)
        except ValueError:
            raise InvalidStructLine(part=element_part)
        size_part = right.split(' ')[0]
        try:
            # The size is printed in hex by `dt -v` (e.g. "0x68 bytes").
            size_in_bytes = int(size_part, 16)
        except ValueError:
            raise InvalidStructLine(part=size_part)
        return {
            'name': type_name or struct_name,
            'type_name': type_name,
            'struct_name': struct_name,
            'size_in_bytes': size_in_bytes,
            'number_of_elements': number_of_elements,
        }
class UnionLine(BaseLine):
    """Parsed `dt` header line: ``union _NAME, N elements, 0xSIZE bytes``."""
    type_name = None
    union_name = None
    is_composite = True
    size_in_bytes = None
    number_of_elements = None
    # BUG FIX: __keys__ previously listed 'struct_name', an attribute this
    # class never sets, so repr() never showed the union's name.
    __keys__ = [
        'type_name',
        'union_name',
    ]
    @classmethod
    def parse(cls, line):
        """Extract union name, element count and (hex) byte size from *line*.

        Raises:
            InvalidUnionLine: when any of the three comma-separated parts
                does not match the expected shape.
        """
        parts = line.split(', ')
        if len(parts) != 3:
            raise InvalidUnionLine(line=line)
        (left, center, right) = parts
        if not left.startswith('union '):
            raise InvalidUnionLine(part=left)
        if not center.endswith(' element') and not center.endswith(' elements'):
            raise InvalidUnionLine(part=center)
        if not right.endswith(' byte') and not right.endswith(' bytes'):
            raise InvalidUnionLine(part=right)
        type_name = None
        union_name = left[len('union '):]
        if union_name[0] == '_':
            type_name = union_name[1:]
        name = (type_name if type_name else union_name)
        element_part = center.split(' ')[0]
        try:
            number_of_elements = int(element_part)
        except ValueError:
            # BUG FIX: was InvalidStructLine, masking union parse errors.
            raise InvalidUnionLine(part=element_part)
        size_part = right.split(' ')[0]
        try:
            size_in_bytes = int(size_part, 16)
        except ValueError:
            # BUG FIX: was InvalidStructLine here too.
            raise InvalidUnionLine(part=size_part)
        return {
            'name': name,
            'type_name': type_name,
            'union_name': union_name,
            'size_in_bytes': size_in_bytes,
            'number_of_elements': number_of_elements,
        }
class EnumLine(BaseLine):
    """Parsed `dt` line describing an enum: ``Enum _NAME, N total enums``."""
    is_integer = True
    is_numeric = True
    type_name = None
    enum_name = None
    # Enums are represented as 32-bit integers.
    size_in_bytes = 4
    number_of_enums = None
    __keys__ = [
        'type_name',
        'enum_name',
        'number_of_enums',
    ]
    @classmethod
    def parse(cls, line):
        """Extract the enum name and enumerator count from *line*."""
        (left, right) = line.split(', ')
        if not left.startswith('Enum '):
            raise InvalidEnumLine(part=left)
        if not right.endswith((' total enum', ' total enums')):
            raise InvalidEnumLine(part=right)
        enum_name = left[len('Enum '):]
        # A leading underscore marks the raw tag; strip it for the type name.
        type_name = enum_name[1:] if enum_name[0] == '_' else None
        enum_part = right.split(' ')[0]
        try:
            number_of_enums = int(enum_part)
        except ValueError:
            raise InvalidEnumLine(part=enum_part)
        return {
            'name': type_name or enum_name,
            'type_name': type_name,
            'enum_name': enum_name,
            'number_of_enums': number_of_enums,
        }
class CharLine(BaseLine):
    # 8-bit character ("Char" in `dt` output).
    is_character = True
    size_in_bytes = 1
class ByteLine(BaseLine):
    # 8-bit unsigned byte ("UChar").
    size_in_bytes = 1
class WideCharLine(BaseLine):
    # 16-bit wide character ("Wchar").
    size_in_bytes = 2
class BaseIntegerLine(BaseLine):
    # Shared flags for the fixed-width integer `dt` line types below.
    is_signed = None
    is_integer = True
    is_numeric = True
    def __hash__(self):
        # XOR-folds the type flags with the byte size.
        # NOTE(review): __hash__ is defined without a matching __eq__, so
        # equality remains identity-based while hashes collide across widths
        # — confirm this combination is intended.
        return (
            self.is_signed ^
            self.is_numeric ^
            self.is_integer ^
            self.size_in_bytes
        )
# Concrete fixed-width integer line types as printed by `dt`
# (Int2B/Uint2B/Int4B/Uint4B/Int8B/Uint8B).
class SignedShortLine(BaseIntegerLine):
    is_signed = True
    size_in_bytes = 2
class UnsignedShortLine(BaseIntegerLine):
    is_signed = False
    size_in_bytes = 2
class SignedLongLine(BaseIntegerLine):
    is_signed = True
    size_in_bytes = 4
# HRESULT is modelled as a signed 32-bit integer.
class HResultLine(SignedLongLine):
    pass
class UnsignedLongLine(BaseIntegerLine):
    is_signed = False
    size_in_bytes = 4
class SignedLongLongLine(BaseIntegerLine):
    is_signed = True
    size_in_bytes = 8
class UnsignedLongLongLine(BaseIntegerLine):
    is_signed = False
    size_in_bytes = 8
class FloatLine(BaseLine):
    is_numeric = True
    # NOTE(review): 8 bytes matches a double; `dt`'s "Float" is typically
    # 4 bytes — confirm which width is intended here.
    size_in_bytes = 8
class ArrayLine(BaseLine):
    """Parsed `dt` array line, e.g. ``[2] [3] Uint4B`` — one or more
    ``[N]`` dimension prefixes followed by the element type description."""
    shape = None
    element_type = None
    size_in_bytes = None
    number_of_dimensions = None
    element_size_in_bytes = None
    total_number_of_elements = None
    __keys__ = [
        'shape',
        'element_type',
        'number_of_dimensions',
        'element_size_in_bytes',
        'total_number_of_elements',
    ]
    @classmethod
    def parse(cls, line):
        """Scan the leading ``[N]`` groups, then recursively classify the
        remaining text as the element type via extract_type()."""
        open_count = line.count('[')
        close_count = line.count(']')
        assert open_count == close_count, (open_count, close_count)
        assert line.startswith('['), line
        # Scanner state: `last` is the index just before the current "[",
        # `elems` accumulates the product of all dimensions, `count` the
        # number of dimensions consumed, `parts` the dimensions themselves.
        last = 0
        elems = 0
        count = 0
        parts = []
        while True:
            ix = line.find(' ', last+1)
            if ix == -1:
                # Ran out of spaces: all brackets must have been consumed.
                assert count == open_count, (count, open_count)
                break
            prev = line[ix-1]
            if prev != ']':
                # The token before this space is not a "[N]" group, so the
                # element-type text starts here.
                assert count == open_count, (count, open_count)
                break
            part = line[last+1:ix-1]
            part = int(part)
            if not elems:
                elems = part
            else:
                elems *= part
            parts.append(part)
            count = count + 1
            last = ix + 1
        prefix = '%s ' % ' '.join('[%d]' % i for i in parts)
        # NOTE(review): str.replace removes this pattern anywhere in the
        # line, not just the leading prefix — harmless for typical input but
        # worth confirming.
        remaining = line.replace(prefix, '')
        shape = tuple(parts)
        number_of_dimensions = len(parts)
        element_type = extract_type(remaining)
        element_size_in_bytes = element_type.size_in_bytes
        total_number_of_elements = elems
        size_in_bytes = (
            element_size_in_bytes *
            total_number_of_elements
        )
        result = {
            'shape': shape,
            'element_type': element_type,
            'size_in_bytes': size_in_bytes,
            'number_of_dimensions': number_of_dimensions,
            'element_size_in_bytes': element_size_in_bytes,
            'total_number_of_elements': total_number_of_elements,
        }
        return result
# Pointer-sized (8-byte) string and pointer line types.  "Ptr64 to Char" /
# "Ptr64 to Wchar" are modelled as C strings rather than raw data pointers.
class BaseStringLine(BaseLine):
    is_string = True
    size_in_bytes = 8
class CStringLine(BaseStringLine):
    pass
class WideCStringLine(BaseStringLine):
    pass
class BasePointerLine(BaseLine):
    size_in_bytes = 8
class FunctionPointerLine(BasePointerLine):
    pass
class PointerToPointerLine(BasePointerLine):
    pass
class VoidPointerLine(BasePointerLine):
    pass
class DataPointerLine(BasePointerLine):
    pass
class Bitmap(object):
    """A run of consecutive bitfields sharing one struct offset.

    Bitfields are appended in declaration order via add_bitfield(); once the
    run ends, finalize() rounds the total bit count up to a 1/2/4/8-byte
    storage unit and records the implicit padding bits.
    """
    def __init__(self, offset):
        self.offset = offset
        self.names = []
        self.bitfields = []
        self.finalized = False
        self.last_position = None
        self.name_to_bitfield = {}
        self.last_number_of_bits = None
        self.total_number_of_bits = 0
        self._size_in_bytes = None
        self._implicit_padding_bits = None
    def __repr__(self):
        fmt = (
            "<%s offset=%d"
            " num_bitfields=%d"
            " total_number_of_bits=%d"
        )
        values = [
            self.__class__.__name__,
            self.offset,
            len(self.bitfields),
            self.total_number_of_bits,
        ]
        if not self.finalized:
            fmt += " finalized=False"
        else:
            fmt += (
                " size_in_bytes=%d"
                " implicit_padding_bits=%d"
            )
            values += [
                self.size_in_bytes,
                self.implicit_padding_bits,
            ]
        fmt += " names=%r>"
        values.append(self.names)
        return fmt % tuple(values)
    def add_bitfield(self, offset, name, bitfield):
        """Append *bitfield* (a BitfieldLine) named *name* at *offset*.

        Bitfields must arrive contiguously: each one's position has to equal
        the previous position plus the previous width.
        """
        assert not self.finalized
        assert isinstance(bitfield, BitfieldLine)
        assert offset == self.offset, (offset, self.offset)
        if self.last_position is None:
            assert bitfield.bit_position == 0, bitfield.bit_position
        else:
            assert bitfield.bit_position == self.expected_next_bit_position
        assert name not in self.name_to_bitfield
        bitfield.name = name
        self.names.append(name)
        self.name_to_bitfield[name] = bitfield
        self.bitfields.append(bitfield)
        self.total_number_of_bits += bitfield.number_of_bits
        self.last_position = bitfield.bit_position
        self.last_number_of_bits = bitfield.number_of_bits
    def finalize(self):
        """Round the bit total up to a storage unit and freeze the bitmap."""
        assert not self.finalized
        if self.total_number_of_bits not in (8, 16, 32, 64):
            if self.total_number_of_bits < 8:
                self._size_in_bytes = 1
                self._implicit_padding_bits = 8 - self.total_number_of_bits
            elif self.total_number_of_bits < 16:
                self._size_in_bytes = 2
                self._implicit_padding_bits = 16 - self.total_number_of_bits
            elif self.total_number_of_bits < 32:
                self._size_in_bytes = 4
                self._implicit_padding_bits = 32 - self.total_number_of_bits
            else:
                assert self.total_number_of_bits < 64
                self._size_in_bytes = 8
                self._implicit_padding_bits = 64 - self.total_number_of_bits
        else:
            # BUG FIX: true division produced a float (e.g. 4.0) in Python 3
            # where an integer byte size is expected; use floor division.
            self._size_in_bytes = self.total_number_of_bits // 8
            self._implicit_padding_bits = 0
        self.finalized = True
    @property
    def number_of_bitfields(self):
        return len(self.bitfields)
    @property
    def expected_next_bit_position(self):
        # Where the next contiguous bitfield must start.
        return self.last_position + self.last_number_of_bits
    @property
    def size_in_bytes(self):
        assert self.finalized
        return self._size_in_bytes
    @property
    def implicit_padding_bits(self):
        assert self.finalized
        return self._implicit_padding_bits
class ImplicitPadding(object):
    """Synthetic filler field inserted where the next real field starts past
    the expected offset."""
    # Preferred single-field representation for the common padding widths.
    size_to_line = {
        1: 'UChar',
        2: 'Wchar',
        4: 'Uint4B',
        8: 'Uint8B',
    }
    def __init__(self, offset, expected_offset):
        assert offset > expected_offset, (offset, expected_offset)
        self.offset = offset
        self.expected_offset = expected_offset
        self.size_in_bytes = offset - expected_offset
        assert self.size_in_bytes >= 1
        # Fall back to a byte array when no single field covers the gap.
        line = self.size_to_line.get(self.size_in_bytes)
        if line is None:
            line = '[%d] UChar' % self.size_in_bytes
        self.line = line
        self.line_type = extract_type(line)
    def __repr__(self):
        return "<%s offset=%d expected=%d size_in_bytes=%d line=%s>" % (
            self.__class__.__name__,
            self.offset,
            self.expected_offset,
            self.size_in_bytes,
            self.line,
        )
class TrailingPadding(ImplicitPadding):
    """Padding appended after the last field to reach the declared size."""
    pass
class _AnonymousStruct(object):
def __init__(self):
self.lines = []
self.line_types = []
self._field_names = None
self._size_in_bytes = None
self.finalized = False
def add_line_type(self, offset, line_type):
assert not self.finalized
self.line_types.append(line_type)
def finalize(self):
assert not self.finalized
self._field_names = tuple(t.field_name for t in self.line_types)
self.finalized = True
@property
def field_names(self):
assert self._finalized
return self._field_names
class AnonymousUnion(object):
    # NOTE(review): scaffolding — there is no __init__, yet __repr__ reads
    # self.offset/expected_offset/size_in_bytes/line, so repr() raises
    # AttributeError on a fresh instance; confirm the intended shape before
    # this class is used.
    def __repr__(self):
        fmt = "<%s offset=%d expected=%d size_in_bytes=%d line=%s>"
        return fmt % (
            self.__class__.__name__,
            self.offset,
            self.expected_offset,
            self.size_in_bytes,
            self.line,
        )
class Struct(StructLine):
    """Full parse of a `dt -v` structure dump: the header line plus all of
    its ` +0x...` field lines.

    Built either directly (header line to the constructor, repeated
    add_line() calls, then finalize()) or via the load*() classmethods.
    """
    __keys__ = [
        'type_name',
        'struct_name',
        'module_name',
    ]
    def __init__(self, *args, **kwds):
        StructLine.__init__(self, *args, **kwds)
        self._init()
    def _init(self):
        # Parsing state; filled incrementally by add_line() and finalize().
        self.lines = []
        self.dt_line = None
        self.module_name = None
        self.last_offset = None
        self.cumulative_size = 0
        self.bitmaps = []
        self.offsets = OrderedDefaultDict(list)
        self.offset_to_line_type = OrderedDefaultDict(list)
        self.offset_to_field_name = OrderedDefaultDict(list)
        self.inline_union_offsets = []
        self.anonymous_struct_offsets = []
        self.inline_bitfields = {}
        self.inline_bitfields_by_offset = OrderedDefaultDict(list)
        self.enums = {}
        self.enums_by_offset = OrderedDefaultDict(list)
        self.expected_next_offset = 0
        self.line_types = []
        self.field_names = []
        self.field_name_to_line_type = OrderedDict()
        self.last_line_was_bitfield = False
        self.bitmaps_by_offset = {}
        self.active_bitmap = None
        self.last_bitmap = None
        self.last_bitmap_offset = None
        self.expected_next_offsets = OrderedDefaultDict(list)
        self.field_sizes_by_offset = OrderedDefaultDict(list)
        self.offset_to_max_size_in_bytes = OrderedDict()
        self.implicit_paddings = OrderedDict()
        self.trailing_padding = None
        self.children_by_offset = OrderedDict()
        self.finalized = False
    def extract_anonymous_struct(self, offset):
        """Walk line_types backwards to the entry at *offset*.

        NOTE(review): unfinished — the collected slices are never used and
        nothing is returned yet.
        """
        # BUG FIX: removed a leftover `import ipdb; ipdb.set_trace()`
        # debugger breakpoint that froze any caller of this method (and
        # pulled in a third-party dependency).
        count = itertools.count(-1, -1)
        for i in count:
            line_type = self.line_types[i]
            if line_type.offset == offset:
                break
        field_names = self.field_names[i:]
        line_types = self.line_types[i:]
        #anon_struct = AnonymousStructure(offset, last_offset)
        #for (field_name, line_type) in zip(field_names, line_types):
        #    anon_struct.add(line_type)
    def add_line(self, line):
        """Parse one ` +0x...` field line and fold it into the struct state.

        Returns the parsed line type, or None for non-field lines.
        """
        assert not self.finalized
        if not line.startswith(' +0x'):
            return
        self.lines.append(line)
        line = line[4:]
        (left, right) = line.split(' : ')
        (offset, field_name) = left.rstrip().split(' ')
        offset = int(offset, 16)
        # BUG FIX: `if self.last_offset:` treated a previous offset of 0 as
        # "no previous offset"; compare against None explicitly.
        if self.last_offset is not None:
            if offset < self.last_offset:
                assert offset in self.offsets, offset
                self.anonymous_struct_offsets.append((offset, self.last_offset))
                # We've detected an anonymous structure within an anonymous
                # union.  The union would have started at the byte offset
                # indicated by the current value of `offset`.  This also marks
                # the starting offset of the anonymous structure, which extends
                # to and includes the immediately previous field offsets from
                # the offset to the last offset.
                #anon_struct = self.extract_anonymous_struct(offset)
        assert field_name not in self.field_name_to_line_type
        self.field_names.append(field_name)
        self.offsets[offset].append(line)
        t = extract_type(right)
        t.offset = offset
        t.field_name = field_name
        if not t.is_bitfield:
            self.line_types.append(t)
        # Bitmap/bitfield processing: exactly one of these four states holds.
        m = Mutex()
        m.is_first_bitfield = (
            t.is_bitfield and
            not self.last_line_was_bitfield
        )
        m.is_bitfield_continuation = (
            t.is_bitfield and
            self.last_line_was_bitfield
        )
        m.need_to_finalize_bitmap = (
            not t.is_bitfield and
            self.last_line_was_bitfield
        )
        m.no_bitfield_action_required = (
            not t.is_bitfield and
            not self.last_line_was_bitfield
        )
        try:
            field_size_in_bytes = t.size_in_bytes
        except AttributeError:
            # Bitfields have no byte size of their own; their storage unit is
            # accounted for when the enclosing Bitmap is finalized.
            field_size_in_bytes = 0
        with m:
            if m.is_first_bitfield:
                assert not self.active_bitmap
                self.active_bitmap = Bitmap(offset)
                self.active_bitmap.add_bitfield(offset, field_name, t)
                self.last_line_was_bitfield = True
            elif m.is_bitfield_continuation:
                assert offset == self.last_offset, (offset, self.last_offset)
                assert self.last_line_was_bitfield
                self.active_bitmap.add_bitfield(offset, field_name, t)
            elif m.need_to_finalize_bitmap:
                # A non-bitfield line ends the active run; freeze the bitmap
                # and record it against the run's offset.
                bitmap = self.active_bitmap
                bitmap.finalize()
                self.active_bitmap = None
                self.last_bitmap = bitmap
                self.last_bitmap_offset = offset
                self.bitmaps.append(bitmap)
                self.bitmaps_by_offset[offset] = bitmap
                size = bitmap.size_in_bytes
                self.field_sizes_by_offset[self.last_offset].append(size)
                self.offset_to_line_type[offset].append(bitmap)
                self.line_types.append(bitmap)
                self.last_line_was_bitfield = False
            elif m.no_bitfield_action_required:
                pass
        if field_size_in_bytes:
            assert not t.is_bitfield
            self.field_sizes_by_offset[offset].append(field_size_in_bytes)
            self.offset_to_line_type[offset].append(t)
            self.offset_to_field_name[offset].append(field_name)
        else:
            assert t.is_bitfield
            self.field_name_to_line_type[field_name] = t
        self.last_offset = offset
        return t
    def finalize(self):
        """Compute per-offset sizes and synthesize implicit/trailing padding."""
        assert not self.finalized
        i = -1
        first = True
        last_sizes = None
        total_size = 0
        last_offset = 0
        expected_offset = 0
        is_union = False
        last_offset_was_union = False
        alternate_expected_offset = None
        # BUG FIX: dict views are not indexable in Python 3; materialize the
        # offsets so the `offsets[i+1]` lookahead below works.
        offsets = list(self.field_sizes_by_offset.keys())
        offset_sizes = self.offset_to_max_size_in_bytes
        for offset, sizes in self.field_sizes_by_offset.items():
            i = i + 1
            if len(sizes) == 1:
                is_union = False
                size = sizes[0]
            else:
                # Multiple sizes at one offset mean an inline union; its
                # effective size is the distance to the next offset.
                is_union = True
                self.inline_union_offsets.append(offset)
                max_size = max(sizes)
                min_size = min(sizes)
                try:
                    next_offset = offsets[i+1]
                except IndexError:
                    next_offset = self.size_in_bytes
                size = next_offset - offset
            offset_sizes[offset] = size
            total_size += size
            if first:
                first = False
                expected_offset = last_offset + size
                continue
            if offset != expected_offset:
                # A gap before this field: record implicit padding.
                assert offset > expected_offset, (offset, expected_offset)
                padding = ImplicitPadding(offset, expected_offset)
                new_expected_offset = expected_offset + padding.size_in_bytes
                assert offset == new_expected_offset
                self.implicit_paddings[expected_offset] = padding
            last_offset = offset
            expected_offset = last_offset + size
        if total_size != self.size_in_bytes:
            # Fields don't reach the declared size: record trailing padding.
            size_in_bytes = self.size_in_bytes
            assert size_in_bytes > total_size, (size_in_bytes, total_size)
            padding = TrailingPadding(size_in_bytes, total_size)
            new_total_size = total_size + padding.size_in_bytes
            assert new_total_size == self.size_in_bytes
            self.trailing_padding = padding
        self.finalized = True
    @property
    def has_implicit_padding(self):
        """True when any inter-field padding was synthesized."""
        return bool(self.implicit_paddings)
    @property
    def has_trailing_padding(self):
        """True when padding was needed after the last field."""
        return bool(self.trailing_padding)
    @property
    def has_padding(self):
        return self.has_implicit_padding or self.has_trailing_padding
    @classmethod
    def load(cls, text):
        """Parse a single struct dump out of *text* (header + field lines)."""
        lines = text.splitlines()
        first_lineno = None
        for (i, line) in enumerate(lines):
            if not line.startswith('struct _'):
                continue
            first_lineno = i
            break
        assert first_lineno is not None
        struct = cls(lines[first_lineno])
        remaining = lines[first_lineno+1:]
        for line in remaining:
            struct.add_line(line)
        struct.finalize()
        return struct
    @classmethod
    def load_from_cdb(cls, module_name, type_name):
        """Run `dt -v module!type` through the cdb helper and parse it."""
        from .cdb import run_single
        command = 'dt -v %s!%s' % (module_name, type_name)
        output = run_single(command)
        return cls.load(output)
    @classmethod
    def load_all_from_text(cls, text):
        """Parse every `dt -v` struct dump found in *text* (a cdb session log).

        Returns a Dict with keys 'all', 'by_module' and 'has_padding'.
        """
        lines = text.splitlines()
        dt_line = None
        dt_prefix = '0:000> dt -v '
        active_struct = False
        start = 0
        end = 0
        indexes = []
        for (i, line) in enumerate(lines):
            if not active_struct:
                if line.startswith(dt_prefix):
                    dt_line = line
                elif not line.startswith('struct _'):
                    continue
                else:
                    active_struct = True
                    start_line = i
                    continue
            else:
                if not line or line.startswith(' +0x'):
                    continue
                else:
                    end_line = i
                    assert dt_line
                    # BUG FIX: `assert start_line` failed for a struct whose
                    # header sat on line 0; compare against None instead.
                    assert start_line is not None
                    indexes.append((dt_line, start_line, end_line))
                    active_struct = False
                    start_line = None
                    if line.startswith(dt_prefix):
                        dt_line = line
                    else:
                        dt_line = None
        structs = []
        module_names = set()
        by_module = defaultdict(dict)
        has_padding = list()
        for (dt_line, start, end) in indexes:
            (left, right) = dt_line.replace('0:000> dt -v ', '').split('!')
            struct_line = lines[start]
            struct = cls(struct_line)
            struct.module_name = left
            struct.dt_line = dt_line
            for i in range(start+1, end):
                line = lines[i]
                struct.add_line(line)
            struct.finalize()
            module_names.add(struct.name)
            by_module[struct.module_name][struct.name] = struct
            if struct.has_padding:
                has_padding.append(struct)
            structs.append(struct)
        return Dict({
            'all': structs,
            'by_module': { k: v for (k, v) in by_module.items() },
            'has_padding': has_padding,
        })
    def offset_diff(self, ctypes_struct):
        """Unified diff of field offsets between this struct and a ctypes one.

        Returns a dict with the raw 'diff', annotated 'lines', and the
        'added'/'missing' offset-to-field-name mappings.
        """
        left_offsets = self.offsets
        right_offsets = OrderedDict(
            (getattr(ctypes_struct, name[0]).offset, name)
            for name in ctypes_struct._fields_
        )
        left_keys = left_offsets.keys()
        right_keys = right_offsets.keys()
        left = [ str(k) for k in left_keys ]
        right = [ str(k) for k in right_keys ]
        lines = []
        added = {}
        missing = {}
        from difflib import unified_diff
        diff = list(unified_diff(left, right, fromfile='ours', tofile='theirs'))
        for line in diff:
            if not line:
                continue
            first = line[0]
            try:
                second = line[1]
            except IndexError:
                second = None
            if not second:
                lines.append(line)
                continue
            if first == '-':
                if second == '-':
                    # "---" header line, not a removal.
                    lines.append(line)
                    continue
                else:
                    offset = int(line[1:])
                    field_name = self.offsets[offset][0]
                    missing[offset] = field_name
                    lines.append('%s (%s)' % (line, field_name))
                    continue
            elif first == '+':
                if second == '+':
                    # "+++" header line, not an addition.
                    lines.append(line)
                    continue
                else:
                    offset = int(line[1:])
                    field_name = right_offsets[offset]
                    added[offset] = field_name
                    lines.append('%s (%s)' % (line, field_name))
                    continue
            else:
                lines.append(line)
                continue
        return {
            'diff': diff,
            'lines': lines,
            'added': added,
            'missing': missing,
        }
    def offset_and_fields_diff(self, ctypes_struct):
        """Like offset_diff(), but diffs "offset<TAB>field_name" pairs."""
        left_offsets = self.offset_to_field_name
        right_offsets = OrderedDict(
            (getattr(ctypes_struct, name[0]).offset, name)
            for name in ctypes_struct._fields_
        )
        left = [
            "%s\t%s" % (offset, names[0])
            for (offset, names) in left_offsets.items()
        ]
        right = [
            "%s\t%s" % (offset, name[0])
            for (offset, name) in right_offsets.items()
        ]
        lines = []
        added = {}
        missing = {}
        from difflib import unified_diff
        diff = list(unified_diff(left, right, fromfile='ours', tofile='theirs'))
        for line in diff:
            if not line:
                continue
            first = line[0]
            try:
                second = line[1]
            except IndexError:
                second = None
            if not second:
                lines.append(line)
                continue
            if first == '-':
                if second == '-':
                    lines.append(line)
                    continue
                else:
                    offset = int(line[1:line.find('\t', 1)])
                    field_name = self.offsets[offset][0]
                    missing[offset] = field_name
                    lines.append(line)
                    continue
            elif first == '+':
                if second == '+':
                    lines.append(line)
                    continue
                else:
                    offset = int(line[1:line.find('\t', 1)])
                    field_name = right_offsets[offset]
                    added[offset] = field_name
                    lines.append(line)
                    continue
            else:
                lines.append(line)
                continue
        return {
            'diff': diff,
            'lines': lines,
            'added': added,
            'missing': missing,
        }
    def get_ctypes_decl(self):
        """NOTE(review): unimplemented stub."""
        pass
    def get_ctypes_defi(self):
        """NOTE(review): unimplemented stub."""
        pass
    def as_ctypes_struct(self):
        """Render this struct as ctypes source.

        NOTE(review): unfinished — only the output buffer is set up; nothing
        is written or returned yet.
        """
        # BUG FIX: the original called `StringIO.StringIO()`, but no StringIO
        # module is imported anywhere in this file (and it no longer exists
        # in Python 3); use io.StringIO.
        import io
        buf = io.StringIO()
        w = lambda chunk: buf.write(chunk)
        wl = lambda line: buf.write(line + '\n')
class AnonymousStruct(Struct):
    """Struct subclass marking structures that have no name of their own."""
    pass
# vim:set ts=8 sw=4 sts=4 tw=80 et :
|
"""Tensor Class."""
import functools
import operator
import numpy as np
# PyCUDA initialization
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
from .gpu_kernels import add, arithmetic
from .states import TensorState
# Symbol -> arithmetic function mapping used when dispatching elementwise ops.
ops = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}
class GPUConnectMixin:
    """Helpers shared by GPU-backed tensors: device allocation, host-to-device
    copies and kernel lookup (all via PyCUDA)."""
    def _alloc_device_memory(self, shape):
        """Allocate a float32 device buffer covering *shape*.

        The returned allocation is tagged with shape/dtype attributes so it
        can stand in for a host array in later metadata reads.
        """
        nbytes = int(np.prod(shape) * 4)  # 4 bytes per float32 element
        device_buffer = cuda.mem_alloc(nbytes)
        device_buffer.shape = tuple(shape)
        device_buffer.dtype = np.float32
        return device_buffer
    def _memory_host_to_device(self, device_data, data):
        """Copy the host array *data* into the device allocation *device_data*."""
        cuda.memcpy_htod(device_data, data)
        return
    @staticmethod
    def _idiv(a, b):
        # Grid-size helper: floor division plus one spare block.
        return a // b + 1
    @staticmethod
    def get_kernel(kernel, function):
        """Look up the named *function* inside the compiled *kernel* module."""
        return kernel.get_function(function)
class GradientMixin:
    """Reverse-mode autodiff helpers: graph traversal plus backward pass."""
    def _walk(self, leaf_out_node):
        """Post-order DFS from *leaf_out_node*, appending visited nodes to
        self.nodes so parents end up after their children."""
        self.visited.add(leaf_out_node)
        for child in leaf_out_node._child_nodes:
            if child not in self.visited:
                self._walk(child)
        self.nodes.append(leaf_out_node)
    def backward(self):
        """Run the backward pass from this node with a seed gradient of 1.0.

        Returns this node's gradient.
        """
        self.visited = set()
        self.nodes = []
        self._walk(self)
        self.grad = 1.0
        # Walk the topological order in reverse, propagating gradients.
        for node in reversed(self.nodes):
            node._backward(node.grad)
        return self.grad
class Tensor(GPUConnectMixin, GradientMixin):
"""Tensor Class."""
BLOCKSIZE = 256
"""
The dict wastes a lot of RAM. Python can’t just allocate a static amount of memory at
object creation to store all the attributes. Therefore it sucks a lot of RAM if you
create a lot of objects (I am talking in thousands and millions).
Still there is a way to circumvent this issue.
It involves the usage of __slots__ to tell Python not to use a dict,
and only allocate space for a fixed set of attributes.
"""
__slots__ = (
"_data",
"_name",
"_n",
"_dtype",
"_shape",
"gpu",
"state",
"device_name",
)
    def __init__(self, data, name=None, dtype=None):
        """Initialize a Tensor on the host.

        Args:
            data: list / scalar / np.ndarray payload, or an existing PyCUDA
                DeviceAllocation (in which case the tensor starts on-device).
            name: optional tensor name.
            dtype: numpy dtype used when converting list/scalar input;
                defaults to float32.

        Example::

            >> a = Tensor([1, 2])
            >> b = Tensor([2,3])
            >> print(a + b)
            (dp.Tensor, shape=(2,), dtype = int32, numpy:([3,5], dtype = int32)
        """
        self.state = TensorState.HOST
        if isinstance(data, (list, float, int)):
            data = np.array(data, dtype=dtype if dtype else np.float32)
        elif isinstance(data, pycuda._driver.DeviceAllocation):
            # Device buffers produced by _alloc_device_memory carry .shape
            # and .dtype attributes, so the metadata reads below still work.
            self.state = TensorState.DEVICE
        elif not (isinstance(data, np.ndarray) or isinstance(data, np.float32)):
            raise TypeError(f"numpy excepted but {type(data)} passed.")
        self._data = data
        self._dtype = data.dtype
        self._shape = data.shape
        self._name = name
        self.gpu = False
        # NOTE(review): grad/_child_nodes/_backward are not listed in
        # __slots__; this relies on the slot-less mixin bases providing a
        # __dict__ — confirm that is intentional.
        self.grad = 0.0
        self._child_nodes = tuple()
        def _backward(in_grad=0.0):
            # Default leaf gradient: record the incoming gradient as-is.
            self.grad = in_grad
            return (in_grad,)
        self._backward = _backward
        self.device_name = "cpu:0"
def detach(self):
"""detach.
Detach state.
"""
self.state = TensorState.DETACH
# TODO(kartik4949) : Write ME.
return Tensor(self._data)
@property
def shape(self):
return self._shape
@property
def name(self):
return self._name
@property
def data(self):
return self._data
@property
def dtype(self):
return self._dtype
@property
def where(self):
return self._device()
def _device(self):
if self.state == TensorState.DEVICE:
_cuda_device = "gpu"
if self.state == TensorState.HOST:
_cuda_device = "cpu"
return _cuda_device
def asarray(self, data: list = None, dtype: tuple = None):
"""asarray.
convert array to DP array.
Args:
data (list): data
dtype (tuple): dtype
"""
# Depracted!
return Tensor(np.asarray(data, dtype=dtype))
def device(self, name: str = None):
"""device.
register the data on device.
Args:
name (str): name of device
"""
assert name.startswith("cpu") or name.startswith("gpu"), "Wrong Device!!"
# set precision to float32.
assert (
self.dtype == np.float32
), "Only single precision is supported i.e float32"
if self.state != TensorState.DEVICE:
self.state = TensorState.DEVICE
self.device_name = name
data = self._alloc_device_memory(self.shape)
self._memory_host_to_device(data, self._data)
self._shape = self._data.shape
self._dtype = self._data.dtype
self._data = data
return self
def cpu(
self,
):
"""cpu.
copy buffer from device to cpu.
"""
_host_out_arry = np.empty(self.shape, dtype=np.float32)
cuda.memcpy_dtoh(_host_out_arry, self._data)
cuda.Context.synchronize()
return Tensor(_host_out_arry)
def sigmoid(self):
"""Sigmoid function."""
sig = 1 / (1 + np.exp(-self._data))
ret = Tensor(sig)
ret._child_nodes = (self,)
def _backward(in_grad):
self.grad += in_grad * (ret._data * (1 - ret._data))
return self.grad
ret._backward = _backward
return ret
def relu(self):
"""Relu function."""
_data = np.maximum(self._data, 0)
out = Tensor(_data)
out._child_nodes = (self,)
def _backward(in_grad):
self.grad += (out._data > 0) * in_grad
return (self.grad,)
out._backward = _backward
return out
def tanh(self):
"""Tanh Function."""
t2 = Tensor(
np.zeros(self.shape, dtype=self.data.dtype) + 2,
)
t1 = Tensor(np.zeros(self.shape, dtype=self.data.dtype))
return self.mul(t2).sigmoid().mul(t2) - t1 # 2*sigmoid(2*x)-1
def add(self, tensor):
"""add.
Vector Addition which adds Tensor with given Tensor.
Args:
tensor: Tensor class
"""
def _backward(in_grad):
self.grad += in_grad
tensor.grad += in_grad
return in_grad, in_grad
return self.arithmetic(tensor, _backward, "+")
def sub(self, tensor):
"""sub.
Vector Addition which substracts Tensor with given Tensor.
Args:
tensor: Tensor class
"""
def _backward(in_grad):
self.grad += in_grad
tensor.grad += -in_grad
return in_grad, -in_grad
return self.arithmetic(tensor, _backward, "-")
def mul(self, tensor):
"""mul.
Vector Addition which multiplies Tensor with given Tensor.
Args:
tensor: Tensor class
"""
def _backward(in_grad):
self_grad = in_grad * tensor._data
tensor_grad = in_grad * self._data
self.grad += self_grad
tensor.grad += tensor_grad
return self_grad, tensor_grad
return self.arithmetic(tensor, _backward, "*")
def arithmetic(self, tensor, backward=None, operation: str = "+"):
"""Arithmetic.
Vector arithmetic operations on given Tensor.
Args:
tensor: Tensor class
"""
if self.state != TensorState.DEVICE:
ret = Tensor(ops[operation](self._data, tensor.data))
ret._child_nodes = (self, tensor)
if backward:
ret._backward = backward
return ret
assert isinstance(
tensor, self.__class__
), f"Tensor is required but passed {type(tensor)}"
ret = self._alloc_device_memory(self.shape)
N = max(self.shape)
blockDim = (self.BLOCKSIZE, 1, 1)
gridDim = (self._idiv(N, self.BLOCKSIZE), 1, 1)
_vec_kernel = self.get_kernel(arithmetic(operation), "device_arithmetic")
_vec_kernel(
ret,
self._data,
tensor.data,
np.int32(N),
block=blockDim,
grid=gridDim,
)
ret = Tensor(ret)
ret._dtype = self.dtype
ret._shape = self.shape
return ret
def __pow__(self, value):
out = Tensor(self.data ** value)
out._child_nodes = (self,)
def _backward(in_grad):
self.grad += (value * self._data ** (value - 1)) * in_grad
return (self.grad,)
out._backward = _backward
return out
def __add__(self, tensor):
tensor = tensor if isinstance(tensor, Tensor) else Tensor(tensor)
return self.add(tensor)
def __radd__(self, tensor):
return self + tensor
def __mul__(self, tensor):
return self.mul(tensor)
def __sub__(self, tensor):
return self.sub(tensor)
def __neg__(self):
return self * -1
def __rsub__(self, tensor):
return tensor + (-self)
def __rmul__(self, tensor):
return self * tensor
def __truediv__(self, value):
return self * value ** -1
def __rtruediv__(self, vale):
return value * self ** -1
def __repr__(self):
return "Tensor( %s shape: %s, numpy: (%s, dtype=%s), device: %s)" % (
f"name: {self.name}, " if self.name else "",
self.shape,
self._data,
self.dtype,
self.where,
)
|
import pytest
import numpy as np
import mchammer as mch
@pytest.fixture(
    params=(
        (mch.Atom(id=0, element_string='N'), 0, 'N'),
        (mch.Atom(id=65, element_string='P'), 65, 'P'),
        (mch.Atom(id=2, element_string='C'), 2, 'C'),
    )
)
def atom_info(request):
    # Parametrized (atom, expected_id, expected_element_string) triples.
    return request.param
@pytest.fixture(
    params=(
        (mch.Bond(id=0, atom_ids=(0, 1)), 0, 0, 1),
        (mch.Bond(id=65, atom_ids=(2, 3)), 65, 2, 3),
        (mch.Bond(id=2, atom_ids=(3, 4)), 2, 3, 4),
        (mch.Bond(id=3, atom_ids=(0, 9)), 3, 0, 9),
    )
)
def bond_info(request):
    # Parametrized (bond, expected_id, expected_atom1_id, expected_atom2_id).
    return request.param
@pytest.fixture
def atoms():
    """Six carbon atoms with sequential ids 0-5."""
    return [mch.Atom(id=index, element_string='C') for index in range(6)]
@pytest.fixture
def bonds():
    """Five bonds: a star on atom 0 (0-1, 0-2, 0-3) plus 3-4 and 3-5."""
    edges = ((0, 1), (0, 2), (0, 3), (3, 4), (3, 5))
    return [
        mch.Bond(id=index, atom_ids=pair)
        for index, pair in enumerate(edges)
    ]
@pytest.fixture
def position_matrix():
    """Coordinates: atoms 0-2 near y=1, atoms 3-5 near y=10."""
    rows = [
        [0, 1, 0],
        [1, 1, 0],
        [-1, 1, 0],
        [0, 10, 0],
        [1, 10, 0],
        [-1, 10, 0],
    ]
    return np.array(rows)
@pytest.fixture
def position_matrix2():
    """Same geometry as ``position_matrix`` but with the second group at y=20."""
    rows = [
        [0, 1, 0],
        [1, 1, 0],
        [-1, 1, 0],
        [0, 20, 0],
        [1, 20, 0],
        [-1, 20, 0],
    ]
    return np.array(rows)
@pytest.fixture
def centroid():
    # Expected centroid of ``position_matrix`` (mean of its six rows).
    return np.array([0, 5.5, 0])
@pytest.fixture
def molecule(atoms, bonds, position_matrix):
    # Test molecule assembled from the atoms/bonds/positions fixtures above.
    return mch.Molecule(
        atoms=atoms,
        bonds=bonds,
        position_matrix=position_matrix
    )
@pytest.fixture
def bond_vector():
    # Expected vector along bond 2 (atom 0 at y=1 to atom 3 at y=10).
    return np.array([0, 9, 0])
@pytest.fixture
def bond_potentials():
    # Expected bond-potential values; regression data, do not re-derive.
    return [50, 0, 50, 200, 450, 800, 1250]
@pytest.fixture
def nonbond_potentials():
    # Expected nonbonded-potential values; exact float regression data.
    return [
        34.559999999999995, 4.319999999999999, 1.2799999999999998,
        0.5399999999999999, 0.27647999999999995, 0.15999999999999998,
        0.10075801749271138,
    ]
@pytest.fixture
def nonbonded_potential():
    # Expected total nonbonded potential for ``molecule``; regression value.
    return 147.2965949864993
@pytest.fixture
def system_potential():
    # Expected total system (bonded + nonbonded) potential; regression value.
    return 2597.2965949864993
@pytest.fixture
def subunits():
    """Two subunits: atoms 0-2 and atoms 3-5."""
    return {0: set(range(0, 3)), 1: set(range(3, 6))}
@pytest.fixture
def position_matrix3():
    """Variant of ``position_matrix`` with the second group pulled in to y=5."""
    rows = [
        [0, 1, 0],
        [1, 1, 0],
        [-1, 1, 0],
        [0, 5, 0],
        [1, 5, 0],
        [-1, 5, 0],
    ]
    return np.array(rows)
@pytest.fixture
def optimizer():
    # Optimizer with small step size and short run for fast tests.
    return mch.Optimizer(
        step_size=0.1,
        target_bond_length=2.0,
        num_steps=100
    )
@pytest.fixture
def coll_atoms():
    """Six carbon atoms (ids 0-5) for the Collapser tests."""
    return [mch.Atom(id=index, element_string='C') for index in range(6)]
@pytest.fixture
def coll_bonds():
    """Bonds for the Collapser tests: star on atom 0 plus 3-4 and 3-5."""
    edges = ((0, 1), (0, 2), (0, 3), (3, 4), (3, 5))
    return [
        mch.Bond(id=index, atom_ids=pair)
        for index, pair in enumerate(edges)
    ]
@pytest.fixture
def coll_position_matrix():
    """Collapser-test coordinates: two groups near y=1 and y=10."""
    rows = [
        [0, 1, 0],
        [1, 1, 0],
        [-1, 1, 0],
        [0, 10, 0],
        [1, 10, 0],
        [-1, 10, 0],
    ]
    return np.array(rows)
@pytest.fixture
def coll_vectors(request):
    # Expected per-subunit collapse direction vectors; regression data.
    return {0: np.array([2, -1.5, 0]), 1: np.array([0, 2.5, 0])}
@pytest.fixture
def coll_su_dists(request):
    # Expected pairwise subunit distances; exact float regression data.
    return [
        9, 9.055385138137417, 9.055385138137417, 9.055385138137417,
        9, 9.219544457292887, 9.055385138137417, 9.219544457292887,
        9,
    ]
@pytest.fixture
def coll_scales(request):
    # Expected per-subunit step scales; regression data.
    return {0: 0.2, 1: 1.0}
@pytest.fixture
def coll_step(request):
    # Step size used by the collapse-step tests.
    return 1.5
@pytest.fixture
def coll_position_matrix2():
    # Expected positions after one scaled collapse step; regression data.
    return np.array([
        [-0.6, 1.45, 0],
        [0.4, 1.45, 0],
        [-1.6, 1.45, 0],
        [0, 6.25, 0],
        [1, 6.25, 0],
        [-1, 6.25, 0],
    ])
@pytest.fixture
def su_vectors(request):
    # Expected subunit-to-centroid vectors (equal and opposite along y).
    return {0: np.array([0, -4.5, 0]), 1: np.array([0, 4.5, 0])}
@pytest.fixture
def su_scales(request):
    # Expected unit step scales for both subunits; regression data.
    return {0: 1.0, 1: 1.0}
@pytest.fixture
def coll_molecule(coll_atoms, coll_bonds, coll_position_matrix):
    # Molecule assembled from the coll_* fixtures for Collapser tests.
    return mch.Molecule(
        atoms=coll_atoms,
        bonds=coll_bonds,
        position_matrix=coll_position_matrix
    )
@pytest.fixture
def coll_subunits():
    """Two subunits for the Collapser tests: atoms 0-2 and atoms 3-5."""
    return {0: set(range(0, 3)), 1: set(range(3, 6))}
@pytest.fixture
def coll_final_position_matrix():
    # Expected positions after a full collapse run; exact float regression data.
    return np.array([
        [0., 4.75262477, 0.],
        [1., 4.75262477, 0.],
        [-1., 4.75262477, 0.],
        [0., 6.24737523, 0.],
        [1., 6.24737523, 0.],
        [-1., 6.24737523, 0.],
    ])
@pytest.fixture
def collapser():
    # Collapser with a small step and scaled steps for deterministic tests.
    return mch.Collapser(
        step_size=0.05,
        distance_threshold=1.5,
        scale_steps=True,
    )
|
<filename>examples/05_glm_second_level/plot_oasis.py
"""Voxel-Based Morphometry on Oasis dataset
========================================
This example uses Voxel-Based Morphometry (VBM) to study the relationship
between aging, sex and gray matter density.
The data come from the `OASIS <http://www.oasis-brains.org/>`_ project.
If you use it, you need to agree with the data usage agreement available
on the website.
It has been run through a standard VBM pipeline (using SPM8 and
NewSegment) to create VBM maps, which we study here.
VBM analysis of aging
---------------------
We run a standard GLM analysis to study the association between age
and gray matter density from the VBM data. We use only 100 subjects
from the OASIS dataset to limit the memory usage.
Note that more power would be obtained from using a larger sample of subjects.
"""
# Authors: <NAME>, <<EMAIL>>, July 2018
# <NAME>, <<EMAIL>>, Apr. 2014
# <NAME>, <<EMAIL>>, Apr 2014
# <NAME>, Apr 2014
n_subjects = 100 # more subjects requires more memory
############################################################################
# Load Oasis dataset
# ------------------
from nilearn import datasets
oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=n_subjects)
gray_matter_map_filenames = oasis_dataset.gray_matter_maps
age = oasis_dataset.ext_vars['age'].astype(float)
###############################################################################
# Sex is encoded as 'M' or 'F'. Hence, we make it a binary variable.
sex = oasis_dataset.ext_vars['mf'] == b'F'
###############################################################################
# Print basic information on the dataset.
print('First gray-matter anatomy image (3D) is located at: %s' %
oasis_dataset.gray_matter_maps[0]) # 3D data
print('First white-matter anatomy image (3D) is located at: %s' %
oasis_dataset.white_matter_maps[0]) # 3D data
###############################################################################
# Get a mask image: A mask of the cortex of the ICBM template.
gm_mask = datasets.fetch_icbm152_brain_gm_mask()
###############################################################################
# Resample the images, since this mask has a different resolution.
from nilearn.image import resample_to_img
mask_img = resample_to_img(
gm_mask, gray_matter_map_filenames[0], interpolation='nearest')
#############################################################################
# Analyse data
# ------------
#
# First, we create an adequate design matrix with three columns: 'age',
# 'sex', 'intercept'.
import pandas as pd
import numpy as np
intercept = np.ones(n_subjects)
design_matrix = pd.DataFrame(np.vstack((age, sex, intercept)).T,
columns=['age', 'sex', 'intercept'])
#############################################################################
# Let's plot the design matrix.
from nilearn.plotting import plot_design_matrix
ax = plot_design_matrix(design_matrix)
ax.set_title('Second level design matrix', fontsize=12)
ax.set_ylabel('maps')
##########################################################################
# Next, we specify and fit the second-level model when loading the data and
# also smooth a little bit to improve statistical behavior.
from nilearn.glm.second_level import SecondLevelModel
second_level_model = SecondLevelModel(smoothing_fwhm=2.0, mask_img=mask_img)
second_level_model.fit(gray_matter_map_filenames,
design_matrix=design_matrix)
##########################################################################
# Estimating the contrast is very simple. We can just provide the column
# name of the design matrix.
z_map = second_level_model.compute_contrast(second_level_contrast=[1, 0, 0],
output_type='z_score')
###########################################################################
# We threshold the second level contrast at uncorrected p < 0.001 and plot it.
from nilearn import plotting
from nilearn.glm import threshold_stats_img
_, threshold = threshold_stats_img(
z_map, alpha=.05, height_control='fdr')
print('The FDR=.05-corrected threshold is: %.3g' % threshold)
display = plotting.plot_stat_map(
z_map, threshold=threshold, colorbar=True, display_mode='z',
cut_coords=[-4, 26],
title='age effect on grey matter density (FDR = .05)')
plotting.show()
###########################################################################
# We can also study the effect of sex by computing the contrast, thresholding
# it and plot the resulting map.
z_map = second_level_model.compute_contrast(second_level_contrast='sex',
output_type='z_score')
_, threshold = threshold_stats_img(
z_map, alpha=.05, height_control='fdr')
plotting.plot_stat_map(
z_map, threshold=threshold, colorbar=True,
title='sex effect on grey matter density (FDR = .05)')
###########################################################################
# Note that there does not seem to be any significant effect of sex on
# grey matter density on that dataset.
###########################################################################
# Generating a report
# -------------------
# It can be useful to quickly generate a
# portable, ready-to-view report with most of the pertinent information.
# This is easy to do if you have a fitted model and the list of contrasts,
# which we do here.
from nilearn.reporting import make_glm_report
icbm152_2009 = datasets.fetch_icbm152_2009()
report = make_glm_report(model=second_level_model,
contrasts=['age', 'sex'],
bg_img=icbm152_2009['t1'],
)
#########################################################################
# We have several ways to access the report:
# report # This report can be viewed in a notebook
# report.save_as_html('report.html')
# report.open_in_browser()
|
<reponame>gilbertohasnofb/auxjad
import abjad
def extract_trivial_tuplets(selection: abjad.Selection) -> None:
    r"""Mutates an input |abjad.Selection| in place and has no return value;
    this function looks for tuplets filled with rests or with tied notes or
    chords and replaces them with a single leaf.

    Basic usage:
        Usage is simple:

        >>> container = abjad.Container(
        ...     r"\times 2/3 {r4 r2} \times 2/3 {c'8 ~ c'8 ~ c'2}"
        ... )
        >>> abjad.show(container)

        ..  docs::

            {
                \times 2/3
                {
                    r4
                    r2
                }
                \times 2/3
                {
                    c'8
                    ~
                    c'8
                    ~
                    c'2
                }
            }

        ..  figure:: ../_images/extract_trivial_tuplets-4htz2xebxwf.png

        >>> auxjad.mutate(container[:]).extract_trivial_tuplets()
        >>> abjad.show(container)

        ..  docs::

            {
                r2
                c'2
            }

        ..  figure:: ../_images/extract_trivial_tuplets-2dbuwo4erhb.png

        It also works with containers with tuplets within tuplets.

        >>> container = abjad.Container(r"\times 4/5 {r2. \times 2/3 {r2 r4}}")
        >>> abjad.show(container)

        ..  docs::

            {
                \times 4/5
                {
                    r2.
                    \times 2/3
                    {
                        r2
                        r4
                    }
                }
            }

        ..  figure:: ../_images/extract_trivial_tuplets-8d5bcyxcmhc.png

        >>> auxjad.mutate(container[:]).extract_trivial_tuplets()
        >>> abjad.show(container)

        ..  docs::

            {
                r1
            }

        ..  figure:: ../_images/extract_trivial_tuplets-2a2fvwimyrx.png

        >>> container = abjad.Container(
        ...     r"\times 4/5 {c'2. ~ \times 2/3 {c'2 ~ c'4}}"
        ... )
        >>> abjad.show(container)

        ..  docs::

            {
                \times 4/5
                {
                    c'2.
                    ~
                    \times 2/3
                    {
                        c'2
                        ~
                        c'4
                    }
                }
            }

        ..  figure:: ../_images/extract_trivial_tuplets-xka6r5iyo4l.png

        >>> auxjad.mutate(container[:]).extract_trivial_tuplets()
        >>> abjad.show(container)

        ..  docs::

            {
                c'1
            }

        ..  figure:: ../_images/extract_trivial_tuplets-f1qxi44xcsw.png

    ..  note::

        Auxjad automatically adds this function as an extension function to
        |abjad.mutate|. It can thus be used from either |auxjad.mutate|_ or
        |abjad.mutate| namespaces. Therefore, the two lines below are
        equivalent:

        >>> auxjad.mutate(staff[:]).extract_trivial_tuplets()
        >>> abjad.mutate(staff[:]).extract_trivial_tuplets()

    Partial extraction:
        This function also extracts tuplets within tuplets.

        >>> container = abjad.Container(
        ...     r"r2 \times 2/3 {r2 r4} \times 4/5 {c'2. \times 2/3 {r2 r4}}"
        ... )
        >>> abjad.show(container)

        ..  docs::

            {
                r2
                \times 2/3
                {
                    r2
                    r4
                }
                \times 4/5
                {
                    c'2.
                    \times 2/3
                    {
                        r2
                        r4
                    }
                }
            }

        ..  figure:: ../_images/extract_trivial_tuplets-adibnkb1mbs.png

        >>> auxjad.mutate(container[:]).extract_trivial_tuplets()
        >>> abjad.show(container)

        ..  docs::

            {
                r2
                r2
                \times 4/5
                {
                    c'2.
                    r2
                }
            }

        ..  figure:: ../_images/extract_trivial_tuplets-xldohyedqs.png

    ..  tip::

        Use |auxjad.mutate.rests_to_multimeasure_rest()| to replace measures
        filled with rests by a single multi-measure rest. That function makes
        use of |auxjad.mutate.extract_trivial_tuplets()|, so it is not
        necessary to flatten the empty tuplets beforehand.

    Time signature changes:
        Works with measures with any time signature.

        >>> container = abjad.Staff(r"\time 3/4 r2. \times 3/2 {r4 r4}")
        >>> auxjad.mutate(container[:]).extract_trivial_tuplets()
        >>> abjad.show(container)

        ..  docs::

            \new Staff
            {
                \time 3/4
                r2.
                r2.
            }

        ..  figure:: ../_images/extract_trivial_tuplets-sa1tqmvtkx.png

    Non-assignable durations:
        This function also extracts tuplets which sum up to a non-assignable
        duration. In this case, it creates multiple leaves and substitutes them
        for the original tuplet. Indicators are passed on to the first leaf of
        the new leaves.

        >>> staff = abjad.Staff(r"\time 6/4 c'4\f \times 5/6 {g1.\p}")
        >>> abjad.show(staff)

        ..  docs::

            \new Staff
            {
                \time 6/4
                c'4
                \f
                \tweak text #tuplet-number::calc-fraction-text
                \times 5/6
                {
                    g1.
                    \p
                }
            }

        ..  figure:: ../_images/extract_trivial_tuplets-l4kp9g5v7m.png

        >>> abjad.mutate(staff[:]).extract_trivial_tuplets()
        >>> abjad.show(staff)

        ..  docs::

            \new Staff
            {
                \time 6/4
                c'4
                \f
                g1
                \p
                ~
                g4
            }

        ..  figure:: ../_images/extract_trivial_tuplets-8r40ndemvpn.png

    ..  note::

        When using |abjad.Container|'s, all time signatures in the output will
        be commented out with ``%%%.`` This is because Abjad only applies time
        signatures to containers that belong to a |abjad.Staff|. The present
        function works with either |abjad.Container| and |abjad.Staff|.

        >>> container = abjad.Container(r"\time 3/4 c'4 d'4 e'4")
        >>> abjad.show(container)

        ..  docs::

            {
                %%% \time 3/4 %%%
                c'4
                d'4
                e'4
            }

        ..  figure:: ../_images/extract_trivial_tuplets-6wymsb7z1n4.png

        >>> staff = abjad.Staff([container])
        >>> abjad.show(container)

        ..  docs::

            {
                \time 3/4
                c'4
                d'4
                e'4
            }

        ..  figure:: ../_images/extract_trivial_tuplets-moavfyqtxza.png

    ..  warning::

        The input selection must be a contiguous logical voice. When dealing
        with a container with multiple subcontainers (e.g. a score containing
        multiple staves), the best approach is to cycle through these
        subcontainers, applying this function to them individually.
    """
    if not isinstance(selection, abjad.Selection):
        raise TypeError("argument must be 'abjad.Selection'")
    tuplets = selection.tuplets()
    if len(tuplets) == 0:
        return
    for tuplet in tuplets:
        leaves = abjad.select(tuplet).leaves()
        # Case 1: tuplet made up entirely of rests (more than one) -> replace
        # the whole tuplet with plain rests of its total multiplied duration.
        if (all(isinstance(leaf, abjad.Rest) for leaf in leaves)
                and len(leaves) > 1):
            duration = tuplet.multiplied_duration
            rests = abjad.LeafMaker()(None, duration)
            # Preserve an explicit time signature attached to the first leaf
            # by reattaching it to the first replacement rest.
            time_signature = abjad.get.indicator(
                leaves[0],
                abjad.TimeSignature,
            )
            if time_signature is not None:
                abjad.attach(time_signature, rests[0])
            abjad.mutate.replace(tuplet, rests)
        # Case 2: tuplet is a single sustained pitch (fully tied) -> collapse
        # it into one leaf, or several tied leaves for non-assignable totals.
        if abjad.get.sustained(tuplet):
            duration = tuplet.multiplied_duration
            n_elements = len(tuplet)
            # Keep the tie into the following leaf only if one was present.
            after_tie = abjad.get.indicator(leaves[-1], abjad.Tie)
            for _ in range(n_elements - 1):
                tuplet.pop(-1)
            if not after_tie:
                abjad.detach(abjad.Tie, leaves[0])
            if duration.is_assignable:
                # Total fits a single written duration: rewrite the surviving
                # leaf and strip the tuplet bracket.
                leaves[0].written_duration = duration
                abjad.mutate.extract(tuplet)
            elif duration.implied_prolation == 1:
                # Non-assignable but untupletted total: rebuild as tied
                # leaves, carrying over pitch(es) and indicators.
                if isinstance(leaves[0], abjad.Note):
                    pitch = leaves[0].written_pitch
                elif isinstance(leaves[0], abjad.Chord):
                    pitch = leaves[0].written_pitches
                else:
                    pitch = None
                notes = abjad.LeafMaker()(pitch, duration)
                indicators = abjad.get.indicators(leaves[0])
                for indicator in indicators:
                    abjad.attach(indicator, notes[0])
                abjad.mutate.replace(leaves[0], notes)
                abjad.mutate.extract(tuplet)
            else:
                continue
    # Finally, flatten any remaining tuplets whose multiplier became trivial
    # (1:1) so no redundant brackets are left behind.
    for tuplet in tuplets:
        if tuplet.trivializable():
            tuplet.trivialize()
            abjad.mutate.extract(tuplet)
|
from __future__ import print_function, absolute_import, division
import os
import sys
import time
import signal
import traceback
from socket import gethostname
from getpass import getuser
from datetime import datetime
from six import iteritems
from six.moves import cStringIO
from sqlalchemy import func
from sklearn.base import clone, BaseEstimator
from . import __version__
from .config import Config
from .trials import Trial
from .fit_estimator import fit_and_score_estimator
from .utils import Unbuffered, format_timedelta, current_pretty_time
from .utils import is_msmbuilder_estimator
def execute(args, parser):
    """Entry point for the osprey worker command.

    Loads the config, then runs ``args.n_iters`` iterations of
    suggest-parameters -> fit/score -> record-result against the trials
    database, printing a banner and a summary footer.

    Args:
        args: parsed CLI namespace; needs ``config`` and ``n_iters``.
        parser: argparse parser (unused; kept for the CLI dispatch signature).
    """
    start_time = datetime.now()
    # Unbuffered stdout so progress shows up promptly in batch-queue logs.
    sys.stdout = Unbuffered(sys.stdout)
    # Load the config file and extract the fields
    print_header()
    config = Config(args.config)
    estimator = config.estimator()
    searchspace = config.search_space()
    strategy = config.strategy()
    config_sha1 = config.sha1()
    scoring = config.scoring()
    if is_msmbuilder_estimator(estimator):
        print_msmbuilder_version()
    print('\nLoading dataset...\n')
    X, y = config.dataset()
    print('Dataset contains %d elements with %s labels'
          % (len(X), 'out' if y is None else ''))
    print('Instantiated estimator:')
    print('  %r' % estimator)
    print(searchspace)
    # set up cross-validation
    cv = config.cv(X, y)
    statuses = [None for _ in range(args.n_iters)]
    # install a signal handler to print the footer before exiting
    # from sigterm (e.g. PBS job kill)
    def signal_hander(signum, frame):
        print_footer(statuses, start_time, signum)
        sys.exit(1)
    signal.signal(signal.SIGTERM, signal_hander)
    for i in range(args.n_iters):
        print('\n' + '-'*70)
        print('Beginning iteration %50s' % ('%d / %d' % (i+1, args.n_iters)))
        print('-'*70)
        trial_id, params = initialize_trial(
            strategy, searchspace, estimator, config_sha1=config_sha1,
            sessionbuilder=config.trialscontext)
        s = run_single_trial(
            estimator=estimator, params=params, trial_id=trial_id,
            scoring=scoring, X=X, y=y, cv=cv,
            sessionbuilder=config.trialscontext)
        statuses[i] = s
    print_footer(statuses, start_time)
def initialize_trial(strategy, searchspace, estimator, config_sha1,
                     sessionbuilder):
    """Ask the strategy for the next parameters and record a PENDING trial.

    Args:
        strategy: search strategy providing ``suggest(history, searchspace)``.
        searchspace: the hyperparameter search space.
        estimator: sklearn-style estimator used to expand default params.
        config_sha1: hash of the config file, stored with the trial.
        sessionbuilder: context manager factory yielding a DB session.

    Returns:
        (trial_id, params): database id of the new row and the full
        parameter dict (including estimator defaults).
    """
    with sessionbuilder() as session:
        # requery the history every iteration, because another worker
        # process may have written to it in the mean time
        history = [[t.parameters, t.mean_test_score, t.status]
                   for t in session.query(Trial).all()]
        print('History contains: %d trials' % len(history))
        print('Choosing next hyperparameters with %s...' % strategy.short_name)
        start = time.time()
        params = strategy.suggest(history, searchspace)
        print('  %r' % params)
        print('(%s took %.3f s)\n' % (strategy.short_name,
                                      time.time() - start))
        assert len(params) == searchspace.n_dims
        # make sure we get _all_ the parameters, including defaults on the
        # estimator class, to save in the database
        params = clone(estimator).set_params(**params).get_params()
        # nested BaseEstimator values are not serializable for the DB,
        # so they are dropped before persisting
        params = dict((k, v) for k, v in iteritems(params)
                      if not isinstance(v, BaseEstimator))
        t = Trial(status='PENDING', parameters=params, host=gethostname(),
                  user=getuser(), started=datetime.now(),
                  config_sha1=config_sha1)
        session.add(t)
        session.commit()
        trial_id = t.id
    return trial_id, params
def run_single_trial(estimator, params, trial_id, scoring, X, y, cv,
                     sessionbuilder):
    """Fit and score one hyperparameter candidate, recording the outcome.

    Updates the trial row to SUCCEEDED (with scores) or FAILED (with the
    traceback). A KeyboardInterrupt/SystemExit marks the trial FAILED and
    exits the process.

    Args:
        estimator: sklearn-style estimator template.
        params: parameter dict for this trial.
        trial_id: database id of the trial row created by initialize_trial.
        scoring: scoring specification passed to fit_and_score_estimator.
        X, y: dataset (y may be None for unsupervised estimators).
        cv: cross-validation splitter.
        sessionbuilder: context manager factory yielding a DB session.

    Returns:
        The final trial status string ('SUCCEEDED'/'FAILED'), or None if
        scoring failed before a status could be recorded.
    """
    status = None
    try:
        score = fit_and_score_estimator(
            estimator, params, cv=cv, scoring=scoring, X=X, y=y, verbose=1)
        with sessionbuilder() as session:
            trial = session.query(Trial).get(trial_id)
            trial.mean_test_score = score['mean_test_score']
            trial.mean_train_score = score['mean_train_score']
            trial.test_scores = score['test_scores']
            trial.train_scores = score['train_scores']
            trial.n_test_samples = score['n_test_samples']
            trial.n_train_samples = score['n_train_samples']
            trial.status = 'SUCCEEDED'
            best_so_far = session.query(
                func.max(Trial.mean_test_score)).first()
            # fix: best_so_far is a 1-tuple whose element is None when no
            # earlier trial has a recorded score; max(float, None) raised a
            # TypeError on the very first successful trial.
            previous_best = best_so_far[0] if best_so_far is not None else None
            if previous_best is None:
                previous_best = trial.mean_test_score
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
            print('Success! Model score = %f' % trial.mean_test_score)
            print('(best score so far = %f)' %
                  max(trial.mean_test_score, previous_best))
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
            trial.completed = datetime.now()
            trial.elapsed = trial.completed - trial.started
            session.commit()
            status = trial.status
    except Exception:
        # Record the full traceback on the trial row, and echo it to stderr.
        buf = cStringIO()
        traceback.print_exc(file=buf)
        with sessionbuilder() as session:
            trial = session.query(Trial).get(trial_id)
            trial.traceback = buf.getvalue()
            trial.status = 'FAILED'
            print('-'*78, file=sys.stderr)
            # fix: this line went to stdout while the rest of the error
            # report went to stderr, splitting the message across streams.
            print('Exception encountered while fitting model',
                  file=sys.stderr)
            print('-'*78, file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
            print('-'*78, file=sys.stderr)
            session.commit()
            status = trial.status
    except (KeyboardInterrupt, SystemExit):
        # Mark the trial FAILED before honoring the interrupt. (These do not
        # derive from Exception, so the handler above does not swallow them.)
        with sessionbuilder() as session:
            trial = session.query(Trial).get(trial_id)
            trial.status = 'FAILED'
            session.commit()
            sys.exit(1)
    return status
def print_header():
    """Print the osprey banner and run-environment information."""
    bar = '=' * 70
    print(bar)
    print('= osprey is a tool for machine learning '
          'hyperparameter optimization. =')
    print(bar)
    print()
    # Emit each "label: value" line in a fixed order.
    for label, value in (
            ('osprey version', __version__),
            ('time', current_pretty_time()),
            ('hostname', gethostname()),
            ('cwd', os.path.abspath(os.curdir)),
            ('pid', os.getpid())):
        print('%s: %s' % (label, value))
    print()
def print_msmbuilder_version():
    """Print the msmbuilder and mdtraj versions (msmbuilder estimators only)."""
    from msmbuilder.version import full_version as msmb_version
    from mdtraj.version import full_version as mdtraj_version
    lines = (
        '',
        'msmbuilder version: %s' % msmb_version,
        'mdtraj version: %s' % mdtraj_version,
        '',
    )
    for line in lines:
        print(line)
def print_footer(statuses, start_time, signum=None):
    """Summarize the worker run; optionally note the signal that ended it."""
    succeeded = sum(status == 'SUCCEEDED' for status in statuses)
    elapsed = format_timedelta(datetime.now() - start_time)
    print()
    if signum is not None:
        # Map the numeric signal back to its SIG* name for the log message.
        names = dict((num, name) for name, num in iteritems(signal.__dict__)
                     if name.startswith('SIG'))
        signame = names.get(signum, 'Unknown')
        print('== osprey worker received signal %s!' % signame,
              file=sys.stderr)
        print('== exiting immediately.', file=sys.stderr)
    print('%d/%d models fit successfully.' % (succeeded, len(statuses)))
    print('time: %s' % current_pretty_time())
    print('elapsed: %s.' % elapsed)
    print('osprey worker exiting.')
|
<filename>monitoring/prober/scd/test_operation_references_error_cases.py
"""Operation References corner cases error tests:
"""
import datetime
import json
import uuid
import yaml
from monitoring.monitorlib.infrastructure import default_scope
from monitoring.monitorlib import scd
from monitoring.monitorlib.scd import SCOPE_SC
from monitoring.prober.infrastructure import for_api_versions, register_resource_type
# Resource-type ids used to derive deterministic per-test entity ids
# via the ``ids`` fixture.
OP_TYPE = register_resource_type(6, 'Primary operational intent')
OP_TYPE2 = register_resource_type(7, 'Conflicting operational intent')
@for_api_versions(scd.API_0_3_5)
def test_ensure_clean_workspace_v5(ids, scd_api, scd_session):
    """Remove operation references left over from earlier runs (v0.3.5 API)."""
    for op_id in (ids(OP_TYPE), ids(OP_TYPE2)):
        url = '/operation_references/{}'.format(op_id)
        resp = scd_session.get(url, scope=SCOPE_SC)
        if resp.status_code == 200:
            # Stale entity from a previous run: delete it.
            resp = scd_session.delete(url, scope=SCOPE_SC)
            assert resp.status_code == 200, resp.content
        else:
            # 404 means the workspace is already clean; anything else fails.
            assert resp.status_code == 404, resp.content
@for_api_versions(scd.API_0_3_15)
def test_ensure_clean_workspace_v15(ids, scd_api, scd_session):
    """Remove operational intent references left over from earlier runs."""
    for op_id in (ids(OP_TYPE), ids(OP_TYPE2)):
        url = '/operational_intent_references/{}'.format(op_id)
        resp = scd_session.get(url, scope=SCOPE_SC)
        if resp.status_code == 200:
            # Stale entity from a previous run: delete it.
            resp = scd_session.delete(url, scope=SCOPE_SC)
            assert resp.status_code == 200, resp.content
        else:
            # 404 means the workspace is already clean; anything else fails.
            assert resp.status_code == 404, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_ref_area_too_large_v5(scd_api, scd_session):
    """A query covering an oversized area must be rejected with 400."""
    with open('./scd/resources/op_ref_area_too_large.json') as fh:
        payload = json.load(fh)
    response = scd_session.post('/operation_references/query', json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_ref_area_too_large_v15(scd_api, scd_session):
    """A query covering an oversized area must be rejected with 400."""
    with open('./scd/resources/op_ref_area_too_large_v15.json') as fh:
        payload = json.load(fh)
    response = scd_session.post('/operational_intent_reference/query',
                                json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_ref_start_end_times_past_v5(scd_api, scd_session):
    """Querying entirely in the past is allowed (200).

    It is ok (and useful) to query for past Operations that may not yet
    have been explicitly deleted. This is unlike remote ID, where ISAs are
    auto-removed from the client's perspective immediately after their end
    time.
    """
    with open('./scd/resources/op_ref_start_end_times_past.json') as fh:
        payload = json.load(fh)
    response = scd_session.post('/operation_references/query', json=payload)
    assert response.status_code == 200, response.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_ref_start_end_times_past_v15(scd_api, scd_session):
    """Querying entirely in the past is allowed (200).

    It is ok (and useful) to query for past Operations that may not yet
    have been explicitly deleted. This is unlike remote ID, where ISAs are
    auto-removed from the client's perspective immediately after their end
    time.
    """
    with open('./scd/resources/op_ref_start_end_times_past_v15.json') as fh:
        payload = json.load(fh)
    response = scd_session.post('/operational_intent_reference/query',
                                json=payload)
    assert response.status_code == 200, response.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_ref_incorrect_units(scd_api, scd_session):
    """A query using wrong altitude/distance units must be rejected with 400."""
    with open('./scd/resources/op_ref_incorrect_units.json') as fh:
        payload = json.load(fh)
    response = scd_session.post('/operation_references/query', json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_ref_incorrect_units_v15(scd_api, scd_session):
    """A query using wrong altitude/distance units must be rejected with 400."""
    with open('./scd/resources/op_ref_incorrect_units_v15.json') as fh:
        payload = json.load(fh)
    response = scd_session.post('/operational_intent_reference/query',
                                json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_ref_incorrect_altitude_ref_v5(scd_api, scd_session):
    """A query with an invalid altitude reference must be rejected with 400."""
    with open('./scd/resources/op_ref_incorrect_altitude_ref.json') as fh:
        payload = json.load(fh)
    response = scd_session.post('/operation_references/query', json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_ref_incorrect_altitude_ref_v15(scd_api, scd_session):
    """A query with an invalid altitude reference must be rejected with 400."""
    with open('./scd/resources/op_ref_incorrect_altitude_ref_v15.json') as fh:
        payload = json.load(fh)
    response = scd_session.post('/operational_intent_reference/query',
                                json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_uss_base_url_non_tls_v5(ids, scd_api, scd_session):
    """Creating an operation with a non-TLS USS base URL must fail with 400."""
    with open('./scd/resources/op_uss_base_url_non_tls.json') as fh:
        payload = json.load(fh)
    url = '/operation_references/{}'.format(ids(OP_TYPE))
    response = scd_session.put(url, json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_uss_base_url_non_tls_v15(ids, scd_api, scd_session):
    """Creating an intent with a non-TLS USS base URL must fail with 400."""
    with open('./scd/resources/op_uss_base_url_non_tls_v15.json') as fh:
        payload = json.load(fh)
    url = '/operational_intent_reference/{}'.format(ids(OP_TYPE))
    response = scd_session.put(url, json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_bad_subscription_id_v5(ids, scd_api, scd_session):
    """Referencing a malformed subscription id must be rejected with 400."""
    with open('./scd/resources/op_bad_subscription.json') as fh:
        payload = json.load(fh)
    url = '/operation_references/{}'.format(ids(OP_TYPE))
    response = scd_session.put(url, json=payload)
    assert response.status_code == 400, response.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_bad_subscription_id_v15(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_subscription_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_bad_subscription_id_random_v5(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_subscription.json', 'r') as f:
req = json.load(f)
req['subscription_id'] = uuid.uuid4().hex
resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_bad_subscription_id_random_v15(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_subscription_v15.json', 'r') as f:
req = json.load(f)
req['subscription_id'] = uuid.uuid4().hex
resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_new_and_existing_subscription_v5(ids, scd_api, scd_session):
with open('./scd/resources/op_new_and_existing_subscription.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_new_and_existing_subscription_v15(ids, scd_api, scd_session):
with open('./scd/resources/op_new_and_existing_subscription_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_end_time_past_v5(ids, scd_api, scd_session):
with open('./scd/resources/op_end_time_past.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_end_time_past_v15(ids, scd_api, scd_session):
with open('./scd/resources/op_end_time_past_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_already_exists_v5(ids, scd_api, scd_session):
    """Creating the same operation reference twice must yield 409 on the repeat;
    the entity is then deleted and its absence verified (v5 API)."""
    with open('./scd/resources/op_request_1.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 200, resp.content
    # Identical second PUT: the entity already exists, so 409 is expected.
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 409, resp.content
    # Delete operation
    resp = scd_session.delete('/operation_references/{}'.format(ids(OP_TYPE)))
    assert resp.status_code == 200, resp.content
    # Verify deletion
    resp = scd_session.get('/operation_references/{}'.format(ids(OP_TYPE)))
    assert resp.status_code == 404, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_already_exists_v15(ids, scd_api, scd_session):
    """v15 twin of test_op_already_exists_v5."""
    with open('./scd/resources/op_request_1_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 200, resp.content
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 409, resp.content
    # Delete operation
    resp = scd_session.delete('/operational_intent_reference/{}'.format(ids(OP_TYPE)))
    assert resp.status_code == 200, resp.content
    # Verify deletion
    resp = scd_session.get('/operational_intent_reference/{}'.format(ids(OP_TYPE)))
    assert resp.status_code == 404, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_404_version1_v5(ids, scd_api, scd_session):
    """The canned request op_404_version1.json must be answered with 404 (v5 API)."""
    with open('./scd/resources/op_404_version1.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 404, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_404_version1_v15(ids, scd_api, scd_session):
    """v15 twin of test_op_404_version1_v5."""
    with open('./scd/resources/op_404_version1_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 404, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_bad_state_version0_v5(ids, scd_api, scd_session):
    """An operation declaring an invalid state (see resource file) must be rejected with 400 (v5 API)."""
    with open('./scd/resources/op_bad_state_version0.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_bad_state_version0_v15(ids, scd_api, scd_session):
    """v15 twin of test_op_bad_state_version0_v5."""
    with open('./scd/resources/op_bad_state_version0_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_bad_lat_lon_range_v5(ids, scd_api, scd_session):
    """Latitude/longitude values outside the valid range (see resource file) must be rejected with 400 (v5 API)."""
    with open('./scd/resources/op_bad_lat_lon_range.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_bad_lat_lon_range_v15(ids, scd_api, scd_session):
    """v15 twin of test_op_bad_lat_lon_range_v5."""
    with open('./scd/resources/op_bad_lat_lon_range_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_area_too_large_put_v5(ids, scd_api, scd_session):
    """Creating an operation over an oversized area (see resource file) must be rejected with 400 (v5 API)."""
    with open('./scd/resources/op_area_too_large_put.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_area_too_large_put_v15(ids, scd_api, scd_session):
    """v15 twin of test_op_area_too_large_put_v5."""
    with open('./scd/resources/op_area_too_large_put_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_bad_time_format_v5(ids, scd_api, scd_session):
    """A malformed timestamp in the payload (see resource file) must be rejected with 400 (v5 API)."""
    with open('./scd/resources/op_bad_time_format.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_bad_time_format_v15(ids, scd_api, scd_session):
    """v15 twin of test_op_bad_time_format_v5."""
    with open('./scd/resources/op_bad_time_format_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_repeated_requests_v5(ids, scd_api, scd_session):
    """Re-sending an identical, freshly-loaded creation request must yield 409
    on the repeat; the entity is then deleted (v5 API)."""
    # First PUT creates the entity (200); the identical second PUT conflicts (409).
    for expected_status in (200, 409):
        with open('./scd/resources/op_request_1.json', 'r') as f:
            req = json.load(f)
        resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
        assert resp.status_code == expected_status, resp.content
    # Delete operation
    resp = scd_session.delete('/operation_references/{}'.format(ids(OP_TYPE)))
    assert resp.status_code == 200, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_repeated_requests_v15(ids, scd_api, scd_session):
    """Re-sending an identical creation request must yield 409 on the repeat;
    the entity is then deleted (v15 API)."""
    with open('./scd/resources/op_request_1_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 200, resp.content
    # BUG FIX: the original reloaded the v5 payload (op_request_1.json) for the
    # repeated request; the v15 variant must be used, mirroring the v5 twin
    # which re-reads the same resource both times.
    with open('./scd/resources/op_request_1_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 409, resp.content
    # Delete operation
    resp = scd_session.delete('/operational_intent_reference/{}'.format(ids(OP_TYPE)))
    assert resp.status_code == 200, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_op_invalid_id_v5(scd_api, scd_session):
    """A non-UUID entity id in the URL must be rejected with 400 (v5 API)."""
    with open('./scd/resources/op_request_1.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operation_references/not_uuid_format', json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_op_invalid_id_v15(scd_api, scd_session):
    """v15 twin of test_op_invalid_id_v5."""
    with open('./scd/resources/op_request_1_v15.json', 'r') as f:
        req = json.load(f)
    resp = scd_session.put('/operational_intent_reference/not_uuid_format', json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_missing_conflicted_operation_v5(ids, scd_api, scd_session):
    """Updating an Operation without supplying the OVN of an overlapping
    pre-existing Operation must fail with 409 and list the conflict; an
    area query over Operation 1's volume must return only Operation 1 (v5 API)."""
    # Emplace the initial version of Operation 1
    with open('./scd/resources/op_missing_initial.yaml', 'r') as f:
        req = yaml.full_load(f)
    # Shift the canned extents so they start now; the same dt is reused for the
    # other payloads so all entities stay mutually consistent in time.
    dt = datetime.datetime.utcnow() - scd.start_of(req['extents'])
    req['extents'] = scd.offset_time(req['extents'], dt)
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 200, resp.content
    ovn1a = resp.json()['operation_reference']['ovn']
    sub_id = resp.json()['operation_reference']['subscription_id']
    # Emplace the pre-existing Operation that conflicted in the original observation
    with open('./scd/resources/op_missing_preexisting_unknown.yaml', 'r') as f:
        req = yaml.full_load(f)
    req['extents'] = scd.offset_time(req['extents'], dt)
    req['key'] = [ovn1a]
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE2)), json=req)
    assert resp.status_code == 200, resp.content
    # Attempt to update Operation 1 without OVN for the pre-existing Operation
    with open('./scd/resources/op_missing_update.json', 'r') as f:
        req = json.load(f)
    req['extents'] = scd.offset_time(req['extents'], dt)
    req['key'] = [ovn1a]
    req['subscription_id'] = sub_id
    resp = scd_session.put('/operation_references/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 409, resp.content
    # The conflict listing must name the pre-existing Operation.
    conflicts = []
    for conflict in resp.json()['entity_conflicts']:
        if conflict.get('operation_reference', None):
            conflicts.append(conflict['operation_reference']['id'])
    assert ids(OP_TYPE2) in conflicts, resp.content
    # Perform an area-based query on the area occupied by Operation 1
    with open('./scd/resources/op_missing_query.json', 'r') as f:
        req = json.load(f)
    req['area_of_interest'] = scd.offset_time([req['area_of_interest']], dt)[0]
    resp = scd_session.post('/operation_references/query', json=req)
    assert resp.status_code == 200, resp.content
    ops = [op['id'] for op in resp.json()['operation_references']]
    assert ids(OP_TYPE) in ops, resp.content
    # ids(OP_ID2) not expected here because its ceiling is <575m whereas query floor is
    # >591m.
    assert ids(OP_TYPE2) not in ops, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_missing_conflicted_operation_v15(ids, scd_api, scd_session):
    """v15 twin of test_missing_conflicted_operation_v5: updating an
    operational intent without the OVN of an overlapping intent must fail with
    409, and an area query over intent 1's volume must return only intent 1."""
    # Emplace the initial version of Operation 1
    with open('./scd/resources/op_missing_initial.yaml', 'r') as f:
        req = yaml.full_load(f)
    # Shift the canned extents so they start now; dt is reused for all payloads.
    dt = datetime.datetime.utcnow() - scd.start_of(req['extents'])
    req['extents'] = scd.offset_time(req['extents'], dt)
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 200, resp.content
    ovn1a = resp.json()['operational_intent_reference']['ovn']
    sub_id = resp.json()['operational_intent_reference']['subscription_id']
    # Emplace the pre-existing Operation that conflicted in the original observation
    with open('./scd/resources/op_missing_preexisting_unknown.yaml', 'r') as f:
        req = yaml.full_load(f)
    req['extents'] = scd.offset_time(req['extents'], dt)
    req['key'] = [ovn1a]
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE2)), json=req)
    assert resp.status_code == 200, resp.content
    # Attempt to update Operation 1 without OVN for the pre-existing Operation
    with open('./scd/resources/op_missing_update.json', 'r') as f:
        req = json.load(f)
    req['extents'] = scd.offset_time(req['extents'], dt)
    req['key'] = [ovn1a]
    req['subscription_id'] = sub_id
    resp = scd_session.put('/operational_intent_reference/{}'.format(ids(OP_TYPE)), json=req)
    assert resp.status_code == 409, resp.content
    # TODO: entity_conflicts is not there in v15 response. What is the replacement key?
    # conflicts = []
    # for conflict in resp.json()['entity_conflicts']:
    #     if conflict.get('operation_reference', None):
    #         conflicts.append(conflict['operation_reference']['id'])
    # assert ids(OP_ID2) in conflicts, resp.content
    # Perform an area-based query on the area occupied by Operation 1
    with open('./scd/resources/op_missing_query.json', 'r') as f:
        req = json.load(f)
    req['area_of_interest'] = scd.offset_time([req['area_of_interest']], dt)[0]
    resp = scd_session.post('/operational_intent_reference/query', json=req)
    assert resp.status_code == 200, resp.content
    # BUG FIX: the query response carries the plural key
    # 'operational_intent_references' (cf. the v5 twin, which reads the plural
    # 'operation_references'); the original read the singular key and would
    # KeyError on a successful query.
    ops = [op['id'] for op in resp.json()['operational_intent_references']]
    assert ids(OP_TYPE) in ops, resp.content
    # ids(OP_TYPE2) not expected here because its ceiling is <575m whereas query
    # floor is >591m.
    assert ids(OP_TYPE2) not in ops, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_big_operation_search_v5(scd_api, scd_session):
    """
    This test reproduces a case where a search resulted in 503 because the
    underlying gRPC backend had crashed.

    The oversized search is expected to be rejected cleanly with 400 rather
    than crashing the backend.
    """
    with open('./scd/resources/op_big_operation.json', 'r') as f:
        req = json.load(f)
    # Shift the canned area-of-interest so it starts now.
    dt = datetime.datetime.utcnow() - scd.start_of([req['area_of_interest']])
    req['area_of_interest'] = scd.offset_time([req['area_of_interest']], dt)[0]
    resp = scd_session.post('/operation_references/query', json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_big_operation_search_v15(scd_api, scd_session):
    """v15 twin of test_big_operation_search_v5."""
    with open('./scd/resources/op_big_operation.json', 'r') as f:
        req = json.load(f)
    dt = datetime.datetime.utcnow() - scd.start_of([req['area_of_interest']])
    req['area_of_interest'] = scd.offset_time([req['area_of_interest']], dt)[0]
    resp = scd_session.post('/operational_intent_reference/query', json=req)
    assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_5)
@default_scope(SCOPE_SC)
def test_ensure_clean_workspace_v5(ids, scd_api, scd_session):
    """Remove any operation references left behind by earlier tests (v5 API)."""
    for op_id in (ids(OP_TYPE), ids(OP_TYPE2)):
        resp = scd_session.get('/operation_references/{}'.format(op_id), scope=SCOPE_SC)
        if resp.status_code == 200:
            # only the owner of the subscription can delete an operation reference.
            resp = scd_session.delete('/operation_references/{}'.format(op_id), scope=SCOPE_SC)
            assert resp.status_code == 200, resp.content
        elif resp.status_code == 404:
            # As expected.
            pass
        else:
            assert False, resp.content
@for_api_versions(scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_ensure_clean_workspace_v15(ids, scd_api, scd_session):
    """v15 twin of test_ensure_clean_workspace_v5."""
    for op_id in (ids(OP_TYPE), ids(OP_TYPE2)):
        resp = scd_session.get('/operational_intent_reference/{}'.format(op_id), scope=SCOPE_SC)
        if resp.status_code == 200:
            # only the owner of the subscription can delete an operation reference.
            resp = scd_session.delete('/operational_intent_reference/{}'.format(op_id), scope=SCOPE_SC)
            assert resp.status_code == 200, resp.content
        elif resp.status_code == 404:
            # As expected.
            pass
        else:
            assert False, resp.content
|
from typing import List
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from sklearn.datasets import make_blobs, make_classification, make_swiss_roll, make_moons
import sys
# Create a class for k-means clustering algorithm
class KMeansClustering(object):
    """Lloyd's k-means with farthest-point centroid initialisation.

    The fitted centroids are stored on ``self.fitted_centroids`` after
    :meth:`fit`; :meth:`predict` assigns new points to those centroids.
    """

    def __init__(self, K: int, max_iter: int = 200) -> None:
        """
        :param K: number of clusters.
        :param max_iter: cap on the number of Lloyd iterations in fit().
        """
        super().__init__()
        self.K = K
        self.max_iter = max_iter
        # BUG FIX: data-dependent attributes are set by fit().  The original
        # read the *global* variable ``X`` here, which only worked when a
        # module-level ``X`` happened to exist before construction.
        self.num_datapoints = None
        self.num_feat = None
        self.fitted_centroids = None
        self.inertia = 0

    def init_centroids(self, X: np.ndarray) -> np.ndarray:
        """Choose K starting centroids.

        The first is a uniformly random data point; each subsequent centroid
        is the data point farthest from its nearest already-chosen centroid.
        """
        # BUG FIX: randint(1, len(X)) could never pick the first data point;
        # the valid index range is [0, len(X)).
        centroids = [X[np.random.randint(0, len(X))]]
        for _ in range(self.K - 1):
            # Distance of every point to its closest chosen centroid,
            # vectorised (replaces the original O(n*k) Python loops).
            diffs = X[:, None, :] - np.asarray(centroids)[None, :, :]
            nearest = np.sqrt((diffs ** 2).sum(axis=2)).min(axis=1)
            centroids.append(X[np.argmax(nearest)])
        return np.array(centroids)

    def create_clusters(self, X: np.ndarray, centroids: np.ndarray) -> List[list]:
        """Return K lists of data-point indices, one per nearest centroid."""
        clusters = [[] for _ in range(self.K)]  # Create K empty clusters
        for p_idx, p in enumerate(X):
            # Euclidean distance from point p to every centroid.
            closest_centroid = np.argmin(np.sqrt(np.sum((p - centroids) ** 2, axis=1)))
            clusters[closest_centroid].append(p_idx)
        return clusters

    def update_centroid(self, X: np.ndarray, clusters: List[list],
                        prev_centroids: np.ndarray = None) -> np.ndarray:
        """Recompute each centroid as the mean of its assigned points.

        An empty cluster keeps its previous centroid (when supplied) instead
        of producing a NaN mean.
        """
        centroids = np.zeros(shape=(self.K, self.num_feat))
        for idx, cluster in enumerate(clusters):
            if cluster:
                centroids[idx] = np.mean(X[cluster], axis=0)
            elif prev_centroids is not None:
                centroids[idx] = prev_centroids[idx]
        return centroids

    def plot_cluster(self, centroids, x, y):
        """Scatter-plot 2-D data coloured by cluster, centroids marked with x."""
        plt.scatter(x[:, 0], x[:, 1], c=y, s=50, cmap='viridis')
        plt.scatter(centroids[:, 0], centroids[:, 1], c='black', s=100, alpha=0.7, marker='x')
        plt.show()

    def plot_3d_cluster(self, centroids, x, y):
        """Scatter-plot 3-D data coloured by cluster, centroids marked with o."""
        ax = plt.axes(projection='3d')
        ax.scatter3D(x[:, 0], x[:, 1], x[:, 2], c=y, s=20, alpha=0.3, cmap='viridis')
        ax.scatter3D(centroids[:, 0], centroids[:, 1], centroids[:, 2], c='black', s=100, alpha=1.0, marker='o')
        plt.show()

    def get_y_label(self, clusters: List[list], X: np.ndarray) -> np.ndarray:
        """Flatten cluster membership into a per-point label vector."""
        y_label = np.zeros(shape=(self.num_datapoints))
        for idx, cluster in enumerate(clusters):
            for point_idx in cluster:
                y_label[point_idx] = idx
        return y_label

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Assign each row of X to the nearest fitted centroid.

        (The original left this method as a stub returning None.)

        :raises RuntimeError: if called before fit().
        """
        if self.fitted_centroids is None:
            raise RuntimeError("fit() must be called before predict()")
        return np.array([
            int(np.argmin(np.sqrt(np.sum((p - self.fitted_centroids) ** 2, axis=1))))
            for p in X
        ])

    def fit(self, X: np.ndarray) -> np.ndarray:
        """Run Lloyd iterations on X and return the per-point cluster labels.

        2-D / 3-D inputs are additionally visualised (matplotlib side effect),
        matching the original behaviour.
        """
        # Shapes are derived from the data here rather than in __init__
        # (see the note there).
        self.num_datapoints, self.num_feat = X.shape
        centroids = self.init_centroids(X)
        for i in range(self.max_iter):
            clusters = self.create_clusters(X, centroids)
            prev_centroids = centroids
            centroids = self.update_centroid(X, clusters, prev_centroids)
            # BUG FIX: the original tested ``diff.any() < 0.0001`` which
            # compares a bool against a float; converge only once *no*
            # centroid moved by more than the tolerance.
            if np.all(np.abs(prev_centroids - centroids) < 1e-4):
                break
        self.fitted_centroids = centroids
        self.fitted_centroids_ = centroids  # kept for backward compatibility
        y_label = self.get_y_label(clusters, X)
        if self.num_feat == 2:
            self.plot_cluster(centroids, X, y_label)
        elif self.num_feat == 3:
            self.plot_3d_cluster(centroids, X, y_label)
        return y_label
if __name__ == "__main__":
    # Demo: cluster three synthetic 3-D blobs and visualise the result.
    np.random.seed(45)
    K = 3
    num_of_features = 3
    num_of_samples = 1000
    X, _ = make_blobs(n_samples=num_of_samples, centers=K, n_features=num_of_features, cluster_std=2.0, random_state=1)
    # Alternative datasets kept for experimentation:
    # X, _ = make_classification(n_samples=num_of_samples, n_features=num_of_features, n_redundant=0, n_informative=2, n_classes=K, n_clusters_per_class=1)
    # X, _ = make_moons(n_samples=num_of_samples, noise=0.1)
    kmeans = KMeansClustering(K, max_iter=30)
    y_label = kmeans.fit(X)
|
import importlib
import sys
import pytest
from openff.toolkit.topology import Molecule
from openff.bespokefit.utilities.molecule import (
_oe_canonical_atom_order,
_oe_get_atom_symmetries,
_rd_canonical_atom_order,
_rd_get_atom_symmetries,
canonical_order_atoms,
get_atom_symmetries,
get_torsion_indices,
group_valence_by_symmetry,
)
@pytest.fixture()
def with_oe_backend(monkeypatch):
    """Force the OpenEye toolkit backend by masking the ``rdkit`` module."""
    oechem = pytest.importorskip("openeye.oechem")
    if not oechem.OEChemIsLicensed():
        pytest.skip("OE is not licensed")
    # Replacing the sys.modules entry with None makes ``import rdkit`` fail
    # for the duration of the test (verified by test_oe_fixture below).
    monkeypatch.setitem(sys.modules, "rdkit", None)
@pytest.fixture()
def with_rd_backend(monkeypatch):
    """Force the RDKit backend by masking the OpenEye modules."""
    pytest.importorskip("rdkit")
    monkeypatch.setitem(sys.modules, "openeye", None)
    monkeypatch.setitem(sys.modules, "openeye.oechem", None)
def test_oe_fixture(with_oe_backend):
    """Ensure that our fixture really does ensure only OE can be used"""
    with pytest.raises(ModuleNotFoundError):
        importlib.import_module("rdkit")
@pytest.mark.parametrize(
    "module, package",
    [("openeye", None), ("openeye.oechem", None), ("openeye", "oechem")],
)
def test_rd_fixture(with_rd_backend, module, package):
    """Ensure that our fixture really does ensure only RDKit can be used"""
    with pytest.raises(ModuleNotFoundError):
        importlib.import_module(module, package)
@pytest.mark.parametrize(
    "get_symmetries, backend",
    [
        (_oe_get_atom_symmetries, "with_oe_backend"),
        (_rd_get_atom_symmetries, "with_rd_backend"),
        (get_atom_symmetries, "with_oe_backend"),
        (get_atom_symmetries, "with_rd_backend"),
    ],
)
def test_get_atom_symmetries(get_symmetries, backend, request):
    """Methanol's three methyl hydrogens must share one symmetry class while
    C, O and the hydroxyl H are mutually distinct."""
    molecule = Molecule.from_mapped_smiles("[H:1][C:2]([H:3])([H:4])[O:5][H:6]")
    # Activate the requested backend fixture lazily by name.
    request.getfixturevalue(backend)
    try:
        atom_symmetries = get_symmetries(molecule)
    except ModuleNotFoundError as e:
        pytest.skip(f"missing optional dependency - {e.name}")
        return
    # Atoms 0, 2, 3 are the equivalent methyl hydrogens...
    assert len({atom_symmetries[i] for i in (0, 2, 3)}) == 1
    # ...while atoms 1 (C), 4 (O) and 5 (hydroxyl H) are all distinct.
    assert len({atom_symmetries[i] for i in (1, 4, 5)}) == 3
@pytest.mark.parametrize(
    "canonical_order_func",
    [_oe_canonical_atom_order, _rd_canonical_atom_order],
)
def test_canonical_atom_order(canonical_order_func):
    """Both backends should rank the four hydrogens before the two heavy atoms."""
    molecule = Molecule.from_mapped_smiles("[H:1][C:2]([H:3])([H:4])[O:5][H:6]")
    try:
        atom_order = canonical_order_func(molecule)
    except ModuleNotFoundError as e:
        pytest.skip(f"missing optional dependency - {e.name}")
        return
    # In general the canonical order should have H ranked first and heavy atoms last.
    assert sorted(atom_order[i] for i in [0, 2, 3, 5]) == [0, 1, 2, 3]
    assert sorted(atom_order[i] for i in [1, 4]) == [4, 5]
@pytest.mark.parametrize("backend", ["with_rd_backend", "with_oe_backend"])
def test_canonical_order_atoms(backend, request):
    """canonical_order_atoms must move heavy atoms to the front of the
    molecule and remap the ``atom_map`` property accordingly."""
    molecule = Molecule.from_mapped_smiles("[H:1][C:2]([H:3])([H:4])[O:5][H:6]")
    molecule.properties["atom_map"] = {i: i + 1 for i in range(molecule.n_atoms)}
    request.getfixturevalue(backend)
    canonical_molecule = canonical_order_atoms(molecule)
    # Either heavy-atom ordering (C first or O first) is acceptable.
    assert [a.atomic_number for a in canonical_molecule.atoms] in (
        [6, 8, 1, 1, 1, 1],
        [8, 6, 1, 1, 1, 1],
    )
    canonical_atom_map = canonical_molecule.properties["atom_map"]
    # Make sure the atom map has been updated to reflect that the heavy atoms are now at
    # the beginning of the molecule
    assert (canonical_atom_map[0], canonical_atom_map[1]) in [(2, 5), (5, 2)]
@pytest.mark.parametrize(
    "valence_terms, expected_values",
    [
        (
            [(0,), (1,), (2,), (3,), (4,), (5,)],
            [[(0,), (2,), (3,)], [(1,)], [(4,)], [(5,)]],
        ),
        (
            [(1, 0), (1, 2), (1, 3), (1, 4), (4, 5)],
            [[(1, 0), (1, 2), (1, 3)], [(1, 4)], [(4, 5)]],
        ),
    ],
)
def test_group_by_symmetry(valence_terms, expected_values):
    """Valence tuples over symmetry-equivalent atoms (e.g. methanol's three
    methyl hydrogens / C-H bonds) must be grouped together."""
    molecule = Molecule.from_mapped_smiles("[H:1][C:2]([H:3])([H:4])[O:5][H:6]")
    valence_groups = group_valence_by_symmetry(molecule, valence_terms)
    assert len(valence_groups) == len(expected_values)
    # Group contents matter; group ordering does not.
    actual_values = [*valence_groups.values()]
    assert sorted(actual_values) == sorted(expected_values)
@pytest.mark.parametrize(
    "smiles, central_bond, expected_values",
    [
        (
            "[H:1][C:2]([H:3])=[C:4]([H:5])[H:6]",
            None,
            [(0, 1, 3, 4), (0, 1, 3, 5), (2, 1, 3, 4), (2, 1, 3, 5)],
        ),
        (
            "[H:1][C:2]([H:3])=[C:4]([H:5])[H:6]",
            (1, 3),
            [(0, 1, 3, 4), (0, 1, 3, 5), (2, 1, 3, 4), (2, 1, 3, 5)],
        ),
        (
            "[H:1][C:2]([H:3])=[C:4]=[C:5]([H:6])[H:7]",
            (1, 3),
            [(0, 1, 3, 4), (2, 1, 3, 4)],
        ),
    ],
)
def test_get_torsion_indices(smiles, central_bond, expected_values):
    """Torsion tuples around the requested (or, when None, every) central
    bond are returned; comparison is order-insensitive."""
    molecule = Molecule.from_mapped_smiles(smiles)
    torsion_indices = get_torsion_indices(molecule, central_bond)
    assert sorted(torsion_indices) == sorted(expected_values)
|
# Copyright 2016 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import gpflow
import numpy as np
import unittest
import tensorflow as tf
from testing.gpflow_testcase import GPflowTestCase
class TestGaussian(GPflowTestCase):
    """Prediction checks for a GPR model with a Gaussian likelihood."""

    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)
            self.X = self.rng.randn(100,2)
            self.Y = self.rng.randn(100, 1)
            self.kern = gpflow.kernels.Matern32(2) + gpflow.kernels.White(1)
            self.Xtest = self.rng.randn(10, 2)
            self.Ytest = self.rng.randn(10, 1)
            # make a Gaussian model
            self.m = gpflow.gpr.GPR(self.X, self.Y, kern=self.kern)

    def test_all(self):
        """predict_y must share its mean with predict_f and exceed its
        variance by a constant offset."""
        with self.test_session():
            mu_f, var_f = self.m.predict_f(self.Xtest)
            mu_y, var_y = self.m.predict_y(self.Xtest)
            self.assertTrue(np.allclose(mu_f, mu_y))
            # NOTE(review): the 1. offset presumably corresponds to the default
            # Gaussian likelihood variance of 1.0 — confirm against gpflow docs.
            self.assertTrue(np.allclose(var_f, var_y - 1.))

    def test_density(self):
        """predict_density must match the closed-form Gaussian log-density."""
        with self.test_session():
            mu_y, var_y = self.m.predict_y(self.Xtest)
            density = self.m.predict_density(self.Xtest, self.Ytest)
            density_hand = -0.5*np.log(2*np.pi) - 0.5*np.log(var_y) - 0.5*np.square(mu_y - self.Ytest)/var_y
            self.assertTrue(np.allclose(density_hand, density))

    def test_recompile(self):
        """Prediction must still work after a parameter is changed and fixed."""
        with self.test_session():
            mu_f, var_f = self.m.predict_f(self.Xtest)
            mu_y, var_y = self.m.predict_y(self.Xtest)
            density = self.m.predict_density(self.Xtest, self.Ytest)
            #change a fix and see if these things still compile
            self.m.likelihood.variance = 0.2
            self.m.likelihood.variance.fixed = True
            #this will fail unless a recompile has been triggered
            mu_f, var_f = self.m.predict_f(self.Xtest)
            mu_y, var_y = self.m.predict_y(self.Xtest)
            density = self.m.predict_density(self.Xtest, self.Ytest)
class TestFullCov(GPflowTestCase):
    """
    This base class requires inheriting to specify the model.

    This test structure is more complex than, say, looping over the models, but
    makes all the tests much smaller and so less prone to erroring out. Also,
    if a test fails, it should be clearer where the error is.
    """
    def setUp(self):
        with self.test_session():
            self.input_dim = 3
            self.output_dim = 2
            self.N = 20
            self.Ntest = 30
            self.M = 5
            rng = np.random.RandomState(0)
            self.num_samples = 5
            # Expected shapes for the sample and full-covariance checks below.
            self.samples_shape = (self.num_samples, self.Ntest, self.output_dim)
            self.covar_shape = (self.Ntest, self.Ntest, self.output_dim)
            self.X, self.Y, self.Z, self.Xtest = (
                rng.randn(self.N, self.input_dim),
                rng.randn(self.N, self.output_dim),
                rng.randn(self.M, self.input_dim),
                rng.randn(self.Ntest, self.input_dim))
            # Factory so each subclass builds its model with a fresh kernel.
            self.k = lambda: gpflow.kernels.Matern32(self.input_dim)
            self.model = gpflow.gpr.GPR(self.X, self.Y, kern=self.k())
    def test_cov(self):
        """predict_f_full_cov must agree with predict_f on the mean, and its
        per-output covariance diagonal must match the marginal variance."""
        with self.test_session():
            mu1, var = self.model.predict_f(self.Xtest)
            mu2, covar = self.model.predict_f_full_cov(self.Xtest)
            self.assertTrue(np.all(mu1 == mu2))
            self.assertTrue(covar.shape == self.covar_shape)
            self.assertTrue(var.shape == (self.Ntest, self.output_dim))
            for i in range(self.output_dim):
                self.assertTrue(np.allclose(var[:, i], np.diag(covar[:, :, i])))
    def test_samples(self):
        """predict_f_samples must return (num_samples, Ntest, output_dim)."""
        with self.test_session():
            samples = self.model.predict_f_samples(self.Xtest, self.num_samples)
            self.assertTrue(samples.shape == self.samples_shape)
class TestFullCovSGPR(TestFullCov):
    """TestFullCov against the sparse SGPR model."""
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.sgpr.SGPR(self.X, self.Y, Z=self.Z, kern=self.k())
class TestFullCovGPRFITC(TestFullCov):
    """TestFullCov against the FITC approximation."""
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.sgpr.GPRFITC(
                self.X, self.Y,
                Z=self.Z, kern=self.k())
class TestFullCovSVGP1(TestFullCov):
    """TestFullCov against SVGP (whiten=False, q_diag=True)."""
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.svgp.SVGP(
                self.X, self.Y, Z=self.Z, kern=self.k(),
                likelihood=gpflow.likelihoods.Gaussian(),
                whiten=False, q_diag=True)
class TestFullCovSVGP2(TestFullCov):
    """TestFullCov against SVGP (whiten=True, q_diag=False)."""
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.svgp.SVGP(
                self.X, self.Y, Z=self.Z, kern=self.k(),
                likelihood=gpflow.likelihoods.Gaussian(),
                whiten=True, q_diag=False)
class TestFullCovSVGP3(TestFullCov):
    """TestFullCov against SVGP (whiten=True, q_diag=True)."""
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.svgp.SVGP(
                self.X, self.Y, Z=self.Z, kern=self.k(),
                likelihood=gpflow.likelihoods.Gaussian(),
                whiten=True, q_diag=True)
class TestFullCovSVGP4(TestFullCov):
    """TestFullCov against SVGP (whiten=True, q_diag=False).

    NOTE(review): this configuration duplicates TestFullCovSVGP2 exactly;
    presumably whiten=False, q_diag=False was intended to complete the
    2x2 grid of options — confirm with the original authors.
    """
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.svgp.SVGP(
                self.X, self.Y, Z=self.Z, kern=self.k(),
                likelihood=gpflow.likelihoods.Gaussian(),
                whiten=True, q_diag=False)
class TestFullCovVGP(TestFullCov):
    """TestFullCov against the variational VGP model."""
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.vgp.VGP(
                self.X, self.Y, kern=self.k(),
                likelihood=gpflow.likelihoods.Gaussian())
class TestFullCovGPMC(TestFullCov):
    """TestFullCov against the MCMC GPMC model."""
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.gpmc.GPMC(
                self.X, self.Y, kern=self.k(),
                likelihood=gpflow.likelihoods.Gaussian())
class TestFullCovSGPMC(TestFullCov):
    """TestFullCov against the sparse MCMC SGPMC model."""
    def setUp(self):
        TestFullCov.setUp(self)
        with self.test_session():
            self.model = gpflow.sgpmc.SGPMC(
                self.X, self.Y, kern=self.k(),
                likelihood=gpflow.likelihoods.Gaussian(),
                Z=self.Z)
if __name__ == "__main__":
    unittest.main()
|
<reponame>null-pi/flatland-challenge
# gridmap
#
# Reads graph maps.
#
# For graph, state is pair of (x,y) tuple
# @author: mike
# @created: 2020-07-14
#
from lib_piglet.utils.tools import eprint
import os,sys
class vertex:
    """A graph node: an integer id, an (x, y) coordinate tuple and a weighted
    adjacency map ``{vertex: cost}``."""
    id: int
    coordinate: tuple
    adjacent: dict

    def __init__(self, id: int, coordinate: tuple):
        self.id = id
        self.coordinate = coordinate
        self.adjacent = {}

    def __str__(self):
        return str(self.id) + ':' + str(self.coordinate)

    def __repr__(self):
        return str(self.id) + ": " + str(self.coordinate)

    def print_connections(self):
        """Print this vertex's neighbours as (id, cost) pairs."""
        print(str(self.id) + ' adjacent: ' + str([(x.id, cost) for x, cost in self.get_connections()]))

    def add_neighbor(self, neighbor, weight=0):
        """Record/overwrite the edge cost to *neighbor*."""
        self.adjacent[neighbor] = weight

    def get_connections(self):
        """Return an iterable of (neighbor_vertex, cost) pairs."""
        return self.adjacent.items()

    def get_id(self):
        return self.id

    def get_weight(self, neighbor):
        """Return the stored edge cost to *neighbor* (KeyError if absent)."""
        return self.adjacent[neighbor]

    def get_location(self):
        return self.coordinate

    def set_location(self, coordinate: tuple):
        self.coordinate = coordinate

    def __eq__(self, other):
        # Identity is the integer id only; coordinates are not compared.
        return self.get_id() == other.get_id()

    def __hash__(self):
        return self.get_id()


class graph:
    """Undirected weighted graph, optionally loaded from a DIMACS-like file
    with ``v id x y`` vertex lines and ``a n1 n2 cost`` edge lines."""
    vert_dict: dict
    num_vertices: int
    domain_file_: str

    def __init__(self, filename: str = None):
        # BUG FIX: the original signature was ``filename: str == None`` — a
        # boolean *annotation* with no default value — so ``graph()`` raised a
        # TypeError.  A real keyword default restores the intended API.
        self.vert_dict = {}
        self.num_vertices = 0
        if filename is not None:
            self.load(filename)

    def load(self, filename: str):
        """Populate the graph from *filename*; exits the process on bad input."""
        if not os.path.exists(filename):
            eprint("err; file {} not exist".format(filename))
            exit(1)
        self.domain_file_ = filename
        print("Loading graph file ... ...")
        # ``with`` guarantees the handle is closed (the original leaked it).
        with open(filename) as f:
            for line in f:
                content = line.strip().split()
                # Skip blank lines and anything that is not a vertex/edge record.
                if len(content) == 0 or (content[0].strip() != "a" and content[0].strip() != "v"):
                    continue
                if len(content) != 4:
                    eprint("err; line {} should have 4 element".format(line))
                    exit(1)
                if content[0].strip() == "v":
                    try:
                        id = int(content[1])
                        x = int(content[2])
                        y = int(content[3])
                    except ValueError:
                        eprint("err; can not convert elements of {} to integer ".format(line))
                        exit(1)
                    if id in self.vert_dict:
                        # Vertex was auto-created by an earlier edge line;
                        # just fill in its coordinates.
                        v: vertex = self.get_vertex(id)
                        v.set_location((x, y))
                    else:
                        self.add_vertex(id, (x, y))
                if content[0].strip() == "a":
                    try:
                        n1 = int(content[1])
                        n2 = int(content[2])
                        cost = int(content[3])
                    except ValueError:
                        eprint("err; can not convert elements of {} to integer ".format(line))
                        exit(1)
                    self.add_edge(n1, n2, cost)
        # Move the cursor up one line to overwrite the progress message.
        sys.stdout.write("\033[F")

    def add_vertex(self, id: int, coordinates: tuple):
        """Create, register and return a new vertex."""
        self.num_vertices = self.num_vertices + 1
        new_vertex = vertex(id, coordinates)
        self.vert_dict[id] = new_vertex
        return new_vertex

    def get_vertex(self, n):
        """Return the vertex with id *n*, or None if unknown."""
        if n in self.vert_dict:
            return self.vert_dict[n]
        else:
            return None

    def add_edge(self, frm, to, cost=0):
        """Add an undirected edge, auto-creating missing endpoints with empty
        coordinates."""
        if frm not in self.vert_dict:
            self.add_vertex(frm, ())
        if to not in self.vert_dict:
            self.add_vertex(to, ())
        self.vert_dict[frm].add_neighbor(self.vert_dict[to], cost)
        self.vert_dict[to].add_neighbor(self.vert_dict[frm], cost)

    def get_vertices(self):
        """Return the ids of all vertices."""
        return self.vert_dict.keys()

    def __iter__(self):
        return iter(self.vert_dict.values())
|
<filename>tests/test_sftpserver.py
# coding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import sys
import os
import uuid
import shutil
import paramiko
from django.contrib.auth import get_user_model
from django.test import TestCase
from django_sftpserver import models, sftpserver, storage_sftpserver
if sys.version_info[0] == 2:
import backports.unittest_mock
backports.unittest_mock.install()
from unittest.mock import Mock # NOQA
class TestDjango_sftpserver_sftpserver(TestCase):
    """Public-key authentication checks for the stub SFTP server."""
    valid_username = 'user'
    invalid_username = 'user2'
    valid_root_name = 'root'
    valid_root_name_2 = 'root_2'

    @staticmethod
    def _fake_key(b64):
        # Stand-in for a paramiko key exposing get_name()/get_base64().
        key = Mock()
        key.get_name = Mock(return_value='ssh-rsa')
        key.get_base64 = Mock(return_value=b64)
        return key

    def setUp(self):
        self.user = get_user_model().objects.create(username=self.valid_username)
        # Only 'public_key' is registered for self.user below.
        self.valid_key = self._fake_key('public_key')
        self.invalid_key = self._fake_key('public_key2')
        models.AuthorizedKey.objects.create(user=self.user, key_type='ssh-rsa', key='public_key')
        root = models.Root.objects.create(name=self.valid_root_name)
        root.users.add(self.user)
        models.Root.objects.create(name=self.valid_root_name_2)

    def test_auth_all(self):
        server = sftpserver.StubServer()
        cases = [
            (self.valid_username, self.valid_key, paramiko.AUTH_SUCCESSFUL),
            (self.valid_username, self.invalid_key, paramiko.AUTH_FAILED),
            (self.invalid_username, self.valid_key, paramiko.AUTH_FAILED),
            (self.invalid_username, self.invalid_key, paramiko.AUTH_FAILED),
        ]
        for username, key, expected in cases:
            self.assertEqual(server.check_auth_publickey(username, key), expected)

    def test_auth_root(self):
        server = sftpserver.StubServer()
        # "user/root" style logins: only roots the user belongs to succeed.
        cases = [
            ('{}/{}'.format(self.valid_username, self.valid_root_name),
             paramiko.AUTH_SUCCESSFUL),
            ('{}/{}'.format(self.valid_username, self.valid_root_name_2),
             paramiko.AUTH_FAILED),
            ('{}/{}invalid'.format(self.valid_username, self.valid_root_name),
             paramiko.AUTH_FAILED),
        ]
        for name, expected in cases:
            self.assertEqual(server.check_auth_publickey(name, self.valid_key), expected)

    def test_auth_root_with_branch(self):
        pass
class TestDjango_sftpserver_sftpserver_with_root(TestCase):
    """Session start-up when the server is bound to one fixed root."""

    def setUp(self):
        self.root = models.Root.objects.create(name="root_example")
        server = sftpserver.StubServer()
        server.user = None          # anonymous: access controlled by the root
        server.root = self.root
        self.server = server
        self.sftpserver = sftpserver.StubSFTPServer(self.server)
        self.sftpserver.session_started()
class TestDjango_sftpserver_sftpserver_without_root(TestCase):
    """Behaviour when the user sees all of their roots under '/'."""

    def setUp(self):
        self.user = get_user_model().objects.create(username="user")
        roots = [models.Root.objects.create(name="root{}".format(i)) for i in range(3)]
        self.root0, self.root1, self.root2 = roots
        # The user is granted root0 and root1 only; root2 stays inaccessible.
        self.root0.users.add(self.user)
        self.root1.users.add(self.user)
        self.server = sftpserver.StubServer()
        self.server.user = self.user
        self.server.root = None
        self.sftpserver = sftpserver.StubSFTPServer(self.server)
        self.sftpserver.session_started()

    def test_list_folder(self):
        for path in ('/', '/root0', '/root1'):
            print(self.sftpserver.list_folder(path))
        # '/root2' deliberately not listed: the user has no access to it.

    def test_stat(self):
        self.sftpserver.stat('/')
        self.root0.put("/a/b", b"c")
        for path in ('/root0/', '/root0/a', '/root0/a/b'):
            self.sftpserver.stat(path)

    def test_open(self):
        self.root0.put("/a/b", b"b")
        self.sftpserver.list_folder('/root0/a')
        self.sftpserver.list_folder('/root0/a/b')
        handle = self.sftpserver.open('/root0/a/b', os.O_RDONLY, None)
        print(handle.readfile.getvalue())
        self.sftpserver.open('/root0/a/c', os.O_WRONLY, None).write(0, b'c')

    def test_remove(self):
        pass

    def test_rename(self):
        pass

    def test_mkdir(self):
        pass

    def test_rmdir(self):
        pass
class TestDjango_sftpserver_storage_sftpserver_with_root(TestCase):
    """Exercises the storage-backed SFTP server against a scratch directory."""

    # Unique per-test-run scratch directory under /tmp.
    storage_root = '/tmp/django_sftpserver_test-{}'.format(uuid.uuid4().hex)

    def _wipe_storage(self):
        # Remove the scratch directory if present (setUp and tearDown share this).
        if os.path.exists(self.storage_root):
            shutil.rmtree(self.storage_root)

    def setUp(self):
        self._wipe_storage()
        os.mkdir(self.storage_root)
        self.storage_access_info = models.StorageAccessInfo.objects.create(
            name="storage_example",
            storage_class="django.core.files.storage.FileSystemStorage",
            kwargs="location: {}".format(self.storage_root),
        )
        self.server = storage_sftpserver.StubServer()
        self.server.user = None
        self.server.storage_access_info = self.storage_access_info
        self.sftpserver = storage_sftpserver.StubSFTPServer(self.server)
        self.sftpserver.session_started()

    def tearDown(self):
        self._wipe_storage()

    def test_list_folder(self):
        print([entry.filename for entry in self.sftpserver.list_folder("/")])
|
<reponame>pacargile/charm<filename>charm/model.py<gh_stars>0
import numpy as np
from scipy.stats import norm as gaussian
from astropy import units as u
from astropy.coordinates import SkyCoord
def gauss(args):
    """Evaluate the normal PDF at x.

    Parameters
    ----------
    args : tuple
        (x, mu, sigma) -- point, mean, and standard deviation.
    """
    x, mu, sigma = args
    norm_const = 1.0 / (sigma * np.sqrt(2.0 * np.pi))
    return norm_const * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))
class clustermodel(object):
    """Monte-Carlo spatial density model for cluster membership.

    For each star, draws `Nsamp` samples of (RA, Dec, distance) from the
    catalog values and uncertainties, converts them to galactocentric
    X/Y/Z, and evaluates a 3-D density model at a proposed cluster
    center/scale via :meth:`likefn`.

    Parameters
    ----------
    inarr : table-like
        Must provide fields 'RA', 'RA_Error', 'Dec', 'Dec_Error',
        'Parallax', 'Parallax_Error' and 'Prob' (membership probability).
    Nsamp : int
        Number of Monte-Carlo samples drawn per star.
    modeltype : {'gaussian', 'cauchy', 'plummer'}
        Functional form of the cluster density profile.
    """

    def __init__(self, inarr, Nsamp, modeltype='gaussian'):
        super(clustermodel, self).__init__()
        self.inarr = inarr
        self.Nstars = len(self.inarr)
        self.starid = range(self.Nstars)
        self.Nsamp = Nsamp
        self.modeltype = modeltype

        # Galactocentric position samples, shape (3 [x, y, z], Nstars, Nsamp).
        self.starsamples = np.empty((3, self.Nstars, self.Nsamp))
        for idx in range(self.Nstars):
            RAdist = gaussian(
                loc=self.inarr['RA'][idx],
                scale=self.inarr['RA_Error'][idx]).rvs(size=self.Nsamp)
            Decdist = gaussian(
                loc=self.inarr['Dec'][idx],
                scale=self.inarr['Dec_Error'][idx]).rvs(size=self.Nsamp)
            # parallax -> distance in pc; assumes parallax samples stay
            # positive -- TODO confirm for low-S/N parallaxes
            Distdist = 1000.0 / gaussian(
                loc=self.inarr['Parallax'][idx],
                scale=self.inarr['Parallax_Error'][idx]).rvs(size=self.Nsamp)
            # PERF: build the SkyCoord once per star -- the original
            # constructed the identical coordinate object three times,
            # once per output axis.
            c = SkyCoord(ra=RAdist * u.deg, dec=Decdist * u.deg,
                         distance=Distdist * u.pc)
            galcen = c.galactocentric
            self.starsamples[0, idx, :] = np.asarray(galcen.x.value)
            self.starsamples[1, idx, :] = np.asarray(galcen.y.value)
            self.starsamples[2, idx, :] = np.asarray(galcen.z.value)

    def likefn(self, arg):
        """Log-likelihood of a cluster center/scale given the star samples.

        Parameters
        ----------
        arg : tuple
            (x, sigma_x, y, sigma_y, z, sigma_z) -- center and width of the
            cluster along each galactocentric axis.

        Returns
        -------
        float
            Sum of log densities weighted by each star's 'Prob'; ``-inf``
            when any sample's density underflows to machine epsilon.

        Raises
        ------
        IOError
            If ``self.modeltype`` is not a recognized model name
            (kept for backward compatibility with existing callers).
        """
        x, sigma_x, y, sigma_y, z, sigma_z = arg
        # calculate like for all stars
        if self.modeltype == 'gaussian':
            # Gaussian model
            like = (
                ((1.0/(np.sqrt(2.0*np.pi)*sigma_x)) *
                 np.exp( -0.5 * ((self.starsamples[0,...] -x)**2.0)*(sigma_x**-2.0) )) +
                ((1.0/(np.sqrt(2.0*np.pi)*sigma_y)) *
                 np.exp( -0.5 * ((self.starsamples[1,...]-y)**2.0)*(sigma_y**-2.0) )) +
                ((1.0/(np.sqrt(2.0*np.pi)*sigma_z)) *
                 np.exp( -0.5 * ((self.starsamples[2,...]-z)**2.0)*(sigma_z**-2.0) ))
                )
        elif self.modeltype == 'cauchy':
            # Cauchy model
            like = (
                ((1.0/(np.pi*sigma_x)) *
                 (sigma_x**2.0)/( ((self.starsamples[0,...]-x)**2.0) + (sigma_x**2.0) )) +
                ((1.0/(np.pi*sigma_y)) *
                 (sigma_y**2.0)/( ((self.starsamples[1,...]-y)**2.0) + (sigma_y**2.0) )) +
                ((1.0/(np.pi*sigma_z)) *
                 (sigma_z**2.0)/( ((self.starsamples[2,...]-z)**2.0) + (sigma_z**2.0) ))
                )
        elif self.modeltype == 'plummer':
            # Plummer model
            like = (
                ( (1.0/(sigma_x**3.0)) *
                  ((1.0 + (((self.starsamples[0,...]-x)/sigma_x)**2.0))**(-5.0/2.0)) ) +
                ( (1.0/(sigma_y**3.0)) *
                  ((1.0 + (((self.starsamples[1,...]-y)/sigma_y)**2.0))**(-5.0/2.0)) ) +
                ( (1.0/(sigma_z**3.0)) *
                  ((1.0 + (((self.starsamples[2,...]-z)/sigma_z)**2.0))**(-5.0/2.0)) )
                )
        else:
            print('Did not understand model type')
            raise IOError

        # BUG FIX: np.float was removed in NumPy 1.24; the builtin float
        # yields the same double-precision eps.
        if np.min(like) <= np.finfo(float).eps:
            return -np.inf

        # Weight each star's contribution by its membership probability.
        like = (like.T*self.inarr['Prob']).T
        lnp = np.sum(np.log(like))
        return lnp
<reponame>qingqinl/Movie_recommendation_system
from Network import *
def get_batches(Xs, ys, batch_size):
    """Yield successive (features, targets) slice pairs of size `batch_size`.

    The final batch may be shorter when len(Xs) is not a multiple of
    `batch_size`.
    """
    total = len(Xs)
    start = 0
    while start < total:
        stop = min(start + batch_size, total)
        yield Xs[start:stop], ys[start:stop]
        start = stop
## Train the network
# (Jupyter magics, kept for reference when run as a notebook)
#%matplotlib inline
#%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import time
import datetime

# Per-batch loss history; appended to in both loops below.
# NOTE: graph tensors (train_graph, gradients, loss, uid, ...) come from
# `from Network import *` at the top of this file.
losses = {'train':[], 'test':[]}

with tf.Session(graph=train_graph) as sess:

    # Collect gradient data for TensorBoard.
    # Keep track of gradient values and sparsity
    grad_summaries = []
    for g, v in gradients:
        if g is not None:
            grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name.replace(':', '_')), g)
            sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name.replace(':', '_')), tf.nn.zero_fraction(g))
            grad_summaries.append(grad_hist_summary)
            grad_summaries.append(sparsity_summary)
    grad_summaries_merged = tf.summary.merge(grad_summaries)

    # Output directory for models and summaries
    timestamp = str(int(time.time()))
    out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
    print("Writing to {}\n".format(out_dir))

    # Summaries for loss and accuracy
    loss_summary = tf.summary.scalar("loss", loss)

    # Train Summaries
    train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
    train_summary_dir = os.path.join(out_dir, "summaries", "train")
    train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

    # Inference summaries
    inference_summary_op = tf.summary.merge([loss_summary])
    inference_summary_dir = os.path.join(out_dir, "summaries", "inference")
    inference_summary_writer = tf.summary.FileWriter(inference_summary_dir, sess.graph)

    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    for epoch_i in range(num_epochs):

        # Split the dataset into train/test sets each epoch.
        # NOTE(review): the original comment claimed the random seed is not
        # fixed, but random_state=0 makes the split deterministic -- confirm
        # which behaviour is intended.
        train_X,test_X, train_y, test_y = train_test_split(features,
                                                           targets_values,
                                                           test_size = 0.2,
                                                           random_state = 0)

        train_batches = get_batches(train_X, train_y, batch_size)
        test_batches = get_batches(test_X, test_y, batch_size)

        # Training iterations; record the training loss.
        for batch_i in range(len(train_X) // batch_size):
            x, y = next(train_batches)

            # Column 6 holds the (fixed-length 18) movie-category vector.
            categories = np.zeros([batch_size, 18])
            for i in range(batch_size):
                categories[i] = x.take(6,1)[i]

            # Column 5 holds the padded title word-id sequence.
            titles = np.zeros([batch_size, sentences_size])
            for i in range(batch_size):
                titles[i] = x.take(5,1)[i]

            feed = {
                uid: np.reshape(x.take(0,1), [batch_size, 1]),
                user_gender: np.reshape(x.take(2,1), [batch_size, 1]),
                user_age: np.reshape(x.take(3,1), [batch_size, 1]),
                user_job: np.reshape(x.take(4,1), [batch_size, 1]),
                movie_id: np.reshape(x.take(1,1), [batch_size, 1]),
                movie_categories: categories,  #x.take(6,1)
                movie_titles: titles,  #x.take(5,1)
                targets: np.reshape(y, [batch_size, 1]),
                dropout_keep_prob: dropout_keep, #dropout_keep
                lr: learning_rate}

            step, train_loss, summaries, _ = sess.run([global_step, loss, train_summary_op, train_op], feed)  #cost
            losses['train'].append(train_loss)
            train_summary_writer.add_summary(summaries, step)  #

            # Show every <show_every_n_batches> batches
            if (epoch_i * (len(train_X) // batch_size) + batch_i) % show_every_n_batches == 0:
                time_str = datetime.datetime.now().isoformat()
                print('{}: Epoch {:>3} Batch {:>4}/{}   train_loss = {:.3f}'.format(
                    time_str,
                    epoch_i,
                    batch_i,
                    (len(train_X) // batch_size),
                    train_loss))

        # Evaluation pass over the test data.
        for batch_i in range(len(test_X) // batch_size):
            x, y = next(test_batches)

            categories = np.zeros([batch_size, 18])
            for i in range(batch_size):
                categories[i] = x.take(6,1)[i]

            titles = np.zeros([batch_size, sentences_size])
            for i in range(batch_size):
                titles[i] = x.take(5,1)[i]

            feed = {
                uid: np.reshape(x.take(0,1), [batch_size, 1]),
                user_gender: np.reshape(x.take(2,1), [batch_size, 1]),
                user_age: np.reshape(x.take(3,1), [batch_size, 1]),
                user_job: np.reshape(x.take(4,1), [batch_size, 1]),
                movie_id: np.reshape(x.take(1,1), [batch_size, 1]),
                movie_categories: categories,  #x.take(6,1)
                movie_titles: titles,  #x.take(5,1)
                targets: np.reshape(y, [batch_size, 1]),
                dropout_keep_prob: 1,  # no dropout during evaluation
                lr: learning_rate}

            step, test_loss, summaries = sess.run([global_step, loss, inference_summary_op], feed)  #cost

            # Record the test loss.
            losses['test'].append(test_loss)
            inference_summary_writer.add_summary(summaries, step)  #

            time_str = datetime.datetime.now().isoformat()
            if (epoch_i * (len(test_X) // batch_size) + batch_i) % show_every_n_batches == 0:
                print('{}: Epoch {:>3} Batch {:>4}/{}   test_loss = {:.3f}'.format(
                    time_str,
                    epoch_i,
                    batch_i,
                    (len(test_X) // batch_size),
                    test_loss))

    # Save Model
    saver.save(sess, save_dir)  #, global_step=epoch_i
    print('Model Trained and Saved')
import logging
import os
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
from astropy.modeling import models, fitting
import shlex
from ...spectra import Spectrum1D
from ..registers import data_loader
__all__ = ['wcs1d_fits_loader', 'wcs1d_fits_writer', 'non_linear_wcs1d_fits']
def identify_wcs1d_fits(origin, *args, **kwargs):
    """Return True when the file looks loadable by the wcs1d-fits reader.

    args[0] is the filename. The file qualifies when it is a 1-D FITS
    with a one-dimensional WCS, no IRAF-style 'WAT1_001' keyword, and a
    CTYPE1 key in the header.
    """
    # Cheap string checks first, preserving the original short-circuit:
    # the header is only read for plausible .fits paths.
    if not (isinstance(args[0], str) and
            os.path.splitext(args[0].lower())[1] == '.fits'):
        return False
    # PERF: read the primary header once -- the original called
    # fits.getheader() four times for the same file.
    header = fits.getheader(args[0])
    return (header['NAXIS'] == 1 and
            header['WCSDIM'] == 1 and
            'WAT1_001' not in header and
            # check if the CTYPE1 key is in the header
            'CTYPE1' in header)
@data_loader("wcs1d-fits", identifier=identify_wcs1d_fits,
             dtype=Spectrum1D, extensions=['fits'])
def wcs1d_fits_loader(file_name, spectral_axis_unit=None, flux_unit=None,
                      hdu_idx=0, **kwargs):
    """
    Loader for single spectrum-per-HDU spectra in FITS files, with the spectral
    axis stored in the header as FITS-WCS. The flux unit of the spectrum is
    determined by the 'BUNIT' keyword of the HDU (if present), while the
    spectral axis unit is set by the WCS's 'CUNIT'.

    Parameters
    ----------
    file_name : str
        The path to the FITS file.
    spectral_axis_unit : str or `~astropy.Unit`, optional
        Units of the spectral axis. If not given (or None), the unit will be
        inferred from the CUNIT in the WCS. Note that if this is provided it
        will *override* any units the CUNIT provides.
    flux_unit : str or `~astropy.Unit`, optional
        Units of the flux for this spectrum. If not given (or None), the unit
        will be inferred from the BUNIT keyword in the header. Note that this
        unit will attempt to convert from BUNIT if BUNIT is present.
    hdu_idx : int
        The index of the HDU to load into this spectrum.

    Notes
    -----
    Loader contributed by <NAME>.
    """
    logging.info("Spectrum file looks like wcs1d-fits")

    with fits.open(file_name, **kwargs) as hdulist:
        header = hdulist[hdu_idx].header
        wcs = WCS(header)

        if wcs.naxis != 1:
            # BUG FIX: message previously read "FITS fle".
            raise ValueError('FITS file input to wcs1d_fits_loader is not 1D')

        if 'BUNIT' in header:
            data = u.Quantity(hdulist[hdu_idx].data, unit=header['BUNIT'])
            if flux_unit is not None:
                # explicit flux_unit wins; convert from BUNIT
                data = data.to(flux_unit)
        else:
            data = u.Quantity(hdulist[hdu_idx].data, unit=flux_unit)

        if spectral_axis_unit is not None:
            # override whatever CUNIT the WCS carried
            wcs.wcs.cunit[0] = str(spectral_axis_unit)

        meta = {'header': header}

        return Spectrum1D(flux=data, wcs=wcs, meta=meta)
def identify_iraf_wcs(origin, *args):
    """IRAF WCS identifier.

    Unlike the wcs1d identifier, this accepts files with WCSDIM == 2 by
    keying off the IRAF-specific 'WAT1_001' header keyword instead.
    """
    filename = args[0]
    if not isinstance(filename, str):
        return False
    return 'WAT1_001' in fits.getheader(filename)
@data_loader('iraf', identifier=identify_iraf_wcs, dtype=Spectrum1D,
             extensions=['fits'])
def non_linear_wcs1d_fits(file_name, spectral_axis_unit=None, flux_unit=None,
                          **kwargs):
    """Read wcs from files written by IRAF

    IRAF does not strictly follow the fits standard specially for non-linear
    wavelength solutions

    Parameters
    ----------
    file_name : str
        Name of file to load

    spectral_axis_unit : `~astropy.Unit`, optional
        Spectral axis unit, default is None in which case will search for it
        in the header under the keyword 'WAT1_001'

    flux_unit : `~astropy.Unit`, optional
        Flux units, default is None. If not specified will attempt to read it
        using the keyword 'BUNIT' and if this keyword does not exist it will
        assume 'ADU'.

    Returns
    -------
    `specutils.Spectrum1D`
    """
    logging.info('Loading 1D non-linear fits solution')

    with fits.open(file_name, **kwargs) as hdulist:
        header = hdulist[0].header

        # Walk every WCS dimension declared in the header. Each one is
        # either a linear solution (delegated to astropy WCS) or an IRAF
        # multispec/non-linear solution (decoded by hand).
        # NOTE(review): `spectral_axis` retains the value from the *last*
        # dimension processed -- confirm this is intended for WCSDIM > 1.
        for wcsdim in range(1, header['WCSDIM'] + 1):

            ctypen = header['CTYPE{:d}'.format(wcsdim)]
            if ctypen == 'LINEAR':
                logging.info("linear Solution: Try using "
                             "`format='wcs1d-fits'` instead")
                wcs = WCS(header)

                spectral_axis = _read_linear_iraf_wcs(wcs=wcs,
                                                      dc_flag=header['DC-FLAG'])
            elif ctypen == 'MULTISPE':
                logging.info("Multi spectral or non-linear solution")
                spectral_axis = _read_non_linear_iraf_wcs(header=header,
                                                          wcsdim=wcsdim)
            else:
                raise NotImplementedError

        if flux_unit is not None:
            # Explicit unit from the caller takes precedence over BUNIT.
            data = hdulist[0].data * flux_unit
        elif 'BUNIT' in header:
            data = u.Quantity(hdulist[0].data, unit=header['BUNIT'])
        else:
            logging.info("Flux unit was not provided, neither it was in the"
                         "header. Assuming ADU.")
            data = u.Quantity(hdulist[0].data, unit='adu')

        if spectral_axis_unit is not None:
            spectral_axis *= spectral_axis_unit
        else:
            # Fall back to the units IRAF recorded in WAT1_001, parsed as
            # space-separated key=value pairs.
            wat_head = header['WAT1_001']
            wat_dict = dict()
            for pair in wat_head.split(' '):
                wat_dict[pair.split('=')[0]] = pair.split('=')[1]
            if wat_dict['units'] == 'angstroms':
                logging.info("Found spectral axis units to be angstrom")
                spectral_axis *= u.angstrom

        meta = {'header': header}

        return Spectrum1D(flux=data, spectral_axis=spectral_axis, meta=meta)
def _read_linear_iraf_wcs(wcs, dc_flag):
    """Evaluate a linear IRAF wavelength solution over the full pixel range.

    Repackages the relevant WCS keywords into the dict layout that
    `_set_math_model` expects, builds the model, and evaluates it at every
    pixel position.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        Contains wcs information extracted from the header
    dc_flag : int
        Extracted from the header under the keyword DC-FLAG which defines
        what kind of solution is described. For linear solutions it is 0 or 1.

    Returns
    -------
    spectral_axis : `~numpy.ndarray`
        Wavelength solution evaluated for each pixel position.
    """
    wcs_dict = {'crval': wcs.wcs.crval[0],
                'crpix': wcs.wcs.crpix[0],
                'cdelt': wcs.wcs.cd[0],
                'dtype': dc_flag,
                'pnum': wcs._naxis[0]}

    model = _set_math_model(wcs_dict=wcs_dict)
    return model(range(wcs_dict['pnum']))
def _read_non_linear_iraf_wcs(header, wcsdim):
    """Read non-linear wavelength solutions written by IRAF

    Extracts the appropriate information and organize it in a dictionary for
    calling the method _set_math_model which decides what is the appropriate
    mathematical model to be used according to the type of wavelength solution
    it is dealing with.

    Parameters
    ----------
    header : `~astropy.io.fits.header.Header`
        Full header of file being loaded

    wcsdim : int
        Number of the wcs dimension to be read.

    Returns
    -------
    spectral_axis : `~numpy.ndarray`
        Mathematical model of wavelength solution evaluated for each pixel
        position

    Notes
    -----
    NOTE(review): if the header lacks a 'spec1' entry, `spectral_axis` is
    never assigned and this function raises UnboundLocalError at the
    return -- confirm whether such files can occur.
    """
    wat_wcs_dict = {}
    ctypen = header['CTYPE{:d}'.format(wcsdim)]
    logging.info('Attempting to read CTYPE{:d}: {:s}'.format(wcsdim, ctypen))
    if ctypen == 'MULTISPE':
        # The '*' wildcard collects every WATn_### card for this dimension;
        # IRAF splits long WCS attribute strings across numbered cards.
        # TODO (simon): What is the * (asterisc) doing here?.
        wat_head = header['WAT{:d}*'.format(wcsdim)]
        if len(wat_head) == 1:
            logging.debug('Get units')
            # Single card: parse space-separated key=value pairs directly.
            wat_array = wat_head[0].split(' ')
            for pair in wat_array:
                split_pair = pair.split('=')
                wat_wcs_dict[split_pair[0]] = split_pair[1]
            # print(wat_head[0].split(' '))
        elif len(wat_head) > 1:
            # Multiple cards: re-join them before parsing; shlex keeps the
            # quoted "specN" attribute strings intact.
            wat_string = ''
            for key in wat_head:
                wat_string += header[key]
            wat_array = shlex.split(wat_string.replace('=', ' '))
            if len(wat_array) % 2 == 0:
                for i in range(0, len(wat_array), 2):
                    # if wat_array[i] not in wcs_dict.keys():
                    wat_wcs_dict[wat_array[i]] = wat_array[i + 1]
                    # print(wat_array[i], wat_array[i + 1])

    for key in wat_wcs_dict.keys():
        logging.debug("{:d} -{:s}- {:s}".format(wcsdim,
                                                key,
                                                wat_wcs_dict[key]))

    if 'spec1' in wat_wcs_dict.keys():
        # The spec1 attribute is a whitespace-separated list of fields; the
        # positional meanings below follow IRAF's multispec specification.
        spec = wat_wcs_dict['spec1'].split()
        aperture = int(spec[0])
        beam = int(spec[1])
        disp_type = int(spec[2])        # dispersion type -> wcs_dict['dtype']
        disp_start = float(spec[3])
        disp_del_av = float(spec[4])    # average dispersion delta
        pix_num = int(spec[5])
        dopp_fact = float(spec[6])      # Doppler factor z
        aper_low = int(float(spec[7]))
        aper_high = int(float(spec[8]))
        weight = float(spec[9])
        zeropoint = float(spec[10])
        function_type = int(spec[11])   # -> wcs_dict['ftype']
        order = int(float(spec[12]))
        min_pix_val = int(float(spec[13]))
        max_pix_val = int(float(spec[14]))

        # Remaining fields are the function coefficients.
        params = [float(i) for i in spec[15:]]
        wcs_dict = {'aperture': aperture,
                    'beam': beam,
                    'dtype': disp_type,
                    'dstart': disp_start,
                    'avdelt': disp_del_av,
                    'pnum': pix_num,
                    'z': dopp_fact,
                    'alow': aper_low,
                    'ahigh': aper_high,
                    'weight': weight,
                    'zeropoint': zeropoint,
                    'ftype': function_type,
                    'order': order,
                    'pmin': min_pix_val,
                    'pmax': max_pix_val,
                    'fpar': params}

        logging.info('Retrieving model')
        math_model = _set_math_model(wcs_dict=wcs_dict)

        # Evaluate over 1-indexed pixels (FITS convention).
        spectral_axis = math_model(range(1, wcs_dict['pnum'] + 1))
        return spectral_axis
def _set_math_model(wcs_dict):
    """Defines a mathematical model of the wavelength solution

    Uses 2 keywords to decide which model is to be built and calls the
    appropriate function.

    dtype:
        -1: None, no wavelength solution available
         0: Linear wavelength solution
         1: Log-Linear wavelength solution (not implemented)
         2: Non-Linear solutions

    ftype (dispatch used by the code below, following IRAF's multispec
    convention):
         1: Chebyshev
         2: Legendre
         3: Cubic Spline (not implemented)
         4: Linear Spline (not implemented)
         5: Pixel Coordinates (not implemented)
         6: Sampled coordinate array (not implemented)

    NOTE(review): the original docstring listed 3 as Linear Spline and 4 as
    Cubic Spline, contradicting the dispatch below (3 -> _non_linear_cspline,
    4 -> _non_linear_lspline); the list above matches the code.

    Not implemented models could be implemented on user-request.

    Parameters
    ----------
    wcs_dict : dict
        Contains all the necessary wcs information needed for building any of
        the models supported.

    Returns
    -------
    The mathematical model which describes the transformation from pixel to
    wavelength. An instance of `~astropy.modeling.Model`.
    """
    if wcs_dict['dtype'] == -1:
        return _none()
    elif wcs_dict['dtype'] == 0:
        return _linear_solution(wcs_dict=wcs_dict)
    elif wcs_dict['dtype'] == 1:
        return _log_linear(wcs_dict=wcs_dict)
    elif wcs_dict['dtype'] == 2:
        if wcs_dict['ftype'] == 1:
            return _chebyshev(wcs_dict=wcs_dict)
        elif wcs_dict['ftype'] == 2:
            return _non_linear_legendre(wcs_dict=wcs_dict)
        elif wcs_dict['ftype'] == 3:
            return _non_linear_cspline(wcs_dict=wcs_dict)
        elif wcs_dict['ftype'] == 4:
            return _non_linear_lspline(wcs_dict=wcs_dict)
        elif wcs_dict['ftype'] == 5:
            # pixel coordinates
            raise NotImplementedError
        elif wcs_dict['ftype'] == 6:
            # sampled coordinate array
            raise NotImplementedError
        else:
            raise SyntaxError('ftype {:d} is not defined in the '
                              'standard'.format(wcs_dict['ftype']))
    else:
        raise SyntaxError('dtype {:d} is not defined in the '
                          'standard'.format(wcs_dict['dtype']))
def _none():
    """Identity mapping used when no wavelength solution exists (dtype = -1).

    Implemented with `~astropy.modeling.models.Linear1D` (slope 1,
    intercept 0) rather than `~astropy.modeling.models.Identity`, i.e.
    wavelength == pixel.

    Returns
    -------
    `~astropy.modeling.models.Linear1D`
    """
    return models.Linear1D(slope=1, intercept=0)
def _linear_solution(wcs_dict):
    """Build a `~astropy.modeling.models.Linear1D` from linear WCS keywords.

    The intercept is the wavelength at pixel 0, recovered from CRVAL/CRPIX
    (the ``crpix - 1`` accounts for FITS pixels being 1-indexed).
    """
    slope = wcs_dict['cdelt']
    intercept = wcs_dict['crval'] - (wcs_dict['crpix'] - 1) * slope
    return models.Linear1D(slope=slope, intercept=intercept)
def _log_linear(wcs_dict):
"""Returns a log linear model of the wavelength solution.
Not implemented
Raises
------
NotImplementedError
"""
raise NotImplementedError
def _chebyshev(wcs_dict):
    """Construct a `~astropy.modeling.models.Chebyshev1D` wavelength model.

    Parameters
    ----------
    wcs_dict : dict
        Decoded IRAF WCS information; uses 'order', 'pmin', 'pmax' and the
        coefficient list 'fpar'.

    Returns
    -------
    `~astropy.modeling.Model`
    """
    model = models.Chebyshev1D(degree=wcs_dict['order'] - 1,
                               domain=[wcs_dict['pmin'], wcs_dict['pmax']])
    # IRAF stores one coefficient per order; copy them into the model.
    for idx in range(wcs_dict['order']):
        model.parameters[idx] = wcs_dict['fpar'][idx]
    return model
def _non_linear_legendre(wcs_dict):
    """Construct a `~astropy.modeling.models.Legendre1D` wavelength model.

    Parameters
    ----------
    wcs_dict : dict
        Decoded IRAF WCS information; uses 'order', 'pmin', 'pmax' and the
        coefficient list 'fpar'.

    Returns
    -------
    `~astropy.modeling.Model`
    """
    model = models.Legendre1D(degree=wcs_dict['order'] - 1,
                              domain=[wcs_dict['pmin'], wcs_dict['pmax']])
    # IRAF stores one coefficient per order; copy them into the model.
    for idx in range(wcs_dict['order']):
        model.parameters[idx] = wcs_dict['fpar'][idx]
    return model
def _non_linear_lspline(wcs_dict):
"""Returns a linear spline model of the wavelength solution
Not implemented
This function should extract certain parameters from the `wcs_dict`
parameter and construct a mathematical model that makes the conversion from
pixel to wavelength. All the necessary information is already contained in
the dictionary so the only work to be done is to make the instantiation of
the appropriate subclass of `~astropy.modeling.Model`.
Parameters
----------
wcs_dict : dict
Contains all the WCS information decoded from an IRAF fits header.
Raises
------
NotImplementedError
"""
raise NotImplementedError('Linear spline is not implemented')
def _non_linear_cspline(wcs_dict):
"""Returns a cubic spline model of the wavelength solution.
This function should extract certain parameters from the `wcs_dict`
parameter and construct a mathematical model that makes the conversion from
pixel to wavelength. All the necessary information is already contained in
the dictionary so the only work to be done is to make the instantiation of
the appropriate subclass of `~astropy.modeling.Model`.
Not implemented
Parameters
----------
wcs_dict : dict
Contains all the WCS information decoded from an IRAF fits header.
Raises
------
NotImplementedError
"""
raise NotImplementedError('Cubic spline is not implemented')
|
<reponame>rpauszek/smtirf
# -*- coding: utf-8 -*-
"""
@author: <NAME>, Ph.D. (2020)
smtirf >> traces
"""
import numpy as np
import scipy.stats
import json, warnings
from abc import ABC, abstractmethod
import smtirf
from . import SMSpotCoordinate, SMJsonEncoder
from . import HiddenMarkovModel
# ==============================================================================
# BASE TRACE CLASSES
# ==============================================================================
class BaseTrace(ABC):
def __init__(self, trcID, data, frameLength, pk, bleed, gamma, clusterIndex=-1,
isSelected=False, limits=None, offsets=None, model=None, deBlur=False, deSpike=False):
self._id = trcID
self._set_data(data)
self.set_frame_length(frameLength) # => set self.t
self._bleed = bleed
self._gamma = gamma
self.set_offsets(offsets) # => triggers _correct_signals()
self.set_limits(limits, refreshStatePath=False)
self.pk = SMSpotCoordinate(pk)
self.isSelected = isSelected
self.set_cluster_index(clusterIndex)
self.model = HiddenMarkovModel.from_json(model)
self.deBlur = deBlur
self.deSpike = deSpike
self.dwells = smtirf.results.DwellTable(self) if self.model is not None else None
def __str__(self):
return f"{self.__class__.__name__}\tID={self._id} selected={self.isSelected}"
def __len__(self):
return self.D0.size
def _set_data(self, data):
self.D0, self.A0, self.S0, self._SP = data
@property
def _raw_data(self):
return np.vstack((self.D0, self.A0, self.S0, self._SP))
@property
def _attr_dict(self):
return {"pk" : self.pk,
"clusterIndex" : self.clusterIndex,
"frameLength" : self.frameLength,
"bleed" : self.bleed,
"gamma" : self.gamma,
"limits" : self.limits,
"offsets" : self.offsets,
"isSelected" : self.isSelected,
"deBlur" : self.deBlur,
"deSpike" : self.deSpike}
def _as_json(self):
return json.dumps(self._attr_dict, cls=SMJsonEncoder)
@property
def SP(self): # state path
return self._SP[self.limits].astype(np.int)
def set_statepath(self, sp):
SP = np.full(self._SP.shape, -1)
SP[self.limits] = sp
self._SP = SP
@property
def frameLength(self):
return self._frameLength
@property
def bleed(self):
return self._bleed
@property
def gamma(self):
return self._gamma
def set_frame_length(self, val):
self._frameLength = val
self.t = np.arange(len(self))*self._frameLength
def set_bleed(self, val):
if val >= 0 and val <=1:
self._bleed = val
self._correct_signals()
else:
raise ValueError("donor bleedthrough must be between 0 and 1")
def set_gamma(self, val):
if val > 0 and val <=2:
self._gamma = val
self._correct_signals()
else:
raise ValueError("gamma must be between 0 and 2")
@property
def offsets(self):
return self._offsets
def set_offsets(self, values):
if values is None:
values = np.zeros(2)
elif len(values) !=2:
raise ValueError("must provide offsets for both (2) channels")
self._offsets = np.array(values)
self._correct_signals()
def _correct_signals(self):
D = self.D0 - self._offsets[0]
A = self.A0 - self._offsets[1]
self.D = D * self._gamma
self.A = A - (D*self._bleed)
self.I = self.D + self.A
@property
def limits(self):
return self._limits # Slice instance
def set_limits(self, values, refreshStatePath=True):
if values is None:
self._limits = slice(*np.array([0, len(self)]))
elif not isinstance(values, slice):
values = np.array(values)
if values.size !=2:
raise ValueError("must provide offsets for both (2) channels")
values = np.sort(values)
if values[0] < 0: values[0] = 0 # TODO: add warning?
if values[1] > len(self): values[1] = len(self) # TODO: add warning?
if np.diff(values) <= 2:
warnings.warn("range must be >2 frames. resetting to full trace")
values = np.array([0, len(self)]) # TODO: maybe just don't update?
self._limits = slice(*values)
else:
self._limits = values
if refreshStatePath:
self.label_statepath()
@property
def clusterIndex(self):
return self._clusterIndex
def set_cluster_index(self, val):
#TODO => catch ValueError for non-int val
self._clusterIndex = int(val)
@property
def movID(self):
return self._id.movID
@property
def I0(self):
return self.D0 + self.A0
@property
def corrcoef(self):
return scipy.stats.pearsonr(self.D[self.limits], self.A[self.limits])[0]
def _time2frame(self, t): # copy from old
# find frame index closest to time t
return np.argmin(np.abs(self.t-t))
def set_offset_time_window(self, start, stop):
rng = slice(self._time2frame(start), self._time2frame(stop))
self.set_offsets([np.median(self.D0[rng]), np.median(self.A0[rng])])
def set_start_time(self, time, refreshStatePath=True):
fr = self._time2frame(time)
self.set_limits([fr, self.limits.stop], refreshStatePath=refreshStatePath)
def set_stop_time(self, time, refreshStatePath=True):
fr = self._time2frame(time)
self.set_limits([self.limits.start, fr], refreshStatePath=refreshStatePath)
def toggle(self):
self.isSelected = not self.isSelected
    def set_signal_labels(self, sp, where="first", correctOffsets=True):
        """Apply a per-frame label path and update offsets and limits.

        Parameters
        ----------
        sp : array of int
            Per-frame labels; frames labeled 0 are treated as signal dwells,
            frames labeled 2 (preferred) or 1 are used for offset estimation.
        where : {"first", "longest", "all"}
            Which signal dwell becomes the new analysis limits ("all" leaves
            the limits untouched).
        correctOffsets : bool
            If True, re-estimate channel offsets from the labeled frames.
        """
        self.S0 = sp
        if correctOffsets:
            # prefer frames labeled 2; fall back to frames labeled 1
            if np.any(sp == 2):
                rng, = np.where(sp == 2)
            elif np.any(sp == 1):
                rng, = np.where(sp == 1)
            try:
                self.set_offsets([np.median(self.D0[rng]), np.median(self.A0[rng])])
            except UnboundLocalError:
                # neither label present -> rng was never assigned; keep offsets
                pass
        # find indices of signal dwells
        if where.lower() in ("first", "longest"):
            ix = smtirf.where(sp == 0)
            # set limits
            if where.lower() == "first":
                self.set_limits(ix[0])
            elif where.lower() == "longest":
                dt = np.diff(ix, axis=1).squeeze()
                self.set_limits(ix[np.argmax(dt)])
        elif where.lower() == "all":
            pass
        else:
            raise ValueError("where keyword unrecognized")
# def reset_signal_labels(self):
# self.S0 = np.zeros(self.S0.shape)
# self._correct_signals() # this really should be implemented as a setter for all S0 changes
#
    def reset_offsets(self):
        """Clear both channel offsets (signals are re-corrected by set_offsets)."""
        self.set_offsets((0, 0))
    def reset_limits(self):
        """Restore the analysis limits to the full trace."""
        self.set_limits((0, len(self)))
    @property
    @abstractmethod
    def X(self):
        """Signal used for model training/labeling; defined by subclasses."""
        ...
def train(self, modelType, K, sharedVariance=True, **kwargs):
theta = smtirf.HiddenMarkovModel.train_new(modelType, self.X, K, sharedVariance, **kwargs)
self.model = theta
self.label_statepath()
    def label_statepath(self):
        """Re-label the statepath with the current model and rebuild dwells.

        No-op when no model has been trained yet.
        """
        if self.model is not None:
            self.set_statepath(self.model.label(self.X, deBlur=self.deBlur, deSpike=self.deSpike))
            self.dwells = smtirf.results.DwellTable(self)
    @property
    def EP(self):
        """Per-frame emission values for the current statepath, from the model."""
        return self.model.get_emission_path(self.SP)
    @abstractmethod
    def get_export_data(self):
        """Return ``(data, fmt, header)`` for text export; defined by subclasses."""
        ...
def export(self, savename):
data, fmt, header = self.get_export_data()
np.savetxt(savename, data, fmt=fmt, delimiter='\t', header=header)
# ==============================================================================
# Experiment Trace Subclasses
# ==============================================================================
class SingleColorTrace(BaseTrace):
    """Trace that is analyzed in a single detection channel (donor or acceptor)."""

    def __init__(self, trcID, data, frameLength, pk, bleed, gamma, channel=1, **kwargs):
        # remember the analyzed channel before base-class initialization
        self.channel = channel
        super().__init__(trcID, data, frameLength, pk, bleed, gamma, **kwargs)

    @property
    def _attr_dict(self):
        attrs = super()._attr_dict
        attrs["channel"] = self.channel
        return attrs

    def __str__(self):
        return f"{super().__str__()} [Channel {self.channel}]"

    @property
    def X(self):
        # channel 1 => donor, anything else => acceptor
        if self.channel == 1:
            return self.D[self.limits]
        return self.A[self.limits]
class PifeTrace(SingleColorTrace):
    """Single-channel trace for PIFE experiments."""
    classLabel = "pife"
    def get_export_data(self):
        # TODO: export not yet implemented for PIFE traces
        pass
class MultimerTrace(SingleColorTrace):
    """Single-channel trace trained with the multimer-specific HMM."""
    classLabel = "multimer"

    def train(self, K, sharedVariance=True, **kwargs):
        """Fit a "multimer" HMM to this trace and refresh the statepath."""
        self.model = smtirf.HiddenMarkovModel.train_new(
            "multimer", self.X, K, sharedVariance, **kwargs
        )
        self.label_statepath()

    def get_export_data(self):
        # TODO: export not yet implemented for multimer traces
        pass
class FretTrace(BaseTrace):
    """Two-channel trace analyzed in FRET-efficiency space (E = A / I)."""
    classLabel = "fret"
    @property
    def X(self):
        """FRET efficiency within the current limits; used for training."""
        return self.E[self.limits]
    def _correct_signals(self):
        super()._correct_signals()
        # E can be NaN/inf where total intensity is ~0; silence the numpy
        # warnings those divisions would emit
        with np.errstate(divide='ignore', invalid='ignore'):
            self.E = self.A / self.I
    def get_export_data(self):
        """Build ``(data, fmt, header)`` for np.savetxt export.

        Frames outside the limits are padded with NaN (FRET, fit) or -1 (state).
        """
        E = np.full(self.E.shape, np.nan)
        S = np.full(self.E.shape, -1)
        F = E.copy()
        E[self.limits] = self.X
        try:
            S[self.limits] = self.SP
            F[self.limits] = self.EP
        except AttributeError:
            # no statepath/model available yet; keep placeholder columns
            pass
        data = np.vstack((self.t, self.D, self.A, E, S, F)).T
        fmt = ('%.3f', '%.3f', '%.3f', '%.5f', '%3d', '%.5f')
        header = "Time (sec)\tDonor\tAcceptor\tFRET\tState\tFit"
        return data, fmt, header
class PiecewiseTrace(FretTrace):
    """FRET trace whose efficiency is masked outside labeled signal regions."""
    classLabel = "piecewise"
    @property
    def X(self):
        # NOTE(review): returns a constant 1 — looks like a placeholder;
        # confirm what the intended training signal for piecewise traces is.
        return 1
    def _correct_signals(self):
        super()._correct_signals()
        self._E = self.E.copy() # store a normal version of FRET efficiency, without masking
        # mask out frames not labeled as signal (only S0 == 0 frames keep E)
        self.E[self.S0 != 0] = np.nan
|
import math
import torch
import torch.nn as nn
from core.diff_crop_layer import DiffCropOneImage
from backbones.resnet import ViewDense, resnet18
# from stn.spatial_transformer import SpatialTransformer
class GazeSinCosLSTMLstmScaling(nn.Module):
    """
    Here, we predict sin(yaw),cos(yaw),sin(pitch) and a variance signal. Same variance signal is broadcasted to all
    three
    """
    def __init__(self, seq_len: int, scaling_dict, fc2: int = 256):
        """
        Args:
            seq_len: number of frames per input sequence; must be odd so a
                center frame exists.
            scaling_dict: maps each timestep t in [0, seq_len) to a list of
                crop scales, sorted in non-increasing order.
            fc2: dimensionality of the per-frame CNN feature.
        """
        super(GazeSinCosLSTMLstmScaling, self).__init__()
        assert seq_len % 2 == 1
        # every timestep must have a scale list, sorted largest-to-smallest
        for t in range(seq_len):
            assert t in scaling_dict
            for i in range(1, len(scaling_dict[t])):
                assert scaling_dict[t][i] <= scaling_dict[t][i - 1]
        self._seq_len = seq_len
        self._scales = scaling_dict
        self.img_feature_dim = fc2 # the dimension of the CNN feature to represent each frame
        # self.base_model = nn.Sequential(
        #     # nn.AvgPool2d(2),
        #     nn.Conv2d(3, 32, kernel_size=3),
        #     nn.ReLU(True),
        #     nn.Conv2d(32, 32, kernel_size=3),
        #     nn.ReLU(True),
        #     nn.MaxPool2d(2, stride=2),
        #     nn.Conv2d(32, 64, kernel_size=3),
        #     nn.ReLU(True),
        #     nn.Conv2d(64, 64, kernel_size=3),
        #     nn.ReLU(True),
        #     nn.MaxPool2d(2, stride=2),
        #     nn.Conv2d(64, 128, kernel_size=3),
        #     nn.ReLU(True),
        #     nn.MaxPool2d(2, stride=2),
        #     nn.Conv2d(128, self.img_feature_dim, kernel_size=3),
        #     nn.MaxPool2d(2, stride=2),
        #     nn.ReLU(True),
        #     nn.AdaptiveAvgPool2d((1, 1)),
        #     ViewDense(),
        # )
        self.base_model = resnet18(pretrained=True)
        # NOTE(review): assumes the custom backbones.resnet applies ``fc2``
        # after its 1000-d output — confirm (a torchvision resnet would
        # silently ignore this attribute).
        self.base_model.fc2 = nn.Linear(1000, self.img_feature_dim)
        # bidirectional LSTM over the multi-scale crops of a single frame
        self.lstm_attention = nn.LSTM(
            self.img_feature_dim, self.img_feature_dim // 2, bidirectional=True, num_layers=2, batch_first=True)
        # bidirectional LSTM over the temporal sequence of frames
        self.lstm = nn.LSTM(
            self.img_feature_dim, self.img_feature_dim, bidirectional=True, num_layers=2, batch_first=True)
        # The linear layer that maps the LSTM with the 4 outputs
        self.last_layer = nn.Linear(2 * self.img_feature_dim, 4)
        # one differentiable multi-scale cropper per timestep
        self._scales_layer_dict = nn.ModuleList([DiffCropOneImage(self._scales[t]) for t in range(self._seq_len)])
        print(f'[{self.__class__.__name__}] seq_len:{self._seq_len} fc2:{fc2} ScalingDict:{scaling_dict}')
    def scale_invariant_features_V2(self, input):
        """Multi-scale per-frame feature extraction, vectorized over timesteps.

        NOTE(review): uses the t=0 scale list and crop layer for *all*
        timesteps (only valid when every timestep shares the same scales,
        unlike the per-timestep variant commented out below) — confirm.
        """
        static_shape = (-1, 3) + input.size()[-2:]
        scales = self._scales[0]
        input_as_static = input.view(static_shape)
        # N*T,T_attention,C,H,W
        base_model_input = self._scales_layer_dict[0](input_as_static)
        # N*T*T_attention, img_feature_dim
        base_model_output = self.base_model(base_model_input.view(static_shape))
        attention_T = len(scales)
        lstm_attention_shape = (-1, attention_T, self.img_feature_dim)
        # N*T,T_attention,img_feature_dim
        lstm_input = base_model_output.view(lstm_attention_shape)
        lstm_output, _ = self.lstm_attention(lstm_input)
        # N*T,img_feature_dim — keep only the middle scale's LSTM output
        lstm_output = lstm_output[:, lstm_input.shape[1] // 2, :]
        return lstm_output.view((input.size(0), self._seq_len, self.img_feature_dim))
    # def scale_invariant_features(self, input):
    #     lstm_shape = ((input.size(0), self._seq_len, 3) + input.size()[-2:])
    #     static_shape = (-1, 3) + input.size()[-2:]
    #     features = []
    #     input_as_lstm = input.view(lstm_shape)
    #     for t in range(self._seq_len):
    #         base_model_input = self._scales_layer_dict[t](input_as_lstm[:, t, ...])
    #         base_model_output = self.base_model(base_model_input.view(static_shape))
    #         attention_T = len(self._scales[t])
    #         lstm_attention_shape = (input.shape[0], attention_T, self.img_feature_dim)
    #         lstm_input = base_model_output.view(lstm_attention_shape)
    #         # print(t, self._scales[t], lstm_input.shape)
    #         # import pdb
    #         # pdb.set_trace()
    #         lstm_output, _ = self.lstm_attention(lstm_input)
    #         # NOTE: check here for lstm_input.shape
    #         lstm_output = lstm_output[:, lstm_input.shape[1] // 2, :]
    #         features.append(lstm_output[:, None, ...])
    #     return torch.cat(features, dim=1)
    def forward(self, input):
        """Return ``(angular_output, var)``: tanh-squashed 3-d angle terms and
        a single sigmoid variance in (0, pi) broadcast to all three outputs."""
        self.lstm.flatten_parameters()
        self.lstm_attention.flatten_parameters()
        # import pdb
        # pdb.set_trace()
        scale_inv_inp = self.scale_invariant_features_V2(input)
        # scale_inv_inp = self.scale_invariant_features(input)
        # temp = self.base_model(input.view(-1, 3, 224, 224))
        # scale_inv_inp = temp.view((input.size(0), self._seq_len, -1))
        lstm_out, _ = self.lstm(scale_inv_inp)
        # import pdb
        # pdb.set_trace()
        # take the center frame of the (odd-length) sequence
        lstm_out = lstm_out[:, self._seq_len // 2, :]
        output = self.last_layer(lstm_out).view(-1, 4)
        angular_output = nn.Tanh()(output[:, :3])
        var = math.pi * nn.Sigmoid()(output[:, 3:])
        var = var.view(-1, 1).expand(var.size(0), 3)
        return angular_output, var
class GazeSinCosLSTMLstmScalingV2(nn.Module):
    """
    Variant of GazeSinCosLSTMLstmScaling where the input already contains the
    pre-cropped multi-scale images (no DiffCropOneImage layers); predicts
    sin(yaw), cos(yaw), sin(pitch) and a shared variance signal.
    """
    def __init__(self, seq_len: int, num_scales, fc2: int = 256):
        """
        Args:
            seq_len: number of frames per input sequence; must be odd so a
                center frame exists.
            num_scales: number of pre-cropped scales per frame in the input.
            fc2: dimensionality of the per-frame CNN feature.
        """
        super(GazeSinCosLSTMLstmScalingV2, self).__init__()
        assert seq_len % 2 == 1
        self._num_scales = num_scales
        self._seq_len = seq_len
        self.img_feature_dim = fc2 # the dimension of the CNN feature to represent each frame
        self.base_model = resnet18(pretrained=True)
        # NOTE(review): assumes the custom backbones.resnet applies ``fc2``
        # after its 1000-d output — confirm.
        self.base_model.fc2 = nn.Linear(1000, self.img_feature_dim)
        # bidirectional LSTM over the scales of a single frame
        self.lstm_attention = nn.LSTM(
            self.img_feature_dim, self.img_feature_dim // 2, bidirectional=True, num_layers=2, batch_first=True)
        # bidirectional LSTM over the temporal sequence of frames
        self.lstm = nn.LSTM(
            self.img_feature_dim, self.img_feature_dim, bidirectional=True, num_layers=2, batch_first=True)
        # The linear layer that maps the LSTM with the 4 outputs
        self.last_layer = nn.Linear(2 * self.img_feature_dim, 4)
        print(f'[{self.__class__.__name__}] seq_len:{self._seq_len} fc2:{fc2} NumScales:{self._num_scales}')
    def scale_invariant_features(self, input):
        # input.shape (N,T*T_attention*3,sz,sz)
        static_shape = (-1, 3) + input.size()[-2:]
        # N*T*T_attention, img_feature_dim
        base_model_output = self.base_model(input.view(static_shape))
        lstm_attention_shape = (-1, self._num_scales, self.img_feature_dim)
        # N*T,T_attention,img_feature_dim
        lstm_input = base_model_output.view(lstm_attention_shape)
        lstm_output, _ = self.lstm_attention(lstm_input)
        # N*T,img_feature_dim — keep only the middle scale's LSTM output
        lstm_output = lstm_output[:, lstm_input.shape[1] // 2, :]
        return lstm_output.view((input.size(0), self._seq_len, self.img_feature_dim))
    def forward(self, input):
        """Return ``(angular_output, var)``: tanh-squashed 3-d angle terms and
        a single sigmoid variance in (0, pi) broadcast to all three outputs."""
        self.lstm.flatten_parameters()
        self.lstm_attention.flatten_parameters()
        scale_inv_inp = self.scale_invariant_features(input)
        lstm_out, _ = self.lstm(scale_inv_inp)
        # take the center frame of the (odd-length) sequence
        lstm_out = lstm_out[:, self._seq_len // 2, :]
        output = self.last_layer(lstm_out).view(-1, 4)
        angular_output = nn.Tanh()(output[:, :3])
        var = math.pi * nn.Sigmoid()(output[:, 3:])
        var = var.view(-1, 1).expand(var.size(0), 3)
        return angular_output, var
|
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from spark_auto_mapper_fhir.fhir_types.date_time import FhirDateTime
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.integer import FhirInteger
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.base_types.fhir_backbone_element_base import (
FhirBackboneElementBase,
)
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# modifierExtension (Extension)
# identifier (uri)
# timestamp (dateTime)
# total (integer)
# offset (integer)
# parameter (ValueSet.Parameter)
from spark_auto_mapper_fhir.backbone_elements.value_set_parameter import (
ValueSetParameter,
)
# contains (ValueSet.Contains)
from spark_auto_mapper_fhir.backbone_elements.value_set_contains import (
ValueSetContains,
)
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ValueSetExpansion(FhirBackboneElementBase):
    """
    ValueSet.Expansion
    A ValueSet resource instance specifies a set of codes drawn from one or more code systems, intended for use in a particular context. Value sets link between [[[CodeSystem]]] definitions and their use in [coded elements](terminologies.html).
    """

    # All arguments are keyword-only; ``timestamp`` is the only required one.
    # noinspection PyPep8Naming
    def __init__(
        self,
        *,
        id_: Optional[FhirString] = None,
        extension: Optional[FhirList[ExtensionBase]] = None,
        modifierExtension: Optional[FhirList[ExtensionBase]] = None,
        identifier: Optional[FhirUri] = None,
        timestamp: FhirDateTime,
        total: Optional[FhirInteger] = None,
        offset: Optional[FhirInteger] = None,
        parameter: Optional[FhirList[ValueSetParameter]] = None,
        contains: Optional[FhirList[ValueSetContains]] = None,
    ) -> None:
        """
        A ValueSet resource instance specifies a set of codes drawn from one or more
        code systems, intended for use in a particular context. Value sets link
        between [[[CodeSystem]]] definitions and their use in [coded
        elements](terminologies.html).
        :param id_: None
        :param extension: May be used to represent additional information that is not part of the basic
        definition of the element. To make the use of extensions safe and manageable,
        there is a strict set of governance applied to the definition and use of
        extensions. Though any implementer can define an extension, there is a set of
        requirements that SHALL be met as part of the definition of the extension.
        :param modifierExtension: May be used to represent additional information that is not part of the basic
        definition of the element and that modifies the understanding of the element
        in which it is contained and/or the understanding of the containing element's
        descendants. Usually modifier elements provide negation or qualification. To
        make the use of extensions safe and manageable, there is a strict set of
        governance applied to the definition and use of extensions. Though any
        implementer can define an extension, there is a set of requirements that SHALL
        be met as part of the definition of the extension. Applications processing a
        resource are required to check for modifier extensions.
        Modifier extensions SHALL NOT change the meaning of any elements on Resource
        or DomainResource (including cannot change the meaning of modifierExtension
        itself).
        :param identifier: An identifier that uniquely identifies this expansion of the valueset, based
        on a unique combination of the provided parameters, the system default
        parameters, and the underlying system code system versions etc. Systems may
        re-use the same identifier as long as those factors remain the same, and the
        expansion is the same, but are not required to do so. This is a business
        identifier.
        :param timestamp: The time at which the expansion was produced by the expanding system.
        :param total: The total number of concepts in the expansion. If the number of concept nodes
        in this resource is less than the stated number, then the server can return
        more using the offset parameter.
        :param offset: If paging is being used, the offset at which this resource starts. I.e. this
        resource is a partial view into the expansion. If paging is not being used,
        this element SHALL NOT be present.
        :param parameter: A parameter that controlled the expansion process. These parameters may be
        used by users of expanded value sets to check whether the expansion is
        suitable for a particular purpose, or to pick the correct expansion.
        :param contains: The codes that are contained in the value set expansion.
        """
        # simply forwards every element to the backbone-element base class
        super().__init__(
            id_=id_,
            extension=extension,
            modifierExtension=modifierExtension,
            identifier=identifier,
            timestamp=timestamp,
            total=total,
            offset=offset,
            parameter=parameter,
            contains=contains,
        )
|
# src/pretix/control/forms/event.py
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>, Daniel, <NAME>, Ian
# Williams, <NAME>, <NAME>, <NAME>, Sohalt, <NAME>, <EMAIL>, luto,
# nelkenwelk, pajowu
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from urllib.parse import urlencode, urlparse
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.db.models import Prefetch, Q, prefetch_related_objects
from django.forms import CheckboxSelectMultiple, formset_factory
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import gettext, gettext_lazy as _, pgettext_lazy
from django_countries.fields import LazyTypedChoiceField
from i18nfield.forms import (
I18nForm, I18nFormField, I18nFormSetMixin, I18nTextarea, I18nTextInput,
)
from pytz import common_timezones, timezone
from pretix.base.channels import get_all_sales_channels
from pretix.base.email import get_available_placeholders
from pretix.base.forms import I18nModelForm, PlaceholderValidator, SettingsForm
from pretix.base.models import Event, Organizer, TaxRule, Team
from pretix.base.models.event import EventMetaValue, SubEvent
from pretix.base.reldate import RelativeDateField, RelativeDateTimeField
from pretix.base.settings import (
PERSON_NAME_SCHEMES, PERSON_NAME_TITLE_GROUPS, validate_event_settings,
)
from pretix.control.forms import (
MultipleLanguagesWidget, SlugWidget, SplitDateTimeField,
SplitDateTimePickerWidget,
)
from pretix.control.forms.widgets import Select2
from pretix.helpers.countries import CachedCountries
from pretix.multidomain.models import KnownDomain
from pretix.multidomain.urlreverse import build_absolute_uri
from pretix.plugins.banktransfer.payment import BankTransfer
class EventWizardFoundationForm(forms.Form):
    """First wizard step: organizer, languages and series flag for a new event."""
    locales = forms.MultipleChoiceField(
        choices=settings.LANGUAGES,
        label=_("Use languages"),
        widget=MultipleLanguagesWidget,
        help_text=_('Choose all languages that your event should be available in.')
    )
    has_subevents = forms.BooleanField(
        label=_("This is an event series"),
        required=False,
    )
    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user')
        self.session = kwargs.pop('session')
        super().__init__(*args, **kwargs)
        qs = Organizer.objects.all()
        # non-staff users may only pick organizers where one of their teams
        # grants event-creation permission
        if not self.user.has_active_staff_session(self.session.session_key):
            qs = qs.filter(
                id__in=self.user.teams.filter(can_create_events=True).values_list('organizer', flat=True)
            )
        self.fields['organizer'] = forms.ModelChoiceField(
            label=_("Organizer"),
            queryset=qs,
            widget=Select2(
                attrs={
                    'data-model-select2': 'generic',
                    'data-select2-url': reverse('control:organizers.select2') + '?can_create=1',
                    'data-placeholder': _('Organizer')
                }
            ),
            empty_label=None,
            required=True
        )
        self.fields['organizer'].widget.choices = self.fields['organizer'].choices
        # with a single eligible organizer, preselect it and default the
        # languages to that organizer's configured locales
        if len(self.fields['organizer'].choices) == 1:
            organizer = self.fields['organizer'].queryset.first()
            self.fields['organizer'].initial = organizer
            self.fields['locales'].initial = organizer.settings.locales
class EventWizardBasicsForm(I18nModelForm):
    """Second wizard step: core event data (name, slug, dates, tax rate, team)."""
    error_messages = {
        'duplicate_slug': _("You already used this slug for a different event. Please choose a new one."),
    }
    timezone = forms.ChoiceField(
        choices=((a, a) for a in common_timezones),
        label=_("Event timezone"),
    )
    locale = forms.ChoiceField(
        choices=settings.LANGUAGES,
        label=_("Default language"),
    )
    tax_rate = forms.DecimalField(
        label=_("Sales tax rate"),
        help_text=_("Do you need to pay sales tax on your tickets? In this case, please enter the applicable tax rate "
                    "here in percent. If you have a more complicated tax situation, you can add more tax rates and "
                    "detailed configuration later."),
        required=False
    )
    team = forms.ModelChoiceField(
        label=_("Grant access to team"),
        help_text=_("You are allowed to create events under this organizer, however you do not have permission "
                    "to edit all events under this organizer. Please select one of your existing teams that will"
                    " be granted access to this event."),
        queryset=Team.objects.none(),
        required=False,
        empty_label=_('Create a new team for this event with me as the only member')
    )

    class Meta:
        model = Event
        fields = [
            'name',
            'slug',
            'currency',
            'date_from',
            'date_to',
            'presale_start',
            'presale_end',
            'location',
            'geo_lat',
            'geo_lon',
        ]
        field_classes = {
            'date_from': SplitDateTimeField,
            'date_to': SplitDateTimeField,
            'presale_start': SplitDateTimeField,
            'presale_end': SplitDateTimeField,
        }
        widgets = {
            'date_from': SplitDateTimePickerWidget(),
            'date_to': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_basics-date_from_0'}),
            'presale_start': SplitDateTimePickerWidget(),
            'presale_end': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_basics-presale_start_0'}),
            'slug': SlugWidget,
        }

    def __init__(self, *args, **kwargs):
        self.organizer = kwargs.pop('organizer')
        self.locales = kwargs.get('locales')  # read-only; consumed by I18nModelForm
        self.has_subevents = kwargs.pop('has_subevents')
        self.user = kwargs.pop('user')
        self.session = kwargs.pop('session')
        super().__init__(*args, **kwargs)
        if 'timezone' not in self.initial:
            self.initial['timezone'] = get_current_timezone_name()
        self.fields['locale'].choices = [(a, b) for a, b in settings.LANGUAGES if a in self.locales]
        self.fields['location'].widget.attrs['rows'] = '3'
        self.fields['location'].widget.attrs['placeholder'] = _(
            'Sample Conference Center\nHeidelberg, Germany'
        )
        self.fields['slug'].widget.prefix = build_absolute_uri(self.organizer, 'presale:organizer.index')
        if self.has_subevents:
            # series events configure dates per sub-event, not globally
            del self.fields['presale_start']
            del self.fields['presale_end']
            del self.fields['date_to']
        if self.has_control_rights(self.user, self.organizer, self.session):
            # user can already edit everything under this organizer
            del self.fields['team']
        else:
            self.fields['team'].queryset = self.user.teams.filter(organizer=self.organizer)
            if not self.organizer.settings.get("event_team_provisioning", True, as_type=bool):
                # auto-creating a personal team is disabled for this organizer,
                # so an existing team must be chosen
                self.fields['team'].required = True
                self.fields['team'].empty_label = None
                self.fields['team'].initial = 0

    def clean(self):
        data = super().clean()
        if data.get('locale') not in self.locales:
            raise ValidationError({
                'locale': _('Your default locale must also be enabled for your event (see box above).')
            })
        if data.get('timezone') not in common_timezones:
            # bug fix: this message previously talked about the locale,
            # copy-pasted from the check above
            raise ValidationError({
                'timezone': _('Your default timezone must be specified.')
            })
        # change timezone: re-interpret the naive inputs in the chosen zone
        zone = timezone(data.get('timezone'))
        data['date_from'] = self.reset_timezone(zone, data.get('date_from'))
        data['date_to'] = self.reset_timezone(zone, data.get('date_to'))
        data['presale_start'] = self.reset_timezone(zone, data.get('presale_start'))
        data['presale_end'] = self.reset_timezone(zone, data.get('presale_end'))
        return data

    @staticmethod
    def reset_timezone(tz, dt):
        """Re-localize ``dt`` into ``tz`` (or pass through ``None``)."""
        return tz.localize(dt.replace(tzinfo=None)) if dt is not None else None

    def clean_slug(self):
        slug = self.cleaned_data['slug']
        # slugs must be unique per organizer (case-insensitive)
        if Event.objects.filter(slug__iexact=slug, organizer=self.organizer).exists():
            raise forms.ValidationError(
                self.error_messages['duplicate_slug'],
                code='duplicate_slug'
            )
        return slug

    @staticmethod
    def has_control_rights(user, organizer, session):
        """True if the user can administer all events of the organizer."""
        return user.teams.filter(
            organizer=organizer, all_events=True, can_change_event_settings=True, can_change_items=True,
            can_change_orders=True, can_change_vouchers=True
        ).exists() or user.has_active_staff_session(session.session_key)
class EventChoiceMixin:
    """Renders event choices as two lines: name plus date range/series and slug."""
    def label_from_instance(self, obj):
        # str(obj) is escaped; the surrounding markup is trusted, so the
        # combined string is safe to mark
        return mark_safe('{}<br /><span class="text-muted">{} · {}</span>'.format(
            escape(str(obj)),
            obj.get_date_range_display() if not obj.has_subevents else _("Event series"),
            obj.slug
        ))
class EventChoiceField(EventChoiceMixin, forms.ModelChoiceField):
    """Single-event chooser with the rich two-line event labels.

    Bug fix: EventChoiceMixin was not applied here, so the
    ``label_from_instance`` override (date range / series subtitle) was
    silently unused — unlike its multiple-choice counterpart below.
    """
    pass
class SafeEventMultipleChoiceField(EventChoiceMixin, forms.ModelMultipleChoiceField):
    """Multiple-event field whose queryset is emptied on construction.

    Starting from an empty queryset avoids enumerating/rendering all events;
    choices are presumably supplied elsewhere (e.g. widget-driven) — verify
    against the callers.
    """
    def __init__(self, queryset, *args, **kwargs):
        # deliberately discard the passed queryset, keeping only its model
        queryset = queryset.model.objects.none()
        super().__init__(queryset, *args, **kwargs)
class EventWizardCopyForm(forms.Form):
    """Wizard step that optionally copies configuration from an existing event."""
    @staticmethod
    def copy_from_queryset(user, session):
        """Events the user may copy from: all for staff sessions, otherwise
        events covered by teams with settings+items permissions (organizer-wide
        or limited to specific events)."""
        if user.has_active_staff_session(session.session_key):
            return Event.objects.all()
        return Event.objects.filter(
            Q(organizer_id__in=user.teams.filter(
                all_events=True, can_change_event_settings=True, can_change_items=True
            ).values_list('organizer', flat=True)) | Q(id__in=user.teams.filter(
                can_change_event_settings=True, can_change_items=True
            ).values_list('limit_events__id', flat=True))
        )
    def __init__(self, *args, **kwargs):
        # the wizard passes the same kwargs to every step; discard the unused ones
        kwargs.pop('organizer')
        kwargs.pop('locales')
        self.session = kwargs.pop('session')
        kwargs.pop('has_subevents')
        self.user = kwargs.pop('user')
        super().__init__(*args, **kwargs)
        self.fields['copy_from_event'] = EventChoiceField(
            label=_("Copy configuration from"),
            queryset=EventWizardCopyForm.copy_from_queryset(self.user, self.session),
            widget=Select2(
                attrs={
                    'data-model-select2': 'event',
                    'data-select2-url': reverse('control:events.typeahead') + '?can_copy=1',
                    'data-placeholder': _('Do not copy')
                }
            ),
            empty_label=_('Do not copy'),
            required=False
        )
        self.fields['copy_from_event'].widget.choices = self.fields['copy_from_event'].choices
class EventMetaValueForm(forms.ModelForm):
    """Form for a single event meta-data value.

    The widget depends on the property: a fixed choice list when
    ``allowed_values`` is configured, otherwise a text input with typeahead.
    """
    def __init__(self, *args, **kwargs):
        self.property = kwargs.pop('property')
        self.disabled = kwargs.pop('disabled')
        super().__init__(*args, **kwargs)
        if self.property.allowed_values:
            self.fields['value'] = forms.ChoiceField(
                label=self.property.name,
                choices=[
                    ('', _('Default ({value})').format(value=self.property.default) if self.property.default else ''),
                ] + [(a.strip(), a.strip()) for a in self.property.allowed_values.splitlines()],
            )
        else:
            self.fields['value'].label = self.property.name
            self.fields['value'].widget.attrs['placeholder'] = self.property.default
            self.fields['value'].widget.attrs['data-typeahead-url'] = (
                reverse('control:events.meta.typeahead') + '?' + urlencode({
                    'property': self.property.name,
                    'organizer': self.property.organizer.slug,
                })
            )
        self.fields['value'].required = False
        if self.disabled:
            self.fields['value'].widget.attrs['readonly'] = 'readonly'

    def clean_value(self):
        # Bug fix: this hook was named clean_slug and returned
        # cleaned_data['slug']; since the form's only field is "value",
        # Django never invoked it, leaving the read-only guard dead (and it
        # would have raised KeyError if ever called).
        if self.disabled:
            return self.instance.value if self.instance else None
        return self.cleaned_data['value']

    class Meta:
        model = EventMetaValue
        fields = ['value']
        widgets = {
            'value': forms.TextInput()
        }
class EventUpdateForm(I18nModelForm):
    """Main event settings form; optionally manages a custom domain and slug."""
    def __init__(self, *args, **kwargs):
        self.change_slug = kwargs.pop('change_slug', False)
        self.domain = kwargs.pop('domain', False)
        kwargs.setdefault('initial', {})
        self.instance = kwargs['instance']
        if self.domain and self.instance:
            # pre-fill the domain field from the first assigned custom domain
            initial_domain = self.instance.domains.first()
            if initial_domain:
                kwargs['initial'].setdefault('domain', initial_domain.domainname)
        super().__init__(*args, **kwargs)
        if not self.change_slug:
            # slug edits are only allowed when explicitly enabled by the view
            self.fields['slug'].widget.attrs['readonly'] = 'readonly'
        self.fields['location'].widget.attrs['rows'] = '3'
        self.fields['location'].widget.attrs['placeholder'] = _(
            'Sample Conference Center\nHeidelberg, Germany'
        )
        if self.domain:
            self.fields['domain'] = forms.CharField(
                max_length=255,
                label=_('Custom domain'),
                required=False,
                help_text=_('You need to configure the custom domain in the webserver beforehand.')
            )
        # replace the model field with a checkbox list of all known sales channels
        self.fields['sales_channels'] = forms.MultipleChoiceField(
            label=self.fields['sales_channels'].label,
            help_text=self.fields['sales_channels'].help_text,
            required=self.fields['sales_channels'].required,
            initial=self.fields['sales_channels'].initial,
            choices=(
                (c.identifier, c.verbose_name) for c in get_all_sales_channels().values()
            ),
            widget=forms.CheckboxSelectMultiple
        )
    def clean_domain(self):
        """Reject the installation's base domain and domains already in use."""
        d = self.cleaned_data['domain']
        if d:
            if d == urlparse(settings.SITE_URL).hostname:
                raise ValidationError(
                    _('You cannot choose the base domain of this installation.')
                )
            if KnownDomain.objects.filter(domainname=d).exclude(event=self.instance.pk).exists():
                raise ValidationError(
                    _('This domain is already in use for a different event or organizer.')
                )
        return d
    def save(self, commit=True):
        instance = super().save(commit)
        if self.domain:
            current_domain = instance.domains.first()
            if self.cleaned_data['domain']:
                if current_domain and current_domain.domainname != self.cleaned_data['domain']:
                    # domain changed: replace the KnownDomain record
                    current_domain.delete()
                    KnownDomain.objects.create(
                        organizer=instance.organizer, event=instance, domainname=self.cleaned_data['domain']
                    )
                elif not current_domain:
                    KnownDomain.objects.create(
                        organizer=instance.organizer, event=instance, domainname=self.cleaned_data['domain']
                    )
            elif current_domain:
                # field cleared: drop the custom domain
                current_domain.delete()
            instance.cache.clear()
        return instance
    def clean_slug(self):
        # ignore submitted slug values unless slug changes are enabled
        if self.change_slug:
            return self.cleaned_data['slug']
        return self.instance.slug
    class Meta:
        model = Event
        localized_fields = '__all__'
        fields = [
            'name',
            'slug',
            'currency',
            'date_from',
            'date_to',
            'date_admission',
            'is_public',
            'presale_start',
            'presale_end',
            'location',
            'geo_lat',
            'geo_lon',
            'sales_channels'
        ]
        field_classes = {
            'date_from': SplitDateTimeField,
            'date_to': SplitDateTimeField,
            'date_admission': SplitDateTimeField,
            'presale_start': SplitDateTimeField,
            'presale_end': SplitDateTimeField,
        }
        widgets = {
            'date_from': SplitDateTimePickerWidget(),
            'date_to': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_date_from_0'}),
            'date_admission': SplitDateTimePickerWidget(attrs={'data-date-default': '#id_date_from_0'}),
            'presale_start': SplitDateTimePickerWidget(),
            'presale_end': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_presale_start_0'}),
            'sales_channels': CheckboxSelectMultiple(),
        }
class EventSettingsForm(SettingsForm):
    """Event-level settings form; most fields are auto-generated from the
    hierarkey settings declared in ``auto_fields`` below."""
    timezone = forms.ChoiceField(
        choices=((a, a) for a in common_timezones),
        label=_("Event timezone"),
    )
    name_scheme = forms.ChoiceField(
        label=_("Name format"),
        help_text=_("This defines how pretix will ask for human names. Changing this after you already received "
                    "orders might lead to unexpected behavior when sorting or changing names."),
        required=True,
    )
    name_scheme_titles = forms.ChoiceField(
        label=_("Allowed titles"),
        help_text=_("If the naming scheme you defined above allows users to input a title, you can use this to "
                    "restrict the set of selectable titles."),
        required=False,
    )
    # settings keys turned into form fields automatically by SettingsForm
    auto_fields = [
        'imprint_url',
        'checkout_email_helptext',
        'presale_has_ended_text',
        'voucher_explanation_text',
        'checkout_success_text',
        'show_dates_on_frontpage',
        'show_date_to',
        'show_times',
        'show_items_outside_presale_period',
        'display_net_prices',
        'presale_start_show_date',
        'locales',
        'locale',
        'region',
        'show_quota_left',
        'waiting_list_enabled',
        'waiting_list_hours',
        'waiting_list_auto',
        'waiting_list_names_asked',
        'waiting_list_names_required',
        'waiting_list_phones_asked',
        'waiting_list_phones_required',
        'waiting_list_phones_explanation_text',
        'max_items_per_order',
        'reservation_time',
        'contact_mail',
        'show_variations_expanded',
        'hide_sold_out',
        'meta_noindex',
        'redirect_to_checkout_directly',
        'frontpage_subevent_ordering',
        'event_list_type',
        'event_list_available_only',
        'frontpage_text',
        'event_info_text',
        'attendee_names_asked',
        'attendee_names_required',
        'attendee_emails_asked',
        'attendee_emails_required',
        'attendee_company_asked',
        'attendee_company_required',
        'attendee_addresses_asked',
        'attendee_addresses_required',
        'attendee_data_explanation_text',
        'order_phone_asked',
        'order_phone_required',
        'checkout_phone_helptext',
        'banner_text',
        'banner_text_bottom',
        'order_email_asked_twice',
        'last_order_modification_date',
        'allow_modifications_after_checkin',
        'checkout_show_copy_answers_button',
        'show_checkin_number_user',
        'primary_color',
        'theme_color_success',
        'theme_color_danger',
        'theme_color_background',
        'theme_round_borders',
        'primary_font',
        'logo_image',
        'logo_image_large',
        'logo_show_title',
        'og_image',
    ]
def _resolve_virtual_keys_input(self, data, prefix=''):
# set all dependants of virtual_keys and
# delete all virtual_fields to prevent them from being saved
for virtual_key in self.virtual_keys:
if prefix + virtual_key not in data:
continue
base_key = prefix + virtual_key.rsplit('_', 2)[0]
asked_key = base_key + '_asked'
required_key = base_key + '_required'
if data[prefix + virtual_key] == 'optional':
data[asked_key] = True
data[required_key] = False
elif data[prefix + virtual_key] == 'required':
data[asked_key] = True
data[required_key] = True
# Explicitly check for 'do_not_ask'.
# Do not overwrite as default-behaviour when no value for virtual field is transmitted!
elif data[prefix + virtual_key] == 'do_not_ask':
data[asked_key] = False
data[required_key] = False
# hierarkey.forms cannot handle non-existent keys in cleaned_data => do not delete, but set to None
if not prefix:
data[virtual_key] = None
return data
def clean(self):
    """Resolve virtual fields and validate the submitted event settings."""
    data = super().clean()
    # NOTE(review): settings_dict merges the stored settings with the
    # submitted data, but it is not passed on anywhere below — confirm
    # whether validate_event_settings should receive the merged dict
    # instead of `data`.
    settings_dict = self.event.settings.freeze()
    settings_dict.update(data)
    data = self._resolve_virtual_keys_input(data)
    validate_event_settings(self.event, data)
    return data
def __init__(self, *args, **kwargs):
    """Populate dynamic choices and build the virtual *_asked_required fields.

    Expects the event instance in ``kwargs['obj']`` (it is left in kwargs for
    the parent class to consume).
    """
    self.event = kwargs['obj']
    super().__init__(*args, **kwargs)
    # Render each naming scheme as "Ask for <fields>, display like <example>".
    self.fields['name_scheme'].choices = (
        (k, _('Ask for {fields}, display like {example}').format(
            fields=' + '.join(str(vv[1]) for vv in v['fields']),
            example=v['concatenation'](v['sample'])
        ))
        for k, v in PERSON_NAME_SCHEMES.items()
    )
    self.fields['name_scheme_titles'].choices = [('', _('Free text input'))] + [
        (k, '{scheme}: {samples}'.format(
            scheme=v[0],
            samples=', '.join(v[1])
        ))
        for k, v in PERSON_NAME_TITLE_GROUPS.items()
    ]
    # Subevent-related display options only make sense for event series.
    if not self.event.has_subevents:
        del self.fields['frontpage_subevent_ordering']
        del self.fields['event_list_type']
        del self.fields['event_list_available_only']
    # create "virtual" fields for better UX when editing <name>_asked and <name>_required fields
    self.virtual_keys = []
    for asked_key in [key for key in self.fields.keys() if key.endswith('_asked')]:
        required_key = asked_key.rsplit('_', 1)[0] + '_required'
        virtual_key = asked_key + '_required'
        if required_key not in self.fields or virtual_key in self.fields:
            # either no matching required key or
            # there already is a field with virtual_key defined manually, so do not overwrite
            continue
        asked_field = self.fields[asked_key]
        # Virtual radio field replacing the two boolean fields in the UI.
        self.fields[virtual_key] = forms.ChoiceField(
            label=asked_field.label,
            help_text=asked_field.help_text,
            required=True,
            widget=forms.RadioSelect,
            choices=[
                # default key needs a value other than '' because with '' it would also overwrite even if combi-field is not transmitted
                ('do_not_ask', _('Do not ask')),
                ('optional', _('Ask, but do not require input')),
                ('required', _('Ask and require input'))
            ]
        )
        self.virtual_keys.append(virtual_key)
        # Derive the initial radio value from the two stored booleans.
        if self.initial[required_key]:
            self.initial[virtual_key] = 'required'
        elif self.initial[asked_key]:
            self.initial[virtual_key] = 'optional'
        else:
            self.initial[virtual_key] = 'do_not_ask'
@cached_property
def changed_data(self):
    """Names of fields whose submitted value differs from their initial value.

    Overrides Django's implementation so the virtual *_asked_required fields
    are resolved against the submitted data first.
    """
    data = []
    # We need to resolve the mapping between our "virtual" fields and the "real"fields here, otherwise
    # they are detected as "changed" on every save even though they aren't.
    in_data = self._resolve_virtual_keys_input(self.data.copy(), prefix=f'{self.prefix}-' if self.prefix else '')
    for name, field in self.fields.items():
        prefixed_name = self.add_prefix(name)
        data_value = field.widget.value_from_datadict(in_data, self.files, prefixed_name)
        if not field.show_hidden_initial:
            # Use the BoundField's initial as this is the value passed to
            # the widget.
            initial_value = self[name].initial
        else:
            initial_prefixed_name = self.add_initial_prefix(name)
            hidden_widget = field.hidden_widget()
            try:
                initial_value = field.to_python(hidden_widget.value_from_datadict(
                    self.data, self.files, initial_prefixed_name))
            except ValidationError:
                # Always assume data has changed if validation fails.
                data.append(name)
                continue
        if field.has_changed(initial_value, data_value):
            data.append(name)
    return data
class CancelSettingsForm(SettingsForm):
    """Event settings controlling user-initiated cancellations and order changes."""

    auto_fields = [
        'cancel_allow_user',
        'cancel_allow_user_until',
        'cancel_allow_user_paid',
        'cancel_allow_user_paid_until',
        'cancel_allow_user_paid_keep',
        'cancel_allow_user_paid_keep_fees',
        'cancel_allow_user_paid_keep_percentage',
        'cancel_allow_user_paid_adjust_fees',
        'cancel_allow_user_paid_adjust_fees_explanation',
        'cancel_allow_user_paid_adjust_fees_step',
        'cancel_allow_user_paid_refund_as_giftcard',
        'cancel_allow_user_paid_require_approval',
        'change_allow_user_variation',
        'change_allow_user_price',
        'change_allow_user_until',
        'change_allow_user_addons',
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # If gift cards are configured to expire, surface the expiry period in
        # the help text so organizers know what refund-as-giftcard implies.
        if self.obj.settings.giftcard_expiry_years is not None:
            self.fields['cancel_allow_user_paid_refund_as_giftcard'].help_text = gettext(
                'You have configured gift cards to be valid {} years plus the year the gift card is issued in.'
            ).format(self.obj.settings.giftcard_expiry_years)
class PaymentSettingsForm(SettingsForm):
    """Event settings for payment terms and payment-fee taxation."""

    auto_fields = [
        'payment_term_mode',
        'payment_term_days',
        'payment_term_weekdays',
        'payment_term_minutes',
        'payment_term_last',
        'payment_term_expire_automatically',
        'payment_term_accept_late',
        'payment_pending_hidden',
        'payment_explanation',
    ]
    tax_rate_default = forms.ModelChoiceField(
        queryset=TaxRule.objects.none(),
        label=_('Tax rule for payment fees'),
        required=False,
        help_text=_("The tax rule that applies for additional fees you configured for single payment methods. This "
                    "will set the tax rate and reverse charge rules, other settings of the tax rule are ignored.")
    )

    def clean_payment_term_days(self):
        """Require a day count when the payment term is measured in days."""
        value = self.cleaned_data.get('payment_term_days')
        if self.cleaned_data.get('payment_term_mode') == 'days' and value is None:
            raise ValidationError(_("This field is required."))
        return value

    def clean_payment_term_minutes(self):
        """Require a minute count when the payment term is measured in minutes."""
        value = self.cleaned_data.get('payment_term_minutes')
        if self.cleaned_data.get('payment_term_mode') == 'minutes' and value is None:
            raise ValidationError(_("This field is required."))
        return value

    def clean(self):
        """Validate the submitted settings against event-level constraints."""
        data = super().clean()
        # NOTE(review): settings_dict is built but not used below — confirm
        # whether validate_event_settings should receive the merged dict.
        settings_dict = self.obj.settings.freeze()
        settings_dict.update(data)
        validate_event_settings(self.obj, data)
        return data

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict the tax-rule choices to the rules of this event.
        self.fields['tax_rate_default'].queryset = self.obj.tax_rules.all()
class ProviderForm(SettingsForm):
    """
    This is a SettingsForm, but if fields are set to required=True, validation
    errors are only raised if the payment method is enabled.
    """

    def __init__(self, *args, **kwargs):
        # settingspref: settings-key prefix of the provider (e.g. 'payment_x_').
        self.settingspref = kwargs.pop('settingspref')
        self.provider = kwargs.pop('provider', None)
        super().__init__(*args, **kwargs)

    def prepare_fields(self):
        """Make all fields optional at the form level; clean() re-checks them.

        The original ``required`` flag is stashed in ``_required`` so clean()
        can enforce it only when the provider is enabled.
        """
        for k, v in self.fields.items():
            v._required = v.required
            v.required = False
            v.widget.is_required = False
            if isinstance(v, I18nFormField):
                # For i18n fields the relevant flag is one_required instead.
                v._required = v.one_required
                v.one_required = False
                v.widget.enabled_locales = self.locales
            elif isinstance(v, (RelativeDateTimeField, RelativeDateField)):
                v.set_event(self.obj)
            if hasattr(v, '_as_type'):
                self.initial[k] = self.obj.settings.get(k, as_type=v._as_type, default=v.initial)

    def clean(self):
        """Enforce required fields only if the provider is enabled."""
        cleaned_data = super().clean()
        enabled = cleaned_data.get(self.settingspref + '_enabled')
        if not enabled:
            # Skip all further checks; Django falls back to self.cleaned_data.
            return
        # NOTE(review): a truthy hidden URL is reset to None here — presumably
        # to force regeneration of the secret URL; confirm against callers.
        if cleaned_data.get(self.settingspref + '_hidden_url', None):
            cleaned_data[self.settingspref + '_hidden_url'] = None
        for k, v in self.fields.items():
            val = cleaned_data.get(k)
            if v._required and not val:
                self.add_error(k, _('This field is required.'))
        if self.provider:
            # Give the provider implementation a chance to run its own checks.
            cleaned_data = self.provider.settings_form_clean(cleaned_data)
        return cleaned_data
class InvoiceSettingsForm(SettingsForm):
    """Event settings controlling invoice generation, numbering and layout."""

    auto_fields = [
        'invoice_address_asked',
        'invoice_address_required',
        'invoice_address_vatid',
        'invoice_address_company_required',
        'invoice_address_beneficiary',
        'invoice_address_custom_field',
        'invoice_name_required',
        'invoice_address_not_asked_free',
        'invoice_include_free',
        'invoice_show_payments',
        'invoice_reissue_after_modify',
        'invoice_generate',
        'invoice_attendee_name',
        'invoice_event_location',
        'invoice_include_expire_date',
        'invoice_numbers_consecutive',
        'invoice_numbers_prefix',
        'invoice_numbers_prefix_cancellations',
        'invoice_numbers_counter_length',
        'invoice_address_explanation_text',
        'invoice_email_attachment',
        'invoice_email_organizer',
        'invoice_address_from_name',
        'invoice_address_from',
        'invoice_address_from_zipcode',
        'invoice_address_from_city',
        'invoice_address_from_country',
        'invoice_address_from_tax_id',
        'invoice_address_from_vat_id',
        'invoice_introductory_text',
        'invoice_additional_text',
        'invoice_footer_text',
        'invoice_eu_currencies',
        'invoice_logo_image',
    ]
    invoice_generate_sales_channels = forms.MultipleChoiceField(
        label=_('Generate invoices for Sales channels'),
        choices=[],  # populated in __init__ from the registered sales channels
        widget=forms.CheckboxSelectMultiple,
        help_text=_("If you have enabled invoice generation in the previous setting, you can limit it here to specific "
                    "sales channels.")
    )
    invoice_renderer = forms.ChoiceField(
        label=_("Invoice style"),
        required=True,
        choices=[]  # populated in __init__ from the event's invoice renderers
    )
    invoice_language = forms.ChoiceField(
        widget=forms.Select, required=True,
        label=_("Invoice language"),
        choices=[('__user__', _('The user\'s language'))] + settings.LANGUAGES,
    )

    def __init__(self, *args, **kwargs):
        # Note: 'obj' is read (not popped) so the parent can consume it too.
        event = kwargs.get('obj')
        super().__init__(*args, **kwargs)
        self.fields['invoice_renderer'].choices = [
            (r.identifier, r.verbose_name) for r in event.get_invoice_renderers().values()
        ]
        # Suggest "<SLUG>-" as the default numbering prefix.
        self.fields['invoice_numbers_prefix'].widget.attrs['placeholder'] = event.slug.upper() + '-'
        # Cancellation invoices default to the regular prefix if one is set.
        if event.settings.invoice_numbers_prefix:
            self.fields['invoice_numbers_prefix_cancellations'].widget.attrs['placeholder'] = event.settings.invoice_numbers_prefix
        else:
            self.fields['invoice_numbers_prefix_cancellations'].widget.attrs['placeholder'] = event.slug.upper() + '-'
        # Limit the language choices to the locales enabled for this event.
        locale_names = dict(settings.LANGUAGES)
        self.fields['invoice_language'].choices = [('__user__', _('The user\'s language'))] + [(a, locale_names[a]) for a in event.settings.locales]
        self.fields['invoice_generate_sales_channels'].choices = (
            (c.identifier, c.verbose_name) for c in get_all_sales_channels().values()
        )

    def clean(self):
        """Validate the submitted settings against event-level constraints."""
        data = super().clean()
        # NOTE(review): settings_dict is built but not used below — confirm
        # whether validate_event_settings should receive the merged dict.
        settings_dict = self.obj.settings.freeze()
        settings_dict.update(data)
        validate_event_settings(self.obj, data)
        return data
def multimail_validate(val):
    """Validate a comma-separated list of email addresses.

    Each part (with surrounding whitespace ignored) must be a valid address;
    returns the raw, unstripped parts.
    """
    parts = val.split(',')
    for address in parts:
        validate_email(address.strip())
    return parts
def contains_web_channel_validate(val):
    """Raise a ValidationError unless the "web" sales channel is selected."""
    if "web" in val:
        return
    raise ValidationError(_("The online shop must be selected to receive these emails."))
class MailSettingsForm(SettingsForm):
    """Event settings for all transactional emails (texts, timing, channels).

    Placeholder help texts and validators for the text fields are attached in
    __init__ based on the ``base_context`` mapping below.
    """

    auto_fields = [
        'mail_prefix',
        'mail_from_name',
        'mail_attach_ical',
        'mail_attach_tickets',
        'mail_attachment_new_order',
        'mail_attach_ical_paid_only',
        'mail_attach_ical_description',
    ]
    mail_sales_channel_placed_paid = forms.MultipleChoiceField(
        choices=lambda: [(ident, sc.verbose_name) for ident, sc in get_all_sales_channels().items()],
        label=_('Sales channels for checkout emails'),
        help_text=_('The order placed and paid emails will only be send to orders from these sales channels. '
                    'The online shop must be enabled.'),
        widget=forms.CheckboxSelectMultiple(
            attrs={'class': 'scrolling-multiple-choice'}
        ),
        validators=[contains_web_channel_validate],
    )
    mail_sales_channel_download_reminder = forms.MultipleChoiceField(
        choices=lambda: [(ident, sc.verbose_name) for ident, sc in get_all_sales_channels().items()],
        label=_('Sales channels'),
        help_text=_('This email will only be send to orders from these sales channels. The online shop must be enabled.'),
        widget=forms.CheckboxSelectMultiple(
            attrs={'class': 'scrolling-multiple-choice'}
        ),
        validators=[contains_web_channel_validate],
    )
    mail_bcc = forms.CharField(
        label=_("Bcc address"),
        help_text=_("All emails will be sent to this address as a Bcc copy"),
        validators=[multimail_validate],
        required=False,
        max_length=255
    )
    mail_text_signature = I18nFormField(
        label=_("Signature"),
        required=False,
        widget=I18nTextarea,
        help_text=_("This will be attached to every email. Available placeholders: {event}"),
        validators=[PlaceholderValidator(['{event}'])],
        widget_kwargs={'attrs': {
            'rows': '4',
            'placeholder': _(
                'e.g. your contact details'
            )
        }}
    )
    mail_html_renderer = forms.ChoiceField(
        label=_("HTML mail renderer"),
        required=True,
        choices=[]  # populated in __init__ from the event's HTML mail renderers
    )
    mail_text_order_placed = I18nFormField(
        label=_("Text sent to order contact address"),
        required=False,
        widget=I18nTextarea,
    )
    mail_send_order_placed_attendee = forms.BooleanField(
        label=_("Send an email to attendees"),
        help_text=_('If the order contains attendees with email addresses different from the person who orders the '
                    'tickets, the following email will be sent out to the attendees.'),
        required=False,
    )
    mail_text_order_placed_attendee = I18nFormField(
        label=_("Text sent to attendees"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_order_paid = I18nFormField(
        label=_("Text sent to order contact address"),
        required=False,
        widget=I18nTextarea,
    )
    mail_send_order_paid_attendee = forms.BooleanField(
        label=_("Send an email to attendees"),
        help_text=_('If the order contains attendees with email addresses different from the person who orders the '
                    'tickets, the following email will be sent out to the attendees.'),
        required=False,
    )
    mail_text_order_paid_attendee = I18nFormField(
        label=_("Text sent to attendees"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_order_free = I18nFormField(
        label=_("Text sent to order contact address"),
        required=False,
        widget=I18nTextarea,
    )
    mail_send_order_free_attendee = forms.BooleanField(
        label=_("Send an email to attendees"),
        help_text=_('If the order contains attendees with email addresses different from the person who orders the '
                    'tickets, the following email will be sent out to the attendees.'),
        required=False,
    )
    mail_text_order_free_attendee = I18nFormField(
        label=_("Text sent to attendees"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_order_changed = I18nFormField(
        label=_("Text"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_resend_link = I18nFormField(
        label=_("Text (sent by admin)"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_resend_all_links = I18nFormField(
        label=_("Text (requested by user)"),
        required=False,
        widget=I18nTextarea,
    )
    mail_days_order_expire_warning = forms.IntegerField(
        label=_("Number of days"),
        required=True,
        min_value=0,
        help_text=_("This email will be sent out this many days before the order expires. If the "
                    "value is 0, the mail will never be sent.")
    )
    mail_text_order_expire_warning = I18nFormField(
        label=_("Text"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_waiting_list = I18nFormField(
        label=_("Text"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_order_canceled = I18nFormField(
        label=_("Text"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_order_custom_mail = I18nFormField(
        label=_("Text"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_download_reminder = I18nFormField(
        label=_("Text sent to order contact address"),
        required=False,
        widget=I18nTextarea,
    )
    mail_send_download_reminder_attendee = forms.BooleanField(
        label=_("Send an email to attendees"),
        help_text=_('If the order contains attendees with email addresses different from the person who orders the '
                    'tickets, the following email will be sent out to the attendees.'),
        required=False,
    )
    mail_text_download_reminder_attendee = I18nFormField(
        label=_("Text sent to attendees"),
        required=False,
        widget=I18nTextarea,
    )
    mail_days_download_reminder = forms.IntegerField(
        label=_("Number of days"),
        required=False,
        min_value=0,
        help_text=_("This email will be sent out this many days before the order event starts. If the "
                    "field is empty, the mail will never be sent.")
    )
    mail_text_order_placed_require_approval = I18nFormField(
        label=_("Received order"),
        required=False,
        widget=I18nTextarea,
    )
    mail_text_order_approved = I18nFormField(
        label=_("Approved order"),
        required=False,
        widget=I18nTextarea,
        help_text=_("This will only be sent out for non-free orders. Free orders will receive the free order "
                    "template from below instead."),
    )
    mail_text_order_approved_free = I18nFormField(
        label=_("Approved free order"),
        required=False,
        widget=I18nTextarea,
        help_text=_("This will only be sent out for free orders. Non-free orders will receive the non-free order "
                    "template from above instead."),
    )
    mail_text_order_denied = I18nFormField(
        label=_("Denied order"),
        required=False,
        widget=I18nTextarea,
    )
    # Maps each text field to the placeholder context names it supports; used
    # by __init__ to extend help texts and attach PlaceholderValidators.
    base_context = {
        'mail_text_order_placed': ['event', 'order', 'payment'],
        'mail_text_order_placed_attendee': ['event', 'order', 'position'],
        'mail_text_order_placed_require_approval': ['event', 'order'],
        'mail_text_order_approved': ['event', 'order'],
        'mail_text_order_approved_free': ['event', 'order'],
        'mail_text_order_denied': ['event', 'order', 'comment'],
        'mail_text_order_paid': ['event', 'order', 'payment_info'],
        'mail_text_order_paid_attendee': ['event', 'order', 'position'],
        'mail_text_order_free': ['event', 'order'],
        'mail_text_order_free_attendee': ['event', 'order', 'position'],
        'mail_text_order_changed': ['event', 'order'],
        'mail_text_order_canceled': ['event', 'order', 'comment'],
        'mail_text_order_expire_warning': ['event', 'order'],
        'mail_text_order_custom_mail': ['event', 'order'],
        'mail_text_download_reminder': ['event', 'order'],
        'mail_text_download_reminder_attendee': ['event', 'order', 'position'],
        'mail_text_resend_link': ['event', 'order'],
        'mail_text_waiting_list': ['event', 'waiting_list_entry'],
        'mail_text_resend_all_links': ['event', 'orders'],
        'mail_attach_ical_description': ['event', 'event_or_subevent'],
    }

    def _set_field_placeholders(self, fn, base_parameters):
        """Append the available placeholders to field ``fn``'s help text and
        attach a validator restricting the field to those placeholders."""
        phs = [
            '{%s}' % p
            for p in sorted(get_available_placeholders(self.event, base_parameters).keys())
        ]
        ht = _('Available placeholders: {list}').format(
            list=', '.join(phs)
        )
        if self.fields[fn].help_text:
            self.fields[fn].help_text += ' ' + str(ht)
        else:
            self.fields[fn].help_text = ht
        self.fields[fn].validators.append(
            PlaceholderValidator(phs)
        )

    def __init__(self, *args, **kwargs):
        self.event = event = kwargs.get('obj')
        super().__init__(*args, **kwargs)
        self.fields['mail_html_renderer'].choices = [
            (r.identifier, r.verbose_name) for r in event.get_html_mail_renderers().values()
        ]
        # Prefetch organizer/event metadata used when resolving placeholders.
        prefetch_related_objects([self.event.organizer], Prefetch('meta_properties'))
        self.event.meta_values_cached = self.event.meta_values.select_related('property').all()
        for k, v in self.base_context.items():
            self._set_field_placeholders(k, v)
        for k, v in list(self.fields.items()):
            if k.endswith('_attendee') and not event.settings.attendee_emails_asked:
                # If we don't ask for attendee emails, we can't send them anything and we don't need to clutter
                # the user interface with it
                del self.fields[k]
class TicketSettingsForm(SettingsForm):
    """Event settings for ticket downloads and ticket secret generation.

    Like ProviderForm, required fields are only enforced when the ticket
    download feature is enabled (see prepare_fields/clean).
    """

    auto_fields = [
        'ticket_download',
        'ticket_download_date',
        'ticket_download_addons',
        'ticket_download_nonadm',
        'ticket_download_pending',
        'ticket_download_require_validated_email',
        'ticket_secret_length',
    ]
    ticket_secret_generator = forms.ChoiceField(
        label=_("Ticket code generator"),
        help_text=_("For advanced users, usually does not need to be changed."),
        required=True,
        widget=forms.RadioSelect,
        choices=[]  # populated in __init__ from the event's secret generators
    )

    def __init__(self, *args, **kwargs):
        event = kwargs.get('obj')
        super().__init__(*args, **kwargs)
        self.fields['ticket_secret_generator'].choices = [
            (r.identifier, r.verbose_name) for r in event.ticket_secret_generators.values()
        ]

    def prepare_fields(self):
        # See clean()
        # Stash the original required flags in _required and disable them at
        # the form level; clean() re-checks them only when downloads are on.
        for k, v in self.fields.items():
            v._required = v.required
            v.required = False
            v.widget.is_required = False
            if isinstance(v, I18nFormField):
                v._required = v.one_required
                v.one_required = False
                v.widget.enabled_locales = self.locales

    def clean(self):
        # required=True files should only be required if the feature is enabled
        cleaned_data = super().clean()
        # NOTE(review): the setting appears to be serialized as the string
        # 'True' rather than a boolean here — confirm against SettingsForm.
        enabled = cleaned_data.get('ticket_download') == 'True'
        if not enabled:
            return
        for k, v in self.fields.items():
            val = cleaned_data.get(k)
            if v._required and (val is None or val == ""):
                self.add_error(k, _('This field is required.'))
class CommentForm(I18nModelForm):
    """Model form for the internal comment on an event.

    Pass ``readonly=True`` to render the textarea as read-only.
    """

    def __init__(self, *args, **kwargs):
        self.readonly = kwargs.pop('readonly', None)
        super().__init__(*args, **kwargs)
        if self.readonly:
            self.fields['comment'].widget.attrs['readonly'] = 'readonly'

    class Meta:
        model = Event
        fields = ['comment']
        widgets = {
            'comment': forms.Textarea(attrs={
                'rows': 3,
                'class': 'helper-width-100',
            }),
        }
class CountriesAndEU(CachedCountries):
    """Country choices extended with "Any country" (ZZ) and "European Union" (EU),
    pinned to the top of the list."""

    override = {
        'ZZ': _('Any country'),
        'EU': _('European Union')
    }
    first = ['ZZ', 'EU']
    # Separate cache key so the extended list does not clash with the default one.
    cache_subkey = 'with_any_or_eu'
class TaxRuleLineForm(I18nForm):
    """One rule line of a tax rule: which customers/countries it matches and
    which action (VAT treatment) applies."""

    country = LazyTypedChoiceField(
        choices=CountriesAndEU(),
        required=False
    )
    address_type = forms.ChoiceField(
        choices=[
            ('', _('Any customer')),
            ('individual', _('Individual')),
            ('business', _('Business')),
            ('business_vat_id', _('Business with valid VAT ID')),
        ],
        required=False
    )
    action = forms.ChoiceField(
        choices=[
            ('vat', _('Charge VAT')),
            ('reverse', _('Reverse charge')),
            ('no', _('No VAT')),
            ('block', _('Sale not allowed')),
            ('require_approval', _('Order requires approval')),
        ],
    )
    # Optional rate overriding the tax rule's default rate for matching orders.
    rate = forms.DecimalField(
        label=_('Deviating tax rate'),
        max_digits=10, decimal_places=2,
        required=False
    )
    invoice_text = I18nFormField(
        label=_('Text on invoice'),
        required=False,
        widget=I18nTextInput
    )
class I18nBaseFormSet(I18nFormSetMixin, forms.BaseFormSet):
    # compatibility shim for django-i18nfield library
    def __init__(self, *args, **kwargs):
        """Pull the event out of kwargs and inject its locales for i18n fields."""
        event = kwargs.pop('event', None)
        self.event = event
        if event:
            kwargs['locales'] = event.settings.get('locales')
        super().__init__(*args, **kwargs)
# Formset for editing the ordered, deletable list of tax rule lines.
TaxRuleLineFormSet = formset_factory(
    TaxRuleLineForm, formset=I18nBaseFormSet,
    can_order=True, can_delete=True, extra=0
)
class TaxRuleForm(I18nModelForm):
    """Model form for the basic attributes of a tax rule."""

    class Meta:
        model = TaxRule
        fields = ['name', 'rate', 'price_includes_tax', 'eu_reverse_charge', 'home_country', 'internal_name', 'keep_gross_if_rate_changes']
class WidgetCodeForm(forms.Form):
    """Options for generating the embeddable shop widget code for an event."""

    subevent = forms.ModelChoiceField(
        label=pgettext_lazy('subevent', "Date"),
        required=False,
        queryset=SubEvent.objects.none()  # restricted to this event in __init__
    )
    language = forms.ChoiceField(
        label=_("Language"),
        required=True,
        choices=settings.LANGUAGES
    )
    voucher = forms.CharField(
        label=_("Pre-selected voucher"),
        required=False,
        help_text=_("If set, the widget will show products as if this voucher has been entered and when a product is "
                    "bought via the widget, this voucher will be used. This can for example be used to provide "
                    "widgets that give discounts or unlock secret products.")
    )
    compatibility_mode = forms.BooleanField(
        label=_("Compatibility mode"),
        required=False,
        help_text=_("Our regular widget doesn't work in all website builders. If you run into trouble, try using "
                    "this compatibility mode.")
    )

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        # Only event series offer a date (subevent) selection.
        if self.event.has_subevents:
            self.fields['subevent'].queryset = self.event.subevents.all()
        else:
            del self.fields['subevent']
        # Limit the language choices to the locales enabled for this event.
        self.fields['language'].choices = [(l, n) for l, n in settings.LANGUAGES if l in self.event.settings.locales]

    def clean_voucher(self):
        """Ensure a pre-selected voucher code actually exists for this event."""
        v = self.cleaned_data.get('voucher')
        if not v:
            return
        if not self.event.vouchers.filter(code=v).exists():
            raise ValidationError(_('The given voucher code does not exist.'))
        return v
class EventDeleteForm(forms.Form):
    """Confirmation form for event deletion: the user must retype the slug."""

    error_messages = {
        'slug_wrong': _("The slug you entered was not correct."),
    }
    slug = forms.CharField(
        max_length=255,
        label=_("Event slug"),
    )

    def __init__(self, *args, **kwargs):
        self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)

    def clean_slug(self):
        """Reject the deletion unless the typed slug matches the event's slug."""
        slug = self.cleaned_data.get('slug')
        if slug != self.event.slug:
            raise forms.ValidationError(
                self.error_messages['slug_wrong'],
                code='slug_wrong',
            )
        return slug
class QuickSetupForm(I18nForm):
    """One-page quick setup wizard collecting the most common event options,
    including basic payment provider configuration."""

    show_quota_left = forms.BooleanField(
        label=_("Show number of tickets left"),
        help_text=_("Publicly show how many tickets of a certain type are still available."),
        required=False
    )
    waiting_list_enabled = forms.BooleanField(
        label=_("Waiting list"),
        help_text=_("Once a ticket is sold out, people can add themselves to a waiting list. As soon as a ticket "
                    "becomes available again, it will be reserved for the first person on the waiting list and this "
                    "person will receive an email notification with a voucher that can be used to buy a ticket."),
        required=False
    )
    ticket_download = forms.BooleanField(
        label=_("Ticket downloads"),
        help_text=_("Your customers will be able to download their tickets in PDF format."),
        required=False
    )
    attendee_names_required = forms.BooleanField(
        label=_("Require all attendees to fill in their names"),
        help_text=_("By default, we will ask for names but not require them. You can turn this off completely in the "
                    "settings."),
        required=False
    )
    imprint_url = forms.URLField(
        label=_("Imprint URL"),
        help_text=_("This should point e.g. to a part of your website that has your contact details and legal "
                    "information."),
        required=False,
    )
    contact_mail = forms.EmailField(
        label=_("Contact address"),
        required=False,
        help_text=_("We'll show this publicly to allow attendees to contact you.")
    )
    total_quota = forms.IntegerField(
        label=_("Total capacity"),
        min_value=0,
        widget=forms.NumberInput(
            attrs={
                'placeholder': '∞'  # empty means unlimited capacity
            }
        ),
        required=False
    )
    payment_stripe__enabled = forms.BooleanField(
        label=_("Payment via Stripe"),
        help_text=_("Stripe is an online payments processor supporting credit cards and lots of other payment options. "
                    "To accept payments via Stripe, you will need to set up an account with them, which takes less "
                    "than five minutes using their simple interface."),
        required=False
    )
    payment_banktransfer__enabled = forms.BooleanField(
        label=_("Payment by bank transfer"),
        help_text=_("Your customers will be instructed to wire the money to your account. You can then import your "
                    "bank statements to process the payments within pretix, or mark them as paid manually."),
        required=False
    )
    # Reuse the bank transfer provider's own field definitions so the quick
    # setup stays consistent with the full provider settings form.
    btf = BankTransfer.form_fields()
    payment_banktransfer_bank_details_type = btf['bank_details_type']
    payment_banktransfer_bank_details_sepa_name = btf['bank_details_sepa_name']
    payment_banktransfer_bank_details_sepa_iban = btf['bank_details_sepa_iban']
    payment_banktransfer_bank_details_sepa_bic = btf['bank_details_sepa_bic']
    payment_banktransfer_bank_details_sepa_bank = btf['bank_details_sepa_bank']
    payment_banktransfer_bank_details = btf['bank_details']

    def __init__(self, *args, **kwargs):
        self.obj = kwargs.pop('event', None)
        self.locales = self.obj.settings.get('locales') if self.obj else kwargs.pop('locales', None)
        kwargs['locales'] = self.locales
        super().__init__(*args, **kwargs)
        # NOTE(review): this assumes self.obj is set — confirm the form is
        # never instantiated without an event.
        if not self.obj.settings.payment_stripe_connect_client_id:
            del self.fields['payment_stripe__enabled']
        self.fields['payment_banktransfer_bank_details'].required = False
        # The data-required-if widget hints come from the provider form and do
        # not apply in the quick setup context.
        for f in self.fields.values():
            if 'data-required-if' in f.widget.attrs:
                del f.widget.attrs['data-required-if']

    def clean(self):
        """Run the bank transfer provider's own validation when it is enabled."""
        cleaned_data = super().clean()
        if cleaned_data.get('payment_banktransfer__enabled'):
            provider = BankTransfer(self.obj)
            cleaned_data = provider.settings_form_clean(cleaned_data)
        return cleaned_data
class QuickSetupProductForm(I18nForm):
    """A single product row in the quick setup wizard (name, price, quota)."""

    name = I18nFormField(
        max_length=200,  # Max length of Quota.name
        label=_("Product name"),
        widget=I18nTextInput
    )
    default_price = forms.DecimalField(
        label=_("Price (optional)"),
        max_digits=7, decimal_places=2, required=False,
        localize=True,
        widget=forms.TextInput(
            attrs={
                'placeholder': _('Free')  # empty price means a free product
            }
        ),
    )
    quota = forms.IntegerField(
        label=_("Quantity available"),
        min_value=0,
        widget=forms.NumberInput(
            attrs={
                'placeholder': '∞'  # empty means unlimited quota
            }
        ),
        initial=100,
        required=False
    )
class BaseQuickSetupProductFormSet(I18nFormSetMixin, forms.BaseFormSet):
    def __init__(self, *args, **kwargs):
        """Inject the event's locales so i18n fields render all languages."""
        evt = kwargs.pop('event', None)
        if evt:
            kwargs['locales'] = evt.settings.get('locales')
        super().__init__(*args, **kwargs)
# Formset for the deletable product list in the quick setup wizard.
QuickSetupProductFormSet = formset_factory(
    QuickSetupProductForm,
    formset=BaseQuickSetupProductFormSet,
    can_order=False, can_delete=True, extra=0
)
class ItemMetaPropertyForm(forms.ModelForm):
    """Model form for an item meta property (name and default value)."""

    class Meta:
        fields = ['name', 'default']
        widgets = {
            'default': forms.TextInput()
        }
class ConfirmTextForm(I18nForm):
    """A single confirmation text the customer must accept during checkout."""

    text = I18nFormField(
        widget=I18nTextarea,
        widget_kwargs={'attrs': {'rows': '2'}},
    )
class BaseConfirmTextFormSet(I18nFormSetMixin, forms.BaseFormSet):
    def __init__(self, *args, **kwargs):
        """Inject the event's locales so i18n fields render all languages."""
        evt = kwargs.pop('event', None)
        if evt:
            kwargs['locales'] = evt.settings.get('locales')
        super().__init__(*args, **kwargs)
# Formset for the ordered, deletable list of checkout confirmation texts.
ConfirmTextFormset = formset_factory(
    ConfirmTextForm,
    formset=BaseConfirmTextFormSet,
    can_order=True, can_delete=True, extra=0
)
|
#!/usr/bin/env python
from pathlib import Path
import csv
import warnings
import pandas as pd
class Phenotype:
    """
    Load BIDS phenotype data
    Matching the subject list to a dataframe.
    Parameters
    ----------
    phenotype_path : str | Path
        path to BIDS dir `phenotype`
    subject_info : str | Path
        path to the subject info TSV file containing subject number and session info
        one session per row
    header : dict
        relevant header name in subject_info mapped to participant_id and ses
        Example:
        {"participant_id": "<relevant header name in subject_info>",
         "ses": "<relevant header name in subject_info>"}
    Attributes
    ----------
    `phenotype_path` : Path
        path object to BIDS dir `phenotype`
    `subject_info` : dict
        subject number with associated sessions
    `session` : bool
        session information supplied or not
    `index_keys` : list
        keys in assessment file that should be the index
        default: ["participant_id", "ses"]
    """

    def __init__(self, phenotype_path, subject_info, header):
        """Default parameters."""
        # Normalize to a Path; Path objects are passed through unchanged.
        self.phenotype_path = (
            Path(phenotype_path)
            if isinstance(phenotype_path, str)
            else phenotype_path
        )
        self.subject_info = _parseinfo(subject_info, header)
        # Session matching is only enabled when a "ses" column was supplied.
        self.session = "ses" in header
        self.index_keys = ["participant_id"]
        if self.session:
            self.index_keys.append("ses")

    def parse(self, assessments_info):
        """
        Load assessments
        Parameters
        ----------
        assessments_info: dict
            {"<assessment_name>": ["selected_var1", "selected_var2"]}
        Returns
        -------
        pandas.DataFrame or None
            selected assessment variables in the supplied sample;
            None if no assessment yielded any matching rows
        """
        collect_data = []
        for assessment, var in assessments_info.items():
            data = self._load_single(assessment, var)
            if data is not None:
                collect_data.append(data)
        if collect_data:
            # Column-wise concatenation aligned on the shared index.
            return pd.concat(collect_data, axis=1)
        else:
            return None

    def _load_single(self, assessment, var=None):
        """
        Load relevant subject and session from one assessment
        set subject and session as indices; returns None (with a warning)
        when no row matches the sample.
        """
        df = pd.read_csv(self.phenotype_path / f"{assessment}.tsv", sep="\t")
        idx = _quicksearch(df, self.subject_info, self.session)
        if idx and var:
            # copy() avoids mutating self.index_keys through list concatenation side effects.
            return df.loc[idx, self.index_keys.copy() + var].set_index(
                self.index_keys
            )
        elif idx:
            return df.loc[idx, :].set_index(self.index_keys)
        else:
            warnings.warn(
                f"no matching subject with related session {assessment}"
            )
def _parseinfo(path, header):
"""
Parse subject info
path:
path to the subject info TSV file containing subject number and session info
one session per row
header:
header name for subject id and session number in dict
{"participant_id": "<headername>", "ses": <headername> or None}
"""
subject_info = {}
with open(path, "r") as f:
reader = csv.DictReader(f, delimiter="\t")
for row in reader:
sub = row[header["participant_id"]]
if sub not in subject_info:
subject_info[sub] = [] # add new subject
if "ses" in header:
ses = row[header["ses"]]
subject_info[sub].append(ses)
return subject_info
def _quicksearch(assessment, subject_info, session=False):
"""
quick pass on BIDS compatable assesment file to find subject and session
matched in our list.
assessement: pd.DataFrame
assessment data including "participant_id", "ses" in headers
subject_info:
dictonanry containing:
{"<subject-id>": ["<session-name1>", "<session-name2>"]}
"""
info_header = ["participant_id"]
if session:
info_header.append("ses")
try:
df = assessment.loc[:, info_header]
except KeyError:
raise (KeyError)
match_index = []
for sub in subject_info:
sessions = subject_info[sub]
search_val = [sub] + sessions
data_exist = df.isin(search_val).sum(axis=1) == df.shape[1]
valid_idx = df.index[data_exist].tolist()
if len(valid_idx) == len(sessions):
match_index += valid_idx
elif len(valid_idx) > 1:
warnings.warn(
f"Duplicated entry: {search_val}, please check if your raw data is dirty"
)
return match_index
|
"""Internal bases for sessions to make it easier to call dataset methods on the session object."""
from functools import wraps
from typing import Optional, Iterable, Tuple, TypeVar, TYPE_CHECKING, Type, Sequence
import numpy as np
from nilspodlib.dataset import Dataset
from nilspodlib.utils import path_t, inplace_or_copy, remove_docstring_indent
T = TypeVar("T")
if TYPE_CHECKING:
from nilspodlib.datastream import Datastream # noqa: F401
import pandas as pd # noqa: F401
class CascadingDatasetField:
    """A simple descriptor object to forward attribute access to all datasets of a session."""

    name: str
    __doc__: str

    def __set_name__(self, owner, name):
        """Set the name of the field and update the docstring, by pulling from the Dataset class."""
        self.name = name
        # Bug fix: `getattr(Dataset, name, None).__doc__` picked up
        # NoneType's docstring for fields Dataset does not define.
        dataset_attr = getattr(Dataset, self.name, None)
        self.__doc__ = dataset_attr.__doc__ if dataset_attr is not None else None

    def __get__(self, instance, owner):
        """Get the attribute from all nested objects."""
        # One entry per dataset, in session order.
        return tuple(getattr(d, self.name) for d in instance.datasets)
def call_dataset(autogen_doc=True):  # noqa: D202
    """Forward all method calls to all datasets of a session.

    This function respects the inplace feature and will create a copy of the session object if required.

    Parameters
    ----------
    autogen_doc :
        If True, the docstring of the respective dataset method is copied to the method with short pretext.
        If a docstring already exists, the dataset docstring will be appended WITHOUT pretext. (Default value = True)

    """

    def _wrapped(method):
        @wraps(method)
        def _cascading_access(*args, **kwargs):
            # args[0] is the session object; forward the call (by method
            # name) to every dataset it holds.
            session = args[0]
            return_vals = tuple(getattr(d, method.__name__)(*args[1:], **kwargs) for d in session.datasets)
            if all(isinstance(d, Dataset) for d in return_vals):
                # The datasets returned new Dataset objects -> wrap them in
                # a session again, honouring the `inplace` keyword.
                inplace = kwargs.get("inplace", False)
                s = inplace_or_copy(session, inplace)
                s.datasets = return_vals
                return s
            # Non-Dataset results (e.g. DataFrames) are returned as a tuple.
            return return_vals

        if autogen_doc:
            if _cascading_access.__doc__:
                # A docstring already exists: append Dataset's without pretext.
                _cascading_access.__doc__ += "\n\n"
            else:
                _cascading_access.__doc__ = (
                    "Apply `Dataset.{0}` to all datasets of the session.\n\n"
                    "See :py:meth:`nilspodlib.dataset.Dataset.{0}` for more details. "
                    "The docstring of this method is included below:\n\n".format(method.__name__)
                )
            _cascading_access.__doc__ += remove_docstring_indent(getattr(Dataset, method.__name__).__doc__)
        return _cascading_access

    return _wrapped
class _MultiDataset:
    """Wrapper that holds all attributes and methods that can be simply called on multiple datasets.

    Notes
    -----
    This class should not be used as public interface and is only relevant as base for the session class

    This class uses a decorator for methods and a descriptor for attributes to automatically forward all calls to
    multiple datasets.
    See the implementation of `CascadingDatasetField` and `call_dataset` for details.

    """

    # Each field is a descriptor returning a tuple with one entry per
    # dataset of the session (see `CascadingDatasetField.__get__`).
    path: path_t = CascadingDatasetField()
    acc: Tuple[Optional["Datastream"]] = CascadingDatasetField()
    gyro: Tuple[Optional["Datastream"]] = CascadingDatasetField()
    mag: Tuple[Optional["Datastream"]] = CascadingDatasetField()
    baro: Tuple[Optional["Datastream"]] = CascadingDatasetField()
    analog: Tuple[Optional["Datastream"]] = CascadingDatasetField()
    ecg: Tuple[Optional["Datastream"]] = CascadingDatasetField()
    ppg: Tuple[Optional["Datastream"]] = CascadingDatasetField()
    temperature: Tuple[Optional["Datastream"]] = CascadingDatasetField()
    counter: Tuple[np.ndarray] = CascadingDatasetField()
    size: Tuple[int] = CascadingDatasetField()
    datastreams: Tuple[Iterable["Datastream"]] = CascadingDatasetField()
    ACTIVE_SENSORS: Tuple[Tuple[str]] = CascadingDatasetField()

    # This needs to be implemented by the session
    datasets: Tuple[Dataset]

    # NOTE: the `pass` bodies below are never executed; `call_dataset`
    # forwards each call by method name to `Dataset` and only the signature
    # (and autogenerated docstring) of these stubs matters.
    @call_dataset()
    def cut_to_syncregion(  # noqa: D105
        self: Type[T], start: bool = True, end: bool = False, warn_thres: Optional[int] = 30, inplace: bool = False
    ) -> T:
        pass

    @call_dataset()
    def cut(  # noqa: D105
        self: Type[T],
        start: Optional[int] = None,
        stop: Optional[int] = None,
        step: Optional[int] = None,
        inplace: bool = False,
    ) -> T:
        pass

    @call_dataset()
    def cut_counter_val(  # noqa: D105
        self: Type[T],
        start: Optional[int] = None,
        stop: Optional[int] = None,
        step: Optional[int] = None,
        inplace: bool = False,
    ) -> T:
        pass

    @call_dataset()
    def downsample(self: Type[T], factor: int, inplace: bool = False) -> T:  # noqa: D105
        pass

    @call_dataset()
    def data_as_df(  # noqa: D105
        self,
        datastreams: Optional[Sequence[str]] = None,
        index: Optional[str] = None,
        include_units: Optional[bool] = True,
    ) -> Tuple["pd.DataFrame"]:
        pass

    @call_dataset()
    def imu_data_as_df(self, index: Optional[str] = None) -> Tuple["pd.DataFrame"]:  # noqa: D105
        pass

    @call_dataset()
    def find_closest_calibration(  # noqa: D105
        self,
        folder: Optional[path_t] = None,
        recursive: bool = True,
        filter_cal_type: Optional[str] = None,
        before_after: Optional[str] = None,
        ignore_file_not_found: Optional[bool] = False,
    ):
        pass

    @call_dataset()
    def find_calibrations(  # noqa: D105
        self,
        folder: Optional[path_t] = None,
        recursive: bool = True,
        filter_cal_type: Optional[str] = None,
        ignore_file_not_found: Optional[bool] = False,
    ):
        pass
|
<gh_stars>0
"""Xiaomi common components for custom device handlers."""
from __future__ import annotations
import logging
import math
from typing import Iterable, Iterator
from zigpy import types as t
import zigpy.device
from zigpy.profiles import zha
from zigpy.quirks import CustomCluster, CustomDevice
from zigpy.zcl.clusters.general import (
AnalogInput,
Basic,
BinaryOutput,
DeviceTemperature,
OnOff,
PowerConfiguration,
)
from zigpy.zcl.clusters.homeautomation import ElectricalMeasurement
from zigpy.zcl.clusters.manufacturer_specific import ManufacturerSpecificCluster
from zigpy.zcl.clusters.measurement import (
IlluminanceMeasurement,
PressureMeasurement,
RelativeHumidity,
TemperatureMeasurement,
)
import zigpy.zcl.foundation as foundation
import zigpy.zdo
from zigpy.zdo.types import NodeDescriptor
from zhaquirks import (
Bus,
LocalDataCluster,
MotionOnEvent,
OccupancyWithReset,
QuickInitDevice,
)
from zhaquirks.const import (
ATTRIBUTE_ID,
ATTRIBUTE_NAME,
COMMAND_ATTRIBUTE_UPDATED,
COMMAND_TRIPLE,
UNKNOWN,
VALUE,
ZHA_SEND_EVENT,
)
# --- Bus event names and battery-related attribute ids ---
BATTERY_LEVEL = "battery_level"
BATTERY_PERCENTAGE_REMAINING = 0x0021
BATTERY_PERCENTAGE_REMAINING_ATTRIBUTE = "battery_percentage"
BATTERY_REPORTED = "battery_reported"
BATTERY_SIZE = "battery_size"
BATTERY_SIZE_ATTR = 0x0031
BATTERY_QUANTITY_ATTR = 0x0033
BATTERY_VOLTAGE_MV = "battery_voltage_mV"
# --- Measurement bus event names used by the clusters below ---
HUMIDITY_MEASUREMENT = "humidity_measurement"
HUMIDITY_REPORTED = "humidity_reported"
LUMI = "LUMI"
MODEL = 5  # Basic cluster "model" attribute id (0x0005)
MOTION_TYPE = 0x000D
OCCUPANCY_STATE = 0
PATH = "path"
POWER = "power"
CONSUMPTION = "consumption"
VOLTAGE = "voltage"
PRESSURE_MEASUREMENT = "pressure_measurement"
PRESSURE_REPORTED = "pressure_reported"
STATE = "state"
TEMPERATURE = "temperature"
TEMPERATURE_MEASUREMENT = "temperature_measurement"
TVOC_MEASUREMENT = "tvoc_measurement"
TEMPERATURE_REPORTED = "temperature_reported"
POWER_REPORTED = "power_reported"
CONSUMPTION_REPORTED = "consumption_reported"
VOLTAGE_REPORTED = "voltage_reported"
ILLUMINANCE_MEASUREMENT = "illuminance_measurement"
ILLUMINANCE_REPORTED = "illuminance_reported"
# --- Vendor-specific attribute ids Xiaomi packs its reports into ---
XIAOMI_AQARA_ATTRIBUTE = 0xFF01
XIAOMI_AQARA_ATTRIBUTE_E1 = 0x00F7
XIAOMI_ATTR_3 = "X-attrib-3"
XIAOMI_ATTR_4 = "X-attrib-4"
XIAOMI_ATTR_5 = "X-attrib-5"
XIAOMI_ATTR_6 = "X-attrib-6"
XIAOMI_MIJA_ATTRIBUTE = 0xFF02
# Node descriptor used by QuickInit quirks (manufacturer_code 4151 = Xiaomi).
XIAOMI_NODE_DESC = NodeDescriptor(
    byte1=2,
    byte2=64,
    mac_capability_flags=128,
    manufacturer_code=4151,
    maximum_buffer_size=127,
    maximum_incoming_transfer_size=100,
    server_mask=0,
    maximum_outgoing_transfer_size=100,
    descriptor_capability_field=0,
)
ZONE_TYPE = 0x0001
_LOGGER = logging.getLogger(__name__)
class XiaomiCustomDevice(CustomDevice):
    """Custom device representing xiaomi devices."""

    def __init__(self, *args, **kwargs):
        """Init."""
        # Shared bus that battery clusters subscribe to for voltage reports.
        self.battery_bus = Bus()
        if not hasattr(self, BATTERY_SIZE):
            # Default battery-size enum value when a quirk sets none.
            self.battery_size = 10
        super().__init__(*args, **kwargs)
class XiaomiQuickInitDevice(XiaomiCustomDevice, QuickInitDevice):
    """Xiaomi devices eligible for QuickInit."""

    # Marker base: handle_quick_init() only fast-tracks quirks derived from it.
class XiaomiCluster(CustomCluster):
    """Xiaomi cluster implementation."""

    def _iter_parse_attr_report(
        self, data: bytes
    ) -> Iterator[tuple[foundation.Attribute, bytes]]:
        """Yield all interpretations of the first attribute in an Xiaomi report."""
        # Peek at the attribute report
        attr_id, data = t.uint16_t.deserialize(data)
        attr_type, data = t.uint8_t.deserialize(data)
        if (
            attr_id
            not in (
                XIAOMI_AQARA_ATTRIBUTE,
                XIAOMI_MIJA_ATTRIBUTE,
                XIAOMI_AQARA_ATTRIBUTE_E1,
            )
            or attr_type != 0x42  # "Character String"
        ):
            # Assume other attributes are reported correctly
            data = attr_id.serialize() + attr_type.serialize() + data
            attribute, data = foundation.Attribute.deserialize(data)
            yield attribute, data
            return
        # Length of the "string" can be wrong
        val_len, data = t.uint8_t.deserialize(data)
        # Try every offset. Start with 0 to pass unbroken reports through.
        for offset in (0, -1, 1):
            fixed_len = val_len + offset
            if len(data) < fixed_len:
                continue
            val, final_data = data[:fixed_len], data[fixed_len:]
            attr_val = t.LVBytes(val)
            attr_type = 0x41  # The data type should be "Octet String"
            yield foundation.Attribute(
                attrid=attr_id,
                value=foundation.TypeValue(python_type=attr_type, value=attr_val),
            ), final_data

    def _interpret_attr_reports(
        self, data: bytes
    ) -> Iterable[tuple[foundation.Attribute, ...]]:
        """Yield all valid interprations of a Xiaomi attribute report."""
        if not data:
            # Base case: an empty payload has exactly one interpretation.
            yield ()
            return
        try:
            parsed = list(self._iter_parse_attr_report(data))
        except (KeyError, ValueError):
            # Unparsable first attribute: this branch yields nothing.
            return
        # Recursively combine each candidate first attribute with every
        # interpretation of the remaining bytes.
        for attr, remaining_data in parsed:
            for remaining_attrs in self._interpret_attr_reports(remaining_data):
                yield (attr,) + remaining_attrs

    def deserialize(self, data):
        """Deserialize cluster data."""
        hdr, data = foundation.ZCLHeader.deserialize(data)
        # Only handle attribute reports differently
        if (
            hdr.frame_control.frame_type != foundation.FrameType.GLOBAL_COMMAND
            or hdr.command_id != foundation.Command.Report_Attributes
        ):
            return super().deserialize(hdr.serialize() + data)
        reports = list(self._interpret_attr_reports(data))
        if not reports:
            _LOGGER.warning("Failed to parse Xiaomi attribute report: %r", data)
            return super().deserialize(hdr.serialize() + data)
        elif len(reports) > 1:
            _LOGGER.warning(
                "Xiaomi attribute report has multiple valid interpretations: %r",
                reports,
            )
        # Re-serialize the first interpretation and hand the repaired
        # payload to the standard zigpy deserializer.
        fixed_data = b"".join(attr.serialize() for attr in reports[0])
        return super().deserialize(hdr.serialize() + fixed_data)

    def _update_attribute(self, attrid, value):
        # Route Xiaomi's packed vendor reports through the custom parsers;
        # everything else goes straight to the standard handling.
        if attrid in (XIAOMI_AQARA_ATTRIBUTE, XIAOMI_AQARA_ATTRIBUTE_E1):
            attributes = self._parse_aqara_attributes(value)
            super()._update_attribute(attrid, value)
            if self.endpoint.device.model == "lumi.sensor_switch.aq2":
                # This exact payload is emitted on a triple press.
                if value == b"\x04!\xa8C\n!\x00\x00":
                    self.listener_event(ZHA_SEND_EVENT, COMMAND_TRIPLE, [])
        elif attrid == XIAOMI_MIJA_ATTRIBUTE:
            attributes = self._parse_mija_attributes(value)
        else:
            super()._update_attribute(attrid, value)
            if attrid == MODEL:
                # 0x0005 = model attribute.
                # Xiaomi sensors send the model attribute when their reset
                # button is pressed quickly.
                self.listener_event(
                    ZHA_SEND_EVENT,
                    COMMAND_ATTRIBUTE_UPDATED,
                    {
                        ATTRIBUTE_ID: attrid,
                        ATTRIBUTE_NAME: self.attributes.get(attrid, [UNKNOWN])[0],
                        VALUE: value,
                    },
                )
            return
        _LOGGER.debug(
            "%s - Attribute report. attribute_id: [%s] value: [%s]",
            self.endpoint.device.ieee,
            attrid,
            attributes,
        )
        # Fan the parsed measurements out to the buses the individual
        # (local) clusters listen on.
        if BATTERY_VOLTAGE_MV in attributes:
            self.endpoint.device.battery_bus.listener_event(
                BATTERY_REPORTED, attributes[BATTERY_VOLTAGE_MV]
            )
        if TEMPERATURE_MEASUREMENT in attributes:
            self.endpoint.device.temperature_bus.listener_event(
                TEMPERATURE_REPORTED, attributes[TEMPERATURE_MEASUREMENT]
            )
        if HUMIDITY_MEASUREMENT in attributes:
            self.endpoint.device.humidity_bus.listener_event(
                HUMIDITY_REPORTED, attributes[HUMIDITY_MEASUREMENT]
            )
        if PRESSURE_MEASUREMENT in attributes:
            # Raw value appears to be in Pa; bus expects hPa (see the
            # PressureMeasurementCluster range check) -- hence /100.
            self.endpoint.device.pressure_bus.listener_event(
                PRESSURE_REPORTED, attributes[PRESSURE_MEASUREMENT] / 100
            )
        if POWER in attributes:
            self.endpoint.device.power_bus.listener_event(
                POWER_REPORTED, attributes[POWER]
            )
        if CONSUMPTION in attributes:
            self.endpoint.device.consumption_bus.listener_event(
                CONSUMPTION_REPORTED, attributes[CONSUMPTION]
            )
        if VOLTAGE in attributes:
            self.endpoint.device.voltage_bus.listener_event(
                VOLTAGE_REPORTED, attributes[VOLTAGE] * 0.1
            )
        if ILLUMINANCE_MEASUREMENT in attributes:
            self.endpoint.device.illuminance_bus.listener_event(
                ILLUMINANCE_REPORTED, attributes[ILLUMINANCE_MEASUREMENT]
            )
        if TVOC_MEASUREMENT in attributes:
            self.endpoint.voc_level.update_attribute(
                0x0000, attributes[TVOC_MEASUREMENT]
            )
        if TEMPERATURE in attributes:
            if hasattr(self.endpoint, "device_temperature"):
                # Device temperature attribute is in centi-degrees.
                self.endpoint.device_temperature.update_attribute(
                    0x0000, attributes[TEMPERATURE] * 100
                )
        if BATTERY_PERCENTAGE_REMAINING_ATTRIBUTE in attributes:
            self.endpoint.device.power_bus_percentage.listener_event(
                "update_battery_percentage",
                attributes[BATTERY_PERCENTAGE_REMAINING_ATTRIBUTE],
            )

    def _parse_aqara_attributes(self, value):
        """Parse non standard attributes."""
        attributes = {}
        # Mapping from Xiaomi's 1-byte tags to friendly attribute names.
        attribute_names = {
            1: BATTERY_VOLTAGE_MV,
            3: TEMPERATURE,
            4: XIAOMI_ATTR_4,
            5: XIAOMI_ATTR_5,
            6: XIAOMI_ATTR_6,
            10: PATH,
        }
        if self.endpoint.device.model in [
            "lumi.sensor_ht",
            "lumi.sens",
            "lumi.weather",
            "lumi.airmonitor.acn01",
        ]:
            # Temperature sensors send temperature/humidity/pressure updates through this
            # cluster instead of the respective clusters
            attribute_names.update(
                {
                    100: TEMPERATURE_MEASUREMENT,
                    101: HUMIDITY_MEASUREMENT,
                    102: TVOC_MEASUREMENT
                    if self.endpoint.device.model == "lumi.airmonitor.acn01"
                    else PRESSURE_MEASUREMENT,
                }
            )
        elif self.endpoint.device.model in [
            "lumi.plug.maus01",
            "lumi.relay.c2acn01",
        ]:
            attribute_names.update({149: CONSUMPTION, 150: VOLTAGE, 152: POWER})
        elif self.endpoint.device.model == "lumi.sensor_motion.aq2":
            attribute_names.update({11: ILLUMINANCE_MEASUREMENT})
        elif self.endpoint.device.model == "lumi.curtain.acn002":
            attribute_names.update({101: BATTERY_PERCENTAGE_REMAINING_ATTRIBUTE})
        result = {}
        # Payload is a sequence of <1-byte tag><TypeValue> records.
        # Some attribute reports end with a stray null byte
        while value not in (b"", b"\x00"):
            skey = int(value[0])
            svalue, value = foundation.TypeValue.deserialize(value[1:])
            result[skey] = svalue.value
        for item, val in result.items():
            # Unknown tags are kept under a synthetic "0xff01-<tag>" key.
            key = (
                attribute_names[item]
                if item in attribute_names
                else "0xff01-" + str(item)
            )
            attributes[key] = val
        return attributes

    def _parse_mija_attributes(self, value):
        """Parse non standard attributes."""
        # Mija reports are a fixed-order sequence of TypeValues.
        attribute_names = (
            STATE,
            BATTERY_VOLTAGE_MV,
            XIAOMI_ATTR_3,
            XIAOMI_ATTR_4,
            XIAOMI_ATTR_5,
            XIAOMI_ATTR_6,
        )
        result = []
        for attr_value in value:
            result.append(attr_value.value)
        attributes = dict(zip(attribute_names, result))
        return attributes
class BasicCluster(XiaomiCluster, Basic):
    """Xiaomi basic cluster implementation."""

    # XiaomiCluster handles the 0xFF01/0xFF02 vendor reports that Xiaomi
    # devices deliver through the standard Basic cluster.
class XiaomiAqaraE1Cluster(XiaomiCluster, ManufacturerSpecificCluster):
    """Xiaomi mfg cluster implementation."""

    # Manufacturer-specific cluster id used by Aqara E1 devices.
    cluster_id = 0xFCC0
class BinaryOutputInterlock(CustomCluster, BinaryOutput):
    """Xiaomi binaryoutput cluster with added interlock attribute."""

    # Vendor attribute 0xFF06 toggles the relay interlock feature.
    manufacturer_attributes = {0xFF06: ("interlock", t.Bool)}
class XiaomiPowerConfiguration(PowerConfiguration, LocalDataCluster):
    """Xiaomi power configuration cluster implementation."""

    BATTERY_VOLTAGE_ATTR = 0x0020
    BATTERY_PERCENTAGE_REMAINING = 0x0021
    # Voltage window used to interpolate the battery percentage.
    MAX_VOLTS_MV = 3100
    MIN_VOLTS_MV = 2820

    def __init__(self, *args, **kwargs):
        """Init."""
        super().__init__(*args, **kwargs)
        # Receive voltage values parsed out of the Xiaomi vendor reports.
        self.endpoint.device.battery_bus.add_listener(self)
        self._CONSTANT_ATTRIBUTES = {
            BATTERY_QUANTITY_ATTR: 1,
            BATTERY_SIZE_ATTR: getattr(self.endpoint.device, BATTERY_SIZE, 0xFF),
        }
        # Slope for a 0-200 output range; the debug log below halves the
        # value, i.e. the attribute is stored in 0.5% steps.
        self._slope = 200 / (self.MAX_VOLTS_MV - self.MIN_VOLTS_MV)

    def battery_reported(self, voltage_mv: int) -> None:
        """Battery reported."""
        # Battery voltage attribute is expressed in 100 mV units.
        self._update_attribute(self.BATTERY_VOLTAGE_ATTR, round(voltage_mv / 100, 1))
        self._update_battery_percentage(voltage_mv)

    def _update_battery_percentage(self, voltage_mv: int) -> None:
        # Clamp into the supported window before interpolating.
        voltage_mv = max(voltage_mv, self.MIN_VOLTS_MV)
        voltage_mv = min(voltage_mv, self.MAX_VOLTS_MV)
        percent = round((voltage_mv - self.MIN_VOLTS_MV) * self._slope)
        self.debug(
            "Voltage mV: [Min]:%s < [RAW]:%s < [Max]:%s, Battery Percent: %s",
            self.MIN_VOLTS_MV,
            voltage_mv,
            self.MAX_VOLTS_MV,
            percent / 2,
        )
        self._update_attribute(self.BATTERY_PERCENTAGE_REMAINING, percent)
class OccupancyCluster(OccupancyWithReset):
    """Occupancy cluster."""

    # Behavior inherited from OccupancyWithReset unchanged.
class MotionCluster(LocalDataCluster, MotionOnEvent):
    """Motion cluster."""

    _CONSTANT_ATTRIBUTES = {ZONE_TYPE: MOTION_TYPE}
    # Seconds before the motion state is cleared (handled by MotionOnEvent).
    reset_s: int = 70
class DeviceTemperatureCluster(LocalDataCluster, DeviceTemperature):
    """Device Temperature Cluster."""

    # Local-only cluster; fed via update_attribute from XiaomiCluster.
class TemperatureMeasurementCluster(CustomCluster, TemperatureMeasurement):
    """Temperature cluster that filters out invalid temperature readings."""

    cluster_id = TemperatureMeasurement.cluster_id
    ATTR_ID = 0  # measured_value

    def __init__(self, *args, **kwargs):
        """Init."""
        super().__init__(*args, **kwargs)
        # Receive temperature values parsed from the Xiaomi vendor report.
        self.endpoint.device.temperature_bus.add_listener(self)

    def _update_attribute(self, attrid, value):
        # drop values above and below documented range for this sensor
        # value is in centi degrees
        if attrid == self.ATTR_ID and (-6000 <= value <= 6000):
            super()._update_attribute(attrid, value)

    def temperature_reported(self, value):
        """Temperature reported."""
        self._update_attribute(self.ATTR_ID, value)
class RelativeHumidityCluster(CustomCluster, RelativeHumidity):
    """Humidity cluster that filters out invalid humidity readings."""

    cluster_id = RelativeHumidity.cluster_id
    ATTR_ID = 0  # measured_value

    def __init__(self, *args, **kwargs):
        """Init."""
        super().__init__(*args, **kwargs)
        # Receive humidity values parsed from the Xiaomi vendor report.
        self.endpoint.device.humidity_bus.add_listener(self)

    def _update_attribute(self, attrid, value):
        # drop values above and below documented range for this sensor
        if attrid == self.ATTR_ID and (0 <= value <= 9999):
            super()._update_attribute(attrid, value)

    def humidity_reported(self, value):
        """Humidity reported."""
        self._update_attribute(self.ATTR_ID, value)
class PressureMeasurementCluster(CustomCluster, PressureMeasurement):
    """Pressure cluster to receive reports that are sent to the basic cluster."""

    cluster_id = PressureMeasurement.cluster_id
    ATTR_ID = 0  # measured_value

    def __init__(self, *args, **kwargs):
        """Init."""
        super().__init__(*args, **kwargs)
        # Receive pressure values parsed from the Xiaomi vendor report.
        self.endpoint.device.pressure_bus.add_listener(self)

    def _update_attribute(self, attrid, value):
        # drop unreasonable values
        # value is in hectopascals
        if attrid == self.ATTR_ID and (0 <= value <= 1100):
            super()._update_attribute(attrid, value)

    def pressure_reported(self, value):
        """Pressure reported."""
        self._update_attribute(self.ATTR_ID, value)
class AnalogInputCluster(CustomCluster, AnalogInput):
    """Analog input cluster, only used to relay power consumption information to ElectricalMeasurementCluster."""

    cluster_id = AnalogInput.cluster_id

    def __init__(self, *args, **kwargs):
        """Init."""
        # NOTE(review): _current_state is never read in this class -- it
        # appears unused here; confirm before removing.
        self._current_state = {}
        super().__init__(*args, **kwargs)

    def _update_attribute(self, attrid, value):
        super()._update_attribute(attrid, value)
        # Forward non-negative readings to the shared power bus.
        if value is not None and value >= 0:
            self.endpoint.device.power_bus.listener_event(POWER_REPORTED, value)
class ElectricalMeasurementCluster(LocalDataCluster, ElectricalMeasurement):
    """Electrical measurement cluster to receive reports that are sent to the basic cluster."""

    cluster_id = ElectricalMeasurement.cluster_id
    POWER_ID = 0x050B  # active_power
    VOLTAGE_ID = 0x0500  # rms_voltage
    CONSUMPTION_ID = 0x0304  # total_active_power
    # Unit multipliers/divisors fixed to 1 so raw values pass through.
    _CONSTANT_ATTRIBUTES = {
        0x0402: 1,  # power_multiplier
        0x0403: 1,  # power_divisor
        0x0604: 1,  # ac_power_multiplier
        0x0605: 1,  # ac_power_divisor
    }

    def __init__(self, *args, **kwargs):
        """Init."""
        super().__init__(*args, **kwargs)
        # Subscribe to all three buses fed by the Xiaomi vendor reports.
        self.endpoint.device.voltage_bus.add_listener(self)
        self.endpoint.device.consumption_bus.add_listener(self)
        self.endpoint.device.power_bus.add_listener(self)

    def power_reported(self, value):
        """Power reported."""
        self._update_attribute(self.POWER_ID, value)

    def voltage_reported(self, value):
        """Voltage reported."""
        self._update_attribute(self.VOLTAGE_ID, value)

    def consumption_reported(self, value):
        """Consumption reported."""
        self._update_attribute(self.CONSUMPTION_ID, value)
class IlluminanceMeasurementCluster(CustomCluster, IlluminanceMeasurement):
    """Multistate input cluster."""

    cluster_id = IlluminanceMeasurement.cluster_id
    ATTR_ID = 0  # measured_value

    def __init__(self, *args, **kwargs):
        """Init."""
        super().__init__(*args, **kwargs)
        # Receive illuminance values parsed from the Xiaomi vendor report.
        self.endpoint.device.illuminance_bus.add_listener(self)

    def _update_attribute(self, attrid, value):
        if attrid == self.ATTR_ID and value > 0:
            # Convert raw lux into the logarithmic measured-value encoding
            # (10000 * log10(lux) + 1).
            value = 10000 * math.log10(value) + 1
        super()._update_attribute(attrid, value)

    def illuminance_reported(self, value):
        """Illuminance reported."""
        self._update_attribute(self.ATTR_ID, value)
class OnOffCluster(OnOff, CustomCluster):
    """Aqara wall switch cluster."""

    def command(
        self,
        command_id: foundation.Command | int | t.uint8_t,
        *args,
        manufacturer: int | t.uint16_t | None = None,
        expect_reply: bool = True,
        tsn: int | t.uint8_t | None = None
    ):
        """Command handler."""
        src_ep = 1
        dst_ep = self.endpoint.endpoint_id
        device = self.endpoint.device
        if tsn is None:
            tsn = self._endpoint.device.application.get_sequence()
        # Sends the command as a raw three-byte payload from endpoint 1
        # instead of a regular ZCL request.
        return device.request(
            # device,
            zha.PROFILE_ID,
            OnOff.cluster_id,
            src_ep,
            dst_ep,
            tsn,
            bytes([src_ep, tsn, command_id]),
            expect_reply=expect_reply,
        )
def handle_quick_init(
    sender: zigpy.device.Device,
    profile: int,
    cluster: int,
    src_ep: int,
    dst_ep: int,
    message: bytes,
) -> bool | None:
    """Handle message from an uninitialized device which could be a xiaomi."""
    if src_ep == 0:
        # ZDO traffic, not an application frame.
        return
    hdr, data = foundation.ZCLHeader.deserialize(message)
    sender.debug(
        """Received ZCL while uninitialized on endpoint id %s, cluster 0x%04x """
        """id, hdr: %s, payload: %s""",
        src_ep,
        cluster,
        hdr,
        data,
    )
    if hdr.frame_control.is_cluster:
        # Only global commands (attribute reports) are of interest here.
        return
    try:
        schema = foundation.COMMANDS[hdr.command_id][0]
        args, data = t.deserialize(data, schema)
    except (KeyError, ValueError):
        sender.debug("Failed to deserialize ZCL global command")
        return
    sender.debug("Uninitialized device command '%s' args: %s", hdr.command_id, args)
    if hdr.command_id != foundation.Command.Report_Attributes or cluster != 0:
        return
    # Look for the model attribute (id 5) in the Basic cluster report.
    for attr_rec in args[0]:
        if attr_rec.attrid == 5:
            break
    else:
        return
    model = attr_rec.value.value
    if not model:
        return
    # Find a QuickInit-capable quirk registered for this LUMI model.
    for quirk in zigpy.quirks.get_quirk_list(LUMI, model):
        if issubclass(quirk, XiaomiQuickInitDevice):
            sender.debug("Found '%s' quirk for '%s' model", quirk.__name__, model)
            try:
                sender = quirk.from_signature(sender, model)
            except (AssertionError, KeyError) as ex:
                _LOGGER.debug(
                    "Found quirk for quick init, but failed to init: %s", str(ex)
                )
                continue
            break
    else:
        return
    # Skip the normal interview and announce the device immediately.
    sender.cancel_initialization()
    sender.application.device_initialized(sender)
    sender.info(
        "Was quickly initialized from '%s.%s' quirk", quirk.__module__, quirk.__name__
    )
    return True


zigpy.quirks.register_uninitialized_device_message_handler(handle_quick_init)
|
def prueba5(drone):
f = open("Datos_vuelo_prueba5.txt", "wb")
f.write("Test de despegue y aterrizaje \n")
drone.takeoff()
print("")
print("")
print("")
print("")
print("Estado del drone")
print("")
i= 0
for i in range(0,500):
print "Posicion ", drone.position
print "Velocidad ", drone.speed
print "GPS ", drone.positionGPS
print "Altitud ", drone.altitude
print "Estado ", drone.flyingState, " Bateria ", drone.battery
f.write(str(i) + "\n")
f.write("Posicion " + str(drone.position) + "\n")
f.write("Velocidad " + str(drone.speed) + "\n")
f.write("GPS " + str(drone.positionGPS) + "\n")
f.write("Altitud " + str(drone.altitude) + "\n")
f.write("Estado " + str(drone.flyingState) + "\n")
f.write("\n")
f.write("\n")
print ("Fin de vuelo")
drone.land()
def prueba6(drone):
f = open("Datos_vuelo_prueba6.txt", "wb")
f.write("Test de despegue y cambio de altura \n")
drone.takeoff()
print("")
print("")
print("")
print("")
print("Estado del drone")
print("")
i= 0
for i in range(0,100):
print "Posicion ", drone.position
print "Velocidad ", drone.speed
print "GPS ", drone.positionGPS
print "Altitud ", drone.altitude
print "Estado ", drone.flyingState, " Bateria ", drone.battery
f.write(str(i) + str(drone.time) + "\n")
f.write("Posicion " + str(drone.position) + "\n")
f.write("Velocidad " + str(drone.speed) + "\n")
f.write("GPS " + str(drone.positionGPS) + "\n")
f.write("Altitud " + str(drone.altitude) + "\n")
f.write("Estado " + str(drone.flyingState) + "\n")
f.write("\n")
f.write("\n")
f.write("Cambio de altitud \n")
drone.flyToAltitude(2)
i= 0
for i in range(0,100):
print "Posicion ", drone.position
print "Velocidad ", drone.speed
print "GPS ", drone.positionGPS
print "Altitud ", drone.altitude
print "Estado ", drone.flyingState, " Bateria ", drone.battery
f.write(str(i) + str(drone.time) + "\n")
f.write("Posicion " + str(drone.position) + "\n")
f.write("Velocidad " + str(drone.speed) + "\n")
f.write("GPS " + str(drone.positionGPS) + "\n")
f.write("Altitud " + str(drone.altitude) + "\n")
f.write("Estado " + str(drone.flyingState) + "\n")
f.write("\n")
f.write("\n")
f.write("Estados \n")
#drone.update(cmd=requestAllStatesCmd())
print "Posicion ", drone.position
print "Velocidad ", drone.speed
print "GPS ", drone.positionGPS
print "Altitud ", drone.altitude
print "Estado ", drone.flyingState, " Bateria ", drone.battery
f.write("Posicion " + str(drone.position) + "\n")
f.write("Velocidad " + str(drone.speed) + "\n")
f.write("GPS " + str(drone.positionGPS) + "\n")
f.write("Altitud " + str(drone.altitude) + "\n")
f.write("Estado " + str(drone.flyingState) + "\n")
f.write("\n")
f.write("\n")
print ("Fin de vuelo")
drone.land()
def prueba7(drone, velocidad, eje, tiempo):
import commands as cm
filename = "Datos_vuelo_prueba7" + eje + str(velocidad)+ ".txt"
f = open(filename, "wb")
f.write("Test de despegue y avance, velocidad 1 \n")
drone.takeoff()
print("")
print("")
print("")
print("")
print("Estado del drone")
print("")
i= 0
for i in range(0,50):
print "Posicion ", drone.position
print "Velocidad ", drone.speed
print "GPS ", drone.positionGPS
print "Altitud ", drone.altitude
print "Estado ", drone.flyingState, " Bateria ", drone.battery
f.write("Movimiento \n")
f.write(eje + str(velocidad)+ "\n")
if eje == "cabeceo":
drone.update( cmd=cm.movePCMDCmd( True, 0, velocidad, 0, 0 ) )
elif eje == "balance":
drone.update( cmd=cm.movePCMDCmd( True, velocidad, 0, 0, 0 ) )
elif eje == "guinada":
drone.update( cmd=cm.movePCMDCmd( True, 0, 0, velocidad, 0 ) )
else:
drone.land()
f.write("Vuelo fallido")
i= 0
for i in range(0,tiempo):
drone.update(cmd=cm.requestAllStatesCmd())
print "Posicion ", drone.position
print "Velocidad ", drone.speed
print "GPS ", drone.positionGPS
print "Altitud ", drone.altitude
print "Estado ", drone.flyingState, " Bateria ", drone.battery
f.write(str(i) + "\n")
f.write("Posicion " + str(drone.position) + "\n")
f.write("Velocidad " + str(drone.speed) + "\n")
f.write("Actitud " + str(drone.attitude) + "\n")
f.write("GPS " + str(drone.positionGPS) + "\n")
f.write("Altitud " + str(drone.altitude) + "\n")
f.write("Estado " + str(drone.flyingState) + "\n")
f.write("\n")
f.write("\n")
drone.update( cmd=cm.movePCMDCmd( True, 0, 0, 0, 0 ) )
f.write("Parada de movimiento \n")
i= 0
for i in range(0,20):
print "Posicion ", drone.position
print "Velocidad ", drone.speed
print "GPS ", drone.positionGPS
print "Altitud ", drone.altitude
print "Estado ", drone.flyingState, " Bateria ", drone.battery
f.write(str(i) + "\n")
f.write("Posicion " + str(drone.position) + "\n")
f.write("Velocidad " + str(drone.speed) + "\n")
f.write("GPS " + str(drone.positionGPS) + "\n")
f.write("Altitud " + str(drone.altitude) + "\n")
f.write("Estado " + str(drone.flyingState) + "\n")
f.write("\n")
f.write("\n")
print ("Fin de vuelo")
drone.land()
def pruebaGPS(drone):
f = open("Datos_vuelo_pruebaGPS.txt", "wb")
f.write("Test de posicionGPS \n")
drone.takeoff()
print("")
print("")
print("")
print("")
print("Estado del drone")
print("")
i= 0
for i in range(0,500):
print "Posicion ", drone.position
print "Velocidad ", drone.speed
print "GPS ", drone.positionGPS
print "Altitud ", drone.altitude
print "Estado ", drone.flyingState, " Bateria ", drone.battery
f.write(str(drone.positionGPS)+ "\n")
#f.write(str(i) + "\n")
#f.write("Posicion " + str(drone.position) + "\n")
#f.write("Velocidad " + str(drone.speed) + "\n")
#f.write("GPS " + str(drone.positionGPS) + "\n")
#f.write("Altitud " + str(drone.altitude) + "\n")
#f.write("Estado " + str(drone.flyingState) + "\n")
#f.write("\n")
#f.write("\n")
print ("Fin de vuelo")
drone.land()
class dronefalso():
    """Fake drone exposing the interface the prueba* functions use,
    so the logging code can be exercised without real hardware."""

    def __init__(self):
        self.position = (0, 0, 0)
        self.speed = (0, 0, 0)
        self.positionGPS = (0, 0, 0)
        self.altitude = 0
        self.flyingState = 0
        self.battery = 0
        self.time = 0

    def takeoff(self):
        return

    def land(self):
        return

    def update(self, cmd=None):
        # Bug fix: callers invoke update(cmd=...) by keyword; the old
        # signature `update(self, asd)` raised TypeError on every such call.
        return

    def flyToAltitude(self, h):
        return

    def requestAllStates(self):
        return
|
<reponame>leipzig/xd-cwl-utils<gh_stars>0
#
# * This file is subject to the terms and conditions defined in
# * file 'LICENSE.md', which is part of this source code package.
from abc import abstractmethod, ABC
from urllib.parse import urlparse
from ruamel.yaml.comments import CommentedMap
class AttributeBase(ABC):
    """Base class for metadata attribute containers.

    Subclasses declare their attribute names via ``_attrs`` and expose them
    as properties; this base provides emptiness checking and dumping.
    """

    def __init__(self, *args, **kwargs):
        # NOTE: the original assigned ``__slots__`` to a local variable here,
        # which has no effect; slots must be declared at class level, so the
        # dead statement was removed.
        pass

    def is_empty(self):
        """Return True when every declared attribute is unset/falsy.

        A list-valued attribute is empty when every item is falsy, or is an
        attribute container that is itself empty.

        Bug fix: the original iterated attribute *names*, so the
        ``isinstance(..., list)`` branch never ran; it now inspects the
        attribute *values* as intended.
        """
        for attr_name in self.attrs:
            value = getattr(self, attr_name)
            if isinstance(value, list):
                for item in value:
                    if getattr(item, 'attrs', None):
                        if not item.is_empty():
                            return False
                    elif item:
                        return False
            elif value:
                return False
        return True

    @staticmethod
    @abstractmethod
    def _attrs():
        """Return the frozenset of attribute names a subclass exposes."""
        return frozenset([])

    @property
    def attrs(self):
        # Convenience accessor for the subclass-declared attribute names.
        return self._attrs()

    def dump(self):
        """Return the attributes as an ordered CommentedMap for YAML output."""
        map_object = CommentedMap()
        for attribute in self.attrs:
            map_object[attribute] = getattr(self, attribute)
        return map_object
class CodeRepository(AttributeBase):
    """A source code repository: a name and a scheme-validated URL."""

    def __init__(self, name=None, URL=None):
        super().__init__()
        # Bug fix: route through the property setters so URL validation
        # also applies to constructor arguments (the original assigned the
        # private fields directly, bypassing the checks).
        self.name = name
        self.URL = URL
        return

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_name):
        self._name = new_name
        return

    @property
    def URL(self):
        return self._URL

    @URL.setter
    def URL(self, new_URL):
        if new_URL:
            valid_schemes = ['https', 'http', 'git']
            parse_result = urlparse(new_URL)
            if parse_result.scheme not in valid_schemes:
                raise ValueError(f"URL scheme should be in {valid_schemes}")
        else:
            # Normalize falsy values (e.g. '') to None.
            new_URL = None
        self._URL = new_URL
        return

    @staticmethod
    def _attrs():
        return frozenset(['name', 'URL'])
class WebSite(AttributeBase):
    """A web site: name, description and a scheme-validated URL."""

    def __init__(self, name=None, description=None, URL=None):
        super().__init__()
        # Bug fix: route through the property setters so URL validation
        # also applies to constructor arguments (the original assigned the
        # private fields directly, bypassing the checks).
        self.name = name
        self.description = description
        self.URL = URL

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_name):
        self._name = new_name

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, new_description):
        self._description = new_description

    @property
    def URL(self):
        return self._URL

    @URL.setter
    def URL(self, new_URL):
        if new_URL:
            valid_schemes = ['https', 'http']
            parse_result = urlparse(new_URL)
            if parse_result.scheme not in valid_schemes:
                raise ValueError(f"URL scheme should be in {valid_schemes}")
        else:
            # Normalize falsy values (e.g. '') to None.
            new_URL = None
        self._URL = new_URL
        return

    @staticmethod
    def _attrs():
        return frozenset(['name', 'description', 'URL'])
class Publication(AttributeBase):
    """A publication reference: an identifier (e.g. a DOI) and a headline."""

    def __init__(self, identifier=None, headline=None):
        super().__init__()
        self._identifier = identifier
        self._headline = headline

    # Plain accessor pairs wired up via property(); no validation needed.
    def _get_identifier(self):
        return self._identifier

    def _set_identifier(self, new_identifier):
        self._identifier = new_identifier

    identifier = property(_get_identifier, _set_identifier)

    def _get_headline(self):
        return self._headline

    def _set_headline(self, new_headline):
        self._headline = new_headline

    headline = property(_get_headline, _set_headline)

    @staticmethod
    def _attrs():
        return frozenset(['identifier', 'headline'])
class Person(AttributeBase):
    """A person: name, email and an identifier (all optional)."""

    def __init__(self, name=None, email=None, identifier=None):
        super().__init__()
        self._name = name
        self._email = email
        self._identifier = identifier

    # Plain accessor pairs wired up via property(); no validation needed.
    def _get_name(self):
        return self._name

    def _set_name(self, new_name):
        self._name = new_name

    name = property(_get_name, _set_name)

    def _get_email(self):
        return self._email

    def _set_email(self, new_email):
        self._email = new_email

    email = property(_get_email, _set_email)

    def _get_identifier(self):
        return self._identifier

    def _set_identifier(self, new_identifier):
        self._identifier = new_identifier

    identifier = property(_get_identifier, _set_identifier)

    @staticmethod
    def _attrs():
        return frozenset(['name', 'email', 'identifier'])
class Keyword(AttributeBase):
    """A keyword: either a single uri, or a name with an optional category."""

    def __init__(self, *args, **kwargs):  # Need to initialize off of *args and **kwargs to handle both forms.
        super().__init__()
        args_len = len(args)
        if args_len == 0:
            self.uri = kwargs.get('uri', None)
        elif args_len == 1:
            # Positional form: Keyword("<uri>").
            self.uri = args[0]
        else:
            raise ValueError(f"Expected only one argument for uri for keyword. Got {args}")
        self.name = kwargs.get('name', None)
        # Bug fix: route through the category setter so its validation also
        # applies to constructor arguments (the original assigned the
        # private field directly, bypassing the check).
        self.category = kwargs.get('category', None)

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_name):
        self._name = new_name

    @property
    def category(self):
        return self._category

    @category.setter
    def category(self, new_category):
        category_values = (None, 'topic', 'operation')
        if new_category not in category_values:
            raise ValueError(f"{new_category} is not a valid category for a keyword. Must be one of {category_values}")
        self._category = new_category

    @property
    def uri(self):
        return self._uri

    @uri.setter
    def uri(self, new_uri):
        self._uri = new_uri

    def dump(self):
        """Dump as a bare uri string when set, else a name/category map."""
        if self.uri:
            return self.uri
        else:
            keyword = CommentedMap([('name', self.name), ('category', self.category)])
            return keyword

    @staticmethod
    def _attrs():
        return frozenset(['name', 'category', 'uri'])
class SoftwareVersion(AttributeBase):
    """A named software version plus an optional list of included versions."""

    def __init__(self, versionName=None, includedVersions=None):
        super().__init__()
        # Go through the setters so validation/coercion applies at init time.
        self.versionName = versionName
        self.includedVersions = includedVersions

    @property
    def versionName(self):
        """The primary version label (always a str once set)."""
        return self._versionName

    @versionName.setter
    def versionName(self, new_versionName):
        if not new_versionName:
            # A version object without a name is rejected outright.
            # Fix: was an f-string with no placeholders (lint F541).
            raise TypeError("versionName must be set.")
        self._versionName = str(new_versionName)

    @property
    def includedVersions(self):
        """List (possibly empty) of specific versions covered by this entry."""
        return self._includedVersions

    @includedVersions.setter
    def includedVersions(self, includedVersions_list):
        # Coerce every element to str; falsy input normalizes to an empty list.
        if includedVersions_list:
            self._includedVersions = [str(v) for v in includedVersions_list]
        else:
            self._includedVersions = []

    @staticmethod
    def _attrs():
        return frozenset(['versionName', 'includedVersions'])
class ParentScript(AttributeBase):
    """Reference to a parent script: name, version, and identifier."""

    def __init__(self, name=None, version=None, identifier=None):
        super().__init__()
        self._name = name
        self._version = version
        self._identifier = identifier

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_name):
        self._name = new_name

    @property
    def version(self):
        """Version as a string, or None when unset.

        Fix: the original stringified unconditionally, so an unset version
        came back as the literal string "None".
        """
        return None if self._version is None else str(self._version)

    @version.setter
    def version(self, new_version):
        # Store as str, preserving None (the original stored str(None)).
        self._version = None if new_version is None else str(new_version)

    @property
    def identifier(self):
        return self._identifier

    @identifier.setter
    def identifier(self, new_identifier):
        self._identifier = new_identifier

    @staticmethod
    def _attrs():
        return frozenset(['name', 'version', 'identifier'])
class Tool(AttributeBase):
    """A tool reference: name, optional alternate name, version, identifier."""

    def __init__(self, name=None, version=None, identifier=None, alternateName=None):
        super().__init__()
        self._name = name
        self._alternateName = alternateName
        self._version = version
        self._identifier = identifier

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_name):
        self._name = new_name

    @property
    def alternateName(self):
        return self._alternateName

    @alternateName.setter
    def alternateName(self, value):
        self._alternateName = value

    @property
    def version(self):
        """Version as a string, or None when unset.

        Fix: the original returned str(None) == "None" for an unset version.
        """
        return None if self._version is None else str(self._version)

    @version.setter
    def version(self, new_version):
        self._version = None if new_version is None else str(new_version)

    @property
    def identifier(self):
        return self._identifier

    @identifier.setter
    def identifier(self, new_identifier):
        self._identifier = new_identifier

    @staticmethod
    def _attrs():
        return frozenset(['name', 'alternateName', 'version', 'identifier'])
class CallMap(AttributeBase):
    """Pairs a local 'id' with an external 'identifier'."""

    def __init__(self, id_=None, identifier=None):
        super().__init__()
        self._id = id_
        self._identifier = identifier

    @property
    def id(self):
        """Local id (the trailing-underscore arg avoids shadowing the builtin)."""
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def identifier(self):
        """External identifier the id maps to."""
        return self._identifier

    @identifier.setter
    def identifier(self, value):
        self._identifier = value

    @staticmethod
    def _attrs():
        return frozenset(['id', 'identifier'])
class IOObject(AttributeBase):
    """An input/output artifact: an identifier plus a filesystem path."""

    def __init__(self, identifier=None, path=None):
        super().__init__()
        self._identifier = identifier
        self._path = path

    @property
    def identifier(self):
        return self._identifier

    @identifier.setter
    def identifier(self, value):
        self._identifier = value

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, value):
        self._path = value

    @staticmethod
    def _attrs():
        return frozenset(['identifier', 'path'])
class IOObjectItem(AttributeBase):
    """Wrapper pairing an 'id' with an IOObject (identifier + path).

    Bug fix: the original declared ``io_object=IOObject()`` as a default
    argument. Default values are evaluated once at function definition, so
    every instance built without an explicit io_object shared ONE IOObject --
    mutating path/identifier on one item silently changed all of them.
    """

    def __init__(self, id_=None, io_object=None):
        super().__init__()
        self._id = id_
        # Create a fresh IOObject per instance instead of a shared default.
        self._io_object = io_object if io_object is not None else IOObject()

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, new_id):
        self._id = new_id

    @property
    def identifier(self):
        # Delegates to the wrapped IOObject.
        return self._io_object._identifier

    @identifier.setter
    def identifier(self, new_identifier):
        self._io_object._identifier = new_identifier

    @property
    def path(self):
        # Delegates to the wrapped IOObject.
        return self._io_object._path

    @path.setter
    def path(self, new_path):
        self._io_object._path = new_path

    @property
    def io_object(self):
        return self._io_object

    @staticmethod
    def _attrs():
        return frozenset(['id', 'io_object'])

    def dump(self):
        """Serialize as a mapping: 'id' first, then the IOObject's fields."""
        map_object = CommentedMap()
        map_object['id'] = getattr(self, 'id')
        map_object.update(self.io_object.dump())
        return map_object
class IOArrayItem(AttributeBase):
    """An 'id' plus a list of IOObject entries."""

    def __init__(self, id_, objects=None):
        super().__init__()
        self._id = id_
        # Default to a single fresh IOObject when no list is supplied.
        if objects:
            self._objects = objects
        else:
            self._objects = [IOObject()]

    @staticmethod
    def _attrs():
        return frozenset(['id', 'objects'])
# Registry of attribute classes understood by the surrounding machinery.
# NOTE(review): a stray '|' artifact that trailed this statement (a syntax
# error as written) was removed.
object_attributes = (
    CodeRepository, Person, Publication, WebSite, Keyword, Tool, ParentScript,
    IOObjectItem, CallMap, SoftwareVersion)
import re
from copy import deepcopy
import torch
from torch import nn as nn
from .conv2d_same import *
# Default args for PyTorch BN impl
# These match torch.nn.BatchNorm2d's own defaults (momentum=0.1, eps=1e-5).
BN_MOMENTUM_DEFAULT = 0.1
BN_EPS_DEFAULT = 1e-5
def round_channels(channels, depth_multiplier=1.0, depth_divisor=8, min_depth=None):
    """Scale ``channels`` by ``depth_multiplier`` and round to a multiple of
    ``depth_divisor`` (never below ``min_depth``), without letting rounding
    shrink the result more than 10% below the scaled value.
    """
    if not depth_multiplier:
        # No multiplier (None/0) means: leave the channel count untouched.
        return channels
    scaled = channels * depth_multiplier
    floor = min_depth or depth_divisor
    # Round half-up to the nearest multiple of the divisor, clamped below.
    rounded = int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor
    rounded = max(rounded, floor)
    if rounded < 0.9 * scaled:
        # Rounding down lost more than 10% of capacity; bump up one step.
        rounded += depth_divisor
    return rounded
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x).mul(x)
def hard_swish(x):
    """Hard-swish activation: x * relu6(x + 3) / 6."""
    gate = F.relu6(x + 3.) / 6.
    return x * gate
def hard_sigmoid(x):
    """Hard-sigmoid activation: relu6(x + 3) / 6 (piecewise-linear sigmoid)."""
    return F.relu6(x + 3.).div(6.)
def drop_connect(inputs, training=False, drop_connect_rate=0.):
    """Randomly zero whole examples (drop connect / stochastic depth).

    At eval time (training=False) this is the identity. At train time each
    sample in the batch survives with probability 1 - drop_connect_rate and
    survivors are rescaled by 1 / keep_prob to preserve the expectation.
    Assumes a 4-D (N, C, H, W) input -- the mask is (N, 1, 1, 1).
    """
    if not training:
        return inputs
    keep_prob = 1 - drop_connect_rate
    # One Bernoulli draw per example, broadcast over the remaining dims.
    batch = inputs.size()[0]
    mask = keep_prob + torch.rand(
        (batch, 1, 1, 1), dtype=inputs.dtype, device=inputs.device)
    mask.floor_()  # binarize: 1.0 with prob keep_prob, else 0.0
    return inputs.div(keep_prob) * mask
class ConvBnAct(nn.Module):
    """Conv2d -> (optional) BatchNorm2d -> activation.

    When ``folded_bn`` is True the BatchNorm layer is omitted and the conv
    gains a bias term instead (BN presumably pre-folded into the conv
    weights -- confirm against the export pipeline).
    """
    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, act_fn=F.relu,
                 bn_momentum=BN_MOMENTUM_DEFAULT, bn_eps=BN_EPS_DEFAULT,
                 folded_bn=False, padding_same=False):
        super(ConvBnAct, self).__init__()
        assert stride in [1, 2]
        self.act_fn = act_fn
        # padding_arg/get_padding come from conv2d_same; padding_same selects
        # TF-style 'SAME' padding behavior.
        padding = padding_arg(get_padding(kernel_size, stride), padding_same)
        self.conv = sconv2d(
            in_chs, out_chs, kernel_size,
            stride=stride, padding=padding, bias=folded_bn)
        self.bn1 = None if folded_bn else nn.BatchNorm2d(out_chs, momentum=bn_momentum, eps=bn_eps)
    def forward(self, x):
        x = self.conv(x)
        if self.bn1 is not None:
            x = self.bn1(x)
        x = self.act_fn(x)
        return x
class DepthwiseSeparableConv(nn.Module):
    """Depthwise-separable conv block: depthwise conv -> BN -> act ->
    (optional SE) -> pointwise conv -> BN (-> optional act), with an identity
    residual when stride == 1 and in_chs == out_chs (unless ``noskip``).

    ``folded_bn=True`` drops the BatchNorm layers and gives the convs a bias.
    """
    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, act_fn=F.relu, noskip=False, pw_act=False,
                 se_ratio=0., se_gate_fn=torch.sigmoid,
                 bn_momentum=BN_MOMENTUM_DEFAULT, bn_eps=BN_EPS_DEFAULT,
                 folded_bn=False, padding_same=False, drop_connect_rate=0.):
        super(DepthwiseSeparableConv, self).__init__()
        assert stride in [1, 2]
        # Residual only possible when shapes line up and not explicitly disabled.
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.has_se = se_ratio is not None and se_ratio > 0.
        self.has_pw_act = pw_act  # activation after point-wise conv
        self.act_fn = act_fn
        self.drop_connect_rate = drop_connect_rate
        dw_padding = padding_arg(kernel_size // 2, padding_same)
        pw_padding = padding_arg(0, padding_same)
        self.conv_dw = sconv2d(
            in_chs, in_chs, kernel_size,
            stride=stride, padding=dw_padding, groups=in_chs, bias=folded_bn)
        self.bn1 = None if folded_bn else nn.BatchNorm2d(in_chs, momentum=bn_momentum, eps=bn_eps)
        if self.has_se:
            self.se = SqueezeExcite(
                in_chs, reduce_chs=max(1, int(in_chs * se_ratio)), act_fn=act_fn, gate_fn=se_gate_fn)
        self.conv_pw = sconv2d(in_chs, out_chs, 1, padding=pw_padding, bias=folded_bn)
        self.bn2 = None if folded_bn else nn.BatchNorm2d(out_chs, momentum=bn_momentum, eps=bn_eps)
    def forward(self, x):
        residual = x
        x = self.conv_dw(x)
        if self.bn1 is not None:
            x = self.bn1(x)
        x = self.act_fn(x)
        if self.has_se:
            x = self.se(x)
        x = self.conv_pw(x)
        if self.bn2 is not None:
            x = self.bn2(x)
        if self.has_pw_act:
            x = self.act_fn(x)
        if self.has_residual:
            # Optionally drop the block's output per-sample before the skip add.
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation channel attention:
    global average pool -> 1x1 reduce -> act -> 1x1 expand -> gate multiply.
    """
    def __init__(self, in_chs, reduce_chs=None, act_fn=F.relu, gate_fn=torch.sigmoid):
        super(SqueezeExcite, self).__init__()
        self.act_fn = act_fn
        self.gate_fn = gate_fn
        # When reduce_chs is None/0, no bottleneck: reduce to in_chs.
        reduced_chs = reduce_chs or in_chs
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
        self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
    def forward(self, x):
        # NOTE adaptiveavgpool bad for NVIDIA AMP performance
        # tensor.view + mean bad for ONNX export (produces mess of gather ops that break TensorRT)
        #x_se = x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1)
        x_se = self.avg_pool(x)
        x_se = self.conv_reduce(x_se)
        x_se = self.act_fn(x_se)
        x_se = self.conv_expand(x_se)
        # Per-channel multiplicative attention gate.
        x = x * self.gate_fn(x_se)
        return x
class InvertedResidual(nn.Module):
    """ Inverted residual block w/ optional SE

    MobileNetV2/EfficientNet-style block: 1x1 expansion (by ``exp_ratio``) ->
    depthwise conv -> optional squeeze-excite -> 1x1 linear projection, with
    an identity skip when in_chs == out_chs and stride == 1 (unless ``noskip``).
    """
    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, act_fn=F.relu, exp_ratio=1.0, noskip=False,
                 se_ratio=0., se_reduce_mid=False, se_gate_fn=torch.sigmoid,
                 bn_momentum=BN_MOMENTUM_DEFAULT, bn_eps=BN_EPS_DEFAULT,
                 folded_bn=False, padding_same=False, drop_connect_rate=0.):
        super(InvertedResidual, self).__init__()
        mid_chs = int(in_chs * exp_ratio)  # expanded (hidden) width
        self.has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.act_fn = act_fn
        self.drop_connect_rate = drop_connect_rate
        dw_padding = padding_arg(kernel_size // 2, padding_same)
        pw_padding = padding_arg(0, padding_same)
        # Point-wise expansion
        self.conv_pw = sconv2d(in_chs, mid_chs, 1, padding=pw_padding, bias=folded_bn)
        self.bn1 = None if folded_bn else nn.BatchNorm2d(mid_chs, momentum=bn_momentum, eps=bn_eps)
        # Depth-wise convolution
        self.conv_dw = sconv2d(
            mid_chs, mid_chs, kernel_size, padding=dw_padding, stride=stride, groups=mid_chs, bias=folded_bn)
        self.bn2 = None if folded_bn else nn.BatchNorm2d(mid_chs, momentum=bn_momentum, eps=bn_eps)
        # Squeeze-and-excitation
        if self.has_se:
            # se_reduce_mid: size the SE bottleneck from the expanded width
            # instead of the block input width.
            se_base_chs = mid_chs if se_reduce_mid else in_chs
            self.se = SqueezeExcite(
                mid_chs, reduce_chs=max(1, int(se_base_chs * se_ratio)), act_fn=act_fn, gate_fn=se_gate_fn)
        # Point-wise linear projection
        self.conv_pwl = sconv2d(mid_chs, out_chs, 1, padding=pw_padding, bias=folded_bn)
        self.bn3 = None if folded_bn else nn.BatchNorm2d(out_chs, momentum=bn_momentum, eps=bn_eps)
    def forward(self, x):
        residual = x
        # Point-wise expansion
        x = self.conv_pw(x)
        if self.bn1 is not None:
            x = self.bn1(x)
        x = self.act_fn(x)
        # Depth-wise convolution
        x = self.conv_dw(x)
        if self.bn2 is not None:
            x = self.bn2(x)
        x = self.act_fn(x)
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Point-wise linear projection (no activation -- linear bottleneck)
        x = self.conv_pwl(x)
        if self.bn3 is not None:
            x = self.bn3(x)
        if self.has_residual:
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        # NOTE maskrcnn_benchmark building blocks have an SE module defined here for some variants
        return x
class EfficientNetBuilder:
    """ Build Trunk Blocks for Efficient/Mobile Networks

    Consumes decoded block-argument dicts (see ``_decode_block_str``) and
    instantiates the matching nn.Module blocks, threading each block's output
    channel count into the next block's input.

    This ended up being somewhat of a cross between
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
    and
    https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py
    """
    def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 drop_connect_rate=0., act_fn=None, se_gate_fn=torch.sigmoid, se_reduce_mid=False,
                 bn_momentum=BN_MOMENTUM_DEFAULT, bn_eps=BN_EPS_DEFAULT,
                 folded_bn=False, padding_same=False):
        self.channel_multiplier = channel_multiplier
        self.channel_divisor = channel_divisor
        self.channel_min = channel_min
        self.drop_connect_rate = drop_connect_rate
        self.act_fn = act_fn  # model-wide default; a block's own act_fn wins
        self.se_gate_fn = se_gate_fn
        self.se_reduce_mid = se_reduce_mid
        self.bn_momentum = bn_momentum
        self.bn_eps = bn_eps
        self.folded_bn = folded_bn
        self.padding_same = padding_same
        self.in_chs = None  # set by __call__, updated as blocks are built
    def _round_channels(self, chs):
        # Apply the global width multiplier with divisor/min constraints.
        return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)
    def _make_block(self, ba):
        """Instantiate one block from its argument dict (mutated in place)."""
        bt = ba.pop('block_type')
        ba['in_chs'] = self.in_chs
        ba['out_chs'] = self._round_channels(ba['out_chs'])
        ba['bn_momentum'] = self.bn_momentum
        ba['bn_eps'] = self.bn_eps
        ba['folded_bn'] = self.folded_bn
        ba['padding_same'] = self.padding_same
        # block act fn overrides the model default
        ba['act_fn'] = ba['act_fn'] if ba['act_fn'] is not None else self.act_fn
        if bt == 'ir':
            ba['drop_connect_rate'] = self.drop_connect_rate
            ba['se_gate_fn'] = self.se_gate_fn
            ba['se_reduce_mid'] = self.se_reduce_mid
            block = InvertedResidual(**ba)
        elif bt == 'ds' or bt == 'dsa':
            ba['drop_connect_rate'] = self.drop_connect_rate
            block = DepthwiseSeparableConv(**ba)
        elif bt == 'cn':
            block = ConvBnAct(**ba)
        else:
            # Typo fix: the message previously read 'Uknkown'.
            assert False, 'Unknown block type (%s) while building model.' % bt
        self.in_chs = ba['out_chs']  # update in_chs for arg of next block
        return block
    def _make_stack(self, stack_args):
        """Build one stage: a Sequential of blocks from a list of arg dicts."""
        blocks = []
        # each stack (stage) contains a list of block arguments
        for block_idx, ba in enumerate(stack_args):
            if block_idx >= 1:
                # only the first block in any stack/stage can have a stride > 1
                ba['stride'] = 1
            block = self._make_block(ba)
            blocks.append(block)
        return nn.Sequential(*blocks)
    def __call__(self, in_chs, block_args):
        """ Build the blocks
        Args:
            in_chs: Number of input-channels passed to first block
            block_args: A list of lists, outer list delimits stacks (stages),
                inner list contains args defining block configuration(s)
        Return:
            List of block stacks (each stack wrapped in nn.Sequential)
        """
        self.in_chs = in_chs
        # outer list of arch_args defines the stacks ('stages' by some conventions)
        blocks = []
        for stack in block_args:
            assert isinstance(stack, list)
            blocks.append(self._make_stack(stack))
        return blocks
def _decode_block_str(block_str, depth_multiplier=1.0):
""" Decode block definition string
Gets a list of block arg (dicts) through a string notation of arguments.
E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
All args can exist in any order with the exception of the leading string which
is assumed to indicate the block type.
leading string - block type (
ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act,
ca = Cascade3x3, and possibly more)
r - number of repeat blocks,
k - kernel size,
s - strides (1-9),
e - expansion ratio,
c - output channels,
se - squeeze/excitation ratio
a - activation fn ('re', 'r6', or 'hs')
Args:
block_str: a string representation of block arguments.
Returns:
A list of block args (dicts)
Raises:
ValueError: if the string def not properly specified (TODO)
"""
assert isinstance(block_str, str)
ops = block_str.split('_')
block_type = ops[0] # take the block type off the front
ops = ops[1:]
options = {}
noskip = False
for op in ops:
# string options being checked on individual basis, combine if they grow
if op.startswith('a'):
# activation fn
key = op[0]
v = op[1:]
if v == 're':
value = F.relu
elif v == 'r6':
value = F.relu6
elif v == 'hs':
value = hard_swish
else:
continue
options[key] = value
elif op == 'noskip':
noskip = True
else:
# all numeric options
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# if act_fn is None, the model default (passed to model init) will be used
act_fn = options['a'] if 'a' in options else None
num_repeat = int(options['r'])
# each type of block has different valid arguments, fill accordingly
if block_type == 'ir':
block_args = dict(
block_type=block_type,
kernel_size=int(options['k']),
out_chs=int(options['c']),
exp_ratio=float(options['e']),
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_fn=act_fn,
noskip=noskip,
)
if 'g' in options:
block_args['pw_group'] = options['g']
if options['g'] > 1:
block_args['shuffle_type'] = 'mid'
elif block_type == 'ds' or block_type == 'dsa':
block_args = dict(
block_type=block_type,
kernel_size=int(options['k']),
out_chs=int(options['c']),
se_ratio=float(options['se']) if 'se' in options else None,
stride=int(options['s']),
act_fn=act_fn,
noskip=block_type == 'dsa' or noskip,
pw_act=block_type == 'dsa',
)
elif block_type == 'cn':
block_args = dict(
block_type=block_type,
kernel_size=int(options['k']),
out_chs=int(options['c']),
stride=int(options['s']),
act_fn=act_fn,
)
else:
assert False, 'Unknown block type (%s)' % block_type
# return a list of block args expanded by num_repeat and
# scaled by depth_multiplier
num_repeat = int(math.ceil(num_repeat * depth_multiplier))
return [deepcopy(block_args) for _ in range(num_repeat)]
def decode_arch_def(arch_def, depth_multiplier=1.0):
    """Decode a list-of-lists of block strings into per-stack block args.

    The outer list delimits stacks (stages); every string in an inner list
    expands (via its repeat count and ``depth_multiplier``) into one or more
    block-argument dicts.
    """
    arch_args = []
    for block_strings in arch_def:
        assert isinstance(block_strings, list)
        stack = []
        for block_str in block_strings:
            assert isinstance(block_str, str)
            stack.extend(_decode_block_str(block_str, depth_multiplier))
        arch_args.append(stack)
    return arch_args
|
<reponame>jfear/larval_gonad
"""Set of helper functions for the notebook."""
import os
from pathlib import Path
from datetime import datetime
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib as mpl
from IPython import get_ipython
from .config import config, PROJECT_DIR, CONFIG_DIR, REFERENCES_DIR
from .scRNAseq import Seurat
import plotting
class Nb(object):
    """Notebook helper bundling project paths, config, and plotting setup."""
    def __init__(
        self,
        nb_name=None,
        project_dir=None,
        subproject_dir=None,
        seurat_dir=None,
        config_dir=None,
        ref_dir=None,
        fig_dir=None,
        formats=None,
        styles=None,
        styles_wide=None,
        styles_full=None,
        watermark=None,
        **kwargs,
    ):
        """Helper method for working consistently in notebook.
        Stores a bunch of useful attributes. Turns on a bunch of commonly
        used notebook magics. If matplotlib stylelib exists in the config_dir
        then it imports user defined styles.
        Parameters
        ----------
        nb_name : str
            Name of the current notebook.
        project_dir : str
            Name of the project directory.
        config_dir : str
            Name of the config directory.
        ref_dir : str
            Name of the references directory.
        subproject_dir : str
            Name of the subproject directory for placing output.
        seurat_dir : str
            Name of the directory with seurat output.
        fig_dir : str
            Name of the figures directory.
        formats : str or list
            Default list of formats to use for plotting. For example 'png' or
            ['png', 'svg'].
        styles : str or list
            Default list of matplotlib.style.library to use for plotting. For
            example 'seaborn-notebook' or ['seaborn-notebook',
            'seaborn-paper'].
        watermark : bool
            If true turn on watermarking.
        **kwargs
            Additional arguments that are stored as attributes
        Attributes
        ----------
        nb_name : str
            Name of the current notebook.
        project_dir : str
            Name of the project directory.
        subproject_dir : str
            Directory to save outputs from this subproject.
        seurat_dir : str
            Location of Seurat output. Will default to the subproject_dir.
        config_dir : str
            Name of the config directory.
        ref_dir : str
            Name of the references directory.
        fig_dir : str
            Name of the figures directory.
        formats : str or list
            Default list of formats to use for plotting. For example 'png' or
            ['png', 'svg'].
        styles : str or list
            Default list of matplotlib.style.library to use for plotting. For
            example 'seaborn-notebook' or ['seaborn-notebook',
            'seaborn-paper'].
        styles_wide : str or list
            Default list of matplotlib.style.library to use for plotting wide
            (two column) images. For example 'seaborn-notebook' or
            ['seaborn-notebook', 'seaborn-paper'].
        styles_full : str or list
            Default list of matplotlib.style.library to use for plotting
            full-width images. For example 'seaborn-notebook' or
            ['seaborn-notebook', 'seaborn-paper'].
        date : str
            Current date, generated upon creation.
        conda_env : str
            Name of the current conda environment location.
        fasta : str
            Path to fasta file.
        chromsizes : str
            Path to chromsizes file.
        gtf : str
            Path to gtf file.
        gtf_db : str
            Path to gtf_db file.
        annot : str
            Path to annot file.
        syn : str
            Path to syn file.
        seurat : Seurat
            Useful Seurat paths.
        """
        self.nb_name = nb_name
        self.project_dir = project_dir
        self.subproject_dir = subproject_dir
        self.seurat_dir = seurat_dir
        self.config_dir = config_dir
        self.ref_dir = ref_dir
        self.fig_dir = fig_dir
        self.formats = formats
        self.styles = styles
        self.styles_wide = styles_wide
        self.styles_full = styles_full
        self.date = datetime.now().strftime("%Y-%m-%d")
        # NOTE(review): get_conda() actually returns the 'envs directories'
        # path parsed from `conda info`, not the active environment name.
        self.conda_env = self.get_conda()
        # Add useful reference paths
        # 'assembly' and 'tag' are REQUIRED kwargs; a missing one raises KeyError.
        assembly = kwargs["assembly"]
        tag = kwargs["tag"]
        self.fasta = os.path.join(self.ref_dir, assembly, tag, "fasta", f"{assembly}_{tag}.fasta")
        self.chromsizes = os.path.join(
            self.ref_dir, assembly, tag, "fasta", f"{assembly}_{tag}.chromsizes"
        )
        self.gtf = os.path.join(self.ref_dir, assembly, tag, "gtf", f"{assembly}_{tag}.gtf")
        self.gtf_db = os.path.join(self.ref_dir, assembly, tag, "gtf", f"{assembly}_{tag}.gtf.db")
        self.annot = os.path.join(
            self.ref_dir, assembly, tag, "fb_annotation", f"{assembly}_{tag}.fb_annotation"
        )
        self.syn = os.path.join(
            self.ref_dir, assembly, tag, "fb_synonym", f"{assembly}_{tag}.fb_synonym"
        )
        if seurat_dir is None:
            self.seurat = None
        else:
            self.seurat = Seurat(seurat_dir)
        # Add useful mappers
        # Reads the annotation table to build FBgn <-> gene symbol mappings.
        _annot = pd.read_csv(self.annot, sep="\t", index_col=1).fillna("nan")
        self.fbgn2symbol = _annot["gene_symbol"].to_dict()
        self.symbol2fbgn = {v: k for k, v in self.fbgn2symbol.items()}
        try:
            self.fbgn2chrom = pd.read_csv(
                os.path.join(self.project_dir, "output/fbgn2chrom.tsv"), sep="\t", index_col=0
            )
        except Exception:
            # NOTE(review): on failure self.fbgn2chrom is left UNSET, so any
            # later access raises AttributeError -- only a hint is printed here.
            print(
                "Please check output/fbgn2chrom.tsv. " "If it does not exist, run bin/fbgn2chrom.py"
            )
        # Add any key word args
        self._config_attrs = kwargs.keys()
        for k, v in kwargs.items():
            setattr(self, k, v)
        # turn on magics
        self._start_magics(watermark=watermark)
        # Set up plotting
        self._setup_plotting()
        # Turn off scientific notation
        np.set_printoptions(precision=5, suppress=True)
    def _start_magics(self, watermark=None):
        """Start up the notebook magics I commonly use."""
        # NOTE(review): get_ipython().magic is deprecated in modern IPython;
        # run_line_magic is the current API -- confirm the pinned IPython version.
        mgc = get_ipython().magic
        # Activate the autoreload extension for easy reloading of external
        # packages
        mgc("reload_ext autoreload")
        mgc("autoreload 2")
        # Turn on the watermark
        if watermark:
            mgc("reload_ext watermark")
            mgc("watermark -u -d -g")
        # Plot inline
        mgc("matplotlib inline")
    def _setup_plotting(self):
        # Assumes 'common' and 'notebook' styles exist in the matplotlib
        # style library (user stylelib) -- see the class description.
        mpl.style.use(["common", "notebook"])
    def get_conda(self):
        """Return the conda 'envs directories' path parsed from `conda info`."""
        conda_info = check_output(["conda", "info"]).decode("utf-8")
        for x in conda_info.split("\n"):
            if "envs directories" in x:
                return x.split(":")[1].strip()
    @classmethod
    def setup_notebook(
        cls, nb_name=None, subproject_dir=None, seurat_dir=None, watermark=True, **kwargs
    ):
        """Helper function to consistently setup notebooks.
        Function detects the working folder and sets up a
        larval_gonad.notebook.Nb object with sane defaults.
        Parameters
        ----------
        nb_name : str
            Name of the current notebook.
        subproject_dir : str
            Directory to save outputs from this subproject.
        seurat_dir : str
            Location of Seurat output. Will default to the subproject_dir.
        watermark : bool
            If true then output watermark information.
        kwargs
            Additional arguments to pass to Nb.
        """
        # Default subproject_dir to PROJECT_DIR/output when not given.
        if subproject_dir is None:
            subproject_dir = Path(PROJECT_DIR, "output").as_posix()
        fig_dir = Path(subproject_dir, "figures")
        fig_dir.mkdir(parents=True, exist_ok=True)
        # set defaults
        defaults = {
            "nb_name": nb_name,
            "project_dir": PROJECT_DIR,
            "subproject_dir": subproject_dir,
            "seurat_dir": seurat_dir,
            "config_dir": CONFIG_DIR,
            "ref_dir": REFERENCES_DIR,
            "fig_dir": fig_dir.as_posix(),
            "formats": ["png", "pdf"],
            "styles": ["notebook", "talk"],
            "watermark": watermark,
        }
        # Import external config
        defaults.update(config)
        defaults.update(kwargs)
        # Add wide and full styles (derived from the 'styles' entry by suffix)
        _styles = defaults["styles"]
        defaults["styles_wide"] = [x + "-wide" for x in _styles]
        defaults["styles_full"] = [x + "-full" for x in _styles]
        return cls(**defaults)
    def fig_name(self, fname):
        """Return a figure path under fig_dir, prefixed by the notebook name."""
        if self.nb_name is not None:
            fname = "_".join([self.nb_name, fname])
        return os.path.join(self.fig_dir, fname)
    def table_name(self, fname):
        """Return a table path under subproject_dir, prefixed by the notebook name."""
        if self.nb_name is not None:
            fname = "_".join([self.nb_name, fname])
        return os.path.join(self.subproject_dir, fname)
    def __repr__(self):
        return str(self)
    def __str__(self):
        # Dump the well-known attributes plus any extra config kwargs.
        keys = [
            "nb_name",
            "project_dir",
            "config_dir",
            "fig_dir",
            "subproject_dir",
            "seurat_dir",
            "formats",
            "styles",
            "styles_wide",
            "styles_full",
            "date",
        ]
        keys.extend(self._config_attrs)
        res = []
        for key in keys:
            value = self.__dict__[key]
            if value is None:
                value = "None"
            res.append("{}:\t{}".format(key, value))
        return "\n".join(res)
|
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cudf
import clx.parsers.zeek as zeek
def test_parse_log_file(tmpdir):
    """Round-trip a synthetic Zeek conn.log through clx's parser and compare
    every column against the frame that generated the file."""
    # Zeek metadata header; '\t\\x09' declares tab as the field separator.
    header = "#separator\t\\x09\n\
#set_separator\t,\n\
#empty_field\t(empty)\n\
#unset_field\t-\n\
#path\tconn\n\
#open\t2015-01-24-16-49-04\n\
#fields\tts\tuid\tid.orig_h\tid.orig_p\tid.resp_h\tid.resp_p\tproto\tservice\tduration\torig_bytes\tresp_bytes\tconn_state\tlocal_orig\tmissed_bytes\thistory\torig_pkts\torig_ip_bytes\tresp_pkts\tresp_ip_bytes\ttunnel_parents\n\
#types\ttime\tstring\taddr\tport\taddr\tport\tenum\tstring\tinterval\tcount\tcount\tstring\tbool\tcount\tstring\tcount\tcount\tcount\tcount\tset[string]\n"
    # Expected frame; the explicit astype calls pin the dtypes the parser
    # is expected to produce for each column.
    actual = cudf.DataFrame()
    actual["ts"] = [1421927450.370337, 1421927658.777193]
    actual["ts"] = actual["ts"].astype("float64")
    actual["uid"] = ["CFlyqZgM1g71BYPB6", "CnKVxKIj403JsAK5k"]
    actual["id.orig_h"] = ["172.16.17.32", "192.168.3.11"]
    actual["id.orig_p"] = [7177, 24809]
    actual["id.orig_p"] = actual["id.orig_p"].astype("int64")
    actual["id.resp_h"] = ["192.168.3.11", "172.16.17.32"]
    actual["id.resp_p"] = [80, 443]
    actual["id.resp_p"] = actual["id.resp_p"].astype("int64")
    actual["proto"] = ["tcp", "tcp"]
    actual["service"] = ["http", "http"]
    actual["duration"] = [0.214392, 2.37679]
    actual["duration"] = actual["duration"].astype("float64")
    actual["orig_bytes"] = [194, 188]
    actual["orig_bytes"] = actual["orig_bytes"].astype("int64")
    actual["resp_bytes"] = [12282, 0]
    actual["resp_bytes"] = actual["resp_bytes"].astype("int64")
    actual["conn_state"] = ["SF", "SF"]
    actual["local_orig"] = [False, False]
    actual["missed_bytes"] = [12282, 0]
    actual["missed_bytes"] = actual["missed_bytes"].astype("int64")
    actual["history"] = ["ShADdFfa", "ShADFfa"]
    actual["orig_pkts"] = [12, 14]
    actual["orig_pkts"] = actual["orig_pkts"].astype("int64")
    actual["orig_ip_bytes"] = [900, 1344]
    actual["orig_ip_bytes"] = actual["orig_ip_bytes"].astype("int64")
    actual["resp_pkts"] = [24, 6]
    actual["resp_pkts"] = actual["resp_pkts"].astype("int64")
    actual["resp_ip_bytes"] = [25540, 256]
    actual["resp_ip_bytes"] = actual["resp_ip_bytes"].astype("int64")
    actual["tunnel_parents"] = ["(empty)", "(empty)"]
    # NOTE(review): '^I' here is two literal characters, not a tab --
    # presumably harmless since '#'-prefixed lines are metadata; confirm the
    # parser skips them.
    footer = "#close^I2015-01-24-16-50-35"
    fname = tmpdir.mkdir("tmp_clx_zeek_test").join("tst_zeek_conn_log.csv")
    actual.to_csv(fname, sep="\t", index=False, header=False)
    # Wrap the bare TSV body with the Zeek header and footer in place.
    with open(fname, "r+") as f:
        content = f.read()
        f.seek(0, 0)
        f.write(header + content + footer)
    parsed = zeek.parse_log_file(fname)
    # Float columns compared with allclose; everything else exactly.
    assert np.allclose(parsed["ts"].values_host, actual["ts"].values_host)
    assert parsed["uid"].equals(actual["uid"])
    assert parsed["id.orig_h"].equals(actual["id.orig_h"])
    assert parsed["id.orig_p"].equals(actual["id.orig_p"])
    assert parsed["id.resp_h"].equals(actual["id.resp_h"])
    assert parsed["id.resp_p"].equals(actual["id.resp_p"])
    assert parsed["proto"].equals(actual["proto"])
    assert parsed["service"].equals(actual["service"])
    assert np.allclose(parsed["duration"].values_host, actual["duration"].values_host)
    assert parsed["orig_bytes"].equals(actual["orig_bytes"])
    assert parsed["resp_bytes"].equals(actual["resp_bytes"])
    assert parsed["conn_state"].equals(actual["conn_state"])
    assert parsed["local_orig"].equals(actual["local_orig"])
    assert parsed["missed_bytes"].equals(actual["missed_bytes"])
    assert parsed["history"].equals(actual["history"])
    assert parsed["orig_pkts"].equals(actual["orig_pkts"])
    assert parsed["orig_ip_bytes"].equals(actual["orig_ip_bytes"])
    assert parsed["resp_pkts"].equals(actual["resp_pkts"])
    assert parsed["resp_ip_bytes"].equals(actual["resp_ip_bytes"])
    assert parsed["tunnel_parents"].equals(actual["tunnel_parents"])
|
from _utils import TestBase
from nose.tools import *
from werkzeug.exceptions import NotFound
from werkzeug.routing import Map
import json
class TestRoutes(TestBase):
    """Test routes.py"""
    @classmethod
    def setup_class(cls):
        super(TestRoutes, cls).setup_class()
        cls.load_routes()
    @classmethod
    def load_routes(cls, with_mail_callback=True):
        """(Re)register a dummy action into a fresh plugit url map.

        Called again by individual tests to toggle the mail callback on/off.
        """
        import plugit
        class DummyActions:
            @plugit.utils.action(route="/", template="home.html")
            def dummy_action(request):
                return {}
        # Reset routing state so repeated loads don't accumulate rules.
        plugit.app.url_map = Map()
        plugit.app.view_functions = {}
        plugit.load_actions(DummyActions, cls.mail_callback if with_mail_callback else None)
        cls.app = plugit.app
    def patch_view(self, ip='127.0.0.1', dont_jsonify=False, args=None):
        """Patch the plugit view with special callbacks"""
        from plugit import routes
        import json
        self.plugitroutes = routes
        # Keep originals so unpatch_view can restore them.
        self.bkp_request = self.plugitroutes.request
        self.bkp_jsonfy = self.plugitroutes.jsonify
        myself = self
        # Minimal stand-in for flask's request object. The 'self' attribute
        # exposes this test instance so callbacks (see mail_callback) can
        # record results back onto it via request.self.
        class R():
            remote_addr = ip
            headers = {}
            args = {}
            form = {}
            self = myself
        if args:
            R.args = args
            R.form = args
        def false_jsonfy(**obj):
            # Replacement for jsonify; optionally returns the raw dict.
            if dont_jsonify:
                return obj
            return json.dumps(obj)
        self.plugitroutes.request = R()
        self.plugitroutes.jsonify = false_jsonfy
    def unpatch_view(self):
        """Revert changes done to the view"""
        self.plugitroutes.request = self.bkp_request
        self.plugitroutes.jsonify = self.bkp_jsonfy
    def get_rule_by_path(self, path):
        """Return (rule, view function) for a route path, or None if absent."""
        for rule in self.app.url_map.iter_rules():
            if str(rule) == path:
                return rule, self.app.view_functions[rule.endpoint]
    def test_ping_vue_created(self):
        assert(self.get_rule_by_path('/ping'))
    def test_ping_vue_ping(self):
        # /ping must echo back the 'data' argument.
        rule, view = self.get_rule_by_path('/ping')
        self.patch_view(args={'data': 'p'})
        r = json.loads(view())
        self.unpatch_view()
        assert(r['data'] == 'p')
    def test_ping_vue_ip(self):
        # /ping must 404 for an address outside PI_ALLOWED_NETWORKS.
        from plugit import utils
        backup_allowed = utils.PI_ALLOWED_NETWORKS
        utils.PI_ALLOWED_NETWORKS = ['127.0.0.0/8']
        self.patch_view('1.2.3.4')
        rule, view = self.get_rule_by_path('/ping')
        try:
            view()
        except NotFound:
            self.unpatch_view()
            utils.PI_ALLOWED_NETWORKS = backup_allowed
            return  # Ok :)
        self.unpatch_view()
        utils.PI_ALLOWED_NETWORKS = backup_allowed
        assert(False)
    def test_version_vue_created(self):
        assert(self.get_rule_by_path('/version'))
    def test_version_vue_version(self):
        rule, view = self.get_rule_by_path('/version')
        self.patch_view()
        r = json.loads(view())
        self.unpatch_view()
        assert(r['result'] == 'Ok')
        assert(r['version'] == self.plugitroutes.PI_API_VERSION)
        assert(r['protocol'] == self.plugitroutes.PI_API_NAME)
    def test_mail_vue_created(self):
        assert(self.get_rule_by_path('/mail'))
    def test_mail_vue_mail_no_response(self):
        # An empty response_id must be rejected.
        rule, view = self.get_rule_by_path('/mail')
        self.patch_view(args={'response_id': ''})
        r = json.loads(view())
        self.unpatch_view()
        print(r)
        assert(r['result'] == 'Error')
    @staticmethod
    def mail_callback(request):
        # Records the call on the test instance via the patched request.self.
        request.self.mail_called = True
        request.self.mail_response_id = request.form['response_id']
        return '{"result": "OkCallback"}'
    def test_mail_vue_mail_callback(self):
        # With a callback registered, /mail must invoke it and return its result.
        TestRoutes.load_routes()
        rule, view = self.get_rule_by_path('/mail')
        self.patch_view(args={'response_id': '42'})
        self.mail_called = False
        self.mail_response_id = False
        r = json.loads(view())
        self.unpatch_view()
        assert(r['result'] == 'OkCallback')
        assert(self.mail_called)
        assert(self.mail_response_id == '42')
    def test_mail_vue_mail_nocallback(self):
        # Without a callback, /mail succeeds but the callback is never invoked.
        TestRoutes.load_routes(False)
        rule, view = self.get_rule_by_path('/mail')
        self.patch_view(args={'response_id': '42'})
        self.mail_called = False
        self.mail_response_id = False
        r = json.loads(view())
        self.unpatch_view()
        assert(r['result'] == 'Ok')
        assert(not self.mail_called)
        assert(self.mail_response_id != '42')
    def test_mail_vue_ip(self):
        # /mail must 404 for an address outside PI_ALLOWED_NETWORKS.
        from plugit import utils
        backup_allowed = utils.PI_ALLOWED_NETWORKS
        utils.PI_ALLOWED_NETWORKS = ['127.0.0.0/8']
        self.patch_view('1.2.3.4')
        rule, view = self.get_rule_by_path('/mail')
        try:
            view()
        except NotFound:
            self.unpatch_view()
            utils.PI_ALLOWED_NETWORKS = backup_allowed
            return  # Ok :)
        self.unpatch_view()
        utils.PI_ALLOWED_NETWORKS = backup_allowed
        assert(False)
    def test_meta_view_created(self):
        assert(self.get_rule_by_path('/meta/'))
    def test_template_view_created(self):
        assert(self.get_rule_by_path('/template/'))
    def test_action_view_created(self):
        assert(self.get_rule_by_path('/action/'))
|
<filename>tests/test_accumulation_distribution.py
from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import accumulation_distribution
class TestAccumulationDistribution(unittest.TestCase):
    """Regression tests for pyti.accumulation_distribution on the shared sample data."""
    def setUp(self):
        """Create data to use for testing."""
        self.close_data = SampleData().get_sample_close_data()
        self.high_data = SampleData().get_sample_high_data()
        self.low_data = SampleData().get_sample_low_data()
        self.volume = SampleData().get_sample_volume()
        # Expected accumulation/distribution line for the sample data.
        # Regression baseline — presumably captured from a reference run of the
        # indicator (TODO confirm provenance); compared with exact equality below.
        self.ad_expected = [0.0, 15862.38524593231, -36477.702708130804,
            -104733.80651442676, 40104.71949712043, 23939.254528920741,
            -62106.183239279555, 112915.24595464926, 39956.002218866488,
            -260.35553975040239, -116475.2497883655, -154687.74497142577,
            -292093.679550856, -133194.90662509997, -263680.69609877974,
            -299353.08397758257, -261929.08397757346, -320893.80364286253,
            -471609.22532958613, -708827.18185129401, -694912.50378286431,
            -792540.18692894513, -777300.311457203, -699904.16262002871,
            -790925.63820386841, -889485.5941157171, -805433.69146085251,
            -833917.85340419237, -833917.85340416594, -883999.91462867253,
            -1022927.0306024188, -1054143.0894259787, -1058939.2259721591,
            -1021271.5606829068, -913860.42762679141, -1035092.7994216527,
            -935298.84023796313, -792598.06027727341, -709809.99197603366,
            -577648.68356478063, -712915.33741098049, -1014517.9000277666,
            -729489.82981907623, -732989.08876652108, -437866.62941692042,
            -292705.11886841164, -208275.03902052308, -57775.565753187257,
            -41895.022706843985, -197234.22345034953, -217259.22123303646,
            -336135.09482219844, -340737.15732219425, -345832.65519452997,
            -282367.61014945683, -503852.77093379293, -542493.46938165138,
            -293054.95086318324, -349241.3657770899, -310266.53503439715,
            -427254.58609822171, -379701.86011959275, -245746.2914921554,
            -197685.67265809805, -201802.46902173068, -174468.61134385251,
            12276.114846627315, -21316.018077706984, -411927.14032219525,
            -465339.18257566751, -517829.09090903884, -526077.9668780705,
            -638881.56860968971, -535937.62266374112, -422292.58075894287,
            -585404.44057200733, -641343.11799134617, -442699.23563839833,
            -237900.56329794819, -15646.76484828876, 257481.51413010465,
            266545.23366587964, 296702.51703821769, 382165.68335837772,
            433020.58124570741, 559763.67215474963, 556301.17746264767,
            683881.16259275773, 671412.22763338394, 702418.43452995166,
            1269915.1092287663, 1369598.1903099443, 1813632.4712087966,
            1418193.9507626847, 1357868.9773113495, 1173146.9130543284,
            1370686.1802957729, 1559763.227204062, 1799516.493737207,
            1532620.7033441989, 1699573.5379898732, 1690914.5457118889,
            2050665.8326683058, 2199638.0771572837, 2242714.9091572445,
            2255814.5304043461, 2304978.8473854959, 2178583.2490768656,
            2209187.1812126297, 2279663.6510112993, 2416336.1883247532,
            2392955.8321879292, 2448552.5321879075, 2399441.1061789142,
            2494347.2758041751, 2406478.6085073589, 2498297.9406373627,
            2479423.9027995183, 2386354.6797225843, 2295492.9138876535,
            2280017.0204450153, 2238485.2116801129, 2242266.9907233468,
            2141284.6836367347, 2083933.3361190266, 2027200.0996044902,
            1922004.9567473496]
    def test_accumulation_distribution(self):
        """The indicator must exactly reproduce the stored baseline."""
        ad = accumulation_distribution.accumulation_distribution(self.close_data, self.high_data, self.low_data, self.volume)
        np.testing.assert_array_equal(ad, self.ad_expected)
    def test_ad_invalid_data(self):
        """Mismatched input lengths must raise with the documented message."""
        # Extend close_data by one element so its length no longer matches.
        self.close_data.append(1)
        with self.assertRaises(Exception) as cm:
            accumulation_distribution.accumulation_distribution(self.close_data, self.high_data, self.low_data, self.volume)
        expected = ("Error: mismatched data lengths, check to ensure that all input data is the same length and valid")
        self.assertEqual(str(cm.exception), expected)
|
import time
import numpy
from ortools.constraint_solver import pywrapcp
class CP_Solver_Got:
    """Constraint-programming model (Google OR-Tools original CP solver) for
    assigning application components to virtual machines.

    Decision variables:
      vm[j]          -- 1 if VM j is acquired, else 0
      a[i*nrVM + j]  -- 1 if component i is deployed on VM j (flattened matrix)
      vmType/ProcProv/MemProv/StorageProv/PriceProv[j]
                     -- per-VM offer characteristics, used only with the
                        SMT-style encoding (always enabled in this version)
      cost           -- objective: total price when offers are known, otherwise
                        the number of acquired VMs

    Component IDs used by the Restriction* methods index into the flattened
    ``a`` matrix as ``a[compId * nrVM + j]``.
    """
    def __init__(self, problem, solver_type, nr_of_solution_limit, not_optimisation_problem, available_configurations,
                 time_limit, vmNr):
        """Store the problem description and build the solver model.

        problem: object exposing nrComp and a logger.
        solver_type: search-strategy name (see __defineSolver for the options).
        nr_of_solution_limit: maximum number of solutions the solver collects.
        not_optimisation_problem: truthy means "run as a minimization problem"
            (see run()) -- NOTE(review): the name looks inverted relative to its
            use; confirm with callers.
        available_configurations: list of VM offers [id, cpu, mem, storage, price],
            or None when no offers are known.
        time_limit: solver time limit, in the unit pywrapcp.Solver.Limit expects.
        vmNr: number of VMs to model (deliberately overrides problem.nrVM).
        """
        self.nrComp = problem.nrComp
        self.problem = problem
        self.nrVM = vmNr#problem.nrVM
        self.VM_MaxLoad = 10  # max components per VM; only enforced by __GC2, which is disabled
        self.option = solver_type
        self.availableConfig = available_configurations
        self.offerSize = len(available_configurations) if self.availableConfig is not None else 0
        self.timeLimit = time_limit
        self.__nr_of_solution_limit = nr_of_solution_limit
        self.__optimization_problem = not_optimisation_problem
        # Always use the SMT-style offer encoding (the "else" paths below are legacy).
        self.__solveVariantSMTEncoding = True
        self.__defineSolver(self.option)
    def __defineSolver(self, option):
        """Create the solver, its search limits, all variables and general
        constraints, and the decision builder for the requested strategy."""
        #print("--define solver")
        # define solver
        parameters = pywrapcp.Solver.DefaultSolverParameters()
        self.solver = pywrapcp.Solver("maneuver_CP_GOT", parameters)
        self.cost = None
        # define some cut limits
        time_limit = self.timeLimit#500000# 4 minute
        #time_limit = 100
        branch_limit = 1000000000
        failures_limit = 1000000000
        solutions_limit = self.__nr_of_solution_limit# 10000
        self.limits = self.solver.Limit(time_limit, branch_limit, failures_limit, solutions_limit, True)
        self.__defineVarinablesAndGeneralConstraints()
        # Search over assignment, vm-usage and price variables.
        variables = self.vm + self.a + self.PriceProv#+ self.cost
        # Map the strategy name onto an OR-Tools (variable-choice, value-choice) pair.
        if option == "FIRST_UNBOUND_MIN":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_FIRST_UNBOUND,
                                                      self.solver.ASSIGN_MIN_VALUE)
        elif option == "FIRST_UNBOUND_MAX":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_FIRST_UNBOUND,
                                                      self.solver.ASSIGN_MAX_VALUE)
        elif option == "FIRST_UNBOUND_RANDOM":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_FIRST_UNBOUND,
                                                      self.solver.ASSIGN_RANDOM_VALUE)
        elif option == "LOWEST_MIN_MIN":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_LOWEST_MIN,
                                                      self.solver.ASSIGN_MIN_VALUE)
        elif option == "LOWEST_MIN_MAX":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_LOWEST_MIN,
                                                      self.solver.ASSIGN_MAX_VALUE)
        elif option == "LOWEST_MIN_RANDOM":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_LOWEST_MIN,
                                                      self.solver.ASSIGN_RANDOM_VALUE)
        elif option == "RANDOM_MIN":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_RANDOM,
                                                      self.solver.ASSIGN_MIN_VALUE)
        elif option == "RANDOM_MAX":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_RANDOM,
                                                      self.solver.ASSIGN_MAX_VALUE)
        elif option == "RANDOM_RANDOM":
            self.decision_builder = self.solver.Phase(variables,
                                                      self.solver.CHOOSE_RANDOM,
                                                      self.solver.ASSIGN_RANDOM_VALUE)
    def __defineVarinablesAndGeneralConstraints(self):
        """Create the decision variables, the cost objective and the general
        constraints.  (The "Varinables" typo is kept: private method name.)"""
        self.vm = [self.solver.IntVar(0, 1, "VM%i" % j) for j in range(0, self.nrVM)]
        self.a = [self.solver.IntVar(0, 1, 'C%i_VM%i' % (i, j)) for i in range(self.nrComp) for j in
                  range(self.nrVM)]
        #print(".....", self.nrVM, self.offerSize)
        if self.__solveVariantSMTEncoding:
            # Per-VM offer variables; vmType[j] selects an entry of availableConfig.
            self.vmType = [self.solver.IntVar(0, self.offerSize, "vmType%i" % j) for j in range(0, self.nrVM)]
            #print(self.vmType)
            self.ProcProv = [self.solver.IntVar(0, 100, "ProcProv%i" % j) for j in range(0, self.nrVM)]
            self.MemProv = [self.solver.IntVar(0, 10000000, "MemProv%i" % j) for j in range(0, self.nrVM)]
            self.StorageProv = [self.solver.IntVar(0, 100000, "StorageProv%i" % j) for j in range(0, self.nrVM)]
            self.PriceProv = [self.solver.IntVar(0, 100000, "PriceProv%i" % j) for j in range(0, self.nrVM)]
            self.__addConstraintsSMT()
        self.cost = self.solver.IntVar(0, 10000000, 'cost')
        if self.__solveVariantSMTEncoding and (self.availableConfig is not None):
            #print(self.cost == self.solver.Sum(self.PriceProv[j] for j in range(self.nrVM)))
            # With offers known, the objective is the total price of leased VMs.
            self.solver.Add(self.cost == self.solver.Sum(self.PriceProv[j] for j in range(self.nrVM)))
        else:
            # Without offers, the objective degrades to the number of leased VMs.
            self.solver.Add(self.cost == self.solver.Sum(self.vm[j] for j in range(self.nrVM)))
        # self.__GC1()
        #self.__GC2()
        self.__GC3()
    def addFixComponentRestriction(self, compId, vmId):
        """Pin component compId onto VM vmId."""
        self.solver.Add(self.a[compId * self.nrVM + vmId] == 1)
    def __addConstraintsSMT(self):
        """Link each VM's offer variables (type, CPU, memory, storage, price).

        Encodes, per VM i, the disjunction: either the VM is unused (vm==0 and
        price==0), or it is used and all of its hardware variables match exactly
        one entry of availableConfig.  The nested reified Sums express this with
        0/1 boolean arithmetic (inner sum == 6 means "all six literals hold").
        """
        if self.availableConfig is None:
            return
        #print("start smt", self.nrVM)
        # for i in range(self.nrVM):
        #     self.solver.Add((self.vm[i] == 0) == (self.PriceProv[i] == 0))
        #print("add 0 price smt")
        #print(self.nrVM, self.availableConfig)
        # An assigned VM must have exactly one VM type.
        for i in range(self.nrVM):
            self.solver.Add(
                self.solver.Sum([
                    self.solver.Sum([self.vm[i] == 0, self.PriceProv[i] == 0]) == 2,
                    self.solver.Sum([
                        self.solver.Sum([self.vm[i] == 1,
                                         self.vmType[i] == t,
                                         self.PriceProv[i] == self.availableConfig[t][4],
                                         self.ProcProv[i] == self.availableConfig[t][1],
                                         self.MemProv[i] == self.availableConfig[t][2],
                                         self.StorageProv[i] == self.availableConfig[t][3]
                                         ]) == 6
                        for t in range(len(self.availableConfig))]) >= 1]) == 1
            )
    def __GC1(self):
        """At least one instance of a component is deployed on acquired VM"""
        for i in range(self.nrComp):
            self.solver.Add(self.solver.Sum([self.a[i*self.nrVM+j] for j in range(self.nrVM)]) >= 1)
    def __GC2(self):
        """The number of components deployed on a virtual machine is less or equal with VM_MaxLoad"""
        for k in range(self.nrVM):
            self.solver.Add(self.solver.Sum([self.a[i * self.nrVM + k] for i in range(self.nrComp)]) <= self.VM_MaxLoad)
    def __GC3(self):
        """The components are deployed only on acquired VM"""
        for j in range(self.nrVM):
            for i in range(self.nrComp):
                self.solver.Add(self.a[i * self.nrVM + j] <= self.vm[j])
    def RestrictionConflict(self, alphaCompID, conflictCompsIDList):
        """
        Conflict constraint: alphaCompID never shares a VM with any component
        in conflictCompsIDList (at most one of the pair per VM).
        :param alphaCompID - ID of the component that is in conflict with other components
        :param conflictCompsIDList - the IDs list of components that alphaComponent is in conflict with
        """
        for j in range(self.nrVM):
            for conflictCompId in conflictCompsIDList:
                self.solver.Add(self.solver.Sum([self.a[alphaCompID * self.nrVM + j], self.a[conflictCompId * self.nrVM + j]]) <= 1)
    def RestrictionUpperLowerEqualBound(self, compsIdList, n1, operation):
        """
        Constraint that bounds the total number of instances of a component group
        :param compsIdList: component IDs whose instances are summed together
        :param n1: a positive limit for the components number
        :param operation: should be one of the strings {"<=","==",">="}
                "<=": sum(compsIdList) <= n1
                ">=": sum(compsIdList) >= n1
                "==": sum(compsIdList) == n1
        """
        if operation == "<=":
            self.solver.Add(
                self.solver.Sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) <= n1)
        elif operation == ">=":
            self.solver.Add(
                self.solver.Sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in
                                 range(self.nrVM)]) >= n1)
        elif operation == "==":
            self.solver.Add(
                self.solver.Sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in
                                 range(self.nrVM)]) == n1)
    def RestrictionRangeBound(self, compsIdList, n1, n2):
        """
        Constraint that keeps the total number of instances within [n1, n2]
        :param compsIdList: component IDs whose instances are summed together
        :param n1: a positive lower limit for the components number
        :param n2: a positive upper limit for the components number
        """
        self.solver.Add(
            self.solver.Sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) >= n1)
        self.solver.Add(
            self.solver.Sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) <= n2)
    def RestrictionFullDeployment(self, alphaCompId, compsIdList):
        """
        Component alpha must be deployed on all acquired machines except the ones
        hosting components that component alpha is in conflict with
        :param alphaCompId: the component id that must be deployed on all machines
        :param compsIdList: the list of components that component alpha is in conflict with
        :return: None
        """
        for j in range(self.nrVM):
            # Per VM: alpha (or exactly one of its conflicting components) is
            # present iff the VM is acquired.
            self.solver.Add(self.solver.Sum([self.a[alphaCompId * self.nrVM + j]]+[self.a[_compId * self.nrVM + j]
                                             for _compId in compsIdList]) == self.vm[j])
    def minimumVmsNumberConstraint(self, minNrOfVm):
        """
        Minimum VM number that should be acquired
        NOT USED NOW
        :param minNrOfVm:
        :return: None
        """
        self.solver.Add(self.solver.Sum([self.vm[j] for j in range(self.nrVM)]) >= minNrOfVm)
    def RestrictionManyToManyDependency(self, alphaCompId, betaCompId, operation):
        """
        The number of instances of component alpha depends on the number of instances of component beta
        :param alphaCompId: ID of component alpha, ID should be in set {1,...,N}
        :param betaCompId: ID of component beta, ID should be in set {1,...,N}
        :param operation: one of the strings in set {"==", "<=", ">="}
            "==": sum(instances of alpha component) == sum(instances of beta component)
            "<=": sum(instances of alpha component) <= sum(instances of beta component)
            ">=": sum(instances of alpha component) >= sum(instances of beta component)
        :return: None
        """
        if operation == "<=":
            self.solver.Add(
                self.solver.Sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) <=
                self.solver.Sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]))
        elif operation == ">=":
            self.solver.Add(
                self.solver.Sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) >=
                self.solver.Sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]))
        elif operation == "==":
            self.solver.Add(
                self.solver.Sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) ==
                self.solver.Sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]))
    def RestrictionManyToManyDependencyNew(self, alphaCompId, betaCompId, n, m):
        """
        Proportional many-to-many dependency: roughly n instances of alpha per
        m instances of beta (ratio encoded through the four inequalities below).
        :param alphaCompId: ID of component alpha, ID should be in set {1,...,N}
        :param betaCompId: ID of component beta, ID should be in set {1,...,N}
        :param n: alpha-side multiplicity of the ratio
        :param m: beta-side multiplicity of the ratio
        :return: None
        """
        self.solver.Add(
            m * self.solver.Sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]) -
            n * self.solver.Sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) >= 0)
        self.solver.Add(
            m * self.solver.Sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]) -
            n * self.solver.Sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) < n*m)
        self.solver.Add(
            m * self.solver.Sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]) >= n)
        self.solver.Add(
            n * self.solver.Sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) <= int(m*self.nrVM))
    def RestrictionOneToManyDependency(self, alphaCompId, betaCompId, n):
        """
        For one alpha component there should be deployed n beta components
        old: SC4 Flor
        :param alphaCompId: ID of component alpha, ID should be in set {1,...,N}
        :param betaCompId: ID of component beta, ID should be in set {1,...,N}
        :param n: depending instances number
        :return:
        """
        self.solver.Add(
            n*self.solver.Sum([self.a[alphaCompId * self.nrVM + k] for k in range(self.nrVM)]) -
            self.solver.Sum([self.a[betaCompId * self.nrVM + k] for k in range(self.nrVM)]) >= 0) # instead of > to ensure that for q*n beta components there are q alpha components
        self.solver.Add(
            n * self.solver.Sum([self.a[alphaCompId * self.nrVM + k] for k in range(self.nrVM)]) -
            self.solver.Sum([self.a[betaCompId * self.nrVM + k] for k in range(self.nrVM)]) < n) # instead of <= for the same reason as before
        ### exact multiple of n => correct number of alpha instances
        # self.solver.Add(
        #     n * self.solver.Sum([self.a[alphaCompId * self.nrVM + k] for k in range(self.nrVM)]) -
        #     self.solver.Sum([self.a[betaCompId * self.nrVM + k] for k in range(self.nrVM)]) >= 0)
        #
        # self.solver.Add(
        #     n * self.solver.Sum([self.a[alphaCompId * self.nrVM + k] for k in range(self.nrVM)]) -
        #     self.solver.Sum([self.a[betaCompId * self.nrVM + k] for k in range(self.nrVM)]) < n)
    # Subset of SC4 - two components depend on each other (they must always be
    # placed together on the same machine).
    def RestrictionOneToOneDependency(self, alphaCompId, betaCompId):
        """
        Components alpha and beta should always be deployed together
        :param alphaCompId: ID of component alpha, ID should be in set {1,...,N}
        :param betaCompId: ID of component beta, ID should be in set {1,...,N}
        :return:
        """
        for k in range(self.nrVM):
            self.solver.Add(self.a[alphaCompId * self.nrVM + k] == self.a[betaCompId * self.nrVM + k])
    def constraintsHardware(self, componentsValues):
        """Constrain each VM's provisioned CPU/memory/storage to cover the
        requirements of the components placed on it.

        componentsValues: per-component [cpu, mem, storage] requirement rows;
        None entries are treated as 0.
        """
        if self.availableConfig is None:
            return
        # if self.availableConfig is None:
        #     self.problem.logger.debug("Optimize machine number because no offers are available")
        #     self.solver.Add(self.cost == self.solver.Sum(self.vm[j] for j in range(self.nrVM)))
        #     return
        self.problem.logger.debug("Hardware constaints: componentsValues: {} avalibaleConfigurations: {}".format(
            componentsValues, self.availableConfig))
        """
        for line in componentsValues:
            for i in line:
                if i is None:
                    return
        """
        componentsValues =[[0 if val is None else val for val in line] for line in componentsValues]
        # For every configuration dimension (cpu, mem, ...) there must exist a
        # machine that satisfies them all.
        #print("componentsValues=",componentsValues, "\n availableConfigurationsValues=", availableConfigurationsValues)
        # NOTE(review): the two lengths below are only used by the disabled
        # legacy encoding in the else-branch comment.
        hardwareLen = len(componentsValues[0])
        availableConfLen = len(self.availableConfig)
        #print("availableConfig: ")
        if self.__solveVariantSMTEncoding:
            for k in range(self.nrVM):
                # Demand of the components on VM k must fit its provisioned offer.
                self.solver.Add(self.solver.Sum([self.a[i * self.nrVM + k] * int(componentsValues[i][0])
                                                 for i in range(self.nrComp)]) <= self.ProcProv[k])
                self.solver.Add(self.solver.Sum([self.a[i * self.nrVM + k] * int(componentsValues[i][1])
                                                 for i in range(self.nrComp)]) <= self.MemProv[k])
                self.solver.Add(self.solver.Sum([self.a[i * self.nrVM + k] * int(componentsValues[i][2])
                                                 for i in range(self.nrComp)]) <= self.StorageProv[k])
        else:
            print("!!!!!!!!!!!!old check")
            # if not self.__optimizePrice:
            #     for k in range(self.nrVM):
            #         self.solver.Add(
            #             self.solver.Sum([
            #                 self.solver.Sum([
            #                     self.solver.Sum([
            #                         self.solver.Sum([self.a[i * self.nrVM + k] * componentsValues[i][h] for i in range(self.nrComp)])
            #                         <= conf[h + 1] for h in range(hardwareLen)])
            #                     == hardwareLen]) for conf in availableConfigurationsValues])
            #             >= 1
            #         )
    def RestrictionAlphaOrBeta(self, alphaCompId, betaCompId):
        """Exclusive or: exactly one of components alpha and beta is deployed."""
        self.solver.Add(
            self.solver.Sum([
                self.solver.Sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]) > 0,
                self.solver.Sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) > 0]) == 1)
    def RestrictionRequireProvideDependency(self, alphaCompId, betaCompId, n, m):
        """Require/provide: n * (instances of alpha) <= m * (instances of beta)."""
        self.solver.Add(
            n * self.solver.Sum([self.a[alphaCompId * self.nrVM + j] for j in range(self.nrVM)]) <=
            m * self.solver.Sum([self.a[betaCompId * self.nrVM + j] for j in range(self.nrVM)]))
    def __runMinimizationProblem(self):
        """
        Minimize the cost objective (price of leased VMs, or VM count).
        :return: (startTime, stopTime) process-time stamps around Solve()
        """
        #print("-----------runminimization problem")
        # problem objective is to minimize price
        self.objective = self.solver.Minimize(self.cost, 1)
        # Create a solution collector (keeps only the last, i.e. best, solution).
        self.collector = self.solver.LastSolutionCollector()
        # Add the decision variables.
        self.collector.Add(self.a)
        self.collector.Add(self.vm)
        self.collector.Add(self.PriceProv)
        # Add the objective.
        self.collector.AddObjective(self.cost)
        startTime = time.process_time()
        self.solver.Solve(self.decision_builder, [self.limits, self.objective, self.collector])
        #self.solver.Solve(self.decision_builder, [self.objective, self.collector])
        stopTime = time.process_time()
        return startTime, stopTime
    def __runCPProblem(self):
        """
        Just run the CP satisfaction problem, collecting every solution found.
        :return: (startTime, stopTime) process-time stamps around Solve()
        """
        # Create a solution collector.
        self.collector = self.solver.AllSolutionCollector()
        # Add the decision variables.
        self.collector.Add(self.a)
        self.collector.Add(self.vm)
        self.collector.Add(self.cost)
        self.collector.Add(self.PriceProv)
        startTime = time.process_time()
        self.solver.Solve(self.decision_builder, [self.limits, self.collector])
        stopTime = time.process_time()
        return startTime, stopTime
    def __rebuild_solution(self, solutionIndex):
        """Extract solution *solutionIndex* from the collector.

        Returns (total price, per-VM price vector, number of used VMs,
        assignment matrix as a list of nrComp rows of length nrVM).
        """
        _vm = []
        _a = []
        _priceVector = []
        for vm in self.vm:
            _vm.append(self.collector.Value(solutionIndex, vm))
        # Re-fold the flattened assignment list into rows of nrVM entries.
        i = 0
        line = []
        for el in self.a:
            if i % self.nrVM == 0 and len(line) > 0:
                _a.append(line)
                line = []
            line.append(self.collector.Value(solutionIndex, el))
            i += 1
        _a.append(line)
        for p in self.PriceProv:
            _priceVector.append(self.collector.Value(solutionIndex, p))
        #print("Optimal cost:",_priceVector)
        return numpy.sum(_priceVector), _priceVector, numpy.sum(_vm), _a
    def run(self):
        """Solve the model and return (objectiveValue, cost vector(s),
        vm usage, assignment(s)).

        In the optimization case the best solution is returned; otherwise the
        result lists contain one entry per collected solution and
        objectiveValue stays -1.
        """
        #print("-----start run")
        if self.__optimization_problem:
            startTime, stopTime = self.__runMinimizationProblem()
        else:
            startTime, stopTime = self.__runCPProblem()
        #print("end run")
        # NOTE(review): startTime/stopTime are currently unused here.
        _vm = []
        _a = []
        _costVector = []
        objectiveValue = -1
        if self.__optimization_problem:
            if self.collector.SolutionCount() > 0:
                #print("SOLUTIONS NUMBER: ", self.collector.SolutionCount())
                # The last collected solution is the best one found.
                best_solution = self.collector.SolutionCount() - 1
                objectiveValue = self.collector.ObjectiveValue(best_solution)
                #print("Objective value:", best_nr_of_vm)
                cost, _costVector, _vm, _a = self.__rebuild_solution(best_solution)
        else:  # collect all solutions
            if self.collector.SolutionCount() > 0:
                #print("SOLUTIONS NUMBER (not optimization): ", self.collector.SolutionCount())
                # NOTE(review): best_nr_of_vm is tracked but never returned.
                best_nr_of_vm = None
                for solIndex in range(self.collector.SolutionCount()):
                    cost, aux_costVector, aux_vm, aux_a = self.__rebuild_solution(solIndex)
                    _vm.append(aux_vm)
                    _a.append(aux_a)
                    _costVector.append(aux_costVector)
                    # print('--vm--')
                    # print(aux_vm)
                    # print('--a--')
                    # #if 1 not in aux_a[len(aux_a)-1]:
                    # for i in aux_a:
                    #     print(i)
                    if best_nr_of_vm is None:
                        best_nr_of_vm = numpy.sum(aux_vm)
                    else:
                        aux = numpy.sum(aux_vm)
                        if aux < best_nr_of_vm:
                            best_nr_of_vm = aux
        return objectiveValue, _costVector, _vm, _a
|
<reponame>KatrinaHoffert/stroke-radius-segmentation<filename>segment.py
'''
Runs the segmentation program on all images, resulting in the creation of
binary segmentation images.
'''
import os, re, sys, subprocess
from common import Study
# The Boykov segmentation program is only currently available as a Linux binary,
# but the OneCut segmentation program is only currently available as a Windows
# binary, so we have to flip by platform what we run...
# NOTE(review): hard-coded — set to "linux" to run the Boykov pipeline instead.
platform = "windows"
def segment(study, originals_loc, strokes_loc, output_loc, run_segmentation_code):
    '''
    Segments images using the segmentation program.

    study: The Study used, which determines naming scheme.
    originals_loc: Folder that the original files are in.
    strokes_loc: Folder that the stroke/point annotated files are in.
    output_loc: Folder that the output files should be placed in.
    run_segmentation_code: Lambda that runs the actual segmentation program. Is
    given the file ID, the path to the original image, the path to the strokes file,
    an output folder, and a calculated output file name.
    '''
    i = 0
    files = os.listdir(strokes_loc)
    for file in files:
        # Extract data from the file name: Rau's study encodes
        # participant-file-radius; the other study adds a time-pressure field.
        if study == Study.Rau:
            file_re = re.search(r'(\d+)-(\d+)-(\d+)', file)
        else:
            file_re = re.search(r'(\d+)-(\d+)-(\d+)-(\d+)', file)
        if file_re is None: continue
        participant_id = file_re.group(1)
        file_id = file_re.group(2)
        if study == Study.Rau:
            name_parts = [participant_id, file_id, file_re.group(3)]
        else:
            name_parts = [participant_id, file_id, file_re.group(3), file_re.group(4)]
        # For all files, determine paths and run through segmentation program
        i += 1
        print('\rProcessing file', i, 'of', len(files), end='')
        sys.stdout.flush()
        original_image = originals_loc + '/' + file_id + '.jpg'
        stroke_image = strokes_loc + '/' + file
        output_image = output_loc + '/' + '-'.join(name_parts) + '-segmented.png'
        # Skip files that have already been segmented (allows resuming a run)
        if os.path.exists(output_image): continue
        run_segmentation_code(file_id, original_image, stroke_image, output_loc, output_image)
def run_boykov_segmentation(file_id, original_image, stroke_image, output_loc, output_image):
    """Run the Boykov max-flow binary on one image, then move its segmentation
    output to output_image and delete the contour file it also produces."""
    # Note: Python 3.5+ this method is called "run"
    command = ['./segmentation_programs/BoykovMaxFlowGeneric', '--strokefglabel=29',
               '--strokebglabel=149', '--outputdir=' + output_loc, original_image, stroke_image]
    subprocess.call(command)
    # The program names its outputs after the input file; rename/clean them up.
    created_file_base = output_loc + '/' + file_id + '.jpg'
    os.rename(created_file_base + '.segmentation.tif', output_image)
    os.remove(created_file_base + '.contours.tif')
def run_onecut_segmentation(file_id, original_image, stroke_image, output_loc, output_image):
    """Run the OneCut binary on one image; it writes the segmentation directly
    to output_image, so no post-processing is needed."""
    # Note: Python 3.5+ this method is called "run"
    command = ['./segmentation_programs/OneCut', original_image, stroke_image,
               '--fg-label', '29', '--bg-label', '149', '--output', output_image]
    subprocess.call(command, stdout=subprocess.DEVNULL)
# Pick the segmentation backend for this platform; output folders for the
# OneCut runs carry an "_onecut" suffix so the two backends do not collide.
if platform == "linux":
    print('Segmenting with Boykov\'s graph cut:')
    runner = run_boykov_segmentation
    suffix = ''
else:
    print('Segmenting with OneCut:')
    runner = run_onecut_segmentation
    suffix = '_onecut'
print('Processing Rau\'s strokes')
segment(Study.Rau, './rau/originals', './rau/dilated_strokes', './rau/segmented_strokes' + suffix, runner)
print('\nProcessing Rau\'s points')
segment(Study.Rau, './rau/originals', './rau/dilated_points', './rau/segmented_points' + suffix, runner)
print('\nProcessing Yuanxia\'s strokes')
segment(Study.Yuanxia, './yuanxia/originals', './yuanxia/dilated', './yuanxia/segmented' + suffix, runner)
print()
<reponame>okxjd/processing_ng
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
TPL_FORMAT = {\
'ten': [
{'1': ('kn', '66:0')}, {'1': ('kn', '66:1')}, {'1': ('kn', '66:2')}, {'1': ('kn', '66:3')},
{'1': ('kn', '66:4')}, {'1': ('kn', '66:5')}, {'1': ('kn', '66:6')}, {'1': ('kn', '66:7')}
],
'district': [
{'1': ('kn', '66:00')}, {'1': ('kn', '66:01')}, {'1': ('kn', '66:02')}, {'1': ('kn', '66:03')},
{'1': ('kn', '66:04')}, {'1': ('kn', '66:05')}, {'1': ('kn', '66:06')}, {'1': ('kn', '66:07')},
{'1': ('kn', '66:08')}, {'1': ('kn', '66:09')}, {'1': ('kn', '66:10')}, {'1': ('kn', '66:11')},
{'1': ('kn', '66:12')}, {'1': ('kn', '66:13')}, {'1': ('kn', '66:14')}, {'1': ('kn', '66:15')},
{'1': ('kn', '66:16')}, {'1': ('kn', '66:17')}, {'1': ('kn', '66:18')}, {'1': ('kn', '66:19')},
{'1': ('kn', '66:20')}, {'1': ('kn', '66:21')}, {'1': ('kn', '66:22')}, {'1': ('kn', '66:23')},
{'1': ('kn', '66:24')}, {'1': ('kn', '66:25')}, {'1': ('kn', '66:26')}, {'1': ('kn', '66:27')},
{'1': ('kn', '66:28')}, {'1': ('kn', '66:29')}, {'1': ('kn', '66:30')}, {'1': ('kn', '66:31')},
{'1': ('kn', '66:32')}, {'1': ('kn', '66:33')}, {'1': ('kn', '66:34')}, {'1': ('kn', '66:35')},
{'1': ('kn', '66:36')}, {'1': ('kn', '66:37')}, {'1': ('kn', '66:38')}, {'1': ('kn', '66:39')},
{'1': ('kn', '66:40')}, {'1': ('kn', '66:42')}, {'1': ('kn', '66:43')}, {'1': ('kn', '66:44')},
{'1': ('kn', '66:45')}, {'1': ('kn', '66:46')}, {'1': ('kn', '66:47')}, {'1': ('kn', '66:48')},
{'1': ('kn', '66:49')}, {'1': ('kn', '66:50')}, {'1': ('kn', '66:51')}, {'1': ('kn', '66:52')},
{'1': ('kn', '66:53')}, {'1': ('kn', '66:54')}, {'1': ('kn', '66:55')}, {'1': ('kn', '66:57')},
{'1': ('kn', '66:58')}, {'1': ('kn', '66:59')}, {'1': ('kn', '66:60')}, {'1': ('kn', '66:61')},
{'1': ('kn', '66:62')}, {'1': ('kn', '66:63')}, {'1': ('kn', '66:64')}, {'1': ('kn', '66:65')},
{'1': ('kn', '66:66')}, {'1': ('kn', '66:67')}, {'1': ('kn', '66:68')}, {'1': ('kn', '66:69')},
{'1': ('kn', '66:70')}, {'1': ('kn', '66:71')}, {'1': ('kn', '66:72')}, {'1': ('kn', '66:73')},
{'1': ('kn', '66:41:00')}, {'1': ('kn', '66:41:01')}, {'1': ('kn', '66:41:02')}, {'1': ('kn', '66:41:03')},
{'1': ('kn', '66:41:04')}, {'1': ('kn', '66:41:05')}, {'1': ('kn', '66:41:06')}, {'1': ('kn', '66:41:07')},
{'1': ('kn', '66:41:08')}, {'1': ('kn', '66:41:09')}, {'1': ('kn', '66:41:1')}, {'1': ('kn', '66:41:2')},
{'1': ('kn', '66:41:3')}, {'1': ('kn', '66:41:4')}, {'1': ('kn', '66:41:5')}, {'1': ('kn', '66:41:6')},
{'1': ('kn', '66:41:7')}, {'1': ('kn', '66:41:8')}, {'1': ('kn', '66:41:9')},
{'1': ('kn', '66:56:00')}, {'1': ('kn', '66:56:01')}, {'1': ('kn', '66:56:02')}, {'1': ('kn', '66:56:03')},
{'1': ('kn', '66:56:04')}, {'1': ('kn', '66:56:05')}, {'1': ('kn', '66:56:06')}, {'1': ('kn', '66:56:07')},
{'1': ('kn', '66:56:08')}, {'1': ('kn', '66:56:09')}, {'1': ('kn', '66:56:1')}, {'1': ('kn', '66:56:2')},
{'1': ('kn', '66:56:3')}, {'1': ('kn', '66:56:4')}, {'1': ('kn', '66:56:5')}, {'1': ('kn', '66:56:6')},
{'1': ('kn', '66:56:7')}, {'1': ('kn', '66:56:8')}, {'1': ('kn', '66:56:9')}
],
'block': [
{'1': ('kn', '66:00:00')},
{'1': ('kn', '66:00:01')},
{'1': ('kn', '66:00:02')},
{'1': ('kn', '66:00:03')},
{'1': ('kn', '66:00:04')},
{'1': ('kn', '66:00:05')},
{'1': ('kn', '66:00:06')},
{'1': ('kn', '66:00:07')},
{'1': ('kn', '66:00:08')},
{'1': ('kn', '66:00:09')},
{'1': ('kn', '66:00:10')},
{'1': ('kn', '66:00:11')},
{'1': ('kn', '66:00:12')},
{'1': ('kn', '66:00:13')},
{'1': ('kn', '66:00:14')},
{'1': ('kn', '66:00:15')},
{'1': ('kn', '66:00:16')},
{'1': ('kn', '66:00:17')},
{'1': ('kn', '66:00:18')},
{'1': ('kn', '66:00:19')},
{'1': ('kn', '66:00:20')},
{'1': ('kn', '66:00:21')},
{'1': ('kn', '66:00:22')},
{'1': ('kn', '66:00:23')},
{'1': ('kn', '66:00:24')},
{'1': ('kn', '66:00:25')},
{'1': ('kn', '66:00:26')},
{'1': ('kn', '66:00:27')},
{'1': ('kn', '66:00:28')},
{'1': ('kn', '66:00:29')},
{'1': ('kn', '66:00:30')},
{'1': ('kn', '66:00:31')},
{'1': ('kn', '66:00:32')},
{'1': ('kn', '66:00:33')},
{'1': ('kn', '66:00:34')},
{'1': ('kn', '66:00:35')},
{'1': ('kn', '66:00:36')},
{'1': ('kn', '66:00:37')},
{'1': ('kn', '66:00:38')},
{'1': ('kn', '66:00:39')},
{'1': ('kn', '66:00:40')},
{'1': ('kn', '66:00:41')},
{'1': ('kn', '66:00:42')},
{'1': ('kn', '66:00:43')},
{'1': ('kn', '66:00:44')},
{'1': ('kn', '66:00:45')},
{'1': ('kn', '66:00:46')},
{'1': ('kn', '66:00:47')},
{'1': ('kn', '66:00:48')},
{'1': ('kn', '66:00:49')},
{'1': ('kn', '66:00:50')},
{'1': ('kn', '66:00:51')},
{'1': ('kn', '66:00:52')},
{'1': ('kn', '66:00:53')},
{'1': ('kn', '66:00:54')},
{'1': ('kn', '66:00:55')},
{'1': ('kn', '66:00:56')},
{'1': ('kn', '66:00:57')},
{'1': ('kn', '66:00:58')},
{'1': ('kn', '66:00:59')},
{'1': ('kn', '66:00:60')},
{'1': ('kn', '66:00:61')},
{'1': ('kn', '66:00:62')},
{'1': ('kn', '66:00:63')},
{'1': ('kn', '66:00:64')},
{'1': ('kn', '66:00:65')},
{'1': ('kn', '66:00:66')},
{'1': ('kn', '66:00:67')},
{'1': ('kn', '66:00:68')},
{'1': ('kn', '66:00:69')},
{'1': ('kn', '66:00:70')},
{'1': ('kn', '66:00:71')},
{'1': ('kn', '66:00:72')},
{'1': ('kn', '66:00:73')},
{'1': ('kn', '66:00:74')},
{'1': ('kn', '66:00:75')},
{'1': ('kn', '66:00:76')},
{'1': ('kn', '66:00:77')},
{'1': ('kn', '66:00:78')},
{'1': ('kn', '66:00:79')},
{'1': ('kn', '66:00:80')},
{'1': ('kn', '66:00:81')},
{'1': ('kn', '66:00:82')},
{'1': ('kn', '66:00:83')},
{'1': ('kn', '66:00:84')},
{'1': ('kn', '66:00:85')},
{'1': ('kn', '66:00:86')},
{'1': ('kn', '66:00:87')},
{'1': ('kn', '66:00:88')},
{'1': ('kn', '66:00:89')},
{'1': ('kn', '66:00:90')},
{'1': ('kn', '66:00:91')},
{'1': ('kn', '66:00:92')},
{'1': ('kn', '66:00:93')},
{'1': ('kn', '66:00:94')},
{'1': ('kn', '66:00:95')},
{'1': ('kn', '66:00:96')},
{'1': ('kn', '66:00:97')},
{'1': ('kn', '66:00:98')},
{'1': ('kn', '66:00:99')},
{'1': ('kn', '66:01:00')},
{'1': ('kn', '66:01:01')},
{'1': ('kn', '66:01:02')},
{'1': ('kn', '66:01:03')},
{'1': ('kn', '66:01:04')},
{'1': ('kn', '66:01:05')},
{'1': ('kn', '66:01:06')},
{'1': ('kn', '66:01:07')},
{'1': ('kn', '66:01:08')},
{'1': ('kn', '66:01:09')},
{'1': ('kn', '66:01:10')},
{'1': ('kn', '66:01:11')},
{'1': ('kn', '66:01:12')},
{'1': ('kn', '66:01:13')},
{'1': ('kn', '66:01:14')},
{'1': ('kn', '66:01:15')},
{'1': ('kn', '66:01:16')},
{'1': ('kn', '66:01:17')},
{'1': ('kn', '66:01:18')},
{'1': ('kn', '66:01:19')},
{'1': ('kn', '66:01:20')},
{'1': ('kn', '66:01:21')},
{'1': ('kn', '66:01:22')},
{'1': ('kn', '66:01:23')},
{'1': ('kn', '66:01:24')},
{'1': ('kn', '66:01:25')},
{'1': ('kn', '66:01:26')},
{'1': ('kn', '66:01:27')},
{'1': ('kn', '66:01:28')},
{'1': ('kn', '66:01:29')},
{'1': ('kn', '66:01:30')},
{'1': ('kn', '66:01:31')},
{'1': ('kn', '66:01:32')},
{'1': ('kn', '66:01:33')},
{'1': ('kn', '66:01:34')},
{'1': ('kn', '66:01:35')},
{'1': ('kn', '66:01:36')},
{'1': ('kn', '66:01:37')},
{'1': ('kn', '66:01:38')},
{'1': ('kn', '66:01:39')},
{'1': ('kn', '66:01:40')},
{'1': ('kn', '66:01:41')},
{'1': ('kn', '66:01:42')},
{'1': ('kn', '66:01:43')},
{'1': ('kn', '66:01:44')},
{'1': ('kn', '66:01:45')},
{'1': ('kn', '66:01:46')},
{'1': ('kn', '66:01:47')},
{'1': ('kn', '66:01:48')},
{'1': ('kn', '66:01:49')},
{'1': ('kn', '66:01:50')},
{'1': ('kn', '66:01:51')},
{'1': ('kn', '66:01:52')},
{'1': ('kn', '66:01:53')},
{'1': ('kn', '66:01:54')},
{'1': ('kn', '66:01:55')},
{'1': ('kn', '66:01:56')},
{'1': ('kn', '66:01:57')},
{'1': ('kn', '66:01:58')},
{'1': ('kn', '66:01:59')},
{'1': ('kn', '66:01:60')},
{'1': ('kn', '66:01:61')},
{'1': ('kn', '66:01:62')},
{'1': ('kn', '66:01:63')},
{'1': ('kn', '66:01:64')},
{'1': ('kn', '66:01:65')},
{'1': ('kn', '66:01:66')},
{'1': ('kn', '66:01:67')},
{'1': ('kn', '66:01:68')},
{'1': ('kn', '66:01:69')},
{'1': ('kn', '66:01:70')},
{'1': ('kn', '66:01:71')},
{'1': ('kn', '66:01:72')},
{'1': ('kn', '66:01:73')},
{'1': ('kn', '66:01:74')},
{'1': ('kn', '66:01:75')},
{'1': ('kn', '66:01:76')},
{'1': ('kn', '66:01:77')},
{'1': ('kn', '66:01:78')},
{'1': ('kn', '66:01:79')},
{'1': ('kn', '66:01:80')},
{'1': ('kn', '66:01:81')},
{'1': ('kn', '66:01:82')},
{'1': ('kn', '66:01:83')},
{'1': ('kn', '66:01:84')},
{'1': ('kn', '66:01:85')},
{'1': ('kn', '66:01:86')},
{'1': ('kn', '66:01:87')},
{'1': ('kn', '66:01:88')},
{'1': ('kn', '66:01:89')},
{'1': ('kn', '66:01:90')},
{'1': ('kn', '66:01:91')},
{'1': ('kn', '66:01:92')},
{'1': ('kn', '66:01:93')},
{'1': ('kn', '66:01:94')},
{'1': ('kn', '66:01:95')},
{'1': ('kn', '66:01:96')},
{'1': ('kn', '66:01:97')},
{'1': ('kn', '66:01:98')},
{'1': ('kn', '66:01:99')},
{'1': ('kn', '66:02:00')},
{'1': ('kn', '66:02:01')},
{'1': ('kn', '66:02:02')},
{'1': ('kn', '66:02:03')},
{'1': ('kn', '66:02:04')},
{'1': ('kn', '66:02:05')},
{'1': ('kn', '66:02:06')},
{'1': ('kn', '66:02:07')},
{'1': ('kn', '66:02:08')},
{'1': ('kn', '66:02:09')},
{'1': ('kn', '66:02:10')},
{'1': ('kn', '66:02:11')},
{'1': ('kn', '66:02:12')},
{'1': ('kn', '66:02:13')},
{'1': ('kn', '66:02:14')},
{'1': ('kn', '66:02:15')},
{'1': ('kn', '66:02:16')},
{'1': ('kn', '66:02:17')},
{'1': ('kn', '66:02:18')},
{'1': ('kn', '66:02:19')},
{'1': ('kn', '66:02:20')},
{'1': ('kn', '66:02:21')},
{'1': ('kn', '66:02:22')},
{'1': ('kn', '66:02:23')},
{'1': ('kn', '66:02:24')},
{'1': ('kn', '66:02:25')},
{'1': ('kn', '66:02:26')},
{'1': ('kn', '66:02:27')},
{'1': ('kn', '66:02:28')},
{'1': ('kn', '66:02:29')},
{'1': ('kn', '66:02:30')},
{'1': ('kn', '66:02:31')},
{'1': ('kn', '66:02:32')},
{'1': ('kn', '66:02:33')},
{'1': ('kn', '66:02:34')},
{'1': ('kn', '66:02:35')},
{'1': ('kn', '66:02:36')},
{'1': ('kn', '66:02:37')},
{'1': ('kn', '66:02:38')},
{'1': ('kn', '66:02:39')},
{'1': ('kn', '66:02:40')},
{'1': ('kn', '66:02:41')},
{'1': ('kn', '66:02:42')},
{'1': ('kn', '66:02:43')},
{'1': ('kn', '66:02:44')},
{'1': ('kn', '66:02:45')},
{'1': ('kn', '66:02:46')},
{'1': ('kn', '66:02:47')},
{'1': ('kn', '66:02:48')},
{'1': ('kn', '66:02:49')},
{'1': ('kn', '66:02:50')},
{'1': ('kn', '66:02:51')},
{'1': ('kn', '66:02:52')},
{'1': ('kn', '66:02:53')},
{'1': ('kn', '66:02:54')},
{'1': ('kn', '66:02:55')},
{'1': ('kn', '66:02:56')},
{'1': ('kn', '66:02:57')},
{'1': ('kn', '66:02:58')},
{'1': ('kn', '66:02:59')},
{'1': ('kn', '66:02:60')},
{'1': ('kn', '66:02:61')},
{'1': ('kn', '66:02:62')},
{'1': ('kn', '66:02:63')},
{'1': ('kn', '66:02:64')},
{'1': ('kn', '66:02:65')},
{'1': ('kn', '66:02:66')},
{'1': ('kn', '66:02:67')},
{'1': ('kn', '66:02:68')},
{'1': ('kn', '66:02:69')},
{'1': ('kn', '66:02:70')},
{'1': ('kn', '66:02:71')},
{'1': ('kn', '66:02:72')},
{'1': ('kn', '66:02:73')},
{'1': ('kn', '66:02:74')},
{'1': ('kn', '66:02:75')},
{'1': ('kn', '66:02:76')},
{'1': ('kn', '66:02:77')},
{'1': ('kn', '66:02:78')},
{'1': ('kn', '66:02:79')},
{'1': ('kn', '66:02:80')},
{'1': ('kn', '66:02:81')},
{'1': ('kn', '66:02:82')},
{'1': ('kn', '66:02:83')},
{'1': ('kn', '66:02:84')},
{'1': ('kn', '66:02:85')},
{'1': ('kn', '66:02:86')},
{'1': ('kn', '66:02:87')},
{'1': ('kn', '66:02:88')},
{'1': ('kn', '66:02:89')},
{'1': ('kn', '66:02:90')},
{'1': ('kn', '66:02:91')},
{'1': ('kn', '66:02:92')},
{'1': ('kn', '66:02:93')},
{'1': ('kn', '66:02:94')},
{'1': ('kn', '66:02:95')},
{'1': ('kn', '66:02:96')},
{'1': ('kn', '66:02:97')},
{'1': ('kn', '66:02:98')},
{'1': ('kn', '66:02:99')},
{'1': ('kn', '66:03:00')},
{'1': ('kn', '66:03:01')},
{'1': ('kn', '66:03:02')},
{'1': ('kn', '66:03:03')},
{'1': ('kn', '66:03:04')},
{'1': ('kn', '66:03:05')},
{'1': ('kn', '66:03:06')},
{'1': ('kn', '66:03:07')},
{'1': ('kn', '66:03:08')},
{'1': ('kn', '66:03:09')},
{'1': ('kn', '66:03:10')},
{'1': ('kn', '66:03:11')},
{'1': ('kn', '66:03:12')},
{'1': ('kn', '66:03:13')},
{'1': ('kn', '66:03:14')},
{'1': ('kn', '66:03:15')},
{'1': ('kn', '66:03:16')},
{'1': ('kn', '66:03:17')},
{'1': ('kn', '66:03:18')},
{'1': ('kn', '66:03:19')},
{'1': ('kn', '66:03:20')},
{'1': ('kn', '66:03:21')},
{'1': ('kn', '66:03:22')},
{'1': ('kn', '66:03:23')},
{'1': ('kn', '66:03:24')},
{'1': ('kn', '66:03:25')},
{'1': ('kn', '66:03:26')},
{'1': ('kn', '66:03:27')},
{'1': ('kn', '66:03:28')},
{'1': ('kn', '66:03:29')},
{'1': ('kn', '66:03:30')},
{'1': ('kn', '66:03:31')},
{'1': ('kn', '66:03:32')},
{'1': ('kn', '66:03:33')},
{'1': ('kn', '66:03:34')},
{'1': ('kn', '66:03:35')},
{'1': ('kn', '66:03:36')},
{'1': ('kn', '66:03:37')},
{'1': ('kn', '66:03:38')},
{'1': ('kn', '66:03:39')},
{'1': ('kn', '66:03:40')},
{'1': ('kn', '66:03:41')},
{'1': ('kn', '66:03:42')},
{'1': ('kn', '66:03:43')},
{'1': ('kn', '66:03:44')},
{'1': ('kn', '66:03:45')},
{'1': ('kn', '66:03:46')},
{'1': ('kn', '66:03:47')},
{'1': ('kn', '66:03:48')},
{'1': ('kn', '66:03:49')},
{'1': ('kn', '66:03:50')},
{'1': ('kn', '66:03:51')},
{'1': ('kn', '66:03:52')},
{'1': ('kn', '66:03:53')},
{'1': ('kn', '66:03:54')},
{'1': ('kn', '66:03:55')},
{'1': ('kn', '66:03:56')},
{'1': ('kn', '66:03:57')},
{'1': ('kn', '66:03:58')},
{'1': ('kn', '66:03:59')},
{'1': ('kn', '66:03:60')},
{'1': ('kn', '66:03:61')},
{'1': ('kn', '66:03:62')},
{'1': ('kn', '66:03:63')},
{'1': ('kn', '66:03:64')},
{'1': ('kn', '66:03:65')},
{'1': ('kn', '66:03:66')},
{'1': ('kn', '66:03:67')},
{'1': ('kn', '66:03:68')},
{'1': ('kn', '66:03:69')},
{'1': ('kn', '66:03:70')},
{'1': ('kn', '66:03:71')},
{'1': ('kn', '66:03:72')},
{'1': ('kn', '66:03:73')},
{'1': ('kn', '66:03:74')},
{'1': ('kn', '66:03:75')},
{'1': ('kn', '66:03:76')},
{'1': ('kn', '66:03:77')},
{'1': ('kn', '66:03:78')},
{'1': ('kn', '66:03:79')},
{'1': ('kn', '66:03:80')},
{'1': ('kn', '66:03:81')},
{'1': ('kn', '66:03:82')},
{'1': ('kn', '66:03:83')},
{'1': ('kn', '66:03:84')},
{'1': ('kn', '66:03:85')},
{'1': ('kn', '66:03:86')},
{'1': ('kn', '66:03:87')},
{'1': ('kn', '66:03:88')},
{'1': ('kn', '66:03:89')},
{'1': ('kn', '66:03:90')},
{'1': ('kn', '66:03:91')},
{'1': ('kn', '66:03:92')},
{'1': ('kn', '66:03:93')},
{'1': ('kn', '66:03:94')},
{'1': ('kn', '66:03:95')},
{'1': ('kn', '66:03:96')},
{'1': ('kn', '66:03:97')},
{'1': ('kn', '66:03:98')},
{'1': ('kn', '66:03:99')},
{'1': ('kn', '66:04:00')},
{'1': ('kn', '66:04:01')},
{'1': ('kn', '66:04:02')},
{'1': ('kn', '66:04:03')},
{'1': ('kn', '66:04:04')},
{'1': ('kn', '66:04:05')},
{'1': ('kn', '66:04:06')},
{'1': ('kn', '66:04:07')},
{'1': ('kn', '66:04:08')},
{'1': ('kn', '66:04:09')},
{'1': ('kn', '66:04:10')},
{'1': ('kn', '66:04:11')},
{'1': ('kn', '66:04:12')},
{'1': ('kn', '66:04:13')},
{'1': ('kn', '66:04:14')},
{'1': ('kn', '66:04:15')},
{'1': ('kn', '66:04:16')},
{'1': ('kn', '66:04:17')},
{'1': ('kn', '66:04:18')},
{'1': ('kn', '66:04:19')},
{'1': ('kn', '66:04:20')},
{'1': ('kn', '66:04:21')},
{'1': ('kn', '66:04:22')},
{'1': ('kn', '66:04:23')},
{'1': ('kn', '66:04:24')},
{'1': ('kn', '66:04:25')},
{'1': ('kn', '66:04:26')},
{'1': ('kn', '66:04:27')},
{'1': ('kn', '66:04:28')},
{'1': ('kn', '66:04:29')},
{'1': ('kn', '66:04:30')},
{'1': ('kn', '66:04:31')},
{'1': ('kn', '66:04:32')},
{'1': ('kn', '66:04:33')},
{'1': ('kn', '66:04:34')},
{'1': ('kn', '66:04:35')},
{'1': ('kn', '66:04:36')},
{'1': ('kn', '66:04:37')},
{'1': ('kn', '66:04:38')},
{'1': ('kn', '66:04:39')},
{'1': ('kn', '66:04:40')},
{'1': ('kn', '66:04:41')},
{'1': ('kn', '66:04:42')},
{'1': ('kn', '66:04:43')},
{'1': ('kn', '66:04:44')},
{'1': ('kn', '66:04:45')},
{'1': ('kn', '66:04:46')},
{'1': ('kn', '66:04:47')},
{'1': ('kn', '66:04:48')},
{'1': ('kn', '66:04:49')},
{'1': ('kn', '66:04:50')},
{'1': ('kn', '66:04:51')},
{'1': ('kn', '66:04:52')},
{'1': ('kn', '66:04:53')},
{'1': ('kn', '66:04:54')},
{'1': ('kn', '66:04:55')},
{'1': ('kn', '66:04:56')},
{'1': ('kn', '66:04:57')},
{'1': ('kn', '66:04:58')},
{'1': ('kn', '66:04:59')},
{'1': ('kn', '66:04:60')},
{'1': ('kn', '66:04:61')},
{'1': ('kn', '66:04:62')},
{'1': ('kn', '66:04:63')},
{'1': ('kn', '66:04:64')},
{'1': ('kn', '66:04:65')},
{'1': ('kn', '66:04:66')},
{'1': ('kn', '66:04:67')},
{'1': ('kn', '66:04:68')},
{'1': ('kn', '66:04:69')},
{'1': ('kn', '66:04:70')},
{'1': ('kn', '66:04:71')},
{'1': ('kn', '66:04:72')},
{'1': ('kn', '66:04:73')},
{'1': ('kn', '66:04:74')},
{'1': ('kn', '66:04:75')},
{'1': ('kn', '66:04:76')},
{'1': ('kn', '66:04:77')},
{'1': ('kn', '66:04:78')},
{'1': ('kn', '66:04:79')},
{'1': ('kn', '66:04:80')},
{'1': ('kn', '66:04:81')},
{'1': ('kn', '66:04:82')},
{'1': ('kn', '66:04:83')},
{'1': ('kn', '66:04:84')},
{'1': ('kn', '66:04:85')},
{'1': ('kn', '66:04:86')},
{'1': ('kn', '66:04:87')},
{'1': ('kn', '66:04:88')},
{'1': ('kn', '66:04:89')},
{'1': ('kn', '66:04:90')},
{'1': ('kn', '66:04:91')},
{'1': ('kn', '66:04:92')},
{'1': ('kn', '66:04:93')},
{'1': ('kn', '66:04:94')},
{'1': ('kn', '66:04:95')},
{'1': ('kn', '66:04:96')},
{'1': ('kn', '66:04:97')},
{'1': ('kn', '66:04:98')},
{'1': ('kn', '66:04:99')},
{'1': ('kn', '66:05:00')},
{'1': ('kn', '66:05:01')},
{'1': ('kn', '66:05:02')},
{'1': ('kn', '66:05:03')},
{'1': ('kn', '66:05:04')},
{'1': ('kn', '66:05:05')},
{'1': ('kn', '66:05:06')},
{'1': ('kn', '66:05:07')},
{'1': ('kn', '66:05:08')},
{'1': ('kn', '66:05:09')},
{'1': ('kn', '66:05:10')},
{'1': ('kn', '66:05:11')},
{'1': ('kn', '66:05:12')},
{'1': ('kn', '66:05:13')},
{'1': ('kn', '66:05:14')},
{'1': ('kn', '66:05:15')},
{'1': ('kn', '66:05:16')},
{'1': ('kn', '66:05:17')},
{'1': ('kn', '66:05:18')},
{'1': ('kn', '66:05:19')},
{'1': ('kn', '66:05:20')},
{'1': ('kn', '66:05:21')},
{'1': ('kn', '66:05:22')},
{'1': ('kn', '66:05:23')},
{'1': ('kn', '66:05:24')},
{'1': ('kn', '66:05:25')},
{'1': ('kn', '66:05:26')},
{'1': ('kn', '66:05:27')},
{'1': ('kn', '66:05:28')},
{'1': ('kn', '66:05:29')},
{'1': ('kn', '66:05:30')},
{'1': ('kn', '66:05:31')},
{'1': ('kn', '66:05:32')},
{'1': ('kn', '66:05:33')},
{'1': ('kn', '66:05:34')},
{'1': ('kn', '66:05:35')},
{'1': ('kn', '66:05:36')},
{'1': ('kn', '66:05:37')},
{'1': ('kn', '66:05:38')},
{'1': ('kn', '66:05:39')},
{'1': ('kn', '66:05:40')},
{'1': ('kn', '66:05:41')},
{'1': ('kn', '66:05:42')},
{'1': ('kn', '66:05:43')},
{'1': ('kn', '66:05:44')},
{'1': ('kn', '66:05:45')},
{'1': ('kn', '66:05:46')},
{'1': ('kn', '66:05:47')},
{'1': ('kn', '66:05:48')},
{'1': ('kn', '66:05:49')},
{'1': ('kn', '66:05:50')},
{'1': ('kn', '66:05:51')},
{'1': ('kn', '66:05:52')},
{'1': ('kn', '66:05:53')},
{'1': ('kn', '66:05:54')},
{'1': ('kn', '66:05:55')},
{'1': ('kn', '66:05:56')},
{'1': ('kn', '66:05:57')},
{'1': ('kn', '66:05:58')},
{'1': ('kn', '66:05:59')},
{'1': ('kn', '66:05:60')},
{'1': ('kn', '66:05:61')},
{'1': ('kn', '66:05:62')},
{'1': ('kn', '66:05:63')},
{'1': ('kn', '66:05:64')},
{'1': ('kn', '66:05:65')},
{'1': ('kn', '66:05:66')},
{'1': ('kn', '66:05:67')},
{'1': ('kn', '66:05:68')},
{'1': ('kn', '66:05:69')},
{'1': ('kn', '66:05:70')},
{'1': ('kn', '66:05:71')},
{'1': ('kn', '66:05:72')},
{'1': ('kn', '66:05:73')},
{'1': ('kn', '66:05:74')},
{'1': ('kn', '66:05:75')},
{'1': ('kn', '66:05:76')},
{'1': ('kn', '66:05:77')},
{'1': ('kn', '66:05:78')},
{'1': ('kn', '66:05:79')},
{'1': ('kn', '66:05:80')},
{'1': ('kn', '66:05:81')},
{'1': ('kn', '66:05:82')},
{'1': ('kn', '66:05:83')},
{'1': ('kn', '66:05:84')},
{'1': ('kn', '66:05:85')},
{'1': ('kn', '66:05:86')},
{'1': ('kn', '66:05:87')},
{'1': ('kn', '66:05:88')},
{'1': ('kn', '66:05:89')},
{'1': ('kn', '66:05:90')},
{'1': ('kn', '66:05:91')},
{'1': ('kn', '66:05:92')},
{'1': ('kn', '66:05:93')},
{'1': ('kn', '66:05:94')},
{'1': ('kn', '66:05:95')},
{'1': ('kn', '66:05:96')},
{'1': ('kn', '66:05:97')},
{'1': ('kn', '66:05:98')},
{'1': ('kn', '66:05:99')},
{'1': ('kn', '66:06:00')},
{'1': ('kn', '66:06:01')},
{'1': ('kn', '66:06:02')},
{'1': ('kn', '66:06:03')},
{'1': ('kn', '66:06:04')},
{'1': ('kn', '66:06:05')},
{'1': ('kn', '66:06:06')},
{'1': ('kn', '66:06:07')},
{'1': ('kn', '66:06:08')},
{'1': ('kn', '66:06:09')},
{'1': ('kn', '66:06:10')},
{'1': ('kn', '66:06:11')},
{'1': ('kn', '66:06:12')},
{'1': ('kn', '66:06:13')},
{'1': ('kn', '66:06:14')},
{'1': ('kn', '66:06:15')},
{'1': ('kn', '66:06:16')},
{'1': ('kn', '66:06:17')},
{'1': ('kn', '66:06:18')},
{'1': ('kn', '66:06:19')},
{'1': ('kn', '66:06:20')},
{'1': ('kn', '66:06:21')},
{'1': ('kn', '66:06:22')},
{'1': ('kn', '66:06:23')},
{'1': ('kn', '66:06:24')},
{'1': ('kn', '66:06:25')},
{'1': ('kn', '66:06:26')},
{'1': ('kn', '66:06:27')},
{'1': ('kn', '66:06:28')},
{'1': ('kn', '66:06:29')},
{'1': ('kn', '66:06:30')},
{'1': ('kn', '66:06:31')},
{'1': ('kn', '66:06:32')},
{'1': ('kn', '66:06:33')},
{'1': ('kn', '66:06:34')},
{'1': ('kn', '66:06:35')},
{'1': ('kn', '66:06:36')},
{'1': ('kn', '66:06:37')},
{'1': ('kn', '66:06:38')},
{'1': ('kn', '66:06:39')},
{'1': ('kn', '66:06:40')},
{'1': ('kn', '66:06:41')},
{'1': ('kn', '66:06:42')},
{'1': ('kn', '66:06:43')},
{'1': ('kn', '66:06:44')},
{'1': ('kn', '66:06:45')},
{'1': ('kn', '66:06:46')},
{'1': ('kn', '66:06:47')},
{'1': ('kn', '66:06:48')},
{'1': ('kn', '66:06:49')},
{'1': ('kn', '66:06:50')},
{'1': ('kn', '66:06:51')},
{'1': ('kn', '66:06:52')},
{'1': ('kn', '66:06:53')},
{'1': ('kn', '66:06:54')},
{'1': ('kn', '66:06:55')},
{'1': ('kn', '66:06:56')},
{'1': ('kn', '66:06:57')},
{'1': ('kn', '66:06:58')},
{'1': ('kn', '66:06:59')},
{'1': ('kn', '66:06:60')},
{'1': ('kn', '66:06:61')},
{'1': ('kn', '66:06:62')},
{'1': ('kn', '66:06:63')},
{'1': ('kn', '66:06:64')},
{'1': ('kn', '66:06:65')},
{'1': ('kn', '66:06:66')},
{'1': ('kn', '66:06:67')},
{'1': ('kn', '66:06:68')},
{'1': ('kn', '66:06:69')},
{'1': ('kn', '66:06:70')},
{'1': ('kn', '66:06:71')},
{'1': ('kn', '66:06:72')},
{'1': ('kn', '66:06:73')},
{'1': ('kn', '66:06:74')},
{'1': ('kn', '66:06:75')},
{'1': ('kn', '66:06:76')},
{'1': ('kn', '66:06:77')},
{'1': ('kn', '66:06:78')},
{'1': ('kn', '66:06:79')},
{'1': ('kn', '66:06:80')},
{'1': ('kn', '66:06:81')},
{'1': ('kn', '66:06:82')},
{'1': ('kn', '66:06:83')},
{'1': ('kn', '66:06:84')},
{'1': ('kn', '66:06:85')},
{'1': ('kn', '66:06:86')},
{'1': ('kn', '66:06:87')},
{'1': ('kn', '66:06:88')},
{'1': ('kn', '66:06:89')},
{'1': ('kn', '66:06:90')},
{'1': ('kn', '66:06:91')},
{'1': ('kn', '66:06:92')},
{'1': ('kn', '66:06:93')},
{'1': ('kn', '66:06:94')},
{'1': ('kn', '66:06:95')},
{'1': ('kn', '66:06:96')},
{'1': ('kn', '66:06:97')},
{'1': ('kn', '66:06:98')},
{'1': ('kn', '66:06:99')},
{'1': ('kn', '66:07:00')},
{'1': ('kn', '66:07:01')},
{'1': ('kn', '66:07:02')},
{'1': ('kn', '66:07:03')},
{'1': ('kn', '66:07:04')},
{'1': ('kn', '66:07:05')},
{'1': ('kn', '66:07:06')},
{'1': ('kn', '66:07:07')},
{'1': ('kn', '66:07:08')},
{'1': ('kn', '66:07:09')},
{'1': ('kn', '66:07:10')},
{'1': ('kn', '66:07:11')},
{'1': ('kn', '66:07:12')},
{'1': ('kn', '66:07:13')},
{'1': ('kn', '66:07:14')},
{'1': ('kn', '66:07:15')},
{'1': ('kn', '66:07:16')},
{'1': ('kn', '66:07:17')},
{'1': ('kn', '66:07:18')},
{'1': ('kn', '66:07:19')},
{'1': ('kn', '66:07:20')},
{'1': ('kn', '66:07:21')},
{'1': ('kn', '66:07:22')},
{'1': ('kn', '66:07:23')},
{'1': ('kn', '66:07:24')},
{'1': ('kn', '66:07:25')},
{'1': ('kn', '66:07:26')},
{'1': ('kn', '66:07:27')},
{'1': ('kn', '66:07:28')},
{'1': ('kn', '66:07:29')},
{'1': ('kn', '66:07:30')},
{'1': ('kn', '66:07:31')},
{'1': ('kn', '66:07:32')},
{'1': ('kn', '66:07:33')},
{'1': ('kn', '66:07:34')},
{'1': ('kn', '66:07:35')},
{'1': ('kn', '66:07:36')},
{'1': ('kn', '66:07:37')},
{'1': ('kn', '66:07:38')},
{'1': ('kn', '66:07:39')},
{'1': ('kn', '66:07:40')},
{'1': ('kn', '66:07:41')},
{'1': ('kn', '66:07:42')},
{'1': ('kn', '66:07:43')},
{'1': ('kn', '66:07:44')},
{'1': ('kn', '66:07:45')},
{'1': ('kn', '66:07:46')},
{'1': ('kn', '66:07:47')},
{'1': ('kn', '66:07:48')},
{'1': ('kn', '66:07:49')},
{'1': ('kn', '66:07:50')},
{'1': ('kn', '66:07:51')},
{'1': ('kn', '66:07:52')},
{'1': ('kn', '66:07:53')},
{'1': ('kn', '66:07:54')},
{'1': ('kn', '66:07:55')},
{'1': ('kn', '66:07:56')},
{'1': ('kn', '66:07:57')},
{'1': ('kn', '66:07:58')},
{'1': ('kn', '66:07:59')},
{'1': ('kn', '66:07:60')},
{'1': ('kn', '66:07:61')},
{'1': ('kn', '66:07:62')},
{'1': ('kn', '66:07:63')},
{'1': ('kn', '66:07:64')},
{'1': ('kn', '66:07:65')},
{'1': ('kn', '66:07:66')},
{'1': ('kn', '66:07:67')},
{'1': ('kn', '66:07:68')},
{'1': ('kn', '66:07:69')},
{'1': ('kn', '66:07:70')},
{'1': ('kn', '66:07:71')},
{'1': ('kn', '66:07:72')},
{'1': ('kn', '66:07:73')},
{'1': ('kn', '66:07:74')},
{'1': ('kn', '66:07:75')},
{'1': ('kn', '66:07:76')},
{'1': ('kn', '66:07:77')},
{'1': ('kn', '66:07:78')},
{'1': ('kn', '66:07:79')},
{'1': ('kn', '66:07:80')},
{'1': ('kn', '66:07:81')},
{'1': ('kn', '66:07:82')},
{'1': ('kn', '66:07:83')},
{'1': ('kn', '66:07:84')},
{'1': ('kn', '66:07:85')},
{'1': ('kn', '66:07:86')},
{'1': ('kn', '66:07:87')},
{'1': ('kn', '66:07:88')},
{'1': ('kn', '66:07:89')},
{'1': ('kn', '66:07:90')},
{'1': ('kn', '66:07:91')},
{'1': ('kn', '66:07:92')},
{'1': ('kn', '66:07:93')},
{'1': ('kn', '66:07:94')},
{'1': ('kn', '66:07:95')},
{'1': ('kn', '66:07:96')},
{'1': ('kn', '66:07:97')},
{'1': ('kn', '66:07:98')},
{'1': ('kn', '66:07:99')},
{'1': ('kn', '66:08:00')},
{'1': ('kn', '66:08:01')},
{'1': ('kn', '66:08:02')},
{'1': ('kn', '66:08:03')},
{'1': ('kn', '66:08:04')},
{'1': ('kn', '66:08:05')},
{'1': ('kn', '66:08:06')},
{'1': ('kn', '66:08:07')},
{'1': ('kn', '66:08:08')},
{'1': ('kn', '66:08:09')},
{'1': ('kn', '66:08:10')},
{'1': ('kn', '66:08:11')},
{'1': ('kn', '66:08:12')},
{'1': ('kn', '66:08:13')},
{'1': ('kn', '66:08:14')},
{'1': ('kn', '66:08:15')},
{'1': ('kn', '66:08:16')},
{'1': ('kn', '66:08:17')},
{'1': ('kn', '66:08:18')},
{'1': ('kn', '66:08:19')},
{'1': ('kn', '66:08:20')},
{'1': ('kn', '66:08:21')},
{'1': ('kn', '66:08:22')},
{'1': ('kn', '66:08:23')},
{'1': ('kn', '66:08:24')},
{'1': ('kn', '66:08:25')},
{'1': ('kn', '66:08:26')},
{'1': ('kn', '66:08:27')},
{'1': ('kn', '66:08:28')},
{'1': ('kn', '66:08:29')},
{'1': ('kn', '66:08:30')},
{'1': ('kn', '66:08:31')},
{'1': ('kn', '66:08:32')},
{'1': ('kn', '66:08:33')},
{'1': ('kn', '66:08:34')},
{'1': ('kn', '66:08:35')},
{'1': ('kn', '66:08:36')},
{'1': ('kn', '66:08:37')},
{'1': ('kn', '66:08:38')},
{'1': ('kn', '66:08:39')},
{'1': ('kn', '66:08:40')},
{'1': ('kn', '66:08:41')},
{'1': ('kn', '66:08:42')},
{'1': ('kn', '66:08:43')},
{'1': ('kn', '66:08:44')},
{'1': ('kn', '66:08:45')},
{'1': ('kn', '66:08:46')},
{'1': ('kn', '66:08:47')},
{'1': ('kn', '66:08:48')},
{'1': ('kn', '66:08:49')},
{'1': ('kn', '66:08:50')},
{'1': ('kn', '66:08:51')},
{'1': ('kn', '66:08:52')},
{'1': ('kn', '66:08:53')},
{'1': ('kn', '66:08:54')},
{'1': ('kn', '66:08:55')},
{'1': ('kn', '66:08:56')},
{'1': ('kn', '66:08:57')},
{'1': ('kn', '66:08:58')},
{'1': ('kn', '66:08:59')},
{'1': ('kn', '66:08:60')},
{'1': ('kn', '66:08:61')},
{'1': ('kn', '66:08:62')},
{'1': ('kn', '66:08:63')},
{'1': ('kn', '66:08:64')},
{'1': ('kn', '66:08:65')},
{'1': ('kn', '66:08:66')},
{'1': ('kn', '66:08:67')},
{'1': ('kn', '66:08:68')},
{'1': ('kn', '66:08:69')},
{'1': ('kn', '66:08:70')},
{'1': ('kn', '66:08:71')},
{'1': ('kn', '66:08:72')},
{'1': ('kn', '66:08:73')},
{'1': ('kn', '66:08:74')},
{'1': ('kn', '66:08:75')},
{'1': ('kn', '66:08:76')},
{'1': ('kn', '66:08:77')},
{'1': ('kn', '66:08:78')},
{'1': ('kn', '66:08:79')},
{'1': ('kn', '66:08:80')},
{'1': ('kn', '66:08:81')},
{'1': ('kn', '66:08:82')},
{'1': ('kn', '66:08:83')},
{'1': ('kn', '66:08:84')},
{'1': ('kn', '66:08:85')},
{'1': ('kn', '66:08:86')},
{'1': ('kn', '66:08:87')},
{'1': ('kn', '66:08:88')},
{'1': ('kn', '66:08:89')},
{'1': ('kn', '66:08:90')},
{'1': ('kn', '66:08:91')},
{'1': ('kn', '66:08:92')},
{'1': ('kn', '66:08:93')},
{'1': ('kn', '66:08:94')},
{'1': ('kn', '66:08:95')},
{'1': ('kn', '66:08:96')},
{'1': ('kn', '66:08:97')},
{'1': ('kn', '66:08:98')},
{'1': ('kn', '66:08:99')},
{'1': ('kn', '66:09:00')},
{'1': ('kn', '66:09:01')},
{'1': ('kn', '66:09:02')},
{'1': ('kn', '66:09:03')},
{'1': ('kn', '66:09:04')},
{'1': ('kn', '66:09:05')},
{'1': ('kn', '66:09:06')},
{'1': ('kn', '66:09:07')},
{'1': ('kn', '66:09:08')},
{'1': ('kn', '66:09:09')},
{'1': ('kn', '66:09:10')},
{'1': ('kn', '66:09:11')},
{'1': ('kn', '66:09:12')},
{'1': ('kn', '66:09:13')},
{'1': ('kn', '66:09:14')},
{'1': ('kn', '66:09:15')},
{'1': ('kn', '66:09:16')},
{'1': ('kn', '66:09:17')},
{'1': ('kn', '66:09:18')},
{'1': ('kn', '66:09:19')},
{'1': ('kn', '66:09:20')},
{'1': ('kn', '66:09:21')},
{'1': ('kn', '66:09:22')},
{'1': ('kn', '66:09:23')},
{'1': ('kn', '66:09:24')},
{'1': ('kn', '66:09:25')},
{'1': ('kn', '66:09:26')},
{'1': ('kn', '66:09:27')},
{'1': ('kn', '66:09:28')},
{'1': ('kn', '66:09:29')},
{'1': ('kn', '66:09:30')},
{'1': ('kn', '66:09:31')},
{'1': ('kn', '66:09:32')},
{'1': ('kn', '66:09:33')},
{'1': ('kn', '66:09:34')},
{'1': ('kn', '66:09:35')},
{'1': ('kn', '66:09:36')},
{'1': ('kn', '66:09:37')},
{'1': ('kn', '66:09:38')},
{'1': ('kn', '66:09:39')},
{'1': ('kn', '66:09:40')},
{'1': ('kn', '66:09:41')},
{'1': ('kn', '66:09:42')},
{'1': ('kn', '66:09:43')},
{'1': ('kn', '66:09:44')},
{'1': ('kn', '66:09:45')},
{'1': ('kn', '66:09:46')},
{'1': ('kn', '66:09:47')},
{'1': ('kn', '66:09:48')},
{'1': ('kn', '66:09:49')},
{'1': ('kn', '66:09:50')},
{'1': ('kn', '66:09:51')},
{'1': ('kn', '66:09:52')},
{'1': ('kn', '66:09:53')},
{'1': ('kn', '66:09:54')},
{'1': ('kn', '66:09:55')},
{'1': ('kn', '66:09:56')},
{'1': ('kn', '66:09:57')},
{'1': ('kn', '66:09:58')},
{'1': ('kn', '66:09:59')},
{'1': ('kn', '66:09:60')},
{'1': ('kn', '66:09:61')},
{'1': ('kn', '66:09:62')},
{'1': ('kn', '66:09:63')},
{'1': ('kn', '66:09:64')},
{'1': ('kn', '66:09:65')},
{'1': ('kn', '66:09:66')},
{'1': ('kn', '66:09:67')},
{'1': ('kn', '66:09:68')},
{'1': ('kn', '66:09:69')},
{'1': ('kn', '66:09:70')},
{'1': ('kn', '66:09:71')},
{'1': ('kn', '66:09:72')},
{'1': ('kn', '66:09:73')},
{'1': ('kn', '66:09:74')},
{'1': ('kn', '66:09:75')},
{'1': ('kn', '66:09:76')},
{'1': ('kn', '66:09:77')},
{'1': ('kn', '66:09:78')},
{'1': ('kn', '66:09:79')},
{'1': ('kn', '66:09:80')},
{'1': ('kn', '66:09:81')},
{'1': ('kn', '66:09:82')},
{'1': ('kn', '66:09:83')},
{'1': ('kn', '66:09:84')},
{'1': ('kn', '66:09:85')},
{'1': ('kn', '66:09:86')},
{'1': ('kn', '66:09:87')},
{'1': ('kn', '66:09:88')},
{'1': ('kn', '66:09:89')},
{'1': ('kn', '66:09:90')},
{'1': ('kn', '66:09:91')},
{'1': ('kn', '66:09:92')},
{'1': ('kn', '66:09:93')},
{'1': ('kn', '66:09:94')},
{'1': ('kn', '66:09:95')},
{'1': ('kn', '66:09:96')},
{'1': ('kn', '66:09:97')},
{'1': ('kn', '66:09:98')},
{'1': ('kn', '66:09:99')},
{'1': ('kn', '66:10:00')},
{'1': ('kn', '66:10:01')},
{'1': ('kn', '66:10:02')},
{'1': ('kn', '66:10:03')},
{'1': ('kn', '66:10:04')},
{'1': ('kn', '66:10:05')},
{'1': ('kn', '66:10:06')},
{'1': ('kn', '66:10:07')},
{'1': ('kn', '66:10:08')},
{'1': ('kn', '66:10:09')},
{'1': ('kn', '66:10:10')},
{'1': ('kn', '66:10:11')},
{'1': ('kn', '66:10:12')},
{'1': ('kn', '66:10:13')},
{'1': ('kn', '66:10:14')},
{'1': ('kn', '66:10:15')},
{'1': ('kn', '66:10:16')},
{'1': ('kn', '66:10:17')},
{'1': ('kn', '66:10:18')},
{'1': ('kn', '66:10:19')},
{'1': ('kn', '66:10:20')},
{'1': ('kn', '66:10:21')},
{'1': ('kn', '66:10:22')},
{'1': ('kn', '66:10:23')},
{'1': ('kn', '66:10:24')},
{'1': ('kn', '66:10:25')},
{'1': ('kn', '66:10:26')},
{'1': ('kn', '66:10:27')},
{'1': ('kn', '66:10:28')},
{'1': ('kn', '66:10:29')},
{'1': ('kn', '66:10:30')},
{'1': ('kn', '66:10:31')},
{'1': ('kn', '66:10:32')},
{'1': ('kn', '66:10:33')},
{'1': ('kn', '66:10:34')},
{'1': ('kn', '66:10:35')},
{'1': ('kn', '66:10:36')},
{'1': ('kn', '66:10:37')},
{'1': ('kn', '66:10:38')},
{'1': ('kn', '66:10:39')},
{'1': ('kn', '66:10:40')},
{'1': ('kn', '66:10:41')},
{'1': ('kn', '66:10:42')},
{'1': ('kn', '66:10:43')},
{'1': ('kn', '66:10:44')},
{'1': ('kn', '66:10:45')},
{'1': ('kn', '66:10:46')},
{'1': ('kn', '66:10:47')},
{'1': ('kn', '66:10:48')},
{'1': ('kn', '66:10:49')},
{'1': ('kn', '66:10:50')},
{'1': ('kn', '66:10:51')},
{'1': ('kn', '66:10:52')},
{'1': ('kn', '66:10:53')},
{'1': ('kn', '66:10:54')},
{'1': ('kn', '66:10:55')},
{'1': ('kn', '66:10:56')},
{'1': ('kn', '66:10:57')},
{'1': ('kn', '66:10:58')},
{'1': ('kn', '66:10:59')},
{'1': ('kn', '66:10:60')},
{'1': ('kn', '66:10:61')},
{'1': ('kn', '66:10:62')},
{'1': ('kn', '66:10:63')},
{'1': ('kn', '66:10:64')},
{'1': ('kn', '66:10:65')},
{'1': ('kn', '66:10:66')},
{'1': ('kn', '66:10:67')},
{'1': ('kn', '66:10:68')},
{'1': ('kn', '66:10:69')},
{'1': ('kn', '66:10:70')},
{'1': ('kn', '66:10:71')},
{'1': ('kn', '66:10:72')},
{'1': ('kn', '66:10:73')},
{'1': ('kn', '66:10:74')},
{'1': ('kn', '66:10:75')},
{'1': ('kn', '66:10:76')},
{'1': ('kn', '66:10:77')},
{'1': ('kn', '66:10:78')},
{'1': ('kn', '66:10:79')},
{'1': ('kn', '66:10:80')},
{'1': ('kn', '66:10:81')},
{'1': ('kn', '66:10:82')},
{'1': ('kn', '66:10:83')},
{'1': ('kn', '66:10:84')},
{'1': ('kn', '66:10:85')},
{'1': ('kn', '66:10:86')},
{'1': ('kn', '66:10:87')},
{'1': ('kn', '66:10:88')},
{'1': ('kn', '66:10:89')},
{'1': ('kn', '66:10:90')},
{'1': ('kn', '66:10:91')},
{'1': ('kn', '66:10:92')},
{'1': ('kn', '66:10:93')},
{'1': ('kn', '66:10:94')},
{'1': ('kn', '66:10:95')},
{'1': ('kn', '66:10:96')},
{'1': ('kn', '66:10:97')},
{'1': ('kn', '66:10:98')},
{'1': ('kn', '66:10:99')},
{'1': ('kn', '66:11:00')},
{'1': ('kn', '66:11:01')},
{'1': ('kn', '66:11:02')},
{'1': ('kn', '66:11:03')},
{'1': ('kn', '66:11:04')},
{'1': ('kn', '66:11:05')},
{'1': ('kn', '66:11:06')},
{'1': ('kn', '66:11:07')},
{'1': ('kn', '66:11:08')},
{'1': ('kn', '66:11:09')},
{'1': ('kn', '66:11:10')},
{'1': ('kn', '66:11:11')},
{'1': ('kn', '66:11:12')},
{'1': ('kn', '66:11:13')},
{'1': ('kn', '66:11:14')},
{'1': ('kn', '66:11:15')},
{'1': ('kn', '66:11:16')},
{'1': ('kn', '66:11:17')},
{'1': ('kn', '66:11:18')},
{'1': ('kn', '66:11:19')},
{'1': ('kn', '66:11:20')},
{'1': ('kn', '66:11:21')},
{'1': ('kn', '66:11:22')},
{'1': ('kn', '66:11:23')},
{'1': ('kn', '66:11:24')},
{'1': ('kn', '66:11:25')},
{'1': ('kn', '66:11:26')},
{'1': ('kn', '66:11:27')},
{'1': ('kn', '66:11:28')},
{'1': ('kn', '66:11:29')},
{'1': ('kn', '66:11:30')},
{'1': ('kn', '66:11:31')},
{'1': ('kn', '66:11:32')},
{'1': ('kn', '66:11:33')},
{'1': ('kn', '66:11:34')},
{'1': ('kn', '66:11:35')},
{'1': ('kn', '66:11:36')},
{'1': ('kn', '66:11:37')},
{'1': ('kn', '66:11:38')},
{'1': ('kn', '66:11:39')},
{'1': ('kn', '66:11:40')},
{'1': ('kn', '66:11:41')},
{'1': ('kn', '66:11:42')},
{'1': ('kn', '66:11:43')},
{'1': ('kn', '66:11:44')},
{'1': ('kn', '66:11:45')},
{'1': ('kn', '66:11:46')},
{'1': ('kn', '66:11:47')},
{'1': ('kn', '66:11:48')},
{'1': ('kn', '66:11:49')},
{'1': ('kn', '66:11:50')},
{'1': ('kn', '66:11:51')},
{'1': ('kn', '66:11:52')},
{'1': ('kn', '66:11:53')},
{'1': ('kn', '66:11:54')},
{'1': ('kn', '66:11:55')},
{'1': ('kn', '66:11:56')},
{'1': ('kn', '66:11:57')},
{'1': ('kn', '66:11:58')},
{'1': ('kn', '66:11:59')},
{'1': ('kn', '66:11:60')},
{'1': ('kn', '66:11:61')},
{'1': ('kn', '66:11:62')},
{'1': ('kn', '66:11:63')},
{'1': ('kn', '66:11:64')},
{'1': ('kn', '66:11:65')},
{'1': ('kn', '66:11:66')},
{'1': ('kn', '66:11:67')},
{'1': ('kn', '66:11:68')},
{'1': ('kn', '66:11:69')},
{'1': ('kn', '66:11:70')},
{'1': ('kn', '66:11:71')},
{'1': ('kn', '66:11:72')},
{'1': ('kn', '66:11:73')},
{'1': ('kn', '66:11:74')},
{'1': ('kn', '66:11:75')},
{'1': ('kn', '66:11:76')},
{'1': ('kn', '66:11:77')},
{'1': ('kn', '66:11:78')},
{'1': ('kn', '66:11:79')},
{'1': ('kn', '66:11:80')},
{'1': ('kn', '66:11:81')},
{'1': ('kn', '66:11:82')},
{'1': ('kn', '66:11:83')},
{'1': ('kn', '66:11:84')},
{'1': ('kn', '66:11:85')},
{'1': ('kn', '66:11:86')},
{'1': ('kn', '66:11:87')},
{'1': ('kn', '66:11:88')},
{'1': ('kn', '66:11:89')},
{'1': ('kn', '66:11:90')},
{'1': ('kn', '66:11:91')},
{'1': ('kn', '66:11:92')},
{'1': ('kn', '66:11:93')},
{'1': ('kn', '66:11:94')},
{'1': ('kn', '66:11:95')},
{'1': ('kn', '66:11:96')},
{'1': ('kn', '66:11:97')},
{'1': ('kn', '66:11:98')},
{'1': ('kn', '66:11:99')},
{'1': ('kn', '66:12:00')},
{'1': ('kn', '66:12:01')},
{'1': ('kn', '66:12:02')},
{'1': ('kn', '66:12:03')},
{'1': ('kn', '66:12:04')},
{'1': ('kn', '66:12:05')},
{'1': ('kn', '66:12:06')},
{'1': ('kn', '66:12:07')},
{'1': ('kn', '66:12:08')},
{'1': ('kn', '66:12:09')},
{'1': ('kn', '66:12:10')},
{'1': ('kn', '66:12:11')},
{'1': ('kn', '66:12:12')},
{'1': ('kn', '66:12:13')},
{'1': ('kn', '66:12:14')},
{'1': ('kn', '66:12:15')},
{'1': ('kn', '66:12:16')},
{'1': ('kn', '66:12:17')},
{'1': ('kn', '66:12:18')},
{'1': ('kn', '66:12:19')},
{'1': ('kn', '66:12:20')},
{'1': ('kn', '66:12:21')},
{'1': ('kn', '66:12:22')},
{'1': ('kn', '66:12:23')},
{'1': ('kn', '66:12:24')},
{'1': ('kn', '66:12:25')},
{'1': ('kn', '66:12:26')},
{'1': ('kn', '66:12:27')},
{'1': ('kn', '66:12:28')},
{'1': ('kn', '66:12:29')},
{'1': ('kn', '66:12:30')},
{'1': ('kn', '66:12:31')},
{'1': ('kn', '66:12:32')},
{'1': ('kn', '66:12:33')},
{'1': ('kn', '66:12:34')},
{'1': ('kn', '66:12:35')},
{'1': ('kn', '66:12:36')},
{'1': ('kn', '66:12:37')},
{'1': ('kn', '66:12:38')},
{'1': ('kn', '66:12:39')},
{'1': ('kn', '66:12:40')},
{'1': ('kn', '66:12:41')},
{'1': ('kn', '66:12:42')},
{'1': ('kn', '66:12:43')},
{'1': ('kn', '66:12:44')},
{'1': ('kn', '66:12:45')},
{'1': ('kn', '66:12:46')},
{'1': ('kn', '66:12:47')},
{'1': ('kn', '66:12:48')},
{'1': ('kn', '66:12:49')},
{'1': ('kn', '66:12:50')},
{'1': ('kn', '66:12:51')},
{'1': ('kn', '66:12:52')},
{'1': ('kn', '66:12:53')},
{'1': ('kn', '66:12:54')},
{'1': ('kn', '66:12:55')},
{'1': ('kn', '66:12:56')},
{'1': ('kn', '66:12:57')},
{'1': ('kn', '66:12:58')},
{'1': ('kn', '66:12:59')},
{'1': ('kn', '66:12:60')},
{'1': ('kn', '66:12:61')},
{'1': ('kn', '66:12:62')},
{'1': ('kn', '66:12:63')},
{'1': ('kn', '66:12:64')},
{'1': ('kn', '66:12:65')},
{'1': ('kn', '66:12:66')},
{'1': ('kn', '66:12:67')},
{'1': ('kn', '66:12:68')},
{'1': ('kn', '66:12:69')},
{'1': ('kn', '66:12:70')},
{'1': ('kn', '66:12:71')},
{'1': ('kn', '66:12:72')},
{'1': ('kn', '66:12:73')},
{'1': ('kn', '66:12:74')},
{'1': ('kn', '66:12:75')},
{'1': ('kn', '66:12:76')},
{'1': ('kn', '66:12:77')},
{'1': ('kn', '66:12:78')},
{'1': ('kn', '66:12:79')},
{'1': ('kn', '66:12:80')},
{'1': ('kn', '66:12:81')},
{'1': ('kn', '66:12:82')},
{'1': ('kn', '66:12:83')},
{'1': ('kn', '66:12:84')},
{'1': ('kn', '66:12:85')},
{'1': ('kn', '66:12:86')},
{'1': ('kn', '66:12:87')},
{'1': ('kn', '66:12:88')},
{'1': ('kn', '66:12:89')},
{'1': ('kn', '66:12:90')},
{'1': ('kn', '66:12:91')},
{'1': ('kn', '66:12:92')},
{'1': ('kn', '66:12:93')},
{'1': ('kn', '66:12:94')},
{'1': ('kn', '66:12:95')},
{'1': ('kn', '66:12:96')},
{'1': ('kn', '66:12:97')},
{'1': ('kn', '66:12:98')},
{'1': ('kn', '66:12:99')},
{'1': ('kn', '66:13:00')},
{'1': ('kn', '66:13:01')},
{'1': ('kn', '66:13:02')},
{'1': ('kn', '66:13:03')},
{'1': ('kn', '66:13:04')},
{'1': ('kn', '66:13:05')},
{'1': ('kn', '66:13:06')},
{'1': ('kn', '66:13:07')},
{'1': ('kn', '66:13:08')},
{'1': ('kn', '66:13:09')},
{'1': ('kn', '66:13:10')},
{'1': ('kn', '66:13:11')},
{'1': ('kn', '66:13:12')},
{'1': ('kn', '66:13:13')},
{'1': ('kn', '66:13:14')},
{'1': ('kn', '66:13:15')},
{'1': ('kn', '66:13:16')},
{'1': ('kn', '66:13:17')},
{'1': ('kn', '66:13:18')},
{'1': ('kn', '66:13:19')},
{'1': ('kn', '66:13:20')},
{'1': ('kn', '66:13:21')},
{'1': ('kn', '66:13:22')},
{'1': ('kn', '66:13:23')},
{'1': ('kn', '66:13:24')},
{'1': ('kn', '66:13:25')},
{'1': ('kn', '66:13:26')},
{'1': ('kn', '66:13:27')},
{'1': ('kn', '66:13:28')},
{'1': ('kn', '66:13:29')},
{'1': ('kn', '66:13:30')},
{'1': ('kn', '66:13:31')},
{'1': ('kn', '66:13:32')},
{'1': ('kn', '66:13:33')},
{'1': ('kn', '66:13:34')},
{'1': ('kn', '66:13:35')},
{'1': ('kn', '66:13:36')},
{'1': ('kn', '66:13:37')},
{'1': ('kn', '66:13:38')},
{'1': ('kn', '66:13:39')},
{'1': ('kn', '66:13:40')},
{'1': ('kn', '66:13:41')},
{'1': ('kn', '66:13:42')},
{'1': ('kn', '66:13:43')},
{'1': ('kn', '66:13:44')},
{'1': ('kn', '66:13:45')},
{'1': ('kn', '66:13:46')},
{'1': ('kn', '66:13:47')},
{'1': ('kn', '66:13:48')},
{'1': ('kn', '66:13:49')},
{'1': ('kn', '66:13:50')},
{'1': ('kn', '66:13:51')},
{'1': ('kn', '66:13:52')},
{'1': ('kn', '66:13:53')},
{'1': ('kn', '66:13:54')},
{'1': ('kn', '66:13:55')},
{'1': ('kn', '66:13:56')},
{'1': ('kn', '66:13:57')},
{'1': ('kn', '66:13:58')},
{'1': ('kn', '66:13:59')},
{'1': ('kn', '66:13:60')},
{'1': ('kn', '66:13:61')},
{'1': ('kn', '66:13:62')},
{'1': ('kn', '66:13:63')},
{'1': ('kn', '66:13:64')},
{'1': ('kn', '66:13:65')},
{'1': ('kn', '66:13:66')},
{'1': ('kn', '66:13:67')},
{'1': ('kn', '66:13:68')},
{'1': ('kn', '66:13:69')},
{'1': ('kn', '66:13:70')},
{'1': ('kn', '66:13:71')},
{'1': ('kn', '66:13:72')},
{'1': ('kn', '66:13:73')},
{'1': ('kn', '66:13:74')},
{'1': ('kn', '66:13:75')},
{'1': ('kn', '66:13:76')},
{'1': ('kn', '66:13:77')},
{'1': ('kn', '66:13:78')},
{'1': ('kn', '66:13:79')},
{'1': ('kn', '66:13:80')},
{'1': ('kn', '66:13:81')},
{'1': ('kn', '66:13:82')},
{'1': ('kn', '66:13:83')},
{'1': ('kn', '66:13:84')},
{'1': ('kn', '66:13:85')},
{'1': ('kn', '66:13:86')},
{'1': ('kn', '66:13:87')},
{'1': ('kn', '66:13:88')},
{'1': ('kn', '66:13:89')},
{'1': ('kn', '66:13:90')},
{'1': ('kn', '66:13:91')},
{'1': ('kn', '66:13:92')},
{'1': ('kn', '66:13:93')},
{'1': ('kn', '66:13:94')},
{'1': ('kn', '66:13:95')},
{'1': ('kn', '66:13:96')},
{'1': ('kn', '66:13:97')},
{'1': ('kn', '66:13:98')},
{'1': ('kn', '66:13:99')},
{'1': ('kn', '66:14:00')},
{'1': ('kn', '66:14:01')},
{'1': ('kn', '66:14:02')},
{'1': ('kn', '66:14:03')},
{'1': ('kn', '66:14:04')},
{'1': ('kn', '66:14:05')},
{'1': ('kn', '66:14:06')},
{'1': ('kn', '66:14:07')},
{'1': ('kn', '66:14:08')},
{'1': ('kn', '66:14:09')},
{'1': ('kn', '66:14:10')},
{'1': ('kn', '66:14:11')},
{'1': ('kn', '66:14:12')},
{'1': ('kn', '66:14:13')},
{'1': ('kn', '66:14:14')},
{'1': ('kn', '66:14:15')},
{'1': ('kn', '66:14:16')},
{'1': ('kn', '66:14:17')},
{'1': ('kn', '66:14:18')},
{'1': ('kn', '66:14:19')},
{'1': ('kn', '66:14:20')},
{'1': ('kn', '66:14:21')},
{'1': ('kn', '66:14:22')},
{'1': ('kn', '66:14:23')},
{'1': ('kn', '66:14:24')},
{'1': ('kn', '66:14:25')},
{'1': ('kn', '66:14:26')},
{'1': ('kn', '66:14:27')},
{'1': ('kn', '66:14:28')},
{'1': ('kn', '66:14:29')},
{'1': ('kn', '66:14:30')},
{'1': ('kn', '66:14:31')},
{'1': ('kn', '66:14:32')},
{'1': ('kn', '66:14:33')},
{'1': ('kn', '66:14:34')},
{'1': ('kn', '66:14:35')},
{'1': ('kn', '66:14:36')},
{'1': ('kn', '66:14:37')},
{'1': ('kn', '66:14:38')},
{'1': ('kn', '66:14:39')},
{'1': ('kn', '66:14:40')},
{'1': ('kn', '66:14:41')},
{'1': ('kn', '66:14:42')},
{'1': ('kn', '66:14:43')},
{'1': ('kn', '66:14:44')},
{'1': ('kn', '66:14:45')},
{'1': ('kn', '66:14:46')},
{'1': ('kn', '66:14:47')},
{'1': ('kn', '66:14:48')},
{'1': ('kn', '66:14:49')},
{'1': ('kn', '66:14:50')},
{'1': ('kn', '66:14:51')},
{'1': ('kn', '66:14:52')},
{'1': ('kn', '66:14:53')},
{'1': ('kn', '66:14:54')},
{'1': ('kn', '66:14:55')},
{'1': ('kn', '66:14:56')},
{'1': ('kn', '66:14:57')},
{'1': ('kn', '66:14:58')},
{'1': ('kn', '66:14:59')},
{'1': ('kn', '66:14:60')},
{'1': ('kn', '66:14:61')},
{'1': ('kn', '66:14:62')},
{'1': ('kn', '66:14:63')},
{'1': ('kn', '66:14:64')},
{'1': ('kn', '66:14:65')},
{'1': ('kn', '66:14:66')},
{'1': ('kn', '66:14:67')},
{'1': ('kn', '66:14:68')},
{'1': ('kn', '66:14:69')},
{'1': ('kn', '66:14:70')},
{'1': ('kn', '66:14:71')},
{'1': ('kn', '66:14:72')},
{'1': ('kn', '66:14:73')},
{'1': ('kn', '66:14:74')},
{'1': ('kn', '66:14:75')},
{'1': ('kn', '66:14:76')},
{'1': ('kn', '66:14:77')},
{'1': ('kn', '66:14:78')},
{'1': ('kn', '66:14:79')},
{'1': ('kn', '66:14:80')},
{'1': ('kn', '66:14:81')},
{'1': ('kn', '66:14:82')},
{'1': ('kn', '66:14:83')},
{'1': ('kn', '66:14:84')},
{'1': ('kn', '66:14:85')},
{'1': ('kn', '66:14:86')},
{'1': ('kn', '66:14:87')},
{'1': ('kn', '66:14:88')},
{'1': ('kn', '66:14:89')},
{'1': ('kn', '66:14:90')},
{'1': ('kn', '66:14:91')},
{'1': ('kn', '66:14:92')},
{'1': ('kn', '66:14:93')},
{'1': ('kn', '66:14:94')},
{'1': ('kn', '66:14:95')},
{'1': ('kn', '66:14:96')},
{'1': ('kn', '66:14:97')},
{'1': ('kn', '66:14:98')},
{'1': ('kn', '66:14:99')},
{'1': ('kn', '66:15:00')},
{'1': ('kn', '66:15:01')},
{'1': ('kn', '66:15:02')},
{'1': ('kn', '66:15:03')},
{'1': ('kn', '66:15:04')},
{'1': ('kn', '66:15:05')},
{'1': ('kn', '66:15:06')},
{'1': ('kn', '66:15:07')},
{'1': ('kn', '66:15:08')},
{'1': ('kn', '66:15:09')},
{'1': ('kn', '66:15:10')},
{'1': ('kn', '66:15:11')},
{'1': ('kn', '66:15:12')},
{'1': ('kn', '66:15:13')},
{'1': ('kn', '66:15:14')},
{'1': ('kn', '66:15:15')},
{'1': ('kn', '66:15:16')},
{'1': ('kn', '66:15:17')},
{'1': ('kn', '66:15:18')},
{'1': ('kn', '66:15:19')},
{'1': ('kn', '66:15:20')},
{'1': ('kn', '66:15:21')},
{'1': ('kn', '66:15:22')},
{'1': ('kn', '66:15:23')},
{'1': ('kn', '66:15:24')},
{'1': ('kn', '66:15:25')},
{'1': ('kn', '66:15:26')},
{'1': ('kn', '66:15:27')},
{'1': ('kn', '66:15:28')},
{'1': ('kn', '66:15:29')},
{'1': ('kn', '66:15:30')},
{'1': ('kn', '66:15:31')},
{'1': ('kn', '66:15:32')},
{'1': ('kn', '66:15:33')},
{'1': ('kn', '66:15:34')},
{'1': ('kn', '66:15:35')},
{'1': ('kn', '66:15:36')},
{'1': ('kn', '66:15:37')},
{'1': ('kn', '66:15:38')},
{'1': ('kn', '66:15:39')},
{'1': ('kn', '66:15:40')},
{'1': ('kn', '66:15:41')},
{'1': ('kn', '66:15:42')},
{'1': ('kn', '66:15:43')},
{'1': ('kn', '66:15:44')},
{'1': ('kn', '66:15:45')},
{'1': ('kn', '66:15:46')},
{'1': ('kn', '66:15:47')},
{'1': ('kn', '66:15:48')},
{'1': ('kn', '66:15:49')},
{'1': ('kn', '66:15:50')},
{'1': ('kn', '66:15:51')},
{'1': ('kn', '66:15:52')},
{'1': ('kn', '66:15:53')},
{'1': ('kn', '66:15:54')},
{'1': ('kn', '66:15:55')},
{'1': ('kn', '66:15:56')},
{'1': ('kn', '66:15:57')},
{'1': ('kn', '66:15:58')},
{'1': ('kn', '66:15:59')},
{'1': ('kn', '66:15:60')},
{'1': ('kn', '66:15:61')},
{'1': ('kn', '66:15:62')},
{'1': ('kn', '66:15:63')},
{'1': ('kn', '66:15:64')},
{'1': ('kn', '66:15:65')},
{'1': ('kn', '66:15:66')},
{'1': ('kn', '66:15:67')},
{'1': ('kn', '66:15:68')},
{'1': ('kn', '66:15:69')},
{'1': ('kn', '66:15:70')},
{'1': ('kn', '66:15:71')},
{'1': ('kn', '66:15:72')},
{'1': ('kn', '66:15:73')},
{'1': ('kn', '66:15:74')},
{'1': ('kn', '66:15:75')},
{'1': ('kn', '66:15:76')},
{'1': ('kn', '66:15:77')},
{'1': ('kn', '66:15:78')},
{'1': ('kn', '66:15:79')},
{'1': ('kn', '66:15:80')},
{'1': ('kn', '66:15:81')},
{'1': ('kn', '66:15:82')},
{'1': ('kn', '66:15:83')},
{'1': ('kn', '66:15:84')},
{'1': ('kn', '66:15:85')},
{'1': ('kn', '66:15:86')},
{'1': ('kn', '66:15:87')},
{'1': ('kn', '66:15:88')},
{'1': ('kn', '66:15:89')},
{'1': ('kn', '66:15:90')},
{'1': ('kn', '66:15:91')},
{'1': ('kn', '66:15:92')},
{'1': ('kn', '66:15:93')},
{'1': ('kn', '66:15:94')},
{'1': ('kn', '66:15:95')},
{'1': ('kn', '66:15:96')},
{'1': ('kn', '66:15:97')},
{'1': ('kn', '66:15:98')},
{'1': ('kn', '66:15:99')},
{'1': ('kn', '66:16:00')},
{'1': ('kn', '66:16:01')},
{'1': ('kn', '66:16:02')},
{'1': ('kn', '66:16:03')},
{'1': ('kn', '66:16:04')},
{'1': ('kn', '66:16:05')},
{'1': ('kn', '66:16:06')},
{'1': ('kn', '66:16:07')},
{'1': ('kn', '66:16:08')},
{'1': ('kn', '66:16:09')},
{'1': ('kn', '66:16:10')},
{'1': ('kn', '66:16:11')},
{'1': ('kn', '66:16:12')},
{'1': ('kn', '66:16:13')},
{'1': ('kn', '66:16:14')},
{'1': ('kn', '66:16:15')},
{'1': ('kn', '66:16:16')},
{'1': ('kn', '66:16:17')},
{'1': ('kn', '66:16:18')},
{'1': ('kn', '66:16:19')},
{'1': ('kn', '66:16:20')},
{'1': ('kn', '66:16:21')},
{'1': ('kn', '66:16:22')},
{'1': ('kn', '66:16:23')},
{'1': ('kn', '66:16:24')},
{'1': ('kn', '66:16:25')},
{'1': ('kn', '66:16:26')},
{'1': ('kn', '66:16:27')},
{'1': ('kn', '66:16:28')},
{'1': ('kn', '66:16:29')},
{'1': ('kn', '66:16:30')},
{'1': ('kn', '66:16:31')},
{'1': ('kn', '66:16:32')},
{'1': ('kn', '66:16:33')},
{'1': ('kn', '66:16:34')},
{'1': ('kn', '66:16:35')},
{'1': ('kn', '66:16:36')},
{'1': ('kn', '66:16:37')},
{'1': ('kn', '66:16:38')},
{'1': ('kn', '66:16:39')},
{'1': ('kn', '66:16:40')},
{'1': ('kn', '66:16:41')},
{'1': ('kn', '66:16:42')},
{'1': ('kn', '66:16:43')},
{'1': ('kn', '66:16:44')},
{'1': ('kn', '66:16:45')},
{'1': ('kn', '66:16:46')},
{'1': ('kn', '66:16:47')},
{'1': ('kn', '66:16:48')},
{'1': ('kn', '66:16:49')},
{'1': ('kn', '66:16:50')},
{'1': ('kn', '66:16:51')},
{'1': ('kn', '66:16:52')},
{'1': ('kn', '66:16:53')},
{'1': ('kn', '66:16:54')},
{'1': ('kn', '66:16:55')},
{'1': ('kn', '66:16:56')},
{'1': ('kn', '66:16:57')},
{'1': ('kn', '66:16:58')},
{'1': ('kn', '66:16:59')},
{'1': ('kn', '66:16:60')},
{'1': ('kn', '66:16:61')},
{'1': ('kn', '66:16:62')},
{'1': ('kn', '66:16:63')},
{'1': ('kn', '66:16:64')},
{'1': ('kn', '66:16:65')},
{'1': ('kn', '66:16:66')},
{'1': ('kn', '66:16:67')},
{'1': ('kn', '66:16:68')},
{'1': ('kn', '66:16:69')},
{'1': ('kn', '66:16:70')},
{'1': ('kn', '66:16:71')},
{'1': ('kn', '66:16:72')},
{'1': ('kn', '66:16:73')},
{'1': ('kn', '66:16:74')},
{'1': ('kn', '66:16:75')},
{'1': ('kn', '66:16:76')},
{'1': ('kn', '66:16:77')},
{'1': ('kn', '66:16:78')},
{'1': ('kn', '66:16:79')},
{'1': ('kn', '66:16:80')},
{'1': ('kn', '66:16:81')},
{'1': ('kn', '66:16:82')},
{'1': ('kn', '66:16:83')},
{'1': ('kn', '66:16:84')},
{'1': ('kn', '66:16:85')},
{'1': ('kn', '66:16:86')},
{'1': ('kn', '66:16:87')},
{'1': ('kn', '66:16:88')},
{'1': ('kn', '66:16:89')},
{'1': ('kn', '66:16:90')},
{'1': ('kn', '66:16:91')},
{'1': ('kn', '66:16:92')},
{'1': ('kn', '66:16:93')},
{'1': ('kn', '66:16:94')},
{'1': ('kn', '66:16:95')},
{'1': ('kn', '66:16:96')},
{'1': ('kn', '66:16:97')},
{'1': ('kn', '66:16:98')},
{'1': ('kn', '66:16:99')},
{'1': ('kn', '66:17:00')},
{'1': ('kn', '66:17:01')},
{'1': ('kn', '66:17:02')},
{'1': ('kn', '66:17:03')},
{'1': ('kn', '66:17:04')},
{'1': ('kn', '66:17:05')},
{'1': ('kn', '66:17:06')},
{'1': ('kn', '66:17:07')},
{'1': ('kn', '66:17:08')},
{'1': ('kn', '66:17:09')},
{'1': ('kn', '66:17:10')},
{'1': ('kn', '66:17:11')},
{'1': ('kn', '66:17:12')},
{'1': ('kn', '66:17:13')},
{'1': ('kn', '66:17:14')},
{'1': ('kn', '66:17:15')},
{'1': ('kn', '66:17:16')},
{'1': ('kn', '66:17:17')},
{'1': ('kn', '66:17:18')},
{'1': ('kn', '66:17:19')},
{'1': ('kn', '66:17:20')},
{'1': ('kn', '66:17:21')},
{'1': ('kn', '66:17:22')},
{'1': ('kn', '66:17:23')},
{'1': ('kn', '66:17:24')},
{'1': ('kn', '66:17:25')},
{'1': ('kn', '66:17:26')},
{'1': ('kn', '66:17:27')},
{'1': ('kn', '66:17:28')},
{'1': ('kn', '66:17:29')},
{'1': ('kn', '66:17:30')},
{'1': ('kn', '66:17:31')},
{'1': ('kn', '66:17:32')},
{'1': ('kn', '66:17:33')},
{'1': ('kn', '66:17:34')},
{'1': ('kn', '66:17:35')},
{'1': ('kn', '66:17:36')},
{'1': ('kn', '66:17:37')},
{'1': ('kn', '66:17:38')},
{'1': ('kn', '66:17:39')},
{'1': ('kn', '66:17:40')},
{'1': ('kn', '66:17:41')},
{'1': ('kn', '66:17:42')},
{'1': ('kn', '66:17:43')},
{'1': ('kn', '66:17:44')},
{'1': ('kn', '66:17:45')},
{'1': ('kn', '66:17:46')},
{'1': ('kn', '66:17:47')},
{'1': ('kn', '66:17:48')},
{'1': ('kn', '66:17:49')},
{'1': ('kn', '66:17:50')},
{'1': ('kn', '66:17:51')},
{'1': ('kn', '66:17:52')},
{'1': ('kn', '66:17:53')},
{'1': ('kn', '66:17:54')},
{'1': ('kn', '66:17:55')},
{'1': ('kn', '66:17:56')},
{'1': ('kn', '66:17:57')},
{'1': ('kn', '66:17:58')},
{'1': ('kn', '66:17:59')},
{'1': ('kn', '66:17:60')},
{'1': ('kn', '66:17:61')},
{'1': ('kn', '66:17:62')},
{'1': ('kn', '66:17:63')},
{'1': ('kn', '66:17:64')},
{'1': ('kn', '66:17:65')},
{'1': ('kn', '66:17:66')},
{'1': ('kn', '66:17:67')},
{'1': ('kn', '66:17:68')},
{'1': ('kn', '66:17:69')},
{'1': ('kn', '66:17:70')},
{'1': ('kn', '66:17:71')},
{'1': ('kn', '66:17:72')},
{'1': ('kn', '66:17:73')},
{'1': ('kn', '66:17:74')},
{'1': ('kn', '66:17:75')},
{'1': ('kn', '66:17:76')},
{'1': ('kn', '66:17:77')},
{'1': ('kn', '66:17:78')},
{'1': ('kn', '66:17:79')},
{'1': ('kn', '66:17:80')},
{'1': ('kn', '66:17:81')},
{'1': ('kn', '66:17:82')},
{'1': ('kn', '66:17:83')},
{'1': ('kn', '66:17:84')},
{'1': ('kn', '66:17:85')},
{'1': ('kn', '66:17:86')},
{'1': ('kn', '66:17:87')},
{'1': ('kn', '66:17:88')},
{'1': ('kn', '66:17:89')},
{'1': ('kn', '66:17:90')},
{'1': ('kn', '66:17:91')},
{'1': ('kn', '66:17:92')},
{'1': ('kn', '66:17:93')},
{'1': ('kn', '66:17:94')},
{'1': ('kn', '66:17:95')},
{'1': ('kn', '66:17:96')},
{'1': ('kn', '66:17:97')},
{'1': ('kn', '66:17:98')},
{'1': ('kn', '66:17:99')},
{'1': ('kn', '66:18:00')},
{'1': ('kn', '66:18:01')},
{'1': ('kn', '66:18:02')},
{'1': ('kn', '66:18:03')},
{'1': ('kn', '66:18:04')},
{'1': ('kn', '66:18:05')},
{'1': ('kn', '66:18:06')},
{'1': ('kn', '66:18:07')},
{'1': ('kn', '66:18:08')},
{'1': ('kn', '66:18:09')},
{'1': ('kn', '66:18:10')},
{'1': ('kn', '66:18:11')},
{'1': ('kn', '66:18:12')},
{'1': ('kn', '66:18:13')},
{'1': ('kn', '66:18:14')},
{'1': ('kn', '66:18:15')},
{'1': ('kn', '66:18:16')},
{'1': ('kn', '66:18:17')},
{'1': ('kn', '66:18:18')},
{'1': ('kn', '66:18:19')},
{'1': ('kn', '66:18:20')},
{'1': ('kn', '66:18:21')},
{'1': ('kn', '66:18:22')},
{'1': ('kn', '66:18:23')},
{'1': ('kn', '66:18:24')},
{'1': ('kn', '66:18:25')},
{'1': ('kn', '66:18:26')},
{'1': ('kn', '66:18:27')},
{'1': ('kn', '66:18:28')},
{'1': ('kn', '66:18:29')},
{'1': ('kn', '66:18:30')},
{'1': ('kn', '66:18:31')},
{'1': ('kn', '66:18:32')},
{'1': ('kn', '66:18:33')},
{'1': ('kn', '66:18:34')},
{'1': ('kn', '66:18:35')},
{'1': ('kn', '66:18:36')},
{'1': ('kn', '66:18:37')},
{'1': ('kn', '66:18:38')},
{'1': ('kn', '66:18:39')},
{'1': ('kn', '66:18:40')},
{'1': ('kn', '66:18:41')},
{'1': ('kn', '66:18:42')},
{'1': ('kn', '66:18:43')},
{'1': ('kn', '66:18:44')},
{'1': ('kn', '66:18:45')},
{'1': ('kn', '66:18:46')},
{'1': ('kn', '66:18:47')},
{'1': ('kn', '66:18:48')},
{'1': ('kn', '66:18:49')},
{'1': ('kn', '66:18:50')},
{'1': ('kn', '66:18:51')},
{'1': ('kn', '66:18:52')},
{'1': ('kn', '66:18:53')},
{'1': ('kn', '66:18:54')},
{'1': ('kn', '66:18:55')},
{'1': ('kn', '66:18:56')},
{'1': ('kn', '66:18:57')},
{'1': ('kn', '66:18:58')},
{'1': ('kn', '66:18:59')},
{'1': ('kn', '66:18:60')},
{'1': ('kn', '66:18:61')},
{'1': ('kn', '66:18:62')},
{'1': ('kn', '66:18:63')},
{'1': ('kn', '66:18:64')},
{'1': ('kn', '66:18:65')},
{'1': ('kn', '66:18:66')},
{'1': ('kn', '66:18:67')},
{'1': ('kn', '66:18:68')},
{'1': ('kn', '66:18:69')},
{'1': ('kn', '66:18:70')},
{'1': ('kn', '66:18:71')},
{'1': ('kn', '66:18:72')},
{'1': ('kn', '66:18:73')},
{'1': ('kn', '66:18:74')},
{'1': ('kn', '66:18:75')},
{'1': ('kn', '66:18:76')},
{'1': ('kn', '66:18:77')},
{'1': ('kn', '66:18:78')},
{'1': ('kn', '66:18:79')},
{'1': ('kn', '66:18:80')},
{'1': ('kn', '66:18:81')},
{'1': ('kn', '66:18:82')},
{'1': ('kn', '66:18:83')},
{'1': ('kn', '66:18:84')},
{'1': ('kn', '66:18:85')},
{'1': ('kn', '66:18:86')},
{'1': ('kn', '66:18:87')},
{'1': ('kn', '66:18:88')},
{'1': ('kn', '66:18:89')},
{'1': ('kn', '66:18:90')},
{'1': ('kn', '66:18:91')},
{'1': ('kn', '66:18:92')},
{'1': ('kn', '66:18:93')},
{'1': ('kn', '66:18:94')},
{'1': ('kn', '66:18:95')},
{'1': ('kn', '66:18:96')},
{'1': ('kn', '66:18:97')},
{'1': ('kn', '66:18:98')},
{'1': ('kn', '66:18:99')},
{'1': ('kn', '66:19:00')},
{'1': ('kn', '66:19:01')},
{'1': ('kn', '66:19:02')},
{'1': ('kn', '66:19:03')},
{'1': ('kn', '66:19:04')},
{'1': ('kn', '66:19:05')},
{'1': ('kn', '66:19:06')},
{'1': ('kn', '66:19:07')},
{'1': ('kn', '66:19:08')},
{'1': ('kn', '66:19:09')},
{'1': ('kn', '66:19:10')},
{'1': ('kn', '66:19:11')},
{'1': ('kn', '66:19:12')},
{'1': ('kn', '66:19:13')},
{'1': ('kn', '66:19:14')},
{'1': ('kn', '66:19:15')},
{'1': ('kn', '66:19:16')},
{'1': ('kn', '66:19:17')},
{'1': ('kn', '66:19:18')},
{'1': ('kn', '66:19:19')},
{'1': ('kn', '66:19:20')},
{'1': ('kn', '66:19:21')},
{'1': ('kn', '66:19:22')},
{'1': ('kn', '66:19:23')},
{'1': ('kn', '66:19:24')},
{'1': ('kn', '66:19:25')},
{'1': ('kn', '66:19:26')},
{'1': ('kn', '66:19:27')},
{'1': ('kn', '66:19:28')},
{'1': ('kn', '66:19:29')},
{'1': ('kn', '66:19:30')},
{'1': ('kn', '66:19:31')},
{'1': ('kn', '66:19:32')},
{'1': ('kn', '66:19:33')},
{'1': ('kn', '66:19:34')},
{'1': ('kn', '66:19:35')},
{'1': ('kn', '66:19:36')},
{'1': ('kn', '66:19:37')},
{'1': ('kn', '66:19:38')},
{'1': ('kn', '66:19:39')},
{'1': ('kn', '66:19:40')},
{'1': ('kn', '66:19:41')},
{'1': ('kn', '66:19:42')},
{'1': ('kn', '66:19:43')},
{'1': ('kn', '66:19:44')},
{'1': ('kn', '66:19:45')},
{'1': ('kn', '66:19:46')},
{'1': ('kn', '66:19:47')},
{'1': ('kn', '66:19:48')},
{'1': ('kn', '66:19:49')},
{'1': ('kn', '66:19:50')},
{'1': ('kn', '66:19:51')},
{'1': ('kn', '66:19:52')},
{'1': ('kn', '66:19:53')},
{'1': ('kn', '66:19:54')},
{'1': ('kn', '66:19:55')},
{'1': ('kn', '66:19:56')},
{'1': ('kn', '66:19:57')},
{'1': ('kn', '66:19:58')},
{'1': ('kn', '66:19:59')},
{'1': ('kn', '66:19:60')},
{'1': ('kn', '66:19:61')},
{'1': ('kn', '66:19:62')},
{'1': ('kn', '66:19:63')},
{'1': ('kn', '66:19:64')},
{'1': ('kn', '66:19:65')},
{'1': ('kn', '66:19:66')},
{'1': ('kn', '66:19:67')},
{'1': ('kn', '66:19:68')},
{'1': ('kn', '66:19:69')},
{'1': ('kn', '66:19:70')},
{'1': ('kn', '66:19:71')},
{'1': ('kn', '66:19:72')},
{'1': ('kn', '66:19:73')},
{'1': ('kn', '66:19:74')},
{'1': ('kn', '66:19:75')},
{'1': ('kn', '66:19:76')},
{'1': ('kn', '66:19:77')},
{'1': ('kn', '66:19:78')},
{'1': ('kn', '66:19:79')},
{'1': ('kn', '66:19:80')},
{'1': ('kn', '66:19:81')},
{'1': ('kn', '66:19:82')},
{'1': ('kn', '66:19:83')},
{'1': ('kn', '66:19:84')},
{'1': ('kn', '66:19:85')},
{'1': ('kn', '66:19:86')},
{'1': ('kn', '66:19:87')},
{'1': ('kn', '66:19:88')},
{'1': ('kn', '66:19:89')},
{'1': ('kn', '66:19:90')},
{'1': ('kn', '66:19:91')},
{'1': ('kn', '66:19:92')},
{'1': ('kn', '66:19:93')},
{'1': ('kn', '66:19:94')},
{'1': ('kn', '66:19:95')},
{'1': ('kn', '66:19:96')},
{'1': ('kn', '66:19:97')},
{'1': ('kn', '66:19:98')},
{'1': ('kn', '66:19:99')},
{'1': ('kn', '66:20:00')},
{'1': ('kn', '66:20:01')},
{'1': ('kn', '66:20:02')},
{'1': ('kn', '66:20:03')},
{'1': ('kn', '66:20:04')},
{'1': ('kn', '66:20:05')},
{'1': ('kn', '66:20:06')},
{'1': ('kn', '66:20:07')},
{'1': ('kn', '66:20:08')},
{'1': ('kn', '66:20:09')},
{'1': ('kn', '66:20:10')},
{'1': ('kn', '66:20:11')},
{'1': ('kn', '66:20:12')},
{'1': ('kn', '66:20:13')},
{'1': ('kn', '66:20:14')},
{'1': ('kn', '66:20:15')},
{'1': ('kn', '66:20:16')},
{'1': ('kn', '66:20:17')},
{'1': ('kn', '66:20:18')},
{'1': ('kn', '66:20:19')},
{'1': ('kn', '66:20:20')},
{'1': ('kn', '66:20:21')},
{'1': ('kn', '66:20:22')},
{'1': ('kn', '66:20:23')},
{'1': ('kn', '66:20:24')},
{'1': ('kn', '66:20:25')},
{'1': ('kn', '66:20:26')},
{'1': ('kn', '66:20:27')},
{'1': ('kn', '66:20:28')},
{'1': ('kn', '66:20:29')},
{'1': ('kn', '66:20:30')},
{'1': ('kn', '66:20:31')},
{'1': ('kn', '66:20:32')},
{'1': ('kn', '66:20:33')},
{'1': ('kn', '66:20:34')},
{'1': ('kn', '66:20:35')},
{'1': ('kn', '66:20:36')},
{'1': ('kn', '66:20:37')},
{'1': ('kn', '66:20:38')},
{'1': ('kn', '66:20:39')},
{'1': ('kn', '66:20:40')},
{'1': ('kn', '66:20:41')},
{'1': ('kn', '66:20:42')},
{'1': ('kn', '66:20:43')},
{'1': ('kn', '66:20:44')},
{'1': ('kn', '66:20:45')},
{'1': ('kn', '66:20:46')},
{'1': ('kn', '66:20:47')},
{'1': ('kn', '66:20:48')},
{'1': ('kn', '66:20:49')},
{'1': ('kn', '66:20:50')},
{'1': ('kn', '66:20:51')},
{'1': ('kn', '66:20:52')},
{'1': ('kn', '66:20:53')},
{'1': ('kn', '66:20:54')},
{'1': ('kn', '66:20:55')},
{'1': ('kn', '66:20:56')},
{'1': ('kn', '66:20:57')},
{'1': ('kn', '66:20:58')},
{'1': ('kn', '66:20:59')},
{'1': ('kn', '66:20:60')},
{'1': ('kn', '66:20:61')},
{'1': ('kn', '66:20:62')},
{'1': ('kn', '66:20:63')},
{'1': ('kn', '66:20:64')},
{'1': ('kn', '66:20:65')},
{'1': ('kn', '66:20:66')},
{'1': ('kn', '66:20:67')},
{'1': ('kn', '66:20:68')},
{'1': ('kn', '66:20:69')},
{'1': ('kn', '66:20:70')},
{'1': ('kn', '66:20:71')},
{'1': ('kn', '66:20:72')},
{'1': ('kn', '66:20:73')},
{'1': ('kn', '66:20:74')},
{'1': ('kn', '66:20:75')},
{'1': ('kn', '66:20:76')},
{'1': ('kn', '66:20:77')},
{'1': ('kn', '66:20:78')},
{'1': ('kn', '66:20:79')},
{'1': ('kn', '66:20:80')},
{'1': ('kn', '66:20:81')},
{'1': ('kn', '66:20:82')},
{'1': ('kn', '66:20:83')},
{'1': ('kn', '66:20:84')},
{'1': ('kn', '66:20:85')},
{'1': ('kn', '66:20:86')},
{'1': ('kn', '66:20:87')},
{'1': ('kn', '66:20:88')},
{'1': ('kn', '66:20:89')},
{'1': ('kn', '66:20:90')},
{'1': ('kn', '66:20:91')},
{'1': ('kn', '66:20:92')},
{'1': ('kn', '66:20:93')},
{'1': ('kn', '66:20:94')},
{'1': ('kn', '66:20:95')},
{'1': ('kn', '66:20:96')},
{'1': ('kn', '66:20:97')},
{'1': ('kn', '66:20:98')},
{'1': ('kn', '66:20:99')},
{'1': ('kn', '66:21:00')},
{'1': ('kn', '66:21:01')},
{'1': ('kn', '66:21:02')},
{'1': ('kn', '66:21:03')},
{'1': ('kn', '66:21:04')},
{'1': ('kn', '66:21:05')},
{'1': ('kn', '66:21:06')},
{'1': ('kn', '66:21:07')},
{'1': ('kn', '66:21:08')},
{'1': ('kn', '66:21:09')},
{'1': ('kn', '66:21:10')},
{'1': ('kn', '66:21:11')},
{'1': ('kn', '66:21:12')},
{'1': ('kn', '66:21:13')},
{'1': ('kn', '66:21:14')},
{'1': ('kn', '66:21:15')},
{'1': ('kn', '66:21:16')},
{'1': ('kn', '66:21:17')},
{'1': ('kn', '66:21:18')},
{'1': ('kn', '66:21:19')},
{'1': ('kn', '66:21:20')},
{'1': ('kn', '66:21:21')},
{'1': ('kn', '66:21:22')},
{'1': ('kn', '66:21:23')},
{'1': ('kn', '66:21:24')},
{'1': ('kn', '66:21:25')},
{'1': ('kn', '66:21:26')},
{'1': ('kn', '66:21:27')},
{'1': ('kn', '66:21:28')},
{'1': ('kn', '66:21:29')},
{'1': ('kn', '66:21:30')},
{'1': ('kn', '66:21:31')},
{'1': ('kn', '66:21:32')},
{'1': ('kn', '66:21:33')},
{'1': ('kn', '66:21:34')},
{'1': ('kn', '66:21:35')},
{'1': ('kn', '66:21:36')},
{'1': ('kn', '66:21:37')},
{'1': ('kn', '66:21:38')},
{'1': ('kn', '66:21:39')},
{'1': ('kn', '66:21:40')},
{'1': ('kn', '66:21:41')},
{'1': ('kn', '66:21:42')},
{'1': ('kn', '66:21:43')},
{'1': ('kn', '66:21:44')},
{'1': ('kn', '66:21:45')},
{'1': ('kn', '66:21:46')},
{'1': ('kn', '66:21:47')},
{'1': ('kn', '66:21:48')},
{'1': ('kn', '66:21:49')},
{'1': ('kn', '66:21:50')},
{'1': ('kn', '66:21:51')},
{'1': ('kn', '66:21:52')},
{'1': ('kn', '66:21:53')},
{'1': ('kn', '66:21:54')},
{'1': ('kn', '66:21:55')},
{'1': ('kn', '66:21:56')},
{'1': ('kn', '66:21:57')},
{'1': ('kn', '66:21:58')},
{'1': ('kn', '66:21:59')},
{'1': ('kn', '66:21:60')},
{'1': ('kn', '66:21:61')},
{'1': ('kn', '66:21:62')},
{'1': ('kn', '66:21:63')},
{'1': ('kn', '66:21:64')},
{'1': ('kn', '66:21:65')},
{'1': ('kn', '66:21:66')},
{'1': ('kn', '66:21:67')},
{'1': ('kn', '66:21:68')},
{'1': ('kn', '66:21:69')},
{'1': ('kn', '66:21:70')},
{'1': ('kn', '66:21:71')},
{'1': ('kn', '66:21:72')},
{'1': ('kn', '66:21:73')},
{'1': ('kn', '66:21:74')},
{'1': ('kn', '66:21:75')},
{'1': ('kn', '66:21:76')},
{'1': ('kn', '66:21:77')},
{'1': ('kn', '66:21:78')},
{'1': ('kn', '66:21:79')},
{'1': ('kn', '66:21:80')},
{'1': ('kn', '66:21:81')},
{'1': ('kn', '66:21:82')},
{'1': ('kn', '66:21:83')},
{'1': ('kn', '66:21:84')},
{'1': ('kn', '66:21:85')},
{'1': ('kn', '66:21:86')},
{'1': ('kn', '66:21:87')},
{'1': ('kn', '66:21:88')},
{'1': ('kn', '66:21:89')},
{'1': ('kn', '66:21:90')},
{'1': ('kn', '66:21:91')},
{'1': ('kn', '66:21:92')},
{'1': ('kn', '66:21:93')},
{'1': ('kn', '66:21:94')},
{'1': ('kn', '66:21:95')},
{'1': ('kn', '66:21:96')},
{'1': ('kn', '66:21:97')},
{'1': ('kn', '66:21:98')},
{'1': ('kn', '66:21:99')},
{'1': ('kn', '66:22:00')},
{'1': ('kn', '66:22:01')},
{'1': ('kn', '66:22:02')},
{'1': ('kn', '66:22:03')},
{'1': ('kn', '66:22:04')},
{'1': ('kn', '66:22:05')},
{'1': ('kn', '66:22:06')},
{'1': ('kn', '66:22:07')},
{'1': ('kn', '66:22:08')},
{'1': ('kn', '66:22:09')},
{'1': ('kn', '66:22:10')},
{'1': ('kn', '66:22:11')},
{'1': ('kn', '66:22:12')},
{'1': ('kn', '66:22:13')},
{'1': ('kn', '66:22:14')},
{'1': ('kn', '66:22:15')},
{'1': ('kn', '66:22:16')},
{'1': ('kn', '66:22:17')},
{'1': ('kn', '66:22:18')},
{'1': ('kn', '66:22:19')},
{'1': ('kn', '66:22:20')},
{'1': ('kn', '66:22:21')},
{'1': ('kn', '66:22:22')},
{'1': ('kn', '66:22:23')},
{'1': ('kn', '66:22:24')},
{'1': ('kn', '66:22:25')},
{'1': ('kn', '66:22:26')},
{'1': ('kn', '66:22:27')},
{'1': ('kn', '66:22:28')},
{'1': ('kn', '66:22:29')},
{'1': ('kn', '66:22:30')},
{'1': ('kn', '66:22:31')},
{'1': ('kn', '66:22:32')},
{'1': ('kn', '66:22:33')},
{'1': ('kn', '66:22:34')},
{'1': ('kn', '66:22:35')},
{'1': ('kn', '66:22:36')},
{'1': ('kn', '66:22:37')},
{'1': ('kn', '66:22:38')},
{'1': ('kn', '66:22:39')},
{'1': ('kn', '66:22:40')},
{'1': ('kn', '66:22:41')},
{'1': ('kn', '66:22:42')},
{'1': ('kn', '66:22:43')},
{'1': ('kn', '66:22:44')},
{'1': ('kn', '66:22:45')},
{'1': ('kn', '66:22:46')},
{'1': ('kn', '66:22:47')},
{'1': ('kn', '66:22:48')},
{'1': ('kn', '66:22:49')},
{'1': ('kn', '66:22:50')},
{'1': ('kn', '66:22:51')},
{'1': ('kn', '66:22:52')},
{'1': ('kn', '66:22:53')},
{'1': ('kn', '66:22:54')},
{'1': ('kn', '66:22:55')},
{'1': ('kn', '66:22:56')},
{'1': ('kn', '66:22:57')},
{'1': ('kn', '66:22:58')},
{'1': ('kn', '66:22:59')},
{'1': ('kn', '66:22:60')},
{'1': ('kn', '66:22:61')},
{'1': ('kn', '66:22:62')},
{'1': ('kn', '66:22:63')},
{'1': ('kn', '66:22:64')},
{'1': ('kn', '66:22:65')},
{'1': ('kn', '66:22:66')},
{'1': ('kn', '66:22:67')},
{'1': ('kn', '66:22:68')},
{'1': ('kn', '66:22:69')},
{'1': ('kn', '66:22:70')},
{'1': ('kn', '66:22:71')},
{'1': ('kn', '66:22:72')},
{'1': ('kn', '66:22:73')},
{'1': ('kn', '66:22:74')},
{'1': ('kn', '66:22:75')},
{'1': ('kn', '66:22:76')},
{'1': ('kn', '66:22:77')},
{'1': ('kn', '66:22:78')},
{'1': ('kn', '66:22:79')},
{'1': ('kn', '66:22:80')},
{'1': ('kn', '66:22:81')},
{'1': ('kn', '66:22:82')},
{'1': ('kn', '66:22:83')},
{'1': ('kn', '66:22:84')},
{'1': ('kn', '66:22:85')},
{'1': ('kn', '66:22:86')},
{'1': ('kn', '66:22:87')},
{'1': ('kn', '66:22:88')},
{'1': ('kn', '66:22:89')},
{'1': ('kn', '66:22:90')},
{'1': ('kn', '66:22:91')},
{'1': ('kn', '66:22:92')},
{'1': ('kn', '66:22:93')},
{'1': ('kn', '66:22:94')},
{'1': ('kn', '66:22:95')},
{'1': ('kn', '66:22:96')},
{'1': ('kn', '66:22:97')},
{'1': ('kn', '66:22:98')},
{'1': ('kn', '66:22:99')},
{'1': ('kn', '66:23:00')},
{'1': ('kn', '66:23:01')},
{'1': ('kn', '66:23:02')},
{'1': ('kn', '66:23:03')},
{'1': ('kn', '66:23:04')},
{'1': ('kn', '66:23:05')},
{'1': ('kn', '66:23:06')},
{'1': ('kn', '66:23:07')},
{'1': ('kn', '66:23:08')},
{'1': ('kn', '66:23:09')},
{'1': ('kn', '66:23:10')},
{'1': ('kn', '66:23:11')},
{'1': ('kn', '66:23:12')},
{'1': ('kn', '66:23:13')},
{'1': ('kn', '66:23:14')},
{'1': ('kn', '66:23:15')},
{'1': ('kn', '66:23:16')},
{'1': ('kn', '66:23:17')},
{'1': ('kn', '66:23:18')},
{'1': ('kn', '66:23:19')},
{'1': ('kn', '66:23:20')},
{'1': ('kn', '66:23:21')},
{'1': ('kn', '66:23:22')},
{'1': ('kn', '66:23:23')},
{'1': ('kn', '66:23:24')},
{'1': ('kn', '66:23:25')},
{'1': ('kn', '66:23:26')},
{'1': ('kn', '66:23:27')},
{'1': ('kn', '66:23:28')},
{'1': ('kn', '66:23:29')},
{'1': ('kn', '66:23:30')},
{'1': ('kn', '66:23:31')},
{'1': ('kn', '66:23:32')},
{'1': ('kn', '66:23:33')},
{'1': ('kn', '66:23:34')},
{'1': ('kn', '66:23:35')},
{'1': ('kn', '66:23:36')},
{'1': ('kn', '66:23:37')},
{'1': ('kn', '66:23:38')},
{'1': ('kn', '66:23:39')},
{'1': ('kn', '66:23:40')},
{'1': ('kn', '66:23:41')},
{'1': ('kn', '66:23:42')},
{'1': ('kn', '66:23:43')},
{'1': ('kn', '66:23:44')},
{'1': ('kn', '66:23:45')},
{'1': ('kn', '66:23:46')},
{'1': ('kn', '66:23:47')},
{'1': ('kn', '66:23:48')},
{'1': ('kn', '66:23:49')},
{'1': ('kn', '66:23:50')},
{'1': ('kn', '66:23:51')},
{'1': ('kn', '66:23:52')},
{'1': ('kn', '66:23:53')},
{'1': ('kn', '66:23:54')},
{'1': ('kn', '66:23:55')},
{'1': ('kn', '66:23:56')},
{'1': ('kn', '66:23:57')},
{'1': ('kn', '66:23:58')},
{'1': ('kn', '66:23:59')},
{'1': ('kn', '66:23:60')},
{'1': ('kn', '66:23:61')},
{'1': ('kn', '66:23:62')},
{'1': ('kn', '66:23:63')},
{'1': ('kn', '66:23:64')},
{'1': ('kn', '66:23:65')},
{'1': ('kn', '66:23:66')},
{'1': ('kn', '66:23:67')},
{'1': ('kn', '66:23:68')},
{'1': ('kn', '66:23:69')},
{'1': ('kn', '66:23:70')},
{'1': ('kn', '66:23:71')},
{'1': ('kn', '66:23:72')},
{'1': ('kn', '66:23:73')},
{'1': ('kn', '66:23:74')},
{'1': ('kn', '66:23:75')},
{'1': ('kn', '66:23:76')},
{'1': ('kn', '66:23:77')},
{'1': ('kn', '66:23:78')},
{'1': ('kn', '66:23:79')},
{'1': ('kn', '66:23:80')},
{'1': ('kn', '66:23:81')},
{'1': ('kn', '66:23:82')},
{'1': ('kn', '66:23:83')},
{'1': ('kn', '66:23:84')},
{'1': ('kn', '66:23:85')},
{'1': ('kn', '66:23:86')},
{'1': ('kn', '66:23:87')},
{'1': ('kn', '66:23:88')},
{'1': ('kn', '66:23:89')},
{'1': ('kn', '66:23:90')},
{'1': ('kn', '66:23:91')},
{'1': ('kn', '66:23:92')},
{'1': ('kn', '66:23:93')},
{'1': ('kn', '66:23:94')},
{'1': ('kn', '66:23:95')},
{'1': ('kn', '66:23:96')},
{'1': ('kn', '66:23:97')},
{'1': ('kn', '66:23:98')},
{'1': ('kn', '66:23:99')},
{'1': ('kn', '66:24:00')},
{'1': ('kn', '66:24:01')},
{'1': ('kn', '66:24:02')},
{'1': ('kn', '66:24:03')},
{'1': ('kn', '66:24:04')},
{'1': ('kn', '66:24:05')},
{'1': ('kn', '66:24:06')},
{'1': ('kn', '66:24:07')},
{'1': ('kn', '66:24:08')},
{'1': ('kn', '66:24:09')},
{'1': ('kn', '66:24:10')},
{'1': ('kn', '66:24:11')},
{'1': ('kn', '66:24:12')},
{'1': ('kn', '66:24:13')},
{'1': ('kn', '66:24:14')},
{'1': ('kn', '66:24:15')},
{'1': ('kn', '66:24:16')},
{'1': ('kn', '66:24:17')},
{'1': ('kn', '66:24:18')},
{'1': ('kn', '66:24:19')},
{'1': ('kn', '66:24:20')},
{'1': ('kn', '66:24:21')},
{'1': ('kn', '66:24:22')},
{'1': ('kn', '66:24:23')},
{'1': ('kn', '66:24:24')},
{'1': ('kn', '66:24:25')},
{'1': ('kn', '66:24:26')},
{'1': ('kn', '66:24:27')},
{'1': ('kn', '66:24:28')},
{'1': ('kn', '66:24:29')},
{'1': ('kn', '66:24:30')},
{'1': ('kn', '66:24:31')},
{'1': ('kn', '66:24:32')},
{'1': ('kn', '66:24:33')},
{'1': ('kn', '66:24:34')},
{'1': ('kn', '66:24:35')},
{'1': ('kn', '66:24:36')},
{'1': ('kn', '66:24:37')},
{'1': ('kn', '66:24:38')},
{'1': ('kn', '66:24:39')},
{'1': ('kn', '66:24:40')},
{'1': ('kn', '66:24:41')},
{'1': ('kn', '66:24:42')},
{'1': ('kn', '66:24:43')},
{'1': ('kn', '66:24:44')},
{'1': ('kn', '66:24:45')},
{'1': ('kn', '66:24:46')},
{'1': ('kn', '66:24:47')},
{'1': ('kn', '66:24:48')},
{'1': ('kn', '66:24:49')},
{'1': ('kn', '66:24:50')},
{'1': ('kn', '66:24:51')},
{'1': ('kn', '66:24:52')},
{'1': ('kn', '66:24:53')},
{'1': ('kn', '66:24:54')},
{'1': ('kn', '66:24:55')},
{'1': ('kn', '66:24:56')},
{'1': ('kn', '66:24:57')},
{'1': ('kn', '66:24:58')},
{'1': ('kn', '66:24:59')},
{'1': ('kn', '66:24:60')},
{'1': ('kn', '66:24:61')},
{'1': ('kn', '66:24:62')},
{'1': ('kn', '66:24:63')},
{'1': ('kn', '66:24:64')},
{'1': ('kn', '66:24:65')},
{'1': ('kn', '66:24:66')},
{'1': ('kn', '66:24:67')},
{'1': ('kn', '66:24:68')},
{'1': ('kn', '66:24:69')},
{'1': ('kn', '66:24:70')},
{'1': ('kn', '66:24:71')},
{'1': ('kn', '66:24:72')},
{'1': ('kn', '66:24:73')},
{'1': ('kn', '66:24:74')},
{'1': ('kn', '66:24:75')},
{'1': ('kn', '66:24:76')},
{'1': ('kn', '66:24:77')},
{'1': ('kn', '66:24:78')},
{'1': ('kn', '66:24:79')},
{'1': ('kn', '66:24:80')},
{'1': ('kn', '66:24:81')},
{'1': ('kn', '66:24:82')},
{'1': ('kn', '66:24:83')},
{'1': ('kn', '66:24:84')},
{'1': ('kn', '66:24:85')},
{'1': ('kn', '66:24:86')},
{'1': ('kn', '66:24:87')},
{'1': ('kn', '66:24:88')},
{'1': ('kn', '66:24:89')},
{'1': ('kn', '66:24:90')},
{'1': ('kn', '66:24:91')},
{'1': ('kn', '66:24:92')},
{'1': ('kn', '66:24:93')},
{'1': ('kn', '66:24:94')},
{'1': ('kn', '66:24:95')},
{'1': ('kn', '66:24:96')},
{'1': ('kn', '66:24:97')},
{'1': ('kn', '66:24:98')},
{'1': ('kn', '66:24:99')},
{'1': ('kn', '66:25:00')},
{'1': ('kn', '66:25:01')},
{'1': ('kn', '66:25:02')},
{'1': ('kn', '66:25:03')},
{'1': ('kn', '66:25:04')},
{'1': ('kn', '66:25:05')},
{'1': ('kn', '66:25:06')},
{'1': ('kn', '66:25:07')},
{'1': ('kn', '66:25:08')},
{'1': ('kn', '66:25:09')},
{'1': ('kn', '66:25:10')},
{'1': ('kn', '66:25:11')},
{'1': ('kn', '66:25:12')},
{'1': ('kn', '66:25:13')},
{'1': ('kn', '66:25:14')},
{'1': ('kn', '66:25:15')},
{'1': ('kn', '66:25:16')},
{'1': ('kn', '66:25:17')},
{'1': ('kn', '66:25:18')},
{'1': ('kn', '66:25:19')},
{'1': ('kn', '66:25:20')},
{'1': ('kn', '66:25:21')},
{'1': ('kn', '66:25:22')},
{'1': ('kn', '66:25:23')},
{'1': ('kn', '66:25:24')},
{'1': ('kn', '66:25:25')},
{'1': ('kn', '66:25:26')},
{'1': ('kn', '66:25:27')},
{'1': ('kn', '66:25:28')},
{'1': ('kn', '66:25:29')},
{'1': ('kn', '66:25:30')},
{'1': ('kn', '66:25:31')},
{'1': ('kn', '66:25:32')},
{'1': ('kn', '66:25:33')},
{'1': ('kn', '66:25:34')},
{'1': ('kn', '66:25:35')},
{'1': ('kn', '66:25:36')},
{'1': ('kn', '66:25:37')},
{'1': ('kn', '66:25:38')},
{'1': ('kn', '66:25:39')},
{'1': ('kn', '66:25:40')},
{'1': ('kn', '66:25:41')},
{'1': ('kn', '66:25:42')},
{'1': ('kn', '66:25:43')},
{'1': ('kn', '66:25:44')},
{'1': ('kn', '66:25:45')},
{'1': ('kn', '66:25:46')},
{'1': ('kn', '66:25:47')},
{'1': ('kn', '66:25:48')},
{'1': ('kn', '66:25:49')},
{'1': ('kn', '66:25:50')},
{'1': ('kn', '66:25:51')},
{'1': ('kn', '66:25:52')},
{'1': ('kn', '66:25:53')},
{'1': ('kn', '66:25:54')},
{'1': ('kn', '66:25:55')},
{'1': ('kn', '66:25:56')},
{'1': ('kn', '66:25:57')},
{'1': ('kn', '66:25:58')},
{'1': ('kn', '66:25:59')},
{'1': ('kn', '66:25:60')},
{'1': ('kn', '66:25:61')},
{'1': ('kn', '66:25:62')},
{'1': ('kn', '66:25:63')},
{'1': ('kn', '66:25:64')},
{'1': ('kn', '66:25:65')},
{'1': ('kn', '66:25:66')},
{'1': ('kn', '66:25:67')},
{'1': ('kn', '66:25:68')},
{'1': ('kn', '66:25:69')},
{'1': ('kn', '66:25:70')},
{'1': ('kn', '66:25:71')},
{'1': ('kn', '66:25:72')},
{'1': ('kn', '66:25:73')},
{'1': ('kn', '66:25:74')},
{'1': ('kn', '66:25:75')},
{'1': ('kn', '66:25:76')},
{'1': ('kn', '66:25:77')},
{'1': ('kn', '66:25:78')},
{'1': ('kn', '66:25:79')},
{'1': ('kn', '66:25:80')},
{'1': ('kn', '66:25:81')},
{'1': ('kn', '66:25:82')},
{'1': ('kn', '66:25:83')},
{'1': ('kn', '66:25:84')},
{'1': ('kn', '66:25:85')},
{'1': ('kn', '66:25:86')},
{'1': ('kn', '66:25:87')},
{'1': ('kn', '66:25:88')},
{'1': ('kn', '66:25:89')},
{'1': ('kn', '66:25:90')},
{'1': ('kn', '66:25:91')},
{'1': ('kn', '66:25:92')},
{'1': ('kn', '66:25:93')},
{'1': ('kn', '66:25:94')},
{'1': ('kn', '66:25:95')},
{'1': ('kn', '66:25:96')},
{'1': ('kn', '66:25:97')},
{'1': ('kn', '66:25:98')},
{'1': ('kn', '66:25:99')},
{'1': ('kn', '66:26:00')},
{'1': ('kn', '66:26:01')},
{'1': ('kn', '66:26:02')},
{'1': ('kn', '66:26:03')},
{'1': ('kn', '66:26:04')},
{'1': ('kn', '66:26:05')},
{'1': ('kn', '66:26:06')},
{'1': ('kn', '66:26:07')},
{'1': ('kn', '66:26:08')},
{'1': ('kn', '66:26:09')},
{'1': ('kn', '66:26:10')},
{'1': ('kn', '66:26:11')},
{'1': ('kn', '66:26:12')},
{'1': ('kn', '66:26:13')},
{'1': ('kn', '66:26:14')},
{'1': ('kn', '66:26:15')},
{'1': ('kn', '66:26:16')},
{'1': ('kn', '66:26:17')},
{'1': ('kn', '66:26:18')},
{'1': ('kn', '66:26:19')},
{'1': ('kn', '66:26:20')},
{'1': ('kn', '66:26:21')},
{'1': ('kn', '66:26:22')},
{'1': ('kn', '66:26:23')},
{'1': ('kn', '66:26:24')},
{'1': ('kn', '66:26:25')},
{'1': ('kn', '66:26:26')},
{'1': ('kn', '66:26:27')},
{'1': ('kn', '66:26:28')},
{'1': ('kn', '66:26:29')},
{'1': ('kn', '66:26:30')},
{'1': ('kn', '66:26:31')},
{'1': ('kn', '66:26:32')},
{'1': ('kn', '66:26:33')},
{'1': ('kn', '66:26:34')},
{'1': ('kn', '66:26:35')},
{'1': ('kn', '66:26:36')},
{'1': ('kn', '66:26:37')},
{'1': ('kn', '66:26:38')},
{'1': ('kn', '66:26:39')},
{'1': ('kn', '66:26:40')},
{'1': ('kn', '66:26:41')},
{'1': ('kn', '66:26:42')},
{'1': ('kn', '66:26:43')},
{'1': ('kn', '66:26:44')},
{'1': ('kn', '66:26:45')},
{'1': ('kn', '66:26:46')},
{'1': ('kn', '66:26:47')},
{'1': ('kn', '66:26:48')},
{'1': ('kn', '66:26:49')},
{'1': ('kn', '66:26:50')},
{'1': ('kn', '66:26:51')},
{'1': ('kn', '66:26:52')},
{'1': ('kn', '66:26:53')},
{'1': ('kn', '66:26:54')},
{'1': ('kn', '66:26:55')},
{'1': ('kn', '66:26:56')},
{'1': ('kn', '66:26:57')},
{'1': ('kn', '66:26:58')},
{'1': ('kn', '66:26:59')},
{'1': ('kn', '66:26:60')},
{'1': ('kn', '66:26:61')},
{'1': ('kn', '66:26:62')},
{'1': ('kn', '66:26:63')},
{'1': ('kn', '66:26:64')},
{'1': ('kn', '66:26:65')},
{'1': ('kn', '66:26:66')},
{'1': ('kn', '66:26:67')},
{'1': ('kn', '66:26:68')},
{'1': ('kn', '66:26:69')},
{'1': ('kn', '66:26:70')},
{'1': ('kn', '66:26:71')},
{'1': ('kn', '66:26:72')},
{'1': ('kn', '66:26:73')},
{'1': ('kn', '66:26:74')},
{'1': ('kn', '66:26:75')},
{'1': ('kn', '66:26:76')},
{'1': ('kn', '66:26:77')},
{'1': ('kn', '66:26:78')},
{'1': ('kn', '66:26:79')},
{'1': ('kn', '66:26:80')},
{'1': ('kn', '66:26:81')},
{'1': ('kn', '66:26:82')},
{'1': ('kn', '66:26:83')},
{'1': ('kn', '66:26:84')},
{'1': ('kn', '66:26:85')},
{'1': ('kn', '66:26:86')},
{'1': ('kn', '66:26:87')},
{'1': ('kn', '66:26:88')},
{'1': ('kn', '66:26:89')},
{'1': ('kn', '66:26:90')},
{'1': ('kn', '66:26:91')},
{'1': ('kn', '66:26:92')},
{'1': ('kn', '66:26:93')},
{'1': ('kn', '66:26:94')},
{'1': ('kn', '66:26:95')},
{'1': ('kn', '66:26:96')},
{'1': ('kn', '66:26:97')},
{'1': ('kn', '66:26:98')},
{'1': ('kn', '66:26:99')},
{'1': ('kn', '66:27:00')},
{'1': ('kn', '66:27:01')},
{'1': ('kn', '66:27:02')},
{'1': ('kn', '66:27:03')},
{'1': ('kn', '66:27:04')},
{'1': ('kn', '66:27:05')},
{'1': ('kn', '66:27:06')},
{'1': ('kn', '66:27:07')},
{'1': ('kn', '66:27:08')},
{'1': ('kn', '66:27:09')},
{'1': ('kn', '66:27:10')},
{'1': ('kn', '66:27:11')},
{'1': ('kn', '66:27:12')},
{'1': ('kn', '66:27:13')},
{'1': ('kn', '66:27:14')},
{'1': ('kn', '66:27:15')},
{'1': ('kn', '66:27:16')},
{'1': ('kn', '66:27:17')},
{'1': ('kn', '66:27:18')},
{'1': ('kn', '66:27:19')},
{'1': ('kn', '66:27:20')},
{'1': ('kn', '66:27:21')},
{'1': ('kn', '66:27:22')},
{'1': ('kn', '66:27:23')},
{'1': ('kn', '66:27:24')},
{'1': ('kn', '66:27:25')},
{'1': ('kn', '66:27:26')},
{'1': ('kn', '66:27:27')},
{'1': ('kn', '66:27:28')},
{'1': ('kn', '66:27:29')},
{'1': ('kn', '66:27:30')},
{'1': ('kn', '66:27:31')},
{'1': ('kn', '66:27:32')},
{'1': ('kn', '66:27:33')},
{'1': ('kn', '66:27:34')},
{'1': ('kn', '66:27:35')},
{'1': ('kn', '66:27:36')},
{'1': ('kn', '66:27:37')},
{'1': ('kn', '66:27:38')},
{'1': ('kn', '66:27:39')},
{'1': ('kn', '66:27:40')},
{'1': ('kn', '66:27:41')},
{'1': ('kn', '66:27:42')},
{'1': ('kn', '66:27:43')},
{'1': ('kn', '66:27:44')},
{'1': ('kn', '66:27:45')},
{'1': ('kn', '66:27:46')},
{'1': ('kn', '66:27:47')},
{'1': ('kn', '66:27:48')},
{'1': ('kn', '66:27:49')},
{'1': ('kn', '66:27:50')},
{'1': ('kn', '66:27:51')},
{'1': ('kn', '66:27:52')},
{'1': ('kn', '66:27:53')},
{'1': ('kn', '66:27:54')},
{'1': ('kn', '66:27:55')},
{'1': ('kn', '66:27:56')},
{'1': ('kn', '66:27:57')},
{'1': ('kn', '66:27:58')},
{'1': ('kn', '66:27:59')},
{'1': ('kn', '66:27:60')},
{'1': ('kn', '66:27:61')},
{'1': ('kn', '66:27:62')},
{'1': ('kn', '66:27:63')},
{'1': ('kn', '66:27:64')},
{'1': ('kn', '66:27:65')},
{'1': ('kn', '66:27:66')},
{'1': ('kn', '66:27:67')},
{'1': ('kn', '66:27:68')},
{'1': ('kn', '66:27:69')},
{'1': ('kn', '66:27:70')},
{'1': ('kn', '66:27:71')},
{'1': ('kn', '66:27:72')},
{'1': ('kn', '66:27:73')},
{'1': ('kn', '66:27:74')},
{'1': ('kn', '66:27:75')},
{'1': ('kn', '66:27:76')},
{'1': ('kn', '66:27:77')},
{'1': ('kn', '66:27:78')},
{'1': ('kn', '66:27:79')},
{'1': ('kn', '66:27:80')},
{'1': ('kn', '66:27:81')},
{'1': ('kn', '66:27:82')},
{'1': ('kn', '66:27:83')},
{'1': ('kn', '66:27:84')},
{'1': ('kn', '66:27:85')},
{'1': ('kn', '66:27:86')},
{'1': ('kn', '66:27:87')},
{'1': ('kn', '66:27:88')},
{'1': ('kn', '66:27:89')},
{'1': ('kn', '66:27:90')},
{'1': ('kn', '66:27:91')},
{'1': ('kn', '66:27:92')},
{'1': ('kn', '66:27:93')},
{'1': ('kn', '66:27:94')},
{'1': ('kn', '66:27:95')},
{'1': ('kn', '66:27:96')},
{'1': ('kn', '66:27:97')},
{'1': ('kn', '66:27:98')},
{'1': ('kn', '66:27:99')},
{'1': ('kn', '66:28:00')},
{'1': ('kn', '66:28:01')},
{'1': ('kn', '66:28:02')},
{'1': ('kn', '66:28:03')},
{'1': ('kn', '66:28:04')},
{'1': ('kn', '66:28:05')},
{'1': ('kn', '66:28:06')},
{'1': ('kn', '66:28:07')},
{'1': ('kn', '66:28:08')},
{'1': ('kn', '66:28:09')},
{'1': ('kn', '66:28:10')},
{'1': ('kn', '66:28:11')},
{'1': ('kn', '66:28:12')},
{'1': ('kn', '66:28:13')},
{'1': ('kn', '66:28:14')},
{'1': ('kn', '66:28:15')},
{'1': ('kn', '66:28:16')},
{'1': ('kn', '66:28:17')},
{'1': ('kn', '66:28:18')},
{'1': ('kn', '66:28:19')},
{'1': ('kn', '66:28:20')},
{'1': ('kn', '66:28:21')},
{'1': ('kn', '66:28:22')},
{'1': ('kn', '66:28:23')},
{'1': ('kn', '66:28:24')},
{'1': ('kn', '66:28:25')},
{'1': ('kn', '66:28:26')},
{'1': ('kn', '66:28:27')},
{'1': ('kn', '66:28:28')},
{'1': ('kn', '66:28:29')},
{'1': ('kn', '66:28:30')},
{'1': ('kn', '66:28:31')},
{'1': ('kn', '66:28:32')},
{'1': ('kn', '66:28:33')},
{'1': ('kn', '66:28:34')},
{'1': ('kn', '66:28:35')},
{'1': ('kn', '66:28:36')},
{'1': ('kn', '66:28:37')},
{'1': ('kn', '66:28:38')},
{'1': ('kn', '66:28:39')},
{'1': ('kn', '66:28:40')},
{'1': ('kn', '66:28:41')},
{'1': ('kn', '66:28:42')},
{'1': ('kn', '66:28:43')},
{'1': ('kn', '66:28:44')},
{'1': ('kn', '66:28:45')},
{'1': ('kn', '66:28:46')},
{'1': ('kn', '66:28:47')},
{'1': ('kn', '66:28:48')},
{'1': ('kn', '66:28:49')},
{'1': ('kn', '66:28:50')},
{'1': ('kn', '66:28:51')},
{'1': ('kn', '66:28:52')},
{'1': ('kn', '66:28:53')},
{'1': ('kn', '66:28:54')},
{'1': ('kn', '66:28:55')},
{'1': ('kn', '66:28:56')},
{'1': ('kn', '66:28:57')},
{'1': ('kn', '66:28:58')},
{'1': ('kn', '66:28:59')},
{'1': ('kn', '66:28:60')},
{'1': ('kn', '66:28:61')},
{'1': ('kn', '66:28:62')},
{'1': ('kn', '66:28:63')},
{'1': ('kn', '66:28:64')},
{'1': ('kn', '66:28:65')},
{'1': ('kn', '66:28:66')},
{'1': ('kn', '66:28:67')},
{'1': ('kn', '66:28:68')},
{'1': ('kn', '66:28:69')},
{'1': ('kn', '66:28:70')},
{'1': ('kn', '66:28:71')},
{'1': ('kn', '66:28:72')},
{'1': ('kn', '66:28:73')},
{'1': ('kn', '66:28:74')},
{'1': ('kn', '66:28:75')},
{'1': ('kn', '66:28:76')},
{'1': ('kn', '66:28:77')},
{'1': ('kn', '66:28:78')},
{'1': ('kn', '66:28:79')},
{'1': ('kn', '66:28:80')},
{'1': ('kn', '66:28:81')},
{'1': ('kn', '66:28:82')},
{'1': ('kn', '66:28:83')},
{'1': ('kn', '66:28:84')},
{'1': ('kn', '66:28:85')},
{'1': ('kn', '66:28:86')},
{'1': ('kn', '66:28:87')},
{'1': ('kn', '66:28:88')},
{'1': ('kn', '66:28:89')},
{'1': ('kn', '66:28:90')},
{'1': ('kn', '66:28:91')},
{'1': ('kn', '66:28:92')},
{'1': ('kn', '66:28:93')},
{'1': ('kn', '66:28:94')},
{'1': ('kn', '66:28:95')},
{'1': ('kn', '66:28:96')},
{'1': ('kn', '66:28:97')},
{'1': ('kn', '66:28:98')},
{'1': ('kn', '66:28:99')},
{'1': ('kn', '66:29:00')},
{'1': ('kn', '66:29:01')},
{'1': ('kn', '66:29:02')},
{'1': ('kn', '66:29:03')},
{'1': ('kn', '66:29:04')},
{'1': ('kn', '66:29:05')},
{'1': ('kn', '66:29:06')},
{'1': ('kn', '66:29:07')},
{'1': ('kn', '66:29:08')},
{'1': ('kn', '66:29:09')},
{'1': ('kn', '66:29:10')},
{'1': ('kn', '66:29:11')},
{'1': ('kn', '66:29:12')},
{'1': ('kn', '66:29:13')},
{'1': ('kn', '66:29:14')},
{'1': ('kn', '66:29:15')},
{'1': ('kn', '66:29:16')},
{'1': ('kn', '66:29:17')},
{'1': ('kn', '66:29:18')},
{'1': ('kn', '66:29:19')},
{'1': ('kn', '66:29:20')},
{'1': ('kn', '66:29:21')},
{'1': ('kn', '66:29:22')},
{'1': ('kn', '66:29:23')},
{'1': ('kn', '66:29:24')},
{'1': ('kn', '66:29:25')},
{'1': ('kn', '66:29:26')},
{'1': ('kn', '66:29:27')},
{'1': ('kn', '66:29:28')},
{'1': ('kn', '66:29:29')},
{'1': ('kn', '66:29:30')},
{'1': ('kn', '66:29:31')},
{'1': ('kn', '66:29:32')},
{'1': ('kn', '66:29:33')},
{'1': ('kn', '66:29:34')},
{'1': ('kn', '66:29:35')},
{'1': ('kn', '66:29:36')},
{'1': ('kn', '66:29:37')},
{'1': ('kn', '66:29:38')},
{'1': ('kn', '66:29:39')},
{'1': ('kn', '66:29:40')},
{'1': ('kn', '66:29:41')},
{'1': ('kn', '66:29:42')},
{'1': ('kn', '66:29:43')},
{'1': ('kn', '66:29:44')},
{'1': ('kn', '66:29:45')},
{'1': ('kn', '66:29:46')},
{'1': ('kn', '66:29:47')},
{'1': ('kn', '66:29:48')},
{'1': ('kn', '66:29:49')},
{'1': ('kn', '66:29:50')},
{'1': ('kn', '66:29:51')},
{'1': ('kn', '66:29:52')},
{'1': ('kn', '66:29:53')},
{'1': ('kn', '66:29:54')},
{'1': ('kn', '66:29:55')},
{'1': ('kn', '66:29:56')},
{'1': ('kn', '66:29:57')},
{'1': ('kn', '66:29:58')},
{'1': ('kn', '66:29:59')},
{'1': ('kn', '66:29:60')},
{'1': ('kn', '66:29:61')},
{'1': ('kn', '66:29:62')},
{'1': ('kn', '66:29:63')},
{'1': ('kn', '66:29:64')},
{'1': ('kn', '66:29:65')},
{'1': ('kn', '66:29:66')},
{'1': ('kn', '66:29:67')},
{'1': ('kn', '66:29:68')},
{'1': ('kn', '66:29:69')},
{'1': ('kn', '66:29:70')},
{'1': ('kn', '66:29:71')},
{'1': ('kn', '66:29:72')},
{'1': ('kn', '66:29:73')},
{'1': ('kn', '66:29:74')},
{'1': ('kn', '66:29:75')},
{'1': ('kn', '66:29:76')},
{'1': ('kn', '66:29:77')},
{'1': ('kn', '66:29:78')},
{'1': ('kn', '66:29:79')},
{'1': ('kn', '66:29:80')},
{'1': ('kn', '66:29:81')},
{'1': ('kn', '66:29:82')},
{'1': ('kn', '66:29:83')},
{'1': ('kn', '66:29:84')},
{'1': ('kn', '66:29:85')},
{'1': ('kn', '66:29:86')},
{'1': ('kn', '66:29:87')},
{'1': ('kn', '66:29:88')},
{'1': ('kn', '66:29:89')},
{'1': ('kn', '66:29:90')},
{'1': ('kn', '66:29:91')},
{'1': ('kn', '66:29:92')},
{'1': ('kn', '66:29:93')},
{'1': ('kn', '66:29:94')},
{'1': ('kn', '66:29:95')},
{'1': ('kn', '66:29:96')},
{'1': ('kn', '66:29:97')},
{'1': ('kn', '66:29:98')},
{'1': ('kn', '66:29:99')},
{'1': ('kn', '66:30:00')},
{'1': ('kn', '66:30:01')},
{'1': ('kn', '66:30:02')},
{'1': ('kn', '66:30:03')},
{'1': ('kn', '66:30:04')},
{'1': ('kn', '66:30:05')},
{'1': ('kn', '66:30:06')},
{'1': ('kn', '66:30:07')},
{'1': ('kn', '66:30:08')},
{'1': ('kn', '66:30:09')},
{'1': ('kn', '66:30:10')},
{'1': ('kn', '66:30:11')},
{'1': ('kn', '66:30:12')},
{'1': ('kn', '66:30:13')},
{'1': ('kn', '66:30:14')},
{'1': ('kn', '66:30:15')},
{'1': ('kn', '66:30:16')},
{'1': ('kn', '66:30:17')},
{'1': ('kn', '66:30:18')},
{'1': ('kn', '66:30:19')},
{'1': ('kn', '66:30:20')},
{'1': ('kn', '66:30:21')},
{'1': ('kn', '66:30:22')},
{'1': ('kn', '66:30:23')},
{'1': ('kn', '66:30:24')},
{'1': ('kn', '66:30:25')},
{'1': ('kn', '66:30:26')},
{'1': ('kn', '66:30:27')},
{'1': ('kn', '66:30:28')},
{'1': ('kn', '66:30:29')},
{'1': ('kn', '66:30:30')},
{'1': ('kn', '66:30:31')},
{'1': ('kn', '66:30:32')},
{'1': ('kn', '66:30:33')},
{'1': ('kn', '66:30:34')},
{'1': ('kn', '66:30:35')},
{'1': ('kn', '66:30:36')},
{'1': ('kn', '66:30:37')},
{'1': ('kn', '66:30:38')},
{'1': ('kn', '66:30:39')},
{'1': ('kn', '66:30:40')},
{'1': ('kn', '66:30:41')},
{'1': ('kn', '66:30:42')},
{'1': ('kn', '66:30:43')},
{'1': ('kn', '66:30:44')},
{'1': ('kn', '66:30:45')},
{'1': ('kn', '66:30:46')},
{'1': ('kn', '66:30:47')},
{'1': ('kn', '66:30:48')},
{'1': ('kn', '66:30:49')},
{'1': ('kn', '66:30:50')},
{'1': ('kn', '66:30:51')},
{'1': ('kn', '66:30:52')},
{'1': ('kn', '66:30:53')},
{'1': ('kn', '66:30:54')},
{'1': ('kn', '66:30:55')},
{'1': ('kn', '66:30:56')},
{'1': ('kn', '66:30:57')},
{'1': ('kn', '66:30:58')},
{'1': ('kn', '66:30:59')},
{'1': ('kn', '66:30:60')},
{'1': ('kn', '66:30:61')},
{'1': ('kn', '66:30:62')},
{'1': ('kn', '66:30:63')},
{'1': ('kn', '66:30:64')},
{'1': ('kn', '66:30:65')},
{'1': ('kn', '66:30:66')},
{'1': ('kn', '66:30:67')},
{'1': ('kn', '66:30:68')},
{'1': ('kn', '66:30:69')},
{'1': ('kn', '66:30:70')},
{'1': ('kn', '66:30:71')},
{'1': ('kn', '66:30:72')},
{'1': ('kn', '66:30:73')},
{'1': ('kn', '66:30:74')},
{'1': ('kn', '66:30:75')},
{'1': ('kn', '66:30:76')},
{'1': ('kn', '66:30:77')},
{'1': ('kn', '66:30:78')},
{'1': ('kn', '66:30:79')},
{'1': ('kn', '66:30:80')},
{'1': ('kn', '66:30:81')},
{'1': ('kn', '66:30:82')},
{'1': ('kn', '66:30:83')},
{'1': ('kn', '66:30:84')},
{'1': ('kn', '66:30:85')},
{'1': ('kn', '66:30:86')},
{'1': ('kn', '66:30:87')},
{'1': ('kn', '66:30:88')},
{'1': ('kn', '66:30:89')},
{'1': ('kn', '66:30:90')},
{'1': ('kn', '66:30:91')},
{'1': ('kn', '66:30:92')},
{'1': ('kn', '66:30:93')},
{'1': ('kn', '66:30:94')},
{'1': ('kn', '66:30:95')},
{'1': ('kn', '66:30:96')},
{'1': ('kn', '66:30:97')},
{'1': ('kn', '66:30:98')},
{'1': ('kn', '66:30:99')},
{'1': ('kn', '66:31:00')},
{'1': ('kn', '66:31:01')},
{'1': ('kn', '66:31:02')},
{'1': ('kn', '66:31:03')},
{'1': ('kn', '66:31:04')},
{'1': ('kn', '66:31:05')},
{'1': ('kn', '66:31:06')},
{'1': ('kn', '66:31:07')},
{'1': ('kn', '66:31:08')},
{'1': ('kn', '66:31:09')},
{'1': ('kn', '66:31:10')},
{'1': ('kn', '66:31:11')},
{'1': ('kn', '66:31:12')},
{'1': ('kn', '66:31:13')},
{'1': ('kn', '66:31:14')},
{'1': ('kn', '66:31:15')},
{'1': ('kn', '66:31:16')},
{'1': ('kn', '66:31:17')},
{'1': ('kn', '66:31:18')},
{'1': ('kn', '66:31:19')},
{'1': ('kn', '66:31:20')},
{'1': ('kn', '66:31:21')},
{'1': ('kn', '66:31:22')},
{'1': ('kn', '66:31:23')},
{'1': ('kn', '66:31:24')},
{'1': ('kn', '66:31:25')},
{'1': ('kn', '66:31:26')},
{'1': ('kn', '66:31:27')},
{'1': ('kn', '66:31:28')},
{'1': ('kn', '66:31:29')},
{'1': ('kn', '66:31:30')},
{'1': ('kn', '66:31:31')},
{'1': ('kn', '66:31:32')},
{'1': ('kn', '66:31:33')},
{'1': ('kn', '66:31:34')},
{'1': ('kn', '66:31:35')},
{'1': ('kn', '66:31:36')},
{'1': ('kn', '66:31:37')},
{'1': ('kn', '66:31:38')},
{'1': ('kn', '66:31:39')},
{'1': ('kn', '66:31:40')},
{'1': ('kn', '66:31:41')},
{'1': ('kn', '66:31:42')},
{'1': ('kn', '66:31:43')},
{'1': ('kn', '66:31:44')},
{'1': ('kn', '66:31:45')},
{'1': ('kn', '66:31:46')},
{'1': ('kn', '66:31:47')},
{'1': ('kn', '66:31:48')},
{'1': ('kn', '66:31:49')},
{'1': ('kn', '66:31:50')},
{'1': ('kn', '66:31:51')},
{'1': ('kn', '66:31:52')},
{'1': ('kn', '66:31:53')},
{'1': ('kn', '66:31:54')},
{'1': ('kn', '66:31:55')},
{'1': ('kn', '66:31:56')},
{'1': ('kn', '66:31:57')},
{'1': ('kn', '66:31:58')},
{'1': ('kn', '66:31:59')},
{'1': ('kn', '66:31:60')},
{'1': ('kn', '66:31:61')},
{'1': ('kn', '66:31:62')},
{'1': ('kn', '66:31:63')},
{'1': ('kn', '66:31:64')},
{'1': ('kn', '66:31:65')},
{'1': ('kn', '66:31:66')},
{'1': ('kn', '66:31:67')},
{'1': ('kn', '66:31:68')},
{'1': ('kn', '66:31:69')},
{'1': ('kn', '66:31:70')},
{'1': ('kn', '66:31:71')},
{'1': ('kn', '66:31:72')},
{'1': ('kn', '66:31:73')},
{'1': ('kn', '66:31:74')},
{'1': ('kn', '66:31:75')},
{'1': ('kn', '66:31:76')},
{'1': ('kn', '66:31:77')},
{'1': ('kn', '66:31:78')},
{'1': ('kn', '66:31:79')},
{'1': ('kn', '66:31:80')},
{'1': ('kn', '66:31:81')},
{'1': ('kn', '66:31:82')},
{'1': ('kn', '66:31:83')},
{'1': ('kn', '66:31:84')},
{'1': ('kn', '66:31:85')},
{'1': ('kn', '66:31:86')},
{'1': ('kn', '66:31:87')},
{'1': ('kn', '66:31:88')},
{'1': ('kn', '66:31:89')},
{'1': ('kn', '66:31:90')},
{'1': ('kn', '66:31:91')},
{'1': ('kn', '66:31:92')},
{'1': ('kn', '66:31:93')},
{'1': ('kn', '66:31:94')},
{'1': ('kn', '66:31:95')},
{'1': ('kn', '66:31:96')},
{'1': ('kn', '66:31:97')},
{'1': ('kn', '66:31:98')},
{'1': ('kn', '66:31:99')},
{'1': ('kn', '66:32:00')},
{'1': ('kn', '66:32:01')},
{'1': ('kn', '66:32:02')},
{'1': ('kn', '66:32:03')},
{'1': ('kn', '66:32:04')},
{'1': ('kn', '66:32:05')},
{'1': ('kn', '66:32:06')},
{'1': ('kn', '66:32:07')},
{'1': ('kn', '66:32:08')},
{'1': ('kn', '66:32:09')},
{'1': ('kn', '66:32:10')},
{'1': ('kn', '66:32:11')},
{'1': ('kn', '66:32:12')},
{'1': ('kn', '66:32:13')},
{'1': ('kn', '66:32:14')},
{'1': ('kn', '66:32:15')},
{'1': ('kn', '66:32:16')},
{'1': ('kn', '66:32:17')},
{'1': ('kn', '66:32:18')},
{'1': ('kn', '66:32:19')},
{'1': ('kn', '66:32:20')},
{'1': ('kn', '66:32:21')},
{'1': ('kn', '66:32:22')},
{'1': ('kn', '66:32:23')},
{'1': ('kn', '66:32:24')},
{'1': ('kn', '66:32:25')},
{'1': ('kn', '66:32:26')},
{'1': ('kn', '66:32:27')},
{'1': ('kn', '66:32:28')},
{'1': ('kn', '66:32:29')},
{'1': ('kn', '66:32:30')},
{'1': ('kn', '66:32:31')},
{'1': ('kn', '66:32:32')},
{'1': ('kn', '66:32:33')},
{'1': ('kn', '66:32:34')},
{'1': ('kn', '66:32:35')},
{'1': ('kn', '66:32:36')},
{'1': ('kn', '66:32:37')},
{'1': ('kn', '66:32:38')},
{'1': ('kn', '66:32:39')},
{'1': ('kn', '66:32:40')},
{'1': ('kn', '66:32:41')},
{'1': ('kn', '66:32:42')},
{'1': ('kn', '66:32:43')},
{'1': ('kn', '66:32:44')},
{'1': ('kn', '66:32:45')},
{'1': ('kn', '66:32:46')},
{'1': ('kn', '66:32:47')},
{'1': ('kn', '66:32:48')},
{'1': ('kn', '66:32:49')},
{'1': ('kn', '66:32:50')},
{'1': ('kn', '66:32:51')},
{'1': ('kn', '66:32:52')},
{'1': ('kn', '66:32:53')},
{'1': ('kn', '66:32:54')},
{'1': ('kn', '66:32:55')},
{'1': ('kn', '66:32:56')},
{'1': ('kn', '66:32:57')},
{'1': ('kn', '66:32:58')},
{'1': ('kn', '66:32:59')},
{'1': ('kn', '66:32:60')},
{'1': ('kn', '66:32:61')},
{'1': ('kn', '66:32:62')},
{'1': ('kn', '66:32:63')},
{'1': ('kn', '66:32:64')},
{'1': ('kn', '66:32:65')},
{'1': ('kn', '66:32:66')},
{'1': ('kn', '66:32:67')},
{'1': ('kn', '66:32:68')},
{'1': ('kn', '66:32:69')},
{'1': ('kn', '66:32:70')},
{'1': ('kn', '66:32:71')},
{'1': ('kn', '66:32:72')},
{'1': ('kn', '66:32:73')},
{'1': ('kn', '66:32:74')},
{'1': ('kn', '66:32:75')},
{'1': ('kn', '66:32:76')},
{'1': ('kn', '66:32:77')},
{'1': ('kn', '66:32:78')},
{'1': ('kn', '66:32:79')},
{'1': ('kn', '66:32:80')},
{'1': ('kn', '66:32:81')},
{'1': ('kn', '66:32:82')},
{'1': ('kn', '66:32:83')},
{'1': ('kn', '66:32:84')},
{'1': ('kn', '66:32:85')},
{'1': ('kn', '66:32:86')},
{'1': ('kn', '66:32:87')},
{'1': ('kn', '66:32:88')},
{'1': ('kn', '66:32:89')},
{'1': ('kn', '66:32:90')},
{'1': ('kn', '66:32:91')},
{'1': ('kn', '66:32:92')},
{'1': ('kn', '66:32:93')},
{'1': ('kn', '66:32:94')},
{'1': ('kn', '66:32:95')},
{'1': ('kn', '66:32:96')},
{'1': ('kn', '66:32:97')},
{'1': ('kn', '66:32:98')},
{'1': ('kn', '66:32:99')},
{'1': ('kn', '66:33:00')},
{'1': ('kn', '66:33:01')},
{'1': ('kn', '66:33:02')},
{'1': ('kn', '66:33:03')},
{'1': ('kn', '66:33:04')},
{'1': ('kn', '66:33:05')},
{'1': ('kn', '66:33:06')},
{'1': ('kn', '66:33:07')},
{'1': ('kn', '66:33:08')},
{'1': ('kn', '66:33:09')},
{'1': ('kn', '66:33:10')},
{'1': ('kn', '66:33:11')},
{'1': ('kn', '66:33:12')},
{'1': ('kn', '66:33:13')},
{'1': ('kn', '66:33:14')},
{'1': ('kn', '66:33:15')},
{'1': ('kn', '66:33:16')},
{'1': ('kn', '66:33:17')},
{'1': ('kn', '66:33:18')},
{'1': ('kn', '66:33:19')},
{'1': ('kn', '66:33:20')},
{'1': ('kn', '66:33:21')},
{'1': ('kn', '66:33:22')},
{'1': ('kn', '66:33:23')},
{'1': ('kn', '66:33:24')},
{'1': ('kn', '66:33:25')},
{'1': ('kn', '66:33:26')},
{'1': ('kn', '66:33:27')},
{'1': ('kn', '66:33:28')},
{'1': ('kn', '66:33:29')},
{'1': ('kn', '66:33:30')},
{'1': ('kn', '66:33:31')},
{'1': ('kn', '66:33:32')},
{'1': ('kn', '66:33:33')},
{'1': ('kn', '66:33:34')},
{'1': ('kn', '66:33:35')},
{'1': ('kn', '66:33:36')},
{'1': ('kn', '66:33:37')},
{'1': ('kn', '66:33:38')},
{'1': ('kn', '66:33:39')},
{'1': ('kn', '66:33:40')},
{'1': ('kn', '66:33:41')},
{'1': ('kn', '66:33:42')},
{'1': ('kn', '66:33:43')},
{'1': ('kn', '66:33:44')},
{'1': ('kn', '66:33:45')},
{'1': ('kn', '66:33:46')},
{'1': ('kn', '66:33:47')},
{'1': ('kn', '66:33:48')},
{'1': ('kn', '66:33:49')},
{'1': ('kn', '66:33:50')},
{'1': ('kn', '66:33:51')},
{'1': ('kn', '66:33:52')},
{'1': ('kn', '66:33:53')},
{'1': ('kn', '66:33:54')},
{'1': ('kn', '66:33:55')},
{'1': ('kn', '66:33:56')},
{'1': ('kn', '66:33:57')},
{'1': ('kn', '66:33:58')},
{'1': ('kn', '66:33:59')},
{'1': ('kn', '66:33:60')},
{'1': ('kn', '66:33:61')},
{'1': ('kn', '66:33:62')},
{'1': ('kn', '66:33:63')},
{'1': ('kn', '66:33:64')},
{'1': ('kn', '66:33:65')},
{'1': ('kn', '66:33:66')},
{'1': ('kn', '66:33:67')},
{'1': ('kn', '66:33:68')},
{'1': ('kn', '66:33:69')},
{'1': ('kn', '66:33:70')},
{'1': ('kn', '66:33:71')},
{'1': ('kn', '66:33:72')},
{'1': ('kn', '66:33:73')},
{'1': ('kn', '66:33:74')},
{'1': ('kn', '66:33:75')},
{'1': ('kn', '66:33:76')},
{'1': ('kn', '66:33:77')},
{'1': ('kn', '66:33:78')},
{'1': ('kn', '66:33:79')},
{'1': ('kn', '66:33:80')},
{'1': ('kn', '66:33:81')},
{'1': ('kn', '66:33:82')},
{'1': ('kn', '66:33:83')},
{'1': ('kn', '66:33:84')},
{'1': ('kn', '66:33:85')},
{'1': ('kn', '66:33:86')},
{'1': ('kn', '66:33:87')},
{'1': ('kn', '66:33:88')},
{'1': ('kn', '66:33:89')},
{'1': ('kn', '66:33:90')},
{'1': ('kn', '66:33:91')},
{'1': ('kn', '66:33:92')},
{'1': ('kn', '66:33:93')},
{'1': ('kn', '66:33:94')},
{'1': ('kn', '66:33:95')},
{'1': ('kn', '66:33:96')},
{'1': ('kn', '66:33:97')},
{'1': ('kn', '66:33:98')},
{'1': ('kn', '66:33:99')},
{'1': ('kn', '66:34:00')},
{'1': ('kn', '66:34:01')},
{'1': ('kn', '66:34:02')},
{'1': ('kn', '66:34:03')},
{'1': ('kn', '66:34:04')},
{'1': ('kn', '66:34:05')},
{'1': ('kn', '66:34:06')},
{'1': ('kn', '66:34:07')},
{'1': ('kn', '66:34:08')},
{'1': ('kn', '66:34:09')},
{'1': ('kn', '66:34:10')},
{'1': ('kn', '66:34:11')},
{'1': ('kn', '66:34:12')},
{'1': ('kn', '66:34:13')},
{'1': ('kn', '66:34:14')},
{'1': ('kn', '66:34:15')},
{'1': ('kn', '66:34:16')},
{'1': ('kn', '66:34:17')},
{'1': ('kn', '66:34:18')},
{'1': ('kn', '66:34:19')},
{'1': ('kn', '66:34:20')},
{'1': ('kn', '66:34:21')},
{'1': ('kn', '66:34:22')},
{'1': ('kn', '66:34:23')},
{'1': ('kn', '66:34:24')},
{'1': ('kn', '66:34:25')},
{'1': ('kn', '66:34:26')},
{'1': ('kn', '66:34:27')},
{'1': ('kn', '66:34:28')},
{'1': ('kn', '66:34:29')},
{'1': ('kn', '66:34:30')},
{'1': ('kn', '66:34:31')},
{'1': ('kn', '66:34:32')},
{'1': ('kn', '66:34:33')},
{'1': ('kn', '66:34:34')},
{'1': ('kn', '66:34:35')},
{'1': ('kn', '66:34:36')},
{'1': ('kn', '66:34:37')},
{'1': ('kn', '66:34:38')},
{'1': ('kn', '66:34:39')},
{'1': ('kn', '66:34:40')},
{'1': ('kn', '66:34:41')},
{'1': ('kn', '66:34:42')},
{'1': ('kn', '66:34:43')},
{'1': ('kn', '66:34:44')},
{'1': ('kn', '66:34:45')},
{'1': ('kn', '66:34:46')},
{'1': ('kn', '66:34:47')},
{'1': ('kn', '66:34:48')},
{'1': ('kn', '66:34:49')},
{'1': ('kn', '66:34:50')},
{'1': ('kn', '66:34:51')},
{'1': ('kn', '66:34:52')},
{'1': ('kn', '66:34:53')},
{'1': ('kn', '66:34:54')},
{'1': ('kn', '66:34:55')},
{'1': ('kn', '66:34:56')},
{'1': ('kn', '66:34:57')},
{'1': ('kn', '66:34:58')},
{'1': ('kn', '66:34:59')},
{'1': ('kn', '66:34:60')},
{'1': ('kn', '66:34:61')},
{'1': ('kn', '66:34:62')},
{'1': ('kn', '66:34:63')},
{'1': ('kn', '66:34:64')},
{'1': ('kn', '66:34:65')},
{'1': ('kn', '66:34:66')},
{'1': ('kn', '66:34:67')},
{'1': ('kn', '66:34:68')},
{'1': ('kn', '66:34:69')},
{'1': ('kn', '66:34:70')},
{'1': ('kn', '66:34:71')},
{'1': ('kn', '66:34:72')},
{'1': ('kn', '66:34:73')},
{'1': ('kn', '66:34:74')},
{'1': ('kn', '66:34:75')},
{'1': ('kn', '66:34:76')},
{'1': ('kn', '66:34:77')},
{'1': ('kn', '66:34:78')},
{'1': ('kn', '66:34:79')},
{'1': ('kn', '66:34:80')},
{'1': ('kn', '66:34:81')},
{'1': ('kn', '66:34:82')},
{'1': ('kn', '66:34:83')},
{'1': ('kn', '66:34:84')},
{'1': ('kn', '66:34:85')},
{'1': ('kn', '66:34:86')},
{'1': ('kn', '66:34:87')},
{'1': ('kn', '66:34:88')},
{'1': ('kn', '66:34:89')},
{'1': ('kn', '66:34:90')},
{'1': ('kn', '66:34:91')},
{'1': ('kn', '66:34:92')},
{'1': ('kn', '66:34:93')},
{'1': ('kn', '66:34:94')},
{'1': ('kn', '66:34:95')},
{'1': ('kn', '66:34:96')},
{'1': ('kn', '66:34:97')},
{'1': ('kn', '66:34:98')},
{'1': ('kn', '66:34:99')},
{'1': ('kn', '66:35:00')},
{'1': ('kn', '66:35:01')},
{'1': ('kn', '66:35:02')},
{'1': ('kn', '66:35:03')},
{'1': ('kn', '66:35:04')},
{'1': ('kn', '66:35:05')},
{'1': ('kn', '66:35:06')},
{'1': ('kn', '66:35:07')},
{'1': ('kn', '66:35:08')},
{'1': ('kn', '66:35:09')},
{'1': ('kn', '66:35:10')},
{'1': ('kn', '66:35:11')},
{'1': ('kn', '66:35:12')},
{'1': ('kn', '66:35:13')},
{'1': ('kn', '66:35:14')},
{'1': ('kn', '66:35:15')},
{'1': ('kn', '66:35:16')},
{'1': ('kn', '66:35:17')},
{'1': ('kn', '66:35:18')},
{'1': ('kn', '66:35:19')},
{'1': ('kn', '66:35:20')},
{'1': ('kn', '66:35:21')},
{'1': ('kn', '66:35:22')},
{'1': ('kn', '66:35:23')},
{'1': ('kn', '66:35:24')},
{'1': ('kn', '66:35:25')},
{'1': ('kn', '66:35:26')},
{'1': ('kn', '66:35:27')},
{'1': ('kn', '66:35:28')},
{'1': ('kn', '66:35:29')},
{'1': ('kn', '66:35:30')},
{'1': ('kn', '66:35:31')},
{'1': ('kn', '66:35:32')},
{'1': ('kn', '66:35:33')},
{'1': ('kn', '66:35:34')},
{'1': ('kn', '66:35:35')},
{'1': ('kn', '66:35:36')},
{'1': ('kn', '66:35:37')},
{'1': ('kn', '66:35:38')},
{'1': ('kn', '66:35:39')},
{'1': ('kn', '66:35:40')},
{'1': ('kn', '66:35:41')},
{'1': ('kn', '66:35:42')},
{'1': ('kn', '66:35:43')},
{'1': ('kn', '66:35:44')},
{'1': ('kn', '66:35:45')},
{'1': ('kn', '66:35:46')},
{'1': ('kn', '66:35:47')},
{'1': ('kn', '66:35:48')},
{'1': ('kn', '66:35:49')},
{'1': ('kn', '66:35:50')},
{'1': ('kn', '66:35:51')},
{'1': ('kn', '66:35:52')},
{'1': ('kn', '66:35:53')},
{'1': ('kn', '66:35:54')},
{'1': ('kn', '66:35:55')},
{'1': ('kn', '66:35:56')},
{'1': ('kn', '66:35:57')},
{'1': ('kn', '66:35:58')},
{'1': ('kn', '66:35:59')},
{'1': ('kn', '66:35:60')},
{'1': ('kn', '66:35:61')},
{'1': ('kn', '66:35:62')},
{'1': ('kn', '66:35:63')},
{'1': ('kn', '66:35:64')},
{'1': ('kn', '66:35:65')},
{'1': ('kn', '66:35:66')},
{'1': ('kn', '66:35:67')},
{'1': ('kn', '66:35:68')},
{'1': ('kn', '66:35:69')},
{'1': ('kn', '66:35:70')},
{'1': ('kn', '66:35:71')},
{'1': ('kn', '66:35:72')},
{'1': ('kn', '66:35:73')},
{'1': ('kn', '66:35:74')},
{'1': ('kn', '66:35:75')},
{'1': ('kn', '66:35:76')},
{'1': ('kn', '66:35:77')},
{'1': ('kn', '66:35:78')},
{'1': ('kn', '66:35:79')},
{'1': ('kn', '66:35:80')},
{'1': ('kn', '66:35:81')},
{'1': ('kn', '66:35:82')},
{'1': ('kn', '66:35:83')},
{'1': ('kn', '66:35:84')},
{'1': ('kn', '66:35:85')},
{'1': ('kn', '66:35:86')},
{'1': ('kn', '66:35:87')},
{'1': ('kn', '66:35:88')},
{'1': ('kn', '66:35:89')},
{'1': ('kn', '66:35:90')},
{'1': ('kn', '66:35:91')},
{'1': ('kn', '66:35:92')},
{'1': ('kn', '66:35:93')},
{'1': ('kn', '66:35:94')},
{'1': ('kn', '66:35:95')},
{'1': ('kn', '66:35:96')},
{'1': ('kn', '66:35:97')},
{'1': ('kn', '66:35:98')},
{'1': ('kn', '66:35:99')},
{'1': ('kn', '66:36:00')},
{'1': ('kn', '66:36:01')},
{'1': ('kn', '66:36:02')},
{'1': ('kn', '66:36:03')},
{'1': ('kn', '66:36:04')},
{'1': ('kn', '66:36:05')},
{'1': ('kn', '66:36:06')},
{'1': ('kn', '66:36:07')},
{'1': ('kn', '66:36:08')},
{'1': ('kn', '66:36:09')},
{'1': ('kn', '66:36:10')},
{'1': ('kn', '66:36:11')},
{'1': ('kn', '66:36:12')},
{'1': ('kn', '66:36:13')},
{'1': ('kn', '66:36:14')},
{'1': ('kn', '66:36:15')},
{'1': ('kn', '66:36:16')},
{'1': ('kn', '66:36:17')},
{'1': ('kn', '66:36:18')},
{'1': ('kn', '66:36:19')},
{'1': ('kn', '66:36:20')},
{'1': ('kn', '66:36:21')},
{'1': ('kn', '66:36:22')},
{'1': ('kn', '66:36:23')},
{'1': ('kn', '66:36:24')},
{'1': ('kn', '66:36:25')},
{'1': ('kn', '66:36:26')},
{'1': ('kn', '66:36:27')},
{'1': ('kn', '66:36:28')},
{'1': ('kn', '66:36:29')},
{'1': ('kn', '66:36:30')},
{'1': ('kn', '66:36:31')},
{'1': ('kn', '66:36:32')},
{'1': ('kn', '66:36:33')},
{'1': ('kn', '66:36:34')},
{'1': ('kn', '66:36:35')},
{'1': ('kn', '66:36:36')},
{'1': ('kn', '66:36:37')},
{'1': ('kn', '66:36:38')},
{'1': ('kn', '66:36:39')},
{'1': ('kn', '66:36:40')},
{'1': ('kn', '66:36:41')},
{'1': ('kn', '66:36:42')},
{'1': ('kn', '66:36:43')},
{'1': ('kn', '66:36:44')},
{'1': ('kn', '66:36:45')},
{'1': ('kn', '66:36:46')},
{'1': ('kn', '66:36:47')},
{'1': ('kn', '66:36:48')},
{'1': ('kn', '66:36:49')},
{'1': ('kn', '66:36:50')},
{'1': ('kn', '66:36:51')},
{'1': ('kn', '66:36:52')},
{'1': ('kn', '66:36:53')},
{'1': ('kn', '66:36:54')},
{'1': ('kn', '66:36:55')},
{'1': ('kn', '66:36:56')},
{'1': ('kn', '66:36:57')},
{'1': ('kn', '66:36:58')},
{'1': ('kn', '66:36:59')},
{'1': ('kn', '66:36:60')},
{'1': ('kn', '66:36:61')},
{'1': ('kn', '66:36:62')},
{'1': ('kn', '66:36:63')},
{'1': ('kn', '66:36:64')},
{'1': ('kn', '66:36:65')},
{'1': ('kn', '66:36:66')},
{'1': ('kn', '66:36:67')},
{'1': ('kn', '66:36:68')},
{'1': ('kn', '66:36:69')},
{'1': ('kn', '66:36:70')},
{'1': ('kn', '66:36:71')},
{'1': ('kn', '66:36:72')},
{'1': ('kn', '66:36:73')},
{'1': ('kn', '66:36:74')},
{'1': ('kn', '66:36:75')},
{'1': ('kn', '66:36:76')},
{'1': ('kn', '66:36:77')},
{'1': ('kn', '66:36:78')},
{'1': ('kn', '66:36:79')},
{'1': ('kn', '66:36:80')},
{'1': ('kn', '66:36:81')},
{'1': ('kn', '66:36:82')},
{'1': ('kn', '66:36:83')},
{'1': ('kn', '66:36:84')},
{'1': ('kn', '66:36:85')},
{'1': ('kn', '66:36:86')},
{'1': ('kn', '66:36:87')},
{'1': ('kn', '66:36:88')},
{'1': ('kn', '66:36:89')},
{'1': ('kn', '66:36:90')},
{'1': ('kn', '66:36:91')},
{'1': ('kn', '66:36:92')},
{'1': ('kn', '66:36:93')},
{'1': ('kn', '66:36:94')},
{'1': ('kn', '66:36:95')},
{'1': ('kn', '66:36:96')},
{'1': ('kn', '66:36:97')},
{'1': ('kn', '66:36:98')},
{'1': ('kn', '66:36:99')},
{'1': ('kn', '66:37:00')},
{'1': ('kn', '66:37:01')},
{'1': ('kn', '66:37:02')},
{'1': ('kn', '66:37:03')},
{'1': ('kn', '66:37:04')},
{'1': ('kn', '66:37:05')},
{'1': ('kn', '66:37:06')},
{'1': ('kn', '66:37:07')},
{'1': ('kn', '66:37:08')},
{'1': ('kn', '66:37:09')},
{'1': ('kn', '66:37:10')},
{'1': ('kn', '66:37:11')},
{'1': ('kn', '66:37:12')},
{'1': ('kn', '66:37:13')},
{'1': ('kn', '66:37:14')},
{'1': ('kn', '66:37:15')},
{'1': ('kn', '66:37:16')},
{'1': ('kn', '66:37:17')},
{'1': ('kn', '66:37:18')},
{'1': ('kn', '66:37:19')},
{'1': ('kn', '66:37:20')},
{'1': ('kn', '66:37:21')},
{'1': ('kn', '66:37:22')},
{'1': ('kn', '66:37:23')},
{'1': ('kn', '66:37:24')},
{'1': ('kn', '66:37:25')},
{'1': ('kn', '66:37:26')},
{'1': ('kn', '66:37:27')},
{'1': ('kn', '66:37:28')},
{'1': ('kn', '66:37:29')},
{'1': ('kn', '66:37:30')},
{'1': ('kn', '66:37:31')},
{'1': ('kn', '66:37:32')},
{'1': ('kn', '66:37:33')},
{'1': ('kn', '66:37:34')},
{'1': ('kn', '66:37:35')},
{'1': ('kn', '66:37:36')},
{'1': ('kn', '66:37:37')},
{'1': ('kn', '66:37:38')},
{'1': ('kn', '66:37:39')},
{'1': ('kn', '66:37:40')},
{'1': ('kn', '66:37:41')},
{'1': ('kn', '66:37:42')},
{'1': ('kn', '66:37:43')},
{'1': ('kn', '66:37:44')},
{'1': ('kn', '66:37:45')},
{'1': ('kn', '66:37:46')},
{'1': ('kn', '66:37:47')},
{'1': ('kn', '66:37:48')},
{'1': ('kn', '66:37:49')},
{'1': ('kn', '66:37:50')},
{'1': ('kn', '66:37:51')},
{'1': ('kn', '66:37:52')},
{'1': ('kn', '66:37:53')},
{'1': ('kn', '66:37:54')},
{'1': ('kn', '66:37:55')},
{'1': ('kn', '66:37:56')},
{'1': ('kn', '66:37:57')},
{'1': ('kn', '66:37:58')},
{'1': ('kn', '66:37:59')},
{'1': ('kn', '66:37:60')},
{'1': ('kn', '66:37:61')},
{'1': ('kn', '66:37:62')},
{'1': ('kn', '66:37:63')},
{'1': ('kn', '66:37:64')},
{'1': ('kn', '66:37:65')},
{'1': ('kn', '66:37:66')},
{'1': ('kn', '66:37:67')},
{'1': ('kn', '66:37:68')},
{'1': ('kn', '66:37:69')},
{'1': ('kn', '66:37:70')},
{'1': ('kn', '66:37:71')},
{'1': ('kn', '66:37:72')},
{'1': ('kn', '66:37:73')},
{'1': ('kn', '66:37:74')},
{'1': ('kn', '66:37:75')},
{'1': ('kn', '66:37:76')},
{'1': ('kn', '66:37:77')},
{'1': ('kn', '66:37:78')},
{'1': ('kn', '66:37:79')},
{'1': ('kn', '66:37:80')},
{'1': ('kn', '66:37:81')},
{'1': ('kn', '66:37:82')},
{'1': ('kn', '66:37:83')},
{'1': ('kn', '66:37:84')},
{'1': ('kn', '66:37:85')},
{'1': ('kn', '66:37:86')},
{'1': ('kn', '66:37:87')},
{'1': ('kn', '66:37:88')},
{'1': ('kn', '66:37:89')},
{'1': ('kn', '66:37:90')},
{'1': ('kn', '66:37:91')},
{'1': ('kn', '66:37:92')},
{'1': ('kn', '66:37:93')},
{'1': ('kn', '66:37:94')},
{'1': ('kn', '66:37:95')},
{'1': ('kn', '66:37:96')},
{'1': ('kn', '66:37:97')},
{'1': ('kn', '66:37:98')},
{'1': ('kn', '66:37:99')},
{'1': ('kn', '66:38:00')},
{'1': ('kn', '66:38:01')},
{'1': ('kn', '66:38:02')},
{'1': ('kn', '66:38:03')},
{'1': ('kn', '66:38:04')},
{'1': ('kn', '66:38:05')},
{'1': ('kn', '66:38:06')},
{'1': ('kn', '66:38:07')},
{'1': ('kn', '66:38:08')},
{'1': ('kn', '66:38:09')},
{'1': ('kn', '66:38:10')},
{'1': ('kn', '66:38:11')},
{'1': ('kn', '66:38:12')},
{'1': ('kn', '66:38:13')},
{'1': ('kn', '66:38:14')},
{'1': ('kn', '66:38:15')},
{'1': ('kn', '66:38:16')},
{'1': ('kn', '66:38:17')},
{'1': ('kn', '66:38:18')},
{'1': ('kn', '66:38:19')},
{'1': ('kn', '66:38:20')},
{'1': ('kn', '66:38:21')},
{'1': ('kn', '66:38:22')},
{'1': ('kn', '66:38:23')},
{'1': ('kn', '66:38:24')},
{'1': ('kn', '66:38:25')},
{'1': ('kn', '66:38:26')},
{'1': ('kn', '66:38:27')},
{'1': ('kn', '66:38:28')},
{'1': ('kn', '66:38:29')},
{'1': ('kn', '66:38:30')},
{'1': ('kn', '66:38:31')},
{'1': ('kn', '66:38:32')},
{'1': ('kn', '66:38:33')},
{'1': ('kn', '66:38:34')},
{'1': ('kn', '66:38:35')},
{'1': ('kn', '66:38:36')},
{'1': ('kn', '66:38:37')},
{'1': ('kn', '66:38:38')},
{'1': ('kn', '66:38:39')},
{'1': ('kn', '66:38:40')},
{'1': ('kn', '66:38:41')},
{'1': ('kn', '66:38:42')},
{'1': ('kn', '66:38:43')},
{'1': ('kn', '66:38:44')},
{'1': ('kn', '66:38:45')},
{'1': ('kn', '66:38:46')},
{'1': ('kn', '66:38:47')},
{'1': ('kn', '66:38:48')},
{'1': ('kn', '66:38:49')},
{'1': ('kn', '66:38:50')},
{'1': ('kn', '66:38:51')},
{'1': ('kn', '66:38:52')},
{'1': ('kn', '66:38:53')},
{'1': ('kn', '66:38:54')},
{'1': ('kn', '66:38:55')},
{'1': ('kn', '66:38:56')},
{'1': ('kn', '66:38:57')},
{'1': ('kn', '66:38:58')},
{'1': ('kn', '66:38:59')},
{'1': ('kn', '66:38:60')},
{'1': ('kn', '66:38:61')},
{'1': ('kn', '66:38:62')},
{'1': ('kn', '66:38:63')},
{'1': ('kn', '66:38:64')},
{'1': ('kn', '66:38:65')},
{'1': ('kn', '66:38:66')},
{'1': ('kn', '66:38:67')},
{'1': ('kn', '66:38:68')},
{'1': ('kn', '66:38:69')},
{'1': ('kn', '66:38:70')},
{'1': ('kn', '66:38:71')},
{'1': ('kn', '66:38:72')},
{'1': ('kn', '66:38:73')},
{'1': ('kn', '66:38:74')},
{'1': ('kn', '66:38:75')},
{'1': ('kn', '66:38:76')},
{'1': ('kn', '66:38:77')},
{'1': ('kn', '66:38:78')},
{'1': ('kn', '66:38:79')},
{'1': ('kn', '66:38:80')},
{'1': ('kn', '66:38:81')},
{'1': ('kn', '66:38:82')},
{'1': ('kn', '66:38:83')},
{'1': ('kn', '66:38:84')},
{'1': ('kn', '66:38:85')},
{'1': ('kn', '66:38:86')},
{'1': ('kn', '66:38:87')},
{'1': ('kn', '66:38:88')},
{'1': ('kn', '66:38:89')},
{'1': ('kn', '66:38:90')},
{'1': ('kn', '66:38:91')},
{'1': ('kn', '66:38:92')},
{'1': ('kn', '66:38:93')},
{'1': ('kn', '66:38:94')},
{'1': ('kn', '66:38:95')},
{'1': ('kn', '66:38:96')},
{'1': ('kn', '66:38:97')},
{'1': ('kn', '66:38:98')},
{'1': ('kn', '66:38:99')},
{'1': ('kn', '66:39:00')},
{'1': ('kn', '66:39:01')},
{'1': ('kn', '66:39:02')},
{'1': ('kn', '66:39:03')},
{'1': ('kn', '66:39:04')},
{'1': ('kn', '66:39:05')},
{'1': ('kn', '66:39:06')},
{'1': ('kn', '66:39:07')},
{'1': ('kn', '66:39:08')},
{'1': ('kn', '66:39:09')},
{'1': ('kn', '66:39:10')},
{'1': ('kn', '66:39:11')},
{'1': ('kn', '66:39:12')},
{'1': ('kn', '66:39:13')},
{'1': ('kn', '66:39:14')},
{'1': ('kn', '66:39:15')},
{'1': ('kn', '66:39:16')},
{'1': ('kn', '66:39:17')},
{'1': ('kn', '66:39:18')},
{'1': ('kn', '66:39:19')},
{'1': ('kn', '66:39:20')},
{'1': ('kn', '66:39:21')},
{'1': ('kn', '66:39:22')},
{'1': ('kn', '66:39:23')},
{'1': ('kn', '66:39:24')},
{'1': ('kn', '66:39:25')},
{'1': ('kn', '66:39:26')},
{'1': ('kn', '66:39:27')},
{'1': ('kn', '66:39:28')},
{'1': ('kn', '66:39:29')},
{'1': ('kn', '66:39:30')},
{'1': ('kn', '66:39:31')},
{'1': ('kn', '66:39:32')},
{'1': ('kn', '66:39:33')},
{'1': ('kn', '66:39:34')},
{'1': ('kn', '66:39:35')},
{'1': ('kn', '66:39:36')},
{'1': ('kn', '66:39:37')},
{'1': ('kn', '66:39:38')},
{'1': ('kn', '66:39:39')},
{'1': ('kn', '66:39:40')},
{'1': ('kn', '66:39:41')},
{'1': ('kn', '66:39:42')},
{'1': ('kn', '66:39:43')},
{'1': ('kn', '66:39:44')},
{'1': ('kn', '66:39:45')},
{'1': ('kn', '66:39:46')},
{'1': ('kn', '66:39:47')},
{'1': ('kn', '66:39:48')},
{'1': ('kn', '66:39:49')},
{'1': ('kn', '66:39:50')},
{'1': ('kn', '66:39:51')},
{'1': ('kn', '66:39:52')},
{'1': ('kn', '66:39:53')},
{'1': ('kn', '66:39:54')},
{'1': ('kn', '66:39:55')},
{'1': ('kn', '66:39:56')},
{'1': ('kn', '66:39:57')},
{'1': ('kn', '66:39:58')},
{'1': ('kn', '66:39:59')},
{'1': ('kn', '66:39:60')},
{'1': ('kn', '66:39:61')},
{'1': ('kn', '66:39:62')},
{'1': ('kn', '66:39:63')},
{'1': ('kn', '66:39:64')},
{'1': ('kn', '66:39:65')},
{'1': ('kn', '66:39:66')},
{'1': ('kn', '66:39:67')},
{'1': ('kn', '66:39:68')},
{'1': ('kn', '66:39:69')},
{'1': ('kn', '66:39:70')},
{'1': ('kn', '66:39:71')},
{'1': ('kn', '66:39:72')},
{'1': ('kn', '66:39:73')},
{'1': ('kn', '66:39:74')},
{'1': ('kn', '66:39:75')},
{'1': ('kn', '66:39:76')},
{'1': ('kn', '66:39:77')},
{'1': ('kn', '66:39:78')},
{'1': ('kn', '66:39:79')},
{'1': ('kn', '66:39:80')},
{'1': ('kn', '66:39:81')},
{'1': ('kn', '66:39:82')},
{'1': ('kn', '66:39:83')},
{'1': ('kn', '66:39:84')},
{'1': ('kn', '66:39:85')},
{'1': ('kn', '66:39:86')},
{'1': ('kn', '66:39:87')},
{'1': ('kn', '66:39:88')},
{'1': ('kn', '66:39:89')},
{'1': ('kn', '66:39:90')},
{'1': ('kn', '66:39:91')},
{'1': ('kn', '66:39:92')},
{'1': ('kn', '66:39:93')},
{'1': ('kn', '66:39:94')},
{'1': ('kn', '66:39:95')},
{'1': ('kn', '66:39:96')},
{'1': ('kn', '66:39:97')},
{'1': ('kn', '66:39:98')},
{'1': ('kn', '66:39:99')},
{'1': ('kn', '66:40:00')},
{'1': ('kn', '66:40:01')},
{'1': ('kn', '66:40:02')},
{'1': ('kn', '66:40:03')},
{'1': ('kn', '66:40:04')},
{'1': ('kn', '66:40:05')},
{'1': ('kn', '66:40:06')},
{'1': ('kn', '66:40:07')},
{'1': ('kn', '66:40:08')},
{'1': ('kn', '66:40:09')},
{'1': ('kn', '66:40:10')},
{'1': ('kn', '66:40:11')},
{'1': ('kn', '66:40:12')},
{'1': ('kn', '66:40:13')},
{'1': ('kn', '66:40:14')},
{'1': ('kn', '66:40:15')},
{'1': ('kn', '66:40:16')},
{'1': ('kn', '66:40:17')},
{'1': ('kn', '66:40:18')},
{'1': ('kn', '66:40:19')},
{'1': ('kn', '66:40:20')},
{'1': ('kn', '66:40:21')},
{'1': ('kn', '66:40:22')},
{'1': ('kn', '66:40:23')},
{'1': ('kn', '66:40:24')},
{'1': ('kn', '66:40:25')},
{'1': ('kn', '66:40:26')},
{'1': ('kn', '66:40:27')},
{'1': ('kn', '66:40:28')},
{'1': ('kn', '66:40:29')},
{'1': ('kn', '66:40:30')},
{'1': ('kn', '66:40:31')},
{'1': ('kn', '66:40:32')},
{'1': ('kn', '66:40:33')},
{'1': ('kn', '66:40:34')},
{'1': ('kn', '66:40:35')},
{'1': ('kn', '66:40:36')},
{'1': ('kn', '66:40:37')},
{'1': ('kn', '66:40:38')},
{'1': ('kn', '66:40:39')},
{'1': ('kn', '66:40:40')},
{'1': ('kn', '66:40:41')},
{'1': ('kn', '66:40:42')},
{'1': ('kn', '66:40:43')},
{'1': ('kn', '66:40:44')},
{'1': ('kn', '66:40:45')},
{'1': ('kn', '66:40:46')},
{'1': ('kn', '66:40:47')},
{'1': ('kn', '66:40:48')},
{'1': ('kn', '66:40:49')},
{'1': ('kn', '66:40:50')},
{'1': ('kn', '66:40:51')},
{'1': ('kn', '66:40:52')},
{'1': ('kn', '66:40:53')},
{'1': ('kn', '66:40:54')},
{'1': ('kn', '66:40:55')},
{'1': ('kn', '66:40:56')},
{'1': ('kn', '66:40:57')},
{'1': ('kn', '66:40:58')},
{'1': ('kn', '66:40:59')},
{'1': ('kn', '66:40:60')},
{'1': ('kn', '66:40:61')},
{'1': ('kn', '66:40:62')},
{'1': ('kn', '66:40:63')},
{'1': ('kn', '66:40:64')},
{'1': ('kn', '66:40:65')},
{'1': ('kn', '66:40:66')},
{'1': ('kn', '66:40:67')},
{'1': ('kn', '66:40:68')},
{'1': ('kn', '66:40:69')},
{'1': ('kn', '66:40:70')},
{'1': ('kn', '66:40:71')},
{'1': ('kn', '66:40:72')},
{'1': ('kn', '66:40:73')},
{'1': ('kn', '66:40:74')},
{'1': ('kn', '66:40:75')},
{'1': ('kn', '66:40:76')},
{'1': ('kn', '66:40:77')},
{'1': ('kn', '66:40:78')},
{'1': ('kn', '66:40:79')},
{'1': ('kn', '66:40:80')},
{'1': ('kn', '66:40:81')},
{'1': ('kn', '66:40:82')},
{'1': ('kn', '66:40:83')},
{'1': ('kn', '66:40:84')},
{'1': ('kn', '66:40:85')},
{'1': ('kn', '66:40:86')},
{'1': ('kn', '66:40:87')},
{'1': ('kn', '66:40:88')},
{'1': ('kn', '66:40:89')},
{'1': ('kn', '66:40:90')},
{'1': ('kn', '66:40:91')},
{'1': ('kn', '66:40:92')},
{'1': ('kn', '66:40:93')},
{'1': ('kn', '66:40:94')},
{'1': ('kn', '66:40:95')},
{'1': ('kn', '66:40:96')},
{'1': ('kn', '66:40:97')},
{'1': ('kn', '66:40:98')},
{'1': ('kn', '66:40:99')},
{'1': ('kn', '66:41:00')},
{'1': ('kn', '66:41:01')},
{'1': ('kn', '66:41:02')},
{'1': ('kn', '66:41:03')},
{'1': ('kn', '66:41:04')},
{'1': ('kn', '66:41:05')},
{'1': ('kn', '66:41:06')},
{'1': ('kn', '66:41:07')},
{'1': ('kn', '66:41:08')},
{'1': ('kn', '66:41:09')},
{'1': ('kn', '66:41:10')},
{'1': ('kn', '66:41:11')},
{'1': ('kn', '66:41:12')},
{'1': ('kn', '66:41:13')},
{'1': ('kn', '66:41:14')},
{'1': ('kn', '66:41:15')},
{'1': ('kn', '66:41:16')},
{'1': ('kn', '66:41:17')},
{'1': ('kn', '66:41:18')},
{'1': ('kn', '66:41:19')},
{'1': ('kn', '66:41:20')},
{'1': ('kn', '66:41:21')},
{'1': ('kn', '66:41:22')},
{'1': ('kn', '66:41:23')},
{'1': ('kn', '66:41:24')},
{'1': ('kn', '66:41:25')},
{'1': ('kn', '66:41:26')},
{'1': ('kn', '66:41:27')},
{'1': ('kn', '66:41:28')},
{'1': ('kn', '66:41:29')},
{'1': ('kn', '66:41:30')},
{'1': ('kn', '66:41:31')},
{'1': ('kn', '66:41:32')},
{'1': ('kn', '66:41:33')},
{'1': ('kn', '66:41:34')},
{'1': ('kn', '66:41:35')},
{'1': ('kn', '66:41:36')},
{'1': ('kn', '66:41:37')},
{'1': ('kn', '66:41:38')},
{'1': ('kn', '66:41:39')},
{'1': ('kn', '66:41:40')},
{'1': ('kn', '66:41:41')},
{'1': ('kn', '66:41:42')},
{'1': ('kn', '66:41:43')},
{'1': ('kn', '66:41:44')},
{'1': ('kn', '66:41:45')},
{'1': ('kn', '66:41:46')},
{'1': ('kn', '66:41:47')},
{'1': ('kn', '66:41:48')},
{'1': ('kn', '66:41:49')},
{'1': ('kn', '66:41:50')},
{'1': ('kn', '66:41:51')},
{'1': ('kn', '66:41:52')},
{'1': ('kn', '66:41:53')},
{'1': ('kn', '66:41:54')},
{'1': ('kn', '66:41:55')},
{'1': ('kn', '66:41:56')},
{'1': ('kn', '66:41:57')},
{'1': ('kn', '66:41:58')},
{'1': ('kn', '66:41:59')},
{'1': ('kn', '66:41:60')},
{'1': ('kn', '66:41:61')},
{'1': ('kn', '66:41:62')},
{'1': ('kn', '66:41:63')},
{'1': ('kn', '66:41:64')},
{'1': ('kn', '66:41:65')},
{'1': ('kn', '66:41:66')},
{'1': ('kn', '66:41:67')},
{'1': ('kn', '66:41:68')},
{'1': ('kn', '66:41:69')},
{'1': ('kn', '66:41:70')},
{'1': ('kn', '66:41:71')},
{'1': ('kn', '66:41:72')},
{'1': ('kn', '66:41:73')},
{'1': ('kn', '66:41:74')},
{'1': ('kn', '66:41:75')},
{'1': ('kn', '66:41:76')},
{'1': ('kn', '66:41:77')},
{'1': ('kn', '66:41:78')},
{'1': ('kn', '66:41:79')},
{'1': ('kn', '66:41:80')},
{'1': ('kn', '66:41:81')},
{'1': ('kn', '66:41:82')},
{'1': ('kn', '66:41:83')},
{'1': ('kn', '66:41:84')},
{'1': ('kn', '66:41:85')},
{'1': ('kn', '66:41:86')},
{'1': ('kn', '66:41:87')},
{'1': ('kn', '66:41:88')},
{'1': ('kn', '66:41:89')},
{'1': ('kn', '66:41:90')},
{'1': ('kn', '66:41:91')},
{'1': ('kn', '66:41:92')},
{'1': ('kn', '66:41:93')},
{'1': ('kn', '66:41:94')},
{'1': ('kn', '66:41:95')},
{'1': ('kn', '66:41:96')},
{'1': ('kn', '66:41:97')},
{'1': ('kn', '66:41:98')},
{'1': ('kn', '66:41:99')},
{'1': ('kn', '66:42:00')},
{'1': ('kn', '66:42:01')},
{'1': ('kn', '66:42:02')},
{'1': ('kn', '66:42:03')},
{'1': ('kn', '66:42:04')},
{'1': ('kn', '66:42:05')},
{'1': ('kn', '66:42:06')},
{'1': ('kn', '66:42:07')},
{'1': ('kn', '66:42:08')},
{'1': ('kn', '66:42:09')},
{'1': ('kn', '66:42:10')},
{'1': ('kn', '66:42:11')},
{'1': ('kn', '66:42:12')},
{'1': ('kn', '66:42:13')},
{'1': ('kn', '66:42:14')},
{'1': ('kn', '66:42:15')},
{'1': ('kn', '66:42:16')},
{'1': ('kn', '66:42:17')},
{'1': ('kn', '66:42:18')},
{'1': ('kn', '66:42:19')},
{'1': ('kn', '66:42:20')},
{'1': ('kn', '66:42:21')},
{'1': ('kn', '66:42:22')},
{'1': ('kn', '66:42:23')},
{'1': ('kn', '66:42:24')},
{'1': ('kn', '66:42:25')},
{'1': ('kn', '66:42:26')},
{'1': ('kn', '66:42:27')},
{'1': ('kn', '66:42:28')},
{'1': ('kn', '66:42:29')},
{'1': ('kn', '66:42:30')},
{'1': ('kn', '66:42:31')},
{'1': ('kn', '66:42:32')},
{'1': ('kn', '66:42:33')},
{'1': ('kn', '66:42:34')},
{'1': ('kn', '66:42:35')},
{'1': ('kn', '66:42:36')},
{'1': ('kn', '66:42:37')},
{'1': ('kn', '66:42:38')},
{'1': ('kn', '66:42:39')},
{'1': ('kn', '66:42:40')},
{'1': ('kn', '66:42:41')},
{'1': ('kn', '66:42:42')},
{'1': ('kn', '66:42:43')},
{'1': ('kn', '66:42:44')},
{'1': ('kn', '66:42:45')},
{'1': ('kn', '66:42:46')},
{'1': ('kn', '66:42:47')},
{'1': ('kn', '66:42:48')},
{'1': ('kn', '66:42:49')},
{'1': ('kn', '66:42:50')},
{'1': ('kn', '66:42:51')},
{'1': ('kn', '66:42:52')},
{'1': ('kn', '66:42:53')},
{'1': ('kn', '66:42:54')},
{'1': ('kn', '66:42:55')},
{'1': ('kn', '66:42:56')},
{'1': ('kn', '66:42:57')},
{'1': ('kn', '66:42:58')},
{'1': ('kn', '66:42:59')},
{'1': ('kn', '66:42:60')},
{'1': ('kn', '66:42:61')},
{'1': ('kn', '66:42:62')},
{'1': ('kn', '66:42:63')},
{'1': ('kn', '66:42:64')},
{'1': ('kn', '66:42:65')},
{'1': ('kn', '66:42:66')},
{'1': ('kn', '66:42:67')},
{'1': ('kn', '66:42:68')},
{'1': ('kn', '66:42:69')},
{'1': ('kn', '66:42:70')},
{'1': ('kn', '66:42:71')},
{'1': ('kn', '66:42:72')},
{'1': ('kn', '66:42:73')},
{'1': ('kn', '66:42:74')},
{'1': ('kn', '66:42:75')},
{'1': ('kn', '66:42:76')},
{'1': ('kn', '66:42:77')},
{'1': ('kn', '66:42:78')},
{'1': ('kn', '66:42:79')},
{'1': ('kn', '66:42:80')},
{'1': ('kn', '66:42:81')},
{'1': ('kn', '66:42:82')},
{'1': ('kn', '66:42:83')},
{'1': ('kn', '66:42:84')},
{'1': ('kn', '66:42:85')},
{'1': ('kn', '66:42:86')},
{'1': ('kn', '66:42:87')},
{'1': ('kn', '66:42:88')},
{'1': ('kn', '66:42:89')},
{'1': ('kn', '66:42:90')},
{'1': ('kn', '66:42:91')},
{'1': ('kn', '66:42:92')},
{'1': ('kn', '66:42:93')},
{'1': ('kn', '66:42:94')},
{'1': ('kn', '66:42:95')},
{'1': ('kn', '66:42:96')},
{'1': ('kn', '66:42:97')},
{'1': ('kn', '66:42:98')},
{'1': ('kn', '66:42:99')},
{'1': ('kn', '66:43:00')},
{'1': ('kn', '66:43:01')},
{'1': ('kn', '66:43:02')},
{'1': ('kn', '66:43:03')},
{'1': ('kn', '66:43:04')},
{'1': ('kn', '66:43:05')},
{'1': ('kn', '66:43:06')},
{'1': ('kn', '66:43:07')},
{'1': ('kn', '66:43:08')},
{'1': ('kn', '66:43:09')},
{'1': ('kn', '66:43:10')},
{'1': ('kn', '66:43:11')},
{'1': ('kn', '66:43:12')},
{'1': ('kn', '66:43:13')},
{'1': ('kn', '66:43:14')},
{'1': ('kn', '66:43:15')},
{'1': ('kn', '66:43:16')},
{'1': ('kn', '66:43:17')},
{'1': ('kn', '66:43:18')},
{'1': ('kn', '66:43:19')},
{'1': ('kn', '66:43:20')},
{'1': ('kn', '66:43:21')},
{'1': ('kn', '66:43:22')},
{'1': ('kn', '66:43:23')},
{'1': ('kn', '66:43:24')},
{'1': ('kn', '66:43:25')},
{'1': ('kn', '66:43:26')},
{'1': ('kn', '66:43:27')},
{'1': ('kn', '66:43:28')},
{'1': ('kn', '66:43:29')},
{'1': ('kn', '66:43:30')},
{'1': ('kn', '66:43:31')},
{'1': ('kn', '66:43:32')},
{'1': ('kn', '66:43:33')},
{'1': ('kn', '66:43:34')},
{'1': ('kn', '66:43:35')},
{'1': ('kn', '66:43:36')},
{'1': ('kn', '66:43:37')},
{'1': ('kn', '66:43:38')},
{'1': ('kn', '66:43:39')},
{'1': ('kn', '66:43:40')},
{'1': ('kn', '66:43:41')},
{'1': ('kn', '66:43:42')},
{'1': ('kn', '66:43:43')},
{'1': ('kn', '66:43:44')},
{'1': ('kn', '66:43:45')},
{'1': ('kn', '66:43:46')},
{'1': ('kn', '66:43:47')},
{'1': ('kn', '66:43:48')},
{'1': ('kn', '66:43:49')},
{'1': ('kn', '66:43:50')},
{'1': ('kn', '66:43:51')},
{'1': ('kn', '66:43:52')},
{'1': ('kn', '66:43:53')},
{'1': ('kn', '66:43:54')},
{'1': ('kn', '66:43:55')},
{'1': ('kn', '66:43:56')},
{'1': ('kn', '66:43:57')},
{'1': ('kn', '66:43:58')},
{'1': ('kn', '66:43:59')},
{'1': ('kn', '66:43:60')},
{'1': ('kn', '66:43:61')},
{'1': ('kn', '66:43:62')},
{'1': ('kn', '66:43:63')},
{'1': ('kn', '66:43:64')},
{'1': ('kn', '66:43:65')},
{'1': ('kn', '66:43:66')},
{'1': ('kn', '66:43:67')},
{'1': ('kn', '66:43:68')},
{'1': ('kn', '66:43:69')},
{'1': ('kn', '66:43:70')},
{'1': ('kn', '66:43:71')},
{'1': ('kn', '66:43:72')},
{'1': ('kn', '66:43:73')},
{'1': ('kn', '66:43:74')},
{'1': ('kn', '66:43:75')},
{'1': ('kn', '66:43:76')},
{'1': ('kn', '66:43:77')},
{'1': ('kn', '66:43:78')},
{'1': ('kn', '66:43:79')},
{'1': ('kn', '66:43:80')},
{'1': ('kn', '66:43:81')},
{'1': ('kn', '66:43:82')},
{'1': ('kn', '66:43:83')},
{'1': ('kn', '66:43:84')},
{'1': ('kn', '66:43:85')},
{'1': ('kn', '66:43:86')},
{'1': ('kn', '66:43:87')},
{'1': ('kn', '66:43:88')},
{'1': ('kn', '66:43:89')},
{'1': ('kn', '66:43:90')},
{'1': ('kn', '66:43:91')},
{'1': ('kn', '66:43:92')},
{'1': ('kn', '66:43:93')},
{'1': ('kn', '66:43:94')},
{'1': ('kn', '66:43:95')},
{'1': ('kn', '66:43:96')},
{'1': ('kn', '66:43:97')},
{'1': ('kn', '66:43:98')},
{'1': ('kn', '66:43:99')},
{'1': ('kn', '66:44:00')},
{'1': ('kn', '66:44:01')},
{'1': ('kn', '66:44:02')},
{'1': ('kn', '66:44:03')},
{'1': ('kn', '66:44:04')},
{'1': ('kn', '66:44:05')},
{'1': ('kn', '66:44:06')},
{'1': ('kn', '66:44:07')},
{'1': ('kn', '66:44:08')},
{'1': ('kn', '66:44:09')},
{'1': ('kn', '66:44:10')},
{'1': ('kn', '66:44:11')},
{'1': ('kn', '66:44:12')},
{'1': ('kn', '66:44:13')},
{'1': ('kn', '66:44:14')},
{'1': ('kn', '66:44:15')},
{'1': ('kn', '66:44:16')},
{'1': ('kn', '66:44:17')},
{'1': ('kn', '66:44:18')},
{'1': ('kn', '66:44:19')},
{'1': ('kn', '66:44:20')},
{'1': ('kn', '66:44:21')},
{'1': ('kn', '66:44:22')},
{'1': ('kn', '66:44:23')},
{'1': ('kn', '66:44:24')},
{'1': ('kn', '66:44:25')},
{'1': ('kn', '66:44:26')},
{'1': ('kn', '66:44:27')},
{'1': ('kn', '66:44:28')},
{'1': ('kn', '66:44:29')},
{'1': ('kn', '66:44:30')},
{'1': ('kn', '66:44:31')},
{'1': ('kn', '66:44:32')},
{'1': ('kn', '66:44:33')},
{'1': ('kn', '66:44:34')},
{'1': ('kn', '66:44:35')},
{'1': ('kn', '66:44:36')},
{'1': ('kn', '66:44:37')},
{'1': ('kn', '66:44:38')},
{'1': ('kn', '66:44:39')},
{'1': ('kn', '66:44:40')},
{'1': ('kn', '66:44:41')},
{'1': ('kn', '66:44:42')},
{'1': ('kn', '66:44:43')},
{'1': ('kn', '66:44:44')},
{'1': ('kn', '66:44:45')},
{'1': ('kn', '66:44:46')},
{'1': ('kn', '66:44:47')},
{'1': ('kn', '66:44:48')},
{'1': ('kn', '66:44:49')},
{'1': ('kn', '66:44:50')},
{'1': ('kn', '66:44:51')},
{'1': ('kn', '66:44:52')},
{'1': ('kn', '66:44:53')},
{'1': ('kn', '66:44:54')},
{'1': ('kn', '66:44:55')},
{'1': ('kn', '66:44:56')},
{'1': ('kn', '66:44:57')},
{'1': ('kn', '66:44:58')},
{'1': ('kn', '66:44:59')},
{'1': ('kn', '66:44:60')},
{'1': ('kn', '66:44:61')},
{'1': ('kn', '66:44:62')},
{'1': ('kn', '66:44:63')},
{'1': ('kn', '66:44:64')},
{'1': ('kn', '66:44:65')},
{'1': ('kn', '66:44:66')},
{'1': ('kn', '66:44:67')},
{'1': ('kn', '66:44:68')},
{'1': ('kn', '66:44:69')},
{'1': ('kn', '66:44:70')},
{'1': ('kn', '66:44:71')},
{'1': ('kn', '66:44:72')},
{'1': ('kn', '66:44:73')},
{'1': ('kn', '66:44:74')},
{'1': ('kn', '66:44:75')},
{'1': ('kn', '66:44:76')},
{'1': ('kn', '66:44:77')},
{'1': ('kn', '66:44:78')},
{'1': ('kn', '66:44:79')},
{'1': ('kn', '66:44:80')},
{'1': ('kn', '66:44:81')},
{'1': ('kn', '66:44:82')},
{'1': ('kn', '66:44:83')},
{'1': ('kn', '66:44:84')},
{'1': ('kn', '66:44:85')},
{'1': ('kn', '66:44:86')},
{'1': ('kn', '66:44:87')},
{'1': ('kn', '66:44:88')},
{'1': ('kn', '66:44:89')},
{'1': ('kn', '66:44:90')},
{'1': ('kn', '66:44:91')},
{'1': ('kn', '66:44:92')},
{'1': ('kn', '66:44:93')},
{'1': ('kn', '66:44:94')},
{'1': ('kn', '66:44:95')},
{'1': ('kn', '66:44:96')},
{'1': ('kn', '66:44:97')},
{'1': ('kn', '66:44:98')},
{'1': ('kn', '66:44:99')},
{'1': ('kn', '66:45:00')},
{'1': ('kn', '66:45:01')},
{'1': ('kn', '66:45:02')},
{'1': ('kn', '66:45:03')},
{'1': ('kn', '66:45:04')},
{'1': ('kn', '66:45:05')},
{'1': ('kn', '66:45:06')},
{'1': ('kn', '66:45:07')},
{'1': ('kn', '66:45:08')},
{'1': ('kn', '66:45:09')},
{'1': ('kn', '66:45:10')},
{'1': ('kn', '66:45:11')},
{'1': ('kn', '66:45:12')},
{'1': ('kn', '66:45:13')},
{'1': ('kn', '66:45:14')},
{'1': ('kn', '66:45:15')},
{'1': ('kn', '66:45:16')},
{'1': ('kn', '66:45:17')},
{'1': ('kn', '66:45:18')},
{'1': ('kn', '66:45:19')},
{'1': ('kn', '66:45:20')},
{'1': ('kn', '66:45:21')},
{'1': ('kn', '66:45:22')},
{'1': ('kn', '66:45:23')},
{'1': ('kn', '66:45:24')},
{'1': ('kn', '66:45:25')},
{'1': ('kn', '66:45:26')},
{'1': ('kn', '66:45:27')},
{'1': ('kn', '66:45:28')},
{'1': ('kn', '66:45:29')},
{'1': ('kn', '66:45:30')},
{'1': ('kn', '66:45:31')},
{'1': ('kn', '66:45:32')},
{'1': ('kn', '66:45:33')},
{'1': ('kn', '66:45:34')},
{'1': ('kn', '66:45:35')},
{'1': ('kn', '66:45:36')},
{'1': ('kn', '66:45:37')},
{'1': ('kn', '66:45:38')},
{'1': ('kn', '66:45:39')},
{'1': ('kn', '66:45:40')},
{'1': ('kn', '66:45:41')},
{'1': ('kn', '66:45:42')},
{'1': ('kn', '66:45:43')},
{'1': ('kn', '66:45:44')},
{'1': ('kn', '66:45:45')},
{'1': ('kn', '66:45:46')},
{'1': ('kn', '66:45:47')},
{'1': ('kn', '66:45:48')},
{'1': ('kn', '66:45:49')},
{'1': ('kn', '66:45:50')},
{'1': ('kn', '66:45:51')},
{'1': ('kn', '66:45:52')},
{'1': ('kn', '66:45:53')},
{'1': ('kn', '66:45:54')},
{'1': ('kn', '66:45:55')},
{'1': ('kn', '66:45:56')},
{'1': ('kn', '66:45:57')},
{'1': ('kn', '66:45:58')},
{'1': ('kn', '66:45:59')},
{'1': ('kn', '66:45:60')},
{'1': ('kn', '66:45:61')},
{'1': ('kn', '66:45:62')},
{'1': ('kn', '66:45:63')},
{'1': ('kn', '66:45:64')},
{'1': ('kn', '66:45:65')},
{'1': ('kn', '66:45:66')},
{'1': ('kn', '66:45:67')},
{'1': ('kn', '66:45:68')},
{'1': ('kn', '66:45:69')},
{'1': ('kn', '66:45:70')},
{'1': ('kn', '66:45:71')},
{'1': ('kn', '66:45:72')},
{'1': ('kn', '66:45:73')},
{'1': ('kn', '66:45:74')},
{'1': ('kn', '66:45:75')},
{'1': ('kn', '66:45:76')},
{'1': ('kn', '66:45:77')},
{'1': ('kn', '66:45:78')},
{'1': ('kn', '66:45:79')},
{'1': ('kn', '66:45:80')},
{'1': ('kn', '66:45:81')},
{'1': ('kn', '66:45:82')},
{'1': ('kn', '66:45:83')},
{'1': ('kn', '66:45:84')},
{'1': ('kn', '66:45:85')},
{'1': ('kn', '66:45:86')},
{'1': ('kn', '66:45:87')},
{'1': ('kn', '66:45:88')},
{'1': ('kn', '66:45:89')},
{'1': ('kn', '66:45:90')},
{'1': ('kn', '66:45:91')},
{'1': ('kn', '66:45:92')},
{'1': ('kn', '66:45:93')},
{'1': ('kn', '66:45:94')},
{'1': ('kn', '66:45:95')},
{'1': ('kn', '66:45:96')},
{'1': ('kn', '66:45:97')},
{'1': ('kn', '66:45:98')},
{'1': ('kn', '66:45:99')},
{'1': ('kn', '66:46:00')},
{'1': ('kn', '66:46:01')},
{'1': ('kn', '66:46:02')},
{'1': ('kn', '66:46:03')},
{'1': ('kn', '66:46:04')},
{'1': ('kn', '66:46:05')},
{'1': ('kn', '66:46:06')},
{'1': ('kn', '66:46:07')},
{'1': ('kn', '66:46:08')},
{'1': ('kn', '66:46:09')},
{'1': ('kn', '66:46:10')},
{'1': ('kn', '66:46:11')},
{'1': ('kn', '66:46:12')},
{'1': ('kn', '66:46:13')},
{'1': ('kn', '66:46:14')},
{'1': ('kn', '66:46:15')},
{'1': ('kn', '66:46:16')},
{'1': ('kn', '66:46:17')},
{'1': ('kn', '66:46:18')},
{'1': ('kn', '66:46:19')},
{'1': ('kn', '66:46:20')},
{'1': ('kn', '66:46:21')},
{'1': ('kn', '66:46:22')},
{'1': ('kn', '66:46:23')},
{'1': ('kn', '66:46:24')},
{'1': ('kn', '66:46:25')},
{'1': ('kn', '66:46:26')},
{'1': ('kn', '66:46:27')},
{'1': ('kn', '66:46:28')},
{'1': ('kn', '66:46:29')},
{'1': ('kn', '66:46:30')},
{'1': ('kn', '66:46:31')},
{'1': ('kn', '66:46:32')},
{'1': ('kn', '66:46:33')},
{'1': ('kn', '66:46:34')},
{'1': ('kn', '66:46:35')},
{'1': ('kn', '66:46:36')},
{'1': ('kn', '66:46:37')},
{'1': ('kn', '66:46:38')},
{'1': ('kn', '66:46:39')},
{'1': ('kn', '66:46:40')},
{'1': ('kn', '66:46:41')},
{'1': ('kn', '66:46:42')},
{'1': ('kn', '66:46:43')},
{'1': ('kn', '66:46:44')},
{'1': ('kn', '66:46:45')},
{'1': ('kn', '66:46:46')},
{'1': ('kn', '66:46:47')},
{'1': ('kn', '66:46:48')},
{'1': ('kn', '66:46:49')},
{'1': ('kn', '66:46:50')},
{'1': ('kn', '66:46:51')},
{'1': ('kn', '66:46:52')},
{'1': ('kn', '66:46:53')},
{'1': ('kn', '66:46:54')},
{'1': ('kn', '66:46:55')},
{'1': ('kn', '66:46:56')},
{'1': ('kn', '66:46:57')},
{'1': ('kn', '66:46:58')},
{'1': ('kn', '66:46:59')},
{'1': ('kn', '66:46:60')},
{'1': ('kn', '66:46:61')},
{'1': ('kn', '66:46:62')},
{'1': ('kn', '66:46:63')},
{'1': ('kn', '66:46:64')},
{'1': ('kn', '66:46:65')},
{'1': ('kn', '66:46:66')},
{'1': ('kn', '66:46:67')},
{'1': ('kn', '66:46:68')},
{'1': ('kn', '66:46:69')},
{'1': ('kn', '66:46:70')},
{'1': ('kn', '66:46:71')},
{'1': ('kn', '66:46:72')},
{'1': ('kn', '66:46:73')},
{'1': ('kn', '66:46:74')},
{'1': ('kn', '66:46:75')},
{'1': ('kn', '66:46:76')},
{'1': ('kn', '66:46:77')},
{'1': ('kn', '66:46:78')},
{'1': ('kn', '66:46:79')},
{'1': ('kn', '66:46:80')},
{'1': ('kn', '66:46:81')},
{'1': ('kn', '66:46:82')},
{'1': ('kn', '66:46:83')},
{'1': ('kn', '66:46:84')},
{'1': ('kn', '66:46:85')},
{'1': ('kn', '66:46:86')},
{'1': ('kn', '66:46:87')},
{'1': ('kn', '66:46:88')},
{'1': ('kn', '66:46:89')},
{'1': ('kn', '66:46:90')},
{'1': ('kn', '66:46:91')},
{'1': ('kn', '66:46:92')},
{'1': ('kn', '66:46:93')},
{'1': ('kn', '66:46:94')},
{'1': ('kn', '66:46:95')},
{'1': ('kn', '66:46:96')},
{'1': ('kn', '66:46:97')},
{'1': ('kn', '66:46:98')},
{'1': ('kn', '66:46:99')},
{'1': ('kn', '66:47:00')},
{'1': ('kn', '66:47:01')},
{'1': ('kn', '66:47:02')},
{'1': ('kn', '66:47:03')},
{'1': ('kn', '66:47:04')},
{'1': ('kn', '66:47:05')},
{'1': ('kn', '66:47:06')},
{'1': ('kn', '66:47:07')},
{'1': ('kn', '66:47:08')},
{'1': ('kn', '66:47:09')},
{'1': ('kn', '66:47:10')},
{'1': ('kn', '66:47:11')},
{'1': ('kn', '66:47:12')},
{'1': ('kn', '66:47:13')},
{'1': ('kn', '66:47:14')},
{'1': ('kn', '66:47:15')},
{'1': ('kn', '66:47:16')},
{'1': ('kn', '66:47:17')},
{'1': ('kn', '66:47:18')},
{'1': ('kn', '66:47:19')},
{'1': ('kn', '66:47:20')},
{'1': ('kn', '66:47:21')},
{'1': ('kn', '66:47:22')},
{'1': ('kn', '66:47:23')},
{'1': ('kn', '66:47:24')},
{'1': ('kn', '66:47:25')},
{'1': ('kn', '66:47:26')},
{'1': ('kn', '66:47:27')},
{'1': ('kn', '66:47:28')},
{'1': ('kn', '66:47:29')},
{'1': ('kn', '66:47:30')},
{'1': ('kn', '66:47:31')},
{'1': ('kn', '66:47:32')},
{'1': ('kn', '66:47:33')},
{'1': ('kn', '66:47:34')},
{'1': ('kn', '66:47:35')},
{'1': ('kn', '66:47:36')},
{'1': ('kn', '66:47:37')},
{'1': ('kn', '66:47:38')},
{'1': ('kn', '66:47:39')},
{'1': ('kn', '66:47:40')},
{'1': ('kn', '66:47:41')},
{'1': ('kn', '66:47:42')},
{'1': ('kn', '66:47:43')},
{'1': ('kn', '66:47:44')},
{'1': ('kn', '66:47:45')},
{'1': ('kn', '66:47:46')},
{'1': ('kn', '66:47:47')},
{'1': ('kn', '66:47:48')},
{'1': ('kn', '66:47:49')},
{'1': ('kn', '66:47:50')},
{'1': ('kn', '66:47:51')},
{'1': ('kn', '66:47:52')},
{'1': ('kn', '66:47:53')},
{'1': ('kn', '66:47:54')},
{'1': ('kn', '66:47:55')},
{'1': ('kn', '66:47:56')},
{'1': ('kn', '66:47:57')},
{'1': ('kn', '66:47:58')},
{'1': ('kn', '66:47:59')},
{'1': ('kn', '66:47:60')},
{'1': ('kn', '66:47:61')},
{'1': ('kn', '66:47:62')},
{'1': ('kn', '66:47:63')},
{'1': ('kn', '66:47:64')},
{'1': ('kn', '66:47:65')},
{'1': ('kn', '66:47:66')},
{'1': ('kn', '66:47:67')},
{'1': ('kn', '66:47:68')},
{'1': ('kn', '66:47:69')},
{'1': ('kn', '66:47:70')},
{'1': ('kn', '66:47:71')},
{'1': ('kn', '66:47:72')},
{'1': ('kn', '66:47:73')},
{'1': ('kn', '66:47:74')},
{'1': ('kn', '66:47:75')},
{'1': ('kn', '66:47:76')},
{'1': ('kn', '66:47:77')},
{'1': ('kn', '66:47:78')},
{'1': ('kn', '66:47:79')},
{'1': ('kn', '66:47:80')},
{'1': ('kn', '66:47:81')},
{'1': ('kn', '66:47:82')},
{'1': ('kn', '66:47:83')},
{'1': ('kn', '66:47:84')},
{'1': ('kn', '66:47:85')},
{'1': ('kn', '66:47:86')},
{'1': ('kn', '66:47:87')},
{'1': ('kn', '66:47:88')},
{'1': ('kn', '66:47:89')},
{'1': ('kn', '66:47:90')},
{'1': ('kn', '66:47:91')},
{'1': ('kn', '66:47:92')},
{'1': ('kn', '66:47:93')},
{'1': ('kn', '66:47:94')},
{'1': ('kn', '66:47:95')},
{'1': ('kn', '66:47:96')},
{'1': ('kn', '66:47:97')},
{'1': ('kn', '66:47:98')},
{'1': ('kn', '66:47:99')},
{'1': ('kn', '66:48:00')},
{'1': ('kn', '66:48:01')},
{'1': ('kn', '66:48:02')},
{'1': ('kn', '66:48:03')},
{'1': ('kn', '66:48:04')},
{'1': ('kn', '66:48:05')},
{'1': ('kn', '66:48:06')},
{'1': ('kn', '66:48:07')},
{'1': ('kn', '66:48:08')},
{'1': ('kn', '66:48:09')},
{'1': ('kn', '66:48:10')},
{'1': ('kn', '66:48:11')},
{'1': ('kn', '66:48:12')},
{'1': ('kn', '66:48:13')},
{'1': ('kn', '66:48:14')},
{'1': ('kn', '66:48:15')},
{'1': ('kn', '66:48:16')},
{'1': ('kn', '66:48:17')},
{'1': ('kn', '66:48:18')},
{'1': ('kn', '66:48:19')},
{'1': ('kn', '66:48:20')},
{'1': ('kn', '66:48:21')},
{'1': ('kn', '66:48:22')},
{'1': ('kn', '66:48:23')},
{'1': ('kn', '66:48:24')},
{'1': ('kn', '66:48:25')},
{'1': ('kn', '66:48:26')},
{'1': ('kn', '66:48:27')},
{'1': ('kn', '66:48:28')},
{'1': ('kn', '66:48:29')},
{'1': ('kn', '66:48:30')},
{'1': ('kn', '66:48:31')},
{'1': ('kn', '66:48:32')},
{'1': ('kn', '66:48:33')},
{'1': ('kn', '66:48:34')},
{'1': ('kn', '66:48:35')},
{'1': ('kn', '66:48:36')},
{'1': ('kn', '66:48:37')},
{'1': ('kn', '66:48:38')},
{'1': ('kn', '66:48:39')},
{'1': ('kn', '66:48:40')},
{'1': ('kn', '66:48:41')},
{'1': ('kn', '66:48:42')},
{'1': ('kn', '66:48:43')},
{'1': ('kn', '66:48:44')},
{'1': ('kn', '66:48:45')},
{'1': ('kn', '66:48:46')},
{'1': ('kn', '66:48:47')},
{'1': ('kn', '66:48:48')},
{'1': ('kn', '66:48:49')},
{'1': ('kn', '66:48:50')},
{'1': ('kn', '66:48:51')},
{'1': ('kn', '66:48:52')},
{'1': ('kn', '66:48:53')},
{'1': ('kn', '66:48:54')},
{'1': ('kn', '66:48:55')},
{'1': ('kn', '66:48:56')},
{'1': ('kn', '66:48:57')},
{'1': ('kn', '66:48:58')},
{'1': ('kn', '66:48:59')},
{'1': ('kn', '66:48:60')},
{'1': ('kn', '66:48:61')},
{'1': ('kn', '66:48:62')},
{'1': ('kn', '66:48:63')},
{'1': ('kn', '66:48:64')},
{'1': ('kn', '66:48:65')},
{'1': ('kn', '66:48:66')},
{'1': ('kn', '66:48:67')},
{'1': ('kn', '66:48:68')},
{'1': ('kn', '66:48:69')},
{'1': ('kn', '66:48:70')},
{'1': ('kn', '66:48:71')},
{'1': ('kn', '66:48:72')},
{'1': ('kn', '66:48:73')},
{'1': ('kn', '66:48:74')},
{'1': ('kn', '66:48:75')},
{'1': ('kn', '66:48:76')},
{'1': ('kn', '66:48:77')},
{'1': ('kn', '66:48:78')},
{'1': ('kn', '66:48:79')},
{'1': ('kn', '66:48:80')},
{'1': ('kn', '66:48:81')},
{'1': ('kn', '66:48:82')},
{'1': ('kn', '66:48:83')},
{'1': ('kn', '66:48:84')},
{'1': ('kn', '66:48:85')},
{'1': ('kn', '66:48:86')},
{'1': ('kn', '66:48:87')},
{'1': ('kn', '66:48:88')},
{'1': ('kn', '66:48:89')},
{'1': ('kn', '66:48:90')},
{'1': ('kn', '66:48:91')},
{'1': ('kn', '66:48:92')},
{'1': ('kn', '66:48:93')},
{'1': ('kn', '66:48:94')},
{'1': ('kn', '66:48:95')},
{'1': ('kn', '66:48:96')},
{'1': ('kn', '66:48:97')},
{'1': ('kn', '66:48:98')},
{'1': ('kn', '66:48:99')},
{'1': ('kn', '66:49:00')},
{'1': ('kn', '66:49:01')},
{'1': ('kn', '66:49:02')},
{'1': ('kn', '66:49:03')},
{'1': ('kn', '66:49:04')},
{'1': ('kn', '66:49:05')},
{'1': ('kn', '66:49:06')},
{'1': ('kn', '66:49:07')},
{'1': ('kn', '66:49:08')},
{'1': ('kn', '66:49:09')},
{'1': ('kn', '66:49:10')},
{'1': ('kn', '66:49:11')},
{'1': ('kn', '66:49:12')},
{'1': ('kn', '66:49:13')},
{'1': ('kn', '66:49:14')},
{'1': ('kn', '66:49:15')},
{'1': ('kn', '66:49:16')},
{'1': ('kn', '66:49:17')},
{'1': ('kn', '66:49:18')},
{'1': ('kn', '66:49:19')},
{'1': ('kn', '66:49:20')},
{'1': ('kn', '66:49:21')},
{'1': ('kn', '66:49:22')},
{'1': ('kn', '66:49:23')},
{'1': ('kn', '66:49:24')},
{'1': ('kn', '66:49:25')},
{'1': ('kn', '66:49:26')},
{'1': ('kn', '66:49:27')},
{'1': ('kn', '66:49:28')},
{'1': ('kn', '66:49:29')},
{'1': ('kn', '66:49:30')},
{'1': ('kn', '66:49:31')},
{'1': ('kn', '66:49:32')},
{'1': ('kn', '66:49:33')},
{'1': ('kn', '66:49:34')},
{'1': ('kn', '66:49:35')},
{'1': ('kn', '66:49:36')},
{'1': ('kn', '66:49:37')},
{'1': ('kn', '66:49:38')},
{'1': ('kn', '66:49:39')},
{'1': ('kn', '66:49:40')},
{'1': ('kn', '66:49:41')},
{'1': ('kn', '66:49:42')},
{'1': ('kn', '66:49:43')},
{'1': ('kn', '66:49:44')},
{'1': ('kn', '66:49:45')},
{'1': ('kn', '66:49:46')},
{'1': ('kn', '66:49:47')},
{'1': ('kn', '66:49:48')},
{'1': ('kn', '66:49:49')},
{'1': ('kn', '66:49:50')},
{'1': ('kn', '66:49:51')},
{'1': ('kn', '66:49:52')},
{'1': ('kn', '66:49:53')},
{'1': ('kn', '66:49:54')},
{'1': ('kn', '66:49:55')},
{'1': ('kn', '66:49:56')},
{'1': ('kn', '66:49:57')},
{'1': ('kn', '66:49:58')},
{'1': ('kn', '66:49:59')},
{'1': ('kn', '66:49:60')},
{'1': ('kn', '66:49:61')},
{'1': ('kn', '66:49:62')},
{'1': ('kn', '66:49:63')},
{'1': ('kn', '66:49:64')},
{'1': ('kn', '66:49:65')},
{'1': ('kn', '66:49:66')},
{'1': ('kn', '66:49:67')},
{'1': ('kn', '66:49:68')},
{'1': ('kn', '66:49:69')},
{'1': ('kn', '66:49:70')},
{'1': ('kn', '66:49:71')},
{'1': ('kn', '66:49:72')},
{'1': ('kn', '66:49:73')},
{'1': ('kn', '66:49:74')},
{'1': ('kn', '66:49:75')},
{'1': ('kn', '66:49:76')},
{'1': ('kn', '66:49:77')},
{'1': ('kn', '66:49:78')},
{'1': ('kn', '66:49:79')},
{'1': ('kn', '66:49:80')},
{'1': ('kn', '66:49:81')},
{'1': ('kn', '66:49:82')},
{'1': ('kn', '66:49:83')},
{'1': ('kn', '66:49:84')},
{'1': ('kn', '66:49:85')},
{'1': ('kn', '66:49:86')},
{'1': ('kn', '66:49:87')},
{'1': ('kn', '66:49:88')},
{'1': ('kn', '66:49:89')},
{'1': ('kn', '66:49:90')},
{'1': ('kn', '66:49:91')},
{'1': ('kn', '66:49:92')},
{'1': ('kn', '66:49:93')},
{'1': ('kn', '66:49:94')},
{'1': ('kn', '66:49:95')},
{'1': ('kn', '66:49:96')},
{'1': ('kn', '66:49:97')},
{'1': ('kn', '66:49:98')},
{'1': ('kn', '66:49:99')},
{'1': ('kn', '66:50:00')},
{'1': ('kn', '66:50:01')},
{'1': ('kn', '66:50:02')},
{'1': ('kn', '66:50:03')},
{'1': ('kn', '66:50:04')},
{'1': ('kn', '66:50:05')},
{'1': ('kn', '66:50:06')},
{'1': ('kn', '66:50:07')},
{'1': ('kn', '66:50:08')},
{'1': ('kn', '66:50:09')},
{'1': ('kn', '66:50:10')},
{'1': ('kn', '66:50:11')},
{'1': ('kn', '66:50:12')},
{'1': ('kn', '66:50:13')},
{'1': ('kn', '66:50:14')},
{'1': ('kn', '66:50:15')},
{'1': ('kn', '66:50:16')},
{'1': ('kn', '66:50:17')},
{'1': ('kn', '66:50:18')},
{'1': ('kn', '66:50:19')},
{'1': ('kn', '66:50:20')},
{'1': ('kn', '66:50:21')},
{'1': ('kn', '66:50:22')},
{'1': ('kn', '66:50:23')},
{'1': ('kn', '66:50:24')},
{'1': ('kn', '66:50:25')},
{'1': ('kn', '66:50:26')},
{'1': ('kn', '66:50:27')},
{'1': ('kn', '66:50:28')},
{'1': ('kn', '66:50:29')},
{'1': ('kn', '66:50:30')},
{'1': ('kn', '66:50:31')},
{'1': ('kn', '66:50:32')},
{'1': ('kn', '66:50:33')},
{'1': ('kn', '66:50:34')},
{'1': ('kn', '66:50:35')},
{'1': ('kn', '66:50:36')},
{'1': ('kn', '66:50:37')},
{'1': ('kn', '66:50:38')},
{'1': ('kn', '66:50:39')},
{'1': ('kn', '66:50:40')},
{'1': ('kn', '66:50:41')},
{'1': ('kn', '66:50:42')},
{'1': ('kn', '66:50:43')},
{'1': ('kn', '66:50:44')},
{'1': ('kn', '66:50:45')},
{'1': ('kn', '66:50:46')},
{'1': ('kn', '66:50:47')},
{'1': ('kn', '66:50:48')},
{'1': ('kn', '66:50:49')},
{'1': ('kn', '66:50:50')},
{'1': ('kn', '66:50:51')},
{'1': ('kn', '66:50:52')},
{'1': ('kn', '66:50:53')},
{'1': ('kn', '66:50:54')},
{'1': ('kn', '66:50:55')},
{'1': ('kn', '66:50:56')},
{'1': ('kn', '66:50:57')},
{'1': ('kn', '66:50:58')},
{'1': ('kn', '66:50:59')},
{'1': ('kn', '66:50:60')},
{'1': ('kn', '66:50:61')},
{'1': ('kn', '66:50:62')},
{'1': ('kn', '66:50:63')},
{'1': ('kn', '66:50:64')},
{'1': ('kn', '66:50:65')},
{'1': ('kn', '66:50:66')},
{'1': ('kn', '66:50:67')},
{'1': ('kn', '66:50:68')},
{'1': ('kn', '66:50:69')},
{'1': ('kn', '66:50:70')},
{'1': ('kn', '66:50:71')},
{'1': ('kn', '66:50:72')},
{'1': ('kn', '66:50:73')},
{'1': ('kn', '66:50:74')},
{'1': ('kn', '66:50:75')},
{'1': ('kn', '66:50:76')},
{'1': ('kn', '66:50:77')},
{'1': ('kn', '66:50:78')},
{'1': ('kn', '66:50:79')},
{'1': ('kn', '66:50:80')},
{'1': ('kn', '66:50:81')},
{'1': ('kn', '66:50:82')},
{'1': ('kn', '66:50:83')},
{'1': ('kn', '66:50:84')},
{'1': ('kn', '66:50:85')},
{'1': ('kn', '66:50:86')},
{'1': ('kn', '66:50:87')},
{'1': ('kn', '66:50:88')},
{'1': ('kn', '66:50:89')},
{'1': ('kn', '66:50:90')},
{'1': ('kn', '66:50:91')},
{'1': ('kn', '66:50:92')},
{'1': ('kn', '66:50:93')},
{'1': ('kn', '66:50:94')},
{'1': ('kn', '66:50:95')},
{'1': ('kn', '66:50:96')},
{'1': ('kn', '66:50:97')},
{'1': ('kn', '66:50:98')},
{'1': ('kn', '66:50:99')},
{'1': ('kn', '66:51:00')},
{'1': ('kn', '66:51:01')},
{'1': ('kn', '66:51:02')},
{'1': ('kn', '66:51:03')},
{'1': ('kn', '66:51:04')},
{'1': ('kn', '66:51:05')},
{'1': ('kn', '66:51:06')},
{'1': ('kn', '66:51:07')},
{'1': ('kn', '66:51:08')},
{'1': ('kn', '66:51:09')},
{'1': ('kn', '66:51:10')},
{'1': ('kn', '66:51:11')},
{'1': ('kn', '66:51:12')},
{'1': ('kn', '66:51:13')},
{'1': ('kn', '66:51:14')},
{'1': ('kn', '66:51:15')},
{'1': ('kn', '66:51:16')},
{'1': ('kn', '66:51:17')},
{'1': ('kn', '66:51:18')},
{'1': ('kn', '66:51:19')},
{'1': ('kn', '66:51:20')},
{'1': ('kn', '66:51:21')},
{'1': ('kn', '66:51:22')},
{'1': ('kn', '66:51:23')},
{'1': ('kn', '66:51:24')},
{'1': ('kn', '66:51:25')},
{'1': ('kn', '66:51:26')},
{'1': ('kn', '66:51:27')},
{'1': ('kn', '66:51:28')},
{'1': ('kn', '66:51:29')},
{'1': ('kn', '66:51:30')},
{'1': ('kn', '66:51:31')},
{'1': ('kn', '66:51:32')},
{'1': ('kn', '66:51:33')},
{'1': ('kn', '66:51:34')},
{'1': ('kn', '66:51:35')},
{'1': ('kn', '66:51:36')},
{'1': ('kn', '66:51:37')},
{'1': ('kn', '66:51:38')},
{'1': ('kn', '66:51:39')},
{'1': ('kn', '66:51:40')},
{'1': ('kn', '66:51:41')},
{'1': ('kn', '66:51:42')},
{'1': ('kn', '66:51:43')},
{'1': ('kn', '66:51:44')},
{'1': ('kn', '66:51:45')},
{'1': ('kn', '66:51:46')},
{'1': ('kn', '66:51:47')},
{'1': ('kn', '66:51:48')},
{'1': ('kn', '66:51:49')},
{'1': ('kn', '66:51:50')},
{'1': ('kn', '66:51:51')},
{'1': ('kn', '66:51:52')},
{'1': ('kn', '66:51:53')},
{'1': ('kn', '66:51:54')},
{'1': ('kn', '66:51:55')},
{'1': ('kn', '66:51:56')},
{'1': ('kn', '66:51:57')},
{'1': ('kn', '66:51:58')},
{'1': ('kn', '66:51:59')},
{'1': ('kn', '66:51:60')},
{'1': ('kn', '66:51:61')},
{'1': ('kn', '66:51:62')},
{'1': ('kn', '66:51:63')},
{'1': ('kn', '66:51:64')},
{'1': ('kn', '66:51:65')},
{'1': ('kn', '66:51:66')},
{'1': ('kn', '66:51:67')},
{'1': ('kn', '66:51:68')},
{'1': ('kn', '66:51:69')},
{'1': ('kn', '66:51:70')},
{'1': ('kn', '66:51:71')},
{'1': ('kn', '66:51:72')},
{'1': ('kn', '66:51:73')},
{'1': ('kn', '66:51:74')},
{'1': ('kn', '66:51:75')},
{'1': ('kn', '66:51:76')},
{'1': ('kn', '66:51:77')},
{'1': ('kn', '66:51:78')},
{'1': ('kn', '66:51:79')},
{'1': ('kn', '66:51:80')},
{'1': ('kn', '66:51:81')},
{'1': ('kn', '66:51:82')},
{'1': ('kn', '66:51:83')},
{'1': ('kn', '66:51:84')},
{'1': ('kn', '66:51:85')},
{'1': ('kn', '66:51:86')},
{'1': ('kn', '66:51:87')},
{'1': ('kn', '66:51:88')},
{'1': ('kn', '66:51:89')},
{'1': ('kn', '66:51:90')},
{'1': ('kn', '66:51:91')},
{'1': ('kn', '66:51:92')},
{'1': ('kn', '66:51:93')},
{'1': ('kn', '66:51:94')},
{'1': ('kn', '66:51:95')},
{'1': ('kn', '66:51:96')},
{'1': ('kn', '66:51:97')},
{'1': ('kn', '66:51:98')},
{'1': ('kn', '66:51:99')},
{'1': ('kn', '66:52:00')},
{'1': ('kn', '66:52:01')},
{'1': ('kn', '66:52:02')},
{'1': ('kn', '66:52:03')},
{'1': ('kn', '66:52:04')},
{'1': ('kn', '66:52:05')},
{'1': ('kn', '66:52:06')},
{'1': ('kn', '66:52:07')},
{'1': ('kn', '66:52:08')},
{'1': ('kn', '66:52:09')},
{'1': ('kn', '66:52:10')},
{'1': ('kn', '66:52:11')},
{'1': ('kn', '66:52:12')},
{'1': ('kn', '66:52:13')},
{'1': ('kn', '66:52:14')},
{'1': ('kn', '66:52:15')},
{'1': ('kn', '66:52:16')},
{'1': ('kn', '66:52:17')},
{'1': ('kn', '66:52:18')},
{'1': ('kn', '66:52:19')},
{'1': ('kn', '66:52:20')},
{'1': ('kn', '66:52:21')},
{'1': ('kn', '66:52:22')},
{'1': ('kn', '66:52:23')},
{'1': ('kn', '66:52:24')},
{'1': ('kn', '66:52:25')},
{'1': ('kn', '66:52:26')},
{'1': ('kn', '66:52:27')},
{'1': ('kn', '66:52:28')},
{'1': ('kn', '66:52:29')},
{'1': ('kn', '66:52:30')},
{'1': ('kn', '66:52:31')},
{'1': ('kn', '66:52:32')},
{'1': ('kn', '66:52:33')},
{'1': ('kn', '66:52:34')},
{'1': ('kn', '66:52:35')},
{'1': ('kn', '66:52:36')},
{'1': ('kn', '66:52:37')},
{'1': ('kn', '66:52:38')},
{'1': ('kn', '66:52:39')},
{'1': ('kn', '66:52:40')},
{'1': ('kn', '66:52:41')},
{'1': ('kn', '66:52:42')},
{'1': ('kn', '66:52:43')},
{'1': ('kn', '66:52:44')},
{'1': ('kn', '66:52:45')},
{'1': ('kn', '66:52:46')},
{'1': ('kn', '66:52:47')},
{'1': ('kn', '66:52:48')},
{'1': ('kn', '66:52:49')},
{'1': ('kn', '66:52:50')},
{'1': ('kn', '66:52:51')},
{'1': ('kn', '66:52:52')},
{'1': ('kn', '66:52:53')},
{'1': ('kn', '66:52:54')},
{'1': ('kn', '66:52:55')},
{'1': ('kn', '66:52:56')},
{'1': ('kn', '66:52:57')},
{'1': ('kn', '66:52:58')},
{'1': ('kn', '66:52:59')},
{'1': ('kn', '66:52:60')},
{'1': ('kn', '66:52:61')},
{'1': ('kn', '66:52:62')},
{'1': ('kn', '66:52:63')},
{'1': ('kn', '66:52:64')},
{'1': ('kn', '66:52:65')},
{'1': ('kn', '66:52:66')},
{'1': ('kn', '66:52:67')},
{'1': ('kn', '66:52:68')},
{'1': ('kn', '66:52:69')},
{'1': ('kn', '66:52:70')},
{'1': ('kn', '66:52:71')},
{'1': ('kn', '66:52:72')},
{'1': ('kn', '66:52:73')},
{'1': ('kn', '66:52:74')},
{'1': ('kn', '66:52:75')},
{'1': ('kn', '66:52:76')},
{'1': ('kn', '66:52:77')},
{'1': ('kn', '66:52:78')},
{'1': ('kn', '66:52:79')},
{'1': ('kn', '66:52:80')},
{'1': ('kn', '66:52:81')},
{'1': ('kn', '66:52:82')},
{'1': ('kn', '66:52:83')},
{'1': ('kn', '66:52:84')},
{'1': ('kn', '66:52:85')},
{'1': ('kn', '66:52:86')},
{'1': ('kn', '66:52:87')},
{'1': ('kn', '66:52:88')},
{'1': ('kn', '66:52:89')},
{'1': ('kn', '66:52:90')},
{'1': ('kn', '66:52:91')},
{'1': ('kn', '66:52:92')},
{'1': ('kn', '66:52:93')},
{'1': ('kn', '66:52:94')},
{'1': ('kn', '66:52:95')},
{'1': ('kn', '66:52:96')},
{'1': ('kn', '66:52:97')},
{'1': ('kn', '66:52:98')},
{'1': ('kn', '66:52:99')},
{'1': ('kn', '66:53:00')},
{'1': ('kn', '66:53:01')},
{'1': ('kn', '66:53:02')},
{'1': ('kn', '66:53:03')},
{'1': ('kn', '66:53:04')},
{'1': ('kn', '66:53:05')},
{'1': ('kn', '66:53:06')},
{'1': ('kn', '66:53:07')},
{'1': ('kn', '66:53:08')},
{'1': ('kn', '66:53:09')},
{'1': ('kn', '66:53:10')},
{'1': ('kn', '66:53:11')},
{'1': ('kn', '66:53:12')},
{'1': ('kn', '66:53:13')},
{'1': ('kn', '66:53:14')},
{'1': ('kn', '66:53:15')},
{'1': ('kn', '66:53:16')},
{'1': ('kn', '66:53:17')},
{'1': ('kn', '66:53:18')},
{'1': ('kn', '66:53:19')},
{'1': ('kn', '66:53:20')},
{'1': ('kn', '66:53:21')},
{'1': ('kn', '66:53:22')},
{'1': ('kn', '66:53:23')},
{'1': ('kn', '66:53:24')},
{'1': ('kn', '66:53:25')},
{'1': ('kn', '66:53:26')},
{'1': ('kn', '66:53:27')},
{'1': ('kn', '66:53:28')},
{'1': ('kn', '66:53:29')},
{'1': ('kn', '66:53:30')},
{'1': ('kn', '66:53:31')},
{'1': ('kn', '66:53:32')},
{'1': ('kn', '66:53:33')},
{'1': ('kn', '66:53:34')},
{'1': ('kn', '66:53:35')},
{'1': ('kn', '66:53:36')},
{'1': ('kn', '66:53:37')},
{'1': ('kn', '66:53:38')},
{'1': ('kn', '66:53:39')},
{'1': ('kn', '66:53:40')},
{'1': ('kn', '66:53:41')},
{'1': ('kn', '66:53:42')},
{'1': ('kn', '66:53:43')},
{'1': ('kn', '66:53:44')},
{'1': ('kn', '66:53:45')},
{'1': ('kn', '66:53:46')},
{'1': ('kn', '66:53:47')},
{'1': ('kn', '66:53:48')},
{'1': ('kn', '66:53:49')},
{'1': ('kn', '66:53:50')},
{'1': ('kn', '66:53:51')},
{'1': ('kn', '66:53:52')},
{'1': ('kn', '66:53:53')},
{'1': ('kn', '66:53:54')},
{'1': ('kn', '66:53:55')},
{'1': ('kn', '66:53:56')},
{'1': ('kn', '66:53:57')},
{'1': ('kn', '66:53:58')},
{'1': ('kn', '66:53:59')},
{'1': ('kn', '66:53:60')},
{'1': ('kn', '66:53:61')},
{'1': ('kn', '66:53:62')},
{'1': ('kn', '66:53:63')},
{'1': ('kn', '66:53:64')},
{'1': ('kn', '66:53:65')},
{'1': ('kn', '66:53:66')},
{'1': ('kn', '66:53:67')},
{'1': ('kn', '66:53:68')},
{'1': ('kn', '66:53:69')},
{'1': ('kn', '66:53:70')},
{'1': ('kn', '66:53:71')},
{'1': ('kn', '66:53:72')},
{'1': ('kn', '66:53:73')},
{'1': ('kn', '66:53:74')},
{'1': ('kn', '66:53:75')},
{'1': ('kn', '66:53:76')},
{'1': ('kn', '66:53:77')},
{'1': ('kn', '66:53:78')},
{'1': ('kn', '66:53:79')},
{'1': ('kn', '66:53:80')},
{'1': ('kn', '66:53:81')},
{'1': ('kn', '66:53:82')},
{'1': ('kn', '66:53:83')},
{'1': ('kn', '66:53:84')},
{'1': ('kn', '66:53:85')},
{'1': ('kn', '66:53:86')},
{'1': ('kn', '66:53:87')},
{'1': ('kn', '66:53:88')},
{'1': ('kn', '66:53:89')},
{'1': ('kn', '66:53:90')},
{'1': ('kn', '66:53:91')},
{'1': ('kn', '66:53:92')},
{'1': ('kn', '66:53:93')},
{'1': ('kn', '66:53:94')},
{'1': ('kn', '66:53:95')},
{'1': ('kn', '66:53:96')},
{'1': ('kn', '66:53:97')},
{'1': ('kn', '66:53:98')},
{'1': ('kn', '66:53:99')},
{'1': ('kn', '66:54:00')},
{'1': ('kn', '66:54:01')},
{'1': ('kn', '66:54:02')},
{'1': ('kn', '66:54:03')},
{'1': ('kn', '66:54:04')},
{'1': ('kn', '66:54:05')},
{'1': ('kn', '66:54:06')},
{'1': ('kn', '66:54:07')},
{'1': ('kn', '66:54:08')},
{'1': ('kn', '66:54:09')},
{'1': ('kn', '66:54:10')},
{'1': ('kn', '66:54:11')},
{'1': ('kn', '66:54:12')},
{'1': ('kn', '66:54:13')},
{'1': ('kn', '66:54:14')},
{'1': ('kn', '66:54:15')},
{'1': ('kn', '66:54:16')},
{'1': ('kn', '66:54:17')},
{'1': ('kn', '66:54:18')},
{'1': ('kn', '66:54:19')},
{'1': ('kn', '66:54:20')},
{'1': ('kn', '66:54:21')},
{'1': ('kn', '66:54:22')},
{'1': ('kn', '66:54:23')},
{'1': ('kn', '66:54:24')},
{'1': ('kn', '66:54:25')},
{'1': ('kn', '66:54:26')},
{'1': ('kn', '66:54:27')},
{'1': ('kn', '66:54:28')},
{'1': ('kn', '66:54:29')},
{'1': ('kn', '66:54:30')},
{'1': ('kn', '66:54:31')},
{'1': ('kn', '66:54:32')},
{'1': ('kn', '66:54:33')},
{'1': ('kn', '66:54:34')},
{'1': ('kn', '66:54:35')},
{'1': ('kn', '66:54:36')},
{'1': ('kn', '66:54:37')},
{'1': ('kn', '66:54:38')},
{'1': ('kn', '66:54:39')},
{'1': ('kn', '66:54:40')},
{'1': ('kn', '66:54:41')},
{'1': ('kn', '66:54:42')},
{'1': ('kn', '66:54:43')},
{'1': ('kn', '66:54:44')},
{'1': ('kn', '66:54:45')},
{'1': ('kn', '66:54:46')},
{'1': ('kn', '66:54:47')},
{'1': ('kn', '66:54:48')},
{'1': ('kn', '66:54:49')},
{'1': ('kn', '66:54:50')},
{'1': ('kn', '66:54:51')},
{'1': ('kn', '66:54:52')},
{'1': ('kn', '66:54:53')},
{'1': ('kn', '66:54:54')},
{'1': ('kn', '66:54:55')},
{'1': ('kn', '66:54:56')},
{'1': ('kn', '66:54:57')},
{'1': ('kn', '66:54:58')},
{'1': ('kn', '66:54:59')},
{'1': ('kn', '66:54:60')},
{'1': ('kn', '66:54:61')},
{'1': ('kn', '66:54:62')},
{'1': ('kn', '66:54:63')},
{'1': ('kn', '66:54:64')},
{'1': ('kn', '66:54:65')},
{'1': ('kn', '66:54:66')},
{'1': ('kn', '66:54:67')},
{'1': ('kn', '66:54:68')},
{'1': ('kn', '66:54:69')},
{'1': ('kn', '66:54:70')},
{'1': ('kn', '66:54:71')},
{'1': ('kn', '66:54:72')},
{'1': ('kn', '66:54:73')},
{'1': ('kn', '66:54:74')},
{'1': ('kn', '66:54:75')},
{'1': ('kn', '66:54:76')},
{'1': ('kn', '66:54:77')},
{'1': ('kn', '66:54:78')},
{'1': ('kn', '66:54:79')},
{'1': ('kn', '66:54:80')},
{'1': ('kn', '66:54:81')},
{'1': ('kn', '66:54:82')},
{'1': ('kn', '66:54:83')},
{'1': ('kn', '66:54:84')},
{'1': ('kn', '66:54:85')},
{'1': ('kn', '66:54:86')},
{'1': ('kn', '66:54:87')},
{'1': ('kn', '66:54:88')},
{'1': ('kn', '66:54:89')},
{'1': ('kn', '66:54:90')},
{'1': ('kn', '66:54:91')},
{'1': ('kn', '66:54:92')},
{'1': ('kn', '66:54:93')},
{'1': ('kn', '66:54:94')},
{'1': ('kn', '66:54:95')},
{'1': ('kn', '66:54:96')},
{'1': ('kn', '66:54:97')},
{'1': ('kn', '66:54:98')},
{'1': ('kn', '66:54:99')},
{'1': ('kn', '66:55:00')},
{'1': ('kn', '66:55:01')},
{'1': ('kn', '66:55:02')},
{'1': ('kn', '66:55:03')},
{'1': ('kn', '66:55:04')},
{'1': ('kn', '66:55:05')},
{'1': ('kn', '66:55:06')},
{'1': ('kn', '66:55:07')},
{'1': ('kn', '66:55:08')},
{'1': ('kn', '66:55:09')},
{'1': ('kn', '66:55:10')},
{'1': ('kn', '66:55:11')},
{'1': ('kn', '66:55:12')},
{'1': ('kn', '66:55:13')},
{'1': ('kn', '66:55:14')},
{'1': ('kn', '66:55:15')},
{'1': ('kn', '66:55:16')},
{'1': ('kn', '66:55:17')},
{'1': ('kn', '66:55:18')},
{'1': ('kn', '66:55:19')},
{'1': ('kn', '66:55:20')},
{'1': ('kn', '66:55:21')},
{'1': ('kn', '66:55:22')},
{'1': ('kn', '66:55:23')},
{'1': ('kn', '66:55:24')},
{'1': ('kn', '66:55:25')},
{'1': ('kn', '66:55:26')},
{'1': ('kn', '66:55:27')},
{'1': ('kn', '66:55:28')},
{'1': ('kn', '66:55:29')},
{'1': ('kn', '66:55:30')},
{'1': ('kn', '66:55:31')},
{'1': ('kn', '66:55:32')},
{'1': ('kn', '66:55:33')},
{'1': ('kn', '66:55:34')},
{'1': ('kn', '66:55:35')},
{'1': ('kn', '66:55:36')},
{'1': ('kn', '66:55:37')},
{'1': ('kn', '66:55:38')},
{'1': ('kn', '66:55:39')},
{'1': ('kn', '66:55:40')},
{'1': ('kn', '66:55:41')},
{'1': ('kn', '66:55:42')},
{'1': ('kn', '66:55:43')},
{'1': ('kn', '66:55:44')},
{'1': ('kn', '66:55:45')},
{'1': ('kn', '66:55:46')},
{'1': ('kn', '66:55:47')},
{'1': ('kn', '66:55:48')},
{'1': ('kn', '66:55:49')},
{'1': ('kn', '66:55:50')},
{'1': ('kn', '66:55:51')},
{'1': ('kn', '66:55:52')},
{'1': ('kn', '66:55:53')},
{'1': ('kn', '66:55:54')},
{'1': ('kn', '66:55:55')},
{'1': ('kn', '66:55:56')},
{'1': ('kn', '66:55:57')},
{'1': ('kn', '66:55:58')},
{'1': ('kn', '66:55:59')},
{'1': ('kn', '66:55:60')},
{'1': ('kn', '66:55:61')},
{'1': ('kn', '66:55:62')},
{'1': ('kn', '66:55:63')},
{'1': ('kn', '66:55:64')},
{'1': ('kn', '66:55:65')},
{'1': ('kn', '66:55:66')},
{'1': ('kn', '66:55:67')},
{'1': ('kn', '66:55:68')},
{'1': ('kn', '66:55:69')},
{'1': ('kn', '66:55:70')},
{'1': ('kn', '66:55:71')},
{'1': ('kn', '66:55:72')},
{'1': ('kn', '66:55:73')},
{'1': ('kn', '66:55:74')},
{'1': ('kn', '66:55:75')},
{'1': ('kn', '66:55:76')},
{'1': ('kn', '66:55:77')},
{'1': ('kn', '66:55:78')},
{'1': ('kn', '66:55:79')},
{'1': ('kn', '66:55:80')},
{'1': ('kn', '66:55:81')},
{'1': ('kn', '66:55:82')},
{'1': ('kn', '66:55:83')},
{'1': ('kn', '66:55:84')},
{'1': ('kn', '66:55:85')},
{'1': ('kn', '66:55:86')},
{'1': ('kn', '66:55:87')},
{'1': ('kn', '66:55:88')},
{'1': ('kn', '66:55:89')},
{'1': ('kn', '66:55:90')},
{'1': ('kn', '66:55:91')},
{'1': ('kn', '66:55:92')},
{'1': ('kn', '66:55:93')},
{'1': ('kn', '66:55:94')},
{'1': ('kn', '66:55:95')},
{'1': ('kn', '66:55:96')},
{'1': ('kn', '66:55:97')},
{'1': ('kn', '66:55:98')},
{'1': ('kn', '66:55:99')},
{'1': ('kn', '66:56:00')},
{'1': ('kn', '66:56:01')},
{'1': ('kn', '66:56:02')},
{'1': ('kn', '66:56:03')},
{'1': ('kn', '66:56:04')},
{'1': ('kn', '66:56:05')},
{'1': ('kn', '66:56:06')},
{'1': ('kn', '66:56:07')},
{'1': ('kn', '66:56:08')},
{'1': ('kn', '66:56:09')},
{'1': ('kn', '66:56:10')},
{'1': ('kn', '66:56:11')},
{'1': ('kn', '66:56:12')},
{'1': ('kn', '66:56:13')},
{'1': ('kn', '66:56:14')},
{'1': ('kn', '66:56:15')},
{'1': ('kn', '66:56:16')},
{'1': ('kn', '66:56:17')},
{'1': ('kn', '66:56:18')},
{'1': ('kn', '66:56:19')},
{'1': ('kn', '66:56:20')},
{'1': ('kn', '66:56:21')},
{'1': ('kn', '66:56:22')},
{'1': ('kn', '66:56:23')},
{'1': ('kn', '66:56:24')},
{'1': ('kn', '66:56:25')},
{'1': ('kn', '66:56:26')},
{'1': ('kn', '66:56:27')},
{'1': ('kn', '66:56:28')},
{'1': ('kn', '66:56:29')},
{'1': ('kn', '66:56:30')},
{'1': ('kn', '66:56:31')},
{'1': ('kn', '66:56:32')},
{'1': ('kn', '66:56:33')},
{'1': ('kn', '66:56:34')},
{'1': ('kn', '66:56:35')},
{'1': ('kn', '66:56:36')},
{'1': ('kn', '66:56:37')},
{'1': ('kn', '66:56:38')},
{'1': ('kn', '66:56:39')},
{'1': ('kn', '66:56:40')},
{'1': ('kn', '66:56:41')},
{'1': ('kn', '66:56:42')},
{'1': ('kn', '66:56:43')},
{'1': ('kn', '66:56:44')},
{'1': ('kn', '66:56:45')},
{'1': ('kn', '66:56:46')},
{'1': ('kn', '66:56:47')},
{'1': ('kn', '66:56:48')},
{'1': ('kn', '66:56:49')},
{'1': ('kn', '66:56:50')},
{'1': ('kn', '66:56:51')},
{'1': ('kn', '66:56:52')},
{'1': ('kn', '66:56:53')},
{'1': ('kn', '66:56:54')},
{'1': ('kn', '66:56:55')},
{'1': ('kn', '66:56:56')},
{'1': ('kn', '66:56:57')},
{'1': ('kn', '66:56:58')},
{'1': ('kn', '66:56:59')},
{'1': ('kn', '66:56:60')},
{'1': ('kn', '66:56:61')},
{'1': ('kn', '66:56:62')},
{'1': ('kn', '66:56:63')},
{'1': ('kn', '66:56:64')},
{'1': ('kn', '66:56:65')},
{'1': ('kn', '66:56:66')},
{'1': ('kn', '66:56:67')},
{'1': ('kn', '66:56:68')},
{'1': ('kn', '66:56:69')},
{'1': ('kn', '66:56:70')},
{'1': ('kn', '66:56:71')},
{'1': ('kn', '66:56:72')},
{'1': ('kn', '66:56:73')},
{'1': ('kn', '66:56:74')},
{'1': ('kn', '66:56:75')},
{'1': ('kn', '66:56:76')},
{'1': ('kn', '66:56:77')},
{'1': ('kn', '66:56:78')},
{'1': ('kn', '66:56:79')},
{'1': ('kn', '66:56:80')},
{'1': ('kn', '66:56:81')},
{'1': ('kn', '66:56:82')},
{'1': ('kn', '66:56:83')},
{'1': ('kn', '66:56:84')},
{'1': ('kn', '66:56:85')},
{'1': ('kn', '66:56:86')},
{'1': ('kn', '66:56:87')},
{'1': ('kn', '66:56:88')},
{'1': ('kn', '66:56:89')},
{'1': ('kn', '66:56:90')},
{'1': ('kn', '66:56:91')},
{'1': ('kn', '66:56:92')},
{'1': ('kn', '66:56:93')},
{'1': ('kn', '66:56:94')},
{'1': ('kn', '66:56:95')},
{'1': ('kn', '66:56:96')},
{'1': ('kn', '66:56:97')},
{'1': ('kn', '66:56:98')},
{'1': ('kn', '66:56:99')},
{'1': ('kn', '66:57:00')},
{'1': ('kn', '66:57:01')},
{'1': ('kn', '66:57:02')},
{'1': ('kn', '66:57:03')},
{'1': ('kn', '66:57:04')},
{'1': ('kn', '66:57:05')},
{'1': ('kn', '66:57:06')},
{'1': ('kn', '66:57:07')},
{'1': ('kn', '66:57:08')},
{'1': ('kn', '66:57:09')},
{'1': ('kn', '66:57:10')},
{'1': ('kn', '66:57:11')},
{'1': ('kn', '66:57:12')},
{'1': ('kn', '66:57:13')},
{'1': ('kn', '66:57:14')},
{'1': ('kn', '66:57:15')},
{'1': ('kn', '66:57:16')},
{'1': ('kn', '66:57:17')},
{'1': ('kn', '66:57:18')},
{'1': ('kn', '66:57:19')},
{'1': ('kn', '66:57:20')},
{'1': ('kn', '66:57:21')},
{'1': ('kn', '66:57:22')},
{'1': ('kn', '66:57:23')},
{'1': ('kn', '66:57:24')},
{'1': ('kn', '66:57:25')},
{'1': ('kn', '66:57:26')},
{'1': ('kn', '66:57:27')},
{'1': ('kn', '66:57:28')},
{'1': ('kn', '66:57:29')},
{'1': ('kn', '66:57:30')},
{'1': ('kn', '66:57:31')},
{'1': ('kn', '66:57:32')},
{'1': ('kn', '66:57:33')},
{'1': ('kn', '66:57:34')},
{'1': ('kn', '66:57:35')},
{'1': ('kn', '66:57:36')},
{'1': ('kn', '66:57:37')},
{'1': ('kn', '66:57:38')},
{'1': ('kn', '66:57:39')},
{'1': ('kn', '66:57:40')},
{'1': ('kn', '66:57:41')},
{'1': ('kn', '66:57:42')},
{'1': ('kn', '66:57:43')},
{'1': ('kn', '66:57:44')},
{'1': ('kn', '66:57:45')},
{'1': ('kn', '66:57:46')},
{'1': ('kn', '66:57:47')},
{'1': ('kn', '66:57:48')},
{'1': ('kn', '66:57:49')},
{'1': ('kn', '66:57:50')},
{'1': ('kn', '66:57:51')},
{'1': ('kn', '66:57:52')},
{'1': ('kn', '66:57:53')},
{'1': ('kn', '66:57:54')},
{'1': ('kn', '66:57:55')},
{'1': ('kn', '66:57:56')},
{'1': ('kn', '66:57:57')},
{'1': ('kn', '66:57:58')},
{'1': ('kn', '66:57:59')},
{'1': ('kn', '66:57:60')},
{'1': ('kn', '66:57:61')},
{'1': ('kn', '66:57:62')},
{'1': ('kn', '66:57:63')},
{'1': ('kn', '66:57:64')},
{'1': ('kn', '66:57:65')},
{'1': ('kn', '66:57:66')},
{'1': ('kn', '66:57:67')},
{'1': ('kn', '66:57:68')},
{'1': ('kn', '66:57:69')},
{'1': ('kn', '66:57:70')},
{'1': ('kn', '66:57:71')},
{'1': ('kn', '66:57:72')},
{'1': ('kn', '66:57:73')},
{'1': ('kn', '66:57:74')},
{'1': ('kn', '66:57:75')},
{'1': ('kn', '66:57:76')},
{'1': ('kn', '66:57:77')},
{'1': ('kn', '66:57:78')},
{'1': ('kn', '66:57:79')},
{'1': ('kn', '66:57:80')},
{'1': ('kn', '66:57:81')},
{'1': ('kn', '66:57:82')},
{'1': ('kn', '66:57:83')},
{'1': ('kn', '66:57:84')},
{'1': ('kn', '66:57:85')},
{'1': ('kn', '66:57:86')},
{'1': ('kn', '66:57:87')},
{'1': ('kn', '66:57:88')},
{'1': ('kn', '66:57:89')},
{'1': ('kn', '66:57:90')},
{'1': ('kn', '66:57:91')},
{'1': ('kn', '66:57:92')},
{'1': ('kn', '66:57:93')},
{'1': ('kn', '66:57:94')},
{'1': ('kn', '66:57:95')},
{'1': ('kn', '66:57:96')},
{'1': ('kn', '66:57:97')},
{'1': ('kn', '66:57:98')},
{'1': ('kn', '66:57:99')},
{'1': ('kn', '66:58:00')},
{'1': ('kn', '66:58:01')},
{'1': ('kn', '66:58:02')},
{'1': ('kn', '66:58:03')},
{'1': ('kn', '66:58:04')},
{'1': ('kn', '66:58:05')},
{'1': ('kn', '66:58:06')},
{'1': ('kn', '66:58:07')},
{'1': ('kn', '66:58:08')},
{'1': ('kn', '66:58:09')},
{'1': ('kn', '66:58:10')},
{'1': ('kn', '66:58:11')},
{'1': ('kn', '66:58:12')},
{'1': ('kn', '66:58:13')},
{'1': ('kn', '66:58:14')},
{'1': ('kn', '66:58:15')},
{'1': ('kn', '66:58:16')},
{'1': ('kn', '66:58:17')},
{'1': ('kn', '66:58:18')},
{'1': ('kn', '66:58:19')},
{'1': ('kn', '66:58:20')},
{'1': ('kn', '66:58:21')},
{'1': ('kn', '66:58:22')},
{'1': ('kn', '66:58:23')},
{'1': ('kn', '66:58:24')},
{'1': ('kn', '66:58:25')},
{'1': ('kn', '66:58:26')},
{'1': ('kn', '66:58:27')},
{'1': ('kn', '66:58:28')},
{'1': ('kn', '66:58:29')},
{'1': ('kn', '66:58:30')},
{'1': ('kn', '66:58:31')},
{'1': ('kn', '66:58:32')},
{'1': ('kn', '66:58:33')},
{'1': ('kn', '66:58:34')},
{'1': ('kn', '66:58:35')},
{'1': ('kn', '66:58:36')},
{'1': ('kn', '66:58:37')},
{'1': ('kn', '66:58:38')},
{'1': ('kn', '66:58:39')},
{'1': ('kn', '66:58:40')},
{'1': ('kn', '66:58:41')},
{'1': ('kn', '66:58:42')},
{'1': ('kn', '66:58:43')},
{'1': ('kn', '66:58:44')},
{'1': ('kn', '66:58:45')},
{'1': ('kn', '66:58:46')},
{'1': ('kn', '66:58:47')},
{'1': ('kn', '66:58:48')},
{'1': ('kn', '66:58:49')},
{'1': ('kn', '66:58:50')},
{'1': ('kn', '66:58:51')},
{'1': ('kn', '66:58:52')},
{'1': ('kn', '66:58:53')},
{'1': ('kn', '66:58:54')},
{'1': ('kn', '66:58:55')},
{'1': ('kn', '66:58:56')},
{'1': ('kn', '66:58:57')},
{'1': ('kn', '66:58:58')},
{'1': ('kn', '66:58:59')},
{'1': ('kn', '66:58:60')},
{'1': ('kn', '66:58:61')},
{'1': ('kn', '66:58:62')},
{'1': ('kn', '66:58:63')},
{'1': ('kn', '66:58:64')},
{'1': ('kn', '66:58:65')},
{'1': ('kn', '66:58:66')},
{'1': ('kn', '66:58:67')},
{'1': ('kn', '66:58:68')},
{'1': ('kn', '66:58:69')},
{'1': ('kn', '66:58:70')},
{'1': ('kn', '66:58:71')},
{'1': ('kn', '66:58:72')},
{'1': ('kn', '66:58:73')},
{'1': ('kn', '66:58:74')},
{'1': ('kn', '66:58:75')},
{'1': ('kn', '66:58:76')},
{'1': ('kn', '66:58:77')},
{'1': ('kn', '66:58:78')},
{'1': ('kn', '66:58:79')},
{'1': ('kn', '66:58:80')},
{'1': ('kn', '66:58:81')},
{'1': ('kn', '66:58:82')},
{'1': ('kn', '66:58:83')},
{'1': ('kn', '66:58:84')},
{'1': ('kn', '66:58:85')},
{'1': ('kn', '66:58:86')},
{'1': ('kn', '66:58:87')},
{'1': ('kn', '66:58:88')},
{'1': ('kn', '66:58:89')},
{'1': ('kn', '66:58:90')},
{'1': ('kn', '66:58:91')},
{'1': ('kn', '66:58:92')},
{'1': ('kn', '66:58:93')},
{'1': ('kn', '66:58:94')},
{'1': ('kn', '66:58:95')},
{'1': ('kn', '66:58:96')},
{'1': ('kn', '66:58:97')},
{'1': ('kn', '66:58:98')},
{'1': ('kn', '66:58:99')},
{'1': ('kn', '66:59:00')},
{'1': ('kn', '66:59:01')},
{'1': ('kn', '66:59:02')},
{'1': ('kn', '66:59:03')},
{'1': ('kn', '66:59:04')},
{'1': ('kn', '66:59:05')},
{'1': ('kn', '66:59:06')},
{'1': ('kn', '66:59:07')},
{'1': ('kn', '66:59:08')},
{'1': ('kn', '66:59:09')},
{'1': ('kn', '66:59:10')},
{'1': ('kn', '66:59:11')},
{'1': ('kn', '66:59:12')},
{'1': ('kn', '66:59:13')},
{'1': ('kn', '66:59:14')},
{'1': ('kn', '66:59:15')},
{'1': ('kn', '66:59:16')},
{'1': ('kn', '66:59:17')},
{'1': ('kn', '66:59:18')},
{'1': ('kn', '66:59:19')},
{'1': ('kn', '66:59:20')},
{'1': ('kn', '66:59:21')},
{'1': ('kn', '66:59:22')},
{'1': ('kn', '66:59:23')},
{'1': ('kn', '66:59:24')},
{'1': ('kn', '66:59:25')},
{'1': ('kn', '66:59:26')},
{'1': ('kn', '66:59:27')},
{'1': ('kn', '66:59:28')},
{'1': ('kn', '66:59:29')},
{'1': ('kn', '66:59:30')},
{'1': ('kn', '66:59:31')},
{'1': ('kn', '66:59:32')},
{'1': ('kn', '66:59:33')},
{'1': ('kn', '66:59:34')},
{'1': ('kn', '66:59:35')},
{'1': ('kn', '66:59:36')},
{'1': ('kn', '66:59:37')},
{'1': ('kn', '66:59:38')},
{'1': ('kn', '66:59:39')},
{'1': ('kn', '66:59:40')},
{'1': ('kn', '66:59:41')},
{'1': ('kn', '66:59:42')},
{'1': ('kn', '66:59:43')},
{'1': ('kn', '66:59:44')},
{'1': ('kn', '66:59:45')},
{'1': ('kn', '66:59:46')},
{'1': ('kn', '66:59:47')},
{'1': ('kn', '66:59:48')},
{'1': ('kn', '66:59:49')},
{'1': ('kn', '66:59:50')},
{'1': ('kn', '66:59:51')},
{'1': ('kn', '66:59:52')},
{'1': ('kn', '66:59:53')},
{'1': ('kn', '66:59:54')},
{'1': ('kn', '66:59:55')},
{'1': ('kn', '66:59:56')},
{'1': ('kn', '66:59:57')},
{'1': ('kn', '66:59:58')},
{'1': ('kn', '66:59:59')},
{'1': ('kn', '66:59:60')},
{'1': ('kn', '66:59:61')},
{'1': ('kn', '66:59:62')},
{'1': ('kn', '66:59:63')},
{'1': ('kn', '66:59:64')},
{'1': ('kn', '66:59:65')},
{'1': ('kn', '66:59:66')},
{'1': ('kn', '66:59:67')},
{'1': ('kn', '66:59:68')},
{'1': ('kn', '66:59:69')},
{'1': ('kn', '66:59:70')},
{'1': ('kn', '66:59:71')},
{'1': ('kn', '66:59:72')},
{'1': ('kn', '66:59:73')},
{'1': ('kn', '66:59:74')},
{'1': ('kn', '66:59:75')},
{'1': ('kn', '66:59:76')},
{'1': ('kn', '66:59:77')},
{'1': ('kn', '66:59:78')},
{'1': ('kn', '66:59:79')},
{'1': ('kn', '66:59:80')},
{'1': ('kn', '66:59:81')},
{'1': ('kn', '66:59:82')},
{'1': ('kn', '66:59:83')},
{'1': ('kn', '66:59:84')},
{'1': ('kn', '66:59:85')},
{'1': ('kn', '66:59:86')},
{'1': ('kn', '66:59:87')},
{'1': ('kn', '66:59:88')},
{'1': ('kn', '66:59:89')},
{'1': ('kn', '66:59:90')},
{'1': ('kn', '66:59:91')},
{'1': ('kn', '66:59:92')},
{'1': ('kn', '66:59:93')},
{'1': ('kn', '66:59:94')},
{'1': ('kn', '66:59:95')},
{'1': ('kn', '66:59:96')},
{'1': ('kn', '66:59:97')},
{'1': ('kn', '66:59:98')},
{'1': ('kn', '66:59:99')},
{'1': ('kn', '66:60:00')},
{'1': ('kn', '66:60:01')},
{'1': ('kn', '66:60:02')},
{'1': ('kn', '66:60:03')},
{'1': ('kn', '66:60:04')},
{'1': ('kn', '66:60:05')},
{'1': ('kn', '66:60:06')},
{'1': ('kn', '66:60:07')},
{'1': ('kn', '66:60:08')},
{'1': ('kn', '66:60:09')},
{'1': ('kn', '66:60:10')},
{'1': ('kn', '66:60:11')},
{'1': ('kn', '66:60:12')},
{'1': ('kn', '66:60:13')},
{'1': ('kn', '66:60:14')},
{'1': ('kn', '66:60:15')},
{'1': ('kn', '66:60:16')},
{'1': ('kn', '66:60:17')},
{'1': ('kn', '66:60:18')},
{'1': ('kn', '66:60:19')},
{'1': ('kn', '66:60:20')},
{'1': ('kn', '66:60:21')},
{'1': ('kn', '66:60:22')},
{'1': ('kn', '66:60:23')},
{'1': ('kn', '66:60:24')},
{'1': ('kn', '66:60:25')},
{'1': ('kn', '66:60:26')},
{'1': ('kn', '66:60:27')},
{'1': ('kn', '66:60:28')},
{'1': ('kn', '66:60:29')},
{'1': ('kn', '66:60:30')},
{'1': ('kn', '66:60:31')},
{'1': ('kn', '66:60:32')},
{'1': ('kn', '66:60:33')},
{'1': ('kn', '66:60:34')},
{'1': ('kn', '66:60:35')},
{'1': ('kn', '66:60:36')},
{'1': ('kn', '66:60:37')},
{'1': ('kn', '66:60:38')},
{'1': ('kn', '66:60:39')},
{'1': ('kn', '66:60:40')},
{'1': ('kn', '66:60:41')},
{'1': ('kn', '66:60:42')},
{'1': ('kn', '66:60:43')},
{'1': ('kn', '66:60:44')},
{'1': ('kn', '66:60:45')},
{'1': ('kn', '66:60:46')},
{'1': ('kn', '66:60:47')},
{'1': ('kn', '66:60:48')},
{'1': ('kn', '66:60:49')},
{'1': ('kn', '66:60:50')},
{'1': ('kn', '66:60:51')},
{'1': ('kn', '66:60:52')},
{'1': ('kn', '66:60:53')},
{'1': ('kn', '66:60:54')},
{'1': ('kn', '66:60:55')},
{'1': ('kn', '66:60:56')},
{'1': ('kn', '66:60:57')},
{'1': ('kn', '66:60:58')},
{'1': ('kn', '66:60:59')},
{'1': ('kn', '66:60:60')},
{'1': ('kn', '66:60:61')},
{'1': ('kn', '66:60:62')},
{'1': ('kn', '66:60:63')},
{'1': ('kn', '66:60:64')},
{'1': ('kn', '66:60:65')},
{'1': ('kn', '66:60:66')},
{'1': ('kn', '66:60:67')},
{'1': ('kn', '66:60:68')},
{'1': ('kn', '66:60:69')},
{'1': ('kn', '66:60:70')},
{'1': ('kn', '66:60:71')},
{'1': ('kn', '66:60:72')},
{'1': ('kn', '66:60:73')},
{'1': ('kn', '66:60:74')},
{'1': ('kn', '66:60:75')},
{'1': ('kn', '66:60:76')},
{'1': ('kn', '66:60:77')},
{'1': ('kn', '66:60:78')},
{'1': ('kn', '66:60:79')},
{'1': ('kn', '66:60:80')},
{'1': ('kn', '66:60:81')},
{'1': ('kn', '66:60:82')},
{'1': ('kn', '66:60:83')},
{'1': ('kn', '66:60:84')},
{'1': ('kn', '66:60:85')},
{'1': ('kn', '66:60:86')},
{'1': ('kn', '66:60:87')},
{'1': ('kn', '66:60:88')},
{'1': ('kn', '66:60:89')},
{'1': ('kn', '66:60:90')},
{'1': ('kn', '66:60:91')},
{'1': ('kn', '66:60:92')},
{'1': ('kn', '66:60:93')},
{'1': ('kn', '66:60:94')},
{'1': ('kn', '66:60:95')},
{'1': ('kn', '66:60:96')},
{'1': ('kn', '66:60:97')},
{'1': ('kn', '66:60:98')},
{'1': ('kn', '66:60:99')},
{'1': ('kn', '66:61:00')},
{'1': ('kn', '66:61:01')},
{'1': ('kn', '66:61:02')},
{'1': ('kn', '66:61:03')},
{'1': ('kn', '66:61:04')},
{'1': ('kn', '66:61:05')},
{'1': ('kn', '66:61:06')},
{'1': ('kn', '66:61:07')},
{'1': ('kn', '66:61:08')},
{'1': ('kn', '66:61:09')},
{'1': ('kn', '66:61:10')},
{'1': ('kn', '66:61:11')},
{'1': ('kn', '66:61:12')},
{'1': ('kn', '66:61:13')},
{'1': ('kn', '66:61:14')},
{'1': ('kn', '66:61:15')},
{'1': ('kn', '66:61:16')},
{'1': ('kn', '66:61:17')},
{'1': ('kn', '66:61:18')},
{'1': ('kn', '66:61:19')},
{'1': ('kn', '66:61:20')},
{'1': ('kn', '66:61:21')},
{'1': ('kn', '66:61:22')},
{'1': ('kn', '66:61:23')},
{'1': ('kn', '66:61:24')},
{'1': ('kn', '66:61:25')},
{'1': ('kn', '66:61:26')},
{'1': ('kn', '66:61:27')},
{'1': ('kn', '66:61:28')},
{'1': ('kn', '66:61:29')},
{'1': ('kn', '66:61:30')},
{'1': ('kn', '66:61:31')},
{'1': ('kn', '66:61:32')},
{'1': ('kn', '66:61:33')},
{'1': ('kn', '66:61:34')},
{'1': ('kn', '66:61:35')},
{'1': ('kn', '66:61:36')},
{'1': ('kn', '66:61:37')},
{'1': ('kn', '66:61:38')},
{'1': ('kn', '66:61:39')},
{'1': ('kn', '66:61:40')},
{'1': ('kn', '66:61:41')},
{'1': ('kn', '66:61:42')},
{'1': ('kn', '66:61:43')},
{'1': ('kn', '66:61:44')},
{'1': ('kn', '66:61:45')},
{'1': ('kn', '66:61:46')},
{'1': ('kn', '66:61:47')},
{'1': ('kn', '66:61:48')},
{'1': ('kn', '66:61:49')},
{'1': ('kn', '66:61:50')},
{'1': ('kn', '66:61:51')},
{'1': ('kn', '66:61:52')},
{'1': ('kn', '66:61:53')},
{'1': ('kn', '66:61:54')},
{'1': ('kn', '66:61:55')},
{'1': ('kn', '66:61:56')},
{'1': ('kn', '66:61:57')},
{'1': ('kn', '66:61:58')},
{'1': ('kn', '66:61:59')},
{'1': ('kn', '66:61:60')},
{'1': ('kn', '66:61:61')},
{'1': ('kn', '66:61:62')},
{'1': ('kn', '66:61:63')},
{'1': ('kn', '66:61:64')},
{'1': ('kn', '66:61:65')},
{'1': ('kn', '66:61:66')},
{'1': ('kn', '66:61:67')},
{'1': ('kn', '66:61:68')},
{'1': ('kn', '66:61:69')},
{'1': ('kn', '66:61:70')},
{'1': ('kn', '66:61:71')},
{'1': ('kn', '66:61:72')},
{'1': ('kn', '66:61:73')},
{'1': ('kn', '66:61:74')},
{'1': ('kn', '66:61:75')},
{'1': ('kn', '66:61:76')},
{'1': ('kn', '66:61:77')},
{'1': ('kn', '66:61:78')},
{'1': ('kn', '66:61:79')},
{'1': ('kn', '66:61:80')},
{'1': ('kn', '66:61:81')},
{'1': ('kn', '66:61:82')},
{'1': ('kn', '66:61:83')},
{'1': ('kn', '66:61:84')},
{'1': ('kn', '66:61:85')},
{'1': ('kn', '66:61:86')},
{'1': ('kn', '66:61:87')},
{'1': ('kn', '66:61:88')},
{'1': ('kn', '66:61:89')},
{'1': ('kn', '66:61:90')},
{'1': ('kn', '66:61:91')},
{'1': ('kn', '66:61:92')},
{'1': ('kn', '66:61:93')},
{'1': ('kn', '66:61:94')},
{'1': ('kn', '66:61:95')},
{'1': ('kn', '66:61:96')},
{'1': ('kn', '66:61:97')},
{'1': ('kn', '66:61:98')},
{'1': ('kn', '66:61:99')},
{'1': ('kn', '66:62:00')},
{'1': ('kn', '66:62:01')},
{'1': ('kn', '66:62:02')},
{'1': ('kn', '66:62:03')},
{'1': ('kn', '66:62:04')},
{'1': ('kn', '66:62:05')},
{'1': ('kn', '66:62:06')},
{'1': ('kn', '66:62:07')},
{'1': ('kn', '66:62:08')},
{'1': ('kn', '66:62:09')},
{'1': ('kn', '66:62:10')},
{'1': ('kn', '66:62:11')},
{'1': ('kn', '66:62:12')},
{'1': ('kn', '66:62:13')},
{'1': ('kn', '66:62:14')},
{'1': ('kn', '66:62:15')},
{'1': ('kn', '66:62:16')},
{'1': ('kn', '66:62:17')},
{'1': ('kn', '66:62:18')},
{'1': ('kn', '66:62:19')},
{'1': ('kn', '66:62:20')},
{'1': ('kn', '66:62:21')},
{'1': ('kn', '66:62:22')},
{'1': ('kn', '66:62:23')},
{'1': ('kn', '66:62:24')},
{'1': ('kn', '66:62:25')},
{'1': ('kn', '66:62:26')},
{'1': ('kn', '66:62:27')},
{'1': ('kn', '66:62:28')},
{'1': ('kn', '66:62:29')},
{'1': ('kn', '66:62:30')},
{'1': ('kn', '66:62:31')},
{'1': ('kn', '66:62:32')},
{'1': ('kn', '66:62:33')},
{'1': ('kn', '66:62:34')},
{'1': ('kn', '66:62:35')},
{'1': ('kn', '66:62:36')},
{'1': ('kn', '66:62:37')},
{'1': ('kn', '66:62:38')},
{'1': ('kn', '66:62:39')},
{'1': ('kn', '66:62:40')},
{'1': ('kn', '66:62:41')},
{'1': ('kn', '66:62:42')},
{'1': ('kn', '66:62:43')},
{'1': ('kn', '66:62:44')},
{'1': ('kn', '66:62:45')},
{'1': ('kn', '66:62:46')},
{'1': ('kn', '66:62:47')},
{'1': ('kn', '66:62:48')},
{'1': ('kn', '66:62:49')},
{'1': ('kn', '66:62:50')},
{'1': ('kn', '66:62:51')},
{'1': ('kn', '66:62:52')},
{'1': ('kn', '66:62:53')},
{'1': ('kn', '66:62:54')},
{'1': ('kn', '66:62:55')},
{'1': ('kn', '66:62:56')},
{'1': ('kn', '66:62:57')},
{'1': ('kn', '66:62:58')},
{'1': ('kn', '66:62:59')},
{'1': ('kn', '66:62:60')},
{'1': ('kn', '66:62:61')},
{'1': ('kn', '66:62:62')},
{'1': ('kn', '66:62:63')},
{'1': ('kn', '66:62:64')},
{'1': ('kn', '66:62:65')},
{'1': ('kn', '66:62:66')},
{'1': ('kn', '66:62:67')},
{'1': ('kn', '66:62:68')},
{'1': ('kn', '66:62:69')},
{'1': ('kn', '66:62:70')},
{'1': ('kn', '66:62:71')},
{'1': ('kn', '66:62:72')},
{'1': ('kn', '66:62:73')},
{'1': ('kn', '66:62:74')},
{'1': ('kn', '66:62:75')},
{'1': ('kn', '66:62:76')},
{'1': ('kn', '66:62:77')},
{'1': ('kn', '66:62:78')},
{'1': ('kn', '66:62:79')},
{'1': ('kn', '66:62:80')},
{'1': ('kn', '66:62:81')},
{'1': ('kn', '66:62:82')},
{'1': ('kn', '66:62:83')},
{'1': ('kn', '66:62:84')},
{'1': ('kn', '66:62:85')},
{'1': ('kn', '66:62:86')},
{'1': ('kn', '66:62:87')},
{'1': ('kn', '66:62:88')},
{'1': ('kn', '66:62:89')},
{'1': ('kn', '66:62:90')},
{'1': ('kn', '66:62:91')},
{'1': ('kn', '66:62:92')},
{'1': ('kn', '66:62:93')},
{'1': ('kn', '66:62:94')},
{'1': ('kn', '66:62:95')},
{'1': ('kn', '66:62:96')},
{'1': ('kn', '66:62:97')},
{'1': ('kn', '66:62:98')},
{'1': ('kn', '66:62:99')},
{'1': ('kn', '66:63:00')},
{'1': ('kn', '66:63:01')},
{'1': ('kn', '66:63:02')},
{'1': ('kn', '66:63:03')},
{'1': ('kn', '66:63:04')},
{'1': ('kn', '66:63:05')},
{'1': ('kn', '66:63:06')},
{'1': ('kn', '66:63:07')},
{'1': ('kn', '66:63:08')},
{'1': ('kn', '66:63:09')},
{'1': ('kn', '66:63:10')},
{'1': ('kn', '66:63:11')},
{'1': ('kn', '66:63:12')},
{'1': ('kn', '66:63:13')},
{'1': ('kn', '66:63:14')},
{'1': ('kn', '66:63:15')},
{'1': ('kn', '66:63:16')},
{'1': ('kn', '66:63:17')},
{'1': ('kn', '66:63:18')},
{'1': ('kn', '66:63:19')},
{'1': ('kn', '66:63:20')},
{'1': ('kn', '66:63:21')},
{'1': ('kn', '66:63:22')},
{'1': ('kn', '66:63:23')},
{'1': ('kn', '66:63:24')},
{'1': ('kn', '66:63:25')},
{'1': ('kn', '66:63:26')},
{'1': ('kn', '66:63:27')},
{'1': ('kn', '66:63:28')},
{'1': ('kn', '66:63:29')},
{'1': ('kn', '66:63:30')},
{'1': ('kn', '66:63:31')},
{'1': ('kn', '66:63:32')},
{'1': ('kn', '66:63:33')},
{'1': ('kn', '66:63:34')},
{'1': ('kn', '66:63:35')},
{'1': ('kn', '66:63:36')},
{'1': ('kn', '66:63:37')},
{'1': ('kn', '66:63:38')},
{'1': ('kn', '66:63:39')},
{'1': ('kn', '66:63:40')},
{'1': ('kn', '66:63:41')},
{'1': ('kn', '66:63:42')},
{'1': ('kn', '66:63:43')},
{'1': ('kn', '66:63:44')},
{'1': ('kn', '66:63:45')},
{'1': ('kn', '66:63:46')},
{'1': ('kn', '66:63:47')},
{'1': ('kn', '66:63:48')},
{'1': ('kn', '66:63:49')},
{'1': ('kn', '66:63:50')},
{'1': ('kn', '66:63:51')},
{'1': ('kn', '66:63:52')},
{'1': ('kn', '66:63:53')},
{'1': ('kn', '66:63:54')},
{'1': ('kn', '66:63:55')},
{'1': ('kn', '66:63:56')},
{'1': ('kn', '66:63:57')},
{'1': ('kn', '66:63:58')},
{'1': ('kn', '66:63:59')},
{'1': ('kn', '66:63:60')},
{'1': ('kn', '66:63:61')},
{'1': ('kn', '66:63:62')},
{'1': ('kn', '66:63:63')},
{'1': ('kn', '66:63:64')},
{'1': ('kn', '66:63:65')},
{'1': ('kn', '66:63:66')},
{'1': ('kn', '66:63:67')},
{'1': ('kn', '66:63:68')},
{'1': ('kn', '66:63:69')},
{'1': ('kn', '66:63:70')},
{'1': ('kn', '66:63:71')},
{'1': ('kn', '66:63:72')},
{'1': ('kn', '66:63:73')},
{'1': ('kn', '66:63:74')},
{'1': ('kn', '66:63:75')},
{'1': ('kn', '66:63:76')},
{'1': ('kn', '66:63:77')},
{'1': ('kn', '66:63:78')},
{'1': ('kn', '66:63:79')},
{'1': ('kn', '66:63:80')},
{'1': ('kn', '66:63:81')},
{'1': ('kn', '66:63:82')},
{'1': ('kn', '66:63:83')},
{'1': ('kn', '66:63:84')},
{'1': ('kn', '66:63:85')},
{'1': ('kn', '66:63:86')},
{'1': ('kn', '66:63:87')},
{'1': ('kn', '66:63:88')},
{'1': ('kn', '66:63:89')},
{'1': ('kn', '66:63:90')},
{'1': ('kn', '66:63:91')},
{'1': ('kn', '66:63:92')},
{'1': ('kn', '66:63:93')},
{'1': ('kn', '66:63:94')},
{'1': ('kn', '66:63:95')},
{'1': ('kn', '66:63:96')},
{'1': ('kn', '66:63:97')},
{'1': ('kn', '66:63:98')},
{'1': ('kn', '66:63:99')},
{'1': ('kn', '66:64:00')},
{'1': ('kn', '66:64:01')},
{'1': ('kn', '66:64:02')},
{'1': ('kn', '66:64:03')},
{'1': ('kn', '66:64:04')},
{'1': ('kn', '66:64:05')},
{'1': ('kn', '66:64:06')},
{'1': ('kn', '66:64:07')},
{'1': ('kn', '66:64:08')},
{'1': ('kn', '66:64:09')},
{'1': ('kn', '66:64:10')},
{'1': ('kn', '66:64:11')},
{'1': ('kn', '66:64:12')},
{'1': ('kn', '66:64:13')},
{'1': ('kn', '66:64:14')},
{'1': ('kn', '66:64:15')},
{'1': ('kn', '66:64:16')},
{'1': ('kn', '66:64:17')},
{'1': ('kn', '66:64:18')},
{'1': ('kn', '66:64:19')},
{'1': ('kn', '66:64:20')},
{'1': ('kn', '66:64:21')},
{'1': ('kn', '66:64:22')},
{'1': ('kn', '66:64:23')},
{'1': ('kn', '66:64:24')},
{'1': ('kn', '66:64:25')},
{'1': ('kn', '66:64:26')},
{'1': ('kn', '66:64:27')},
{'1': ('kn', '66:64:28')},
{'1': ('kn', '66:64:29')},
{'1': ('kn', '66:64:30')},
{'1': ('kn', '66:64:31')},
{'1': ('kn', '66:64:32')},
{'1': ('kn', '66:64:33')},
{'1': ('kn', '66:64:34')},
{'1': ('kn', '66:64:35')},
{'1': ('kn', '66:64:36')},
{'1': ('kn', '66:64:37')},
{'1': ('kn', '66:64:38')},
{'1': ('kn', '66:64:39')},
{'1': ('kn', '66:64:40')},
{'1': ('kn', '66:64:41')},
{'1': ('kn', '66:64:42')},
{'1': ('kn', '66:64:43')},
{'1': ('kn', '66:64:44')},
{'1': ('kn', '66:64:45')},
{'1': ('kn', '66:64:46')},
{'1': ('kn', '66:64:47')},
{'1': ('kn', '66:64:48')},
{'1': ('kn', '66:64:49')},
{'1': ('kn', '66:64:50')},
{'1': ('kn', '66:64:51')},
{'1': ('kn', '66:64:52')},
{'1': ('kn', '66:64:53')},
{'1': ('kn', '66:64:54')},
{'1': ('kn', '66:64:55')},
{'1': ('kn', '66:64:56')},
{'1': ('kn', '66:64:57')},
{'1': ('kn', '66:64:58')},
{'1': ('kn', '66:64:59')},
{'1': ('kn', '66:64:60')},
{'1': ('kn', '66:64:61')},
{'1': ('kn', '66:64:62')},
{'1': ('kn', '66:64:63')},
{'1': ('kn', '66:64:64')},
{'1': ('kn', '66:64:65')},
{'1': ('kn', '66:64:66')},
{'1': ('kn', '66:64:67')},
{'1': ('kn', '66:64:68')},
{'1': ('kn', '66:64:69')},
{'1': ('kn', '66:64:70')},
{'1': ('kn', '66:64:71')},
{'1': ('kn', '66:64:72')},
{'1': ('kn', '66:64:73')},
{'1': ('kn', '66:64:74')},
{'1': ('kn', '66:64:75')},
{'1': ('kn', '66:64:76')},
{'1': ('kn', '66:64:77')},
{'1': ('kn', '66:64:78')},
{'1': ('kn', '66:64:79')},
{'1': ('kn', '66:64:80')},
{'1': ('kn', '66:64:81')},
{'1': ('kn', '66:64:82')},
{'1': ('kn', '66:64:83')},
{'1': ('kn', '66:64:84')},
{'1': ('kn', '66:64:85')},
{'1': ('kn', '66:64:86')},
{'1': ('kn', '66:64:87')},
{'1': ('kn', '66:64:88')},
{'1': ('kn', '66:64:89')},
{'1': ('kn', '66:64:90')},
{'1': ('kn', '66:64:91')},
{'1': ('kn', '66:64:92')},
{'1': ('kn', '66:64:93')},
{'1': ('kn', '66:64:94')},
{'1': ('kn', '66:64:95')},
{'1': ('kn', '66:64:96')},
{'1': ('kn', '66:64:97')},
{'1': ('kn', '66:64:98')},
{'1': ('kn', '66:64:99')},
{'1': ('kn', '66:65:00')},
{'1': ('kn', '66:65:01')},
{'1': ('kn', '66:65:02')},
{'1': ('kn', '66:65:03')},
{'1': ('kn', '66:65:04')},
{'1': ('kn', '66:65:05')},
{'1': ('kn', '66:65:06')},
{'1': ('kn', '66:65:07')},
{'1': ('kn', '66:65:08')},
{'1': ('kn', '66:65:09')},
{'1': ('kn', '66:65:10')},
{'1': ('kn', '66:65:11')},
{'1': ('kn', '66:65:12')},
{'1': ('kn', '66:65:13')},
{'1': ('kn', '66:65:14')},
{'1': ('kn', '66:65:15')},
{'1': ('kn', '66:65:16')},
{'1': ('kn', '66:65:17')},
{'1': ('kn', '66:65:18')},
{'1': ('kn', '66:65:19')},
{'1': ('kn', '66:65:20')},
{'1': ('kn', '66:65:21')},
{'1': ('kn', '66:65:22')},
{'1': ('kn', '66:65:23')},
{'1': ('kn', '66:65:24')},
{'1': ('kn', '66:65:25')},
{'1': ('kn', '66:65:26')},
{'1': ('kn', '66:65:27')},
{'1': ('kn', '66:65:28')},
{'1': ('kn', '66:65:29')},
{'1': ('kn', '66:65:30')},
{'1': ('kn', '66:65:31')},
{'1': ('kn', '66:65:32')},
{'1': ('kn', '66:65:33')},
{'1': ('kn', '66:65:34')},
{'1': ('kn', '66:65:35')},
{'1': ('kn', '66:65:36')},
{'1': ('kn', '66:65:37')},
{'1': ('kn', '66:65:38')},
{'1': ('kn', '66:65:39')},
{'1': ('kn', '66:65:40')},
{'1': ('kn', '66:65:41')},
{'1': ('kn', '66:65:42')},
{'1': ('kn', '66:65:43')},
{'1': ('kn', '66:65:44')},
{'1': ('kn', '66:65:45')},
{'1': ('kn', '66:65:46')},
{'1': ('kn', '66:65:47')},
{'1': ('kn', '66:65:48')},
{'1': ('kn', '66:65:49')},
{'1': ('kn', '66:65:50')},
{'1': ('kn', '66:65:51')},
{'1': ('kn', '66:65:52')},
{'1': ('kn', '66:65:53')},
{'1': ('kn', '66:65:54')},
{'1': ('kn', '66:65:55')},
{'1': ('kn', '66:65:56')},
{'1': ('kn', '66:65:57')},
{'1': ('kn', '66:65:58')},
{'1': ('kn', '66:65:59')},
{'1': ('kn', '66:65:60')},
{'1': ('kn', '66:65:61')},
{'1': ('kn', '66:65:62')},
{'1': ('kn', '66:65:63')},
{'1': ('kn', '66:65:64')},
{'1': ('kn', '66:65:65')},
{'1': ('kn', '66:65:66')},
{'1': ('kn', '66:65:67')},
{'1': ('kn', '66:65:68')},
{'1': ('kn', '66:65:69')},
{'1': ('kn', '66:65:70')},
{'1': ('kn', '66:65:71')},
{'1': ('kn', '66:65:72')},
{'1': ('kn', '66:65:73')},
{'1': ('kn', '66:65:74')},
{'1': ('kn', '66:65:75')},
{'1': ('kn', '66:65:76')},
{'1': ('kn', '66:65:77')},
{'1': ('kn', '66:65:78')},
{'1': ('kn', '66:65:79')},
{'1': ('kn', '66:65:80')},
{'1': ('kn', '66:65:81')},
{'1': ('kn', '66:65:82')},
{'1': ('kn', '66:65:83')},
{'1': ('kn', '66:65:84')},
{'1': ('kn', '66:65:85')},
{'1': ('kn', '66:65:86')},
{'1': ('kn', '66:65:87')},
{'1': ('kn', '66:65:88')},
{'1': ('kn', '66:65:89')},
{'1': ('kn', '66:65:90')},
{'1': ('kn', '66:65:91')},
{'1': ('kn', '66:65:92')},
{'1': ('kn', '66:65:93')},
{'1': ('kn', '66:65:94')},
{'1': ('kn', '66:65:95')},
{'1': ('kn', '66:65:96')},
{'1': ('kn', '66:65:97')},
{'1': ('kn', '66:65:98')},
{'1': ('kn', '66:65:99')},
{'1': ('kn', '66:66:00')},
{'1': ('kn', '66:66:01')},
{'1': ('kn', '66:66:02')},
{'1': ('kn', '66:66:03')},
{'1': ('kn', '66:66:04')},
{'1': ('kn', '66:66:05')},
{'1': ('kn', '66:66:06')},
{'1': ('kn', '66:66:07')},
{'1': ('kn', '66:66:08')},
{'1': ('kn', '66:66:09')},
{'1': ('kn', '66:66:10')},
{'1': ('kn', '66:66:11')},
{'1': ('kn', '66:66:12')},
{'1': ('kn', '66:66:13')},
{'1': ('kn', '66:66:14')},
{'1': ('kn', '66:66:15')},
{'1': ('kn', '66:66:16')},
{'1': ('kn', '66:66:17')},
{'1': ('kn', '66:66:18')},
{'1': ('kn', '66:66:19')},
{'1': ('kn', '66:66:20')},
{'1': ('kn', '66:66:21')},
{'1': ('kn', '66:66:22')},
{'1': ('kn', '66:66:23')},
{'1': ('kn', '66:66:24')},
{'1': ('kn', '66:66:25')},
{'1': ('kn', '66:66:26')},
{'1': ('kn', '66:66:27')},
{'1': ('kn', '66:66:28')},
{'1': ('kn', '66:66:29')},
{'1': ('kn', '66:66:30')},
{'1': ('kn', '66:66:31')},
{'1': ('kn', '66:66:32')},
{'1': ('kn', '66:66:33')},
{'1': ('kn', '66:66:34')},
{'1': ('kn', '66:66:35')},
{'1': ('kn', '66:66:36')},
{'1': ('kn', '66:66:37')},
{'1': ('kn', '66:66:38')},
{'1': ('kn', '66:66:39')},
{'1': ('kn', '66:66:40')},
{'1': ('kn', '66:66:41')},
{'1': ('kn', '66:66:42')},
{'1': ('kn', '66:66:43')},
{'1': ('kn', '66:66:44')},
{'1': ('kn', '66:66:45')},
{'1': ('kn', '66:66:46')},
{'1': ('kn', '66:66:47')},
{'1': ('kn', '66:66:48')},
{'1': ('kn', '66:66:49')},
{'1': ('kn', '66:66:50')},
{'1': ('kn', '66:66:51')},
{'1': ('kn', '66:66:52')},
{'1': ('kn', '66:66:53')},
{'1': ('kn', '66:66:54')},
{'1': ('kn', '66:66:55')},
{'1': ('kn', '66:66:56')},
{'1': ('kn', '66:66:57')},
{'1': ('kn', '66:66:58')},
{'1': ('kn', '66:66:59')},
{'1': ('kn', '66:66:60')},
{'1': ('kn', '66:66:61')},
{'1': ('kn', '66:66:62')},
{'1': ('kn', '66:66:63')},
{'1': ('kn', '66:66:64')},
{'1': ('kn', '66:66:65')},
{'1': ('kn', '66:66:66')},
{'1': ('kn', '66:66:67')},
{'1': ('kn', '66:66:68')},
{'1': ('kn', '66:66:69')},
{'1': ('kn', '66:66:70')},
{'1': ('kn', '66:66:71')},
{'1': ('kn', '66:66:72')},
{'1': ('kn', '66:66:73')},
{'1': ('kn', '66:66:74')},
{'1': ('kn', '66:66:75')},
{'1': ('kn', '66:66:76')},
{'1': ('kn', '66:66:77')},
{'1': ('kn', '66:66:78')},
{'1': ('kn', '66:66:79')},
{'1': ('kn', '66:66:80')},
{'1': ('kn', '66:66:81')},
{'1': ('kn', '66:66:82')},
{'1': ('kn', '66:66:83')},
{'1': ('kn', '66:66:84')},
{'1': ('kn', '66:66:85')},
{'1': ('kn', '66:66:86')},
{'1': ('kn', '66:66:87')},
{'1': ('kn', '66:66:88')},
{'1': ('kn', '66:66:89')},
{'1': ('kn', '66:66:90')},
{'1': ('kn', '66:66:91')},
{'1': ('kn', '66:66:92')},
{'1': ('kn', '66:66:93')},
{'1': ('kn', '66:66:94')},
{'1': ('kn', '66:66:95')},
{'1': ('kn', '66:66:96')},
{'1': ('kn', '66:66:97')},
{'1': ('kn', '66:66:98')},
{'1': ('kn', '66:66:99')},
{'1': ('kn', '66:67:00')},
{'1': ('kn', '66:67:01')},
{'1': ('kn', '66:67:02')},
{'1': ('kn', '66:67:03')},
{'1': ('kn', '66:67:04')},
{'1': ('kn', '66:67:05')},
{'1': ('kn', '66:67:06')},
{'1': ('kn', '66:67:07')},
{'1': ('kn', '66:67:08')},
{'1': ('kn', '66:67:09')},
{'1': ('kn', '66:67:10')},
{'1': ('kn', '66:67:11')},
{'1': ('kn', '66:67:12')},
{'1': ('kn', '66:67:13')},
{'1': ('kn', '66:67:14')},
{'1': ('kn', '66:67:15')},
{'1': ('kn', '66:67:16')},
{'1': ('kn', '66:67:17')},
{'1': ('kn', '66:67:18')},
{'1': ('kn', '66:67:19')},
{'1': ('kn', '66:67:20')},
{'1': ('kn', '66:67:21')},
{'1': ('kn', '66:67:22')},
{'1': ('kn', '66:67:23')},
{'1': ('kn', '66:67:24')},
{'1': ('kn', '66:67:25')},
{'1': ('kn', '66:67:26')},
{'1': ('kn', '66:67:27')},
{'1': ('kn', '66:67:28')},
{'1': ('kn', '66:67:29')},
{'1': ('kn', '66:67:30')},
{'1': ('kn', '66:67:31')},
{'1': ('kn', '66:67:32')},
{'1': ('kn', '66:67:33')},
{'1': ('kn', '66:67:34')},
{'1': ('kn', '66:67:35')},
{'1': ('kn', '66:67:36')},
{'1': ('kn', '66:67:37')},
{'1': ('kn', '66:67:38')},
{'1': ('kn', '66:67:39')},
{'1': ('kn', '66:67:40')},
{'1': ('kn', '66:67:41')},
{'1': ('kn', '66:67:42')},
{'1': ('kn', '66:67:43')},
{'1': ('kn', '66:67:44')},
{'1': ('kn', '66:67:45')},
{'1': ('kn', '66:67:46')},
{'1': ('kn', '66:67:47')},
{'1': ('kn', '66:67:48')},
{'1': ('kn', '66:67:49')},
{'1': ('kn', '66:67:50')},
{'1': ('kn', '66:67:51')},
{'1': ('kn', '66:67:52')},
{'1': ('kn', '66:67:53')},
{'1': ('kn', '66:67:54')},
{'1': ('kn', '66:67:55')},
{'1': ('kn', '66:67:56')},
{'1': ('kn', '66:67:57')},
{'1': ('kn', '66:67:58')},
{'1': ('kn', '66:67:59')},
{'1': ('kn', '66:67:60')},
{'1': ('kn', '66:67:61')},
{'1': ('kn', '66:67:62')},
{'1': ('kn', '66:67:63')},
{'1': ('kn', '66:67:64')},
{'1': ('kn', '66:67:65')},
{'1': ('kn', '66:67:66')},
{'1': ('kn', '66:67:67')},
{'1': ('kn', '66:67:68')},
{'1': ('kn', '66:67:69')},
{'1': ('kn', '66:67:70')},
{'1': ('kn', '66:67:71')},
{'1': ('kn', '66:67:72')},
{'1': ('kn', '66:67:73')},
{'1': ('kn', '66:67:74')},
{'1': ('kn', '66:67:75')},
{'1': ('kn', '66:67:76')},
{'1': ('kn', '66:67:77')},
{'1': ('kn', '66:67:78')},
{'1': ('kn', '66:67:79')},
{'1': ('kn', '66:67:80')},
{'1': ('kn', '66:67:81')},
{'1': ('kn', '66:67:82')},
{'1': ('kn', '66:67:83')},
{'1': ('kn', '66:67:84')},
{'1': ('kn', '66:67:85')},
{'1': ('kn', '66:67:86')},
{'1': ('kn', '66:67:87')},
{'1': ('kn', '66:67:88')},
{'1': ('kn', '66:67:89')},
{'1': ('kn', '66:67:90')},
{'1': ('kn', '66:67:91')},
{'1': ('kn', '66:67:92')},
{'1': ('kn', '66:67:93')},
{'1': ('kn', '66:67:94')},
{'1': ('kn', '66:67:95')},
{'1': ('kn', '66:67:96')},
{'1': ('kn', '66:67:97')},
{'1': ('kn', '66:67:98')},
{'1': ('kn', '66:67:99')},
{'1': ('kn', '66:68:00')},
{'1': ('kn', '66:68:01')},
{'1': ('kn', '66:68:02')},
{'1': ('kn', '66:68:03')},
{'1': ('kn', '66:68:04')},
{'1': ('kn', '66:68:05')},
{'1': ('kn', '66:68:06')},
{'1': ('kn', '66:68:07')},
{'1': ('kn', '66:68:08')},
{'1': ('kn', '66:68:09')},
{'1': ('kn', '66:68:10')},
{'1': ('kn', '66:68:11')},
{'1': ('kn', '66:68:12')},
{'1': ('kn', '66:68:13')},
{'1': ('kn', '66:68:14')},
{'1': ('kn', '66:68:15')},
{'1': ('kn', '66:68:16')},
{'1': ('kn', '66:68:17')},
{'1': ('kn', '66:68:18')},
{'1': ('kn', '66:68:19')},
{'1': ('kn', '66:68:20')},
{'1': ('kn', '66:68:21')},
{'1': ('kn', '66:68:22')},
{'1': ('kn', '66:68:23')},
{'1': ('kn', '66:68:24')},
{'1': ('kn', '66:68:25')},
{'1': ('kn', '66:68:26')},
{'1': ('kn', '66:68:27')},
{'1': ('kn', '66:68:28')},
{'1': ('kn', '66:68:29')},
{'1': ('kn', '66:68:30')},
{'1': ('kn', '66:68:31')},
{'1': ('kn', '66:68:32')},
{'1': ('kn', '66:68:33')},
{'1': ('kn', '66:68:34')},
{'1': ('kn', '66:68:35')},
{'1': ('kn', '66:68:36')},
{'1': ('kn', '66:68:37')},
{'1': ('kn', '66:68:38')},
{'1': ('kn', '66:68:39')},
{'1': ('kn', '66:68:40')},
{'1': ('kn', '66:68:41')},
{'1': ('kn', '66:68:42')},
{'1': ('kn', '66:68:43')},
{'1': ('kn', '66:68:44')},
{'1': ('kn', '66:68:45')},
{'1': ('kn', '66:68:46')},
{'1': ('kn', '66:68:47')},
{'1': ('kn', '66:68:48')},
{'1': ('kn', '66:68:49')},
{'1': ('kn', '66:68:50')},
{'1': ('kn', '66:68:51')},
{'1': ('kn', '66:68:52')},
{'1': ('kn', '66:68:53')},
{'1': ('kn', '66:68:54')},
{'1': ('kn', '66:68:55')},
{'1': ('kn', '66:68:56')},
{'1': ('kn', '66:68:57')},
{'1': ('kn', '66:68:58')},
{'1': ('kn', '66:68:59')},
{'1': ('kn', '66:68:60')},
{'1': ('kn', '66:68:61')},
{'1': ('kn', '66:68:62')},
{'1': ('kn', '66:68:63')},
{'1': ('kn', '66:68:64')},
{'1': ('kn', '66:68:65')},
{'1': ('kn', '66:68:66')},
{'1': ('kn', '66:68:67')},
{'1': ('kn', '66:68:68')},
{'1': ('kn', '66:68:69')},
{'1': ('kn', '66:68:70')},
{'1': ('kn', '66:68:71')},
{'1': ('kn', '66:68:72')},
{'1': ('kn', '66:68:73')},
{'1': ('kn', '66:68:74')},
{'1': ('kn', '66:68:75')},
{'1': ('kn', '66:68:76')},
{'1': ('kn', '66:68:77')},
{'1': ('kn', '66:68:78')},
{'1': ('kn', '66:68:79')},
{'1': ('kn', '66:68:80')},
{'1': ('kn', '66:68:81')},
{'1': ('kn', '66:68:82')},
{'1': ('kn', '66:68:83')},
{'1': ('kn', '66:68:84')},
{'1': ('kn', '66:68:85')},
{'1': ('kn', '66:68:86')},
{'1': ('kn', '66:68:87')},
{'1': ('kn', '66:68:88')},
{'1': ('kn', '66:68:89')},
{'1': ('kn', '66:68:90')},
{'1': ('kn', '66:68:91')},
{'1': ('kn', '66:68:92')},
{'1': ('kn', '66:68:93')},
{'1': ('kn', '66:68:94')},
{'1': ('kn', '66:68:95')},
{'1': ('kn', '66:68:96')},
{'1': ('kn', '66:68:97')},
{'1': ('kn', '66:68:98')},
{'1': ('kn', '66:68:99')},
{'1': ('kn', '66:69:00')},
{'1': ('kn', '66:69:01')},
{'1': ('kn', '66:69:02')},
{'1': ('kn', '66:69:03')},
{'1': ('kn', '66:69:04')},
{'1': ('kn', '66:69:05')},
{'1': ('kn', '66:69:06')},
{'1': ('kn', '66:69:07')},
{'1': ('kn', '66:69:08')},
{'1': ('kn', '66:69:09')},
{'1': ('kn', '66:69:10')},
{'1': ('kn', '66:69:11')},
{'1': ('kn', '66:69:12')},
{'1': ('kn', '66:69:13')},
{'1': ('kn', '66:69:14')},
{'1': ('kn', '66:69:15')},
{'1': ('kn', '66:69:16')},
{'1': ('kn', '66:69:17')},
{'1': ('kn', '66:69:18')},
{'1': ('kn', '66:69:19')},
{'1': ('kn', '66:69:20')},
{'1': ('kn', '66:69:21')},
{'1': ('kn', '66:69:22')},
{'1': ('kn', '66:69:23')},
{'1': ('kn', '66:69:24')},
{'1': ('kn', '66:69:25')},
{'1': ('kn', '66:69:26')},
{'1': ('kn', '66:69:27')},
{'1': ('kn', '66:69:28')},
{'1': ('kn', '66:69:29')},
{'1': ('kn', '66:69:30')},
{'1': ('kn', '66:69:31')},
{'1': ('kn', '66:69:32')},
{'1': ('kn', '66:69:33')},
{'1': ('kn', '66:69:34')},
{'1': ('kn', '66:69:35')},
{'1': ('kn', '66:69:36')},
{'1': ('kn', '66:69:37')},
{'1': ('kn', '66:69:38')},
{'1': ('kn', '66:69:39')},
{'1': ('kn', '66:69:40')},
{'1': ('kn', '66:69:41')},
{'1': ('kn', '66:69:42')},
{'1': ('kn', '66:69:43')},
{'1': ('kn', '66:69:44')},
{'1': ('kn', '66:69:45')},
{'1': ('kn', '66:69:46')},
{'1': ('kn', '66:69:47')},
{'1': ('kn', '66:69:48')},
{'1': ('kn', '66:69:49')},
{'1': ('kn', '66:69:50')},
{'1': ('kn', '66:69:51')},
{'1': ('kn', '66:69:52')},
{'1': ('kn', '66:69:53')},
{'1': ('kn', '66:69:54')},
{'1': ('kn', '66:69:55')},
{'1': ('kn', '66:69:56')},
{'1': ('kn', '66:69:57')},
{'1': ('kn', '66:69:58')},
{'1': ('kn', '66:69:59')},
{'1': ('kn', '66:69:60')},
{'1': ('kn', '66:69:61')},
{'1': ('kn', '66:69:62')},
{'1': ('kn', '66:69:63')},
{'1': ('kn', '66:69:64')},
{'1': ('kn', '66:69:65')},
{'1': ('kn', '66:69:66')},
{'1': ('kn', '66:69:67')},
{'1': ('kn', '66:69:68')},
{'1': ('kn', '66:69:69')},
{'1': ('kn', '66:69:70')},
{'1': ('kn', '66:69:71')},
{'1': ('kn', '66:69:72')},
{'1': ('kn', '66:69:73')},
{'1': ('kn', '66:69:74')},
{'1': ('kn', '66:69:75')},
{'1': ('kn', '66:69:76')},
{'1': ('kn', '66:69:77')},
{'1': ('kn', '66:69:78')},
{'1': ('kn', '66:69:79')},
{'1': ('kn', '66:69:80')},
{'1': ('kn', '66:69:81')},
{'1': ('kn', '66:69:82')},
{'1': ('kn', '66:69:83')},
{'1': ('kn', '66:69:84')},
{'1': ('kn', '66:69:85')},
{'1': ('kn', '66:69:86')},
{'1': ('kn', '66:69:87')},
{'1': ('kn', '66:69:88')},
{'1': ('kn', '66:69:89')},
{'1': ('kn', '66:69:90')},
{'1': ('kn', '66:69:91')},
{'1': ('kn', '66:69:92')},
{'1': ('kn', '66:69:93')},
{'1': ('kn', '66:69:94')},
{'1': ('kn', '66:69:95')},
{'1': ('kn', '66:69:96')},
{'1': ('kn', '66:69:97')},
{'1': ('kn', '66:69:98')},
{'1': ('kn', '66:69:99')},
{'1': ('kn', '66:70:00')},
{'1': ('kn', '66:70:01')},
{'1': ('kn', '66:70:02')},
{'1': ('kn', '66:70:03')},
{'1': ('kn', '66:70:04')},
{'1': ('kn', '66:70:05')},
{'1': ('kn', '66:70:06')},
{'1': ('kn', '66:70:07')},
{'1': ('kn', '66:70:08')},
{'1': ('kn', '66:70:09')},
{'1': ('kn', '66:70:10')},
{'1': ('kn', '66:70:11')},
{'1': ('kn', '66:70:12')},
{'1': ('kn', '66:70:13')},
{'1': ('kn', '66:70:14')},
{'1': ('kn', '66:70:15')},
{'1': ('kn', '66:70:16')},
{'1': ('kn', '66:70:17')},
{'1': ('kn', '66:70:18')},
{'1': ('kn', '66:70:19')},
{'1': ('kn', '66:70:20')},
{'1': ('kn', '66:70:21')},
{'1': ('kn', '66:70:22')},
{'1': ('kn', '66:70:23')},
{'1': ('kn', '66:70:24')},
{'1': ('kn', '66:70:25')},
{'1': ('kn', '66:70:26')},
{'1': ('kn', '66:70:27')},
{'1': ('kn', '66:70:28')},
{'1': ('kn', '66:70:29')},
{'1': ('kn', '66:70:30')},
{'1': ('kn', '66:70:31')},
{'1': ('kn', '66:70:32')},
{'1': ('kn', '66:70:33')},
{'1': ('kn', '66:70:34')},
{'1': ('kn', '66:70:35')},
{'1': ('kn', '66:70:36')},
{'1': ('kn', '66:70:37')},
{'1': ('kn', '66:70:38')},
{'1': ('kn', '66:70:39')},
{'1': ('kn', '66:70:40')},
{'1': ('kn', '66:70:41')},
{'1': ('kn', '66:70:42')},
{'1': ('kn', '66:70:43')},
{'1': ('kn', '66:70:44')},
{'1': ('kn', '66:70:45')},
{'1': ('kn', '66:70:46')},
{'1': ('kn', '66:70:47')},
{'1': ('kn', '66:70:48')},
{'1': ('kn', '66:70:49')},
{'1': ('kn', '66:70:50')},
{'1': ('kn', '66:70:51')},
{'1': ('kn', '66:70:52')},
{'1': ('kn', '66:70:53')},
{'1': ('kn', '66:70:54')},
{'1': ('kn', '66:70:55')},
{'1': ('kn', '66:70:56')},
{'1': ('kn', '66:70:57')},
{'1': ('kn', '66:70:58')},
{'1': ('kn', '66:70:59')},
{'1': ('kn', '66:70:60')},
{'1': ('kn', '66:70:61')},
{'1': ('kn', '66:70:62')},
{'1': ('kn', '66:70:63')},
{'1': ('kn', '66:70:64')},
{'1': ('kn', '66:70:65')},
{'1': ('kn', '66:70:66')},
{'1': ('kn', '66:70:67')},
{'1': ('kn', '66:70:68')},
{'1': ('kn', '66:70:69')},
{'1': ('kn', '66:70:70')},
{'1': ('kn', '66:70:71')},
{'1': ('kn', '66:70:72')},
{'1': ('kn', '66:70:73')},
{'1': ('kn', '66:70:74')},
{'1': ('kn', '66:70:75')},
{'1': ('kn', '66:70:76')},
{'1': ('kn', '66:70:77')},
{'1': ('kn', '66:70:78')},
{'1': ('kn', '66:70:79')},
{'1': ('kn', '66:70:80')},
{'1': ('kn', '66:70:81')},
{'1': ('kn', '66:70:82')},
{'1': ('kn', '66:70:83')},
{'1': ('kn', '66:70:84')},
{'1': ('kn', '66:70:85')},
{'1': ('kn', '66:70:86')},
{'1': ('kn', '66:70:87')},
{'1': ('kn', '66:70:88')},
{'1': ('kn', '66:70:89')},
{'1': ('kn', '66:70:90')},
{'1': ('kn', '66:70:91')},
{'1': ('kn', '66:70:92')},
{'1': ('kn', '66:70:93')},
{'1': ('kn', '66:70:94')},
{'1': ('kn', '66:70:95')},
{'1': ('kn', '66:70:96')},
{'1': ('kn', '66:70:97')},
{'1': ('kn', '66:70:98')},
{'1': ('kn', '66:70:99')},
{'1': ('kn', '66:71:00')},
{'1': ('kn', '66:71:01')},
{'1': ('kn', '66:71:02')},
{'1': ('kn', '66:71:03')},
{'1': ('kn', '66:71:04')},
{'1': ('kn', '66:71:05')},
{'1': ('kn', '66:71:06')},
{'1': ('kn', '66:71:07')},
{'1': ('kn', '66:71:08')},
{'1': ('kn', '66:71:09')},
{'1': ('kn', '66:71:10')},
{'1': ('kn', '66:71:11')},
{'1': ('kn', '66:71:12')},
{'1': ('kn', '66:71:13')},
{'1': ('kn', '66:71:14')},
{'1': ('kn', '66:71:15')},
{'1': ('kn', '66:71:16')},
{'1': ('kn', '66:71:17')},
{'1': ('kn', '66:71:18')},
{'1': ('kn', '66:71:19')},
{'1': ('kn', '66:71:20')},
{'1': ('kn', '66:71:21')},
{'1': ('kn', '66:71:22')},
{'1': ('kn', '66:71:23')},
{'1': ('kn', '66:71:24')},
{'1': ('kn', '66:71:25')},
{'1': ('kn', '66:71:26')},
{'1': ('kn', '66:71:27')},
{'1': ('kn', '66:71:28')},
{'1': ('kn', '66:71:29')},
{'1': ('kn', '66:71:30')},
{'1': ('kn', '66:71:31')},
{'1': ('kn', '66:71:32')},
{'1': ('kn', '66:71:33')},
{'1': ('kn', '66:71:34')},
{'1': ('kn', '66:71:35')},
{'1': ('kn', '66:71:36')},
{'1': ('kn', '66:71:37')},
{'1': ('kn', '66:71:38')},
{'1': ('kn', '66:71:39')},
{'1': ('kn', '66:71:40')},
{'1': ('kn', '66:71:41')},
{'1': ('kn', '66:71:42')},
{'1': ('kn', '66:71:43')},
{'1': ('kn', '66:71:44')},
{'1': ('kn', '66:71:45')},
{'1': ('kn', '66:71:46')},
{'1': ('kn', '66:71:47')},
{'1': ('kn', '66:71:48')},
{'1': ('kn', '66:71:49')},
{'1': ('kn', '66:71:50')},
{'1': ('kn', '66:71:51')},
{'1': ('kn', '66:71:52')},
{'1': ('kn', '66:71:53')},
{'1': ('kn', '66:71:54')},
{'1': ('kn', '66:71:55')},
{'1': ('kn', '66:71:56')},
{'1': ('kn', '66:71:57')},
{'1': ('kn', '66:71:58')},
{'1': ('kn', '66:71:59')},
{'1': ('kn', '66:71:60')},
{'1': ('kn', '66:71:61')},
{'1': ('kn', '66:71:62')},
{'1': ('kn', '66:71:63')},
{'1': ('kn', '66:71:64')},
{'1': ('kn', '66:71:65')},
{'1': ('kn', '66:71:66')},
{'1': ('kn', '66:71:67')},
{'1': ('kn', '66:71:68')},
{'1': ('kn', '66:71:69')},
{'1': ('kn', '66:71:70')},
{'1': ('kn', '66:71:71')},
{'1': ('kn', '66:71:72')},
{'1': ('kn', '66:71:73')},
{'1': ('kn', '66:71:74')},
{'1': ('kn', '66:71:75')},
{'1': ('kn', '66:71:76')},
{'1': ('kn', '66:71:77')},
{'1': ('kn', '66:71:78')},
{'1': ('kn', '66:71:79')},
{'1': ('kn', '66:71:80')},
{'1': ('kn', '66:71:81')},
{'1': ('kn', '66:71:82')},
{'1': ('kn', '66:71:83')},
{'1': ('kn', '66:71:84')},
{'1': ('kn', '66:71:85')},
{'1': ('kn', '66:71:86')},
{'1': ('kn', '66:71:87')},
{'1': ('kn', '66:71:88')},
{'1': ('kn', '66:71:89')},
{'1': ('kn', '66:71:90')},
{'1': ('kn', '66:71:91')},
{'1': ('kn', '66:71:92')},
{'1': ('kn', '66:71:93')},
{'1': ('kn', '66:71:94')},
{'1': ('kn', '66:71:95')},
{'1': ('kn', '66:71:96')},
{'1': ('kn', '66:71:97')},
{'1': ('kn', '66:71:98')},
{'1': ('kn', '66:71:99')},
{'1': ('kn', '66:72:00')},
{'1': ('kn', '66:72:01')},
{'1': ('kn', '66:72:02')},
{'1': ('kn', '66:72:03')},
{'1': ('kn', '66:72:04')},
{'1': ('kn', '66:72:05')},
{'1': ('kn', '66:72:06')},
{'1': ('kn', '66:72:07')},
{'1': ('kn', '66:72:08')},
{'1': ('kn', '66:72:09')},
{'1': ('kn', '66:72:10')},
{'1': ('kn', '66:72:11')},
{'1': ('kn', '66:72:12')},
{'1': ('kn', '66:72:13')},
{'1': ('kn', '66:72:14')},
{'1': ('kn', '66:72:15')},
{'1': ('kn', '66:72:16')},
{'1': ('kn', '66:72:17')},
{'1': ('kn', '66:72:18')},
{'1': ('kn', '66:72:19')},
{'1': ('kn', '66:72:20')},
{'1': ('kn', '66:72:21')},
{'1': ('kn', '66:72:22')},
{'1': ('kn', '66:72:23')},
{'1': ('kn', '66:72:24')},
{'1': ('kn', '66:72:25')},
{'1': ('kn', '66:72:26')},
{'1': ('kn', '66:72:27')},
{'1': ('kn', '66:72:28')},
{'1': ('kn', '66:72:29')},
{'1': ('kn', '66:72:30')},
{'1': ('kn', '66:72:31')},
{'1': ('kn', '66:72:32')},
{'1': ('kn', '66:72:33')},
{'1': ('kn', '66:72:34')},
{'1': ('kn', '66:72:35')},
{'1': ('kn', '66:72:36')},
{'1': ('kn', '66:72:37')},
{'1': ('kn', '66:72:38')},
{'1': ('kn', '66:72:39')},
{'1': ('kn', '66:72:40')},
{'1': ('kn', '66:72:41')},
{'1': ('kn', '66:72:42')},
{'1': ('kn', '66:72:43')},
{'1': ('kn', '66:72:44')},
{'1': ('kn', '66:72:45')},
{'1': ('kn', '66:72:46')},
{'1': ('kn', '66:72:47')},
{'1': ('kn', '66:72:48')},
{'1': ('kn', '66:72:49')},
{'1': ('kn', '66:72:50')},
{'1': ('kn', '66:72:51')},
{'1': ('kn', '66:72:52')},
{'1': ('kn', '66:72:53')},
{'1': ('kn', '66:72:54')},
{'1': ('kn', '66:72:55')},
{'1': ('kn', '66:72:56')},
{'1': ('kn', '66:72:57')},
{'1': ('kn', '66:72:58')},
{'1': ('kn', '66:72:59')},
{'1': ('kn', '66:72:60')},
{'1': ('kn', '66:72:61')},
{'1': ('kn', '66:72:62')},
{'1': ('kn', '66:72:63')},
{'1': ('kn', '66:72:64')},
{'1': ('kn', '66:72:65')},
{'1': ('kn', '66:72:66')},
{'1': ('kn', '66:72:67')},
{'1': ('kn', '66:72:68')},
{'1': ('kn', '66:72:69')},
{'1': ('kn', '66:72:70')},
{'1': ('kn', '66:72:71')},
{'1': ('kn', '66:72:72')},
{'1': ('kn', '66:72:73')},
{'1': ('kn', '66:72:74')},
{'1': ('kn', '66:72:75')},
{'1': ('kn', '66:72:76')},
{'1': ('kn', '66:72:77')},
{'1': ('kn', '66:72:78')},
{'1': ('kn', '66:72:79')},
{'1': ('kn', '66:72:80')},
{'1': ('kn', '66:72:81')},
{'1': ('kn', '66:72:82')},
{'1': ('kn', '66:72:83')},
{'1': ('kn', '66:72:84')},
{'1': ('kn', '66:72:85')},
{'1': ('kn', '66:72:86')},
{'1': ('kn', '66:72:87')},
{'1': ('kn', '66:72:88')},
{'1': ('kn', '66:72:89')},
{'1': ('kn', '66:72:90')},
{'1': ('kn', '66:72:91')},
{'1': ('kn', '66:72:92')},
{'1': ('kn', '66:72:93')},
{'1': ('kn', '66:72:94')},
{'1': ('kn', '66:72:95')},
{'1': ('kn', '66:72:96')},
{'1': ('kn', '66:72:97')},
{'1': ('kn', '66:72:98')},
{'1': ('kn', '66:72:99')},
{'1': ('kn', '66:73:00')},
{'1': ('kn', '66:73:01')},
{'1': ('kn', '66:73:02')},
{'1': ('kn', '66:73:03')},
{'1': ('kn', '66:73:04')},
{'1': ('kn', '66:73:05')},
{'1': ('kn', '66:73:06')},
{'1': ('kn', '66:73:07')},
{'1': ('kn', '66:73:08')},
{'1': ('kn', '66:73:09')},
{'1': ('kn', '66:73:10')},
{'1': ('kn', '66:73:11')},
{'1': ('kn', '66:73:12')},
{'1': ('kn', '66:73:13')},
{'1': ('kn', '66:73:14')},
{'1': ('kn', '66:73:15')},
{'1': ('kn', '66:73:16')},
{'1': ('kn', '66:73:17')},
{'1': ('kn', '66:73:18')},
{'1': ('kn', '66:73:19')},
{'1': ('kn', '66:73:20')},
{'1': ('kn', '66:73:21')},
{'1': ('kn', '66:73:22')},
{'1': ('kn', '66:73:23')},
{'1': ('kn', '66:73:24')},
{'1': ('kn', '66:73:25')},
{'1': ('kn', '66:73:26')},
{'1': ('kn', '66:73:27')},
{'1': ('kn', '66:73:28')},
{'1': ('kn', '66:73:29')},
{'1': ('kn', '66:73:30')},
{'1': ('kn', '66:73:31')},
{'1': ('kn', '66:73:32')},
{'1': ('kn', '66:73:33')},
{'1': ('kn', '66:73:34')},
{'1': ('kn', '66:73:35')},
{'1': ('kn', '66:73:36')},
{'1': ('kn', '66:73:37')},
{'1': ('kn', '66:73:38')},
{'1': ('kn', '66:73:39')},
{'1': ('kn', '66:73:40')},
{'1': ('kn', '66:73:41')},
{'1': ('kn', '66:73:42')},
{'1': ('kn', '66:73:43')},
{'1': ('kn', '66:73:44')},
{'1': ('kn', '66:73:45')},
{'1': ('kn', '66:73:46')},
{'1': ('kn', '66:73:47')},
{'1': ('kn', '66:73:48')},
{'1': ('kn', '66:73:49')},
{'1': ('kn', '66:73:50')},
{'1': ('kn', '66:73:51')},
{'1': ('kn', '66:73:52')},
{'1': ('kn', '66:73:53')},
{'1': ('kn', '66:73:54')},
{'1': ('kn', '66:73:55')},
{'1': ('kn', '66:73:56')},
{'1': ('kn', '66:73:57')},
{'1': ('kn', '66:73:58')},
{'1': ('kn', '66:73:59')},
{'1': ('kn', '66:73:60')},
{'1': ('kn', '66:73:61')},
{'1': ('kn', '66:73:62')},
{'1': ('kn', '66:73:63')},
{'1': ('kn', '66:73:64')},
{'1': ('kn', '66:73:65')},
{'1': ('kn', '66:73:66')},
{'1': ('kn', '66:73:67')},
{'1': ('kn', '66:73:68')},
{'1': ('kn', '66:73:69')},
{'1': ('kn', '66:73:70')},
{'1': ('kn', '66:73:71')},
{'1': ('kn', '66:73:72')},
{'1': ('kn', '66:73:73')},
{'1': ('kn', '66:73:74')},
{'1': ('kn', '66:73:75')},
{'1': ('kn', '66:73:76')},
{'1': ('kn', '66:73:77')},
{'1': ('kn', '66:73:78')},
{'1': ('kn', '66:73:79')},
{'1': ('kn', '66:73:80')},
{'1': ('kn', '66:73:81')},
{'1': ('kn', '66:73:82')},
{'1': ('kn', '66:73:83')},
{'1': ('kn', '66:73:84')},
{'1': ('kn', '66:73:85')},
{'1': ('kn', '66:73:86')},
{'1': ('kn', '66:73:87')},
{'1': ('kn', '66:73:88')},
{'1': ('kn', '66:73:89')},
{'1': ('kn', '66:73:90')},
{'1': ('kn', '66:73:91')},
{'1': ('kn', '66:73:92')},
{'1': ('kn', '66:73:93')},
{'1': ('kn', '66:73:94')},
{'1': ('kn', '66:73:95')},
{'1': ('kn', '66:73:96')},
{'1': ('kn', '66:73:97')},
{'1': ('kn', '66:73:98')},
{'1': ('kn', '66:73:99')} ]
}
TPL_FORMAT2 = [
'66:00:', '66:01:', '66:02:', '66:03:', '66:04:', '66:05:',
'66:06:', '66:07:', '66:08:', '66:09:', '66:10:', '66:11:',
'66:12:', '66:13:', '66:14:', '66:15:', '66:16:', '66:17:',
'66:18:', '66:19:', '66:20:', '66:21:', '66:22:', '66:23:',
'66:24:', '66:25:', '66:26:', '66:27:', '66:28:', '66:29:',
'66:30:', '66:31:', '66:32:', '66:33:', '66:34:', '66:35:',
'66:36:', '66:37:', '66:38:', '66:39:', '66:40:', '66:42:',
'66:43:', '66:44:', '66:45:', '66:46:', '66:47:', '66:48:',
'66:49:', '66:50:', '66:51:', '66:52:', '66:53:', '66:54:',
'66:55:', '66:57:', '66:58:', '66:59:', '66:60:', '66:61:',
'66:62:', '66:63:', '66:64:', '66:65:', '66:66:', '66:67:',
'66:68:', '66:69:', '66:70:', '66:71:', '66:72:', '66:73:',
'66:41:1', '66:41:2', '66:41:3', '66:41:4', '66:41:5',
'66:41:6', '66:41:7', '66:41:8', '66:41:9',
'66:41:00', '66:41:01', '66:41:02', '66:41:03', '66:41:04',
'66:41:05', '66:41:06', '66:41:07', '66:41:08', '66:41:09',
'66:56:1', '66:56:2', '66:56:3', '66:56:4', '66:56:5',
'66:56:6', '66:56:7', '66:56:8', '66:56:9',
'66:56:00', '66:56:01', '66:56:02', '66:56:03', '66:56:04',
'66:56:05', '66:56:06', '66:56:07', '66:56:08', '66:56:09']
TPL_FORMAT_BL = [
'66:00:00',
'66:00:01',
'66:00:02',
'66:00:03',
'66:00:04',
'66:00:05',
'66:00:06',
'66:00:07',
'66:00:08',
'66:00:09',
'66:00:10',
'66:00:11',
'66:00:12',
'66:00:13',
'66:00:14',
'66:00:15',
'66:00:16',
'66:00:17',
'66:00:18',
'66:00:19',
'66:00:20',
'66:00:21',
'66:00:22',
'66:00:23',
'66:00:24',
'66:00:25',
'66:00:26',
'66:00:27',
'66:00:28',
'66:00:29',
'66:00:30',
'66:00:31',
'66:00:32',
'66:00:33',
'66:00:34',
'66:00:35',
'66:00:36',
'66:00:37',
'66:00:38',
'66:00:39',
'66:00:40',
'66:00:41',
'66:00:42',
'66:00:43',
'66:00:44',
'66:00:45',
'66:00:46',
'66:00:47',
'66:00:48',
'66:00:49',
'66:00:50',
'66:00:51',
'66:00:52',
'66:00:53',
'66:00:54',
'66:00:55',
'66:00:56',
'66:00:57',
'66:00:58',
'66:00:59',
'66:00:60',
'66:00:61',
'66:00:62',
'66:00:63',
'66:00:64',
'66:00:65',
'66:00:66',
'66:00:67',
'66:00:68',
'66:00:69',
'66:00:70',
'66:00:71',
'66:00:72',
'66:00:73',
'66:00:74',
'66:00:75',
'66:00:76',
'66:00:77',
'66:00:78',
'66:00:79',
'66:00:80',
'66:00:81',
'66:00:82',
'66:00:83',
'66:00:84',
'66:00:85',
'66:00:86',
'66:00:87',
'66:00:88',
'66:00:89',
'66:00:90',
'66:00:91',
'66:00:92',
'66:00:93',
'66:00:94',
'66:00:95',
'66:00:96',
'66:00:97',
'66:00:98',
'66:00:99',
'66:01:00',
'66:01:01',
'66:01:02',
'66:01:03',
'66:01:04',
'66:01:05',
'66:01:06',
'66:01:07',
'66:01:08',
'66:01:09',
'66:01:10',
'66:01:11',
'66:01:12',
'66:01:13',
'66:01:14',
'66:01:15',
'66:01:16',
'66:01:17',
'66:01:18',
'66:01:19',
'66:01:20',
'66:01:21',
'66:01:22',
'66:01:23',
'66:01:24',
'66:01:25',
'66:01:26',
'66:01:27',
'66:01:28',
'66:01:29',
'66:01:30',
'66:01:31',
'66:01:32',
'66:01:33',
'66:01:34',
'66:01:35',
'66:01:36',
'66:01:37',
'66:01:38',
'66:01:39',
'66:01:40',
'66:01:41',
'66:01:42',
'66:01:43',
'66:01:44',
'66:01:45',
'66:01:46',
'66:01:47',
'66:01:48',
'66:01:49',
'66:01:50',
'66:01:51',
'66:01:52',
'66:01:53',
'66:01:54',
'66:01:55',
'66:01:56',
'66:01:57',
'66:01:58',
'66:01:59',
'66:01:60',
'66:01:61',
'66:01:62',
'66:01:63',
'66:01:64',
'66:01:65',
'66:01:66',
'66:01:67',
'66:01:68',
'66:01:69',
'66:01:70',
'66:01:71',
'66:01:72',
'66:01:73',
'66:01:74',
'66:01:75',
'66:01:76',
'66:01:77',
'66:01:78',
'66:01:79',
'66:01:80',
'66:01:81',
'66:01:82',
'66:01:83',
'66:01:84',
'66:01:85',
'66:01:86',
'66:01:87',
'66:01:88',
'66:01:89',
'66:01:90',
'66:01:91',
'66:01:92',
'66:01:93',
'66:01:94',
'66:01:95',
'66:01:96',
'66:01:97',
'66:01:98',
'66:01:99',
'66:02:00',
'66:02:01',
'66:02:02',
'66:02:03',
'66:02:04',
'66:02:05',
'66:02:06',
'66:02:07',
'66:02:08',
'66:02:09',
'66:02:10',
'66:02:11',
'66:02:12',
'66:02:13',
'66:02:14',
'66:02:15',
'66:02:16',
'66:02:17',
'66:02:18',
'66:02:19',
'66:02:20',
'66:02:21',
'66:02:22',
'66:02:23',
'66:02:24',
'66:02:25',
'66:02:26',
'66:02:27',
'66:02:28',
'66:02:29',
'66:02:30',
'66:02:31',
'66:02:32',
'66:02:33',
'66:02:34',
'66:02:35',
'66:02:36',
'66:02:37',
'66:02:38',
'66:02:39',
'66:02:40',
'66:02:41',
'66:02:42',
'66:02:43',
'66:02:44',
'66:02:45',
'66:02:46',
'66:02:47',
'66:02:48',
'66:02:49',
'66:02:50',
'66:02:51',
'66:02:52',
'66:02:53',
'66:02:54',
'66:02:55',
'66:02:56',
'66:02:57',
'66:02:58',
'66:02:59',
'66:02:60',
'66:02:61',
'66:02:62',
'66:02:63',
'66:02:64',
'66:02:65',
'66:02:66',
'66:02:67',
'66:02:68',
'66:02:69',
'66:02:70',
'66:02:71',
'66:02:72',
'66:02:73',
'66:02:74',
'66:02:75',
'66:02:76',
'66:02:77',
'66:02:78',
'66:02:79',
'66:02:80',
'66:02:81',
'66:02:82',
'66:02:83',
'66:02:84',
'66:02:85',
'66:02:86',
'66:02:87',
'66:02:88',
'66:02:89',
'66:02:90',
'66:02:91',
'66:02:92',
'66:02:93',
'66:02:94',
'66:02:95',
'66:02:96',
'66:02:97',
'66:02:98',
'66:02:99',
'66:03:00',
'66:03:01',
'66:03:02',
'66:03:03',
'66:03:04',
'66:03:05',
'66:03:06',
'66:03:07',
'66:03:08',
'66:03:09',
'66:03:10',
'66:03:11',
'66:03:12',
'66:03:13',
'66:03:14',
'66:03:15',
'66:03:16',
'66:03:17',
'66:03:18',
'66:03:19',
'66:03:20',
'66:03:21',
'66:03:22',
'66:03:23',
'66:03:24',
'66:03:25',
'66:03:26',
'66:03:27',
'66:03:28',
'66:03:29',
'66:03:30',
'66:03:31',
'66:03:32',
'66:03:33',
'66:03:34',
'66:03:35',
'66:03:36',
'66:03:37',
'66:03:38',
'66:03:39',
'66:03:40',
'66:03:41',
'66:03:42',
'66:03:43',
'66:03:44',
'66:03:45',
'66:03:46',
'66:03:47',
'66:03:48',
'66:03:49',
'66:03:50',
'66:03:51',
'66:03:52',
'66:03:53',
'66:03:54',
'66:03:55',
'66:03:56',
'66:03:57',
'66:03:58',
'66:03:59',
'66:03:60',
'66:03:61',
'66:03:62',
'66:03:63',
'66:03:64',
'66:03:65',
'66:03:66',
'66:03:67',
'66:03:68',
'66:03:69',
'66:03:70',
'66:03:71',
'66:03:72',
'66:03:73',
'66:03:74',
'66:03:75',
'66:03:76',
'66:03:77',
'66:03:78',
'66:03:79',
'66:03:80',
'66:03:81',
'66:03:82',
'66:03:83',
'66:03:84',
'66:03:85',
'66:03:86',
'66:03:87',
'66:03:88',
'66:03:89',
'66:03:90',
'66:03:91',
'66:03:92',
'66:03:93',
'66:03:94',
'66:03:95',
'66:03:96',
'66:03:97',
'66:03:98',
'66:03:99',
'66:04:00',
'66:04:01',
'66:04:02',
'66:04:03',
'66:04:04',
'66:04:05',
'66:04:06',
'66:04:07',
'66:04:08',
'66:04:09',
'66:04:10',
'66:04:11',
'66:04:12',
'66:04:13',
'66:04:14',
'66:04:15',
'66:04:16',
'66:04:17',
'66:04:18',
'66:04:19',
'66:04:20',
'66:04:21',
'66:04:22',
'66:04:23',
'66:04:24',
'66:04:25',
'66:04:26',
'66:04:27',
'66:04:28',
'66:04:29',
'66:04:30',
'66:04:31',
'66:04:32',
'66:04:33',
'66:04:34',
'66:04:35',
'66:04:36',
'66:04:37',
'66:04:38',
'66:04:39',
'66:04:40',
'66:04:41',
'66:04:42',
'66:04:43',
'66:04:44',
'66:04:45',
'66:04:46',
'66:04:47',
'66:04:48',
'66:04:49',
'66:04:50',
'66:04:51',
'66:04:52',
'66:04:53',
'66:04:54',
'66:04:55',
'66:04:56',
'66:04:57',
'66:04:58',
'66:04:59',
'66:04:60',
'66:04:61',
'66:04:62',
'66:04:63',
'66:04:64',
'66:04:65',
'66:04:66',
'66:04:67',
'66:04:68',
'66:04:69',
'66:04:70',
'66:04:71',
'66:04:72',
'66:04:73',
'66:04:74',
'66:04:75',
'66:04:76',
'66:04:77',
'66:04:78',
'66:04:79',
'66:04:80',
'66:04:81',
'66:04:82',
'66:04:83',
'66:04:84',
'66:04:85',
'66:04:86',
'66:04:87',
'66:04:88',
'66:04:89',
'66:04:90',
'66:04:91',
'66:04:92',
'66:04:93',
'66:04:94',
'66:04:95',
'66:04:96',
'66:04:97',
'66:04:98',
'66:04:99',
'66:05:00',
'66:05:01',
'66:05:02',
'66:05:03',
'66:05:04',
'66:05:05',
'66:05:06',
'66:05:07',
'66:05:08',
'66:05:09',
'66:05:10',
'66:05:11',
'66:05:12',
'66:05:13',
'66:05:14',
'66:05:15',
'66:05:16',
'66:05:17',
'66:05:18',
'66:05:19',
'66:05:20',
'66:05:21',
'66:05:22',
'66:05:23',
'66:05:24',
'66:05:25',
'66:05:26',
'66:05:27',
'66:05:28',
'66:05:29',
'66:05:30',
'66:05:31',
'66:05:32',
'66:05:33',
'66:05:34',
'66:05:35',
'66:05:36',
'66:05:37',
'66:05:38',
'66:05:39',
'66:05:40',
'66:05:41',
'66:05:42',
'66:05:43',
'66:05:44',
'66:05:45',
'66:05:46',
'66:05:47',
'66:05:48',
'66:05:49',
'66:05:50',
'66:05:51',
'66:05:52',
'66:05:53',
'66:05:54',
'66:05:55',
'66:05:56',
'66:05:57',
'66:05:58',
'66:05:59',
'66:05:60',
'66:05:61',
'66:05:62',
'66:05:63',
'66:05:64',
'66:05:65',
'66:05:66',
'66:05:67',
'66:05:68',
'66:05:69',
'66:05:70',
'66:05:71',
'66:05:72',
'66:05:73',
'66:05:74',
'66:05:75',
'66:05:76',
'66:05:77',
'66:05:78',
'66:05:79',
'66:05:80',
'66:05:81',
'66:05:82',
'66:05:83',
'66:05:84',
'66:05:85',
'66:05:86',
'66:05:87',
'66:05:88',
'66:05:89',
'66:05:90',
'66:05:91',
'66:05:92',
'66:05:93',
'66:05:94',
'66:05:95',
'66:05:96',
'66:05:97',
'66:05:98',
'66:05:99',
'66:06:00',
'66:06:01',
'66:06:02',
'66:06:03',
'66:06:04',
'66:06:05',
'66:06:06',
'66:06:07',
'66:06:08',
'66:06:09',
'66:06:10',
'66:06:11',
'66:06:12',
'66:06:13',
'66:06:14',
'66:06:15',
'66:06:16',
'66:06:17',
'66:06:18',
'66:06:19',
'66:06:20',
'66:06:21',
'66:06:22',
'66:06:23',
'66:06:24',
'66:06:25',
'66:06:26',
'66:06:27',
'66:06:28',
'66:06:29',
'66:06:30',
'66:06:31',
'66:06:32',
'66:06:33',
'66:06:34',
'66:06:35',
'66:06:36',
'66:06:37',
'66:06:38',
'66:06:39',
'66:06:40',
'66:06:41',
'66:06:42',
'66:06:43',
'66:06:44',
'66:06:45',
'66:06:46',
'66:06:47',
'66:06:48',
'66:06:49',
'66:06:50',
'66:06:51',
'66:06:52',
'66:06:53',
'66:06:54',
'66:06:55',
'66:06:56',
'66:06:57',
'66:06:58',
'66:06:59',
'66:06:60',
'66:06:61',
'66:06:62',
'66:06:63',
'66:06:64',
'66:06:65',
'66:06:66',
'66:06:67',
'66:06:68',
'66:06:69',
'66:06:70',
'66:06:71',
'66:06:72',
'66:06:73',
'66:06:74',
'66:06:75',
'66:06:76',
'66:06:77',
'66:06:78',
'66:06:79',
'66:06:80',
'66:06:81',
'66:06:82',
'66:06:83',
'66:06:84',
'66:06:85',
'66:06:86',
'66:06:87',
'66:06:88',
'66:06:89',
'66:06:90',
'66:06:91',
'66:06:92',
'66:06:93',
'66:06:94',
'66:06:95',
'66:06:96',
'66:06:97',
'66:06:98',
'66:06:99',
'66:07:00',
'66:07:01',
'66:07:02',
'66:07:03',
'66:07:04',
'66:07:05',
'66:07:06',
'66:07:07',
'66:07:08',
'66:07:09',
'66:07:10',
'66:07:11',
'66:07:12',
'66:07:13',
'66:07:14',
'66:07:15',
'66:07:16',
'66:07:17',
'66:07:18',
'66:07:19',
'66:07:20',
'66:07:21',
'66:07:22',
'66:07:23',
'66:07:24',
'66:07:25',
'66:07:26',
'66:07:27',
'66:07:28',
'66:07:29',
'66:07:30',
'66:07:31',
'66:07:32',
'66:07:33',
'66:07:34',
'66:07:35',
'66:07:36',
'66:07:37',
'66:07:38',
'66:07:39',
'66:07:40',
'66:07:41',
'66:07:42',
'66:07:43',
'66:07:44',
'66:07:45',
'66:07:46',
'66:07:47',
'66:07:48',
'66:07:49',
'66:07:50',
'66:07:51',
'66:07:52',
'66:07:53',
'66:07:54',
'66:07:55',
'66:07:56',
'66:07:57',
'66:07:58',
'66:07:59',
'66:07:60',
'66:07:61',
'66:07:62',
'66:07:63',
'66:07:64',
'66:07:65',
'66:07:66',
'66:07:67',
'66:07:68',
'66:07:69',
'66:07:70',
'66:07:71',
'66:07:72',
'66:07:73',
'66:07:74',
'66:07:75',
'66:07:76',
'66:07:77',
'66:07:78',
'66:07:79',
'66:07:80',
'66:07:81',
'66:07:82',
'66:07:83',
'66:07:84',
'66:07:85',
'66:07:86',
'66:07:87',
'66:07:88',
'66:07:89',
'66:07:90',
'66:07:91',
'66:07:92',
'66:07:93',
'66:07:94',
'66:07:95',
'66:07:96',
'66:07:97',
'66:07:98',
'66:07:99',
'66:08:00',
'66:08:01',
'66:08:02',
'66:08:03',
'66:08:04',
'66:08:05',
'66:08:06',
'66:08:07',
'66:08:08',
'66:08:09',
'66:08:10',
'66:08:11',
'66:08:12',
'66:08:13',
'66:08:14',
'66:08:15',
'66:08:16',
'66:08:17',
'66:08:18',
'66:08:19',
'66:08:20',
'66:08:21',
'66:08:22',
'66:08:23',
'66:08:24',
'66:08:25',
'66:08:26',
'66:08:27',
'66:08:28',
'66:08:29',
'66:08:30',
'66:08:31',
'66:08:32',
'66:08:33',
'66:08:34',
'66:08:35',
'66:08:36',
'66:08:37',
'66:08:38',
'66:08:39',
'66:08:40',
'66:08:41',
'66:08:42',
'66:08:43',
'66:08:44',
'66:08:45',
'66:08:46',
'66:08:47',
'66:08:48',
'66:08:49',
'66:08:50',
'66:08:51',
'66:08:52',
'66:08:53',
'66:08:54',
'66:08:55',
'66:08:56',
'66:08:57',
'66:08:58',
'66:08:59',
'66:08:60',
'66:08:61',
'66:08:62',
'66:08:63',
'66:08:64',
'66:08:65',
'66:08:66',
'66:08:67',
'66:08:68',
'66:08:69',
'66:08:70',
'66:08:71',
'66:08:72',
'66:08:73',
'66:08:74',
'66:08:75',
'66:08:76',
'66:08:77',
'66:08:78',
'66:08:79',
'66:08:80',
'66:08:81',
'66:08:82',
'66:08:83',
'66:08:84',
'66:08:85',
'66:08:86',
'66:08:87',
'66:08:88',
'66:08:89',
'66:08:90',
'66:08:91',
'66:08:92',
'66:08:93',
'66:08:94',
'66:08:95',
'66:08:96',
'66:08:97',
'66:08:98',
'66:08:99',
'66:09:00',
'66:09:01',
'66:09:02',
'66:09:03',
'66:09:04',
'66:09:05',
'66:09:06',
'66:09:07',
'66:09:08',
'66:09:09',
'66:09:10',
'66:09:11',
'66:09:12',
'66:09:13',
'66:09:14',
'66:09:15',
'66:09:16',
'66:09:17',
'66:09:18',
'66:09:19',
'66:09:20',
'66:09:21',
'66:09:22',
'66:09:23',
'66:09:24',
'66:09:25',
'66:09:26',
'66:09:27',
'66:09:28',
'66:09:29',
'66:09:30',
'66:09:31',
'66:09:32',
'66:09:33',
'66:09:34',
'66:09:35',
'66:09:36',
'66:09:37',
'66:09:38',
'66:09:39',
'66:09:40',
'66:09:41',
'66:09:42',
'66:09:43',
'66:09:44',
'66:09:45',
'66:09:46',
'66:09:47',
'66:09:48',
'66:09:49',
'66:09:50',
'66:09:51',
'66:09:52',
'66:09:53',
'66:09:54',
'66:09:55',
'66:09:56',
'66:09:57',
'66:09:58',
'66:09:59',
'66:09:60',
'66:09:61',
'66:09:62',
'66:09:63',
'66:09:64',
'66:09:65',
'66:09:66',
'66:09:67',
'66:09:68',
'66:09:69',
'66:09:70',
'66:09:71',
'66:09:72',
'66:09:73',
'66:09:74',
'66:09:75',
'66:09:76',
'66:09:77',
'66:09:78',
'66:09:79',
'66:09:80',
'66:09:81',
'66:09:82',
'66:09:83',
'66:09:84',
'66:09:85',
'66:09:86',
'66:09:87',
'66:09:88',
'66:09:89',
'66:09:90',
'66:09:91',
'66:09:92',
'66:09:93',
'66:09:94',
'66:09:95',
'66:09:96',
'66:09:97',
'66:09:98',
'66:09:99',
'66:10:00',
'66:10:01',
'66:10:02',
'66:10:03',
'66:10:04',
'66:10:05',
'66:10:06',
'66:10:07',
'66:10:08',
'66:10:09',
'66:10:10',
'66:10:11',
'66:10:12',
'66:10:13',
'66:10:14',
'66:10:15',
'66:10:16',
'66:10:17',
'66:10:18',
'66:10:19',
'66:10:20',
'66:10:21',
'66:10:22',
'66:10:23',
'66:10:24',
'66:10:25',
'66:10:26',
'66:10:27',
'66:10:28',
'66:10:29',
'66:10:30',
'66:10:31',
'66:10:32',
'66:10:33',
'66:10:34',
'66:10:35',
'66:10:36',
'66:10:37',
'66:10:38',
'66:10:39',
'66:10:40',
'66:10:41',
'66:10:42',
'66:10:43',
'66:10:44',
'66:10:45',
'66:10:46',
'66:10:47',
'66:10:48',
'66:10:49',
'66:10:50',
'66:10:51',
'66:10:52',
'66:10:53',
'66:10:54',
'66:10:55',
'66:10:56',
'66:10:57',
'66:10:58',
'66:10:59',
'66:10:60',
'66:10:61',
'66:10:62',
'66:10:63',
'66:10:64',
'66:10:65',
'66:10:66',
'66:10:67',
'66:10:68',
'66:10:69',
'66:10:70',
'66:10:71',
'66:10:72',
'66:10:73',
'66:10:74',
'66:10:75',
'66:10:76',
'66:10:77',
'66:10:78',
'66:10:79',
'66:10:80',
'66:10:81',
'66:10:82',
'66:10:83',
'66:10:84',
'66:10:85',
'66:10:86',
'66:10:87',
'66:10:88',
'66:10:89',
'66:10:90',
'66:10:91',
'66:10:92',
'66:10:93',
'66:10:94',
'66:10:95',
'66:10:96',
'66:10:97',
'66:10:98',
'66:10:99',
'66:11:00',
'66:11:01',
'66:11:02',
'66:11:03',
'66:11:04',
'66:11:05',
'66:11:06',
'66:11:07',
'66:11:08',
'66:11:09',
'66:11:10',
'66:11:11',
'66:11:12',
'66:11:13',
'66:11:14',
'66:11:15',
'66:11:16',
'66:11:17',
'66:11:18',
'66:11:19',
'66:11:20',
'66:11:21',
'66:11:22',
'66:11:23',
'66:11:24',
'66:11:25',
'66:11:26',
'66:11:27',
'66:11:28',
'66:11:29',
'66:11:30',
'66:11:31',
'66:11:32',
'66:11:33',
'66:11:34',
'66:11:35',
'66:11:36',
'66:11:37',
'66:11:38',
'66:11:39',
'66:11:40',
'66:11:41',
'66:11:42',
'66:11:43',
'66:11:44',
'66:11:45',
'66:11:46',
'66:11:47',
'66:11:48',
'66:11:49',
'66:11:50',
'66:11:51',
'66:11:52',
'66:11:53',
'66:11:54',
'66:11:55',
'66:11:56',
'66:11:57',
'66:11:58',
'66:11:59',
'66:11:60',
'66:11:61',
'66:11:62',
'66:11:63',
'66:11:64',
'66:11:65',
'66:11:66',
'66:11:67',
'66:11:68',
'66:11:69',
'66:11:70',
'66:11:71',
'66:11:72',
'66:11:73',
'66:11:74',
'66:11:75',
'66:11:76',
'66:11:77',
'66:11:78',
'66:11:79',
'66:11:80',
'66:11:81',
'66:11:82',
'66:11:83',
'66:11:84',
'66:11:85',
'66:11:86',
'66:11:87',
'66:11:88',
'66:11:89',
'66:11:90',
'66:11:91',
'66:11:92',
'66:11:93',
'66:11:94',
'66:11:95',
'66:11:96',
'66:11:97',
'66:11:98',
'66:11:99',
'66:12:00',
'66:12:01',
'66:12:02',
'66:12:03',
'66:12:04',
'66:12:05',
'66:12:06',
'66:12:07',
'66:12:08',
'66:12:09',
'66:12:10',
'66:12:11',
'66:12:12',
'66:12:13',
'66:12:14',
'66:12:15',
'66:12:16',
'66:12:17',
'66:12:18',
'66:12:19',
'66:12:20',
'66:12:21',
'66:12:22',
'66:12:23',
'66:12:24',
'66:12:25',
'66:12:26',
'66:12:27',
'66:12:28',
'66:12:29',
'66:12:30',
'66:12:31',
'66:12:32',
'66:12:33',
'66:12:34',
'66:12:35',
'66:12:36',
'66:12:37',
'66:12:38',
'66:12:39',
'66:12:40',
'66:12:41',
'66:12:42',
'66:12:43',
'66:12:44',
'66:12:45',
'66:12:46',
'66:12:47',
'66:12:48',
'66:12:49',
'66:12:50',
'66:12:51',
'66:12:52',
'66:12:53',
'66:12:54',
'66:12:55',
'66:12:56',
'66:12:57',
'66:12:58',
'66:12:59',
'66:12:60',
'66:12:61',
'66:12:62',
'66:12:63',
'66:12:64',
'66:12:65',
'66:12:66',
'66:12:67',
'66:12:68',
'66:12:69',
'66:12:70',
'66:12:71',
'66:12:72',
'66:12:73',
'66:12:74',
'66:12:75',
'66:12:76',
'66:12:77',
'66:12:78',
'66:12:79',
'66:12:80',
'66:12:81',
'66:12:82',
'66:12:83',
'66:12:84',
'66:12:85',
'66:12:86',
'66:12:87',
'66:12:88',
'66:12:89',
'66:12:90',
'66:12:91',
'66:12:92',
'66:12:93',
'66:12:94',
'66:12:95',
'66:12:96',
'66:12:97',
'66:12:98',
'66:12:99',
'66:13:00',
'66:13:01',
'66:13:02',
'66:13:03',
'66:13:04',
'66:13:05',
'66:13:06',
'66:13:07',
'66:13:08',
'66:13:09',
'66:13:10',
'66:13:11',
'66:13:12',
'66:13:13',
'66:13:14',
'66:13:15',
'66:13:16',
'66:13:17',
'66:13:18',
'66:13:19',
'66:13:20',
'66:13:21',
'66:13:22',
'66:13:23',
'66:13:24',
'66:13:25',
'66:13:26',
'66:13:27',
'66:13:28',
'66:13:29',
'66:13:30',
'66:13:31',
'66:13:32',
'66:13:33',
'66:13:34',
'66:13:35',
'66:13:36',
'66:13:37',
'66:13:38',
'66:13:39',
'66:13:40',
'66:13:41',
'66:13:42',
'66:13:43',
'66:13:44',
'66:13:45',
'66:13:46',
'66:13:47',
'66:13:48',
'66:13:49',
'66:13:50',
'66:13:51',
'66:13:52',
'66:13:53',
'66:13:54',
'66:13:55',
'66:13:56',
'66:13:57',
'66:13:58',
'66:13:59',
'66:13:60',
'66:13:61',
'66:13:62',
'66:13:63',
'66:13:64',
'66:13:65',
'66:13:66',
'66:13:67',
'66:13:68',
'66:13:69',
'66:13:70',
'66:13:71',
'66:13:72',
'66:13:73',
'66:13:74',
'66:13:75',
'66:13:76',
'66:13:77',
'66:13:78',
'66:13:79',
'66:13:80',
'66:13:81',
'66:13:82',
'66:13:83',
'66:13:84',
'66:13:85',
'66:13:86',
'66:13:87',
'66:13:88',
'66:13:89',
'66:13:90',
'66:13:91',
'66:13:92',
'66:13:93',
'66:13:94',
'66:13:95',
'66:13:96',
'66:13:97',
'66:13:98',
'66:13:99',
'66:14:00',
'66:14:01',
'66:14:02',
'66:14:03',
'66:14:04',
'66:14:05',
'66:14:06',
'66:14:07',
'66:14:08',
'66:14:09',
'66:14:10',
'66:14:11',
'66:14:12',
'66:14:13',
'66:14:14',
'66:14:15',
'66:14:16',
'66:14:17',
'66:14:18',
'66:14:19',
'66:14:20',
'66:14:21',
'66:14:22',
'66:14:23',
'66:14:24',
'66:14:25',
'66:14:26',
'66:14:27',
'66:14:28',
'66:14:29',
'66:14:30',
'66:14:31',
'66:14:32',
'66:14:33',
'66:14:34',
'66:14:35',
'66:14:36',
'66:14:37',
'66:14:38',
'66:14:39',
'66:14:40',
'66:14:41',
'66:14:42',
'66:14:43',
'66:14:44',
'66:14:45',
'66:14:46',
'66:14:47',
'66:14:48',
'66:14:49',
'66:14:50',
'66:14:51',
'66:14:52',
'66:14:53',
'66:14:54',
'66:14:55',
'66:14:56',
'66:14:57',
'66:14:58',
'66:14:59',
'66:14:60',
'66:14:61',
'66:14:62',
'66:14:63',
'66:14:64',
'66:14:65',
'66:14:66',
'66:14:67',
'66:14:68',
'66:14:69',
'66:14:70',
'66:14:71',
'66:14:72',
'66:14:73',
'66:14:74',
'66:14:75',
'66:14:76',
'66:14:77',
'66:14:78',
'66:14:79',
'66:14:80',
'66:14:81',
'66:14:82',
'66:14:83',
'66:14:84',
'66:14:85',
'66:14:86',
'66:14:87',
'66:14:88',
'66:14:89',
'66:14:90',
'66:14:91',
'66:14:92',
'66:14:93',
'66:14:94',
'66:14:95',
'66:14:96',
'66:14:97',
'66:14:98',
'66:14:99',
'66:15:00',
'66:15:01',
'66:15:02',
'66:15:03',
'66:15:04',
'66:15:05',
'66:15:06',
'66:15:07',
'66:15:08',
'66:15:09',
'66:15:10',
'66:15:11',
'66:15:12',
'66:15:13',
'66:15:14',
'66:15:15',
'66:15:16',
'66:15:17',
'66:15:18',
'66:15:19',
'66:15:20',
'66:15:21',
'66:15:22',
'66:15:23',
'66:15:24',
'66:15:25',
'66:15:26',
'66:15:27',
'66:15:28',
'66:15:29',
'66:15:30',
'66:15:31',
'66:15:32',
'66:15:33',
'66:15:34',
'66:15:35',
'66:15:36',
'66:15:37',
'66:15:38',
'66:15:39',
'66:15:40',
'66:15:41',
'66:15:42',
'66:15:43',
'66:15:44',
'66:15:45',
'66:15:46',
'66:15:47',
'66:15:48',
'66:15:49',
'66:15:50',
'66:15:51',
'66:15:52',
'66:15:53',
'66:15:54',
'66:15:55',
'66:15:56',
'66:15:57',
'66:15:58',
'66:15:59',
'66:15:60',
'66:15:61',
'66:15:62',
'66:15:63',
'66:15:64',
'66:15:65',
'66:15:66',
'66:15:67',
'66:15:68',
'66:15:69',
'66:15:70',
'66:15:71',
'66:15:72',
'66:15:73',
'66:15:74',
'66:15:75',
'66:15:76',
'66:15:77',
'66:15:78',
'66:15:79',
'66:15:80',
'66:15:81',
'66:15:82',
'66:15:83',
'66:15:84',
'66:15:85',
'66:15:86',
'66:15:87',
'66:15:88',
'66:15:89',
'66:15:90',
'66:15:91',
'66:15:92',
'66:15:93',
'66:15:94',
'66:15:95',
'66:15:96',
'66:15:97',
'66:15:98',
'66:15:99',
'66:16:00',
'66:16:01',
'66:16:02',
'66:16:03',
'66:16:04',
'66:16:05',
'66:16:06',
'66:16:07',
'66:16:08',
'66:16:09',
'66:16:10',
'66:16:11',
'66:16:12',
'66:16:13',
'66:16:14',
'66:16:15',
'66:16:16',
'66:16:17',
'66:16:18',
'66:16:19',
'66:16:20',
'66:16:21',
'66:16:22',
'66:16:23',
'66:16:24',
'66:16:25',
'66:16:26',
'66:16:27',
'66:16:28',
'66:16:29',
'66:16:30',
'66:16:31',
'66:16:32',
'66:16:33',
'66:16:34',
'66:16:35',
'66:16:36',
'66:16:37',
'66:16:38',
'66:16:39',
'66:16:40',
'66:16:41',
'66:16:42',
'66:16:43',
'66:16:44',
'66:16:45',
'66:16:46',
'66:16:47',
'66:16:48',
'66:16:49',
'66:16:50',
'66:16:51',
'66:16:52',
'66:16:53',
'66:16:54',
'66:16:55',
'66:16:56',
'66:16:57',
'66:16:58',
'66:16:59',
'66:16:60',
'66:16:61',
'66:16:62',
'66:16:63',
'66:16:64',
'66:16:65',
'66:16:66',
'66:16:67',
'66:16:68',
'66:16:69',
'66:16:70',
'66:16:71',
'66:16:72',
'66:16:73',
'66:16:74',
'66:16:75',
'66:16:76',
'66:16:77',
'66:16:78',
'66:16:79',
'66:16:80',
'66:16:81',
'66:16:82',
'66:16:83',
'66:16:84',
'66:16:85',
'66:16:86',
'66:16:87',
'66:16:88',
'66:16:89',
'66:16:90',
'66:16:91',
'66:16:92',
'66:16:93',
'66:16:94',
'66:16:95',
'66:16:96',
'66:16:97',
'66:16:98',
'66:16:99',
'66:17:00',
'66:17:01',
'66:17:02',
'66:17:03',
'66:17:04',
'66:17:05',
'66:17:06',
'66:17:07',
'66:17:08',
'66:17:09',
'66:17:10',
'66:17:11',
'66:17:12',
'66:17:13',
'66:17:14',
'66:17:15',
'66:17:16',
'66:17:17',
'66:17:18',
'66:17:19',
'66:17:20',
'66:17:21',
'66:17:22',
'66:17:23',
'66:17:24',
'66:17:25',
'66:17:26',
'66:17:27',
'66:17:28',
'66:17:29',
'66:17:30',
'66:17:31',
'66:17:32',
'66:17:33',
'66:17:34',
'66:17:35',
'66:17:36',
'66:17:37',
'66:17:38',
'66:17:39',
'66:17:40',
'66:17:41',
'66:17:42',
'66:17:43',
'66:17:44',
'66:17:45',
'66:17:46',
'66:17:47',
'66:17:48',
'66:17:49',
'66:17:50',
'66:17:51',
'66:17:52',
'66:17:53',
'66:17:54',
'66:17:55',
'66:17:56',
'66:17:57',
'66:17:58',
'66:17:59',
'66:17:60',
'66:17:61',
'66:17:62',
'66:17:63',
'66:17:64',
'66:17:65',
'66:17:66',
'66:17:67',
'66:17:68',
'66:17:69',
'66:17:70',
'66:17:71',
'66:17:72',
'66:17:73',
'66:17:74',
'66:17:75',
'66:17:76',
'66:17:77',
'66:17:78',
'66:17:79',
'66:17:80',
'66:17:81',
'66:17:82',
'66:17:83',
'66:17:84',
'66:17:85',
'66:17:86',
'66:17:87',
'66:17:88',
'66:17:89',
'66:17:90',
'66:17:91',
'66:17:92',
'66:17:93',
'66:17:94',
'66:17:95',
'66:17:96',
'66:17:97',
'66:17:98',
'66:17:99',
'66:18:00',
'66:18:01',
'66:18:02',
'66:18:03',
'66:18:04',
'66:18:05',
'66:18:06',
'66:18:07',
'66:18:08',
'66:18:09',
'66:18:10',
'66:18:11',
'66:18:12',
'66:18:13',
'66:18:14',
'66:18:15',
'66:18:16',
'66:18:17',
'66:18:18',
'66:18:19',
'66:18:20',
'66:18:21',
'66:18:22',
'66:18:23',
'66:18:24',
'66:18:25',
'66:18:26',
'66:18:27',
'66:18:28',
'66:18:29',
'66:18:30',
'66:18:31',
'66:18:32',
'66:18:33',
'66:18:34',
'66:18:35',
'66:18:36',
'66:18:37',
'66:18:38',
'66:18:39',
'66:18:40',
'66:18:41',
'66:18:42',
'66:18:43',
'66:18:44',
'66:18:45',
'66:18:46',
'66:18:47',
'66:18:48',
'66:18:49',
'66:18:50',
'66:18:51',
'66:18:52',
'66:18:53',
'66:18:54',
'66:18:55',
'66:18:56',
'66:18:57',
'66:18:58',
'66:18:59',
'66:18:60',
'66:18:61',
'66:18:62',
'66:18:63',
'66:18:64',
'66:18:65',
'66:18:66',
'66:18:67',
'66:18:68',
'66:18:69',
'66:18:70',
'66:18:71',
'66:18:72',
'66:18:73',
'66:18:74',
'66:18:75',
'66:18:76',
'66:18:77',
'66:18:78',
'66:18:79',
'66:18:80',
'66:18:81',
'66:18:82',
'66:18:83',
'66:18:84',
'66:18:85',
'66:18:86',
'66:18:87',
'66:18:88',
'66:18:89',
'66:18:90',
'66:18:91',
'66:18:92',
'66:18:93',
'66:18:94',
'66:18:95',
'66:18:96',
'66:18:97',
'66:18:98',
'66:18:99',
'66:19:00',
'66:19:01',
'66:19:02',
'66:19:03',
'66:19:04',
'66:19:05',
'66:19:06',
'66:19:07',
'66:19:08',
'66:19:09',
'66:19:10',
'66:19:11',
'66:19:12',
'66:19:13',
'66:19:14',
'66:19:15',
'66:19:16',
'66:19:17',
'66:19:18',
'66:19:19',
'66:19:20',
'66:19:21',
'66:19:22',
'66:19:23',
'66:19:24',
'66:19:25',
'66:19:26',
'66:19:27',
'66:19:28',
'66:19:29',
'66:19:30',
'66:19:31',
'66:19:32',
'66:19:33',
'66:19:34',
'66:19:35',
'66:19:36',
'66:19:37',
'66:19:38',
'66:19:39',
'66:19:40',
'66:19:41',
'66:19:42',
'66:19:43',
'66:19:44',
'66:19:45',
'66:19:46',
'66:19:47',
'66:19:48',
'66:19:49',
'66:19:50',
'66:19:51',
'66:19:52',
'66:19:53',
'66:19:54',
'66:19:55',
'66:19:56',
'66:19:57',
'66:19:58',
'66:19:59',
'66:19:60',
'66:19:61',
'66:19:62',
'66:19:63',
'66:19:64',
'66:19:65',
'66:19:66',
'66:19:67',
'66:19:68',
'66:19:69',
'66:19:70',
'66:19:71',
'66:19:72',
'66:19:73',
'66:19:74',
'66:19:75',
'66:19:76',
'66:19:77',
'66:19:78',
'66:19:79',
'66:19:80',
'66:19:81',
'66:19:82',
'66:19:83',
'66:19:84',
'66:19:85',
'66:19:86',
'66:19:87',
'66:19:88',
'66:19:89',
'66:19:90',
'66:19:91',
'66:19:92',
'66:19:93',
'66:19:94',
'66:19:95',
'66:19:96',
'66:19:97',
'66:19:98',
'66:19:99',
'66:20:00',
'66:20:01',
'66:20:02',
'66:20:03',
'66:20:04',
'66:20:05',
'66:20:06',
'66:20:07',
'66:20:08',
'66:20:09',
'66:20:10',
'66:20:11',
'66:20:12',
'66:20:13',
'66:20:14',
'66:20:15',
'66:20:16',
'66:20:17',
'66:20:18',
'66:20:19',
'66:20:20',
'66:20:21',
'66:20:22',
'66:20:23',
'66:20:24',
'66:20:25',
'66:20:26',
'66:20:27',
'66:20:28',
'66:20:29',
'66:20:30',
'66:20:31',
'66:20:32',
'66:20:33',
'66:20:34',
'66:20:35',
'66:20:36',
'66:20:37',
'66:20:38',
'66:20:39',
'66:20:40',
'66:20:41',
'66:20:42',
'66:20:43',
'66:20:44',
'66:20:45',
'66:20:46',
'66:20:47',
'66:20:48',
'66:20:49',
'66:20:50',
'66:20:51',
'66:20:52',
'66:20:53',
'66:20:54',
'66:20:55',
'66:20:56',
'66:20:57',
'66:20:58',
'66:20:59',
'66:20:60',
'66:20:61',
'66:20:62',
'66:20:63',
'66:20:64',
'66:20:65',
'66:20:66',
'66:20:67',
'66:20:68',
'66:20:69',
'66:20:70',
'66:20:71',
'66:20:72',
'66:20:73',
'66:20:74',
'66:20:75',
'66:20:76',
'66:20:77',
'66:20:78',
'66:20:79',
'66:20:80',
'66:20:81',
'66:20:82',
'66:20:83',
'66:20:84',
'66:20:85',
'66:20:86',
'66:20:87',
'66:20:88',
'66:20:89',
'66:20:90',
'66:20:91',
'66:20:92',
'66:20:93',
'66:20:94',
'66:20:95',
'66:20:96',
'66:20:97',
'66:20:98',
'66:20:99',
'66:21:00',
'66:21:01',
'66:21:02',
'66:21:03',
'66:21:04',
'66:21:05',
'66:21:06',
'66:21:07',
'66:21:08',
'66:21:09',
'66:21:10',
'66:21:11',
'66:21:12',
'66:21:13',
'66:21:14',
'66:21:15',
'66:21:16',
'66:21:17',
'66:21:18',
'66:21:19',
'66:21:20',
'66:21:21',
'66:21:22',
'66:21:23',
'66:21:24',
'66:21:25',
'66:21:26',
'66:21:27',
'66:21:28',
'66:21:29',
'66:21:30',
'66:21:31',
'66:21:32',
'66:21:33',
'66:21:34',
'66:21:35',
'66:21:36',
'66:21:37',
'66:21:38',
'66:21:39',
'66:21:40',
'66:21:41',
'66:21:42',
'66:21:43',
'66:21:44',
'66:21:45',
'66:21:46',
'66:21:47',
'66:21:48',
'66:21:49',
'66:21:50',
'66:21:51',
'66:21:52',
'66:21:53',
'66:21:54',
'66:21:55',
'66:21:56',
'66:21:57',
'66:21:58',
'66:21:59',
'66:21:60',
'66:21:61',
'66:21:62',
'66:21:63',
'66:21:64',
'66:21:65',
'66:21:66',
'66:21:67',
'66:21:68',
'66:21:69',
'66:21:70',
'66:21:71',
'66:21:72',
'66:21:73',
'66:21:74',
'66:21:75',
'66:21:76',
'66:21:77',
'66:21:78',
'66:21:79',
'66:21:80',
'66:21:81',
'66:21:82',
'66:21:83',
'66:21:84',
'66:21:85',
'66:21:86',
'66:21:87',
'66:21:88',
'66:21:89',
'66:21:90',
'66:21:91',
'66:21:92',
'66:21:93',
'66:21:94',
'66:21:95',
'66:21:96',
'66:21:97',
'66:21:98',
'66:21:99',
'66:22:00',
'66:22:01',
'66:22:02',
'66:22:03',
'66:22:04',
'66:22:05',
'66:22:06',
'66:22:07',
'66:22:08',
'66:22:09',
'66:22:10',
'66:22:11',
'66:22:12',
'66:22:13',
'66:22:14',
'66:22:15',
'66:22:16',
'66:22:17',
'66:22:18',
'66:22:19',
'66:22:20',
'66:22:21',
'66:22:22',
'66:22:23',
'66:22:24',
'66:22:25',
'66:22:26',
'66:22:27',
'66:22:28',
'66:22:29',
'66:22:30',
'66:22:31',
'66:22:32',
'66:22:33',
'66:22:34',
'66:22:35',
'66:22:36',
'66:22:37',
'66:22:38',
'66:22:39',
'66:22:40',
'66:22:41',
'66:22:42',
'66:22:43',
'66:22:44',
'66:22:45',
'66:22:46',
'66:22:47',
'66:22:48',
'66:22:49',
'66:22:50',
'66:22:51',
'66:22:52',
'66:22:53',
'66:22:54',
'66:22:55',
'66:22:56',
'66:22:57',
'66:22:58',
'66:22:59',
'66:22:60',
'66:22:61',
'66:22:62',
'66:22:63',
'66:22:64',
'66:22:65',
'66:22:66',
'66:22:67',
'66:22:68',
'66:22:69',
'66:22:70',
'66:22:71',
'66:22:72',
'66:22:73',
'66:22:74',
'66:22:75',
'66:22:76',
'66:22:77',
'66:22:78',
'66:22:79',
'66:22:80',
'66:22:81',
'66:22:82',
'66:22:83',
'66:22:84',
'66:22:85',
'66:22:86',
'66:22:87',
'66:22:88',
'66:22:89',
'66:22:90',
'66:22:91',
'66:22:92',
'66:22:93',
'66:22:94',
'66:22:95',
'66:22:96',
'66:22:97',
'66:22:98',
'66:22:99',
'66:23:00',
'66:23:01',
'66:23:02',
'66:23:03',
'66:23:04',
'66:23:05',
'66:23:06',
'66:23:07',
'66:23:08',
'66:23:09',
'66:23:10',
'66:23:11',
'66:23:12',
'66:23:13',
'66:23:14',
'66:23:15',
'66:23:16',
'66:23:17',
'66:23:18',
'66:23:19',
'66:23:20',
'66:23:21',
'66:23:22',
'66:23:23',
'66:23:24',
'66:23:25',
'66:23:26',
'66:23:27',
'66:23:28',
'66:23:29',
'66:23:30',
'66:23:31',
'66:23:32',
'66:23:33',
'66:23:34',
'66:23:35',
'66:23:36',
'66:23:37',
'66:23:38',
'66:23:39',
'66:23:40',
'66:23:41',
'66:23:42',
'66:23:43',
'66:23:44',
'66:23:45',
'66:23:46',
'66:23:47',
'66:23:48',
'66:23:49',
'66:23:50',
'66:23:51',
'66:23:52',
'66:23:53',
'66:23:54',
'66:23:55',
'66:23:56',
'66:23:57',
'66:23:58',
'66:23:59',
'66:23:60',
'66:23:61',
'66:23:62',
'66:23:63',
'66:23:64',
'66:23:65',
'66:23:66',
'66:23:67',
'66:23:68',
'66:23:69',
'66:23:70',
'66:23:71',
'66:23:72',
'66:23:73',
'66:23:74',
'66:23:75',
'66:23:76',
'66:23:77',
'66:23:78',
'66:23:79',
'66:23:80',
'66:23:81',
'66:23:82',
'66:23:83',
'66:23:84',
'66:23:85',
'66:23:86',
'66:23:87',
'66:23:88',
'66:23:89',
'66:23:90',
'66:23:91',
'66:23:92',
'66:23:93',
'66:23:94',
'66:23:95',
'66:23:96',
'66:23:97',
'66:23:98',
'66:23:99',
'66:24:00',
'66:24:01',
'66:24:02',
'66:24:03',
'66:24:04',
'66:24:05',
'66:24:06',
'66:24:07',
'66:24:08',
'66:24:09',
'66:24:10',
'66:24:11',
'66:24:12',
'66:24:13',
'66:24:14',
'66:24:15',
'66:24:16',
'66:24:17',
'66:24:18',
'66:24:19',
'66:24:20',
'66:24:21',
'66:24:22',
'66:24:23',
'66:24:24',
'66:24:25',
'66:24:26',
'66:24:27',
'66:24:28',
'66:24:29',
'66:24:30',
'66:24:31',
'66:24:32',
'66:24:33',
'66:24:34',
'66:24:35',
'66:24:36',
'66:24:37',
'66:24:38',
'66:24:39',
'66:24:40',
'66:24:41',
'66:24:42',
'66:24:43',
'66:24:44',
'66:24:45',
'66:24:46',
'66:24:47',
'66:24:48',
'66:24:49',
'66:24:50',
'66:24:51',
'66:24:52',
'66:24:53',
'66:24:54',
'66:24:55',
'66:24:56',
'66:24:57',
'66:24:58',
'66:24:59',
'66:24:60',
'66:24:61',
'66:24:62',
'66:24:63',
'66:24:64',
'66:24:65',
'66:24:66',
'66:24:67',
'66:24:68',
'66:24:69',
'66:24:70',
'66:24:71',
'66:24:72',
'66:24:73',
'66:24:74',
'66:24:75',
'66:24:76',
'66:24:77',
'66:24:78',
'66:24:79',
'66:24:80',
'66:24:81',
'66:24:82',
'66:24:83',
'66:24:84',
'66:24:85',
'66:24:86',
'66:24:87',
'66:24:88',
'66:24:89',
'66:24:90',
'66:24:91',
'66:24:92',
'66:24:93',
'66:24:94',
'66:24:95',
'66:24:96',
'66:24:97',
'66:24:98',
'66:24:99',
'66:25:00',
'66:25:01',
'66:25:02',
'66:25:03',
'66:25:04',
'66:25:05',
'66:25:06',
'66:25:07',
'66:25:08',
'66:25:09',
'66:25:10',
'66:25:11',
'66:25:12',
'66:25:13',
'66:25:14',
'66:25:15',
'66:25:16',
'66:25:17',
'66:25:18',
'66:25:19',
'66:25:20',
'66:25:21',
'66:25:22',
'66:25:23',
'66:25:24',
'66:25:25',
'66:25:26',
'66:25:27',
'66:25:28',
'66:25:29',
'66:25:30',
'66:25:31',
'66:25:32',
'66:25:33',
'66:25:34',
'66:25:35',
'66:25:36',
'66:25:37',
'66:25:38',
'66:25:39',
'66:25:40',
'66:25:41',
'66:25:42',
'66:25:43',
'66:25:44',
'66:25:45',
'66:25:46',
'66:25:47',
'66:25:48',
'66:25:49',
'66:25:50',
'66:25:51',
'66:25:52',
'66:25:53',
'66:25:54',
'66:25:55',
'66:25:56',
'66:25:57',
'66:25:58',
'66:25:59',
'66:25:60',
'66:25:61',
'66:25:62',
'66:25:63',
'66:25:64',
'66:25:65',
'66:25:66',
'66:25:67',
'66:25:68',
'66:25:69',
'66:25:70',
'66:25:71',
'66:25:72',
'66:25:73',
'66:25:74',
'66:25:75',
'66:25:76',
'66:25:77',
'66:25:78',
'66:25:79',
'66:25:80',
'66:25:81',
'66:25:82',
'66:25:83',
'66:25:84',
'66:25:85',
'66:25:86',
'66:25:87',
'66:25:88',
'66:25:89',
'66:25:90',
'66:25:91',
'66:25:92',
'66:25:93',
'66:25:94',
'66:25:95',
'66:25:96',
'66:25:97',
'66:25:98',
'66:25:99',
'66:26:00',
'66:26:01',
'66:26:02',
'66:26:03',
'66:26:04',
'66:26:05',
'66:26:06',
'66:26:07',
'66:26:08',
'66:26:09',
'66:26:10',
'66:26:11',
'66:26:12',
'66:26:13',
'66:26:14',
'66:26:15',
'66:26:16',
'66:26:17',
'66:26:18',
'66:26:19',
'66:26:20',
'66:26:21',
'66:26:22',
'66:26:23',
'66:26:24',
'66:26:25',
'66:26:26',
'66:26:27',
'66:26:28',
'66:26:29',
'66:26:30',
'66:26:31',
'66:26:32',
'66:26:33',
'66:26:34',
'66:26:35',
'66:26:36',
'66:26:37',
'66:26:38',
'66:26:39',
'66:26:40',
'66:26:41',
'66:26:42',
'66:26:43',
'66:26:44',
'66:26:45',
'66:26:46',
'66:26:47',
'66:26:48',
'66:26:49',
'66:26:50',
'66:26:51',
'66:26:52',
'66:26:53',
'66:26:54',
'66:26:55',
'66:26:56',
'66:26:57',
'66:26:58',
'66:26:59',
'66:26:60',
'66:26:61',
'66:26:62',
'66:26:63',
'66:26:64',
'66:26:65',
'66:26:66',
'66:26:67',
'66:26:68',
'66:26:69',
'66:26:70',
'66:26:71',
'66:26:72',
'66:26:73',
'66:26:74',
'66:26:75',
'66:26:76',
'66:26:77',
'66:26:78',
'66:26:79',
'66:26:80',
'66:26:81',
'66:26:82',
'66:26:83',
'66:26:84',
'66:26:85',
'66:26:86',
'66:26:87',
'66:26:88',
'66:26:89',
'66:26:90',
'66:26:91',
'66:26:92',
'66:26:93',
'66:26:94',
'66:26:95',
'66:26:96',
'66:26:97',
'66:26:98',
'66:26:99',
'66:27:00',
'66:27:01',
'66:27:02',
'66:27:03',
'66:27:04',
'66:27:05',
'66:27:06',
'66:27:07',
'66:27:08',
'66:27:09',
'66:27:10',
'66:27:11',
'66:27:12',
'66:27:13',
'66:27:14',
'66:27:15',
'66:27:16',
'66:27:17',
'66:27:18',
'66:27:19',
'66:27:20',
'66:27:21',
'66:27:22',
'66:27:23',
'66:27:24',
'66:27:25',
'66:27:26',
'66:27:27',
'66:27:28',
'66:27:29',
'66:27:30',
'66:27:31',
'66:27:32',
'66:27:33',
'66:27:34',
'66:27:35',
'66:27:36',
'66:27:37',
'66:27:38',
'66:27:39',
'66:27:40',
'66:27:41',
'66:27:42',
'66:27:43',
'66:27:44',
'66:27:45',
'66:27:46',
'66:27:47',
'66:27:48',
'66:27:49',
'66:27:50',
'66:27:51',
'66:27:52',
'66:27:53',
'66:27:54',
'66:27:55',
'66:27:56',
'66:27:57',
'66:27:58',
'66:27:59',
'66:27:60',
'66:27:61',
'66:27:62',
'66:27:63',
'66:27:64',
'66:27:65',
'66:27:66',
'66:27:67',
'66:27:68',
'66:27:69',
'66:27:70',
'66:27:71',
'66:27:72',
'66:27:73',
'66:27:74',
'66:27:75',
'66:27:76',
'66:27:77',
'66:27:78',
'66:27:79',
'66:27:80',
'66:27:81',
'66:27:82',
'66:27:83',
'66:27:84',
'66:27:85',
'66:27:86',
'66:27:87',
'66:27:88',
'66:27:89',
'66:27:90',
'66:27:91',
'66:27:92',
'66:27:93',
'66:27:94',
'66:27:95',
'66:27:96',
'66:27:97',
'66:27:98',
'66:27:99',
'66:28:00',
'66:28:01',
'66:28:02',
'66:28:03',
'66:28:04',
'66:28:05',
'66:28:06',
'66:28:07',
'66:28:08',
'66:28:09',
'66:28:10',
'66:28:11',
'66:28:12',
'66:28:13',
'66:28:14',
'66:28:15',
'66:28:16',
'66:28:17',
'66:28:18',
'66:28:19',
'66:28:20',
'66:28:21',
'66:28:22',
'66:28:23',
'66:28:24',
'66:28:25',
'66:28:26',
'66:28:27',
'66:28:28',
'66:28:29',
'66:28:30',
'66:28:31',
'66:28:32',
'66:28:33',
'66:28:34',
'66:28:35',
'66:28:36',
'66:28:37',
'66:28:38',
'66:28:39',
'66:28:40',
'66:28:41',
'66:28:42',
'66:28:43',
'66:28:44',
'66:28:45',
'66:28:46',
'66:28:47',
'66:28:48',
'66:28:49',
'66:28:50',
'66:28:51',
'66:28:52',
'66:28:53',
'66:28:54',
'66:28:55',
'66:28:56',
'66:28:57',
'66:28:58',
'66:28:59',
'66:28:60',
'66:28:61',
'66:28:62',
'66:28:63',
'66:28:64',
'66:28:65',
'66:28:66',
'66:28:67',
'66:28:68',
'66:28:69',
'66:28:70',
'66:28:71',
'66:28:72',
'66:28:73',
'66:28:74',
'66:28:75',
'66:28:76',
'66:28:77',
'66:28:78',
'66:28:79',
'66:28:80',
'66:28:81',
'66:28:82',
'66:28:83',
'66:28:84',
'66:28:85',
'66:28:86',
'66:28:87',
'66:28:88',
'66:28:89',
'66:28:90',
'66:28:91',
'66:28:92',
'66:28:93',
'66:28:94',
'66:28:95',
'66:28:96',
'66:28:97',
'66:28:98',
'66:28:99',
'66:29:00',
'66:29:01',
'66:29:02',
'66:29:03',
'66:29:04',
'66:29:05',
'66:29:06',
'66:29:07',
'66:29:08',
'66:29:09',
'66:29:10',
'66:29:11',
'66:29:12',
'66:29:13',
'66:29:14',
'66:29:15',
'66:29:16',
'66:29:17',
'66:29:18',
'66:29:19',
'66:29:20',
'66:29:21',
'66:29:22',
'66:29:23',
'66:29:24',
'66:29:25',
'66:29:26',
'66:29:27',
'66:29:28',
'66:29:29',
'66:29:30',
'66:29:31',
'66:29:32',
'66:29:33',
'66:29:34',
'66:29:35',
'66:29:36',
'66:29:37',
'66:29:38',
'66:29:39',
'66:29:40',
'66:29:41',
'66:29:42',
'66:29:43',
'66:29:44',
'66:29:45',
'66:29:46',
'66:29:47',
'66:29:48',
'66:29:49',
'66:29:50',
'66:29:51',
'66:29:52',
'66:29:53',
'66:29:54',
'66:29:55',
'66:29:56',
'66:29:57',
'66:29:58',
'66:29:59',
'66:29:60',
'66:29:61',
'66:29:62',
'66:29:63',
'66:29:64',
'66:29:65',
'66:29:66',
'66:29:67',
'66:29:68',
'66:29:69',
'66:29:70',
'66:29:71',
'66:29:72',
'66:29:73',
'66:29:74',
'66:29:75',
'66:29:76',
'66:29:77',
'66:29:78',
'66:29:79',
'66:29:80',
'66:29:81',
'66:29:82',
'66:29:83',
'66:29:84',
'66:29:85',
'66:29:86',
'66:29:87',
'66:29:88',
'66:29:89',
'66:29:90',
'66:29:91',
'66:29:92',
'66:29:93',
'66:29:94',
'66:29:95',
'66:29:96',
'66:29:97',
'66:29:98',
'66:29:99',
'66:30:00',
'66:30:01',
'66:30:02',
'66:30:03',
'66:30:04',
'66:30:05',
'66:30:06',
'66:30:07',
'66:30:08',
'66:30:09',
'66:30:10',
'66:30:11',
'66:30:12',
'66:30:13',
'66:30:14',
'66:30:15',
'66:30:16',
'66:30:17',
'66:30:18',
'66:30:19',
'66:30:20',
'66:30:21',
'66:30:22',
'66:30:23',
'66:30:24',
'66:30:25',
'66:30:26',
'66:30:27',
'66:30:28',
'66:30:29',
'66:30:30',
'66:30:31',
'66:30:32',
'66:30:33',
'66:30:34',
'66:30:35',
'66:30:36',
'66:30:37',
'66:30:38',
'66:30:39',
'66:30:40',
'66:30:41',
'66:30:42',
'66:30:43',
'66:30:44',
'66:30:45',
'66:30:46',
'66:30:47',
'66:30:48',
'66:30:49',
'66:30:50',
'66:30:51',
'66:30:52',
'66:30:53',
'66:30:54',
'66:30:55',
'66:30:56',
'66:30:57',
'66:30:58',
'66:30:59',
'66:30:60',
'66:30:61',
'66:30:62',
'66:30:63',
'66:30:64',
'66:30:65',
'66:30:66',
'66:30:67',
'66:30:68',
'66:30:69',
'66:30:70',
'66:30:71',
'66:30:72',
'66:30:73',
'66:30:74',
'66:30:75',
'66:30:76',
'66:30:77',
'66:30:78',
'66:30:79',
'66:30:80',
'66:30:81',
'66:30:82',
'66:30:83',
'66:30:84',
'66:30:85',
'66:30:86',
'66:30:87',
'66:30:88',
'66:30:89',
'66:30:90',
'66:30:91',
'66:30:92',
'66:30:93',
'66:30:94',
'66:30:95',
'66:30:96',
'66:30:97',
'66:30:98',
'66:30:99',
'66:31:00',
'66:31:01',
'66:31:02',
'66:31:03',
'66:31:04',
'66:31:05',
'66:31:06',
'66:31:07',
'66:31:08',
'66:31:09',
'66:31:10',
'66:31:11',
'66:31:12',
'66:31:13',
'66:31:14',
'66:31:15',
'66:31:16',
'66:31:17',
'66:31:18',
'66:31:19',
'66:31:20',
'66:31:21',
'66:31:22',
'66:31:23',
'66:31:24',
'66:31:25',
'66:31:26',
'66:31:27',
'66:31:28',
'66:31:29',
'66:31:30',
'66:31:31',
'66:31:32',
'66:31:33',
'66:31:34',
'66:31:35',
'66:31:36',
'66:31:37',
'66:31:38',
'66:31:39',
'66:31:40',
'66:31:41',
'66:31:42',
'66:31:43',
'66:31:44',
'66:31:45',
'66:31:46',
'66:31:47',
'66:31:48',
'66:31:49',
'66:31:50',
'66:31:51',
'66:31:52',
'66:31:53',
'66:31:54',
'66:31:55',
'66:31:56',
'66:31:57',
'66:31:58',
'66:31:59',
'66:31:60',
'66:31:61',
'66:31:62',
'66:31:63',
'66:31:64',
'66:31:65',
'66:31:66',
'66:31:67',
'66:31:68',
'66:31:69',
'66:31:70',
'66:31:71',
'66:31:72',
'66:31:73',
'66:31:74',
'66:31:75',
'66:31:76',
'66:31:77',
'66:31:78',
'66:31:79',
'66:31:80',
'66:31:81',
'66:31:82',
'66:31:83',
'66:31:84',
'66:31:85',
'66:31:86',
'66:31:87',
'66:31:88',
'66:31:89',
'66:31:90',
'66:31:91',
'66:31:92',
'66:31:93',
'66:31:94',
'66:31:95',
'66:31:96',
'66:31:97',
'66:31:98',
'66:31:99',
'66:32:00',
'66:32:01',
'66:32:02',
'66:32:03',
'66:32:04',
'66:32:05',
'66:32:06',
'66:32:07',
'66:32:08',
'66:32:09',
'66:32:10',
'66:32:11',
'66:32:12',
'66:32:13',
'66:32:14',
'66:32:15',
'66:32:16',
'66:32:17',
'66:32:18',
'66:32:19',
'66:32:20',
'66:32:21',
'66:32:22',
'66:32:23',
'66:32:24',
'66:32:25',
'66:32:26',
'66:32:27',
'66:32:28',
'66:32:29',
'66:32:30',
'66:32:31',
'66:32:32',
'66:32:33',
'66:32:34',
'66:32:35',
'66:32:36',
'66:32:37',
'66:32:38',
'66:32:39',
'66:32:40',
'66:32:41',
'66:32:42',
'66:32:43',
'66:32:44',
'66:32:45',
'66:32:46',
'66:32:47',
'66:32:48',
'66:32:49',
'66:32:50',
'66:32:51',
'66:32:52',
'66:32:53',
'66:32:54',
'66:32:55',
'66:32:56',
'66:32:57',
'66:32:58',
'66:32:59',
'66:32:60',
'66:32:61',
'66:32:62',
'66:32:63',
'66:32:64',
'66:32:65',
'66:32:66',
'66:32:67',
'66:32:68',
'66:32:69',
'66:32:70',
'66:32:71',
'66:32:72',
'66:32:73',
'66:32:74',
'66:32:75',
'66:32:76',
'66:32:77',
'66:32:78',
'66:32:79',
'66:32:80',
'66:32:81',
'66:32:82',
'66:32:83',
'66:32:84',
'66:32:85',
'66:32:86',
'66:32:87',
'66:32:88',
'66:32:89',
'66:32:90',
'66:32:91',
'66:32:92',
'66:32:93',
'66:32:94',
'66:32:95',
'66:32:96',
'66:32:97',
'66:32:98',
'66:32:99',
'66:33:00',
'66:33:01',
'66:33:02',
'66:33:03',
'66:33:04',
'66:33:05',
'66:33:06',
'66:33:07',
'66:33:08',
'66:33:09',
'66:33:10',
'66:33:11',
'66:33:12',
'66:33:13',
'66:33:14',
'66:33:15',
'66:33:16',
'66:33:17',
'66:33:18',
'66:33:19',
'66:33:20',
'66:33:21',
'66:33:22',
'66:33:23',
'66:33:24',
'66:33:25',
'66:33:26',
'66:33:27',
'66:33:28',
'66:33:29',
'66:33:30',
'66:33:31',
'66:33:32',
'66:33:33',
'66:33:34',
'66:33:35',
'66:33:36',
'66:33:37',
'66:33:38',
'66:33:39',
'66:33:40',
'66:33:41',
'66:33:42',
'66:33:43',
'66:33:44',
'66:33:45',
'66:33:46',
'66:33:47',
'66:33:48',
'66:33:49',
'66:33:50',
'66:33:51',
'66:33:52',
'66:33:53',
'66:33:54',
'66:33:55',
'66:33:56',
'66:33:57',
'66:33:58',
'66:33:59',
'66:33:60',
'66:33:61',
'66:33:62',
'66:33:63',
'66:33:64',
'66:33:65',
'66:33:66',
'66:33:67',
'66:33:68',
'66:33:69',
'66:33:70',
'66:33:71',
'66:33:72',
'66:33:73',
'66:33:74',
'66:33:75',
'66:33:76',
'66:33:77',
'66:33:78',
'66:33:79',
'66:33:80',
'66:33:81',
'66:33:82',
'66:33:83',
'66:33:84',
'66:33:85',
'66:33:86',
'66:33:87',
'66:33:88',
'66:33:89',
'66:33:90',
'66:33:91',
'66:33:92',
'66:33:93',
'66:33:94',
'66:33:95',
'66:33:96',
'66:33:97',
'66:33:98',
'66:33:99',
'66:34:00',
'66:34:01',
'66:34:02',
'66:34:03',
'66:34:04',
'66:34:05',
'66:34:06',
'66:34:07',
'66:34:08',
'66:34:09',
'66:34:10',
'66:34:11',
'66:34:12',
'66:34:13',
'66:34:14',
'66:34:15',
'66:34:16',
'66:34:17',
'66:34:18',
'66:34:19',
'66:34:20',
'66:34:21',
'66:34:22',
'66:34:23',
'66:34:24',
'66:34:25',
'66:34:26',
'66:34:27',
'66:34:28',
'66:34:29',
'66:34:30',
'66:34:31',
'66:34:32',
'66:34:33',
'66:34:34',
'66:34:35',
'66:34:36',
'66:34:37',
'66:34:38',
'66:34:39',
'66:34:40',
'66:34:41',
'66:34:42',
'66:34:43',
'66:34:44',
'66:34:45',
'66:34:46',
'66:34:47',
'66:34:48',
'66:34:49',
'66:34:50',
'66:34:51',
'66:34:52',
'66:34:53',
'66:34:54',
'66:34:55',
'66:34:56',
'66:34:57',
'66:34:58',
'66:34:59',
'66:34:60',
'66:34:61',
'66:34:62',
'66:34:63',
'66:34:64',
'66:34:65',
'66:34:66',
'66:34:67',
'66:34:68',
'66:34:69',
'66:34:70',
'66:34:71',
'66:34:72',
'66:34:73',
'66:34:74',
'66:34:75',
'66:34:76',
'66:34:77',
'66:34:78',
'66:34:79',
'66:34:80',
'66:34:81',
'66:34:82',
'66:34:83',
'66:34:84',
'66:34:85',
'66:34:86',
'66:34:87',
'66:34:88',
'66:34:89',
'66:34:90',
'66:34:91',
'66:34:92',
'66:34:93',
'66:34:94',
'66:34:95',
'66:34:96',
'66:34:97',
'66:34:98',
'66:34:99',
'66:35:00',
'66:35:01',
'66:35:02',
'66:35:03',
'66:35:04',
'66:35:05',
'66:35:06',
'66:35:07',
'66:35:08',
'66:35:09',
'66:35:10',
'66:35:11',
'66:35:12',
'66:35:13',
'66:35:14',
'66:35:15',
'66:35:16',
'66:35:17',
'66:35:18',
'66:35:19',
'66:35:20',
'66:35:21',
'66:35:22',
'66:35:23',
'66:35:24',
'66:35:25',
'66:35:26',
'66:35:27',
'66:35:28',
'66:35:29',
'66:35:30',
'66:35:31',
'66:35:32',
'66:35:33',
'66:35:34',
'66:35:35',
'66:35:36',
'66:35:37',
'66:35:38',
'66:35:39',
'66:35:40',
'66:35:41',
'66:35:42',
'66:35:43',
'66:35:44',
'66:35:45',
'66:35:46',
'66:35:47',
'66:35:48',
'66:35:49',
'66:35:50',
'66:35:51',
'66:35:52',
'66:35:53',
'66:35:54',
'66:35:55',
'66:35:56',
'66:35:57',
'66:35:58',
'66:35:59',
'66:35:60',
'66:35:61',
'66:35:62',
'66:35:63',
'66:35:64',
'66:35:65',
'66:35:66',
'66:35:67',
'66:35:68',
'66:35:69',
'66:35:70',
'66:35:71',
'66:35:72',
'66:35:73',
'66:35:74',
'66:35:75',
'66:35:76',
'66:35:77',
'66:35:78',
'66:35:79',
'66:35:80',
'66:35:81',
'66:35:82',
'66:35:83',
'66:35:84',
'66:35:85',
'66:35:86',
'66:35:87',
'66:35:88',
'66:35:89',
'66:35:90',
'66:35:91',
'66:35:92',
'66:35:93',
'66:35:94',
'66:35:95',
'66:35:96',
'66:35:97',
'66:35:98',
'66:35:99',
'66:36:00',
'66:36:01',
'66:36:02',
'66:36:03',
'66:36:04',
'66:36:05',
'66:36:06',
'66:36:07',
'66:36:08',
'66:36:09',
'66:36:10',
'66:36:11',
'66:36:12',
'66:36:13',
'66:36:14',
'66:36:15',
'66:36:16',
'66:36:17',
'66:36:18',
'66:36:19',
'66:36:20',
'66:36:21',
'66:36:22',
'66:36:23',
'66:36:24',
'66:36:25',
'66:36:26',
'66:36:27',
'66:36:28',
'66:36:29',
'66:36:30',
'66:36:31',
'66:36:32',
'66:36:33',
'66:36:34',
'66:36:35',
'66:36:36',
'66:36:37',
'66:36:38',
'66:36:39',
'66:36:40',
'66:36:41',
'66:36:42',
'66:36:43',
'66:36:44',
'66:36:45',
'66:36:46',
'66:36:47',
'66:36:48',
'66:36:49',
'66:36:50',
'66:36:51',
'66:36:52',
'66:36:53',
'66:36:54',
'66:36:55',
'66:36:56',
'66:36:57',
'66:36:58',
'66:36:59',
'66:36:60',
'66:36:61',
'66:36:62',
'66:36:63',
'66:36:64',
'66:36:65',
'66:36:66',
'66:36:67',
'66:36:68',
'66:36:69',
'66:36:70',
'66:36:71',
'66:36:72',
'66:36:73',
'66:36:74',
'66:36:75',
'66:36:76',
'66:36:77',
'66:36:78',
'66:36:79',
'66:36:80',
'66:36:81',
'66:36:82',
'66:36:83',
'66:36:84',
'66:36:85',
'66:36:86',
'66:36:87',
'66:36:88',
'66:36:89',
'66:36:90',
'66:36:91',
'66:36:92',
'66:36:93',
'66:36:94',
'66:36:95',
'66:36:96',
'66:36:97',
'66:36:98',
'66:36:99',
'66:37:00',
'66:37:01',
'66:37:02',
'66:37:03',
'66:37:04',
'66:37:05',
'66:37:06',
'66:37:07',
'66:37:08',
'66:37:09',
'66:37:10',
'66:37:11',
'66:37:12',
'66:37:13',
'66:37:14',
'66:37:15',
'66:37:16',
'66:37:17',
'66:37:18',
'66:37:19',
'66:37:20',
'66:37:21',
'66:37:22',
'66:37:23',
'66:37:24',
'66:37:25',
'66:37:26',
'66:37:27',
'66:37:28',
'66:37:29',
'66:37:30',
'66:37:31',
'66:37:32',
'66:37:33',
'66:37:34',
'66:37:35',
'66:37:36',
'66:37:37',
'66:37:38',
'66:37:39',
'66:37:40',
'66:37:41',
'66:37:42',
'66:37:43',
'66:37:44',
'66:37:45',
'66:37:46',
'66:37:47',
'66:37:48',
'66:37:49',
'66:37:50',
'66:37:51',
'66:37:52',
'66:37:53',
'66:37:54',
'66:37:55',
'66:37:56',
'66:37:57',
'66:37:58',
'66:37:59',
'66:37:60',
'66:37:61',
'66:37:62',
'66:37:63',
'66:37:64',
'66:37:65',
'66:37:66',
'66:37:67',
'66:37:68',
'66:37:69',
'66:37:70',
'66:37:71',
'66:37:72',
'66:37:73',
'66:37:74',
'66:37:75',
'66:37:76',
'66:37:77',
'66:37:78',
'66:37:79',
'66:37:80',
'66:37:81',
'66:37:82',
'66:37:83',
'66:37:84',
'66:37:85',
'66:37:86',
'66:37:87',
'66:37:88',
'66:37:89',
'66:37:90',
'66:37:91',
'66:37:92',
'66:37:93',
'66:37:94',
'66:37:95',
'66:37:96',
'66:37:97',
'66:37:98',
'66:37:99',
'66:38:00',
'66:38:01',
'66:38:02',
'66:38:03',
'66:38:04',
'66:38:05',
'66:38:06',
'66:38:07',
'66:38:08',
'66:38:09',
'66:38:10',
'66:38:11',
'66:38:12',
'66:38:13',
'66:38:14',
'66:38:15',
'66:38:16',
'66:38:17',
'66:38:18',
'66:38:19',
'66:38:20',
'66:38:21',
'66:38:22',
'66:38:23',
'66:38:24',
'66:38:25',
'66:38:26',
'66:38:27',
'66:38:28',
'66:38:29',
'66:38:30',
'66:38:31',
'66:38:32',
'66:38:33',
'66:38:34',
'66:38:35',
'66:38:36',
'66:38:37',
'66:38:38',
'66:38:39',
'66:38:40',
'66:38:41',
'66:38:42',
'66:38:43',
'66:38:44',
'66:38:45',
'66:38:46',
'66:38:47',
'66:38:48',
'66:38:49',
'66:38:50',
'66:38:51',
'66:38:52',
'66:38:53',
'66:38:54',
'66:38:55',
'66:38:56',
'66:38:57',
'66:38:58',
'66:38:59',
'66:38:60',
'66:38:61',
'66:38:62',
'66:38:63',
'66:38:64',
'66:38:65',
'66:38:66',
'66:38:67',
'66:38:68',
'66:38:69',
'66:38:70',
'66:38:71',
'66:38:72',
'66:38:73',
'66:38:74',
'66:38:75',
'66:38:76',
'66:38:77',
'66:38:78',
'66:38:79',
'66:38:80',
'66:38:81',
'66:38:82',
'66:38:83',
'66:38:84',
'66:38:85',
'66:38:86',
'66:38:87',
'66:38:88',
'66:38:89',
'66:38:90',
'66:38:91',
'66:38:92',
'66:38:93',
'66:38:94',
'66:38:95',
'66:38:96',
'66:38:97',
'66:38:98',
'66:38:99',
'66:39:00',
'66:39:01',
'66:39:02',
'66:39:03',
'66:39:04',
'66:39:05',
'66:39:06',
'66:39:07',
'66:39:08',
'66:39:09',
'66:39:10',
'66:39:11',
'66:39:12',
'66:39:13',
'66:39:14',
'66:39:15',
'66:39:16',
'66:39:17',
'66:39:18',
'66:39:19',
'66:39:20',
'66:39:21',
'66:39:22',
'66:39:23',
'66:39:24',
'66:39:25',
'66:39:26',
'66:39:27',
'66:39:28',
'66:39:29',
'66:39:30',
'66:39:31',
'66:39:32',
'66:39:33',
'66:39:34',
'66:39:35',
'66:39:36',
'66:39:37',
'66:39:38',
'66:39:39',
'66:39:40',
'66:39:41',
'66:39:42',
'66:39:43',
'66:39:44',
'66:39:45',
'66:39:46',
'66:39:47',
'66:39:48',
'66:39:49',
'66:39:50',
'66:39:51',
'66:39:52',
'66:39:53',
'66:39:54',
'66:39:55',
'66:39:56',
'66:39:57',
'66:39:58',
'66:39:59',
'66:39:60',
'66:39:61',
'66:39:62',
'66:39:63',
'66:39:64',
'66:39:65',
'66:39:66',
'66:39:67',
'66:39:68',
'66:39:69',
'66:39:70',
'66:39:71',
'66:39:72',
'66:39:73',
'66:39:74',
'66:39:75',
'66:39:76',
'66:39:77',
'66:39:78',
'66:39:79',
'66:39:80',
'66:39:81',
'66:39:82',
'66:39:83',
'66:39:84',
'66:39:85',
'66:39:86',
'66:39:87',
'66:39:88',
'66:39:89',
'66:39:90',
'66:39:91',
'66:39:92',
'66:39:93',
'66:39:94',
'66:39:95',
'66:39:96',
'66:39:97',
'66:39:98',
'66:39:99',
'66:40:00',
'66:40:01',
'66:40:02',
'66:40:03',
'66:40:04',
'66:40:05',
'66:40:06',
'66:40:07',
'66:40:08',
'66:40:09',
'66:40:10',
'66:40:11',
'66:40:12',
'66:40:13',
'66:40:14',
'66:40:15',
'66:40:16',
'66:40:17',
'66:40:18',
'66:40:19',
'66:40:20',
'66:40:21',
'66:40:22',
'66:40:23',
'66:40:24',
'66:40:25',
'66:40:26',
'66:40:27',
'66:40:28',
'66:40:29',
'66:40:30',
'66:40:31',
'66:40:32',
'66:40:33',
'66:40:34',
'66:40:35',
'66:40:36',
'66:40:37',
'66:40:38',
'66:40:39',
'66:40:40',
'66:40:41',
'66:40:42',
'66:40:43',
'66:40:44',
'66:40:45',
'66:40:46',
'66:40:47',
'66:40:48',
'66:40:49',
'66:40:50',
'66:40:51',
'66:40:52',
'66:40:53',
'66:40:54',
'66:40:55',
'66:40:56',
'66:40:57',
'66:40:58',
'66:40:59',
'66:40:60',
'66:40:61',
'66:40:62',
'66:40:63',
'66:40:64',
'66:40:65',
'66:40:66',
'66:40:67',
'66:40:68',
'66:40:69',
'66:40:70',
'66:40:71',
'66:40:72',
'66:40:73',
'66:40:74',
'66:40:75',
'66:40:76',
'66:40:77',
'66:40:78',
'66:40:79',
'66:40:80',
'66:40:81',
'66:40:82',
'66:40:83',
'66:40:84',
'66:40:85',
'66:40:86',
'66:40:87',
'66:40:88',
'66:40:89',
'66:40:90',
'66:40:91',
'66:40:92',
'66:40:93',
'66:40:94',
'66:40:95',
'66:40:96',
'66:40:97',
'66:40:98',
'66:40:99',
'66:41:00',
'66:41:01',
'66:41:02',
'66:41:03',
'66:41:04',
'66:41:05',
'66:41:06',
'66:41:07',
'66:41:08',
'66:41:09',
'66:41:10',
'66:41:11',
'66:41:12',
'66:41:13',
'66:41:14',
'66:41:15',
'66:41:16',
'66:41:17',
'66:41:18',
'66:41:19',
'66:41:20',
'66:41:21',
'66:41:22',
'66:41:23',
'66:41:24',
'66:41:25',
'66:41:26',
'66:41:27',
'66:41:28',
'66:41:29',
'66:41:30',
'66:41:31',
'66:41:32',
'66:41:33',
'66:41:34',
'66:41:35',
'66:41:36',
'66:41:37',
'66:41:38',
'66:41:39',
'66:41:40',
'66:41:41',
'66:41:42',
'66:41:43',
'66:41:44',
'66:41:45',
'66:41:46',
'66:41:47',
'66:41:48',
'66:41:49',
'66:41:50',
'66:41:51',
'66:41:52',
'66:41:53',
'66:41:54',
'66:41:55',
'66:41:56',
'66:41:57',
'66:41:58',
'66:41:59',
'66:41:60',
'66:41:61',
'66:41:62',
'66:41:63',
'66:41:64',
'66:41:65',
'66:41:66',
'66:41:67',
'66:41:68',
'66:41:69',
'66:41:70',
'66:41:71',
'66:41:72',
'66:41:73',
'66:41:74',
'66:41:75',
'66:41:76',
'66:41:77',
'66:41:78',
'66:41:79',
'66:41:80',
'66:41:81',
'66:41:82',
'66:41:83',
'66:41:84',
'66:41:85',
'66:41:86',
'66:41:87',
'66:41:88',
'66:41:89',
'66:41:90',
'66:41:91',
'66:41:92',
'66:41:93',
'66:41:94',
'66:41:95',
'66:41:96',
'66:41:97',
'66:41:98',
'66:41:99',
'66:42:00',
'66:42:01',
'66:42:02',
'66:42:03',
'66:42:04',
'66:42:05',
'66:42:06',
'66:42:07',
'66:42:08',
'66:42:09',
'66:42:10',
'66:42:11',
'66:42:12',
'66:42:13',
'66:42:14',
'66:42:15',
'66:42:16',
'66:42:17',
'66:42:18',
'66:42:19',
'66:42:20',
'66:42:21',
'66:42:22',
'66:42:23',
'66:42:24',
'66:42:25',
'66:42:26',
'66:42:27',
'66:42:28',
'66:42:29',
'66:42:30',
'66:42:31',
'66:42:32',
'66:42:33',
'66:42:34',
'66:42:35',
'66:42:36',
'66:42:37',
'66:42:38',
'66:42:39',
'66:42:40',
'66:42:41',
'66:42:42',
'66:42:43',
'66:42:44',
'66:42:45',
'66:42:46',
'66:42:47',
'66:42:48',
'66:42:49',
'66:42:50',
'66:42:51',
'66:42:52',
'66:42:53',
'66:42:54',
'66:42:55',
'66:42:56',
'66:42:57',
'66:42:58',
'66:42:59',
'66:42:60',
'66:42:61',
'66:42:62',
'66:42:63',
'66:42:64',
'66:42:65',
'66:42:66',
'66:42:67',
'66:42:68',
'66:42:69',
'66:42:70',
'66:42:71',
'66:42:72',
'66:42:73',
'66:42:74',
'66:42:75',
'66:42:76',
'66:42:77',
'66:42:78',
'66:42:79',
'66:42:80',
'66:42:81',
'66:42:82',
'66:42:83',
'66:42:84',
'66:42:85',
'66:42:86',
'66:42:87',
'66:42:88',
'66:42:89',
'66:42:90',
'66:42:91',
'66:42:92',
'66:42:93',
'66:42:94',
'66:42:95',
'66:42:96',
'66:42:97',
'66:42:98',
'66:42:99',
'66:43:00',
'66:43:01',
'66:43:02',
'66:43:03',
'66:43:04',
'66:43:05',
'66:43:06',
'66:43:07',
'66:43:08',
'66:43:09',
'66:43:10',
'66:43:11',
'66:43:12',
'66:43:13',
'66:43:14',
'66:43:15',
'66:43:16',
'66:43:17',
'66:43:18',
'66:43:19',
'66:43:20',
'66:43:21',
'66:43:22',
'66:43:23',
'66:43:24',
'66:43:25',
'66:43:26',
'66:43:27',
'66:43:28',
'66:43:29',
'66:43:30',
'66:43:31',
'66:43:32',
'66:43:33',
'66:43:34',
'66:43:35',
'66:43:36',
'66:43:37',
'66:43:38',
'66:43:39',
'66:43:40',
'66:43:41',
'66:43:42',
'66:43:43',
'66:43:44',
'66:43:45',
'66:43:46',
'66:43:47',
'66:43:48',
'66:43:49',
'66:43:50',
'66:43:51',
'66:43:52',
'66:43:53',
'66:43:54',
'66:43:55',
'66:43:56',
'66:43:57',
'66:43:58',
'66:43:59',
'66:43:60',
'66:43:61',
'66:43:62',
'66:43:63',
'66:43:64',
'66:43:65',
'66:43:66',
'66:43:67',
'66:43:68',
'66:43:69',
'66:43:70',
'66:43:71',
'66:43:72',
'66:43:73',
'66:43:74',
'66:43:75',
'66:43:76',
'66:43:77',
'66:43:78',
'66:43:79',
'66:43:80',
'66:43:81',
'66:43:82',
'66:43:83',
'66:43:84',
'66:43:85',
'66:43:86',
'66:43:87',
'66:43:88',
'66:43:89',
'66:43:90',
'66:43:91',
'66:43:92',
'66:43:93',
'66:43:94',
'66:43:95',
'66:43:96',
'66:43:97',
'66:43:98',
'66:43:99',
'66:44:00',
'66:44:01',
'66:44:02',
'66:44:03',
'66:44:04',
'66:44:05',
'66:44:06',
'66:44:07',
'66:44:08',
'66:44:09',
'66:44:10',
'66:44:11',
'66:44:12',
'66:44:13',
'66:44:14',
'66:44:15',
'66:44:16',
'66:44:17',
'66:44:18',
'66:44:19',
'66:44:20',
'66:44:21',
'66:44:22',
'66:44:23',
'66:44:24',
'66:44:25',
'66:44:26',
'66:44:27',
'66:44:28',
'66:44:29',
'66:44:30',
'66:44:31',
'66:44:32',
'66:44:33',
'66:44:34',
'66:44:35',
'66:44:36',
'66:44:37',
'66:44:38',
'66:44:39',
'66:44:40',
'66:44:41',
'66:44:42',
'66:44:43',
'66:44:44',
'66:44:45',
'66:44:46',
'66:44:47',
'66:44:48',
'66:44:49',
'66:44:50',
'66:44:51',
'66:44:52',
'66:44:53',
'66:44:54',
'66:44:55',
'66:44:56',
'66:44:57',
'66:44:58',
'66:44:59',
'66:44:60',
'66:44:61',
'66:44:62',
'66:44:63',
'66:44:64',
'66:44:65',
'66:44:66',
'66:44:67',
'66:44:68',
'66:44:69',
'66:44:70',
'66:44:71',
'66:44:72',
'66:44:73',
'66:44:74',
'66:44:75',
'66:44:76',
'66:44:77',
'66:44:78',
'66:44:79',
'66:44:80',
'66:44:81',
'66:44:82',
'66:44:83',
'66:44:84',
'66:44:85',
'66:44:86',
'66:44:87',
'66:44:88',
'66:44:89',
'66:44:90',
'66:44:91',
'66:44:92',
'66:44:93',
'66:44:94',
'66:44:95',
'66:44:96',
'66:44:97',
'66:44:98',
'66:44:99',
'66:45:00',
'66:45:01',
'66:45:02',
'66:45:03',
'66:45:04',
'66:45:05',
'66:45:06',
'66:45:07',
'66:45:08',
'66:45:09',
'66:45:10',
'66:45:11',
'66:45:12',
'66:45:13',
'66:45:14',
'66:45:15',
'66:45:16',
'66:45:17',
'66:45:18',
'66:45:19',
'66:45:20',
'66:45:21',
'66:45:22',
'66:45:23',
'66:45:24',
'66:45:25',
'66:45:26',
'66:45:27',
'66:45:28',
'66:45:29',
'66:45:30',
'66:45:31',
'66:45:32',
'66:45:33',
'66:45:34',
'66:45:35',
'66:45:36',
'66:45:37',
'66:45:38',
'66:45:39',
'66:45:40',
'66:45:41',
'66:45:42',
'66:45:43',
'66:45:44',
'66:45:45',
'66:45:46',
'66:45:47',
'66:45:48',
'66:45:49',
'66:45:50',
'66:45:51',
'66:45:52',
'66:45:53',
'66:45:54',
'66:45:55',
'66:45:56',
'66:45:57',
'66:45:58',
'66:45:59',
'66:45:60',
'66:45:61',
'66:45:62',
'66:45:63',
'66:45:64',
'66:45:65',
'66:45:66',
'66:45:67',
'66:45:68',
'66:45:69',
'66:45:70',
'66:45:71',
'66:45:72',
'66:45:73',
'66:45:74',
'66:45:75',
'66:45:76',
'66:45:77',
'66:45:78',
'66:45:79',
'66:45:80',
'66:45:81',
'66:45:82',
'66:45:83',
'66:45:84',
'66:45:85',
'66:45:86',
'66:45:87',
'66:45:88',
'66:45:89',
'66:45:90',
'66:45:91',
'66:45:92',
'66:45:93',
'66:45:94',
'66:45:95',
'66:45:96',
'66:45:97',
'66:45:98',
'66:45:99',
'66:46:00',
'66:46:01',
'66:46:02',
'66:46:03',
'66:46:04',
'66:46:05',
'66:46:06',
'66:46:07',
'66:46:08',
'66:46:09',
'66:46:10',
'66:46:11',
'66:46:12',
'66:46:13',
'66:46:14',
'66:46:15',
'66:46:16',
'66:46:17',
'66:46:18',
'66:46:19',
'66:46:20',
'66:46:21',
'66:46:22',
'66:46:23',
'66:46:24',
'66:46:25',
'66:46:26',
'66:46:27',
'66:46:28',
'66:46:29',
'66:46:30',
'66:46:31',
'66:46:32',
'66:46:33',
'66:46:34',
'66:46:35',
'66:46:36',
'66:46:37',
'66:46:38',
'66:46:39',
'66:46:40',
'66:46:41',
'66:46:42',
'66:46:43',
'66:46:44',
'66:46:45',
'66:46:46',
'66:46:47',
'66:46:48',
'66:46:49',
'66:46:50',
'66:46:51',
'66:46:52',
'66:46:53',
'66:46:54',
'66:46:55',
'66:46:56',
'66:46:57',
'66:46:58',
'66:46:59',
'66:46:60',
'66:46:61',
'66:46:62',
'66:46:63',
'66:46:64',
'66:46:65',
'66:46:66',
'66:46:67',
'66:46:68',
'66:46:69',
'66:46:70',
'66:46:71',
'66:46:72',
'66:46:73',
'66:46:74',
'66:46:75',
'66:46:76',
'66:46:77',
'66:46:78',
'66:46:79',
'66:46:80',
'66:46:81',
'66:46:82',
'66:46:83',
'66:46:84',
'66:46:85',
'66:46:86',
'66:46:87',
'66:46:88',
'66:46:89',
'66:46:90',
'66:46:91',
'66:46:92',
'66:46:93',
'66:46:94',
'66:46:95',
'66:46:96',
'66:46:97',
'66:46:98',
'66:46:99',
'66:47:00',
'66:47:01',
'66:47:02',
'66:47:03',
'66:47:04',
'66:47:05',
'66:47:06',
'66:47:07',
'66:47:08',
'66:47:09',
'66:47:10',
'66:47:11',
'66:47:12',
'66:47:13',
'66:47:14',
'66:47:15',
'66:47:16',
'66:47:17',
'66:47:18',
'66:47:19',
'66:47:20',
'66:47:21',
'66:47:22',
'66:47:23',
'66:47:24',
'66:47:25',
'66:47:26',
'66:47:27',
'66:47:28',
'66:47:29',
'66:47:30',
'66:47:31',
'66:47:32',
'66:47:33',
'66:47:34',
'66:47:35',
'66:47:36',
'66:47:37',
'66:47:38',
'66:47:39',
'66:47:40',
'66:47:41',
'66:47:42',
'66:47:43',
'66:47:44',
'66:47:45',
'66:47:46',
'66:47:47',
'66:47:48',
'66:47:49',
'66:47:50',
'66:47:51',
'66:47:52',
'66:47:53',
'66:47:54',
'66:47:55',
'66:47:56',
'66:47:57',
'66:47:58',
'66:47:59',
'66:47:60',
'66:47:61',
'66:47:62',
'66:47:63',
'66:47:64',
'66:47:65',
'66:47:66',
'66:47:67',
'66:47:68',
'66:47:69',
'66:47:70',
'66:47:71',
'66:47:72',
'66:47:73',
'66:47:74',
'66:47:75',
'66:47:76',
'66:47:77',
'66:47:78',
'66:47:79',
'66:47:80',
'66:47:81',
'66:47:82',
'66:47:83',
'66:47:84',
'66:47:85',
'66:47:86',
'66:47:87',
'66:47:88',
'66:47:89',
'66:47:90',
'66:47:91',
'66:47:92',
'66:47:93',
'66:47:94',
'66:47:95',
'66:47:96',
'66:47:97',
'66:47:98',
'66:47:99',
'66:48:00',
'66:48:01',
'66:48:02',
'66:48:03',
'66:48:04',
'66:48:05',
'66:48:06',
'66:48:07',
'66:48:08',
'66:48:09',
'66:48:10',
'66:48:11',
'66:48:12',
'66:48:13',
'66:48:14',
'66:48:15',
'66:48:16',
'66:48:17',
'66:48:18',
'66:48:19',
'66:48:20',
'66:48:21',
'66:48:22',
'66:48:23',
'66:48:24',
'66:48:25',
'66:48:26',
'66:48:27',
'66:48:28',
'66:48:29',
'66:48:30',
'66:48:31',
'66:48:32',
'66:48:33',
'66:48:34',
'66:48:35',
'66:48:36',
'66:48:37',
'66:48:38',
'66:48:39',
'66:48:40',
'66:48:41',
'66:48:42',
'66:48:43',
'66:48:44',
'66:48:45',
'66:48:46',
'66:48:47',
'66:48:48',
'66:48:49',
'66:48:50',
'66:48:51',
'66:48:52',
'66:48:53',
'66:48:54',
'66:48:55',
'66:48:56',
'66:48:57',
'66:48:58',
'66:48:59',
'66:48:60',
'66:48:61',
'66:48:62',
'66:48:63',
'66:48:64',
'66:48:65',
'66:48:66',
'66:48:67',
'66:48:68',
'66:48:69',
'66:48:70',
'66:48:71',
'66:48:72',
'66:48:73',
'66:48:74',
'66:48:75',
'66:48:76',
'66:48:77',
'66:48:78',
'66:48:79',
'66:48:80',
'66:48:81',
'66:48:82',
'66:48:83',
'66:48:84',
'66:48:85',
'66:48:86',
'66:48:87',
'66:48:88',
'66:48:89',
'66:48:90',
'66:48:91',
'66:48:92',
'66:48:93',
'66:48:94',
'66:48:95',
'66:48:96',
'66:48:97',
'66:48:98',
'66:48:99',
'66:49:00',
'66:49:01',
'66:49:02',
'66:49:03',
'66:49:04',
'66:49:05',
'66:49:06',
'66:49:07',
'66:49:08',
'66:49:09',
'66:49:10',
'66:49:11',
'66:49:12',
'66:49:13',
'66:49:14',
'66:49:15',
'66:49:16',
'66:49:17',
'66:49:18',
'66:49:19',
'66:49:20',
'66:49:21',
'66:49:22',
'66:49:23',
'66:49:24',
'66:49:25',
'66:49:26',
'66:49:27',
'66:49:28',
'66:49:29',
'66:49:30',
'66:49:31',
'66:49:32',
'66:49:33',
'66:49:34',
'66:49:35',
'66:49:36',
'66:49:37',
'66:49:38',
'66:49:39',
'66:49:40',
'66:49:41',
'66:49:42',
'66:49:43',
'66:49:44',
'66:49:45',
'66:49:46',
'66:49:47',
'66:49:48',
'66:49:49',
'66:49:50',
'66:49:51',
'66:49:52',
'66:49:53',
'66:49:54',
'66:49:55',
'66:49:56',
'66:49:57',
'66:49:58',
'66:49:59',
'66:49:60',
'66:49:61',
'66:49:62',
'66:49:63',
'66:49:64',
'66:49:65',
'66:49:66',
'66:49:67',
'66:49:68',
'66:49:69',
'66:49:70',
'66:49:71',
'66:49:72',
'66:49:73',
'66:49:74',
'66:49:75',
'66:49:76',
'66:49:77',
'66:49:78',
'66:49:79',
'66:49:80',
'66:49:81',
'66:49:82',
'66:49:83',
'66:49:84',
'66:49:85',
'66:49:86',
'66:49:87',
'66:49:88',
'66:49:89',
'66:49:90',
'66:49:91',
'66:49:92',
'66:49:93',
'66:49:94',
'66:49:95',
'66:49:96',
'66:49:97',
'66:49:98',
'66:49:99',
'66:50:00',
'66:50:01',
'66:50:02',
'66:50:03',
'66:50:04',
'66:50:05',
'66:50:06',
'66:50:07',
'66:50:08',
'66:50:09',
'66:50:10',
'66:50:11',
'66:50:12',
'66:50:13',
'66:50:14',
'66:50:15',
'66:50:16',
'66:50:17',
'66:50:18',
'66:50:19',
'66:50:20',
'66:50:21',
'66:50:22',
'66:50:23',
'66:50:24',
'66:50:25',
'66:50:26',
'66:50:27',
'66:50:28',
'66:50:29',
'66:50:30',
'66:50:31',
'66:50:32',
'66:50:33',
'66:50:34',
'66:50:35',
'66:50:36',
'66:50:37',
'66:50:38',
'66:50:39',
'66:50:40',
'66:50:41',
'66:50:42',
'66:50:43',
'66:50:44',
'66:50:45',
'66:50:46',
'66:50:47',
'66:50:48',
'66:50:49',
'66:50:50',
'66:50:51',
'66:50:52',
'66:50:53',
'66:50:54',
'66:50:55',
'66:50:56',
'66:50:57',
'66:50:58',
'66:50:59',
'66:50:60',
'66:50:61',
'66:50:62',
'66:50:63',
'66:50:64',
'66:50:65',
'66:50:66',
'66:50:67',
'66:50:68',
'66:50:69',
'66:50:70',
'66:50:71',
'66:50:72',
'66:50:73',
'66:50:74',
'66:50:75',
'66:50:76',
'66:50:77',
'66:50:78',
'66:50:79',
'66:50:80',
'66:50:81',
'66:50:82',
'66:50:83',
'66:50:84',
'66:50:85',
'66:50:86',
'66:50:87',
'66:50:88',
'66:50:89',
'66:50:90',
'66:50:91',
'66:50:92',
'66:50:93',
'66:50:94',
'66:50:95',
'66:50:96',
'66:50:97',
'66:50:98',
'66:50:99',
'66:51:00',
'66:51:01',
'66:51:02',
'66:51:03',
'66:51:04',
'66:51:05',
'66:51:06',
'66:51:07',
'66:51:08',
'66:51:09',
'66:51:10',
'66:51:11',
'66:51:12',
'66:51:13',
'66:51:14',
'66:51:15',
'66:51:16',
'66:51:17',
'66:51:18',
'66:51:19',
'66:51:20',
'66:51:21',
'66:51:22',
'66:51:23',
'66:51:24',
'66:51:25',
'66:51:26',
'66:51:27',
'66:51:28',
'66:51:29',
'66:51:30',
'66:51:31',
'66:51:32',
'66:51:33',
'66:51:34',
'66:51:35',
'66:51:36',
'66:51:37',
'66:51:38',
'66:51:39',
'66:51:40',
'66:51:41',
'66:51:42',
'66:51:43',
'66:51:44',
'66:51:45',
'66:51:46',
'66:51:47',
'66:51:48',
'66:51:49',
'66:51:50',
'66:51:51',
'66:51:52',
'66:51:53',
'66:51:54',
'66:51:55',
'66:51:56',
'66:51:57',
'66:51:58',
'66:51:59',
'66:51:60',
'66:51:61',
'66:51:62',
'66:51:63',
'66:51:64',
'66:51:65',
'66:51:66',
'66:51:67',
'66:51:68',
'66:51:69',
'66:51:70',
'66:51:71',
'66:51:72',
'66:51:73',
'66:51:74',
'66:51:75',
'66:51:76',
'66:51:77',
'66:51:78',
'66:51:79',
'66:51:80',
'66:51:81',
'66:51:82',
'66:51:83',
'66:51:84',
'66:51:85',
'66:51:86',
'66:51:87',
'66:51:88',
'66:51:89',
'66:51:90',
'66:51:91',
'66:51:92',
'66:51:93',
'66:51:94',
'66:51:95',
'66:51:96',
'66:51:97',
'66:51:98',
'66:51:99',
'66:52:00',
'66:52:01',
'66:52:02',
'66:52:03',
'66:52:04',
'66:52:05',
'66:52:06',
'66:52:07',
'66:52:08',
'66:52:09',
'66:52:10',
'66:52:11',
'66:52:12',
'66:52:13',
'66:52:14',
'66:52:15',
'66:52:16',
'66:52:17',
'66:52:18',
'66:52:19',
'66:52:20',
'66:52:21',
'66:52:22',
'66:52:23',
'66:52:24',
'66:52:25',
'66:52:26',
'66:52:27',
'66:52:28',
'66:52:29',
'66:52:30',
'66:52:31',
'66:52:32',
'66:52:33',
'66:52:34',
'66:52:35',
'66:52:36',
'66:52:37',
'66:52:38',
'66:52:39',
'66:52:40',
'66:52:41',
'66:52:42',
'66:52:43',
'66:52:44',
'66:52:45',
'66:52:46',
'66:52:47',
'66:52:48',
'66:52:49',
'66:52:50',
'66:52:51',
'66:52:52',
'66:52:53',
'66:52:54',
'66:52:55',
'66:52:56',
'66:52:57',
'66:52:58',
'66:52:59',
'66:52:60',
'66:52:61',
'66:52:62',
'66:52:63',
'66:52:64',
'66:52:65',
'66:52:66',
'66:52:67',
'66:52:68',
'66:52:69',
'66:52:70',
'66:52:71',
'66:52:72',
'66:52:73',
'66:52:74',
'66:52:75',
'66:52:76',
'66:52:77',
'66:52:78',
'66:52:79',
'66:52:80',
'66:52:81',
'66:52:82',
'66:52:83',
'66:52:84',
'66:52:85',
'66:52:86',
'66:52:87',
'66:52:88',
'66:52:89',
'66:52:90',
'66:52:91',
'66:52:92',
'66:52:93',
'66:52:94',
'66:52:95',
'66:52:96',
'66:52:97',
'66:52:98',
'66:52:99',
'66:53:00',
'66:53:01',
'66:53:02',
'66:53:03',
'66:53:04',
'66:53:05',
'66:53:06',
'66:53:07',
'66:53:08',
'66:53:09',
'66:53:10',
'66:53:11',
'66:53:12',
'66:53:13',
'66:53:14',
'66:53:15',
'66:53:16',
'66:53:17',
'66:53:18',
'66:53:19',
'66:53:20',
'66:53:21',
'66:53:22',
'66:53:23',
'66:53:24',
'66:53:25',
'66:53:26',
'66:53:27',
'66:53:28',
'66:53:29',
'66:53:30',
'66:53:31',
'66:53:32',
'66:53:33',
'66:53:34',
'66:53:35',
'66:53:36',
'66:53:37',
'66:53:38',
'66:53:39',
'66:53:40',
'66:53:41',
'66:53:42',
'66:53:43',
'66:53:44',
'66:53:45',
'66:53:46',
'66:53:47',
'66:53:48',
'66:53:49',
'66:53:50',
'66:53:51',
'66:53:52',
'66:53:53',
'66:53:54',
'66:53:55',
'66:53:56',
'66:53:57',
'66:53:58',
'66:53:59',
'66:53:60',
'66:53:61',
'66:53:62',
'66:53:63',
'66:53:64',
'66:53:65',
'66:53:66',
'66:53:67',
'66:53:68',
'66:53:69',
'66:53:70',
'66:53:71',
'66:53:72',
'66:53:73',
'66:53:74',
'66:53:75',
'66:53:76',
'66:53:77',
'66:53:78',
'66:53:79',
'66:53:80',
'66:53:81',
'66:53:82',
'66:53:83',
'66:53:84',
'66:53:85',
'66:53:86',
'66:53:87',
'66:53:88',
'66:53:89',
'66:53:90',
'66:53:91',
'66:53:92',
'66:53:93',
'66:53:94',
'66:53:95',
'66:53:96',
'66:53:97',
'66:53:98',
'66:53:99',
'66:54:00',
'66:54:01',
'66:54:02',
'66:54:03',
'66:54:04',
'66:54:05',
'66:54:06',
'66:54:07',
'66:54:08',
'66:54:09',
'66:54:10',
'66:54:11',
'66:54:12',
'66:54:13',
'66:54:14',
'66:54:15',
'66:54:16',
'66:54:17',
'66:54:18',
'66:54:19',
'66:54:20',
'66:54:21',
'66:54:22',
'66:54:23',
'66:54:24',
'66:54:25',
'66:54:26',
'66:54:27',
'66:54:28',
'66:54:29',
'66:54:30',
'66:54:31',
'66:54:32',
'66:54:33',
'66:54:34',
'66:54:35',
'66:54:36',
'66:54:37',
'66:54:38',
'66:54:39',
'66:54:40',
'66:54:41',
'66:54:42',
'66:54:43',
'66:54:44',
'66:54:45',
'66:54:46',
'66:54:47',
'66:54:48',
'66:54:49',
'66:54:50',
'66:54:51',
'66:54:52',
'66:54:53',
'66:54:54',
'66:54:55',
'66:54:56',
'66:54:57',
'66:54:58',
'66:54:59',
'66:54:60',
'66:54:61',
'66:54:62',
'66:54:63',
'66:54:64',
'66:54:65',
'66:54:66',
'66:54:67',
'66:54:68',
'66:54:69',
'66:54:70',
'66:54:71',
'66:54:72',
'66:54:73',
'66:54:74',
'66:54:75',
'66:54:76',
'66:54:77',
'66:54:78',
'66:54:79',
'66:54:80',
'66:54:81',
'66:54:82',
'66:54:83',
'66:54:84',
'66:54:85',
'66:54:86',
'66:54:87',
'66:54:88',
'66:54:89',
'66:54:90',
'66:54:91',
'66:54:92',
'66:54:93',
'66:54:94',
'66:54:95',
'66:54:96',
'66:54:97',
'66:54:98',
'66:54:99',
'66:55:00',
'66:55:01',
'66:55:02',
'66:55:03',
'66:55:04',
'66:55:05',
'66:55:06',
'66:55:07',
'66:55:08',
'66:55:09',
'66:55:10',
'66:55:11',
'66:55:12',
'66:55:13',
'66:55:14',
'66:55:15',
'66:55:16',
'66:55:17',
'66:55:18',
'66:55:19',
'66:55:20',
'66:55:21',
'66:55:22',
'66:55:23',
'66:55:24',
'66:55:25',
'66:55:26',
'66:55:27',
'66:55:28',
'66:55:29',
'66:55:30',
'66:55:31',
'66:55:32',
'66:55:33',
'66:55:34',
'66:55:35',
'66:55:36',
'66:55:37',
'66:55:38',
'66:55:39',
'66:55:40',
'66:55:41',
'66:55:42',
'66:55:43',
'66:55:44',
'66:55:45',
'66:55:46',
'66:55:47',
'66:55:48',
'66:55:49',
'66:55:50',
'66:55:51',
'66:55:52',
'66:55:53',
'66:55:54',
'66:55:55',
'66:55:56',
'66:55:57',
'66:55:58',
'66:55:59',
'66:55:60',
'66:55:61',
'66:55:62',
'66:55:63',
'66:55:64',
'66:55:65',
'66:55:66',
'66:55:67',
'66:55:68',
'66:55:69',
'66:55:70',
'66:55:71',
'66:55:72',
'66:55:73',
'66:55:74',
'66:55:75',
'66:55:76',
'66:55:77',
'66:55:78',
'66:55:79',
'66:55:80',
'66:55:81',
'66:55:82',
'66:55:83',
'66:55:84',
'66:55:85',
'66:55:86',
'66:55:87',
'66:55:88',
'66:55:89',
'66:55:90',
'66:55:91',
'66:55:92',
'66:55:93',
'66:55:94',
'66:55:95',
'66:55:96',
'66:55:97',
'66:55:98',
'66:55:99',
'66:56:00',
'66:56:01',
'66:56:02',
'66:56:03',
'66:56:04',
'66:56:05',
'66:56:06',
'66:56:07',
'66:56:08',
'66:56:09',
'66:56:10',
'66:56:11',
'66:56:12',
'66:56:13',
'66:56:14',
'66:56:15',
'66:56:16',
'66:56:17',
'66:56:18',
'66:56:19',
'66:56:20',
'66:56:21',
'66:56:22',
'66:56:23',
'66:56:24',
'66:56:25',
'66:56:26',
'66:56:27',
'66:56:28',
'66:56:29',
'66:56:30',
'66:56:31',
'66:56:32',
'66:56:33',
'66:56:34',
'66:56:35',
'66:56:36',
'66:56:37',
'66:56:38',
'66:56:39',
'66:56:40',
'66:56:41',
'66:56:42',
'66:56:43',
'66:56:44',
'66:56:45',
'66:56:46',
'66:56:47',
'66:56:48',
'66:56:49',
'66:56:50',
'66:56:51',
'66:56:52',
'66:56:53',
'66:56:54',
'66:56:55',
'66:56:56',
'66:56:57',
'66:56:58',
'66:56:59',
'66:56:60',
'66:56:61',
'66:56:62',
'66:56:63',
'66:56:64',
'66:56:65',
'66:56:66',
'66:56:67',
'66:56:68',
'66:56:69',
'66:56:70',
'66:56:71',
'66:56:72',
'66:56:73',
'66:56:74',
'66:56:75',
'66:56:76',
'66:56:77',
'66:56:78',
'66:56:79',
'66:56:80',
'66:56:81',
'66:56:82',
'66:56:83',
'66:56:84',
'66:56:85',
'66:56:86',
'66:56:87',
'66:56:88',
'66:56:89',
'66:56:90',
'66:56:91',
'66:56:92',
'66:56:93',
'66:56:94',
'66:56:95',
'66:56:96',
'66:56:97',
'66:56:98',
'66:56:99',
'66:57:00',
'66:57:01',
'66:57:02',
'66:57:03',
'66:57:04',
'66:57:05',
'66:57:06',
'66:57:07',
'66:57:08',
'66:57:09',
'66:57:10',
'66:57:11',
'66:57:12',
'66:57:13',
'66:57:14',
'66:57:15',
'66:57:16',
'66:57:17',
'66:57:18',
'66:57:19',
'66:57:20',
'66:57:21',
'66:57:22',
'66:57:23',
'66:57:24',
'66:57:25',
'66:57:26',
'66:57:27',
'66:57:28',
'66:57:29',
'66:57:30',
'66:57:31',
'66:57:32',
'66:57:33',
'66:57:34',
'66:57:35',
'66:57:36',
'66:57:37',
'66:57:38',
'66:57:39',
'66:57:40',
'66:57:41',
'66:57:42',
'66:57:43',
'66:57:44',
'66:57:45',
'66:57:46',
'66:57:47',
'66:57:48',
'66:57:49',
'66:57:50',
'66:57:51',
'66:57:52',
'66:57:53',
'66:57:54',
'66:57:55',
'66:57:56',
'66:57:57',
'66:57:58',
'66:57:59',
'66:57:60',
'66:57:61',
'66:57:62',
'66:57:63',
'66:57:64',
'66:57:65',
'66:57:66',
'66:57:67',
'66:57:68',
'66:57:69',
'66:57:70',
'66:57:71',
'66:57:72',
'66:57:73',
'66:57:74',
'66:57:75',
'66:57:76',
'66:57:77',
'66:57:78',
'66:57:79',
'66:57:80',
'66:57:81',
'66:57:82',
'66:57:83',
'66:57:84',
'66:57:85',
'66:57:86',
'66:57:87',
'66:57:88',
'66:57:89',
'66:57:90',
'66:57:91',
'66:57:92',
'66:57:93',
'66:57:94',
'66:57:95',
'66:57:96',
'66:57:97',
'66:57:98',
'66:57:99',
'66:58:00',
'66:58:01',
'66:58:02',
'66:58:03',
'66:58:04',
'66:58:05',
'66:58:06',
'66:58:07',
'66:58:08',
'66:58:09',
'66:58:10',
'66:58:11',
'66:58:12',
'66:58:13',
'66:58:14',
'66:58:15',
'66:58:16',
'66:58:17',
'66:58:18',
'66:58:19',
'66:58:20',
'66:58:21',
'66:58:22',
'66:58:23',
'66:58:24',
'66:58:25',
'66:58:26',
'66:58:27',
'66:58:28',
'66:58:29',
'66:58:30',
'66:58:31',
'66:58:32',
'66:58:33',
'66:58:34',
'66:58:35',
'66:58:36',
'66:58:37',
'66:58:38',
'66:58:39',
'66:58:40',
'66:58:41',
'66:58:42',
'66:58:43',
'66:58:44',
'66:58:45',
'66:58:46',
'66:58:47',
'66:58:48',
'66:58:49',
'66:58:50',
'66:58:51',
'66:58:52',
'66:58:53',
'66:58:54',
'66:58:55',
'66:58:56',
'66:58:57',
'66:58:58',
'66:58:59',
'66:58:60',
'66:58:61',
'66:58:62',
'66:58:63',
'66:58:64',
'66:58:65',
'66:58:66',
'66:58:67',
'66:58:68',
'66:58:69',
'66:58:70',
'66:58:71',
'66:58:72',
'66:58:73',
'66:58:74',
'66:58:75',
'66:58:76',
'66:58:77',
'66:58:78',
'66:58:79',
'66:58:80',
'66:58:81',
'66:58:82',
'66:58:83',
'66:58:84',
'66:58:85',
'66:58:86',
'66:58:87',
'66:58:88',
'66:58:89',
'66:58:90',
'66:58:91',
'66:58:92',
'66:58:93',
'66:58:94',
'66:58:95',
'66:58:96',
'66:58:97',
'66:58:98',
'66:58:99',
'66:59:00',
'66:59:01',
'66:59:02',
'66:59:03',
'66:59:04',
'66:59:05',
'66:59:06',
'66:59:07',
'66:59:08',
'66:59:09',
'66:59:10',
'66:59:11',
'66:59:12',
'66:59:13',
'66:59:14',
'66:59:15',
'66:59:16',
'66:59:17',
'66:59:18',
'66:59:19',
'66:59:20',
'66:59:21',
'66:59:22',
'66:59:23',
'66:59:24',
'66:59:25',
'66:59:26',
'66:59:27',
'66:59:28',
'66:59:29',
'66:59:30',
'66:59:31',
'66:59:32',
'66:59:33',
'66:59:34',
'66:59:35',
'66:59:36',
'66:59:37',
'66:59:38',
'66:59:39',
'66:59:40',
'66:59:41',
'66:59:42',
'66:59:43',
'66:59:44',
'66:59:45',
'66:59:46',
'66:59:47',
'66:59:48',
'66:59:49',
'66:59:50',
'66:59:51',
'66:59:52',
'66:59:53',
'66:59:54',
'66:59:55',
'66:59:56',
'66:59:57',
'66:59:58',
'66:59:59',
'66:59:60',
'66:59:61',
'66:59:62',
'66:59:63',
'66:59:64',
'66:59:65',
'66:59:66',
'66:59:67',
'66:59:68',
'66:59:69',
'66:59:70',
'66:59:71',
'66:59:72',
'66:59:73',
'66:59:74',
'66:59:75',
'66:59:76',
'66:59:77',
'66:59:78',
'66:59:79',
'66:59:80',
'66:59:81',
'66:59:82',
'66:59:83',
'66:59:84',
'66:59:85',
'66:59:86',
'66:59:87',
'66:59:88',
'66:59:89',
'66:59:90',
'66:59:91',
'66:59:92',
'66:59:93',
'66:59:94',
'66:59:95',
'66:59:96',
'66:59:97',
'66:59:98',
'66:59:99',
'66:60:00',
'66:60:01',
'66:60:02',
'66:60:03',
'66:60:04',
'66:60:05',
'66:60:06',
'66:60:07',
'66:60:08',
'66:60:09',
'66:60:10',
'66:60:11',
'66:60:12',
'66:60:13',
'66:60:14',
'66:60:15',
'66:60:16',
'66:60:17',
'66:60:18',
'66:60:19',
'66:60:20',
'66:60:21',
'66:60:22',
'66:60:23',
'66:60:24',
'66:60:25',
'66:60:26',
'66:60:27',
'66:60:28',
'66:60:29',
'66:60:30',
'66:60:31',
'66:60:32',
'66:60:33',
'66:60:34',
'66:60:35',
'66:60:36',
'66:60:37',
'66:60:38',
'66:60:39',
'66:60:40',
'66:60:41',
'66:60:42',
'66:60:43',
'66:60:44',
'66:60:45',
'66:60:46',
'66:60:47',
'66:60:48',
'66:60:49',
'66:60:50',
'66:60:51',
'66:60:52',
'66:60:53',
'66:60:54',
'66:60:55',
'66:60:56',
'66:60:57',
'66:60:58',
'66:60:59',
'66:60:60',
'66:60:61',
'66:60:62',
'66:60:63',
'66:60:64',
'66:60:65',
'66:60:66',
'66:60:67',
'66:60:68',
'66:60:69',
'66:60:70',
'66:60:71',
'66:60:72',
'66:60:73',
'66:60:74',
'66:60:75',
'66:60:76',
'66:60:77',
'66:60:78',
'66:60:79',
'66:60:80',
'66:60:81',
'66:60:82',
'66:60:83',
'66:60:84',
'66:60:85',
'66:60:86',
'66:60:87',
'66:60:88',
'66:60:89',
'66:60:90',
'66:60:91',
'66:60:92',
'66:60:93',
'66:60:94',
'66:60:95',
'66:60:96',
'66:60:97',
'66:60:98',
'66:60:99',
'66:61:00',
'66:61:01',
'66:61:02',
'66:61:03',
'66:61:04',
'66:61:05',
'66:61:06',
'66:61:07',
'66:61:08',
'66:61:09',
'66:61:10',
'66:61:11',
'66:61:12',
'66:61:13',
'66:61:14',
'66:61:15',
'66:61:16',
'66:61:17',
'66:61:18',
'66:61:19',
'66:61:20',
'66:61:21',
'66:61:22',
'66:61:23',
'66:61:24',
'66:61:25',
'66:61:26',
'66:61:27',
'66:61:28',
'66:61:29',
'66:61:30',
'66:61:31',
'66:61:32',
'66:61:33',
'66:61:34',
'66:61:35',
'66:61:36',
'66:61:37',
'66:61:38',
'66:61:39',
'66:61:40',
'66:61:41',
'66:61:42',
'66:61:43',
'66:61:44',
'66:61:45',
'66:61:46',
'66:61:47',
'66:61:48',
'66:61:49',
'66:61:50',
'66:61:51',
'66:61:52',
'66:61:53',
'66:61:54',
'66:61:55',
'66:61:56',
'66:61:57',
'66:61:58',
'66:61:59',
'66:61:60',
'66:61:61',
'66:61:62',
'66:61:63',
'66:61:64',
'66:61:65',
'66:61:66',
'66:61:67',
'66:61:68',
'66:61:69',
'66:61:70',
'66:61:71',
'66:61:72',
'66:61:73',
'66:61:74',
'66:61:75',
'66:61:76',
'66:61:77',
'66:61:78',
'66:61:79',
'66:61:80',
'66:61:81',
'66:61:82',
'66:61:83',
'66:61:84',
'66:61:85',
'66:61:86',
'66:61:87',
'66:61:88',
'66:61:89',
'66:61:90',
'66:61:91',
'66:61:92',
'66:61:93',
'66:61:94',
'66:61:95',
'66:61:96',
'66:61:97',
'66:61:98',
'66:61:99',
'66:62:00',
'66:62:01',
'66:62:02',
'66:62:03',
'66:62:04',
'66:62:05',
'66:62:06',
'66:62:07',
'66:62:08',
'66:62:09',
'66:62:10',
'66:62:11',
'66:62:12',
'66:62:13',
'66:62:14',
'66:62:15',
'66:62:16',
'66:62:17',
'66:62:18',
'66:62:19',
'66:62:20',
'66:62:21',
'66:62:22',
'66:62:23',
'66:62:24',
'66:62:25',
'66:62:26',
'66:62:27',
'66:62:28',
'66:62:29',
'66:62:30',
'66:62:31',
'66:62:32',
'66:62:33',
'66:62:34',
'66:62:35',
'66:62:36',
'66:62:37',
'66:62:38',
'66:62:39',
'66:62:40',
'66:62:41',
'66:62:42',
'66:62:43',
'66:62:44',
'66:62:45',
'66:62:46',
'66:62:47',
'66:62:48',
'66:62:49',
'66:62:50',
'66:62:51',
'66:62:52',
'66:62:53',
'66:62:54',
'66:62:55',
'66:62:56',
'66:62:57',
'66:62:58',
'66:62:59',
'66:62:60',
'66:62:61',
'66:62:62',
'66:62:63',
'66:62:64',
'66:62:65',
'66:62:66',
'66:62:67',
'66:62:68',
'66:62:69',
'66:62:70',
'66:62:71',
'66:62:72',
'66:62:73',
'66:62:74',
'66:62:75',
'66:62:76',
'66:62:77',
'66:62:78',
'66:62:79',
'66:62:80',
'66:62:81',
'66:62:82',
'66:62:83',
'66:62:84',
'66:62:85',
'66:62:86',
'66:62:87',
'66:62:88',
'66:62:89',
'66:62:90',
'66:62:91',
'66:62:92',
'66:62:93',
'66:62:94',
'66:62:95',
'66:62:96',
'66:62:97',
'66:62:98',
'66:62:99',
'66:63:00',
'66:63:01',
'66:63:02',
'66:63:03',
'66:63:04',
'66:63:05',
'66:63:06',
'66:63:07',
'66:63:08',
'66:63:09',
'66:63:10',
'66:63:11',
'66:63:12',
'66:63:13',
'66:63:14',
'66:63:15',
'66:63:16',
'66:63:17',
'66:63:18',
'66:63:19',
'66:63:20',
'66:63:21',
'66:63:22',
'66:63:23',
'66:63:24',
'66:63:25',
'66:63:26',
'66:63:27',
'66:63:28',
'66:63:29',
'66:63:30',
'66:63:31',
'66:63:32',
'66:63:33',
'66:63:34',
'66:63:35',
'66:63:36',
'66:63:37',
'66:63:38',
'66:63:39',
'66:63:40',
'66:63:41',
'66:63:42',
'66:63:43',
'66:63:44',
'66:63:45',
'66:63:46',
'66:63:47',
'66:63:48',
'66:63:49',
'66:63:50',
'66:63:51',
'66:63:52',
'66:63:53',
'66:63:54',
'66:63:55',
'66:63:56',
'66:63:57',
'66:63:58',
'66:63:59',
'66:63:60',
'66:63:61',
'66:63:62',
'66:63:63',
'66:63:64',
'66:63:65',
'66:63:66',
'66:63:67',
'66:63:68',
'66:63:69',
'66:63:70',
'66:63:71',
'66:63:72',
'66:63:73',
'66:63:74',
'66:63:75',
'66:63:76',
'66:63:77',
'66:63:78',
'66:63:79',
'66:63:80',
'66:63:81',
'66:63:82',
'66:63:83',
'66:63:84',
'66:63:85',
'66:63:86',
'66:63:87',
'66:63:88',
'66:63:89',
'66:63:90',
'66:63:91',
'66:63:92',
'66:63:93',
'66:63:94',
'66:63:95',
'66:63:96',
'66:63:97',
'66:63:98',
'66:63:99',
'66:64:00',
'66:64:01',
'66:64:02',
'66:64:03',
'66:64:04',
'66:64:05',
'66:64:06',
'66:64:07',
'66:64:08',
'66:64:09',
'66:64:10',
'66:64:11',
'66:64:12',
'66:64:13',
'66:64:14',
'66:64:15',
'66:64:16',
'66:64:17',
'66:64:18',
'66:64:19',
'66:64:20',
'66:64:21',
'66:64:22',
'66:64:23',
'66:64:24',
'66:64:25',
'66:64:26',
'66:64:27',
'66:64:28',
'66:64:29',
'66:64:30',
'66:64:31',
'66:64:32',
'66:64:33',
'66:64:34',
'66:64:35',
'66:64:36',
'66:64:37',
'66:64:38',
'66:64:39',
'66:64:40',
'66:64:41',
'66:64:42',
'66:64:43',
'66:64:44',
'66:64:45',
'66:64:46',
'66:64:47',
'66:64:48',
'66:64:49',
'66:64:50',
'66:64:51',
'66:64:52',
'66:64:53',
'66:64:54',
'66:64:55',
'66:64:56',
'66:64:57',
'66:64:58',
'66:64:59',
'66:64:60',
'66:64:61',
'66:64:62',
'66:64:63',
'66:64:64',
'66:64:65',
'66:64:66',
'66:64:67',
'66:64:68',
'66:64:69',
'66:64:70',
'66:64:71',
'66:64:72',
'66:64:73',
'66:64:74',
'66:64:75',
'66:64:76',
'66:64:77',
'66:64:78',
'66:64:79',
'66:64:80',
'66:64:81',
'66:64:82',
'66:64:83',
'66:64:84',
'66:64:85',
'66:64:86',
'66:64:87',
'66:64:88',
'66:64:89',
'66:64:90',
'66:64:91',
'66:64:92',
'66:64:93',
'66:64:94',
'66:64:95',
'66:64:96',
'66:64:97',
'66:64:98',
'66:64:99',
'66:65:00',
'66:65:01',
'66:65:02',
'66:65:03',
'66:65:04',
'66:65:05',
'66:65:06',
'66:65:07',
'66:65:08',
'66:65:09',
'66:65:10',
'66:65:11',
'66:65:12',
'66:65:13',
'66:65:14',
'66:65:15',
'66:65:16',
'66:65:17',
'66:65:18',
'66:65:19',
'66:65:20',
'66:65:21',
'66:65:22',
'66:65:23',
'66:65:24',
'66:65:25',
'66:65:26',
'66:65:27',
'66:65:28',
'66:65:29',
'66:65:30',
'66:65:31',
'66:65:32',
'66:65:33',
'66:65:34',
'66:65:35',
'66:65:36',
'66:65:37',
'66:65:38',
'66:65:39',
'66:65:40',
'66:65:41',
'66:65:42',
'66:65:43',
'66:65:44',
'66:65:45',
'66:65:46',
'66:65:47',
'66:65:48',
'66:65:49',
'66:65:50',
'66:65:51',
'66:65:52',
'66:65:53',
'66:65:54',
'66:65:55',
'66:65:56',
'66:65:57',
'66:65:58',
'66:65:59',
'66:65:60',
'66:65:61',
'66:65:62',
'66:65:63',
'66:65:64',
'66:65:65',
'66:65:66',
'66:65:67',
'66:65:68',
'66:65:69',
'66:65:70',
'66:65:71',
'66:65:72',
'66:65:73',
'66:65:74',
'66:65:75',
'66:65:76',
'66:65:77',
'66:65:78',
'66:65:79',
'66:65:80',
'66:65:81',
'66:65:82',
'66:65:83',
'66:65:84',
'66:65:85',
'66:65:86',
'66:65:87',
'66:65:88',
'66:65:89',
'66:65:90',
'66:65:91',
'66:65:92',
'66:65:93',
'66:65:94',
'66:65:95',
'66:65:96',
'66:65:97',
'66:65:98',
'66:65:99',
'66:66:00',
'66:66:01',
'66:66:02',
'66:66:03',
'66:66:04',
'66:66:05',
'66:66:06',
'66:66:07',
'66:66:08',
'66:66:09',
'66:66:10',
'66:66:11',
'66:66:12',
'66:66:13',
'66:66:14',
'66:66:15',
'66:66:16',
'66:66:17',
'66:66:18',
'66:66:19',
'66:66:20',
'66:66:21',
'66:66:22',
'66:66:23',
'66:66:24',
'66:66:25',
'66:66:26',
'66:66:27',
'66:66:28',
'66:66:29',
'66:66:30',
'66:66:31',
'66:66:32',
'66:66:33',
'66:66:34',
'66:66:35',
'66:66:36',
'66:66:37',
'66:66:38',
'66:66:39',
'66:66:40',
'66:66:41',
'66:66:42',
'66:66:43',
'66:66:44',
'66:66:45',
'66:66:46',
'66:66:47',
'66:66:48',
'66:66:49',
'66:66:50',
'66:66:51',
'66:66:52',
'66:66:53',
'66:66:54',
'66:66:55',
'66:66:56',
'66:66:57',
'66:66:58',
'66:66:59',
'66:66:60',
'66:66:61',
'66:66:62',
'66:66:63',
'66:66:64',
'66:66:65',
'66:66:66',
'66:66:67',
'66:66:68',
'66:66:69',
'66:66:70',
'66:66:71',
'66:66:72',
'66:66:73',
'66:66:74',
'66:66:75',
'66:66:76',
'66:66:77',
'66:66:78',
'66:66:79',
'66:66:80',
'66:66:81',
'66:66:82',
'66:66:83',
'66:66:84',
'66:66:85',
'66:66:86',
'66:66:87',
'66:66:88',
'66:66:89',
'66:66:90',
'66:66:91',
'66:66:92',
'66:66:93',
'66:66:94',
'66:66:95',
'66:66:96',
'66:66:97',
'66:66:98',
'66:66:99',
'66:67:00',
'66:67:01',
'66:67:02',
'66:67:03',
'66:67:04',
'66:67:05',
'66:67:06',
'66:67:07',
'66:67:08',
'66:67:09',
'66:67:10',
'66:67:11',
'66:67:12',
'66:67:13',
'66:67:14',
'66:67:15',
'66:67:16',
'66:67:17',
'66:67:18',
'66:67:19',
'66:67:20',
'66:67:21',
'66:67:22',
'66:67:23',
'66:67:24',
'66:67:25',
'66:67:26',
'66:67:27',
'66:67:28',
'66:67:29',
'66:67:30',
'66:67:31',
'66:67:32',
'66:67:33',
'66:67:34',
'66:67:35',
'66:67:36',
'66:67:37',
'66:67:38',
'66:67:39',
'66:67:40',
'66:67:41',
'66:67:42',
'66:67:43',
'66:67:44',
'66:67:45',
'66:67:46',
'66:67:47',
'66:67:48',
'66:67:49',
'66:67:50',
'66:67:51',
'66:67:52',
'66:67:53',
'66:67:54',
'66:67:55',
'66:67:56',
'66:67:57',
'66:67:58',
'66:67:59',
'66:67:60',
'66:67:61',
'66:67:62',
'66:67:63',
'66:67:64',
'66:67:65',
'66:67:66',
'66:67:67',
'66:67:68',
'66:67:69',
'66:67:70',
'66:67:71',
'66:67:72',
'66:67:73',
'66:67:74',
'66:67:75',
'66:67:76',
'66:67:77',
'66:67:78',
'66:67:79',
'66:67:80',
'66:67:81',
'66:67:82',
'66:67:83',
'66:67:84',
'66:67:85',
'66:67:86',
'66:67:87',
'66:67:88',
'66:67:89',
'66:67:90',
'66:67:91',
'66:67:92',
'66:67:93',
'66:67:94',
'66:67:95',
'66:67:96',
'66:67:97',
'66:67:98',
'66:67:99',
'66:68:00',
'66:68:01',
'66:68:02',
'66:68:03',
'66:68:04',
'66:68:05',
'66:68:06',
'66:68:07',
'66:68:08',
'66:68:09',
'66:68:10',
'66:68:11',
'66:68:12',
'66:68:13',
'66:68:14',
'66:68:15',
'66:68:16',
'66:68:17',
'66:68:18',
'66:68:19',
'66:68:20',
'66:68:21',
'66:68:22',
'66:68:23',
'66:68:24',
'66:68:25',
'66:68:26',
'66:68:27',
'66:68:28',
'66:68:29',
'66:68:30',
'66:68:31',
'66:68:32',
'66:68:33',
'66:68:34',
'66:68:35',
'66:68:36',
'66:68:37',
'66:68:38',
'66:68:39',
'66:68:40',
'66:68:41',
'66:68:42',
'66:68:43',
'66:68:44',
'66:68:45',
'66:68:46',
'66:68:47',
'66:68:48',
'66:68:49',
'66:68:50',
'66:68:51',
'66:68:52',
'66:68:53',
'66:68:54',
'66:68:55',
'66:68:56',
'66:68:57',
'66:68:58',
'66:68:59',
'66:68:60',
'66:68:61',
'66:68:62',
'66:68:63',
'66:68:64',
'66:68:65',
'66:68:66',
'66:68:67',
'66:68:68',
'66:68:69',
'66:68:70',
'66:68:71',
'66:68:72',
'66:68:73',
'66:68:74',
'66:68:75',
'66:68:76',
'66:68:77',
'66:68:78',
'66:68:79',
'66:68:80',
'66:68:81',
'66:68:82',
'66:68:83',
'66:68:84',
'66:68:85',
'66:68:86',
'66:68:87',
'66:68:88',
'66:68:89',
'66:68:90',
'66:68:91',
'66:68:92',
'66:68:93',
'66:68:94',
'66:68:95',
'66:68:96',
'66:68:97',
'66:68:98',
'66:68:99',
'66:69:00',
'66:69:01',
'66:69:02',
'66:69:03',
'66:69:04',
'66:69:05',
'66:69:06',
'66:69:07',
'66:69:08',
'66:69:09',
'66:69:10',
'66:69:11',
'66:69:12',
'66:69:13',
'66:69:14',
'66:69:15',
'66:69:16',
'66:69:17',
'66:69:18',
'66:69:19',
'66:69:20',
'66:69:21',
'66:69:22',
'66:69:23',
'66:69:24',
'66:69:25',
'66:69:26',
'66:69:27',
'66:69:28',
'66:69:29',
'66:69:30',
'66:69:31',
'66:69:32',
'66:69:33',
'66:69:34',
'66:69:35',
'66:69:36',
'66:69:37',
'66:69:38',
'66:69:39',
'66:69:40',
'66:69:41',
'66:69:42',
'66:69:43',
'66:69:44',
'66:69:45',
'66:69:46',
'66:69:47',
'66:69:48',
'66:69:49',
'66:69:50',
'66:69:51',
'66:69:52',
'66:69:53',
'66:69:54',
'66:69:55',
'66:69:56',
'66:69:57',
'66:69:58',
'66:69:59',
'66:69:60',
'66:69:61',
'66:69:62',
'66:69:63',
'66:69:64',
'66:69:65',
'66:69:66',
'66:69:67',
'66:69:68',
'66:69:69',
'66:69:70',
'66:69:71',
'66:69:72',
'66:69:73',
'66:69:74',
'66:69:75',
'66:69:76',
'66:69:77',
'66:69:78',
'66:69:79',
'66:69:80',
'66:69:81',
'66:69:82',
'66:69:83',
'66:69:84',
'66:69:85',
'66:69:86',
'66:69:87',
'66:69:88',
'66:69:89',
'66:69:90',
'66:69:91',
'66:69:92',
'66:69:93',
'66:69:94',
'66:69:95',
'66:69:96',
'66:69:97',
'66:69:98',
'66:69:99',
'66:70:00',
'66:70:01',
'66:70:02',
'66:70:03',
'66:70:04',
'66:70:05',
'66:70:06',
'66:70:07',
'66:70:08',
'66:70:09',
'66:70:10',
'66:70:11',
'66:70:12',
'66:70:13',
'66:70:14',
'66:70:15',
'66:70:16',
'66:70:17',
'66:70:18',
'66:70:19',
'66:70:20',
'66:70:21',
'66:70:22',
'66:70:23',
'66:70:24',
'66:70:25',
'66:70:26',
'66:70:27',
'66:70:28',
'66:70:29',
'66:70:30',
'66:70:31',
'66:70:32',
'66:70:33',
'66:70:34',
'66:70:35',
'66:70:36',
'66:70:37',
'66:70:38',
'66:70:39',
'66:70:40',
'66:70:41',
'66:70:42',
'66:70:43',
'66:70:44',
'66:70:45',
'66:70:46',
'66:70:47',
'66:70:48',
'66:70:49',
'66:70:50',
'66:70:51',
'66:70:52',
'66:70:53',
'66:70:54',
'66:70:55',
'66:70:56',
'66:70:57',
'66:70:58',
'66:70:59',
'66:70:60',
'66:70:61',
'66:70:62',
'66:70:63',
'66:70:64',
'66:70:65',
'66:70:66',
'66:70:67',
'66:70:68',
'66:70:69',
'66:70:70',
'66:70:71',
'66:70:72',
'66:70:73',
'66:70:74',
'66:70:75',
'66:70:76',
'66:70:77',
'66:70:78',
'66:70:79',
'66:70:80',
'66:70:81',
'66:70:82',
'66:70:83',
'66:70:84',
'66:70:85',
'66:70:86',
'66:70:87',
'66:70:88',
'66:70:89',
'66:70:90',
'66:70:91',
'66:70:92',
'66:70:93',
'66:70:94',
'66:70:95',
'66:70:96',
'66:70:97',
'66:70:98',
'66:70:99',
'66:71:00',
'66:71:01',
'66:71:02',
'66:71:03',
'66:71:04',
'66:71:05',
'66:71:06',
'66:71:07',
'66:71:08',
'66:71:09',
'66:71:10',
'66:71:11',
'66:71:12',
'66:71:13',
'66:71:14',
'66:71:15',
'66:71:16',
'66:71:17',
'66:71:18',
'66:71:19',
'66:71:20',
'66:71:21',
'66:71:22',
'66:71:23',
'66:71:24',
'66:71:25',
'66:71:26',
'66:71:27',
'66:71:28',
'66:71:29',
'66:71:30',
'66:71:31',
'66:71:32',
'66:71:33',
'66:71:34',
'66:71:35',
'66:71:36',
'66:71:37',
'66:71:38',
'66:71:39',
'66:71:40',
'66:71:41',
'66:71:42',
'66:71:43',
'66:71:44',
'66:71:45',
'66:71:46',
'66:71:47',
'66:71:48',
'66:71:49',
'66:71:50',
'66:71:51',
'66:71:52',
'66:71:53',
'66:71:54',
'66:71:55',
'66:71:56',
'66:71:57',
'66:71:58',
'66:71:59',
'66:71:60',
'66:71:61',
'66:71:62',
'66:71:63',
'66:71:64',
'66:71:65',
'66:71:66',
'66:71:67',
'66:71:68',
'66:71:69',
'66:71:70',
'66:71:71',
'66:71:72',
'66:71:73',
'66:71:74',
'66:71:75',
'66:71:76',
'66:71:77',
'66:71:78',
'66:71:79',
'66:71:80',
'66:71:81',
'66:71:82',
'66:71:83',
'66:71:84',
'66:71:85',
'66:71:86',
'66:71:87',
'66:71:88',
'66:71:89',
'66:71:90',
'66:71:91',
'66:71:92',
'66:71:93',
'66:71:94',
'66:71:95',
'66:71:96',
'66:71:97',
'66:71:98',
'66:71:99',
'66:72:00',
'66:72:01',
'66:72:02',
'66:72:03',
'66:72:04',
'66:72:05',
'66:72:06',
'66:72:07',
'66:72:08',
'66:72:09',
'66:72:10',
'66:72:11',
'66:72:12',
'66:72:13',
'66:72:14',
'66:72:15',
'66:72:16',
'66:72:17',
'66:72:18',
'66:72:19',
'66:72:20',
'66:72:21',
'66:72:22',
'66:72:23',
'66:72:24',
'66:72:25',
'66:72:26',
'66:72:27',
'66:72:28',
'66:72:29',
'66:72:30',
'66:72:31',
'66:72:32',
'66:72:33',
'66:72:34',
'66:72:35',
'66:72:36',
'66:72:37',
'66:72:38',
'66:72:39',
'66:72:40',
'66:72:41',
'66:72:42',
'66:72:43',
'66:72:44',
'66:72:45',
'66:72:46',
'66:72:47',
'66:72:48',
'66:72:49',
'66:72:50',
'66:72:51',
'66:72:52',
'66:72:53',
'66:72:54',
'66:72:55',
'66:72:56',
'66:72:57',
'66:72:58',
'66:72:59',
'66:72:60',
'66:72:61',
'66:72:62',
'66:72:63',
'66:72:64',
'66:72:65',
'66:72:66',
'66:72:67',
'66:72:68',
'66:72:69',
'66:72:70',
'66:72:71',
'66:72:72',
'66:72:73',
'66:72:74',
'66:72:75',
'66:72:76',
'66:72:77',
'66:72:78',
'66:72:79',
'66:72:80',
'66:72:81',
'66:72:82',
'66:72:83',
'66:72:84',
'66:72:85',
'66:72:86',
'66:72:87',
'66:72:88',
'66:72:89',
'66:72:90',
'66:72:91',
'66:72:92',
'66:72:93',
'66:72:94',
'66:72:95',
'66:72:96',
'66:72:97',
'66:72:98',
'66:72:99',
'66:73:00',
'66:73:01',
'66:73:02',
'66:73:03',
'66:73:04',
'66:73:05',
'66:73:06',
'66:73:07',
'66:73:08',
'66:73:09',
'66:73:10',
'66:73:11',
'66:73:12',
'66:73:13',
'66:73:14',
'66:73:15',
'66:73:16',
'66:73:17',
'66:73:18',
'66:73:19',
'66:73:20',
'66:73:21',
'66:73:22',
'66:73:23',
'66:73:24',
'66:73:25',
'66:73:26',
'66:73:27',
'66:73:28',
'66:73:29',
'66:73:30',
'66:73:31',
'66:73:32',
'66:73:33',
'66:73:34',
'66:73:35',
'66:73:36',
'66:73:37',
'66:73:38',
'66:73:39',
'66:73:40',
'66:73:41',
'66:73:42',
'66:73:43',
'66:73:44',
'66:73:45',
'66:73:46',
'66:73:47',
'66:73:48',
'66:73:49',
'66:73:50',
'66:73:51',
'66:73:52',
'66:73:53',
'66:73:54',
'66:73:55',
'66:73:56',
'66:73:57',
'66:73:58',
'66:73:59',
'66:73:60',
'66:73:61',
'66:73:62',
'66:73:63',
'66:73:64',
'66:73:65',
'66:73:66',
'66:73:67',
'66:73:68',
'66:73:69',
'66:73:70',
'66:73:71',
'66:73:72',
'66:73:73',
'66:73:74',
'66:73:75',
'66:73:76',
'66:73:77',
'66:73:78',
'66:73:79',
'66:73:80',
'66:73:81',
'66:73:82',
'66:73:83',
'66:73:84',
'66:73:85',
'66:73:86',
'66:73:87',
'66:73:88',
'66:73:89',
'66:73:90',
'66:73:91',
'66:73:92',
'66:73:93',
'66:73:94',
'66:73:95',
'66:73:96',
'66:73:97',
'66:73:98',
'66:73:99'
] |
""" Utilities for generating and retrieving image thumbnails """
from hashlib import md5
from io import BytesIO, IOBase
from logging import getLogger
from os import makedirs, scandir
from os.path import dirname, isfile, join, normpath, getsize, splitext
from shutil import copyfileobj, rmtree
from typing import BinaryIO, Optional, Tuple, Union
import requests
from PIL import Image
from PIL.ImageOps import exif_transpose, flip
from tesseractXplore.constants import (
EXIF_ORIENTATION_ID,
THUMBNAILS_DIR,
THUMBNAIL_SIZE_DEFAULT,
THUMBNAIL_SIZES,
THUMBNAIL_DEFAULT_FORMAT,
)
from tesseractXplore.validation import format_file_size
logger = getLogger().getChild(__name__)
def get_thumbnail(source: str, **kwargs) -> str:
    """
    Return a cached thumbnail for an image, generating a new one when no
    cached copy exists. See :py:func:`.generate_thumbnail` for size options.

    Args:
        source: File path or URI for image source

    Returns:
        Path to thumbnail image
    """
    cached_path = get_thumbnail_path(source)
    # Only fall back to generation when there is no cached file
    if not isfile(cached_path):
        return generate_thumbnail(source, cached_path, **kwargs)
    return cached_path
def get_thumbnail_if_exists(source: str) -> Optional[str]:
    """
    Look up a cached thumbnail for an image without ever generating one.

    Args:
        source: File path or URI for image source

    Returns:
        The path of the existing thumbnail, if found; otherwise ``None``
    """
    if not source:
        return None
    thumbnail_path = get_thumbnail_path(source)
    if isfile(thumbnail_path):
        logger.debug(f'Found existing thumbnail for {source}')
        return thumbnail_path
    # The source itself may already live in the thumbnail cache, or be a
    # Kivy atlas resource; either way it needs no further processing
    if normpath(dirname(source)) == normpath(THUMBNAILS_DIR) or source.startswith('atlas://'):
        logger.debug(f'Image is already a thumbnail: {source}')
        return source
    return None
def get_thumbnail_hash(source: str) -> str:
    """ Get a unique string based on the source to use as a filename or atlas resource ID """
    # Accept either str or bytes; hash the UTF-8 bytes either way
    encoded = source if isinstance(source, bytes) else source.encode()
    return md5(encoded).hexdigest()
def get_thumbnail_size(size: str) -> Tuple[int, int]:
    """ Look up one of the predefined thumbnail dimensions from a size string

    Args:
        size: One of: 'small', 'medium', 'large'

    Returns:
        X and Y dimensions of thumbnail size
    """
    # Unknown size strings fall back to the default dimensions
    try:
        return THUMBNAIL_SIZES[size]
    except KeyError:
        return THUMBNAIL_SIZE_DEFAULT
def get_thumbnail_path(source: str) -> str:
    """
    Build the thumbnail filename from a hash of the original file path

    Args:
        source: File path or URI for image source
    """
    # Ensure the cache directory exists before handing out paths into it
    makedirs(THUMBNAILS_DIR, exist_ok=True)
    filename = '{}.{}'.format(get_thumbnail_hash(source), get_format(source))
    return join(THUMBNAILS_DIR, filename)
def get_format(source: str) -> str:
    """
    Derive an image format from a file extension, handling various edge cases

    Args:
        source: File path or URI for image source

    Returns:
        Format, if found; otherwise, defaults to ``jpg``
    """
    path = source.decode('utf-8') if isinstance(source, bytes) else source
    # Strip off request params if path is a URL
    path = path.split('?')[0]
    extension = splitext(path)[-1] or THUMBNAIL_DEFAULT_FORMAT
    # Note: PIL only accepts 'jpeg' (not 'jpg'), and Kivy is the opposite
    normalized = extension.lower().replace('.', '').replace('jpeg', 'jpg')
    return normalized or 'jpg'
def generate_thumbnail_from_url(url: str, size: str):
    """ Like :py:func:`.generate_thumbnail`, but downloads an image from a URL

    Args:
        url: Remote image URL
        size: One of: 'small', 'medium', 'large'

    Returns:
        The generated thumbnail path on success, or ``None`` if the download
        or thumbnail generation failed
    """
    logger.info(f'Downloading: {url}')
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        image_bytes = BytesIO()
        # Ensure gzip/deflate content is decompressed while streaming
        r.raw.decode_content = True
        copyfileobj(r.raw, image_bytes)
        # Bug fix: propagate the thumbnail path from generate_thumbnail_from_bytes
        # instead of silently discarding it (previously always returned None)
        return generate_thumbnail_from_bytes(image_bytes, url, size=size, default_flip=False)
    else:
        logger.info(f'Request failed: {str(r)}')
        return None
def generate_thumbnail_from_bytes(image_bytes, source: str, **kwargs):
    """ Like :py:func:`.generate_thumbnail`, but takes raw image bytes instead of a path """
    image_bytes.seek(0)
    # Resolve format and destination first (this also creates the cache dir)
    fmt = get_format(source)
    destination = get_thumbnail_path(source)
    if not image_bytes.getvalue():
        logger.error(f'Failed to save image bytes to thumbnail for {source}')
        return None
    return generate_thumbnail(image_bytes, destination, fmt=fmt, **kwargs)
def generate_thumbnail(
    source: Union[BinaryIO, str],
    thumbnail_path: str,
    fmt: str = None,
    size: str = 'medium',
    default_flip: bool = True,
):
    """
    Generate and store a thumbnail from the source image

    Args:
        source (str): File path, URI, or file-like object for image source
        thumbnail_path (str): Destination path for thumbnail
        fmt (str): Image format to specify to PIL, if it can't be auto-detected
        size (str): One of: 'small', 'medium', 'large'
        default_flip (bool): Vertically flip the image when it has no EXIF orientation

    Returns:
        str: The path of the new thumbnail, or the original source if
        generation failed
    """
    target_size = get_thumbnail_size(size)
    logger.info(f'Generating {target_size} thumbnail for {source}:\n {thumbnail_path}')

    # Resize if necessary, or just copy the image to the cache if it's already thumbnail size
    try:
        image = get_orientated_image(source, default_flip=default_flip)
        if image.size[0] > target_size[0] or image.size[1] > target_size[1]:
            image.thumbnail(target_size)
        else:
            logger.debug(f'Image is already thumbnail size: ({image.size})')
        # PIL expects 'jpeg' rather than 'jpg'
        image.save(thumbnail_path, format=fmt.replace('jpg', 'jpeg') if fmt else None)
        return thumbnail_path
    # If we're unable to generate a thumbnail, just return the original image source.
    # Bug fix: PIL raises OSError subclasses (e.g. UnidentifiedImageError) for
    # unreadable images; catching only RuntimeError let those escape.
    except (OSError, RuntimeError):
        logger.exception('Failed to generate thumbnail')
        return source
def get_orientated_image(source, default_flip: bool = True) -> Image.Image:
    """
    Load and rotate/transpose image according to EXIF orientation, if any. If missing orientation
    and the image was fetched from iNat, it will be vertically mirrored. (?)

    Args:
        source: File path or file-like object for the image
        default_flip: Vertically flip the image when no EXIF orientation tag is present

    Returns:
        The loaded (and possibly reoriented) PIL image
    """
    image = Image.open(source)
    exif = image.getexif()
    # Honor the EXIF orientation tag when present
    if exif.get(EXIF_ORIENTATION_ID):
        image = exif_transpose(image)
    # TODO: In the future there may be more cases than just local images and remote images from iNat
    # NOTE(review): a file-like source (IOBase) appears to be assumed to come from a
    # remote download needing a mirror -- confirm against generate_thumbnail_from_url
    elif default_flip and isinstance(source, IOBase):
        image = flip(image)
    return image
def delete_thumbnails():
    """Delete all cached thumbnails"""
    # ignore_errors: an absent cache directory is not an error here;
    # exist_ok matches how the rest of the module creates the directory
    rmtree(THUMBNAILS_DIR, ignore_errors=True)
    makedirs(THUMBNAILS_DIR, exist_ok=True)
def get_thumbnail_cache_size() -> Tuple[int, str]:
    """Report the current thumbnail cache size as a file count and a
    human-readable total byte size
    """
    makedirs(THUMBNAILS_DIR, exist_ok=True)
    # Only count regular files, not subdirectories
    entries = [entry for entry in scandir(THUMBNAILS_DIR) if isfile(entry)]
    total_bytes = sum(getsize(entry) for entry in entries)
    return len(entries), format_file_size(total_bytes)
def flip_all(path: str):
    """ Vertically flip every image in a directory. Mainly for debugging purposes. """
    from tesseractXplore.image_glob import get_images_from_dir
    for image_path in get_images_from_dir(path):
        flipped = flip(Image.open(image_path))
        # Overwrite the original file with the flipped copy
        flipped.save(image_path)
        flipped.close()
def to_monochrome(source, fmt):
    """ Convert an image to monochrome, overwriting it in place

    Args:
        source: File path of the image to convert
        fmt: Image format to specify to PIL ('jpg' is translated to 'jpeg')

    Returns:
        The (unchanged) source path
    """
    img = Image.open(source)
    # Bug fix: Image.convert() returns a new image rather than converting in
    # place; without reassignment the saved file was never monochrome
    img = img.convert(mode='1')
    img.save(source, format=fmt.replace('jpg', 'jpeg') if fmt else None)
    return source
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
assemble.py
This module finds and forms essential structure components, which are the
smallest building blocks that form every repeat in the song.
These functions ensure that each time step of a song is contained in at most
one of the song's essential structure components by checking that there are no
overlapping repeats in time. When repeats overlap, they undergo a process
where they are divided until there are only non-overlapping pieces left.
The module contains the following functions:
* breakup_overlaps_by_intersect
Extracts repeats in input_pattern_obj that has the starting indices
of the repeats, into the essential structure components using bw_vec,
that has the lengths of each repeat.
* check_overlaps
Compares every pair of groups, determining if there are any repeats
in any pairs of the groups that overlap.
* __compare_and_cut
Compares two rows of repeats labeled RED and BLUE, and determines if
there are any overlaps in time between them. If there are overlaps,
we cut the repeats in RED and BLUE into up to 3 pieces.
* __num_of_parts
Determines the number of blocks of consecutive time steps in a list
of time steps. A block of consecutive time steps represents a
distilled section of a repeat.
* __inds_to_rows
Expands a vector containing the starting indices of a piece or two
of a repeat into a matrix representation recording when these pieces
occur in the song with 1's. All remaining entries are marked with
0's.
* __merge_based_on_length
Merges repeats that are the same length, as set by full_bandwidth,
and are repeats of the same piece of structure.
* __merge_rows
Merges rows that have at least one common repeat. These common
repeat(s) must occur at the same time step and be of a common length.
* hierarchical_structure
Distills the repeats encoded in matrix_no_overlaps (and key_no_overlaps)
to the essential structure components and then builds the hierarchical
representation. Optionally outputs visualizations of the hierarchical
representations.
"""
import numpy as np
from inspect import signature
from .search import find_all_repeats, find_complete_list_anno_only
from .utilities import reconstruct_full_block, get_annotation_lst, get_y_labels
from .transform import remove_overlaps
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
def breakup_overlaps_by_intersect(input_pattern_obj, bw_vec, thresh_bw):
    """
    Extracts repeats in input_pattern_obj that has the starting indices of the
    repeats, into the essential structure components using bw_vec, that has the
    lengths of each repeat. The essential structure components are the
    smallest building blocks that form every repeat in the song.

    Args
    ----
    input_pattern_obj : np.ndarray
        Binary matrix with 1's where repeats begin
        and 0's otherwise.
    bw_vec : np.ndarray
        Vector containing the lengths of the repeats
        encoded in input_pattern_obj.
    thresh_bw : int
        Smallest allowable repeat length.

    Returns
    -------
    pattern_no_overlaps : np.ndarray
        Binary matrix with 1's where repeats of
        essential structure components begin.
    pattern_no_overlaps_key : np.ndarray
        Vector containing the lengths of the repeats
        of essential structure components in
        pattern_no_overlaps.
    """
    # NOTE(review): this function always declares exactly 3 parameters, so
    # len(params) < 3 can never be true and T is always thresh_bw. Looks like
    # leftover optional-argument emulation -- confirm intent before removing.
    sig = signature(breakup_overlaps_by_intersect)
    params = sig.parameters
    if len(params) < 3:
        T = 0
    else:
        T = thresh_bw
    if bw_vec.ndim == 1:
        # Convert a 1D array into a 2D column vector
        bw_vec = bw_vec[None, :].reshape(-1, 1)
    # Initialize input_pattern_obj
    pno = input_pattern_obj
    # Sort bw_vec and pattern_no_overlaps (pno) so that we process the
    # biggest pieces first
    # Part 1: Sort the lengths in bw_vec in descending order
    desc_bw_vec = np.sort(bw_vec)[::-1]  # [::-1] reverses the row order
    # Part 2: Sort the indices of bw_vec in descending order
    bw_inds = np.flip(np.argsort(bw_vec, axis=0))
    row_bw_inds = np.transpose(bw_inds).flatten()
    # Reorder the repeat rows to match the descending-length order
    pno = pno[row_bw_inds, :]
    # Rows at index T_inds and beyond hold repeats of length <= T and are
    # excluded from the overlap check below
    T_inds = np.nonzero(bw_vec == T)
    T_inds = np.array(T_inds) - 1
    if T_inds.size == 0:
        T_inds = max(bw_vec.shape)
    pno_block = reconstruct_full_block(pno, desc_bw_vec)
    # Check stopping condition -- Are there overlaps?
    while np.sum(np.sum(pno_block[:T_inds, :], axis=0) > 1) > 0:
        # Find all overlaps by comparing the rows of repeats pairwise
        overlaps_pno_block = check_overlaps(pno_block)
        # Remove the rows with bandwidth T or less from consideration
        overlaps_pno_block[T_inds:, ] = 0
        overlaps_pno_block[:, T_inds:] = 0
        # Find the first two groups of repeats that overlap, calling one group
        # RED and the other group BLUE
        [ri, bi] = overlaps_pno_block.nonzero()
        ri = ri[0]
        bi = bi[0]
        # RED overlap: the row of repeat starts and its repeat length
        red = pno[ri, :]
        RL = desc_bw_vec[ri, :]
        # BLUE overlap: the row of repeat starts and its repeat length
        blue = pno[bi, :]
        BL = desc_bw_vec[bi, :]
        # Compare the repeats in RED and BLUE, cutting the repeats in those
        # groups into non-overlapping pieces
        union_mat, union_length = __compare_and_cut(red, RL, blue, BL)
        # Drop the two rows we just cut apart
        pno = np.delete(pno, [ri, bi], axis=0)
        bw_vec = np.delete(desc_bw_vec, [ri, bi], axis=0)
        # Stack the new repeats
        if union_mat.size != 0:
            pno = np.vstack((pno, union_mat))
            bw_vec = np.vstack((bw_vec, union_length))
        # Check there are any repeats of length 1 that should be merged into
        # other groups of repeats of length 1 and merge them if necessary
        if sum(union_length == 1) > 0:
            pno, bw_vec = __merge_based_on_length(pno, bw_vec, 1)
        # AGAIN, Sort bw_vec and pno so that we process the biggest
        # pieces first
        # Part 1: Sort the lengths in bw_vec and indices in descending order
        desc_bw_vec = np.sort(bw_vec, axis=0)[::-1]
        bw_inds = np.flip(np.argsort(bw_vec, axis=0))
        row_bw_inds = np.transpose(bw_inds).flatten()
        pno = pno[row_bw_inds, :]
        # Find the first row that contains repeats of length less than T and
        # remove these rows from consideration during the next check of the
        # stopping condition
        # NOTE(review): np.amin of a boolean array yields 0 or 1, not an
        # index -- this differs from the np.nonzero logic used before the
        # loop; confirm against the reference implementation.
        T_inds = np.amin(desc_bw_vec == T) - 1
        if T_inds < 0:
            T_inds = np.array([])
        else:
            T_inds = np.array(T_inds)  # T_inds is converted into an array
        if T_inds.size == 0:
            T_inds = max(desc_bw_vec.shape)
        pno_block = reconstruct_full_block(pno, desc_bw_vec)
    # Sort the lengths in bw_vec in ascending order
    bw_vec = np.sort(desc_bw_vec, axis=0)
    # Sort the indices of bw_vec in ascending order
    bw_inds = np.argsort(desc_bw_vec, axis=0)
    # Reorder rows to ascending length; reshape flattens the extra axis
    # introduced by indexing with a column vector
    pattern_no_overlaps = pno[bw_inds, :].reshape((pno.shape[0], -1))
    pattern_no_overlaps_key = bw_vec
    output = (pattern_no_overlaps, pattern_no_overlaps_key)
    return output
def check_overlaps(input_mat):
    """
    Compares every pair of groups and determines if there are any repeats in
    any pairs of the groups that overlap.

    Args
    ----
    input_mat : np.array[int]
        Matrix to be checked for overlaps.

    Returns
    -------
    overlaps_yn : np.array[bool]
        Logical array where (i,j) = 1 if row i of input matrix and row j
        of input matrix overlap and (i,j) = 0 elsewhere.
    """
    # If input_mat is not binary, binarize it so the overlap test is
    # "both rows nonzero at the same time step"
    binary_mat = (input_mat > 0).astype(int)
    # Entry (i, j) of the Gram matrix counts the time steps where rows i and
    # j are both nonzero; a single matrix product replaces the original
    # O(rows^2 * cols) pairwise Python loops with the same result.
    overlap_mat = (binary_mat @ binary_mat.T) > 0
    # If overlap_mat is symmetric, only keep the upper-triangular portion.
    # If not, keep all of overlap_mat.
    check_mat = np.allclose(overlap_mat, overlap_mat.T)
    if check_mat:
        overlap_mat = np.triu(overlap_mat, 1)
    overlaps_yn = overlap_mat
    return overlaps_yn
def __compare_and_cut(red, red_len, blue, blue_len):
    """
    Compares two rows of repeats labeled RED and BLUE, and determines if there
    are any overlaps in time between them. If there is, then we cut the
    repeats in RED and BLUE into up to 3 pieces.

    Args
    ----
    red : np.ndarray
        Binary row vector encoding a set of repeats with 1's where each
        repeat starts and 0's otherwise.
    red_len : int
        Length of repeats encoded in red.
    blue : np.ndarray
        Binary row vector encoding a set of repeats with 1's where each
        repeat starts and 0's otherwise.
    blue_len : int
        Length of repeats encoded in blue.

    Returns
    -------
    union_mat : np.ndarray
        Binary matrix representation of up to three rows encoding
        non-overlapping repeats cut from red and blue.
    union_length : np.ndarray
        Vector containing the lengths of the repeats encoded in union_mat.

    NOTE(review): union_mat/union_length are only assigned when the two rows
    actually intersect -- callers only invoke this on overlapping rows;
    confirm the no-intersection path is truly unreachable.
    """
    # Find the total time steps in red
    sn = red.shape[0]
    # Both rows must span the same number of time steps
    assert sn == blue.shape[0]
    # Find all starting indices in red and store them as a 2d array
    start_red = np.flatnonzero(red)
    start_red = start_red[None, :]
    # Find all starting indices in blue and store them as a 2d array
    start_blue = np.flatnonzero(blue)
    start_blue = start_blue[None, :]
    # Determine if the rows have any intersections
    red_block = reconstruct_full_block(red, red_len)
    blue_block = reconstruct_full_block(blue, blue_len)
    # Find the intersection of red and blue
    red_block = red_block > 0
    blue_block = blue_block > 0
    purple_block = np.logical_and(red_block, blue_block)
    # If there is any intersection between the rows, then start comparing one
    # repeat in red to one repeat in blue
    if purple_block.sum() > 0:
        # Find the number of blocks in red and in blue
        lsr = max(start_red.shape)
        lsb = max(start_blue.shape)
        # Build the pairs of starting indices to search, where each pair
        # contains a starting index in red and a starting index in blue
        red_inds = np.tile(start_red.transpose(), (lsb, 1))
        blue_inds = np.tile(start_blue, (lsr, 1))
        # Flatten blue_inds column-by-column into a column vector; the seed
        # element is duplicated by the first vstack and removed afterwards
        tem_blue = blue_inds[0][0]
        for i in range(0, blue_inds.shape[1]):
            for j in range(0, blue_inds.shape[0]):
                tem_blue = np.vstack((tem_blue, blue_inds[j][i]))
        tem_blue = np.delete(tem_blue, 1, 0)
        # Column 0 holds blue starts, column 1 holds red starts
        compare_inds = np.concatenate((tem_blue, red_inds), axis=1)
        # Initialize the output variables union_mat and union_length
        union_mat = np.array([])
        union_length = np.array([])
        # Loop over all pairs of starting indices
        for start_ind in range(0, lsr * lsb):
            # Isolate one repeat in red and one repeat in blue
            ri = compare_inds[start_ind, 1]
            bi = compare_inds[start_ind, 0]
            # The full time-step ranges covered by each repeat
            red_ri = np.arange(ri, ri + red_len)
            blue_bi = np.arange(bi, bi + blue_len)
            # Determine if the blocks intersect and call the intersection
            # purple
            purple = np.intersect1d(red_ri, blue_bi)
            if purple.size != 0:
                # Remove purple from red_ri, call it red_minus_purple
                red_minus_purple = np.setdiff1d(red_ri, purple)
                # If red_minus_purple is not empty, then see if there are one
                # or two parts in red_minus_purple.
                # Then cut purple out of all of the repeats in red.
                if red_minus_purple.size != 0:
                    # red_length_vec will have the length(s) of the parts in
                    # new_red
                    red_start_mat, red_length_vec = __num_of_parts(
                        red_minus_purple, ri, start_red
                    )
                    # If there are two parts left in red_minus_purple, then
                    # the new variable new_red, which holds the part(s) of
                    # red_minus_purple, should have two rows with 1's for the
                    # starting indices of the resulting pieces and 0's
                    # elsewhere.
                    new_red = __inds_to_rows(red_start_mat, sn)
                else:
                    # If red_minus_purple is empty, then set new_red and
                    # red_length_vec to empty
                    new_red = np.array([])
                    red_length_vec = np.array([])
                # Noting that purple is only one part and in both red_ri and
                # blue_bi, then we need to find where the purple starting
                # indices are in all the red_ri
                purple_in_red_mat, purple_length_vec = __num_of_parts(
                    purple, ri, start_red
                )
                blue_minus_purple = np.setdiff1d(blue_bi, purple)
                # If blue_minus_purple is not empty, then see if there are one
                # or two parts in blue_minus_purple. Then cut purple out of
                # all of the repeats in blue.
                if blue_minus_purple.size != 0:
                    blue_start_mat, blue_length_vec = __num_of_parts(
                        blue_minus_purple, bi, start_blue
                    )
                    # If there are two parts left in blue_minus_purple, then the
                    # new variable new_blue, which holds the part(s) of
                    # blue_minus_purple, should have two rows with 1's for the
                    # starting indices of the resulting pieces and 0's elsewhere.
                    new_blue = __inds_to_rows(blue_start_mat, sn)
                else:
                    # If blue_minus_purple is empty, then set new_blue and
                    # blue_length_vec to empty
                    new_blue = np.array([])
                    # Also blue_length_vec will have the length(s) of the
                    # parts in new_blue.
                    blue_length_vec = np.array([])
                # Recalling that purple is only one part and in both red_rd
                # and blue_bi, then we need to find where the purple starting
                # indices are in all the blue_ri
                purple_in_blue_mat, purple_length = __num_of_parts(
                    purple, bi, start_blue
                )
                # Union purple_in_red_mat and purple_in_blue_mat to get
                # purple_start, which stores all the purple indices
                purple_start = np.union1d(purple_in_red_mat[0],
                                          purple_in_blue_mat[0])
                # Use purple_start to get new_purple with 1's where the repeats
                # in the purple rows start and 0 otherwise.
                new_purple = __inds_to_rows(purple_start, sn)
                if new_red.size != 0 or new_blue.size != 0:
                    # Form the outputs
                    # Use the condition check to avoid errors when stacking
                    # an empty array
                    if new_red.size != 0 and new_blue.size == 0:
                        union_mat = np.vstack((new_red, new_purple))
                        union_length = np.vstack((red_length_vec,
                                                  purple_length))
                    elif new_red.size == 0 and new_blue.size != 0:
                        union_mat = np.vstack((new_blue, new_purple))
                        union_length = np.vstack((blue_length_vec,
                                                  purple_length))
                    else:
                        union_mat = np.vstack((new_red, new_blue, new_purple))
                        union_length = np.vstack(
                            (red_length_vec, blue_length_vec, purple_length)
                        )
                    # Merge repeats that are the same length
                    union_mat, union_length = __merge_based_on_length(
                        union_mat, union_length, union_length
                    )
                    # When we find union_mat and union_length in this group,
                    # we break out of the for loop to add them to our final
                    # output
                    break
                elif new_red.size == 0 and new_blue.size == 0:
                    new_purple_block = reconstruct_full_block(
                        new_purple, np.array([purple_length])
                    )
                    # Only add the new repeat which has no overlaps
                    if max(new_purple_block[0]) < 2:
                        union_mat = new_purple
                        union_length = np.array([purple_length])
                        break
        # Check that there are no overlaps in each row of union_mat
        union_mat_add = np.empty((0, sn), int)
        union_mat_add_length = np.empty((0, 1), int)
        union_mat_rminds = np.empty((0, 1), int)
        # Isolate one row at a time, call it union_row
        for i in range(0, union_mat.shape[0]):
            union_row = union_mat[i, :]
            union_row_width = np.array([union_length[i]])
            union_row_block = reconstruct_full_block(union_row, union_row_width)
            # If there is at least one overlap, then compare and cut that row
            # until there are no overlaps
            if (np.sum(union_row_block[0] > 1)) > 0:
                union_mat_rminds = np.vstack((union_mat_rminds, i))
                # Recurse on the row against itself to split its own overlaps
                union_row_new, union_row_new_length = __compare_and_cut(
                    union_row, union_row_width, union_row, union_row_width
                )
                # Add union_row_new and union_row_new_length to union_mat_add and
                # union_mat_add_length, respectively
                union_mat_add = np.vstack((union_mat_add, union_row_new))
                union_mat_add_length = np.vstack(
                    (union_mat_add_length, union_row_new_length)
                )
        # Remove the old rows from union_mat (as well as the old lengths from
        # union_length)
        if union_mat_rminds.size != 0:
            union_mat = np.delete(union_mat, union_mat_rminds, axis=0)
            union_length = np.delete(union_length, union_mat_rminds)
        # Add union_row_new and union_row_new_length to union_mat and
        # union_length, respectively, such that union_mat is in order by
        # lengths in union_length
        if union_mat_add.size != 0:
            union_mat = np.vstack((union_mat, union_mat_add))
        if union_mat_add_length.size != 0:
            union_length = np.vstack((np.array([union_length]).T,
                                      union_mat_add_length))
        # Make sure union_length is a 2d vector
        if union_length.ndim == 1:
            union_length = np.array([union_length]).T
        if union_mat.size != 0:
            total_array = np.hstack((union_mat, union_length))
            # Sort the total_array and form the final output
            total_array = total_array[np.argsort(total_array[:, -1])]
            union_mat = total_array[:, 0:sn]
            union_length = np.array([total_array[:, -1]]).T
    output = (union_mat, union_length)
    return output
def __num_of_parts(input_vec, input_start, input_all_starts):
    """
    Determines the number of blocks of consecutive
    time steps in a list of time steps. A block of consecutive time steps
    represents a distilled section of a repeat. This distilled section will be
    replicated and the starting indices of the repeats within it will be
    returned.

    Args
    ----
    input_vec : np.ndarray
        Vector that contains one or two parts of a repeat that are
        overlap(s) in time that may need to be replicated
    input_start : np.ndarray
        Starting index for the part to be replicated.
    input_all_starts : np.ndarray
        Starting indices for replication.

    Returns
    -------
    start_mat : np.ndarray
        Array of one or two rows, containing the starting indices of the
        replicated repeats.
    length_vec : np.ndarray
        Column vector containing the lengths of the replicated parts.
    """
    # Determine where input_vec has a break
    diff_vec = np.subtract(input_vec[1:], input_vec[:-1])
    diff_vec = np.insert(diff_vec, 0, 1)
    break_mark = np.where(diff_vec > 1)[0]
    # If input_vec is consecutive
    # (index 0 can never be a break since diff_vec[0] is the inserted 1,
    # so sum(break_mark) == 0 exactly when break_mark is empty)
    if sum(break_mark) == 0:
        # Initialize start_vec and end_vec
        start_vec = input_vec[0]
        end_vec = input_vec[-1]
        # Find the difference between the starts
        add_vec = start_vec - input_start
        # Find the new start of the distilled section
        start_mat = input_all_starts + add_vec
    # Else if input_vec has a break
    # NOTE(review): this branch assumes exactly one break (two parts), per
    # the docstring; more than one break would misuse the (2, 1) buffers --
    # confirm callers guarantee at most two parts.
    else:
        # Initialize start_vec and end_vec
        start_vec = np.zeros((2, 1))
        end_vec = np.zeros((2, 1))
        # Find the start and end time step of the first part
        start_vec[0] = input_vec[0]
        end_vec[0] = input_vec[break_mark - 1]
        # Find the start and end time step of the second part
        start_vec[1] = input_vec[break_mark]
        end_vec[1] = input_vec[-1]
        # Find the difference between the starts
        add_vec = np.array(start_vec - input_start).astype(int)
        # Make sure input_all_starts contains only integers
        input_all_starts = np.array(input_all_starts).astype(int)
        # Create start_mat with two parts
        start_mat = np.vstack(
            (input_all_starts + add_vec[0], input_all_starts + add_vec[1])
        )
    # Get the length of the new repeats
    # (scalar-shaped in the consecutive branch, (2, 1) in the break branch)
    length_vec = (end_vec - start_vec + 1).astype(int)
    # Create output
    output = (start_mat, length_vec)
    return output
def __inds_to_rows(start_mat, row_length):
    """
    Expands a vector of starting indices for one or two pieces of a repeat
    into a binary matrix: each row marks where its piece occurs in the song
    with 1's, and every other entry is 0.

    Args
    ----
    start_mat : np.ndarray
        Matrix of one or two rows, containing the starting indices.
    row_length : int
        Length of the rows.

    Returns
    -------
    new_mat : np.ndarray
        Matrix of one or two rows, with 1's where the starting indices
        and 0's otherwise.
    """
    # Promote a 1D index vector to a single-row 2D array
    starts = np.atleast_2d(start_mat)
    expanded = np.zeros((starts.shape[0], row_length))
    # Mark each row's starting indices with 1
    for row_num, row_starts in enumerate(starts):
        expanded[row_num, row_starts] = 1
    return expanded.astype(int)
def __merge_based_on_length(full_mat, full_bw, target_bw):
    """
    Merges repeats that are the same length, as set by full_bandwidth,
    and are repeats of the same piece of structure.

    Args
    ----
    full_mat : np.ndarray
        Binary matrix with ones where repeats start and zeroes otherwise.
    full_bw : np.ndarray
        Length of repeats encoded in input_mat.
    target_bw : np.ndarray
        Lengths of repeats that we seek to merge.

    Returns
    -------
    out_mat : np.ndarray
        Binary matrix with ones where repeats start and zeros otherwise
        with rows of full_mat merged if appropriate.
    one_length_vec : np.ndarray
        Length of the repeats encoded in out_mat.
    """
    # Sort the elements of full_bandwidth
    temp_bandwidth = np.sort(full_bw, axis=None)
    # Return the indices that would sort full_bandwidth
    bnds = np.argsort(full_bw, axis=None)
    temp_mat = full_mat[bnds, :]
    # Find the unique elements of target_bandwidth
    target_bandwidth = np.unique(target_bw)
    # Number of unique target lengths
    target_size = target_bandwidth.shape[0]
    for i in range(1, target_size + 1):
        test_bandwidth = target_bandwidth[i - 1]
        # Check which rows of temp_bandwidth equal test_bandwidth
        inds = (temp_bandwidth == test_bandwidth)
        # Only merge when more than one row shares this length
        if inds.sum() > 1:
            # Isolate rows that correspond to test_bandwidth and merge them
            merge_bw = temp_mat[inds, :]
            merged_mat = __merge_rows(merge_bw, np.array([test_bandwidth]))
            # Number of merged rows; each gets this bandwidth as its length
            bandwidth_add_size = merged_mat.shape[0]
            bandwidth_add = test_bandwidth * np.ones((bandwidth_add_size,
                                                      1)).astype(int)
            # np.any(inds) is guaranteed true here given inds.sum() > 1
            if np.any(inds):
                # Convert the boolean array inds into an array of integers
                inds = np.array(inds).astype(int)
                remove_inds = np.where(inds == 1)
                # Delete the rows that meet the condition set by remove_inds
                temp_mat = np.delete(temp_mat, remove_inds, axis=0)
                temp_bandwidth = np.delete(temp_bandwidth, remove_inds, axis=0)
            # Combine rows into a single matrix
            temp_mat = np.vstack((temp_mat, merged_mat))
            # Indicates temp_bandwidth is an empty array
            if temp_bandwidth.size == 0:
                temp_bandwidth = np.concatenate(bandwidth_add)
            # Indicates temp_bandwidth is not an empty array
            elif temp_bandwidth.size > 0:
                temp_bandwidth = np.concatenate(
                    (temp_bandwidth, bandwidth_add.flatten())
                )
            # Return the indices that would sort temp_bandwidth
            bnds = np.argsort(temp_bandwidth)
            # Sort the elements of temp_bandwidth
            temp_bandwidth = np.sort(temp_bandwidth)
            # Keep rows aligned with their (re)sorted bandwidths
            temp_mat = temp_mat[bnds, ]
    # Create output
    out_mat = temp_mat
    out_length_vec = temp_bandwidth
    # Return lengths as a column vector unless there is a single length
    if out_length_vec.size != 1:
        out_length_vec = out_length_vec.reshape(-1, 1)
    output = (out_mat, out_length_vec)
    return output
def __merge_rows(input_mat, input_width):
    """
    Merge rows of input_mat that have at least one common repeat.

    A common repeat must occur at the same time step and be of common
    length. Rows sharing a starting index are unioned together; if the
    union produces overlapping blocks, it is re-split via __compare_and_cut.

    Args
    ----
    input_mat : np.ndarray
        Binary matrix with ones where repeats start and zeroes otherwise.
    input_width : np.ndarray
        Length of the repeats encoded in input_mat (callers pass a
        single-element array, e.g. np.array([bandwidth])).

    Returns
    -------
    merge_mat : np.ndarray
        Binary matrix with ones where repeats start and zeroes otherwise.
    """
    # Step 0: initialize temporary variables
    not_merge = input_mat  # Rows still awaiting a merge check
    merge_mat = np.empty((0, input_mat.shape[1]), int)  # Nothing has been merged
    # NOTE(review): merge_key is accumulated below but never returned, and
    # its first element comes from np.empty (uninitialized) — presumably
    # vestigial; confirm before relying on it.
    merge_key = np.empty(1, int)
    rows = input_mat.shape[0]  # How many rows to merge?
    # Step 1: has every row been checked?
    while rows > 0:
        # Step 2: start merge process
        # Step 2a: choose first unmerged row
        row2check = not_merge[0, :]
        # Create a comparison matrix
        # with copies of row2check stacked
        # so that r2c_mat is the same
        # size as the set of rows waiting
        # to be merged
        r2c_mat = np.kron(np.ones((rows, 1)), row2check)
        # Step 2b: find indices of unmerged overlapping rows — a shared
        # start index shows up as a 2 in the elementwise sum
        merge_inds = np.sum(((r2c_mat + not_merge) == 2), axis=1) > 0
        # Step 2c: union rows with starting indices in common with row2check
        # and remove those rows from input_mat
        union_merge = np.sum(not_merge[merge_inds, :], axis=0) > 0
        union_merge = union_merge.astype(int)
        not_merge = np.delete(not_merge, np.where(merge_inds == 1), 0)
        # Step 2d: check that newly merged rows do not cause overlaps within
        # row
        # If there are conflicts, rerun compare_and_cut
        merge_block = reconstruct_full_block(union_merge, input_width)
        if np.max(merge_block) > 1:
            (union_merge, union_merge_key) = __compare_and_cut(
                union_merge, input_width, union_merge, input_width
            )
        else:
            union_merge_key = input_width
        # Step 2e: add unions to merge_mat and merge_key
        merge_mat = np.vstack((merge_mat, union_merge))
        merge_key = np.vstack((merge_key, union_merge_key))
        # Step 3: reinitialize rows for stopping condition
        rows = not_merge.shape[0]
    if np.ndim(merge_mat) == 1:
        # Make sure the output is a 2d array
        merge_mat = np.array([merge_mat])
    return merge_mat.astype(int)
def hierarchical_structure(matrix_no_overlaps, key_no_overlaps, sn, vis=False):
    """
    Distills the repeats encoded in matrix_no_overlaps (and key_no_overlaps)
    to the essential structure components and then builds the hierarchical
    representation. Optionally shows visualizations of the hierarchical
    structure via the vis argument.

    Args
    ----
    matrix_no_overlaps : np.ndarray[int]
        Binary matrix with 1's where repeats begin and 0's otherwise.
    key_no_overlaps : np.ndarray[int]
        Vector containing the lengths of the repeats encoded in
        matrix_no_overlaps.
    sn : int
        Song length, which is the number of audio shingles.
    vis : bool
        Shows visualizations if True (default = False).

    Returns
    -------
    full_visualization : np.ndarray[int]
        Binary matrix representation for full_matrix_no_overlaps
        with blocks of 1's equal to the length's prescribed
        in full_key.
    full_key : np.ndarray[int]
        Vector containing the lengths of the hierarchical
        structure encoded in full_matrix_no_overlaps.
    full_matrix_no_overlaps : np.ndarray[int]
        Binary matrix with 1's where hierarchical
        structure begins and 0's otherwise.
    full_anno_lst : np.ndarray[int]
        Vector containing the annotation markers of the
        hierarchical structure encoded in each row of
        full_matrix_no_overlaps.
    """
    breakup_tuple = breakup_overlaps_by_intersect(matrix_no_overlaps, key_no_overlaps, 0)
    # Using pno and pno_key, we build a vector that tells us the order of the
    # repeats of the essential structure components
    pno = breakup_tuple[0]
    pno_key = breakup_tuple[1]
    # Get the block representation for pno, called pno_block
    pno_block = reconstruct_full_block(pno, pno_key)
    if vis:
        # IMAGE 1 construction: essential structure components
        pno_anno = get_annotation_lst(pno_key)
        pno_y_labels = get_y_labels(pno_key, pno_anno)
        num_pno_rows = np.size(pno, axis=0)
        twos = np.full((num_pno_rows, sn), 2, dtype=int)
        # Invert so that repeats show dark on a light background
        vis_array = twos - (pno_block + pno)
        fig, ax = plt.subplots(1, 1)
        sdm = ax.imshow(vis_array, cmap="gray", aspect=10)
        plt.title("Essential Structure Components")
        # Set the number of ticks and set tick intervals to be equal
        ax.set_yticks(np.arange(0,np.size(pno_y_labels)-1))
        # Set the ticklabels along the y axis and remove 0 in vis_y_labels
        ax.set_yticklabels(pno_y_labels[1:])
        plt.show()
    # Assign a unique (nonzero) number for each row in PNO. We refer these
    # unique numbers COLORS.
    num_colors = pno.shape[0]
    num_timesteps = pno.shape[1]
    # Create unique color identifier for num_colors
    color_lst = np.arange(1, num_colors + 1)
    # Turn it into a column
    color_lst = color_lst.reshape(np.size(color_lst), 1)
    color_mat = np.tile(color_lst, (1, num_timesteps))
    # For each time step in row i that equals 1, change the value at that time
    # step to i
    pno_color = color_mat * pno
    # Collapse to one row: each time step holds the color of the repeat
    # starting there (0 when none starts)
    pno_color_vec = pno_color.sum(axis=0)
    # Find where repeats exist in time, paying special attention to the starts
    # and ends of each repeat of an essential structure component
    # take sums down columns --- conv to logical
    pno_block_vec = (np.sum(pno_block, axis=0)) > 0
    pno_block_vec = pno_block_vec.astype(np.float32)
    # Difference of adjacent entries: +1 marks a transition out of a block
    one_vec = pno_block_vec[0 : sn - 1] - pno_block_vec[1:sn]
    # Find all the blocks of consecutive time steps that are not contained in
    # any of the essential structure components
    # We call these blocks zero blocks
    # Shift pno_block_vec so that the zero blocks are marked at the correct
    # time steps with 1's
    if pno_block_vec[0] == 0:
        one_vec = np.insert(one_vec, 0, 1)
    elif pno_block_vec[0] == 1:
        one_vec = np.insert(one_vec, 0, 0)
    # Assign one new unique number to all the zero blocks
    pno_color_vec[one_vec == 1] = num_colors + 1
    # We are only concerned with the order that repeats of the essential
    # structure components occur in. So we create a vector that only contains
    # the starting indices for each repeat of the essential structure
    # components.
    # We isolate the starting index of each repeat of the essential structure
    # components and save a binary vector with 1 at a time step if a repeat of
    # any essential structure component occurs there
    non_zero_inds = (pno_color_vec > 0)
    num_nzi = non_zero_inds.sum(axis=0)
    pno_color_inds_only = pno_color_vec[non_zero_inds]
    # For indices that signals the start of a zero block, turn those indices
    # back to 0
    zero_inds_short = (pno_color_inds_only == (num_colors + 1))
    pno_color_inds_only[zero_inds_short] = 0
    # Create a binary matrix symm_pno_inds_only such that the (i,j) entry is 1
    # if the following three conditions are true:
    #     1) a repeat of an essential structure component is the i-th thing in
    #        the ordering
    #     2) a repeat of an essential structure component is the j-th thing in
    #        the ordering
    #     3) the repeat occurring in the i-th place of the ordering and the
    #        one occurring in the j-th place of the ordering are repeats of the
    #        same essential structure component.
    # If any of the above conditions are not true, then the (i,j) entry of
    # symm_pno_inds_only is 0.
    # Turn our pattern row into a square matrix by stacking that row the
    # number of times equal to the columns in that row
    pno_io_mat = np.tile(pno_color_inds_only, (num_nzi, 1))
    pno_io_mat = pno_io_mat.astype(np.float32)
    # Mask out (i,j) pairs where either position is a zero block
    pno_io_mask = (
        (pno_io_mat > 0).astype(np.float32)
        + (pno_io_mat.transpose() > 0).astype(np.float32)
    ) == 2
    symm_pno_inds_only = (
        pno_io_mat.astype(np.float32) == pno_io_mat.transpose(
        ).astype(np.float32)
    ) * pno_io_mask
    if vis:
        # IMAGE 2: self-dissimilarity of the component ordering
        fig, ax = plt.subplots(1, 1)
        sdm = ax.imshow(symm_pno_inds_only, cmap="binary", aspect=0.8)
        plt.title(
            "Threshold Self-dissimilarity matrix of" +
            "the ordering Essential Structure Components"
        )
        # this locator puts ticks at regular intervals
        loc = plticker.MultipleLocator(base=1.0)
        ax.yaxis.set_major_locator(loc)
        ax.xaxis.set_major_locator(loc)
        plt.show()
    # Extract all the diagonals in symm_pno_inds_only and get pairs of
    # repeated sublists in the order that repeats of essential structure
    # components.
    # These pairs of repeated sublists are the basis of our hierarchical
    # representation.
    nzi_lst = find_all_repeats(symm_pno_inds_only, np.arange(1, num_nzi + 1))
    remove_inds = (nzi_lst[:, 0] == nzi_lst[:, 2])
    # Remove any pairs of repeats that are two copies of the same repeat (i.e.
    # a pair (A,B) where A == B)
    if np.any(remove_inds):
        remove_inds = np.array(remove_inds).astype(int)
        remove = np.where(remove_inds == 1)
        nzi_lst = np.delete(nzi_lst, remove, axis=0)
    # Add the annotation markers to the pairs in nzi_lst
    nzi_lst_anno = find_complete_list_anno_only(nzi_lst, num_nzi)
    # Remove the overlaps
    output_tuple = remove_overlaps(nzi_lst_anno, num_nzi)
    (nzi_matrix_no_overlaps, nzi_key_no_overlaps) = output_tuple[1:3]
    # Reconstruct full block
    nzi_pattern_block = reconstruct_full_block(nzi_matrix_no_overlaps, nzi_key_no_overlaps)
    nzi_rows = nzi_pattern_block.shape[0]
    if vis:
        # IMAGE 3: repeated ordered sublists
        fig, ax = plt.subplots(1, 1)
        sdm = ax.imshow(nzi_pattern_block, cmap="binary", aspect=0.8)
        plt.title(
            "Repeated ordered sublists of the" +
            "Essential Structure Components"
        )
        # This locator puts ticks at regular intervals
        loc = plticker.MultipleLocator(base=1.0)
        ax.yaxis.set_major_locator(loc)
        ax.xaxis.set_major_locator(loc)
        plt.show()
        # IMAGE 4: same, with the leading index highlighted
        fig, ax = plt.subplots(1, 1)
        sdm = ax.imshow((nzi_pattern_block + nzi_matrix_no_overlaps), cmap="binary",
                        aspect=0.8)
        plt.title(
            "Repeated ordered sublists of the" +
            "Essential Structure Components" +
            "with leading index highlighted"
        )
        loc = plticker.MultipleLocator(
            base=1.0
        )  # This locator puts ticks at regular intervals
        ax.yaxis.set_major_locator(loc)
        ax.xaxis.set_major_locator(loc)
        plt.show()
    # NOTE(review): nzi_rows was already set above; this reassignment is
    # redundant but harmless.
    nzi_rows = nzi_pattern_block.shape[0]
    # Find where all blocks start and end
    pattern_starts = np.nonzero(non_zero_inds)[0]
    pattern_ends = np.array([pattern_starts[1:] - 1])
    pattern_ends = np.insert(pattern_ends, np.shape(pattern_ends)[1], sn - 1)
    pattern_lengths = np.array(pattern_ends - pattern_starts + 1)
    full_visualization = np.zeros((nzi_rows, sn), dtype=int)
    full_matrix_no_overlaps = np.zeros((nzi_rows, sn), dtype=int)
    # Expand each compressed column back over its full time span
    for i in range(0, num_nzi):
        repeated_sect = nzi_pattern_block[:, i].reshape(
            np.shape(nzi_pattern_block)[0], 1
        )
        full_visualization[:,
                           pattern_starts[i]: pattern_ends[i] + 1] = np.tile(
            repeated_sect, (1, pattern_lengths[i])
        )
        full_matrix_no_overlaps[:, pattern_starts[i]] = nzi_matrix_no_overlaps[:, i]
    # Get full_key, the matching bandwidth key for full_matrix_no_overlaps
    full_key = np.zeros((nzi_rows, 1), dtype=int)
    find_key_mat = full_visualization + full_matrix_no_overlaps
    for i in range(0, nzi_rows):
        # A 2 marks a position that is both a start (1) and inside a block (1)
        one_start = np.where(find_key_mat[i, :] == 2)[0][0]
        temp_row = find_key_mat[i, :]
        temp_row[0 : one_start + 1] = 1
        # NOTE(review): np.where(...)[0][0] raises IndexError when no zero
        # (or no two) exists, so the np.size(...) == 0 guards below can
        # never fire — presumably the row always contains one; confirm.
        find_zero = np.where(temp_row == 0)[0][0]
        if np.size(find_zero) == 0:
            find_zero = sn
        find_two = np.where(temp_row == 2)[0][0]
        if np.size(find_two) == 0:
            find_two = sn
        one_end = np.minimum(find_zero, find_two)
        full_key[i] = one_end - one_start
    full_key_inds = np.argsort(full_key, axis=0)
    # Switch to row
    full_key_inds = full_key_inds[:, 0]
    full_key = np.sort(full_key, axis=0)
    full_visualization = full_visualization[full_key_inds, :]
    full_matrix_no_overlaps = full_matrix_no_overlaps[full_key_inds, :]
    # Remove rows of our hierarchical representation that contain only one
    # repeat
    inds_remove = np.where(np.sum(full_matrix_no_overlaps, 1) <= 1)
    full_key = np.delete(full_key, inds_remove, axis=0)
    full_matrix_no_overlaps = np.delete(full_matrix_no_overlaps, inds_remove, axis=0)
    full_visualization = np.delete(full_visualization, inds_remove, axis=0)
    full_anno_lst = get_annotation_lst(full_key)
    output = (full_visualization, full_key, full_matrix_no_overlaps, full_anno_lst)
    if vis:
        # IMAGE 5: the complete aligned hierarchies
        full_anno_lst = get_annotation_lst(full_key)
        vis_y_labels = get_y_labels(full_key, full_anno_lst)
        num_vis_rows = np.size(full_visualization, axis=0)
        twos = np.full((num_vis_rows, sn), 2, dtype=int)
        vis_array = twos - (full_visualization + full_matrix_no_overlaps)
        fig, ax = plt.subplots(1, 1)
        sdm = ax.imshow(vis_array, cmap="gray", aspect=5)
        plt.title("Complete Aligned Hierarchies")
        # Set the number of ticks and set tick intervals to be equal
        ax.set_yticks(np.arange(0,np.size(vis_y_labels)-1))
        # Set the ticklabels along the y axis and remove 0 in vis_y_labels
        ax.set_yticklabels(vis_y_labels[1:])
        plt.show()
    return output
|
__author__ = 'lucabasa'
__version__ = '1.1.0'
__status__ = 'obsolete'
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import VarianceThreshold
# The bare 'max_columns' alias was removed in pandas 1.0; use the full name.
pd.set_option('display.max_columns', 200)
import utility as ut
def train_svc(df_train, df_test):
    """
    Train one SVC per value of 'wheezy-copper-turtle-magic' (0-511).

    For each subset: drop near-constant features via VarianceThreshold,
    then fit a scaled polynomial-kernel SVC with 25-fold stratified CV,
    accumulating out-of-fold predictions and fold-averaged test predictions.

    Args
    ----
    df_train : pd.DataFrame
        Training data with 'id', 'target' and feature columns.
    df_test : pd.DataFrame
        Test data with the same feature columns.

    Returns
    -------
    oof : np.ndarray
        Out-of-fold probability predictions aligned with df_train rows.
    preds : np.ndarray
        Averaged probability predictions aligned with df_test rows.
    """
    train = df_train.copy()
    test = df_test.copy()

    oof = np.zeros(len(train))
    preds = np.zeros(len(test))

    cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]

    for i in range(512):
        train2 = train[train['wheezy-copper-turtle-magic'] == i]
        test2 = test[test['wheezy-copper-turtle-magic'] == i]
        idx1 = train2.index
        idx2 = test2.index
        train2.reset_index(drop=True, inplace=True)

        # Keep only informative columns (variance above 1.5).
        sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
        train3 = sel.transform(train2[cols])
        test3 = sel.transform(test2[cols])

        # shuffle=True is required for random_state to take effect;
        # recent scikit-learn raises a ValueError for the old combination.
        skf = StratifiedKFold(n_splits=25, shuffle=True, random_state=15)
        for train_index, test_index in skf.split(train3, train2['target']):
            clf = Pipeline([('scaler', StandardScaler()),
                            ('svc', SVC(probability=True, kernel='poly', degree=4, gamma='auto'))])
            clf.fit(train3[train_index, :], train2.loc[train_index]['target'])
            oof[idx1[test_index]] = clf.predict_proba(train3[test_index, :])[:, 1]
            # Average the fold predictions on this test subset.
            preds[idx2] += clf.predict_proba(test3)[:, 1] / skf.n_splits

        if i % 25 == 0:
            print(i)

    ut.report_oof(df_train, oof)

    return oof, preds
def train_logit(df_train, df_test):
    """
    Train one L1 logistic regression per value of
    'wheezy-copper-turtle-magic' (0-511).

    Mirrors train_svc: per-subset VarianceThreshold feature selection,
    scaled saga-solver LogisticRegression, 25-fold stratified CV with
    out-of-fold and fold-averaged test predictions.

    Args
    ----
    df_train : pd.DataFrame
        Training data with 'id', 'target' and feature columns.
    df_test : pd.DataFrame
        Test data with the same feature columns.

    Returns
    -------
    oof : np.ndarray
        Out-of-fold probability predictions aligned with df_train rows.
    preds : np.ndarray
        Averaged probability predictions aligned with df_test rows.
    """
    train = df_train.copy()
    test = df_test.copy()

    oof = np.zeros(len(train))
    preds = np.zeros(len(test))

    cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]

    for i in range(512):
        train2 = train[train['wheezy-copper-turtle-magic'] == i]
        test2 = test[test['wheezy-copper-turtle-magic'] == i]
        idx1 = train2.index
        idx2 = test2.index
        train2.reset_index(drop=True, inplace=True)

        # Keep only informative columns (variance above 1.5).
        sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
        train3 = sel.transform(train2[cols])
        test3 = sel.transform(test2[cols])

        # shuffle=True is required for random_state to take effect;
        # recent scikit-learn raises a ValueError for the old combination.
        skf = StratifiedKFold(n_splits=25, shuffle=True, random_state=15)
        for train_index, test_index in skf.split(train3, train2['target']):
            clf = Pipeline([('scaler', StandardScaler()),
                            ('logit', LogisticRegression(solver='saga', penalty='l1', C=1))])
            clf.fit(train3[train_index, :], train2.loc[train_index]['target'])
            oof[idx1[test_index]] = clf.predict_proba(train3[test_index, :])[:, 1]
            # Average the fold predictions on this test subset.
            preds[idx2] += clf.predict_proba(test3)[:, 1] / skf.n_splits

    ut.report_oof(df_train, oof)

    return oof, preds
if __name__ == '__main__':
    df_train = pd.read_csv('data/train.csv')
    df_test = pd.read_csv('data/test.csv')

    # Fit both model families and keep their OOF/test predictions.
    oof_svc, preds_svc = train_svc(df_train, df_test)
    oof_logit, preds_logit = train_logit(df_train, df_test)

    # Grid-search the blending weight on the out-of-fold predictions,
    # keeping the first weight that attains the best AUC.
    weights = np.arange(0.001, 1, 0.001)
    scores = [roc_auc_score(df_train.target, w * oof_svc + (1 - w) * oof_logit)
              for w in weights]
    best_weight = weights[int(np.argmax(scores))]

    # Blend the test predictions with the selected weight and write out.
    sub = pd.read_csv('data/sample_submission.csv')
    sub['target'] = best_weight * preds_svc + (1 - best_weight) * preds_logit
    sub.to_csv('submissions/v18_svclogit_sub.csv', index=False)
|
#!/usr/bin/env python3
import asyncio
import json
import psutil
import socket
import urllib.error
import urllib.request
import iterm2
from psutil._common import bytes2human
af_map = {
socket.AF_INET: 'IPv4',
socket.AF_INET6: 'IPv6',
psutil.AF_LINK: 'MAC',
}
duplex_map = {
psutil.NIC_DUPLEX_FULL: "full",
psutil.NIC_DUPLEX_HALF: "half",
psutil.NIC_DUPLEX_UNKNOWN: "?",
}
# The name of the iTerm2 variable to store the result
VARIABLE = "external_ip"
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1" \
" Safari/605.1.15"
service_url = "https://ipinfo.io/json"
update_interval = 120
country_keys = ["country", "country_iso"]
# Icons are base64-encoded PNGs. The first one is 32x34 and is used for Retina
# displays. The second is 16x32 and is used for non-Retina displays.
ICON1X = "iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA7klEQVQ4jbWRvWpCQRCFP41I/AGfwoi+gNhYqm2w0kdIE6K+xu3FxjdIi" \
"JU2VlaKXWoLk0K0s1TBsDADA9l7vSk8MHDYPefM7gz3Qhf4BH6kHO/E6ZUX8TWkJqIJxSzCrDUNM7eN6Eue7vhaOm/M/bMv4MMInoCB8C" \
"zQAErAXM7e1ZQ0AWXDM0BaeAHoAXv5okPFF2D5CbgIPwIv8sW+nCVUmDK<KEY>tLZ8Z1nE1VfgEMQY41BmFk" \
"xijAPb5kVbzJENTr+GtesaJmA5n/<KEY>uBFwBhay4r8AfgFM5lUYQdNtmQAAAABJRU5ErkJggg=="
ICON2X = "<KEY>" \
"<KEY>" \
"<KEY>" \
"<KEY>" \
"<KEY>" \
"XxoAH0nMWnspx6eZkD74o9MI3Vwz/CsNlDc77sa7GmK5ux9eQb4GuoOVJexW4H+waWDhpZarYAfHRYUrlbAVw4T3MELDRPEf9Vfh9wnPh" \
"AHcssAAAAASUVORK5CYII="
FLAGS_OFFSET = 127397
A = 65
Z = 90
def emoji(country):
    """
    Convert a two-letter uppercase ISO country code into its flag emoji.

    Flag emoji are the two regional-indicator symbols obtained by shifting
    each uppercase ASCII letter by FLAGS_OFFSET. Any input that is not
    exactly two uppercase ASCII letters is returned unchanged.
    """
    # Check the length before indexing: the previous version evaluated
    # country[1] first, raising IndexError for 0/1-character input instead
    # of returning it unchanged.
    if len(country) != 2:
        return country
    first = ord(country[0])
    second = ord(country[1])
    if (first > Z or first < A) or (second > Z or second < A):
        return country
    return chr(first + FLAGS_OFFSET) + chr(second + FLAGS_OFFSET)
async def get_external_ip():
    """
    Fetch the public IP and country flag from service_url.

    Returns an 'ip|flag' string. On any network or response-parsing
    failure, falls back to the local IP paired with a generic icon so the
    status bar keeps working offline.
    """
    try:
        request = urllib.request.Request(service_url, data=None,
                                         headers={
                                             'User-Agent': USER_AGENT
                                         }
                                         )
        with urllib.request.urlopen(request) as response:
            resp = response.read().decode("utf-8").strip()
            obj = json.loads(resp)
            ip = obj["ip"]
            country = '🌍'  # globe fallback when no country code is present
            for country_key in country_keys:
                if country_key in obj and len(obj[country_key]) == 2:
                    country = obj[country_key]
                    break
            return '{}|{}'.format(ip, emoji(country))
    except (urllib.error.HTTPError, urllib.error.URLError, TypeError,
            json.JSONDecodeError, KeyError):
        # Also tolerate malformed JSON (e.g. captive portals) and a
        # missing 'ip' field instead of crashing the background task.
        return '{}|{}'.format(local_ip(), '📶')
def local_ip():
    """
    Return the machine's LAN IP address.

    Scans interfaces named 'en*' (macOS ethernet/wifi naming) and prefers
    an IPv4 address; falls back to resolving the local hostname.
    """
    unfiltered_addresses = psutil.net_if_addrs()
    for interface, addresses in unfiltered_addresses.items():
        if interface.startswith('en'):
            # Prefer the IPv4 entry: the first address in the list may be
            # a link-layer (MAC) or IPv6 entry depending on the platform.
            for addr in addresses:
                if addr.family == socket.AF_INET:
                    return addr.address
            if addresses:
                return addresses[0].address
    return socket.gethostbyname(socket.gethostname())
def ifconfig():
    """
    Build an ifconfig-style HTML summary of all network interfaces.

    For every NIC, reports link stats (speed, duplex, MTU, up/down), I/O
    counters (bytes, packets, errors, drops) and addresses (skipping
    link-local IPv6). Returns the report wrapped in a <pre> block, as used
    by the status-bar popover.
    """
    import io
    out = io.StringIO()
    stats = psutil.net_if_stats()
    io_counters = psutil.net_io_counters(pernic=True)
    print("<pre>", file=out)
    for nic, addrs in psutil.net_if_addrs().items():
        print("%s:" % (nic), file=out)
        if nic in stats:
            st = stats[nic]
            print("    stats          : ", end='', file=out)
            print("speed=%sMB, duplex=%s, mtu=%s, up=%s" % (
                st.speed, duplex_map[st.duplex], st.mtu,
                "yes" if st.isup else "no"), file=out)
        if nic in io_counters:
            # Renamed from 'io' — the old name shadowed the io module
            # imported at the top of this function.
            nic_io = io_counters[nic]
            print("    incoming       : ", end='', file=out)
            print("bytes=%s, pkts=%s, errs=%s, drops=%s" % (
                bytes2human(nic_io.bytes_recv), nic_io.packets_recv, nic_io.errin,
                nic_io.dropin), file=out)
            print("    outgoing       : ", end='', file=out)
            print("bytes=%s, pkts=%s, errs=%s, drops=%s" % (
                bytes2human(nic_io.bytes_sent), nic_io.packets_sent, nic_io.errout,
                nic_io.dropout), file=out)
        for addr in addrs:
            # Skip empty and link-local IPv6 addresses — not useful info.
            if not addr.address or addr.address.startswith('fe80::'):
                continue
            print("    %-4s" % af_map.get(addr.family, addr.family), end="", file=out)
            print(" address   : %s" % addr.address, file=out)
            if addr.broadcast:
                print("         broadcast : %s" % addr.broadcast, file=out)
            if addr.netmask:
                print("         netmask   : %s" % addr.netmask, file=out)
            if addr.ptp:
                print("         p2p       : %s" % addr.ptp, file=out)
        print("", file=out)
    print("</pre>", file=out)
    result = out.getvalue()
    out.close()
    return result
async def external_ip_task(app):
    """
    Background task: periodically fetch the external IP and publish it to
    the iTerm2 user variable read by the status-bar component.
    """
    while True:
        if not service_url:
            # No provider configured yet (knob not set); poll again shortly.
            # The previous version was missing this 'continue' and fetched
            # with an empty URL anyway.
            await asyncio.sleep(1)
            continue
        text = await get_external_ip()
        if text:
            await app.async_set_variable("user." + VARIABLE, text)
            await asyncio.sleep(update_interval)
        else:
            # Fetch failed entirely; retry sooner than the normal cadence.
            await asyncio.sleep(5)
async def main(connection):
    """
    iTerm2 entry point: registers the External IP status-bar component and
    starts the background task that refreshes its value.
    """
    app = await iterm2.async_get_app(connection)
    # Start fetching the URL in the background.
    asyncio.create_task(external_ip_task(app))
    icon1x = iterm2.StatusBarComponent.Icon(1, ICON1X)
    icon2x = iterm2.StatusBarComponent.Icon(2, ICON2X)
    update_interval_knob = "ip_update_interval"
    service_url_knob = "ip_provider_url"
    knobs = [iterm2.StringKnob("Update Interval", "60", "60", update_interval_knob),
             iterm2.StringKnob("Provider URL", "https://ifconfig.co/json", "https://ifconfig.co/json",
                               service_url_knob)]
    # Register the status bar component.
    component = iterm2.StatusBarComponent(
        short_description="External IP",
        detailed_description="Shows public IP address of current host",
        knobs=knobs,
        exemplar=local_ip(),
        update_cadence=None,
        identifier="catj.moe.ip",
        icons=[icon1x, icon2x])

    @iterm2.RPC
    async def onclick(session_id):
        # Show an interface/traffic summary popover for the clicked session.
        await component.async_open_popover(session_id, ifconfig(), iterm2.util.Size(550, 600))

    # This function gets called once per second.
    @iterm2.StatusBarRPC
    async def external_ip(knobs, value=iterm2.Reference("iterm2.user." + VARIABLE + "?")):
        """This function returns the value to show in a status bar."""
        global update_interval
        global service_url
        # Apply knob overrides so users can tune the interval and provider.
        if update_interval_knob in knobs and knobs[update_interval_knob]:
            update_interval = int(knobs[update_interval_knob])
        if service_url_knob in knobs and knobs[service_url_knob]:
            service_url = knobs[service_url_knob]
        if value:
            return value
        # Fall back to the local IP until the first fetch completes.
        return local_ip()

    # Register the component.
    await component.async_register(connection, external_ip, onclick=onclick)
# Run the "main" coroutine and keep the script alive even after it returns.
iterm2.run_forever(main)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.