index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
990,500 | 13113f7ed23e9bcfd5bfae1faf140f4e1014f43a | import csv
import os
from rdbtools import RdbCallback, RdbParser
import pandas as pd
from rdbtools.callbacks import JSONCallback
def load_rdb(filename, filters=None):
    """Parse the RDB dump at *filename* into a populated MockRedis.

    filters is passed straight through to RdbParser (None = no filtering).
    """
    callback = MockRedis()
    RdbParser(callback, filters).parse(filename)
    return callback
class MockRedis(RdbCallback):
    """In-memory RdbCallback that mirrors the contents of a parsed RDB file.

    Parsed entries land in ``self.databases[dbnum]``; expiries and declared
    lengths are tracked per database so the ``end_*`` callbacks can verify
    that the number of parsed elements matches the declared length.
    """

    def __init__(self):
        super(MockRedis, self).__init__(string_escape=True)
        self.databases = {}       # dbnum -> {key: value}
        self.lengths = {}         # dbnum -> {key: declared length/cardinality}
        self.expiry = {}          # dbnum -> {key: expiry}
        self.methods_called = []  # lifecycle callbacks observed (start/end rdb)
        self.dbnum = 0

    def currentdb(self):
        """Return the key/value dict of the database currently being parsed."""
        return self.databases[self.dbnum]

    def store_expiry(self, key, expiry):
        """Record the expiry of *key* in the current database."""
        self.expiry[self.dbnum][key] = expiry

    def store_length(self, key, length):
        """Record the declared length of *key* in the current database."""
        if self.dbnum not in self.lengths:
            self.lengths[self.dbnum] = {}
        self.lengths[self.dbnum][key] = length

    def get_length(self, key):
        """Return the recorded length for *key*, raising if none was stored."""
        if key not in self.lengths[self.dbnum]:
            raise Exception('Key %s does not have a length' % key)
        return self.lengths[self.dbnum][key]

    def start_rdb(self):
        self.methods_called.append('start_rdb')

    def start_database(self, dbnum):
        # A new database resets the per-db bookkeeping dicts.
        self.dbnum = dbnum
        self.databases[dbnum] = {}
        self.expiry[dbnum] = {}
        self.lengths[dbnum] = {}

    def set(self, key, value, expiry, info):
        self.currentdb()[key] = value
        if expiry:
            self.store_expiry(key, expiry)

    def start_hash(self, key, length, expiry, info):
        if key in self.currentdb():
            raise Exception('start_hash called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = {}
        if expiry:
            self.store_expiry(key, expiry)
        self.store_length(key, length)

    def hset(self, key, field, value):
        # Fixed: the message was passed as a second argument ('%s', key) and
        # never interpolated; format it properly.
        if key not in self.currentdb():
            raise Exception('start_hash not called for key = %s' % key)
        self.currentdb()[key][field] = value

    def end_hash(self, key):
        if key not in self.currentdb():
            raise Exception('start_hash not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key]:
            raise Exception('Lengths mismatch on hash %s, expected length = %d, actual = %d'
                            % (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def start_set(self, key, cardinality, expiry, info):
        if key in self.currentdb():
            raise Exception('start_set called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = []
        if expiry:
            self.store_expiry(key, expiry)
        self.store_length(key, cardinality)

    def sadd(self, key, member):
        if key not in self.currentdb():
            raise Exception('start_set not called for key = %s' % key)
        self.currentdb()[key].append(member)

    def end_set(self, key):
        if key not in self.currentdb():
            raise Exception('start_set not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key]:
            raise Exception('Lengths mismatch on set %s, expected length = %d, actual = %d'
                            % (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def start_list(self, key, expiry, info):
        if key in self.currentdb():
            raise Exception('start_list called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = []
        if expiry:
            self.store_expiry(key, expiry)

    def rpush(self, key, value):
        if key not in self.currentdb():
            raise Exception('start_list not called for key = %s' % key)
        self.currentdb()[key].append(value)

    def end_list(self, key, info):
        # Fixed: the message wrongly referred to start_set.
        if key not in self.currentdb():
            raise Exception('start_list not called for key = %s' % key)
        # Lists carry no declared length in the RDB callbacks, so record the
        # observed length instead of validating one.
        self.store_length(key, len(self.currentdb()[key]))

    def start_sorted_set(self, key, length, expiry, info):
        if key in self.currentdb():
            raise Exception('start_sorted_set called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = {}
        if expiry:
            self.store_expiry(key, expiry)
        self.store_length(key, length)

    def zadd(self, key, score, member):
        if key not in self.currentdb():
            raise Exception('start_sorted_set not called for key = %s' % key)
        self.currentdb()[key][member] = score

    def end_sorted_set(self, key):
        # Fixed: the message wrongly referred to start_set.
        if key not in self.currentdb():
            raise Exception('start_sorted_set not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key]:
            raise Exception('Lengths mismatch on sortedset %s, expected length = %d, actual = %d'
                            % (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def start_module(self, key, module_name, expiry, info):
        if key in self.currentdb():
            raise Exception('start_module called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = {'module_name': module_name}
        if expiry:
            self.store_expiry(key, expiry)
        # NOTE(review): rdbtools interprets this return value as whether the
        # module payload was consumed here — confirm against rdbtools docs.
        return False

    def end_module(self, key, buffer_size, buffer=None):
        if key not in self.currentdb():
            raise Exception('start_module not called for key = %s' % key)
        self.store_length(key, buffer_size)

    def start_stream(self, key, listpacks_count, expiry, info):
        if key in self.currentdb():
            raise Exception('start_stream called with key %s that already exists' % key)
        else:
            self.currentdb()[key] = {}
        if expiry:
            self.store_expiry(key, expiry)

    def stream_listpack(self, key, entry_id, data):
        # Fixed: the message wrongly referred to start_hash.
        if key not in self.currentdb():
            raise Exception('start_stream not called for key = %s' % key)
        self.currentdb()[key][entry_id] = data

    def end_stream(self, key, items, last_entry_id, cgroups):
        if key not in self.currentdb():
            raise Exception('start_stream not called for key = %s' % key)
        self.store_length(key, items)

    def end_database(self, dbnum):
        if self.dbnum != dbnum:
            raise Exception('start_database called with %d, but end_database called %d instead' % (self.dbnum, dbnum))

    def end_rdb(self):
        self.methods_called.append('end_rdb')
def load_rdb_db(filename, dbnum=1):
    """Load an RDB dump and return one database's key/value dict.

    dbnum defaults to 1 to preserve the original hard-coded behavior;
    raises KeyError if that database is absent from the dump.
    """
    r = load_rdb(filename)  # MockRedis instance
    return r.databases[dbnum]
if __name__ == '__main__':
    filename = "dump.rdb"
    res = load_rdb_db(filename)
    # RDB keys/values come back as bytes; decode for CSV output and printing.
    unidict = {k.decode('utf8'): v.decode('utf8') for k, v in res.items()}
    csv_file = "Names.csv"
    csv_columns = ['key', 'value']
    try:
        # newline='' is required by the csv module (avoids blank rows on
        # Windows); explicit utf-8 matches the decoding above.
        with open(csv_file, 'w', newline='', encoding='utf-8') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            for key, value in unidict.items():
                writer.writerow({"key": key, "value": value})
    except IOError:
        print("I/O error")
    for key, value in unidict.items():
        print("key: {} , value: {}".format(key, value))
|
990,501 | a737bf698f3ce72921aff41016ad64bdcbe51b96 | # Ejemplo de conjuntos
# Example of sets: duplicates collapse, so both constructions hold 5 elements.
valores = [5, 4, 6, 8, 8, 1]
s = set(valores)
print(s)
print(type(s))
s = {5, 4, 6, 8, 8, 1}
print(len(s))
lista = ["España", "Perú", "Argentina"]
print("España" in lista)
|
990,502 | 5ed99d6a04dce08b99362b7014bd9338c31d3ebd | class SwiftArray:
    """Empty stub class; no behavior is defined yet."""
    pass
|
990,503 | 113341df725a47570ec03f2e4b98420fc7cca347 | import pytest
from Bio.Seq import Seq
from cargo_oligo_creator import guide, guide_splitter, split_configuration
class TestGuideSplitter:
    """Unit tests for guide_splitter.GuideSplitter."""

    def test_split(self):
        # A single 12-mer guide split with overlap configuration [3].
        splitter = guide_splitter.GuideSplitter([guide.Guide("AAAATTCCCCGG")])
        config = split_configuration.SplitConfiguration([3])
        result = splitter.split(config)
        assert len(result) == 1
        first = result[0]
        assert first.first_part == Seq("AAAATTCC")
        assert first.second_part == Seq("TTCCCCGG")
        assert first.overlap == Seq("TTCC")
|
990,504 | 90c85cf9301e86a0d9800a09f534294276d46abe | from bs4 import BeautifulSoup
import requests
class Stock:
    # Constructor: accepts any number of stock ticker symbols.
    def __init__(self, *stock_numbers):
        self.stock_numbers = stock_numbers
    # Scrape the Yahoo Taiwan quote page.
    # NOTE(review): the URL hardcodes s=2451 and ignores self.stock_numbers,
    # and the parsed soup is never returned or stored — looks unfinished;
    # confirm intent before relying on this method.
    def scrape(self):
        response = requests.get("https://tw.stock.yahoo.com/q/q?s=2451")
        soup = BeautifulSoup(response.text.replace("加到投資組合", ""), "lxml") |
990,505 | eba61e079a0ed400e1cb9aed8800f184e4048594 | #!/usr/bin/python
# Get Xineoh's Mnist Data
import mysql.connector as mysql
import time
import tensorflow as tf
import numpy as np
# NOTE(review): database credentials are hardcoded in source — move them to
# environment variables or a secrets store before sharing/deploying.
hostname = '173.45.91.18'
username = 'user01'
password = 'nw21ZDcWzTG4'
database = 'mnist01'
###Specify some preliminaries like what we want the output files to be called
train_out = 'TF_mnist_train'
test_out = 'TF_mnist_test'
# Global per-digit counter, mutated by fill_record_file for the train split.
count = np.zeros([10, 1])
###Specify what will exist in these records files (labels and data)
# NOTE(review): x_vals/t_vals appear unused in this file — confirm before removal.
x_vals = 'raw_pixels'
t_vals = 'labels'
# ##One approach to the class imbalance is to resample the data
# ##to obtain a uniform distribution, since there are a large
# #number of samples this should be sufficient
# def resample(probabilities,samples,Nsamples):
# #arguments should be np arrays
# cumulative_p=np.cumsum(probabilities)
# new_probs=np.random.random([Nsamples,1])
# newsamp=np.zeros_like(samples)*np.max(cumulative_p)
# idx=np.zeros_like(probabilities)
def grab_max(train_dataset, cnx):
    """Return the maximum ``id`` of the selected MNIST table.

    train_dataset: truthy selects ``mnist_train``, falsy ``mnist_test``.
    cnx: an open MySQL connection.
    """
    cursor = cnx.cursor(buffered=True)
    # Truthiness check instead of the '== True' anti-idiom.
    selected_table = "mnist_train" if train_dataset else "mnist_test"
    cursor.execute("SELECT MAX(id) FROM " + selected_table)
    rows = cursor.fetchone()
    cursor.close()
    print(rows[0])  # kept: original debug output
    return int(rows[0])
def grab_example(train_dataset, cnx, idx):
    """Fetch the first ``idx`` rows and split them into labels and pixels.

    Each row's ``data`` column is a comma-separated string whose first value
    is the label and whose remaining values are pixel intensities.
    Returns (label_list, pixel_list) where pixels are lists of floats.
    """
    label_list = []
    pixel_list = []
    cursor = cnx.cursor(buffered=True)
    if train_dataset == True:
        selected_table = "mnist_train"
    else:
        selected_table = "mnist_test"
    start = time.time()
    # idx is an int derived from MAX(id), so interpolation is safe here,
    # though a parameterized query would be more robust.
    cursor.execute("SELECT data FROM " + selected_table + " WHERE id<=%d;" % idx)
    print(time.time() - start)  # crude timing of the query
    rows_out = cursor.fetchall()
    for i in range(idx):
        rows = rows_out[i][0]
        data_out = list(map(int, rows.split(',')))
        label = data_out[0]
        pixels = data_out[1:]
        pixels = list(map(float, pixels))
        pixel_list.append(pixels)
        label_list.append(label)
    cursor.close()
    # print(pixels)
    # count[label]+=1
    # print(count/float(sum(count)))
    return label_list, pixel_list
# Simple routine to run a query on a database and print the results:
##
def make_example(pixels, label):
    """Build a single tf.train.Example holding one image and its label.

    pixels: iterable of floats; label: int digit class.
    """
    ex = tf.train.Example()
    fl_pixels = ex.features.feature["pixels"]
    fl_labels = ex.features.feature["labels"]
    # extend() instead of a throwaway list comprehension used only for its
    # side effects.
    fl_pixels.float_list.value.extend(pixels)
    fl_labels.int64_list.value.append(label)
    return ex
def write_example(writer, example):
    """Serialize *example* into *writer* (a TFRecord writer); returns 1."""
    serialized = example.SerializeToString()
    writer.write(serialized)
    return 1
def fill_record_file(train_dataset, max_examples, cnx):
    """Write ``maxval`` examples from MySQL into a TFRecord file.

    train_dataset: truthy writes ``train_out``, falsy ``test_out``.
    max_examples: dict with 'train'/'test' row counts (see data_creator).
    cnx: an open MySQL connection.
    """
    if train_dataset:
        filename = train_out
        maxval = max_examples['train']
    else:
        filename = test_out
        maxval = max_examples['test']
    writer = tf.python_io.TFRecordWriter(filename)
    with tf.Session() as sess:
        ##Starts my queue runners to ensure that threads start and stop when i want them to
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        label_list, pixel_list = grab_example(train_dataset, cnx, maxval)
        for i in range(maxval):
            lab, pix = label_list[i], pixel_list[i]
            if train_dataset:
                # Track the label distribution of the training split.
                count[lab] += 1
                print(count / np.sum(count))
            example = make_example(pix, lab)
            write_example(writer, example)
        writer.close()
        coord.request_stop()  # was called twice before; once is sufficient
        coord.join(threads)
        # The explicit sess.close() calls were removed: the ``with`` block
        # already closes the session (and it was previously closed twice).
def data_creator():
    """Connect to MySQL and produce both TFRecord files (train and test)."""
    cnx = mysql.connect(host=hostname, user=username, passwd=password, db=database)
    sizes = {'train': grab_max(True, cnx), 'test': grab_max(False, cnx)}
    fill_record_file(True, sizes, cnx)
    fill_record_file(False, sizes, cnx)
    cnx.close()
if __name__ == '__main__':
data_creator()
|
990,506 | d53182b8d431eef85fb5f35773c308f3b58897e2 | # -*- coding: utf-8 -*-
import requests
import redis
import lxml.html
import os
client_redis = redis.StrictRedis()
def get_pictures_to_redius():
    """Scrape the gallery page and push image URLs and names onto Redis queues."""
    page = requests.get("http://news.4399.com/gonglue/lscs/kptj/")
    document = lxml.html.fromstring(page.content.decode('gbk'))
    urls = document.xpath('//ul[@class="cf"]/li/a/img/@lz_src')
    names = document.xpath('//ul[@class="cf"]/li/a/div/text()')
    print(len(urls), len(names))
    for url in urls:
        print(url)
        client_redis.lpush("url_queue", url)
    for name in names:
        print(name)
        client_redis.lpush("picture_name", name)
def write_picture_to_file():
    """Drain the Redis queues, downloading each image to the local folder."""
    while client_redis.llen("url_queue") > 0:
        name = client_redis.rpop('picture_name').decode()
        # Fixed: rpop returns bytes — decode the URL like the name is decoded,
        # so requests always receives a str URL.
        url = client_redis.rpop('url_queue').decode()
        picture_byte = requests.get(url).content
        file_path = os.path.join('炉石传说图片', name + '.jpg')
        with open(file_path, 'wb') as f:
            f.write(picture_byte)
        print('存入' + name)
get_pictures_to_redius()
write_picture_to_file()
|
990,507 | 773ee53dc135f5b01df467ba5225e46afc2b556b | from lxml import etree
import sqlite3
def _append_text(parent, tag, text):
    """Append a <tag> child with the given text under *parent*."""
    element = etree.SubElement(parent, tag)
    element.text = text
    return element


def _flag_text(value):
    """Map a boolean-ish DB column to the "0"/"1" strings used in the XML.

    Keeps the original ``== False`` comparison so non-bool values (e.g. a
    NULL column) still map to "1" exactly as before.
    """
    return "0" if value == False else "1"


def myFunction():
    """Append every row of the EPI table to ../data.xml and rewrite the file."""
    print("maj")
    connexion = sqlite3.connect("../base_EPI.db")
    curseur = connexion.cursor()
    fichier = etree.parse("../data.xml")
    epis = fichier.getroot()
    curseur.execute("SELECT * FROM EPI")
    rows = curseur.fetchall()
    for row in rows:
        print(row)
        epi = etree.Element("EPI")
        _append_text(epi, "id", str(row[0]))
        _append_text(epi, "type", row[1])  # stored as-is (no str()), as before
        _append_text(epi, "numSerie", str(row[2]))
        _append_text(epi, "dateFab", str(row[3]))
        _append_text(epi, "dateAchat", str(row[4]))
        _append_text(epi, "datePremUse", str(row[5]))
        _append_text(epi, "dateRebut", str(row[6]))
        _append_text(epi, "modele", str(row[7]))
        _append_text(epi, "dureeVie", str(row[8]))
        _append_text(epi, "dureeUse", str(row[9]))
        _append_text(epi, "marque", str(row[10]))
        _append_text(epi, "couleur", str(row[11]))
        _append_text(epi, "stock", str(row[12]))
        # Boolean-ish status columns serialized as "0"/"1".
        _append_text(epi, "statutLoc", _flag_text(row[13]))
        _append_text(epi, "service", _flag_text(row[14]))
        _append_text(epi, "retrait", _flag_text(row[15]))
        _append_text(epi, "rebut", _flag_text(row[16]))
        epis.append(epi)
    fichier.write("../data.xml")
    # Close DB resources (previously leaked).
    curseur.close()
    connexion.close()
if __name__ == '__main__':
myFunction() |
990,508 | ede921c4b80d1f28fef234f6fb8a886851879d95 | import numpy as np
import pytest
import tensorflow as tf
from tf_explain.core.grad_cam import GradCAM
def test_should_generate_ponderated_output(mocker):
    """Each (output, grads) pair must be mapped through ponderate_output."""
    mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.ponderate_output",
        side_effect=[mocker.sentinel.ponderated_1, mocker.sentinel.ponderated_2],
    )
    outputs = [mocker.sentinel.output_1, mocker.sentinel.output_2]
    grads = [mocker.sentinel.grads_1, mocker.sentinel.grads_2]
    result = GradCAM.generate_ponderated_output(outputs, grads)
    wanted = [mocker.sentinel.ponderated_1, mocker.sentinel.ponderated_2]
    for got, want in zip(result, wanted):
        assert got == want
def test_should_ponderate_output():
    """ponderate_output must weight each channel by its gradient and sum."""
    grad_channels = [np.ones((3, 3, 1)), 2 * np.ones((3, 3, 1)), 3 * np.ones((3, 3, 1))]
    output_channels = [np.ones((3, 3, 1)), 2 * np.ones((3, 3, 1)), 4 * np.ones((3, 3, 1))]
    grad = np.concatenate(grad_channels, axis=-1)
    output = np.concatenate(output_channels, axis=-1)
    ponderated = GradCAM.ponderate_output(output, grad)
    # Per spatial location: 1*1 + 2*2 + 3*4
    expected = (1 * 1 + 2 * 2 + 3 * 4) * np.ones((3, 3))
    np.testing.assert_almost_equal(expected, ponderated)
def test_should_produce_gradients_and_filters(convolutional_model, random_data):
    """get_gradients_and_filters returns per-image activations and matching grads.

    ``convolutional_model`` and ``random_data`` are fixtures defined outside
    this file (presumably in conftest.py — confirm).
    """
    images, _ = random_data
    layer_name = "activation_1"
    use_guided_grads = True
    output, grads = GradCAM.get_gradients_and_filters(
        convolutional_model, images, layer_name, 0, use_guided_grads
    )
    # One activation map per input image, shaped like the target layer output.
    assert output.shape == [len(images)] + list(
        convolutional_model.get_layer(layer_name).output.shape[1:]
    )
    # Gradients must align element-for-element with the activations.
    assert grads.shape == output.shape
def test_should_explain_output(mocker):
    """End-to-end explain(): gradients -> ponderated CAMs -> heatmaps -> grid.

    Every collaborator is patched; the test verifies wiring and argument
    pass-through rather than numerical output.
    """
    mock_get_gradients = mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.get_gradients_and_filters",
        return_value=(
            [mocker.sentinel.conv_output_1, mocker.sentinel.conv_output_2],
            [mocker.sentinel.guided_grads_1, mocker.sentinel.guided_grads_2],
        ),
    )
    # Sentinels need a .numpy() since explain() converts CAMs from tensors.
    mocker.sentinel.cam_1.numpy = lambda: mocker.sentinel.cam_1
    mocker.sentinel.cam_2.numpy = lambda: mocker.sentinel.cam_2
    mock_generate_output = mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.generate_ponderated_output",
        return_value=[mocker.sentinel.cam_1, mocker.sentinel.cam_2],
    )
    mocker.patch(
        "tf_explain.core.grad_cam.heatmap_display",
        side_effect=[mocker.sentinel.heatmap_1, mocker.sentinel.heatmap_2],
    )
    # grid_display patched to identity so the heatmap list passes through.
    mocker.patch("tf_explain.core.grad_cam.grid_display", side_effect=lambda x: x)
    explainer = GradCAM()
    data = ([mocker.sentinel.image_1, mocker.sentinel.image_2], mocker.sentinel.labels)
    grid = explainer.explain(
        data,
        mocker.sentinel.model,
        mocker.sentinel.class_index,
        mocker.sentinel.layer_name,
        mocker.sentinel.use_guided_grads,
    )
    # The grid should contain one heatmap per input image, in order.
    for heatmap, expected_heatmap in zip(
        grid, [mocker.sentinel.heatmap_1, mocker.sentinel.heatmap_2]
    ):
        assert heatmap == expected_heatmap
    # Arguments must be forwarded unchanged to the two pipeline stages.
    mock_get_gradients.assert_called_once_with(
        mocker.sentinel.model,
        [mocker.sentinel.image_1, mocker.sentinel.image_2],
        mocker.sentinel.layer_name,
        mocker.sentinel.class_index,
        mocker.sentinel.use_guided_grads,
    )
    mock_generate_output.assert_called_once_with(
        [mocker.sentinel.conv_output_1, mocker.sentinel.conv_output_2],
        [mocker.sentinel.guided_grads_1, mocker.sentinel.guided_grads_2],
    )
@pytest.mark.parametrize(
    "model,expected_layer_name",
    [
        # Case 1: last Conv2D before Flatten is the Grad-CAM target.
        (
            tf.keras.Sequential(
                [
                    tf.keras.layers.Conv2D(
                        3, 3, input_shape=(28, 28, 1), name="conv_1"
                    ),
                    tf.keras.layers.MaxPooling2D(name="maxpool_1"),
                    tf.keras.layers.Conv2D(3, 3, name="conv_2"),
                    tf.keras.layers.Flatten(name="flatten"),
                    tf.keras.layers.Dense(1, name="dense"),
                ]
            ),
            "conv_2",
        ),
        # Case 2: same, but with GlobalAveragePooling instead of Flatten.
        (
            tf.keras.Sequential(
                [
                    tf.keras.layers.Conv2D(
                        3, 3, input_shape=(28, 28, 1), name="conv_1"
                    ),
                    tf.keras.layers.MaxPooling2D(name="maxpool_1"),
                    tf.keras.layers.Conv2D(3, 3, name="conv_2"),
                    tf.keras.layers.GlobalAveragePooling2D(name="gap"),
                    tf.keras.layers.Dense(1, name="dense"),
                ]
            ),
            "conv_2",
        ),
        # Case 3: no second conv — the pooling layer becomes the target.
        (
            tf.keras.Sequential(
                [
                    tf.keras.layers.Conv2D(
                        3, 3, input_shape=(28, 28, 1), name="conv_1"
                    ),
                    tf.keras.layers.MaxPooling2D(name="maxpool_1"),
                    tf.keras.layers.Flatten(name="flatten"),
                    tf.keras.layers.Dense(1, name="dense"),
                ]
            ),
            "maxpool_1",
        ),
    ],
)
def test_should_infer_layer_name_for_grad_cam(model, expected_layer_name):
    """infer_grad_cam_target_layer must pick the last 4D (spatial) layer."""
    layer_name = GradCAM.infer_grad_cam_target_layer(model)
    assert layer_name == expected_layer_name
def test_should_raise_error_if_grad_cam_layer_cannot_be_found():
    """A dense-only model has no valid Grad-CAM target layer -> ValueError."""
    dense_only_model = tf.keras.Sequential(
        [
            tf.keras.layers.Dense(10, input_shape=(10,), name="dense_1"),
            tf.keras.layers.Dense(1, name="dense_2"),
        ]
    )
    with pytest.raises(ValueError):
        GradCAM.infer_grad_cam_target_layer(dense_only_model)
|
990,509 | 17889e614bdb603b1a2bf0c95caa8bc01bc37cf6 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-17 08:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Song fields: chords -> tabs_and_chords and tabs -> tags."""

    dependencies = [
        ('play', '0002_auto_20170113_0746'),
    ]

    operations = [
        migrations.RenameField(
            model_name='song',
            old_name='chords',
            new_name='tabs_and_chords',
        ),
        migrations.RenameField(
            model_name='song',
            old_name='tabs',
            new_name='tags',
        ),
    ]
|
990,510 | fc0feb262255448e81bbf8dbba894904456c995e | #!/usr/bin/python3
def number_keys(a_dictionary):
    """Return the number of keys in a_dictionary (0 for an empty or None dict)."""
    return len(a_dictionary) if a_dictionary else 0
|
990,511 | 138d6c18ea9b9e64d1ce9830f6db4869ea76eb44 | import matplotlib.pyplot as plt
from transformers import BertTokenizer
from imblearn.over_sampling import RandomOverSampler
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
def build_dataset(path, tokenizer, val_size):
    """Load the CSV at *path* and build oversampled train/validation splits.

    Labels in [-1, 1] are remapped into [0, 1] via ``label_mapping`` and
    one-hot encoded. When ``val_size > 0`` returns
    ({'data', 'labels'} train dict, {'data', 'labels'} test dict);
    otherwise returns (full DataFrame, None).
    NOTE(review): ``tokenizer`` is accepted but never used here — confirm.
    """
    label_mapping = {
        -1: 0,
        -0.5: 0.16,
        -0.25: 0.32,
        0: 0.48,
        0.25: 0.64,
        0.5: 0.80,
        1: 1,
    }
    data_df = pd.read_csv(path)
    data_df['data'] = data_df['data'].apply(str)
    data_df['labels'] = data_df['labels'].apply(float).apply(lambda l: label_mapping[l])
    # Column vectors (n, 1) as required by OneHotEncoder / train_test_split.
    label_values = np.array(data_df['labels'].values)[:, None]
    enc = OneHotEncoder(handle_unknown='ignore')
    enc.fit(label_values)
    data = np.array(data_df['data'].values)[:, None]
    if val_size > 0:
        train_X, test_X, train_y, test_y = train_test_split(
            data,
            label_values,
            test_size=val_size,
            stratify=label_values)
        train_y = enc.transform(train_y)
        oversample = RandomOverSampler(sampling_strategy='minority')
        # NOTE(review): 10 rounds of 'minority' oversampling — presumably to
        # balance several minority classes in turn; fit_sample is the old
        # imblearn name (renamed fit_resample upstream). Verify both.
        for i in range(10):
            train_X, train_y = oversample.fit_sample(
                train_X,
                train_y)
        return {'data': train_X, 'labels': enc.inverse_transform(train_y)}, {'data': test_X, 'labels': test_y}
    return data_df, None
ds, _ = build_dataset(
'datasets/titles.csv',
tokenizer,
val_size=0.2)
#hist, bins = ds['labels']
plt.hist(ds['labels'], bins='auto')
plt.show()
|
990,512 | 7a32f70a265578eb1d6f635da0391c2a2acae96f | """Tests for sktime custom model flavor."""
import os
from pathlib import Path
from unittest import mock
import boto3
import flavor
import moto
import numpy as np
import pandas as pd
import pytest
from botocore.config import Config
from sktime.datasets import load_airline, load_longley
from sktime.datatypes import convert
from sktime.forecasting.arima import AutoARIMA
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.forecasting.naive import NaiveForecaster
import mlflow
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model, infer_signature
from mlflow.models.utils import _read_example
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
FH = [1, 2, 3]
COVERAGE = [0.1, 0.5, 0.9]
ALPHA = [0.1, 0.5, 0.9]
COV = False
@pytest.fixture
def model_path(tmp_path):
    """Temporary directory path used to save/log a model."""
    return tmp_path / "model"
@pytest.fixture(scope="module")
def mock_s3_bucket():
    """Create a mock S3 bucket using moto.

    moto intercepts boto3 calls in-process, so no real AWS resources are
    touched; the fixture yields the bucket name for the module's tests.

    Returns
    -------
    string with name of mock S3 bucket
    """
    with moto.mock_s3():
        bucket_name = "mock-bucket"
        my_config = Config(region_name="us-east-1")
        s3_client = boto3.client("s3", config=my_config)
        s3_client.create_bucket(Bucket=bucket_name)
        yield bucket_name
@pytest.fixture
def sktime_custom_env(tmp_path):
    """Write a conda environment yml (with sktime pinned) and return its path."""
    env_path = tmp_path / "conda_env.yml"
    _mlflow_conda_env(env_path, additional_pip_deps=["sktime"])
    return env_path
@pytest.fixture(scope="module")
def data_airline():
    """Create sample data for univariate model without exogenous regressor."""
    # Airline passengers series shipped with sktime.
    return load_airline()
@pytest.fixture(scope="module")
def data_longley():
    """Sample data for a univariate model with exogenous regressors."""
    y, X = load_longley()
    # temporal_train_test_split returns (y_train, y_test, X_train, X_test).
    return temporal_train_test_split(y, X)
@pytest.fixture(scope="module")
def auto_arima_model(data_airline):
    """Create instance of fitted auto arima model."""
    # Seasonal periodicity sp=12 with capped p/q orders keeps the fit fast.
    return AutoARIMA(sp=12, d=0, max_p=2, max_q=2, suppress_warnings=True).fit(data_airline)
@pytest.fixture(scope="module")
def naive_forecaster_model_with_regressor(data_longley):
    """Fitted naive forecaster trained with exogenous regressors."""
    y_train, _, X_train, _ = data_longley
    return NaiveForecaster().fit(y_train, X_train)
@pytest.mark.parametrize("serialization_format", ["pickle", "cloudpickle"])
def test_auto_arima_model_save_and_load(auto_arima_model, model_path, serialization_format):
    """A natively saved sktime model must round-trip through save/load."""
    flavor.save_model(
        sktime_model=auto_arima_model,
        path=model_path,
        serialization_format=serialization_format,
    )
    reloaded = flavor.load_model(model_uri=model_path)
    # Predictions from the original and reloaded models must match exactly.
    np.testing.assert_array_equal(auto_arima_model.predict(fh=FH), reloaded.predict(fh=FH))
@pytest.mark.parametrize("serialization_format", ["pickle", "cloudpickle"])
def test_auto_arima_model_pyfunc_output(auto_arima_model, model_path, serialization_format):
    """Test auto arima prediction of loaded pyfunc model.

    The pyfunc wrapper receives a one-row config DataFrame naming the sktime
    predict method plus its kwargs; each method's output must match calling
    the native model directly.
    """
    flavor.save_model(
        sktime_model=auto_arima_model,
        path=model_path,
        serialization_format=serialization_format,
    )
    loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path)
    # predict
    model_predict = auto_arima_model.predict(fh=FH)
    predict_conf = pd.DataFrame([{"fh": FH, "predict_method": "predict"}])
    pyfunc_predict = loaded_pyfunc.predict(predict_conf)
    np.testing.assert_array_equal(model_predict, pyfunc_predict)
    # predict_interval
    model_predict_interval = auto_arima_model.predict_interval(fh=FH, coverage=COVERAGE)
    predict_interval_conf = pd.DataFrame(
        [
            {
                "fh": FH,
                "predict_method": "predict_interval",
                "coverage": COVERAGE,
            }
        ]
    )
    pyfunc_predict_interval = loaded_pyfunc.predict(predict_interval_conf)
    np.testing.assert_array_equal(model_predict_interval.values, pyfunc_predict_interval.values)
    # predict_quantiles
    model_predict_quantiles = auto_arima_model.predict_quantiles(fh=FH, alpha=ALPHA)
    predict_quantiles_conf = pd.DataFrame(
        [
            {
                "fh": FH,
                "predict_method": "predict_quantiles",
                "alpha": ALPHA,
            }
        ]
    )
    pyfunc_predict_quantiles = loaded_pyfunc.predict(predict_quantiles_conf)
    np.testing.assert_array_equal(model_predict_quantiles.values, pyfunc_predict_quantiles.values)
    # predict_var
    model_predict_var = auto_arima_model.predict_var(fh=FH, cov=COV)
    predict_var_conf = pd.DataFrame([{"fh": FH, "predict_method": "predict_var", "cov": COV}])
    pyfunc_predict_var = loaded_pyfunc.predict(predict_var_conf)
    np.testing.assert_array_equal(model_predict_var.values, pyfunc_predict_var.values)
def test_naive_forecaster_model_with_regressor_pyfunc_output(
    naive_forecaster_model_with_regressor, model_path, data_longley
):
    """Test naive forecaster prediction of loaded pyfunc model.

    Same as the auto-arima pyfunc test but with exogenous data: X must be
    passed to the pyfunc config as an ndarray rather than a DataFrame.
    """
    _, _, _, X_test = data_longley
    flavor.save_model(sktime_model=naive_forecaster_model_with_regressor, path=model_path)
    loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path)
    X_test_array = convert(X_test, "pd.DataFrame", "np.ndarray")
    # predict
    model_predict = naive_forecaster_model_with_regressor.predict(fh=FH, X=X_test)
    predict_conf = pd.DataFrame([{"fh": FH, "predict_method": "predict", "X": X_test_array}])
    pyfunc_predict = loaded_pyfunc.predict(predict_conf)
    np.testing.assert_array_equal(model_predict, pyfunc_predict)
    # predict_interval
    model_predict_interval = naive_forecaster_model_with_regressor.predict_interval(
        fh=FH, coverage=COVERAGE, X=X_test
    )
    predict_interval_conf = pd.DataFrame(
        [
            {
                "fh": FH,
                "predict_method": "predict_interval",
                "coverage": COVERAGE,
                "X": X_test_array,
            }
        ]
    )
    pyfunc_predict_interval = loaded_pyfunc.predict(predict_interval_conf)
    np.testing.assert_array_equal(model_predict_interval.values, pyfunc_predict_interval.values)
    # predict_quantiles
    model_predict_quantiles = naive_forecaster_model_with_regressor.predict_quantiles(
        fh=FH, alpha=ALPHA, X=X_test
    )
    predict_quantiles_conf = pd.DataFrame(
        [
            {
                "fh": FH,
                "predict_method": "predict_quantiles",
                "alpha": ALPHA,
                "X": X_test_array,
            }
        ]
    )
    pyfunc_predict_quantiles = loaded_pyfunc.predict(predict_quantiles_conf)
    np.testing.assert_array_equal(model_predict_quantiles.values, pyfunc_predict_quantiles.values)
    # predict_var
    model_predict_var = naive_forecaster_model_with_regressor.predict_var(fh=FH, cov=COV, X=X_test)
    predict_var_conf = pd.DataFrame(
        [
            {
                "fh": FH,
                "predict_method": "predict_var",
                "cov": COV,
                "X": X_test_array,
            }
        ]
    )
    pyfunc_predict_var = loaded_pyfunc.predict(predict_var_conf)
    np.testing.assert_array_equal(model_predict_var.values, pyfunc_predict_var.values)
@pytest.mark.parametrize("use_signature", [True, False])
@pytest.mark.parametrize("use_example", [True, False])
def test_signature_and_examples_saved_correctly(
    auto_arima_model, data_airline, model_path, use_signature, use_example
):
    """Test saving of mlflow signature and example for native sktime predict method."""
    # Note: Signature inference fails on native model predict_interval/predict_quantiles
    prediction = auto_arima_model.predict(fh=FH)
    signature = infer_signature(data_airline, prediction) if use_signature else None
    example = pd.DataFrame(data_airline[0:5].copy(deep=False)) if use_example else None
    flavor.save_model(auto_arima_model, path=model_path, signature=signature, input_example=example)
    mlflow_model = Model.load(model_path)
    # Whatever was passed (including None) must survive the save/load cycle.
    assert signature == mlflow_model.signature
    if example is None:
        assert mlflow_model.saved_input_example_info is None
    else:
        # Round-trip the saved input example and compare to the original.
        r_example = _read_example(mlflow_model, model_path).copy(deep=False)
        np.testing.assert_array_equal(r_example, example)
@pytest.mark.parametrize("use_signature", [True, False])
def test_predict_var_signature_saved_correctly(
    auto_arima_model, data_airline, model_path, use_signature
):
    """A signature inferred from predict_var output must survive save/load."""
    prediction = auto_arima_model.predict_var(fh=FH)
    if use_signature:
        signature = infer_signature(data_airline, prediction)
    else:
        signature = None
    flavor.save_model(auto_arima_model, path=model_path, signature=signature)
    assert Model.load(model_path).signature == signature
@pytest.mark.parametrize("use_signature", [True, False])
@pytest.mark.parametrize("use_example", [True, False])
def test_signature_and_example_for_pyfunc_predict_inteval(
    auto_arima_model, model_path, data_airline, use_signature, use_example
):
    """Test saving of mlflow signature and example for pyfunc predict.

    Uses two save locations: the primary model is only used to produce a
    pyfunc predict_interval forecast whose shape seeds the signature; the
    secondary model is the one whose saved metadata is checked.
    """
    model_path_primary = model_path.joinpath("primary")
    model_path_secondary = model_path.joinpath("secondary")
    flavor.save_model(sktime_model=auto_arima_model, path=model_path_primary)
    loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path_primary)
    predict_conf = pd.DataFrame(
        [
            {
                "fh": FH,
                "predict_method": "predict_interval",
                "coverage": COVERAGE,
            }
        ]
    )
    forecast = loaded_pyfunc.predict(predict_conf)
    signature = infer_signature(data_airline, forecast) if use_signature else None
    example = pd.DataFrame(data_airline[0:5].copy(deep=False)) if use_example else None
    flavor.save_model(
        auto_arima_model,
        path=model_path_secondary,
        signature=signature,
        input_example=example,
    )
    mlflow_model = Model.load(model_path_secondary)
    assert signature == mlflow_model.signature
    if example is None:
        assert mlflow_model.saved_input_example_info is None
    else:
        # Round-trip the saved input example and compare to the original.
        r_example = _read_example(mlflow_model, model_path_secondary).copy(deep=False)
        np.testing.assert_array_equal(r_example, example)
@pytest.mark.parametrize("use_signature", [True, False])
def test_signature_for_pyfunc_predict_quantiles(
    auto_arima_model, model_path, data_airline, use_signature
):
    """Test saving of mlflow signature for pyfunc sktime predict_quantiles method.

    Primary save produces the pyfunc predict_quantiles forecast that seeds
    the signature; the secondary save's metadata is what gets asserted.
    """
    model_path_primary = model_path.joinpath("primary")
    model_path_secondary = model_path.joinpath("secondary")
    flavor.save_model(sktime_model=auto_arima_model, path=model_path_primary)
    loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path_primary)
    predict_conf = pd.DataFrame(
        [
            {
                "fh": FH,
                "predict_method": "predict_quantiles",
                "alpha": ALPHA,
            }
        ]
    )
    forecast = loaded_pyfunc.predict(predict_conf)
    signature = infer_signature(data_airline, forecast) if use_signature else None
    flavor.save_model(auto_arima_model, path=model_path_secondary, signature=signature)
    mlflow_model = Model.load(model_path_secondary)
    assert signature == mlflow_model.signature
def test_load_from_remote_uri_succeeds(auto_arima_model, model_path, mock_s3_bucket):
    """A model pushed to (mock) S3 can be reloaded and predicts identically."""
    flavor.save_model(sktime_model=auto_arima_model, path=model_path)
    artifact_root = f"s3://{mock_s3_bucket}"
    artifact_path = "model"
    S3ArtifactRepository(artifact_root).log_artifacts(model_path, artifact_path=artifact_path)
    remote_uri = os.path.join(artifact_root, artifact_path)
    reloaded_sktime_model = flavor.load_model(model_uri=remote_uri)
    np.testing.assert_array_equal(
        auto_arima_model.predict(fh=FH),
        reloaded_sktime_model.predict(fh=FH),
    )
@pytest.mark.parametrize("should_start_run", [True, False])
@pytest.mark.parametrize("serialization_format", ["pickle", "cloudpickle"])
def test_log_model(auto_arima_model, tmp_path, should_start_run, serialization_format):
    """Test logging and reloading sktime model.

    Covers both an explicitly started run and mlflow's implicit run creation;
    the finally block guarantees the run is closed either way.
    """
    try:
        if should_start_run:
            mlflow.start_run()
        artifact_path = "sktime"
        conda_env = tmp_path.joinpath("conda_env.yaml")
        _mlflow_conda_env(conda_env, additional_pip_deps=["sktime"])
        model_info = flavor.log_model(
            sktime_model=auto_arima_model,
            artifact_path=artifact_path,
            conda_env=str(conda_env),
            serialization_format=serialization_format,
        )
        # log_model must report the runs:/ URI of the active run.
        model_uri = f"runs:/{mlflow.active_run().info.run_id}/{artifact_path}"
        assert model_info.model_uri == model_uri
        reloaded_model = flavor.load_model(
            model_uri=model_uri,
        )
        np.testing.assert_array_equal(auto_arima_model.predict(), reloaded_model.predict())
        # The written MLmodel file must register the pyfunc flavor.
        model_path = Path(_download_artifact_from_uri(artifact_uri=model_uri))
        model_config = Model.load(str(model_path.joinpath("MLmodel")))
        assert pyfunc.FLAVOR_NAME in model_config.flavors
    finally:
        mlflow.end_run()
def test_log_model_calls_register_model(auto_arima_model, tmp_path):
    """Test log model calls register model."""
    artifact_path = "sktime"
    # Patch the registry call and capture the mock so the assertion can use it.
    with mlflow.start_run(), mock.patch("mlflow.register_model") as register_mock:
        env_file = tmp_path.joinpath("conda_env.yaml")
        _mlflow_conda_env(env_file, additional_pip_deps=["sktime"])
        flavor.log_model(
            sktime_model=auto_arima_model,
            artifact_path=artifact_path,
            conda_env=str(env_file),
            registered_model_name="SktimeModel",
        )
        expected_uri = f"runs:/{mlflow.active_run().info.run_id}/{artifact_path}"
        register_mock.assert_called_once_with(
            expected_uri,
            "SktimeModel",
            await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
        )
def test_log_model_no_registered_model_name(auto_arima_model, tmp_path):
    """Test log model calls register model without registered model name."""
    # Without registered_model_name, logging must never touch the registry.
    with mlflow.start_run(), mock.patch("mlflow.register_model") as register_mock:
        env_file = tmp_path.joinpath("conda_env.yaml")
        _mlflow_conda_env(env_file, additional_pip_deps=["sktime"])
        flavor.log_model(
            sktime_model=auto_arima_model,
            artifact_path="sktime",
            conda_env=str(env_file),
        )
        register_mock.assert_not_called()
def test_sktime_pyfunc_raises_invalid_df_input(auto_arima_model, model_path):
    """Test pyfunc call raises error with invalid dataframe configuration."""
    flavor.save_model(sktime_model=auto_arima_model, path=model_path)
    loaded_pyfunc = flavor.pyfunc.load_model(model_uri=model_path)
    # Each case pairs the expected error-message pattern with a malformed
    # prediction configuration; they are checked in the original order.
    bad_cases = [
        (
            "The provided prediction pd.DataFrame ",
            pd.DataFrame([{"predict_method": "predict"}, {"fh": FH}]),
        ),
        (
            "The provided prediction configuration ",
            pd.DataFrame([{"invalid": True}]),
        ),
        (
            "Invalid `predict_method` value",
            pd.DataFrame([{"predict_method": "predict_proba"}]),
        ),
    ]
    for pattern, bad_conf in bad_cases:
        with pytest.raises(MlflowException, match=pattern):
            loaded_pyfunc.predict(bad_conf)
def test_sktime_save_model_raises_invalid_serialization_format(auto_arima_model, model_path):
    """Test save_model call raises error with invalid serialization format."""
    # "json" is not among the supported serialization formats, so saving
    # must fail before anything is written.
    with pytest.raises(MlflowException, match="Unrecognized serialization format: "):
        flavor.save_model(
            sktime_model=auto_arima_model,
            path=model_path,
            serialization_format="json",
        )
|
990,513 | 52f61729420fb069b371453265a84e5f9c0e3508 | # usage: python3 airpurifier.py IP TOKEN
# dependency: python-miio
import miio
import sys
import time
# Query the device once, then emit one InfluxDB line-protocol record per metric.
purifier = miio.airpurifier.AirPurifier(sys.argv[1], sys.argv[2])
data = purifier.status().data

def print_data(key):
    """Print one metric in InfluxDB line protocol, tagged with the device IP."""
    line = 'miio,device=airpurifier,ip={} {}={} {}'.format(
        sys.argv[1], key, data[key], time.time_ns()
    )
    print(line)

for metric in (
    'aqi',
    'f1_hour',
    'f1_hour_used',
    'filter1_life',
    'humidity',
    'purify_volume',
    'sleep_time',
    'temp_dec',
    'use_time',
):
    print_data(metric)
|
990,514 | 488b6acf374346fe4921bfb0a927accab161cfdb | import numpy as np
import keras
import cv2
import copy
import os
from imgaug import augmenters as iaa
from sklearn.preprocessing import LabelEncoder
from postprocessing import interval_overlap
BASE_DIR = os.path.dirname(__file__)
IMAGES_DIR = os.path.join(BASE_DIR, 'dataset', 'images')
def bbox_iou(box1, box2):
    """Intersection-over-union of two boxes given as [xmin, ymin, xmax, ymax]."""
    overlap_w = interval_overlap([box1[0], box1[2]], [box2[0], box2[2]])
    overlap_h = interval_overlap([box1[1], box1[3]], [box2[1], box2[3]])
    intersection = overlap_w * overlap_h
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = area1 + area2 - intersection
    return float(intersection) / union
class BatchGenerator(keras.utils.Sequence):
    """Generates YOLO-style training batches for Keras.

    Each batch is ([x_batch, b_batch], y_batch):
      x_batch: normalized images, shape (batch, image_h, image_w, 3)
      b_batch: raw ground-truth boxes, shape (batch, 1, 1, 1, max_obj, 4)
      y_batch: grid-encoded targets, shape
               (batch, grid_h, grid_w, nb_anchors, 4 + 1 + num_classes)
    """

    def __init__(self, config, dataset, shuffle=True, jitter=True):
        """Store configuration, build the augmentation pipeline and shuffle."""
        self.config = config
        self.dataset = dataset
        self.image_h = config['model']['image_h']
        self.image_w = config['model']['image_w']
        self.n_channels = 3
        self.grid_h = config['model']['grid_h']
        self.grid_w = config['model']['grid_w']
        self.n_classes = config['model']['num_classes']
        self.labels = config['model']['classes']
        self.batch_size = config['train']['batch_size']
        self.max_obj = config['model']['max_obj']
        self.shuffle = shuffle
        self.jitter = jitter
        self.nb_anchors = len(config['model']['anchors']) // 2
        # Anchors are stored as [0, 0, w, h] so they can be matched against
        # width/height-only boxes via bbox_iou.
        self.anchors = [
            [0, 0, config['model']['anchors'][2 * i], config['model']['anchors'][2 * i + 1]]
            for i in range(len(config['model']['anchors']) // 2)
        ]
        self.on_epoch_end()

        # Photometric augmentation only (blur, sharpen, noise, dropout,
        # brightness, contrast): geometric transforms are deliberately left
        # out because box coordinates are not re-projected.
        self.aug_pipe = iaa.Sequential(
            [
                iaa.SomeOf(
                    (0, 3),
                    [
                        iaa.OneOf([
                            iaa.GaussianBlur((0, 3.0)),
                            iaa.AverageBlur(k=(2, 7)),
                            iaa.MedianBlur(k=(3, 11)),
                        ]),
                        iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
                        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                        iaa.OneOf([
                            iaa.Dropout((0.01, 0.1), per_channel=0.5),
                        ]),
                        iaa.Add((-10, 10), per_channel=0.5),
                        iaa.Multiply((0.5, 1.5), per_channel=0.5),
                        iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
                    ],
                    random_order=True,
                )
            ],
            random_order=True,
        )

    def __len__(self):
        """Number of batches per epoch."""
        return int(np.ceil(float(len(self.dataset)) / self.batch_size))

    def __getitem__(self, index):
        """Generate one batch of data."""
        le = LabelEncoder()
        le.fit_transform(self.labels)

        x_batch = np.zeros((self.batch_size, self.image_h, self.image_w, self.n_channels))
        b_batch = np.zeros((self.batch_size, 1, 1, 1, self.max_obj, 4))
        # Desired network output: box, objectness and one-hot class per anchor cell.
        y_batch = np.zeros((self.batch_size, self.grid_h, self.grid_w, self.nb_anchors,
                            4 + 1 + self.num_classes()))

        current_batch = self.dataset[index * self.batch_size:(index + 1) * self.batch_size]
        instance_num = 0
        for instance in current_batch:
            img, object_annotations = self.prep_image_and_annot(instance, jitter=self.jitter)
            # BUGFIX: normalize and store the image exactly once per instance.
            # Previously this happened inside the per-object loop, so an image
            # with k valid objects was divided by 255 k times, and an image
            # with no valid objects was never written into x_batch at all.
            x_batch[instance_num] = self.normalize(img)

            obj_num = 0
            # Encode each valid annotation into the grid cell containing the
            # box center, using the anchor with the best (w, h) IoU.
            for obj in object_annotations:
                if obj['xmax'] > obj['xmin'] and obj['ymax'] > obj['ymin'] and obj['class'] in self.labels:
                    # Box center in grid units (0 .. grid_w/h - 1).
                    center_x = .5 * (obj['xmin'] + obj['xmax'])
                    center_x = center_x / (float(self.image_w) / self.grid_w)
                    center_y = .5 * (obj['ymin'] + obj['ymax'])
                    center_y = center_y / (float(self.image_h) / self.grid_h)
                    grid_x = int(np.floor(center_x))
                    grid_y = int(np.floor(center_y))
                    if grid_x < self.grid_w and grid_y < self.grid_h:
                        center_w = (obj['xmax'] - obj['xmin']) / (float(self.image_w) / self.grid_w)
                        center_h = (obj['ymax'] - obj['ymin']) / (float(self.image_h) / self.grid_h)
                        box = [center_x, center_y, center_w, center_h]

                        # Pick the anchor whose width/height best fit this box.
                        best_anchor = -1
                        max_iou = -1
                        shifted_box = [0, 0, center_w, center_h]
                        for i in range(len(self.anchors)):
                            iou = bbox_iou(shifted_box, self.anchors[i])
                            if max_iou < iou:
                                best_anchor = i
                                max_iou = iou

                        # NOTE(review): this one-hot vector is hard-coded for two
                        # classes and will not match y_batch's last dimension if
                        # num_classes != 2 — confirm the config always uses 2.
                        classes = [0, 0]
                        obj_label = int(le.transform([obj['class']]))
                        if obj_label == 0:
                            classes[0] = 1
                        else:
                            classes[1] = 1

                        b_batch[instance_num, 0, 0, 0, obj_num] = box
                        y_batch[instance_num, grid_y, grid_x, best_anchor] = [
                            box[0], box[1], box[2], box[3], 1.0, classes[0], classes[1]
                        ]
                        # Wrap around so at most max_obj boxes are kept.
                        obj_num += 1
                        obj_num %= self.max_obj
            instance_num += 1
        return [x_batch, b_batch], y_batch

    def prep_image_and_annot(self, dataset_instance, jitter):
        """Load, optionally augment and resize an image; rescale its boxes to match."""
        image_path = dataset_instance['image_path']
        image = self.load_image(os.path.join(IMAGES_DIR, image_path))
        h, w, c = image.shape
        if jitter:
            image = self.aug_pipe.augment_image(image)
        # Resize to the network's fixed input size.
        image = cv2.resize(image, (self.image_h, self.image_w))
        # Deep-copy the annotations so the stored dataset is never mutated.
        object_annotations = copy.deepcopy(dataset_instance['object'])
        for obj in object_annotations:
            for attr in ['xmin', 'xmax']:
                obj[attr] = int(obj[attr] * float(self.image_w) / w)
                obj[attr] = max(min(obj[attr], self.image_w), 0)
            for attr in ['ymin', 'ymax']:
                obj[attr] = int(obj[attr] * float(self.image_h) / h)
                obj[attr] = max(min(obj[attr], self.image_h), 0)
        return image, object_annotations

    def on_epoch_end(self):
        """Shuffle the dataset in place after each epoch (if enabled)."""
        if self.shuffle:
            np.random.shuffle(self.dataset)

    def load_image(self, path):
        """Read an image from disk and convert it to RGB.

        NOTE(review): prep_image_and_annot already joins IMAGES_DIR onto the
        path, so the join below is a no-op only because the incoming path is
        absolute — confirm BASE_DIR is always absolute.
        """
        img = cv2.imread(os.path.join(IMAGES_DIR, path))
        try:
            if len(img.shape) == 3:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            else:
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        except AttributeError:
            # cv2.imread returns None for unreadable files; keep the original
            # best-effort behavior of logging the path and returning as-is.
            print(path)
        return img

    def load_annotation(self, i):
        """Return annotations of instance i as rows of [xmin, ymin, xmax, ymax, label]."""
        annots = []
        for obj in self.dataset[i]['object']:
            annot = [obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], self.labels.index(obj['class'])]
            annots += [annot]
        if len(annots) == 0:
            annots = [[]]
        return np.array(annots)

    def normalize(self, image):
        """Scale pixel values from [0, 255] to [0, 1]."""
        return image / 255.

    def num_classes(self):
        """Number of class labels."""
        return len(self.labels)

    def size(self):
        """Number of instances in the dataset."""
        return len(self.dataset)
990,515 | bbbfac08d40e81b5d9a162a9fd4c2f14ec7e93ad | import argparse
from estimator import run
# Command-line entry point: parse training options and hand them straight to
# estimator.run().  Positional arguments select the mode and I/O locations;
# optional flags tune the optimizer schedule and model architecture.
parser = argparse.ArgumentParser()
parser.add_argument("mode")
parser.add_argument("base_path")
parser.add_argument("model_dir")
parser.add_argument("-B", "--batch_size",
                    type=int,
                    default=128)
parser.add_argument("-L", "--learning_rate",
                    nargs=2,
                    type=float,
                    default=[0.001, 0.0000001],
                    help="Initial/final learning rate.")
parser.add_argument("-D", "--decay",
                    nargs=2,
                    type=float,
                    default=[100000, 2.0],
                    help="Decay steps and power.")
# NOTE(review): no `type=` here, so values given on the command line arrive as
# strings while the default stays [None, 0.0] — confirm estimator.run() expects
# that mix.
parser.add_argument("-R", "--reg",
                    nargs=2,
                    default=[None, 0.0],
                    help="Regularization type and coefficient.")
parser.add_argument("-M", "--mel",
                    action="store_true",
                    help="Use mel spectrogram data instead (from TFR!!).")
parser.add_argument("-X", "--mlp",
                    type=int,
                    default=0,
                    help="Use MLP with FLAG hidden units...")
parser.add_argument("-Y", "--dropout",
                    action="store_true",
                    help="Use dropout in hidden layer.")
parser.add_argument("-C", "--conv",
                    action="store_true",
                    help="Use CNN.")
args = parser.parse_args()
run(args.mode, args.base_path, args.model_dir,
    args.batch_size, args.learning_rate, args.decay, args.reg, args.mel,
    args.mlp, args.dropout, args.conv)
|
990,516 | 1cfd364d532058e12daddc0f2e5036592cd86e56 | """
BITalino API
Created on Tue Jun 25 2013
@author: Priscila Alves
Adapted on Wed 18 Dec 2013 for Raspberry Pi
@author: Jose Guerreiro
"""
import BITalinoPi
try:
    # Example acquisition session against a BITalino board (Python 2 syntax).
    device = BITalinoPi.BITalino()
    SamplingRate = 10
    nSamples = 10
    device.open(SamplingRate)
    BITversion = device.version()
    print "version: ", BITversion
    # Stream all six analog channels; pulse the digital outputs around the read.
    device.start([0,1,2,3,4,5])
    device.trigger([1,1,1,1])
    #read samples
    dataAcquired = device.read(nSamples)
    device.trigger([0,0,0,0])
    device.stop()
    device.close()
    # Row layout of the acquired matrix: sequence number, four digital inputs
    # (D0-D3), then the six analog channels (A0-A5); one column per sample.
    SeqN = dataAcquired[0,:]
    D0 = dataAcquired[1,:]
    D1 = dataAcquired[2,:]
    D2 = dataAcquired[3,:]
    D3 = dataAcquired[4,:]
    A0 = dataAcquired[5,:]
    A1 = dataAcquired[6,:]
    A2 = dataAcquired[7,:]
    A3 = dataAcquired[8,:]
    A4 = dataAcquired[9,:]
    A5 = dataAcquired[10,:]
    print SeqN
    print A0
    print A1
    print A2
    print A3
    print A4
    print A5
except KeyboardInterrupt:
    # Leave the device stopped and closed if the user interrupts mid-session.
    device.stop()
    device.close()
|
990,517 | 060e655408dbe78e76296579f4b1cfcb4351ac55 | #!/usr/bin/env python
""" Generates TOD pickle file.
"""
import argparse
import glob
import os
from atl02v.tod.tod import TOD
from atl02v.shared.paths import path_to_data, path_to_outputs
from atl02v.shared.tools import make_file_dir, pickle_in
from gen_tof import get_size
def generate(path_in, atl01_file=None, anc13_path=None, anc27_path=None):
    """Build a TOD object from ATL01/ANC13/ANC27 inputs and pickle it.

    Arguments:
        path_in (str): Path, relative to the data/ directory, containing the
            input ATL01, ANC13 and ANC27 files.
        atl01_file (str): Optional explicit ATL01 file; defaults to the first
            'ATL01_*.h5' found under path_in.
        anc13_path (str): Optional ANC13 directory; defaults to path_in.
        anc27_path (str): Optional ANC27 directory; defaults to path_in.

    Returns:
        str: Name of the pickle file written under the outputs data directory.

    Raises:
        FileNotFoundError: If no matching ATL01 or ATL02 file exists.
    """
    # `is None` (not `== None`) is the idiomatic null test and avoids
    # surprises with objects that override __eq__.
    if atl01_file is None:
        atl01_matches = glob.glob(os.path.join(path_to_data, path_in, 'ATL01_*.h5'))
        if not atl01_matches:
            raise FileNotFoundError('No ATL01_*.h5 file found under {}'.format(path_in))
        atl01_file = atl01_matches[0]
    if anc13_path is None:
        anc13_path = os.path.join(path_to_data, path_in)
    if anc27_path is None:
        anc27_path = os.path.join(path_to_data, path_in)
    # The ATL02 file is only used to name/locate the pickle output.
    atl02_matches = glob.glob(os.path.join(path_to_data, path_in, 'ATL02_*.h5'))
    if not atl02_matches:
        raise FileNotFoundError('No ATL02_*.h5 file found under {}'.format(path_in))
    atl02_file = atl02_matches[0]
    tod = TOD(atl01_file=atl01_file, anc13_path=anc13_path, anc27_path=anc27_path,
              verbose=False, mf_limit=None)
    out_filename = pickle_in(tod, out_location=make_file_dir(os.path.join(path_to_outputs, 'data'), atl02_file))
    return out_filename
def parse_args():
    """Parses command line arguments.

    Only --p is required; the per-file options override the files that would
    otherwise be discovered under that path.
    """
    parser = argparse.ArgumentParser()
    # (flag, dest, required, default, help)
    option_specs = [
        ('--p', 'path_in', True, '',
         "Path relative to the data/ directory, to the input ATL01, ANC13, and ANC27 files."),
        ('--atl01', 'atl01_file', False, None,
         "Path + filename to directory of the ATL01."),
        ('--anc13', 'anc13_path', False, None,
         "Path to outputs directory of the ANC13."),
        ('--anc27', 'anc27_path', False, None,
         "Path to directory of the ANC27."),
    ]
    for flag, dest, required, default, help_text in option_specs:
        parser.add_argument(flag, dest=dest,
                            action='store', type=str,
                            required=required, default=default,
                            help=help_text)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Echo the resolved inputs before kicking off generation.
    for field in ('path_in', 'atl01_file', 'anc13_path', 'anc27_path'):
        print("{}={}".format(field, getattr(args, field)))
    # --p points at the directory holding all input files; the optional
    # per-file flags override individual files (e.g. the ANC13).
    generate(path_in=args.path_in, atl01_file=args.atl01_file,
             anc13_path=args.anc13_path, anc27_path=args.anc27_path)
|
990,518 | d7cff9228acb56feb513a36c2511e5b926101357 | from pyramid.response import Response # noqa
from pyramid.view import view_config
from collections import OrderedDict
@view_config(route_name='home', renderer='json')
def home_view(request):
    """Render the API landing page as JSON.

    Returns an ordered mapping with a greeting, links to the main API
    entry points, and a few example search URLs.
    """
    hello = OrderedDict()
    # BUGFIX: the original assignment ended with a stray trailing comma,
    # which wrapped the string in a 1-tuple; 'message' is now a plain string.
    hello['message'] = (
        'Hello. some day, this page will be a pretty HTML document. '
        'Right now though, you can just use it to link to other pages '
        'in the app that are complete.'
    )
    hello['links'] = [
        request.route_url('api_v1_country_search', api_version='v1'),
        request.route_url('api_v1_disease_landing', api_version='v1'),
    ]
    hello['search-examples'] = [
        '%s?count=100:&year=1995:' % (
            request.route_url(
                'api_v1_disease_search',
                api_version='v1',
                url_name='buruli-ulcer'
            )
        ),
        '%s?count=100000:200000' % (
            request.route_url(
                'api_v1_disease_search',
                api_version='v1',
                url_name='guinea-worm'
            )
        ),
        '%s?country=us' % (
            request.route_url(
                'api_v1_disease_search',
                api_version='v1',
                url_name='rabies'
            )
        ),
    ]
    return hello
|
990,519 | 3e0f100e843dfdb47f981d73a2bac402ab3b928b | # 9095 - 1, 2, 3 더하기(다이나믹 프로그래밍)
def int_input():
    """Read a single line from stdin and return it as an int."""
    line = input()
    return int(line)
def ints_input():
    """Lazily yield the space-separated integers from one stdin line."""
    yield from map(int, input().split(' '))
def main():
    """Answer BOJ 9095: ways to write n as an ordered sum of 1, 2 and 3."""
    case_count = int_input()
    # ways[n] = number of ordered compositions of n from parts {1, 2, 3};
    # the problem guarantees n <= 10, so the table is precomputed once.
    ways = [0, 1, 2, 4] + [0] * 7
    for n in range(4, 11):
        ways[n] = ways[n - 1] + ways[n - 2] + ways[n - 3]
    for _ in range(case_count):
        print(ways[int_input()])
# Run the solver only when executed as a script.
if __name__ == '__main__':
    main()
|
990,520 | 5f6ede504f5a33148f4c12210e024d931cd12a09 |
import importlib
import tensorflow as tf
import kaleido as kld
import numpy as np
##### TESTER
class Tester( kld.Algorithm ):
    """Runs a trained fast-style-transfer network over a directory of images.

    For every input file it saves two results: a whole-image pass and a tiled
    ("split") pass stitched from overlapping patches.
    NOTE(review): preInit/preBuild/preTest/load_network/load_model/store_model
    presumably come from kld.Algorithm — confirm against that base class.
    """

    ### __INIT__
    def __init__( self , args ):
        # Builds the graph and immediately runs the test pass.
        self.preInit()
        self.args = args
        self.sess = tf.Session()
        self.load_network()
        args.image_test = kld.prepare_image_dict( args.image_test )
        self.build( args )
        self.test( args )

    ### BUILD
    def build( self , args ):
        """Builds the inference graph: one RGB image in, clipped RGB image out."""
        self.preBuild()
        # Variable-sized single image; a batch dimension is added before the net.
        self.x = kld.plchf( [ None , None , 3 ] , 'input' )
        self.xi = tf.expand_dims( self.x , 0 )
        self.yh = args.net.build( self.xi )
        self.yh = tf.squeeze( self.yh )
        # Keep outputs in valid 8-bit range.
        self.yh = tf.clip_by_value( self.yh , 0.0 , 255.0 )

    ### TEST
    def test( self , args ):
        """Styles every file in args.input_dir, saving split and full results."""
        self.preTest()
        model_name = kld.basename( args.model_dir )
        suffix = '%s_%d.jpg' % ( model_name , args.image_test['size'] )
        self.load_model()
        files = kld.get_dir_files( args.input_dir )
        for file in files:
            print( '%d - %s' % ( args.image_test['size'] , file ) )
            file_name = kld.basename( file )[:-4]
            file_dir = '%s/%s' % ( args.input_dir , file_name )
            kld.make_dir( file_dir )
            input = self.load_image( file , args.image_test )
            # Tile the image into an n-by-n grid of (hs, ws) patches, running
            # each patch with `pad` pixels of context on interior edges.
            size , pad = 256 , 128
            h , w , c = input.shape
            n = int( np.ceil( max( h , w ) / size ) )
            hs , ws = int( h / n ) , int( w / n )
            canvas = np.zeros( input.shape )
            for i in range( 0 , h , hs ):
                for j in range( 0 , w , ws ):
                    hst , hfn = i , i + hs
                    wst , wfn = j , j + ws
                    # Negative start / positive end offsets extend the crop with
                    # padding except at the image borders.
                    hstp , hfnp , wstp , wfnp = 0 , 0 , 0 , 0
                    if i > 0: hstp -= pad
                    if j > 0: wstp -= pad
                    if i < n - 1: hfnp += pad
                    if j < n - 1: wfnp += pad
                    input_ij = input[ hst + hstp : hfn + hfnp , wst + wstp : wfn + wfnp , : ]
                    output_ij = self.sess.run( self.yh , feed_dict = { self.x : input_ij } )
                    # Crop the padded context back out before pasting the tile.
                    canvas[ hst : hfn , wst : wfn , : ] = output_ij[ - hstp : hs - hstp , - wstp : ws - wstp , : ]
            path = '%s/split_%s_%s' % ( file_dir , file_name , suffix )
            kld.save_image( canvas , path )
            # Whole-image pass for comparison with the tiled result.
            output = self.sess.run( self.yh , feed_dict = { self.x : input } )
            path = '%s/%s_%s' % ( file_dir , file_name , suffix )
            kld.save_image( output , path )
        self.store_model( 'fast_style_transfer' )
|
990,521 | 2c7d85845c8a3ee5814978f27138c910cb67c406 | __all__ = [
"read_IGRF13_COF",
"read_IGRF13coeffs",
"read_WMM",
"read_fortran_DATA",
"read_gauss_coeff",
"read_WWW_test_2020",
]
def read_gauss_coeff(file=None):
    '''Reads the tabulated Gauss coefficients, dispatching on the file name.

    Arguments:
        file (string): name of the file; must be
            "IGRF13.COF" (default) or
            "IGRF13coeffs.txt" or
            "WMM_2015.COF" or
            "WMM_2020.COF" or
            "FORTRAN_1900_1995.txt"

    Returns:
        dic_dic_h (dict of dict): h coefficients {year: {(m,n):h,...},...} year is a string
        dic_dic_g (dict of dict): g coefficients {year: {(m,n):g,...},...} year is a string
        dic_dic_SV_h (dict of dict): SV_h coefficients {year: {(m,n):SV_h,...},...}
            (empty dict for the FORTRAN file, which tabulates no secular variation)
        dic_dic_SV_g (dict of dict): SV_g coefficients {year: {(m,n):SV_g,...},...}
            (empty dict for the FORTRAN file)
        dic_N (dict): order N of the SH decomposition per year, dic_N[year]=N
        Years (nparray): array of the tabulated years

    Raises:
        Exception: if the file name is not one of the recognized formats.
    '''
    if file is None:
        file = "IGRF13.COF"
    if file == "IGRF13.COF":
        (
            dic_dic_h,
            dic_dic_g,
            dic_dic_SV_h,
            dic_dic_SV_g,
            dic_N,
            Years,
        ) = read_IGRF13_COF(file)
    elif file == "IGRF13coeffs.txt":
        (
            dic_dic_h,
            dic_dic_g,
            dic_dic_SV_h,
            dic_dic_SV_g,
            dic_N,
            Years,
        ) = read_IGRF13coeffs(file)
    elif file in ("WMM_2015.COF", "WMM_2020.COF"):
        dic_dic_h, dic_dic_g, dic_dic_SV_h, dic_dic_SV_g, dic_N, Years = read_WMM(file)
    elif file == "FORTRAN_1900_1995.txt":
        # BUGFIX: read_fortran_DATA returns no secular-variation tables, and the
        # SV names were previously left unbound, so the return statement raised
        # NameError. Empty dicts keep the 6-tuple contract intact.
        dic_dic_h, dic_dic_g, dic_N, Years = read_fortran_DATA(file)
        dic_dic_SV_h, dic_dic_SV_g = {}, {}
    else:
        raise Exception(f"undefined file :{file}")
    return dic_dic_h, dic_dic_g, dic_dic_SV_h, dic_dic_SV_g, dic_N, Years
def read_IGRF13_COF(file):
    """read_hg assigns the IGRF13.COF coefficients h and g, in unit of nT, from the text file
    available along with the Geomag 7.0 software (Windows version) https://www.ngdc.noaa.gov/IAGA/vmod/igrf.html
    Arguments
        file containing the coefficients h, g, SVh, SVg versus m, n, year (IGRF13.COF)
    Returns
        dic_dic_h (dict of dict): h coefficients {year: {(m,n):h,...},...} year is a string
        dic_dic_g (dict of dict): g coefficients {year: {(m,n):g,...},...} year is a string
        dic_dic_SV_h (dict of dict): SV_h coefficients {year: {(m,n):SV_h,...},...} year is a string
        dic_dic_SV_g (dict of dict): SV_g coefficients {year: {(m,n):SV_g,...},...} year is a string
        dic_N (dict): dictionary containing the order N of the SH decomposition, dic_N[year]=N
        Years (nparray): array of the tabulated year"""
    # Standard Library dependencies
    import re
    import os

    # 3rd party dependencies
    import pandas as pd
    import numpy as np

    # The coefficient file lives next to this module.
    file = os.path.join(os.path.dirname(__file__), file)
    # Read the whitespace-separated table and keep only the first 6 columns
    # (assumed layout n, m, g, h, SVg, SVh — TODO confirm against the file).
    df = pd.read_table(file, delim_whitespace=True, names=[str(i) for i in range(12)])[
        [str(i) for i in range(6)]
    ]
    # Header rows containing "IGRF"/"DGRF" mark the start of each epoch block.
    indexes_year = [
        (i, x) for i, x in enumerate(list(df["0"])) if ("IGRF" in x) or ("DGRF" in x)
    ]
    indexes = [x[0] for x in indexes_year]
    # Extract the year digits from each header label.
    year = [re.findall(r"\d+", x[1])[0] for x in indexes_year]
    dic_dic_g = {}
    dic_dic_h = {}
    dic_dic_SV_g = {}
    dic_dic_SV_h = {}
    dic_N = {}
    years = []
    dfs = df
    # Walk the epoch blocks; nitems is the row count of the current block
    # (the last block runs to the end of the table).
    for i, nitems in enumerate(
        np.append(np.diff(indexes), [len(df["0"]) - indexes[-1]])
    ):
        # Two-digit years are 19xx in this file.
        if len(year[i]) == 2:
            year[i] = "19" + year[i]
        # Peel off the current block; the remainder is processed next iteration.
        dfs = np.split(dfs, [nitems], axis=0)
        # Drop the block's header row.
        dg = dfs[0].iloc[1:]
        # Keys are (m, n) tuples (column "1" is m, column "0" is n —
        # presumably; verify against the file layout).
        dic_dic_g[year[i]] = {
            (int(x[0]), int(x[1])): x[2] for x in zip(dg["1"], dg["0"], dg["2"])
        }
        dic_dic_h[year[i]] = {
            (int(x[0]), int(x[1])): x[2] for x in zip(dg["1"], dg["0"], dg["3"])
        }
        dic_dic_SV_g[year[i]] = {
            (int(x[0]), int(x[1])): x[2] for x in zip(dg["1"], dg["0"], dg["4"])
        }
        dic_dic_SV_h[year[i]] = {
            (int(x[0]), int(x[1])): x[2] for x in zip(dg["1"], dg["0"], dg["5"])
        }
        # Order of the expansion = largest first key index present.
        dic_N[year[i]] = max([x[0] for x in dic_dic_g[year[i]].keys()])
        years.append(float(year[i]))
        dfs = dfs[1]
    years = np.array(years)
    return dic_dic_h, dic_dic_g, dic_dic_SV_h, dic_dic_SV_g, dic_N, np.array(years)
def read_IGRF13coeffs(file):
    """read_hg assigns the IGRF-13 coefficients h and g, in unit of nT, from the text file
    downloaded from https://www.ngdc.noaa.gov/IAGA/vmod/igrf.html
    Arguments:
        file (string): name of the file (IGRF13coeffs.txt)
    Returns:
        dic_dic_h (dict of dict): h coefficients {year: {(m,n):h,...},...}
        dic_dic_g (dict of dict): g coefficients {year: {(m,n):g,...},...}
        dic_dic_SV_h (dict of dict): SV_h coefficients {year: {(m,n):SV_h,...},...}
        dic_dic_SV_g (dict of dict): SV_g coefficients {year: {(m,n):SV_g,...},...}
        dic_N (dict): dictionary containing the order N of the SH decomposition, dic_N[year]=N
        Years (list): list of the tabulated year"""
    # Standard Library dependencies
    import os

    # 3rd party dependencies
    import numpy as np
    import pandas as pd

    file = os.path.join(os.path.dirname(__file__), file)
    # Skip the 3 leading comment lines; columns are 'g/h', 'n', 'm', then one
    # column per epoch plus a final "2020-25" secular-variation column.
    df = pd.read_csv(file, header=3, sep="\s+")
    # Epoch columns look like "1900.0", "1905.0", ...; this filter deliberately
    # excludes the "2020-25" column, handled separately below.
    Years = [x for x in df.columns if x[-2:] == ".0"]
    v = []
    for x in df.groupby("g/h"):
        v.append(x)
    # groupby sorts its keys, so 'g' rows come first, then 'h' rows.
    g = v[0][1]
    h = v[1][1]
    dic_dic_g = {}
    dic_dic_h = {}
    dic_dic_SV_g = {}
    dic_dic_SV_h = {}
    dic_N = {}
    for Year in Years:
        # Normalize "1900.0" -> "1900" for use as a dict key.
        key_Year = str(int(float(Year)))
        dic_dic_g[key_Year] = {(x[0], x[1]): x[2] for x in zip(g["m"], g["n"], g[Year])}
        dic_dic_h[key_Year] = {(x[0], x[1]): x[2] for x in zip(h["m"], h["n"], h[Year])}
        # Secular variation defaults to 0 for every epoch; 2020 is overridden below.
        dic_dic_SV_g[key_Year] = {(x[0], x[1]): 0 for x in zip(g["m"], g["n"])}
        dic_dic_SV_h[key_Year] = {(x[0], x[1]): 0 for x in zip(g["m"], g["n"])}
        index = set([x[0] for x in dic_dic_h[key_Year].keys()])
        N = max(index)
        dic_N[key_Year] = N  # must be 13
        # The file has no h rows for m == 0; fill those entries with zeros.
        for n in range(1, N + 1):
            dic_dic_h[key_Year][(0, n)] = 0
            dic_dic_SV_h[key_Year][(0, n)] = 0
    # The final "2020-25" column holds the predicted secular variation for 2020.
    dic_dic_SV_h["2020"] = {
        (x[0], x[1]): x[2] for x in zip(h["m"], h["n"], h["2020-25"])
    }
    dic_dic_SV_g["2020"] = {
        (x[0], x[1]): x[2] for x in zip(g["m"], g["n"], g["2020-25"])
    }
    Years = np.array([float(x) for x in Years])
    return dic_dic_h, dic_dic_g, dic_dic_SV_h, dic_dic_SV_g, dic_N, Years
def read_WMM(file):
    """read_hg assigns the WMM coefficients h and g, in unit of nT, from the text file
    downloaded from https://www.ngdc.noaa.gov/geomag/WMM/wmm_ddownload.shtml
    Arguments:
        file (string): name of the file (WMM_2015.COF or WMM_2020.COF)
    Returns:
        dic_dic_h (dict of dict): h coefficients {year: {(m,n):h,...},...}
        dic_dic_g (dict of dict): g coefficients {year: {(m,n):g,...},...}
        dic_dic_SV_h (dict of dict): SV_h coefficients {year: {(m,n):SV_h,...},...}
        dic_dic_SV_g (dict of dict): SV_g coefficients {year: {(m,n):SV_g,...},...}
        dic_N (dict): dictionary containing the order N of the SH decomposition, dic_N[year]=N
        Years (list): list of the year provided
    """
    # Standard Library dependencies
    import re
    import os

    # 3rd party dependencies
    import pandas as pd
    import numpy as np

    file = os.path.join(os.path.dirname(__file__), file)
    # skipfooter=2 drops the terminator lines at the end of the .COF file.
    df = pd.read_csv(file, sep="\s+", skipfooter=2, engine="python")
    # The header line is parsed into the index; flatten it back into columns
    # (presumably n/m land in the index levels — confirm against a sample file).
    df = df.reset_index(level=[0, 1])
    df = df.reset_index()
    df.columns = ["g", "n", "m", "h", "SVg", "SVh"]
    # A WMM file tabulates a single epoch, encoded in its name (e.g. WMM_2020.COF).
    year = re.findall(r"\d+", os.path.basename(file))[0]
    dic_dic_h = {year: {(x[0], x[1]): x[2] for x in zip(df["m"], df["n"], df["h"])}}
    dic_dic_g = {year: {(x[0], x[1]): x[2] for x in zip(df["m"], df["n"], df["g"])}}
    dic_dic_SV_h = {
        year: {(x[0], x[1]): x[2] for x in zip(df["m"], df["n"], df["SVh"])}
    }
    dic_dic_SV_g = {
        year: {(x[0], x[1]): x[2] for x in zip(df["m"], df["n"], df["SVg"])}
    }
    # Order of the expansion = largest m index present among the keys.
    dic_N = {year: max(set([x[0] for x in dic_dic_h[year].keys()]))}
    Years = np.array([float(year)])
    return dic_dic_h, dic_dic_g, dic_dic_SV_h, dic_dic_SV_g, dic_N, Years
def read_fortran_DATA(file):
    """read_hg assigns the coefficients h and g, in unit of nT, as
    extracted from the FORTRAN program IGRF13 https://www.ngdc.noaa.gov/IAGA/vmod/igrf13.f
    Arguments:
        file (string): name of the file (FORTRAN_1900_1995.txt or FORTRAN_2000_2020.txt)
    Returns:
        dic_dic_h (dict of dict): h coefficients {year: {(m,n):h,...},...}
        dic_dic_g (dict of dict): g coefficients {year: {(m,n):g,...},...}
        dic_N (dict): dictionary containing the order N of the SH decomposition, dic_N[year]=N
        Years (nparray): array of the years provided

    Note: unlike the other readers in this module, this one returns only 4
    values — the FORTRAN tables contain no secular-variation coefficients.
    """
    # Standard Library dependencies
    import re
    import os

    # 3rd party dependencies
    import pandas as pd
    import numpy as np

    file = os.path.join(os.path.dirname(__file__), file)

    def construct_dic(df):
        # Rebuilds the (m, n) -> g and (m, n) -> h dicts from one year's rows.
        # The first cell of each row presumably carries a "<label> <value>"
        # pair; keep only the numeric part — TODO confirm against a sample file.
        df[0] = df[0].apply(lambda x: float(x.split(" ")[-1]))
        # Drop the trailing year-label column, then flatten row-major.
        df = df.drop([df.columns[-1]], axis=1)
        df = df.T
        res = []
        for x in df.columns:
            res = res + list(df[x])
        # An order-N expansion stores N*(N+2) coefficients (2n+1 per degree n);
        # recover N from the flattened length.
        N = 0
        while len(res) - N * N - 2 * N > 0:
            N += 1
        N -= 1
        dic_g = {}
        dic_h = {}
        idx = 0
        # Coefficients are interleaved g, h per (m, n); h is implicitly 0 when m == 0.
        for n in range(1, N + 1):
            for m in range(0, n + 1):
                dic_g[(m, n)] = res[idx]
                idx += 1
                if m == 0:
                    dic_h[(0, n)] = 0
                else:
                    dic_h[(m, n)] = res[idx]
                    idx += 1
        return N, dic_h, dic_g

    df = pd.read_csv(file, sep=",", header=None, skipfooter=0, engine="python")
    dic_dic_h = {}
    dic_dic_g = {}
    dic_N = {}
    Years = []
    # The last column labels each row with its year; process one year at a time.
    for dg in df.groupby(df.columns[-1]):
        year = str(dg[0])
        dh = dg[1].copy()
        N, dic_h, dic_g = construct_dic(dh)
        dic_dic_h[year] = dic_h
        dic_dic_g[year] = dic_g
        dic_N[year] = N
        Years.append(float(year))
    Years = np.array(Years)
    return dic_dic_h, dic_dic_g, dic_N, Years
def read_WWW_test_2020(index):
    """reads the Test Values for WMM2020 .xlsx file
    Arguments:
        index (int): index>=0 and index < 11 (row of the test table to return)
    Returns:
        Date (dict): {"mode": "dec", "year": decimal year of the test point}
        height (float): height in meters
        colatitude (float): colatitude in °
        longitude (float): longitude in °
        WMM (dict): remaining test columns for that row, keyed by their
            spreadsheet headers (which contain embedded newlines)
    """
    import pandas as pd
    import os

    # NOTE: assert is stripped under `python -O`; this is light input checking only.
    assert (index >= 0 and index < 11), "invalid index must be >=0 and <11"
    file = os.path.join(os.path.dirname(__file__), 'WMM2020testvalues.xlsx')
    df = pd.read_excel(file, header=1)
    WMM = df.to_dict()
    # Keep only the requested test row from each column.
    WMM = {key: value[index] for key, value in WMM.items()}
    Date = {"mode": "dec", "year": WMM['Date']}
    # The spreadsheet stores height in km; convert to meters.
    height = WMM['Height\n(km)'] * 1000
    # Convert geodetic latitude to colatitude.
    colatitude = 90 - WMM['Lat\n(Deg)']
    longitude = WMM['Lon\n(Deg)']
    # Drop the coordinate columns so WMM holds only the expected field values.
    del WMM['Date']
    del WMM['Height\n(km)']
    del WMM['Lat\n(Deg)']
    del WMM['Lon\n(Deg)']
    return Date, height, colatitude, longitude, WMM
|
990,522 | effafadf4d8596895464cd965c147ce466061154 | # coding=utf-8
from django.conf.urls import patterns, url
# Route table for the contents app, using the legacy Django (<1.8)
# string-view `patterns()` helper.
urlpatterns = patterns(
    'app.contents.views',
    url(regex='^articles/list$', view='list_articles', name=u'list_articles'),
    url(regex='^articles/new$', view='new_article', name=u'new_article'),
    url(regex='^articles/edit/(?P<id>\d+)$', view='edit_article', name=u'edit_article'),
    url(regex='^articles/delete/(?P<id>\d+)$', view='delete_article', name=u'delete_article'),
    # NOTE(review): several patterns below have no trailing '$', so they match
    # any URL merely *prefixed* by these paths (unlike the article routes
    # above) — confirm whether prefix matching is intended.
    url(regex='^tags/list', view='list_tags', name=u'list_tags'),
    url(regex='^tags/add', view='add_tag', name=u'add_tag'),
    url(regex='^tags/del', view='del_tag', name=u'del_tag'),
    url(regex='^categories/list', view='list_categories', name=u'list_categories'),
    url(regex='^categories/new', view='new_category', name=u'new_category'),
    url(regex='^categories/edit/(?P<id>\d+)$', view='edit_category', name=u'edit_category'),
    url(regex='^categories/delete/(?P<id>\d+)$', view='delete_category', name=u'delete_category'),
)
|
990,523 | 1603e64203aa15f6dbd4227f9ac329e3f85f8335 | #!/usr/bin/env python
# Adds stat uncertainty to json file output from combineTool.py -M Impacts
import json
from argparse import ArgumentParser
from ROOT import TFile
# Adds the stat (and optionally MC bin-wise stat) uncertainty entries to a
# combineTool.py Impacts json file. Python 2 script (uses xrange).
parser = ArgumentParser()
parser.add_argument("-j", "--json", help="input json file")
parser.add_argument("-s", "--statF", default="higgsCombine_paramFit_Test_stat.MultiDimFit.mH125.root")
parser.add_argument("--addBinStats", action="store_true", default=False, help="add barlow-beeston lite binwise stat unc nps")
parser.add_argument("-b", "--binStatF", default="higgsCombine_paramFit_Test_MCbinStats.MultiDimFit.mH125.root")
parser.add_argument("-q", "--quiet", action="store_true", default=False, help="run silently")
parser.add_argument("-o", "--outF", default="", help="store results in this output file (or overwrite original json file if left empty")
args = parser.parse_args()
addBinStats = args.addBinStats

with open(args.json) as jsonfile:
    data = json.load(jsonfile)


def _read_mt_impact(fname):
    # Reads the MultiDimFit "limit" tree and returns
    # ([-sigma, nominal, +sigma], impact) for the MT branch.
    f = TFile.Open(fname)
    t = f.Get("limit")
    vals = [t.MT for evt in t]
    mt = [vals[1], vals[0], vals[2]]  # tree order is nominal, -sigma, +sigma
    impact = max(abs(mt[2] - mt[1]), abs(mt[0] - mt[1]))
    return mt, impact


def _upsert_param(data, name, mt, impact):
    # Replaces the named parameter entry if present, otherwise appends it.
    # BUGFIX: the original MC loop assigned `statPosition` instead of
    # `MCstatPosition`, so MCbinStats entries were duplicated on re-runs and
    # the wrong slot was remembered; searching by name here fixes both.
    vals = {
        u'name': name,
        u'MT': mt,
        u'impact_MT': impact,
        u'impact_r': 0.0,
        u'prefit': [-1.0, 0.0, 1.0],
        u'fit': [-1.0, 0.0, 1.0],
        u'groups': [],
        u'r': [1.0, 1.0, 1.0],
        u'type': "Gaussian",
    }
    for p in xrange(len(data[u'params'])):
        if data[u'params'][p][u'name'] == name:
            data[u'params'][p] = vals
            return
    data[u'params'].append(vals)


mt, impact = _read_mt_impact(args.statF)
_upsert_param(data, u'stat', mt, impact)

if addBinStats:
    MCmt, MCimpact = _read_mt_impact(args.binStatF)
    _upsert_param(data, u'MCbinStats', MCmt, MCimpact)

# Write the updated json back (to --outF, or in place when --outF is empty).
jsondata = json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '))
outF = args.json if args.outF == "" else args.outF
with open(outF, "w") as jsonfile:
    jsonfile.write(jsondata)
990,524 | ef7f6d206e65a5af46e6f39db6b0ab761f1e880a | """
见 day13天的案例中测试
""" |
990,525 | 49cea5865de2a8cfe1d24cd6bbd85b4a79cc2618 | # -*- coding: utf-8 -*-
from email.policy import default
from odoo import models, fields, api
class CabinetPatientPartner(models.Model):
    """Medical-cabinet extension of res.partner: adds patient identity,
    payment and visit-tracking fields."""
    _inherit = 'res.partner'
    # Patient surname (complements the partner's built-in name field).
    l_name = fields.Char('Last Name')
    # Date of birth.
    date_naissance = fields.Date('Date de naissance')
    sexe = fields.Selection([('male', 'Male'), ('female', 'Female')])
    # National identity-card number.
    CIN = fields.Char("CIN")
    # Presumably "insured" (has health coverage) -- TODO confirm.
    assure = fields.Boolean()
    image = fields.Binary()
    # Client lifecycle status; defaults to an established client.
    statut = fields.Selection([
        ('client', 'Client'),
        ('premiere_visite', 'Premiére visite')
    ], default='client')
    # Extend the partner-type selection with 'patient' and make it the default.
    type = fields.Selection(selection_add=[('patient', "Patient")],default='patient')
    # Payment method used by the patient.
    mode_du_paiement = fields.Selection([('cheque', 'Chéque'), ('espece', 'Espece')])
    # Prescriptions and appointments linked back to this partner via patient_id.
    ordonance_ids = fields.One2many("cabinet.ordonance", "patient_id")
    appoitement_ids = fields.One2many("cabinet.appoitement", "patient_id")
|
990,526 | ebfc2b0feb768d3ec6354a6aac33be78b8fe1eab | #!/usr/bin/env python
'''
A helper script to extract regions from LRW with dlib.
Mouth ROIs are fixed based on median mouth region across 29 frames.
@author Peratham Wiriyathammabhum
@date Jan 10, 2020
'''
import argparse
import os, os.path
import sys
import glob
import errno
import pickle
import math
import time
import copy
from multiprocessing import Pool
from time import time as timer
import numpy as np
import cv2
import yaml
cwd = os.getcwd()
from os.path import expanduser
hp = expanduser("~")
sys.path.insert(0, '/cfarhomes/peratham/lrcodebase')
from lr_config import *
from collections import ChainMap
import re
def get_stats(filename):
    """Collect duration/fps/frame-count stats for one LRW sample.

    *filename* is the annotation .txt path; the matching .mp4 is probed with
    OpenCV for its frame rate.  Returns {filename: {'duration', 'fps',
    'num_frames'}} so per-file dicts can be merged with ChainMap.
    """
    stat_dict = {}
    vidname = filename.replace('.txt', '.mp4')
    # The last line of the annotation file looks like 'Duration: 0.53 seconds'.
    lastline = ''
    with open(filename, 'r') as fp:
        lastline = list(fp)[-1]
    x = re.match(r'\w+: (\d+\.\d+) \w+', lastline)
    duration = float(x.group(1))
    stat_dict['duration'] = duration
    # Probe the video for its frame rate; release the capture handle so the
    # worker pool does not leak file descriptors across thousands of clips.
    cap = cv2.VideoCapture(vidname)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    stat_dict['fps'] = fps
    stat_dict['num_frames'] = int(round(fps * duration))
    return {filename: stat_dict}
def process_boundary_stats(sample_paths, pool):
    """Map get_stats over *sample_paths* on *pool* and merge the per-file dicts."""
    try:
        batch_stats = pool.map(get_stats, sample_paths)
    except Exception:
        # The original handler referenced undefined names (file_paths, i),
        # which raised a NameError and masked the real failure; log the batch
        # that failed and re-raise the original exception instead.
        print('[Error] batch starting at {}'.format(sample_paths[0] if sample_paths else '<empty>'))
        raise
    return dict(ChainMap(*batch_stats))
def main(args):
    """Walk the LRW annotation files for one split, compute per-clip boundary
    stats in parallel batches, and dump the results as pickle + yaml."""
    image_dir = args.dataset
    nthreads = int(args.nthreads)
    split = args.split
    # Annotation files live at <dataset>/<WORD>/<split>/*.txt
    filenames = glob.glob(os.path.join(image_dir, '*', '{}'.format(split), '*.txt'))
    filenames = sorted(filenames)
    total_size = len(filenames)
    # Save the (sorted) file list so downstream steps index clips consistently.
    pickle.dump( filenames, open( os.path.join(args.outdir, "lrw.{}.filenames.p".format(split)), "wb" ) )
    # ....
    res_dict = {} # result dict {filename:{duration:float.sec, fps:int1, num_frames:int2}}
    current_iter = 0
    chunk = 4*nthreads
    # Process files in chunks of 4*nthreads, building a fresh pool per chunk.
    while current_iter < total_size:
        curr_batch_size = chunk if current_iter + chunk <= total_size else total_size - current_iter
        with Pool(nthreads) as pool:
            sample_paths = filenames[current_iter:current_iter+curr_batch_size]
            bdict = process_boundary_stats(sample_paths, pool)
            res_dict = {**res_dict, **bdict}
        current_iter += curr_batch_size
        # Progress line roughly every 20 chunks.
        if current_iter // chunk % 20 == 0:
            print('[Info] Operating...{}'.format(current_iter))
    # ....
    # Persist both as pickle (fast reload) and yaml (human-readable).
    with open(args.outpickle,'wb') as fp:
        pickle.dump(res_dict, fp)
    with open(args.outfile,'w') as fp:
        yaml.dump(res_dict, fp)
    return
if __name__ == '__main__':
    # CLI: dataset location, split and parallelism, then run main().
    parser = argparse.ArgumentParser(description='Pytorch Video-only BBC-LRW Example')
    parser.add_argument('--dataset', default='/cfarhomes/peratham/datapath/lrw/lipread_mp4',
                        help='path to dataset')
    parser.add_argument('--split', default='train',
                        help='train, val, test')
    parser.add_argument('--outdir', default='/cfarhomes/peratham/datapath/lrw/boundary_stats/',
                        help='path to output files')
    parser.add_argument('--outfile', default='/cfarhomes/peratham/datapath/lrw/boundary_stats/boundary_stats.yaml',
                        help='path to output yaml')
    parser.add_argument('--outpickle', default='/cfarhomes/peratham/datapath/lrw/boundary_stats/boundary_stats.p',
                        help='path to output pickle')
    parser.add_argument('--nthreads', required=False, type=int,
                        default=64, help='num threads')
    args = parser.parse_args()
    main(args)
|
990,527 | 75f63351a6855714681ba7259d1c2120d63775f8 | #!flask/bin/python
import numpy as np
import os
import sqlite3
from flask import Flask, jsonify, request, g
from sklearn.externals import joblib
# Configs (picked up by app.config.from_object below).
DATABASE = 'iris.db'
DEBUG = True
SECRET_KEY = 'my predictive api'
USERNAME = 'admin'
PASSWORD = 'default'
# Create App
app = Flask(__name__)
app.config.from_object(__name__)
# Get champion from pickle and define predict function
# NOTE(review): dunder-style name for a plain module variable is unusual;
# this is just the absolute path to the pickled model.
__pickle_dir__ = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    "pickle/champion.pkl"
)
# The trained "champion" model is loaded once at import time.
champion = joblib.load(__pickle_dir__)
def predict(sepal_length, sepal_width, petal_length, petal_width):
    """Run the champion model on one iris sample and return a JSON-ready dict.

    The four measurements may arrive as strings (e.g. from URL query
    parameters), so they are cast to float before being fed to the model;
    the returned dict echoes the inputs unchanged alongside the label.
    """
    data = np.array(
        [float(sepal_length), float(sepal_width),
         float(petal_length), float(petal_width)]).reshape(1, -1)
    pred = champion.predict(data)[0]
    prediction = {
        'label': str(pred),
        'sepal_length': sepal_length,
        'sepal_width': sepal_width,
        'petal_length': petal_length,
        'petal_width': petal_width
    }
    return prediction
# Data Base connection & decorators
def connect_db():
    """Open a connection to the SQLite database named in the app config,
    returning rows as sqlite3.Row mappings."""
    connection = sqlite3.connect(app.config['DATABASE'])
    connection.row_factory = sqlite3.Row
    return connection
@app.before_request
def before_request():
    """Opens the database connection automatically before requests."""
    # Stash the connection on flask.g so view functions and query_db share it.
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request database connection, if one was opened."""
    connection = getattr(g, 'db', None)
    if connection is not None:
        connection.close()
def query_db(query, args=(), one=False):
    """Run *query* with *args*; return all rows, or -- when *one* is true --
    just the first row (None if the query matched nothing)."""
    cursor = g.db.execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
# API Methods
@app.route('/paramspredict', methods=['GET'])
def paramspredict():
    """Predict from measurements passed as URL query parameters.

    Example:
        http://127.0.0.1:5000/paramspredict?sepal_length=3.14&sepal_width=2&petal_length=0.4&petal_width=4
    """
    fields = ('sepal_length', 'sepal_width', 'petal_length', 'petal_width')
    measurements = [request.args.get(field) for field in fields]
    return jsonify(predict(*measurements))
@app.route('/idpredict/<int:id_setosa>')
def idpredict(id_setosa):
    """Predict for the stored iris sample with the given database id.

    Example:
        http://127.0.0.1:5000/idpredict/3
    """
    row = query_db('select * from iris_setosa where id = ?',
                   [id_setosa], one=True)
    if row is None:
        return not_found("No existe esa planta.")
    return jsonify(predict(row['sepal_length'], row['sepal_width'],
                           row['petal_length'], row['petal_width']))
# Error Handle
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 payload instead of Flask's default HTML page.

    *error* may be an exception instance (when Flask dispatches the handler)
    or a plain string (when called directly from a view, e.g. idpredict), so
    it is stringified first -- jsonify cannot serialize exception objects.
    """
    message = {
        'status': 404,
        'message': 'Not Found: ' + request.url,
        'error': str(error)}
    resp = jsonify(message)
    # Propagate the 404 status; jsonify alone would answer with 200.
    resp.status_code = 404
    return resp
if __name__ == '__main__':
app.run()
|
990,528 | 99afbf7ff25cf560fd60764229cccbef7e4d7e86 | from ansys.dpf.core import Model
from ansys.dpf.core import check_version
from ansys.dpf.core import errors as dpf_errors
import pytest
def test_get_server_version(multishells):
    """get_server_version returns a float-parsable version string, both with
    and without an explicit server argument."""
    model = Model(multishells)
    server = model._server
    # version without specifying server
    version_blank = check_version.get_server_version()
    assert isinstance(version_blank, str)
    v_blank = float(version_blank)
    assert v_blank >= 2.0
    # version specifying sever
    version = check_version.get_server_version(server)
    assert isinstance(version, str)
    v = float(version)
    assert v >= 2.0
def test_check_server_version_dpfserver(multishells):
    # this test is working because the server version format is "MAJOR.MINOR".
    # It can be adapted if this is evolving.
    model = Model(multishells)
    server = model._server
    v = check_version.get_server_version()
    split = v.split(".")
    l = 2
    assert len(split) == l
    # The server's own version, with or without an explicit patch digit,
    # must always be accepted.
    server.check_version(v)
    v_with_patch = v + ".0"
    server.check_version(v_with_patch)
    with pytest.raises(dpf_errors.DpfVersionNotSupported):
        # Builds a string strictly above the server version (e.g. "4.0" ->
        # "41"); presumably any larger major is enough to trigger the
        # rejection -- TODO confirm this string surgery is the intent.
        n = len(split[l - 1])
        v_up = v[0:n] + "1"
        server.check_version(v_up)
    with pytest.raises(dpf_errors.DpfVersionNotSupported):
        # A higher patch level than the server reports must also be rejected.
        v_up_patch = v + ".1"
        server.check_version(v_up_patch)
def test_check_server_version_checkversion(multishells):
    # this test is working because the server version format is "MAJOR.MINOR".
    # It can be adapted if this is evolving.
    # Mirrors test_check_server_version_dpfserver but goes through the
    # module-level helper server_meet_version_and_raise instead of the server.
    model = Model(multishells)
    server = model._server
    v = check_version.get_server_version()
    split = v.split(".")
    l = 2
    assert len(split) == l
    check_version.server_meet_version_and_raise(v, server)
    v_with_patch = v + ".0"
    check_version.server_meet_version_and_raise(v_with_patch, server)
    with pytest.raises(dpf_errors.DpfVersionNotSupported):
        # Same string surgery as the sibling test: build a version above the
        # server's -- TODO confirm the construction's intent.
        n = len(split[l - 1])
        v_up = v[0:n] + "1"
        check_version.server_meet_version_and_raise(v_up, server)
    with pytest.raises(dpf_errors.DpfVersionNotSupported):
        v_up_patch = v + ".1"
        check_version.server_meet_version_and_raise(v_up_patch, server)
def test_version_tuple():
    """version_tuple parses 'X.Y.Z' and pads a missing patch with zero."""
    cases = [
        ("2.0.0", (2, 0, 0)),
        ("2.0", (2, 0, 0)),
    ]
    for raw, expected in cases:
        assert check_version.version_tuple(raw) == expected
def test_meets_version():
    """meets_version(server, required) is true iff server >= required,
    treating a missing patch component as zero."""
    # Pairs are (server version, version to meet).
    accepted = [
        ("1.32.0", "1.31.0"),
        ("1.32.1", "1.32.0"),
        ("1.32.0", "1.32.0"),
        ("1.32", "1.32"),
        ("1.32", "1.31"),
        ("1.32", "1.31.0"),
        ("1.32.0", "1.31"),
        ("1.32.0", "1.31.1"),
    ]
    rejected = [
        ("1.31.0", "1.32"),
        ("1.31.0", "1.32.0"),
        ("1.31.1", "1.32"),
        ("1.31.1", "1.32.1"),
        ("1.31", "1.32"),
        ("1.31.0", "1.31.1"),
    ]
    for server_version, required in accepted:
        assert check_version.meets_version(server_version, required)
    for server_version, required in rejected:
        assert not check_version.meets_version(server_version, required)
|
def check_if_palindrome(phrase=None):
    """Report whether *phrase* reads the same forwards and backwards.

    The comparison is case-insensitive.  When *phrase* is None the user is
    prompted interactively (the original behaviour).  Returns True/False so
    the function is also usable programmatically.
    """
    if phrase is None:
        phrase = str(input("Please enter your word/phrase to check if it's a palindrome : \n"))
    # Lower-case BEFORE reversing: the original reversed the raw string and
    # compared it to the lower-cased one, so any capitalised palindrome
    # (e.g. "Aba") was wrongly rejected.
    normalized = phrase.lower()
    if normalized == normalized[::-1]:
        print("Your string {} is a palindrome! ".format(phrase))
        return True
    print("Your string {} is not a palindrome! ".format(phrase))
    return False


if __name__ == "__main__":
    # Guarded so importing this module does not block on input().
    check_if_palindrome()
990,530 | 6aa17f659f8bb0b5df20ac73e134a2043544acb4 | import setuptools
# Long description for PyPI comes verbatim from the repository README.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="multiquery",
    packages=setuptools.find_packages(),
    # Runtime dependencies.
    install_requires=[
        "psutil",
        "coloredlogs"
    ],
    # Console commands installed with the package.
    entry_points={
        "console_scripts": [
            "multiquery = multiquery.multiquery:main",
            "multiupdate = multiquery.multiupdate:main",
        ],
    },
    version="0.0.1",
    author="Agustin Gianni",
    author_email="agustingianni@gmail.com",
    description="Run a single query on multiple CodeQL databases.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/agustingianni/multi-query",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    keywords="codeql"
)
|
990,531 | 7554f1b0e446b40280b12004d18fcf9583dd4076 | import os
import os.path
import json
import sys
from ctypes import *
import os
import functools
import thread
import array
from ctypes import *
import os
import os.path
import re
import shutil
import time, datetime
import fileinput;
# Path separator and scan root (current working directory).
s = os.sep
root = "./"
# NOTE(review): songs/preKey are never used below -- presumably leftovers
# from the commented-out resource-json pass; confirm before deleting.
songs = None;
preKey = "g_stsj_";
# Disabled pass that duplicated "sounds" groups in default.res.json with
# m4a keys for iOS:
# resFile = open("default.res.json", "r+");
# resDatas = json.loads(resFile.read());
# resFile.close();
# groups = resDatas["groups"];
# for index in range(len(groups)):
# object = groups[index];
# print index, object;
# if("sounds" in object["name"]):
# data = {};
# data["keys"] = object["keys"].replace("_mp3", "_m4a");
# data["name"] = object["name"] + "Ios";
# groups.append(data);
# js_file = open("default.res.json", "w");
# js_file.write(json.dumps(resDatas));
# js_file.close();
# Recursively rename every .mp3 under root, replacing "g_" with "f_" in the
# full path (Python 2 script: note the print statement).
for rt, dirs, files in os.walk(root):
    for f in files:
        if ".mp3" in f:
            path = rt+s + f;
            path = path.replace("g_", "f_");
            os.rename(rt+s+f, path)
            print path;
990,532 | e404265ef11c001e01cd5a22e94016326c8e5b70 | # Communication module to the DataCenter AC rotator.
# William Schoenell <william@iaa.es> - Aug 16, 2014
import argparse
import urllib
import csv
import sys
import time
import datetime
import numpy as np
class bcolors:
    """ANSI escape sequences used to colour terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # resets all attributes
def read_arduino(url):
    """Fetch one CSV snapshot (header row + value row) from the Arduino.

    Returns a dict mapping header fields to values, plus the queried 'url'
    itself for reporting.  Python 2 module (urllib.urlopen, reader.next).
    """
    ino_site = urllib.urlopen(url)
    try:
        reader = csv.reader(ino_site)
        keys = reader.next()
        values = reader.next()
    finally:
        # Close the HTTP response so the daemon's once-a-minute polling loop
        # does not leak sockets.
        ino_site.close()
    data = dict(zip(keys, values))
    data['url'] = url
    return data
def human_output(data):
    """Pretty-print one Arduino status snapshot (as returned by read_arduino)
    as a human-readable report on stdout."""
    print('\n\
Arduino URL: %(url)s \n\
=========================== CONFIGURATION =========================== \n\
temp_low: %(cfg_temp_low)s oC\t - Lower temperature treshold - (0-255) \n\
temp_upp: %(cfg_temp_upp)s oC\t - Upper temperature treshold - (0-255) \n\
temp_interval: %(cfg_temp_int)s s\t - Temperature check interval - (0-255) \n\
rot_interval: %(cfg_rot_int)s h:m\t - AC units rotation interval - Up to 255h \n\n\
=============================== STATUS ============================== \n\
act_unit: %(act_unit)s \t - Main AC unit now \n\
slaves_on: %(slaves_on)s \t - 1 if slaves are turned on in an event of overheating \n\
temp: %(temp)s oC\t - Ambient temperature now \n\
hum: %(hum)s %%\t - Ambient humidity now \n ' % data)
# --- CLI definition (Python 2 script throughout) ---
parser = argparse.ArgumentParser()
parser.add_argument("--url", help="Arduino URL address", default="http://192.168.1.129")
parser.add_argument("--temp_low", type=int, help="Set a new lower temperature treshold. In oC. Range (0-255).")
parser.add_argument("--temp_upp", type=int, help="Set a new upper temperature treshold. In oC. Range (0-255).")
parser.add_argument("--temp_int", type=int, help="Set a new temperature check interval. In seconds. Range (0-255).")
parser.add_argument("--rot_int", help="Set a new AC units rotation interval. hh:mm. Up to 255 hours.")
parser.add_argument("--rot_now", action="count",
                    help="FORCE the AC units rotation now. WARNING: This can damage your units!")
parser.add_argument("--temp_now", action="count", help="FORCE the temperature check now.")
parser.add_argument("--daemon", action="count", help="Start in daemon mode, feeding data to plot.ly.")
parser.add_argument("--new", action="count", help="When with daemon, cleans the plot.ly plot before start.")
args = parser.parse_args()
# cmd tracks whether any explicit command ran; if none did, the script falls
# through to a plain status read at the bottom.
cmd = False
# Simple integer-valued config commands: send, then verify the echoed config.
for command in ('temp_low', 'temp_upp', 'temp_int'):
    if args.__getattribute__(command):
        cmd = True
        data = read_arduino(args.url + '/?cmd=%s&arg=%i' % (command, args.__getattribute__(command)))
        if int(data['cmd_status']) == 1 and int(float(data['cfg_' + command])) == args.__getattribute__(command):
            print (bcolors.OKGREEN + ' Command %s run successfully.' + bcolors.ENDC) % command
        else:
            print (
                bcolors.FAIL + ' Command %s run UN-successfully. Check for errors!!!' + bcolors.ENDC) % command
# Rotation interval is hh:mm, so it is verified field-by-field.
if args.rot_int:
    cmd = True
    h_n, m_n = args.rot_int.split(':')
    data = read_arduino(args.url + '/?cmd=rot_int&arg=%s' % args.rot_int)
    h_d, m_d = data['cfg_rot_int'].split(':')
    if int(h_n) == int(h_d) and int(m_n) == int(m_d) and data['cmd_status'] == 1:
        print bcolors.OKGREEN + ' Command rot_int run successfully.' + bcolors.ENDC
    else:
        print bcolors.FAIL + ' Command rot_int run UN-successfully. Check for errors!!!' + bcolors.ENDC
# Dangerous force-commands require an interactive YES confirmation.
for command in ('rot_now', 'temp_now'):
    if args.__getattribute__(command) > 0:
        cmd = True
        if raw_input(
                bcolors.WARNING + 'This option shold be used with care.\nType YES if you really want to run this command: \n' + bcolors.ENDC) == 'YES':
            data = read_arduino(args.url + '/?cmd=%s&arg=0' % command)
            if int(data['cmd_status']) == 1:
                print (bcolors.OKGREEN + ' Command %s run successfully.' + bcolors.ENDC) % command
            else:
                print (
                    bcolors.FAIL + ' Command %s run UN-successfully. Check for errors!!!' + bcolors.ENDC) % command
        else:
            data = read_arduino(args.url)
# Daemon mode: stream temperature/humidity/AC state to plot.ly once a minute.
if args.daemon:
    try:
        import plotly.plotly as py
        import plotly.tools as tls
        from plotly.graph_objs import Scatter, Data, Figure, YAxis, Layout, Font
    except ImportError:
        print 'Could not import plotly python module.'
        sys.exit(2)
    # Two y-axes: temperatures/humidity on the left, on/off flags on the right.
    layout = Layout(
        title='Data Center enviroment data',
        yaxis=YAxis(
            title='Temperature (Celsius) / Humidity (%)',
            range=[10, 50]
        ),
        yaxis2=YAxis(
            title='ON/OFF',
            titlefont=Font(
                color='rgb(148, 103, 189)'
            ),
            tickfont=Font(
                color='rgb(148, 103, 189)'
            ),
            overlaying='y',
            side='right',
            range=[-.5, 1.5]
        )
    )
    # maxpoints=1440 keeps one day of minute-resolution samples per trace.
    trace1 = Scatter(x=[], y=[], name='temperature', stream=dict(token='aamkhlzl44', maxpoints=1440)) #, xaxis='x1')
    trace2 = Scatter(x=[], y=[], name='humidity', stream=dict(token='044mpl7nqo', maxpoints=1440)) #, xaxis='x2')
    trace3 = Scatter(x=[], y=[], name='Active AC', stream=dict(token='lsdi9172dd', maxpoints=1440), yaxis='y2')
    trace4 = Scatter(x=[], y=[], name='Slaves Active?', stream=dict(token='m4of2pjlx3', maxpoints=1440), yaxis='y2')
    fig = Figure(data=[trace1, trace2, trace3, trace4], layout=layout)
    if args.new > 0:
        py.plot(fig, filename='DataCenterACRotation')
    else:
        py.plot(fig, filename='DataCenterACRotation', fileopt='extend')
    s1 = py.Stream('aamkhlzl44')
    s2 = py.Stream('044mpl7nqo')
    s3 = py.Stream('lsdi9172dd')
    s4 = py.Stream('m4of2pjlx3')
    s1.open()
    s2.open()
    s3.open()
    s4.open()
    while True:
        data = read_arduino(args.url)
        # Temperatures >= 100 are treated as sensor glitches and skipped.
        if np.float(data['temp']) < 100:
            print '[%s] Writing to plot.ly server...' % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            s1.write(dict(x=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), y=np.float(data['temp'])))
            s2.write(dict(x=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), y=np.float(data['hum'])))
            s3.write(dict(x=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), y=np.float(data['act_unit'])))
            s4.write(dict(x=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), y=np.float(data['slaves_on'])))
        else:
            print 'Skipping due to an incorrect temperature value...'
        time.sleep(60)
    # NOTE(review): unreachable -- the while True loop above never breaks.
    s1.close()
    s2.close()
    s3.close()
    s4.close()
# No explicit command given: just read and display the current status.
if not cmd:
    data = read_arduino(args.url)
    human_output(data)
|
990,533 | 7b9f7dbbd405ad1f29d883f1d2bca02063c2751d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 11:07:33 2020
@author: aaronberlow
"""
#linked lists
import random
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data=None, next=None):
        self.data = data
        self.next = next

    def print_node(self):
        """Print this node's payload."""
        print(self.data)


class LinkedList:
    """A minimal singly linked list with append, print and linear search."""

    def __init__(self):
        self.head = None

    def append_node(self, data):
        """Append *data* at the tail of the list."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            return
        cursor = self.head
        while cursor.next is not None:
            cursor = cursor.next
        cursor.next = new_node

    def print_list(self):
        """Print every payload from head to tail, one per line."""
        cursor = self.head
        while cursor is not None:
            print(cursor.data)
            cursor = cursor.next

    def search(self, target):
        """Linearly scan for *target*; print the outcome and return True/False."""
        cursor = self.head
        while cursor is not None:
            if cursor.data == target:
                print("Found it!")
                return True
            cursor = cursor.next
        print("Not found.")
        return False
if __name__ == "__main__":
    # Demo: build a list of 20 random values and look for 10.  Guarded so
    # importing this module has no side effects; the original also reused the
    # loop variable for the random value, which was confusing.
    the_list = LinkedList()
    for _ in range(0, 20):
        the_list.append_node(random.randint(1, 30))
    the_list.print_list()
    the_list.search(10)
|
990,534 | fe0bcce8fe4256d93f01c51726f084ae6002d7a0 | #
# S.E.P.I.A. account handling
# by Florian Quirin
#
import sys
import requests
import json
import getpass
import argparse
try:
from .storage import Storage
except ValueError:
raise ValueError("Please use 'python -m sepia.account' (from outside the 'account.py' folder) to start the main function of this module.")
class Account():
    """
    Class to handle SEPIA accounts: authenticate against a SEPIA server and
    check a previously stored login token.
    """
    def __init__(
            self,
            host_address,
            user_id,
            client_info = "wakeword_tool"):
        """
        Constructor.
        :param host_address: address of a SEPIA server, e.g. 'https://my.example.com:20726/sepia'.
        :param user_id: ID of a user to manage.
        :param client_info: client name, e.g. wakeword_tool or python_app
        """
        self.host_address = host_address
        # Default to https when no scheme was given; drop any trailing slash.
        if not host_address.startswith("http"):
            self.host_address = "https://" + self.host_address
        if host_address.endswith("/"):
            self.host_address = self.host_address[:-1]
        self.storage = Storage()
        self.client_info = client_info
        self.user_id = user_id

    def _post_auth(self, payload):
        """POST *payload* to the server's authentication endpoint and return
        the decoded JSON reply, or None when the reply is not valid JSON.

        BUG FIX: the original code caught NameError around json.loads, which
        can never be raised there -- json.loads raises ValueError
        (JSONDecodeError) on malformed input, so bad replies crashed callers.
        """
        url = self.host_address + "/assist/authentication"
        headers = {
            'Content-Type': "application/json"
        }
        response = requests.request("POST", url, json=payload, headers=headers)
        try:
            return json.loads(response.text)
        except ValueError:
            return None

    def authenticate(self, password):
        """
        Send authentication request to a SEPIA server and store basic data if successful.
        """
        res = self._post_auth({
            'action' : "validate",
            'client' : self.client_info,
            'GUUID' : self.user_id,
            'PWD' : password
        })
        # .get avoids a KeyError when the server answers without a "result".
        if res and res.get("result") == "success":
            # store result - overwrite any previous entries with same user ID
            self.storage.write_user_data(self.user_id, {
                "language" : res["user_lang_code"],
                "token" : res["keyToken"]
            })
            name = res["user_name"]["nick"] or res["user_name"]["first"]
            print("SEPIA account: Success - " + name + ", your login token has been stored. Hf :-)")
            # store default host
            self.storage.write_default_host(self.host_address)
            print("SEPIA account: Set (new) default host: " + self.host_address)
        else:
            print("SEPIA account: Failed - I think the password is wrong or we got connection problems.")

    def check_login(self):
        """
        Send check request to a SEPIA server to see if the token is still valid.
        """
        # read token first
        user_data = self.storage.get_user_data(self.user_id)
        if not "token" in user_data:
            sys.exit("SEPIA account: No user data found! Please generate a token first (python -m sepia.account --id=[sepia-user-id] --host=[sepia-server-url]).")
        # check token
        token = user_data["token"]
        res = self._post_auth({
            'action' : "check",
            'client' : self.client_info,
            'KEY' : (self.user_id + ";" + token)
        })
        # Guard against res being None (bad JSON / connection problem); the
        # original indexed res["result"] unconditionally and crashed.
        if res and res.get("result") == "success":
            name = res["user_name"]["nick"] or res["user_name"]["first"]
            print("SEPIA account: Success - Wb " + name + ", your login token is still valid.")
        else:
            print("SEPIA account: Failed - I think the token is invalid or we got connection problems.")
if __name__ == '__main__':
    # CLI entry point: authenticate against a SEPIA server or check a token.
    parser = argparse.ArgumentParser()
    parser.add_argument('--id', help='ID of user that wants to trigger a remote action', type=str)
    parser.add_argument('--action', help="Name of a pre-defined account action, e.g. 'authenticate' or 'check'", type=str, default="authenticate")
    parser.add_argument('--host', help="Host address of SEPIA server, e.g. 'https://my.example.com/sepia'", type=str)
    parser.add_argument('--client', help="Client name, default: wakeword_tool", type=str, default="wakeword_tool")
    parser.add_argument('--pwd', help="Password for authentication. Use this only for testing please! The password will show in your console history!", type=str)
    args = parser.parse_args()
    if not args.id:
        raise ValueError('Missing user ID')
    if not args.host:
        if args.action == "authenticate":
            raise ValueError('Missing SEPIA host address') # we do this to make sure we don't send the data to the wrong host
    account = Account(host_address=args.host, user_id=args.id, client_info=args.client)
    if not args.host:
        # Non-authenticate actions may fall back to the stored default host.
        host = account.storage.get_default_host()
        if not host:
            raise ValueError('Missing SEPIA host address') # now we need it because we got no default stored
        else:
            account.host_address = host
    if args.action == "authenticate":
        if args.pwd:
            account.authenticate(args.pwd)
        else:
            # ask for password using error stream (in case normal output is redirected) - Is that safe enough?
            p = getpass.getpass(stream=sys.stderr)
            account.authenticate(p)
    elif args.action == "check":
        account.check_login()
    else:
        print("Action '" + args.action + "' not supported (yet?!)")
990,535 | a4de7e33eee4e538b9bfc4027cbfb7795a18e3d8 | #sys.path.append('C:\Users\Enrique Cruz\Documents\Columbia\Scalper')
from research import location_settings
#COUNTRY URLS
# Site roots per country, plus the JSON "browse music" endpoints used to
# enumerate events.
base_uk = 'http://www.ticketmaster.co.uk'
base_us = 'http://www.ticketmaster.com'
base_ca = 'http://www.ticketmaster.ca'
base_ir = 'http://www.ticketmaster.ie'
base_au = 'http://www.ticketmaster.com.au'
base_nz = 'http://www.ticketmaster.co.nz'
uk = 'http://www.ticketmaster.co.uk/json/browse/music?select=n93'
# NOTE(review): only the US endpoint carries a ".json" suffix -- confirm
# this asymmetry is intentional.
us = 'http://www.ticketmaster.com/json/browse/music?select=n93.json'
ca = 'http://www.ticketmaster.ca/json/browse/music?select=n93'
ir = 'http://www.ticketmaster.ie/json/browse/music?select=n93'
au = 'http://www.ticketmaster.com.au/json/browse/music?select=n93'
nz = 'http://www.ticketmaster.co.nz/json/browse/music?select=n93'
class US_Location():
    """A Ticketmaster US market, identified by the market/DMA cookies the
    site expects.  *location_settings* entries are indexed as
    [MARKET_NAME, MARKET_ID, NDMA]."""

    def __init__(self, location_settings, language='en-us'):
        market_name = location_settings[0]
        market_id = location_settings[1]
        ndma = location_settings[2]
        self.country = 'us'
        self.url = 'http://www.ticketmaster.com/json/browse/music?select=n93'
        self.cookies = dict(
            MARKET_NAME=market_name,
            MARKET_ID=market_id,
            NDMA=ndma,
            LANGUAGE=language
        )

    def get_base_url(self):
        """Root URL of the US site."""
        return base_us

    def get_base_sale_url(self):
        """URL prefix for individual event pages on the US site."""
        return base_us + '/event/'
class Int_Location():
    """A non-US Ticketmaster market: a country code, its browse URL and the
    NDMA region cookie the site expects."""

    def __init__(self, country, country_url, ndma):
        self.country = country
        self.url = country_url
        self.cookies = dict(NDMA=ndma)

    def _site_root(self):
        # Resolve the country site's root lazily; None means unsupported.
        if self.country == 'uk':
            return base_uk
        if self.country == 'ca':
            return base_ca
        if self.country == 'ir':
            return base_ir
        if self.country == 'au':
            return base_au
        if self.country == 'nz':
            return base_nz
        return None

    def get_base_url(self):
        """Root URL for this country's site, or a placeholder when unsupported."""
        root = self._site_root()
        return root if root is not None else '-- unsuported location --'

    def get_base_sale_url(self):
        """Event-page URL prefix, or a placeholder when unsupported."""
        root = self._site_root()
        return root + '/event/' if root is not None else '-- unsuported location --'
def setup_locations():
    """Build the list of Ticketmaster locations to scan: every configured US
    market followed by the enabled Canadian regions.  UK/Ireland/Australia/
    New Zealand entries exist but are currently disabled."""
    # US markets, keyed by their preset names in research.location_settings.
    us_market_names = (
        'los_angeles', 'san_francisco', 'NY_tristate', 'philadelphia',
        'pittsburgh', 'phoenix', 'san_diego', 'chicago', 'indianapolis',
        'kansas_city', 'new_orleans', 'baltimore', 'DC', 'boston',
        'detroit', 'saint_louis', 'nebraska', 'las_vegas', 'charlotte',
        'cleveland', 'columbus', 'portland', 'dallas', 'houston',
        'austin', 'san_antonio', 'seattle', 'milwaukee',
    )
    locations = [US_Location(getattr(location_settings, name))
                 for name in us_market_names]
    # Canadian markets by NDMA region code.
    canadian_regions = (
        ('527', 'Toronto, Hamilton & Southwestern Ontario'),
        ('522', 'Montreal and Surrounding Area'),
        ('519', 'Ottawa-Gatineau & Eastern Ontario'),
        ('505', 'Calgary & Southern Alberta'),
        ('528', 'B.C. Lower Mainland & Vancouver Island'),
    )
    locations.extend(Int_Location('ca', ca, ndma) for ndma, _label in canadian_regions)
    # Disabled markets kept for reference:
    #   uk '99999'; ir '345';
    #   au '705' Victoria/Tasmania, '702' NSW/ACT, '703' Queensland, '704' WA;
    #   nz '751' North Island, '752' South Island.
    return locations
# Read two phrases, lower-cased with spaces and common punctuation stripped.
frase1 = input().lower().strip().replace(" ","").replace(".", "").replace("!","").replace("?","").replace(",","")
frase2 = input().lower().strip().replace(" ","").replace(".", "").replace("!","").replace("?","").replace(",","")
letras1 = []
letras2 = []
for letra0 in frase1:
    letras1.append(letra0)
for letra1 in frase2:
    letras2.append(letra1)
# NOTE(review): hard-coded special case that forces False -- presumably a
# specific judge input; confirm against the exercise statement.
if frase1 == "halley'scomet":
    print(False)
# True when every character of the second phrase also occurs in the first
# (multiplicity ignored, so this is containment rather than a full anagram
# test).
elif [c for c in letras2 if c not in letras1] == []:
    print (True)
else:
    print(False)
990,537 | 219c3ca5fd5c7011670e8dd6ba9d802b9df034cb | from topics import QAgiSubscriber
from packages import get_pkg_dir_from_prefix, \
get_ros_workspace_dir, \
get_ros_workspace_src_dir, \
QAgiPackages
from resources import QAgiResources, \
loadRsc, loadRes
|
990,538 | 79b09b62c38a97b9a1cc61f83d3adacd29fb76b1 | ### Fastai v2 training script
# built on fastai v2.2.2
# testing to see how the presets compare to my hand tuning
## So this is training better than my pytorch lightning...
from fastai.vision.all import *

# Root folder with CIFAR-10 images laid out in train/ and test/ subfolders.
path = '../cv_data/cifar10'

### Setup Image transforms
# Per-item: tensorize, resize to 300x300, then take a random 250x250 crop.
item_transforms = [ToTensor, Resize(size=(300,300)),
                   RandomCrop(size=(250,250))
                   ]
# Per-batch: dihedral (flip/rotate) augmentation and normalisation.
batch_transforms = [Dihedral(), Normalize()]

### Setup Data Loaders
# device=1: use CUDA device index 1 (presumably the second GPU) -- confirm.
dls = ImageDataLoaders.from_folder(path, train='train',
                                   valid='test', device=1,
                                   item_tfms=item_transforms,
                                   batch_tfms=batch_transforms,
                                   bs=164)

### Setup CNN Learner
# ResNet-18 trained from scratch (pretrained=False), tracking accuracy
# and top-k accuracy on the validation set.
learn = cnn_learner(dls, resnet18, pretrained=False,
                    metrics=[accuracy, top_k_accuracy])
learn.fit(n_epoch=50)
990,539 | 9e5345a7e94a7cc79077e238cbf91d1692f27902 | from application import db
from datetime import date, datetime, timedelta
from application.admin.models import Settings
class SpareKey(db.Model):
    """A spare key loaned out against a loan account: tracks the current
    holder, the expected return date, and whether it is still outstanding."""
    id = db.Column(db.Integer, primary_key=True)
    branch = db.Column(db.String(32), nullable=False)
    loan_no = db.Column(db.String(32), nullable=False)
    name = db.Column(db.String(32))
    # Current holder; inward_to_collections() resets this to "Collections".
    recepient = db.Column(db.String(32))
    expected_date_of_return = db.Column(db.Date())
    returned_date = db.Column(db.Date)
    remarks = db.Column(db.String(100), default=" ")
    # True while the key is out; cleared by make_inward().
    is_active = db.Column(db.Boolean, default=True)
    added = db.Column(db.Date, default=date.today)
    updated = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)
    def get_all_active_keys(self):
        """Return every key that is still outstanding."""
        active_keys = self.query.filter_by(is_active=True).all()
        db.session.close()
        return active_keys
    def get_all_inactive_keys(self):
        """Return every key that has already been returned."""
        inactive_keys = self.query.filter_by(is_active=False).all()
        db.session.close()
        return inactive_keys
    def get_keys_older_than_default_time(self):
        """Return keys whose expected return date is today or earlier.

        NOTE(review): ``default_time`` is fetched but never used, and
        ``self.query.all() != None`` is always True (``.all()`` returns a
        list), so the ``else`` branch is unreachable.
        """
        default_time = Settings.query.filter_by(name="Default Days").first()
        if self.query.all() != None:
            keys = SpareKey.query.filter(SpareKey.expected_date_of_return<=date.today()).all()
        else:
            keys = None
        db.session.close()
        return keys
    def get_keys_with_collections(self):
        """Return active keys currently held by "Collections"."""
        all_keys = self.query.filter_by(recepient="Collections", is_active=True).all()
        db.session.close()
        return all_keys
    def get_keys_with_all_field_officers(self):
        """Return active keys held by anyone other than "Collections".

        NOTE(review): filters on ``self.recepient`` rather than the
        ``SpareKey.recepient`` column -- compare the module-level
        get_keys_with_field_officers(); confirm which was intended.
        """
        all_keys = self.query.filter(self.recepient!="Collections").all()
        keys = []
        for k in all_keys:
            if k.is_active==True:
                keys.append(k)
        db.session.close()
        return keys
    def get_keys_with_a_field_officer(self, field_officer):
        """Return active keys held by the named field officer."""
        all_keys = self.query.filter_by(recepient=field_officer, is_active=True).all()
        db.session.close()
        return all_keys
    def check_key(self, loan_no):
        """Return True if any key for *loan_no* is still outstanding."""
        key = self.query.filter_by(loan_no=loan_no).all()
        for k in key:
            if k.is_active == True:
                return True
        return False
    def make_inward(self, key_id):
        """Mark key *key_id* as returned (deactivates it).

        NOTE(review): assigns ``inward_date``, which is not a declared
        column above -- possibly ``returned_date`` was intended.
        """
        key = self.query.filter_by(id=key_id).first()
        key.is_active = False
        key.inward_date = datetime.today()
        db.session.add(key)
        db.session.commit()
        db.session.close()
    def reassign_key(self, key_id, recepient):
        """Hand key *key_id* over to a different recepient."""
        key = self.query.filter_by(id=key_id).first()
        key.recepient = recepient
        db.session.add(key)
        db.session.commit()
        db.session.close()
    def inward_to_collections(self, key_id):
        """Return key *key_id* to "Collections" (the key stays active).

        NOTE(review): also sets the undeclared ``inward_date`` attribute.
        """
        key = self.query.filter_by(id=key_id).first()
        key.recepient = "Collections"
        key.inward_date = datetime.today()
        db.session.add(key)
        db.session.commit()
        db.session.close()
def add_spare_key(branch, loan_no, name, recepient, remarks, added_date=None):
    """Create and persist a SpareKey record.

    The expected return date is *added_date* plus the configured
    "Default Days" setting.  *added_date* defaults to today.
    """
    # (A leftover debug print of type(added_date) was removed here.)
    if added_date is None:
        added_date = date.today()
    default_time = Settings.query.filter_by(name="Default Days").first()
    expected_date_of_return = added_date + timedelta(int(default_time.value))
    key = SpareKey(branch=branch,
                   loan_no=loan_no,
                   name=name,
                   recepient=recepient,
                   remarks=remarks,
                   added=added_date,
                   expected_date_of_return=expected_date_of_return
                   )
    db.session.add(key)
    db.session.commit()
    db.session.close()
def get_keys_with_field_officers():
    """Return every active SpareKey held by a field officer, i.e. any
    recepient other than "Collections"."""
    candidates = SpareKey.query.filter(SpareKey.recepient != "Collections").all()
    return [key for key in candidates if key.is_active == True]
|
990,540 | 5ad8c6b4ea22345b5d2bae67062177a958f26ab7 | #ASSIGNMENT - CLASSES AND OBJECTS
#question-1
class circle:
    """A circle described by its radius; pi is approximated as 3.14."""

    def __init__(self, r):
        # Remember the radius supplied by the caller.
        self.radius = r

    def getArea(self):
        """Area of the circle: pi * r * r."""
        return 3.14 * self.radius * self.radius

    def getCircumference(self):
        """Perimeter of the circle: 2 * pi * r."""
        return 2 * 3.14 * self.radius
# Q1 driver: read a radius, then report area and circumference.
r=int(input("enter radius"))
c=circle(r)
print("area is ",c.getArea())
print("circumference is ",c.getCircumference())
#question-2
class student:
    """Student record collected interactively (name, roll, age, marks)."""

    def __init__(self):
        # Identifying fields are gathered at construction time.
        self.name = input("enter name")
        self.roll = int(input("enter rollno"))

    def setAge(self):
        """Prompt for and store the student's age."""
        self.age = int(input("enter age"))

    def setMarks(self):
        """Prompt for and store the student's marks."""
        self.marks = int(input("enter marks"))

    def display(self):
        """Print all four collected fields."""
        print("name:", self.name, "\n", "roll-no:", self.roll, "\n", "age:", self.age, "\n", "marks:", self.marks)
# Q2 driver: build a student interactively, then show the record.
s=student()
s.setAge()
s.setMarks()
s.display()
#question-3
class temperature:
    """Interactive Celsius/Fahrenheit converter (integer input only)."""

    def convertFahrenheit(self):
        """Read a Celsius value and return its Fahrenheit equivalent."""
        self.c = int(input("enter temperature in celsius"))
        return (9 / 5) * self.c + 32

    def convertCelsius(self):
        """Read a Fahrenheit value and return its Celsius equivalent."""
        self.f = int(input("enter temperature in Fahrenheit"))
        return ((self.f - 32) * 5) / 9
# Q3 driver: run one conversion in each direction.
t=temperature()
print(t.convertFahrenheit())
print(t.convertCelsius())
#question-4
class MovieDetails:
    """Interactive record of a movie release and its box-office numbers."""

    def __init__(self):
        # Artist, year and rating are gathered at construction time.
        self.artistname = input("enter artist name")
        self.year = input("enter year")
        self.rating = int(input("enter ratings out of 5"))

    def add(self):
        """Prompt for the movie name and its total collection."""
        self.moviename = input("enter the movie name")
        self.collection = int(input("enter total collection"))

    def display(self):
        """Print every stored field, one per line."""
        for value in (self.moviename, self.artistname, self.year,
                      self.rating, self.collection):
            print(value)
# Q4 driver: collect movie details interactively and display them.
m=MovieDetails()
m.add()
m.display()
#question-5
class animal:
    """Base class exposing a canned self-description."""

    def animal_attribute(self):
        """Return the fixed greeting string."""
        return "hello!im a tiger"


class tiger(animal):
    """Inherits animal_attribute unchanged -- demonstrates inheritance."""


# Q5 driver: the subclass resolves the inherited method.
t = tiger()
print(t.animal_attribute())
#question-6
'''output will be:
A B
A B'''
#question-7
class shape:
    """Quadrilateral described by a length and a breadth."""

    def __init__(self, l, b):
        self.length = l
        self.breadth = b

    def area(self):
        """Return length * breadth."""
        return self.length * self.breadth


class rectangle(shape):
    """A rectangle is a shape with independent sides."""


class square(shape):
    """A square reuses shape; callers pass two equal sides."""
# Q7 driver: one set of dimensions reused for both shapes.
l=int(input("enter length"))
b=int(input("enter breadth"))
r=rectangle(l,b)
s=square(l,b)
print("area of rectangle ",r.area())
print("area of square ",s.area())
|
# Exercise: simple carpool arithmetic with several print styles.
cars = 100
space_in_car = 4.0
drivers = 30
passengers = 90
cars_not_driven = cars - drivers
cars_driven = drivers
carpool_capacity = cars_driven * space_in_car
average_passengers_per_car = passengers/cars_driven
print('There are', cars,'cars available.')
print('There are only', drivers,'drivers available.')
print('There will be',cars_not_driven,'empty cars today.')
print('We can transport', carpool_capacity,'people today.')
# NOTE(review): this reads "We have 90 to carpool" -- probably missing
# the word "passengers".
print('We have',passengers,'to carpool today.')
print('We need to put about', average_passengers_per_car,'in each car.')
#testing out other ways to pass variable into print
print('cars'+str(cars)) #concatenate
print('cars %s' % (cars)) #pass as tuple
print('cars {}'.format(cars)) #using .format
# # What does %s, %r, and %d do again?
# You'll learn more about this as you continue,
# but they are "formatters." They tell Python to take the variable on the right and put it in to replace the %s with its value.
# %r is used for debugging and inspection, so it's not necessary that it be pretty.
# They are called string formatting operations.
# The difference between %s and %r is that %s uses the str function and %r uses the repr function.
# the biggest difference in practice is that
# repr for strings includes quotes and all special characters are escaped.
990,542 | ff54cf5b63baff575025f41d7a67faf457e79471 | """
API
__init__.py
~~~~~~~~~~~~
This file contains the initialization code for the API.
:copyright: 2019 Moodify (High-Mood)
:authors:
"Stan van den Broek",
"Mitchell van den Bulk",
"Mo Diallo",
"Arthur van Eeden",
"Elijah Erven",
"Henok Ghebrenigus",
"Jonas van der Ham",
"Mounir El Kirafi",
"Esmeralda Knaap",
"Youri Reijne",
"Siwa Sardjoemissier",
"Barry de Vries",
"Jelle Witsen Elias"
"""
from flask import Blueprint
from flask_restplus import Api
from app import app
from .playlist_calls import api as playlist_name_space
from .track_calls import api as track_name_space
from .user_calls import api as user_name_space
# Expose the REST API under the /api prefix via a dedicated blueprint.
blueprint = Blueprint('api', __name__, url_prefix='/api')
api = Api(blueprint)
app.register_blueprint(blueprint)
# Attach each resource namespace (users, tracks, playlists) to the API.
api.add_namespace(user_name_space)
api.add_namespace(track_name_space)
api.add_namespace(playlist_name_space)
|
990,543 | 2fcb084d8431c9660470e34c7627ff789681ce68 |
class Persona:
    """A person with a nombre and apellido that announces its lifecycle."""

    def __init__(self, nombre, apellido):
        # Constructor: store both attributes, then announce creation.
        self.nombre = nombre
        self.apellido = apellido
        print("El objeto {} {} ha sido creado".format(self.nombre, self.apellido))

    def __str__(self):
        # Human-readable description of the stored attributes.
        return "El objeto tiene como atributo el nombre {} y el apellido {}".format(self.nombre, self.apellido)

    def __del__(self):
        # Destructor: announce that the object is being reclaimed.
        print("El objeto {} {} ha sido destruido".format(self.nombre, self.apellido))
# Demo: create a Persona and print its string form; the destructor
# message appears when the object is reclaimed at interpreter exit.
persona = Persona("Jaikelly", "Mota");
print(str(persona));
|
990,544 | f0884a453b31909e22db44e38fe2ebeab0cad645 | from rest_framework import serializers
from bangazon_ultra.models import *
class ProductTypeSerializer(serializers.HyperlinkedModelSerializer):
    '''Serializes Product_Type model instances into other formats,
    JSON by default.

    Argument List:
    -serializers.HyperlinkedModelSerializer: grants access to DRF field types.

    Author: Zoe LeBlanc, Python Ponies
    '''
    class Meta:
        # Expose every column of the Product_Type table.
        model = product_types_model.Product_Type
        fields = '__all__'
990,545 | 08b826a2f95dc9aa89a056ecd78da4ea386c06c9 | from django.contrib import admin
from .models import *
@admin.register(User)
class searchUser(admin.ModelAdmin):
    """Admin page for User with search enabled over the ticket code."""
    search_fields = ('ticket_code',)
# admin.site.register([User, searchUser])
admin.site.register(Event)
admin.site.register(Admin)
def nojobsrunning(user):
    '''does the user have any jobs active on Condor?

    True when the condor_q summary line reports all-zero counts.
    (Python 2 file: check_output returns str here.)
    '''
    from subprocess import check_output
    # The second-to-last line of condor_q output is the jobs summary.
    check = check_output(['condor_q {}'.format(user)], shell=True).split('\n')[-2]
    # Membership test is already a bool -- the old "True if x else False"
    # wrapper was redundant.
    return '0 jobs; 0 completed, 0 removed, 0 idle, 0 running, 0 held, 0 suspended' in check
def Njobs(user):
    '''returns the number of jobs running on Condor for the given user
    '''
    from subprocess import check_output
    # Summary line has the form "<N> jobs; <counts...>".
    summary = check_output(['condor_q {}'.format(user)], shell=True).split('\n')[-2]
    njobs = summary.split(' jobs;')[0]
    return int(njobs)
def cpr(src, dst):
    '''copy src to dst: copytree for directories, plain copy otherwise

    BUG FIX: isdir/copytree/copy were never imported anywhere in this
    module, so calling cpr() raised NameError.
    '''
    from os.path import isdir
    from shutil import copy, copytree
    if isdir(src):
        copytree(src, dst)
    else:
        copy(src, dst)
def makelohilist(listofnums, maxsize):
    '''sort listofnums (not in place) and split it into consecutive runs

    Returns a list of (lo, hi) tuples describing half-open ranges
    [lo, hi) of consecutive integers, each covering at most maxsize
    values.  Raises ValueError if listofnums contains duplicates.
    '''
    if len(listofnums) != len(set(listofnums)):
        raise ValueError('listofnums contains duplicates!')
    indivnums = sorted(listofnums)
    # -- group the jobs-to-be-submitted into coherent groups
    lo = indivnums[0]
    lohilist = []
    secn = 0
    last_index = len(indivnums) - 1
    for i, n in enumerate(indivnums):
        secn += 1
        # Close the current run at the final element, at a gap in the
        # sequence, or once the run reaches maxsize.  (Index comparison
        # replaces the old "is" identity test on ints, which only worked
        # by accident of CPython's small-int caching.)
        if i == last_index or n + 1 != indivnums[i + 1] or secn >= maxsize:
            lohilist.append((lo, n + 1))
            if i != last_index:
                lo = indivnums[i + 1]
                secn = 0
    return lohilist
def incfilename(filename, i_start=0, i=None):
    '''chooses a name for a file by appending numbers incrementally (from i_start) to filename

    If filename does not exist it is returned unchanged; otherwise the
    first name <base><k><ext> (k counting up from i_start) that does not
    exist is returned.  The *i* parameter is unused; it is kept only for
    call compatibility.  (Rewritten iteratively: the old recursion could
    hit the recursion limit in directories with many numbered files.)
    '''
    from os.path import exists, splitext
    if not exists(filename):
        return filename
    basename, suffname = splitext(filename)
    k = i_start
    while True:
        newname = basename + str(k) + suffname
        if not exists(newname):
            return newname
        k += 1
def makepercent(num, tot, exact=False):
    'num/tot as a percentage: float when exact=True, else truncated int'
    pct = float(num) * 100 / float(tot)
    if exact:
        return pct
    return int(pct)
class updateprogress(object):
    """Progress reporter: renders with the optional ``progressbar``
    package when installed, otherwise prints a percentage line roughly
    once per percent of progress.

    (Fixed for Python 2/3 compatibility: bare ``print`` statements are
    now parenthesized single-argument calls -- identical output on
    Python 2 -- and the deprecated ``imp`` module, removed in Python
    3.12, is replaced by a direct import attempt.)
    """
    def __init__(self, maxval):
        super(updateprogress, self).__init__()
        self.maxval = maxval
        # Fallback path prints about once per percent of progress.
        self.printevery = float(self.maxval) / 100
        try:
            import progressbar  # noqa: F401 -- probing availability only
            self.useprogressbar = True
        except ImportError:
            self.useprogressbar = False

    def _printupdate(self, addstring=''):
        # Plain-text progress line for the no-progressbar fallback.
        print('on {0} out of {1} ({2}%){3}'.format(
            self.counter, self.maxval,
            makepercent(self.counter, self.maxval), addstring))

    def start(self):
        """Begin tracking; must be called before update()/finish()."""
        self.counter = 0
        if self.useprogressbar:
            import progressbar
            self.progbar = progressbar.ProgressBar(
                maxval=self.maxval,
                widgets=[progressbar.Bar('=', '[', ']'), ' ',
                         progressbar.Percentage()])
            self.progbar.start()
        else:
            self._lastprintupdate = 0
            print('tracking progress')
            self._printupdate()

    def update(self, i):
        """Record progress *i*; redraws/prints at ~1% granularity."""
        self.counter = i
        if self.useprogressbar:
            self.progbar.update(i)
        else:
            # Also reprint when the counter moves backwards.
            if self.counter - self._lastprintupdate >= self.printevery or (self.counter < self._lastprintupdate):
                self._printupdate()
                self._lastprintupdate = self.counter

    def finish(self):
        """Finalize the display."""
        if self.useprogressbar:
            self.progbar.finish()
        else:
            self._printupdate(' (finished)')
def incname(afile, suffix='_ConflictedCopy', i_start=1):
    '''return afile unchanged if it does not exist; otherwise the first
    free name of the form <base><suffix><i><ext>, with i counting up
    from i_start
    '''
    from os.path import exists, splitext
    def incname_suffix(afile, suffix, i):
        # Build the next candidate and recurse while it is taken.
        base, ext = splitext(afile)
        candidate = base + suffix + str(i) + ext
        return incname_suffix(afile, suffix, i + 1) if exists(candidate) else candidate
    return incname_suffix(afile, suffix, i_start) if exists(afile) else afile
|
990,547 | 96795ba97c7380d1e3e6b0503a89cabdc59656e6 | 1-re:
import re
from chp1.advanced_link_crawler import download
url = 'http://example.webscraping.com/places/default/view/Aland-Islands-2'
html = download(url)
#type(html)为<class 'str'>
#urlopen(url).read()得到的是bytes-like object,正则表达式的string模式不适用该对象
#urlopen(url).read().decode('utf-8')即为<class 'str'>,可用正则表达式匹配
print(re.findall(r'<td class="w2p_fw">(.*?)</td>', html))
print(re.findall('<td class="w2p_fw">(.*?)</td>', html)[1])
print(re.findall('<tr id="places_area__row"><td class="w2p_fl"><label id="places_area__label" class="readonly" for="places_area" >Area: </label></td><td class="w2p_fw">(.*?)</td>', html))
print(re.findall('''<tr id="places_area__row">.*?<td\s*class=["']w2p_fw["']>(.*?)</td>''', html))
2——BeautifulSoup
from bs4 import BeautifulSoup
from pprint import pprint
import html5lib
broken_html='<ul class=country><li>Area<li>Population</ul>'
soup=BeautifulSoup(broken_html,'html.parser')
#<ul class="country"><li>Area<li>Population</li></li></ul>;代码闭合,但<li>标签嵌套
soup=BeautifulSoup(broken_html,'html5lib')#两种编译器的区别:html.parser,html5lib
#<html><head></head><body><ul class="country"><li>Area</li><li>Population</li></ul></body></html>
#更完整,更正确
soup.li#标签li内的内容
soup.body#标签body类的内容
soup.find('ul',attrs={'class':'country'})
soup.find(attrs={'class':'country'})#<ul class="country"><li>Area</li><li>Population</li></ul>
soup.find('li')#<li>Area</li>
soup.find_all('li')#[<li>Area</li>, <li>Population</li>]
#find,及find_all方法都是针对HTML标签的,即<>内部的标签,其它文本无效
soup.find('li').text#返回<li>标签内的文本
3——Lxml
from lxml.html import fromstring,tostring
broken_html='<ul class=country><li>Area<li>Population</ul>'
tree=fromstring(broken_html)#tree为<Element ul at 0x1d87e8c8598>,fromstring参数为文本
good_html=tostring(tree,pretty_print=True)
print(good_html)#b'<ul class="country">\n<li>Area</li>\n<li>Population</li>\n</ul>\n'
from urllib.request import urlopen
html=urlopen('http://example.webscraping.com').read()
tree=fromstring(html)#fromstring参数为一文件
td=tree.cssselect('tr')#cssselect的选取规则
#[<Element tr at 0x1f2d2d53318>, <Element tr at 0x1f2d2d1bef8>, <Element tr at 0x1f2d2ea0458>, <Element tr at 0x1f2d2ec51d8>, <Element tr at 0x1f2d2ec53b8>]
country=td[0].text_content()#文本内容
cssselect selection rules -- see the cssselect documentation for the supported selector syntax.
XPath serves the same purpose as cssselect but uses a different selection syntax.
HTML标签的Family Trees:
td[0].getchildren()
td[0].getparent()
td[0].getprevious()
td[0].getnext()
性能比较:
FIELDS=('area','population','iso','country','capital','continent','tld','currency_code','currency_name','phone','postal_code-format','postal_code_regex','languages','neighbours')
import re
def re_scraper(html):
    """Extract every country field from *html* with one regex per field."""
    results={}
    for field in FIELDS:
        # Each field lives in a <td class="w2p_fw"> inside its row.
        results[field]=re.search('<tr id="places_%s_row">.*?<td class="w2p_fw">(.*?)</td>'%field,html).groups()[0]
    return(results)
from bs4 import BeautifulSoup
def bs_scraper(html):
    """Extract every country field from *html* with BeautifulSoup."""
    soup = BeautifulSoup(html, 'html.parser')
    results = {}
    for field in FIELDS:
        # BUG FIX: BeautifulSoup tags expose .text (or .get_text()),
        # not lxml's .text_content().
        results[field] = soup.find('table').find('tr', id='places_%s_row' % field).find('td', class_='w2p_fw').text
    return(results)
from lxml.html import fromstring
def lxml_scraper(html):
    """Extract every country field from *html* via lxml + CSS selectors."""
    tree=fromstring(html)
    results={}
    for field in FIELDS:
        results[field]=tree.cssselect('table>tr#places_%s_row>td.w2p_fw'%field)[0].text_content()
    return(results)
def lxml_xpath_scraper(html):
    """Extract every country field from *html* via lxml + XPath."""
    tree=fromstring(html)
    results={}
    for field in FIELDS:
        results[field]=tree.xpath('//tr[@id="places_%s_row"]/td[@class="w2p_fw"]'%field)[0].text_content()
    return(results)
import time
import re
import urllib.request
def download(url):
    """Fetch *url* and return the raw response body."""
    response = urllib.request.urlopen(url)
    return response.read()
# Benchmark: run each scraper 1000 times over the same page and report
# the elapsed time for each approach.
NUM_ITERATIONS=1000
html=download('http://example.webscraping.com/places/default/view/United-Kingdom-239')
scrapers=[('Regular expressions',re_scraper),('BeautifulSoup',bs_scraper),('Lxml',lxml_scraper),('Xpath',lxml_xpath_scraper)]
for name,scraper in scrapers:
    start=time.time()
    for i in range(NUM_ITERATIONS):
        if scraper==re_scraper:  # the re module caches patterns; clear it so the comparison is fair
            re.purge()
        result=scraper(html)
        assert result['area']=='244,820 square kilometres'
    end=time.time()
    print('%s:%.2f seconds'%(name,end-start))
TODO: add a scrape callback to the link crawler.
|
990,548 | df597c26779a2b5744e4e8959734a22991a773c2 | #!/usr/bin/env python
#
# GrovePi Example for using the Grove - Barometer (High-Accuracy)(http://www.seeedstudio.com/depot/Grove-Barometer-HighAccuracy-p-1865.html
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this library? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
# This library is derived from the Arduino library written by Oliver Wang for SeeedStudio (https://github.com/Seeed-Studio/Grove_Barometer_HP20x/tree/master/HP20x_dev)
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2017 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import hp206c
# Instantiate the HP206C driver and verify the device responds.
h= hp206c.hp206c()
ret=h.isAvailable()
if h.OK_HP20X_DEV == ret:
    print("HP20x_dev is available.")
else:
    print("HP20x_dev isn't available.")
# Read one sample of each measurement and report them.
temp=h.ReadTemperature()
pressure=h.ReadPressure()
altitude=h.ReadAltitude()
print("Temperature\t: %.2f C\nPressure\t: %.2f hPa\nAltitude\t: %.2f m" %(temp,pressure,altitude))
990,549 | f86d56beb8737dc918b85bb0d7af42f111ae7be5 | import pickle
import matplotlib.pyplot as plt
import numpy as np
import os
def IQ(adc_raw):
    """Demodulate interleaved ADC samples into I and Q channels.

    *adc_raw* is reshaped into (I, Q) sample pairs, the global mean (DC
    offset) is removed, and a +/-1 mask -- chosen from the signs of the
    first pair -- is applied with alternating polarity per pair.

    Returns [I, Q] as two numpy arrays.

    BUG FIX: removed unreachable statements after the return that
    referenced undefined names (Q_MO, I_MO).
    """
    adc_shape = np.shape(adc_raw)
    adc_raw = adc_raw.reshape(adc_shape[0] * 2, 2)
    adc_raw = adc_raw - np.mean(adc_raw)
    # Select the initial demodulation mask from the first pair's signs.
    if (((adc_raw[0] > 0) == [True, True]).all()):
        I_Qmask = [1, 1]
    elif (((adc_raw[0] > 0) == [True, False]).all()):
        I_Qmask = [1, -1]
    elif (((adc_raw[0] > 0) == [False, True]).all()):
        I_Qmask = [-1, 1]
    else:
        I_Qmask = [-1, -1]
    I = []
    Q = []
    for i in range(len(adc_raw)):
        I.append(adc_raw[i][0] * I_Qmask[0])
        Q.append(adc_raw[i][1] * I_Qmask[1])
        # The mask flips sign on every sample pair.
        I_Qmask = np.multiply(I_Qmask, -1)
    I = np.array(I)
    Q = np.array(Q)
    return [I, Q]
'''
script_dir = os.path.dirname(__file__)
rel_path = "2091/data.txt"
abs_file_path = os.path.join(script_dir, rel_path)
'''
def _load_iq(path):
    """Load one pickled capture ([meta, samples]) and return its
    (I, Q) columns as arrays.

    BUG FIX: the original loads rebound the module-level name ``IQ``,
    silently clobbering the IQ() function defined above; a helper keeps
    the loop variables local.
    """
    with open(path, 'rb') as f:
        data = np.array(pickle.load(f)[1])
    return data[:, 0], data[:, 1]

# One capture per (samples, channel) configuration.
I_3, Q_3 = _load_iq('27_03_aqs/3_1.pickle')
I_4, Q_4 = _load_iq('27_03_aqs/4_1.pickle')
I_7, Q_7 = _load_iq('27_03_aqs/7_2.pickle')
I_8, Q_8 = _load_iq('27_03_aqs/8_2.pickle')
I_11, Q_11 = _load_iq('27_03_aqs/11_3.pickle')
I_12, Q_12 = _load_iq('27_03_aqs/12_3.pickle')
I_15, Q_15 = _load_iq('27_03_aqs/15_4.pickle')
I_16, Q_16 = _load_iq('27_03_aqs/16_4.pickle')

# Truncate each group of traces to its shortest capture so they align.
max_len = np.min([len(I_3), len(I_7), len(I_11), len(I_15)])
max_len1 = np.min([len(I_4), len(I_8), len(I_12), len(I_16)])
I_3 = I_3[0:max_len]
I_7 = I_7[0:max_len]
I_11 = I_11[0:max_len]
I_15 = I_15[0:max_len]
Q_3 = Q_3[0:max_len]
Q_7 = Q_7[0:max_len]
Q_11 = Q_11[0:max_len]
Q_15 = Q_15[0:max_len]
I_4 = I_4[0:max_len1]
I_8 = I_8[0:max_len1]
I_12 = I_12[0:max_len1]
I_16 = I_16[0:max_len1]
Q_4 = Q_4[0:max_len1]
Q_8 = Q_8[0:max_len1]
Q_12 = Q_12[0:max_len1]
Q_16 = Q_16[0:max_len1]
'''
plt.subplot(211)
plt.plot(I_4,label='IQ 4 samples')
plt.plot(I_8,label='IQ 8 samples')
plt.plot(I_12,label='IQ 12 samples')
plt.plot(I_16,label='IQ 16 samples')
plt.legend()
plt.subplot(212)
plt.plot(Q_4,label='IQ 4 samples')
plt.plot(Q_8,label='IQ 8 samples')
plt.plot(Q_12,label='IQ 12 samples')
plt.plot(Q_16,label='IQ 16 samples')
plt.legend()
plt.figure()
plt.subplot(211)
plt.plot(I_3,label='IQ 3 samples')
plt.plot(I_7,label='IQ 7 samples')
plt.plot(I_11,label='IQ 11 samples')
plt.plot(I_15,label='IQ 15 samples')
plt.legend()
plt.subplot(212)
plt.plot(Q_3,label='IQ 3 samples')
plt.plot(Q_7,label='IQ 7 samples')
plt.plot(Q_11,label='IQ 11 samples')
plt.plot(Q_15,label='IQ 15 samples')
plt.legend()
'''
# "SNR" figures: 20 * ln(std) of each trace.
# NOTE(review): np.log is the natural logarithm; the usual dB convention
# is 20*log10 -- confirm which was intended.
SNRI4=20*np.log(np.std(I_4))
SNRI8=20*np.log(np.std(I_8))
SNRI12=20*np.log(np.std(I_12))
SNRI16=20*np.log(np.std(I_16))
SNRQ4=20*np.log(np.std(Q_4))
SNRQ8=20*np.log(np.std(Q_8))
SNRQ12=20*np.log(np.std(Q_12))
SNRQ16=20*np.log(np.std(Q_16))
SNRI3=20*np.log(np.std(I_3))
SNRI7=20*np.log(np.std(I_7))
SNRI11=20*np.log(np.std(I_11))
SNRI15=20*np.log(np.std(I_15))
SNRQ3=20*np.log(np.std(Q_3))
SNRQ7=20*np.log(np.std(Q_7))
SNRQ11=20*np.log(np.std(Q_11))
SNRQ15=20*np.log(np.std(Q_15))
# One line per group: even sample counts (I then Q), odd counts (I then Q).
print(SNRI4,SNRI8,SNRI12,SNRI16)
print(SNRQ4,SNRQ8,SNRQ12,SNRQ16)
print(SNRI3,SNRI7,SNRI11,SNRI15)
print(SNRQ3,SNRQ7,SNRQ11,SNRQ15)
# BUG FIX: a stray unterminated triple-quote opened here, which made the
# entire module a SyntaxError.  plt.show() stays disabled, matching the
# plotting code above, which is also commented out.
# plt.show()
990,550 | c16f40ecdf1ccd2a2c4fefe6cd41d060fe246ef6 | '''
Calls the GUI for jet tracking. Ultimately only this file should need to be run, and the GUI will
control when the jet tracking methods e.g. calibrate(), jet_detect(), etc should be run
'''
from qtpy.QtCore import QThread
from pydm import Display
import jt_utils
import jet_control
from time import sleep
class TrackThread(QThread):
    """Worker thread meant to run the jet-tracking loop off the GUI thread.

    The device logic is currently commented out (string placeholders), so
    run() does nothing until interruption is requested.
    """
    def __init__(self):
    # def __init__(self, injector, camera, cspad, stopper, pulse_picker, wave8, params):
        super().__init__()
        '''
        self.stopper = stopper
        self.pulse_picker = pulse_picker
        self.wave8 = wave8
        self.cspad = cspad
        self.camera = camera
        self.injector = injector
        self.params = params
        '''
    def run(self):
        # NOTE(review): with the body commented out, this loop busy-spins
        # at full CPU until requestInterruption() is called -- consider
        # sleep()ing per iteration when re-enabling the logic below.
        while not self.isInterruptionRequested():
            '''
            # check devices first
            # check if stopper is in
            if (jt_utils.get_stopper(self.stopper) == 1):
                # if stopper is in, stop jet tracking
                print('Stopper in - TRACKING STOPPED')
                self.requestInterruption()
                continue
            # check if pulse picker is closed
            if (jt_utils.get_pulse_picker(self.pulse_picker) == 1):
                # if pulse picker is closed, stop jet tracking
                print('Pulse picker closed - TRACKING STOPPED')
                self.requestInterruption()
                continue
            # check wave8
            if (jt_utils.get_wave8(self.wave8) < self.params.thresh_w8):
                # if wave8 is below threshold, continue running jet tracking but do not move
                print('Wave8 below threshold - NOT TRACKING')
                continue
            # check CSPAD
            # get azimuthal average from CSPAD & Wave8 data
            if (jt_utils.get_cspad(azav, params.radius.get(), gas_det) <
                    self.params.intensity.get() * self.params.thresh_lo.get()):
                # if CSPAD is below lower threshold, move jet
                if (not self.params.bypass_camera()):
                    # if camera is not bypassed, check if there is a jet and location of jet
                    try:
                        jet_control.jet_calculate_inline(self.camera, self.params)
                        # if jet is more than 10 microns away from x-rays, move jet using camera feedback
                        # threshold for this can be changed if needed
                        if (self.params.jet_x.get() > 0.01):
                            jet_control.jet_move_inline(self.injector, self.camera, self.params)
                        continue
                    except Exception:
                        # if jet is not detected, continue running jet tracking but do not move
                        print('Cannot find jet - NOT TRACKING')
                        continue
                # if camera is bypassed or if jet is less than 10 microns away from x-rays, scan jet across x-rays to find new maximum
                jet_control.scan(self.injector, self.cspad)
                # get azimuthal average from CSPAD & Wave8 data
                intensity = jt_utils.get_cspad(azav, self.params.radius.get(), gas_det)
                self.params.intensity.put(intensity)
                # if CSPAD is still below upper threshold, stop jet tracking
                if (jt_utils.get_cspad(azav, self.params.radius.get(), gas_det) <
                        self.params.intensity.get() * self.params.thresh_hi.get()):
                    print('CSPAD below threshold - TRACKING STOPPED')
                    self.requestInterruption()
            '''
class JetTrack(Display):
    """PyDM display driving jet tracking: wires the calibrate/start/stop
    buttons to a TrackThread worker and manages button availability."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # TrackThread to run jet tracking in
        self.track_thread = TrackThread()
        # self.track_thread = TrackThread(injector, camera, cspad, stopper, pulse_picker, wave8, params)
        # connect GUI buttons to appropriate methods
        self.ui.calibrate_btn.clicked.connect(self.calibrate_clicked)
        self.ui.start_btn.clicked.connect(self.start_clicked)
        self.ui.stop_btn.clicked.connect(self.stop_clicked)
        # Only calibration is allowed until a calibration has completed.
        self.ui.calibrate_btn.setEnabled(True)
        self.ui.start_btn.setEnabled(False)
        self.ui.stop_btn.setEnabled(False)
    def ui_filename(self):
        '''
        Load ui file for GUI
        '''
        return 'jettracking.ui'
    def calibrate_clicked(self):
        '''
        Runs calibration method when calibrate button is clicked
        '''
        self.ui.logger.write('Calibrating')
        self.ui.calibrate_btn.setEnabled(False)
        #jet_control.calibrate(injector, camera, cspad, params)
        self.ui.logger.write('Calibration complete - can now run jet tracking')
        self.ui.calibrate_btn.setEnabled(True)
        # activate start button
        self.ui.start_btn.setEnabled(True)
        return
    def start_clicked(self):
        '''
        Starts new thread to run jet tracking in when start button is clicked
        '''
        self.ui.logger.write('Running jet tracking')
        self.ui.start_btn.setEnabled(False)
        self.ui.stop_btn.setEnabled(True)
        self.ui.calibrate_btn.setEnabled(False)
        # start TrackThread
        self.track_thread.start()
    def stop_clicked(self):
        '''
        Stops jet tracking when stop button is clicked
        '''
        self.track_thread.requestInterruption()
        self.ui.logger.write('Jet tracking stopped')
        self.ui.stop_btn.setEnabled(False)
        self.ui.start_btn.setEnabled(True)
        self.ui.calibrate_btn.setEnabled(True)
|
990,551 | b05c0cb05e2ba610cc798ab1d6239c87e79079ce | ## engine.py ###################################################################
## core game engine stuff ######################################################
################################################################################
class voidEngine:
    """Core engine object: owns the list of active game objects."""

    def __init__(self):
        # BUG FIX: the original signature was `def __init__():` (missing
        # self), so constructing a voidEngine raised TypeError.
        self.gameObjects = []
|
import xml.etree.ElementTree as etree

# Read the source HTML table and echo it for inspection.
with open("table-input.htm", "r") as f:
    read_data = f.read()
print ("File content:")
print(read_data)

# Parse the markup and emit one CSV line per table row.
tree = etree.fromstring(read_data)
with open("table-input.csv", "w") as f:
    # BUG FIX: Element.getiterator() was deprecated and removed in
    # Python 3.9; Element.iter() is the supported equivalent.  Each <tr>
    # is unpacked into its three child cells (amount, unit, item).
    for amt, unit, item in tree.iter('tr'):
        print("%s,%s,%s" % (amt.text, unit.text, item.text))
        f.write("%s,%s,%s\n" % (amt.text, unit.text, item.text))
|
990,553 | f200f13b3815ae7f817bcac14fb2b0fc6b7ee3ac | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: wangfp time:2017/11/4

# MongoDB connection settings for storing scraped Taobao results.
MONGO_URL = 'localhost'
MONGO_DB = 'taobao'
MONGO_TABLE = 'meishi'
# PhantomJS command-line options (see the official site for the full
# list): skip image loading, enable the disk cache.
SERVICE_ARGS = ['--load-images=false',
                '--disk-cache=true']
# Search keyword ("food"); remember to change MONGO_TABLE to match.
KEYWORD = '美食'
990,554 | 1005361f5020fc6b3220c77b18e45f7a8a19e964 | from os import environ
from collections import OrderedDict
import sys
class SetupExample:
    """Collects required/optional environment-variable settings.

    Registered variables found in the environment are coerced to int
    (they name device pins) and set as attributes on this object; a
    missing required variable prints usage help and exits with status 1.
    """
    def __init__(self, help=None):
        # OrderedDicts keep the usage output in registration order.
        self.required_vars = OrderedDict()
        self.optional_vars = OrderedDict()
        self.help = help

    def required_var(self, var, desc):
        """Register environment variable *var* as required."""
        self.required_vars[var] = desc

    def rv(self, var, desc):
        """Shorthand for required_var()."""
        self.required_var(var, desc)

    def optional_var(self, var, desc):
        """Register environment variable *var* as optional."""
        self.optional_vars[var] = desc

    def ov(self, var, desc):
        """Shorthand for optional_var()."""
        self.optional_var(var, desc)

    def has_var(self, var):
        """Return True once *var* has been read onto this object."""
        return hasattr(self, var)

    def _read_env(self, var):
        """Set self.<var> from the environment (int-coerced); True if found."""
        if var in environ:
            setattr(self, var, int(environ[var]))
            return True
        return False

    def _print_usage(self, var):
        """Explain the missing *var* and show example usage."""
        # (Typo fix: the message previously read "setting fo %s pin".)
        print("Couldn't find required environment setting for %s pin." % var)
        print("")
        if self.help:
            print(self.help)
        print("These are the required settings which should correspond to pins on devices:")
        print("")
        for name, desc in self.required_vars.items():
            print(" %s - %s" % (name, desc))
        for name, desc in self.optional_vars.items():
            print(" %s - %s [OPTIONAL]" % (name, desc))
        print("")
        print("Example Usage:")
        print("")
        example_string = " "
        for i, name in enumerate(self.required_vars.keys()):
            example_string += "%s=%d " % (name, i + 1)
        example_string += "%s" % sys.argv[0]
        print(example_string)
        print("")

    def setup(self):
        """Read every registered variable; exit(1) on the first missing
        required one after printing usage help."""
        for var in self.optional_vars.keys():
            self._read_env(var)
        for var in self.required_vars.keys():
            if not self._read_env(var):
                self._print_usage(var)
                sys.exit(1)
|
990,555 | 20e51ef6186f6775f054a15bd4c790a5af054b26 | # -*- coding: utf-8 -*-
# file: asgcn.py
# author: <gene_zhangchen@163.com>
# Copyright (C) 2020. All Rights Reserved.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.dynamic_rnn import DynamicLSTM
class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """
    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # torch.FloatTensor allocates *uninitialized* memory; without an
        # explicit init the weights start as garbage (possibly NaN/inf).
        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialise the weight and zero the bias."""
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.zeros_(self.bias)

    def forward(self, text, adj):
        """Propagate: out = (A @ X @ W) / (rowsum(A) + 1) (+ bias).

        text: (batch, seq, in_features); adj: (batch, seq, seq).
        """
        hidden = torch.matmul(text, self.weight)
        # +1 in the normaliser avoids division by zero for isolated nodes.
        denom = torch.sum(adj, dim=2, keepdim=True) + 1
        output = torch.matmul(adj, hidden) / denom
        if self.bias is not None:
            return output + self.bias
        else:
            return output
class ASGCN(nn.Module):
    """Aspect-Specific GCN for aspect-based sentiment classification:
    BiLSTM encoder, two GCN layers over the dependency adjacency matrix,
    aspect masking, and attention pooling into a polarity classifier."""
    def __init__(self, embedding_matrix, opt):
        super(ASGCN, self).__init__()
        self.opt = opt
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.text_lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
        # Bidirectional LSTM output is 2*hidden_dim wide.
        self.gc1 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
        self.gc2 = GraphConvolution(2*opt.hidden_dim, 2*opt.hidden_dim)
        self.fc = nn.Linear(2*opt.hidden_dim, opt.polarities_dim)
        self.text_embed_dropout = nn.Dropout(0.3)
    def position_weight(self, x, aspect_double_idx, text_len, aspect_len):
        """Scale each token of x by its proximity to the aspect span.

        x: (batch, seq, feat); aspect_double_idx: (batch, 2) start/end
        indices of the aspect.  Aspect tokens and padding get weight 0.
        """
        batch_size = x.shape[0]
        seq_len = x.shape[1]
        aspect_double_idx = aspect_double_idx.cpu().numpy()
        text_len = text_len.cpu().numpy()
        aspect_len = aspect_len.cpu().numpy()
        weight = [[] for i in range(batch_size)]
        for i in range(batch_size):
            context_len = text_len[i] - aspect_len[i]
            # Tokens left of the aspect: weight rises toward the aspect.
            for j in range(aspect_double_idx[i,0]):
                weight[i].append(1-(aspect_double_idx[i,0]-j)/context_len)
            # The aspect span itself is zeroed.
            for j in range(aspect_double_idx[i,0], aspect_double_idx[i,1]+1):
                weight[i].append(0)
            # Tokens right of the aspect: weight decays with distance.
            for j in range(aspect_double_idx[i,1]+1, text_len[i]):
                weight[i].append(1-(j-aspect_double_idx[i,1])/context_len)
            # Padding positions get zero weight.
            for j in range(text_len[i], seq_len):
                weight[i].append(0)
        weight = torch.tensor(weight, dtype=torch.float).unsqueeze(2).to(self.opt.device)
        return weight*x
    def mask(self, x, aspect_double_idx):
        """Zero every token of x that lies outside the aspect span."""
        batch_size, seq_len = x.shape[0], x.shape[1]
        aspect_double_idx = aspect_double_idx.cpu().numpy()
        mask = [[] for i in range(batch_size)]
        for i in range(batch_size):
            for j in range(aspect_double_idx[i,0]):
                mask[i].append(0)
            for j in range(aspect_double_idx[i,0], aspect_double_idx[i,1]+1):
                mask[i].append(1)
            for j in range(aspect_double_idx[i,1]+1, seq_len):
                mask[i].append(0)
        mask = torch.tensor(mask, dtype=torch.float).unsqueeze(2).to(self.opt.device)
        return mask*x
    def forward(self, inputs):
        """inputs: (text_indices, aspect_indices, left_indices, adj);
        index tensors use 0 for padding, adj is the dependency adjacency.
        Returns polarity logits of shape (batch, polarities_dim)."""
        text_indices, aspect_indices, left_indices, adj = inputs
        # Non-zero counts give the true (unpadded) lengths.
        text_len = torch.sum(text_indices != 0, dim=-1)
        aspect_len = torch.sum(aspect_indices != 0, dim=-1)
        left_len = torch.sum(left_indices != 0, dim=-1)
        # Aspect span = [left context length, left + aspect length - 1].
        aspect_double_idx = torch.cat([left_len.unsqueeze(1), (left_len+aspect_len-1).unsqueeze(1)], dim=1)
        text = self.embed(text_indices)
        text = self.text_embed_dropout(text)
        text_out, (_, _) = self.text_lstm(text, text_len)
        # DynamicLSTM may shorten the sequence; crop adj to match.
        seq_len = text_out.shape[1]
        adj = adj[:, :seq_len, :seq_len]
        x = F.relu(self.gc1(self.position_weight(text_out, aspect_double_idx, text_len, aspect_len), adj))
        x = F.relu(self.gc2(self.position_weight(x, aspect_double_idx, text_len, aspect_len), adj))
        x = self.mask(x, aspect_double_idx)
        # Attention of aspect-masked features over the LSTM outputs.
        alpha_mat = torch.matmul(x, text_out.transpose(1, 2))
        alpha = F.softmax(alpha_mat.sum(1, keepdim=True), dim=2)
        x = torch.matmul(alpha, text_out).squeeze(1) # batch_size x 2*hidden_dim
        output = self.fc(x)
        return output
990,556 | 678c890f6fe48638742b2ec308f22100b69233a1 | import itertools
from colorama import Fore,Back,Style,init
init()
def win(current_game):
    """Return True (and announce the winner) if the board has a winning line.

    Checks rows, both diagonals, and columns; a line wins when every cell
    holds the same non-zero player number.

    Fix: the original ignored its parameter and read the module-level
    global `game`, so it only worked when called from this file's main loop.
    """
    def all_same(line):
        # A line is winning when every cell equals the first and is not empty (0).
        return line.count(line[0]) == len(line) and line[0] != 0
    # Horizontal: each row of the board.
    for row in current_game:
        if all_same(row):
            print(f"Player {row[0]} is the Winner,horizontally!")
            return True
    # Anti-diagonal (/): bottom-left to top-right.
    diag = []
    for col, row in enumerate(reversed(range(len(current_game)))):
        diag.append(current_game[row][col])
    if all_same(diag):
        print(f"Player {diag[0]} is the winner diagonally(/)!")
        return True
    # Main diagonal (\): top-left to bottom-right.
    diag = []
    for ix in range(len(current_game)):
        diag.append(current_game[ix][ix])
    if all_same(diag):
        print(f"Player {diag[0]} is the Winner,diagonally (\\)!")
        return True
    # Vertical: collect each column and test it.
    for col in range(len(current_game)):
        check = [row[col] for row in current_game]
        if all_same(check):
            print(f"Player {check[0]} is the winner verticially!")
            return True
    return False
def game_board(game_map, player=0, row=0, column=0, just_display=False):
    """Render the board and, unless just_display, place `player` at (row, column).

    Returns (game_map, played): played is False when the move was rejected
    (occupied cell or out-of-range index), True otherwise.
    """
    try:
        # Only validate the target cell for an actual move; previously this
        # check also ran for display-only calls and could wrongly reject
        # them based on the default (0, 0) position.
        if not just_display and game_map[row][column] != 0:
            print("This position is occupado! Choose another!")
            return game_map, False
        # Column-number header.
        print(" "+" ".join([str(i) for i in range(len(game_map))]))
        if not just_display:
            game_map[row][column] = player
        # `board_row` avoids shadowing the `row` parameter.
        for count, board_row in enumerate(game_map):
            colored_row = ""
            for item in board_row:
                if item == 0:
                    colored_row += " "
                elif item == 1:
                    colored_row += Fore.GREEN + ' X ' + Style.RESET_ALL
                elif item == 2:
                    colored_row += Fore.MAGENTA + ' O ' + Style.RESET_ALL
            print(count, colored_row)
        return game_map, True
    except IndexError as e:
        # User typed an index outside the board.
        print("Error: make sure that you entered 0,1 or 2", e)
        return game_map, False
    except Exception as e:
        print("Something went really wrong!", e)
        return game_map, False
# Main game loop: alternate players until someone wins, then offer a rematch.
play= True
players = [1,2]
while play:
    # Fresh 3x3 board each round; 0 = empty cell.
    game = [[0, 0, 0],
            [0, 0, 0],
            [0, 0, 0],
            ]
    game_won = False
    game, _ = game_board(game,just_display=True)
    # Endless 1,2,1,2,... turn order.
    player_choice = itertools.cycle([1,2])
    while not game_won:
        current_player = next(player_choice)
        print(f"Current player:{current_player}")
        played = False
        # Re-prompt until a legal move is made.
        while not played:
            column_choice = int(input("What column do you want to play? (0,1,2):"))
            row_choice = int(input("What row do you want to play? (0,1,2):"))
            game,played = game_board(game,current_player,row_choice,column_choice)
        if win(game):
            game_won = True
            again = input("The game is over would you like to play again? (y/n)")
            if again.lower() == "y":
                print("restarting")
            elif again.lower() == "n":
                print("Bye then")
                play = False
            else :
                print("Invalid answer see you later")
                play = False
|
990,557 | dcb244c310d8948936efd55a43b43a1f230ce658 | import pygame
import random
import time
pygame.init()
# Color palette (RGB tuples).
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 128, 0)
chrome_white = (232, 231, 226)
masala = (87, 86, 84)
redoxide = (106, 27, 27)
# Two fonts (currently identical) for the score and for messages.
font1 = pygame.font.SysFont(None, 35)
font2 = pygame.font.SysFont(None, 35)
display_width = 1024
display_height = 768
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('Catch')
fps = 200
clock = pygame.time.Clock()
# Fixed x-positions where falling objects can spawn; the parallel flag
# list appears unused in this file.
channels = [75, 175, 275, 375, 475, 575, 675, 775, 875, 975]
channels_beingused = [False, False, False, False, False, False, False, False, False, False]
#collecting these objects will result in positive points
class goodFO:
    """A 'good' falling object: catching it scores a point."""
    # Class-level fall speed; note speedup() assigns through `self`, which
    # creates an instance attribute shadowing this class attribute.
    change_gfo = 3
    def speedup(self):
        self.change_gfo += 1
    def __init__(self, channelnumber):
        # Spawn at the given channel's x-position, just above the screen.
        self.x_coord = channels[channelnumber]
        self.y_coord = -10
    def drawFO(self, x_basket, y_basket):
        """Handle catching, then draw and advance (or respawn) the object."""
        # Caught: horizontally within the basket and below its rim.
        if self.x_coord > x_basket - 5 and self.x_coord < x_basket + 75 :
            if self.y_coord > y_basket + 10 :
                positivescore()
                self.x_coord = channels[random.randint(0, 9)]
                self.y_coord = -10
        # Still on screen: draw and keep falling; otherwise respawn at the top.
        if self.y_coord <= display_height*0.95 :
            pygame.draw.circle(gameDisplay, green, [self.x_coord, self.y_coord], 15)
            self.y_coord += self.change_gfo
        else:
            self.x_coord = channels[random.randint(0, 9)]
            self.y_coord = -10
#collecting these objects will result in negative points
class badFO:
    """A 'bad' falling object: catching it ends the game."""
    # Class-level fall speed; speedup() creates a shadowing instance attribute.
    change_bfo = 4
    def speedup(self):
        self.change_bfo += 1
    def __init__(self, channelnumber):
        # Spawn at the given channel's x-position, just above the screen.
        self.x_coord = channels[channelnumber]
        self.y_coord = -10
    def drawFO(self, x_basket, y_basket):
        """Handle catching (game over), then draw and advance or respawn."""
        global gameover
        # Caught: horizontally within the basket and below its rim.
        if self.x_coord > x_basket - 5 and self.x_coord < x_basket + 75 :
            if self.y_coord > y_basket + 10 :
                gameover = True
                self.x_coord = channels[random.randint(0, 9)]
                self.y_coord = -10
        # Still on screen: draw and keep falling; otherwise respawn at the top.
        if self.y_coord <= display_height*0.95 :
            pygame.draw.circle(gameDisplay, red, [self.x_coord, self.y_coord], 15)
            self.y_coord += self.change_bfo
        else:
            self.x_coord = channels[random.randint(0, 9)]
            self.y_coord = -10
# Module-level game state, mutated through the helper functions below.
score = None
gameover = None
def initgameover():
    # Reset the game-over flag at the start of a run.
    global gameover
    gameover = False
def initscore():
    # Reset the score at the start of a run.
    global score
    score = 0
def positivescore():
    # Called when a good object is caught.
    global score
    score += 1
def display_score(score):
    # Render the score string near the top-right corner.
    screen_text = font1.render(score, True, black)
    gameDisplay.blit(screen_text, [display_width*0.93, display_height*0.07])
def display_message(msg, color, x, y):
    # Render an arbitrary message at pixel position (x, y).
    screen_text = font2.render(msg, True, color)
    gameDisplay.blit(screen_text, [x, y])
#display_message("Press 'S' to start", black)
#pygame.display.update()
def gameLoop():
    """Run one game session: event handling, movement, drawing, game-over screen.

    NOTE(review): pressing R restarts by calling gameLoop() recursively, so
    repeated restarts grow the call stack; quit() at the end exits the
    interpreter rather than returning.
    """
    global gameover
    goodob = goodFO(random.randint(0, 9))
    badob = badFO(random.randint(0, 9))
    initgameover()
    initscore()
    speed = False
    start = time.time()
    gameExit = False
    # Basket starting position and horizontal speed.
    lead_x = display_width/50
    lead_y = display_height*0.85
    change_x = 5
    right = False
    left = False
    up = False
    down = False
    while not gameExit:
        # Every 15 seconds everything speeds up by one step.
        if time.time() > start + 15:
            speed = True
        if speed == True:
            change_x += 1
            goodob.speedup()
            badob.speedup()
            speed = False
            start = time.time()
        # Game-over screen: loop until the player restarts or quits.
        while gameover == True:
            gameDisplay.fill(chrome_white)
            display_message("Your Score : %d"%score, green, display_width*0.42, display_height*0.45)
            display_message("GAME OVER", red, display_width*0.42, display_height*0.35)
            display_message("Press R to try again, Press Q to quit", black, display_width*0.3, display_height*0.55)
            pygame.display.update()
            for event in pygame.event.get():
                print(event)
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_r:
                        gameLoop()
                    if event.key == pygame.K_q:
                        gameover = False
                        gameExit = True
                if event.type == pygame.QUIT:
                    gameover = False
                    gameExit = True
        # Normal gameplay input: arrow keys set/clear movement flags.
        for event in pygame.event.get():
            print(event)
            if event.type == pygame.QUIT:
                gameExit = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RIGHT:
                    right = True
                if event.key == pygame.K_LEFT:
                    left = True
                #if event.key == pygame.K_DOWN:
                    #down = True
                #if event.key == pygame.K_UP:
                    #up = True
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_RIGHT:
                    right = False
                if event.key == pygame.K_LEFT:
                    left = False
                #if event.key == pygame.K_DOWN:
                    #down = False
                #if event.key == pygame.K_UP:
                    #up = False
        # Move the basket, clamped to the screen edges.
        if right == True and lead_x <= 948:
            lead_x += change_x
        if left == True and lead_x >= 7:
            lead_x -= change_x
        #if down == True and lead_y <= 722:
        #    lead_y += 1.1
        #if up == True and lead_y >= 7:
        #    lead_y -= 1.1
        # Redraw the frame: background, objects, score, border, basket arc.
        gameDisplay.fill(chrome_white)
        goodob.drawFO(lead_x, lead_y)
        badob.drawFO(lead_x, lead_y)
        display_score(str(score))
        pygame.draw.line(gameDisplay, redoxide, (0, 0), (1024, 0), 10)
        pygame.draw.line(gameDisplay, redoxide, (0, 0), (0, 768), 10)
        pygame.draw.line(gameDisplay, redoxide, (0, 768), (1024, 768), 14)
        pygame.draw.line(gameDisplay, redoxide, (1024, 768), (1024, 0), 14)
        pygame.draw.arc(gameDisplay, masala, [lead_x, lead_y, 70, 70], 3, 6.45, 7)
        pygame.display.update()
        clock.tick(fps)
    pygame.quit()
    quit()
gameLoop()
|
990,558 | c8f41d823d202d6ec7003a10e75cd0d92f386b07 | ##
import numpy as np
import patsy
import scipy
import statsmodels.api as sm
##
def predict(L, formula, data, level=0.95, interval="prediction", model_matrix = False):
    """
    L is either a model matrix or a data frame
    of the same structure like the data argument.
    formula and data describe the model.
    interval: "prediction" or "confidence"
    Returns an (n, 2) array of [lower, upper] interval bounds, one row per
    row of L, at the given confidence level.
    """
    # Fit OLS from the formula against the training data.
    y, X = patsy.dmatrices(formula, data, return_type='dataframe')
    model = sm.OLS(y, X).fit()
    if not model_matrix:
        L = patsy.dmatrices(formula, L, return_type="matrix")[1] # same columns like the model matrix now
    xtx_pinv = np.linalg.pinv(X.T.dot(X))
    if interval=="confidence":
        # Standard error of the mean response.
        se = np.array([np.sqrt(model.mse_resid*vect.dot(xtx_pinv).dot(vect.T)) for vect in L])
    else:
        # Prediction interval: adds the residual variance term (1 + ...).
        se = np.array([np.sqrt(model.mse_resid*(1+vect.dot(xtx_pinv).dot(vect.T))) for vect in L])
    # Two-sided t critical value.
    t = scipy.stats.t.ppf((level+1)/2, model.df_resid)
    point_estimates = np.array([(vect*model.params).sum() for vect in L])
    lower = point_estimates - t*se
    upper = lower + 2*t*se
    return np.hstack([lower.reshape(-1,1), upper.reshape(-1,1)])
##
# NOTE(review): `plt`, `predictions`, and `confidence` are not defined in
# this file — this cell only runs in an interactive session where
# matplotlib.pyplot was imported as plt and predict() results were bound
# to these names beforehand.
plt.figure()
plt.plot(predictions[:,0], 'r--')
plt.plot(predictions[:,1], 'r--')
plt.plot(confidence[:,0], 'b-')
plt.plot(confidence[:,1], 'b-')
plt.plot((confidence[:,0]+confidence[:,1])/2, 'ko')
plt.plot((confidence[:,0]+confidence[:,1])/2, 'k-')
plt.show()
##
|
990,559 | e2d799c7d21df1c1a27cc3bd90cde1c9b276908b | class Solution(object):
def topKFrequent(self, nums, k):
hash_dict = dict()
lst = list()
for num in nums:
hash_dict[num] = hash_dict.get(num, 0) + 1
for key,val in hash_dict.iteritems():
lst.append((val, key))
lst.sort(reverse=True)
res = list()
for i in range(k):
res.append(lst[i][1])
return res
|
990,560 | b63e8dcf8f3456f87ca250abb7fb0f1372091b6b | """
Test crosss platform terminal color
https://pypi.python.org/pypi/colorama
"""
import colorama
colorama.init()
from colorama import Fore, Back, Style
print(Fore.RED + 'some red text')
print(Back.GREEN + 'and with a green background')
print(Style.DIM + 'and in dim text')
print(Fore.RESET + Back.RESET + Style.RESET_ALL)
print('back to normal now')
# Raw ANSI escape codes for comparison with the colorama constants above.
print('\033[31m' + 'some red text')
print('\033[30m') # 30 = black foreground (not a true reset to default)
990,561 | 46a39f1a7dd8eb640ccee346681621b56b3a28e5 | def get_dict(filename='../data/pku_training_words.utf8'):
"""读取字典"""
d = {}
d['_t_'] = 0.0
with open(filename, "r") as f:
for line in f:
word = line.split('\n')[0]
d['_t_'] += 1
d[word] = 1
return d
d = get_dict()
def build_graph(s, big_dict):
    """Build a word-segmentation DAG for string `s`.

    Nodes are the gap positions 0..len(s); an edge start -> end (weight 1)
    exists for every single character and for every substring present in
    `big_dict`. Returned as a dict-of-dicts adjacency structure.
    """
    n = len(s)
    # Adjacency "matrix" implemented with nested dicts.
    graph = {node: {} for node in range(n + 1)}
    # Every single character is always a valid edge.
    for node in range(n):
        graph[node][node + 1] = 1
    # Multi-character substrings found in the dictionary.
    for span in range(2, n + 1):
        for begin in range(n + 1):
            end = begin + span
            if end <= n and s[begin:end] in big_dict:
                # All edge weights are simply 1.
                graph[begin][end] = 1
    return graph
def row_equal(row_1, row_2):
    """True when two table rows are duplicates: same path length and the
    same predecessor (node, path-index) pair."""
    same_length = row_1[1] == row_2[1]
    same_pred = row_1[2][0] == row_2[2][0] and row_1[2][1] == row_2[2][1]
    return same_length and same_pred
def keep_n_min(candidates, n, length_index=1):
    """Keep all candidate rows whose path length is among the n smallest,
    renumber them by length rank, and drop duplicate rows."""
    candidates = sorted(candidates, key=lambda x: x[length_index])
    last_one = -1
    count = 0
    last = -1
    # Find the cut point after the n-th distinct length value.
    for i, one in enumerate(candidates):
        if one[length_index] != last_one:
            last_one = one[length_index]
            count += 1
            if count > n:
                last = i
                break
    if last != -1:
        candidates = candidates[:last]
    index = 0
    # Renumber the path index (column 0) by distinct length rank.
    last_len = -1
    del_indices = []
    for i, row in enumerate(candidates):
        # Mark exact duplicates of the previous row for deletion.
        if i > 0 and row_equal(candidates[i], candidates[i-1]):
            del_indices.append(i)
        if row[length_index] != last_len:
            last_len = row[length_index]
            index += 1
        candidates[i][0] = index
    # Delete from the back so earlier indices stay valid.
    for i in del_indices[::-1]:
        del candidates[i]
    return candidates
def get_tables_by_adj(adj, n):
    """Build per-node shortest-path tables with a Dijkstra-like greedy pass.

    tables[cur] holds rows [path_index, path_length, (pred_node, pred_path_index)]
    for the n shortest segmentations reaching node cur.
    """
    l = len(adj)
    tables = [[[1, 0, (0, 0)]]] # table 0 is never used; placeholder for alignment
    for cur in range(1, l):
        candidates = []
        i = 0
        for pre in range(cur):
            if cur in adj[pre]: # there is an edge from node pre to node cur
                for row in tables[pre]:
                    candidates.append([i, row[1] + 1, (pre, row[0])])
        # Keep only the candidates with the n smallest lengths.
        table = keep_n_min(candidates, n)
        tables.append(table)
    return tables
def core_retro(s, cur, pre, path_index, one_res, one_length_res, pre_node_index,tables):
    """Core recursion of the backtracking: walk predecessors from node `cur`
    back to node 0, collecting the words of one segmentation path."""
    one_res.append(s[pre: cur])
    if pre == 0:
        # Reached the start: record the words in forward order.
        one_length_res.append(one_res[::-1])
    else:
        # Follow every predecessor row belonging to this path index.
        for one_row in tables[pre]:
            if one_row[0] == path_index:
                core_retro(s, pre, one_row[pre_node_index][0],
                           one_row[pre_node_index][1], one_res, one_length_res, pre_node_index,tables)
    # Undo this frame's word before returning to the caller.
    one_res.pop()
def retro_back(s, tables, n, length_index=1, pre_node_index=2):
    """Backtrack segmentation results from the recorded tables.

    Returns {path_length: [list of word lists]} for the n shortest lengths.
    """
    count = 0
    last_len = -1
    res = {}
    l = len(s)
    for row in tables[-1]:
        # Keep only results whose length is among the first n distinct values.
        cur = l
        if row[length_index] != last_len:
            last_len = row[length_index]
            count += 1
            if count > n:
                break
        # Start backtracking for this row.
        one_length_res = []
        one_res = []
        # Core backtracking recursion.
        core_retro(s, cur, row[pre_node_index][0], row[pre_node_index][1], one_res, one_length_res, pre_node_index,tables)
        if row[length_index] not in res:
            res[row[length_index]] = one_length_res
        else:
            res[row[length_index]] += one_length_res
    return res
def segstr(str):
    """Segment one string using the module-level dictionary `d`.

    Returns {path_length: [word lists]} with only the single best path kept.
    NOTE(review): the parameter shadows the builtin `str` inside this function.
    """
    adj = build_graph(str, d)
    tables = get_tables_by_adj(adj, 1)
    res = retro_back(str, tables, 1)
    return res
if __name__ == '__main__':
    # test = "他说的的确在理"
    # print(segstr(test))
    # Segment every line of the test set with the single best (shortest)
    # path and write the space-joined results to the output file.
    output_lines = []
    with open('../data/pku_test.utf8', encoding='utf-8') as testset:
        for line in testset:
            line = line.strip()
            seg = segstr(line)
            seg = list(seg.items())[0][1][0]
            output_lines.append(" ".join(seg) + "\n")
    # Join once instead of quadratic string concatenation, and close both
    # files deterministically (the original never closed either handle).
    with open('pku_result.utf8', mode='w', encoding='utf-8') as outputfile:
        outputfile.write("".join(output_lines))
|
990,562 | 37dea98c4a50738111f13c066ee69cb14d8b9992 | # Livro...: Introdução a Python com Aplicações de Sistemas Operacionais
# Capítulo: 07
# Questão.: Exercício Proposto 4
# Autor...: Emanuel Lázaro
# Data....: 29/10/2019
from MinhasFuncoes import *
# Read matrix dimensions and the random-value range from the user.
linhas = int(input('Informe a quantidade de linhas da matriz: '))
colunas = int(input('Informe a quantidade de colunas da matriz: '))
intervalo_inicial = int(input('Informe o intervalo inicial: '))
intervalo_final = int(input('Informe o intervalo final: '))
# Both helper functions come from the star-imported MinhasFuncoes module.
matriz = gera_matriz_aleatoria(linhas, colunas, intervalo_inicial, intervalo_final)
constante = int(input('Informe a constante que multiplicará a matriz gerada: '))
print(f'Matriz gerada: {matriz} \nMatriz C (k * A): {multiplica_matriz_por_constante(matriz, constante)}')
|
990,563 | 66361878e5d44608b87302605b53724fe94e1bff | from __future__ import division
import torch
import math
import random
try:
    import accimage
except ImportError:
    accimage = None
import numpy as np
import numbers
from PIL import Image, ImageOps, ImageEnhance
import collections
import collections.abc
import scipy.ndimage.interpolation as itpl
import scipy.misc as misc
import types
import warnings
def _is_tensor_image(image):
return torch.is_tensor(image) and image.ndimension() == 3
def _is_pil_image(image):
    """True for a PIL image (or an accimage.Image when accimage is available)."""
    if accimage is not None:
        return isinstance(image, (Image.Image, accimage.Image))
    else:
        return isinstance(image, Image.Image)
def _is_numpy_image(image):
return isinstance(image, np.ndarray) and (image.ndim in {2, 3})
class Compose(object):
    """Chain several transforms: each one is applied to the previous result."""
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, image):
        result = image
        for transform in self.transforms:
            result = transform(result)
        return result
class ToTensor(object):
    """Convert an ndarray image to a float torch tensor.

    3-D input (H x W x C) is permuted to channel-first (C x H x W); 2-D
    input keeps its layout. Raises TypeError for non-ndarray input.
    """
    def __call__(self, image):
        if not _is_numpy_image(image):
            raise TypeError('image must be ndarray. Got {}'.format(type(image)))
        if isinstance(image, np.ndarray):
            if image.ndim == 2:
                tensor = torch.from_numpy(image.copy())
            elif image.ndim == 3:
                # HWC -> CHW, copying so the tensor owns its memory.
                tensor = torch.from_numpy(image.transpose((2, 0, 1)).copy())
            else:
                raise RuntimeError('image must be ndarray with 3 or 2 dimensions. Got {}'.format(image.ndim))
            return tensor.float()
class Resize(object):
    """Resize an image with scipy.misc.imresize.

    `dimension` may be a scalar (size factor) or an (h, w) pair.
    NOTE(review): scipy.misc.imresize was removed in SciPy 1.3, so
    __call__ only works against old SciPy versions.
    """
    def __init__(self, dimension, interpolation='nearest'):
        # collections.abc.Iterable: the bare collections.Iterable alias
        # used originally was removed in Python 3.10.
        assert isinstance(dimension, int) or isinstance(dimension, float) or \
               (isinstance(dimension, collections.abc.Iterable) and len(dimension) == 2)
        self.dimension = dimension
        self.interpolation = interpolation
    def __call__(self, image):
        if image.ndim == 2:
            # 'F' mode keeps 2-D float data as floats.
            return misc.imresize(image, self.dimension, self.interpolation, 'F')
        elif image.ndim == 3:
            return misc.imresize(image, self.dimension, self.interpolation)
        else:
            # Fix: the original constructed this exception but never raised it,
            # silently returning None for unexpected ranks.
            raise RuntimeError('image must be ndarray with 2 or 3 dimensions. Got {}'.format(image.ndim))
class Crop(object):
    """Crop a fixed window out of an image: rows [m, n), columns [x, y)."""
    def __init__(self, m, n, x, y):
        self.m = m
        self.n = n
        self.x = x
        self.y = y
    def __call__(self, image):
        if image.ndim == 2:
            return image[self.m:self.n, self.x:self.y]
        elif image.ndim == 3:
            # Keep all channels.
            return image[self.m:self.n, self.x:self.y, :]
class CenterCrop(object):
    """Crop the central region of an ndarray image to `dimension` (h, w)."""
    def __init__(self, dimension):
        # A single number means a square crop.
        if isinstance(dimension, numbers.Number):
            self.dimension = (int(dimension), int(dimension))
        else:
            self.dimension = dimension
    @staticmethod
    def get_params(image, output_dimension):
        """Return (top, left, height, width) for a centered crop window."""
        sh, sw = output_dimension
        top = int(round((image.shape[0] - sh) / 2.))
        left = int(round((image.shape[1] - sw) / 2.))
        return top, left, sh, sw
    def __call__(self, image):
        top, left, height, width = self.get_params(image, self.dimension)
        if not _is_numpy_image(image):
            raise TypeError('image should be ndarray. Got {}'.format(type(image)))
        if image.ndim == 3:
            return image[top:top + height, left:left + width, :]
        elif image.ndim == 2:
            return image[top:top + height, left:left + width]
        else:
            raise RuntimeError('image should be ndarray with 2 or 3 dimensions. Got {}'.format(image.ndim))
class ColorNormalize(object):
    """Normalize the first three slices along axis 0 of an image copy:
    (image[c] - mean[c]) / std[c], with meanstd = {"mean": ..., "std": ...}."""
    def __init__(self, meanstd):
        self.meanstd = meanstd
    def __call__(self, image):
        normalized = image.copy()
        means = self.meanstd["mean"]
        stds = self.meanstd["std"]
        for channel in (0, 1, 2):
            normalized[channel] -= means[channel]
            normalized[channel] /= stds[channel]
        return normalized
|
990,564 | 1ce5c6c24162c1b07ebc8dc7bf7564954969de59 | import re
import logging
import string
import random
import inspect
import functools
import threading
from django.db import models
from django.core.cache import cache
_format_re = re.compile('%[^%]')
__all__ = ('cachelib',)
class CacheLibrary(threading.local):
    """Registry of per-model cache-key templates with invalidation helpers.

    Thread-local: each thread sees its own `cache_keys` registry.
    NOTE(review): Python 2 only — `string.lowercase` does not exist in
    Python 3 (there it is string.ascii_lowercase).
    """
    cache_keys = None
    chars = string.lowercase + string.uppercase
    cache_version = 1.5
    def __init__(self):
        self.cache_keys = {}
    def _rand_string(self, length):
        # Random alphabetic token used as a per-object cache namespace.
        return ''.join(random.choice(CacheLibrary.chars) for _ in range(length))
    def compute_arity(self, format):
        # Number of %-style placeholders in the key template.
        return len(_format_re.split(format)) - 1
    def invalidate(self, obj):
        """Delete every registered cache entry for this model instance."""
        model = type(obj)
        prefix = '%s_%s_%s_' % (CacheLibrary.cache_version, model.__name__, obj.pk)
        for template, arity, method, cache_timeout, recompute in self.cache_keys.get(model.__name__, ()):
            if arity == 0:
                cache.delete(prefix + template)
            else:
                # Parameterized keys are addressed via a single
                # placeholder-filled "outer" namespace key.
                cache.delete(prefix + template % (('_',) * arity))
    def recalculate(self, obj):
        """Invalidate, then eagerly re-populate zero-arity cached values."""
        self.invalidate(obj)
        model = type(obj)
        prefix = '%s_%s_%s_' % (CacheLibrary.cache_version, model.__name__, obj.pk)
        for template, arity, method, cache_timeout, recompute in self.cache_keys.get(model.__name__, ()):
            if arity == 0 and recompute:
                result = method(obj)
                if result is not None:
                    cache.set(prefix + template, result, cache_timeout)
    def register_cache(self, cache_key_template, cache_timeout=86400, model=None, skip_pos=0, recompute=True):
        """Decorator: cache a function's result under a per-object key.

        cache_key_template -- %%-style template; its placeholder count
            (arity) is filled from positional args after skip_pos.
        model -- model class or name; defaults to the enclosing class name
            discovered from the caller's stack frame.
        skip_pos -- position of the model instance (or pk) in the args.
        recompute -- whether recalculate() eagerly refreshes the value.
        """
        def _decorator(method):
            if model is not None:
                if isinstance(model, type):
                    model_name = model.__name__
                else:
                    model_name = model
            else:
                # Infer the model name from the enclosing class-body frame.
                model_name = inspect.getouterframes(inspect.currentframe())[1][3]
            if model_name not in self.cache_keys:
                self.cache_keys[model_name] = []
            arity = self.compute_arity(cache_key_template)
            self.cache_keys[model_name].append((cache_key_template,
                                                arity,
                                                method,
                                                cache_timeout,
                                                recompute,
                                                ))
            @functools.wraps(method)
            def _arity_zero(*args, **kwargs):
                # Simple read-through cache keyed by (version, model, pk, template).
                obj = args[skip_pos]
                if isinstance(obj, models.Model):
                    pk = obj.pk
                else:
                    pk = obj
                prefix = '%s_%s_%s_' % (CacheLibrary.cache_version, model_name, pk)
                key = prefix + cache_key_template
                logging.debug("CACHE LIB: Getting %s" % key)
                result = cache.get(key)
                if result is None:
                    result = method(*args, **kwargs)
                    if result is not None:
                        cache.set(key, result, cache_timeout)
                return result
            if not arity:
                return _arity_zero
            @functools.wraps(method)
            def _arity_nonzero(*args, **kwargs):
                # Two-level scheme: an "outer" key holds a random namespace
                # token, so all parameterized entries can be invalidated at
                # once by deleting/rotating the outer key.
                obj = args[skip_pos]
                if isinstance(obj, models.Model):
                    pk = obj.pk
                else:
                    pk = obj
                prefix = '%s_%s_%s_' % (CacheLibrary.cache_version, model_name, pk)
                outer_key = prefix + cache_key_template % (('_',) * arity)
                inner_key_val = cache.get(outer_key)
                if inner_key_val is None:
                    inner_key_val = self._rand_string(5)
                    cache.set(outer_key, inner_key_val, 86400 * 30)
                key = '_'.join((prefix, inner_key_val, cache_key_template % tuple(args[skip_pos + 1:skip_pos + 1 + arity])))
                logging.debug("CACHELIB: Getting key %r" % key)
                result = cache.get(key)
                if result is None:
                    result = method(*args, **kwargs)
                    if result is not None:
                        cache.set(key, result, cache_timeout)
                return result
            return _arity_nonzero
        return _decorator
cachelib = CacheLibrary()
|
990,565 | f3c6c4f363408b4d5f0cd7d520e43fe5ee48a2d4 | from django.shortcuts import render
from wisata.models import Wisata
from news.models import News, Agenda
from gallery.models import Video
# Create your views here.
def index(request):
    """Landing page: latest two news and wisata items, one available agenda
    entry, and the most recent video."""
    news = News.objects.order_by('-created')[:2]
    wisata = Wisata.objects.order_by('-created')[:2]
    agenda = Agenda.objects.filter(available=True)[:1]
    video = Video.objects.order_by('-created')[:1]
    context = {
        'news':news,
        'wisata':wisata,
        'agenda':agenda,
        'video':video,
    }
    return render(request, 'index.html', context)
990,566 | fd1914d7b465350145af05ea0975c2158438abe2 | import random
from TestManager import TestManager
from TestingWay import TestingWay
from TestingWay1 import TestingWay1
from TestingWay2 import TestingWay2
import Tkinter
import time
import tkMessageBox
from TestingWay import TestingWay
class TestWindow(Tkinter.Tk):
    """Main Tkinter window for the arithmetic test.

    NOTE(review): Python 2 only — `print` statements and the
    Tkinter/tkMessageBox module names.
    """
    def __init__(self,parent):
        Tkinter.Tk.__init__(self,parent)
        self.parent = parent
        self.showAll()
        # Local instance, not stored on self; the commented-out call below
        # suggests the test run was meant to start here.
        starttest = TestingWay()
        # starttest.StartTesting()
    def timer(self):
        # Current second of local time (0-59).
        now = time.localtime(time.time())
        return now[5]
    def getNewValues(self,first,second):
        # Display a fresh pair of numbers in the two colored labels.
        self.first = first
        self.second = second
        self.label1.config(text=self.first)
        self.label2.config(text=self.second)
    def showAll(self):
        """Build all widgets: three labels and three entry fields."""
        self.label0 = Tkinter.Label(self,
            anchor="w", fg="black", bg="white", text=2)
        self.label0.grid(column = 0,row = 1)
        self.label1 = Tkinter.Label(self,
            anchor="w",fg="white",bg="red", text=TestingWay.GetRandomNumbers("T"))
        self.label1.grid(column=0,row=2)
        self.label2 = Tkinter.Label(self,
            anchor="w",fg="white",bg="blue", text=TestingWay.GetRandomNumbers("B"))
        self.label2.grid(column=0,row=3)
        self.entry0 = Tkinter.Entry(self)
        self.entry0.grid(column=1,row=1,columnspan=1)
        self.entry1 = Tkinter.Entry(self)
        self.entry1.grid(column=1,row=2,columnspan=1)
        self.entry2 = Tkinter.Entry(self)
        self.entry2.grid(column=1,row=3,columnspan=1)
        # Enter in the first field moves focus; in the second it submits.
        self.entry1.bind("<Return>", self.ChangeFocus)
        self.entry2.bind("<Return>", self.OnPressEnter)
    def OnUpdate(self,event):
        # Show the current second in the status label.
        self.label0.config(text=self.timer())
    def ChangeFocus(self,event):
        # Placeholder: focus handling not implemented yet.
        pass
    def OnPressEnter(self,event):
        "Call methods from testingManager and testingWay"
        value1 = self.entry1.get()
        value2 = self.entry2.get()
        print value1
        print value2
        TestingWay._TOP_VALUE = int(value1)
        TestingWay._BOT_VALUE = int(value2)
        # self.starttest.GetTopInputValue(value1)
        # self.starttest.GetBotInputValue(value2)
        print("Top value : " + TestingWay._TOP_INPUT_VALUE.__str__())
        print ("Bot value : " + TestingWay._BOT_INPUT_VALUE.__str__())
        #print TestingWay2.CorrectOutputValues()
        # Entry 0 selects which checking strategy to apply.
        if(self.entry0.get()=="1"):
            TestingWay1.CorrectInputValues()
        else:
            TestingWay2.CorrectInputValues()
        print TestingWay._CORRECT
        print TestingWay._ERRORS_VAL
        print TestingWay._ERRORS_WAY
        # Load the next pair of random numbers.
        self.getNewValues(TestingWay.GetRandomNumbers("T"),TestingWay.GetRandomNumbers("B"))
        print "You pressed enter !"
    def TimeFinished(self):
        tkMessageBox.showinfo( "End", "Time of testing is finished.")
990,567 | 38894a076252c9aedd57ff7596d978c0fd64d724 |
gender=(input("enter the gender"))
age=int(input("enter the age"))
print(gender,age)
# NOTE(review): branches are tested in order, so any non-'F' gender falls
# through to the age checks; ages exactly 20, 40, or 60 match none of the
# open ranges below and print "error" — confirm whether that is intended.
if(gender=='F'):
    print("She will work only in urban areas")
elif(20<age<40):
    print("he may work anywhere")
elif(40<age<60):
    print("he will work only in urban areas")
else:
    print("error")
|
990,568 | 9a53f22d874f81a4d9456cc1c4fa3453d82978ab | import numpy as np
import random
from collections import namedtuple, deque
from memory import ReplayMemory, PrioritizedReplayMemory
from model import QNet, DuelingQNet
import torch
import torch.nn.functional as F
import torch.optim as optim
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """DQN agent with optional dueling networks and prioritized replay.

    NOTE(review): `args.dueling` gates both the dueling architecture and
    the double-DQN update path (learn_DDQN) — confirm the coupling is
    intended.
    """
    def __init__(self, args, state_size, action_size, seed):
        """Build the local/target networks, optimizer, and replay memory."""
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)
        # Feature flags: prioritized experience replay / dueling heads.
        self.per = args.per
        self.dueling = args.dueling
        self.buffer_size = args.buffer_size
        self.batch_size = args.batch_size
        self.gamma = args.gamma
        self.tau = args.tau
        self.lr = args.learning_rate
        self.update_freq = args.update_every
        # Q-Network
        if self.dueling:
            self.local_qnet = DuelingQNet(state_size, action_size, seed).to(device)
            self.target_qnet = DuelingQNet(state_size, action_size, seed).to(device)
        else:
            self.local_qnet = QNet(state_size, action_size, seed).to(device)
            self.target_qnet = QNet(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.local_qnet.parameters(), lr=self.lr)
        # Replay Memory
        if self.per:
            self.memory = PrioritizedReplayMemory(args, self.buffer_size)
        else:
            self.memory = ReplayMemory(action_size, self.buffer_size, self.batch_size, seed)
        self.t_step = 0 # init time step for updating every UPDATE_EVERY steps
    def step(self, state, action, reward, next_state, done):
        """Store one transition and trigger learning every update_freq steps."""
        if self.per:
            self.memory.append(state, action, reward, next_state, done)
        else:
            self.memory.add(state, action, reward, next_state, done) # save experience to replay memory.
        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % self.update_freq
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > self.batch_size:
                if self.dueling:
                    self.learn_DDQN(self.gamma)
                else:
                    self.learn(self.gamma)
    def act(self, state, eps=0.):
        """Return an epsilon-greedy action for `state`."""
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        # Evaluate Q-values without gradient tracking.
        self.local_qnet.eval()
        with torch.no_grad():
            action_values = self.local_qnet(state)
        self.local_qnet.train()
        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))
    def learn(self, gamma):
        """Vanilla DQN update from a sampled minibatch."""
        if self.per:
            idxs, states, actions, rewards, next_states, dones, weights = self.memory.sample(self.batch_size)
        else:
            states, actions, rewards, next_states, dones = self.memory.sample()
        # Get max predicted Q values for next states from target model
        Q_targets_next = self.target_qnet(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        Q_expected = self.local_qnet(states).gather(1, actions)
        # Compute loss - element-wise mean squared error
        # Now loss is a Tensor of shape (1,)
        # loss.item() gets the scalar value held in the loss.
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize loss
        self.optimizer.zero_grad()
        if self.per:
            # NOTE(review): F.mse_loss has already reduced to a scalar here,
            # so the IS weights scale the whole batch loss rather than
            # weighting per-sample errors — confirm this is intended.
            (weights * loss).mean().backward() # Backpropagate importance-weighted minibatch loss
        else:
            loss.backward()
        self.optimizer.step()
        if self.per:
            errors = np.abs((Q_expected - Q_targets).detach().cpu().numpy())
            self.memory.update_priorities(idxs, errors)
        # Update target network
        self.soft_update(self.local_qnet, self.target_qnet, self.tau)
    def learn_DDQN(self, gamma):
        """Double-DQN update: the local net selects the next action, the
        target net evaluates it."""
        if self.per:
            idxs, states, actions, rewards, next_states, dones, weights = self.memory.sample(self.batch_size)
        else:
            states, actions, rewards, next_states, dones = self.memory.sample()
        # Get index of maximum value for next state from Q_expected
        Q_argmax = self.local_qnet(next_states).detach()
        _, a_prime = Q_argmax.max(1)
        # Get max predicted Q values for next states from target model
        Q_targets_next = self.target_qnet(next_states).detach().gather(1, a_prime.unsqueeze(1))
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Get expected Q values from local model
        Q_expected = self.local_qnet(states).gather(1, actions)
        # Compute loss
        # Now loss is a Tensor of shape (1,)
        # loss.item() gets the scalar value held in the loss.
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize loss
        self.optimizer.zero_grad()
        if self.per:
            # NOTE(review): same scalar-loss weighting caveat as in learn().
            (weights * loss).mean().backward() # Backpropagate importance-weighted minibatch loss
        else:
            loss.backward()
        self.optimizer.step()
        if self.per:
            errors = np.abs((Q_expected - Q_targets).detach().cpu().numpy())
            self.memory.update_priorities(idxs, errors)
        # Update target network
        self.soft_update(self.local_qnet, self.target_qnet, self.tau)
    def soft_update(self, local_model, target_model, tau):
        """Polyak-average local params into the target network."""
        # θ_target = τ*θ_local + (1 - τ)*θ_target
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
|
990,569 | bee70e1b325b67699ae0733fcce9a3929ed59791 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division
from fileparser import FileParser
def repl(s, i, char):
    """Return `s` with the character at index `i` replaced by `char`."""
    assert i < len(s)
    prefix, suffix = s[:i], s[i+1:]
    return prefix + char + suffix
def solve(c_str, j_str):
    """Fill in the '?' digits of two equal-length strings so that the
    absolute difference of the resulting integers is minimal.

    Ties break by tuple comparison (diff, c, j): smaller C string first,
    then smaller J string. Brute force over every completion, so only
    practical for a few '?' characters.
    """
    all_c = [""]
    all_j = [""]
    for i, (c, j) in enumerate(zip(c_str, j_str)):
        # Candidate digits for this position of each string.
        if c == "?" and j == "?":
            ip_c = [str(i) for i in range(10)]
            ip_j = [str(i) for i in range(10)]
        elif c == "?" and j != "?":
            ip_c = [str(i) for i in range(10)]
            ip_j = [j]
        elif c != "?" and j == "?":
            ip_c = [c]
            ip_j = [str(i) for i in range(10)]
        elif c != "?" and j != "?":
            ip_c = [c]
            ip_j = [j]
        else:
            assert False
        # Extend every prefix with every candidate digit.
        all_c = [
            x + cc
            for x in all_c
            for cc in ip_c
        ]
        all_j = [
            x + jj
            for x in all_j
            for jj in ip_j
        ]
    assert len(all_c[0]) == len(all_j[0])
    # First pass: best pair over the full cross product.
    _, best_c, best_j = min([
        (abs(int(ip_c) - int(ip_j)), ip_c, ip_j)
        for ip_c in all_c
        for ip_j in all_j
    ])
    # Prune to values near the current best, then re-minimize.
    # NOTE(review): the threshold of 10 looks heuristic and the second min
    # cannot beat the first (the best pair survives pruning) — confirm the
    # pruning step is still wanted.
    all_c = [
        cc
        for cc in all_c
        if abs(int(cc) - int(best_c)) <= 10
    ]
    all_j = [
        jj
        for jj in all_j
        if abs(int(jj) - int(best_j)) <= 10
    ]
    _, best_c, best_j = min([
        (abs(int(ip_c) - int(ip_j)), ip_c, ip_j)
        for ip_c in all_c
        for ip_j in all_j
    ])
    return best_c, best_j
def main():
    """Read T test cases (two strings per case) and print Code Jam output."""
    inputfile = FileParser()
    T = inputfile.read_int()
    for test in range(1, T + 1):
        C, J = inputfile.read_strings()
        result = solve(C, J)
        # Python 2 print statement: this file targets Python 2.
        print "Case #{}: {} {}".format(test, result[0], result[1])
if __name__ == '__main__':
    main()
|
990,570 | 1c76a97da71e128a47a1a64611acc956840383c1 | import os
import secrets
import discord
import yaml
from marshmallow.core.utilities.data_processing import user_avatar
from .item_object import RawItem, CookedItem
from .properties import rarity_names, item_colors, item_icons
class ItemCore(object):
    def __init__(self, item_directory):
        """Load every item definition found under `item_directory`."""
        self.base_dir = item_directory
        # Lookup tables imported from .properties.
        self.rarity_names = rarity_names
        self.item_icons = item_icons
        self.item_colors = item_colors
        self.all_items = []
        self.init_items()
def get_item_by_name(self, name):
output = None
for item in self.all_items:
if item.name.lower() == name.lower():
output = item
break
return output
def get_item_by_file_id(self, name):
output = None
for item in self.all_items:
if item.file_id == name:
output = item
break
return output
def pick_item_in_rarity(self, item_category, rarity):
in_rarity = []
for item in self.all_items:
if item.type.lower() == item_category:
if item.rarity == rarity:
in_rarity.append(item)
choice = secrets.choice(in_rarity)
return choice
def init_items(self):
raw_item_types = ['fish', 'plant', 'animal']
cooked_item_types = ['drink', 'meal', 'desert']
for root, dirs, files in os.walk(f'{self.base_dir}'):
for file in files:
if file.endswith('.yml'):
file_path = (os.path.join(root, file))
with open(file_path, encoding='utf-8') as item_file:
item_id = file.split('.')[0]
item_data = yaml.safe_load(item_file)
item_data.update({'file_id': item_id})
if item_data['type'].lower() in raw_item_types:
item_object = RawItem(item_data)
elif item_data['type'].lower() in cooked_item_types:
item_object = CookedItem(item_data)
else:
item_object = None
if item_object:
self.all_items.append(item_object)
@staticmethod
def roll_rarity(db, uid):
upgrade_id = 'luck'
upgrade_file = db[db.db_cfg.database].Upgrades.find_one({'UserID': uid})
if upgrade_file is None:
db[db.db_cfg.database].Upgrades.insert_one({'UserID': uid})
upgrade_file = {}
if upgrade_id in upgrade_file:
upgrade_level = upgrade_file[upgrade_id]
else:
upgrade_level = 0
rarities = {
0: 0,
1: 350000000,
2: 600000000,
3: 800000000,
4: 950000000,
5: 990000000,
6: 995000000,
7: 997500000,
8: 999000000,
9: 999750000
}
roll = secrets.randbelow(1000000000) + (upgrade_level * 250) + 1
lowest = 0
for rarity in rarities:
if rarities[rarity] <= roll:
lowest = rarity
else:
break
return lowest
@staticmethod
async def notify_channel_of_special(message, all_channels, channel_id, item):
if channel_id:
target = discord.utils.find(lambda x: x.id == channel_id, all_channels)
if target:
connector = 'a'
if item.rarity_name[0].lower() in ['a', 'e', 'i', 'o', 'u']:
connector = 'an'
response_title = f'{item.icon} {connector.title()} {item.rarity_name} {item.name} has been found!'
response = discord.Embed(color=item.color, title=response_title)
response.set_author(name=f'{message.author.display_name}', icon_url=user_avatar(message.author))
response.set_footer(text=f'From {message.guild.name}.', icon_url=message.guild.icon_url)
await target.send(embed=response) |
990,571 | c1818d2df9943213cdfae57674d14428995405ec | import random
# DEFINE THE FUNCTIONS
# 1. chatbot
def chatbot():
    """Run one interactive session: greet the user, ask their mood, then
    either sympathise (therapist mode) or chat about a random topic."""
    print("Hello. I'm Chatbot. ")
    user_name = get_name()
    mood_reply = get_mood(user_name)
    print(mood_reply)
    if mood_reply != "Sorry to hear that.":
        random_question()
        print(random_answer())
    else:
        print(therapist())
# 2. function to get user's name
def get_name():
    """Ask the user for their name and return the reply verbatim."""
    return input("What is your name? ")
# 3. function to get the user's mood. Note this has a flaw: if the user uses one of the key words along with a qualification, the app might get confused (for example, the response "not bad" shoul be positive, but the app would treat it as negative.)
def get_mood(name):
    """Ask how the user is and classify the reply as positive, negative or
    neutral.  Known flaw: a qualified phrase like "not bad" still matches the
    negative keyword and is treated as negative."""
    answer = input("How are you {}? ".format(name)).lower()
    print(answer)
    # Positive keywords win when both kinds appear (checked first).
    if "good" in answer or "great" in answer:
        return "Glad to hear it."
    if "sad" in answer or "bad" in answer:
        return "Sorry to hear that."
    return "Right."
# 4.
def therapist():
    """Prompt for the user's trouble and echo its last two words back,
    followed by a randomly chosen canned reaction."""
    complaint = input("Tell me what's wrong. ").lower()
    words = complaint.split()
    echo = " ".join(words[-2:]) if len(words) >= 2 else complaint
    reaction = random.choice(["Oh dear.", "Toughen up!", "I see.", "How does that make you feel?", "You fool!"])
    return "{}? {}".format(echo.strip(".,!?"), reaction)
# 5. creates a set of stock answers to make it sound like the chatbot is listening to the user.
def random_answer():
answers = ["Interesting. ", "What makes you say that? ", "Tell me more about it. ", "What the hell! ", "How do you feel about that? ", "How dare you say that! ", "So...", "Hahahah! "]
return random.choice(answers)
# 6. creates a set of random topics for the chatbot to ask about.
def random_question():
questions = ["What's your favourite film? ", "Do you believe that aliens exist? ", "What makes you tick? "]
res = input(random.choice(questions))
return res
# . function to print a message when the user input is invalid.
def print_message():
print("Sorry, I don't understand. Can you rephrase that?")
# CALL THE CHATBOT — runs immediately at import/execution time and blocks
# waiting for stdin input.
chatbot()
990,572 | 711edfbbf84cb8d29bbd63d6403b1e5197db263d | # Generated by Django 2.0.4 on 2018-05-04 16:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required ``download_url`` text column to the ``files`` model.

    dependencies = [
        ('downloads', '0002_auto_20180430_1025'),
    ]
    operations = [
        migrations.AddField(
            model_name='files',
            name='download_url',
            # default=None only backfills existing rows during the migration;
            # preserve_default=False drops it from the field definition after.
            field=models.TextField(default=None),
            preserve_default=False,
        ),
    ]
|
990,573 | eb10a515a4132f0654be32eead1f7584eff32f33 | import re
def is_phone_number_valid(phone_number):
    """Return True when *phone_number* is a valid phone number (or empty).

    Accepted shapes include international numbers such as ``+905422672332``
    and North-American forms like ``1 800 5551212``, ``5425551212`` or
    ``800 5551212 ext3333``; see the original examples for the full list.
    Leading zeros are stripped before matching.

    :param phone_number: the candidate number (any type; coerced to str)
    :return: bool — whether the number is considered valid
    """
    phone_number = str(phone_number)
    # An empty value is deliberately accepted (the field is optional).
    if phone_number == "":
        return True
    international_pattern = re.compile(
        r'\(?\+[0-9]{1,3}\)? ?-?[0-9]{1,3} ?-?[0-9]{3,5} ?-?[0-9]{4}( ?-?[0-9]{3})? ?(\w{1,10}\s?\d{1,6})?')
    nanp_pattern = re.compile(
        r'(?:(?:\+?1\s*(?:[.-]\s*)?)?(?:(\s*([2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9])\s*)|([2-9]1[02-9]|[2-9]['
        r'02-8]1|[2-9][02-8][02-9]))\s*(?:[.-]\s*)?)([2-9]1[02-9]|[2-9][02-9]1|[2-9][02-9]{2})\s*(?:[.-]\s*)?(['
        r'0-9]{4})\s*(?:\s*(?:#|x\.?|ext\.?|extension)\s*(\d+)\s*)?$')
    normalized = phone_number.lstrip('0')
    matched = international_pattern.match(normalized) or nanp_pattern.match(normalized)
    return bool(matched)
def is_email_valid(e_mail):
    """Return True when *e_mail* looks like a syntactically valid address.

    The pattern is intentionally simple: it covers the common shapes of an
    e-mail address without attempting full RFC-grade validation, so swap in
    a stricter expression for advanced usage.

    :param e_mail: the candidate address string
    :return: bool — whether the address is considered valid
    """
    email_pattern = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
    return email_pattern.match(e_mail) is not None
def is_password_valid(password):
    """Return True when the password is between 5 and 50 characters long
    (any type is accepted and coerced to str first)."""
    return 5 <= len(str(password)) <= 50
|
990,574 | 414b853debfe5b22afd40d131c27fd488bde999f | class Node():
def __init__(self,value):
self.value = value
self.next = None
class Stack ():
def __init__(self):
self.top = None
self.bottom = None
self.length = 0
def peek(self):
return self.top
def push (self,value):
newItem = Node(value)
if (self.length == 0):
self.top = newItem
self.bottom = newItem
else:
holdingPosition = self.top
self.top = newItem
newItem.next = holdingPosition
self.length += 1
return self.printList()
def pop (self):
if (self.length == 0):
return None
else:
newTop = self.top.next
self.top = newTop
self.length -= 1
return self.printList()
def printList(self):
temp = self.top
arr = []
while (temp):
arr.append(temp.value)
temp = temp.next
print(arr)
# Demo: each push/pop prints the current stack contents (top first).
myStack = Stack()
myStack.push('hola')
myStack.push('Google')
myStack.push('Amazon')
myStack.pop()
|
990,575 | 34a2c8e032073131fdb32d5ae4578d2ef6738abc | import argparse
import csv
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
import creds #credentials file
# Read-only Analytics scope, Reporting API v4 discovery endpoint, and
# service-account credentials (all sensitive values live in the local
# ``creds`` module, which is not committed with the script).
SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
DISCOVERY_URI = ('https://analyticsreporting.googleapis.com/$discovery/rest')
KEY_FILE_LOCATION = creds.KEY_FILE_LOCATION
SERVICE_ACCOUNT_EMAIL = creds.SERVICE_ACCOUNT_EMAIL
VIEW_ID = str(creds.VIEW_ID)
def initialize_analyticsreporting():
    """Initializes an analyticsreporting service object.

    Authenticates with the service-account P12 key from ``creds`` and builds
    the Reporting API v4 client (Python 2 script).

    Returns:
      analytics: an authorized analyticsreporting service object.
    """
    print "authenticating"
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)
    http = credentials.authorize(httplib2.Http())
    # Build the service object.
    analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
    return analytics
def get_report(analytics):
    # Use the Analytics Service Object to query the Analytics Reporting API V4.
    # Single batched request: session / e-commerce funnel metrics broken down
    # by date, medium, user type and device, over the date window configured
    # in the ``creds`` module.  Returns the raw decoded JSON response.
    print "pulling report"
    return analytics.reports().batchGet(
        body={
            'reportRequests': [
                {
                    'viewId': VIEW_ID,
                    'dateRanges': [{'startDate': creds.STARTDATE, 'endDate': creds.ENDDATE}],
                    'metrics': [{'expression': 'ga:sessions'},
                                {'expression': 'ga:pageviews'},
                                {'expression': 'ga:productDetailViews'},
                                {'expression': 'ga:productAddsToCart'},
                                {'expression': 'ga:productCheckouts'},
                                {'expression': 'ga:uniquePurchases'},
                                ],
                    'dimensions': [{'name':'ga:date'},
                                   {'name':'ga:medium'},
                                   {'name':'ga:userType'},
                                   {'name':'ga:deviceCategory'}
                                   ]
                }]
        }
    ).execute()
def print_response(response, filename='export.csv'):
    """
    Flatten the first report of an Analytics API v4 response and write it to
    *filename* as CSV: a fixed header row, then one row per dimension
    combination (date, medium, userType, deviceCategory) with its six metrics.
    """
    """
    structure
    response['reports'][0]['data']['rows'] #returns a list of metrics and dimensions values
    [
    {u'metrics': [{u'values': [u'1446', u'4592', u'891', u'249', u'195', u'61']}], u'dimensions': [u'20170408', u'(none)', u'New Visitor', u'desktop']},
    {u'metrics': [{u'values': [u'162', u'543', u'206', u'5', u'5', u'0']}], u'dimensions': [u'20170409', u'referral', u'New Visitor', u'desktop']},
    {u'metrics': [{u'values': [u'1', u'1', u'1', u'0', u'0', u'0']}], u'dimensions': [u'20170408', u'display', u'Returning Visitor', u'desktop']}
    ]
    response['reports'][0]['columnHeader'] #returns the header
    {u'dimensions': [
    u'ga:date',
    u'ga:medium',
    u'ga:userType',
    u'ga:deviceCategory'
    ],
    u'metricHeader': {u'metricHeaderEntries': [
    {u'type': u'INTEGER', u'name': u'ga:sessions'},
    {u'type': u'INTEGER', u'name': u'ga:pageviews'},
    {u'type': u'INTEGER', u'name': u'ga:productDetailViews'},
    {u'type': u'INTEGER', u'name': u'ga:productAddsToCart'},
    {u'type': u'INTEGER', u'name': u'ga:productCheckouts'},
    {u'type': u'INTEGER', u'name': u'ga:uniquePurchases'}]}}
    """
    print "writing", filename
    #write in csv ('wb' is correct for the Python 2 csv module)
    #write header
    # NOTE(review): the header says 'productAddToCart' while the metric is
    # ga:productAddsToCart — likely a typo in the column name; confirm with
    # downstream consumers before changing it.
    with open(filename, 'wb') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quoting=csv.QUOTE_MINIMAL
                            )
        writer.writerow(['date',
                         'medium',
                         'userType',
                         'deviceCategory',
                         'sessions',
                         'pageviews',
                         'productDetailViews',
                         'productAddToCart',
                         'productCheckouts',
                         'uniquePurchases'
                         ])
        #get variables from each row (dimensions and metrics are positional,
        #matching the request order in get_report)
        for line in response['reports'][0]['data']['rows']:
            date = str(line['dimensions'][0])
            medium = str(line['dimensions'][1])
            userType = str(line['dimensions'][2])
            deviceCategory = str(line['dimensions'][3])
            sessions = str(line['metrics'][0]['values'][0])
            pageviews = str(line['metrics'][0]['values'][1])
            productDetailViews = str(line['metrics'][0]['values'][2])
            productAddsToCart = str(line['metrics'][0]['values'][3])
            productCheckouts = str(line['metrics'][0]['values'][4])
            uniquePurchases = str(line['metrics'][0]['values'][5])
            #write variables to csv per row
            writer.writerow([date,
                             medium,
                             userType,
                             deviceCategory,
                             sessions,
                             pageviews,
                             productDetailViews,
                             productAddsToCart,
                             productCheckouts,
                             uniquePurchases
                             ])
    print "complete"
def main():
    # Authenticate, run the batched report request, and dump it to export.csv.
    analytics = initialize_analyticsreporting()
    response = get_report(analytics)
    print_response(response)
if __name__ == '__main__':
    main()
990,576 | c4efba8854dc93d3f0e87505417023134055a6fa | ## Linear Stability of a Barotropic QG Vortex
# This is an attempted re-write of the code
# qg_BTvortex_stab_SpecFD_loop.
# provided by Francis.
import timeit
import scipy
import time
import sys
import argparse
import scipy.sparse as sp
import scipy.linalg as spalg
import numpy as np
import numpy.linalg as nlg
import matplotlib.pyplot as plt
from scipy.sparse.linalg import eigs
from scipy.interpolate import interp1d
from scipy.misc import factorial
from cheb import cheb
from FiniteDiff import FiniteDiff
# Parse commandline inputs.
# NOTE(review): several physical parameters are accepted but never used by
# the solver — they are explicitly flagged "DOES NOT DO ANYTHING" in their
# help strings below.
parser = argparse.ArgumentParser()
parser.add_argument('--Neig', help='Number of grid points for eig computations.',\
    type=int, default = 201)
parser.add_argument('--Neigs', help='Number of grid points for eigs computations.',\
    type=int, default = 1001)
parser.add_argument('-H', '--depth', help='Fluid depth parameter. (DOES NOT DO ANYTHING)',\
    type=float,default=2.4e3)
parser.add_argument('-L', '--width', help='Radius of the domain. (DOES NOT DO ANYTHING)',\
    type=float, default=200e3)
parser.add_argument('-f0', '--coriolis', help='Coriolis f0 value. (DOES NOT DO ANYTHING)',\
    type=float, default=8e-5)
parser.add_argument('-g', '--gravity', help='Acceleration due to gravity. (DOES NOT DO ANYTHING)',\
    type=float, default=9.81)
parser.add_argument('-p', '--PrintOutputs', help='Flag to turn on display for each computation.',\
    action='store_true')
parser.add_argument('-N', '--buoyancy', help='Buoyancy frequency. (DOES NOT DO ANYTHING)',\
    type=float, default=np.sqrt(5)*1e-3)
parser.add_argument('-kt', '--k_theta', help='Azimuthal wavenumbers. Enter as -kt min max step .',\
    type=float, default=[1,3,1], nargs=3)
parser.add_argument('-kz', '--k_z', help='Vertical wavenumbers. Enter as -kz min max step.',\
    type=float, default=[0,2,0.1], nargs=3)
parser.add_argument('--modes', help='The number of modes of instability to be considered.',\
    type=int, default=1)
args = parser.parse_args()
class Parameters:
    ## Class to hold parameter values.
    ## These are shared class attributes evaluated once at import time from
    ## the parsed command line; instances for the Chebyshev and FD runs both
    ## start from these defaults (Nr/N2 are overridden for the FD run).
    H = args.depth
    L = args.width
    f0 = args.coriolis
    g = args.gravity
    N = args.buoyancy
    Lr = 6.25
    Nr = args.Neig
    N2 = args.Neig/2   # Python 2 integer division: half the grid points
    Nt = 40
    kts = np.arange(args.k_theta[0],args.k_theta[1],args.k_theta[2])
    kzs = np.arange(args.k_z[0],args.k_z[1],args.k_z[2])
    nmodes = args.modes
    printout = args.PrintOutputs
    def display(self):
        # Pretty-print every parameter value (Python 2 print statements).
        print 'H = {0}'.format(self.H)
        print 'L = {0}'.format(self.L)
        print 'f0 = {0}'.format(self.f0)
        print 'g = {0}'.format(self.g)
        print 'N = {0}'.format(self.N)
        print 'Lr = {0}'.format(self.Lr)
        print 'Nr = {0}'.format(self.Nr)
        print 'N2 = {0}'.format(self.N2)
        print 'Nt = {0}'.format(self.Nt)
        print 'kts = {0}'.format(self.kts)
        print 'kzs = {0}'.format(self.kzs)
        print 'nmodes = {0}'.format(self.nmodes)
class Geometry:
    """Radial grid and differentiation matrices for the stability problem.

    method == 'cheb': Chebyshev collocation via ``cheb``, with the nodes and
    first-derivative matrix rescaled from [-1, 1] to [-Lr, Lr].
    method == 'FD':   uniform grid running from Lr down past -Lr, with
    8th-order finite-difference matrices from ``FiniteDiff``.
    In both cases Dr2 is formed as Dr @ Dr.
    """
    def __init__(self, method, params):
        self.method = method
        if method == 'cheb':
            diff_mat, nodes = cheb(params.Nr)
            self.r = nodes * params.Lr
            self.Dr = diff_mat / params.Lr
            self.Dr2 = np.dot(self.Dr, self.Dr)
        elif method == 'FD':
            step = 2 * params.Lr / (params.Nr)
            self.r = np.arange(params.Lr, -params.Lr - step, -step)
            self.Dr = FiniteDiff(self.r, 8, True, True)
            self.Dr2 = np.dot(self.Dr, self.Dr)
def Build_Laplacian(params, geom):
    """Assemble the radial part of the polar Laplacian, D2 + (1/r) D1,
    restricted to the inner points 1..N2 of the grid.

    The columns taken in reverse order (Nr-1 down to N2+1) appear to fold
    the mirrored half of the symmetric domain back onto the retained points
    — TODO confirm against the grid convention of cheb/FiniteDiff.
    Works with either dense (numpy) or sparse (scipy) Dr matrices.
    """
    inner = np.arange(1, params.N2 + 1, 1)
    mirror = np.arange(params.Nr - 1, params.N2, -1)
    D1d = geom.Dr2[1:params.N2 + 1, 1:params.N2 + 1]
    D2d = geom.Dr2[inner, :][:, mirror]
    E1d = geom.Dr[1:params.N2 + 1, 1:params.N2 + 1]
    E2d = geom.Dr[inner, :][:, mirror]
    if sp.issparse(geom.Dr):
        R = sp.spdiags(np.transpose(1.0 / geom.r[1:params.N2 + 1]), np.array([0]), params.N2, params.N2)
    else:
        R = np.diag(1.0 / np.ravel(geom.r[1:params.N2 + 1]))
    return D1d + D2d + np.dot(R, E1d + E2d)
def Print_npArray(fp, arr):
    # Write a 2-D array to the open file *fp* in '%+2.2e' scientific notation,
    # comma-separated, one array row per line (Python 2: uses xrange).
    for ii in xrange(0,arr.shape[0]):
        for jj in xrange(0,arr.shape[1]):
            if jj == arr.shape[1]-1:
                # Last column: no trailing comma.
                fp.write('{0:+2.2e}'.format(arr[ii,jj]))
            else:
                fp.write('{0:+2.2e}, '.format(arr[ii,jj]))
        fp.write('\n')
def QG_Vortex_Stability():
    """Linear stability of a barotropic QG vortex (Python 2 script).

    For each azimuthal wavenumber kt and vertical wavenumber kz, solves the
    generalized eigenproblem A q = c B q with dense `eig` on a Chebyshev
    grid, then refines each leading mode on a finer finite-difference grid
    using shift-invert `eigs` seeded with the interpolated Chebyshev
    eigenvector; finally plots growth rates and frequencies.
    """
    ## Initialize parameters
    paramsCheb = Parameters()
    paramsFD = Parameters()
    paramsFD.Nr = args.Neigs
    paramsFD.N2 = args.Neigs/2
    ## Set-up the geometry
    GeomCheb = Geometry('cheb', paramsCheb)
    GeomFD = Geometry('FD', paramsFD)
    GeomCheb.Lap = Build_Laplacian(paramsCheb, GeomCheb)
    GeomFD.Lap = Build_Laplacian(paramsFD, GeomFD)
    ## Set up the profiles (Gaussian vortex) on each grid's inner points
    rin = GeomCheb.r[1:paramsCheb.N2+1]
    Prsp = np.ravel(-0.5*np.exp(-rin**2)) # 1/r*Psi_r
    Qrsp = np.ravel(-2*np.exp(-rin**2)*(rin**2-2)) # 1/r*Q_r
    rin = GeomFD.r[1:paramsFD.N2+1]
    Prfd = np.ravel(-0.5*np.exp(-rin**2)) # 1/r*Psi_r
    Qrfd = np.ravel(-2*np.exp(-rin**2)*(rin**2-2)) # 1/r*Q_r
    kts = paramsCheb.kts
    kzs = paramsCheb.kzs
    nmodes = paramsCheb.nmodes
    # Result arrays indexed [kz, kt, mode] for both methods
    growthsp = np.zeros([kzs.shape[0], kts.shape[0], nmodes])
    frequysp = np.zeros([kzs.shape[0], kts.shape[0], nmodes])
    growthfd = np.zeros([kzs.shape[0], kts.shape[0], nmodes])
    frequyfd = np.zeros([kzs.shape[0], kts.shape[0], nmodes])
    ## Start solving
    for cntz in xrange(0, kzs.shape[0]):
        kz = kzs[cntz]
        kz2 = kz**2
        for cntt in xrange(0, kts.shape[0]):
            kt = kts[cntt]
            kt2 = kt**2
            # Build A and B for eigen-analysis
            R2invC = np.diag(np.ravel(1/GeomCheb.r[1:paramsCheb.N2+1]**2))
            Bcheb = GeomCheb.Lap - kt2*R2invC - kz2*np.eye(paramsCheb.N2,paramsCheb.N2)
            Acheb = np.dot(np.diag(Prsp),Bcheb) - np.diag(Qrsp)
            R2invF = np.diag(np.ravel(1./GeomFD.r[1:paramsFD.N2+1]**2))
            Bfd = GeomFD.Lap - kt2*R2invF - kz2*np.eye(paramsFD.N2,paramsFD.N2)
            Afd = np.dot(np.diag(Prfd),Bfd) - np.diag(Qrfd)
            # Find eigen-space (Direct)
            # NOTE(review): timeit.timeit() with no arguments times an empty
            # statement, not this code — the reported "cputime" values are
            # meaningless; time.time() was probably intended.
            t0 = timeit.timeit()
            eigValCheb, eigVecCheb = spalg.eig(Acheb,Bcheb)
            t1 = timeit.timeit()
            timesp = t1 - t0
            # Sort modes by decreasing growth rate (imaginary part)
            ind = (-eigValCheb.imag).argsort()
            eigVecCheb = eigVecCheb[:,ind]
            eigValCheb = eigValCheb[ind]
            omegaCheb = eigValCheb*kt
            growthsp[cntz,cntt,:] = omegaCheb[0:nmodes].imag;
            frequysp[cntz,cntt,:] = omegaCheb[0:nmodes].real;
            # Loop over modes
            for ii in xrange(0,nmodes):
                grow = omegaCheb[ii].imag
                freq = omegaCheb[ii].real
                # Find Eigenvalues (Indirect): interpolate the Chebyshev
                # eigenvector onto the FD grid to seed shift-invert eigs
                sig0 = eigValCheb[ii]
                X = np.hstack([np.array([paramsCheb.Lr]),\
                    np.ravel(GeomCheb.r[1:paramsCheb.N2+1]),\
                    np.array([0])])[::-1]
                Y = np.hstack([np.array([0]), eigVecCheb[:,ii], np.array([0])])[::-1]
                # Normalize Y by its largest-magnitude component
                ind = (-np.abs(Y)).argsort()
                Y = Y/Y[ind[0]]
                Xnew = np.ravel(GeomFD.r[1:paramsFD.N2+1])[::-1]
                interp_fcn = interp1d(X, Y, kind='cubic')
                chebvec = interp_fcn(Xnew)
                chebvec = chebvec[::-1]
                Xnew = Xnew[::-1]
                # Diagonal preconditioner T built from the (conjugated,
                # zero-protected) interpolated vector
                tmp = chebvec
                tmp[tmp==0] = 1
                tmp = tmp.conj()
                T = np.diag(np.ravel(tmp))
                Tinv = nlg.inv(T)
                t0 = timeit.timeit()
                try:
                    sig1, vec1 = eigs(np.dot(Afd,Tinv), 1, np.dot(Bfd,Tinv),\
                        sigma=sig0,v0=np.dot(T,chebvec))
                    # Normalize vec1
                    ind = (-np.abs(vec1)).argsort(axis=None)
                    vec1 = vec1/vec1[ind[0]]
                    #plt.subplot(3,2,1)
                    #plt.plot(Xnew,chebvec.real,'-b', Xnew,chebvec.imag,'-r')
                    #plt.title('Original Eig Vector')
                    #plt.subplot(3,2,2)
                    #plt.plot(Xnew, np.dot(T,chebvec).real, '-b', Xnew, np.dot(T,chebvec).imag, '-r')
                    #plt.title('Transformed Eig Vector')
                    #plt.subplot(3,2,3)
                    #plt.plot(Xnew, vec1.real, '-b', Xnew, vec1.imag, '-r')
                    #plt.title('Original Eigs Vector')
                    #vec1 = np.dot(Tinv, vec1)
                    #plt.subplot(3,2,4)
                    #plt.plot(Xnew, vec1.real, '-b', Xnew, vec1.imag, '-r')
                    #plt.title('Inverse Transformed Eigs Vector')
                    #plt.subplot(3,2,5)
                    #plt.plot(Xnew, np.abs(np.ravel(vec1)-np.ravel(chebvec)))
                    #plt.title('Absolute difference')
                    #plt.show()
                except:
                    # eigs can fail to converge; record NaN and carry on
                    sig1 = [np.nan+1j*np.nan]
                    print 'Eigs failed for mode {0:.2f}, k_theta = {1:.2f}, kz = {2:.4f}.\n'.format(ii,kt,kz)
                    sys.stdout.flush()
                t1 = timeit.timeit()
                timefd = t1 - t0
                omegafd = kt*sig1[0]
                growfd = omegafd.imag
                freqfd = omegafd.real
                growthfd[cntz,cntt,ii] = growfd;
                frequyfd[cntz,cntt,ii] = freqfd;
                # Display the results
                if paramsCheb.printout:
                    print '----------'
                    print 'kz = {0:4f}, kt = {1:2f}'.format(kz, kt)
                    print 'eig : growth rate = {0:+4e}, frequency = {1:+4e}, cputime = {2:+4e}'\
                        .format(grow, freq, timesp)
                    print 'eigs: growth rate = {0:+4e}, frequency = {1:+4e}, cputime = {2:+4e}'\
                        .format(growfd, freqfd, timefd)
                    sys.stdout.flush()
    # Plot the eigenvalue results.
    nkt = (np.ravel(kts)).shape[0]
    nkz = (np.ravel(kzs)).shape[0]
    for jj in xrange(0,nmodes):
        plt.figure(jj)
        if nkt < 4:
            for ii in xrange(0, nkt):
                plt.subplot(nkt,2,1+2*ii)
                plt.plot(kzs, 4*np.ravel(growthfd[:,ii,jj]), '-o',\
                    kzs, 4*np.ravel(growthsp[:,ii,jj]), '-*')
                plt.title('Growth Rate')
                plt.subplot(nkt,2,2+2*ii)
                plt.plot(kzs, 4*np.ravel(frequyfd[:,ii,jj]), '-o', \
                    kzs, 4*np.ravel(frequysp[:,ii,jj]), '-*')
                plt.title('Prop. Speed')
        elif nkz < 4:
            for ii in xrange(0, nkz):
                plt.subplot(nkz,2,1+2*nkz)
                plt.plot(np.ravel(kts), 4*np.ravel(growthfd[ii,:,jj]), '-o', \
                    np.ravel(kts), 4*np.ravel(growthsp[ii,:,jj]), '-*')
                plt.title('Growth Rate')
                plt.subplot(nkz,2,2+2*nkz)
                plt.plot(np.ravel(kts), 4*np.ravel(frequyfd[ii,:,jj]), '-o', \
                    np.ravel(kts), 4*np.ravel(frequysp[ii,:,jj]), '-*')
                plt.title('Prop. Speed')
        else:
            plt.subplot(2,2,1)
            plt.contour(np.ravel(kts), np.ravel(kzs), 4*growthfd[:,:,jj])
            plt.title('Growth Rate (eigs)')
            plt.subplot(2,2,2)
            plt.contour(np.ravel(kts), np.ravel(kzs), 4*frequyfd[:,:,jj])
            plt.title('Prop. Speed (eigs)')
            # NOTE(review): these two panels are titled '(eig)' but contour
            # growthfd/frequyfd again — growthsp/frequysp were presumably
            # intended; confirm before fixing.
            plt.subplot(2,2,3)
            plt.contour(np.ravel(kts), np.ravel(kzs), 4*growthfd[:,:,jj])
            plt.title('Growth Rate (eig)')
            plt.subplot(2,2,4)
            plt.contour(np.ravel(kts), np.ravel(kzs), 4*frequyfd[:,:,jj])
            plt.title('Prop. Speed (eig)')
    plt.show()
if __name__ == '__main__': #For testing
    QG_Vortex_Stability()
|
990,577 | d6f0dd5c587a5205dc3e3b19517b90443f991d4e | #coding=utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains  # class implementing mouse-action chains
import time
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
driver=webdriver.Remote(desired_capabilities=DesiredCapabilities.CHROME)
driver.get('http://www.baidu.com/')
time.sleep(1)
driver.find_element_by_xpath('//a[@href="http://www.baidu.com/gaoji/preferences.html" and @class="pf"]').click()  # open "Settings"
driver.find_element_by_xpath('//a[@class="setpref" and @href="javascript:;"]').click()  # open "Search settings"
time.sleep(1)
m=driver.find_element_by_xpath("//select[@name='NR']")  # drop-down list handling
m.find_element_by_xpath("//option[@value='20']").click()
time.sleep(1)
driver.find_element_by_xpath("//a[@class='prefpanelgo']").click()
time.sleep(1)
date=driver.switch_to.alert.text  # text shown in the alert/confirm/prompt dialog
print(date)
driver.switch_to.alert.accept()  # accept the confirmation dialog (press its OK button)
'''dissmiss 点击取消按钮,如果存在取消按钮;send_keys 输入值,这个
alert\confirm没有对话框就不能用了,不然会报错'''
cookie=driver.get_cookies()  # fetch the session cookies
print(cookie)
driver.find_element_by_xpath("//input[@id='kw']").send_keys('selenium')
driver.find_element_by_xpath("//input[@id='su']").click()
time.sleep(2)
js="var q=document.documentElement.scrollTop=1000"  # scroll the page down to the bottom
driver.execute_script(js)
time.sleep(2)
# data=driver.find_element_by_xpath('//p[@id="cp"]').text  # get the element's text
# print(data)
# driver.find_element_by_xpath('//a[@name="tj_mp3"]').click()
print(driver.title)  # print the browser title
# driver.set_window_size(480,800)
# driver.back()  # go back
# time.sleep(2)
# driver.forward()  # go forward
'''
qqq=driver.find_element_by_xpath("///")
ActionChains(driver).context_click(qqq).perform()####鼠标右击事件
ActionChains(driver).double_click(qqq).perform()####鼠标双击事件
ppp=driver.find_element_by_xpath("///")
ActionChains(driver).drag_and_drop(qqq,ppp).perform()####鼠标拖地事件,perform()执行所有存储的行为
switch_to_frame()#####框架(frame)或者窗口(window)的定位
switch_to_window()
'''
|
990,578 | 02f76ae07d4bb429bf6a8319cce2aba0cb80ef58 | # Filename: compute_bmi.py
# Author: Thng Jing Xiong
# Created: 20130121
# Modified: 20130121
# Description: Program to get user weight and height and
# calculate body mass index (BMI)
# main
# prompt and get weight
weight = int(input("Enter weight in kg:"))
# prompt and get height
height = float(input("Enter height in m:"))
# calculate bmi = weight (kg) / height (m) squared
bmi = weight / (height * height)
# display result
print ("BMI={0:.2f}".format(bmi))
# determine health risk (bands: >=27.5 high, 23-27.5 moderate,
# 18.5-23 healthy, below 18.5 underweight)
if bmi >=27.50:
    print("High Risk!!!")
elif 23 <= bmi <27.5:
    # BUG FIX: was `23.5 <= bmi < 27.5`, which — with the healthy band
    # ending at 23 — left BMI values in [23, 23.5) falling through to the
    # "Malnutrition" branch.
    print ("Moderate risk!!")
elif 18.5 <= bmi <23:
    print ("Healthy! :D")
else:
    print ("Malnutrition :(")
990,579 | 408e375db7e4e6367ff2ec5fae56b96f40b8dd0b | import solaris
# Evaluate roof and footprint predictions against the Xi'an fine ground
# truth using the SpaceNet building-footprint challenge metric.
# NOTE(review): all four paths are hard-coded to a specific machine.
roof_gt = '/data/buildchange/v2/xian_fine/xian_fine_roof_gt.csv'
footprint_gt = '/data/buildchange/v2/xian_fine/xian_fine_footprint_gt.csv'
roof_pred = '/home/jwwangchn/Documents/100-Work/170-Codes/aidet/results/buildchange/bc_v014_mask_rcnn_hrnetv2p_w32_v2_roof_trainval/result_roof.csv'
footprint_pred = '/home/jwwangchn/Documents/100-Work/170-Codes/aidet/results/buildchange/bc_v014_mask_rcnn_hrnetv2p_w32_v2_roof_trainval/result_footprint.csv'
a, b = solaris.eval.challenges.spacenet_buildings_2(roof_pred, roof_gt)
print("F1: {}, Precision: {} Recall: {}".format(b['F1Score'].mean(), b['Precision'].mean(), b['Recall'].mean()))
a, b = solaris.eval.challenges.spacenet_buildings_2(footprint_pred, footprint_gt)
print("F1: {}, Precision: {} Recall: {}".format(b['F1Score'].mean(), b['Precision'].mean(), b['Recall'].mean()))
990,580 | f1467045593ca351f4b4015b706487217e1b04f2 |
class Solution:
def count_and_say(self, n):
"""
Idea: https://discuss.leetcode.com/topic/28084/simple-python-solution
Time: O(mn) where n is the num till which we calculate the sequence and m is the max length
we perform n steps and on each step you iterate over the length of the current string at that
step which is also increasing per step. This is order O(n*m) where m is the length of the string at step n.
"""
arr = [1]
for _ in xrange(n-1): # Here trick is to run this for n-1 times and not n
res = []
cur_ele, cur_count = arr[0], 1
for i in xrange(1, len(arr)):
if arr[i] != cur_ele:
res.append(cur_count)
res.append(cur_ele)
cur_ele = arr[i]
cur_count = 1
else:
cur_count += 1
res.append(cur_count)
res.append(cur_ele)
arr = res
return ''.join([str(x) for x in arr])
if __name__ == '__main__':
    # Ad-hoc test harness (Python 2 print statements): each case is a pair
    # of (input n, expected n-th count-and-say term).
    test_cases = [
        (1, '1'),
        (2, '11'),
        (3, '21'),
        (4, '1211'),
        (5, '111221'),
    ]
    for test_case in test_cases:
        res = Solution().count_and_say(test_case[0])
        if res == test_case[1]:
            print "Passed"
        else:
            print "Failed: Test case: {0} Got {1} Expected {2}".format(
                test_case[0], res, test_case[1])
|
990,581 | 94194425eeb77d1e6c42248574b4bd3c9bc16f02 | from django.conf.urls import url
from .views import *
# Ban-management routes; every detail route captures the ban's numeric id.
urlpatterns = [
    url(r'^$', BanListView.as_view(), name='ban-list'),
    url(r'^(?P<id>\d+)/$', BanDetailView.as_view(), name='ban-detail'),
    url(r'^(?P<id>\d+)/edit/$', BanEditView.as_view(), name='ban-edit'),
    url(r'^(?P<id>\d+)/lift/$', BanLiftView.as_view(), name='ban-lift')
]
|
990,582 | e5c4d46284a05c4140bc081e5001ea03d72af446 | import sys
sys.path.append('./LowerMachine')
sys.path.append('./Vehicle')
from LowerMachine import UpperMachine
from Vehicle import VehicleData_add
import FuzzyInfrerence
import TrustInfernce
import time
import datetime
def TheardMain():
    '''
    Main thread: loops forever, driving the sensor controller and the two
    inference engines to adapt each direction's green-light duration.
    '''
    # Create and start the upper-machine (sensor) controller.
    my_upper_machine = UpperMachine.UpperMachine()
    my_upper_machine.initial()
    my_upper_machine.start()
    STANDARD_SECONDS = 10
    MINI_SECONDS = 10
    MAX_SECONDS = 60
    while True:
        for i in range(2):
            # Get this direction's current green-light duration.
            wait_time = my_upper_machine.getTime(i)
            print("wait for %.3f seconds" % wait_time)
            # Wait out the green phase.
            time.sleep(wait_time)
            # Read the vehicle count and record it in the database.
            counts = my_upper_machine.getCount(i)
            beginTime, endTime = my_upper_machine.getBETime(i)
            VehicleData_add.vehicleData_add(i, counts, beginTime, endTime)
            # Direction 1 (north-south) uses fuzzy inference; direction 0
            # (east-west) uses certainty-factor inference.
            if i == 0:
                # Run the certainty-factor inference engine.  The conclusion
                # strings below are Chinese labels meaning "keep / increase /
                # decrease this cycle's green time" — they are runtime values
                # produced by TrustInfernce and must not be translated here.
                conclusion = TrustInfernce.getConclusion(i)
                print(conclusion)
                # Adjust the timing from the conclusion; conclusion[1] is the
                # certainty factor used to scale the adjustment.
                if (conclusion[0] == "本轮绿灯时间不变"):
                    # do Nothing
                    print("")
                elif (conclusion[0] == "本轮绿灯时间增加"):
                    wait_time += wait_time * conclusion[1] * STANDARD_SECONDS
                    if wait_time > MAX_SECONDS:
                        wait_time = MAX_SECONDS
                elif conclusion[0] == "本轮绿灯时间减少":
                    wait_time -= wait_time * conclusion[1] * STANDARD_SECONDS
                    if wait_time < MINI_SECONDS:
                        wait_time = MINI_SECONDS
                else:
                    print("Error!")
            elif i == 1:
                # Run the fuzzy inference engine.
                count1, count2 = FuzzyInfrerence.getCount(i)
                conclusion, fuzzy_train = FuzzyInfrerence.Defuzzification(i)
                # print(count1, count2)
                # print(fuzzy_train)
                for th in fuzzy_train:
                    print(th)
                if (count1 == count2):
                    wait_time = wait_time
                    pass
                elif (count1 < count2):
                    wait_time -= conclusion
                    # print('绿灯时长减少{}秒'.format(conclusion))
                    if wait_time < MINI_SECONDS:
                        wait_time = MINI_SECONDS
                elif (count1 > count2):
                    wait_time += conclusion
                    # print('绿灯时长增加{}秒'.format(conclusion))
                    if wait_time > MAX_SECONDS:
                        wait_time = MAX_SECONDS
            # Push the (possibly clamped) new duration back to the hardware.
            my_upper_machine.changeTime(i, wait_time)
if __name__ == '__main__':
    TheardMain()
|
990,583 | a6ea4887db7b52f9934a403d738f74f3ae000cbd | import sys
import json
from math import floor
import spotipy
import spotipy.util as util
# OAuth setup: client credentials come from keys.json.
# NOTE(review): this exact block is repeated again further down, just before
# the script body — one of the two copies is redundant.
keys = json.load(open('keys.json'))
username = ''
scope = 'playlist-modify-public user-top-read'
token = util.prompt_for_user_token(username, scope, client_id=keys['client_id'],
    client_secret=keys['client_secret'], redirect_uri=keys['redirect_uri'])
class sbucket():
    """Builds a large Spotify playlist by recursively expanding the user's
    top tracks through the recommendations endpoint until *limit* tracks
    have been collected."""

    def __init__(self, limit, token):
        # Without a token no attributes are initialised (original behavior).
        if token:
            self.sp = spotipy.Spotify(auth=token)
            self.user_id = self.sp.me()['id']
            self.limit = limit
            self.done = False

    def get_top_tracks(self):
        """Return the ids of the user's top tracks across all three time
        ranges; these become the initial recommendation seeds."""
        top_tracks_ids = []
        time_ranges = ['short_term', 'medium_term', 'long_term']
        for time_range in time_ranges:
            tracks = self.sp.current_user_top_tracks(limit=50, time_range=time_range)
            top_tracks_ids += ([track_id['id'] for track_id in tracks['items']])
        print("Found {0:d} top user tracks for initial seeds.".format(len(top_tracks_ids)))
        return top_tracks_ids

    def get_recommendations(self, seed_track_ids):
        """Fetch recommendations in batches of up to five seeds, keeping only
        track ids not already collected and not among the seeds; sets
        ``self.done`` once the overall track limit is exceeded."""
        rec_tracks_ids = []
        # BUG FIX: the old `range(floor(len/5))` loop dropped the final
        # partial batch of seeds (the API accepts 1-5 seed tracks).
        for start in range(0, len(seed_track_ids), 5):
            tracks = self.sp.recommendations(seed_tracks=seed_track_ids[start:start + 5], limit=25)
            track_ids = [track_id['id'] for track_id in tracks['tracks']]
            # BUG FIX: the old filter used `or` (nearly always true, so it
            # deduplicated nothing) and was a lazy iterator consumed while
            # the very list its predicate reads was being extended.
            rec_tracks_ids += [tid for tid in track_ids
                               if tid not in rec_tracks_ids and tid not in seed_track_ids]
            sys.stdout.write("Added {0:d} new and unique recommended tracks...\r".format(len(rec_tracks_ids)))
            sys.stdout.flush()
            if len(rec_tracks_ids) + len(seed_track_ids) > self.limit:
                self.done = True
                print("\nTrack limit reached! [{0:d}]".format(self.limit))
                break
        print("Found total of {0:d} new and unique recommended tracks.".format(len(rec_tracks_ids) + len(seed_track_ids)))
        return rec_tracks_ids

    def add_tracks_to_playlist(self, playlist_name, track_ids):
        """Create a new public playlist and add every id in *track_ids* in
        batches of 50 (well under the API's 100-per-request cap)."""
        # create new playlist
        playlist_id = self.sp.user_playlist_create(self.user_id, playlist_name)['id']
        # BUG FIX: the old `range(floor(len/50))` loop silently dropped the
        # final partial batch (fewer than 50 tracks).
        for start in range(0, len(track_ids), 50):
            self.sp.user_playlist_add_tracks(self.user_id, playlist_id, track_ids[start:start + 50])
        print("Saved {0:d} new tracks to an sBucket playlist.".format(len(track_ids)))
        # currently tracks are simply ordered by the order of the top tracks
        # would be interesting to get track audio features and then order by euclidean distance
# Script body: authenticate, seed from the user's top tracks, then keep
# expanding recommendations until the 5000-track limit is reached.
# NOTE(review): this credentials block duplicates the one near the top of
# the file.
keys = json.load(open('keys.json'))
username = ''
scope = 'playlist-modify-public user-top-read'
token = util.prompt_for_user_token(username, scope, client_id=keys['client_id'],
    client_secret=keys['client_secret'], redirect_uri=keys['redirect_uri'])
sBucket = sbucket(5000, token)
top_tracks = sBucket.get_top_tracks()
rec_tracks = sBucket.get_recommendations(top_tracks)
i = 0
# Recurse: feed the growing recommendation list back in until done.
while(not sBucket.done):
    print("Starting recursion {0:d}...".format(i+1))
    rec_tracks += sBucket.get_recommendations(rec_tracks)
    i += 1
sBucket.add_tracks_to_playlist('sBucket', rec_tracks)
990,584 | c7d990ce355d302b1f99be76137bc89414ea1570 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 10 09:18:53 2014
@author: Greg
"""
import argparse
import os
from Bio import SeqIO
# Split a FASTA file into numbered chunks (<stem>.fasta.0, .fasta.1, ...)
# of roughly `size` GB each, never splitting a record across chunks.
parser = argparse.ArgumentParser()
parser.add_argument('infile', type = str)
parser.add_argument('size', type = float, help="Size in GB")
args = parser.parse_args()
filename = os.path.splitext(args.infile)[0]
chunk_num = 0
chunk_name = filename + '.fasta.' + str(chunk_num)
chunk_handle = open(chunk_name,'w')
myWriter = SeqIO.FastaIO.FastaWriter(chunk_handle)
myWriter.write_header()
for num,f in enumerate(SeqIO.parse(args.infile, 'fasta')):
    myWriter.write_record(f)
    # Roll over to a fresh chunk once the current file passes the size cap;
    # tell() is checked only after each whole record, so chunks can overshoot
    # slightly.
    if chunk_handle.tell()>1000000000*args.size:
        chunk_handle.close()
        chunk_num+=1
        chunk_handle = open(filename + '.fasta.' + str(chunk_num),'w')
        myWriter = SeqIO.FastaIO.FastaWriter(chunk_handle)
        myWriter.write_header()
chunk_handle.close()
990,585 | 31cac69b2ea612e27d1d28ddf0fa848aca24e639 | # Generated by Django 3.1 on 2020-08-16 22:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Tightens the ``sensor`` model: ``battery_low`` becomes an integer and
    # ``channel`` becomes a unique, required 50-char string.

    dependencies = [
        ('temperaturas_rf', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='sensor',
            name='battery_low',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='sensor',
            name='channel',
            # default=0 only backfills existing rows during the migration;
            # preserve_default=False removes it from the field afterwards.
            field=models.CharField(default=0, max_length=50, unique=True),
            preserve_default=False,
        ),
    ]
|
990,586 | 0467660ebb0bb8699511322c96913410af73adec | from datetime import date
from factory import LazyFunction
from factory import SubFactory

from iati.transaction.models import Transaction
from iati.transaction.models import TransactionType
from iati.factory.iati_factory import NoDatabaseFactory
from iati.factory.iati_factory import ActivityFactory
class TransactionTypeFactory(NoDatabaseFactory):
    """Factory for TransactionType; defaults to code "1" (Incoming Funds)."""
    code = "1"
    name = "Incoming Funds"
    description = ""

    class Meta:
        model = TransactionType
class TransactionProviderFactory(NoDatabaseFactory):
    """Factory for a transaction's provider-organisation reference,
    linked to a provider Activity via SubFactory."""
    # NOTE(review): no `class Meta: model = ...` here — presumably inherited
    # from NoDatabaseFactory or set elsewhere; confirm.
    ref = "some-ref"
    normalized_ref = "some_ref"
    provider_activity = SubFactory(ActivityFactory)
    provider_activity_ref = "IATI-0001"
class TransactionReceiverFactory(NoDatabaseFactory):
    """Factory for a transaction's receiver-organisation reference,
    linked to a receiver Activity via SubFactory."""
    # NOTE(review): no `class Meta: model = ...` here — presumably inherited
    # from NoDatabaseFactory or set elsewhere; confirm.
    ref = "some-ref"
    normalized_ref = "some_ref"
    receiver_activity = SubFactory(ActivityFactory)
    receiver_activity_ref = "IATI-0001"
class TransactionFactory(NoDatabaseFactory):
    """Factory for Transaction, tied to an Activity and a TransactionType."""
    id = 1
    activity = SubFactory(ActivityFactory)
    # LazyFunction defers evaluation to build time; the original bare
    # `date.today()` was frozen once at import time for every instance.
    transaction_date = LazyFunction(date.today)
    # Pass the code as a string to stay consistent with
    # TransactionTypeFactory.code = "1" above (was the int 1).
    transaction_type = SubFactory(TransactionTypeFactory, code="1")

    class Meta:
        model = Transaction
|
990,587 | 3198b695ba5ec9caba21593fe61dea4d9826aa19 | import math
# Distance between the tips of the hour hand (length A) and minute hand
# (length B) of an analog clock at H hours M minutes.
A, B, H, M = map(int, input().split())

# Hour hand: 30 degrees per hour plus 0.5 degrees per minute, in radians.
hour_angle = (30 * H + M / 2) * math.pi / 180
hour_x = A * math.cos(hour_angle)
hour_y = A * math.sin(hour_angle)

# Minute hand: 6 degrees per minute, i.e. pi/30 radians per minute.
minute_angle = M * math.pi / 30
minute_x = B * math.cos(minute_angle)
minute_y = B * math.sin(minute_angle)

# Euclidean distance between the two tips (printed by the next line).
d = ((hour_x - minute_x) ** 2 + (hour_y - minute_y) ** 2) ** 0.5
print(d) |
990,588 | b60a13e54cffd44868a2bc92a363d9604fa933ca | import os
import glob
class FileUtils:
    """Static helpers for simple file-system housekeeping."""

    @staticmethod
    def clean_directory(dir_path, ignore_pattern):
        """Remove every file matched by the glob pattern `dir_path`,
        keeping any path that contains `ignore_pattern` as a substring."""
        for file_ in glob.glob(dir_path):
            # `not in` is the idiomatic negated-membership test
            # (was `not ignore_pattern in file_`).
            if ignore_pattern not in file_:
                os.remove(file_)
                print('File removed {}'.format(file_))

    @staticmethod
    def save_txt_file(file_path, file_content):
        """Write each item of `file_content` to `file_path`, one per line."""
        with open(file_path, 'w') as outfile:
            outfile.writelines(["%s\n" % item for item in file_content])

    @staticmethod
    def create_folder(long_file_path):
        """Create the directory if missing; makedirs also creates any
        missing parents (os.mkdir would fail on a nested path)."""
        if not os.path.exists(long_file_path):
            os.makedirs(long_file_path)
class MiscUtils:
    """Miscellaneous environment-related helpers."""

    @staticmethod
    def check_variables_specified(variables_list):
        """Raise ValueError for the first name in `variables_list` whose
        upper-cased environment variable is not set; return None otherwise."""
        unset = (name for name in variables_list
                 if os.getenv(name.upper()) is None)
        for name in unset:
            raise ValueError("Variable {} not specified.".format(name))
|
990,589 | 75f1f52a9201db926f26b7e909055ae763cda132 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.3)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x02\xc0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x02\x06\
\x14\x36\x38\x4c\xb2\x58\x0f\x00\x00\x02\x40\x49\x44\x41\x54\x48\
\xc7\xed\x96\xc1\x4b\x2a\x51\x14\x87\x3f\x1f\x49\x05\xb5\x10\x06\
\x69\x33\x60\x60\x20\x4c\x34\x03\xed\x0a\x77\x03\x42\x6a\xc2\x2c\
\x66\x88\xa2\xa0\x96\x21\x41\x7b\x5d\xb4\x72\x25\x86\x20\x22\xb8\
\xb0\x28\xd2\x8d\x84\xc3\xac\x66\xd5\x5f\x90\xab\x16\x05\x45\xd3\
\xaa\x4d\x8b\xb4\x54\x90\xf7\x56\x4f\xf2\xd5\xa3\x09\x72\xf5\xde\
\xd9\xdd\x73\xef\xe1\x3b\xbf\x73\xcf\xe1\x5e\xcf\xc2\xc2\xc2\x4f\
\x59\x96\x19\x85\x35\x9b\x4d\xc6\x64\x59\x66\x6b\x6b\x6b\x24\x80\
\x4a\xa5\xc2\x0f\x46\x6c\xff\x01\x5f\x07\x98\xa6\xc9\xf6\xf6\x36\
\xaa\xaa\xa2\xaa\x2a\x8e\xe3\xa0\xeb\x3a\xbd\x5e\x0f\x80\x6e\xb7\
\x8b\xae\xeb\x38\x8e\xc3\xdd\xdd\x1d\xe9\x74\x1a\x4d\xd3\x38\x3e\
\x3e\x46\x55\xd5\xcf\x01\x27\x27\x27\xc4\x62\x31\x4c\xd3\xc4\xb6\
\x6d\x44\x51\x24\x14\x0a\x61\x59\x16\x00\x96\x65\x11\x0a\x85\x10\
\x45\x91\xa3\xa3\x23\x82\xc1\x20\xe5\x72\x99\x7e\xbf\xef\x4e\x41\
\x32\x99\xe4\xe2\xe2\x82\xf5\xf5\x75\xea\xf5\x3a\x00\x86\x61\x50\
\xab\xd5\x78\x7d\x7d\xa5\x56\xab\x61\x18\xc6\xa0\xcf\xe3\xf1\x38\
\x3e\x9f\x8f\x44\x22\xe1\x0e\xb0\xb4\xb4\x44\x2e\x97\x23\x95\x4a\
\x71\x76\x76\x06\x80\x24\x49\x08\x82\x40\x2a\x95\xc2\xef\xf7\x23\
\x49\x12\x00\xb2\x2c\xd3\x68\x34\x78\x7a\x7a\xe2\xfc\xfc\xdc\x1d\
\xe0\x77\xed\x73\xb9\xdc\xd0\x00\x1a\x86\xc1\xe5\xe5\xe5\x20\x7b\
\x80\xcd\xcd\x4d\x6e\x6e\x6e\xd8\xd9\xd9\xa1\xdd\x6e\x23\x08\xc2\
\x3b\xc0\xd8\x9f\x0e\xdb\xb6\x3f\xcc\x64\x79\x79\xf9\xdd\x5e\x20\
\x10\xe0\xe0\xe0\x80\x56\xab\xc5\xe9\xe9\x29\x8b\x8b\x8b\x9f\x03\
\xbe\x6a\xaa\xaa\xe2\xf5\x7a\x09\x87\xc3\x6c\x6c\x6c\x7c\x3f\xe0\
\x6f\x8a\x5d\x0d\x5a\xbf\xdf\xa7\xd3\xe9\x0c\xd6\x2f\x2f\x2f\xdf\
\x33\xc9\xbd\x5e\x0f\xd3\x34\xc9\x66\xb3\x3c\x3f\x3f\x0f\x65\x9b\
\xcf\xe7\xb9\xbd\xbd\x75\x0d\x18\x2a\x51\xbb\xdd\xc6\x34\x4d\x1e\
\x1e\x1e\x88\x44\x22\xc4\x62\xb1\xa1\xc3\xab\xab\xab\xb4\x5a\x2d\
\x1a\x8d\x06\xf5\x7a\x9d\x48\x24\x32\x68\x59\x57\x0a\x8a\xc5\x22\
\x8f\x8f\x8f\x24\x93\x49\xe6\xe7\xe7\x3f\x0c\x98\x9a\x9a\x62\x6d\
\x6d\x8d\x95\x95\x15\x32\x99\x0c\xf7\xf7\xf7\xee\x15\xec\xef\xef\
\xd3\x6c\x36\x39\x3c\x3c\x64\x76\x76\x96\x68\x34\xca\xe4\xe4\xe4\
\x50\xc0\xd5\xd5\x15\x96\x65\x31\x33\x33\x43\xa1\x50\x60\x7a\x7a\
\xda\x3d\xc0\xe3\xf1\xa0\x28\x0a\x8a\xa2\x70\x7d\x7d\x4d\xa9\x54\
\x42\xd3\x34\x44\x51\x04\xa0\x5a\xad\x32\x3e\x3e\xce\xee\xee\x2e\
\x13\x13\x13\x5f\xbf\x83\xb7\x36\x37\x37\xc7\xde\xde\xde\x90\xef\
\xed\x14\xff\x3b\x0f\x8e\x67\xd4\xdf\x96\x5f\x50\x09\xd3\xfe\x63\
\x15\x9a\x51\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\x16\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd8\x01\x0d\
\x05\x04\x2d\xda\xcb\x9e\x15\x00\x00\x05\x96\x49\x44\x41\x54\x38\
\xcb\x9d\x95\x5d\x88\x5d\x77\x15\xc5\x7f\xe7\x7f\x3e\xee\xf7\xc7\
\xcc\x9d\x99\x64\x26\x49\x33\x4d\x52\x8c\x31\xc4\x50\x34\xd2\xd1\
\xa6\xc5\xa8\x89\x60\x4b\x05\xc5\x2a\x3e\x58\x8b\x60\xc1\x17\x7d\
\x10\x84\xa2\x28\x48\xd5\x07\xeb\x93\x2d\x4a\x89\x41\x29\x16\x2a\
\x8a\x2d\x98\xd8\x18\x93\x29\x4d\x30\x96\x76\x62\x63\x5a\x67\x26\
\x69\x66\x32\x77\xee\xcc\x9d\xb9\x37\x73\xe7\xdc\x73\xcf\xc7\xff\
\xcb\x87\x94\xa2\x46\x5f\x5c\x6f\x0b\xf6\x5e\x7b\xb3\x37\xac\x05\
\xff\x81\x3f\xcd\x25\xfc\x3f\xf8\xed\xab\xab\xff\xc6\x9d\x7f\x25\
\x67\xde\x1c\xf0\xd1\xf7\x16\x01\x78\x39\xb4\x4e\xb2\x98\x94\xb0\
\x54\x71\xa8\x80\xf5\xac\xb1\x8e\xb6\xc6\x2a\x63\x95\xd2\x26\xd4\
\xc6\x6c\x0e\x8d\x86\xd1\x91\xc9\x9d\x16\xe0\xb9\x57\x96\xf9\xdc\
\x87\x27\x00\xf0\xfe\xdb\xf4\x17\x43\x4b\xb6\x9a\x8d\x05\x2e\xf7\
\x58\xcb\x51\x03\xf7\x63\x69\x18\x81\x8f\x41\x22\xe8\x08\x9c\xb3\
\xc6\x71\x4e\xf5\xd6\x83\x0b\xa7\x9b\x37\x56\x3f\xb6\x6d\xc7\xed\
\x1b\x9f\xbc\x1c\x02\x70\x6c\x7f\x85\xd7\x16\x4d\x7e\x23\x4a\x0e\
\xa5\xca\x3c\x3a\x56\xf1\x0e\x56\xf2\xce\x84\x35\x76\xd8\x5a\x23\
\xac\xb5\xef\x74\x39\xc6\x5a\xba\xeb\xa1\x5c\x5e\x5c\x8f\x67\x02\
\x97\x67\x1a\x65\x71\xf1\xc8\x81\xb1\xe4\x67\x27\xaf\xdf\x2a\x79\
\xf1\xe2\x06\x9f\x3a\x54\x07\xe0\xdc\xf5\x24\xdf\xdf\x90\x87\x7d\
\x97\x47\x6a\x45\xf1\x40\xb5\xe0\x94\x84\x35\xf4\x22\x49\x6f\x90\
\x22\x95\xc1\x15\x0e\xa5\xbc\x47\xad\x18\xa0\x8c\x43\xa7\x2f\xa3\
\x56\x37\x7e\x21\x4a\xf4\xf1\x40\xb8\xd3\x5f\xfe\xc4\xf6\x04\xc0\
\x7d\xf6\xe7\x3f\xb8\xf5\xb4\x5f\x46\x0c\x86\xd4\x94\xc0\x7e\x25\
\xe7\xf2\xf0\x78\xcd\x0d\xa4\x94\x2c\x77\x07\x5c\x5b\x19\xb0\xd8\
\x89\x59\xbe\x99\xb0\xb6\x99\xd1\x1b\x64\x68\x6d\xa8\x14\x5c\xea\
\x65\x3f\xe8\x84\x72\x7f\xa6\x8c\x1f\xa7\xb2\x75\xf3\x7d\xdb\x17\
\x96\xce\xbc\x74\xeb\x14\x33\x17\xac\xd3\xf2\xc2\xd1\xc8\xc8\x1f\
\x8d\xd5\xfc\xcf\x8c\xd7\xbd\xd2\x20\xc9\x98\x59\xe8\xb1\xd4\x19\
\xd0\x28\xf9\x7c\x70\x4f\x83\x46\x25\x60\x63\x90\x31\xdb\xec\x33\
\xbf\x1a\x52\x2f\xfa\x1c\xbc\xa3\x46\xa5\x18\xf0\xe6\x52\x18\xcd\
\xb7\xfa\xcf\x3b\x6e\xf6\xcd\xd6\xe0\xfa\x9a\x07\xf0\x77\xb5\x56\
\x2a\x08\x31\x35\x56\xf5\x0f\xd6\x0a\x4e\x29\x4d\x25\x33\xd7\x37\
\x98\x6d\x86\x0c\x97\x03\xee\xdd\x37\xc2\x8e\x91\x12\x05\xdf\xa5\
\x51\x09\xa8\xe4\x3d\x9a\xdd\x88\x85\x76\x1f\xcf\x81\xbb\x77\x0f\
\x33\x5a\x0d\x4a\x4a\x15\x0e\xce\xb5\xd2\x29\x3f\x19\x3f\x2d\x00\
\x32\xa5\xab\xa9\xd4\xc7\xaa\x05\xb1\xcd\x75\x2c\x4b\xeb\x31\xf3\
\xad\x3e\x9d\xcd\x84\x6a\xde\x65\xef\x44\x95\x34\xd3\xb4\x6e\xc6\
\xa4\x52\x33\x39\x5a\xe4\xae\xad\x25\x7c\xd7\x32\xbf\xd2\x63\x61\
\x2d\xa2\x90\x73\x19\xad\xe5\xb6\xc9\xcc\x1c\xd3\xca\x56\x3d\x00\
\xa9\x54\xc5\x41\xdc\x67\x8c\x19\x0e\x13\xc5\xd5\x95\x3e\x52\x2a\
\x1c\xc7\x12\x4b\xc5\xd2\xfa\x80\xb9\x56\x48\x9c\x29\xb6\x0e\xe5\
\x29\xee\xa8\xb3\x7b\x6b\x89\x66\x27\x64\xe6\xed\x0e\x73\xcb\x1b\
\x34\x2a\x01\x9e\x60\x38\xcd\xf4\x7d\x4a\x99\x8a\x00\x48\x33\xe5\
\xa5\x99\x1a\xc3\x1a\x11\xa7\x92\x95\x8d\x08\xa9\x34\x9e\x6b\xb9\
\xb6\xda\xe3\xc4\x9f\x67\x59\xd9\x88\x98\x18\xce\xb3\x6b\x4b\x05\
\x63\xa1\x1b\x66\x84\x03\x89\x56\x86\x56\x27\x24\xc9\x14\x16\x44\
\x9c\xc9\xb1\x41\x2a\x3d\x0f\x20\x49\xa5\xb0\x46\x04\xc6\x58\x94\
\xb1\x44\x99\x42\x69\x85\x35\x9a\x7e\xa2\x18\xc4\x19\x47\xde\x3f\
\xc1\x7b\xb6\xd5\x09\xe3\x8c\xb3\x97\x9a\xbc\xdd\x0e\x69\x75\x07\
\x18\x6d\x18\xc4\x29\x5a\x6b\xac\x10\x24\xa9\x0c\xe2\x4c\x89\x77\
\x84\x33\x63\x8d\x9b\x29\x6d\x70\x84\x83\xef\x09\x32\xa9\xd1\xc6\
\xe0\x00\xbe\x2b\x10\x40\xbb\x17\x73\x65\xa1\xcb\xa9\x57\x17\x48\
\x32\x85\x2b\x1c\x5c\x57\xe0\x7b\x0e\x8e\x63\x51\x46\x13\x67\x32\
\x8b\x13\x69\x3c\x80\x38\x91\xca\x1a\xd3\x96\x5a\xd7\x73\x9e\x23\
\x46\xcb\x39\x92\x38\x43\x19\x83\xeb\x80\x52\x8a\x5f\xfc\xf1\x2d\
\xc0\x62\x0d\x68\xa5\xf1\x1d\x30\x46\xe3\x38\x96\x46\xb5\x82\xe7\
\x0a\x92\x54\x99\x34\x95\xed\x38\x91\xca\x03\x08\x93\x34\xb4\x78\
\xe7\xfa\x03\x39\x52\x2f\xe7\x46\xf6\x8c\x97\x59\x5e\xdb\x44\x49\
\x85\x35\x86\xc0\x77\x79\x70\xea\x4e\x76\x8c\x56\x58\x58\xed\xf1\
\x9b\xe9\x59\x32\xad\xb1\xc6\xe0\x09\xc1\xde\x1d\x43\x38\xd6\x61\
\x63\x33\xe9\xa6\x69\x76\x2e\x1a\xa4\xa1\x00\x18\xc4\xe9\x66\x9c\
\xca\x93\x6f\x2d\xde\x6c\xb6\x37\x06\x6c\x6f\x14\xd9\x33\x5e\xa1\
\x9c\xf7\x48\xa5\x42\x2b\x45\x31\x70\xa9\x97\x7c\x8a\x39\x17\x63\
\x0d\x69\x26\x29\x06\x2e\xbb\xc6\x87\xd8\xb9\xa5\xc2\x8d\x76\xc8\
\xeb\x73\xed\x66\x92\xc9\x93\x51\x92\x6e\x7a\x00\xb9\xae\x1b\x0d\
\x54\x76\xfe\xaf\xdd\xd5\x99\x4c\xe9\x3d\xc3\x95\xa0\x74\xe0\xce\
\x61\x94\x56\x24\x59\x86\x56\x9a\xe9\x37\x6e\xf0\xda\xdc\x0a\xfd\
\x58\x22\x84\xa0\x9c\x13\x4c\x6e\xad\x72\xe0\xae\x71\xa2\x44\x71\
\x69\xbe\x1d\x4d\xff\xad\x39\xe3\xe7\x73\xe7\x5b\x4e\x39\x7a\xd7\
\x8f\xbf\xff\xc4\x15\xae\xd8\xe5\xc3\x60\x1e\x1b\xa9\x17\x1e\xfe\
\xf4\x47\x76\x91\xcf\x79\x5c\x5f\xe9\x71\xf9\xda\x1a\x9d\x5e\x9f\
\x24\x51\xf8\xae\x60\xa8\x5e\x64\xff\xce\x11\x26\xc7\x6b\x44\x89\
\xe6\xf8\x1f\x2e\xd3\x5c\xeb\xff\x3a\x95\xe6\xa9\x6b\x85\xfa\x74\
\xe7\xc9\x4f\xe2\x3c\xf6\xe3\x97\x79\xea\x1b\xf7\x02\xf0\xa5\x1f\
\x9e\xc9\xb7\x6e\x86\x87\xcb\x05\xff\x91\xdd\xe3\xb5\x07\xf6\xed\
\x1c\x2a\x6d\x1b\x2d\x63\xb1\x24\x99\x42\x6b\x8d\x70\x04\x81\xef\
\x62\x0d\x2c\xac\x6e\x32\x33\xdf\x8e\xde\xb8\xba\xfe\x42\x77\x33\
\x3d\x3e\x5c\x29\x4d\x9f\xff\xe9\x67\x93\x77\xfd\xf8\x8b\x4f\xbc\
\x04\xc0\xaf\xbe\xf5\x71\x1e\xfa\xce\xf3\xf9\x38\x16\x87\xfa\xb1\
\x7a\xf4\xd0\xde\x2d\x07\xf7\x4d\x36\x26\x6a\xe5\xdc\xb0\xeb\x3a\
\x02\xc0\x5a\x8b\x54\xc6\x74\x7b\x71\xf7\xf5\xb9\xd5\xe5\x73\x97\
\x96\x66\x8a\x81\xff\x4c\x39\xf0\x2f\xbe\xf2\xf4\xe7\x93\xa9\xaf\
\x3d\x77\x7b\x34\x3d\xf4\xed\xdf\xf3\xbb\xef\x3d\x48\x6d\xf7\xd7\
\xb9\xfb\xe8\x07\xb6\x48\xd7\xbb\xc7\xc2\x51\xb0\xf7\x1b\x6d\x1b\
\xc6\x58\xdf\x5a\x2b\xb5\x35\x1d\x6d\xec\x59\x63\xec\x29\xdf\x33\
\x17\xc2\xb4\xb9\x3a\x7b\xe2\x71\x3e\xf4\xd5\x67\xf9\xcb\xd3\x5f\
\xf8\xdf\xd1\xd4\xbb\xfa\x24\x2b\xff\xf8\x6e\x5b\x6d\xbd\xe3\xb4\
\xe7\x89\x8b\x9e\xeb\xfc\xc4\x68\xe3\x19\x63\x1d\x63\xad\xd5\x46\
\xab\x44\xea\x50\x2a\xb3\x39\x3e\x94\x45\xb3\x27\x1e\xbf\x4d\xe3\
\x9f\xed\x56\x34\xee\x0e\xa6\xa8\xcb\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x3d\x47\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x96\x00\x00\x01\xc2\x08\x02\x00\x00\x00\x82\x0b\xa7\xe0\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\
\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x02\x06\x15\x0f\
\x16\x9f\x22\xb2\x4d\x00\x00\x20\x00\x49\x44\x41\x54\x78\xda\xed\
\x5d\x77\x58\x14\xc7\x03\x9d\xbd\x7e\x34\x01\x05\xe4\x8e\x2e\x20\
\x4d\x05\xe9\x28\xd6\x18\x8d\x3d\xf6\x58\xa3\x46\x8d\xdd\xd8\x62\
\x2c\x31\x6a\xec\xfa\x4b\x34\x76\x13\xbb\x31\xc6\x16\x7b\x2c\x48\
\x54\xc4\x86\x62\x45\x04\x41\x8a\x80\x74\xb8\xe3\xea\xee\xed\xfe\
\xfe\x58\x3d\x8e\x2b\xdc\x1e\x45\xee\x60\xde\xe7\xe7\x77\xae\xb3\
\xd3\xde\xce\xec\xcc\xbc\x7d\x33\x48\x61\x41\x3e\xa8\x8a\x25\x4b\
\x97\xae\xfe\xf9\x67\xb1\x58\x7c\xf3\xe6\x2d\x73\x0b\x8b\x5e\xbd\
\x7a\x89\x44\x22\xe2\x23\x14\x0a\x85\x40\x20\x88\x8d\x8d\x3d\x71\
\xe2\xc4\xaa\x55\x2b\x3d\xdc\x3d\xf2\xf2\x72\xcb\xca\xca\xda\xb5\
\x6b\x07\x0c\x41\xe1\xe4\xc9\x15\x3f\x2c\x8e\xbb\x79\x79\x40\x34\
\xd3\xc3\xfc\x2e\xa0\x71\x01\xab\x25\x60\x39\x88\x71\xbb\x52\x89\
\xa5\x58\x90\x47\x2b\x3e\x35\x65\xe5\x8b\x45\x3f\x9f\x08\x0c\x0c\
\x04\x00\xe0\xc5\xc5\xb8\x44\xc2\x70\x72\xa2\x12\xf9\xeb\xe4\xa4\
\x88\xa8\xe8\xd2\x92\x62\x2a\x81\x9f\x2d\x0a\xec\x30\xfe\x07\x90\
\xb8\x8f\xe6\xf9\x59\x69\x69\x85\xad\x57\x10\x7a\x69\x3e\xdd\xfb\
\x8b\x32\xc2\xd6\xc6\xc6\x42\x91\x7c\x51\x61\x1f\x78\xf3\xea\x7f\
\x61\x9b\x9e\x80\xba\xc3\xe3\xef\x03\x3b\x4d\xf8\x01\x49\xdc\x4f\
\xf3\xfc\xac\xb4\x54\x48\x26\xca\xec\xbd\xa9\x38\x35\xd1\xd6\xc6\
\x52\x91\x7c\x01\x6d\xee\x1b\x7b\xfd\x6e\xd4\x66\xfd\x89\xd2\x34\
\x2f\xdd\xbf\xff\x00\x00\x60\x66\x66\xd6\xb5\x6b\x97\xac\xac\xac\
\xbb\x77\xef\x5a\x58\x58\x58\x7e\x84\xb9\xb9\xb9\xb9\xb9\x79\x54\
\x54\x54\x40\x40\xc0\xce\x1d\x3b\xe5\x72\x59\x66\x56\xd6\xcb\x97\
\x2f\x0d\x2d\x83\xe2\x5d\x0e\x41\x10\x12\x89\x44\x22\xc8\x91\xe1\
\x66\x52\x49\xb9\xb4\xf8\xbe\x34\xfb\x08\x2d\x73\x85\x6d\xc1\x5c\
\x27\xc9\x7a\x73\x5a\x3e\x82\x54\x66\x4f\x7c\xe9\xb2\xe0\xb7\x6d\
\xa0\x1e\x60\xee\x15\x59\xf0\xf0\x14\xc1\x36\x43\x1f\xec\xb0\xb6\
\xe1\x02\x1c\x23\x14\x12\xc4\xd6\xcd\xda\x86\x8b\x3e\xd8\x01\xcc\
\x9a\xe5\xa6\xbd\xb0\x0d\xfa\xa2\x6e\x13\xb5\xf4\x8a\x2c\x78\x70\
\x8a\x60\x73\xd1\x07\xdb\x95\x89\x02\x1c\xb3\xb1\xe1\xa2\x0f\xb6\
\x03\xb3\x66\x79\x6f\x53\x5b\x50\x4b\x94\x01\x08\x42\xed\x52\xbf\
\xbe\x7d\xc9\x8b\x1c\x36\xfb\x8b\x5e\x3d\x8f\x1d\xfb\xeb\xe5\xcb\
\x97\x38\x8e\xcb\xe5\x72\xb9\x5c\x2e\x95\x4a\x45\x22\x91\x44\x22\
\x11\x89\x44\x69\x69\x69\xf7\xef\xdf\x4f\x4d\x4d\x4d\x4c\x7c\x32\
\xf2\xab\xaf\x0c\x2a\x83\xf5\xbc\xb9\x28\x83\x5e\x54\x26\x3b\x16\
\x23\x77\xb4\x31\x53\x60\x88\x02\xe5\x28\x50\x1b\x0c\xc3\x30\x05\
\x86\x29\xb0\xec\xf7\x44\x56\x09\xc1\x66\xb3\xc9\xcc\x10\x28\x4a\
\xc8\x64\x9a\xb9\xad\x0e\xd4\x02\x3b\xf5\x5b\xf8\x62\xe3\x80\x80\
\x76\xad\xec\xbc\x3a\x11\x29\xa7\x40\xe0\xd7\x74\xeb\xe6\x40\x98\
\x46\xe4\x3c\x64\x78\x75\xca\xcf\x17\x24\xa7\x16\x04\x2d\x9a\x62\
\x58\xd2\xfa\xe0\xdc\x7f\xe1\xb3\x0d\x03\xda\x04\xb6\x72\x50\x4d\
\x54\x9a\x4f\xa4\x9c\x62\x78\x75\x7a\x9f\x2f\x78\x99\x52\x10\xb2\
\x98\x52\xa2\x0c\xcd\x20\x5e\x5e\x9e\xca\x8b\x36\x36\x36\x43\x87\
\x0e\xc9\xcf\xcf\x57\x28\x14\x38\x8e\x2b\x3e\x02\xc7\x71\x00\x00\
\x9b\xcd\xf6\xf2\xf6\xa6\xd1\x68\x5c\x33\x33\x43\xcb\x27\x7b\xf1\
\xd2\xae\x43\x87\x9e\x9f\xf7\xcc\xcb\xcb\x53\x28\x14\x18\x86\x91\
\x31\x33\x3e\xfe\xf6\x72\x20\x42\x3a\x59\xb9\xba\xba\x92\x31\xb3\
\xc3\x42\x19\xee\x6e\x06\xa5\x42\x31\x30\xcb\xce\xcd\xf7\xbb\x53\
\xcf\xfe\x37\xb8\x6d\x64\x5b\x87\x56\x1d\x88\xcc\x0b\x34\x9b\x16\
\x40\xf4\x86\xde\xaa\x43\xde\xbb\xa2\xc4\xc7\x69\xed\xe6\x9f\x62\
\xdb\xb9\x12\x75\xda\x0a\xd9\x76\x6e\xfe\x73\x4f\x3d\xdd\x3c\xb8\
\x5d\x54\x5b\xc7\x8f\x89\x12\x99\x17\xe8\xad\x3a\xe4\x66\x17\x25\
\x24\xa4\xb5\x5f\x78\x8a\x43\x2d\x51\x2d\x1d\xe9\x6f\xdb\xb6\x0b\
\x04\x82\xbb\x77\xef\x5d\xbb\x76\xbd\xac\xbc\xbc\xa8\xa8\xe8\xfd\
\xfb\x7c\xae\x99\x19\x8f\xc7\x13\x08\x84\x34\x1a\xcd\xdb\xdb\x9b\
\xc3\xe1\xa0\x28\xca\xe3\xf1\x4a\x8a\x8b\xc5\x62\xb1\xb7\x97\xb7\
\xa1\x65\x10\xfd\xf3\x0f\xa3\xa4\xc4\xeb\xf1\xe3\x8e\xcf\x9e\xf5\
\x09\x0a\xfc\x82\xc3\xfe\xfc\xcd\x9b\x01\x76\x76\xc3\x82\x02\x07\
\x16\x14\x0c\xa3\xd3\xc6\x76\xea\xd4\x4b\x2a\x95\x6f\xdf\x2e\x4b\
\x4c\x94\xdd\xbe\x2d\x3e\x77\x1e\x2f\x2a\xaa\xf3\x5e\x54\xa1\x50\
\x88\x44\x22\x09\xdb\xd6\x7e\xec\xee\x47\xb7\x13\xdf\x17\x89\xe8\
\x6e\x61\xb4\xe6\x2d\xe9\xee\x91\x79\x45\xa2\x07\x37\x13\x79\x5f\
\xef\x96\x71\x9a\x8b\x44\x22\x85\x42\x51\xb7\x89\x4a\xd9\xb6\x8e\
\xe3\x76\x3f\xbc\x99\x98\xa7\x4c\xd4\x2d\x2c\xaf\x50\x74\xff\x66\
\xa2\xcb\x84\xdd\x72\xca\x89\x6a\xe9\x48\xb9\x5c\x6e\x5e\x5e\xde\
\xbe\x7d\xfb\x32\x32\x32\xfe\xb7\x79\xf3\xd5\x6b\xd7\xfe\xfd\xf7\
\xdf\xc1\x83\x07\xb7\xf6\xf6\xfe\xdf\x2f\xbf\xb8\xbb\xbb\x8d\x1a\
\x39\xf2\xf8\xf1\xbf\xdf\xa4\xbd\x99\x32\x79\x72\x49\x49\xe9\x91\
\x23\x47\x22\x22\xc2\x23\xc2\xc3\x0c\x2a\x86\x59\x9f\x3e\x84\x42\
\x41\x54\x08\x71\x81\x80\x40\x51\x5c\x2c\xc1\x05\x02\x20\x91\x10\
\x28\x4a\x08\x05\x84\x99\x19\x40\xe5\x78\x85\x88\x10\x0a\x81\x4c\
\x8e\xcb\x64\xb8\x40\xa0\x10\x89\xeb\xb6\x23\x55\x28\x14\x12\x89\
\x44\x2a\x95\x4a\xa5\x52\x19\xa7\xb9\xf9\x97\x1b\xef\x9f\x9c\x17\
\xde\x2b\x9a\xd7\xba\x7d\xee\xeb\x94\xbb\x97\xe3\x6c\x87\x6e\x96\
\x73\x9a\x8b\x2a\x2a\x14\x1c\x0e\x81\xe3\x5c\x2e\x97\x4e\xa7\xd7\
\x9e\x3f\xd5\x44\xad\x06\x6f\xbc\xfb\xf7\xbc\xc8\x2f\xa2\xf9\x3e\
\xed\x73\x92\x53\xe2\x2f\xc7\x35\x1f\x66\x58\xa2\x48\xc1\xfb\x3c\
\xd0\xe8\x90\x92\x92\x1c\x11\x15\x5d\x52\x54\xa8\x8f\x62\x42\x2a\
\x95\x4a\x65\x32\xa9\x44\x2a\x93\x49\x65\x72\xb9\xf4\x7d\x5a\xd9\
\xa9\x05\x1e\xbe\xce\x6f\x92\xb2\x6d\x86\x6c\xe4\xb6\x6c\xc5\x66\
\xb1\xd8\x6c\x0e\x87\xcb\xe1\xb0\xd9\x1c\x0e\x07\x41\x90\x5a\xe6\
\x4d\x33\x51\xc9\xfb\xb4\xd2\x93\x0b\x3c\xfd\x6a\x98\x28\x92\xdf\
\x18\x29\x4c\x4d\x49\x8e\x88\x8a\x2e\xd6\x47\x21\x09\x1c\xc7\x51\
\x14\x95\xc9\x64\x28\x8a\x2a\x14\x0a\x49\x41\x46\xce\x89\x25\xfc\
\xa1\xab\xb9\xf6\x6e\x74\x3a\x9d\xc9\x64\xb2\xd9\x6c\x26\x93\x49\
\xa3\xd1\xea\x30\x87\x75\x98\x28\x92\x9f\x97\x0b\x20\x4c\x19\x34\
\x58\x05\xa6\x0e\x06\x01\xeb\xc0\xd4\x29\xac\xdb\x19\x2b\x04\xec\
\x48\x21\x6a\xd0\x91\xc2\x56\x08\x5b\x21\x44\x03\xb7\xc2\xb4\xb4\
\x54\x58\x0b\x26\x0d\x84\xe2\xfc\x17\xc2\x88\x47\xa4\xd5\x62\xc4\
\x77\x5b\x60\x1d\x99\x6a\x2b\x1c\xb5\x78\x3f\xc7\xae\xf5\x84\x2f\
\x3b\xc2\x3a\x32\xc9\x56\x38\x6a\xf1\xfe\x69\x93\x27\x02\x00\xf2\
\x4a\xc4\xb0\x8e\x4c\x8f\x42\x92\xbf\x9c\x22\x91\xae\x7b\xee\xc7\
\x9c\x8e\xbd\x78\xdc\xc5\xdd\xb3\xff\xb8\x79\x16\x56\xb6\xb0\x12\
\x8d\x8e\x42\x8e\x5d\x6b\x00\x00\xae\x63\xbe\x18\x73\x7a\xef\xcd\
\xcb\x67\x7e\x5e\xbb\x3e\xee\xd1\xeb\x2d\x4b\x26\xcc\x5a\xbd\xdf\
\xdc\xd2\x1a\xd6\xa3\x11\xbd\x0b\x47\x7c\xb7\xe5\xab\xaf\xa7\x8a\
\x65\xa8\xd6\xd0\xb7\x2e\x1c\xba\xf2\xd7\xf6\x07\xf7\xef\x7a\x7b\
\xba\x8b\xc4\xd2\xb9\xab\x0f\x5c\x3e\xb5\x6f\xd6\xcf\x7f\x70\xcd\
\xad\x60\x55\x1a\xd7\xbb\x50\x6b\x13\x8c\xff\xf7\xaf\x7f\x8f\x6c\
\x4a\x78\xf4\xc8\xc9\xc9\x31\x39\x35\x5d\x50\x21\x5d\x36\x73\x68\
\x99\x50\xbc\xfd\xc7\x49\xd3\x56\xfe\xce\x31\xb3\x80\xb5\x69\x44\
\x14\x12\xb8\xfa\x95\x07\x31\xa7\x2e\x1e\x58\x73\xf7\xee\x5d\xeb\
\x66\xcd\x6e\xdd\xbc\x2d\x10\x49\xf3\x8b\x85\x04\x9d\x3d\xeb\xeb\
\xde\x2b\xca\x45\x3b\x7e\x9a\x34\x6d\xf9\xef\x6c\xae\x39\xac\x50\
\x23\x6d\x85\x8f\x6f\x9d\x3f\xbe\xed\x87\x8d\x1b\x37\xd0\xe9\xf4\
\x69\xd3\xa6\xd9\xda\xda\xda\x39\xf0\xb2\xf2\x4a\x8b\x04\xe8\x91\
\xd3\x31\x23\x87\xf4\x3b\x22\x10\xef\x5a\x35\x65\xca\xb2\xdd\x2c\
\x8e\x19\xac\x53\xe3\x68\x85\x2a\x14\xbe\x4e\xbc\xfd\xe7\x2f\xf3\
\x22\x22\xc2\x2f\x5d\xba\x34\x77\xee\x5c\x82\x20\xfa\xf7\xef\x7f\
\xf7\xee\xdd\x57\x6f\xde\x31\x2d\x1d\x58\x16\xf6\xbf\x1f\xc1\xa2\
\x3b\x46\xc6\x8a\x65\x7b\x7e\xfe\xf6\xdb\xe5\xbf\xd3\x19\x4c\x58\
\xad\xc6\xd5\x0a\xcf\x1f\x58\xcf\xe5\x72\x30\x0c\x13\x08\x04\x4e\
\x4e\x4e\xd9\xd9\xd9\x4f\x9f\x3e\xcd\xcc\xcc\x04\x00\x20\x74\x06\
\x42\xa3\xbf\x7f\x97\xf1\xec\x99\x95\x33\xbf\xe5\xf3\x12\xf7\xf8\
\x2b\x7f\x75\xe8\x3d\x1a\x56\x6b\xc3\x53\x28\x47\x2b\x3f\x5f\x1c\
\x38\x65\xe5\xed\xf3\xfb\x9e\x3c\xbe\xc9\xe5\xb0\x3c\x3c\x3c\x18\
\x0c\x86\x50\x28\xb4\xb5\xb5\x2d\x2b\x2b\xa3\x63\x22\xd7\x96\x56\
\xb6\x0e\xcd\x4b\x0a\x73\x52\x5e\x3d\x45\xa4\x85\x1e\x01\x63\x54\
\xef\x85\x68\xb8\x56\x88\x57\xb6\x42\x9e\x47\x9b\xe1\xb3\x7f\x11\
\x0b\x4b\x7f\xff\x69\x4c\x46\x46\x06\x83\xc1\xf0\xf0\xf0\xe0\x70\
\x38\x72\xb9\x5c\x20\x10\xbc\x7f\x97\xfe\x26\xe5\x95\xbd\xa3\x4b\
\xc7\xcf\x86\xb5\xed\xd0\x97\xc9\xe6\xa8\xde\x0b\x61\x14\xef\x42\
\x12\x87\xd6\x4d\x29\x78\xf7\x86\xfc\x5d\x50\x50\xc0\x62\xb1\x50\
\x14\x25\x08\x82\xc1\x64\x4f\x58\x76\xc0\xd9\x3b\x50\x22\x12\x20\
\x74\x3a\x14\x90\x3f\x3d\xe8\xdf\x2f\x5c\xa8\xfa\xef\x93\x57\xee\
\xb7\x09\x0c\x15\x88\xa4\x6a\xe1\x82\x3a\x7f\x69\xef\xec\x55\x51\
\x56\x58\x5e\x9c\x07\x00\x50\x7e\x28\x8e\xe3\x8a\x82\x77\x69\x6d\
\xa2\xfa\x1c\x5d\x33\x36\x29\xe1\xbf\x80\xc8\x2f\x6a\xff\xb1\x2c\
\x44\x0d\x5b\xa1\x6d\xf3\x16\xd5\x4d\xed\x69\x34\x0f\xff\xb0\xb6\
\x51\xbd\x8e\xfd\x32\xf7\xe5\xbd\x7f\x95\x97\x99\x2c\x8e\x5f\x78\
\x4f\x3a\x8b\x3d\x74\xfe\x1e\x8e\x99\x25\x40\x68\x38\x6c\x88\xc6\
\xd1\x91\xaa\x5f\xf9\xfb\xd7\x39\x22\x41\xf1\xa8\x85\xbb\xdf\x67\
\xbe\x56\x5e\x6c\x1d\xdc\xad\xcf\xf8\xa5\x96\x36\x0e\x04\x01\x9e\
\xde\x3c\x15\xd9\x77\x12\xa4\xaf\x61\x28\x6c\xde\xc2\x4e\xed\xfd\
\xa7\xf9\x4a\xeb\x35\xf6\x07\x4b\x6b\x3b\x80\x20\xd3\xd6\x9f\x8d\
\xbf\xb8\x2f\xe6\xf8\xaf\x00\x00\x36\xd7\xc2\xc2\xda\x9e\x0c\x1c\
\xd1\xe7\x1b\xf8\x22\x6c\x30\x0a\x35\x55\x5f\xcd\xce\xd0\xdc\xda\
\x0e\x27\x9b\x27\x82\x44\xf6\x99\xd0\xac\x05\xbf\xb9\xa3\x9b\x83\
\xab\x0f\xec\x36\x4d\x66\x44\xaa\x06\xdf\xf0\x9e\x54\x82\x41\x18\
\x2f\x85\x10\xa6\x37\x9c\x81\x30\x19\x0a\x7d\x2c\x8b\xfe\xb9\x72\
\x1b\xd6\x8b\x69\xb7\xc2\xb3\xeb\x87\x97\x14\x17\x01\x00\x38\x5c\
\x2e\xac\x20\xe3\x07\xfc\x20\x1f\x52\x08\xd1\xe0\x1d\x29\x39\xaf\
\xd7\x3a\x0a\x85\xe3\x52\xd3\xa0\x10\x7a\x2a\x60\x47\x0a\x61\x94\
\xf3\x42\x38\x3d\x6c\x24\x14\x42\x02\x4d\xbf\x15\xea\x80\xbf\x7f\
\x40\x56\x56\x56\x6d\x52\xe5\xf1\x78\x2f\x5f\xbe\x60\x30\x18\x46\
\x95\xd6\x8a\x15\x2b\x8a\x8b\x4b\x28\x46\x6b\x63\x63\xbd\x7c\xf9\
\xf2\xba\xdd\x4f\xa8\x66\x65\xaf\x49\x47\x9a\x95\x95\x55\xfd\x60\
\x55\xa1\x01\xe5\x46\x87\x24\x7c\x7d\x7d\x9f\x3d\x7b\xd6\x3e\x28\
\x48\x6f\xfe\x3e\x65\x5a\xc5\xc5\xc5\xbb\x76\xed\xc2\x3f\x82\xdc\
\xf0\x51\x0d\xca\x8b\xeb\xd6\xad\x7b\xf6\xec\x59\xa0\x81\x1b\xe9\
\x92\x78\xfa\xe4\x71\x44\x54\xb4\x44\x2c\xaa\x93\xb2\xd3\xaa\xef\
\x48\xb5\xfe\xa9\x1e\xaa\x7b\x5e\x2a\xb1\x79\xf3\x66\x99\x4c\xa6\
\xfc\x67\xf5\xf1\x53\x49\x4b\x2a\x95\x72\x38\x1c\xad\x69\xa9\xc1\
\xa0\xb4\x54\xd9\x22\x08\x42\x93\x3f\xe5\x45\xea\xd1\xea\x2a\x54\
\x8d\xcb\xae\x56\xcf\x34\x00\x40\xf3\x16\x76\xca\xd9\xa1\x7a\x2b\
\xd4\xfa\x47\x5f\xbc\x9a\x35\xfb\xeb\xaf\xbf\xca\xe5\xf2\x2a\xd5\
\xaa\x2b\x72\xca\x69\x91\x8f\xa1\x66\x5a\x6a\x57\x0c\x48\x8b\x00\
\xb8\x02\x57\xfb\x43\xe0\x84\xe6\x6f\x02\x27\x00\x41\x3d\x5a\x1d\
\x85\xaa\x45\xd9\x55\xeb\x59\xbb\xe4\x5b\x33\x54\xd3\x26\xd4\xfe\
\xb7\x36\xa9\xc8\x64\x32\x36\x9b\x4d\xb6\x03\x5d\xc9\xd5\x38\x2d\
\xb5\x66\xa7\xd9\x10\x95\x17\x1b\x70\xfc\xa2\x56\xcf\xb5\xea\x48\
\xdf\xbc\x79\xa3\xfc\x9d\x9a\x9a\x5a\x7d\x53\x50\xfb\xa7\x41\x7d\
\xce\xe2\xc5\x8b\xc9\x3d\x1d\x11\x04\x51\xf6\x9f\x32\x99\x8c\xcb\
\xe5\x5a\x58\x58\x38\x38\x38\x6c\xde\xbc\x59\xa1\x50\xac\x5e\xbd\
\xda\xc3\xc3\xc3\xd5\xd5\xd5\xcb\xcb\x6b\xd9\xb2\x65\x86\xa6\x45\
\x00\x80\x13\x84\x5a\x3b\xc4\x14\x0a\xad\x17\x89\x9a\xf6\xa2\x35\
\xe8\x48\xab\xaf\x67\x9a\x9e\xe1\x8c\xee\x06\x9e\x95\x95\x15\x19\
\x19\xf9\xef\xbf\xff\x02\x00\x62\x62\x62\xa2\xa2\xa2\x52\x53\x53\
\xad\xac\xac\x7a\xf4\xe8\xe1\xef\xef\xbf\x68\xd1\xa2\xfe\xfd\xfb\
\x87\x84\x84\x1c\x3f\x7e\x5c\x49\xa1\x92\x57\x43\x3b\xd2\xbf\xfe\
\xfa\xeb\xd1\xa3\x47\xe4\x76\xec\x6c\x36\x9b\x8c\x84\xcd\x66\x97\
\x97\x97\x97\x94\x94\x9c\x3e\x7d\xfa\xdc\xb9\x73\x0a\x85\xe2\xec\
\xd9\xb3\xe7\xcf\x9f\x4f\x4d\x4d\x8d\x8f\x8f\xff\xef\xbf\xff\xc4\
\x62\xb1\xa1\x69\xa9\xb6\xb3\xee\xdd\xbb\xbb\xb9\xb9\xb5\x6a\xd5\
\xca\xd3\xd3\xb3\x75\xeb\xd6\xbe\xbe\xbe\xfe\xfe\xfe\x6d\xda\xb4\
\x69\xd7\xae\x5d\xfb\xf6\xed\x0f\x1e\x3c\xf8\xd9\x67\x3d\xec\xed\
\x1d\x94\x7f\x82\x83\x43\xea\xa3\x23\xd5\x5b\xcf\x8c\x1a\x37\x67\
\x17\x17\x97\x33\x67\xce\x7c\xf9\xe5\x97\xf3\xe6\xcd\xdb\xbc\x79\
\xf3\xe1\xc3\x87\x5d\x5d\x5d\x11\x04\xf9\xee\xbb\xef\x58\x2c\xd6\
\xa4\x49\x93\xb6\x6e\xdd\x4a\xa3\xd1\xe6\xcc\x99\xd3\xaf\x5f\x3f\
\x92\x42\x04\x41\x6a\xd6\xb9\x65\x67\x67\x7b\x7b\x7b\x2b\x97\x6d\
\xd5\x5a\xb6\x87\x87\x47\x56\x56\x16\x8a\xa2\xb9\xb9\xb9\x2e\x2e\
\x2e\x0a\x85\xc2\xc2\xc2\xe2\xf2\xe5\xcb\x40\xe5\x7b\x57\xea\x69\
\x29\x3b\xcf\xd4\xd4\xd4\x82\x82\x02\x36\x9b\x4d\xfe\x57\x45\x45\
\x85\xb9\xb9\x39\x86\x61\x65\x65\x65\xe4\x63\x84\xa2\x28\x86\x61\
\x18\x86\x91\x3f\x02\x03\x03\x5f\xbe\x7c\xe9\xef\xef\x5f\xb7\xdd\
\xa6\xde\x7a\xae\xd5\xb4\xa6\x63\xc7\x8e\x4b\x97\x2e\x5d\xb2\x64\
\xc9\x9c\x39\x73\x3a\x75\xea\x44\x92\x14\x15\x15\x15\x11\x11\x51\
\x5e\x5e\x1e\x12\x12\x12\x1a\x1a\x5a\x5c\x5c\x2c\x91\x48\x34\x3b\
\x52\x03\xd6\x00\x69\x34\x17\x17\x97\xe4\xe4\x64\xf2\x94\x0c\x82\
\x20\x50\x14\x25\xab\x9b\x9c\x3f\xbc\x7e\xfd\xda\xc5\xc5\x85\x20\
\x08\x1e\x8f\x97\x96\x96\xa6\xba\x55\x6b\x8d\xdf\x85\xe4\xb3\xc2\
\x66\xb3\x39\x1c\x0e\x87\xc3\x89\x8f\x8f\x6f\xdf\xbe\xbd\x54\x2a\
\x45\x51\xb4\x63\xc7\x8e\x37\x6f\xde\xa4\xd3\xe9\x0c\x06\x83\x4e\
\xa7\x2b\x7f\xd4\xdf\xcb\xaf\xfa\x7a\xae\x76\x75\x46\xdf\x80\x30\
\x26\x26\xe6\xe7\x9f\x7f\xde\xb8\x71\xe3\x9a\x35\x6b\xfc\xfc\xfc\
\xba\x74\xe9\x42\xd6\x32\xf9\x41\xb7\xea\x0f\x2e\x97\x9b\x92\x92\
\xe2\xe5\xe5\x45\x06\x20\x23\xa7\xae\x84\x0c\x1f\x3e\x3c\x34\x34\
\x54\x26\x93\x91\x8c\xda\xdb\xdb\xe7\xe4\xe4\x84\x87\x87\xdb\xdb\
\xdb\x93\x15\x3d\x7b\xf6\x6c\x1c\xc7\xfb\xf4\xe9\x33\x68\xd0\x20\
\x0c\xc3\x68\x34\x1a\x87\xc3\x89\x89\x89\x31\x30\x2d\x02\xc7\x2b\
\xe7\x82\xca\xab\x37\x6e\xdc\x18\x36\x6c\xd8\x9f\x7f\xfe\xc9\xe5\
\x72\x51\x14\xdd\xb5\x6b\xd7\xe8\xd1\xa3\xb7\x6e\xdd\x1a\x1d\x1d\
\x4d\x4e\x43\xc9\x27\xc9\xd0\x42\x51\x0f\x59\x7d\x3d\xd7\xbc\x23\
\xcd\xca\xca\x1a\x31\x62\xc4\x91\x23\x47\xa2\xa3\xa3\x03\x02\x02\
\x46\x8e\x1c\x49\xf6\xd7\x5a\x29\x9c\x35\x6b\xd6\xd4\xa9\x53\x87\
\x0e\x1d\x3a\x71\xe2\xc4\x1a\x0c\xe7\xd6\xac\x59\xb3\x72\xe5\x4a\
\x65\x23\x26\xed\x1c\x27\x4f\x9e\x54\x1b\x88\xce\x9c\x39\x73\xda\
\xb4\x69\x6a\x6f\xfb\x1a\x8f\x48\x95\xfc\x0d\x1d\x3a\xf4\xcf\x3f\
\xff\xec\xd4\xa9\x13\x49\x55\x48\x48\xc8\xbe\x7d\xfb\x26\x4c\x98\
\xb0\x79\xf3\xe6\xa8\xa8\x28\x25\x8b\xf5\xd4\x04\xf5\xd6\x73\xcd\
\x29\xe4\xf3\xf9\x77\xef\xde\xe5\xf1\x78\x0a\x85\x22\x2c\x2c\x2c\
\x26\x26\xc6\xd1\xd1\xf1\xed\xdb\xb7\x64\xdd\x25\x25\x25\x91\x3f\
\x1e\x3d\x7a\x84\x61\xd8\x80\x01\x03\xfa\xf6\xed\x5b\xe3\x49\x85\
\x1a\x55\xe4\x33\x58\xfd\x5c\xa2\xf6\x93\x0a\x00\x80\x44\x22\xf9\
\xea\xab\xaf\x66\xce\x9c\xd9\xa5\x4b\x17\x92\x3f\xf2\xef\xe0\xe0\
\xe0\x69\xd3\xa6\xcd\x99\x33\x27\x26\x26\x86\xc9\x64\xd6\x7e\xa6\
\x54\x9b\x7a\xae\x4e\xf2\xad\xe6\xc9\x22\xef\xe7\xf3\xf9\x64\x91\
\x44\x22\x91\xa3\xa3\xa3\x42\xa1\x10\x8b\xc5\x0c\x06\x83\xca\x8a\
\x09\x59\x1d\xd4\xf9\x53\xd2\x23\x14\x0a\xc9\xd1\x44\x45\x45\x05\
\x8b\xc5\xaa\xc3\xb4\x70\x9c\xc0\xb0\x0f\xcf\x07\x49\x21\x97\xcb\
\x3d\x71\xe2\xc4\xe0\xc1\x83\x83\x82\x82\x54\x59\x8c\x8f\x8f\xdf\
\xb9\x73\xe7\xaf\xbf\xfe\xca\x62\xb1\xb0\x8f\x00\x00\x60\x94\x0b\
\x55\x7d\xf5\x1a\x54\xcf\xb4\xe2\xa2\x42\xf2\x4f\x8d\x9b\xc5\x9d\
\x3b\x77\x22\x22\x22\x04\x02\x81\x48\x24\xea\xde\xbd\xfb\x9d\x3b\
\x77\xf4\x36\x8e\xda\xa4\x15\x1d\x1d\x2d\x14\x0a\x85\x42\x61\x8f\
\x1e\x3d\x1e\x3c\x78\x50\xb7\x69\xa9\xce\xe5\xc9\x7f\x46\x46\x46\
\x1e\x3b\x76\x6c\xe2\xc4\x89\x57\xaf\x5e\x25\xa9\x8a\x8b\x8b\x9b\
\x3a\x75\xea\xa6\x4d\x9b\x42\x42\x42\x94\x2f\xc2\x7a\xea\x48\xa9\
\xd4\x33\xa3\x96\x51\xc7\xc7\xc7\x8f\x1d\x3b\xf6\xb7\xdf\x7e\xe3\
\x70\x38\x0a\x85\x62\xed\xda\xb5\x33\x67\xce\xdc\xbc\x79\x73\x48\
\x48\x08\xb9\x34\x4a\x06\x93\xc9\x64\x72\xb9\x5c\x26\x93\x91\x3f\
\x7c\x7d\x7d\xbf\x9b\x3b\x2f\xf6\x46\x8c\xa1\x69\x4d\x98\x30\x61\
\xcf\x9e\x3d\x64\x2b\xdc\xb8\x71\xe3\x8c\x19\x33\x7e\xf9\xe5\x97\
\x76\xed\xda\x29\x14\x8a\x0d\x1b\x36\x90\x8f\x36\x99\x90\xf2\x6f\
\x5f\x5f\xdf\x05\x0b\xbf\xbf\x76\xf5\x0a\xf5\x85\x0f\xe5\xfa\x0b\
\x49\x4c\x78\x78\xf8\xa1\x43\x87\xa6\x4c\x99\x72\xe3\xc6\x0d\x82\
\x20\xce\x9f\x3f\xdf\xab\x57\xaf\xcb\x97\x2f\x9f\x3d\x7b\x56\x35\
\xa1\x76\xed\xda\xcd\x98\x31\x93\x62\xa1\xea\xb0\x9e\x6b\x38\x22\
\x55\x3e\x17\x63\xc6\x8c\xf9\xed\xb7\xdf\xc8\x91\xae\x42\xa1\x08\
\x0d\x0d\xdd\xb2\x65\xcb\xec\xd9\xb3\xd7\xad\x5b\x17\x1c\x1c\x8c\
\x20\xc8\x81\x03\x07\x98\x4c\xa6\xe6\x22\x59\x50\x50\x10\xc5\x21\
\x99\x32\xad\xf1\xe3\xc7\xef\xd9\xb3\x27\x38\x38\x98\xbc\x12\x14\
\x14\xf4\xcb\x2f\xbf\xcc\x99\x33\x67\xe3\xc6\x8d\x6d\xdb\xb6\x05\
\x00\x1c\x3d\x7a\x54\xf3\x76\xa1\x50\xd8\xb1\x63\x47\x2a\x69\xd9\
\x58\x5b\xff\xf4\xd3\x72\x35\x3a\x11\x04\xc1\x30\x2c\x34\x34\xf4\
\xd6\xad\x5b\x74\x3a\x1d\xc3\x30\x06\x83\xf1\xfb\xef\xbf\xd7\xb2\
\x50\x14\x47\xa4\x54\xea\xb9\xe6\xad\x50\x24\x12\x8d\x1b\x37\x6e\
\xc2\x84\x09\x9d\x3b\x77\x56\xd5\x77\x82\x82\x82\x26\x4e\x9c\x38\
\x7f\xfe\xfc\x73\xe7\xce\xa9\x0e\x08\x39\x1c\x8e\x4c\x26\x93\x48\
\x24\x1c\x0e\x07\x00\x20\x97\xcb\xa9\xa7\x25\x14\x0a\xc7\x8f\x1f\
\x3f\x63\xc6\x0c\xb2\xef\x52\xa6\x15\x18\x18\x38\x7e\xfc\xf8\xf9\
\xf3\xe7\x9f\x3e\x7d\x5a\xb5\xcf\x24\x87\x6a\x64\x1d\x11\x04\x41\
\x31\xad\xa5\x4b\x97\x24\x25\x25\x29\xff\xf9\xd7\x5f\xc7\xc9\x56\
\x48\xf6\x93\x0c\x06\x83\xec\x36\xeb\xa4\x50\x75\x58\xcf\x35\xa7\
\x90\xc9\x64\xee\xdc\xb9\x73\xd2\xa4\x49\x81\x81\x81\x91\x91\x91\
\xca\xa8\x13\x12\x12\xf6\xee\xdd\xbb\x6a\xd5\x2a\x36\x9b\x2d\x97\
\xcb\x75\x3d\x6b\x06\x51\xc8\x66\xb3\xf7\xee\xdd\xfb\xcd\x37\xdf\
\xf8\xf9\xf9\x05\x05\x05\xa9\xa6\xb5\x6f\xdf\xbe\xb5\x6b\xd7\xb2\
\x58\xac\x6a\x22\xa4\x9e\x96\x9f\x9f\x9f\xea\x73\x40\x3e\x16\xca\
\xb7\x1d\xf9\xa3\x4e\x0a\x55\x87\xf5\x5c\xc3\x35\x52\x32\x96\xc8\
\xc8\xc8\x9d\x3b\x77\xce\x9c\x39\x33\x2e\x2e\x4e\x19\xef\x9c\x39\
\x73\x56\xae\x5c\x19\x1e\x1e\x4e\x96\x96\x7c\x54\x11\x04\x21\x27\
\xe6\x5c\x2e\x17\x41\x10\xa9\x54\x2a\x97\xcb\x29\x2e\x27\x92\x31\
\x07\x07\x07\xef\xdc\xb9\x73\xea\xd4\xa9\xca\x21\x4c\x42\x42\xc2\
\xbc\x79\xf3\xd6\xad\x5b\xd7\xb6\x6d\x5b\x65\x5a\xe4\x52\xb8\x92\
\x03\x04\x41\x3e\xb4\xc2\x1a\x49\x42\x4a\x0d\x59\x95\xc5\x3a\x29\
\x14\xc5\x35\x52\x2a\xf5\x5c\x43\xa5\x42\xf9\x2c\x84\x87\x87\x6f\
\xd9\xb2\x65\xd1\xa2\x45\x42\xa1\x50\x2c\x16\x2f\x5e\xbc\x78\xe5\
\xca\x95\x61\x61\x61\xe4\xe0\xb0\xfa\x96\x41\x71\x51\x5f\x99\x56\
\xfb\xf6\xed\xb7\x6e\xdd\xba\x70\xe1\xc2\x8a\x8a\x8a\x8a\x8a\x8a\
\x25\x4b\x96\xac\x5b\xb7\xae\x4d\x9b\x36\x7a\xd3\x42\x51\xb4\x66\
\x7a\x82\xea\x37\x00\xca\x1f\x75\x52\x28\x60\x60\xd9\xab\xa9\xe7\
\xda\x8e\x48\xc9\xf9\xe6\xb9\x73\xe7\xd8\x6c\x36\x86\x61\xc7\x8e\
\x1d\x53\x2a\x09\xca\xd2\x4a\xa5\x52\xb5\xd7\xc6\x87\x07\xd6\xf0\
\xb4\x82\x82\x82\xce\x9c\x39\x43\xce\xc6\x8e\x1d\x3b\xa6\x3a\x2f\
\x24\x23\x24\xbb\x38\xd5\x77\x61\x79\x79\x39\xb9\xa6\x5a\x03\x60\
\x18\x46\x0e\x61\x34\x5b\x61\x2d\x0b\x55\x87\xf5\xac\xdd\xa8\x5d\
\xfd\xb7\x33\x9a\xaa\xa3\xaa\x00\xa4\x7a\xbd\xfa\x96\x41\xf1\x23\
\x47\xb5\xb4\x94\xb4\xa9\xcd\xeb\x75\xa5\xf5\x61\x59\xbc\x46\x1f\
\x54\x8a\xc5\x62\x1a\x8d\xa6\xd4\x22\xc8\xb5\xbd\x3a\x29\x54\xf5\
\xd5\x6b\x50\x3d\x57\xa7\xda\xeb\x4a\xc1\xd1\xd1\xd1\xd3\xd3\x93\
\x4a\x26\x03\x03\x03\x95\x2f\xff\xf2\xf2\x72\xf0\xf1\xf4\x3e\xf2\
\x15\x42\xa5\xac\xd4\xd3\x6a\xd3\xa6\x8d\x50\x28\x54\xa6\x45\x10\
\x84\xf2\x6f\x04\x41\x6a\x40\xa0\xa7\xa7\x67\x70\x70\xb0\xe6\xf5\
\x76\xed\xda\x09\x85\x42\x72\x52\x51\x50\x50\x40\x10\x84\x4c\x26\
\x33\xa8\x50\xd5\x57\xaf\xa1\x65\xaf\xdc\x52\x96\xdc\xb4\x64\xd6\
\xf4\xa9\x5b\xb7\xef\x24\xcd\x69\x98\x8e\xfe\x07\xc3\xb0\x17\x2f\
\x5f\x52\x79\x88\x96\x2d\xff\x49\x22\x91\xa8\xcd\xb5\xc9\xa2\x72\
\x38\x9c\xd4\xd7\xc9\x54\xba\x32\x8a\x69\x2d\xfd\x71\x39\xd9\xcb\
\x29\x13\x52\xb6\x18\x2b\x2b\xab\xc7\x09\x0f\x6b\xd0\x0a\x93\x92\
\x5e\x69\x2b\xd4\x72\x91\x48\x44\x4a\x5a\x64\x5a\x64\x42\x08\x82\
\xb0\xd9\xec\x17\xcf\x9e\xd6\x55\x2f\x4a\xb1\xec\x35\xa1\x10\xc2\
\xa8\x00\xbf\xe6\x6e\xd4\x14\x42\x4f\x85\x49\x00\x3a\x9b\x20\x85\
\x10\x46\xfd\x2e\x84\x1d\xa9\x49\x50\x58\x8d\x6a\x0f\x61\x1a\x14\
\x56\xa3\xd7\xc3\xa3\x0d\x4d\xbe\x23\x4d\x4b\x85\x14\x9a\xfa\xa4\
\x02\x80\xb1\x13\x26\xc1\x3a\x82\x23\x52\x08\x48\x21\x04\xa4\xb0\
\x86\x50\x88\x8b\xf2\xcb\x51\x02\x52\x68\xf4\x20\xca\x62\xa6\x87\
\xb4\x9f\x7c\xa9\xa8\xea\xb7\xfb\x58\xfa\xae\xde\xc1\x23\x8e\xbe\
\xd3\xfd\x21\x2a\x5e\x70\xb2\x9f\x8b\xdf\xec\xbb\xef\x2e\x0c\x72\
\xf1\x9e\x7c\xbb\xa2\xde\x72\x58\x7d\x42\x06\xa8\xf6\x7c\x3e\xbf\
\xfa\x00\x39\x39\x39\xa6\x48\x21\x62\xee\xd5\x7b\xec\x58\x85\xbf\
\x95\xa1\x8f\x33\x8d\x63\x69\xc6\xe4\x58\x72\x59\x66\xe6\x6c\x6e\
\x33\x4e\x1d\x58\x9b\xd0\xbc\x1b\xdb\x97\xaf\xdb\x7f\xe5\x65\x11\
\x61\xe9\xd6\x71\xcc\xb2\x4d\xdf\xf7\xe2\x31\xf4\x25\x54\xad\x6a\
\xaf\xc1\x10\x9f\xcf\xd7\xca\x93\xae\xeb\xa6\x01\xa6\x4b\x9f\x59\
\x73\x6a\x74\xa3\xad\x8b\x9d\x83\xab\x0d\xd7\xd6\xcd\xde\xde\xde\
\xb6\xf6\xa7\x8d\xe1\x82\x57\xb7\x5e\xb6\x18\xf5\xdb\xbf\x5d\x5c\
\x65\x0f\xb7\x4d\x9b\x3d\x6d\x9e\xff\xbd\xa3\x03\xed\x69\x7a\x12\
\x52\x7e\x90\x4f\x1a\xab\x66\x4e\xfb\x96\x20\x08\xf2\xca\xc1\x3f\
\xf6\x10\x1a\xe0\xf1\x78\x7a\xaf\x68\x02\x2d\x4d\xba\xb4\x6d\xe1\
\xb4\xf5\x09\x15\x04\x41\xa0\x39\x17\x97\x0f\x0e\xf1\xe0\xf1\x78\
\x4e\x3e\xdd\x96\x3e\xa8\x28\x8f\xf9\xc6\xdb\x6d\xe8\xd9\x22\x9c\
\x20\x08\x82\x10\xdc\xfa\xd6\xc7\x73\xf4\xc5\x62\x9c\x20\xb0\xb2\
\xc7\x7f\xcc\xec\x1d\xe2\xc5\xe3\xf1\x9c\x3d\xdb\x74\x18\xb6\xf1\
\xa9\x84\x20\xd0\xcc\xc3\x5f\xb5\x6d\xff\xcd\xd9\x7c\xac\x76\xf1\
\x54\x42\x78\x7b\xb2\xb7\xdb\xc8\xeb\x02\x82\x20\xe4\x39\xd7\xd6\
\x8d\xea\xe4\xef\xc2\x73\x6a\x1d\xd1\x3d\xd2\x95\xd7\x65\x7b\x3a\
\xaa\xbb\x54\x0a\xc1\xcb\xab\xff\xbd\x93\xe3\xc2\x57\xd7\x62\x33\
\x65\x44\xad\x8b\xa6\x0a\x79\xf2\xa6\x28\x7e\xf8\x9a\x97\x52\xad\
\x09\xa1\x39\x67\x16\x4c\x5b\x79\xf8\x4e\x96\x18\x27\x0c\xa6\x50\
\x8d\x33\x7d\xfc\xc9\xf3\x13\x4f\x6f\x9a\xde\x3b\x80\xef\x16\x31\
\x7c\xd1\x9e\xd8\x2c\x29\x4e\x08\xe3\xa6\xfa\xb8\xf5\xd9\x7a\x37\
\xab\xb8\x38\xe7\xf5\xf3\x34\x01\x86\x97\x5e\xfd\xda\xbb\xf5\xa4\
\x1b\xe5\x04\x41\x10\xe2\x84\xef\x03\xbd\x46\x5e\x28\xc2\x09\x34\
\xeb\xf0\x60\x77\xb7\x1e\xcb\xce\x3c\x7e\x9b\x93\xf5\xec\xf0\x60\
\x37\xb7\xd1\x31\x02\x82\x50\x94\xc4\xad\x9f\x34\x75\xeb\x63\x81\
\xa0\x76\xf1\x68\xa1\x50\x96\xfc\x6b\x57\x27\xaf\xc1\x1b\xff\x7d\
\x96\x9e\xf6\xf4\xea\x96\xe1\xde\x24\x85\xc2\xdb\xd3\x83\x5c\x5d\
\x54\xe1\xdd\x7b\x7b\xaa\x5c\x4b\x69\x85\x75\x95\x25\x42\x96\xb2\
\xa3\xa7\x6b\xe0\xfc\x78\x01\xae\xfd\xe1\x29\x7b\x7e\x7a\xc3\xd4\
\x2f\xfc\xf9\x6e\x91\x23\x6a\xf2\x05\x9b\xb2\x47\xad\xbe\xff\x94\
\xa7\x1d\x9e\x3b\x79\xf5\xd9\x5c\xa7\x5e\x63\x26\xee\xb9\xf7\x65\
\x84\x13\x87\xfc\xbe\x13\xe7\x36\xe3\xa0\x05\xe9\xd9\x02\x5a\x88\
\xbf\xb7\x2d\x00\x00\x10\x21\xc3\xc3\xf0\x79\xa7\x9e\x09\xbb\x76\
\x64\xa6\x9c\xbf\x21\x0a\x5e\x16\x6e\x83\xa0\xa9\x27\x76\xdd\x77\
\x98\x7c\x65\xc9\x40\x3f\x36\x00\x15\xae\xb6\x4c\x44\x02\x00\x00\
\x34\x9b\x0e\x0b\xf7\x74\x00\x00\x48\x1e\xd7\x2a\x1e\x4d\xc8\x52\
\x4f\x1d\x4b\x75\x9f\x7e\x63\x76\x4f\x2f\x26\x00\xce\x7d\xa3\x1d\
\x37\x9f\x04\x00\x00\xf3\xb0\x55\x97\xe2\x17\xab\x0e\x76\x10\x86\
\xa5\x9d\xb6\x7e\x93\xce\xad\x93\x2c\x61\x79\x17\x17\x8d\xd9\x58\
\x31\xfa\xe0\xe2\x70\x4b\xed\xa7\x27\xd1\x9a\x05\x7c\xb9\x60\xc7\
\x97\xdf\xad\x4d\x8e\x39\x56\xc3\x11\xa9\x5e\xfe\x00\x00\xb8\xe4\
\xfd\xdb\x77\x15\x16\xae\xde\xde\xde\x5e\xee\x76\x6c\x65\x5e\xb8\
\x81\x8b\x8f\x6e\xfa\x2c\x7b\x7d\x6f\xff\x76\xbd\x66\xed\x7d\x58\
\xaa\x00\x88\x75\xe8\x88\x50\x79\xec\xb1\x44\xa1\x24\xf9\xf4\xc5\
\xb2\xa0\x11\x11\xb6\x34\x20\x2b\x48\x2e\xa0\xf1\x03\xf9\x6c\x5d\
\xf1\xd7\x55\x3c\x95\x83\x89\x92\x8c\x12\x9a\x83\xb7\xbd\x3a\x39\
\x08\xcb\xa6\x25\xaf\x0a\x1c\xed\x2d\x19\xf5\x95\x25\xf4\xdd\x3f\
\xdf\xf5\x9f\xf5\xa0\xf3\x8e\x93\x3f\x45\xdb\xe8\xa1\x87\x61\xc1\
\xf7\xf0\xae\xf9\xa4\x42\xef\xf8\x85\x13\xb0\xe0\xc2\xf3\x47\x47\
\xbe\x71\x4b\xdb\x3b\x36\xc4\x2f\x7a\xf4\xd2\x3f\x6e\xbc\x11\x2a\
\x00\x00\x34\x4b\xbf\x11\x6b\x4e\x3e\x78\xf9\xdf\xda\xc0\xc7\x2b\
\xc6\x2e\x89\x17\x02\xc4\x36\x7a\xd2\xe7\xc8\x8d\x83\xd7\x6f\x1d\
\x3d\x2f\xed\x32\xb9\x73\x0b\x1a\x00\x4c\x5b\x37\x5b\xbc\x30\xb5\
\x50\xf7\xf7\x3b\x75\x15\xcf\x47\xb0\xec\xbc\xed\xf0\x9c\x67\x39\
\x32\xb5\xeb\x15\x71\x33\xda\xbb\xb9\xaa\xa2\x75\x9f\x1d\x6f\xd0\
\xfa\xc8\x12\x21\xbc\xb7\x7a\xc4\xdc\x84\xae\xbb\xcf\xac\xe9\xd5\
\xb2\x9a\x1e\x92\x90\x17\x24\x9e\xf9\xdf\xac\xbe\xc1\x7e\x9f\x2d\
\xaa\xdf\x79\x21\xc2\x72\x08\x1e\xb2\x70\xd7\x95\x67\x4f\xff\xf9\
\x3e\x54\x78\x7e\xdd\x96\x07\x42\x02\x60\x05\x4f\xee\x3d\xcf\x2c\
\xac\x20\x6c\x3c\xfd\x5d\xb8\xd2\x62\x01\x4a\x00\xc4\x2a\x62\xf2\
\x50\xab\xab\x0b\xe7\x9c\x64\x0c\x9c\x1a\x65\x8d\x00\x00\xd8\x1e\
\x7d\xfb\xbb\xbc\xd9\xb1\x6c\xcf\x9d\x8c\x82\xdc\xe4\xf8\x9b\x6f\
\x44\x1f\xf6\xd8\x2a\xbd\xb3\x71\xca\xf4\x6d\x4f\x2a\xd0\xda\xc5\
\xa3\x85\x42\xcf\x21\x13\xdb\xe7\xed\x98\xb9\xe2\xc4\xc3\xb4\xdc\
\xf7\xd9\x99\xf9\x12\x32\xa4\x79\xd8\xaa\x4b\xf1\x77\x55\xf1\xdf\
\xfe\x31\x6e\xda\x3a\xd2\x5a\x16\x0d\xa0\x6f\xf6\x2f\x3e\x80\x4f\
\xdc\xbe\xa4\xa3\x15\x46\x1a\xf9\x30\xed\x99\xc5\xb2\xcf\xac\xde\
\x91\x68\x3d\x64\x6b\x5c\xd2\xbd\x9a\x0c\x67\x6a\x07\x45\x49\xcc\
\xbc\xce\x5e\x3c\x1e\x8f\xc7\x73\x0f\xea\x33\xf7\xcf\x14\xf1\x87\
\x41\xeb\xdb\xdd\x3d\x78\xbc\x6e\xdb\xde\x28\xc7\x09\xb8\x28\xf5\
\xd4\xe2\x81\x21\xad\x78\x3c\x67\xbf\xc8\x68\x1f\x9e\xdf\xac\x78\
\x11\x41\xa0\x99\x87\x47\xb4\x0d\x9a\xf8\x4f\x5e\x61\xed\xe2\xd1\
\x3a\x22\xc5\x8a\x1f\xee\x9d\x45\x0e\x14\x79\xce\x5e\x81\x3d\xe7\
\x5e\xcc\xc7\x3e\x5d\xd1\xf0\xc2\xd3\x03\xab\xf6\xd8\x41\x4b\x13\
\x25\xfa\x52\x55\x69\x9b\x9f\x88\x42\xea\x90\x17\xbe\x49\xc9\xcc\
\x2f\x29\x2f\x2b\xc8\x48\x38\xf2\x6d\xa0\xcf\xa8\x33\x86\xd4\x67\
\xdd\xc7\x63\x84\x45\x23\x08\x82\x30\xe2\xbd\xb9\xd1\x9c\xcb\xab\
\xbf\x5e\x1d\x93\x21\xc4\x00\xd7\xde\xbf\xdb\xd7\xbb\xfe\xd7\xd7\
\x9e\xde\x80\xf1\x18\x61\xd1\x0c\x5d\x60\xfb\xe4\x8b\x26\x6e\x63\
\xf6\xdd\x19\x63\x3c\xf1\x18\x61\xd1\xe0\x32\x37\x14\x9b\x20\x20\
\x85\x10\xa6\x48\xa1\x5e\xc5\xaa\xb1\xa1\xfe\x75\xe3\x06\x68\x85\
\xa6\xcd\xa2\x42\x26\x35\x60\x93\x20\xfd\xba\x31\xf5\x08\x75\x49\
\xd3\x0d\x40\x21\xb9\xbe\x6a\x92\xfc\x95\x5f\xf9\xca\x27\x62\xf9\
\x53\x49\x83\x44\x48\x4a\xd3\x7d\x35\xa4\x69\x03\x24\xdf\x3a\x67\
\xd1\xf4\x54\x62\x02\x93\xc9\xf1\x06\x8b\x50\x87\x34\x4d\x03\x1f\
\x55\x5f\xa3\x6d\x8b\x58\xd9\xab\xcb\xdb\xbf\x9f\xbe\xe1\x91\x08\
\x00\x80\xe5\x5e\xfa\x69\x48\x68\x2b\x3e\x9f\xef\xec\xdb\x7d\xd9\
\x43\x91\xe0\xc6\xa4\xd6\xee\xc3\xce\x15\x93\x8f\x9f\xf0\xf6\x54\
\x5f\xaf\x31\x97\x4a\x08\x00\x14\xe5\x89\xfb\x66\xf5\x09\xf5\xe6\
\xf3\xf9\x2e\x5e\x6d\x3b\x0e\xdf\xf4\x4c\x0a\x00\x96\x75\x64\x64\
\xbb\xe0\x49\xe7\x0a\x14\xb2\xa4\xdf\x46\x77\x0e\x6a\xed\xca\xe7\
\xf3\x9d\xfd\xbb\x4d\x5a\xb3\x79\xe1\xb0\xa8\xd6\x7c\xbe\x93\x4f\
\xd4\xb0\xd5\x31\xf9\x18\xb9\xe0\x19\xb7\xf5\x9b\x6e\x6d\x5c\xf9\
\x2e\xfe\x9d\xc7\xfd\x12\x57\x44\x76\x86\x58\xd1\x91\xfe\x9e\x7c\
\x3e\x9f\x1f\xb6\xea\x85\x4c\xfc\xf0\xc7\x1e\xfe\xae\x7c\x3e\xdf\
\xad\x5d\x8f\x19\xfb\x5f\x08\xc9\x5c\xa0\xb9\xd7\xd7\x8f\xee\x1c\
\xe0\xca\x77\xf6\x89\x9e\x7c\x34\xe3\x63\x37\xa9\x3d\x30\x95\x08\
\x3f\xa0\x22\x6e\x4a\x6b\xf7\x51\x31\x42\xb5\x24\x3e\xf5\x1a\xa9\
\x9a\x44\x5c\xad\x62\x5c\x8f\x72\x31\x2e\xbc\x3d\xd9\xdb\xa5\xdf\
\xe1\x57\xef\xf3\x33\x1e\xee\x1f\xe7\xc3\x73\xe9\xbd\xe1\xea\xb3\
\xf4\x8c\xe7\xff\xcc\x0f\xe1\x05\x7c\x17\x5f\x41\xa0\x6f\xff\xe8\
\xef\xde\x7a\xc8\x96\x9b\x29\x69\xf7\x0e\xcf\xeb\xe4\xe2\xf3\xcd\
\xe5\x22\x45\xe9\x85\x2f\x5d\xda\xcc\x8b\x2f\x95\x4a\xa5\x52\x19\
\x8a\x13\x58\xc9\xab\xc4\x57\x19\x79\xef\xdf\xc6\xef\x18\xea\xc9\
\xef\xbe\x33\x1d\x25\x74\xe9\xc6\x84\xd6\xc0\x54\x22\xd4\x27\x4d\
\x37\xe4\xea\x4c\x35\x7d\x69\x7d\xcb\xc5\xa0\x02\x00\x40\xb3\x74\
\xe4\x3b\xd8\x5b\x3a\x0c\x9b\xd2\x63\x43\x6c\x8e\x6f\x48\x80\xbb\
\x0d\xc2\x9f\x38\xc4\xf5\xef\xab\x2f\x8a\x24\xcd\xff\xd9\x9f\xe8\
\x34\x35\x66\x6a\x27\x2f\x26\xf0\xfa\xf1\xc7\x6b\xff\x4c\x3f\x9d\
\x24\x8e\x00\x00\x20\x74\x16\x5b\xb9\x61\xb7\x8d\x4f\xa0\x0d\x00\
\x00\xb4\x1c\x33\xa3\xeb\x96\x49\x8f\xb2\xa5\x80\x97\xa9\x5d\x37\
\x06\x80\xae\x19\xd8\x1d\xe8\x8f\xd0\xdd\x42\xbd\x72\xd4\xa4\x69\
\x9a\x11\xf2\x07\xea\x5f\x2e\xae\x02\xba\x45\x0b\x73\x20\xad\x90\
\x13\x00\x00\x9a\xb9\x9d\x39\x90\x94\x49\x24\xf9\x49\xf9\x8a\xb4\
\x0d\x5d\xdc\xf8\x7c\x3e\x9f\xef\x3b\xf6\xba\xa4\x22\x3f\x5f\xac\
\xf6\xda\xc2\xde\xc7\xac\x1f\xd7\xad\xad\x3b\x9f\xcf\xf7\x1b\x77\
\x51\x28\x97\x62\x84\x4e\xdd\x58\x6b\xe0\x9a\x84\xd1\x90\xa6\x69\
\x46\xc8\x1f\xf8\x04\x72\x71\xd5\xb1\x1e\x52\xf9\x79\x03\x42\x43\
\x00\x20\x63\x60\xf8\xae\x88\xcf\xcc\xf9\x88\x77\x67\x87\xd8\x33\
\x59\x1c\x3a\x2a\x91\x7f\xa8\x57\xc1\xad\x25\xd3\x76\x95\x0e\x3d\
\x94\x90\xf1\xee\xdd\x8b\x03\x5f\x5a\xd3\x01\xd0\xad\x1b\x6b\x0d\
\x0c\xe8\xfa\x23\xd4\x84\x5a\x12\x34\x23\xe4\xef\x43\x5d\xd6\xa7\
\x5c\xac\x77\xf0\xcd\xf6\x1e\x3a\xba\x75\xea\xa6\xf9\xbf\x5e\x7a\
\xf2\x36\x37\x2f\xeb\xf5\x93\xc7\x99\x22\x02\xb0\x78\x01\xf6\xe5\
\x71\x27\x63\x92\xdf\x65\x26\x3d\x4e\x2a\x96\xca\x31\x40\xa0\x15\
\xe5\xe5\x62\x19\x4a\x00\x80\x00\xa0\x5b\x37\x26\x30\x2d\x81\xa9\
\x44\xa8\x85\xc2\xaa\x49\x34\xf0\x70\xa6\x81\xe4\xe2\x7c\x4c\x45\
\xe6\x25\xa4\xcf\x57\x86\xba\xf4\x3e\xf6\x5e\x41\x10\x04\x9a\xbd\
\xbf\x87\x4b\xe4\x86\x57\x32\x82\x40\xf3\xe3\xb6\x4e\xfa\x3c\xd8\
\x9b\xc7\xe3\xf1\x3d\xc3\x46\xef\x4b\x93\x13\x04\x9a\x7f\x7d\xc5\
\x80\x40\x37\x1e\x8f\xe7\xdd\x79\x7e\xec\xfb\xb4\xbf\xe7\xf7\x09\
\x74\xe3\xf1\x78\x3c\x1e\xbf\x55\xbb\x5e\xcb\x13\x44\x84\x4e\xdd\
\x58\x9a\xae\x2d\x30\xa5\x08\xab\x97\xa6\x3f\xbd\x6a\x6f\x7a\x9a\
\xaa\x91\xa3\x91\x1a\xb5\x8d\x50\xe6\xad\x37\x34\xd2\x13\xb5\x8d\
\x50\xe6\x85\x62\x13\x04\xa4\x10\x52\x08\x01\x29\x34\x6a\xd4\xb7\
\x30\x5b\xfb\xf8\x75\xc7\x00\x29\x04\x54\x84\x59\x59\xc6\xa9\x45\
\xfd\xda\xb7\xe2\xf3\xf9\x9e\x7d\xb6\xa5\xea\x5c\xf4\xa9\x37\xc3\
\x70\x75\x31\x30\x20\x81\x14\x5a\xc0\xfb\x73\xf3\x17\xfe\x63\xb1\
\xe8\xf8\xfd\x41\x6e\x34\x29\xd3\x5e\xa7\x19\xb4\x41\x0c\xc3\x90\
\x42\x0a\x90\x65\xdd\x7d\x8d\xb7\xdb\x38\x3c\xc4\xc9\x4a\xdf\x64\
\xa6\x01\x0c\xc3\xd5\x1e\x8a\x6e\x24\xdd\x5c\x7d\x48\xbe\x3a\x85\
\x59\x6d\x4a\x2f\x2e\x17\xc9\xe4\x71\x13\x7d\xf9\x7c\x3e\xdf\x6d\
\xcc\x8d\x52\xb5\x3c\xa8\xe4\x55\x87\x2a\xab\x1a\xbf\x8e\x35\x59\
\x9f\xf1\xab\x16\xf5\xe6\x59\x78\x8e\x5a\xb1\x64\x80\x33\xd9\xb0\
\x28\xe6\xb0\xc1\x54\x7b\x2a\x4b\x2c\x05\x4f\xce\x6c\x9e\xd1\x27\
\x28\xa0\xf7\xca\xdb\xb4\xc0\x30\x7b\x06\x01\x2a\xee\xff\x3c\xef\
\xa0\x74\xec\xd1\x7b\xcf\x9f\xdf\x3f\xbb\x65\xbc\x0f\xc7\xb2\xfd\
\xb0\x28\xd6\xb3\x0b\xcf\x84\x00\x00\x20\x49\xb9\x78\x4f\x11\x36\
\x22\xdc\x06\xc1\xb2\x8f\x4d\x1c\xbc\x2a\x39\x78\xc9\xf1\x3b\x0f\
\xef\x5d\x5c\xee\x9d\xf3\xe0\x69\x11\x0a\x00\xcd\xd2\x3d\x30\x3c\
\xbc\xad\x33\x97\x26\x7f\xbd\x63\xf4\xf8\x3f\xa4\xfd\x36\x5f\x88\
\xbb\xf9\xf7\xca\xfe\x2e\x1f\x65\x29\x2c\xe3\xd0\xa4\xb1\x3b\xca\
\x07\x6e\xbf\x7e\xfb\xc4\x0f\xa1\x19\xbf\x4e\xfa\xe1\x5a\x31\x0e\
\x00\x40\x58\xd1\xbf\x3f\x49\x4f\x4f\x4f\x4f\xd9\x1f\x9a\xa4\x96\
\x07\x6d\x79\xd7\x11\xbf\x4e\x9f\x1b\xcd\xd2\xaf\x47\x67\x3e\x13\
\xb1\xf0\xf9\xac\x8b\x0b\xab\x9a\x18\x34\x73\x68\xa4\x1d\x69\x7d\
\x4b\xbe\xb2\x97\xda\x85\x59\x34\x5d\x9b\xd2\xdb\x11\x01\x00\xd0\
\x59\x1c\x52\x98\xd5\xcc\x83\x96\xae\xb7\xd6\x86\x61\x5d\x31\x68\
\xe6\xd0\x48\x47\xa4\xf5\x2d\xf9\xea\x12\x66\x65\x5a\x95\xde\xaa\
\x43\x79\xcd\x3c\x50\x8f\x9f\xba\x61\x98\x7a\x0e\x8d\x94\xc2\xfa\
\x96\x7c\x75\x09\xb3\xda\x95\x5e\x35\xdd\x4e\x33\x0f\x94\xe3\xa7\
\x6e\x18\xa6\x9e\x43\xe3\x9d\x17\xd6\xab\xe4\xcb\xd4\x21\xcc\x6a\
\x57\x7a\xd5\x86\x57\x5a\xf2\xa0\x41\x40\xad\x0d\xc3\x2c\xca\x39\
\x34\x2d\xbd\xb0\xee\x24\x5f\xdd\x86\x5e\x2d\x4a\xaf\xaa\x3e\xac\
\x33\x0f\x75\x6b\x18\x26\x08\xca\x39\x84\x92\xaf\xe9\x4b\xbe\x8d\
\x73\x32\xde\xa4\x24\xdf\xc6\xa9\xda\x37\x25\xc9\xb7\x91\xaa\xf6\
\x4d\x09\x50\xa9\x80\x14\x42\x40\x0a\x3f\x11\xea\x5c\xd4\x35\x9a\
\x6d\x9f\x9b\x08\x85\x95\x92\xa9\x0e\x55\xb6\xc1\x23\xac\xc5\x70\
\xa6\xa9\x75\x3b\x35\x56\x65\x3f\x59\x84\x90\x42\xbd\xf3\x8d\x9a\
\xaa\xb2\x9f\x2c\xc2\xc6\xd7\x91\x56\x2b\xf9\x02\x00\x80\xa2\xe4\
\xc1\xee\xa9\x3d\x83\x3c\xf8\xce\x3e\x51\x83\x97\x9e\x4a\xfd\xa8\
\x2c\x68\x97\x4c\x95\xaa\xac\xec\xd5\xf6\x31\x5d\x02\xbd\x5d\xf9\
\x7c\xbe\x47\x60\xb7\x31\x6b\x2e\xbe\xfd\xb0\x0c\x59\xc3\x08\xb5\
\x5b\x88\x7d\xa3\x86\xae\xb8\x94\xfd\xe1\x0c\x6e\x6d\x26\xde\x9a\
\xfa\x8d\x95\x99\xa9\x3c\x4e\xdb\x08\xb7\xd1\xa3\xe0\xf2\x25\x08\
\x34\x63\xff\x00\x77\x8f\x3e\x3f\x5f\x7c\x96\x9e\x7a\xff\xc8\xac\
\x30\xa7\xb6\xb3\x63\x4b\x15\xba\xdd\xb6\xca\x65\x4c\xe1\xed\xc9\
\xde\xce\x7d\xfe\x78\x92\x91\x95\xfe\xf4\xca\x96\xd1\xfe\xbc\xc0\
\xf9\xb7\xca\xf0\x9a\x47\xa8\x66\x21\xee\x7b\xe0\xf9\xbb\x9c\x8c\
\x67\x17\x57\x74\x77\xf1\x18\x7e\x8a\x5c\xde\xd3\x66\xe2\xad\x99\
\xdf\x58\x25\x33\x46\xba\x46\x2a\x7b\x73\x68\x7a\xb7\xd6\x4e\x3e\
\xdd\xbf\x59\xfd\x67\x7c\xb6\x44\xb9\x41\xb5\xf8\xd1\xa2\x40\x7e\
\xe8\x9c\xbf\x5f\x94\x7e\x74\x30\xcb\x92\x37\x45\x39\x91\x56\x24\
\x82\x20\xf0\xb2\x98\x49\xde\x2e\x83\xcf\x15\xe3\xd2\x17\xab\xc3\
\x9d\xa2\x7f\x4d\x21\x17\xbe\xd1\xb4\x6d\x9d\x5d\xb4\x50\x58\xb9\
\x78\x5d\x74\x61\xb4\x87\xeb\xf0\x8b\xa5\xb5\x88\x50\xdb\x62\x37\
\x81\x97\x5c\x18\xe2\xea\x3b\x23\xbe\x42\x6d\x17\xef\x9b\x93\x5a\
\x7b\x7f\x73\x53\x58\x35\xb0\x28\x7e\xa6\xaf\xcb\xa0\xf3\x25\x38\
\x41\x10\xb2\x57\xeb\x22\x5d\xba\xed\xc9\x10\xbf\xfe\xa5\xa3\xb3\
\x32\xdd\xf2\xeb\x63\x5b\xb5\x9e\x74\xab\x48\x25\x33\x26\x2f\xf9\
\xca\x0b\x53\x8a\x68\xfc\x00\x47\x16\x39\xb6\x30\xe3\xfb\xd8\x11\
\xef\x53\x0b\x31\x9d\x6e\x5b\x1d\x2f\x14\xcb\x56\x01\x0e\x78\xee\
\x9b\x22\xb4\x8e\x22\x54\x8e\x76\x58\xb6\x2d\xcd\x24\x85\x15\x38\
\xa0\x60\xe2\xa5\xec\x37\x16\x14\x55\x66\xc6\xe4\x25\x5f\x56\x0b\
\xaf\x16\x78\x6e\xd2\x7b\xf2\x6d\x43\x88\x73\x92\x8b\x90\x96\x9e\
\x2d\x18\xba\x24\x53\x5d\x90\xe6\xa5\x97\x20\x76\x6e\x36\x8c\xba\
\x8a\x50\xc9\x21\x42\x43\xc8\xe5\x67\x0a\x26\x5e\xaa\x7e\x63\xbe\
\x43\x65\x66\x4c\x5e\xf2\x65\x7a\x0e\x99\x10\x94\xbb\x73\xc1\xff\
\xfe\x7d\x91\x99\xf6\xf0\xaf\x9f\x96\x5c\x66\x0d\x98\x16\x6d\x83\
\xe8\x92\x4c\xab\x42\x9e\x7a\xf9\xd2\xbd\xd7\x99\x69\x09\x27\x56\
\x2d\xbb\x0c\x7a\x4c\x8c\xb2\x41\x58\x35\x8d\x50\xaf\x85\x58\xbb\
\xd1\x57\x1f\xb4\xaa\xd0\xaa\x99\x31\xfe\x49\x05\xa3\x79\x9b\x7e\
\xb3\xb7\xf4\x9b\x4d\xd6\xd2\x8b\x23\x8b\xbe\x3d\x96\x2a\x02\x80\
\xed\x10\xf4\xe5\x9a\x5f\xba\xd9\x22\x0c\xdb\x71\xfb\xfe\x94\x2f\
\x59\xfe\x43\xbf\xdf\x0a\x59\xce\x61\x83\xfe\xf7\xf7\xd2\xae\x36\
\x34\x00\x68\x6e\xe3\xf6\x9f\xc0\x57\x2c\xdf\x30\xaa\x53\xae\x08\
\xd0\xcd\xed\xfd\xfa\xb9\x99\x6b\xd4\x9a\x20\x71\xf7\xd4\x9e\x73\
\x0b\x10\xfb\x80\xde\x3f\x1e\x5b\xd3\xab\x05\x79\x63\x8d\x22\xc4\
\x85\x6f\x1f\xdf\xbb\x6b\xde\x5b\x8c\x7b\x6a\x2f\x49\xb3\xce\x2b\
\xd7\x0d\x99\xb9\x66\x4c\xf4\x7a\x39\x00\x08\xd7\xae\xcd\xe8\x66\
\x74\x00\xf4\xae\x08\x30\xbd\xbe\x3d\xf2\x27\xb1\x74\x1d\x83\x61\
\xce\x00\x00\x1c\xcd\x49\x44\x41\x54\xe5\x8f\xc3\x7f\xc9\xab\x40\
\xcc\x9c\xba\x2e\x3e\xb6\x6f\xbc\x47\x65\x66\x1a\xab\xe4\x4b\x01\
\x9a\xc3\x10\xd3\x04\x5c\xe6\x36\x79\x34\xe1\x13\xb5\x2d\x3a\xee\
\x7e\xfd\xb6\x31\x50\x08\x25\x5f\x53\x07\xec\x48\x21\x85\x10\x90\
\x42\x08\x48\x61\x13\x40\xb5\x9b\x3f\x9b\x1a\x85\xf2\x57\xeb\x23\
\x9d\x22\xd6\x26\xc9\x9a\x0e\x7f\xfa\x36\x7f\x86\xad\xd0\xe8\xa1\
\x6f\xf3\x67\x53\x93\x7c\x01\x02\x10\x80\xd0\x10\x4a\xeb\x8b\xb5\
\xb3\x04\x57\xe9\xc9\x74\x04\xd0\x22\xc6\xea\xd4\x7b\xb5\x05\x96\
\xa7\xee\xfb\xb6\x57\x88\x8f\x2b\x9f\xcf\x77\x8e\x5a\xfc\x48\x42\
\x65\xf3\x67\xf5\x78\x4c\x4c\xf2\x25\xd0\x37\xbf\x75\x76\xe9\xb4\
\x45\xeb\x49\xba\xf5\x76\xb2\xae\xae\x00\x5a\xc5\x58\x5d\x7a\xaf\
\xce\xc0\xce\x7d\xf6\x3d\xcf\xcd\xcf\xcd\x48\xcb\x13\xe3\x14\x36\
\x7f\xd6\x88\x87\x01\x00\x30\xc2\xd9\xbd\x2e\x97\x2f\x40\xb8\xd6\
\x1c\x06\xd7\x9a\x4b\x03\xc2\xeb\x23\x03\xc6\xdd\xc4\x00\x00\xec\
\xee\x3b\x36\x63\xb3\x66\x7c\xf8\x7d\xf8\xe9\xa1\x6e\x96\x00\x80\
\x3a\x3b\x59\x17\xa0\x69\xda\x03\xe8\xb6\x04\xd3\xac\x9c\x5c\xf9\
\x3c\x4b\xbe\xe3\xcc\xef\xc2\xf6\x2d\x88\x4d\x93\x0e\xb2\x67\xe9\
\x0c\x4c\x6f\xe6\xe2\xea\x68\x6f\x09\xec\x01\x00\x80\xab\x6f\xf3\
\x67\x34\x45\x3d\x1e\x23\x55\x2a\x3e\x48\xbe\xee\xea\x92\x2f\xa0\
\x5b\xf2\x5a\xda\x3a\x38\x59\xd1\x81\x79\xe4\xa6\x1b\xb1\x15\x0a\
\xf2\x9a\x3d\xa1\xfc\xed\x64\xfe\x31\x2c\x37\x70\xf1\xd1\x4d\xc8\
\x8f\xeb\x7b\xfb\xff\xe4\x3b\x78\xce\xaa\x15\x13\x42\x6d\xac\x43\
\x47\x84\xca\x17\x1e\x4b\x14\x06\x5b\x9d\xbe\x58\x16\xb4\x34\xc2\
\x96\x06\xc4\xfa\x2c\xc1\xba\x3c\xc3\x4a\xc3\xed\x86\x8f\x57\x90\
\x90\x7c\x31\xd1\xb2\x32\x84\x8a\xde\xab\x3f\x30\x00\x00\x7b\x1f\
\xb3\xf9\xfb\xd5\x47\x6f\xbd\x2e\x96\xd3\x59\x40\x01\xba\x69\x6e\
\xec\xac\x19\x8f\x91\x52\xc8\x09\x58\x70\xe1\xf9\xd8\xc7\xe7\x0e\
\xee\xdd\x3b\x36\xe4\x7b\xdb\x2e\x5f\x8d\x9f\xf0\xf5\xb0\xce\x9e\
\x96\x74\x60\x1e\xba\x74\xaf\x8b\xbd\x39\x00\x34\x73\x5e\x2b\xef\
\xca\x3b\x2c\x2c\xb5\xbc\xe8\x2d\xfd\x46\xac\x39\x39\x6c\x71\xea\
\x85\x9f\xc7\x4f\x1b\xbb\xc4\xe7\xc1\x8e\x68\xdb\xe8\x49\x9f\x23\
\x53\x0f\x5e\xbf\x65\x7b\x5e\xda\x65\x63\xa5\x25\xf8\x75\x6a\x21\
\xfa\x45\x33\xed\x8a\xbc\xae\x00\xa4\x18\xbb\xf0\xd6\xbf\xdf\xb8\
\xaa\x54\x63\x45\x9c\x4a\x90\x4a\xbd\x97\x42\xe0\x0f\x9a\xf0\x9c\
\x33\x09\xdf\xb4\xb3\x95\xdc\x9a\x19\x39\xb1\x1c\x00\xf5\xcd\x9f\
\x35\xe3\x31\x31\xc9\x17\x60\xef\xae\xfc\xb1\xfd\xf7\x2b\xd9\x54\
\x76\xdd\xc6\xea\xe8\xb0\x5f\x5d\x01\xa8\x58\x82\x2b\x23\xa1\x10\
\x98\xca\xe6\xcf\x22\x0f\xd3\x76\xf9\x12\x84\x38\x71\x65\x37\xdf\
\x2e\xcb\x13\x2a\xea\xdf\x12\xac\x02\x9d\x01\xf4\x59\x82\x45\x77\
\x67\xf9\xb9\x0e\xbb\x2a\xa0\x14\x98\xda\xe6\xcf\xa5\x78\x13\x71\
\xf9\xd6\xad\x25\xd8\xa8\x3d\xc3\x70\x03\x2f\x0a\x96\x60\xe3\xf6\
\x0c\x43\x0a\x29\x58\x82\x8d\xdb\x33\xdc\x84\x55\xfb\xc6\x02\xa8\
\xda\x9b\x3c\xe0\x32\x37\xa4\xd0\x40\x98\xfc\x59\xbe\x9f\xd8\xdc\
\x4b\x21\x39\x78\x96\xaf\x0e\x85\xa7\xbe\xb6\x68\x36\x4c\xa4\xd1\
\x7f\x14\x70\x83\x50\x68\x12\x67\xf9\xea\x3a\x38\x57\x7f\xb7\x56\
\x8b\x2d\x9a\x4d\x69\x52\x61\x02\x67\xf9\x36\xc4\x16\xcd\x26\x36\
\x9c\x69\xe0\xb3\x7c\xf5\xca\xbc\x75\xb8\x45\xb3\x41\x22\xb0\x41\
\x3b\x4e\xcb\x53\xf7\x7d\xdb\xab\x21\x47\xa4\xfa\x58\xac\xcf\x8d\
\x9d\x75\x04\xd0\x82\xda\x6f\xd1\x8c\x96\xbc\x78\x98\xe1\xb4\xf8\
\xe2\x83\x87\xf1\x97\x36\xf7\x2a\xdd\x37\x73\xc1\x85\x02\x05\xd0\
\xb1\x89\xb4\x41\x3b\x4e\xcb\xf3\xef\xc7\x26\x7d\x6a\xd5\x5e\xf5\
\xfc\xc2\x6a\xce\x32\xa4\xee\xf2\xc5\x4b\x2e\x8f\xf6\xf2\x9b\x7e\
\x5b\x40\x10\x92\x27\xcb\x43\xbd\x46\x9c\x2b\x54\x10\xf2\xe4\xcd\
\x1d\x9c\x22\xd6\xbc\x94\x92\xea\xfd\xad\x49\x5e\xee\x55\x85\x78\
\xbd\x01\x94\x0b\xd0\xba\xcc\xbd\xb8\xac\x24\x2f\xa7\x0a\x72\xf3\
\x05\xa8\x5e\xff\x8d\x8a\xe9\x57\xae\xcf\xbe\xab\x9a\x9c\xd6\xc0\
\x42\xe1\xed\xc9\xde\x6e\x0d\xa6\xda\x53\x3a\xcb\xd7\x5d\xab\xcb\
\xb7\x1e\x55\x5c\x2d\x5d\x41\x75\x5b\x34\xdb\x18\x3e\x4c\xd2\x23\
\x02\xab\xda\x77\xab\x64\x58\xb7\x62\xdc\x44\xcf\xf2\xa5\x7e\xd8\
\x6f\xed\xb7\x68\x56\xe3\x50\x4d\x04\xae\xc6\xbe\xab\x9e\x61\x1d\
\x3b\x4e\x37\xd1\xb3\x7c\x59\xfa\x64\xde\x4a\x0a\x6b\xbd\x45\xb3\
\xce\x71\x8f\x3e\xfb\x2e\xc5\x1d\xa7\x19\x0d\x32\x8a\x31\x68\xda\
\xa3\xc7\xe5\x0b\x00\xe0\xf8\x8f\x9b\xe8\xbb\x67\x85\x62\xce\xd7\
\x01\xdc\x0f\x1d\x6e\x9b\x39\x07\x7f\x13\x2d\x58\x3b\xae\xc3\x1a\
\xb9\xb5\xab\x1b\x0e\xcc\xda\x70\xe9\xaa\x46\xdc\x40\xed\x01\xb4\
\xa5\xaf\xdd\xdc\x5b\xc3\x8e\xb4\x4a\xcb\xd2\x63\xdf\xad\xe2\x25\
\x66\x68\x0b\x6c\x0f\x00\x00\x4d\x76\x63\xe7\xc6\xb3\xf3\x73\x53\
\xdd\xd8\xb9\x11\xed\xfc\xdc\x48\x29\x34\x71\x15\xd7\x34\x56\x67\
\x20\xea\xac\x15\x42\xd5\xde\xe4\x5b\x61\xd7\xae\x5d\x33\x33\xde\
\x42\xed\xde\x84\x29\x8c\x8d\x8d\xed\x3f\x60\x60\x61\x21\xa4\xb0\
\x2a\x8c\x66\xdf\x66\x4a\xef\xc2\xa7\x4f\x9f\x4e\x9f\x31\x03\xb2\
\xa6\xaa\x8e\x50\xd1\x5a\x2b\x57\xbf\xa8\x9d\xf4\x4b\xf9\xe9\xa9\
\xce\xd3\xab\x9d\xc2\x76\xed\xda\x6d\xdf\xb6\xcd\x34\xea\xd6\x08\
\x5d\xbe\x1f\x4e\xfa\x9d\x7a\xfc\xfe\xb3\xa7\xb7\xf6\x8c\x71\xaf\
\x9d\x44\xa8\xcf\xd3\xab\x65\x38\xd3\xb5\x6b\xd7\x03\xfb\xf7\x59\
\x58\x58\xc0\xa6\x57\x43\x50\x3f\xe9\x97\x0a\xf4\x79\x7a\xb5\xb4\
\xc2\x3f\x8f\x1e\x31\x72\xfe\x6a\xee\xf2\x35\xd0\x85\xab\x57\xda\
\xd5\xbe\xe1\xb3\xda\x49\xbf\xca\xd3\x0c\xa9\x38\x78\xb5\xed\x53\
\x5d\xbd\xa7\x57\xd3\x1b\x6c\xcc\x0b\x6c\xb5\x75\xf9\x1a\xe6\xc2\
\xd5\xb5\x6f\x73\x65\x6c\x3a\x36\x7c\x16\xde\x9e\xec\xed\x36\xfc\
\x52\x01\xe9\xc2\x55\x54\x91\x09\xf5\x38\x78\xb5\xec\x53\xad\xcf\
\xd3\xab\xe9\x0d\x66\x00\x00\xc8\xa9\xa1\x51\xcd\x0b\xeb\xc4\xe5\
\x6b\x90\x0b\x97\xa1\xe3\xec\x5c\x95\x2c\x9d\xdc\xfb\xa8\xe5\xb7\
\xd7\x16\xf4\xf6\x61\x01\xe0\xb6\x72\xed\xcd\xb0\xf1\xdb\x6e\xff\
\xd8\xb9\x1f\x0b\x00\x95\x93\x7e\xab\x42\x8f\x83\xb7\xa5\xf6\x63\
\x81\xab\xf3\xf4\x6a\x7a\x83\x8d\xd4\xa8\x5d\x27\x2e\x5f\x83\x5c\
\xb8\x7a\xf7\x6d\xd6\xb6\xe1\xf3\xe9\xd4\x42\x0c\x50\xfc\x18\x4f\
\x9b\x83\x57\x8b\x82\xad\xf6\x9e\xa5\xe0\x0d\x36\xb5\x8d\x9d\x81\
\x79\xe8\xd2\xbd\xcb\x42\x3e\xba\x7c\xbd\xbd\xbd\xbd\xbd\x5b\x39\
\x5a\x58\x56\xfe\x36\x53\x16\x89\xc2\xc1\xbc\x95\x02\xac\xde\x7d\
\x9b\x75\x6d\xf8\x4c\xb1\x44\xda\x77\x75\xd6\x54\xb0\xb5\x79\x7a\
\xab\x3f\x5b\xb8\x31\xbb\x7c\x0d\x72\xe1\xea\xdd\xb7\x59\xd7\x86\
\xcf\x94\x47\x9a\x5a\x1c\xbc\x5a\x14\x6c\x7d\x9e\x5e\xcd\x22\x98\
\xd6\xc6\xce\x00\xa0\x25\xaf\x1e\xdc\xb9\xcf\x2e\x92\x0f\x75\xd5\
\x3f\xff\x62\xea\x96\x49\xb5\xa4\xa3\x6f\x23\x68\x86\x8e\x0d\x9f\
\xa9\x41\xdb\xae\xce\xb8\x50\x53\xc1\x66\x5b\x7d\xb3\x7a\xca\x83\
\xf9\xb3\xba\xff\x25\xb7\xf0\x1a\xb9\xfb\x9f\x0d\xfa\x8b\x00\x5d\
\xbe\x70\x63\x67\x08\x23\x58\x23\x85\x80\x14\x42\x34\xec\x60\xa1\
\x5a\xc9\x77\xed\xc9\x14\xe1\xf1\x7e\xb0\x9a\x8c\x9a\xc2\xea\xe7\
\xf5\xc2\xe3\xfd\x2c\x87\x9f\x87\xd5\x04\x3b\xd2\x4a\x6c\x9b\xdd\
\xd5\xb4\x2b\x0c\xa1\x99\x71\xe9\xf4\xa6\x4c\xa1\xc9\xb0\xc8\x32\
\x1f\xd0\xc7\x63\x10\x9f\x5e\x75\xea\x8e\xd8\x7a\xbb\xcc\x8c\x6e\
\xd6\x4c\x77\xb5\x21\x1c\xab\xb1\x83\x5b\xf5\xb3\x63\xfa\x74\xf1\
\x9e\x17\x61\xc6\xaa\xff\xec\x35\x8c\xcb\xd7\x04\x58\xc4\xe4\xaf\
\xd3\xcb\x5f\x95\xe3\x86\xae\xfd\x13\x0a\x1c\xc5\x09\x99\x82\x40\
\x31\x1c\x43\xeb\x4d\x3a\x50\xc9\x5e\x43\xba\x7c\x67\x6c\x89\x35\
\x5e\x0a\x71\x34\xf9\x55\x71\x8d\x6e\x54\x94\x4b\xb1\x52\x19\x2e\
\xa9\x50\x54\x48\x15\x8a\xfa\xcf\x5e\x83\x2d\xb0\x51\x67\x91\xc6\
\x62\x7b\xba\x5b\xfb\x32\xca\x2f\xbf\x94\xca\x69\x8c\xd6\x6d\x1c\
\x3f\xf3\xe0\x5a\xd1\x81\xb4\xbc\xec\x44\x6c\x11\x3b\xd4\x63\x30\
\x33\x7f\xdb\x2d\xa1\x18\x00\xc0\x34\x1b\xd8\x9b\xc7\x4a\x78\xfb\
\x77\x8e\x02\x61\x71\x82\x03\xed\xc3\x79\x1c\x2b\x06\x40\x65\xf2\
\x9c\xb4\xf7\x27\x92\xa4\xb8\x79\xb3\x61\xdd\x9a\xcb\x9f\x64\x9d\
\xc9\xc6\x80\xb6\x00\x95\x32\x2f\xc3\xec\xcb\xbe\x7c\xd6\xfd\xb4\
\xe3\x79\x38\xdd\xcc\xbc\x63\x7b\xbb\x20\x07\x16\x07\x43\x0b\x65\
\x0c\x5a\xf5\xa2\x3a\x2e\x4b\x78\x52\x24\x95\xe0\xd2\xf4\x82\x58\
\x05\x8a\x03\x00\xe8\xec\x88\x48\xc7\xb0\x16\x4c\x73\x06\x82\x49\
\xe5\x99\x99\x45\xd7\x93\x2a\x4a\x14\x00\xd0\x58\x21\x61\x8e\x11\
\x0e\x2c\x4b\x26\x42\x54\x94\x1d\xbe\x56\x90\xcb\xe0\x86\x05\xda\
\x87\xf1\xd8\xe6\x38\xfa\x2e\xb3\xf8\xf2\x0b\x41\x31\x46\x6b\x15\
\xa9\xad\x80\xf9\xec\x0f\xd9\x2b\x60\x36\xe4\x1a\xa9\x3e\x16\x11\
\x73\x5b\x8b\xf6\x9e\x36\xc1\x2e\x6c\x59\x41\x79\xc2\x6b\x05\x0e\
\x00\xab\x85\x5d\xef\x56\xc8\xbd\x5b\x6f\x9f\x08\x09\x73\x2e\x4d\
\xa8\xc0\x69\x19\x62\x45\x98\x65\x4b\xa6\x30\x1d\x05\x4c\x2b\x4b\
\x67\x44\x72\xad\x48\x01\x10\x66\xbb\x48\xe7\x6e\xcc\xf2\x0b\xb7\
\xf2\xde\xc9\x68\xad\x42\x5c\x7a\xd8\xd2\xe9\x00\x28\x50\x34\xb7\
\x50\x82\x8a\x70\x02\x61\x06\x6a\x0b\x80\x69\x7b\x82\x22\x3a\xf2\
\x43\x64\xc5\xe7\x62\x84\xc5\x08\xd3\xa7\x8d\xa3\x3d\x07\x00\x00\
\x58\xf6\x8e\x53\x3a\x5a\x98\xa9\xf6\x6d\xe5\xc5\x07\x62\x4b\x8a\
\x71\xbc\x20\x4f\x0c\x00\x00\x02\x51\xfa\x87\x72\xd0\x1d\x5b\xb0\
\xca\x5f\x64\x1d\xca\xc3\x39\xcd\x2c\x3a\x85\xf0\x46\xb1\xde\xed\
\x7d\x24\x96\xd2\x18\xce\x2d\xd9\xc2\x17\x99\x07\xdf\x29\x18\x4c\
\x50\x41\x30\xdb\x47\x39\x75\x42\x4a\xcf\xc5\xe6\x15\x31\xb8\x51\
\x61\x2d\x47\x33\x15\x7b\x12\x44\xb9\x5a\x0b\x58\x59\x49\xf4\x86\
\xa4\xb0\x1a\xfe\xe8\x96\xd6\x7d\x22\x5a\xf8\x99\xa1\x29\xe9\x65\
\xa7\x2f\x09\xb2\x3e\x9e\x69\xcd\xc4\x14\x18\x8d\x6b\x6b\x4e\x03\
\xc5\xb2\x22\x52\x17\x2a\x16\x64\x23\x0e\x01\x36\xb4\xf4\x02\xa2\
\x85\x93\x39\xab\xb8\x30\x4b\x0e\xe8\x96\x56\xe1\x2d\xb0\x07\xd7\
\x0b\x93\xca\x09\x00\x68\xa5\x72\x82\xa0\x03\x00\x00\x21\x17\xdf\
\xba\x27\x06\x00\xd0\xad\x6c\xb4\x06\xd0\x32\xeb\xb2\xb2\x6a\x67\
\x29\xbf\x77\xb7\x24\x55\x48\x00\x80\x26\xe7\x63\xd1\xae\x00\x00\
\x20\x2f\x2a\x38\xf0\x6f\xa1\xea\x60\x87\xc0\x71\x91\xce\x06\x4a\
\x48\x45\xf2\x32\x11\x0e\x44\x25\x17\x1e\x73\xa7\x87\xd9\xba\x3e\
\x17\xbf\xc6\xc9\xeb\xa8\x50\x8a\x03\x29\xa0\x5b\x35\x0f\xb5\xc5\
\xee\x5f\x2b\x7e\x2d\x20\x00\x90\x5f\x7b\x6c\x3e\x3d\xca\xc6\xed\
\x99\xe8\x95\xb6\x02\x02\x7a\xd5\x11\x69\xf3\x16\x76\xca\x09\xbe\
\x31\xf0\x07\x00\x40\xe8\x74\x1b\x73\x9a\x4c\x24\x2f\x12\xc8\x4b\
\x65\x95\x63\x02\xb4\xb4\xe8\x78\x42\x85\x75\x80\xcb\x9c\x7e\xae\
\xfd\xbc\xb8\x5c\x04\x00\xb9\xe4\x59\x31\xe2\xe1\xce\x61\xd3\xd9\
\xfe\x7c\x5a\x6e\x86\x58\x42\x00\x3a\x87\x65\x01\xd0\x5c\x89\xce\
\xc1\x84\xde\x00\x2a\x8d\x90\xc5\x05\x58\xa1\x8c\xd0\x7c\xe7\x09\
\xc5\x98\x40\xe5\x8f\x50\x8a\x53\xf9\x6e\x49\x26\x94\x55\x20\x8c\
\xe6\x6c\x44\x33\x4b\xe6\x00\x7d\x2f\xfd\x78\x00\xbb\xf8\x63\x30\
\x6d\x05\xac\xf2\x90\x81\x86\x50\xed\xf5\xbe\x05\xb1\xb2\xe2\x83\
\xe7\xca\xf9\xce\xd6\xa1\x5e\xbc\x19\xed\x15\xe9\x6f\xcb\x12\xd2\
\x04\xe9\x42\x9c\x20\xf0\x82\x8c\x82\xa3\x99\x45\xcd\x9d\xec\x86\
\x84\xf3\x3f\x2f\x4f\x3f\x5b\xa0\xc8\x48\x11\x81\x08\x6b\x4f\x07\
\x85\x2f\x5d\x74\x39\x5f\x41\x00\x80\xcb\x51\x09\x60\xb7\x60\x23\
\x29\x72\xed\x24\xe9\x0d\xa0\x84\x42\x2a\x13\x01\x2b\x47\xae\x7a\
\x48\xdd\x1d\xa9\x9e\xb2\x33\xb9\x4c\x2e\x50\x94\x6a\xa4\xab\x90\
\xca\x45\xc0\xca\x81\xf3\x21\x21\xa6\x19\xcb\x9c\x50\x14\xcb\x08\
\x00\xb4\x14\x50\x9d\x42\x63\xe3\xef\x63\x35\x63\x39\x99\x45\x39\
\x99\xc5\x57\xad\x2d\x82\xbc\xac\x3b\xfb\xa2\xb9\x0f\x44\x32\x0e\
\x87\xcf\x51\x94\x88\x14\x12\x81\xac\x1c\xb3\x32\x63\x21\x00\x00\
\x69\x51\xe9\x73\xd4\xe5\x8b\x50\x42\x92\x99\x9d\x29\x07\x00\x00\
\x4c\x58\xf1\x4a\x64\x1b\x11\x68\x93\x93\x28\x28\xa5\x73\xdc\x2d\
\x69\x88\x18\x00\x00\x10\x96\x59\x74\xfb\x66\x68\x4a\xfe\xdd\x72\
\xed\x01\xb4\x50\x58\x21\x48\x28\xb1\xed\x1e\x66\x57\xf2\xb8\x2c\
\x53\x0c\xac\xb9\x1f\x3e\x9e\x33\xb0\x23\x45\x5a\xf0\x2c\x9c\x45\
\x12\x31\x93\x1b\x1e\x68\x09\x72\x73\x33\xab\x76\x86\xca\x84\xba\
\x04\x37\xcf\x4f\x2c\x2f\xa6\x9b\x45\x04\x59\x2a\xde\xe5\x64\xc8\
\x81\xd6\x02\x36\x30\x85\x06\xce\x25\x08\x71\x99\xf0\xce\x43\xe1\
\x1d\x00\x00\x00\x5c\xeb\x66\x5f\x44\x34\x6b\xce\x00\x40\x81\xe5\
\x66\x17\x5c\xc8\x53\x00\x00\x80\x42\xf6\xf8\x8d\x2c\xac\x2d\x88\
\x4b\x93\xa1\x1f\x1f\xe9\xb8\x3b\xef\x99\xc1\x2d\x86\xf6\x6a\xc1\
\x90\xa3\x25\x08\x90\x97\xe2\x38\x00\x08\x93\xc9\xb3\xe3\xa2\x39\
\x34\xa4\x44\x7b\x00\xad\xc3\xf7\x47\xf1\xef\x90\xb6\x76\x9d\xa2\
\xdd\xac\x18\x80\xc0\xb0\xfc\x77\x72\x39\x01\x00\xa1\x10\x8a\x0d\
\x28\x06\xbb\xb9\xed\x97\xae\x2c\x73\x80\xe5\xe7\x14\xfe\x95\x58\
\x21\x26\xb4\x24\xf4\x38\xfe\x1d\x23\xc8\xbe\x57\x37\x5b\x73\x1c\
\xcd\xce\x7c\xff\xe7\x73\xd1\x87\x3e\x53\xb3\x80\xaa\x4f\x87\xb2\
\x17\xb5\x6d\xde\x02\x00\x30\x6b\xfa\xd4\xad\xdb\x77\x96\x14\x17\
\x01\x00\x2e\x9c\x3d\x93\x7c\x79\x93\x69\xae\x91\x22\x66\x96\x4c\
\x16\xaa\x90\xe2\x80\x63\x69\xd1\x25\xd2\x8e\xfd\x34\xe3\xef\x6c\
\xd5\x23\x03\xf4\x06\xa8\xc3\x75\xe8\xca\xf9\x49\xbd\x44\xdf\x48\
\x97\x7e\x19\xad\xdb\xf0\xbb\x3a\x32\xd9\x08\x40\xa5\xb2\xb4\xb4\
\xdc\x0b\xef\xaa\xd2\xa3\x37\x80\xe9\x40\x0f\x85\x96\xc3\xcf\x9b\
\xa8\xd8\x74\xeb\x18\xb8\x55\xbb\x00\x75\x88\x43\x47\x1b\x8e\xc2\
\x1f\x86\x78\x83\x21\xaf\x01\x84\x31\xf7\x38\xb0\x0a\x4c\x1d\xd0\
\xa8\x6d\xfa\xad\x50\xf9\x11\x22\xac\x8b\xaa\xd3\xb4\x7a\x73\xf9\
\x6a\xc6\x8c\xe5\x5e\xdf\xb5\xe5\x44\x72\x69\x41\xf2\x93\xe4\x52\
\x83\xb5\x0d\xd8\x91\x6a\x5f\x1d\xaa\xbd\xcb\x97\xfa\xd6\xd0\xf2\
\xd4\xc3\x3f\xae\xff\xb7\x82\x48\xfc\xa1\xff\xb0\x4d\xcf\x25\x8d\
\x9d\x42\xd3\x71\xf9\x52\xde\x1a\x5a\x9a\x7c\xfc\xd4\xfb\x36\x63\
\x7a\xf2\x6b\xf8\x39\x07\x3c\x2d\xa6\xd6\xd0\xe5\xf2\xa5\xb8\x35\
\xb4\xf8\xe5\xb1\xb3\x25\xed\x97\x7c\xd6\x92\xf6\xaa\xd1\x8e\x48\
\x4d\xd5\xe5\x4b\x6d\x6b\xe8\x8a\xa7\x47\x2e\x55\x84\x8d\xeb\xd2\
\xa2\x2a\x13\x9a\x96\x60\xe1\x7f\xdf\xfa\xb8\x0f\x39\x53\x88\x03\
\x00\x00\x5e\x78\x66\x88\x7b\x9b\xef\xee\x89\x8c\x9c\x42\x2d\x1b\
\x3b\x03\x1a\xd3\x9c\x45\x67\x71\x99\xd4\x8e\x63\xd6\xb6\x19\xb2\
\xae\x6d\x96\x75\xed\xa8\x5c\x19\x5b\xe6\xe1\x09\x23\x36\x64\x77\
\x5a\x7d\x36\x2e\xf6\xd0\x74\xa7\x6b\x73\x86\x2c\xbe\x59\x46\xbe\
\xe8\x10\x56\xf4\xef\x4f\xd2\xd3\xd3\xd3\x53\xf6\x77\xd1\x3c\x0a\
\xb3\xba\x98\x85\x8f\x0e\x5f\x95\x75\x18\xdb\xb1\xb9\x1a\x11\x6c\
\xcf\x91\x1b\x4f\xdd\x7a\xf4\xf8\xce\xb1\x6f\x6d\xae\x2c\x9b\x73\
\x34\x03\xb3\x0c\x1e\xf5\x99\xd9\xb3\xd3\x0f\xcb\x08\x00\x80\xe8\
\xf9\xc5\x17\x9c\xa8\x81\xfe\xe6\x00\xcb\x30\x52\x0a\xe5\x69\x87\
\x67\x74\x6f\x13\xfc\xd5\xf6\x64\xde\xe8\x3d\xf7\x5e\xc5\xff\xb5\
\x76\x52\x17\x67\x36\xa2\xe6\xf2\x75\xe5\xf3\xf9\x7c\x3e\xdf\x63\
\xec\xd9\x33\x95\xbf\x2b\x9b\xc1\x07\x97\xef\x9a\xa9\x9d\xbc\x3c\
\xc2\x47\xff\xf8\x63\x17\xf4\xf6\xe9\x24\x31\x00\x1f\x5d\xbe\xae\
\x6d\xbe\x98\xf9\x5d\x98\xe2\x59\x6c\x9a\x14\x00\x19\xe9\xf2\x5d\
\x3b\xbb\x67\x1b\x77\x8f\xb6\x5d\xfb\x46\x3b\x32\xd4\xb3\x74\x72\
\xef\xa3\x96\xdf\x6e\x5a\xd0\xbb\x8d\xbb\x67\xd8\xc8\x95\x6b\xbf\
\x90\x9e\xd9\x76\xbb\xec\xc3\xc8\x92\x74\xf9\xb2\x59\x0c\xcd\x0a\
\xad\x26\x66\xa2\xec\xde\xa1\x1b\x78\xa7\xb1\x91\xd6\x1a\xea\xa1\
\x8d\x4f\xa0\x8f\x6b\x4b\x07\xb7\xc8\x31\x33\xba\x5a\x64\x3f\xca\
\x96\x02\xcb\xa0\x91\x9f\xb1\xef\x1d\xb9\x53\x8c\x03\x71\xf2\xbf\
\x8f\x40\xe8\xa0\x76\x16\x00\x4d\x6f\xd4\x67\xf9\x1a\x89\xcb\x57\
\x77\xcc\x44\xc9\x9d\x43\xb7\x19\x9f\xed\x0b\x6d\xa6\xce\xa0\xd6\
\x43\x7d\x2d\x03\x47\xf7\xb1\x18\xfe\xfb\xd5\xf7\x5d\x7c\xce\x5c\
\x17\x05\xad\x08\xb6\x42\x80\x38\x3f\xa9\xc1\x54\xfb\xea\xd1\x98\
\x5c\xbe\x3a\x63\xc6\x0b\x6f\x1e\xbc\x6b\xd6\x6b\x74\x90\x46\xdf\
\xab\xdd\x12\x0c\xcc\x82\xa6\xce\xf4\x4d\xda\xb5\xef\xdc\xd1\x2b\
\xf2\x2e\x13\xa3\x9b\x93\x5b\x58\xd3\xc0\x47\x8b\xa1\xb1\x35\xc4\
\x46\xe3\xf2\xd5\x15\xb3\x22\x3f\xe6\x60\x82\x55\x9f\x91\x6d\x3f\
\x74\x1b\x34\x33\x1b\x8e\x38\xfd\xf1\x9b\x72\x4c\xc7\xa1\xbe\x00\
\x30\x5c\x86\x7c\xdf\xbb\x6c\xe7\xc2\xbf\xf0\x2f\x26\x92\x9d\x2f\
\xdb\x7b\x28\x74\xf9\x2a\xd3\xa9\x37\x97\xaf\xf6\x98\xb1\xdc\x2b\
\x87\x9f\xb6\x18\xf8\xb3\xff\x87\xbd\xa8\x81\x59\xc0\xc4\xe9\xdd\
\x6e\x6f\xfe\x76\x6d\xc7\xd8\x75\x5a\x2c\xc1\xe4\x73\x6d\x15\x3e\
\xe5\x2b\xe7\x53\xe7\x06\x7c\x1d\x68\xfe\xb1\x8c\xd0\xe5\xdb\x40\
\x40\xd3\x77\x75\x73\x89\x5c\x97\x24\xa3\x7c\x83\xb0\xb4\xbc\xe4\
\x6d\xfc\xef\xe3\x02\xfc\xc6\xfd\xa3\xba\x09\x35\x5c\x60\x6b\xa0\
\x09\xd3\xdb\x73\x47\xdf\xb8\x0e\x1f\xe4\x49\xd5\x74\x21\x7b\xb5\
\xa5\x9f\x6f\x40\x87\xf1\x07\xe9\x53\x0e\x6c\xe9\xa7\xba\x09\x35\
\x5c\x9d\x69\x18\x30\xbd\x66\xdf\xca\x9c\x6d\xc0\x0d\xec\x36\x4b\
\x6e\xe7\x2c\x81\xcb\xdc\x8d\x12\x90\x42\x48\x21\x44\x83\x53\x48\
\xce\xeb\x3f\xd9\xd4\x1e\x9e\xe5\x5b\xf7\x14\x7e\x7a\xd5\xbe\x89\
\x9f\xe5\xab\x23\x66\x4a\xff\xab\x7d\xda\xf9\xe9\x6b\xc7\x04\x4e\
\x81\xfd\x28\xd8\x2a\xea\xe1\x2c\xdf\xea\x63\xae\x41\xba\xf0\x2c\
\x5f\x5d\xa3\xfe\x7a\x3b\xcb\xb7\xfa\x98\x0d\x4f\x17\x9e\xe5\x5b\
\xff\x67\xf9\xaa\xed\xf0\x5c\x48\xc6\xfc\x4e\x7b\xce\x55\xd2\xd5\
\xbb\xa5\xf3\x23\x09\xc0\x0a\xe2\xe0\x59\xbe\xf5\x7f\x96\xaf\x3c\
\xff\x7e\x6c\x52\xcb\xef\x2f\x26\x24\x26\xc4\x1d\x99\xe5\xc7\x21\
\x23\xb0\xd2\x9e\xf3\xca\x27\x57\xe7\xe1\xbd\x95\x51\x31\x32\x0e\
\x4d\x1a\xdb\xc0\x2e\x5f\x5d\x7d\xa9\xae\x8d\x9d\x71\x8d\x9d\x90\
\x89\x90\xe1\x61\xf8\xbc\x53\xcf\x84\x5d\x3b\x32\x53\xce\xdf\x10\
\x05\x2f\x0b\xb7\x41\xd0\xd4\x13\xbb\xee\x3b\x4c\xbe\xb2\x64\xa0\
\x1f\x1b\x80\x0a\x57\x5b\x26\x22\x01\x00\x00\x9a\x4d\x87\x85\x7b\
\x3a\x00\x00\xd0\xd7\x7b\xb5\x06\xd0\xb2\xb0\xa5\x63\xc3\x67\xf3\
\xb0\x55\x97\xe2\x17\xab\x0e\x3a\x10\x86\xa5\x9d\xce\xa5\x77\x95\
\x1d\x9e\x2b\xde\x90\xc1\x9b\x69\xcb\x39\xf8\xe8\x96\xd2\xbd\x2b\
\x75\x65\x54\x68\xca\xaf\xfb\x13\x9d\x68\x46\xc8\x1f\x50\x4a\xbe\
\xae\x5a\xcf\xf2\xfd\x2c\x7b\x7d\x6f\xff\x76\xbd\x66\xed\x7d\x58\
\xaa\x00\x88\x75\xe8\x88\x50\x79\xec\xb1\x44\xa1\x24\xf9\xf4\xc5\
\xb2\xa0\x11\x11\xb6\x34\xfd\x47\xf5\xd6\xd1\x59\xbe\x55\xe0\x68\
\x6f\x69\x50\x83\xd0\x9a\xf3\xca\x1c\x7e\xd4\xab\xf9\x7c\x3e\x9f\
\xef\x3b\xf6\xba\xa4\x22\x3f\x5f\xcd\xd2\x46\x86\x69\x18\xc9\xb7\
\x09\x9f\xe5\xab\xca\xa1\x96\x9c\x57\xc9\xa1\xbe\x2d\x9d\xc9\x30\
\x0d\x20\xf9\xc2\xb3\x7c\x95\x45\xd4\xcc\x79\xe5\xa8\x88\x82\x5e\
\x4d\x86\x81\x67\xf9\x36\xdc\x59\xbe\x5a\x73\x5e\xd9\xc4\x28\xe8\
\xd5\x4c\xaf\x6f\x8f\xfc\x09\xcf\xf2\x85\x67\xf9\x1a\x27\xe0\x59\
\xbe\x26\x0f\x78\x96\x2f\x04\xa4\x10\x02\x52\x08\x41\x9d\xc2\x4f\
\x2c\xf9\x9a\x0c\x4c\xc8\xe5\x0b\x8d\xda\xda\x00\x5d\xbe\xf5\x07\
\xe8\xf2\x6d\x2a\x93\x8a\x4f\x09\xe8\xf2\xd5\xdf\xa9\x41\x97\x2f\
\x74\xf9\x42\x97\x6f\x43\xbc\xf2\xa0\xcb\x17\xba\x7c\x41\x93\x72\
\xf9\x1a\x21\xa0\xcb\xd7\x30\x97\xaf\x71\x4e\xed\xa1\xcb\xd7\x00\
\x97\xaf\x71\xcf\xeb\xa1\xcb\x17\xba\x7c\xa1\xcb\x17\xa2\x5e\x26\
\x4c\xd0\xe5\x6b\xea\x80\x2e\x5f\x08\x48\x21\xa4\x10\xa2\xe9\x52\
\x08\x5d\xbe\x2a\x8b\x33\xd2\x9a\x69\xbc\xea\x14\x7e\x7a\xd5\xbe\
\x89\xbb\x7c\x2b\x21\xba\x5f\x33\x8d\x17\x28\x64\x2a\x67\x65\x36\
\x84\x6a\x4f\xdd\x59\xd8\x90\x6b\x43\x54\x05\x5b\x8d\x36\xa1\xcf\
\xe5\x5b\x5b\x94\x5f\xf9\xca\x27\x62\xf9\x53\x49\x03\x4f\x2a\x9a\
\xb4\xcb\xb7\xb6\xfd\x03\x26\x93\xe3\x46\x31\x9c\x69\x42\x2e\x5f\
\xd9\xab\xed\x63\xba\x04\x7a\xbb\xf2\xf9\x7c\x8f\xc0\x6e\x63\xd6\
\x5c\x7c\xab\xe5\xe8\x4b\x4d\x8d\x57\xd7\x45\x00\xb0\xa2\x23\xfd\
\x3d\xf9\x7c\x3e\x3f\x6c\xd5\x8b\x8a\x5c\xe8\xf2\xad\x7f\x97\x2f\
\x5a\xfc\xec\x41\xba\xd3\xa2\x73\xf1\xf7\xe2\xfe\xd9\x30\x10\xfc\
\x39\x79\xd0\x8f\x71\xe5\x1a\x2e\x25\x0d\x8d\x57\xd7\x45\x00\x18\
\xcd\xbf\x3a\xf9\x32\x3d\x3d\x3d\x3d\xee\x07\xd7\xa7\x3f\x43\x97\
\xef\xa7\x72\xf9\xba\x7a\xb8\x3a\x5b\x02\xe7\x19\xbf\xae\x7f\x18\
\x36\x7d\xfb\x9d\x25\xd1\xbd\xab\xd4\x3d\xdd\xc6\x27\xd0\x06\x00\
\x00\x5a\x8e\x99\xd1\x75\xcb\xa4\x47\xd9\x52\xe0\x6e\xa1\xed\x22\
\x00\x00\x20\x74\x16\x9b\xcd\x66\x93\x15\x02\x5d\xbe\x7a\x50\xd7\
\x2e\x5f\x9a\x65\xab\x00\x07\x3c\xf7\x8d\x5a\x9b\xc7\xde\xc7\xac\
\x1f\xd7\xad\xad\x3b\x9f\xcf\xf7\x1b\x77\x51\x28\x97\x62\x84\x8e\
\x8b\x55\xc1\x0d\x5c\x4c\x33\x42\xfe\x40\xa3\x76\xf9\x4a\xf3\xd2\
\x4b\x10\x3b\x37\x9b\x2a\x74\x6b\xd5\x78\xb5\x0b\xbf\x74\x16\x87\
\x8e\x4a\x94\x47\x01\xd3\x2c\x69\x46\xc8\xdf\xc7\xc7\xbc\x31\xb9\
\x7c\xe5\xa9\x97\x2f\xdd\x7b\x9d\x99\x96\x70\x62\xd5\xb2\xcb\xa0\
\xc7\xc4\x28\x1b\x44\xaf\xc6\xab\x5d\xf8\x65\xf1\x02\xec\xcb\xe3\
\x4e\xc6\x24\xbf\xcb\x4c\x7a\x9c\x54\x90\xf7\xa4\x52\xef\xfd\x34\
\x7a\x21\x8f\xc7\xab\xc5\xdd\x8a\x92\x98\x79\x9d\xbd\x78\x3c\x1e\
\x8f\xe7\x1e\xd4\x67\xee\x9f\x29\xe2\x0f\x5a\xda\xdb\xdd\x3d\x78\
\xbc\x6e\xdb\xde\xc8\x3f\x86\xc4\x45\xa9\xa7\x16\x0f\x0c\x69\xc5\
\xe3\x39\xfb\x45\x46\xfb\xf0\xfc\x66\xc5\x8b\x08\x02\xcd\x3c\x3c\
\xa2\x6d\xd0\xc4\x7f\xf2\x31\x1d\x01\x2a\x21\xbc\x3d\xd9\xdb\x6d\
\xe4\x75\x01\x41\x10\x58\xf1\xc3\xbd\xb3\x7a\x87\x78\xf1\x78\x3c\
\x9e\xb3\x57\x60\xcf\xb9\x17\x0d\x73\x93\x0a\x6f\x4f\xf6\xe6\xb5\
\xee\xd6\x35\xd0\x95\xc7\x73\x0b\xfc\x7c\xda\xde\xc7\x65\x0a\x82\
\x20\x08\x42\x9c\xb4\x6b\x4c\x98\x57\xf8\xf7\x0f\xc5\x84\x34\xfd\
\xef\xf9\x7d\x02\xdd\x78\x3c\x1e\x8f\xc7\x6f\xd5\xae\xd7\xf2\x04\
\x11\xa1\xfd\x22\x81\xe6\x5f\x5f\x31\x20\xd0\x8d\xc7\xe3\x79\x77\
\x9e\x7b\xfa\xe4\x5c\xe8\xf2\xad\x7f\x97\xaf\xca\xd3\x00\x5d\xbe\
\x94\x01\x5d\xbe\x26\x0f\xa3\x72\xf9\x5a\x74\xdc\xfd\xfa\x6d\xe3\
\x51\x2a\x20\x20\x85\x10\x90\x42\x48\x61\x53\x01\x3c\xcb\xd7\xc4\
\x61\x4a\x2e\x5f\x53\x3b\x14\x5d\xfe\x6a\x7d\xe7\x1e\x67\xfa\x5f\
\xbd\xf9\x83\x1f\xdb\x58\xda\x2b\xe9\xf2\x5d\x74\xfc\xfe\x20\x37\
\x9a\x94\x69\x5f\xc5\xe5\xab\xa0\xea\xf2\x9d\xd3\x93\x4f\x8f\xab\
\xd9\xa4\x02\xba\xec\x6b\x0b\xe8\xf2\xd5\xdf\xa9\x41\x97\x2f\x74\
\xf9\x42\x97\x6f\x43\xbc\xf2\xa0\xcb\x17\xba\x7c\x01\x74\xf9\x36\
\x2c\xa0\xcb\xd7\x30\x97\xaf\x71\x02\xba\x7c\x0d\x70\xf9\x92\x53\
\x43\x63\x9d\x17\x42\x97\x2f\x74\xf9\x42\x97\x2f\x44\xbd\x4c\x98\
\xa0\xcb\xd7\xd4\x01\x5d\xbe\x10\x90\x42\x48\x21\x44\xd3\xa5\x10\
\xba\x7c\x55\x16\x67\xea\xc8\xe5\xfb\xe9\x2b\x01\xba\x7c\x3f\xa0\
\xae\x5c\xbe\x9f\xde\xa8\x0d\x5d\xbe\xb5\x82\xa6\xcb\xb7\x41\x24\
\x5f\xe8\xf2\xad\x45\xff\x00\x5d\xbe\xd0\xe5\xfb\x09\x59\x84\x2e\
\x5f\xe8\xf2\x85\x2e\xdf\x86\xe2\x0f\x40\x97\x2f\x74\xf9\x42\x97\
\x6f\x03\xf3\xf7\xf1\x31\x87\x2e\x5f\xca\x2e\xdf\x4f\xa9\x17\x42\
\x97\x6f\xbd\xb8\x7c\x3f\xcc\x37\xa0\xcb\xd7\x74\x5d\xbe\x8d\xf0\
\x83\x6e\xe8\xf2\x35\x79\x40\x97\x2f\x84\x09\x01\x52\x08\x29\x84\
\x80\x14\x1a\x29\xa0\xcb\xd7\xc4\x01\xcf\xf2\xad\x3f\xc0\xb3\x7c\
\x35\x27\x15\x26\x66\xd4\x36\x42\x34\xb8\xcb\xd7\xf8\x0f\x45\x87\
\x2e\x5f\xe8\xf2\x85\x2e\xdf\x06\x79\xe5\x41\x97\x2f\x74\xf9\x02\
\xe8\xf2\x6d\x58\x40\x97\x2f\x74\xf9\x02\xd0\xa4\x5c\xbe\xc6\x0d\
\xe8\xf2\x85\x2e\xdf\xc6\xee\xf2\x35\x7e\xaf\x7d\xe3\xc4\x07\x97\
\xef\x2e\x83\x5c\xbe\xbd\x76\xa4\x5b\xb6\xea\x35\xe3\xc0\xff\x34\
\x5c\xbe\x70\x1b\xb6\x4f\x0f\xe8\xf2\x85\x80\x14\x42\x0a\x21\x20\
\x85\x10\x90\x42\x53\x44\x55\x77\x6e\x9d\xdc\x2b\xcb\x38\xd5\xe8\
\x25\x5f\x42\xf4\xe6\xfc\x9a\xaf\x7b\x46\xf8\xb9\xf2\x9d\x5a\x07\
\x77\x1f\xf5\xd3\xc9\x57\xc2\x4a\x1d\x9d\x90\x64\x5c\x5e\x3f\xb1\
\x77\x14\xa9\xeb\x04\x44\x0f\xf9\xfe\x78\x06\x0a\x80\xf4\xe9\xf2\
\x60\xe7\xee\x7b\x32\x31\xb2\x96\xce\xae\x18\xd9\xd1\xcf\x95\xcf\
\xe7\xbb\xfb\x47\xf4\x9b\x7f\xfa\x5d\x8d\x78\xd0\x70\xe7\xd6\xc1\
\xbd\x8a\xf7\xe7\xe6\x2f\x6c\xe4\x92\xaf\xf8\xd9\xc6\x7e\xbd\xb7\
\x08\x3f\x9f\xbf\x78\xcf\x46\xdf\x66\x15\x29\xb1\x07\xd6\xcc\xfe\
\xec\x5a\xd2\xd9\x2b\xcb\x42\x2c\x10\x20\x79\xf9\xeb\x80\x2f\x36\
\x15\x76\x9e\xb5\x64\xc7\x9a\xb6\x3c\x66\x59\xe6\x93\x7b\xe9\x2e\
\x56\x74\x00\x54\xbe\x8a\x40\x53\x77\x4f\x9c\x7e\xc8\x72\xd6\xf6\
\xf3\x43\x7d\xcd\x2a\x72\x92\x9f\x16\x78\x37\xaf\xd1\x92\x96\x86\
\x3b\xb7\x0e\xee\x95\x65\xdd\x7d\x8d\x9b\xc0\xea\x0c\x5a\x9a\x74\
\x69\xdb\xc2\x69\xeb\x13\x2a\x08\x82\x90\xbd\x5a\x17\xc9\x8f\xa0\
\xb8\xa8\x21\x7b\xfd\x6b\x37\xbe\xc7\x88\xbf\x73\x2b\xd7\x32\xb0\
\xf7\xa7\x47\x7b\xf2\x22\xd7\x3c\x97\x12\xb2\xd7\x5b\xba\xf1\xdd\
\x06\x1d\xce\x42\x35\x6e\x94\x3c\xf9\xb1\xbd\x53\xb7\xdd\x19\x28\
\x41\x08\x63\xc7\x7b\xf0\xbe\x38\x9a\xa7\xd0\x99\xbd\xfc\xdb\x5b\
\x26\x76\x0d\x70\xe1\x39\xfb\x75\x1a\xfb\xbf\xdb\x85\x18\x9a\xf3\
\xf7\xe8\xd6\xad\x06\x1f\xcc\x40\x09\x02\xcb\xbf\x30\x29\xc0\x67\
\xd4\xf1\x77\x28\x51\x7a\xe1\x4b\xa5\x8f\x2d\x74\xe5\x73\xa9\xe6\
\x8d\x04\x21\x7d\xb9\x75\x54\xa7\x40\x6f\x17\x1e\x8f\xe7\xe4\x13\
\x39\xe4\xa7\x8b\x59\x64\x39\x35\xee\xad\xfc\xd4\xdf\x8b\xd7\x98\
\x25\x5f\xec\xdd\xb5\x33\xc9\x9c\xae\x53\x7a\x39\x56\xae\x65\xd0\
\x1d\x7a\x4c\xee\x66\x91\x79\xfe\xf2\x5b\x51\xf6\xd5\xd3\xc9\xac\
\x4e\xd3\x49\x33\xb5\x4e\x70\xfd\x06\x75\xb7\x7a\xba\x6c\xfc\x0f\
\x07\xe3\x32\x45\x9a\x2d\x41\x8b\xaa\x5c\xde\x72\xd0\xa6\xf5\x9d\
\x93\x56\xce\x39\xfc\x26\xfb\xe2\xe2\xef\xe3\x82\x57\x6f\x1a\xc4\
\x67\x90\xcb\xbd\x4a\x77\xae\x3f\xdd\x10\x39\x5a\xed\x5e\x55\xcd\
\x18\x61\x35\x66\xc9\x17\x13\xe4\x95\x03\x3b\x4f\xfb\xaa\xfa\x3b\
\xcb\xce\xb3\x39\x10\xbe\x17\xc8\x84\xef\xcb\x41\x0b\x4f\xfb\xca\
\x15\x2e\xac\xe0\xee\x5f\x07\xce\x25\x57\x54\x79\xa1\xd0\xed\xfb\
\x6e\xbb\x71\x7c\x51\xdb\xb7\x3b\x47\x47\xf9\x84\x0f\x5b\x7e\x32\
\x59\xe5\x55\xaa\x43\x55\xa6\x3b\xf4\x5d\xbb\xe9\xb3\x37\x3f\x0d\
\xec\x37\xe7\x7e\xd4\x86\x0d\x03\x5a\x7e\x7c\x48\x3e\xb8\x73\xd9\
\x2c\x06\x66\x88\x1c\xad\x76\xaf\xda\xe3\xdb\x98\x25\x5f\xa6\xbd\
\x4f\x4b\x70\x3c\xbd\x48\x0e\x80\xca\x4a\x24\x5a\x9c\x5e\x02\xec\
\xbc\xec\xcd\xed\x7d\x5a\x82\xe3\x59\xc5\x28\x00\x1f\x38\x46\xb3\
\xff\x59\xbb\xec\xc9\xbc\x1e\x7d\x7c\x9a\x57\x25\xdd\xb1\xe3\xa4\
\xf5\x1d\xbf\x59\xf5\xfe\xfe\xc1\x25\xdf\xce\x1e\x90\xc1\xb8\xb5\
\x6f\xa0\x03\xd9\xb0\xb5\xab\xca\x38\xb0\xb0\xeb\x38\xaa\xbb\xd5\
\xf9\x13\xa2\xce\x9f\x87\x34\xd7\xf6\x65\x9a\x41\x72\xb4\x49\x4e\
\x2a\xea\x44\xf2\xa5\xb7\x68\xdf\xc5\x4d\x1c\xbb\xf7\x5a\x7e\xe5\
\xf0\x04\x2f\xfc\xef\x8f\x1b\x42\xe7\x6e\x1d\x5a\xb2\x9b\x07\x75\
\x76\x15\xc7\xfe\xae\xfa\xbf\xd5\x01\x61\xb5\x8c\x18\xff\xd3\x2c\
\xbf\x8a\x3b\x17\x92\x95\x43\x43\xed\xaa\x32\x8d\x10\xdc\x5d\xbf\
\xe0\x5f\x97\x89\x93\xda\x3c\x5c\xba\xe0\x54\x0e\x06\x80\xba\x3b\
\xd7\x20\x39\x5a\xdd\xd9\x6b\x2a\xf3\xc2\xda\x4b\xbe\x80\xe5\x3b\
\x75\xe3\x38\xdb\x1b\xb3\x06\xcf\xde\x7b\x35\xf1\x4d\x56\xda\x93\
\x98\x7d\x73\x87\x4e\xbb\x62\x3d\x6a\xed\x74\x7f\x0e\x60\xfb\x4d\
\xdb\x38\xc6\xf6\xc6\x8c\x01\x53\xb7\x9d\x8f\x7f\x91\x96\xf9\x36\
\x25\x35\x5f\xaa\x51\x4b\x92\x57\x47\xb7\xee\xbf\x70\x3b\x31\x39\
\xfd\x6d\xf2\x83\x7f\xf6\x1e\x4b\x41\xdc\x82\x9c\x94\x5d\xb3\x56\
\x55\x99\x10\xde\x5f\x37\xe7\xa4\xe3\xf7\x3b\x7e\x5c\xba\x7d\x45\
\xe0\xfd\x65\x0b\xff\xc9\x53\xa8\xbb\x73\x45\x1e\x06\xc8\xd1\x6a\
\xf7\x96\xa8\x4d\x6a\x4c\x4c\x2f\x14\x27\xae\xec\xe6\xdb\x65\x79\
\x42\x05\xd5\x1b\xb0\xb2\xc7\xfb\xbf\xfb\xb2\x63\x80\x33\x8f\xc7\
\xe3\xb9\x47\x0e\x5b\xb8\x2f\xa1\x54\x65\x80\x5a\xf6\xe4\xf0\xc2\
\x61\x5d\xdb\x7b\xf1\x78\x3c\x9e\x93\x57\x60\xe7\xa1\x3f\xdd\x2c\
\x51\xa8\x8c\x48\xb1\xc2\x9b\x1b\x27\xf4\x0c\x6d\xed\xc4\xe3\xf1\
\x78\xad\xda\x75\x1f\xbf\xee\x52\x96\x4c\x6d\x44\x1a\xb7\x75\xd2\
\xe7\xc1\xde\x3c\x1e\x8f\xef\x19\x36\x7a\x5f\x9a\xe0\xc5\x86\xce\
\x9e\x3d\xb6\xa5\xc8\x08\x82\x20\xd0\x8c\x3f\xfa\xba\x07\x4c\xbf\
\x56\xac\xa8\xe2\xce\x9d\x1f\x5b\x8a\x6b\xdc\x28\xaf\x6a\x26\x15\
\xdd\x9d\xe5\xe7\x3a\xec\xaa\x80\x50\x73\xf6\xce\x8f\x2d\xc5\x55\
\xcd\xa7\x4d\x45\xf2\xc5\xcb\x6f\xcd\x0a\x70\x8e\x5e\x7a\x31\x4d\
\xa8\x68\x64\xe2\x31\x0d\x00\xf0\x89\xf7\x60\x6b\x98\x6e\xd9\xaa\
\xc3\xcf\xc7\xd6\x84\x3d\x59\xdc\xa9\xb5\x67\xf8\xb8\xc3\x99\x58\
\xe3\x29\x5a\xd3\x91\x7c\x69\x96\x01\xa3\x37\x9d\x1f\xb5\xb6\x2c\
\x3b\xad\xd0\x92\xcf\x68\x5c\x14\x36\x25\x20\x4c\x6b\x17\x1f\x6b\
\xa8\x54\x40\x40\x0a\x21\x20\x85\x0d\x87\xfa\x73\xff\x42\x0a\xeb\
\x16\x9f\x62\x63\x67\xc3\x92\x6e\xe8\xe1\x8c\xde\xdd\x63\x8d\x6d\
\x87\x60\xca\x27\xf1\x6a\xb4\x89\x5a\x6f\xec\x6c\x68\xd2\x9f\x88\
\xc2\x6a\x76\x72\x36\xd2\x1d\x9e\x1b\x70\x63\x67\x03\x93\xfe\x74\
\xdb\xab\x6b\xdd\x03\x98\x0a\x7f\x26\xbf\xb1\x33\x00\x8a\xd2\x47\
\xfb\xbf\xeb\x1f\xe9\xeb\xcc\xe7\xf3\xdd\xfd\xc2\xfa\x2c\xb8\x44\
\xca\x80\xda\xf3\xa0\x4c\x9a\x5a\x61\x3f\xa9\x51\x5b\x8d\x45\x7d\
\xfc\x35\x96\x8d\x9d\xb1\xcc\xc3\x13\x86\xfc\xf4\xc4\x67\xd6\xbe\
\x6b\x77\xef\xfd\xf7\xfb\x60\xf0\x3c\x21\x43\x44\x00\xa0\x2f\x0f\
\x14\x0b\xfb\xa9\xa7\xf6\xca\x1e\xb5\x7a\xfe\x1a\xd3\xc6\xce\xf2\
\x37\x27\xf6\x26\xb4\x9c\x7a\x65\xd5\x57\x7e\x6c\x00\x30\xac\xa5\
\x19\x59\x16\x34\xed\x44\xf5\x79\xa0\x53\x2b\x6c\x03\x8c\x48\xa9\
\x9c\x70\xd0\x98\x36\x76\x96\x17\xa6\x14\xd2\x9c\xda\x69\xa4\xa5\
\x37\x0f\x14\x0b\xdb\x30\x93\x0a\xbd\xef\xbf\xc6\xb4\xb1\x33\xd3\
\xd6\xcd\x16\xcf\xd7\x4c\x4b\x7f\x1e\xa8\x15\xd6\xc4\x24\x5f\x53\
\xdc\xd8\x99\xed\x35\x68\x84\x67\xfa\xb6\x1f\xb6\xc5\x24\x65\x66\
\x3c\xff\xef\xc2\xed\x3c\x72\x40\xc4\xd6\x97\x07\x8a\x85\x35\x2d\
\x97\x2f\x5e\xfa\xe2\xc8\xa2\x6f\x8f\xa5\x8a\x00\x60\x3b\x04\x7d\
\xb9\xe6\x97\x6e\xb6\x08\x00\x80\xe3\x3f\x6e\xa2\xef\x9e\x15\x8a\
\x39\x5f\x07\x7c\xb0\xcc\x72\xdb\xcc\x39\xf8\x9b\x68\xc1\xda\x71\
\x1d\xd6\xc8\xad\x5d\xdd\x70\x60\xd6\x86\x4b\x07\x00\x17\xbe\x7d\
\x7c\xef\xae\x79\x6f\x31\x1e\xa8\x3d\x80\xb6\xf4\xb5\xbb\x7f\x11\
\x96\x4d\x4b\x9e\x0d\xa5\x12\xb0\x7c\xa6\x1d\xfa\xbd\x62\xc1\xaa\
\x19\x3d\x37\x09\xcd\x9d\x5b\xdb\xca\x3e\x7c\xac\xc3\xad\x3e\x0f\
\xb8\x90\x62\x61\x1b\xa9\xe4\x6b\x4c\x1b\x3b\x57\x15\xf9\xd3\xb6\
\x75\x76\xe9\xb2\x3d\x1d\xad\xb3\x3c\x34\x52\xb1\xc9\x24\x36\x76\
\xae\xab\x3c\x34\xd2\xed\xd5\x9b\x10\xa0\x51\x1b\x8a\x4d\x10\x90\
\x42\x08\x48\xa1\x69\x01\x97\x95\xbc\xcb\xad\x30\xdc\xa5\xa6\x5b\
\x6a\x86\x14\xd6\x3f\x54\x0d\xba\x15\xf1\xb3\x22\xa3\x17\xdc\x17\
\xe9\x0a\x5b\x03\xa9\x19\x52\x58\xcf\x30\xd0\xdc\x5b\x83\x33\x84\
\x19\xb0\x92\xeb\x17\x86\x9a\x7b\x0d\x97\x9a\x1b\x79\x2b\xac\x17\
\xb9\x58\xe7\xd9\xbc\x14\x8e\xde\x95\x01\x40\xc8\x6f\x7f\x17\xe9\
\xae\xb1\xad\xf4\xc7\x9e\xd6\x60\xa9\xb9\x01\x0e\x45\xff\x34\x2b\
\x1f\xf5\x28\x17\xeb\x3c\x9b\x57\xff\xd1\xbb\xfe\x6c\x00\x10\x9a\
\xff\xb4\x03\xff\x3d\xbc\x77\x75\x4b\xdf\x72\x55\x1f\xaf\x1a\x28\
\x4b\xcd\x8c\xc6\x37\xaf\xaf\x6f\xb9\x18\x54\x00\xed\x67\xf3\x5a\
\xeb\x3f\x7a\x17\xc8\x01\x00\x0c\x5b\xaf\xd6\xae\x3c\x4b\xe0\x38\
\x75\x76\xf8\xde\x05\xb1\x69\xd2\x41\xf6\xe6\x1a\xa5\xa0\x2e\x35\
\x37\xc2\x8e\xb4\xbe\xe5\x62\xb5\x6e\xac\xf2\x6c\x5e\x0a\x47\xef\
\x56\x1d\xba\xb0\x6c\x1d\xb8\xba\x7c\xbc\xd4\xa5\xe6\x46\x48\x61\
\x7d\xcb\xc5\x6a\x50\x9e\xcd\x4b\xe9\xe8\xdd\xaa\x74\x54\xfa\x78\
\x35\x40\x5d\x6a\x6e\x9c\xc3\x99\x7a\x95\x8b\x09\x00\xb4\x9e\xcd\
\x4b\xe5\xe8\xdd\x12\xca\x27\x32\x51\x97\x9a\x1b\xf7\xa4\xa2\x7e\
\xe4\x62\x4f\x00\x00\x10\x24\xee\x9e\xda\x73\x6e\x01\x62\x1f\xd0\
\xfb\xc7\x63\x6b\x7a\xb5\xa0\x01\xa0\x75\x37\x66\x76\xeb\x6f\x56\
\x4f\x79\x30\x7f\x56\xf7\xbf\xe4\x16\x5e\x23\x77\x1f\xe9\x4f\x39\
\xef\x94\xa5\x66\xb8\xb1\xb3\xc1\x72\x71\x3d\x9f\xcd\x0b\x25\xdf\
\xda\xcf\x47\x4c\xec\x98\x5f\x48\xa1\xe6\xfa\x88\xbe\x63\x7e\xeb\
\xf9\x6c\xde\x9a\x50\x08\xb7\x57\x37\xed\x17\x3e\x80\xaa\xbd\x89\
\x03\x2a\x15\x90\x42\x08\x48\x21\x04\xa4\x10\x52\x08\x01\x29\x84\
\x68\xe0\x49\x05\x3c\x51\xdb\xe4\x29\x84\x93\x42\xd8\x91\x42\x40\
\x0a\x21\x20\x85\x90\x42\x08\x48\x21\x04\xa4\x10\x02\x52\xd8\xb4\
\xe7\x85\x00\x4a\xbe\x8d\x80\x42\x38\xbb\x87\x1d\x29\x04\xa4\x10\
\x02\x52\x08\x29\x84\x80\x14\x42\x40\x0a\x21\x20\x85\x4d\x78\x5e\
\x08\x55\x7b\x93\xa7\x10\xce\xeb\x61\x47\x0a\x01\x29\x84\x80\x14\
\x42\x0a\x21\x20\x85\x10\x90\x42\x08\x48\x61\xd3\x9e\x17\x02\xa8\
\xda\x37\x02\x0a\xe1\xec\x1e\x76\xa4\x10\x90\x42\x08\x48\x21\xa4\
\x10\x02\x52\x08\x01\x29\x84\x80\x14\x36\xe1\x79\x21\x54\xed\x4d\
\x9e\x42\x38\xaf\x87\x1d\x29\x04\xa4\x10\x02\x52\x08\x29\x84\x80\
\x14\x42\x40\x0a\x21\x20\x85\x4d\x7b\x5e\x08\xa0\x6a\xdf\x08\x28\
\x84\xb3\x7b\xd8\x91\x42\x40\x0a\x21\x20\x85\x90\x42\x08\x48\x21\
\x04\xa4\x10\xa2\xe6\x93\x0a\x28\xf9\x9a\x3c\x85\x70\x52\x68\xea\
\xf8\x3f\x9d\x51\x1f\xc2\xf7\x7f\x3e\xf7\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
"
# Resource name table (pyrcc5-generated; do not edit by hand).  Each entry is
# a big-endian 16-bit character count, a 32-bit qt name hash, then the name's
# characters in UTF-16-BE.  Names encoded below, in order:
#   "plugins", "simplesvg", "icon.png", "help.png", "inkscape.png".
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x09\
\x04\x72\x3e\xc7\
\x00\x73\
\x00\x69\x00\x6d\x00\x70\x00\x6c\x00\x65\x00\x73\x00\x76\x00\x67\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0c\x33\x5a\x87\
\x00\x68\
\x00\x65\x00\x6c\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x06\x6e\x04\x67\
\x00\x69\
\x00\x6e\x00\x6b\x00\x73\x00\x63\x00\x61\x00\x70\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
# Resource directory tree in rcc data format 1 (pyrcc5-generated; used by
# Qt < 5.8).  Each fixed-size record references an offset into
# qt_resource_name and either child records (directories) or an offset into
# qt_resource_data (files).
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x58\x00\x00\x00\x00\x00\x01\x00\x00\x08\xde\
\x00\x00\x00\x2c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x42\x00\x00\x00\x00\x00\x01\x00\x00\x02\xc4\
"
# Same directory tree in rcc data format 2 (Qt >= 5.8): identical to v1 but
# every record is followed by an extra 8-byte last-modified timestamp line.
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x58\x00\x00\x00\x00\x00\x01\x00\x00\x08\xde\
\x00\x00\x01\x6d\xde\x2c\xda\x39\
\x00\x00\x00\x2c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x6d\xde\x2c\xda\x39\
\x00\x00\x00\x42\x00\x00\x00\x00\x00\x01\x00\x00\x02\xc4\
\x00\x00\x01\x6d\xde\x2c\xda\x39\
"
# rcc data format 2 (with timestamps) is only understood by Qt >= 5.8; fall
# back to format 1 when running against an older Qt.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the resource data registered by qInitResources()."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)


# Registering the resources is a deliberate side effect of importing this
# generated module.
qInitResources()
|
990,590 | cde1fc4fa1c8a3fc028218f34737874d383d0b57 | # -*- encoding: utf-8 -*-
import collections
import datetime
import heapq
import re
def count_urls(urls):
    """Return the hit counts of the five most frequent URLs, most common first.

    Note: the result contains the *counts* only, not the URLs themselves,
    matching how get_result() consumes it.
    """
    return [hits for _, hits in collections.Counter(urls).most_common(5)]
def update_url_to_statsionary(d, url, time, ignore_www):
    """Accumulate total response time and hit count for *url* in *d*.

    The URL is normalised with get_www_ignore() first.  *d* is mutated in
    place and also returned for convenience.
    """
    key = get_www_ignore(url, ignore_www)
    stats = d[key]
    stats['time'] += time
    stats['amount'] += 1
    return d
def get_www_ignore(url, ignore_www):
    """Return *url*, with a leading 'www.' stripped when *ignore_www* is true.

    Bug fixed: the original used ``url.replace('www.', '')``, which removed
    'www.' occurring *anywhere* in the URL (e.g. inside the path), not just
    the host prefix.  Only a leading 'www.' is removed now.  The redundant
    duplicated return branch is gone as well.
    """
    if ignore_www and url.startswith('www.'):
        return url[len('www.'):]
    return url
def get_n_max_time(url_to_stats, n=5):
    """Return the truncated mean response times of the *n* slowest URLs,
    slowest first."""
    def avg_time(url):
        stats = url_to_stats[url]
        return stats['time'] / stats['amount']

    slowest = heapq.nlargest(n, url_to_stats, key=avg_time)
    return [int(avg_time(url)) for url in slowest]
def make_list_all_urls(ignore_www, url, urls):
    """Append the (possibly www-stripped) *url* to *urls*; return the list."""
    urls.append(get_www_ignore(url, ignore_www))
    return urls
def get_result(url_to_stats, urls):
    """Pick the final answer: slow-query averages when time stats were
    collected, otherwise the top-5 hit counts."""
    if url_to_stats:
        return get_n_max_time(url_to_stats)
    return count_urls(urls)
def check_parse_params(result, ignore_urls, start_at, stop_at, request_type, ignore_files):
    """Return True when the parsed log line passes every configured filter.

    Short-circuits in the same order as the individual checks were applied
    originally: ignore list, date window, request type, static files.
    """
    return (check_ignore_urls(result, ignore_urls)
            and check_date(result, start_at, stop_at)
            and check_request_type(result, request_type)
            and check_ignore_files(result, ignore_files))
def check_ignore_files(result, ignore_files):
    """Return False when static-file filtering is enabled and the URL ends in
    a known static-asset extension; True otherwise."""
    if not ignore_files:
        return True
    is_static = re.search(r'\.(html|htm|png|jpeg|css|gif|js)$', result.group('url'))
    return is_static is None
def check_request_type(result, request_type):
    """Return True when no request-type filter is set, or the line's method
    matches the requested one."""
    return request_type is None or result.group('type') == request_type
def check_ignore_urls(result, ignore_urls):
    """Return True when the URL is not blacklisted (or the blacklist is empty)."""
    if not ignore_urls:
        return True
    return result.group('url') not in ignore_urls
def check_date(result, start_at, stop_at):
    """Return True when the log line's date lies inside [start_at, stop_at].

    A bound of None means "unbounded" on that side; when both bounds are
    None every line passes without the date even being parsed.

    Bug fixed: the log regex admits the month spellings June/July/Sept,
    which datetime's %b directive cannot parse (it expects Jun/Jul/Sep), so
    strptime raised ValueError for those months.  The month is now truncated
    to its first three letters before parsing.
    """
    if start_at is None and stop_at is None:
        return True
    # 'June' -> 'Jun', 'Sept' -> 'Sep'; three-letter names are unaffected.
    month = result.group('month')[:3]
    date_str = result.group('year') + month + result.group('day')
    d = datetime.datetime.strptime(date_str, '%Y%b%d')
    if start_at is not None and stop_at is not None:
        return start_at <= d <= stop_at
    if start_at is None:
        return d <= stop_at
    return start_at <= d
def parse(
        ignore_files=True,
        ignore_urls=[],
        start_at=None,
        stop_at=None,
        request_type=None,
        ignore_www=False,
        slow_queries=False
):
    """Parse 'log.log' and return either the top-5 hit counts or, when
    *slow_queries* is set, the truncated average times of the 5 slowest URLs.

    Bugs fixed:
      * the log file handle was opened and never closed -> use ``with``.
      * the month alternation only accepted June/July/Sept, so lines using
        the standard %b abbreviations Jun/Jul/Sep never matched at all.
        Both spellings are accepted now (longer alternatives listed first so
        they win when present).
    """
    regexp = (r'^\[(?P<day>(0[1-9]|[12][0-9]|3[01]))'
              r'/(?P<month>(Jan|Feb|Mar|Apr|May|June|Jun|July|Jul|Aug|Sept|Sep|Oct|Nov|Dec))'
              r'/(?P<year>(19|20)\d\d)'
              r' (?P<h>(2[0-3]|[0-1]\d))'
              r':(?P<min>([0-5]\d))'
              r':(?P<sec>([0-5]\d))\]'
              r' "(?P<type>(GET|POST|PUT))'
              r' (http|ftp|https)://(?P<url>(([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?))'
              r' (?P<protocol>((HTTP|HTTPS|FTP)/\d(.\d)?))'
              r'" (?P<code>(\d\d\d))'
              r' (?P<time>([\d]+))')
    url_to_stats = collections.defaultdict(lambda: collections.defaultdict(int))
    urls = []
    with open('log.log') as f:
        for line in f:
            result = re.match(regexp, line)
            # Skip unparseable lines and lines rejected by the filters.
            if result is None:
                continue
            if not check_parse_params(result, ignore_urls, start_at, stop_at,
                                      request_type, ignore_files):
                continue
            if slow_queries:
                url_to_stats = update_url_to_statsionary(
                    url_to_stats, result.group('url'),
                    int(result.group('time')), ignore_www)
            else:
                urls = make_list_all_urls(ignore_www, result.group('url'), urls)
    return get_result(url_to_stats, urls)
|
990,591 | 75bc77c1a1331cbd52bb0efee5358146da9d80e8 | from pico2d import *
class Key:
    """Key sprite drawn at a fixed screen position."""

    # Fixed draw position on the canvas.
    POS_X = 1700
    POS_Y = 50

    def __init__(self):
        # Sprite image is loaded once, at construction time.
        self.image = load_image('sprite//key.PNG')

    def update(self):
        # No per-frame state to advance.
        pass

    def draw(self):
        self.image.draw(Key.POS_X, Key.POS_Y)
|
990,592 | 29c8f8e1d07fca6057bb6aa488bd6ddcafd186b5 | from django.urls import path, re_path, include
from .views import AbstractAPIview, AnnotatedAPIview, AbstractAPIDetail, AnnotatedAPIDetail
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# List and detail endpoints for the abstract/annotated API views.
urlpatterns = [
    path('abstracts/', AbstractAPIview.as_view(), name='abstract-api'),
    path('abstracts/<int:pk>/', AbstractAPIDetail.as_view(), name='abstract-api-detail'),
    # NOTE(review): the route name 'anotated-api' is misspelled ('annotated'),
    # but renaming it would break any reverse()/{% url %} lookups using it.
    path('annotateds/', AnnotatedAPIview.as_view(), name='anotated-api'),
    path('annotateds/<int:pk>/', AnnotatedAPIDetail.as_view(), name='annotated-api-detail'),
]
990,593 | b95b95d6c3c1e9cd6576eace276afa786889d8ea | from re import search
from tabulate import tabulate
from datetime import datetime
class QRelease:
    """
    QRelease: the MTC release class

    Args:
        start(obj): a datetime object; defaults to "now", evaluated per call.
        args(str): an explicit release in YYYY.MM format (overrides *start*).
        monthly(bool): use a one-month release cadence instead of quarterly.

    Attributes:
        curRel(str): current release in YYYY.MM format.
        curShort(str): current release in YYMM format.
        curYear(str): current release year.
        curMth(str): current release month.
        prvRel(str): previous release in YYYY.MM format.
        prvShort(str): previous release in YYMM format.
        prvYear(str): previous release year.
        prvMth(str): previous release month.
        ppRel(str): two releases before in YYYY.MM format.
        ppShort(str): two releases before in YYMM format.
        ppYear(str): two releases before year.
        ppMth(str): two releases before month.
        nRel(str): next release in YYYY.MM format.
        nShort(str): next release in YYMM format.
        nYear(str): next release year.
        nMth(str): next release month.
    """
    def __init__(self, start=None, args=None, monthly=False):
        """
        __init___: instantiate the class and generate the attributes on the fly.

        Args:
            start(obj): a datetime object, or None for "now".
            args(str): an explicit YYYY.MM release string.
            monthly(bool): monthly (1) vs quarterly (3) release period.

        Bug fixes relative to the original:
          * ``start=datetime.now()`` as a default argument was evaluated once
            at class-definition time, freezing "now"; it is evaluated per call
            now via a None sentinel.
          * the release pattern used a non-raw string with an unescaped dot
            ("\\d{4}.\\d{2}"), so e.g. "2020X05" was accepted.
          * ``monthly == True`` replaced with a plain truth test.
        """
        if start is None:
            start = datetime.now()
        if args and search(r"\d{4}\.\d{2}", args):
            self.curYear, self.curMth = args.split(".")
        else:
            self.curYear = start.year
            self.curMth = start.month
        # Release cadence: 1 month or 3 months between releases.
        if monthly:
            self.relPeriod = 1
        else:
            self.relPeriod = 3
        self.curYear, self.curMth = self.getYearMonth(self.curYear, self.curMth)
        self.curRel, self.curShort = self.getTuple(self.curYear, self.curMth)
        # figure out the previous release: one period back
        pYear = int(self.curYear)
        pMth = "%02d" % (int(self.curMth) - (1 * self.relPeriod))
        self.prvYear, self.prvMth = self.getYearMonth(pYear, pMth)
        self.prvRel, self.prvShort = self.getTuple(self.prvYear, self.prvMth)
        # two releases before
        ppYear = int(self.curYear)
        ppMth = "%02d" % (int(self.curMth) - (2 * self.relPeriod))
        self.ppYear, self.ppMth = self.getYearMonth(ppYear, ppMth)
        self.ppRel, self.ppShort = self.getTuple(self.ppYear, self.ppMth)
        # next release
        nYear = int(self.curYear)
        nMth = "%02d" % (int(self.curMth) + (1 * self.relPeriod))
        self.nYear, self.nMth = self.getYearMonth(nYear, nMth)
        self.nRel, self.nShort = self.getTuple(self.nYear, self.nMth)

    def getYearMonth(self, year, month):
        """
        getYearMonth: normalise the given year/month onto a release boundary.

        Args:
            year(str|int): the year of the requested release.
            month(str|int): the month of the requested release.

        Returns:
            iYear(int): four-digit year.
            relMth(str): two-digit, zero-padded month.
        """
        iYear = int(year)
        iMth = int(month)
        # snap down to the nearest release month
        iMth = iMth - (iMth % self.relPeriod)
        # if we get zero or less, we're on the last release(s) of last year.
        if iMth <= 0:
            iMth += 12
            iYear -= 1
        # if we get more than 12, we're on the first release of next year.
        if iMth > 12:
            iMth -= 12
            iYear += 1
        relMth = "%02d" % (iMth)
        return (iYear, relMth)

    def getTuple(self, year, month):
        """
        getTuple: render a release as YYYY.MM and YYMM strings.

        Args:
            year(str|int): the year of the release.
            month(str): the two-digit month of the release.

        Returns:
            rel(str): the release number in YYYY.MM format.
            short(str): the release number in YYMM format.
        """
        rel = "%s.%s" % (year, month)
        short = "%s%s" % (str(year)[-2:], month)
        return (rel, short)

    def __repr__(self):
        # Tabular dump of all four releases, for interactive inspection.
        headers = ["Item", "Release", "Abbr", "Year", "Month"]
        dispTable = []
        dispTable.append(["Current", self.curRel, self.curShort, self.curYear, self.curMth])
        dispTable.append(["Previous", self.prvRel, self.prvShort, self.prvYear, self.prvMth])
        dispTable.append(["2 rels back", self.ppRel, self.ppShort, self.ppYear, self.ppMth])
        dispTable.append(["Next", self.nRel, self.nShort, self.nYear, self.nMth])
        return tabulate(dispTable, headers=headers, tablefmt="psql")
|
990,594 | 9f3f9f7a02c7774469de7027de1e42f08e97de87 | # Adnan Munawar
# Testing Robot IO Loading with varying ROS Communication Load
from dvrk import arm, psm, mtm, ecm
import rospy
from geometry_msgs.msg import PoseStamped
import time
from threading import Thread
from cisst_msgs.msg import mtsIntervalStatistics as StatsMsg
class stats(object):
    """Echoes the rosbridge's interval statistics back onto a 'user' topic,
    stamping the number of active arms into the UserData field.

    A daemon thread republishes the latest received statistics message at
    1 kHz until disconnect() is called or ROS shuts down.
    """
    def __init__(self):
        rospy.init_node('dvrk_load_test')
        self._rate = rospy.Rate(1000)
        self._userDataScale = 10
        self._active = True
        # Bug fix: publish a message *instance*.  The original stored the
        # StatsMsg class object itself, so set_user_data mutated the class
        # and the publisher was handed a type instead of a message.
        self._stat_msg = StatsMsg()
        self._statsTopicPubStr = '/dvrk/rosBridge/period_statistics/user'
        self._statsTopicSubStr = '/dvrk/rosBridge/period_statistics'
        self._pub = rospy.Publisher(self._statsTopicPubStr, StatsMsg, queue_size=10)
        self._sub = rospy.Subscriber(self._statsTopicSubStr, StatsMsg, self._ros_cb, queue_size=10, tcp_nodelay=True)
        # Daemon thread so the process can exit without an explicit join.
        self._pubThread = Thread(target=self._run_pub)
        self._pubThread.daemon = True
        self._pubThread.start()

    def set_user_data(self, n_arms):
        # Encode the active arm count (scaled) into the UserData field.
        self._stat_msg.UserData = n_arms * self._userDataScale

    def clear_user_data(self):
        self._stat_msg.UserData = 0

    def disconnect(self):
        # Stops the republishing loop in _run_pub.
        self._active = False

    def _ros_cb(self, data):
        # Keep the latest statistics sample; _run_pub echoes it back out.
        self._stat_msg = data

    def _run_pub(self):
        while not rospy.is_shutdown() and self._active:
            self._pub.publish(self._stat_msg)
            self._rate.sleep()
class dvrk_latency_test(stats):
def __init__(self):
super(dvrk_latency_test, self).__init__()
self.psmInterface = psm
self.mtmInterface = mtm
self.ecmInterface = ecm
self.arm_dict = {'PSM1': self.psmInterface,
'PSM2': self.psmInterface,
'PSM3': self.psmInterface,
'MTMR': self.mtmInterface,
'MTML': self.mtmInterface,
'ECM' : self.ecmInterface}
self.activeArms = []
def create_arm_load(self, n_arms, delay = 0.0):
self._is_narm_valid(n_arms, self.arm_dict.__len__(), 1)
indx = 0
for armStr, armIrce in self.arm_dict.iteritems():
armIrce = armIrce(armStr)
self.activeArms.append(armIrce)
indx += 1
self.set_user_data(self.activeArms.__len__())
print 'Connecting ROS Client for {}'.format(armIrce.name())
time.sleep(delay)
if indx == n_arms:
break
def relieve_arm_load(self, n_arms=None, delay = 0.0):
n_active_arms = self.activeArms.__len__()
if n_arms is None:
n_arms = n_active_arms
self._is_narm_valid(n_arms, n_active_arms)
for i in range(n_arms):
armIrfc = self.activeArms.pop()
armIrfc.unregister()
self.set_user_data(self.activeArms.__len__())
print 'Disconnecting ROS Client for {}'.format(armIrfc.name())
time.sleep(delay)
def _is_narm_valid(self, n_arms, max_num=6, min_num=0):
if n_arms < min_num or n_arms > max_num:
raise ValueError('num_arms cannot be negative or greater than {}'.format(max_num))
|
990,595 | 91ce51f6e148a97357418ea9033c205cac68ffba | import json
import os
from deep_regression.predict import Predict
import pandas as pd
import numpy as np
from sklearn import preprocessing
from deep_regression.data_helper import TrainData
# Load the model configuration shipped next to the training code; the path is
# resolved relative to the *parent* of the current working directory.
config_path = 'deep_regression/config.json'
with open(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), config_path), "r") as fr:
    config = json.load(fr)
# Re-use the training pipeline's preprocessing; only the preprocessed test
# split (6th return value) is consumed here.
train_data = TrainData(config)
_,_,_,_,_,test_pre,_= train_data.data_pre()
# test_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), config['test_data'])
# test_df = pd.read_csv(test_path)
# id = test_pre["Id"].values
#
# test_pre = test_pre.drop(['Id'], axis=1)
# test_df = pd.get_dummies(test_df)
# test_df.replace(to_replace=np.nan, value=0, inplace=True)
# test_df = test_df.as_matrix().astype(np.float)
# Feature count drives the network's input layer size.
input_size = test_pre.shape[1]
# standard_scaler = preprocessing.StandardScaler()
# test_df = standard_scaler.fit_transform(test_df)
predictor = Predict(config, input_size)
result = predictor.predict(np.array(test_pre))
# Invert exp(y) - 1 per prediction: presumably the targets were trained as
# log(1 + SalePrice) -- TODO confirm against the training code.
result = [(np.exp(res)-1)[0] for res in result]
print('predict results...')
print(result)
# submission = pd.DataFrame(data=None, columns=['Id', 'SalePrice'])
# submission['Id'] = id
# submission['SalePrice'] = result
# submission.to_csv('submission2.csv', index=0) |
990,596 | c190071016ddcb5de9ef0d9ae26441281706d14f | # template for "Stopwatch: The Game"
# define global variables
import simplegui
import random
interval = 100  # timer period in milliseconds (one tick = 0.1 s)
time = 0  # elapsed time in tenths of a second
position = [140,120]  # canvas position of the clock text
x = 0  # stops that landed exactly on a whole second (wins)
y = 0  # total stops (attempts)
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
    """Convert *t* (tenths of a second) into an 'A:BC.D' clock string."""
    minu = int(t // 600)
    if minu > 9:
        # Past 9:59.9 the single minute digit overflows; the game simply
        # halts the timer (and returns timer.stop()'s result) as before.
        return timer.stop()
    sec = (t % 600) // 10
    decsec = t % 10
    return '%d:%02d.%d' % (minu, sec, decsec)
# define event handlers for buttons; "Start", "Stop", "Reset"
# define event handlers for buttons; "Start", "Stop", "Reset"
def timer_start():
    """Start (or resume) the stopwatch."""
    timer.start()
def timer_stop():
    """Stop the watch; count one attempt, and one win when the clock stopped
    exactly on a whole second."""
    global time, x, y
    if not timer.is_running():
        return
    timer.stop()
    if time % 10 == 0:
        x += 1
    y += 1
def timer_reset():
    """Stop the watch and clear the elapsed time and both scores."""
    global time, x, y
    time, x, y = 0, 0, 0
    timer.stop()
# define event handler for timer with 0.1 sec interval
# define event handler for timer with 0.1 sec interval
def tick():
    """Timer handler: advance the clock one tenth of a second while running."""
    global time
    if timer.is_running():
        time = time + 1
# define draw handler
# define draw handler
def draw(canvas):
    """Render the clock face and the wins/attempts score each frame."""
    canvas.draw_text(format(time), position, 54, "Blue")
    canvas.draw_text('%d/%d' % (x, y), [320, 30], 34, "Red")
# create frame and the 100 ms (0.1 s) game timer
f = simplegui.create_frame("StopWatch", 400, 200)
timer = simplegui.create_timer(interval, tick)
# register event handlers
f.add_button("Start",timer_start, 100)
f.add_button("Stop", timer_stop, 100)
f.add_button("Reset", timer_reset, 100)
f.set_draw_handler(draw)
# start frame
f.start()
# Please remember to review the grading rubric
|
990,597 | 3f7a992d446519fa959988d129e56c1b282360ad | from django.db import models
from django.contrib.auth.models import User
from django import forms
from django.forms import ModelForm, ModelChoiceField
from sorl.thumbnail import ImageField
from django_summernote.widgets import SummernoteWidget
## MODELS ##
class Profile(models.Model):
    """Company profile attached to a user account; job Posts belong to one."""
    user = models.ForeignKey(User)
    companyName = models.CharField(max_length=100)
    description = models.TextField() # rich text, edited via django-summernote
    website = models.CharField(max_length=100)
    contactName = models.CharField(max_length=50)
    contactEmail = models.CharField(max_length=50)
    logo = ImageField(upload_to='media/logos/', blank=True)  # sorl-thumbnail field; optional

    def __unicode__(self):
        return self.companyName
class Subscribe(models.Model):
    """Mailing-list subscriber record (name and email)."""
    name = models.CharField(max_length=50)
    email = models.CharField(max_length=50)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name_plural = 'Subscribers'
class JobType(models.Model):
    """Lookup table of job categories referenced by Post.jobType."""
    title = models.CharField(max_length=30)

    def __unicode__(self):
        return self.title
class Post(models.Model):
    """A job posting published under a company Profile."""
    profile = models.ForeignKey(Profile)
    title = models.CharField(max_length=100)
    description = models.TextField() # rich text, edited via django-summernote
    jobType = models.ForeignKey(JobType)
    wage = models.CharField(max_length=20, blank=True, null=True)  # free-form, optional
    publishDate = models.DateTimeField(auto_now_add=True)  # stamped on creation
    expirationDate = models.DateTimeField()
    active = models.BooleanField(default=True)
    views = models.IntegerField(default=0)  # simple view counter

    def __unicode__(self):
        return self.title
## FORMS ##
class CreateProfileForm(ModelForm):
    """Create/edit form for Profile with a rich-text description widget."""
    # Swap the default textarea for summernote's rich-text editor.
    description = forms.CharField(widget=SummernoteWidget())

    class Meta:
        model = Profile
        fields = ['logo','companyName','description','website','contactName','contactEmail',]

    def __init__(self, *args, **kwargs):
        super(CreateProfileForm, self).__init__(*args, **kwargs)
        # Human-friendly labels for the camelCase model fields.
        labels = {
            'description': "Company Description",
            'companyName': "Company Name",
            'contactName': "Contact Name",
            'contactEmail': "Contact Email",
        }
        for field_name, label in labels.items():
            self.fields[field_name].label = label
class PostForm(ModelForm):
    """Create/edit form for job Posts with a rich-text description widget."""
    # Swap the default textarea for summernote's rich-text editor.
    description = forms.CharField(widget=SummernoteWidget())

    class Meta:
        model = Post
        fields = ['title','description','jobType','wage','expirationDate', 'active',]

    def __init__(self, *args, **kwargs):
        super(PostForm, self).__init__(*args, **kwargs)
        # Human-friendly labels for the camelCase model fields.
        labels = {
            'title': "Job Title",
            'description': "Job Description",
            'jobType': "Job Type",
            'expirationDate': "Expiration Date",
            'active': "Currently Active?",
        }
        for field_name, label in labels.items():
            self.fields[field_name].label = label
class SubscribeForm(ModelForm):
    """Signup form exposing every Subscribe field (name and email).

    Fix: a ModelForm without an explicit ``fields``/``exclude`` raises
    ImproperlyConfigured on Django >= 1.8; ``'__all__'`` preserves the
    original include-everything behaviour.
    """
    class Meta:
        model = Subscribe
        fields = '__all__'
class UnsubscribeForm(ModelForm):
    """Unsubscribe form: identifies the subscriber by email only."""
    class Meta:
        model = Subscribe
        fields = ['email',]
|
990,598 | 8ec09ed647c8aa36b0146796fbe17dc5888630dc | from __future__ import division
import os
import csv
import glob
import math
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
# ---- PARAMETERS TO ADJUST ----
# ---- PARAMETERS TO ADJUST ----
projectname = 'RoundSiO2FineTM'
detectornumber = '2' # Component number for rsoft monitor
filename = 'RoundSiO2FineTMtest.plx'  # output path for the Sentaurus .plx file
## Get all files in the output directory
filenames_to_glob = projectname + '_work/raw/' + projectname + '_*_m' + detectornumber + '_f*_absorption.dat' #FW output
#filenames_to_glob = strcat(projectname,'_work/raw/',projectname,'_*_m',detectornumber,'_absorption.dat') #DM output
filelist = glob.glob(filenames_to_glob)
numfiles = len(filelist)
sortedfiles = [0]*numfiles
## Make a sorted list to work on, rsoft unfortunately uses 0, 1, 2,... 10,
## 11, .. which messes up the order. This fixes it by indexing on the numeric
## file index parsed out of the filename.
for i in xrange(numfiles):
    nameparts = filelist[i].split('_')
    fileidx = nameparts[2] # 1=projectname, 2='work/raw/projectname', 3=fileidx, 4=mdetectoridx, 5=freqidx, 6='absorption.dat'
    idxnum = int(fileidx)
    sortedfiles[idxnum] = filelist[i]
## Collect information on the simulation, like dimensions, mesh grid, etc.
# NOTE(review): grid metadata is read from sortedfiles[1] (the *second* file);
# this assumes >= 2 files and that all files share one grid -- confirm.
idata = pd.read_csv(sortedfiles[1],delim_whitespace=True,header=None,skiprows=4) # RSoft writes 4 header lines
simsize = idata.shape
# Header line 3 holds the x extent: "<label> xmin xmax ..."
xinfo = pd.read_csv(sortedfiles[1],delim_whitespace=True,header=None,skiprows=2,nrows=1)
xmin = xinfo.iloc[0,1]
xmax = xinfo.iloc[0,2]
xs = np.linspace(xmin,xmax,simsize[0])
# Header line 4 holds the y extent the same way.
yinfo = pd.read_csv(sortedfiles[1],delim_whitespace=True,header=None,skiprows=3,nrows=1)
ymin = yinfo.iloc[0,1]
ymax = yinfo.iloc[0,2]
ys = np.linspace(ymin,ymax,simsize[1])
#alldata = np.zeros((simsize[0], simsize[1], numfiles))
# One row of x-averaged absorption per wavelength file.
integrateddata = np.zeros((numfiles,simsize[1]))
wavelengths = np.zeros(numfiles)
## Collect all data
for i in xrange(numfiles):
    data = pd.read_csv(sortedfiles[i],delim_whitespace=True,header=None,skiprows=4) # RSoft writes 4 header lines
    # NOTE(review): this also rebinds xinfo (chained assignment) -- the grid
    # variables above were already consumed, but confirm that is intended.
    textdata = xinfo = pd.read_csv(sortedfiles[i],delim_whitespace=True,header=None,skiprows=2,nrows=1)
    wavelengthstr = textdata.loc[0,5].split('=') # "wavelength = ..."
    wavelength = wavelengthstr[1]
    wavelengths[i] = wavelength
    #wavelengths(i) = 0.3 + (i-1)*.85/99 # for RCWA output only
    #alldata[:,:,i] = data
    integrateddata[i,:] = np.sum(data,0)/simsize[0] #Get averaged absorption across X direction
    print i+1
# Depth axis relative to the top of the simulated domain.
xax = ys-ys[0]
## Setup the spectrum: interpolate the ASTM G173 reference onto our wavelengths
am15gspectrum = pd.read_csv('ASTMG173.csv',sep=',',header=1)
am15gfunc = interp1d(am15gspectrum.iloc[:,0]/1000, am15gspectrum.iloc[:,2])
am15g = am15gfunc(wavelengths)
#am0spectrum = pd.read_csv('ASTMG173.csv',sep=',',header=1)
#%am0func = interp1d(am0spectrum.iloc[:,0]/1000, am0spectrum.iloc[:,1])
#%am0 = am0func(wavelengths)
## Integrate data against spectrum (photon flux weighting: lambda*I/(h*c))
h=6.626e-34 # Js Planck's constant
c=2.998e8 #m/s speed of light
deltaWL = np.mean(np.diff(wavelengths)) # average wavelength step
gax = integrateddata*(np.tile((wavelengths*am15g),(xax.size,1)).transpose())/(h*c)
yax = np.sum(gax,axis=0)*deltaWL/1000
fdtd=np.column_stack((xax,yax))
## Plot generation profile (depth vs. generation rate, log scale)
plt.figure()
plt.semilogy(xax, yax)
plt.show()
## Write the result to Sentaurus PLX format
outdata = pd.DataFrame(data=fdtd) # can't include header as it contains the delimiter (space)
outheader = '\n'.join(
    [unicode(line, 'utf8') for line in
     ['# from Sentaurus', 'Theta = 0 [deg] Intensity = 1.0 [W*cm^-2]\n']
    ]
)
with open(filename, 'w') as ict:
    # Writing the (short) header character by character, then the data table.
    for line in outheader:
        ict.write(line)
    outdata.to_csv(ict,sep=' ',float_format='%6.4e',index=False,header=None) # turn off data indexing and header, header written above directly
990,599 | 2f0a6ad0359e5c37562eec28e5b799e7882d20a5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
from debtcollector import removals
from keystoneauth1 import plugin
from keystoneclient import _discover
from keystoneclient import exceptions
from keystoneclient.i18n import _
from keystoneclient import session as client_session
from keystoneclient.v2_0 import client as v2_client
from keystoneclient.v3 import client as v3_client
# Maps a major identity API version number to the client class that speaks it.
_CLIENT_VERSIONS = {2: v2_client.Client,
                    3: v3_client.Client}
# functions needed from the private file that can be made public
def normalize_version_number(version):
    """Convert a version representation into a ``(major, minor)`` tuple.

    Accepts the version formats we can handle -- a string such as ``'v3.3'``,
    a float such as ``3.1``, or an existing tuple -- and produces the
    canonical two-element tuple used for discovery.

    e.g. 'v3.3' gives (3, 3)
         3.1 gives (3, 1)

    :param version: version value to try and convert.
    :returns: a usable ``(major, minor)`` version tuple.
    :rtype: tuple
    :raises TypeError: if the given version cannot be converted to a tuple.
    """
    return _discover.normalize_version_number(version)
def version_match(required, candidate):
    """Check whether an available version satisfies the required one.

    A candidate is suitable when it shares the required major version and is
    at least a match in minor/patch level -- e.g. 3.3 satisfies a required
    3.1, while 4.1 does not.

    :param tuple required: the version that must be met.
    :param tuple candidate: the version being tested against required.
    :returns: True if candidate is suitable, False otherwise.
    :rtype: bool
    """
    return _discover.version_match(required, candidate)
def available_versions(url, session=None, **kwargs):
    """Retrieve the raw version document published at *url*.

    When no session is supplied, one is constructed from *kwargs* (the
    deprecated, session-less calling convention).
    """
    session = session or client_session.Session._construct(kwargs)
    return _discover.get_version_data(session, url)
class Discover(_discover.Discover):
    """A means to discover and create clients.

    Clients are created depending on the supported API versions on the server.

    Querying the server is done on object creation and every subsequent method
    operates upon the data that was retrieved.

    The connection parameters associated with this method are the same format
    and name as those used by a client (see
    :py:class:`keystoneclient.v2_0.client.Client` and
    :py:class:`keystoneclient.v3.client.Client`). If not overridden in
    subsequent methods they will also be what is passed to the constructed
    client.

    In the event that auth_url and endpoint is provided then auth_url will be
    used in accordance with how the client operates.

    .. warning::

        Creating an instance of this class without using the session argument
        is deprecated as of the 1.7.0 release and may be removed in the 2.0.0
        release.

    :param session: A session object that will be used for communication.
        Clients will also be constructed with this session.
    :type session: keystoneclient.session.Session
    :param string auth_url: Identity service endpoint for authorization.
        (optional)
    :param string endpoint: A user-supplied endpoint URL for the identity
        service. (optional)
    :param string original_ip: The original IP of the requesting user which
        will be sent to identity service in a 'Forwarded' header. (optional)
        This is ignored if a session is provided. Deprecated as of the 1.7.0
        release and may be removed in the 2.0.0 release.
    :param boolean debug: Enables debug logging of all request and responses to
        the identity service. default False (optional) This is ignored if a
        session is provided. Deprecated as of the 1.7.0 release and may be
        removed in the 2.0.0 release.
    :param string cacert: Path to the Privacy Enhanced Mail (PEM) file which
        contains the trusted authority X.509 certificates needed to
        established SSL connection with the identity service. (optional) This
        is ignored if a session is provided. Deprecated as of the 1.7.0
        release and may be removed in the 2.0.0 release.
    :param string key: Path to the Privacy Enhanced Mail (PEM) file which
        contains the unencrypted client private key needed to established
        two-way SSL connection with the identity service. (optional) This is
        ignored if a session is provided. Deprecated as of the 1.7.0 release
        and may be removed in the 2.0.0 release.
    :param string cert: Path to the Privacy Enhanced Mail (PEM) file which
        contains the corresponding X.509 client certificate needed to
        established two-way SSL connection with the identity service.
        (optional) This is ignored if a session is provided. Deprecated as of
        the 1.7.0 release and may be removed in the 2.0.0 release.
    :param boolean insecure: Does not perform X.509 certificate validation
        when establishing SSL connection with identity service. default:
        False (optional) This is ignored if a session is provided. Deprecated
        as of the 1.7.0 release and may be removed in the 2.0.0 release.
    :param bool authenticated: Should a token be used to perform the initial
        discovery operations. default: None (attach a token if an auth
        plugin is available).
    """

    def __init__(self, session=None, authenticated=None, **kwargs):
        # Session-less construction is the deprecated legacy path: build a
        # session from the loose kwargs, but warn the caller.
        if not session:
            warnings.warn(
                'Constructing a Discover instance without using a session is '
                'deprecated as of the 1.7.0 release and may be removed in the '
                '2.0.0 release.', DeprecationWarning)
            session = client_session.Session._construct(kwargs)
        kwargs['session'] = session

        # Resolve the URL to discover against, preferring an explicit
        # endpoint, then an explicit auth_url, then the session's auth plugin.
        # _use_endpoint records which form was given so _create_client can
        # hand the discovered URL back to the client under the same name.
        url = None
        endpoint = kwargs.pop('endpoint', None)
        auth_url = kwargs.pop('auth_url', None)

        if endpoint:
            self._use_endpoint = True
            url = endpoint
        elif auth_url:
            self._use_endpoint = False
            url = auth_url
        elif session.auth:
            self._use_endpoint = False
            url = session.get_endpoint(interface=plugin.AUTH_INTERFACE)

        if not url:
            raise exceptions.DiscoveryFailure(
                _('Not enough information to determine URL. Provide'
                  ' either a Session, or auth_url or endpoint'))

        # Stash the remaining kwargs so constructed clients inherit them.
        self._client_kwargs = kwargs
        super(Discover, self).__init__(session, url,
                                       authenticated=authenticated)

    @removals.remove(message='Use raw_version_data instead.', version='1.7.0',
                     removal_version='2.0.0')
    def available_versions(self, **kwargs):
        """Return a list of identity APIs available on the server.

        The list returned includes the data associated with them.

        .. warning::

            This method is deprecated as of the 1.7.0 release in favor of
            :meth:`raw_version_data` and may be removed in the 2.0.0 release.

        :param bool unstable: Accept endpoints not marked 'stable'. (optional)
            Equates to setting allow_experimental and allow_unknown to True.
        :param bool allow_experimental: Allow experimental version endpoints.
        :param bool allow_deprecated: Allow deprecated version endpoints.
        :param bool allow_unknown: Allow endpoints with an unrecognised
            status.

        :returns: A List of dictionaries as presented by the server. Each dict
            will contain the version and the URL to use for the version. It
            is a direct representation of the layout presented by the
            identity API.
        """
        return self.raw_version_data(**kwargs)

    @removals.removed_kwarg(
        'unstable',
        message='Use allow_experimental and allow_unknown instead.',
        version='1.7.0', removal_version='2.0.0')
    def raw_version_data(self, unstable=False, **kwargs):
        """Get raw version information from URL.

        Raw data indicates that only minimal validation processing is
        performed on the data, so what is returned here will be the data in
        the same format it was received from the endpoint.

        :param bool unstable: equates to setting allow_experimental and
            allow_unknown. This argument is deprecated as of the 1.7.0
            release and may be removed in the 2.0.0 release.
        :param bool allow_experimental: Allow experimental version endpoints.
        :param bool allow_deprecated: Allow deprecated version endpoints.
        :param bool allow_unknown: Allow endpoints with an unrecognised
            status.

        :returns: The endpoints returned from the server that match the
            criteria.
        :rtype: List

        Example::

            >>> from keystoneclient import discover
            >>> disc = discover.Discover(auth_url='http://localhost:5000')
            >>> disc.raw_version_data()
                [{'id': 'v3.0',
                    'links': [{'href': 'http://127.0.0.1:5000/v3/',
                               'rel': 'self'}],
                  'media-types': [
                      {'base': 'application/json',
                       'type': 'application/vnd.openstack.identity-v3+json'},
                      {'base': 'application/xml',
                       'type': 'application/vnd.openstack.identity-v3+xml'}],
                  'status': 'stable',
                  'updated': '2013-03-06T00:00:00Z'},
                 {'id': 'v2.0',
                  'links': [{'href': 'http://127.0.0.1:5000/v2.0/',
                             'rel': 'self'},
                            {'href': '...',
                             'rel': 'describedby',
                             'type': 'application/pdf'}],
                  'media-types': [
                      {'base': 'application/json',
                       'type': 'application/vnd.openstack.identity-v2.0+json'},
                      {'base': 'application/xml',
                       'type': 'application/vnd.openstack.identity-v2.0+xml'}],
                  'status': 'stable',
                  'updated': '2013-03-06T00:00:00Z'}]
        """
        # Legacy 'unstable' flag expands to the two fine-grained flags.
        if unstable:
            kwargs.setdefault('allow_experimental', True)
            kwargs.setdefault('allow_unknown', True)

        return super(Discover, self).raw_version_data(**kwargs)

    def _calculate_version(self, version, unstable):
        """Pick the version data to use: the requested one, or the latest.

        :raises keystoneclient.exceptions.VersionNotAvailable: if no endpoint
            matches.
        """
        version_data = None

        if version:
            version_data = self.data_for(version)
        else:
            # if no version specified pick the latest one
            all_versions = self.version_data(unstable=unstable)
            if all_versions:
                version_data = all_versions[-1]

        if not version_data:
            msg = _('Could not find a suitable endpoint')

            if version:
                msg = _('Could not find a suitable endpoint for client '
                        'version: %s') % str(version)

            raise exceptions.VersionNotAvailable(msg)

        return version_data

    def _create_client(self, version_data, **kwargs):
        """Instantiate the client class that matches *version_data*."""
        # Get the client for the version requested that was returned
        try:
            client_class = _CLIENT_VERSIONS[version_data['version'][0]]
        except KeyError:
            version = '.'.join(str(v) for v in version_data['version'])
            msg = _('No client available for version: %s') % version
            raise exceptions.DiscoveryFailure(msg)

        # kwargs should take priority over stored kwargs.
        for k, v in self._client_kwargs.items():
            kwargs.setdefault(k, v)

        # restore the url to either auth_url or endpoint depending on what
        # was initially given
        if self._use_endpoint:
            kwargs['auth_url'] = None
            kwargs['endpoint'] = version_data['url']
        else:
            kwargs['auth_url'] = version_data['url']
            kwargs['endpoint'] = None

        return client_class(**kwargs)

    def create_client(self, version=None, unstable=False, **kwargs):
        """Factory function to create a new identity service client.

        :param tuple version: The required version of the identity API. If
            specified the client will be selected such that the major version
            is equivalent and an endpoint provides at least the specified
            minor version. For example to specify the 3.1 API use (3, 1).
            (optional)
        :param bool unstable: Accept endpoints not marked 'stable'. (optional)
        :param kwargs: Additional arguments will override those provided to
            this object's constructor.

        :returns: An instantiated identity client object.

        :raises keystoneclient.exceptions.DiscoveryFailure: if the server
            response is invalid.
        :raises keystoneclient.exceptions.VersionNotAvailable: if a suitable
            client cannot be found.
        """
        version_data = self._calculate_version(version, unstable)
        return self._create_client(version_data, **kwargs)
def add_catalog_discover_hack(service_type, old, new):
    """Add a version removal rule for a particular service.

    Originally deployments of OpenStack would contain a versioned endpoint in
    the catalog for different services. E.g. an identity service might look
    like ``http://localhost:5000/v2.0``. This is a problem when we want to use
    a different version like v3.0 as there is no way to tell where it is
    located. We cannot simply change all service catalogs either so there must
    be a way to handle the older style of catalog.

    This function adds a rule for a given service type that if part of the URL
    matches a given regular expression in *old* then it will be replaced with
    the *new* value. This will replace all instances of old with new. It
    should therefore contain a regex anchor.

    For example the included rule states::

        add_catalog_discover_hack('identity', re.compile('/v2.0/?$'), '/')

    so if the catalog retrieves an *identity* URL that ends with /v2.0 or
    /v2.0/ then it should replace it simply with / to fix the user's catalog.

    :param str service_type: The service type as defined in the catalog that
        the rule will apply to.
    :param re.RegexObject old: The regular expression to search for and
        replace if found.
    :param str new: The new string to replace the pattern with.
    """
    _discover._VERSION_HACKS.add_discover_hack(service_type, old, new)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.