id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
338168 | <gh_stars>0
# Copyright 2021 RangiLyu.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import cv2
import numpy as np
def random_brightness(img, delta):
    """Shift image brightness in place by a random offset drawn from [-delta, delta]."""
    offset = random.uniform(-delta, delta)
    img += offset
    return img
def random_contrast(img, alpha_low, alpha_up):
    """Scale image contrast in place by a random factor drawn from [alpha_low, alpha_up]."""
    factor = random.uniform(alpha_low, alpha_up)
    img *= factor
    return img
def random_saturation(img, alpha_low, alpha_up):
    """Scale the HSV saturation channel by a random factor in [alpha_low, alpha_up].

    The image is converted BGR -> HSV, the S channel is scaled, and the result
    is converted back to BGR. Returns a new float32 image (the input is not
    modified in place).
    """
    hsv_img = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_BGR2HSV)
    # Channel 1 of HSV is saturation.
    hsv_img[..., 1] *= random.uniform(alpha_low, alpha_up)
    img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)
    return img
def normalize(meta, mean, std):
    """Standardize meta['img'] with per-channel mean/std and store it back.

    mean/std are per-channel sequences; the division is implemented as a
    multiplication by the precomputed reciprocal of std.
    """
    img = meta["img"].astype(np.float32)
    mean = np.array(mean, dtype=np.float64).reshape(1, -1)
    stdinv = 1 / np.array(std, dtype=np.float64).reshape(1, -1)
    # cv2.subtract / cv2.multiply write the result into `img` (third argument),
    # so the normalization happens in place on the float32 copy.
    cv2.subtract(img, mean, img)
    cv2.multiply(img, stdinv, img)
    meta["img"] = img
    return meta
def _normalize(img, mean, std):
mean = np.array(mean, dtype=np.float32).reshape(1, 1, 3) / 255
std = np.array(std, dtype=np.float32).reshape(1, 1, 3) / 255
img = (img - mean) / std
return img
def random_GasussNoise(img, std):
    """Add zero-mean Gaussian noise with sigma drawn uniformly from [0, std],
    then clip the result to [0, 1].

    std: [0, std]
    """
    sigma = random.uniform(0, std)
    img += np.random.normal(scale=sigma, size=img.shape)
    return img.clip(0, 1)
def random_AverageBlur(img, k):
    """
    Box-blur the image with a random square averaging kernel.

    k: kernel size upper bound — the actual size is drawn from (2, 2) ~ (k, k)
    """
    k = random.randint(2, k)
    # Uniform averaging kernel: every tap weighted 1/(k*k).
    kernel = np.ones((k, k), np.float32) / (k * k)
    # -1 keeps the output depth equal to the input depth.
    img = cv2.filter2D(img, -1, kernel)
    return img
def color_aug_and_norm(meta, kwargs):
    """Randomly apply the color augmentations enabled in `kwargs` to meta['img'],
    then normalize the result.

    Each augmentation present in `kwargs` is applied with probability 1/2
    (random.randint(0, 1)). 'normalize' must always be present in `kwargs`.
    """
    # Work in float [0, 1] space for all augmentations.
    img = meta["img"].astype(np.float32) / 255
    if "brightness" in kwargs and random.randint(0, 1):
        img = random_brightness(img, kwargs["brightness"])
    if "contrast" in kwargs and random.randint(0, 1):
        img = random_contrast(img, *kwargs["contrast"])
    if "saturation" in kwargs and random.randint(0, 1):
        img = random_saturation(img, *kwargs["saturation"])
    if 'GasussNoise' in kwargs and random.randint(0, 1):
        img = random_GasussNoise(img, kwargs['GasussNoise'])
    if 'AverageBlur' in kwargs and random.randint(0, 1):
        img = random_AverageBlur(img, kwargs['AverageBlur'])
    # cv2.imshow('trans', img)
    # cv2.waitKey(0)
    img = _normalize(img, *kwargs["normalize"])
    meta["img"] = img
    return meta
| StarcoderdataPython |
5107176 | <reponame>bavard-ai/bavard-ml-utils
import time
import typing as t
from abc import ABC, abstractmethod
from fastapi import HTTPException, status
from loguru import logger
from bavard_ml_utils.persistence.record_store.base import BaseRecordStore, Record
from bavard_ml_utils.types.utils import hash_model
class ServiceVersionMetadata(Record):
    """Database record tracking when a given service version last synced its artifacts."""

    name: str
    synced_at: float
    """Time the service version was most recently synced."""

    def get_id(self) -> str:
        # The service version name uniquely identifies this record.
        return self.name

    def get_sort_key(self):
        # Records sort by most recent sync time.
        return self.synced_at
class BaseDatasetRecord(Record):
    """
    Base class for a generic dataset object. Supports versioning of the dataset (via its :attr:`digest` attribute). Used
    to create :class:`BaseArtifactRecord` objects from. Override with additional attributes to store the actual dataset
    data in this object.
    """

    agent_id: str
    updated_at: float
    digest: t.Optional[str]
    """An automatically generated hash of this record."""

    def __init__(self, **data):
        """Custom constructor. Includes a post-init step to update the record's digest."""
        super().__init__(**data)
        self.digest = self.compute_digest()

    def compute_digest(self):
        """
        Creates a deterministic hash of the dataset. This is needed so we know which dataset version(s) a service
        already has artifacts computed for. Without a hash, we couldn't easily know which version of a dataset an
        artifact was computed for.
        """
        # `updated_at` and `digest` itself are excluded so the hash depends only
        # on the dataset's actual content.
        return hash_model(self, exclude={"updated_at", "digest"})

    def get_id(self) -> str:
        # One dataset record per agent.
        return self.agent_id

    def get_sort_key(self):
        return self.updated_at
class BaseArtifactRecord(Record):
    """
    Base class for an artifact produced by a versioned dataset and a versioned model. Override with additional
    attributes to store the actual artifact data in this object.
    """

    agent_id: str
    dataset_digest: str
    """The hash of the dataset that was used to produce this artifact."""
    service_version: str
    """The version of the model that produced this artifact."""
    updated_at: float

    def get_id(self) -> str:
        return self.make_id(self.service_version, self.agent_id)

    def get_sort_key(self):
        # Artifacts are not ordered.
        return None

    @staticmethod
    def make_id(service_version: str, agent_id: str):
        """
        We have an artifact's database ID be the concatenation of the service version that created it, and the
        agent that the artifact is for. We do this because for a given service version, we only need to persist one
        artifact version per agent at a time.
        """
        return f"{service_version}-{agent_id}"
class BaseArtifactManager(ABC):
    """
    Abstract base class which provides a simple API for creating, persisting, and managing artifacts produced by a
    versioned machine learning (ML) model. Each artifact is associated with an agent. An artifact is the deterministic
    product of a specific ML model version, and a specific version of a dataset.
    """

    def __init__(
        self,
        artifacts: BaseRecordStore[BaseArtifactRecord],
        datasets: BaseRecordStore[BaseDatasetRecord],
        versions: BaseRecordStore[ServiceVersionMetadata],
        version: str,
        *,
        max_service_versions=5,
    ):
        # The ML model version this manager instance serves.
        self.version = version
        # Only this many most-recently-synced service versions are retained;
        # see _remove_old_service_versions.
        self._max_service_versions = max_service_versions
        self._artifacts = artifacts
        self._datasets = datasets
        self._versions = versions

    @abstractmethod
    def create_artifact_from_dataset(self, dataset: BaseDatasetRecord) -> BaseArtifactRecord:
        """
        Implementing subclasses should take `dataset`, and the model associated with version `self.version`, and
        produce an artifact.
        """
        pass

    def delete_artifact(self, agent_id: str):
        """
        Deletes from the database a dataset and all artifacts associated with it, if they exist. Returns the number of
        total database records that were deleted.
        """
        num_deleted = int(self._datasets.delete(agent_id))
        num_deleted += self._artifacts.delete_all(agent_id=agent_id)  # get rid of any old versions as well
        return num_deleted

    def save_artifact(self, artifact: BaseArtifactRecord, dataset: BaseDatasetRecord):
        """
        Serializes and saves the artifact. Also saves the dataset that was used to create the artifact, so this
        manager can recreate the artifact at any time if needed.
        """
        self._artifacts.save(artifact)
        self._datasets.save(dataset)

    def load_artifact(self, agent_id: str) -> BaseArtifactRecord:
        """
        Load an agent's artifact from the database. If the agent's dataset exists but its artifact doesn't, then compute
        it, save it (so its available next time), and then return it.

        Raises an HTTP 404 if no dataset exists for the agent at all.
        """
        dataset = self._datasets.get(agent_id)
        if dataset is None:
            raise HTTPException(
                status.HTTP_404_NOT_FOUND,
                f"no dataset exists for agent id {agent_id}; artifact cannot be retrieved or computed",
            )
        artifact = self._artifacts.get(BaseArtifactRecord.make_id(self.version, agent_id))
        # The stored artifact is only valid if it was built from the current
        # dataset content (digests match).
        if artifact is not None and artifact.dataset_digest == dataset.digest:
            return artifact
        else:
            logger.info(
                f"artifact for agent {agent_id} does not exist for service version {self.version} and dataset "
                f"digest {dataset.digest}; creating it now"
            )
            # Artifact has not yet been created for this dataset version and service version.
            artifact = self.create_artifact_from_dataset(dataset)
            self.save_artifact(artifact, dataset)
            return artifact

    def sync(self) -> int:
        """
        Ensures this service version has its own artifact for all currently saved datasets. Returns the number of
        artifacts that had to be created for that to happen.
        """
        digest2agent = {dataset.digest: dataset.agent_id for dataset in self._datasets.get_all()}
        all_dataset_digests = set(digest2agent.keys())
        datasets_currently_indexed = {
            artifact.dataset_digest for artifact in self._artifacts.get_all(service_version=self.version)
        }
        # Datasets that have no artifact for this service version yet.
        datasets_to_index = all_dataset_digests - datasets_currently_indexed
        logger.info(
            f"creating artifact for {len(datasets_to_index)}/{len(all_dataset_digests)} "
            f"existing datasets for service version {self.version}"
        )
        for digest in datasets_to_index:
            dataset = self._datasets.get(digest2agent[digest])
            if dataset is None:
                # The dataset was deleted between get_all() and this get().
                raise AssertionError(
                    f"Expected dataset to exist for agent {digest2agent[digest]}. It existed just a little bit ago."
                )
            logger.info(
                f"creating artifact for dataset digest={dataset.digest} associated with agent {digest2agent[digest]}"
            )
            artifact = self.create_artifact_from_dataset(dataset)
            self.save_artifact(artifact, dataset)
        # Record the sync time so old service versions can be aged out.
        self._versions.save(ServiceVersionMetadata(name=self.version, synced_at=time.time()))
        logger.info("service version sync utility finished successfully.")
        self._remove_old_service_versions()
        return len(datasets_to_index)

    def _remove_old_service_versions(self):
        """
        Removes all artifacts for any old service versions. We only keep data for the `self._max_service_versions` most
        recent service versions. The old versions are the ones that have been synced least recently.
        """
        versions = list(self._versions.get_all())
        n_to_remove = len(versions) - self._max_service_versions
        if n_to_remove > 0:
            logger.info(f"removing data for {n_to_remove} old service versions")
            # Remove the oldest versions.
            versions_to_remove = sorted(versions, key=lambda v: v.synced_at)[:n_to_remove]
            for version in versions_to_remove:
                self._artifacts.delete_all(service_version=version.name)
                self._versions.delete(version.get_id())
| StarcoderdataPython |
import enum
from sims4.tuning.dynamic_enum import DynamicEnum
class BouncerRequestStatus(enum.Int, export=False):
    # States a bouncer request moves through, from creation to teardown.
    INITIALIZED = 0
    SUBMITTED = 1
    SIM_FILTER_SERVICE = 2
    SPAWN_REQUESTED = 3
    FULFILLED = 4
    DESTROYED = 5
class BouncerRequestPriority(enum.Int):
    # Priority buckets for bouncer requests. NOTE(review): the names suggest
    # lower numeric value = more urgent (GAME_BREAKER = 0) — confirm against
    # the bouncer's sorting logic before relying on this.
    GAME_BREAKER = 0
    EVENT_VIP = 1
    EVENT_HOSTING = 2
    VENUE_REQUIRED = 3
    EVENT_AUTO_FILL = 4
    BACKGROUND_HIGH = 5
    BACKGROUND_MEDIUM = 6
    BACKGROUND_LOW = 7
    EVENT_DEFAULT_JOB = 8
    LEAVE = 9
class RequestSpawningOption(enum.Int):
    # Spawning constraint attached to a bouncer request.
    MUST_SPAWN = 1
    CANNOT_SPAWN = 2
    DONT_CARE = 3
class BouncerExclusivityCategory(enum.IntFlags):
    # Bit flags (each value a distinct power of two), so multiple categories
    # can be combined with bitwise OR.
    LEAVE = 2
    NORMAL = 4
    WALKBY = 8
    SERVICE = 16
    VISIT = 32
    LEAVE_NOW = 64
    UNGREETED = 128
    PRE_VISIT = 256
    WORKER = 512
    NEUTRAL = 1024
    VENUE_EMPLOYEE = 2048
    VENUE_BACKGROUND = 4096
    CLUB_GATHERING = 8192
    FESTIVAL_BACKGROUND = 16384
    FESTIVAL_GOER = 32768
    WALKBY_SNATCHER = 65536
    CAREGIVER = 131072
    FIRE = 262144
    NON_WALKBY_BACKGROUND = 524288
    VENUE_GOER = 1048576
    SQUAD = 2097152
    INFECTED = 4194304
    NEUTRAL_UNPOSSESSABLE = 8388608
    NORMAL_UNPOSSESSABLE = 16777216
    ROOMMATE = 33554432
    FIRE_BRIGADE = 67108864
class BouncerExclusivityOption(enum.Int):
    # How an exclusivity conflict between requests is handled.
    NONE = 0
    EXPECTATION_PREFERENCE = 1
    ERROR = 2
    ALREADY_ASSIGNED = 3
| StarcoderdataPython |
157046 | <reponame>samlowe106/Saved-Sorter-For-Reddit<gh_stars>1-10
import unittest
""" Unless otherwise stated, none of the URLs used for testing should be dead. """
class TestURLS(unittest.TestCase):
    """Verifies that the URLs module works as intended.

    NOTE(review): the three test methods below contain only docstrings — no
    assertions are actually executed; the real test bodies live in the
    commented-out string at the bottom of the class and reference helpers
    (urls, requests, sandbox) that are not imported here.
    """

    def test_determine_name(self):
        """Verifies that determine_name works as intended"""

    def test_download(self):
        """Verifies that determine_name works as intended"""

    def test_get_extension(self):
        """Verifies that determine_name works as intended"""

    """
    def test_get_extension(self):
        url_extension = [
            ("https://cdnb.artstation.com/p/assets/images/images/026/326/667/large/eddie-mendoza-last-train.jpg?1588487140", ".jpeg"),
            ("https://cdnb.artstation.com/p/assets/images/images/026/292/247/large/jessie-lam-acv-ladyeivortweaks.jpg?1588382308", ".jpeg"),
            ("https://i.imgur.com/l8EbNfy.jpg", ".jpeg"),
            ("https://i.imgur.com/oPXMrnr.png", ".png"),
            ("https://i.imgur.com/GeZmbJA.png", ".png")
        ]
        for url, extension in url_extension:
            r = requests.get(url)
            if r.status_code == 200:
                self.assertEqual(extension, urls.get_extension(r))

    @sandbox(output_dir := "test_dir")
    def test_download_image(self):
        # Download files as .jpgs
        urls.download_image("https://i.redd.it/qqgds2i3ueh31.jpg", "a", output_dir, png=False)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "a.jpeg")))
        urls.download_image("https://i.redd.it/mfqm1x49akgy.jpg", "b", output_dir, png=False)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "b.jpeg")))
        urls.download_image("https://i.redd.it/jh0gfb3ktvkz.jpg", "c", output_dir, png=False)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "c.jpeg")))
        urls.download_image("https://i.imgur.com/ppqan5G.jpg", "d", output_dir, png=False)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "d.jpeg")))
        urls.download_image("https://i.imgur.com/CS8QhJG.jpg", "e", output_dir, png=False)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "e.jpeg")))
        urls.download_image("https://i.imgur.com/B6HPXkk.jpg", "f", output_dir, png=False)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "f.jpeg")))
        # Download files as .pngs
        urls.download_image("https://i.redd.it/qqgds2i3ueh31.jpg", "a", output_dir, png=True)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "a.png")))
        urls.download_image("https://i.redd.it/mfqm1x49akgy.jpg", "b", output_dir, png=True)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "b.png")))
        urls.download_image("https://i.redd.it/jh0gfb3ktvkz.jpg", "c", output_dir, png=True)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "c.png")))
        urls.download_image("https://i.imgur.com/ppqan5G.jpg", "d", output_dir, png=True)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "d.png")))
        urls.download_image("https://i.imgur.com/CS8QhJG.jpg", "e", output_dir, png=True)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "e.png")))
        urls.download_image("https://i.imgur.com/B6HPXkk.jpg", "f", output_dir, png=True)
        self.assertTrue(os.path.isfile(os.path.join(output_dir, "f.png")))
    """
3349844 | <reponame>antoine-spahr/MNIST-classification-LeNet5<filename>src/dataset/MNISTDataset.py<gh_stars>0
import torchvision
import torchvision.transforms as tf
import torch.utils.data
import PIL.Image
class MNISTDataset(torch.utils.data.Dataset):
    """
    Define a MNIST dataset (and variants) that returns the data, target and index.
    """

    def __init__(self, dataset_name, data_path, train=True, data_augmentation=True):
        """
        Build a MNIST dataset.
        ----------
        INPUT
        |---- dataset_name (str) the dataset to load : only MNIST,
        |           FashionMNIST, KMNIST or QMNIST are supported.
        |---- data_path (str) where to find the data.
        |---- train (bool) whether the train set is used.
        |---- data_augmentation (bool) whether data_augmentation is performed
        |           (random rotation, scale, translation and brightness changes)
        """
        if data_augmentation:
            # Random affine jitter (rotation up to 15 deg, small translation,
            # scaling) plus brightness jitter, then conversion to tensor.
            transform = tf.Compose([tf.RandomAffine(15, translate=(0.05, 0.05),
                                                    scale=(0.8, 1.2),
                                                    resample=PIL.Image.BILINEAR),
                                    tf.ColorJitter(brightness=(0.8, 1.2)),
                                    tf.ToTensor()])
        else:
            transform = tf.ToTensor()

        # Datasets are downloaded on demand into data_path.
        if dataset_name == 'MNIST':
            self.dataset = torchvision.datasets.MNIST(data_path, train=train,
                                                      download=True, transform=transform)
        elif dataset_name == 'FashionMNIST':
            self.dataset = torchvision.datasets.FashionMNIST(data_path, train=train,
                                                             download=True, transform=transform)
        elif dataset_name == 'KMNIST':
            self.dataset = torchvision.datasets.KMNIST(data_path, train=train,
                                                       download=True, transform=transform)
        elif dataset_name == 'QMNIST':
            self.dataset = torchvision.datasets.QMNIST(data_path, train=train,
                                                       download=True, transform=transform)
        else:
            raise ValueError('Non-supported dataset name')

    def __len__(self):
        """
        Return the len of the dataset.
        """
        return len(self.dataset)

    def __getitem__(self, index):
        """
        Redefine getitem function to recover indices.
        """
        # Returns (data, target, index) instead of the usual (data, target).
        data, target = self.dataset[index]
        return data, target, index
| StarcoderdataPython |
381205 | <reponame>vabkar8/questionpapergen<filename>app.py
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from flaskapp import create_app
from flaskapp.config import DevelopmentConfig
# Initialize Sentry crash reporting with the Flask integration before the app
# is created, so request handling is instrumented from the start.
sentry_sdk.init(
    dsn="https://1fdf413ccfcc4a249f79519bfc269965@o374456.ingest.sentry.io/5192531",
    integrations=[FlaskIntegration()],
)
# NOTE(review): DevelopmentConfig is hard-coded here — confirm this module is
# not used as the production entry point.
app = create_app(config_class=DevelopmentConfig)
@app.after_request
def add_header(response):
    """
    Attach response headers on every request: force the latest IE rendering
    engine (or Chrome Frame), and make clients revalidate instead of caching.
    """
    headers = response.headers
    headers["X-UA-Compatible"] = "IE=Edge,chrome=1"
    headers["Cache-Control"] = "must-revalidate, max-age=0"
    return response
if __name__ == "__main__":
app.run()
| StarcoderdataPython |
8111848 | <filename>third_party/zhon/tests/test-pinyin.py
"""Tests for the zhon.pinyin module."""
import random
import re
import unittest
from zhon import pinyin
NUM_WORDS = 50 # Number of random words to test
WORD_LENGTH = 4 # Length of random words (number of syllables)
NUM_SENT = 10 # Number of random sentences to test
SENT_LENGTH = 5 # Length of random sentences (number of words)
VALID_SYLS = ( # 411 total syllables, including 'r'
'ba', 'pa', 'ma', 'fa', 'da', 'ta', 'na', 'la', 'ga', 'ka', 'ha', 'za',
'ca', 'sa', 'zha', 'cha', 'sha', 'a', 'bo', 'po', 'mo', 'fo', 'yo', 'lo',
'o', 'me', 'de', 'te', 'ne', 'le', 'ge', 'ke', 'he', 'ze', 'ce', 'se',
'zhe', 'che', 'she', 're', 'e', 'bai', 'pai', 'mai', 'dai', 'tai',
'nai', 'lai', 'gai', 'kai', 'hai', 'zai', 'cai', 'sai', 'zhai', 'chai',
'shai', 'ai', 'bei', 'pei', 'mei', 'fei', 'dei', 'tei', 'nei', 'lei',
'gei', 'kei', 'hei', 'zei', 'zhei', 'shei', 'ei', 'bao', 'pao', 'mao',
'dao', 'tao', 'nao', 'lao', 'gao', 'kao', 'hao', 'zao', 'cao', 'sao',
'zhao', 'chao', 'shao', 'rao', 'ao', 'pou', 'mou', 'fou', 'dou', 'tou',
'nou', 'lou', 'gou', 'kou', 'hou', 'zou', 'cou', 'sou', 'zhou', 'chou',
'shou', 'rou', 'ou', 'ban', 'pan', 'man', 'fan', 'dan', 'tan', 'nan',
'lan', 'gan', 'kan', 'han', 'zan', 'can', 'san', 'zhan', 'chan',
'shan', 'ran', 'an', 'bang', 'pang', 'mang', 'fang', 'dang', 'tang',
'nang', 'lang', 'gang', 'kang', 'hang', 'zang', 'cang', 'sang',
'zhang', 'chang', 'shang', 'rang', 'ang', 'ben', 'pen', 'men', 'fen',
'den', 'nen', 'gen', 'ken', 'hen', 'zen', 'cen', 'sen', 'zhen', 'chen',
'shen', 'ren', 'en', 'beng', 'peng', 'meng', 'feng', 'deng', 'teng',
'neng', 'leng', 'geng', 'keng', 'heng', 'zeng', 'ceng', 'seng',
'zheng', 'cheng', 'sheng', 'reng', 'eng', 'dong', 'tong', 'nong',
'long', 'gong', 'kong', 'hong', 'zong', 'cong', 'song', 'zhong',
'chong', 'rong', 'bu', 'pu', 'mu', 'fu', 'du', 'tu', 'nu', 'lu',
'gu', 'ku', 'hu', 'zu', 'cu', 'su', 'zhu', 'chu', 'shu', 'ru', 'wu',
'gua', 'kua', 'hua', 'zhua', 'chua', 'shua', 'rua', 'wa', 'duo', 'tuo',
'nuo', 'luo', 'guo', 'kuo', 'huo', 'zuo', 'cuo', 'suo', 'zhuo', 'chuo',
'shuo', 'ruo', 'wo', 'guai', 'kuai', 'huai', 'zhuai', 'chuai', 'shuai',
'wai', 'dui', 'tui', 'gui', 'kui', 'hui', 'zui', 'cui', 'sui', 'zhui',
'chui', 'shui', 'rui', 'wei', 'duan', 'tuan', 'nuan', 'luan', 'guan',
'kuan', 'huan', 'zuan', 'cuan', 'suan', 'zhuan', 'chuan', 'shuan',
'ruan', 'wan', 'guang', 'kuang', 'huang', 'zhuang', 'chuang', 'shuang',
'wang', 'dun', 'tun', 'nun', 'lun', 'gun', 'kun', 'hun', 'zun', 'cun',
'sun', 'zhun', 'chun', 'shun', 'run', 'wen', 'weng', 'bi', 'pi', 'mi',
'di', 'ti', 'ni', 'li', 'zi', 'ci', 'si', 'zhi', 'chi', 'shi', 'ri',
'ji', 'qi', 'xi', 'yi', 'dia', 'lia', 'jia', 'qia', 'xia', 'ya', 'bie',
'pie', 'mie', 'die', 'tie', 'nie', 'lie', 'jie', 'qie', 'xie', 'ye',
'biao', 'piao', 'miao', 'diao', 'tiao', 'niao', 'liao', 'jiao', 'qiao',
'xiao', 'yao', 'miu', 'diu', 'niu', 'liu', 'jiu', 'qiu', 'xiu', 'you',
'bian', 'pian', 'mian', 'dian', 'tian', 'nian', 'lian', 'jian', 'qian',
'xian', 'yan', 'niang', 'liang', 'jiang', 'qiang', 'xiang', 'yang',
'bin', 'pin', 'min', 'nin', 'lin', 'jin', 'qin', 'xin', 'yin', 'bing',
'ping', 'ming', 'ding', 'ting', 'ning', 'ling', 'jing', 'qing', 'xing',
'ying', 'jiong', 'qiong', 'xiong', 'yong', 'nü', 'lü', 'ju', 'qu',
'xu', 'yu', 'nüe', 'lüe', 'jue', 'que', 'xue', 'yue', 'juan', 'quan',
'xuan', 'yuan', 'jun', 'qun', 'xun', 'yun', 'er', 'r'
)
SYL = re.compile(pinyin.syllable)
A_SYL = re.compile(pinyin.a_syl)
N_SYL = re.compile(pinyin.n_syl)
WORD = re.compile(pinyin.word)
N_WORD = re.compile(pinyin.n_word)
A_WORD = re.compile(pinyin.a_word)
SENT = re.compile(pinyin.sentence)
N_SENT = re.compile(pinyin.n_sent)
A_SENT = re.compile(pinyin.a_sent)
VOWELS = 'aeiou\u00FC'
VOWEL_MAP = {
'a1': '\u0101', 'a2': '\xe1', 'a3': '\u01ce', 'a4': '\xe0', 'a5': 'a',
'e1': '\u0113', 'e2': '\xe9', 'e3': '\u011b', 'e4': '\xe8', 'e5': 'e',
'i1': '\u012b', 'i2': '\xed', 'i3': '\u01d0', 'i4': '\xec', 'i5': 'i',
'o1': '\u014d', 'o2': '\xf3', 'o3': '\u01d2', 'o4': '\xf2', 'o5': 'o',
'u1': '\u016b', 'u2': '\xfa', 'u3': '\u01d4', 'u4': '\xf9', 'u5': 'u',
'\u00fc1': '\u01d6', '\u00fc2': '\u01d8', '\u00fc3': '\u01da',
'\u00fc4': '\u01dc', '\u00fc5': '\u00fc'
}
def _num_vowel_to_acc(vowel, tone):
    """Convert a numbered vowel to an accented vowel.

    *vowel* must be one of VOWELS and *tone* an int (or digit string) 1-5.
    Raises ValueError for any unknown vowel/tone combination.
    """
    try:
        return VOWEL_MAP[vowel + str(tone)]
    # Bug fix: a failed dict lookup raises KeyError, not IndexError, so the
    # original handler never fired and callers saw a raw KeyError instead of
    # the documented ValueError.
    except KeyError:
        raise ValueError("Vowel must be one of '{}' and tone must be an int"
                         "1-5.".format(VOWELS))
def num_syl_to_acc(syllable):
    """Convert a numbered pinyin syllable to an accented pinyin syllable.

    Implements the following algorithm:
    1. If the syllable has an 'a' or 'e', put the tone over that vowel.
    2. If the syllable has 'ou', place the tone over the 'o'.
    3. Otherwise, put the tone on the last vowel.
    """
    # Special case for the syllabic 'r' ('r' plus optional tone digit).
    if syllable.startswith('r') and len(syllable) <= 2:
        return 'r'
    if re.search('[{}]'.format(VOWELS), syllable) is None:
        return syllable
    syl, tone = syllable[:-1], syllable[-1]
    if tone not in '12345':
        # No trailing tone number: abort the conversion and return the input
        # unchanged. (Bug fix: the original returned `syl`, which had already
        # had its last character stripped off.)
        return syllable
    # Normalize the two ASCII spellings of u-umlaut before accenting.
    syl = re.sub('u:|v', '\u00fc', syl)
    if 'a' in syl:
        return syl.replace('a', _num_vowel_to_acc('a', tone))
    elif 'e' in syl:
        return syl.replace('e', _num_vowel_to_acc('e', tone))
    elif 'ou' in syl:
        return syl.replace('o', _num_vowel_to_acc('o', tone))
    last_vowel = syl[max(map(syl.rfind, VOWELS))]  # Find last vowel index.
    return syl.replace(last_vowel, _num_vowel_to_acc(last_vowel, tone))
class TestPinyinSyllables(unittest.TestCase):
    """Checks that the syllable regexes match every valid syllable with a random tone."""

    maxDiff = None

    def test_number_syllables(self):
        # Append a random tone digit to every valid syllable, then verify the
        # generic and numbered syllable regexes each find all of them.
        vs = list(VALID_SYLS)
        _vs = []
        for n in range(0, len(vs)):
            vs[n] = vs[n] + str(random.randint(1, 5))
            _vs.append(vs[n])
            # An apostrophe separates syllables beginning with a/e/o.
            if _vs[n][0] in 'aeo':
                _vs[n] = "'{}".format(_vs[n])
        s = ''.join(_vs)
        self.assertEqual(SYL.findall(s), vs)
        self.assertEqual(N_SYL.findall(s), vs)

    def test_accent_syllables(self):
        # Same as above but with the tone rendered as an accent mark.
        vs = list(VALID_SYLS)
        _vs = []
        for n in range(0, len(vs)):
            syl = vs[n]
            vs[n] = num_syl_to_acc(vs[n] + str(random.randint(1, 5)))
            _vs.append(vs[n])
            if syl[0] in 'aeo':
                _vs[n] = "'{}".format(_vs[n])
        s = ''.join(_vs)
        self.assertEqual(SYL.findall(s), vs)
        self.assertEqual(A_SYL.findall(s), vs)
def create_word(accented=False):
    """Build a random pinyin word of WORD_LENGTH toned syllables.

    Syllables are joined either directly or with a hyphen (chosen at random);
    an apostrophe is inserted before a directly-joined syllable that starts
    with a/e/o, per pinyin orthography.
    """
    if accented:
        tone = lambda: str(random.randint(1, 5))
        vs = [num_syl_to_acc(s + tone()) for s in VALID_SYLS]
    else:
        vs = [s + str(random.randint(1, 5)) for s in VALID_SYLS]
    word = vs[random.randint(0, len(vs) - 1)]
    for n in range(1, WORD_LENGTH):
        num = random.randint(0, len(vs) - 1)
        # Randomly hyphenate or concatenate.
        word += ['-', ''][random.randint(0, 1)]
        if VALID_SYLS[num][0] in 'aeo' and word[-1] != '-':
            word += "'"
        word += vs[num]
    return word
class TestPinyinWords(unittest.TestCase):
    """Checks that the word regexes fully match randomly generated words."""

    def test_number_words(self):
        for n in range(0, NUM_WORDS):
            word = create_word()
            # The regex must consume the entire word, not just a prefix.
            self.assertEqual(WORD.match(word).group(0), word)
            self.assertEqual(N_WORD.match(word).group(0), word)

    def test_accent_words(self):
        for n in range(0, NUM_WORDS):
            word = create_word(accented=True)
            self.assertEqual(WORD.match(word).group(0), word)
            self.assertEqual(A_WORD.match(word).group(0), word)
def create_sentence(accented=False):
    """Build a random sentence of SENT_LENGTH words separated by random
    punctuation (space, comma, or semicolon) and terminated with a period."""
    _sent = []
    for n in range(0, SENT_LENGTH):
        _sent.append(create_word(accented=accented))
    sentence = [_sent.pop(0)]
    sentence.extend([random.choice([' ', ', ', '; ']) + w for w in _sent])
    return ''.join(sentence) + '.'
class TestPinyinSentences(unittest.TestCase):
    """Checks that the sentence regexes fully match randomly generated sentences."""

    def test_number_sentences(self):
        for n in range(0, NUM_SENT):
            sentence = create_sentence()
            self.assertEqual(SENT.match(sentence).group(0), sentence)
            self.assertEqual(N_SENT.match(sentence).group(0), sentence)

    def test_accent_sentences(self):
        for n in range(0, NUM_SENT):
            sentence = create_sentence(accented=True)
            self.assertEqual(SENT.match(sentence).group(0), sentence)
            self.assertEqual(A_SENT.match(sentence).group(0), sentence)
| StarcoderdataPython |
# apis_v1/views/views_measure.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from config.base import get_environment_variable
from django.http import HttpResponse
import json
from measure.controllers import add_measure_name_alternatives_to_measure_list_light, measure_retrieve_for_api, \
retrieve_measure_list_for_all_upcoming_elections
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
def measure_list_for_upcoming_elections_retrieve_api_view(request):  # measureListForUpcomingElectionsRetrieve
    """
    Ask for all measures for the elections in google_civic_election_id_list
    :param request: HTTP request; reads 'google_civic_election_id_list[]' and
        optional 'state_code' from the query string.
    :return: JSON HttpResponse with status, success, the election id list, and
        a light-weight measure list.
    """
    status = ""
    google_civic_election_id_list = request.GET.getlist('google_civic_election_id_list[]')
    state_code = request.GET.get('state_code', '')

    # We will need all candidates for all upcoming elections so we can search the HTML of
    # the possible voter guide for these names
    measure_list_light = []
    results = retrieve_measure_list_for_all_upcoming_elections(google_civic_election_id_list,
                                                               limit_to_this_state_code=state_code)
    if results['measure_list_found']:
        measure_list_light = results['measure_list_light']
        # Augment each measure with alternative name spellings, used for matching.
        expand_results = add_measure_name_alternatives_to_measure_list_light(measure_list_light)
        if expand_results['success']:
            measure_list_light = expand_results['measure_list_light']

    google_civic_election_id_list = results['google_civic_election_id_list']
    status += results['status']
    success = results['success']

    json_data = {
        'status': status,
        'success': success,
        'google_civic_election_id_list': google_civic_election_id_list,
        'measure_list': measure_list_light,
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
def measure_retrieve_view(request):  # measureRetrieve
    """Retrieve a single measure by 'measure_id' or 'measure_we_vote_id' query param."""
    measure_id = request.GET.get('measure_id', 0)
    measure_we_vote_id = request.GET.get('measure_we_vote_id', None)
    return measure_retrieve_for_api(measure_id, measure_we_vote_id)
| StarcoderdataPython |
# Django
from django.urls import path
# Views
from .views import login_view, signup_view, logout_view
app_name = 'auth'
urlpatterns = [
path('', login_view, name='login'),
path('logout', logout_view, name='logout'),
path('register', signup_view, name='register')
]
| StarcoderdataPython |
from .is_iterable_of import is_iterable_of
from .is_list import is_list
class is_list_of(is_iterable_of):
    """
    Generates a predicate that checks that the data is a list where every
    element of the data is valid according to the given predicate.
    """

    # Require the value to actually be a list before the element-wise
    # is_iterable_of validation runs.
    prerequisites = [is_list]
| StarcoderdataPython |
11342705 | <filename>crypt.py
#!/usr/bin/env python
#coding=utf-8
import sys
import os
import json
import shutil
import subprocess
import argparse
############################################################
#http://www.coolcode.org/archives/?article-307.html
############################################################
import struct
_DELTA = 0x9E3779B9
def _long2str(v, w):
    """Pack a list of 32-bit words back into a little-endian byte string.

    NOTE: Python 2 code — `str` here is a byte string.
    If `w` is true, v[-1] holds the original (pre-padding) data length; it is
    validated against the packed size and used to strip the zero padding.
    """
    n = (len(v) - 1) << 2
    if w:
        m = v[-1]
        # Reject a recorded length outside the valid padded range.
        if (m < n - 3) or (m > n): return ''
        n = m
    s = struct.pack('<%iL' % len(v), *v)
    return s[0:n] if w else s
def _str2long(s, w):
    """Split a byte string into a list of little-endian 32-bit words.

    The string is zero-padded up to a multiple of four bytes. If `w` is true,
    the original length is appended as an extra trailing word so that
    _long2str can strip the padding again.
    """
    n = len(s)
    # Round n up to the next multiple of 4.
    m = (4 - (n & 3) & 3) + n
    s = s.ljust(m, "\0")
    v = list(struct.unpack('<%iL' % (m >> 2), s))
    if w: v.append(n)
    return v
def xxtea_encrypt(str, key):
    """XXTEA-encrypt the byte string `str` with `key` (zero-padded to 16 bytes).

    NOTE: Python 2 only (`xrange`, byte-oriented `str`). The empty string is
    returned unchanged. All arithmetic is masked to 32 bits.
    """
    if str == '': return str
    v = _str2long(str, True)
    k = _str2long(key.ljust(16, "\0"), False)
    n = len(v) - 1
    z = v[n]
    y = v[0]
    sum = 0
    # XXTEA round count: 6 + 52 // (number of words).
    q = 6 + 52 // (n + 1)
    while q > 0:
        sum = (sum + _DELTA) & 0xffffffff
        e = sum >> 2 & 3
        for p in xrange(n):
            y = v[p + 1]
            v[p] = (v[p] + ((z >> 5 ^ y << 2) + (y >> 3 ^ z << 4) ^ (sum ^ y) + (k[p & 3 ^ e] ^ z))) & 0xffffffff
            z = v[p]
        # Last word wraps around to the first.
        y = v[0]
        v[n] = (v[n] + ((z >> 5 ^ y << 2) + (y >> 3 ^ z << 4) ^ (sum ^ y) + (k[n & 3 ^ e] ^ z))) & 0xffffffff
        z = v[n]
        q -= 1
    return _long2str(v, False)
def xxtea_decrypt(str, key):
    """Inverse of xxtea_encrypt: decrypt `str` with `key` (zero-padded to 16 bytes).

    NOTE: Python 2 only (`xrange`, byte-oriented `str`). The empty string is
    returned unchanged. Rounds are undone in reverse order, starting from the
    final accumulated `sum` value.
    """
    if str == '': return str
    v = _str2long(str, False)
    k = _str2long(key.ljust(16, "\0"), False)
    n = len(v) - 1
    z = v[n]
    y = v[0]
    q = 6 + 52 // (n + 1)
    # Start from the sum the encryption loop finished with, and walk it back.
    sum = (q * _DELTA) & 0xffffffff
    while (sum != 0):
        e = sum >> 2 & 3
        for p in xrange(n, 0, -1):
            z = v[p - 1]
            v[p] = (v[p] - ((z >> 5 ^ y << 2) + (y >> 3 ^ z << 4) ^ (sum ^ y) + (k[p & 3 ^ e] ^ z))) & 0xffffffff
            y = v[p]
        z = v[n]
        v[0] = (v[0] - ((z >> 5 ^ y << 2) + (y >> 3 ^ z << 4) ^ (sum ^ y) + (k[0 & 3 ^ e] ^ z))) & 0xffffffff
        y = v[0]
        sum = (sum - _DELTA) & 0xffffffff
    return _long2str(v, True)
def fread(file):
    """Return the raw byte contents of *file*."""
    with open(file, 'rb') as handle:
        data = handle.read()
    return data
def encrypt_file(file, key, sign):
    """XXTEA-encrypt `file` in place, prefixing the marker `sign`.

    A file that already starts with `sign` is treated as encrypted and skipped,
    making this operation idempotent.
    """
    data = fread(file)
    if not data.startswith(sign):
        codeded = xxtea_encrypt(data, key)
        with open(file, 'wb') as f:
            f.write(sign)
            f.write(codeded)
def decrypt_file(file, key, sign):
    """Decrypt `file` in place, but only if it carries the `sign` marker prefix."""
    data = fread(file)
    if data.startswith(sign):
        # Strip the marker before decrypting the payload.
        decoded = xxtea_decrypt(data[len(sign):], key)
        with open(file, 'wb') as f:
            f.write(decoded)
def scan(*dirs, **kwargs):
    """Recursively collect file paths under each directory in *dirs*.

    Keyword args:
        extensions: optional list of lower-case extensions (e.g. ['.lua']);
                    when given, only files with one of these extensions are kept.
        excludes: directory or file names to skip entirely.
    Returns a list of matching file paths.
    """
    files = []
    # dict.get replaces the Python-2-only has_key() calls.
    extensions = kwargs.get('extensions')
    excludes = kwargs.get('excludes', [])
    for top in dirs:
        for root, dirnames, filenames in os.walk(top):
            # Prune excluded directories *in place* (slice assignment) so that
            # os.walk does not descend into them. The original rebound the name
            # and inverted the test, which disabled directory exclusion entirely.
            dirnames[:] = [d for d in dirnames if d not in excludes]
            for f in filenames:
                if f in excludes:
                    continue
                ext = os.path.splitext(f)[1].lower()
                if extensions is None or ext in extensions:
                    files.append(os.path.join(root, f))
    return files
def prepare():
    """Load the XXTEA key and signature from .cocos-project.json and collect
    the files to process.

    Returns a (key, sign, sources) tuple, where sources are the matching files
    under 'src' and 'res'.
    """
    conf = json.loads(fread('.cocos-project.json'))
    key = str(conf['luaEncryptKey'])
    sign = str(conf['luaEncryptSign'])
    #extensions = ['.ExportJson', '.plist', '.json', '.animation', '.fnt', '.md', '.xml', '.tmx', '.png', '.lua']
    extensions = ['.json', '.png', '.lua']
    sources = scan('src', 'res', extensions=extensions, excludes=['Backup', 'README.md'])
    return key, sign, sources
def encrypt(key, sign, sources):
    """Encrypt every file in `sources` in place, printing progress."""
    print('encrypt %d files...' % len(sources))
    for f in sources:
        encrypt_file(f, key, sign)
    print('OK')
def decrypt(key, sign, sources):
    """Decrypt every file in `sources` in place, printing progress."""
    print('decrypt %d files...' % len(sources))
    for f in sources:
        decrypt_file(f, key, sign)
    print('OK')
def main():
    """Command-line entry point: encrypt or decrypt the project resources.

    Exactly one of -e/--encrypt or -d/--decrypt must be given; otherwise the
    usage help is printed and nothing happens.
    """
    parser = argparse.ArgumentParser(description='The cocos resource encrypt/decrypt script.')
    parser.add_argument('-e', '--encrypt', action='store_true', help='encrypt resources')
    # Fixed copy/paste in the help string: it previously said "encoding
    # resources" for the decrypt flag as well.
    parser.add_argument('-d', '--decrypt', action='store_true', help='decrypt resources')
    args = parser.parse_args()
    # Both flags set, or neither: ambiguous, so show usage and bail out.
    if args.encrypt == args.decrypt:
        parser.print_help()
        return
    if args.encrypt:
        encrypt(*prepare())
    else:
        decrypt(*prepare())
# Run only when executed as a script, so importing this module (e.g. to reuse
# the xxtea helpers) no longer triggers the CLI.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
from unittest import TestCase
from app.core.result import Result
class TestResult(TestCase):
    """Unit tests for app.core.result.Result."""

    def test___unsuccessful_by_default(self):
        # A fresh Result reports neither success nor error.
        result = Result()
        self.assertEqual(False, result.was_success)
        self.assertEqual(False, result.was_error)

    def test_set_success(self):
        # set_success flips was_success without raising the error flag.
        result = Result()
        result.set_success()
        self.assertEqual(True, result.was_success)
        self.assertEqual(False, result.was_error)

    def test_add_payload(self):
        # Attaching a payload does not change the success/error flags.
        result = Result()
        result.add_payload('test')
        self.assertEqual(False, result.was_success)
        self.assertEqual(False, result.was_error)
        self.assertEqual('test', result.payload)

    def test_error(self):
        # error() raises the error flag and records the message.
        result = Result()
        error_message = 'there was an error'
        result.error(error_message)
        self.assertEqual(False, result.was_success)
        self.assertEqual(True, result.was_error)
        self.assertEqual(error_message, result.error_message)
| StarcoderdataPython |
from haystack.query import SearchQuerySet
from search.services.suggest import SuggestBase
class SuggestInvestigator(SuggestBase):
    """Autocomplete backend suggesting investigators matched by name."""

    @classmethod
    def _query(cls, term):
        sqs = SearchQuerySet()
        # Top five name matches, highest complaint count first.
        raw_results = sqs.filter(investigator_name=term).order_by('-investigator_complaint_count')[:5]
        results = [
            cls.entry_format(
                # Rendered as "Name (complaint count)".
                suggest_value='{name} ({count})'.format(
                    name=entry.investigator_name, count=entry.investigator_complaint_count),
                tag_value=cls.build_tag_value(
                    category='allegation__investigator',
                    value=entry.investigator_id,
                    display_category='Investigator',
                    display_value=entry.investigator_name,
                )
            ) for entry in raw_results
        ]
        return {'Investigator': results}
| StarcoderdataPython |
6577115 | from django.db import models
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.utils.translation import gettext as _, get_language
from martor.widgets import AdminMartorWidget
from .models import Post, Category, Tag
class CustomPostChangeList(ChangeList):
    """Post changelist with a hand-translated title for Russian locales."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        active_language = get_language()
        if active_language == 'ru':
            self.title = 'Выберите статью для изменения'
class CustomCategoryChangeList(ChangeList):
    """Category changelist with a hand-translated title for Russian locales."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        active_language = get_language()
        if active_language == 'ru':
            self.title = 'Выберите категорию для изменения'
def get_title_changeform_view(url: str, model_accusative_case: str) -> str:
    """Return the Russian admin form title for *url*.

    URLs ending in ``/add/`` get an "Add" title, everything else a
    "Change" title; *model_accusative_case* is the model name in the
    accusative case (e.g. 'статью').
    """
    action = 'Добавить' if url.endswith('/add/') else 'Изменить'
    return f'{action} {model_accusative_case}'
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for blog posts (Martor markdown editor, localized titles)."""
    # Columns shown on the changelist page.
    list_display = [
        'title', 'slug', 'excerpt', 'views', 'read_time',
        'is_published', 'published_date', 'modified_date',
        'author', 'category'
    ]
    search_fields = ['title', 'content']
    list_display_links = ['title']
    list_filter = ['is_published', 'published_date', 'modified_date']
    # Publication flag can be toggled directly from the changelist.
    list_editable = ['is_published']
    # Two collapsible groups on the edit form; labels are translated.
    fieldsets = [
        (_('admin_main_fields'), {
            'fields': (
                'title', 'content', 'is_published', 'image', 'category', 'tags'
            )
        }),
        (_('admin_additional_fields'), {
            'fields': ('slug', 'excerpt', 'author')
        })
    ]
    filter_horizontal = ['tags']
    # Slug is auto-filled from the title while typing.
    prepopulated_fields = {'slug': ['title']}
    # Render all TextFields with the Martor markdown widget.
    formfield_overrides = {
        models.TextField: {'widget': AdminMartorWidget}
    }
    def get_changelist(self, request, **kwargs):
        # Use the subclass that localizes the changelist title for Russian.
        return CustomPostChangeList
    def changeform_view(self, request, obj_id, form_url, extra_context=None):
        # Localize the add/change form title when the active language is Russian.
        if get_language() == 'ru':
            extra_context = {} if extra_context is None else extra_context
            extra_context['title'] = get_title_changeform_view(request.path, 'статью') # noqa
        return super(PostAdmin, self).changeform_view(
            request, obj_id, form_url, extra_context=extra_context
        )
class CategoryAdmin(admin.ModelAdmin):
    """Admin configuration for post categories (hierarchical via 'parent')."""
    list_display = ['name', 'slug', 'parent']
    search_fields = ['name']
    list_display_links = ['name']
    list_filter = ['parent']
    fieldsets = [
        (_('admin_main_fields'), {'fields': ('name', 'parent')}),
        (_('admin_additional_fields'), {'fields': ('slug',)})
    ]
    # Slug is auto-filled from the name while typing.
    prepopulated_fields = {'slug': ['name']}
    def get_changelist(self, request, **kwargs):
        # Use the subclass that localizes the changelist title for Russian.
        return CustomCategoryChangeList
    def changeform_view(self, request, obj_id, form_url, extra_context=None):
        # Localize the add/change form title when the active language is Russian.
        if get_language() == 'ru':
            extra_context = {} if extra_context is None else extra_context
            extra_context['title'] = get_title_changeform_view(request.path, 'категорию') # noqa
        return super(CategoryAdmin, self).changeform_view(
            request, obj_id, form_url, extra_context=extra_context
        )
class TagAdmin(admin.ModelAdmin):
    """Minimal admin for tags: only the name is exposed."""
    list_display = ['name']
    search_fields = ['name']
    list_display_links = ['name']
# Localized branding for the admin site header/title.
admin.site.site_title = _('site_title')
admin.site.site_header = _('admin_site_header')
# Register each model with its customized admin class.
admin.site.register(Post, PostAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Tag, TagAdmin)
| StarcoderdataPython |
11373136 | <reponame>wyaadarsh/LeetCode-Solutions<filename>Python3/0265-Paint-House-II/soln.py
class Solution:
    def minCostII(self, costs):
        """
        :type costs: List[List[int]] -- costs[i][j]: cost of painting house i color j
        :rtype: int -- minimum total cost with no two adjacent houses sharing a color

        O(n*k) DP keeping the smallest and second-smallest totals of the
        previous row. Fixes two issues in the original:
        - the caller's ``costs`` matrix was mutated in place;
        - ``min()`` on an empty slice crashed when k == 1 and n > 1
          (now handled via float('inf')).
        """
        if not costs:
            return 0
        # Best achievable totals ending at each color for the first house.
        prev = list(costs[0])
        for row in costs[1:]:
            min1 = min(prev)
            idx = prev.index(min1)
            # Second-smallest previous total; inf when there is only one color
            # (no valid choice exists for the house whose color must differ).
            rest = prev[:idx] + prev[idx + 1:]
            min2 = min(rest) if rest else float('inf')
            # A house may reuse min1 unless it sits at the same color index.
            prev = [cost + (min2 if j == idx else min1)
                    for j, cost in enumerate(row)]
        return min(prev)
| StarcoderdataPython |
6457308 |
import pcp_utils
import sys
import os
import click
import yaml
import random
RANDOM_SEED = 0  # seed applied to random, np.random and each gym env at startup
# add gym and baseline to the dir
gym_path = pcp_utils.utils.get_gym_dir()
baseline_path = pcp_utils.utils.get_baseline_dir()
sys.path.append(gym_path)
sys.path.append(baseline_path)
# make symbolic link of the mesh under quantize-gym/gym/envs/robotics/assets/stls
# NOTE(review): this runs at import time — importing the module creates a
# symlink on disk the first time it is loaded.
source_mesh_dir = pcp_utils.utils.get_mesh_dir()
gym_mesh_dir = os.path.join(pcp_utils.utils.get_gym_dir(), 'gym/envs/robotics/assets/stls/meshes')
if not os.path.exists(gym_mesh_dir):
    os.symlink(source_mesh_dir, gym_mesh_dir, target_is_directory=True)
import argparse
from pcp_utils import utils
from pcp_utils.mesh_object import MeshObject
from pcp_utils.utils import Config
from pcp_utils.parse_task_files import generate_integrated_xml
import tqdm
import numpy as np
##### Imports related to environment #############
import gym
class PolicyCompressor:
    """Assigns mesh objects to a minimal bank of manipulation policies.

    Each incoming object is rolled out in a gym environment against every
    existing expert policy; it joins each cluster whose expert succeeds at
    least ``accept_threshold`` of the time, otherwise it stays unassigned.
    Results are periodically dumped to a YAML file.
    """

    class Config(Config):
        # Rollout / acceptance settings.
        num_rollouts = 100
        max_path_length = 50
        accept_threshold = 0.9
        num_threads = 5
        bbox_indicator = False
        # params for the environment
        env_name = None
        env_base_xml_file = "" #table, the kind of robot, object placeholder
        n_robot_dof = 8 # 4 means xyz, 6 means having rotations
        place = False
        render = True
        randomize_color = False
        init_info = None

    def __init__(self, config:Config, initial_policies=None, output_file="", run_name=""):
        """Set up clusters from *initial_policies* and prepare the output path.

        BUG FIX: *initial_policies* previously defaulted to a mutable ``[]``
        shared across all calls; use ``None`` and create a fresh list instead.
        """
        if initial_policies is None:
            initial_policies = []
        self.config = config
        self.num_rollouts = config.num_rollouts
        self.max_path_length = config.max_path_length
        self.accept_threshold = config.accept_threshold
        self.num_threads = config.num_threads
        self.env_name = config.env_name
        self.env_base_xml_file = config.env_base_xml_file
        self.bbox_indicator = config.bbox_indicator
        self.n_robot_dof = config.n_robot_dof
        self.randomize_color = config.randomize_color
        self.init_info = config.init_info
        self.run_name = run_name
        self.policy_bank = initial_policies
        self.output_file = output_file
        output_folder = "/".join(output_file.split("/")[:-1])
        utils.makedir(output_folder)
        self.objects_output_xml = dict()
        self.object_output_xml_id = 0
        self.clusters = {}
        # key: name of the cluster(c1)
        # value:
        #     objects: objects inside this clusters (objects, success rate)
        #     expert_id: expert associate to it
        #     expert_name:
        #     3d tensor model:
        self.object_to_cluster = [] #just a lookup
        self.num_clusters = 0
        self.object_not_clustered = []
        self.success_rates_over_class = dict()
        self.failed_object = []
        self.success_rates = []
        self.init_clusters()

    def init_clusters(self):
        """Create one (initially empty) cluster per policy in the bank."""
        for policy in self.policy_bank:
            cluster_id = self.num_clusters
            self.clusters[f'c{cluster_id}'] = dict()
            self.clusters[f'c{cluster_id}']['objects'] = []
            self.clusters[f'c{cluster_id}']['expert_name'] = policy.policy_name
            self.clusters[f'c{cluster_id}']['expert'] = policy #.policy_model
            self.num_clusters += 1

    # Access to a minimal policy bank, also has information about which meshes to run on which policy
    # Takes in a new object and determines if it can be merged with an existing policy or can spawn a new policy
    # input: new object that needs to be classified
    # mesh should be mesh_id like 159e56c18906830278d8f8c02c47cde0, or b9004dcda66abf95b99d2a3bbaea842a which are ShapeNet ids
    def add_object(self, obj):
        """Roll *obj* out against every expert and record cluster membership."""
        #share the env
        # make xml for the object:
        integrated_xml = generate_integrated_xml(self.env_base_xml_file, obj.obj_xml_file, scale=obj.scale, mass=obj.mass, euler=obj.euler,
            add_bbox_indicator=self.bbox_indicator, randomize_color=self.randomize_color, prefix=self.run_name)
        #obj.xml_file)# xml_path="fetch/pick_and_place_kp30000_debug.xml") #xml_path=obj.xml_file)
        env = gym.make(self.env_name, xml_path=integrated_xml, use_bbox_indicator=self.bbox_indicator,
            n_actions=self.n_robot_dof, init_info=self.init_info)
        env.seed(RANDOM_SEED)
        env.action_space.seed(RANDOM_SEED)
        print(f'max env steps are: {env._max_episode_steps}')
        # env.render()
        # this ordering should be something learnable
        is_clustered = False
        success_rates = np.zeros(len(self.clusters.items()))
        for cid, cluster in self.clusters.items():
            env.seed(RANDOM_SEED)
            # load policy of the first mesh (parent mesh) in an existing cluster
            print("Checking performance of {} on policy for {}: {}".format(obj.name, cid, cluster['expert'].policy_name))
            stats = cluster['expert'].run_forwards(env, obj=obj, num_rollouts=self.num_rollouts, path_length=self.max_path_length, render=self.config.render, cluster_name=cluster['expert'].policy_name, place=self.config.place)#, accept_threshold=self.accept_threshold)
            success_rate = stats['success_rate']
            print("Success Rate ", success_rate)
            success_rates[int(cid[1:])] = success_rate
            # NOTE(review): there is deliberately no break here — an object that
            # meets the threshold for several experts is appended to each of
            # their clusters; confirm this is the intended bookkeeping.
            if success_rate >= self.accept_threshold: #* base_success_rate:
                self.object_to_cluster.append((obj, cid))
                cluster['objects'].append((obj, success_rate))
                is_clustered = True
                # if fail: randomly select from on of the top
        self.success_rates_over_class[obj.name] = " ".join([str(success_rates[x]) for x in range(self.num_clusters)])
        self.success_rates.append(success_rates)
        if not is_clustered:
            self.object_not_clustered.append(obj)
            self.object_to_cluster.append((obj, "not_assigned"))
        # Remove the temporary per-object xml generated above.
        gym_xml_path = os.path.join(pcp_utils.utils.get_gym_dir(), 'gym/envs/robotics/assets')
        integrated_xml_full_path = os.path.join(gym_xml_path, integrated_xml)
        os.remove(integrated_xml_full_path)
        print("failed objects")
        print(self.failed_object)
        env.close()

    def output_xml(self):
        """Dump config, cluster definitions and newly-seen objects to YAML."""
        # output cluster name
        clusters_output = dict()
        for cluster_name in self.clusters:
            clusters_output[cluster_name] = dict()
            clusters_output[cluster_name]['expert_name'] = self.clusters[cluster_name]['expert_name']
            clusters_output[cluster_name]['fn'] = self.clusters[cluster_name]['expert'].__class__.__module__ + ":" + \
                self.clusters[cluster_name]['expert'].__class__.__name__
            clusters_output[cluster_name]['params'] = self.clusters[cluster_name]['expert'].config.__dict__
        output_clusters = dict()
        output_clusters['clusters'] = clusters_output
        output_objs = dict()
        # output objects (only those added since the last dump)
        for obj, cluster_id in self.object_to_cluster[self.object_output_xml_id:]:
            self.objects_output_xml[obj.name] = dict()
            for key_id, item in obj.config.__dict__.items():
                if isinstance(item, list):
                    item = " ".join([str(x) for x in item])
                #elif isinstance(item, )
                self.objects_output_xml[obj.name][key_id] = item
            self.objects_output_xml[obj.name]['cluster_id'] = cluster_id
            self.objects_output_xml[obj.name]['success_rates_over_class'] = self.success_rates_over_class[obj.name]
        self.object_output_xml_id = len(self.objects_output_xml)
        output_objs["objs"] = self.objects_output_xml
        with open(self.output_file, 'w') as file:
            yaml.dump(self.config.__dict__, file, default_flow_style=False)
            yaml.dump(output_clusters, file, default_flow_style=False)
            yaml.dump(output_objs, file, default_flow_style=False)
        print("avg success rate:", np.mean(np.stack(self.success_rates, axis=0),axis=0))

    def print_policy_summary(self):
        """Print every cluster with its expert and member objects."""
        # want to write out something with the policy and objects, and we can load from it.
        print("Compressed Meshes")
        for cid, cluster in self.clusters.items():
            print(f'cluster {cid}')
            expert_name = cluster['expert_name']
            print(f'expert used in the cluster: {expert_name}')
            for obj, success_rate in cluster['objects']:
                print(f'    {obj.name} ({success_rate})')
        print("==========================")
        for obj in self.object_not_clustered:
            print(f'    {obj.name} ')
        #print("Percentage compression for threshold {} : {} ".format(self.accept_threshold, self.num_clusters/len(MESHES)))
        #print("Cluster Accuracies")
        #print(self.cluster_acc)
@click.command()
@click.argument("config_file")#config
@click.option("--task_config_file") #for the objects
@click.option("--output_file") #for the objects
@click.option("--run_name") #define run name to avoid generate_xml to overwrite
def main(config_file, task_config_file, output_file, run_name):
    """CLI entry point: load configs, build policies/objects, run clustering."""
    config = utils.config_from_yaml_file(config_file)
    # build detector
    if "use_detector" in config and config["use_detector"]:
        detector_param = config["detector"]
        detector_class, detector_config = utils.import_class_from_config(detector_param)
        detector = detector_class(detector_config)
    else:
        detector = None
    # init all the policy
    initial_policies = []
    for policy_name in config["initial_policies"]:
        policy_param = config["initial_policies"][policy_name]
        if policy_param['params'] is None:
            policy_param['params'] = dict()
        # add name to the parameter
        policy_param['params']["policy_name"] = policy_name
        policy_class, policy_config = utils.import_class_from_config(policy_param)
        policy = policy_class(policy_config, detector)
        initial_policies.append(policy)
    print(f"find {len(initial_policies)} initial policies")
    # init all objects
    objs_config = utils.config_from_yaml_file(task_config_file)
    objects_to_cluster = []
    for obj_name, obj_config in objs_config['objs'].items():
        obj_config_class = MeshObject.Config().update(obj_config)
        obj = MeshObject(obj_config_class, obj_name)
        objects_to_cluster.append(obj)
    print(f"find {len(objects_to_cluster)} objects to cluster")
    updated_config = PolicyCompressor.Config().update(config)
    compressor = PolicyCompressor(updated_config,
        initial_policies=initial_policies,
        output_file=output_file,
        run_name = run_name
        )
    nmeshes = len(objects_to_cluster)
    for mesh_id, meshobj in enumerate(objects_to_cluster):
        print(f"============ {run_name} processing object {mesh_id}/{nmeshes} ============ ")
        compressor.add_object(meshobj)
        # Checkpoint the YAML output every five objects.
        if (mesh_id + 1) %5 == 0:
            compressor.output_xml()
    compressor.output_xml()
    compressor.print_policy_summary()
if __name__=="__main__":
    # Seed all RNGs before running so clustering rollouts are reproducible.
    random.seed(RANDOM_SEED)
    np.random.seed(RANDOM_SEED)
    main()
| StarcoderdataPython |
6653181 | <reponame>mattslezak-shell/PROJ_Option_Pricing_Matlab<filename>CTMC/Diffusion_3D/price_3d_ctmc.py<gh_stars>0
# Generated with SMOP 0.41-beta
try:
from smop.libsmop import *
except ImportError:
raise ImportError('File compiled with `smop3`, please install `smop3` to run it.') from None
# price_3d_ctmc.m
@function
def price_3d_ctmc(S_0s=None,T=None,r=None,R=None,sigmas=None,qs=None,params=None,contractParams=None,M=None,*args,**kwargs):
    """Machine-translated from ``price_3d_ctmc.m`` by SMOP 0.41-beta.

    NOTE(review): semantics deliberately mirror the MATLAB original — the
    ``# price_3d_ctmc.m:N`` comments map each statement back to its source
    line, and 1-based "call" syntax like ``Y_0s(1)`` is smop-style indexing.
    Prices a contract on three correlated diffusions by decorrelating them
    and approximating each with a continuous-time Markov chain (CTMC) —
    confirm against the MATLAB source before modifying.
    """
    varargin = price_3d_ctmc.varargin
    nargin = price_3d_ctmc.nargin
    if nargin < 9:
        M=1
# price_3d_ctmc.m:4
    dt=T / M
# price_3d_ctmc.m:6
    contract=contractParams.contract
# price_3d_ctmc.m:8
    if contract == 1:
        dt=1
# price_3d_ctmc.m:11
        M=1
# price_3d_ctmc.m:11
    method=4
# price_3d_ctmc.m:14
    num_devs=params.num_devs
# price_3d_ctmc.m:15
    m_0=params.m_0
# price_3d_ctmc.m:16
    GridMultParam=params.GridMultParam
# price_3d_ctmc.m:17
    gridMethod=params.gridMethod
# price_3d_ctmc.m:18
    ##################################
    drifts=r - qs
# price_3d_ctmc.m:21
    L,D,C,Cinv=get_transform_matrices_3d(R,method,nargout=4)
# price_3d_ctmc.m:23
    # Now Define New Uncorrelated System (the dc underscore)
    drift_dc,sigma_dc=decorrelate(sigmas,drifts,C,D,nargout=2)
# price_3d_ctmc.m:26
    Ls_dc,Rs_dc=get_CTMC_decorr_boundaries(sigmas,C,T,num_devs,sigma_dc,nargout=2)
# price_3d_ctmc.m:28
    Y_0s=concat([0,0,0])
# price_3d_ctmc.m:29
    # Form CTMC 1
    center=Y_0s(1)
# price_3d_ctmc.m:32
    mu_func=lambda s=None: dot(drift_dc(1),concat([s > - 100000]))
# price_3d_ctmc.m:33
    sig_func=lambda s=None: dot(sigma_dc(1),concat([s > - 100000]))
# price_3d_ctmc.m:34
    Q,y_1,c_index_1=Q_Matrix(m_0,mu_func,sig_func,Ls_dc(1),Rs_dc(1),gridMethod,center,GridMultParam,nargout=3)
# price_3d_ctmc.m:35
    P1=expm(dot(Q,dt))
# price_3d_ctmc.m:36
    # Form CTMC 2
    center=Y_0s(2)
# price_3d_ctmc.m:39
    mu_func=lambda s=None: dot(drift_dc(2),concat([s > - 100000]))
# price_3d_ctmc.m:40
    sig_func=lambda s=None: dot(sigma_dc(2),concat([s > - 100000]))
# price_3d_ctmc.m:41
    Q,y_2,c_index_2=Q_Matrix(m_0,mu_func,sig_func,Ls_dc(2),Rs_dc(2),gridMethod,center,GridMultParam,nargout=3)
# price_3d_ctmc.m:42
    P2=expm(dot(Q,dt))
# price_3d_ctmc.m:43
    # Form CTMC 3
    center=Y_0s(3)
# price_3d_ctmc.m:46
    mu_func=lambda s=None: dot(drift_dc(3),concat([s > - 100000]))
# price_3d_ctmc.m:47
    sig_func=lambda s=None: dot(sigma_dc(3),concat([s > - 100000]))
# price_3d_ctmc.m:48
    Q,y_3,c_index_3=Q_Matrix(m_0,mu_func,sig_func,Ls_dc(3),Rs_dc(3),gridMethod,center,GridMultParam,nargout=3)
# price_3d_ctmc.m:49
    P3=expm(dot(Q,dt))
# price_3d_ctmc.m:50
    G=get_payoff_G_matrix_from_ygrid_3d(y_1,y_2,y_3,S_0s,sigmas,R,contractParams)
# price_3d_ctmc.m:53
    if contract == 1:
        # vals = exp(-r*T)*P1*G*P2.';
        vals=0
# price_3d_ctmc.m:57
        for i in arange(1,m_0).reshape(-1):
            for j in arange(1,m_0).reshape(-1):
                for k in arange(1,m_0).reshape(-1):
                    vals=vals + dot(dot(dot(P1(c_index_1,i),P2(c_index_2,j)),P3(c_index_3,k)),G(i,j,k))
# price_3d_ctmc.m:61
        vals=dot(vals,exp(dot(- r,T)))
# price_3d_ctmc.m:65
    return vals,c_index_1,c_index_2,c_index_3,y_1,y_2,y_3

if __name__ == '__main__':
    pass
| StarcoderdataPython |
5190125 | import sys
import threading
import weakref
class RunSelfFunction(object):
    """Starts a Thread whose target, args and kwargs all reference this
    object, deliberately forming a reference cycle from the Thread back
    to self. The structure (self passed three ways) is intentional —
    do not "simplify" it."""
    def __init__(self, should_raise):
        # The links in this refcycle from Thread back to self
        # should be cleaned up when the thread completes.
        self.should_raise = should_raise
        self.thread = threading.Thread(target=self._run,
                                       args=(self,),
                                       kwargs={'yet_another': self})
        self.thread.start()
    def _run(self, other_ref, yet_another):
        # other_ref / yet_another exist only to hold references back to self.
        if self.should_raise:
            # SystemExit raised in a thread terminates only that thread.
            raise SystemExit
def test_target_refcnt():
    """Exercise Thread-target reference cycles and report surviving refs.

    After the thread completes and the last strong reference is deleted,
    the weakref is expected to resolve to None (printed, not asserted).
    """
    cyclic_object = RunSelfFunction(should_raise=False)
    weak_cyclic_object = weakref.ref(cyclic_object)
    cyclic_object.thread.join()
    del cyclic_object
    # BUG FIX: messages said "shoule" instead of "should" (typo).
    print('No raise target should be None: %s' % weak_cyclic_object())
    print('%d references still around' %
          sys.getrefcount(weak_cyclic_object()))
    raising_cyclic_object = RunSelfFunction(should_raise=True)
    weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
    raising_cyclic_object.thread.join()
    del raising_cyclic_object
    print('Raise target should be None: %s' % weak_raising_cyclic_object())
    print('%d references still around' %
          sys.getrefcount(weak_raising_cyclic_object()))

test_target_refcnt()
| StarcoderdataPython |
8169127 | # %%
class StigmataSet:
    """Class for Stigmata Objects: a named set with 2-piece and 3-piece bonuses.

    BUG FIX: the original defined ``__setattr__(self, setability2, setability3)``
    — the wrong signature for the attribute protocol (Python calls it as
    ``__setattr__(name, value)``) — and its body referenced an undefined
    ``self.setbility``, so every attribute assignment (including the ones in
    ``__init__``) raised. A zero-argument ``__getattr__`` was equally broken.
    Both overrides are removed; they could never have executed successfully,
    so no working caller can depend on them.
    """

    def __init__(self, name, set2=None, set3=None):
        # set2/set3 default to None so a set may be created by name alone
        # (the module instantiates StigmataSet with a single argument).
        self.name = name
        self.set2 = set2
        self.set3 = set3
class Stigmata(StigmataSet):
    """A single stigmata piece with combat stats, belonging to a StigmataSet."""

    def __init__(self, name, pos, hp, sp, atk, dfs, crt, ability, setability2, setability3):
        # BUG FIX: the original called super().__init__(self, setability2, setability3),
        # passing the instance itself as the set's name. Pass the real arguments.
        super().__init__(name, setability2, setability3)
        self.pos = pos
        self.hp = hp
        self.sp = sp
        self.atk = atk
        self.dfs = dfs
        self.crt = crt
        # BUG FIX: the 'ability' argument was accepted but silently dropped.
        self.ability = ability
# %%
# Example: construct the 霄雲紅 ("Xiaoyun Red") set by name only.
# NOTE(review): StigmataSet.__init__ as written requires set2 and set3,
# so this one-argument call raises TypeError — confirm intended signature.
red = StigmataSet('霄雲紅')
# %%
| StarcoderdataPython |
4836513 | <gh_stars>1-10
import unittest
import numpy as np
from dataset.assemble.NorbAssembler import NorbAssembler
class Test_NorbAssembler(unittest.TestCase):
    """Tests for NorbAssembler.assemble on NORB stereo image pairs."""

    def setUp(self):
        self._assembler = NorbAssembler()

    def test_stereoPairsAreSeparated_AndCategoriesUpdated(self):
        """Stereo pairs are flattened to single images and each label gains a
        leading 0/1 camera index."""
        blocks = [np.arange(start, start + 6, dtype=np.int8).reshape(2, 3)
                  for start in (0, 6, 12, 18)]
        first_image, second_image, third_image, fourth_image = blocks
        images = np.array([
            [first_image, second_image],
            [third_image, fourth_image]
        ])
        first_label = np.arange(24, 30, dtype=np.int8)
        second_label = np.arange(30, 36, dtype=np.int8)
        labels = np.array([first_label, second_label])

        new_images, new_labels = self._assembler.assemble(images, labels)

        expected_images = np.array(blocks)
        expected_labels = np.array([
            np.concatenate((np.array([0]), first_label)),
            np.concatenate((np.array([1]), first_label)),
            np.concatenate((np.array([0]), second_label)),
            np.concatenate((np.array([1]), second_label)),
        ])
        np.testing.assert_array_equal(expected_images, new_images)
        np.testing.assert_array_equal(expected_labels, new_labels)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
1943997 | #####################
### Basic Imports ###
#####################
import random
######################
### Custom Imports ###
######################
from . import definitions
from . import classes
from . import help
from . import commands
######################
# Basic Settings#
def setname():  # Set your characters name
    """Prompt the captain for their name and greet them.

    NOTE(review): the entered name is not stored anywhere — confirm whether
    it should be written into definitions.status like the ship name is.
    """
    captain_name = input("What is your name captain?\n>> ")
    print(f"Welcome aboard Captain {captain_name}")
def setshipname():  # Set ship name
    """Prompt for a ship name; names of 2 chars or fewer get a random 'S.S.' name."""
    shipname = input("What shall we call your ship?\n>> ")
    if len(shipname) <= 2:
        # BUG FIX: 'S.S.'.join([single_name]) returned just the name, because
        # str.join only inserts the separator BETWEEN elements. Prefix instead.
        definitions.status.status["shipname"] = 'S.S.' + random.choice(["Deluxe", "Voltage", "Titan"])
    else:
        definitions.status.status["shipname"] = shipname
    print(f"Captain Your ship name is set to {definitions.status.status['shipname']}")
    commands.command()
# Mode Settings #
def setmodewasd():
    """Switch ship controls to WASD mode."""
    # FIX: removed the unused local read of definitions.player.stats["mode"].
    definitions.player.stats["mode"] = "WASD"
    print("Mode changed to WASD")
def setmodetxt():
    """Switch ship controls to text-command mode."""
    # FIX: removed the unused local read of definitions.player.stats["mode"].
    definitions.player.stats["mode"] = "TXT"
    print("Mode changed to TXT")
# Settings command
def settings():
    """Show the settings menu and dispatch to the chosen setter."""
    choice = input("""
    Player Settings:
    cn -- Character Name
    cs -- Ship Name
    Game Settings:
    wm -- WASD mode for controlling ship
    tm -- TEXT mode for controlling ship
    >> """)
    if choice in ["cn","name","NAME"]:
        setname()
    elif choice in ["csn","cs","shipname","SHIPNAME"]:
        setshipname()
    elif choice in ["wm","WASD","wasd","WM"]:
        setmodewasd()
        commands.command()
    elif choice in ["tm","TXT","txt","TM"]:
        setmodetxt()
        commands.command()
    else:
        # FIX: unrecognized input was previously ignored silently.
        print("Unknown option")
8002671 | # from SPARQLWrapper import SPARQLWrapper, JSON
import argparse
import wikidata
parser = argparse.ArgumentParser(
    description='Fixes labels for various usages.')
parser.add_argument('-s', help='source file', default='qids.txt')
parser.add_argument('-p', help='property', default='P1705')
args = parser.parse_args()
source = args.s
prop = args.p

# Collect valid QIDs from the source file, one per line.
qids = []
# FIX: use a context manager so the file is closed even if is_qid() raises;
# the original open()/close() pair leaked the handle on any exception.
with open(source, 'r') as qid_file:
    for line in qid_file:
        qid = line.strip()
        if wikidata.is_qid(qid):
            qids.append(qid)
        else:
            print('not a valid qid:' + qid)
print(qids)
| StarcoderdataPython |
1996173 | <reponame>zhengzangw/Fed-SINGA
import argparse
from singa import tensor
from src.client.app import Client
from src.server.app import Server
max_epoch = 3
def main_server(s):
    """Drive the federated server: pull initial weights, then push/pull each epoch."""
    s.start()
    s.pull()
    for epoch in range(max_epoch):
        print(f"[Server] On epoch {epoch}")
        s.push()
        s.pull()
    s.close()
def main_client(c):
    """Drive one federated client: init weights, then pull/update/push each epoch."""
    c.start()
    # weight initialization
    c.weights = {f"w{idx}": tensor.random((3, 3)) for idx in range(2)}
    c.push()
    for epoch in range(max_epoch):
        print(f"[Client {c.global_rank}] On epoch {epoch}")
        # Pull from Server
        c.pull()
        # Update locally
        for key in c.weights:
            c.weights[key] += c.global_rank + 1
        # Push to Server
        c.push()
    c.close()
if __name__ == "__main__":
    # CLI: run this script once as --mode server and once per client as
    # --mode client with a distinct --global_rank; all share --port.
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", choices=["server", "client"])
    parser.add_argument("--num_clients", default=1, type=int)
    parser.add_argument("--global_rank", default=0, type=int)
    parser.add_argument("--secure", action="store_true")
    parser.add_argument("--port", default=1234, type=int)
    args = parser.parse_args()
    if args.mode == "server":
        s = Server(num_clients=args.num_clients, port=args.port, secure=args.secure)
        main_server(s)
    elif args.mode == "client":
        c = Client(global_rank=args.global_rank, port=args.port, secure=args.secure)
        main_client(c)
| StarcoderdataPython |
3474777 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions of data generators for gym problems."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We need gym_utils for the game environments defined there.
from tensor2tensor.data_generators import gym_utils # pylint: disable=unused-import
# pylint: disable=g-multiple-import
from tensor2tensor.data_generators.gym_problems import GymDiscreteProblem,\
GymSimulatedDiscreteProblem, GymRealDiscreteProblem, \
GymDiscreteProblemWithAutoencoder, GymDiscreteProblemAutoencoded, \
GymSimulatedDiscreteProblemAutoencoded, \
GymSimulatedDiscreteProblemForWorldModelEval, \
GymSimulatedDiscreteProblemForWorldModelEvalAutoencoded
# pylint: enable=g-multiple-import
from tensor2tensor.utils import registry
# Game list from our list of ROMs
# Removed because XDeterministic-v4 did not exist:
# * adventure
# * defender
# * kaboom
ATARI_GAMES = [
"air_raid", "alien", "amidar", "assault", "asterix", "asteroids",
"atlantis", "bank_heist", "battle_zone", "beam_rider", "berzerk", "bowling",
"boxing", "breakout", "carnival", "centipede", "chopper_command",
"crazy_climber", "demon_attack", "double_dunk", "elevator_action", "enduro",
"fishing_derby", "freeway", "frostbite", "gopher", "gravitar", "hero",
"ice_hockey", "jamesbond", "journey_escape", "kangaroo", "krull",
"kung_fu_master", "montezuma_revenge", "ms_pacman", "name_this_game",
"phoenix", "pitfall", "pong", "pooyan", "private_eye", "qbert", "riverraid",
"road_runner", "robotank", "seaquest", "skiing", "solaris",
"space_invaders", "star_gunner", "tennis", "time_pilot", "tutankham",
"up_n_down", "venture", "video_pinball", "wizard_of_wor", "yars_revenge",
"zaxxon"
]
# List from paper:
# https://arxiv.org/pdf/1805.11593.pdf
# plus frostbite.
ATARI_GAMES_WITH_HUMAN_SCORE = [
"alien", "amidar", "assault", "asterix", "asteroids",
"atlantis", "bank_heist", "battle_zone", "beam_rider", "bowling",
"boxing", "breakout", "chopper_command",
"crazy_climber", "demon_attack", "double_dunk", "enduro",
"fishing_derby", "freeway", "frostbite", "gopher", "gravitar", "hero",
"ice_hockey", "jamesbond", "kangaroo", "krull",
"kung_fu_master", "montezuma_revenge", "ms_pacman", "name_this_game",
"pitfall", "pong", "private_eye", "qbert", "riverraid",
"road_runner", "seaquest", "solaris",
"up_n_down", "video_pinball", "yars_revenge",
]
ATARI_ALL_MODES_SHORT_LIST = []
ATARI_WHITELIST_GAMES = [
"amidar",
"bank_heist",
"berzerk",
"boxing",
"crazy_climber",
"freeway",
"frostbite",
"gopher",
"kung_fu_master",
"ms_pacman",
"pong",
"qbert",
"seaquest",
]
# Games on which model-free does better than model-based at this point.
ATARI_CURIOUS_GAMES = [
"bank_heist",
"boxing",
"enduro",
"kangaroo",
"road_runner",
"up_n_down",
]
# Different ATARI game modes in OpenAI Gym. Full list here:
# https://github.com/openai/gym/blob/master/gym/envs/__init__.py
ATARI_GAME_MODES = [
"Deterministic-v0", # 0.25 repeat action probability, 4 frame skip.
"Deterministic-v4", # 0.00 repeat action probability, 4 frame skip.
"NoFrameskip-v0", # 0.25 repeat action probability, 1 frame skip.
"NoFrameskip-v4", # 0.00 repeat action probability, 1 frame skip.
"-v0", # 0.25 repeat action probability, (2 to 5) frame skip.
"-v4" # 0.00 repeat action probability, (2 to 5) frame skip.
]
# List of all ATARI envs in all modes.
ATARI_PROBLEMS = {}
@registry.register_problem
class GymWrappedFullPongRandom(GymDiscreteProblem):
"""Pong game, random actions."""
@property
def env_name(self):
return "T2TPongWarmUp20RewSkipFull-v1"
@property
def min_reward(self):
return -1
@property
def num_rewards(self):
return 3
@property
def num_testing_steps(self):
return 100
@registry.register_problem
class GymDiscreteProblemWithAgentOnWrappedFullPong(GymRealDiscreteProblem,
GymWrappedFullPongRandom):
pass
@registry.register_problem
class GymDiscreteProblemWithAgentOnWrappedFullPongWithAutoencoder(
GymDiscreteProblemWithAutoencoder, GymWrappedFullPongRandom):
pass
@registry.register_problem
class GymDiscreteProblemWithAgentOnWrappedFullPongAutoencoded(
GymDiscreteProblemAutoencoded, GymWrappedFullPongRandom):
pass
@registry.register_problem
class GymSimulatedDiscreteProblemWithAgentOnWrappedFullPong(
GymSimulatedDiscreteProblem, GymWrappedFullPongRandom):
"""Simulated pong."""
@property
def initial_frames_problem(self):
return "gym_discrete_problem_with_agent_on_wrapped_full_pong"
@property
def num_testing_steps(self):
return 100
@registry.register_problem
class GymSimulatedDiscreteProblemForWorldModelEvalWithAgentOnWrappedFullPong(
GymSimulatedDiscreteProblemForWorldModelEval, GymWrappedFullPongRandom):
"""Simulated pong for world model evaluation."""
@property
def initial_frames_problem(self):
return "gym_discrete_problem_with_agent_on_wrapped_full_pong"
@property
def num_testing_steps(self):
return 100
@registry.register_problem
class GymSimulatedDiscreteProblemWithAgentOnWrappedFullPongAutoencoded(
GymSimulatedDiscreteProblemAutoencoded, GymWrappedFullPongRandom):
"""GymSimulatedDiscreteProblemWithAgentOnWrappedFullPongAutoencoded."""
@property
def initial_frames_problem(self):
return "gym_discrete_problem_with_agent_on_wrapped_full_pong_autoencoded"
@property
def num_testing_steps(self):
return 100
@registry.register_problem
class GymSimulatedDiscreteProblemForWorldModelEvalWithAgentOnWrappedFullPongAutoencoded( # pylint: disable=line-too-long
GymSimulatedDiscreteProblemForWorldModelEvalAutoencoded,
GymWrappedFullPongRandom):
"""Simulated pong for world model evaluation with encoded frames."""
@property
def initial_frames_problem(self):
return "gym_discrete_problem_with_agent_on_wrapped_full_pong_autoencoded"
@property
def num_testing_steps(self):
return 100
class GymClippedRewardRandom(GymDiscreteProblem):
"""Abstract base class for clipped reward games."""
@property
def env_name(self):
raise NotImplementedError
@property
def min_reward(self):
return -1
@property
def num_rewards(self):
return 3
def create_problems_for_game(
game_name,
resize_height_factor=1,
resize_width_factor=1,
game_mode="Deterministic-v4"):
"""Create and register problems for game_name.
Args:
game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist".
resize_height_factor: factor by which to resize the height of frames.
resize_width_factor: factor by which to resize the width of frames.
game_mode: the frame skip and sticky keys config.
Returns:
dict of problems with keys ("base", "agent", "simulated").
Raises:
ValueError: if clipped_reward=False or game_name not in ATARI_GAMES.
"""
if game_name not in ATARI_GAMES:
raise ValueError("Game %s not in ATARI_GAMES" % game_name)
if game_mode not in ATARI_GAME_MODES:
raise ValueError("Unknown ATARI game mode: %s." % game_mode)
camel_game_name = "".join(
[w[0].upper() + w[1:] for w in game_name.split("_")])
camel_game_name += game_mode
env_name = camel_game_name
# Create and register the Random and WithAgent Problem classes
problem_cls = type("Gym%sRandom" % camel_game_name,
(GymClippedRewardRandom,),
{"env_name": env_name,
"resize_height_factor": resize_height_factor,
"resize_width_factor": resize_width_factor})
registry.register_problem(problem_cls)
with_agent_cls = type("GymDiscreteProblemWithAgentOn%s" % camel_game_name,
(GymRealDiscreteProblem, problem_cls), {})
registry.register_problem(with_agent_cls)
# Create and register the simulated Problem
simulated_cls = type(
"GymSimulatedDiscreteProblemWithAgentOn%s" % camel_game_name,
(GymSimulatedDiscreteProblem, problem_cls), {
"initial_frames_problem": with_agent_cls.name,
"num_testing_steps": 100
})
registry.register_problem(simulated_cls)
# Create and register the simulated Problem
world_model_eval_cls = type(
"GymSimulatedDiscreteProblemForWorldModelEvalWithAgentOn%s" %
camel_game_name,
(GymSimulatedDiscreteProblemForWorldModelEval, problem_cls), {
"initial_frames_problem": with_agent_cls.name,
"num_testing_steps": 100
})
registry.register_problem(world_model_eval_cls)
return {
"base": problem_cls,
"agent": with_agent_cls,
"simulated": simulated_cls,
"world_model_eval": world_model_eval_cls,
}
# Register the atari games with all of the possible modes.
for game in ATARI_ALL_MODES_SHORT_LIST:
ATARI_PROBLEMS[game] = {}
for mode in ATARI_GAME_MODES:
classes = create_problems_for_game(
game,
game_mode=mode)
ATARI_PROBLEMS[game][mode] = classes
| StarcoderdataPython |
6553376 | from . import controllers, defines, hooks, infos, parameters, parts, util
| StarcoderdataPython |
3507356 | from .error_utils import BuildSystemException
def load_export_list_from_def_file(def_file, winapi_only, for_winapi):
export_section_found = False
export_list = []
lines = [line.rstrip('\r\n') for line in open(def_file)]
line_number = 0
inside_export = False
for line in lines:
line_number += 1
text = line.lstrip()
if not text or text[0] == ';':
continue
tokens = text.split()
line_is_keyword = False
if len(line) == len(text):
line_is_keyword = True
if line_is_keyword:
if inside_export:
inside_export = False
elif len(tokens) == 1 and tokens[0] == 'EXPORTS':
if export_section_found:
raise BuildSystemException("'EXPORTS' section found more then once inside DEF file: '{}'".format(def_file))
export_section_found = True
inside_export = True
continue
if inside_export:
if tokens and not tokens[0].startswith('@'):
symbol = tokens[0]
symbol_enabled = True
if winapi_only and not for_winapi:
if symbol in winapi_only:
symbol_enabled = False
if symbol_enabled:
export_list.append(symbol)
if not export_section_found:
raise BuildSystemException("'EXPORTS' section not found inside DEF file: '{}'".format(def_file))
if not export_list:
raise BuildSystemException("Cannot load symbols information from 'EXPORTS' section inside DEF file: '{}'".format(def_file))
return export_list
| StarcoderdataPython |
11383961 | <filename>utils.py
import cv2
def split_board_image(floc, fname, out_dir, board=None, square_size=150):
    """Slice a board photo into individual square tiles and save them as JPEGs.

    Args:
        floc: Path of the input board image.
        fname: Identifier embedded in output file names when `board` is given.
        out_dir: Directory receiving the tile images. When `board` is given,
            tiles are written into per-piece subdirectories, which are
            presumably expected to exist already — TODO confirm with callers.
        board: Optional 2D labels; board[i][j] names the piece on tile (i, j).
        square_size: Edge length of one tile in pixels. Defaults to 150,
            matching the original hard-coded tile size.
    """
    img = cv2.imread(floc)
    # Walk the image in tile-sized steps; (i, j) are tile grid coordinates.
    for r in range(0, img.shape[0], square_size):
        i = r // square_size
        for c in range(0, img.shape[1], square_size):
            j = c // square_size
            if board:
                piece = board[i][j]
                out_loc = "{}/{}/_{}_{}_{}.jpg".format(out_dir, piece, fname, r, c)
            else:
                out_loc = "{}/{}_{}.jpg".format(out_dir, i, j)
            cv2.imwrite(out_loc, img[r:r + square_size, c:c + square_size, :])
| StarcoderdataPython |
238941 | #!/usr/bin/env python
import os
try:
import readline # NOQA
except ImportError:
pass
from pprint import pprint # NOQA
from coaster.sqlalchemy import BaseMixin
from coaster.utils import buid
from flask import Flask
from nodular import * # NOQA
class User(BaseMixin, db.Model):
    """Minimal user model used by this interactive demo script."""

    __tablename__ = 'user'
    # Public opaque identifier; `buid` generates a random 22-character id.
    userid = db.Column(db.Unicode(22), nullable=False, default=buid, unique=True)
    # Optional display name; nullable, so anonymous users are allowed.
    username = db.Column(db.Unicode(250), nullable=True)
# Configure a Flask app backed by SQLAlchemy; the database URI comes from the
# environment, falling back to an in-memory SQLite store.
app = Flask(__name__)
app.config.update(
    SQLALCHEMY_DATABASE_URI=os.environ.get('SQLALCHEMY_DATABASE_URI', 'sqlite://'),
    SQLALCHEMY_ECHO=False,
)
db.init_app(app)
db.app = app

# Seed the node tree with a root node.
root = Node(name=u'root', title=u'Root')
db.session.add(root)

# Drop into the interactive interpreter once the script finishes.
os.environ['PYTHONINSPECT'] = 'True'
| StarcoderdataPython |
4908482 | """
This module contains classes intended to parse and deal with data from Roblox roblox badge endpoints.
"""
from .bases.baserobloxbadge import BaseRobloxBadge
from .utilities.shared import ClientSharedObject
class RobloxBadge(BaseRobloxBadge):
    """
    Represents a Roblox roblox badge.

    Attributes:
        id: The badge's ID.
        name: The badge's name.
        description: The badge's description.
        image_url: A link to the badge's image.
    """

    def __init__(self, shared: ClientSharedObject, data: dict):
        badge_id = data["id"]
        self._shared: ClientSharedObject = shared
        self.id: int = badge_id
        # The base class needs the id, so it is assigned before this call.
        super().__init__(shared=shared, roblox_badge_id=badge_id)
        self.name: str = data["name"]
        self.description: str = data["description"]
        self.image_url: str = data["imageUrl"]

    def __repr__(self):
        return f"<{self.__class__.__name__} name={self.name!r}>"
| StarcoderdataPython |
4855704 | <filename>src/oci/data_connectivity/models/__init__.py
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .abstract_call_attribute import AbstractCallAttribute
from .abstract_data_operation_config import AbstractDataOperationConfig
from .abstract_format_attribute import AbstractFormatAttribute
from .abstract_read_attribute import AbstractReadAttribute
from .abstract_write_attribute import AbstractWriteAttribute
from .aggregator_summary import AggregatorSummary
from .attach_data_asset_info import AttachDataAssetInfo
from .attribute import Attribute
from .attribute_profile_result import AttributeProfileResult
from .avro_format_attribute import AvroFormatAttribute
from .base_type import BaseType
from .bicc_read_attributes import BiccReadAttributes
from .bip_call_attribute import BipCallAttribute
from .call_operation_config import CallOperationConfig
from .change_endpoint_compartment_details import ChangeEndpointCompartmentDetails
from .change_registry_compartment_details import ChangeRegistryCompartmentDetails
from .column import Column
from .composite_type import CompositeType
from .compression import Compression
from .config_definition import ConfigDefinition
from .config_parameter_definition import ConfigParameterDefinition
from .config_parameter_value import ConfigParameterValue
from .config_values import ConfigValues
from .configured_type import ConfiguredType
from .connection import Connection
from .connection_property import ConnectionProperty
from .connection_summary import ConnectionSummary
from .connection_summary_collection import ConnectionSummaryCollection
from .connection_validation import ConnectionValidation
from .connection_validation_summary import ConnectionValidationSummary
from .connection_validation_summary_collection import ConnectionValidationSummaryCollection
from .connectivity_validation import ConnectivityValidation
from .connector_attribute import ConnectorAttribute
from .create_attach_data_asset_details import CreateAttachDataAssetDetails
from .create_connection_details import CreateConnectionDetails
from .create_connection_validation_details import CreateConnectionValidationDetails
from .create_connectivity_validation_details import CreateConnectivityValidationDetails
from .create_data_asset_details import CreateDataAssetDetails
from .create_data_preview_details import CreateDataPreviewDetails
from .create_data_profile_details import CreateDataProfileDetails
from .create_de_reference_artifact_details import CreateDeReferenceArtifactDetails
from .create_detach_data_asset_details import CreateDetachDataAssetDetails
from .create_dp_endpoint_details import CreateDpEndpointDetails
from .create_dp_endpoint_from_private import CreateDpEndpointFromPrivate
from .create_dp_endpoint_from_public import CreateDpEndpointFromPublic
from .create_endpoint_details import CreateEndpointDetails
from .create_entity_shape_details import CreateEntityShapeDetails
from .create_entity_shape_from_data_store import CreateEntityShapeFromDataStore
from .create_entity_shape_from_file import CreateEntityShapeFromFile
from .create_entity_shape_from_sql import CreateEntityShapeFromSQL
from .create_entity_shape_from_table import CreateEntityShapeFromTable
from .create_entity_shape_from_view import CreateEntityShapeFromView
from .create_execute_operation_job_details import CreateExecuteOperationJobDetails
from .create_folder_details import CreateFolderDetails
from .create_full_push_down_task_details import CreateFullPushDownTaskDetails
from .create_reference_artifact_details import CreateReferenceArtifactDetails
from .create_registry_details import CreateRegistryDetails
from .create_test_network_connectivity_details import CreateTestNetworkConnectivityDetails
from .csv_format_attribute import CsvFormatAttribute
from .data_asset import DataAsset
from .data_asset_summary import DataAssetSummary
from .data_asset_summary_collection import DataAssetSummaryCollection
from .data_entity import DataEntity
from .data_entity_details import DataEntityDetails
from .data_entity_from_data_store import DataEntityFromDataStore
from .data_entity_from_data_store_entity_details import DataEntityFromDataStoreEntityDetails
from .data_entity_from_file import DataEntityFromFile
from .data_entity_from_file_entity_details import DataEntityFromFileEntityDetails
from .data_entity_from_sql import DataEntityFromSql
from .data_entity_from_sql_entity_details import DataEntityFromSqlEntityDetails
from .data_entity_from_table import DataEntityFromTable
from .data_entity_from_table_entity_details import DataEntityFromTableEntityDetails
from .data_entity_from_view import DataEntityFromView
from .data_entity_from_view_entity_details import DataEntityFromViewEntityDetails
from .data_entity_summary import DataEntitySummary
from .data_entity_summary_collection import DataEntitySummaryCollection
from .data_entity_summary_from_data_store import DataEntitySummaryFromDataStore
from .data_entity_summary_from_file import DataEntitySummaryFromFile
from .data_entity_summary_from_sql import DataEntitySummaryFromSql
from .data_entity_summary_from_table import DataEntitySummaryFromTable
from .data_entity_summary_from_view import DataEntitySummaryFromView
from .data_format import DataFormat
from .data_preview import DataPreview
from .data_profile import DataProfile
from .data_type import DataType
from .data_type_stat import DataTypeStat
from .date_attribute import DateAttribute
from .de_reference_info import DeReferenceInfo
from .derived_type import DerivedType
from .detach_data_asset_info import DetachDataAssetInfo
from .dp_endpoint import DpEndpoint
from .dp_endpoint_details import DpEndpointDetails
from .dp_endpoint_from_private import DpEndpointFromPrivate
from .dp_endpoint_from_private_details import DpEndpointFromPrivateDetails
from .dp_endpoint_from_public import DpEndpointFromPublic
from .dp_endpoint_from_public_details import DpEndpointFromPublicDetails
from .dp_endpoint_summary import DpEndpointSummary
from .dp_endpoint_summary_from_private import DpEndpointSummaryFromPrivate
from .dp_endpoint_summary_from_public import DpEndpointSummaryFromPublic
from .endpoint import Endpoint
from .endpoint_summary import EndpointSummary
from .endpoint_summary_collection import EndpointSummaryCollection
from .entity_profile_result import EntityProfileResult
from .entity_shape import EntityShape
from .entity_shape_from_data_store import EntityShapeFromDataStore
from .entity_shape_from_file import EntityShapeFromFile
from .entity_shape_from_sql import EntityShapeFromSQL
from .entity_shape_from_table import EntityShapeFromTable
from .entity_shape_from_view import EntityShapeFromView
from .error_details import ErrorDetails
from .execute_operation_job import ExecuteOperationJob
from .execute_operation_job_details import ExecuteOperationJobDetails
from .external_storage import ExternalStorage
from .filter_push import FilterPush
from .folder import Folder
from .folder_summary import FolderSummary
from .folder_summary_collection import FolderSummaryCollection
from .foreign_key import ForeignKey
from .full_push_down_task_response import FullPushDownTaskResponse
from .hdfs_write_attributes import HdfsWriteAttributes
from .histogram import Histogram
from .input_port import InputPort
from .join import Join
from .json_format_attribute import JsonFormatAttribute
from .key import Key
from .key_attribute import KeyAttribute
from .key_range import KeyRange
from .key_range_partition_config import KeyRangePartitionConfig
from .message import Message
from .native_shape_field import NativeShapeField
from .network_connectivity_status import NetworkConnectivityStatus
from .network_connectivity_status_collection import NetworkConnectivityStatusCollection
from .numeric_attribute import NumericAttribute
from .object_freq_stat import ObjectFreqStat
from .object_metadata import ObjectMetadata
from .object_storage_write_attributes import ObjectStorageWriteAttributes
from .operation import Operation
from .operation_exec_result import OperationExecResult
from .operation_from_procedure import OperationFromProcedure
from .operation_input_record import OperationInputRecord
from .operation_summary import OperationSummary
from .operation_summary_collection import OperationSummaryCollection
from .operation_summary_from_procedure import OperationSummaryFromProcedure
from .oracle_adwc_write_attributes import OracleAdwcWriteAttributes
from .oracle_atp_write_attributes import OracleAtpWriteAttributes
from .oracle_read_attribute import OracleReadAttribute
from .oracle_read_attributes import OracleReadAttributes
from .oracle_write_attributes import OracleWriteAttributes
from .outlier import Outlier
from .output_port import OutputPort
from .parameter import Parameter
from .parent_reference import ParentReference
from .parquet_format_attribute import ParquetFormatAttribute
from .partition_config import PartitionConfig
from .primary_key import PrimaryKey
from .profile_config import ProfileConfig
from .profile_stat import ProfileStat
from .push_down_operation import PushDownOperation
from .query import Query
from .read_operation_config import ReadOperationConfig
from .reference_artifact_summary import ReferenceArtifactSummary
from .reference_artifact_summary_collection import ReferenceArtifactSummaryCollection
from .reference_info import ReferenceInfo
from .registry import Registry
from .registry_metadata import RegistryMetadata
from .registry_summary import RegistrySummary
from .registry_summary_collection import RegistrySummaryCollection
from .row import Row
from .schema import Schema
from .schema_drift_config import SchemaDriftConfig
from .schema_summary import SchemaSummary
from .schema_summary_collection import SchemaSummaryCollection
from .select import Select
from .shape import Shape
from .shape_field import ShapeField
from .sort import Sort
from .sort_clause import SortClause
from .source import Source
from .string_attribute import StringAttribute
from .structured_type import StructuredType
from .target import Target
from .test_network_connectivity import TestNetworkConnectivity
from .type import Type
from .type_library import TypeLibrary
from .type_summary import TypeSummary
from .type_system import TypeSystem
from .typed_object import TypedObject
from .types_summary_collection import TypesSummaryCollection
from .unique_key import UniqueKey
from .update_connection_details import UpdateConnectionDetails
from .update_data_asset_details import UpdateDataAssetDetails
from .update_dp_endpoint_details import UpdateDpEndpointDetails
from .update_dp_endpoint_from_private import UpdateDpEndpointFromPrivate
from .update_dp_endpoint_from_public import UpdateDpEndpointFromPublic
from .update_endpoint_details import UpdateEndpointDetails
from .update_folder_details import UpdateFolderDetails
from .update_registry_details import UpdateRegistryDetails
from .validation_message import ValidationMessage
from .validation_result import ValidationResult
from .work_request import WorkRequest
from .work_request_error import WorkRequestError
from .work_request_error_collection import WorkRequestErrorCollection
from .work_request_error_summary import WorkRequestErrorSummary
from .work_request_log import WorkRequestLog
from .work_request_log_collection import WorkRequestLogCollection
from .work_request_resource import WorkRequestResource
from .work_request_summary import WorkRequestSummary
from .work_request_summary_collection import WorkRequestSummaryCollection
from .write_operation_config import WriteOperationConfig
# Maps model type names (as emitted in API payload discriminators) to their
# model classes for the data_connectivity service, so responses can be
# deserialized into the correct class.
data_connectivity_type_mapping = {
    "AbstractCallAttribute": AbstractCallAttribute,
    "AbstractDataOperationConfig": AbstractDataOperationConfig,
    "AbstractFormatAttribute": AbstractFormatAttribute,
    "AbstractReadAttribute": AbstractReadAttribute,
    "AbstractWriteAttribute": AbstractWriteAttribute,
    "AggregatorSummary": AggregatorSummary,
    "AttachDataAssetInfo": AttachDataAssetInfo,
    "Attribute": Attribute,
    "AttributeProfileResult": AttributeProfileResult,
    "AvroFormatAttribute": AvroFormatAttribute,
    "BaseType": BaseType,
    "BiccReadAttributes": BiccReadAttributes,
    "BipCallAttribute": BipCallAttribute,
    "CallOperationConfig": CallOperationConfig,
    "ChangeEndpointCompartmentDetails": ChangeEndpointCompartmentDetails,
    "ChangeRegistryCompartmentDetails": ChangeRegistryCompartmentDetails,
    "Column": Column,
    "CompositeType": CompositeType,
    "Compression": Compression,
    "ConfigDefinition": ConfigDefinition,
    "ConfigParameterDefinition": ConfigParameterDefinition,
    "ConfigParameterValue": ConfigParameterValue,
    "ConfigValues": ConfigValues,
    "ConfiguredType": ConfiguredType,
    "Connection": Connection,
    "ConnectionProperty": ConnectionProperty,
    "ConnectionSummary": ConnectionSummary,
    "ConnectionSummaryCollection": ConnectionSummaryCollection,
    "ConnectionValidation": ConnectionValidation,
    "ConnectionValidationSummary": ConnectionValidationSummary,
    "ConnectionValidationSummaryCollection": ConnectionValidationSummaryCollection,
    "ConnectivityValidation": ConnectivityValidation,
    "ConnectorAttribute": ConnectorAttribute,
    "CreateAttachDataAssetDetails": CreateAttachDataAssetDetails,
    "CreateConnectionDetails": CreateConnectionDetails,
    "CreateConnectionValidationDetails": CreateConnectionValidationDetails,
    "CreateConnectivityValidationDetails": CreateConnectivityValidationDetails,
    "CreateDataAssetDetails": CreateDataAssetDetails,
    "CreateDataPreviewDetails": CreateDataPreviewDetails,
    "CreateDataProfileDetails": CreateDataProfileDetails,
    "CreateDeReferenceArtifactDetails": CreateDeReferenceArtifactDetails,
    "CreateDetachDataAssetDetails": CreateDetachDataAssetDetails,
    "CreateDpEndpointDetails": CreateDpEndpointDetails,
    "CreateDpEndpointFromPrivate": CreateDpEndpointFromPrivate,
    "CreateDpEndpointFromPublic": CreateDpEndpointFromPublic,
    "CreateEndpointDetails": CreateEndpointDetails,
    "CreateEntityShapeDetails": CreateEntityShapeDetails,
    "CreateEntityShapeFromDataStore": CreateEntityShapeFromDataStore,
    "CreateEntityShapeFromFile": CreateEntityShapeFromFile,
    "CreateEntityShapeFromSQL": CreateEntityShapeFromSQL,
    "CreateEntityShapeFromTable": CreateEntityShapeFromTable,
    "CreateEntityShapeFromView": CreateEntityShapeFromView,
    "CreateExecuteOperationJobDetails": CreateExecuteOperationJobDetails,
    "CreateFolderDetails": CreateFolderDetails,
    "CreateFullPushDownTaskDetails": CreateFullPushDownTaskDetails,
    "CreateReferenceArtifactDetails": CreateReferenceArtifactDetails,
    "CreateRegistryDetails": CreateRegistryDetails,
    "CreateTestNetworkConnectivityDetails": CreateTestNetworkConnectivityDetails,
    "CsvFormatAttribute": CsvFormatAttribute,
    "DataAsset": DataAsset,
    "DataAssetSummary": DataAssetSummary,
    "DataAssetSummaryCollection": DataAssetSummaryCollection,
    "DataEntity": DataEntity,
    "DataEntityDetails": DataEntityDetails,
    "DataEntityFromDataStore": DataEntityFromDataStore,
    "DataEntityFromDataStoreEntityDetails": DataEntityFromDataStoreEntityDetails,
    "DataEntityFromFile": DataEntityFromFile,
    "DataEntityFromFileEntityDetails": DataEntityFromFileEntityDetails,
    "DataEntityFromSql": DataEntityFromSql,
    "DataEntityFromSqlEntityDetails": DataEntityFromSqlEntityDetails,
    "DataEntityFromTable": DataEntityFromTable,
    "DataEntityFromTableEntityDetails": DataEntityFromTableEntityDetails,
    "DataEntityFromView": DataEntityFromView,
    "DataEntityFromViewEntityDetails": DataEntityFromViewEntityDetails,
    "DataEntitySummary": DataEntitySummary,
    "DataEntitySummaryCollection": DataEntitySummaryCollection,
    "DataEntitySummaryFromDataStore": DataEntitySummaryFromDataStore,
    "DataEntitySummaryFromFile": DataEntitySummaryFromFile,
    "DataEntitySummaryFromSql": DataEntitySummaryFromSql,
    "DataEntitySummaryFromTable": DataEntitySummaryFromTable,
    "DataEntitySummaryFromView": DataEntitySummaryFromView,
    "DataFormat": DataFormat,
    "DataPreview": DataPreview,
    "DataProfile": DataProfile,
    "DataType": DataType,
    "DataTypeStat": DataTypeStat,
    "DateAttribute": DateAttribute,
    "DeReferenceInfo": DeReferenceInfo,
    "DerivedType": DerivedType,
    "DetachDataAssetInfo": DetachDataAssetInfo,
    "DpEndpoint": DpEndpoint,
    "DpEndpointDetails": DpEndpointDetails,
    "DpEndpointFromPrivate": DpEndpointFromPrivate,
    "DpEndpointFromPrivateDetails": DpEndpointFromPrivateDetails,
    "DpEndpointFromPublic": DpEndpointFromPublic,
    "DpEndpointFromPublicDetails": DpEndpointFromPublicDetails,
    "DpEndpointSummary": DpEndpointSummary,
    "DpEndpointSummaryFromPrivate": DpEndpointSummaryFromPrivate,
    "DpEndpointSummaryFromPublic": DpEndpointSummaryFromPublic,
    "Endpoint": Endpoint,
    "EndpointSummary": EndpointSummary,
    "EndpointSummaryCollection": EndpointSummaryCollection,
    "EntityProfileResult": EntityProfileResult,
    "EntityShape": EntityShape,
    "EntityShapeFromDataStore": EntityShapeFromDataStore,
    "EntityShapeFromFile": EntityShapeFromFile,
    "EntityShapeFromSQL": EntityShapeFromSQL,
    "EntityShapeFromTable": EntityShapeFromTable,
    "EntityShapeFromView": EntityShapeFromView,
    "ErrorDetails": ErrorDetails,
    "ExecuteOperationJob": ExecuteOperationJob,
    "ExecuteOperationJobDetails": ExecuteOperationJobDetails,
    "ExternalStorage": ExternalStorage,
    "FilterPush": FilterPush,
    "Folder": Folder,
    "FolderSummary": FolderSummary,
    "FolderSummaryCollection": FolderSummaryCollection,
    "ForeignKey": ForeignKey,
    "FullPushDownTaskResponse": FullPushDownTaskResponse,
    "HdfsWriteAttributes": HdfsWriteAttributes,
    "Histogram": Histogram,
    "InputPort": InputPort,
    "Join": Join,
    "JsonFormatAttribute": JsonFormatAttribute,
    "Key": Key,
    "KeyAttribute": KeyAttribute,
    "KeyRange": KeyRange,
    "KeyRangePartitionConfig": KeyRangePartitionConfig,
    "Message": Message,
    "NativeShapeField": NativeShapeField,
    "NetworkConnectivityStatus": NetworkConnectivityStatus,
    "NetworkConnectivityStatusCollection": NetworkConnectivityStatusCollection,
    "NumericAttribute": NumericAttribute,
    "ObjectFreqStat": ObjectFreqStat,
    "ObjectMetadata": ObjectMetadata,
    "ObjectStorageWriteAttributes": ObjectStorageWriteAttributes,
    "Operation": Operation,
    "OperationExecResult": OperationExecResult,
    "OperationFromProcedure": OperationFromProcedure,
    "OperationInputRecord": OperationInputRecord,
    "OperationSummary": OperationSummary,
    "OperationSummaryCollection": OperationSummaryCollection,
    "OperationSummaryFromProcedure": OperationSummaryFromProcedure,
    "OracleAdwcWriteAttributes": OracleAdwcWriteAttributes,
    "OracleAtpWriteAttributes": OracleAtpWriteAttributes,
    "OracleReadAttribute": OracleReadAttribute,
    "OracleReadAttributes": OracleReadAttributes,
    "OracleWriteAttributes": OracleWriteAttributes,
    "Outlier": Outlier,
    "OutputPort": OutputPort,
    "Parameter": Parameter,
    "ParentReference": ParentReference,
    "ParquetFormatAttribute": ParquetFormatAttribute,
    "PartitionConfig": PartitionConfig,
    "PrimaryKey": PrimaryKey,
    "ProfileConfig": ProfileConfig,
    "ProfileStat": ProfileStat,
    "PushDownOperation": PushDownOperation,
    "Query": Query,
    "ReadOperationConfig": ReadOperationConfig,
    "ReferenceArtifactSummary": ReferenceArtifactSummary,
    "ReferenceArtifactSummaryCollection": ReferenceArtifactSummaryCollection,
    "ReferenceInfo": ReferenceInfo,
    "Registry": Registry,
    "RegistryMetadata": RegistryMetadata,
    "RegistrySummary": RegistrySummary,
    "RegistrySummaryCollection": RegistrySummaryCollection,
    "Row": Row,
    "Schema": Schema,
    "SchemaDriftConfig": SchemaDriftConfig,
    "SchemaSummary": SchemaSummary,
    "SchemaSummaryCollection": SchemaSummaryCollection,
    "Select": Select,
    "Shape": Shape,
    "ShapeField": ShapeField,
    "Sort": Sort,
    "SortClause": SortClause,
    "Source": Source,
    "StringAttribute": StringAttribute,
    "StructuredType": StructuredType,
    "Target": Target,
    "TestNetworkConnectivity": TestNetworkConnectivity,
    "Type": Type,
    "TypeLibrary": TypeLibrary,
    "TypeSummary": TypeSummary,
    "TypeSystem": TypeSystem,
    "TypedObject": TypedObject,
    "TypesSummaryCollection": TypesSummaryCollection,
    "UniqueKey": UniqueKey,
    "UpdateConnectionDetails": UpdateConnectionDetails,
    "UpdateDataAssetDetails": UpdateDataAssetDetails,
    "UpdateDpEndpointDetails": UpdateDpEndpointDetails,
    "UpdateDpEndpointFromPrivate": UpdateDpEndpointFromPrivate,
    "UpdateDpEndpointFromPublic": UpdateDpEndpointFromPublic,
    "UpdateEndpointDetails": UpdateEndpointDetails,
    "UpdateFolderDetails": UpdateFolderDetails,
    "UpdateRegistryDetails": UpdateRegistryDetails,
    "ValidationMessage": ValidationMessage,
    "ValidationResult": ValidationResult,
    "WorkRequest": WorkRequest,
    "WorkRequestError": WorkRequestError,
    "WorkRequestErrorCollection": WorkRequestErrorCollection,
    "WorkRequestErrorSummary": WorkRequestErrorSummary,
    "WorkRequestLog": WorkRequestLog,
    "WorkRequestLogCollection": WorkRequestLogCollection,
    "WorkRequestResource": WorkRequestResource,
    "WorkRequestSummary": WorkRequestSummary,
    "WorkRequestSummaryCollection": WorkRequestSummaryCollection,
    "WriteOperationConfig": WriteOperationConfig
}
| StarcoderdataPython |
9697126 | <reponame>bjonnh/fomu-playground<filename>litex_things/deps/litescope/litescope/software/dump/common.py
def dec2bin(d, width=0):
    """Return the binary representation of `d`, zero-padded to `width` chars.

    The special value "x" (unknown signal, VCD-style) yields a string of
    `width` 'x' characters.

    Args:
        d: Non-negative integer sample value, or the string "x".
        width: Minimum number of characters in the result (left-padded with
            zeros). Defaults to 0 (no padding).

    Returns:
        Binary string representation of `d`.
    """
    if d == "x":
        return "x" * width
    # format(d, "b") replaces the original hand-rolled divide-by-two loop,
    # which also hung forever on negative inputs.
    return format(d, "b").zfill(width)
def get_bits(values, low, high=None):
    """Extract the bit field [low, high) from every sample in `values`.

    When `high` is omitted, the single bit at position `low` is extracted.
    Returns a list of the extracted field values, one per sample.
    """
    if high is None:
        high = low + 1
    # Mask selecting (high - low) bits once shifted down by `low`.
    mask = (1 << (high - low)) - 1
    return [(sample >> low) & mask for sample in values]
class DumpData(list):
    """A list of integer samples tagged with a bit width.

    Indexing with an int returns that single bit extracted from every sample;
    indexing with a slice returns the corresponding bit field. Stepped slices
    are not supported.
    """

    def __init__(self, width):
        self.width = width

    def __getitem__(self, key):
        if isinstance(key, int):
            # Single bit `key` of every sample.
            return get_bits(self, key)
        if isinstance(key, slice):
            if key.step is not None:
                # Stepped bit slices are unsupported (matches historical API,
                # which raises KeyError rather than TypeError).
                raise KeyError
            start = 0 if key.start is None else key.start
            stop = self.width if key.stop is None else key.stop
            # Clamp the upper bound to the declared width.
            stop = min(stop, self.width)
            return get_bits(self, start, stop)
        raise KeyError
class DumpVariable:
    """A named signal with a fixed bit width and its sample history."""

    def __init__(self, name, width, values=()):
        """Store the signal metadata and wrap samples to the unsigned range.

        Args:
            name: Signal name.
            width: Signal width in bits.
            values: Iterable of sample values; each is truncated modulo
                2**width. The default was previously a mutable list ([]);
                an empty tuple avoids the shared-mutable-default pitfall.
        """
        self.width = width
        self.name = name
        # Normalize every sample into [0, 2**width).
        self.values = [int(v) % 2**width for v in values]

    def __len__(self):
        """Number of stored samples."""
        return len(self.values)
class Dump:
    """A collection of DumpVariable signals captured by the scope."""

    def __init__(self):
        self.variables = []

    def add(self, variable):
        """Append one variable to the dump."""
        self.variables.append(variable)

    def add_from_layout(self, layout, variable):
        """Split `variable` into per-signal variables following `layout`.

        `layout` is a sequence of (name, width) pairs; each signal's samples
        are duplicated (2x) so a synthetic "scope_clk" toggling at every
        original sample can be overlaid.
        """
        offset = 0
        for sig_name, sig_width in layout:
            chunk = variable[offset:offset + sig_width]
            # Repeat each sample twice: [v0, v0, v1, v1, ...].
            doubled = [sample for sample in chunk for _ in range(2)]
            self.add(DumpVariable(sig_name, sig_width, doubled))
            offset += sig_width
        self.add(DumpVariable("scope_clk", 1, [1, 0] * (len(self) // 2)))

    def __len__(self):
        """Length of the longest variable (0 when the dump is empty)."""
        return max((len(v) for v in self.variables), default=0)
| StarcoderdataPython |
32002 | import numpy as np
from scipy import interpolate, signal
from scipy.special import gamma
import ndmath
import warnings
import pkg_resources
class PlaningBoat():
"""Prismatic planing craft
Attributes:
speed (float): Speed (m/s). It is an input to :class:`PlaningBoat`.
weight (float): Weight (N). It is an input to :class:`PlaningBoat`.
beam (float): Beam (m). It is an input to :class:`PlaningBoat`.
lcg (float): Longitudinal center of gravity, measured from the stern (m). It is an input to :class:`PlaningBoat`.
vcg (float): Vertical center of gravity, measured from the keel (m). It is an input to :class:`PlaningBoat`.
r_g (float): Radius of gyration (m). It is an input to :class:`PlaningBoat`.
beta (float): Deadrise (deg). It is an input to :class:`PlaningBoat`.
epsilon (float): Thrust angle w.r.t. keel, CCW with body-fixed origin at 9 o'clock (deg). It is an input to :class:`PlaningBoat`.
vT (float): Thrust vertical distance, measured from keel, and positive up (m). It is an input to :class:`PlaningBoat`.
lT (float): Thrust horizontal distance, measured from stern, and positive forward (m). It is an input to :class:`PlaningBoat`.
length (float): Vessel LOA for seaway behavior estimates (m). Defaults to None. It is an input to :class:`PlaningBoat`.
H_sig (float): Significant wave heigth in an irregular sea state (m). Defaults to None. It is an input to :class:`PlaningBoat`.
ahr (float): Average hull roughness (m). Defaults to 150*10**-6. It is an input to :class:`PlaningBoat`.
Lf (float): Flap chord (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
sigma (float): Flap span-beam ratio (dimensionless). Defaults to 0. It is an input to :class:`PlaningBoat`.
delta (float): Flap deflection (deg). Defaults to 0. It is an input to :class:`PlaningBoat`.
l_air (float): Distance from stern to center of air pressure (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
h_air (float): Height from keel to top of square which bounds the air-drag-inducing area (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
b_air (float): Transverse width of square which bounds the air-drag-inducing area (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
C_shape (float): Area coefficient for air-drag-inducing area (dimensionless). C_shape = 1 means the air drag reference area is h_air*b_air. Defaults to 0. It is an input to :class:`PlaningBoat`.
C_D (float): Air drag coefficient (dimensionless). Defaults to 0.7. It is an input to :class:`PlaningBoat`.
rho (float): Water density (kg/m^3). Defaults to 1025.87. It is an input to :class:`PlaningBoat`.
nu (float): Water kinematic viscosity (m^2/s). Defaults to 1.19*10**-6. It is an input to :class:`PlaningBoat`.
rho_air (float): Air density (kg/m^3). Defaults to 1.225. It is an input to :class:`PlaningBoat`.
g (float): Gravitational acceleration (m/s^2). Defaults to 9.8066. It is an input to :class:`PlaningBoat`.
z_wl (float): Vertical distance of center of gravity to the calm water line (m). Defaults to 0. It is an input to :class:`PlaningBoat`, but modified when running :meth:`get_steady_trim`.
tau (float): Trim angle (deg). Defaults to 5. It is an input to :class:`PlaningBoat`, but modified when running :meth:`get_steady_trim`.
eta_3 (float): Additional heave (m). Initiates to 0.
eta_5 (float): Additional trim (deg). Initiates to zero.
wetted_lengths_type (int): 1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky's '64 approach, 3 = Use Savitsky's '76 approach. Defaults to 1. It is an input to :class:`PlaningBoat`.
z_max_type (int): 1 = Uses 3rd order polynomial fit, 2 = Uses cubic interpolation from table. This is only used if wetted_lenghts_type == 1. Defaults to 1. It is an input to :class:`PlaningBoat`.
L_K (float): Keel wetted length (m). It is updated when running :meth:`get_geo_lengths`.
L_C (float): Chine wetted length (m). It is updated when running :meth:`get_geo_lengths`.
lambda_W (float): Mean wetted-length to beam ratio, (L_K+L_C)/(2*beam) (dimensionless). It is updated when running :meth:`get_geo_lengths`.
x_s (float): Distance from keel/water-line intersection to start of wetted chine (m). It is updated when running :meth:`get_geo_lengths`.
z_max (float): Maximum pressure coordinate coefficient, z_max/Ut (dimensionless). It is updated when running :meth:`get_geo_lengths`.
hydrodynamic_force ((3,) ndarray): Hydrodynamic force (N, N, N*m). [F_x, F_z, M_cg] with x, y, rot directions in intertial coordinates. It is updated when running :meth:`get_forces`.
skin_friction ((3,) ndarray): Skin friction force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
air_resistance ((3,) ndarray): Air resistance force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
flap_force ((3,) ndarray): Flap resultant force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
thrust_force ((3,) ndarray): Thrust resultant force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
net_force ((3,) ndarray): Net force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
mass_matrix ((2, 2) ndarray): Mass coefficients matrix. [[A_33 (kg), A_35 (kg*m/rad)], [A_53 (kg*m), A_55 (kg*m^2/rad)]]. It is updated when running :meth:`get_eom_matrices`.
damping_matrix ((2, 2) ndarray): Damping coefficients matrix. [[B_33 (kg/s), B_35 (kg*m/(s*rad))], [B_53 (kg*m/s), B_55 (kg*m**2/(s*rad))]]. It is updated when running :meth:`get_eom_matrices`.
restoring_matrix ((2, 2) ndarray): Restoring coefficients matrix. [[C_33 (N/m), C_35 (N/rad)], [C_53 (N), C_55 (N*m/rad)]]. It is updated when running :meth:`get_eom_matrices`.
porpoising (list): [[eigenvalue result (bool), est. pitch settling time (s)], [Savitsky chart result (bool), critical trim angle (deg)]]. It is updated when running :meth:`check_porpoising`.
seaway_drag_type (int): 1 = Use Savitsky's '76 approximation, 2 = Use Fridsma's '71 designs charts. Defaults to 1. It is an input to :class:`PlaningBoat`.
avg_impact_acc ((2,) ndarray): Average impact acceleration at center of gravity and bow (g's). [n_cg, n_bow]. It is updated when running :meth:`get_seaway_behavior`.
R_AW (float): Added resistance in waves (N). It is updated when running :meth:`get_seaway_behavior`.
"""
def __init__(self, speed, weight, beam, lcg, vcg, r_g, beta, epsilon, vT, lT, length=None, H_sig=None, ahr=150e-6, Lf=0, sigma=0, delta=0, l_air=0, h_air=0, b_air=0, C_shape=0, C_D=0.7, z_wl=0, tau=5, rho=1025.87, nu=1.19e-6, rho_air=1.225, g=9.8066, wetted_lengths_type=1, z_max_type=1, seaway_drag_type=1):
"""Initialize attributes for PlaningBoat
Args:
speed (float): Speed (m/s).
weight (float): Weidght (N).
beam (float): Beam (m).
lcg (float): Longitudinal center of gravity, measured from the stern (m).
vcg (float): Vertical center of gravity, measured from the keel (m).
r_g (float): Radius of gyration (m).
beta (float): Deadrise (deg).
epsilon (float): Thrust angle w.r.t. keel, CCW with body-fixed origin at 9 o'clock (deg).
vT (float): Thrust vertical distance, measured from keel, and positive up (m).
lT (float): Thrust horizontal distance, measured from stern, and positive forward (m).
length (float, optional): Vessel LOA for seaway behavior estimates (m). Defaults to None.
H_sig (float, optional): Significant wave heigth in an irregular sea state (m). Defaults to None.
ahr (float, optional): Average hull roughness (m). Defaults to 150*10**-6.
Lf (float, optional): Flap chord (m). Defaults to 0.
sigma (float, optional): Flap span-beam ratio (dimensionless). Defaults to 0.
delta (float, optional): Flap deflection (deg). Defaults to 0.
l_air (float, optional): Distance from stern to center of air pressure (m). Defaults to 0.
h_air (float, optional): Height from keel to top of square which bounds the air-drag-inducing area (m). Defaults to 0.
b_air (float, optional): Transverse width of square which bounds the air-drag-inducing area (m). Defaults to 0.
C_shape (float, optional): Area coefficient for air-drag-inducing area (dimensionless). C_shape = 1 means the air drag reference area is h_air*b_air. Defaults to 0.
C_D (float, optional): Air drag coefficient (dimensionless). Defaults to 0.7.
z_wl (float, optional): Vertical distance of center of gravity to the calm water line (m). Defaults to 0.
tau (float, optional): Trim angle (deg). Defaults to 5.
rho (float, optional): Water density (kg/m^3). Defaults to 1025.87.
nu (float, optional): Water kinematic viscosity (m^2/s). Defaults to 1.19*10**-6.
rho_air (float, optional): Air density (kg/m^3). Defaults to 1.225.
g (float, optional): Gravitational acceleration (m/s^2). Defaults to 9.8066.
wetted_lengths_type (int, optional): 1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky's '64 approach, 3 = Use Savitsky's '76 approach. Defaults to 1.
z_max_type (int, optional): 1 = Uses 3rd order polynomial fit, 2 = Uses cubic interpolation from table. This is only used if wetted_lenghts_type == 1. Defaults to 1.
seaway_drag_type (int, optional): 1 = Use Savitsky's '76 approximation, 2 = Use Fridsma's '71 designs charts. Defaults to 1.
"""
self.speed = speed
self.weight = weight
self.beam = beam
self.lcg = lcg
self.vcg = vcg
self.r_g = r_g
self.beta = beta
self.epsilon = epsilon
self.vT = vT
self.lT = lT
self.length = length
self.H_sig = H_sig
self.ahr = ahr
self.Lf = Lf
self.sigma = sigma
self.delta = delta
self.l_air = l_air
self.h_air = h_air
self.b_air= b_air
self.C_shape = C_shape
self.z_wl = z_wl
self.tau = tau
self.eta_3 = 0
self.eta_5 = 0
self.rho = rho
self.nu = nu
self.rho_air = rho_air
self.C_D = C_D
self.g = g
self.gravity_force = np.array([0, -self.weight, 0])
self.wetted_lengths_type = wetted_lengths_type
self.z_max_type = z_max_type
self.seaway_drag_type = seaway_drag_type
def print_description(self, sigFigs=7, runAllFunctions=True):
    """Print a formatted description of the vessel to stdout.
    Fixes vs. previous revision: the precomputed ``volume`` local is now
    actually used in the table (it was computed and then the expression was
    re-evaluated inline twice), and the "THURST" typo in the printed section
    header is corrected to "THRUST".
    Args:
        sigFigs (int, optional): Number of significant figures to display. Defaults to 7.
        runAllFunctions (bool, optional): Runs all functions with default values before printing results. Defaults to True.
    """
    if runAllFunctions:
        self.get_geo_lengths()
        self.get_forces(runGeoLengths=False)
        self.get_eom_matrices(runGeoLengths=False)
        self.get_seaway_behavior()
        self.check_porpoising()
    #Displaced volume (m^3), reused in the rows below
    volume = self.weight/(self.g*self.rho)
    #Each row is [description, value, unit]; single-entry rows are section headers or blank spacers
    table = [
        ['---VESSEL---'],
        ['Speed', self.speed, 'm/s'],
        ['V_k', self.speed*1.944, 'knot'],
        ['Fn (beam)', self.speed/np.sqrt(self.g*self.beam), ''],
        ['Fn (volume)', self.speed/np.sqrt(self.g*volume**(1/3)), ''],
        [''],
        ['Weight', self.weight, 'N'],
        ['Mass', self.weight/self.g, 'kg'],
        ['Volume', volume, 'm\u00B3'],
        ['Beam', self.beam, 'm'],
        ['LCG', self.lcg, 'm from stern'],
        ['VCG', self.vcg, 'm from keel'],
        ['R_g', self.r_g, 'm'],
        ['Deadrise', self.beta, 'deg'], #'\N{greek small letter beta}'
        [''],
        ['LOA', self.length, 'm'],
        ['AHR', self.ahr, 'm, average hull roughness'],
        [''],
        ['---ATTITUDE---'],
        ['z_wl', self.z_wl, 'm, vertical distance of center of gravity to the calm water line'],
        ['tau', self.tau, 'deg, trim angle'],
        ['\u03B7\u2083', self.eta_3, 'deg, additional heave'],
        ['\u03B7\u2085', self.eta_5, 'deg, additional trim'],
        ['Transom draft', self.L_K*np.sin((self.tau+self.eta_5)*np.pi/180), 'm, draft of keel at transom'],
        [''],
        ['---PROPULSION---'],
        ['Thrust angle', self.epsilon, 'deg w.r.t. keel (CCW with body-fixed origin at 9 o\'clock)'],
        ['LCT', self.lT, 'm from stern, positive forward'],
        ['VCT', self.vT, 'm from keel, positive up'],
        [''],
        ['---FLAP---'],
        ['Chord', self.Lf, 'm'],
        ['Span/Beam', self.sigma, ''],
        ['Angle', self.delta, 'deg w.r.t. keel (CCW with body-fixed origin at 9 o\'clock)'],
        [''],
        ['---AIR DRAG---'],
        ['l_air', self.l_air, 'm, distance from stern to center of air pressure'],
        ['h_air', self.h_air, 'm, height from keel to top of square which bounds the air-drag-inducing shape'],
        ['b_air', self.b_air, 'm, transverse width of square which bounds the air-drag-inducing shape'],
        ['C_shape', self.C_shape, 'area coefficient for air-drag-inducing shape. C_shape = 1 means the air drag reference area is h_air*b_air'],
        ['C_D', self.C_D, 'air drag coefficient'],
        [''],
        ['---ENVIRONMENT---'],
        ['\u03C1', self.rho, 'kg/m\u00B3, water density'],
        ['\u03BD', self.nu, 'm\u00B2/s, water kinematic viscosity'],
        ['\u03C1_air', self.rho_air, 'kg/m\u00B3, air density'],
        ['g', self.g, 'm/s\u00B2, gravitational acceleration'],
        [''],
        ['---WETTED LENGTH OPTIONS---'],
        ['wetted_lengths_type', self.wetted_lengths_type, '(1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky\'s \'64 approach, 3 = Use Savitsky\'s \'76 approach)'],
        ['z_max_type', self.z_max_type, '(1 = Uses 3rd order polynomial fit (faster, recommended), 2 = Use cubic interpolation)'],
        [''],
        ['---WETTED LENGTHS---'],
        ['L_K', self.L_K, 'm, keel wetted length'],
        ['L_C', self.L_C, 'm, chine wetted length'],
        ['\u03BB', self.lambda_W, 'mean wetted-length to beam ratio (L_K+L_C)/(2*beam)'],
        ['x_s', self.x_s, 'm, distance from keel/water-line intersection to start of wetted chine'],
        ['z_max', self.z_max, 'maximum pressure coordinate coefficient (z_max/Ut)'],
        [''],
        ['---FORCES [F_x (N, +aft), F_z (N, +up), M_cg (N*m, +pitch up)]---'],
        ['Hydrodynamic Force', self.hydrodynamic_force, ''],
        ['Skin Friction', self.skin_friction, ''],
        ['Air Resistance', self.air_resistance, ''],
        ['Flap Force', self.flap_force, ''],
        ['Net Force', self.net_force, ''],
        ['Resultant Thrust', self.thrust_force, ''],
        [''],
        ['---THRUST & POWER---'],
        ['Thrust Magnitude', np.sqrt(self.thrust_force[0]**2+self.thrust_force[1]**2), 'N'],
        ['Effective Thrust', -self.thrust_force[0], 'N'],
        ['Eff. Power', -self.thrust_force[0]*self.speed/1000, 'kW'],
        ['Eff. Horsepower', -self.thrust_force[0]*self.speed/1000/0.7457, 'hp'],
        [''],
        ['---EOM MATRICES---'],
        ['Mass matrix, [kg, kg*m/rad; kg*m, kg*m\u00B2/rad]', self.mass_matrix, ''],
        ['Damping matrix, [kg/s, kg*m/(s*rad); kg*m/s, kg*m\u00B2/(s*rad)]', self.damping_matrix, ''],
        ['Restoring matrix, [N/m, N/rad; N, N*m/rad]', self.restoring_matrix, ''],
        [''],
        ['---PORPOISING---'],
        ['[[Eigenvalue check result, Est. pitch settling time (s)],\n [Savitsky chart result, Critical trim angle (deg)]]', np.array(self.porpoising), ''],
        [''],
        ['---BEHAVIOR IN WAVES---'],
        ['H_sig', self.H_sig, 'm, significant wave heigth'],
        ['R_AW', self.R_AW, 'N, added resistance in waves'],
        ['Average impact acceleration [n_cg, n_bow] (g\'s)', self.avg_impact_acc, ''],
    ]
    cLens=[16,0,0] #Min spacing for columns [description, value, unit]; 0 = no minimum width
    for row in table:
        if len(row)==3:
            if row[1] is None:
                #Un-set (None) values cannot take the 'g' float format, print them verbatim
                print('{desc:<{cL0}} {val:<{cL1}} {unit:<{cL2}}'.format(desc=row[0], val=row[1], unit='None', cL0='', cL1=cLens[1], cL2=cLens[2]))
            elif isinstance(row[1], (list,np.ndarray)):
                #Vector/matrix values print on their own lines with numpy's formatter
                print(row[0]+' =')
                with np.printoptions(formatter={'float': f'{{:.{sigFigs}g}}'.format}):
                    print(row[1])
                print(row[2])
            else:
                print('{desc:<{cL0}} {val:<{cL1}.{sNum}g} {unit:<{cL2}}'.format(desc=row[0], val=row[1], unit=row[2], cL0=cLens[0], cL1=cLens[1], cL2=cLens[2], sNum=sigFigs))
        else:
            #Section header or blank spacer
            print(row[0])
def get_geo_lengths(self):
    """Compute and store the geometric wetted lengths.
    Adds/updates the following attributes:
    - :attr:`L_K`
    - :attr:`L_C`
    - :attr:`lambda_W`
    - :attr:`x_s`
    - :attr:`z_max`
    """
    b = self.beam
    lcg = self.lcg
    vcg = self.vcg
    z_wl = self.z_wl
    eta_3 = self.eta_3
    beta = self.beta
    d2r = np.pi/180 #Degrees-to-radians factor
    theta = self.tau + self.eta_5 #Total trim angle (deg)
    #Keel wetted length, Eq. 9.50 of Faltinsen 2005, page 367 (clamped at zero)
    L_K = lcg + vcg / np.tan(d2r*theta) - (z_wl + eta_3) / np.sin(d2r*theta)
    if L_K < 0:
        L_K = 0
    if self.wetted_lengths_type == 1:
        #z_max/Vt coefficient, Table 8.3 of Faltinsen 2005, page 303---------------
        beta_table = [4, 7.5, 10, 15, 20, 25, 30, 40]
        z_max_table = [0.5695, 0.5623, 0.5556, 0.5361, 0.5087, 0.4709, 0.4243, 0.2866]
        #Extrapolation warning
        if not (beta_table[0] <= beta <= beta_table[-1]):
            warnings.warn('Deadrise ({0:.3f}) outside the interpolation range of 4-40 deg (Table 8.3 of Faltinsen 2005). Extrapolated values might be inaccurate.'.format(beta), stacklevel=2)
        if self.z_max_type == 1:
            #3rd order polynomial fit of the table (fast path)
            z_max = np.polyval([-2.100644618790201e-006, -6.815747611588763e-005, -1.130563334939335e-003, 5.754510457848798e-001], beta)
        elif self.z_max_type == 2:
            #Cubic interpolation of the table
            z_max = interpolate.interp1d(beta_table, z_max_table, kind='cubic', fill_value='extrapolate')(beta)
        #--------------------------------------------------------------------------
        #Distance from keel/water-line intersection to start of wetted chine (Eq. 9.10 of Faltinsen)
        x_s = 0.5 * b * np.tan(d2r*beta) / ((1 + z_max) * d2r*theta)
        if x_s < 0:
            x_s = 0
        #Chine wetted length, Eq. 9.51 of Faltinsen 2005
        L_C = L_K - x_s
        if L_C < 0:
            L_C = 0
            x_s = L_K
            warnings.warn('Vessel operating with dry chines (L_C = 0).', stacklevel=2)
        #Mean wetted length-to-beam ratio
        lambda_W = (L_K + L_C) / (2 * b)
    elif self.wetted_lengths_type == 2:
        #Eq. 3 of Savitsky '64
        x_s = b/np.pi*np.tan(d2r*beta)/np.tan(d2r*theta)
        #Chine wetted length
        L_C = L_K - x_s
        if L_C < 0:
            L_C = 0
            x_s = L_K
            warnings.warn('Vessel operating with dry chines (L_C = 0).', stacklevel=2)
        #Mean wetted length-to-beam ratio
        lambda_W = (L_K + L_C)/(2*b)
        #z_max/Vt coefficient (Eq. 9.10 of Faltinsen 2005 rearranged)
        z_max = 0.5 * b * np.tan(d2r*beta) / (x_s * d2r*theta) - 1
    elif self.wetted_lengths_type == 3:
        #Eq. 12 of Savitsky '76
        w = (0.57 + beta/1000)*(np.tan(d2r*beta)/(2*np.tan(d2r*theta))-beta/167)
        lambda_K = L_K/b
        #Eq. 14 of Savitsky '76
        lambda_C = (lambda_K-w)-0.2*np.exp(-(lambda_K-w)/0.3)
        if lambda_C < 0:
            lambda_C = 0
        L_C = lambda_C*b
        #Mean wetted length-to-beam ratio, Eq. 15 of Savitsky '76
        lambda_W = (lambda_K + lambda_C)/2+0.03
        x_s = L_K-L_C
        #z_max/Vt coefficient (Eq. 9.10 of Faltinsen 2005 rearranged)
        z_max = 0.5 * b * np.tan(d2r*beta) / (x_s * d2r*theta) - 1
    #Sanity check against the overall vessel length, when known
    if self.length is not None and L_K > self.length:
        warnings.warn('The estimated wetted chine length ({0:.3f}) is larger than the vessel length ({1:.3f}).'.format(L_K, self.length), stacklevel=2)
    #Store results
    self.L_K = L_K
    self.L_C = L_C
    self.lambda_W = lambda_W
    self.x_s = x_s
    self.z_max = z_max
def get_forces(self, runGeoLengths=True):
    """This function calls all the force functions to update the respective object attributes.
    Adds/updates the following attributes:
    - :attr:`hydrodynamic_force`
    - :attr:`skin_friction`
    - :attr:`air_resistance`
    - :attr:`flap_force`
    - :attr:`thrust_force`
    - :attr:`net_force`
    Bug fix: the air resistance vector previously stored the horizontal drag
    in the vertical-force slot ([F_x, F_x, M_cg]); it now correctly stores
    [F_x, F_z, M_cg] with F_z = 0 (air drag is modeled as horizontal only).
    Args:
        runGeoLengths (boolean, optional): Calculate the wetted lengths before calculating the forces. Defaults to True.
    """
    if runGeoLengths:
        self.get_geo_lengths() #Calculated wetted lengths in get_forces()
    #Local aliases of the attributes used below (unused aliases W, beam, eta_3, z_max removed)
    g = self.g
    rho_air = self.rho_air
    C_D = self.C_D
    rho = self.rho
    nu = self.nu
    AHR = self.ahr
    epsilon = self.epsilon
    vT = self.vT
    lT = self.lT
    U = self.speed
    b = self.beam
    lcg = self.lcg
    vcg = self.vcg
    Lf = self.Lf
    sigma = self.sigma
    delta = self.delta
    l_air = self.l_air
    h_air = self.h_air
    b_air = self.b_air
    C_shape = self.C_shape
    z_wl = self.z_wl
    tau = self.tau
    beta = self.beta
    eta_5 = self.eta_5
    L_K = self.L_K
    L_C = self.L_C
    lambda_W = self.lambda_W
    x_s = self.x_s
    pi = np.pi
    def get_hydrodynamic_force():
        """This function follows Savitsky 1964 and Faltinsen 2005 in calculating the vessel's hydrodynamic forces and moment.
        """
        #Beam Froude number
        Fn_B = U/np.sqrt(g*b)
        #Warnings when outside the empirical lift equation's range of applicability
        if Fn_B < 0.6 or Fn_B > 13:
            warnings.warn('Beam Froude number = {0:.3f}, outside of range of applicability (0.60 <= U/sqrt(g*b) <= 13.00) for planing lift equation. Results are extrapolations.'.format(Fn_B), stacklevel=2)
        if lambda_W > 4:
            warnings.warn('Mean wetted length-beam ratio = {0:.3f}, outside of range of applicability (lambda <= 4) for planing lift equation. Results are extrapolations.'.format(lambda_W), stacklevel=2)
        if tau < 2 or tau > 15:
            warnings.warn('Vessel trim = {0:.3f}, outside of range of applicability (2 deg <= tau <= 15 deg) for planing lift equation. Results are extrapolations.'.format(tau), stacklevel=2)
        #0-Deadrise lift coefficient
        C_L0 = (tau + eta_5)**1.1 * (0.012 * lambda_W**0.5 + 0.0055 * lambda_W**2.5 / Fn_B**2)
        #Lift coefficient with deadrise, C_Lbeta
        C_Lbeta = C_L0 - 0.0065 * beta * C_L0**0.6
        #Vertical force (lift)
        F_z = C_Lbeta * 0.5 * rho * U**2 * b**2
        #Horizontal force
        F_x = F_z*np.tan(pi/180*(tau + eta_5))
        #Lift's Normal force w.r.t. keel
        F_N = F_z / np.cos(pi/180*(tau + eta_5))
        #Longitudinal position of the center of pressure, l_p (Eq. 4.41, Doctors 1985)
        l_p = lambda_W * b * (0.75 - 1 / (5.21 * (Fn_B / lambda_W)**2 + 2.39)) #Limits for this is (0.60 < Fn_B < 13.0, lambda < 4.0)
        #Moment about CG (Axis consistent with Fig. 9.24 of Faltinsen (P. 366)
        M_cg = - F_N * (lcg - l_p)
        #Update values
        self.hydrodynamic_force = np.array([F_x, F_z, M_cg])
    def get_skin_friction():
        """This function outputs the frictional force of the vessel using ITTC 1957 and the Bowden and Davison 1974 roughness coefficient.
        """
        #Surface area of the dry-chine region
        S1 = x_s * b / (2 * np.cos(pi/180*beta))
        if L_K < x_s:
            S1 = S1 * (L_K / x_s)**2
        #Surface area of the wetted-chine region
        S2 = b * L_C / np.cos(pi/180*beta)
        #Total surface area
        S = S1 + S2
        if S == 0:
            #No wetted surface, no friction
            F_x = 0
            F_z = 0
            M_cg = 0
        else:
            #Mean bottom fluid velocity, Savitsky 1964 - Hadler's empirical formula
            V_m = U * np.sqrt(1 - (0.012 * tau**1.1 * np.sqrt(lambda_W) - 0.0065 * beta * (0.012 * np.sqrt(lambda_W) * tau**1.1)**0.6) / (lambda_W * np.cos(tau * pi/180)))
            #Reynolds number (with bottom fluid velocity)
            Rn = V_m * lambda_W * b / nu
            #'Friction coefficient' ITTC 1957
            C_f = 0.075/(np.log10(Rn) - 2)**2
            #Additional 'friction coefficient' due to skin friction, Bowden and Davison (1974)
            deltaC_f = (44*((AHR/(lambda_W*b))**(1/3) - 10*Rn**(-1/3)) + 0.125)/10**3
            #Frictional force
            R_f = 0.5 * rho * (C_f + deltaC_f) * S * U**2
            #Geometric vertical distance from keel
            l_f = (b / 4 * np.tan(pi/180*beta) * S2 + b / 6 * np.tan(pi/180*beta) * S1) / (S1 + S2)
            #Horizontal force
            F_x = R_f * np.cos(pi/180*(tau + eta_5))
            #Vertical force
            F_z = - R_f * np.sin(pi/180*(tau + eta_5))
            #Moment about CG (Axis consistent with Fig. 9.24 of Faltinsen (P. 366))
            M_cg = R_f * (l_f - vcg)
        #Update values
        self.skin_friction = np.array([F_x, F_z, M_cg])
    def get_air_resistance():
        """This function estimates the air drag. It assumes a square shape projected area with a shape coefficient.
        """
        if C_shape == 0 or b_air == 0:
            #No air-drag-inducing area defined
            self.air_resistance = np.array([0, 0, 0])
            return
        #Vertical distance from calm water line to keel at LOA
        a_dist = np.sin(pi/180*(tau + eta_5))*(l_air-L_K)
        #Vertical distance from keel to horizontal line level with boat's height
        b_dist = np.cos(pi/180*(tau + eta_5))*h_air
        #Vertical distance from CG to center of square (moment arm, positive is CG above)
        momArm = z_wl - (a_dist + b_dist)/2
        #Square projected area
        Area = (a_dist+b_dist)*b_air*C_shape
        if Area < 0:
            Area = 0
        #Horizontal force (Positive aft)
        F_x = 0.5*rho_air*C_D*Area*U**2
        #Vertical force (Positive up) - the drag model is horizontal only
        F_z = 0
        #Moment (positve CCW)
        M_cg = -F_x*momArm
        #Update values (BUG FIX: second component was mistakenly F_x instead of F_z)
        self.air_resistance = np.array([F_x, F_z, M_cg])
    def get_flap_force():
        """This function outputs the flap forces w.r.t. global coordinates (Savitsky & Brown 1976). Horz: Positive Aft, Vert: Positive Up, Moment: Positive CCW.
        """
        if Lf == 0:
            #No flap fitted
            self.flap_force = np.array([0, 0, 0])
            return
        #Warnings when outside the empirical model's range of applicability
        if Lf > 0.10*(L_K + L_C)/2 or Lf < 0:
            warnings.warn('Flap chord = {0:.3f} outside of bounds (0-10% of mean wetted length) for flap forces estimates with Savitsky & Brown 1976'.format(Lf), stacklevel=2)
        if delta < 0 or delta > 15:
            warnings.warn('Flap deflection angle = {0:.3f} out of bounds (0-15 deg) for flap forces estimates with Savitsky & Brown 1976'.format(delta), stacklevel=2)
        Fn_B = U/np.sqrt(g*b)
        if Fn_B < 2 or Fn_B > 7:
            warnings.warn('Beam-based Froude number Fn_B = {0:.3f} out of bounds (2-7) for flap forces estimates with Savitsky & Brown 1976'.format(Fn_B), stacklevel=2)
        #Empirical fit in imperial units (ft, lbf, slug/ft^3), converted back to SI
        F_z = 0.046*(Lf*3.28084)*delta*sigma*(b*3.28084)*(rho/515.379)/2*(U*3.28084)**2*4.44822
        F_x = 0.0052*F_z*(tau+eta_5+delta)
        #Longitudinal position of the flap's center of pressure, from the stern
        l_flap = 0.6*b+Lf*(1-sigma)
        M_cg = -F_z*(lcg-l_flap)
        #Update values
        self.flap_force = np.array([F_x, F_z, M_cg])
    def sum_forces():
        """This function gets the sum of forces and moments, and consequently the required net thrust. The coordinates are positive aft, positive up, and positive counterclockwise.
        """
        #Call all force functions-------
        get_hydrodynamic_force()
        get_skin_friction()
        get_air_resistance()
        get_flap_force()
        #-------------------------------
        forcesMatrix = np.column_stack((self.gravity_force, self.hydrodynamic_force, self.skin_friction, self.air_resistance, self.flap_force)) #Forces and moments
        F_sum = np.sum(forcesMatrix, axis=1) #F[0] is x-dir, F[1] is z-dir, and F[2] is moment
        #Required thrust and resultant forces
        T = F_sum[0]/np.cos(pi/180*(epsilon+tau+eta_5)) #Magnitude
        T_z = T*np.sin(pi/180*(epsilon+tau+eta_5)) #Vertical
        T_cg = T*np.cos(pi/180*epsilon)*(vcg - vT) - T*np.sin(pi/180*epsilon)*(lcg - lT) #Moment about cg
        #Update resultant thrust values
        self.thrust_force = np.array([-F_sum[0], T_z, T_cg])
        #Include resultant thrust forces in sum
        F_sum[1] = F_sum[1]+T_z
        F_sum[2] = F_sum[2]+T_cg
        #Update values
        self.net_force = F_sum
    #Call functions
    sum_forces()
def get_steady_trim(self, x0=[0, 3], tauLims=[0.5, 35], tolF=10**-6, maxiter=50):
    """This function finds and sets the equilibrium point when the vessel is steadily running in calm water.
    Updates the following attributes:
    - :attr:`z_wl`
    - :attr:`tau`
    Fix: ``np.Inf`` replaced by ``np.inf`` — the capitalized alias was
    removed in NumPy 2.0.
    Args:
        x0 (list of float): Initial guess for equilibirum point [z_wl (m), tau (deg)]. Defaults to [0, 3]. (Read-only; not mutated.)
        tauLims (list of float): Limits for equilibrium trim search. Defaults to [0.5, 35].
        tolF (float): Tolerance for convergence to zero. Defaults to 10**-6.
        maxiter (float): Maximum iterations. Defaults to 50.
    """
    def _boatForces(x):
        #Residual: net vertical force and moment at the candidate attitude.
        #z_wl is scaled by 10 to normalize the two search variables.
        self.z_wl = x[0]/10
        self.tau = x[1]
        self.get_forces()
        return self.net_force[1:3]
    def _boatForcesPrime(x):
        #Jacobian of the residual via complex-step differentiation
        return ndmath.complexGrad(_boatForces, x)
    def _L_K(x):
        #Inequality constraint (feasible when <= 0): keep the keel wetted length non-negative.
        # self.z_wl = x[0]/10
        # self.tau = x[1]
        # self.get_geo_lengths() #No need to call, because ndmath's nDimNewton allways calls the obj function before calling this "constraint"
        return [-self.L_K]
    #z_wl is unbounded; tau is bounded by tauLims (np.inf: np.Inf was removed in NumPy 2.0)
    xlims = np.array([[-np.inf, np.inf], tauLims])
    #Extrapolation warnings are expected during the search; silence them temporarily
    warnings.filterwarnings("ignore", category=UserWarning)
    [self.z_wl, self.tau] = ndmath.nDimNewton(_boatForces, x0, _boatForcesPrime, tolF, maxiter, xlims, hehcon=_L_K)/[10, 1]
    warnings.filterwarnings("default", category=UserWarning)
def get_eom_matrices(self, runGeoLengths=True):
    """This function returns the mass, damping, and stiffness matrices following Faltinsen 2005.
    Adds/updates the following parameters:
    - :attr:`mass_matrix`
    - :attr:`damping_matrix`
    - :attr:`restoring_matrix`
    Fix: ``get_restoring_matrix`` previously hardcoded the finite-difference
    step as ``10**-6.6``, silently ignoring its ``step`` parameter; the
    parameter is now used (default unchanged).
    Args:
        runGeoLengths (boolean, optional): Calculate the wetted lengths before calculating the EOM matrices. Defaults to True.
    """
    if runGeoLengths:
        self.get_geo_lengths() #Calculated wetted lengths in get_eom_matrices()
    W = self.weight
    U = self.speed
    rho = self.rho
    b = self.beam
    lcg = self.lcg
    tau = self.tau
    beta = self.beta
    g = self.g
    r_g = self.r_g
    eta_5 = self.eta_5
    L_K = self.L_K
    L_C = self.L_C
    lambda_W = self.lambda_W
    x_s = self.x_s
    z_max = self.z_max
    pi = np.pi
    def get_mass_matrix():
        """This function returns the added mass coefficients following Sec. 9.4.1 of Faltinsen 2005, including weight and moment of inertia.
        """
        #Distance of CG from keel-WL intersection
        x_G = L_K - lcg
        #K constant (Eq. 9.63 of Faltinsen 2005)
        K = (pi / np.sin(pi/180*beta) * gamma(1.5 - beta/180) / (gamma(1 - beta/180)**2 * gamma(0.5 + beta/180)) - 1) / np.tan(pi/180*beta)
        kappa = (1 + z_max) * (pi/180)*(tau + eta_5) #User defined constant
        #Added mass contribution from the dry-chine region (based on Faltinsen's)
        A1_33 = rho * kappa**2 * K * x_s**3 / 3
        A1_35 = A1_33 * (x_G - x_s * 3/4)
        A1_53 = A1_35
        A1_55 = A1_33 * (x_G**2 - 3/2 * x_G * x_s + 3/5 * x_s**2)
        #Contribution from wet-chine region
        if L_C > 0:
            C_1 = 2 * np.tan(pi/180*beta)**2 / pi * K
            A2_33 = (rho * b**3) * C_1 * pi / 8 * L_C / b
            A2_35 = (rho * b**4) * (- C_1 * pi / 16 * ((L_K / b)**2 - (x_s / b)**2) + x_G / b * A2_33 / (rho * b**3))
            A2_53 = A2_35
            A2_55 = (rho * b**5) * (C_1 * pi / 24 * ((L_K / b)**3 - (x_s / b)**3) - C_1 / 8 * pi * (x_G / b) * ((L_K / b)**2 - (x_s / b)**2) + (x_G / b)**2 * A2_33 / (rho * b**3))
        else:
            #Dry chines: no wet-chine contribution
            A2_33 = 0
            A2_35 = 0
            A2_53 = 0
            A2_55 = 0
        #Total added mass (including vessel mass and pitch inertia) & update values
        A_33 = A1_33 + A2_33 + W/g # kg, A_33
        A_35 = A1_35 + A2_35 # kg*m/rad, A_35
        A_53 = A1_53 + A2_53 # kg*m, A_53
        A_55 = A1_55 + A2_55 + W/g*r_g**2 # kg*m^2/rad, A_55
        self.mass_matrix = np.array([[A_33, A_35], [A_53, A_55]])
    def get_damping_matrix():
        """This function returns the damping coefficients following Sec. 9.4.1 of Faltinsen 2005.
        """
        #Heave-heave added mass (need to substract W/g since it was added)
        A_33 = self.mass_matrix[0,0] - W/g
        #Effective transom draft
        if L_C > 0:
            d = 0.5 * b * np.tan(pi/180*beta)
        else:
            d = (1 + z_max) * (pi/180)*(tau + eta_5) * L_K
        #K constant (Eq. 9.63 of Faltinsen 2005, P. 369)
        K = (pi / np.sin(pi/180*beta) * gamma(1.5 - beta/180) / (gamma(1 - beta/180)**2 * gamma(0.5 + beta/180)) - 1) / np.tan(pi/180*beta)
        #2D Added mass coefficient in heave
        a_33 = rho * d**2 * K
        #Infinite Fn lift coefficient
        C_L0 = (tau + eta_5)**1.1 * 0.012 * lambda_W**0.5
        #Derivative w.r.t. tau (rad) of inf. Fn C_L0
        dC_L0 = (180 / pi)**1.1 * 0.0132 * (pi/180*(tau + eta_5))**0.1 * lambda_W**0.5
        #Derivative w.r.t. tau (rad) of inf. Fn C_Lbeta
        dC_Lbeta = dC_L0 * (1 - 0.0039 * beta * C_L0**-0.4)
        #Damping coefficients & update values
        B_33 = rho / 2 * U * b**2 * dC_Lbeta # kg/s, B_33, Savitsky based
        B_35 = - U * (A_33 + lcg * a_33) # kg*m/(s*rad), B_35, Infinite frequency based
        B_53 = B_33 * (0.75 * lambda_W * b - lcg) # kg*m/s, B_53, Savitsky based
        B_55 = U * lcg**2 * a_33 # kg*m**2/(s*rad), B_55, Infinite frequency based
        self.damping_matrix = np.array([[B_33, B_35], [B_53, B_55]])
    def get_restoring_matrix(diffType=1, step=10**-6.6):
        """This function returns the restoring coefficients following the approach in Sec. 9.4.1 of Faltinsen 2005.
        Args:
            diffType (int, optional): 1 (recommended) = Complex step method, 2 = Forward step difference. Defaults to 1.
            step (float, optional): Step size if using diffType == 2. Defaults to 10**-6.6.
        """
        def _func(eta):
            #Net vertical force and moment at a perturbed attitude
            self.eta_3 = eta[0]
            self.eta_5 = eta[1]
            self.get_forces()
            return self.net_force[1:3]
        temp_eta_3 = self.eta_3
        temp_eta_5 = self.eta_5
        if diffType == 1:
            C_full = -ndmath.complexGrad(_func, [temp_eta_3, temp_eta_5])
        elif diffType == 2:
            #BUG FIX: previously hardcoded 10**-6.6, ignoring the step argument
            C_full = -ndmath.finiteGrad(_func, [temp_eta_3, temp_eta_5], step)
        #Reset values
        self.eta_3 = temp_eta_3
        self.eta_5 = temp_eta_5
        self.get_forces()
        #Conversion deg to rad (degree in denominator)
        C_full[0,1] = C_full[0,1] / (pi/180) # N/rad, C_35
        C_full[1,1] = C_full[1,1] / (pi/180) # N*m/rad, C_55
        #Update values
        self.restoring_matrix = C_full
    #Call functions
    get_mass_matrix()
    get_damping_matrix()
    get_restoring_matrix()
def check_porpoising(self, stepEstimateType=1):
    """Check the vessel for porpoising.
    Two independent checks are performed: an eigenvalue analysis of the
    linearized heave/pitch equations of motion, and the Savitsky 1964
    empirical chart method.
    Adds/updates the following parameters:
    - :attr:`porpoising` (list):
    Args:
        stepEstimateType (int, optional): Pitch step response settling time estimate type, 1 = -3/np.real(eigVals[0])], 2 = Time-domain simulation estimate. Defaults to 1.
    """
    #Eigenvalue analysis (requires the EOM matrices to exist)
    try:
        self.mass_matrix
    except AttributeError:
        warnings.warn('No Equation Of Motion (EOM) matrices found. Running get_eom_matrices().', stacklevel=2)
        self.get_eom_matrices()
    mass_mat = self.mass_matrix
    damp_mat = self.damping_matrix
    stiff_mat = self.restoring_matrix
    n_dof = len(mass_mat)
    #State-space representation of the linearized equations of motion
    A_ss = np.block([
        [np.zeros((n_dof, n_dof)), np.identity(n_dof)],
        [-np.linalg.solve(mass_mat, stiff_mat), -np.linalg.solve(mass_mat, damp_mat)],
    ])
    eig_vals = np.linalg.eigvals(A_ss)
    #Unstable (porpoising) if any eigenvalue is non-negative
    eig_porpoise = any(ev >= 0 for ev in eig_vals)
    if stepEstimateType == 1:
        settling_time = -3/np.real(eig_vals[0])
    elif stepEstimateType == 2:
        #Pitch-only step response; settling time read off the simulated trace
        B_ss = np.array([[1],[0],[0],[0]])
        C_ss = np.array([[1,0,0,0]])
        D_ss = np.array([[0]])
        t, y = signal.step((A_ss, B_ss, C_ss, D_ss))
        settling_time = t[next(len(y)-i for i in range(2,len(y)-1) if abs(y[-i]/y[-1])>1.02)] - t[0]
    #Savitsky '64 chart method
    C_L = self.weight/(1/2*self.rho*self.speed**2*self.beam**2)
    chart_x = np.sqrt(C_L/2)
    #Warnings when outside the chart's range of applicability
    if chart_x > 0.3 or chart_x < 0.13:
        warnings.warn('Lift Coefficient = {0:.3f} outside of bounds (0.0338-0.18) for porpoising estimates with Savitsky 1964. Results are extrapolations.'.format(C_L), stacklevel=2)
    if self.beta > 20:
        warnings.warn('Deadrise = {0:.3f} outside of bounds (0-20 deg) for porpoising estimates with Savitsky 1964. Results are extrapolations.'.format(self.beta), stacklevel=2)
    #Critical trim angle curves at 0/10/20 deg deadrise, interpolated at this vessel's deadrise
    tau_crit_0 = -376.37*chart_x**3 + 329.74*chart_x**2 - 38.485*chart_x + 1.3415
    tau_crit_10 = -356.05*chart_x**3 + 314.36*chart_x**2 - 41.674*chart_x + 3.5786
    tau_crit_20 = -254.51*chart_x**3 + 239.65*chart_x**2 - 23.936*chart_x + 3.0195
    tau_crit = interpolate.interp1d([0, 10, 20], [tau_crit_0, tau_crit_10, tau_crit_20], kind='quadratic', fill_value='extrapolate')(self.beta)
    chart_porpoise = True if self.tau > tau_crit else False
    #Update values
    self.porpoising = [[eig_porpoise, settling_time], [chart_porpoise, float(tau_crit)]]
def get_seaway_behavior(self):
"""This function calculates the seaway behavior as stated in Savitsky & Brown '76.
Adds/updates the following parameters:
- :attr:`avg_impact_acc`
- :attr:`R_AW`
"""
if self.H_sig is None:
self.H_sig = self.beam*0.5 #Arbitrary wave height if no user-defined wave height
warnings.warn('Significant wave height has not been specified. Using beam*0.5 = {0:.3f} m.'.format(self.H_sig), stacklevel=2)
if self.length is None:
self.length = self.beam*3
warnings.warn('Vessel length has not been specified. Using beam*3 = {0:.3f} m.'.format(self.length), stacklevel=2)
H_sig = self.H_sig
W = self.weight
beta = self.beta
tau = self.tau
pi = np.pi
Delta_LT = W/9964 #Displacement in long tons
Delta = Delta_LT*2240 #Displacement in lbf
L = self.length*3.281 #Length in ft
b = self.beam*3.281 #Beam in ft
Vk = self.speed*1.944 #Speed in knots
Vk_L = Vk/np.sqrt(L) #Vk/sqrt(L)
H_sig = H_sig*3.281 #Significant wave height in ft
w = self.rho*self.g/(4.448*35.315) #Specific weight in lbf/ft^3
C_Delta = Delta/(w*b**3) #Static beam-loading coefficient
if self.seaway_drag_type == 1: #Savitsky '76
#Check that variables are inside range of applicability (P. 395 of Savitsky & Brown '76)
P1 = Delta_LT/(0.01*L)**3
P2 = L/b
P5 = H_sig/b
P6 = Vk_L
if P1 < 100 or P1 > 250:
warnings.warn('Vessel displacement coefficient = {0:.3f}, outside of range of applicability (100 <= Delta_LT/(0.01*L)^3 <= 250, with units LT/ft^3). Results are extrapolations.'.format(P1), stacklevel=2)
if P2 < 3 or P2 > 5:
warnings.warn('Vessel length/beam = {0:.3f}, outside of range of applicability (3 <= L/b <= 5). Results are extrapolations.'.format(P2), stacklevel=2)
if tau < 3 or tau > 7:
warnings.warn('Vessel trim = {0:.3f}, outside of range of applicability (3 deg <= tau <= 7 deg). Results are extrapolations.'.format(tau), stacklevel=2)
if beta < 10 or beta > 30:
warnings.warn('Vessel deadrise = {0:.3f}, outside of range of applicability (10 deg <= beta <= 30 deg). Results are extrapolations.'.format(beta), stacklevel=2)
if P5 < 0.2 or P5 > 0.7:
warnings.warn('Significant wave height / beam = {0:.3f}, outside of range of applicability (0.2 <= H_sig/b <= 0.7). Results are extrapolations.'.format(P5), stacklevel=2)
if P6 < 2 or P6 > 6:
warnings.warn('Speed coefficient = {0:.3f}, outside of range of applicability (2 <= Vk/sqrt(L) <= 6, with units knots/ft^0.5). Results are extrapolations.'.format(P6), stacklevel=2)
R_AW_2 = (w*b**3)*66*10**-6*(H_sig/b+0.5)*(L/b)**3/C_Delta+0.0043*(tau-4) #Added resistance at Vk/sqrt(L) = 2
R_AW_4 = (Delta)*(0.3*H_sig/b)/(1+2*H_sig/b)*(1.76-tau/6-2*np.tan(beta*pi/180)**3) #Vk/sqrt(L) = 4
R_AW_6 = (w*b**3)*(0.158*H_sig/b)/(1+(H_sig/b)*(0.12*beta-21*C_Delta*(5.6-L/b)+7.5*(6-L/b))) #Vk/sqrt(L) = 6
R_AWs = np.array([R_AW_2, R_AW_4, R_AW_6])
R_AWs_interp = interpolate.interp1d([2,4,6], R_AWs, kind='quadratic', fill_value='extrapolate')
R_AW = R_AWs_interp([Vk_L])[0]
elif self.seaway_drag_type == 2: #Fridsma '71 design charts
#Check that variables are inside range of applicability (P. R-1495 of Fridsma '71)
if C_Delta < 0.3 or C_Delta > 0.9:
warnings.warn('C_Delta = {0:.3f}, outside of range of applicability (0.3 <= C_Delta <= 0.9). Results are extrapolations'.format(C_Delta), stacklevel=2)
if L/b < 3 or L/b > 6:
warnings.warn('L/b = {0:.3f}, outside of range of applicability (3 <= L/b <= 6). Results are extrapolations'.format(L/b), stacklevel=2)
if C_Delta/(L/b) < 0.06 or C_Delta/(L/b) > 0.18:
warnings.warn('C_Delta/(L/b) = {0:.3f}, outside of range of applicability (0.06 <= C_Delta/(L/b) <= 0.18). Results are extrapolations'.format(C_Delta/(L/b)), stacklevel=2)
if tau < 3 or tau > 7:
warnings.warn('tau = {0:.3f}, outside of range of applicability (3 <= tau <= 7). Results are extrapolations'.format(tau), stacklevel=2)
if beta < 10 or beta > 30:
warnings.warn('beta = {0:.3f}, outside of range of applicability (10 <= beta <= 30). Results are extrapolations'.format(beta), stacklevel=2)
if H_sig/b > 0.8:
warnings.warn('H_sig/b = {0:.3f}, outside of range of applicability (H_sig/b <= 0.8). Results are extrapolations'.format(H_sig/b), stacklevel=2)
if Vk_L > 6:
warnings.warn('Vk_L = {0:.3f}, outside of range of applicability (Vk_L <= 6). Results are extrapolations'.format(Vk_L), stacklevel=2)
#Get data tables (required for when package is distributed)
Raw2_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_0.2.csv')
Raw4_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_0.4.csv')
Raw6_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_0.6.csv')
V2_tab = pkg_resources.resource_filename(__name__, 'tables\V_0.2.csv')
V4_tab = pkg_resources.resource_filename(__name__, 'tables\V_0.4.csv')
RawV2_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_V_0.2.csv')
RawV4_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_V_0.4.csv')
RawV6_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_V_0.6.csv')
#Read values from extracted chart points
arr_Raw2 = np.genfromtxt(Raw2_tab, delimiter=',', skip_header=1)
arr_Raw4 = np.genfromtxt(Raw4_tab, delimiter=',', skip_header=1)
arr_Raw6 = np.genfromtxt(Raw6_tab, delimiter=',', skip_header=1)
arr_V2 = np.genfromtxt(V2_tab, delimiter=',', skip_header=1)
arr_V4 = np.genfromtxt(V4_tab, delimiter=',', skip_header=1)
arr_Raw_V2 = np.genfromtxt(RawV2_tab, delimiter=',', skip_header=1)
arr_Raw_V4 = np.genfromtxt(RawV4_tab, delimiter=',', skip_header=1)
arr_Raw_V6 = np.genfromtxt(RawV6_tab, delimiter=',', skip_header=1)
#Create interpolation functions
interp1Type = 'linear'
interp2Type = 'linear'
Raw2m_interp = interpolate.interp2d(arr_Raw2[:, 1], arr_Raw2[:, 0], arr_Raw2[:, 2], kind=interp2Type)
Raw4m_interp = interpolate.interp2d(arr_Raw4[:, 1], arr_Raw4[:, 0], arr_Raw4[:, 2], kind=interp2Type)
Raw6m_interp = interpolate.interp2d(arr_Raw6[:, 1], arr_Raw6[:, 0], arr_Raw6[:, 2], kind=interp2Type)
V2m_interp = interpolate.interp2d(arr_V2[:, 1], arr_V2[:, 0], arr_V2[:, 2], kind=interp2Type)
V4m_interp = interpolate.interp2d(arr_V4[:, 1], arr_V4[:, 0], arr_V4[:, 2], kind=interp2Type)
V6m_interp = V4m_interp
RawRaw2m_interp = interpolate.interp1d(arr_Raw_V2[:, 0], arr_Raw_V2[:, 1], kind=interp1Type, fill_value='extrapolate')
RawRaw4m_interp = interpolate.interp1d(arr_Raw_V4[:, 0], arr_Raw_V4[:, 1], kind=interp1Type, fill_value='extrapolate')
RawRaw6m_interp = interpolate.interp1d(arr_Raw_V6[:, 0], arr_Raw_V6[:, 1], kind=interp1Type, fill_value='extrapolate')
#Get values following procedure shown in Fridsma 1971 paper
VLm = [V2m_interp(beta, tau)[0], V4m_interp(beta, tau)[0], V6m_interp(beta, tau)[0]]
Rwbm = [Raw2m_interp(beta, tau)[0], Raw4m_interp(beta, tau)[0], Raw6m_interp(beta, tau)[0]]
VVm = Vk_L/VLm
RRm = [RawRaw2m_interp(VVm[0]), RawRaw4m_interp(VVm[1]), RawRaw6m_interp(VVm[2])]
Rwb = np.multiply(RRm, Rwbm)
E1 = lambda H_sig: 1 + ((L/b)**2/25 - 1)/(1 + 0.895*(H_sig/b - 0.6)) #V/sqrt(L) = 2
E2 = lambda H_sig: 1 + 10*H_sig/b*(C_Delta/(L/b) - 0.12) #V/sqrt(L) = 4
E3 = lambda H_sig: 1 + 2*H_sig/b*(0.9*(C_Delta-0.6)-0.7*(C_Delta-0.6)**2) #V/sqrt(L) = 6
E_interp = lambda H_sig: interpolate.interp1d([2, 4, 6], [E1(H_sig), E2(H_sig), E3(H_sig)], kind=interp1Type, fill_value='extrapolate')
E = [E_interp(0.2*b)(Vk_L), E_interp(0.4*b)(Vk_L), E_interp(0.6*b)(Vk_L)]
Rwb_final = np.multiply(Rwb,E)
Rwb_final_interp = interpolate.interp1d([0.2, 0.4, 0.6], Rwb_final, kind=interp1Type, fill_value='extrapolate')
R_AW = Rwb_final_interp(H_sig/b)*w*b**3
warnings.warn('Average impact acceleration based on the Fridsma charts is currently not implemented. Using Savitsky & Brown approximation.', stacklevel=2)
n_cg = 0.0104*(H_sig/b+0.084)*tau/4*(5/3-beta/30)*(Vk_L)**2*L/b/C_Delta #g, at CG
n_bow = n_cg*(1+3.8*(L/b-2.25)/(Vk_L)) #g, at bow
avg_impact_acc = np.array([n_cg, n_bow])
#Update values
self.avg_impact_acc = avg_impact_acc
self.R_AW = R_AW*4.448 #lbf to N conversion
| StarcoderdataPython |
5060725 | from __future__ import annotations
from transformers import RobertaTokenizer, XLMRobertaTokenizerFast
try:
from icu import Locale, BreakIterator
icu_available = True
except ImportError:
icu_available = False
from pelutils.jsonl import load_jsonl
# Special-token strings for the *entity* vocabulary, mirroring the BERT-style
# wordpiece conventions ([UNK]/[MASK]).
ENTITY_UNK_TOKEN = "[UNK]"
ENTITY_MASK_TOKEN = "[MASK]"
class ICUSentenceTokenizer:
    """Sentence segmenter backed by ICU's locale-aware BreakIterator."""

    def __init__(self, locale: str):
        if not icu_available:
            raise ModuleNotFoundError("Pretrain data generation requires installation of the optional requirement `PyIcU`")
        # For these languages ICU ships common-abbreviation lists that can be
        # enabled to suppress false sentence boundaries.
        # (http://userguide.icu-project.org/boundaryanalysis)
        if locale in {"en", "de", "es", "it", "pt"}:
            locale += "@ss=standard"
        self.locale = Locale(locale)
        self.breaker = BreakIterator.createSentenceInstance(self.locale)

    def span_tokenize(self, text: str) -> list[tuple[int, int]]:
        """Return (start, end) character spans of the sentences in *text*.

        ICU's BreakIterator counts UTF-16 code *units*, not codepoints, so a
        non-BMP character (which occupies two code units) would shift every
        boundary index relative to Python's codepoint-based strings, e.g.
        ``len("test.") == 6`` while ICU would report a boundary at 7.
        To keep indices aligned, every non-BMP character is first replaced by
        a single space (an arbitrary BMP character) before the iterator runs.
        (https://stackoverflow.com/questions/30775689)
        (https://stackoverflow.com/questions/36283818)
        """
        sanitized = "".join(ch if ch <= "\uFFFF" else " " for ch in text)
        self.breaker.setText(sanitized)
        spans = []
        previous = 0
        for boundary in self.breaker:
            spans.append((previous, boundary))
            previous = boundary
        return spans
# Wiki namespace prefixes (Danish wiki conventions) whose pages are not
# real articles and should be skipped.
_ignored_wiki_categories = ("fil", "kategori", "wikipedia", "mediawiki", "portal", "skabelon", "artikeldata", "modul")

def ignore_title(title: str) -> bool:
    """Return True if *title* lies in one of the ignored wiki namespaces."""
    lowered = title.lower()
    return any(lowered.startswith(prefix + ":")
               for prefix in _ignored_wiki_categories)
def load_entity_vocab(vocab_file: str) -> dict[str, dict[str, int]]:
    """Read a .jsonl entity vocab created by build-entity-vocab.

    Returns a mapping ``{ entity name: { "id": int, "count": int } }``.
    Entries whose title falls in an ignored wiki namespace (categories,
    images, file pages, ...) are dropped.
    """
    entities: dict[str, dict[str, int]] = {}
    with open(vocab_file) as vf:
        for entry in load_jsonl(vf):
            name = entry["entities"][0][0]
            if ignore_title(name):
                continue
            entities[name] = {
                "id": entry["id"],
                "count": entry["count"],
            }
    return entities
def calculate_spans(tokens: list[str], tokenizer) -> list[tuple[int, int]]:
    """ Calculate word spans from a list of tokens. Excludes punctuation

    Spans are (start, end) indices into the token sequence, shifted by one
    on return to account for the [CLS]/<s> token prepended by the model.
    """
    spans = list()
    i = 0
    while i < len(tokens):
        if isinstance(tokenizer, XLMRobertaTokenizerFast):
            # SentencePiece marks the start of a word with "▁"; the word then
            # continues over the following alphanumeric sub-tokens.
            if tokens[i].startswith("▁"):
                start = i
                i += 1
                while i < len(tokens) and tokens[i].isalnum():
                    i += 1
                spans.append((start, i))
            else:
                i += 1
        else:
            # WordPiece: a word starts at an alphanumeric token and continues
            # over "##"-prefixed continuation tokens.
            # (Fix: removed a redundant `start = i` that was assigned before
            # the condition and immediately re-assigned inside it.)
            if tokens[i].isalnum():
                start = i
                i += 1
                while i < len(tokens) and tokens[i].startswith("##"):
                    i += 1
                spans.append((start, i))
            else:
                i += 1
    # Take cls into account
    return [(start+1, end+1) for start, end in spans]
# Imported for API availability
from daluke.data import BatchedExamples
from .loader import DataLoader
from .masking import MaskedBatchedExamples, mask_ent_batch, mask_word_batch
| StarcoderdataPython |
11351759 | import discord
from .utils import Time
from .fluctlight_ext import Fluct
from .db.jsonstorage import JsonApi
async def report_lect_attend(bot, attendants: list, week: int) -> None:
    """Record lecture attendance for *week*.

    For every attendant: bump their score, activity log, and lecture
    attendance record. Per-member failures are reported to the
    #sqcs-lecture-attend channel instead of aborting the whole run.
    Finally, a summary entry is appended to the LectureLogging document.
    """
    report_json = JsonApi.get('LectureLogging')
    guild = bot.get_guild(784607509629239316)  # hard-coded guild id
    report_channel = discord.utils.get(guild.text_channels, name='sqcs-lecture-attend')

    fluct_ext = Fluct(score_mode='lect_attend')
    for member_id in attendants:
        try:
            await fluct_ext.add_score(member_id)
            await fluct_ext.active_log_update(member_id)
            await fluct_ext.lect_attend_update(member_id)
        # Fix: catch Exception instead of BaseException -- the broad clause
        # also swallowed asyncio.CancelledError / SystemExit, preventing task
        # cancellation from propagating through this coroutine.
        except Exception:
            await report_channel.send(
                f'[DB MANI ERROR][to: {member_id}][score_mode: lect_attend]')

    report_json["logs"].append(
        f'[LECT ATTEND][week: {week}][attendants:\n'
        f'{attendants}\n'
        f'[{Time.get_info("whole")}]'
    )
    JsonApi.put('LectureLogging', report_json)
| StarcoderdataPython |
12810558 | <gh_stars>0
class Config(object):
    """ Wrapper class for various (hyper)parameters.

    All values are plain attributes set in __init__ so callers can read or
    override any of them after construction.
    """
    def __init__(self):
        # about the model architecture
        self.cnn = 'vgg16'               # 'vgg16' or 'resnet50'
        self.max_caption_length = 20
        self.dim_embedding = 512
        self.num_lstm_units = 512
        self.num_initalize_layers = 2    # 1 or 2
        self.dim_initalize_layer = 512
        self.num_attend_layers = 2       # 1 or 2
        self.dim_attend_layer = 512
        self.num_decode_layers = 2       # 1 or 2
        self.dim_decode_layer = 1024

        # about the weight initialization and regularization
        self.fc_kernel_initializer_scale = 0.08
        self.fc_kernel_regularizer_scale = 1e-4
        self.fc_activity_regularizer_scale = 0.0
        self.conv_kernel_regularizer_scale = 1e-4
        self.conv_activity_regularizer_scale = 0.0
        self.fc_drop_rate = 0.5
        self.lstm_drop_rate = 0.3
        self.attention_loss_factor = 0.01

        # about the optimization
        self.num_epochs = 100
        self.batch_size = 32
        self.optimizer = 'Adam'    # 'Adam', 'RMSProp', 'Momentum' or 'SGD'
        self.initial_learning_rate = 0.0001
        self.learning_rate_decay_factor = 1.0
        self.num_steps_per_decay = 100000
        self.clip_gradients = 5.0
        self.momentum = 0.0
        self.use_nesterov = True
        self.decay = 0.9
        self.centered = True
        self.beta1 = 0.9
        self.beta2 = 0.999
        self.epsilon = 1e-6

        # about the saver
        self.save_period = 1000
        self.save_dir = './models/'
        self.summary_dir = './summary/'

        # about the vocabulary
        self.vocabulary_file = './vocabulary.csv'
        self.vocabulary_size = 5000

        # about the training
        self.train_image_dir = './train/images/'
        self.train_caption_file = './train/captions_train2014.json'
        self.temp_annotation_file = './train/anns.csv'
        self.temp_data_file = './train/data.npy'

        # about the evaluation (one image/caption/result set per dataset)
        self.eval_image_dir = './val/images/'
        self.eval_caption_file = './val/captions_val2014.json'
        self.eval_image_unsplash = './data/unsplash/images/'
        self.eval_caption_file_unsplash = './data/unsplash/captions_unsplash2020.json'
        self.eval_result_dir_unsplash = './data/unsplash/results/'
        self.eval_result_file_unsplash = './data/unsplash/results.json'
        self.eval_image_vizwiz = './data/vizwiz/images/'
        self.eval_caption_file_vizwiz = './data/vizwiz/captions_vizwiz2020.json'
        self.eval_result_dir_vizwiz = './data/vizwiz/results/'
        self.eval_result_file_vizwiz = './data/vizwiz/results.json'
        self.eval_image_vizwiz_train = './data/vizwiz_train/images/'
        self.eval_caption_file_vizwiz_train = './data/vizwiz_train/captions_vizwiz2020.json'
        self.eval_result_dir_vizwiz_train = './data/vizwiz_train/results/'
        self.eval_result_file_vizwiz_train = './data/vizwiz_train/results.json'
        self.eval_image_insta = './data/instagram/images/'
        self.eval_caption_file_insta = './data/instagram/captions_insta2020.json'
        self.eval_result_dir_insta = './data/instagram/results/'
        self.eval_result_file_insta = './data/instagram/results.json'
        self.eval_image_topN = './data/google_top_n/images/'
        self.eval_caption_file_topN = './data/google_top_n/captions_topN2020.json'
        self.eval_result_dir_topN = './data/google_top_n/results/'
        self.eval_result_file_topN = './data/google_top_n/results.json'
        self.eval_result_dir = './val/results/'
        self.eval_result_file = './val/results.json'
        # Fix: this flag was previously assigned (the same value) six times,
        # once after every dataset section. A single assignment suffices.
        self.save_eval_result_as_image = False

        # about the testing
        self.test_image_dir = './test/images/'
        self.test_result_dir = './test/results/'
        self.test_result_file = './test/results.csv'
| StarcoderdataPython |
4896379 | <gh_stars>10-100
"""A set of utils to generate Tensorflow Dataset instances."""
import logging
import tensorflow as tf
import word2vec.utils.vocab as vocab_utils
logger = logging.getLogger(__name__)

# Names exported by `from ... import *`. Must be a sequence of strings:
# the original value `('get_w2v_train_dataset')` was a bare parenthesized
# string (missing trailing comma), which `import *` would iterate character
# by character.
__all__ = ('get_w2v_train_dataset',)
def ctx_idxx(target_idx, window_size, tokens):
    """Return positions of context words.

    Graph-mode TF: builds the 1-D int32 tensor of token indices inside a
    symmetric window of `window_size` around `target_idx`, excluding the
    target position itself. The window is clipped to [0, len(tokens)).
    """
    # Candidate indices: the clipped window around the target (inclusive of
    # the target itself for now).
    ctx_range = tf.range(start=tf.maximum(tf.constant(0, dtype=tf.int32),
                                          target_idx-window_size),
                         limit=tf.minimum(tf.size(input=tokens, out_type=tf.int32),
                                          target_idx+window_size+1),
                         delta=1, dtype=tf.int32)
    # Position of the target *inside* ctx_range: window_size normally, or
    # target_idx itself when the window was clipped on the left.
    idx = tf.case({tf.less_equal(target_idx, window_size): lambda: target_idx,
                   tf.greater(target_idx, window_size): lambda: window_size},
                  exclusive=True)
    # Four ways to drop the target from ctx_range, chosen below depending on
    # whether it sits at the left edge (t1), right edge (t2), middle (t3),
    # or is the only element (t0, empty result).
    t0 = lambda: tf.constant([], dtype=tf.int32)
    t1 = lambda: ctx_range[idx+1:]
    t2 = lambda: ctx_range[0:idx]
    t3 = lambda: tf.concat([ctx_range[0:idx], ctx_range[idx+1:]], axis=0)
    c1 = tf.logical_and(tf.equal(idx, 0),
                        tf.less(idx+1, tf.size(input=ctx_range, out_type=tf.int32)))
    c2 = tf.logical_and(tf.greater(idx, 0),
                        tf.equal(idx+1, tf.size(input=ctx_range, out_type=tf.int32)))
    c3 = tf.logical_and(tf.greater(idx, 0),
                        tf.less(idx+1, tf.size(input=ctx_range, out_type=tf.int32)))
    return tf.case({c1: t1, c2: t2, c3: t3}, default=t0, exclusive=True)
# pylint: disable=R1710
def concat_to_features_and_labels(tokens, train_mode, window_size):
    """Concatenate features and labels into Tensor.

    Returns a closure suitable as a tf.while_loop body: given the
    accumulated (features, labels, target_idx), it appends the example(s)
    generated for `tokens[target_idx]` and advances the index.
    """
    def internal_func(features, labels, target_idx):
        if train_mode not in ['cbow', 'skipgram']:
            raise Exception('Unsupported Word2Vec mode \'{}\''
                            .format(train_mode))
        ctxs = ctx_idxx(target_idx, window_size, tokens)
        if train_mode == 'cbow':
            # One example: context words (right-padded to 2*window_size with
            # a mask token) -> target word.
            ctx_features = tf.gather(tokens, ctxs)
            diff = 2 * window_size - tf.size(input=ctx_features)
            ctx_features = tf.reshape(ctx_features, [1, -1])
            paddings = tf.concat(
                [tf.constant([[0, 0]]),
                 tf.concat([tf.constant([[0]]), [[diff]]], axis=1)], axis=0)
            padded_ctx_features = tf.pad(tensor=ctx_features, paddings=paddings,
                                         constant_values='_CBOW#_!MASK_')
            label = tf.reshape(tokens[target_idx], [1, -1])
            return tf.concat([features, padded_ctx_features], axis=0), \
                   tf.concat([labels, label], axis=0), target_idx+1
        if train_mode == 'skipgram':
            # One example per context word: target word -> context word.
            label = tf.reshape(tf.gather(tokens, ctxs), [-1, 1])
            feature = tf.fill([tf.size(input=label)], tokens[target_idx])
            return tf.concat([features, feature], axis=0), \
                   tf.concat([labels, label], axis=0), target_idx+1
    return internal_func
def extract_examples(tokens, train_mode, window_size, p_num_threads):
    """Extract (features, labels) examples from a list of tokens.

    Runs a graph-mode tf.while_loop over every target position in `tokens`,
    accumulating CBOW or skipgram examples. CBOW features have shape
    [None, 2*window_size] (string), skipgram features [None] (string);
    labels are [None, 1] strings in both modes.
    """
    if train_mode not in ['cbow', 'skipgram']:
        raise Exception('Unsupported Word2Vec mode \'{}\''
                        .format(train_mode))
    # Empty accumulators; the feature shape depends on the training mode.
    if train_mode == 'cbow':
        features = tf.constant([], shape=[0, 2*window_size], dtype=tf.string)
    elif train_mode == 'skipgram':
        features = tf.constant([], dtype=tf.string)
    labels = tf.constant([], shape=[0, 1], dtype=tf.string)
    target_idx = tf.constant(0, dtype=tf.int32)
    concat_func = concat_to_features_and_labels(tokens, train_mode,
                                                window_size)
    max_size = tf.size(input=tokens, out_type=tf.int32)
    # Loop condition: continue while the target index is inside the sentence.
    idx_below_tokens_size = lambda w, x, idx: tf.less(idx, max_size)
    if train_mode == 'cbow':
        result = tf.while_loop(
            cond=idx_below_tokens_size,
            body=concat_func,
            loop_vars=[features, labels, target_idx],
            # Leading dims are None: the number of examples grows each step.
            shape_invariants=[tf.TensorShape([None, 2*window_size]),
                              tf.TensorShape([None, 1]),
                              target_idx.get_shape()],
            parallel_iterations=p_num_threads)
    elif train_mode == 'skipgram':
        result = tf.while_loop(
            cond=idx_below_tokens_size,
            body=concat_func,
            loop_vars=[features, labels, target_idx],
            shape_invariants=[tf.TensorShape([None]),
                              tf.TensorShape([None, 1]),
                              target_idx.get_shape()],
            parallel_iterations=p_num_threads)
    return result[0], result[1]
def sample_prob(tokens, sampling_rate, word_count_table, total_count):
    """Per-token discard probability from the w2v paper: p = 1 - sqrt(t/f),
    where t is the sampling rate and f the token's corpus frequency."""
    frequency = word_count_table.lookup(tokens) / total_count
    return 1 - tf.sqrt(sampling_rate / frequency)
def filter_tokens_mask(tokens, sampling_rate, word_count_table, total_count):
    """Filter tokens in a sentence.

    Remove unfrequent words (words with count < min_count) and apply
    subsampling according to the original W2V paper.
    The word_count_table already contains words with counts >= min_count
    and its default value is 0, hence the tf.greater(..., 0) condition.

    Returns a boolean mask over `tokens` (True = keep). Non-deterministic:
    each call draws fresh uniform randoms for the subsampling test.
    """
    return tf.logical_and(
        tf.greater(word_count_table.lookup(tokens),
                   tf.constant(0, dtype=tf.float64)),
        # Keep a word when its discard probability falls below a fresh
        # uniform draw, so frequent words are dropped more often.
        tf.less(sample_prob(tokens, sampling_rate, word_count_table,
                            total_count),
                tf.random.uniform(shape=[tf.size(input=tokens)],
                                  minval=0, maxval=1, dtype=tf.float64)))
def sample_tokens(tokens, sampling_rate, word_count_table, total_count):
    """Apply min-count filtering and frequency subsampling to `tokens`."""
    keep_mask = filter_tokens_mask(tokens, sampling_rate, word_count_table,
                                   total_count)
    return tf.boolean_mask(tensor=tokens, mask=keep_mask)
def get_w2v_train_dataset(training_data_filepath, train_mode,
                          words, counts, total_count, window_size,
                          sampling_rate, batch_size, num_epochs,
                          p_num_threads, shuffling_buffer_size):
    """Generate a Tensorflow Dataset for a Word2Vec model.

    Pipeline: one text line per sentence -> strip / drop empty lines ->
    whitespace tokenization -> frequency subsampling -> (features, labels)
    example extraction (cbow or skipgram) -> shuffle / repeat / batch.
    """
    word_count_table = vocab_utils.get_tf_word_count_table(words, counts)
    return (tf.data.TextLineDataset(training_data_filepath)
            .map(tf.strings.strip, num_parallel_calls=p_num_threads)
            .filter(lambda x: tf.not_equal(tf.strings.length(input=x), 0))
            .map(lambda x: tf.strings.split([x]).to_sparse(),
                 num_parallel_calls=p_num_threads)
            .map(lambda x: sample_tokens(x.values, sampling_rate,
                                         word_count_table, total_count),
                 num_parallel_calls=p_num_threads)
            # Keep examples with at least 2 tokens
            .filter(lambda x: tf.greater(tf.size(input=x), 1))
            .map(lambda x: extract_examples(x, train_mode, window_size,
                                            p_num_threads),
                 num_parallel_calls=p_num_threads)
            .flat_map(lambda features, labels: \
                tf.data.Dataset.from_tensor_slices((features, labels)))
            .shuffle(buffer_size=shuffling_buffer_size,
                     reshuffle_each_iteration=False)
            .repeat(num_epochs)
            .batch(batch_size, drop_remainder=True))
    # we need drop_remainder to statically know the batch dimension
    # this is required to get features.get_shape()[0] in
    # w2v_estimator.avg_ctx_features_embeddings
| StarcoderdataPython |
359457 | import os, random, pickle
from thebutton.parser import parse
from thebutton.standardwaitstep import StandardWaitStep
# Pickle file (in the working directory) recording finished challenge names.
COMPLETED_CHALLENGES_PATH = "completed_challenges.pkl"

def load_completed():
    """Return the set of already-completed challenge filenames.

    An empty set is returned when no record file exists yet.
    """
    try:
        handle = open(COMPLETED_CHALLENGES_PATH, "rb")
    except FileNotFoundError:
        return set()
    with handle:
        return pickle.load(handle)
def save_completed(completed):
    """Persist the set of completed challenge filenames to disk.

    Fix: the original returned the result of pickle.dump, which is always
    None; the misleading `return` has been dropped.
    """
    with open(COMPLETED_CHALLENGES_PATH, "wb") as f:
        pickle.dump(completed, f)
def load_all_challenges(path, randomise):
    """Yield parsed steps for every unfinished .json challenge under *path*.

    Each challenge is preceded by a StandardWaitStep and is recorded as
    completed (persisted to disk) as soon as its steps have been yielded.
    With randomise=True the challenge order is shuffled; otherwise it is
    alphabetical.
    """
    completed = load_completed()
    pending = list(set(os.listdir(path)) - completed)
    if randomise:
        random.shuffle(pending)
    else:
        pending.sort()
    for filename in pending:
        if not filename.endswith(".json"):
            continue
        yield StandardWaitStep
        yield from parse(os.path.join(path, filename))
        completed.add(filename)
        save_completed(completed)
| StarcoderdataPython |
8031248 | from eru.utils import init_gpu
from eru.model import Model
from eru.layers import GRU, Dense, Input, Activation
import random
import torch
from urllib.request import urlopen
from torch.autograd import Variable
# Pin computation to GPU index 3 (side effect at import time).
init_gpu(3)

# Download the digits of pi used as training data. NOTE(review): this is a
# network fetch at import time of a very large file.
url = "https://stuff.mit.edu/afs/sipb/contrib/pi/pi-billion.txt"
html = urlopen(url).read()  # raw response bytes (name is misleading: not HTML)
pi = html.decode()
# Rebuild the text as a pure digit string by dropping the second character --
# presumably the decimal point of a leading "3." (TODO confirm source format).
pi = "3" + pi[2:]
class Onehot(object):
    """One-hot encoder over a fixed symbol alphabet, producing CUDA tensors.

    NOTE(review): tensors are allocated directly on the GPU via
    torch.cuda.FloatTensor and wrapped in the long-deprecated
    torch.autograd.Variable; kept as-is for compatibility with the
    surrounding training script.
    """
    def __init__(self, possible_inputs):
        self.possible_inputs = possible_inputs
        # Bidirectional symbol <-> index mappings.
        self.word2idx = dict((w, i) for i, w in enumerate(possible_inputs))
        self.idx2word = dict((i, w) for i, w in enumerate(possible_inputs))

    def encode(self, input):
        """Encode one sequence as a (len(input), vocab) one-hot float matrix."""
        res = Variable(torch.cuda.FloatTensor(len(input), len(self.possible_inputs)).zero_())
        for i, value in enumerate(input):
            res[i, self.word2idx[value]] = 1
        return res

    def encode_batch(self, batch_input):
        """Encode a batch of sequences into a (seq_len, batch, vocab) tensor.

        Fix: the original wrote the encoded tensors back into the caller's
        list (mutating the argument in place); a fresh list is built instead.
        """
        encoded = [self.encode(seq) for seq in batch_input]
        return torch.stack(encoded, dim=1)

    def forward(self, input):
        return self.encode_batch(input)

    def cuda(self):
        # Tensors are already created on the GPU; nothing to move.
        return self
# Alphabet: the ten decimal digits, converted to single-character strings.
possible_inputs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
possible_inputs = [str(x) for x in possible_inputs]
# Shared encoder instance and training batch size used by Gene below.
one_hot_encode = Onehot(possible_inputs)
batch_size = 128
class Gene(object):
    """Endless generator of training batches drawn from a long digit string."""

    def __init__(self, data):
        self.data = data

    def generate(self):
        """Yield (one-hot windows, next-digit labels) batches forever.

        Each sample is a random 35-character window of the data; its label
        is the single digit immediately following the window.
        """
        limit = len(self.data)
        window_batch = []
        label_batch = []
        while True:
            start = random.randrange(0, limit - 50)
            window_batch.append(self.data[start:start + 35])
            label_batch.append([int(ch) for ch in self.data[start + 35]])
            if len(window_batch) == batch_size:
                encoded = one_hot_encode.forward(window_batch)
                labels = Variable(torch.cuda.LongTensor(label_batch)).view(-1)
                yield encoded, labels
                window_batch = []
                label_batch = []
# Character-level model: one-hot digit window -> GRU (final state only)
# -> dense projection -> softmax over the next digit.
input = Input(10)  # NOTE(review): shadows the builtin `input`
x = GRU(10, 128, return_sequence=False)(input)
x = Dense(128, 10)(x)
x = Activation("softmax")(x)
cur_model = Model(input, x)
cur_model.compile("adam", "crossentropy", metrics=['loss', 'acc'])

# Train for one epoch over 1000 generator steps of the pi digit stream.
g = Gene(pi)
cur_model.fit_generator(g, batch_size=128, epochs=1, train_length=1000)
| StarcoderdataPython |
8147208 | <reponame>quentin-ma/study-task-scheduling<gh_stars>1-10
"""Module containing the generation of tasks for scheduling algorithms.
Each task contains parameters that indicate its identifier, its processing time,
dependencies, and others.
"""
import random
class Task:
    """A single schedulable task (vertex of a dependency DAG).

    Attributes
    ----------
    id : int
        Task identifier
    load : int
        Processing time of the task (its weight)
    predecessors / successors : set of Task.id
        Direct neighbours in the dependency DAG
    priority : int
        Scheduling priority, -1 until computed
    top_level : int
        Largest weight of a path from a top vertex, excluding this task's
        weight; -1 until computed
    bottom_level : int
        Largest weight of a path from a bottom vertex, including this
        task's weight; -1 until computed
    """

    def __init__(self, id, load):
        self.id = id
        self.load = load
        self.predecessors = set()
        self.successors = set()
        # -1 marks "not yet computed" for all scheduling metrics.
        self.priority = -1
        self.top_level = -1
        self.bottom_level = -1

    def __repr__(self):
        """Detailed representation of the task."""
        return (f'Task {self.id} (load={self.load}, pred={self.predecessors}, '
                f'succ={self.successors}, priority={self.priority})')

    def __lt__(self, other):
        """Order tasks by their (computed) priority."""
        return self.priority < other.priority
class Graph:
    """
    DAG of tasks for scheduling algorithms.

    Attributes
    ----------
    vertices : dict of (Task.id, Task)
        Tasks in the graph
    topological_order : list of Task.id
        List of tasks in topological order
    """
    def __init__(self):
        self.vertices = dict()
        self.topological_order = list()

    def __repr__(self):
        """Simplified representation of the graph"""
        text = 'Vertices:\n'
        for vertex in self.vertices.values():
            text += str(vertex) + '\n'
        text += 'Topological order:\n'
        text += str(self.topological_order)
        return text

    def topological_ordering(self):
        """Computes a topological order for the graph (Kahn-style BFS)."""
        self.topological_order = list()
        checked_vertices = set()  # covered vertices
        to_check = set()          # successors of covered vertices
        # First step: find all top (entry) vertices
        for id, vertex in self.vertices.items():
            # No predecessor == top
            if not vertex.predecessors:
                checked_vertices.add(id)
                self.topological_order.append(id)
                # Adds successors to the list of vertices to check
                to_check.update(list(vertex.successors))
        # Second step: find an order for all other vertices
        while to_check:
            # Making sure the traversal is not adding a vertex more than once
            to_check = to_check - checked_vertices
            to_check_later = set()  # Vertices to check in another iteration
            # Iterates over vertices to check
            for id in to_check:
                # A vertex may join the order once all predecessors are in
                vertex = self.vertices[id]
                if vertex.predecessors.issubset(checked_vertices):
                    self.topological_order.append(id)
                    # Its successors can now be considered for checking
                    to_check_later.update(list(vertex.successors))
                    checked_vertices.add(id)
                else:
                    # Adds it to be checked again later
                    to_check_later.add(id)
            # Updates the set to check for a new pass
            to_check = to_check_later
        # Sanity check: every vertex must appear (fails e.g. on cycles)
        if len(self.vertices) != len(self.topological_order):
            print('Error: topological order is missing items.')

    def reset_predecessors(self):
        """Resets the lists of predecessors of each task"""
        for id, task in self.vertices.items():
            for succ_id in task.successors:
                self.vertices[succ_id].predecessors.add(id)

    @staticmethod
    def generate_graph(
        num_tasks,
        load_range,
        dependency_range,
        rename=False,
        rng_seed=None
    ):
        """
        Generates a graph of tasks.

        Parameters
        ----------
        num_tasks : int
            Number of tasks in the graph
        load_range : (int, int)
            Minimum and maximum loads for tasks (max exclusive, randrange)
        dependency_range : (int, int)
            Minimum and maximum number of predecessors for each task
        rename : bool [default = False]
            True if task identifiers have to be shuffled
        rng_seed : int [optional]
            Random number generator seed

        Returns
        -------
        Graph object
            DAG of tasks
        """
        graph = Graph()  # graph to generate and return
        min_load, max_load = load_range
        min_dep, max_dep = dependency_range
        # Translation of ordered to unordered ids if necessary.
        # (Renamed from `id`, which shadowed the builtin.)
        ids = [i for i in range(num_tasks)]
        if rename:
            # Use a related but distinct seed for the shuffle so renaming
            # does not disturb the main generation stream.
            # Fix: `random.seed(rng_seed - 1)` raised TypeError when the
            # default rng_seed=None was combined with rename=True.
            random.seed(None if rng_seed is None else rng_seed - 1)
            random.shuffle(ids)
        random.seed(rng_seed)  # set random seed (None -> system entropy)
        # Generate tasks.
        # The first one has to be a top (entry) vertex
        load = random.randrange(min_load, max_load)
        graph.vertices[ids[0]] = Task(ids[0], load)
        # The other ones need to have their dependencies created
        for task in range(1, num_tasks):
            # Creates task
            load = random.randrange(min_load, max_load)
            graph.vertices[ids[task]] = Task(ids[task], load)
            # Checks how many dependencies it should have
            num_dep = random.randrange(min_dep, max_dep)
            if num_dep < task:
                # Enough earlier tasks: sample num_dep distinct predecessors
                for pred in random.sample(range(task), k=num_dep):
                    graph.vertices[ids[task]].predecessors.add(ids[pred])
                    graph.vertices[ids[pred]].successors.add(ids[task])
            else:
                # Not enough earlier tasks: depend on all previous tasks
                for pred in range(task):
                    graph.vertices[ids[task]].predecessors.add(ids[pred])
                    graph.vertices[ids[pred]].successors.add(ids[task])
        # Last thing to generate: the topological order
        graph.topological_ordering()
        return graph
| StarcoderdataPython |
1964588 | <gh_stars>0
import os
from cloudburst.client.client import CloudburstConnection
# Module-level singleton connection, created lazily on first use.
cloudburst = None


def get_or_init_client():
    """Return the shared CloudburstConnection, creating it on first call.

    Reads MODIN_IP / MODIN_CONNECTION from the environment. NOTE(review):
    if either variable is unset, None is passed straight to
    CloudburstConnection -- presumably the constructor tolerates that;
    confirm before relying on it.
    """
    global cloudburst
    if cloudburst is None:
        ip = os.environ.get("MODIN_IP", None)
        conn = os.environ.get("MODIN_CONNECTION", None)
        cloudburst = CloudburstConnection(conn, ip, local=True)
    return cloudburst
| StarcoderdataPython |
9774881 | <gh_stars>0
import json
import os
import pytest
from freezegun import freeze_time
from actors_films import display_output, get_actor, get_movies, write_to_disk
from classes import Actor, Movie
@pytest.fixture
def mock_retrieve_celebs(monkeypatch):
    """Patch imdb_calls.retrieve_celebs to return canned search HTML.

    Fix: the original lambda (`open(...).read()`) leaked an open file
    handle on every call; the replacement reads the sample inside a
    context manager.
    """
    def fake_retrieve(_query):
        with open("tests/bruce_willis_actor_sample_html") as sample:
            return sample.read()
    monkeypatch.setattr("imdb_calls.retrieve_celebs", fake_retrieve)
@pytest.fixture
def mock_retrieve_movies(monkeypatch):
    """Patch imdb_calls.retrieve_movies to return canned filmography HTML.

    Fix: the original lambda (`open(...).read()`) leaked an open file
    handle on every call; the replacement reads the sample inside a
    context manager.
    """
    def fake_retrieve(_actor):
        with open("tests/bruce_willis_movie_sample_html") as sample:
            return sample.read()
    monkeypatch.setattr("imdb_calls.retrieve_movies", fake_retrieve)
def test_get_actor(mock_retrieve_celebs):
    """get_actor parses the canned search HTML into the expected Actor."""
    expected = Actor("<NAME>", "/name/nm0000246/", "Actor", "Die Hard")
    actual = get_actor("bruce willis")
    for attr in ("name", "slug", "example_film", "example_job"):
        assert getattr(actual, attr) == getattr(expected, attr)
def test_get_movies(mock_retrieve_movies):
    """get_movies parses the canned filmography HTML (125 entries)."""
    actor = Actor("<NAME>", "/name/nm0000246/", "Actor", "Die Hard")
    movies = get_movies(actor)
    first, last = movies[0], movies[-1]
    assert (first.title, first.year) == ("McClane", "Upcoming")
    assert (last.title, last.year) == ("The First Deadly Sin", "1980")
    assert len(movies) == 125
def test_display_output(capsys):
    """With order -1 the movies are printed oldest-first."""
    movies = [Movie("The Shawshank Redemption", "1994"), Movie("The Godfather", "1972")]
    display_output(movies, -1)
    printed = capsys.readouterr().out
    assert printed == "Movies:\n1) The Godfather, 1972\n2) The Shawshank Redemption, 1994\n"
def test_display_output_reversed_order(capsys):
    """With order 1 the movies are printed newest-first."""
    movies = [Movie("The Shawshank Redemption", "1994"), Movie("The Godfather", "1972")]
    display_output(movies, 1)
    printed = capsys.readouterr().out
    assert printed == "Movies:\n1) The Shawshank Redemption, 1994\n2) The Godfather, 1972\n"
@freeze_time("2001-01-01 01:01:01")
def test_write_to_disk():
    """write_to_disk dumps actor + movies as JSON named after actor and time.

    Fix: the output file is now removed in a `finally` block, so a failing
    assertion no longer leaks the file into the working directory.
    """
    expected_json = {
        "actor": "<NAME>",
        "movies": [
            {"title": "The Godfather", "year": "1972"},
            {"title": "The Shawshank Redemption", "year": "1994"},
        ],
    }
    actor = Actor("<NAME>", "/name/nm0000246/", "Actor", "Die Hard")
    movies = [Movie("The Shawshank Redemption", "1994"), Movie("The Godfather", "1972")]
    write_to_disk(actor, movies, -1)
    output_path = "Bruce Willis-2001-01-01 01:01:01.json"
    try:
        with open(output_path, "r") as f:
            actual_json = json.loads(f.read())
        assert actual_json == expected_json
    finally:
        os.remove(output_path)
| StarcoderdataPython |
1982883 | """ btclib build script for setuptools.
"""
from setuptools import find_packages, setup # type: ignore
import btclib
# Long description for PyPI, taken verbatim from the README.
with open("README.md", "r", encoding="ascii") as file_:
    longdescription = file_.read()

# Package metadata is sourced from the btclib package itself
# (name / version / author / license).
setup(
    name=btclib.name,
    version=btclib.__version__,
    url="https://btclib.org",
    project_urls={
        "Download": "https://github.com/btclib-org/btclib/releases",
        "Documentation": "https://btclib.readthedocs.io/",
        "GitHub": "https://github.com/btclib-org/btclib",
        "Issues": "https://github.com/btclib-org/btclib/issues",
        "Pull Requests": "https://github.com/btclib-org/btclib/pulls",
    },
    license=btclib.__license__,
    author=btclib.__author__,
    author_email=btclib.__author_email__,
    description="A library for 'bitcoin cryptography'",
    long_description=longdescription,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    include_package_data=True,
    # Ship the bundled data files and the PEP 561 typing marker.
    package_data={"btclib": ["_data/*", "ecc/_data/*", "mnemonic/_data/*", "py.typed"]},
    # test_suite="btclib.tests",
    # Runtime backports needed only on Python < 3.7.
    install_requires=[
        "backports-datetime-fromisoformat>=1.0.0; python_version<'3.7'",
        "dataclasses>=0.8; python_version<'3.7'",
        "dataclasses_json",
    ],
    keywords=(
        "bitcoin cryptography elliptic-curves ecdsa schnorr RFC-6979 "
        "bip32 bip39 electrum base58 bech32 segwit message-signing "
        "bip340"
    ),
    python_requires=">=3.6",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Topic :: Security :: Cryptography",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| StarcoderdataPython |
6592512 | """
MIT License
Copyright (c) 2020 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import discord
from discord.ext import commands, flags
import json
import re
class Koding(commands.Cog, name="Koding's Custom Features"):
    """Per-user anti-swear cog configured through koding.json.

    NOTE(review): every literal `<PASSWORD>` below is a dataset-redaction
    artifact that replaced a hard-coded Discord user id; as published this
    class body does not parse. Restore the real ids before use.
    """
    def __init__(self, bot):
        self.bot = bot
        # Persistent configuration: {"antiswear": bool, "words": [str, ...]}
        self.konfig = json.load(open('koding.json', 'r'))
        if not hasattr(self.bot, 'kodingantiswear'):
            self.bot.kodingantiswear = self.konfig.get('antiswear', True)
        self.swear = self.konfig.get('words', [])
        # Matches http(s) URLs so they can be masked before the word check.
        self.urlregex = r'(?:https:\/\/|http:\/\/)[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&\/\/=]*)'

    @commands.command(name='kodingswear')
    async def stopswearingkoding(self, ctx, state: bool = True):
        # Owner-only toggle; the new state is persisted back to koding.json.
        if ctx.author.id != 3<PASSWORD>:
            return await ctx.send('no')
        self.bot.kodingantiswear = state
        self.konfig['antiswear'] = state
        json.dump(self.konfig, open('koding.json', 'w'), indent=4)
        e = 'enabled' if self.bot.kodingantiswear else 'disabled'
        return await ctx.send(f'Antiswear is now {e}')

    @commands.command(name='klistswear', aliases=['kswearlist'])
    async def swearlist(self, ctx):
        # Owner-only: dump the current word list in a code block.
        if ctx.author.id != <PASSWORD>:
            return await ctx.send('no')
        return await ctx.send('```\n' + ', '.join(self.swear) + '```')

    @commands.command(name='kaddswear', aliases=['kswearadd'])
    async def addswear(self, ctx, word: str, f: flags.FlagParser(remove=bool) = flags.EmptyFlags):
        # Owner-only: add a word to (or, with --remove, drop it from) the
        # persisted word list.
        if ctx.author.id != <PASSWORD>:
            return await ctx.send('no')
        remove = False
        if isinstance(f, dict):
            remove = f['remove']
        if not remove:
            self.konfig['words'].append(word)
            json.dump(self.konfig, open('koding.json', 'w'), indent=4)
            self.swear = self.konfig['words']
            return await ctx.send(f'Added {word} to the naughty list')
        elif word in self.swear:
            self.konfig['words'].remove(word)
            json.dump(self.konfig, open('koding.json', 'w'), indent=4)
            self.swear = self.konfig['words']
            return await ctx.send(f'Removed {word} from the naughty list')

    @commands.Cog.listener()
    async def on_message(self, message):
        # Only police the one targeted user.
        if message.author.id != <PASSWORD>:
            return
        # Escape hatch: messages ending in "/i" are ignored.
        if message.content.endswith('/i'):
            return
        # Mask URLs, then strip everything but alphanumerics and spaces
        # before the word-by-word check.
        tocheck = re.sub(self.urlregex, 'URL',
                         message.content, 0, re.MULTILINE)
        tocheck = re.sub(r'[^A-Za-z0-9 ]', '', tocheck, 0, re.MULTILINE)
        if any(swear in tocheck.lower().split(' ') for swear in self.swear) and self.bot.kodingantiswear:
            try:
                await message.delete()
            except Exception:
                await message.author.send('uh oh, you did a naughty! don\'t do that!')
def setup(bot):
    """Extension entry point called by discord.py when loading this module."""
    bot.add_cog(Koding(bot))
    # Fixed: the original used an f-string with no placeholders (F541);
    # the logged text is unchanged.  $GREEN is a color token for the
    # bot's custom logger.
    bot.logger.info('$GREENLoaded Koding\'s custom features!')
| StarcoderdataPython |
1736722 | #!/usr/bin/env python
# encoding: utf-8
"""
This module summarises the tasks that are to be run daily.
"""
import luigi
from tasks.ingest.list_hdfs_content import CopyFileListToHDFS
from tasks.analyse.hdfs_analysis import GenerateHDFSSummaries
from tasks.analyse.hdfs_reports import GenerateHDFSReports
from tasks.backup.postgresql import BackupProductionW3ACTPostgres, BackupProductionShinePostgres
from tasks.access.search import PopulateCollectionsSolr, GenerateIndexAnnotations, GenerateW3ACTTitleExport
from tasks.access.generate_acl_files import UpdateAccessWhitelist
from tasks.crawl.w3act import GenerateW3actJsonFromCsv
from tasks.common import state_file
class DailyIngestTasks(luigi.WrapperTask):
    """Daily ingest tasks; should generally run a few hours ahead of the
    access-side tasks (:class:`DailyAccessTasks` below).
    """

    def requires(self):
        """Declare the upstream tasks that make up the daily ingest run."""
        upstream = [
            BackupProductionW3ACTPostgres(),
            BackupProductionShinePostgres(),
            CopyFileListToHDFS(),
            # GenerateHDFSSummaries() is intentionally disabled.
            GenerateHDFSReports(),
            GenerateW3actJsonFromCsv(),
        ]
        return upstream
class DailyAccessTasks(luigi.WrapperTask):
    """Daily access tasks.  May depend on the ingest tasks, but usually runs
    from the access server, so it can't be done in the one job.  Schedule an
    hour or so after :class:`DailyIngestTasks`.
    """

    def requires(self):
        """Declare the upstream tasks refreshing the public access layer."""
        upstream = [
            UpdateAccessWhitelist(),
            GenerateIndexAnnotations(),
            PopulateCollectionsSolr(),
            GenerateW3ACTTitleExport(),
        ]
        return upstream
if __name__ == '__main__':
    # Running from Python, but using the Luigi scheduler:
    # equivalent to `luigi DailyIngestTasks` on the command line.
    luigi.run(['DailyIngestTasks'])
| StarcoderdataPython |
3345640 | #! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 <NAME> and <NAME>.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## <NAME>. and <NAME>. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Wrappers for interacting with GBIF.
"""
import sys
if sys.version_info.major < 3:
from urllib import urlencode
from urllib import urlopen
else:
from urllib.parse import urlencode
from urllib.request import urlopen
from dendropy.datamodel import basemodel
from dendropy.dataio import xmlprocessing
class GbifXmlElement(xmlprocessing.XmlElement):
    """XmlElement subclass aware of the GBIF / TDWG XML namespaces.

    Adds convenience finders for the fields of a TaxonOccurrence record.
    Each ``find_*`` method returns the matching sub-element, or just its
    text (possibly None) when ``text_only=True``.
    """
    GBIF_NAMESPACE = "http://portal.gbif.org/ws/response/gbif"
    TAXON_OCCURRENCE_NAMESPACE = "http://rs.tdwg.org/ontology/voc/TaxonOccurrence#"
    TAXON_CONCEPT_NAMESPACE = "http://rs.tdwg.org/ontology/voc/TaxonConcept#"
    TAXON_NAME_NAMESPACE = "http://rs.tdwg.org/ontology/voc/TaxonName#"
    RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
    def __init__(self, element, default_namespace=None):
        # Default to the GBIF response namespace unless the caller overrides.
        if default_namespace is None:
            default_namespace = GbifXmlElement.GBIF_NAMESPACE
        xmlprocessing.XmlElement.__init__(self,
                element=element,
                default_namespace=default_namespace)
    def get_about_attr(self):
        """Return the rdf:about attribute (the record's URI), or None."""
        return self._element.get("{%s}about" % self.RDF_NAMESPACE)
    # /gbifResponse/dataProviders/dataProvider/dataResources/dataResource/occurrenceRecords/occurrenceRecord
    def iter_taxon_occurrence(self):
        """Iterate over all TaxonOccurrence descendants of this element."""
        return self.namespaced_getiterator("TaxonOccurrence",
                namespace=self.TAXON_OCCURRENCE_NAMESPACE)
    def _process_ret_val(self, element, text_only=False):
        """Return *element* as-is, or its ``.text`` (None if missing) when text_only."""
        if text_only:
            if element:
                return element.text
            else:
                return None
        else:
            return element
    def find_institution_code(self, text_only=False):
        e = self.namespaced_find("institutionCode", namespace=self.TAXON_OCCURRENCE_NAMESPACE)
        return self._process_ret_val(e, text_only)
    def find_collection_code(self, text_only=False):
        e = self.namespaced_find("collectionCode", namespace=self.TAXON_OCCURRENCE_NAMESPACE)
        return self._process_ret_val(e, text_only)
    def find_catalog_number(self, text_only=False):
        e = self.namespaced_find("catalogNumber", namespace=self.TAXON_OCCURRENCE_NAMESPACE)
        return self._process_ret_val(e, text_only)
    def find_longitude(self, text_only=False):
        e = self.namespaced_find("decimalLongitude", namespace=self.TAXON_OCCURRENCE_NAMESPACE)
        return self._process_ret_val(e, text_only)
    def find_latitude(self, text_only=False):
        e = self.namespaced_find("decimalLatitude", namespace=self.TAXON_OCCURRENCE_NAMESPACE)
        return self._process_ret_val(e, text_only)
    def find_taxon_name(self, text_only=False):
        # Nested lookup: occurrence -> identifiedTo -> Identification -> taxonName.
        # path = "{%(ns)s}identifiedTo/{%(ns)s}Identification/{%(ns)s}taxon_name" % {"ns": self.TAXON_OCCURRENCE_NAMESPACE}
        path = ["identifiedTo", "Identification", "taxonName"]
        e = self.namespaced_find(path, namespace=self.TAXON_OCCURRENCE_NAMESPACE)
        return self._process_ret_val(e, text_only)
class GbifDataProvenance(object):
    """Provenance metadata (data provider or data resource) for GBIF records."""

    def __init__(self, xml=None):
        """Initialize all fields to None, then populate from *xml* if given."""
        for field_name in ("name", "gbif_key", "uri", "rights", "citation"):
            setattr(self, field_name, None)
        if xml:
            self.parse_xml(xml)

    def parse_xml(self, xml):
        """Populate this object's fields from a GBIF provenance XML element."""
        self.gbif_key = xml.get("gbifKey")
        self.uri = xml.get_about_attr()
        for field_name in ("name", "rights", "citation"):
            setattr(self, field_name, xml.namespaced_find(field_name).text)
class GbifOccurrenceRecord(object):
    """A single GBIF occurrence record (one specimen/observation).

    Holds provenance fields (institution/collection/catalog), the taxon
    name and decimal coordinates, and can serialize itself as a
    :class:`basemodel.Annotation` tree for attachment to DendroPy objects.
    """
    def parse_from_stream(stream):
        """Parse GBIF response XML from *stream*; return a list of records."""
        xml_doc = xmlprocessing.XmlDocument(file_obj=stream,
                subelement_factory=GbifXmlElement)
        gb_recs = []
        for txo in xml_doc.root.iter_taxon_occurrence():
            gbo = GbifOccurrenceRecord()
            gbo.parse_taxon_occurrence_xml(txo)
            gb_recs.append(gbo)
        return gb_recs
    # Pre-decorator idiom for declaring a static factory method.
    parse_from_stream = staticmethod(parse_from_stream)
    def __init__(self):
        # All fields are populated later by parse_taxon_occurrence_xml().
        self.gbif_key = None
        self.uri = None
        self.institution_code = None
        self.collection_code = None
        self.catalog_number = None
        self.taxon_name = None
        self.data_provider = None
        self.data_resource = None
        self._longitude = None
        self._latitude = None
    def subelement_factory(self, element):
        """Wrap a raw element in the namespace-aware GbifXmlElement."""
        return GbifXmlElement(element)
    def parse_taxon_occurrence_xml(self, txo):
        """Populate this record from a TaxonOccurrence element *txo*."""
        self.gbif_key = txo.get("gbifKey")
        self.uri = txo.get_about_attr()
        self.institution_code = txo.find_institution_code(text_only=True)
        self.collection_code = txo.find_collection_code(text_only=True)
        self.catalog_number = txo.find_catalog_number(text_only=True)
        self.longitude = txo.find_longitude(text_only=True)
        self.latitude = txo.find_latitude(text_only=True)
        self.taxon_name = txo.find_taxon_name(text_only=True)
    def _get_longitude(self):
        return self._longitude
    def _set_longitude(self, value):
        # Coerce to float where possible; keep the raw string otherwise
        # (some records carry non-numeric coordinate values).
        if value is not None:
            try:
                self._longitude = float(value)
            except ValueError:
                self._longitude = value
    longitude = property(_get_longitude, _set_longitude)
    def _get_latitude(self):
        return self._latitude
    def _set_latitude(self, value):
        # Same coercion policy as longitude.
        if value is not None:
            try:
                self._latitude = float(value)
            except ValueError:
                self._latitude = value
    latitude = property(_get_latitude, _set_latitude)
    def __str__(self):
        return "%s (%s) %s: %s [%s %s]" % (
                self.institution_code,
                self.collection_code,
                self.catalog_number,
                self.taxon_name,
                self.longitude,
                self.latitude)
    def _get_coordinates_as_string(self, sep=" "):
        # "longitude latitude", joined by *sep*.
        return "%s%s%s" % (self.longitude, sep, self.latitude)
    coordinates_as_string = property(_get_coordinates_as_string)
    # (A superseded, fully commented-out `as_coordinate_annotation` variant
    # that emitted KML/GML-style coordinate metadata was removed here; see
    # version control history if needed.)
    def as_annotation(self,
            name="TaxonOccurrence",
            name_prefix="to",
            namespace="http://rs.tdwg.org/ontology/voc/TaxonOccurrence#",
            include_gbif_reference=True,
            dynamic=False):
        """
        Build a basemodel.Annotation subtree describing this record.

        When *dynamic* is True, annotation values are (object, attribute
        name) pairs resolved lazily at serialization time; otherwise the
        current attribute values are copied in.

        Sample output (NeXML)::
        <meta xsi:type="nex:ResourceMeta" rel="to:TaxonOccurrence" id="d4324014736" >
            <meta xsi:type="nex:ResourceMeta" rel="dcterms:source" href="http://data.gbif.org/ws/rest/occurrence/get/44726287" id="d4324014800" />
            <meta xsi:type="nex:LiteralMeta" property="to:decimalLongitude" content="-116.02004" datatype="xsd:float" id="d4324014928" />
            <meta xsi:type="nex:LiteralMeta" property="to:decimalLatitude" content="34.67338" datatype="xsd:float" id="d4324014992" />
            <meta xsi:type="nex:LiteralMeta" property="to:institutionCode" content="ROM" datatype="xsd:string" id="d4324015056" />
            <meta xsi:type="nex:LiteralMeta" property="to:collectionCode" content="Herps" datatype="xsd:string" id="d4324015120" />
            <meta xsi:type="nex:LiteralMeta" property="to:catalogNumber" content="14584" datatype="xsd:string" id="d4324015184" />
            <meta xsi:type="nex:LiteralMeta" property="to:scientificName" content="Crotaphytus bicinctores" datatype="xsd:string" id="d4324015248" />
        </meta>
        """
        # name_prefix="dwc",
        # namespace="http://rs.tdwg.org/dwc/terms/",
        top_node = basemodel.Annotation(
                name=name,
                value=None,
                name_prefix=name_prefix,
                namespace=namespace,
                name_is_prefixed=False,
                is_attribute=False,
                annotate_as_reference=True,
                )
        if dynamic:
            is_attribute=True
        else:
            is_attribute=False
        if include_gbif_reference:
            # Link back to the GBIF record URI as a dcterms:source reference.
            if dynamic:
                value = (self, "uri")
            else:
                value = self.uri
            subannote = basemodel.Annotation(
                    name="source",
                    value=value,
                    # name_prefix="dc",
                    # namespace="http://purl.org/dc/elements/1.1/",
                    name_prefix="dcterms",
                    namespace="http://purl.org/dc/terms/",
                    name_is_prefixed=False,
                    is_attribute=is_attribute,
                    annotate_as_reference=True,
                    )
            top_node.annotations.add(subannote)
        # Literal metadata fields: (attribute name, RDF property, datatype).
        for attr in [
                ("longitude", "decimalLongitude", "xsd:float"),
                ("latitude", "decimalLatitude", "xsd:float"),
                ("institution_code", "institutionCode", "xsd:string"),
                ("collection_code", "collectionCode", "xsd:string"),
                ("catalog_number", "catalogNumber", "xsd:string"),
                ("taxon_name", "scientificName", "xsd:string"),
                ]:
            if dynamic:
                value = (self, attr[0])
                is_attribute=True
            else:
                value = getattr(self, attr[0])
                is_attribute=False
            subannote = basemodel.Annotation(
                    name=attr[1],
                    value=value,
                    datatype_hint=attr[2],
                    name_prefix=name_prefix,
                    namespace=namespace,
                    name_is_prefixed=False,
                    is_attribute=is_attribute,
                    annotate_as_reference=False,
                    )
            top_node.annotations.add(subannote)
        return top_node
class GbifDb(object):
    """Base class for GBIF web-service wrappers.

    Subclasses must set ``self.base_url`` (trailing slash included) before
    :meth:`compose_query_url` is used.
    """
    def __init__(self):
        self.base_url = None
    def compose_query_url(self, action, query_dict):
        """Return ``base_url + action`` with *query_dict* as the query string.

        Fixed: values are now URL-encoded via :func:`urlencode` (already
        imported at module top but previously unused), so spaces and
        reserved characters in search terms (e.g. scientific names) no
        longer produce malformed request URLs.  Key order is preserved.
        """
        return self.base_url + action + "?" + urlencode(query_dict)
class GbifOccurrenceDb(GbifDb):
    """Client for the (legacy) GBIF REST occurrence web service."""
    def __init__(self):
        GbifDb.__init__(self)
        self.base_url = "http://data.gbif.org/ws/rest/occurrence/"
    def fetch_keys(self, **kwargs):
        """Run an occurrence 'list' query; return the matching GBIF keys."""
        url = self.compose_query_url(action="list",
                query_dict=kwargs)
        response = urlopen(url)
        return self.parse_list_keys(response)
    def fetch_occurrences(self, **kwargs):
        """Fetch full occurrence records for every key matching the query.

        Note: issues one HTTP 'get' request per matching key.
        """
        keys = self.fetch_keys(**kwargs)
        occurrences = []
        for key in keys:
            url = self.compose_query_url(action="get",
                    query_dict={"key": key})
            response = urlopen(url)
            occurrences.extend(self.parse_occurrence_records(response))
        return occurrences
    def parse_list_keys(self, stream):
        """Extract the gbifKey attribute of every TaxonOccurrence in *stream*."""
        keys = []
        xml_doc = xmlprocessing.XmlDocument(file_obj=stream,
                subelement_factory=GbifXmlElement)
        xml_root = xml_doc.root
        for txml in xml_root.iter_taxon_occurrence():
            keys.append(txml.get("gbifKey"))
        return keys
    def parse_occurrence_records(self, stream):
        """Parse *stream* into GbifOccurrenceRecord instances."""
        # (A fully commented-out hand-rolled provider/resource parser was
        # removed here; see version control history.)
        occurrences = GbifOccurrenceRecord.parse_from_stream(stream)
        return occurrences
| StarcoderdataPython |
import socket

host = ''          # bind to all interfaces
port = 5000
backlog = 5        # listen queue length
size = 1024        # max bytes read per connection


def main():
    """Accept TCP connections forever, printing the first chunk of each.

    Fixes over the original flat script: the listening socket is managed
    by a context manager (so it is closed on error/exit), the client
    socket is closed even if recv() raises, SO_REUSEADDR avoids bind
    failures on quick restarts, and the loop no longer runs at import time
    (main-guard idiom).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, port))
        s.listen(backlog)
        while True:
            client, address = s.accept()
            try:
                data = client.recv(size)
                if data:
                    print(data)
                    print(len(data))
            finally:
                client.close()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3417743 | <gh_stars>0
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: <NAME>, <NAME>, <NAME>
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
import pprint
import pkgutil
import sys
import numpy as np
from numpy.polynomial.polynomial import Polynomial
from math import factorial
from pymor.core import logger
from pymor.operators.basic import OperatorBase
from pymor.vectorarrays.numpy import NumpyVectorArray, NumpyVectorSpace
class TestInterface(object):
    """Common base class for pyMOR's dynamically generated interface tests."""
    logger = logger.getLogger(__name__)

# NOTE(review): this self-assignment is a no-op; presumably a leftover from
# an earlier aliasing refactor -- confirm before removing.
TestInterface = TestInterface
def _load_all():
    """Import every pymor submodule (except the playground), collecting failures.

    Both walk errors (via the ``onerror`` callback) and import errors are
    accumulated in ``fails``; if any occurred, the full list is logged and
    an ImportError is raised.
    """
    import pymor

    ignore_playground = True
    fails = []
    # The lambda is pkgutil's onerror hook: record packages that could not
    # even be walked.
    for _, module_name, _ in pkgutil.walk_packages(pymor.__path__, pymor.__name__ + '.',
                                                   lambda n: fails.append((n, ''))):
        if ignore_playground and 'playground' in module_name:
            continue
        try:
            __import__(module_name, level=0)
        except (TypeError, ImportError) as t:
            fails.append((module_name, t))
    # Idiom fix: truthiness test instead of `len(fails) > 0`.
    if fails:
        logger.getLogger(__name__).fatal('Failed imports: {}'.format(pprint.pformat(fails)))
        raise ImportError(__name__)
def SubclassForImplemetorsOf(InterfaceType):
    """A decorator that dynamically creates subclasses of the decorated base test class
    for all implementors of a given Interface
    """
    # Best-effort: import everything first so InterfaceType.implementors()
    # can see every implementation; failures are already logged inside.
    try:
        _load_all()
    except ImportError:
        pass
    def decorate(TestCase):
        """saves a new type called cname with correct bases and class dict in globals"""
        import pymor.core.dynamic
        # Skip abstract interface classes and the test helper classes themselves.
        test_types = set([T for T in InterfaceType.implementors(True) if not (T.has_interface_name()
                                                                              or issubclass(T, TestInterface))])
        for Type in test_types:
            cname = 'Test_{}_{}'.format(Type.__name__, TestCase.__name__.replace('Interface', ''))
            # Register the generated class in pymor.core.dynamic so pytest
            # collection can discover it by name.
            pymor.core.dynamic.__dict__[cname] = type(cname, (TestCase,), {'Type': Type})
        return TestCase
    return decorate
def runmodule(filename):
    """Run pytest on *filename*, forwarding CLI args; exit with its status."""
    import pytest
    sys.exit(pytest.main(sys.argv[1:] + [filename]))
def polynomials(max_order):
    """Yield ``(n, f, deri, integral)`` for the monomials x**n, n = 0..max_order.

    f        -- callable evaluating x**n (elementwise via numpy).
    deri     -- factory: deri(k) returns the k-th derivative of x**n as a
                callable (the zero function for k > n).
    integral -- the integral of x**n over [0, 1], i.e. 1/(n+1).

    Fixes: the original captured the loop variable ``n`` by reference
    (late-binding closure bug), so callables yielded earlier silently
    changed order once the generator advanced; ``n``/``k`` are now bound
    via default arguments.  Also replaced Python-2-only ``xrange`` with
    ``range``.
    """
    for n in range(max_order + 1):
        def f(x, n=n):
            return np.power(x, n)

        def deri(k, n=n):
            if k > n:
                return lambda _: 0
            return lambda x, n=n, k=k: (factorial(n) / factorial(n - k)) * np.power(x, n - k)

        integral = 1 / (n + 1)
        yield (n, f, deri, integral)
class MonomOperator(OperatorBase):
    """Operator applying a monomial/polynomial entrywise to 1-D vectors.

    Test helper: its Jacobian is again a MonomOperator of one lower order.
    """
    source = range = NumpyVectorSpace(1)
    type_source = type_range = NumpyVectorArray
    def __init__(self, order, monom=None):
        # NOTE(review): `monom if monom else ...` treats a falsy (zero)
        # polynomial like None -- presumably never hit in practice; confirm.
        self.monom = monom if monom else Polynomial(np.identity(order + 1)[order])
        assert isinstance(self.monom, Polynomial)
        self.order = order
        self.derivative = self.monom.deriv()
        self.linear = order == 1
    def apply(self, U, ind=None, mu=None):
        # Evaluate the polynomial entrywise on the raw data of U.
        return NumpyVectorArray(self.monom(U.data))
    def jacobian(self, U, mu=None):
        # The Jacobian of x -> p(x) applied entrywise is p'(x).
        return MonomOperator(self.order - 1, self.derivative)
    def apply_inverse(self, U, ind=None, mu=None, options=None):
        # NOTE(review): this computes 1/U elementwise, not the inverse of
        # the monomial map in general -- confirm intended usage.
        return NumpyVectorArray(1. / U.data)
| StarcoderdataPython |
6444487 | <filename>karesansui/gadget/host.py<gh_stars>10-100
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import web
import karesansui
from karesansui import config
from karesansui.lib.rest import Rest, auth
from karesansui.lib.utils import generate_uuid, string_from_uuid, \
uni_force, is_param, comma_split, uniq_sort, uri_split, uri_join
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_LENGTH, CHECK_ONLYSPACE, CHECK_VALID, \
CHECK_MIN, CHECK_MAX
from karesansui.lib.const import \
NOTE_TITLE_MIN_LENGTH, NOTE_TITLE_MAX_LENGTH, \
MACHINE_NAME_MIN_LENGTH, MACHINE_NAME_MAX_LENGTH, \
TAG_MIN_LENGTH, TAG_MAX_LENGTH, \
FQDN_MIN_LENGTH, FQDN_MAX_LENGTH, \
PORT_MIN_NUMBER, PORT_MAX_NUMBER, \
MACHINE_ATTRIBUTE, MACHINE_HYPERVISOR, \
USER_MIN_LENGTH, USER_MAX_LENGTH
from karesansui.db.access.machine import \
findbyhostall, findby1uniquekey, findby1hostname, \
new as m_new, save as m_save, update as m_update
from karesansui.db.access.tag import new as t_new, \
samecount as t_count, findby1name as t_name
from karesansui.db.access.notebook import new as n_new
def validates_host_add(obj):
    """Validate the "add host" form input found on ``obj.input``.

    Checks the machine name, the connection-type-specific fields
    (FQDN[:port] and unique key for "karesansui"; URI and optional
    credentials for "libvirt"), and the optional notebook title/value and
    tags.  Error messages are collected into ``obj.view.alert``.
    Returns True only when every present field validates.
    """
    checker = Checker()
    check = True
    _ = obj._
    checker.errors = []
    if not is_param(obj.input, 'm_name'):
        check = False
        checker.add_error(_('Parameter m_name does not exist.'))
    else:
        check = checker.check_string(
                    _('Machine Name'),
                    obj.input.m_name,
                    CHECK_EMPTY | CHECK_LENGTH | CHECK_ONLYSPACE,
                    None,
                    min = MACHINE_NAME_MIN_LENGTH,
                    max = MACHINE_NAME_MAX_LENGTH,
                    ) and check
    if not is_param(obj.input, 'm_connect_type'):
        check = False
        checker.add_error(_('Parameter m_connect_type does not exist.'))
    else:
        if obj.input.m_connect_type == "karesansui":
            # Karesansui-managed host: requires FQDN (optionally "FQDN:port")
            # and the application unique key.
            if not is_param(obj.input, 'm_hostname'):
                check = False
                checker.add_error(_('"%s" is required.') % _('FQDN'))
            else:
                m_hostname_parts = obj.input.m_hostname.split(":")
                if len(m_hostname_parts) > 2:
                    check = False
                    checker.add_error(_('%s contains too many colon(:)s.') % _('FQDN'))
                else:
                    check = checker.check_domainname(
                                _('FQDN'),
                                m_hostname_parts[0],
                                CHECK_EMPTY | CHECK_LENGTH | CHECK_VALID,
                                min = FQDN_MIN_LENGTH,
                                max = FQDN_MAX_LENGTH,
                                ) and check
                    try:
                        check = checker.check_number(
                                    _('Port Number'),
                                    m_hostname_parts[1],
                                    CHECK_EMPTY | CHECK_VALID | CHECK_MIN | CHECK_MAX,
                                    PORT_MIN_NUMBER,
                                    PORT_MAX_NUMBER,
                                    ) and check
                    except IndexError:
                        # when reach here, 'm_hostname' has only host name
                        pass
            if not is_param(obj.input, 'm_uuid'):
                check = False
                checker.add_error(_('"%s" is required.') % _('Unique Key'))
            else:
                check = checker.check_unique_key(
                            _('Unique Key'),
                            obj.input.m_uuid,
                            CHECK_EMPTY | CHECK_VALID
                            ) and check
        if obj.input.m_connect_type == "libvirt":
            # Direct libvirt URI connection: URI required, credentials optional.
            if not is_param(obj.input, 'm_uri'):
                check = False
                checker.add_error(_('"%s" is required.') % _('URI'))
            else:
                pass
            if is_param(obj.input, 'm_auth_user') and obj.input.m_auth_user != "":
                check = checker.check_username(
                            _('User Name'),
                            obj.input.m_auth_user,
                            CHECK_LENGTH | CHECK_ONLYSPACE,
                            min = USER_MIN_LENGTH,
                            max = USER_MAX_LENGTH,
                            ) and check
    if is_param(obj.input, 'note_title'):
        check = checker.check_string(
                    _('Title'),
                    obj.input.note_title,
                    CHECK_LENGTH | CHECK_ONLYSPACE,
                    None,
                    min = NOTE_TITLE_MIN_LENGTH,
                    max = NOTE_TITLE_MAX_LENGTH,
                    ) and check
    if is_param(obj.input, 'note_value'):
        check = checker.check_string(
                    _('Note'),
                    obj.input.note_value,
                    CHECK_ONLYSPACE,
                    None,
                    None,
                    None,
                    ) and check
    if is_param(obj.input, 'tags'):
        for tag in comma_split(obj.input.tags):
            check = checker.check_string(
                        _('Tag'),
                        tag,
                        CHECK_LENGTH | CHECK_ONLYSPACE,
                        None,
                        min = TAG_MIN_LENGTH,
                        max = TAG_MAX_LENGTH,
                        ) and check
    obj.view.alert = checker.errors
    return check
class Host(Rest):
    """REST controller for managed hosts: GET lists them, POST registers one."""
    @auth
    def _GET(self, *param, **params):
        """Render the host list view (or just the input form in input mode)."""
        if self.is_mode_input():
            return True
        else:
            self.view.hosts = findbyhostall(self.orm)
            self.view.application_uniqkey = karesansui.config['application.uniqkey']
            # has_key(): Python 2 idiom -- optional job_id query parameter.
            if self.input.has_key('job_id') is True:
                self.view.job_id = self.input.job_id
            else:
                self.view.job_id = None
            return True
    @auth
    def _POST(self, *param, **params):
        """Register a new host, or revive a soft-deleted one with the same key."""
        if not validates_host_add(self):
            return web.badrequest(self.view.alert)
        if self.input.m_connect_type == "karesansui":
            # Reject duplicate unique keys (unless it is this application's
            # own key) and duplicate hostnames.
            uniq_key_check = findby1uniquekey(self.orm, self.input.m_uuid)
            if uniq_key_check is not None and config['application.uniqkey'] != self.input.m_uuid:
                return web.conflict(web.ctx.path)
            hostname_check = findby1hostname(self.orm, self.input.m_hostname)
            if hostname_check is not None:
                return web.conflict(web.ctx.path)
        # notebook
        note_title = None
        if is_param(self.input, "note_title"):
            note_title = self.input.note_title
        note_value = None
        if is_param(self.input, "note_value"):
            note_value = self.input.note_value
        _notebook = n_new(note_title, note_value)
        # tags: reuse existing tag rows, create new ones as needed.
        _tags = None
        if is_param(self.input, "tags"):
            _tags = []
            tag_array = comma_split(self.input.tags)
            tag_array = uniq_sort(tag_array)
            for x in tag_array:
                if t_count(self.orm, x) == 0:
                    _tags.append(t_new(x))
                else:
                    _tags.append(t_name(self.orm, x))
        name = self.input.m_name
        if self.input.m_connect_type == "karesansui":
            uniq_key = self.input.m_uuid
            attribute = MACHINE_ATTRIBUTE['HOST']
            if is_param(self.input, "m_hostname"):
                hostname = self.input.m_hostname
        if self.input.m_connect_type == "libvirt":
            # Libvirt connections get a freshly generated UUID; optional
            # credentials are folded into the stored URI.
            uniq_key = string_from_uuid(generate_uuid())
            attribute = MACHINE_ATTRIBUTE['URI']
            if is_param(self.input, "m_uri"):
                segs = uri_split(self.input.m_uri)
                if is_param(self.input, "m_auth_user") and self.input.m_auth_user:
                    segs["user"] = self.input.m_auth_user
                if is_param(self.input, "m_auth_passwd") and self.input.m_auth_passwd:
                    segs["passwd"] = self.input.m_auth_passwd
                hostname = uri_join(segs)
        # Look for a soft-deleted record with the same key to revive.
        model = findby1uniquekey(self.orm, uniq_key, is_deleted = True)
        if model is None:
            host = m_new(created_user=self.me,
                         modified_user=self.me,
                         uniq_key=uni_force(uniq_key),
                         name=name,
                         hostname=hostname,
                         attribute=attribute,
                         hypervisor=MACHINE_HYPERVISOR['REAL'],
                         notebook=_notebook,
                         tags=_tags,
                         icon=None,
                         is_deleted=False)
            m_save(self.orm, host)
            return web.created(None)
        else:
            model.name = name
            model.hostname = hostname
            model.uniq_key = uniq_key
            model.notebook.title = note_title
            model.notebook.value = note_value
            model.tags = _tags
            model.is_deleted = False
            m_update(self.orm, model)
            return web.created(None)
# web.py routing table: dispatch /host(.html|.part) requests to Host.
urls = ('/host/?(\.html|\.part)$', Host)
| StarcoderdataPython |
4959746 | <filename>scripts/hello_world.py
# -*- encoding: utf-8 -*-
# Emit the canonical greeting to stdout.
message = 'Hello world!'
print(message)
| StarcoderdataPython |
363157 | <reponame>SqrtMinusOne/ERMaket_Experiment<gh_stars>0
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.14.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\xe8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x01\x9d\x49\x44\x41\x54\x68\x81\xed\xd9\xcf\x2b\
\xc3\x71\x1c\xc7\xf1\xe7\x87\xef\x58\x4d\xb6\x34\x35\xbb\x28\x72\
\x30\x7f\x80\xe3\x4e\xfe\x84\x59\x29\x5c\xdc\x39\xf8\x55\x8a\x22\
\x37\xe2\x0f\x70\x5b\x89\xf6\x27\xb8\x70\x76\xde\x94\xc5\x41\x19\
\x45\xf3\xa5\xc9\x8f\xd1\xc7\x6d\xa6\xf6\xe1\xcb\xcc\x67\xe5\xfd\
\x38\x7e\x3f\xef\x77\x7b\xbd\xfa\x7e\xfa\x5e\x06\x42\x08\x61\x93\
\x32\x1d\x24\x12\x89\x16\xd7\x09\xad\xa0\xf4\x28\xd0\xf5\x87\x99\
\x2a\xe5\x41\xa7\x42\xa5\xbb\xc5\x74\x3a\xfd\x5c\x6d\xc0\x31\x6d\
\xba\xbe\xf6\x65\xd0\xb3\xf5\xcb\xe6\x49\x14\xd4\x9c\xeb\x6b\x07\
\x98\xaf\x36\xd0\x64\xde\x55\x63\xf5\xc9\xf4\x7d\x4a\xa9\x71\xd3\
\x99\xf1\x0d\x50\x71\x6d\x3a\xbb\x9f\xe8\x8f\xbb\x28\x20\xb3\x1f\
\xe4\xfa\xcc\x0f\xc0\xe0\x40\x91\xc9\xe4\x25\x68\xd8\xd8\x89\x70\
\x78\xd4\x56\x5e\x5e\x5b\x98\xa6\xbf\xaf\xe7\xc7\xa1\xb3\xb9\x13\
\x66\x56\xd7\x01\xd0\x9a\x88\x69\xee\x93\x37\xf0\x2e\x16\x77\xf1\
\x07\x5e\x69\x0d\xbc\x12\x8b\xdf\x96\x9f\x4f\x0e\x5f\x12\x0e\x96\
\x08\x87\x4a\x4c\x25\x2f\x3e\xec\xd4\x12\x1e\x20\xd6\xd7\xeb\x69\
\xce\x53\x81\x46\xe6\xa9\x40\x66\x3f\xc8\xd3\x7d\x33\x8f\xc5\x66\
\xb2\x07\xa1\xf2\xf3\x8d\x9d\x08\xd7\xae\x8f\xab\x1b\x87\xcd\xdd\
\xe8\x87\x9d\x6c\xee\xa4\xa6\x60\x99\x63\x6f\xfb\xc6\xcf\xe8\xd0\
\xc8\x84\xae\x29\xc1\x2f\xdb\xdb\xde\xaa\x9a\xf5\x7f\x5c\xa1\x46\
\x66\xbc\x42\x7b\xa7\xa5\x86\xba\x42\x43\x3d\x3e\xb9\x42\x0d\x49\
\x0a\xd8\x26\x05\x6c\x93\x02\xb6\x49\x01\xdb\xa4\x80\x6d\x52\xc0\
\x36\x29\x60\x9b\x14\xb0\x4d\x0a\xd8\x26\x05\x6c\xfb\xac\x40\xfe\
\xcf\x52\x7c\x45\x73\x6e\x3a\x32\x16\x50\x9a\x54\x7d\xd2\xfc\x40\
\x93\x39\x8b\xf1\xff\x81\xc2\xa3\xb3\xd8\xe1\x7f\x41\x2b\x46\x81\
\xa8\x69\xae\xce\xf2\x4a\x93\x2a\x3c\x38\x4b\x96\x7e\x5f\x08\x21\
\xbe\xf0\x06\x98\xe5\x5c\x2d\x2c\xfd\xaa\x7f\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x7d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x01\x32\x49\x44\x41\x54\x68\x81\xed\xd9\x41\x4a\
\x03\x41\x10\x05\xd0\x5f\xd3\x01\x43\x20\x59\xe8\xda\x33\xe8\xc6\
\x30\x27\x88\xb7\xd1\xa0\xab\x1c\xc2\x98\x7b\x48\x0e\x20\x78\x82\
\xac\xb2\x0d\x2e\xcd\xd6\x80\x09\x08\x82\xd3\xe5\x46\x34\x06\xab\
\x49\x32\x8e\xc5\xc0\x7f\xbb\xa1\xba\xe9\xfa\xf4\x0c\x74\x33\x00\
\x11\x91\x27\x49\x15\x6f\x26\xaf\xc7\x01\xe1\x16\x40\x0f\x40\xfb\
\x7f\x5a\xfa\xb2\x12\xc5\xfd\xbb\x14\x57\xd7\xdd\xd6\x93\x35\xc8\
\x0c\xf0\xd9\xfc\x14\xc0\x61\x25\xed\x6d\xef\x39\x8b\xc5\xe9\x45\
\xde\x9a\xff\x56\xcc\xac\x59\x0d\x0d\x43\xf8\x37\x0f\x00\x47\x31\
\x84\xa1\x55\x34\x03\xa8\xa0\x57\x4d\x3f\x7b\x50\x9c\x5b\xa5\x46\
\x62\x5a\x67\xfd\xa1\xdf\x3d\x48\x7e\x2f\x9b\x46\x93\x37\x2d\x39\
\x3f\xe2\xfb\x15\xef\x58\xe3\xcc\x1d\xa8\x0b\x06\xf0\xc6\x00\xde\
\x18\xc0\x1b\x03\x78\x63\x00\x6f\xe6\xf9\x64\xf3\x2c\xe3\xcd\x3a\
\x4b\xd5\x7e\x07\x18\xc0\x5b\xea\x3e\xf0\x83\xf3\x7d\xc0\x54\xfb\
\x1d\x60\x00\x6f\x0c\xe0\x8d\x01\xbc\x31\x80\x37\x06\xf0\x96\x3a\
\x0b\xad\xb0\xf6\x4f\xa0\xec\xfd\xa0\xcc\x7c\x01\x5e\xac\x5a\x6a\
\x07\x1e\xf6\x5d\xf0\xaf\x69\xa2\x17\x3b\x80\xe8\x00\xc0\xa2\x8a\
\x86\x76\xb4\x90\xa8\x03\xab\x68\x06\xe8\x9f\x35\x67\x59\x2c\x4e\
\x20\xb8\x03\xb0\xac\xa4\xb5\xb4\x25\x80\xb1\x44\xcd\x2f\xf3\xe6\
\xa3\xc3\xfa\x44\x44\x5b\xf8\x00\x65\xf0\x47\xc7\xfb\xa1\xd5\xf7\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x4b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x01\x00\x49\x44\x41\x54\x68\x81\xed\xd5\xad\x0e\
\xc2\x30\x10\x07\xf0\x1b\xb0\x04\x07\x16\x8b\xdb\x03\x60\x00\xbb\
\x67\xc0\xa1\x09\x0f\x44\xd0\x38\x5e\x01\x2c\x73\xf8\x49\x24\x76\
\x72\xc9\x20\x45\x11\xc2\xe8\xd6\x2b\xfb\x28\xa5\xff\x9f\x5b\xdb\
\x34\x77\xed\xf5\x46\x04\x00\x00\x15\x78\xb2\xc1\xc3\x25\x8b\x3c\
\x41\xd3\xb6\x83\x51\x88\xc2\xb1\x3f\xcf\x0f\x76\x64\x2b\x7f\x30\
\x78\x22\xa2\x99\x6c\x50\x9a\x80\x4d\x90\x80\x69\x48\xc0\xb4\x5e\
\x13\x9b\x4e\x46\x5d\x1a\xf4\xa5\x1d\x9a\x2d\x49\x05\x9d\xaf\x77\
\xe5\x3a\x56\x02\xcb\x6d\x5c\x3a\xbf\x5b\x05\x6f\xdf\x55\x83\x27\
\x22\x1a\x32\xf7\x70\xa3\x84\xf2\x27\xac\x92\xa4\x82\x7d\x82\x65\
\x7b\x70\x34\xf2\x06\x38\xb5\x5b\x17\xeb\x4b\xc8\xfa\x04\xd0\x46\
\x89\xd0\x46\x2b\x41\x1b\x95\x41\x1b\xd5\x80\x04\x4c\x63\xbd\x81\
\x7d\x9c\x15\xce\x2d\x02\xbf\xb6\x60\xbe\xe1\xc6\x0d\x3c\x6d\xc2\
\xd7\xf2\xf5\xf1\xf6\x31\xaf\xfa\xe1\xe9\xe2\xb4\x6f\xb7\x6e\x40\
\x45\xf7\x87\x57\x07\xad\x04\x64\x65\x63\x9a\x1b\x25\x64\xba\x55\
\x96\xb1\xfe\x06\x90\x80\x69\x45\x09\x44\xad\x46\xc1\x73\x32\x1d\
\x00\x00\xc0\x1f\x7a\x00\x60\xfb\x3c\x2b\xfc\x7a\xa0\x44\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\x06\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x02\xbb\x49\x44\x41\x54\x68\x81\xed\x97\x4d\x68\
\x13\x41\x14\xc7\xff\xb3\xa9\x24\x31\xc1\xc4\xd6\xd6\x92\xc6\x94\
\xa6\x29\x44\x5a\x6a\x0f\x3d\xd4\xaa\x04\x41\x3d\xa8\x01\x7b\xf2\
\xa0\x82\x82\x17\x3d\x78\x34\x17\x2f\x82\xa7\x9e\x9a\x93\x07\x05\
\x3d\x88\xd0\x53\x8b\x14\x15\x04\x25\x6a\xa4\x48\x91\x26\x44\x63\
\x37\x68\x4d\xda\xa6\xa1\x1f\x69\xa9\x49\x8d\x1f\xdd\xf1\xd0\x6e\
\xc8\x77\xcc\x34\xdd\x24\xb8\xbf\xdb\xcc\xdb\x37\xf3\x7f\x33\xef\
\xbd\xdd\x05\x64\x64\x64\x64\x2a\x09\xc9\x67\x38\x79\xe1\xaa\x1b\
\x14\xfd\x52\x8a\x29\x80\xfb\xc5\xe3\xfb\x47\x73\x19\xb8\xbc\x2e\
\xd5\x23\x1e\x00\x8e\xe4\x33\xd4\x15\xf3\xbc\x79\xe7\x6e\x79\xa5\
\x94\xc8\xe0\xad\x6b\x05\xed\xf9\x6f\xa0\x46\x90\x03\xa8\x34\x35\
\x1f\x40\xd1\x22\x2e\x17\x9e\x50\x0c\xcf\x3c\x51\x04\x97\x13\x00\
\x00\xe3\x5e\x25\x6c\x56\x1d\xfa\x3b\x74\xe0\xb8\xbc\xdd\xbc\x28\
\x92\x04\x30\x32\xb1\x88\x91\x89\xa5\xb4\xb9\xa9\xf9\x75\x4c\xcd\
\xaf\xe3\x2d\xbf\x86\x1b\xa7\x5a\xb0\x5b\xa9\x60\x5a\x7b\xc7\x53\
\xc8\x37\x1b\xcf\x12\x9f\xca\xa7\xb9\x38\xee\xb9\xe6\x99\xd7\x67\
\xbe\x81\xc9\x50\x0c\x0f\x5e\x47\xb0\x12\xfb\x5d\x92\xdf\xf9\xbe\
\x26\x9c\xe9\x69\x60\xdd\x36\x0b\xe6\x00\x1e\xba\x22\x58\x89\x97\
\x26\x1e\x00\x6c\x56\x3d\xeb\x96\x39\xa9\xf9\x2e\xc4\x1c\xc0\x15\
\x5b\x33\xea\x35\xbb\x4a\xf6\x73\x7d\x5e\x65\xdd\x32\x27\xcc\x29\
\x74\xc8\xa4\xc5\xd0\x25\x4b\xd1\xe7\x7c\xb3\x71\x0c\x8e\x85\x92\
\xe3\xe1\xf1\x05\x0c\x8f\x2f\xa4\x3d\xd3\x67\xd1\xe1\xfa\x09\x03\
\x93\x8e\x1d\x4f\xa1\x2e\xa3\x06\x03\xbd\x8d\x79\xed\x9d\x46\x0d\
\x2e\x1f\xdb\xcf\xbc\xbe\x24\xef\x81\x81\xde\x7d\x68\x6f\x52\xe1\
\xb9\x37\x8a\x6f\x4b\x09\x50\x00\x07\xea\x95\xb0\x59\xf5\x38\x6c\
\xd9\x53\xfd\x2f\x32\x00\xe8\x36\x69\xd1\x6d\xd2\x96\x7d\xdd\xff\
\xb7\x0b\x55\x0b\x72\x00\x95\xa6\x68\x11\x17\xfb\x27\xad\x34\x35\
\x7f\x03\x39\x1b\xb0\xc7\x1f\xb0\x0b\x84\x3c\x91\x5a\x4c\x21\x08\
\x25\xf6\x9e\x83\xed\x63\x59\xf3\x99\x13\xef\x66\x66\xd4\xea\xf8\
\x4f\x1f\x00\xb3\x24\xca\xfe\x9d\xaf\xab\x4a\x45\xe7\xf1\xb6\xb6\
\x44\xea\x64\x56\x0d\xa8\x63\x09\x07\x08\x31\x03\x40\x30\x1c\xc1\
\xa3\xd1\xa7\x52\x09\xcc\xc9\xc5\x73\xa7\xd1\x6a\x68\x06\x00\xb3\
\xfe\x97\xe0\x00\x70\x3b\xd5\x9e\x56\x03\x5e\x9e\x37\x13\x42\x1c\
\xe2\xf8\xcd\xfb\x0f\x92\x88\x2c\x44\x9a\x06\x4a\x1d\x5e\x9e\x4f\
\xcb\x8c\xb4\x00\x36\x28\x71\x52\x40\x05\x6c\x9e\x7e\x30\x1c\x91\
\x42\x63\x41\x32\x74\xa8\xff\x08\x8a\xa1\x54\x7b\x32\x00\x8f\x3f\
\x60\x07\x25\x67\xc5\x71\x35\x9c\xbe\x48\xaa\x16\x02\x6a\x9f\xf4\
\x7f\x49\xea\xe4\x80\xcd\xc2\x15\x08\x49\x46\x56\x2d\xa7\x2f\x92\
\xa9\x87\x12\xea\x7c\x35\x3d\xad\x02\xb6\x02\x50\xc7\x12\x0e\xa4\
\x74\x9d\x6a\x3a\x7d\x91\x0c\x4d\x62\x41\x83\x78\x79\xde\x2c\x08\
\xdc\x47\x31\xf7\x4b\x25\x10\x9c\x43\x78\x71\x99\x49\x94\xa1\xb1\
\x01\x1d\xad\x2d\x4c\xbe\x00\x7e\x28\x38\xa1\xab\x6e\x83\x12\x27\
\x18\xc5\x03\x40\x74\xed\x3b\xab\xeb\xb6\x7c\xb1\x55\xd0\x64\xf4\
\xa5\x9b\x6e\x67\x95\x4a\x53\xf3\xdf\x42\x32\x32\x32\x32\x95\xe5\
\x2f\xc1\x1c\xd6\xeb\xcc\xc1\x12\x27\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x01\x87\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x01\x3c\x49\x44\x41\x54\x68\x81\xed\x98\x31\x4a\
\x03\x41\x14\x40\xdf\xcc\x6c\x11\x36\x1b\x05\x15\x12\x24\x8d\x78\
\x00\x8b\x14\x39\x82\x85\x97\xd0\x63\xa4\x52\x4f\xe0\x11\xb4\xb0\
\x12\xc1\xc6\x46\xf4\x12\x16\x82\x88\xbd\x20\x28\x18\x10\x53\x84\
\x8c\x45\xac\x76\x56\x98\x5d\x64\xfe\x82\xff\x95\x7f\x96\xe1\xbd\
\xe5\x6f\xb3\xa0\x28\x8a\xa2\xfc\x67\x4c\xd5\xd0\x9f\x33\x64\xce\
\x09\xb0\x0b\xf4\xa2\x6e\xf2\x1c\x9b\x03\x8e\xfe\x4e\x2d\x8e\x20\
\xe0\x47\xfe\x1e\x58\xab\x7d\x9b\x40\x84\x0d\x26\xcb\x37\x5f\x5f\
\x1e\xc0\x70\xe8\x4f\xa5\x03\x96\x6b\xd3\x9c\xc4\x11\xe1\x0a\x9d\
\xe1\xc9\x32\x18\xf4\xa1\xe8\x82\xad\x6a\x4c\x88\x5f\x78\xbe\x66\
\x2f\x7c\xce\xf6\xcd\xde\xeb\x6d\xf9\x38\xb4\xcb\x32\xd8\xde\x82\
\x95\x9e\xbc\x3c\x80\xb1\x86\x3c\xdf\x64\x7d\xf5\xc6\x5f\x6f\x8c\
\xca\xc7\xa1\xe1\xa0\x0f\xce\x25\x71\xab\x85\x75\x86\x22\xbf\x0a\
\xc6\xc1\x83\x45\x37\x89\x4f\x23\x3a\x9d\x61\x79\x14\x06\xb4\x61\
\x6d\x7e\xc3\xda\xe0\x9b\x6d\xb1\x6d\x1c\x1a\x20\x8d\x06\x48\xa3\
\x01\xd2\x68\x80\x34\x1a\x20\x8d\x06\x48\xa3\x01\xd2\x68\x80\x34\
\x1a\x20\x4d\x55\xc0\x34\xb9\x45\x3c\x1f\xe5\x41\x55\xc0\x5d\x02\
\x91\x66\x78\x22\x7e\x6c\x79\x26\xc0\x5b\x0a\x9f\x9a\xbc\x93\xb9\
\x49\x79\x18\x04\x98\xf1\xe3\x13\xf3\xc5\x0e\x98\x0b\xda\xb1\x4e\
\x53\x3c\x97\x38\x37\x36\xa3\x87\x67\x69\x19\x45\x51\x14\xa5\x5d\
\x7c\x03\x72\xdb\x37\xb7\x81\xa2\x67\xb4\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\x49\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x06\xfe\x49\x44\x41\x54\x68\x81\xcd\x9a\x6d\x70\
\x54\xd5\x19\xc7\x7f\xcf\xb9\xc9\x66\x37\x09\x21\xef\x3a\x60\xc2\
\x42\xb4\x7c\x70\xec\x80\x50\xac\xca\x0c\xc5\x0e\xa1\xad\x25\xd9\
\xd0\x64\xa4\x56\x96\x08\x6d\xa1\xc0\xe0\x58\x2a\xe3\xe0\xd4\x61\
\xb4\xe2\xb4\x56\xad\xb6\x50\x90\xb6\x84\x30\xd4\x62\xda\x00\x89\
\x2d\x92\x56\xa8\x53\x50\x5b\x15\xe8\xcb\x87\x62\x81\xbc\xd4\x74\
\x6c\xc8\x0b\xe6\x3d\xbb\xb9\xf7\xf4\x83\x64\x93\xcb\x66\x93\xbb\
\x2f\x60\xff\x9f\xee\x7d\xce\x73\xfe\xcf\xff\x39\xfb\xdc\x73\xce\
\x3d\x77\x85\x04\x43\x83\xb4\x2e\xff\xda\xf4\xa0\x95\x9c\xab\x2c\
\x9d\x06\x60\x29\xe9\x4b\x56\xc1\xf6\xe9\xb5\x07\x5a\x05\x74\x22\
\xe3\x49\xbc\x04\xff\x2c\x59\x3d\xc5\xad\xcc\x62\x34\x8b\x81\xbb\
\x81\xd9\x80\x27\x82\xfb\x00\x70\x0e\x38\x85\x96\xe3\xa9\x29\x7d\
\x0d\xf9\x35\x35\xbd\xf1\xc4\x8f\x39\x81\xa6\x32\xff\x9d\x98\xb2\
\x1e\xa1\x0c\x48\x8b\x91\xa6\x0f\x74\xad\xb6\xd4\xce\x99\xf5\x55\
\x6f\xc7\x42\x10\x75\x02\x8d\xcb\x2a\x3f\x2b\x4a\x6f\x07\x16\xc7\
\x12\x70\x02\xbc\x6e\x89\xde\x3a\xeb\x70\xf5\x5f\xa2\xe9\xe4\x38\
\x81\x0b\x15\xdf\x9c\x6a\x04\x06\x9f\x01\x59\x03\xa8\xa8\xe5\x39\
\x83\x25\xc8\x9e\x40\x20\xb8\xe5\x96\xa3\x07\xba\x9d\x74\x70\x94\
\x40\xb3\x6f\xd5\x3c\xad\x39\x08\x14\xc5\x25\xcf\x31\x74\xb3\xb6\
\xd4\x0a\x27\x65\x35\xe9\x48\x36\x97\xfa\xcb\xb5\xe6\x14\xd7\x4d\
\x3c\x80\xcc\x10\xa5\x4f\x34\x95\xfa\xcb\x26\xf3\x9c\x30\x81\xa6\
\x92\xca\xd5\x1a\xf9\x15\x90\x92\x30\x6d\xce\xe1\x06\xa9\x69\x2c\
\x5d\xf5\xe0\x44\x4e\x11\x4b\xa8\x71\x99\xbf\x54\x94\xfc\x06\x30\
\x12\x2e\x2d\x3a\x98\x82\x5e\x31\xe3\x48\xf5\xaf\xc7\x6b\x1c\x37\
\x81\x96\x65\x95\x9f\xb1\x94\xfe\x13\x9f\xcc\xc8\x8f\x87\x41\xb4\
\xbe\xcb\x5b\x57\x7d\xe6\xea\x86\xb0\x04\x1a\x7d\x95\x99\x62\xe9\
\xf7\x10\x66\x5d\x1f\x6d\x8e\x71\xde\x74\xa5\xcc\x2f\xaa\x79\xe9\
\xa3\xb1\xc6\xb0\x67\x40\xb4\xf5\x83\xff\x43\xf1\x00\x37\x1b\x43\
\x43\x4f\x5f\x6d\xb4\xfd\x02\x57\x16\xa9\x53\x38\x9c\xe7\x93\x0b\
\xa6\x21\xc9\xc9\x04\x2e\x36\xc7\xa4\xc8\x55\xe4\x45\x0f\x0f\x13\
\x6c\xfe\xc0\x69\x17\x4b\x69\xeb\x8e\xc2\xba\xfd\xef\x8e\x18\x92\
\xc6\xb6\x5e\x59\x61\x1d\x89\xcf\x5c\xe1\x63\xea\x0a\x1f\x00\xbd\
\xc7\x4f\xd2\xf1\xe3\x9f\x83\x76\xb8\x4f\x13\x21\x77\xd3\xd7\x49\
\x5b\x7c\x37\x00\x1f\x1d\x3c\xc2\xe5\x97\x0f\x39\xe9\xa9\xb4\xa8\
\xa7\x80\xa5\x21\xc3\xc8\x45\x53\x99\xff\x4e\xa2\xd8\x1e\x64\xf8\
\xbe\x10\xba\x4e\xbf\x67\x21\x99\x0f\x94\x3b\xed\x4a\xd6\xca\xf2\
\x90\x78\x80\x29\x25\x4b\x27\xf0\xb6\x43\x43\xf1\x45\x9f\x7f\xc1\
\xc8\xfd\xe8\x68\x9b\xb2\xde\x31\x0b\x30\xfc\xdf\x76\xdb\xfd\xd4\
\xaf\xdc\x4b\xfa\x92\x45\x93\xf6\x4b\x5f\xb2\x88\x8c\xe5\xf7\x5e\
\xc5\x75\x29\x9a\xd0\x18\xa8\x90\x56\x05\xd0\x56\x51\x91\x8e\xe0\
\x8b\x86\xa4\x63\xe7\x5e\x74\x20\x60\xb3\x65\xaf\xf3\xe3\x9e\x73\
\x6b\xc4\x3e\xee\x39\xb7\x92\xbd\xce\x6f\xb3\xe9\x40\x80\xce\x1d\
\x7b\xa3\x09\x8d\xd6\xba\xbc\xad\xa2\x22\x1d\xae\x24\xd0\x1f\x4c\
\x5d\x0a\xa4\x47\x43\x32\x74\xee\x02\xed\x3f\xda\x63\xab\x7b\x31\
\x0c\xf2\xb6\x6c\xc4\xe5\x2d\x08\xf3\x4f\x2e\x98\x46\xde\x23\x1b\
\x10\x63\xcc\xba\xa8\x35\xed\xcf\xef\x66\xe8\x5f\x17\xa3\x4a\x00\
\x48\xeb\x0f\x78\x96\x84\x12\x40\xcb\x3d\xd1\x32\x00\xf4\xbf\xf9\
\x0e\x5d\xd5\x35\x36\x9b\x4a\xf5\x90\xff\xdd\x6f\x63\xe4\x64\x85\
\x6c\x46\xd6\x54\xf2\x1f\xdf\x8c\x4a\x4b\xb5\xf9\x76\xed\x7b\x85\
\xfe\xb7\xde\x8b\x25\x34\x1a\xf9\x1c\x84\x9e\x01\x7d\x57\x4c\x2c\
\x40\xf7\xa1\xdf\xd1\x73\xf4\x75\x9b\xcd\xc8\xc9\x22\x7f\xeb\x43\
\x88\x3b\x05\x71\xb9\xc8\x7b\x74\x13\x49\x79\x39\x36\x9f\xde\xdf\
\xbf\x41\xf7\xe1\xa3\xb1\x86\x45\x84\x85\x00\x49\x7a\xdb\x36\xd5\
\x7c\xa6\xf1\x53\x31\x33\x01\x9d\x3f\xfb\x25\x49\x37\xe6\xe3\x99\
\x7b\x5b\xc8\xe6\x2a\xf2\x92\xfb\xf0\x5a\x00\x52\x66\xdb\x37\xb2\
\x03\xa7\xff\x4e\xc7\xae\xea\x78\x42\x82\x66\xb6\x06\x91\xf3\xa5\
\x0f\x16\x24\x61\xb5\xc4\xc7\x06\xca\xe3\xe6\x86\xa7\x1f\x1b\xb7\
\xfe\xc7\x22\xd8\xd2\xca\x87\x8f\x7e\x0f\xab\x7f\x20\xde\x90\x18\
\xa6\xbe\x49\x29\x25\xd9\x71\x33\x01\xd6\xc0\x20\x6d\x4f\x3c\x8b\
\xd9\xd1\x19\xd1\xc7\xec\xba\x4c\xdb\x93\xcf\x25\x44\x3c\x00\x86\
\xca\x56\x06\xc3\x53\x12\xc3\x06\x66\xe7\x65\xda\x9e\x7a\x01\x3d\
\x38\x18\xd6\xa6\x87\x02\x5c\xda\xfe\x22\xc3\x97\x3a\x12\x15\x0e\
\x4b\xeb\x8c\x6b\xf5\x6e\x7b\xdd\xa0\x4c\x92\x7a\x12\x45\x66\x64\
\x67\x92\xff\xd8\x43\x88\xdb\x1d\xd6\x26\x29\x2e\xf2\xb6\x6e\x22\
\x29\x37\x21\x15\x0b\x80\x12\xe9\x56\x96\xa5\x23\x17\x6d\x34\x64\
\x1e\x37\xf9\x8f\x6f\xc6\xc8\x89\x2c\xd0\xc8\xca\xfc\x78\x3d\x48\
\x8d\x74\xee\x15\x25\x4c\xab\x53\x15\xcd\x9d\xd1\x0a\xf4\xc7\x45\
\x64\x18\xe4\x3e\xb2\x21\x6c\x06\xea\xff\xf3\x69\x06\xde\x3d\x6b\
\xb3\x25\x17\x4e\x27\x77\xf3\xb7\x40\xc5\x5d\xbd\x7d\x37\xbd\x5a\
\xfd\x1f\x25\xdb\xb6\x59\xc0\xfb\xf1\x30\x65\xaf\xb9\x1f\xcf\xed\
\xb7\xd9\x6c\x81\x0b\xcd\xb4\x3f\xbf\x9b\x4b\xcf\xec\x0c\xdb\x2a\
\x78\xe6\x7d\x9a\x9c\x75\xab\xe2\x09\x09\xc2\x39\x01\x3d\x32\x0c\
\xa7\x62\xe5\xc9\x28\xfb\x22\x53\xbe\xf4\x79\x9b\xcd\xec\xe8\xa2\
\x6d\xfb\x0b\xe8\xc1\xa1\xd1\xd9\xa7\xdd\x5e\xa9\xe9\xc5\x8b\xc8\
\x58\x56\x1c\x6b\x58\xb4\xe6\x24\x8c\xee\x85\x8e\xc7\x42\xe2\x99\
\x3f\x87\xac\x95\x15\x36\x9b\x35\x30\x48\xdb\x93\xcf\xd9\xd6\x03\
\xb3\xeb\x32\x6d\x4f\x3c\x1b\x36\xff\x67\xad\xfe\x2a\xa9\x77\xdc\
\x1e\x4b\x68\x10\x39\x01\x57\x12\x70\x0f\x9a\xc7\x80\xa8\x4e\x89\
\x53\x6e\x99\x45\xde\x96\xf5\xb6\x5a\xd6\xa6\x49\xfb\xf7\x7f\x42\
\xa0\xe9\xdf\x61\xfe\xc1\x96\x56\xda\x7f\xf8\x53\x30\xcd\xb1\x22\
\xc8\x7d\x78\x2d\xae\x22\x6f\xb4\xf2\xfb\xd2\x92\xfb\xfe\x10\x4a\
\xe0\xc6\x86\xfd\x7d\xa0\x1d\xbd\xd3\x8d\x20\x67\xe3\x6a\xc4\xe5\
\xb2\xd9\x3a\x77\x57\x33\x70\xf6\x1f\x11\xfb\x0c\x9c\xfe\x1b\x9d\
\x7b\x0e\xd8\x6c\xe2\x4e\x21\x67\x7d\x65\x34\xa1\x11\xa8\x19\x39\
\x96\x0f\x0d\x9f\xb6\xd4\xce\x68\x48\x92\x6e\xc8\xb5\xdd\x77\xd7\
\xfe\x96\xde\x86\x37\x26\xed\xd7\xf3\xda\x71\xba\x8f\xbc\x66\xe7\
\xca\xcf\x8d\xe0\x3d\x3e\xc4\x92\x90\xd6\x50\x02\x33\xeb\xab\xde\
\x46\xb4\xe3\x67\xa1\xbb\xae\x21\x74\xdd\x7b\xfc\x24\x5d\xfb\xc7\
\x3d\x38\x1b\x17\x5d\x55\x07\xe9\xfb\xe3\x9b\xa3\x5c\xf5\x0d\x13\
\x78\xdb\x21\xc8\xb1\xc2\xfa\xaa\x77\x46\xef\xc7\xe0\xa2\xcf\xbf\
\x40\x69\x79\x0b\x87\x27\x13\x2e\x6f\x01\x28\x15\xc7\xb1\xca\x0c\
\x74\xd0\x24\xd8\xe2\xf8\x58\xc5\xc4\x52\x0b\xbc\xf5\x7b\x4f\x8f\
\x18\xc2\x4e\xe6\x9a\x4b\x2b\x77\x69\xf4\xda\x98\x14\x5d\x7b\xec\
\xf0\x1e\xd9\xb7\x71\xac\x21\x6c\xa4\x3d\xae\xbe\xef\x80\x3e\x77\
\xfd\x34\x39\xc6\xf9\x60\x60\x78\xeb\xd5\xc6\xb0\x04\xf2\x6b\x6a\
\x7a\xb1\x8c\xfb\x81\xf0\x3d\xf1\x27\x87\x01\x4b\x19\xe5\xe3\x7d\
\xb5\x19\xb7\xd6\xbd\xf5\x7b\x4f\x2b\xcd\x7d\xc0\xf0\x35\x97\x36\
\x39\x4c\xb4\x3c\x30\xeb\xd0\x2f\xfe\x3a\x5e\x63\xc4\x87\xb5\xb0\
\x6e\x5f\x9d\x16\xf9\x06\x60\x46\xf2\xb9\x0e\x18\xd6\x5a\xaf\xf1\
\xd6\x55\xd5\x46\x72\x98\xf4\x1b\x59\xa3\xaf\xd2\x27\x5a\xbf\x0c\
\x84\x6f\xf2\xaf\x2d\x06\x94\x66\x45\x61\xdd\xbe\xba\x89\x9c\x1c\
\x7d\xe4\x6b\x2a\xf1\xcf\x45\xe4\x15\xe0\xe6\x84\x48\x9b\x1c\xef\
\x6b\x91\xfb\x66\x1e\xae\x3a\x3b\x99\xa3\xa3\xf9\xde\x5b\x57\x7d\
\x26\x18\x18\x9e\x27\xb0\x0b\xb0\xe2\x96\x17\x19\x26\xb0\x63\x50\
\x1b\xf3\x9d\x88\x87\x18\x3e\x74\xb7\x94\xac\x9c\x6f\x2a\xb5\x5d\
\x34\x4b\xa2\x96\x37\xa1\x10\x39\xa6\x2d\xd9\x3a\x76\x91\x72\xd6\
\x2f\x46\x5c\xf4\xf9\x17\x88\x96\x0d\x02\xcb\x89\xf2\x5c\x75\x0c\
\x7a\x44\xa4\x56\x4c\x76\x8c\xdd\x1e\x44\x83\xb8\xff\xec\xf1\x61\
\xf1\xca\xb4\x41\x8f\x14\x03\x8b\x41\x16\xf2\xf1\x9f\x3d\x52\x23\
\xb8\xf7\x21\x9c\xd3\x9a\x93\x88\x9c\x48\x31\x5d\x0d\xd3\xea\x5f\
\x8a\xeb\x75\x36\xee\x04\xae\x86\x06\xf9\xe0\xcb\xfe\x69\xc1\xe4\
\xa4\x5c\xc3\x34\xd3\x01\x4c\x74\x4f\xb2\x45\x47\xc1\xab\xd5\xad\
\x89\x8e\xf7\x3f\xee\x6f\x5b\x85\x56\x40\xa1\xea\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x16\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x01\xcb\x49\x44\x41\x54\x68\x81\xed\x97\x3b\x4a\
\x03\x51\x18\x85\xbf\x88\x16\xbe\x05\x1b\xc1\x4e\x0c\x31\x8a\xc1\
\xca\xd2\x80\xfd\x6c\xc1\x2d\x08\xba\x01\xb7\x60\xe1\x06\x2c\x74\
\x01\xa2\x41\xc5\xa0\x62\x67\x65\xb0\xf6\xfd\x2a\x2c\xc4\x46\x63\
\xe2\xa3\xb8\x33\x18\xc6\x9b\xc9\x9d\x9b\x99\x4c\x90\xff\x83\xd3\
\xe5\x3f\xf7\x30\xf3\xe7\x26\x07\x04\x41\x10\xfe\x2b\x63\xc0\x16\
\xb0\x0f\x4c\x24\x9c\x25\x14\xdd\xc0\x0a\xf0\x06\x7c\xbb\x2a\x03\
\xab\x40\x5f\x72\xb1\xcc\x70\x80\x0b\x7e\x83\xfb\x75\x0b\x2c\x24\
\x96\x2e\x80\x71\x60\x9b\xfa\xc1\xfd\x3a\x00\xb2\x89\x24\xf5\xd1\
\xc3\xdf\x75\x31\xd5\x07\x6a\xad\xfa\x5b\x1d\xda\xc3\x01\x2e\x03\
\x02\x9a\xea\x8e\x16\xaf\x55\x1a\xd8\x89\x20\xb8\x5f\x45\x60\x2a\
\xce\xe0\xde\xba\xbc\xc7\x10\x3e\xf6\xb5\x72\x80\xab\x18\x83\xfb\
\x75\x8f\x5a\xab\x54\xb3\xc1\x27\x51\xaf\xd6\x26\x44\x15\x58\x73\
\x55\xb5\xf4\x28\xba\x19\x42\xd3\x8b\x5a\x97\xb2\xe5\xc1\xa7\xc0\
\x6c\x8d\xdf\x0c\x70\x62\xe9\x55\x41\xad\xd5\x80\x69\x78\x07\xb8\
\xb1\x3c\xec\x19\x58\x04\x3a\x34\xbe\x29\xd4\x5a\x3c\x59\x7a\x3f\
\xd0\x60\xad\x32\xc0\x9e\xa5\xf9\x27\xb0\x0e\x0c\x1b\x3c\xa0\x21\
\xd4\x13\xad\x58\x9e\x75\x04\xe4\x74\x4f\xa7\x64\x69\x78\xac\x33\
\x34\x20\xe7\xce\xda\x9c\x59\x42\xf3\x26\xe6\x80\xaf\x10\x26\x8f\
\x44\x73\x53\xd8\xac\xec\x7c\x3d\xb3\x4d\x83\x61\xef\x4b\x35\xd8\
\x64\xf0\x5a\xc2\x5c\x1a\x1b\x41\x46\x23\xc0\x4b\xc0\xf0\x21\x30\
\x6d\x18\xca\xa6\x0f\x64\x80\xdd\x80\xf3\x5f\x81\xd1\x46\x26\xcb\
\x9a\xc1\x86\x37\x40\x0d\x51\xf4\x01\x07\xb8\xd6\xe4\x58\x32\x19\
\xee\x04\xce\xdc\x01\xef\xa7\xdd\xf4\x0e\x8e\xb2\x0f\xf8\xff\xba\
\x9c\x03\x5d\x86\xb3\xe4\x51\xaf\xde\xf4\x3f\x7b\x1a\x28\xd4\x09\
\xad\x53\xc1\x9d\x31\x21\xeb\x66\xc9\x1b\x7e\x3e\x14\xd2\x07\x90\
\x3e\x60\x8e\xf4\x81\x10\x92\x3e\x20\x7d\x40\xe3\x2b\x7d\xc0\x95\
\xf4\x01\x4f\xd2\x07\x6a\x68\xcb\x3e\x60\x43\x5b\xf4\x81\x28\x48\
\xac\x0f\x44\x49\x4b\xfb\x40\x9c\xc4\xda\x07\x04\x41\x10\x9a\xe3\
\x07\x0b\xd1\xe4\x2a\x58\xe0\xe0\xef\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x02\x9b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x02\x50\x49\x44\x41\x54\x68\x81\xed\x96\xbd\x6b\
\x14\x41\x18\x87\x9f\x59\x3c\x73\x1a\x4f\xa3\xa7\x28\x24\x06\x4c\
\x63\xa7\x20\xc4\x42\x62\x21\x7e\x21\xda\x88\x8d\xff\x80\xa0\x58\
\x89\x9a\xc6\xf6\x0a\x0d\x11\xcb\xe4\x02\x36\x82\xb5\x88\x62\x11\
\x0e\x11\x0e\x82\x85\xa0\xd8\x88\x45\x2c\x14\x04\xbf\xbd\x70\x9e\
\x27\xca\xbd\x36\xde\x9e\xb9\xdb\xef\x9d\x9d\xdd\x62\x7f\xd5\xf2\
\xce\xbc\xf3\x3e\x0f\xb3\xc5\x40\x9e\x3c\x79\x52\x8d\x54\xa9\xc8\
\x1c\xb3\x69\x73\x44\x8a\x54\xa9\xc8\x3c\x22\xf3\x48\x54\x09\xa5\
\x1b\x2a\x68\xa4\x4a\x05\xe1\xda\xea\x22\x37\xd5\x05\xae\x84\x39\
\x27\x15\x01\x47\x78\x7b\x31\x9c\x84\x71\x01\x4f\x78\x7b\x53\x70\
\x09\xa3\x02\x81\xe0\xed\xcd\xc1\x24\x8c\x09\x84\x82\xb7\x9b\xfc\
\x25\x8c\x08\x44\x82\xb7\x9b\xbd\x25\x12\x17\x88\x05\x6f\x1f\xe2\
\x2e\x91\xa8\x80\x16\x78\xfb\x30\x67\x89\xc4\x04\xb4\xc2\xdb\x87\
\x0e\x4a\x58\x5a\x07\x74\xe7\x24\x01\x0f\xa0\x90\xc1\x92\xe6\x24\
\x06\x0f\xb3\xea\x3c\x57\xfb\x8b\x5a\x05\x4c\xc3\x83\x46\x81\x34\
\xe0\x41\x93\x40\x5a\xf0\xa0\x41\x20\x4d\x78\x88\x29\x90\x36\x3c\
\xc4\x10\xc8\x02\x3c\x44\x14\xc8\x0a\x3c\x44\x10\xc8\x12\x3c\x84\
\x14\xc8\x1a\x3c\x84\x10\xc8\x22\x3c\x04\x14\xc8\x2a\x3c\x04\x10\
\xc8\x32\x3c\xf8\xbc\x46\xa5\x5e\x9c\x60\xb4\x70\x28\xee\x10\x87\
\x68\x81\x07\xbf\xe7\x74\x47\x8e\x52\xb2\x0e\x30\x56\x58\xd2\x31\
\xec\x5f\xb4\xc1\x83\x9f\x80\x52\x87\x01\x34\x4a\x68\x85\x07\x0f\
\x01\x11\x2c\x44\x7a\xbf\x4f\x7c\x09\xed\xf0\xe0\x75\x03\x4f\x0a\
\xfb\x80\xad\xab\x6a\xd1\x25\x12\x81\x07\x2f\x01\x4b\x1d\x71\xac\
\x87\x97\x48\x0c\x1e\xbc\x04\xba\xff\xbf\x53\x82\x4b\x24\x0a\x0f\
\x2e\x02\xf7\x6f\xef\x2e\x35\x5a\x43\x53\x9e\x9d\xfe\x12\x89\xc3\
\x83\x8b\xc0\x87\x4f\xe5\x73\x97\x1f\x1c\x2f\x7e\x6d\x15\xbd\xbb\
\xdd\x25\x8c\xc0\x83\x9b\x40\x73\xf8\xcc\xdb\x6f\x9b\x98\x7e\x78\
\x8c\x08\x12\xc6\xe0\xc1\x45\xe0\x5d\x63\xe3\x5e\x80\x50\x12\xa3\
\x85\xa7\x08\xd7\x4d\xc2\x83\xc3\x5b\xe8\xee\xad\xc9\x89\x3b\xcf\
\xf6\x2c\x77\xa4\xb7\x34\xbe\xb9\xc1\xcc\xa9\x45\xb6\xac\x6f\xf7\
\xed\x96\x2f\x28\xeb\x31\x42\x0d\x8b\x9a\x3a\xd8\x7e\x93\x34\x70\
\x7f\xd6\xf4\x17\x3e\xff\x28\x5e\xfc\x1f\x1e\x7a\x37\x71\xe3\xe4\
\xe2\xaf\xf2\x86\x76\x1d\x91\x1a\x96\xd4\x98\xfa\xfd\x5c\x29\x3a\
\xc6\x68\x1d\x32\x20\xf0\xb1\x59\x3a\xd1\xfd\xb6\x94\xb0\x73\xa4\
\xd1\x1a\x1b\x59\x79\x59\x5e\xf7\xf3\x5e\xfd\xf5\xae\x85\xd3\x97\
\x5e\x7c\x37\x8b\xe8\x9d\x01\x81\x95\xf6\xda\xed\xfb\xc7\xdf\xbf\
\xda\x36\xdc\x7c\xb4\x63\xe8\xcf\xdc\xd9\xe9\xa5\xe5\x34\xc0\xf2\
\xe4\xc9\x13\x2c\x7f\x01\x4b\x75\x05\x14\xf6\x97\x79\x4d\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x1e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x01\xd3\x49\x44\x41\x54\x68\x81\xed\xd7\xbb\x6a\
\x15\x51\x18\x05\xe0\xef\x88\x16\xde\x05\x1b\xd1\x32\x4a\xd0\x60\
\xb0\xb2\x12\x41\xfb\x79\x05\x5f\x41\xd0\x17\xf0\x15\x2c\x7c\x01\
\x0b\x7d\x00\x51\x50\x30\x5e\xb0\xb3\x32\x58\x7b\xbf\x15\x16\x92\
\x46\x63\xa2\xb1\x98\xd9\x10\x8e\x33\x93\x3d\xdb\x99\x33\x11\xf6\
\x82\x55\xfe\xeb\x5f\xec\xb3\xce\xec\xbd\xc8\xc8\xc8\xc8\xd8\x8e\
\x38\x87\x25\x2c\x8c\x6d\x04\x27\x70\x0f\x17\x62\x07\x76\xe2\x05\
\x36\xf0\x13\xd7\x71\x60\x10\x6b\xed\xd8\x83\x6b\xf8\x51\x79\x79\
\x89\x5d\x31\x83\x57\xab\x81\xcd\xfc\x84\x4b\x98\x0c\x60\xb4\x0e\
\x05\xde\xd6\xf8\xb8\xb2\xd5\xe0\x11\x7c\xab\x19\x0c\x7c\x8c\xd3\
\x43\x38\xae\x30\x8f\xfb\x2d\xfb\x57\x70\xac\x4d\xe0\x76\xcb\x70\
\xe0\x9a\x32\x56\x07\x7b\x34\xbe\x57\x19\x97\xd5\x88\xfd\xb7\x9a\
\x44\xce\xe3\x77\x84\x40\xe0\x67\xfd\xc4\xaa\xc0\xbb\x0e\x7b\x37\
\x70\x71\x5a\x64\x82\xe5\x8e\x22\x81\x4f\xb1\x98\x60\x7c\xb1\x9a\
\x4d\xd9\xb9\xac\xe6\xe0\xe6\xf1\x20\x51\xf0\x17\x6e\xe2\x70\x84\
\xf1\x43\xca\x08\xae\x25\xee\x7a\x62\x8b\x03\x4b\xf9\x49\x03\xbf\
\xe2\x32\x76\xd4\xe8\x4e\x94\x91\xfb\x92\xa8\xdd\xe9\x4b\xd8\xe5\
\x4f\x55\xc7\xe7\x38\xbb\x49\xef\x0c\x9e\x25\x6a\x85\x8f\x46\xd2\
\x5d\x74\x4a\x79\x23\xa7\x2c\x5e\xc7\x8d\x8a\xeb\x89\x1a\x4b\x95\
\x87\x7f\x46\x81\x37\x89\x26\x52\xf8\xd1\x00\x17\xe7\xf4\xd5\x3e\
\x04\xc3\xd3\x65\x7f\x9f\xc6\xa7\x11\x1e\x57\x7d\x9b\x9f\xf9\xe3\
\xb1\xc0\xeb\x1e\x8c\x7f\x50\xc6\x65\x14\x84\x58\x7d\x6f\x30\x37\
\x5a\x5c\xba\xf6\x81\xe3\xb8\x2b\xde\xfc\x43\x9c\x8c\xd4\x9e\x69\
\x1f\x28\xf0\xaa\xc5\xf8\x7b\xf1\x71\x19\xad\x0f\xec\xf6\x77\xac\
\x56\x95\x07\xb1\x2f\xd2\x7c\x72\x1f\x38\xaa\x7c\x73\x37\x9d\xe0\
\x23\xf1\xb1\x9a\xc3\x9d\x8a\x73\x91\x33\x0b\xd5\x8e\xa6\xfd\x2b\
\x95\xc7\x46\xe4\x3e\x90\x80\xdc\x07\x02\x72\x1f\x90\xfb\x40\x2d\
\x73\x1f\x20\xf7\x81\x68\xe6\x3e\xd0\x05\xb9\x0f\x98\x41\x5c\x62\
\x31\x64\x1f\x98\x29\xfa\xec\x03\xa3\xa1\x8f\x3e\xb0\x2d\x90\xd2\
\x07\x32\x32\x32\x32\xfe\x33\xfc\x01\xe5\x1e\xe4\x19\x12\x45\xf3\
\x0c\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xa5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x5a\x49\x44\x41\x54\x68\x81\xed\xd5\xb1\x0d\
\x80\x30\x0c\x04\xc0\x24\x3d\x43\xb0\x1e\x03\x65\x3d\x86\x60\x00\
\xa8\x90\x10\x12\x74\xe1\x0b\xee\x3a\xcb\x85\xfd\xd5\x97\x02\x90\
\x54\x9f\x16\x73\xdf\xf6\xeb\xbc\x2e\x53\x7d\xdb\x8f\x76\xbf\x7f\
\x6a\x5f\x3e\x31\x82\x00\x40\x96\x1e\x48\x13\x00\xc8\xd2\x03\x69\
\x02\x00\x59\x7a\x20\x4d\x00\x20\x4b\x0f\xa4\x09\x00\xf0\x6f\x07\
\xe4\xfa\x28\x24\x4f\x5b\x84\xb4\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x03\x9d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x03\x52\x49\x44\x41\x54\x68\x81\xed\x99\x5b\x48\
\x14\x51\x18\xc7\x7f\x33\xb3\xae\x29\xae\xb7\x5c\xed\x62\x41\x99\
\x0a\x2e\x6d\x1a\xf9\x52\x20\xd8\xc5\x0c\x36\x2a\x8a\x9e\x22\x0a\
\x82\x22\xea\x21\x8a\xea\xb1\x87\xc0\xca\x42\xa3\xe8\xf2\x10\x56\
\xf4\x1c\xdd\xc4\x8a\xd5\x1e\xa2\x87\x52\xca\x34\xbb\x28\x09\x51\
\x91\x94\x97\x76\x2b\x71\x77\xd6\xdd\x1e\x6c\x4a\xdd\x71\x67\x67\
\x75\x2f\xc6\xfe\x9e\x76\x67\xce\xf7\x9d\xff\x7f\xbf\xf3\x9d\x33\
\xcc\x42\x9c\x38\x71\xa6\x35\x82\xda\xc5\x65\x97\xba\x7c\xca\xe7\
\x5f\x3d\xdd\xaa\x81\x55\x65\x29\x93\x9e\xfc\x78\x67\xf6\x68\x21\
\x27\x9b\xf7\xe4\x1f\xd5\x9b\x43\x9c\xb4\x8a\x29\xc2\x07\x47\x4a\
\x2f\x75\x9d\xd0\x1b\x67\x08\x87\x18\x85\xc3\x87\x0e\x8e\xf9\x7e\
\xea\xf4\x99\x80\xe3\xff\x98\x40\x4f\x25\x62\xa6\x02\x0a\x7a\x2b\
\x11\x73\x06\x40\x9f\x89\x98\x34\x00\x23\x26\x82\x19\x17\xb3\x06\
\x82\x25\x6e\x20\xda\x68\x6e\xa3\x82\x64\xc0\x37\xec\xf1\xbb\xee\
\x70\x79\x49\x4b\x0c\xec\x3f\xd0\xb6\xe9\x94\xa5\x20\xe4\x69\xa3\
\x59\x81\xc4\xd4\x2c\x04\xc9\xdf\xe7\xcd\x4e\x17\x0e\x97\x37\xa4\
\x49\x9d\xb2\x44\xfd\x57\x53\x48\xb1\xe3\xd1\xac\x80\x94\x98\x4c\
\xb2\x79\xbe\xdf\xf5\xcf\xc0\xb9\x0f\x53\xa2\x61\x52\x4c\xfb\x1e\
\x88\x1b\x88\x36\xd3\xde\x80\x66\x13\x5f\xd9\x98\x8b\x35\x67\x46\
\x24\xb4\xf0\xb2\x67\x88\x5d\xb7\x3f\xe9\x8a\xd1\xac\x40\xa4\xc4\
\x03\x2c\x99\xa5\x7f\xae\xff\x7f\x09\x8d\xa7\xf5\xdd\x7b\x5d\xe3\
\x2f\xd6\xec\x27\x3b\x23\x01\xab\xa5\x10\xcb\x52\x1b\x6e\x69\xee\
\x98\xfb\xc5\x85\x79\x7a\x25\x8c\x21\xec\x15\x30\x67\x24\xf1\xf8\
\x95\x97\x85\xbe\x1b\x14\xf5\x56\x50\xd4\xbb\x96\xd9\x03\x35\x48\
\x1e\xc7\x94\xe4\xd7\x5d\x01\xbd\xbf\x58\xff\x8a\x52\x36\x5b\xce\
\x52\x32\x7f\x00\x00\xa3\x31\x8d\x9c\x8e\x6b\xe4\xa4\x3c\x01\x9b\
\x5d\xef\xf4\x7e\x84\xbd\x02\x2b\xd7\x6c\xfb\x2b\x9e\x24\x0b\x74\
\x74\x82\xeb\x27\xf4\xb5\xc2\xbd\xd5\x30\xd4\x37\xa9\xfc\x61\xef\
\x01\x80\x22\x69\x2e\x5e\x5f\x32\xc6\xf6\xb7\x88\xf2\xe0\xbf\x1b\
\x8a\x09\x9b\x1d\x66\xcc\xd4\x9d\x17\x22\xb4\x0b\x0d\x88\xeb\x30\
\x76\x76\x8f\x15\xaf\xd0\xd7\x0a\xf5\x15\xe0\xea\x0f\x29\x77\xd8\
\x7b\x00\x00\xf7\x71\xf8\xf2\x0c\xbe\x35\xab\xdf\xef\x7d\x0e\x77\
\x57\x85\xd4\x13\x91\x39\x07\x8c\xe9\x60\x7b\x08\xe6\xd2\x89\xc7\
\xfc\x59\x4e\x69\x7c\xd7\x95\x3a\x22\x3d\xa0\x20\x15\x5e\x26\x6f\
\x68\x07\xc9\x3f\xda\xd4\x07\xf4\xb5\x72\x81\xed\xec\xe5\x3a\x0e\
\xd2\x83\xca\x19\xd1\x93\x78\xd8\x90\xca\xfb\x25\x57\x19\x34\x59\
\x27\x1c\x53\xc0\x1b\x2e\xb0\x3d\xe8\x4a\x68\xbe\xdc\x6d\xde\xbd\
\x48\xaf\x4e\x6d\x5c\xfd\x23\xbb\x4f\xef\x8b\x09\x87\xbc\xc5\x82\
\x37\xb3\x62\xa6\x65\x6b\x4d\xc0\xee\x8e\xce\xb3\x50\x62\x26\xac\
\x6f\x0a\xd8\x13\x2d\xe6\x43\x74\x65\x6d\x69\xbc\x69\x7f\x1a\x70\
\x7f\xd5\x34\xf0\xb2\x67\x28\x04\x85\x41\x10\xa0\xb1\x6f\x98\xeb\
\x30\x65\x14\x00\x14\x8b\xa2\xc7\x1e\xc8\x84\xe6\x12\x0a\x37\x26\
\x9c\x9c\x67\x27\x45\x8c\x34\xf6\x28\xf1\x00\x18\x3c\x72\xfb\xe2\
\x47\x0f\x9c\x12\x6c\xc8\xad\xaa\xf2\x3b\xb6\xa3\xfe\x38\xfd\x83\
\x54\xf6\x51\xc7\x6b\xac\xaa\xe2\xad\x8d\x0d\x0b\x0c\x6e\xf7\x0a\
\xc1\xed\xbe\xa5\x16\xaf\xba\x8d\xb6\xec\xc9\x57\xad\x4c\x38\x71\
\xdc\x3f\x96\x69\x4a\x28\xb0\x03\x25\x00\x92\x2c\xb7\x59\x9b\x1a\
\x16\x4a\x1e\x8f\xf2\x57\x50\xb6\x5a\x5c\xd4\x2b\xa0\xb0\xbc\xf2\
\x58\xbf\x4f\x74\xaf\xf4\x09\x34\x1b\x3c\x72\xbb\xb5\xe9\xfe\x82\
\x51\xe2\x7b\x84\xe1\xe1\x8d\x6a\x71\x31\x63\x00\x60\x53\x79\xf9\
\x77\xd9\x25\x54\x2e\x7e\xf4\xc0\x69\xf0\xc8\xca\xab\xbb\xcf\x5e\
\x51\x2c\x9b\x53\x5b\xfb\x46\x2d\x26\xe2\x4b\x25\x18\x3e\x1e\x38\
\x90\x29\x0a\xc2\x1d\x04\xc1\xec\x15\x45\xdb\xbc\xea\xea\xae\x68\
\x6b\x8a\x13\x27\x4e\x98\xf8\x0d\x6d\xe7\xfe\x10\xc7\xc0\x69\x8c\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xf1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x03\xa6\x49\x44\x41\x54\x68\x81\xed\x97\x51\x68\
\x5b\x65\x14\xc7\x7f\xe7\x26\x69\x5a\xd3\x36\x4d\xd6\xb5\xb2\x8d\
\x0d\xdd\xc0\x89\x88\xeb\x3a\x5b\x98\x1d\x7b\xd8\xc6\x86\x6d\x68\
\xa9\x88\xa0\xe0\xb3\x43\xe9\x83\xad\x2d\x6c\x08\xbe\xcc\xa1\x6d\
\x27\x88\x8e\x3d\xfb\xe2\xc3\xec\x74\xeb\xc4\xe2\x2a\x38\xe7\x10\
\x1b\x5b\xea\x40\x54\xd0\x29\x32\xc7\xa4\xd8\xb4\x9a\x52\xd3\xa4\
\xf7\xf3\xe1\xb6\x49\xee\xcd\xdd\xd6\x66\xbb\xb9\x04\xee\xef\x29\
\xf7\x9c\xf3\x7d\xdf\xff\xdc\xef\x3b\xe7\x7e\x01\x0f\x0f\x8f\xb2\
\x46\xac\x86\xfd\xdd\x9f\x5e\x45\xb1\xd7\x0d\x31\xab\x28\xb8\xfa\
\xd5\xc7\xed\x6d\x6b\x89\xd5\x6c\x46\xbb\x2a\x1e\x40\xe0\xa9\xb5\
\xc6\x16\x26\x50\x66\x78\x09\xb8\x4d\x41\x11\x5f\xba\x9e\x56\x6e\
\x08\xb1\x72\xe8\xe1\x40\x81\x36\x3b\xca\x7e\x07\xbc\x04\xdc\xa6\
\xec\x13\xf0\xaf\x27\xf8\xc5\x33\x3f\x02\xf0\xc1\x4b\x8f\x9a\x9e\
\x8b\x65\x75\x9e\x7b\xa1\xec\x77\xc0\x6b\xa3\x6e\xe3\xd5\x80\xdb\
\x78\x35\xe0\x36\x25\xab\x81\x80\x4f\x38\xfc\x78\x94\xd8\xee\x7a\
\xaa\x2a\xee\xdf\x7b\x5b\x57\x02\xc5\x12\xa9\x0e\xd0\x73\x68\x33\
\xdb\x1b\xab\x00\xd0\x96\xd3\x44\xa6\x2e\x13\x9d\xfe\x9a\xea\x3f\
\x7e\x26\x90\x98\x01\x20\x1d\xd9\x48\x72\xeb\x23\xcc\x36\xed\x43\
\x3d\xfb\x58\x85\x9c\xfd\x61\xe9\x6e\x73\x97\xbc\x06\xa2\x53\x5f\
\xb2\xed\xa3\xf7\xa9\x9c\xb9\x79\xc7\x38\x81\x5f\x75\x51\xfd\x35\
\xe3\xf1\x73\x77\x89\x33\xe3\x54\x02\xa2\xeb\x6c\x1d\x39\xcd\xa6\
\xcf\x3f\x5c\xe7\x40\x35\x14\x6a\x8b\x0f\xc8\x1b\xe8\x76\x6e\x47\
\x6b\xa0\xbd\x69\x03\xcf\xb5\x36\x00\x14\x27\x1e\x40\x49\xdf\xc2\
\x95\x16\x1d\x26\x06\xec\xdc\x8e\x75\xa1\x4a\xbf\xc6\xd3\x4f\x44\
\x01\xe3\xd8\x58\xc5\x4b\x38\x02\x62\xb3\xbc\x68\x86\xcf\x4c\xff\
\xc2\x81\x96\x2e\xbb\x75\x1c\x3f\x42\xda\x72\x9a\x5d\xaf\xbf\x40\
\x70\xe6\xcf\xdc\xa2\x1b\x1b\xa9\x1c\x3e\x8d\x7e\x6d\x9a\xd4\xf0\
\x09\x50\x2b\xa7\x43\x84\x60\x4f\x3f\xbe\x96\xbd\x2c\xf6\x1e\x45\
\xdd\x32\xd5\xc9\x6f\xa1\xe8\xc2\x4e\x6b\x61\x3b\xfe\x1d\x88\x4c\
\x5d\x36\x8b\xaf\x8b\x50\xf5\xce\x19\xb4\x4d\x5b\xf0\x1f\xe9\x20\
\xd8\x7b\xcc\xd8\x09\xd1\x08\xf6\x1e\xc7\x1f\xeb\x46\x1a\x1f\xa4\
\x6a\xf0\x3d\xeb\x4e\x3c\x94\xfc\x3b\xd4\x69\x9d\xdf\xb1\x1a\x18\
\x7a\x7e\x07\x0d\xb5\x01\x36\x4c\x5f\x31\xd9\xd5\xfc\x1c\x99\xf8\
\x37\x04\x62\xcf\x18\x02\x8e\xc4\x00\x81\x4c\x7a\xe5\xb7\xc1\xf2\
\xf7\x53\xa8\x7f\xe6\x4d\x63\x45\xe8\x04\xce\x16\x9d\xc0\x7a\xa8\
\x7d\xc0\x07\x40\xe8\xf7\x9f\xcc\x0e\xa5\x58\x7a\x77\x10\x20\x2f\
\x89\x0e\x53\x48\x66\x6c\x94\xd4\xf0\x9b\xb9\xa3\x95\x63\x8f\xd5\
\xe0\x78\x0d\xb4\xbe\x72\x10\x2d\xb5\x58\xe8\x10\xa1\xa2\xe7\xb5\
\x6c\x12\xab\x64\xc6\x2e\x9a\xeb\x22\x7f\x08\x24\x43\x5f\x4c\xd4\
\xe4\xdb\x5c\xbd\x0b\x89\xd8\xdc\xd7\xec\x6c\x2b\x28\x0a\xbf\x05\
\x8e\x27\xb0\x54\x57\x5f\x68\x5c\xe9\x36\xfe\x8e\xee\x02\x97\xff\
\x70\x3b\xc1\xde\xe3\xf6\x2d\x56\x49\xc1\xe7\xdb\xf1\x04\x92\xdb\
\x76\x9a\x0d\xab\xe2\x63\x39\xf1\x99\xb1\x51\xd2\x17\x73\x37\x06\
\xa3\x3b\xd9\x24\x21\x7c\x67\x9d\xdf\xf1\xcb\xdc\x6c\xd3\x3e\xea\
\x27\x2e\xe5\x34\x84\xeb\xf0\xed\x69\xcd\x3e\x67\x3e\xbb\x40\xea\
\xd4\x49\xc3\xe7\x0f\x64\x3b\x91\x6f\xd7\x6e\x24\x1c\x46\xcd\x25\
\xb2\xb1\x0a\xce\x5b\xe7\x77\x7c\x07\x66\x9b\xf6\xf3\x5f\xc3\xe6\
\x9c\x88\xb9\x04\x8b\xaf\x1e\x45\xbf\x79\xc3\xe8\x36\xa7\x4e\x1a\
\x05\xab\x74\x52\xc3\x27\x48\x8f\x8e\xa0\xfe\xba\xc5\x62\xdf\xcb\
\x56\xf1\xd7\xab\xa3\xc9\x0b\xd6\xf9\xd7\xf4\xaf\xe7\x5e\xf9\xf7\
\xe0\x93\xdd\xa2\x64\xc4\xb4\x70\x38\x62\xf4\x79\x6b\xb7\x11\x0d\
\xa9\x0d\xa3\xe6\x13\x66\xb3\x48\x57\x68\xfc\xdb\xd2\xef\x00\x40\
\xcd\x78\xfc\x1c\xa2\x86\xf2\x6d\x6a\x3e\x61\xdb\x2a\x51\x7a\x81\
\x78\x84\xb7\xec\xc4\x43\x09\xdb\x68\xa8\x2d\x3e\x00\x0c\x16\x31\
\xf4\xed\x50\xdb\xc4\xb1\xdb\x39\x4b\x72\x84\xf2\x59\x38\xd0\xd2\
\x85\x30\xa8\x14\x3b\xee\x14\x27\xc2\x2f\x20\x7d\xb7\x7b\xf3\xd9\
\xb8\xfb\x2b\x6f\x6d\xa8\xe6\xe6\x40\x32\xec\xeb\x12\xa1\x13\x25\
\xcd\x22\x6a\x0b\x80\x52\x72\x03\x51\x93\x0a\xf9\xa4\x7a\x2e\x73\
\x5e\x26\x27\xd3\x6e\xe8\xf3\xf0\x28\x27\xfe\x07\xfa\xc2\x42\xb2\
\xa6\xf4\xe3\xd9\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x03\xb2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x03\x67\x49\x44\x41\x54\x68\x81\xed\x98\xcf\x6f\
\x54\x55\x18\x86\x9f\xef\xdc\x99\x16\x3a\xa3\x6d\x49\x20\x95\x69\
\x35\x60\x02\x8b\xc6\x0d\xd3\x68\x35\x4a\x4b\x59\x18\x68\x49\x60\
\x12\xd8\x68\xa2\xab\x12\x12\x8d\xff\x80\x26\x97\x90\xb8\x70\x67\
\xd2\x8d\xfc\x01\x46\x5b\xb5\x31\xd3\x31\x91\x85\x50\xd4\x8d\x19\
\x17\xda\xd4\xa8\x89\x60\x2c\x30\x85\x12\x41\xa5\x15\x66\xee\x9c\
\x8f\x0d\x0d\x44\xce\xcc\x34\x9d\x73\xad\x31\xf3\xec\xe6\xfb\xce\
\x7c\xef\x7d\xef\x9c\xf7\xfe\x18\x68\xd1\xa2\x45\x6c\xec\xcd\x15\
\x8e\x0f\x1d\x9e\x19\x8f\x53\x43\xe2\x1a\x3c\x74\x24\xff\x2c\x98\
\xb3\x80\xc1\xea\xfe\xd9\x4f\xc7\xbe\x8c\x43\x27\x16\x03\xc3\x47\
\x0b\x3d\x1a\x51\x04\x32\xf7\x4a\x8b\x41\xc2\x0e\x7c\x31\x75\xe8\
\xb2\x6f\x2d\xe3\x7b\x60\x76\xbc\x98\xd4\x88\x49\xee\x1f\x3c\x40\
\x4f\x35\x32\x1f\x1d\x38\xf0\x59\xbb\x6f\x3d\xef\x06\xd2\xd7\xaf\
\x4d\x00\x2f\x38\x5a\x83\x7f\x6f\xe2\x3d\xdf\x7a\x5e\x0d\xec\xcd\
\x15\x8e\xa3\x5a\x33\xb4\x8a\xbe\xe2\x3b\xd4\xde\x32\xf0\x40\x68\
\x1b\x6d\x93\x8a\xcf\x50\x7b\x31\xe0\x08\x6d\x23\xbc\x85\xba\xe9\
\x2d\x54\x23\xb4\x8d\xf0\x16\xea\xa6\x0d\xd4\x09\x6d\x23\xbc\x84\
\x3a\x96\xfb\xc0\xd0\x91\x82\xba\xea\xb3\xd3\xa3\xde\xf5\xbc\x5f\
\x46\xff\x6d\x5a\x06\x36\x9a\x96\x81\x8d\xa6\x65\x60\xa3\x69\x19\
\x78\x88\x30\x74\xce\x14\x40\x63\xb8\x71\x7a\x37\x30\x7c\x6e\xc8\
\x39\x33\xa0\x0a\x47\xfd\xeb\x79\x1f\xb8\xb4\x75\xc9\xfd\x0b\x88\
\x85\xee\xec\x7f\xdf\x40\xe6\x8f\xce\xa4\xab\x1e\x88\xa5\xb4\xbd\
\xe4\xec\x35\x83\x77\x03\x51\x67\xb9\xcb\x55\x4f\x05\x2b\x24\xcb\
\xc9\x4e\xdf\x7a\xde\x0d\xd8\x48\xba\x5d\xf5\x74\x62\x99\x28\x48\
\x6e\xf1\xad\xe7\xdf\x00\x66\x9b\xab\x9e\x0e\x96\x41\xd4\xd9\x6b\
\x86\x18\x42\x65\xf7\xb8\xea\xdd\x89\x3f\x41\x6d\xd6\xbf\x9e\x67\
\xac\xc8\xa0\xab\xde\xb7\xa9\x84\x22\xcf\xf8\xd6\xf3\x6a\x20\x7b\
\x28\xdf\x21\xca\x88\xab\xb7\x73\xf3\x6f\x20\xec\xbf\x12\x6e\xef\
\xf0\xa9\xe9\xd5\x40\x2a\x19\x9c\x00\x1e\x0a\x71\x52\x2a\xec\x4a\
\x5d\x00\xe8\x16\xdb\x7e\xc2\xa7\xa6\x37\x03\xfb\x0e\xcf\x64\x05\
\x3d\xe9\xea\x0d\x3c\x3a\x47\x9b\x54\x00\x50\xe4\xe4\x95\xb7\x9e\
\xf4\x96\x05\x2f\x06\xf6\xe5\x66\x46\xad\xc8\x19\x94\x94\xab\x3f\
\xb2\xe5\xeb\x07\x3f\xa6\x10\x3d\x53\x0a\x77\x1e\xf4\xa1\xbd\xfe\
\x87\xab\x30\x34\xc3\x73\x03\x23\x6a\xe5\x0d\x60\xac\xd6\xb2\xfe\
\xf4\x4f\xbc\xb9\x63\xa2\x96\x78\xbe\x0a\xef\x66\xcc\x85\xb3\x12\
\x62\xd7\x73\x18\x35\x0d\x64\xc7\x8b\xc9\xcd\x97\x17\xd3\xc6\xd8\
\x2e\x93\x30\x29\xb1\x92\x42\xe8\xb5\x46\x76\xa3\x9a\x05\x9e\x07\
\xea\x5e\xd7\x0d\x76\xe5\x9d\x5d\x6f\x93\x69\xbf\xda\x28\xb8\xd7\
\x50\xbe\x42\x28\x02\x3f\x1b\xb5\x97\x08\x82\x65\xa2\x68\xf9\x4e\
\xa4\x37\x1f\xff\x7d\xeb\x2d\x39\xfd\x6d\xc5\xf5\xc5\x44\xad\x89\
\xe9\xa5\xab\x65\xda\x04\x08\x50\x40\x57\x37\x9b\x3a\xff\xf2\x71\
\x51\xb5\xc8\xab\xbd\x6d\x8b\xa2\xc8\xfb\x40\x50\x67\xed\x36\x84\
\x1c\x90\x03\xb0\x62\xc0\x2a\x98\x80\x64\x1b\x94\x7a\x6e\x40\x8d\
\x93\x1d\xd7\x0b\x4d\x45\x45\x5e\x9a\x9d\x1e\x9b\x7a\xec\xd4\xc5\
\x49\x55\x79\x19\x70\x9e\xc1\x66\x89\xc3\xc0\x0f\x62\x78\xee\xfc\
\x27\x07\x3f\x5c\x2d\x64\x4e\xfd\xf2\x41\xd5\xe8\x00\xf0\x9d\x6f\
\x31\x9f\x06\x16\x14\x5e\xeb\xb8\x2d\x7b\xce\x7d\x3c\x5a\xfc\x67\
\xb3\x2f\xbc\xf8\xfd\x1d\x63\x07\x55\xf5\x75\x60\xc1\x97\x68\xcd\
\x0c\xac\x91\x1b\x40\x01\x2b\x93\xd7\xdb\x6e\x7d\x3e\x3f\x75\xac\
\x5c\x6f\xf1\x8e\xf0\xd7\xdb\xc0\xc4\x7c\xd8\x7f\xba\xcb\xae\xbc\
\x28\x22\xc7\x50\xc6\x00\xe7\x23\xf8\x5a\xa8\x67\x40\x81\x9b\x40\
\x04\xfc\x85\xb0\x28\x96\x05\xd0\x4b\x56\x98\x03\xbe\x39\x3f\x3d\
\xfa\x23\xc8\x9a\x53\xbd\x4a\x7f\x38\x5f\x06\xf2\x40\x5e\x43\x4c\
\x29\x7a\x62\xb7\x98\xc4\xd3\x0a\x4f\x09\xb6\x57\x91\x3e\xd0\x1e\
\x90\x47\xee\x1d\xe3\xba\x0d\xb6\x68\xf1\x7f\xe7\x2e\x70\xac\x12\
\x9e\xdf\x1c\xd7\xfe\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x03\xb0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x03\x65\x49\x44\x41\x54\x68\x81\xed\x98\x4d\x6c\
\x0c\x61\x18\xc7\x7f\x33\xb3\x4d\xbb\x6c\x6d\x4b\x7d\x84\x8a\x6f\
\xaa\x15\x12\x41\x25\x2e\x15\x14\x07\x87\x3a\x69\x13\x89\x43\x7d\
\xf4\x4e\xc2\x8d\x53\xc5\x81\x2b\x51\x27\xa1\x88\x88\x93\xa4\x2a\
\x36\x0e\x52\x34\x12\x0d\xa1\x69\x85\x2c\x69\xd2\xa2\xdb\x6e\xbb\
\x4c\x9a\xdd\x99\x71\xd8\x5d\xb6\xb5\xef\xd8\x79\x67\xb7\xbb\x87\
\xfd\x5d\x9f\x77\x66\x9e\xff\xff\x7d\xe6\xff\xee\x0e\x14\x29\x52\
\xa4\x48\x3e\x51\xec\x8a\x81\x41\xab\x3a\xaa\xc5\xae\x28\xb0\x1f\
\x28\x9f\xa5\x9e\x92\x4c\xa2\xd0\x6d\x99\xc6\xd9\xc6\x35\x65\x83\
\xa2\x45\x42\x01\x81\x41\xab\x3a\xa6\xc5\xfa\x80\xf9\x39\x69\x2f\
\x73\x42\x6a\x89\x67\xf3\x9e\xe5\xca\x50\xba\xa2\x2a\xba\x2a\xaa\
\xc5\xae\x90\xff\xe6\x01\xe6\x9b\xd1\xe8\x65\x51\x51\x28\x20\x31\
\x36\x05\x82\x22\xec\x45\x28\x80\x19\x33\xdf\xd2\xfe\x9c\x96\xf6\
\xe7\xc2\xc5\x35\x55\x2a\x35\x55\xe2\xdb\xb9\xac\xfb\x45\x05\x8f\
\xf0\x8e\xb3\xc8\xeb\x77\x43\xb4\x75\xf6\xa2\x28\xd0\x7a\x64\x07\
\x5b\x37\x2d\xcd\xf8\x5a\xc7\x02\xec\x5c\x94\xad\xb7\x75\xf6\x32\
\x12\xfa\x05\xc0\x8d\xbb\xbd\xb4\x34\x34\xd1\xff\xc3\xcc\xa8\x1f\
\xfb\xa7\xcd\x12\x53\x31\x4b\xfa\x5a\xc7\x3b\x20\x72\x26\xe9\xac\
\xd3\xba\x69\xc1\xbc\x8d\xb5\x4c\xbc\x79\x87\xbf\x4c\xa3\xf5\xc8\
\xf6\x8c\xdd\x87\x02\x78\x07\x7a\x82\x3a\x93\x73\x2a\xa9\x3b\xb8\
\x9b\x5b\xcd\x55\x7c\x0c\x65\xde\x3c\xe4\x79\x84\x4c\x0b\x1e\x7d\
\x88\xcf\xfe\xb1\x6d\x73\xf1\x48\x74\x93\x57\x01\x3d\x41\x9d\xe1\
\x49\x83\x45\x3e\x8d\x7d\xeb\xbc\x52\xf7\xc8\x9b\x80\x54\xf7\x0f\
\xd5\xce\x91\x72\x1f\xb2\xf4\x0e\xc8\xe4\x78\xaa\xfb\xf5\x2b\xe4\
\xdc\x87\x2c\x9d\x03\xa9\x39\x7e\xbd\xb3\x97\xc0\xb5\x26\xdb\xeb\
\x0d\x0b\xce\x77\xe9\x00\x9c\xa8\xf7\x51\xb7\x50\x9d\x56\x07\x71\
\x9a\xcd\x24\x2b\x23\x94\x9a\xe3\x63\xba\xc1\xe8\x4f\xc3\x76\x7d\
\xd7\x80\xce\xd7\x70\x8c\x6a\xbf\x47\x7a\xf6\x93\xb8\x3e\x07\x0c\
\x0b\xca\x13\x39\xae\x2a\x0a\xa5\xeb\x6b\x68\x7b\x18\xe2\x4c\x43\
\x25\xe5\xa5\xff\x3a\xfb\xfe\xbb\x49\xc7\xcb\x08\x00\xfb\x37\x78\
\xff\xc4\xa6\x53\xe7\x93\xb8\xde\x81\x17\x41\x9d\x48\x22\xc7\x03\
\x57\x9b\x58\xb9\x6a\x09\x43\x61\x83\x8b\x81\x31\xc2\xfa\xbf\x3b\
\xd1\x13\xd4\x19\x89\xb8\x9f\xfd\x24\xae\x04\xcc\xcc\xf1\x05\x73\
\x35\x4e\x37\x54\x50\xed\xf7\x30\x3c\x61\x70\xe9\xd9\xf8\x34\x11\
\xc6\x8c\xe4\xd1\x6c\xff\x0f\x66\x86\x2b\x01\xe9\x72\xbc\xbc\x54\
\x15\x8a\xe8\x1a\xc8\x4e\xf2\xa4\x22\x2d\xc0\x2e\xc7\xd3\x89\xf8\
\x16\x31\xb8\xf9\xfa\xe7\x9f\xf5\xd9\x70\x1f\x5c\x08\x78\x91\x70\
\x7f\xb1\x4f\x63\x67\x1a\x37\x93\x22\x96\xf9\x35\xbe\x7e\x1e\x61\
\xef\xa9\x07\xbc\x7d\x14\xc0\xf7\x6b\x2c\xed\x7a\x59\xa4\xce\x01\
\xc3\x82\x0b\x8f\xe3\x39\x7e\xbc\xde\x47\x6d\x9a\x1c\x8f\xa3\x72\
\xed\xf0\x02\x76\x9f\x7c\x46\x6c\x6a\x0a\x80\x48\xff\x07\x6a\x17\
\x6e\xb4\xbd\x3f\xe4\xf8\x1c\x78\x3c\xa0\xf3\x65\x3c\xc6\x72\xbf\
\x87\xc6\xf5\xf6\x6e\x56\x7a\x55\x2a\xbc\x7f\x1f\x53\xea\xc9\xd2\
\xec\x24\x70\xbc\x03\xef\xbf\x9b\x74\xbc\x8a\xe7\x78\xe3\x06\x2f\
\x83\xa3\xff\xcf\xf1\x13\xcd\x3b\xe8\xb8\xf3\x0a\x45\x51\x84\xbf\
\xf7\x65\xcf\x01\xc7\x02\x64\x7e\xc3\x6c\xdd\xb4\x94\xa7\x57\x0f\
\x4b\x35\xf8\x3f\x1c\x8f\x50\xb6\x73\xdc\x2d\x8e\x05\x64\x3b\xc7\
\xdd\x22\xf5\x12\x17\x8a\xfb\x20\x21\xa0\x90\xdc\x07\x89\x97\x38\
\xd8\xd7\xcf\xd1\xbe\x5c\xb4\x32\x9d\xdb\xe7\x76\x65\xb4\xae\x20\
\xbe\x0b\xb9\x41\x38\xc9\xdd\x9f\xa2\x61\x60\xde\x2c\xf6\x62\x47\
\x78\xdf\xea\x92\x8a\x74\x05\xf1\x0e\x28\x3c\xc9\x59\x3b\x0e\xb1\
\xa0\x5b\x54\x13\x7f\x5e\x37\x8d\x73\xc0\x68\x4e\x3a\x72\x46\xc8\
\x8c\xf7\x92\x16\xa1\x80\xbd\x6b\xca\x06\xd4\x12\xcf\x16\xb0\xee\
\x01\x13\x39\x69\xcd\x9e\x09\x0b\xee\x1b\xa6\x51\x7f\x60\x6d\xd9\
\xc7\x3c\x3c\xbf\x48\x91\x22\x45\x32\xe0\x37\xa9\x3d\x5a\xe1\x99\
\x24\xf0\x18\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x1c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x04\xd1\x49\x44\x41\x54\x68\x81\xd5\x9a\x5d\x6c\
\x14\x55\x14\xc7\x7f\xe7\xee\xf4\x8b\x5d\x2a\xc1\x26\x26\xd4\x68\
\x10\xb0\x89\x8d\xd0\x76\x0b\xa1\x40\x84\xdd\x0a\x24\x26\x04\x50\
\x84\x36\x6d\x4c\x94\x18\x13\x7c\x35\x26\x36\xd1\xf0\x20\x98\xe8\
\x33\x84\x84\x18\x13\x43\x5b\x6c\x15\x13\x12\x35\xc5\x76\x57\x22\
\x44\xbe\xda\x05\x8c\x90\x1a\x1e\x34\x01\x9f\x10\xb5\xed\xb6\x05\
\x76\xe6\xf8\xd0\x6d\xe9\xc7\x7e\xce\x2e\x6c\xf9\x3d\xed\xde\x7b\
\xee\x3d\xff\x73\x33\x73\xe7\xcc\xb9\x23\xe4\x1b\x55\x59\xf9\x53\
\x7b\xa5\x41\x2a\x2c\x55\x2f\x40\x4c\x24\xea\xa0\xb7\xaf\x6e\x6a\
\xb9\x85\x88\xe6\xd3\x9d\xe4\x3a\xc1\xfa\x33\x9f\x2f\x1c\xbd\x57\
\xb6\x45\xd0\x80\x20\xeb\x15\xaa\x80\xb2\x24\xe6\x63\x02\x83\x8a\
\x9e\x05\x42\x77\xa5\xe8\xd4\x6f\x81\xdd\x23\xb9\xf8\x77\x1d\x40\
\x6d\x6f\x47\x83\x08\xfb\x10\x76\x02\x5e\x97\xd3\x44\x41\x4f\x20\
\x7a\x78\x20\xd0\x7a\xce\xcd\x04\x59\x07\x50\x17\x3e\xb6\x16\xcc\
\x41\x94\x80\x1b\x87\xc9\xd1\x3e\xa3\xb4\x5d\x6a\x6c\xb9\x90\xcd\
\xa8\x8c\x03\xf0\xff\xd8\xf5\x84\x7a\x62\x9f\x01\x7b\x01\x93\xad\
\xbc\x0c\x71\x80\xa3\x45\xe3\xce\xfb\xe7\x5f\x69\x1d\xca\x64\x40\
\x46\x01\xd4\x85\xda\xfd\x20\x5f\x01\xcb\x72\x51\x97\x05\x7f\x22\
\x4e\x53\x26\x97\x55\xda\x95\xac\x0d\x77\xee\x02\x39\xcb\xa3\x13\
\x0f\xf0\x2c\x6a\xc2\x75\x7d\x9d\x3b\xd3\x19\xa6\x0c\xa0\x36\xd4\
\xf9\x96\xa8\x1e\x07\x4a\xf2\x26\x2d\x73\x4a\x11\xed\xae\xeb\xeb\
\x78\x33\x95\x51\xd2\x4b\xa8\x26\xd4\xbe\xdd\x20\xdf\x00\x9e\xbc\
\x4b\xcb\x0e\x5b\x45\x9a\x22\x81\xe6\xaf\x13\x75\x26\x0c\xa0\xa6\
\xf7\xd8\x6a\x63\xcc\xcf\x14\x66\xe5\x13\x31\x6e\x3c\xb2\xee\xd2\
\xc6\xe6\xc8\xec\x8e\x39\x01\xd4\x84\xbf\x58\x64\xb4\xa4\x1f\x78\
\xce\x8d\xa7\x17\xcb\x2b\x58\xe1\x5b\x94\xb0\xef\xfa\xf0\x1d\xae\
\x0f\xdf\x71\x33\x2d\x20\x37\xc4\xf6\xd4\xf7\x6f\xde\xfd\xdf\xf4\
\x56\x6b\xb6\x99\xd1\x92\x4f\x71\x29\x1e\xe0\x40\x75\x03\x95\xa5\
\xbe\x84\x7d\x37\xa2\xff\xb2\xe7\xc2\x0f\x2e\x67\xd6\xe5\x8e\xc7\
\xfe\x04\xd8\x37\xbd\x75\xc6\x4d\x3c\xf1\x90\x62\xaf\x4b\x0f\x00\
\x58\x92\x7c\x5f\x48\xd5\x97\x09\x82\xbe\x53\x1b\xee\xac\x9f\xde\
\x36\x6b\x46\x73\x70\x6e\xdb\xbc\xc2\x88\x3a\x07\x66\x34\x4c\xfe\
\xa8\xed\xed\x68\xc8\x7f\x7a\xf0\x30\x90\x2d\xf5\x7d\xed\x6b\x26\
\xff\x4d\x05\x20\x32\xf3\xda\x9a\xcf\xd8\x22\x53\x5a\x0d\x40\x75\
\xb8\xcb\x87\xb0\xa3\x70\x92\xb2\x43\x60\x57\x75\xb8\xcb\x07\xf1\
\x00\x8a\x35\xb6\x15\x48\xbc\x75\xcc\x4f\xbc\x25\x8e\xbd\x19\xe2\
\x01\x08\x04\x0b\xab\x27\x7b\x54\x9c\x4d\xf0\x20\x80\x75\x05\x55\
\xe3\x02\x81\x0d\x00\x16\xfb\xf7\x1b\x45\x9f\x4f\x97\x59\x57\x97\
\x3f\xc9\xc7\x2f\x34\xe0\x49\x63\x57\x51\x9c\xec\x6d\x12\x9e\x2e\
\xf3\x71\x72\xed\xb6\x94\xe3\x6d\x94\x8f\xae\x9d\xe3\xd7\xa1\xdb\
\x29\xed\x40\xaa\x50\x15\x6b\x75\xa0\xaa\xd2\xb6\x75\x41\x1a\x6b\
\x96\x2e\x28\xe7\x99\xb2\x85\xe9\xcc\x52\x62\x89\xa1\xb2\x2c\xfd\
\xad\xb6\xd4\x5b\x9e\x41\x00\x78\x57\x85\xba\x97\x98\x98\x2d\x8b\
\x73\x52\x55\x40\x2c\xeb\xde\x62\x63\xc4\xc9\x6d\x59\x0b\x88\xd8\
\x94\xcf\xe7\xb4\x21\x23\x8c\xa3\x66\xb8\xd0\x22\xdc\xa2\x1e\x86\
\x8c\xe5\x51\xb7\x09\x7a\xc1\x89\xc5\x8a\xef\x58\x17\xc3\x83\xb7\
\xea\x5e\x5a\x31\x0a\x92\x72\x27\xba\x39\x36\xc2\x3f\xf7\xef\xe2\
\x91\xd4\xdb\xa8\xcf\x53\x84\x49\x62\xe3\xa8\x32\x62\xdf\x4f\x39\
\xde\x56\xe5\xe6\x58\x46\xc5\xba\xe8\x95\xe0\xeb\x7f\x09\x80\x3f\
\xd4\x11\x51\xa8\xc9\x64\x54\x3a\xbe\x5f\xb7\x9d\xa7\x4a\x12\xaf\
\xc5\x1f\xa3\x43\xbc\x76\xfe\xbb\x7c\xb8\x01\x74\x60\x20\xd8\xe2\
\x37\x00\xf1\x5a\xe5\x63\x85\xc2\x19\x78\x90\x4e\x87\x0a\xa8\xc5\
\x15\x46\x24\x0c\xf1\x00\x62\xf7\xed\x1e\x20\xa7\x2a\xf1\x23\x26\
\x3a\x8e\xd5\x0b\xf1\x00\xae\x6e\x7d\x23\x0a\xfa\x6d\x61\x35\x65\
\x81\x4a\xf7\x64\x59\xfe\xc1\x83\x4c\xf4\x70\xc1\x04\x65\x89\xa3\
\xf6\x94\xd6\xa9\x00\xe2\x85\xd4\xc7\xe1\x5e\xe8\xb9\xfc\x72\xeb\
\xc5\xc9\x3f\x33\x52\x09\xa3\xfa\x01\x13\x25\xee\xf9\x8a\x2d\x6a\
\xda\xa6\x37\xcc\x08\x20\x7e\xb8\x70\x34\x17\x0f\x31\x4d\x1e\xbf\
\xad\xb9\x1e\x8f\xe9\x91\xfe\xc6\xa6\x81\xe9\x2d\x73\x2a\x73\x77\
\xc5\x7a\xaf\x44\x63\x9b\x98\x38\xeb\xca\x9a\x0f\xaf\xfd\xc2\x32\
\x6f\xe2\xd2\xe2\xe0\x48\x2e\x59\x8b\xdc\x28\x1a\x77\xda\xe6\xb4\
\x26\x32\xf5\xf7\x1d\xaf\x53\x71\xce\x02\xa5\x39\x78\xcc\x27\x63\
\x8a\x69\x88\x04\x9b\xae\xcc\xee\x48\x98\x4e\xf7\x37\x36\x0d\x28\
\xb2\x07\x88\x3d\x74\x69\xe9\xb1\x41\x5b\x13\x89\x87\x14\x65\xc4\
\x48\xb0\xf9\x24\xa2\x6f\x4f\x4c\x50\x30\x62\x22\xec\x1d\x08\xb6\
\x9c\x48\x66\x90\xf6\x8c\xcc\x1f\xee\xd8\xa1\x4a\x27\x8f\xfe\x72\
\x1a\x53\xa4\x29\x12\x6c\x3e\x99\xca\x28\xa3\x43\xbe\xfa\xd3\x9d\
\xb5\x8e\x4d\x17\xe8\xf2\xfc\x68\x4b\x8d\xc0\xef\xb6\xe8\x9e\xcb\
\x81\x96\xcb\xe9\x6c\x33\x7a\xa5\xbc\xb4\xb1\x39\x52\x34\x6e\xfb\
\x45\xf4\x08\x0f\xf7\x39\x61\x83\x1e\x2a\x2d\x1e\xab\xcf\x44\x3c\
\xb8\x38\xe8\x9e\xa8\xcf\xeb\x41\x51\x36\x67\xaf\x2f\x25\x3d\xa2\
\xa6\x6d\xf6\x3e\x9f\x0e\xd7\x9f\x1a\xd4\xf7\xb5\xaf\x51\x91\x77\
\x15\x5e\xc5\x7d\x5d\x75\x58\xe1\x84\x3a\xce\xa1\xe9\xe9\x41\x36\
\xe4\xfc\xb1\xc7\xca\x9e\x2f\xbd\x96\x55\xb4\x45\x44\x03\xaa\x6c\
\x40\xb4\x2a\xc5\xeb\x69\x14\x74\x50\xe1\x8c\x11\x09\x33\xb2\xf0\
\x54\xff\xb6\x6d\xa3\xb9\xf8\xcf\x39\x80\x39\xa8\xca\xaa\x50\xf7\
\x12\x23\x4e\x85\x87\x98\x0f\xc0\x71\xcc\xb0\x2d\xd6\xdf\x57\x1a\
\x77\xdf\xca\xb7\xbb\xff\x01\xd9\x05\x8d\x98\x86\x19\xf9\x9e\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xa7\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x01\x5c\x49\x44\x41\x54\x68\x81\x63\x60\x18\x05\
\xa3\x60\x14\x8c\x82\x81\x04\x8c\xb8\x24\x4c\x66\xdc\xfe\x0f\x63\
\x7f\x7d\x71\x0f\xab\x9a\x76\x3b\x1e\xaa\x39\xa4\xe5\x96\x18\xb2\
\xa3\x3a\x4f\x67\xa8\x56\x10\xa3\x8f\x89\x6a\x2e\xa0\x22\xf8\xcf\
\xc0\x50\x6e\x3a\xe3\x76\x07\x31\x6a\x59\x68\xed\x18\x18\x28\x2b\
\x29\x46\xe1\x77\xf5\xf4\xe2\x55\x0f\xf5\x04\x03\xa1\x98\x18\x94\
\x31\x00\x03\xc4\xc4\xc4\xa0\xf6\x00\x03\x03\x61\x4f\x0c\x7a\x0f\
\x30\x30\x40\x3c\x81\x4b\x6e\x48\x78\x00\x1f\x18\xf5\xc0\x40\x83\
\x21\xef\x01\xa2\xea\x01\x46\x66\x16\x86\xff\x7f\xff\x60\x88\x7f\
\xfc\xf9\x8f\x81\x9f\x9d\xb8\x30\xc0\x57\xee\x7f\xfa\xcd\x4c\x94\
\x19\xd8\x00\x51\xb6\xb3\xf3\x89\x30\x30\x32\x63\xfa\x75\xdd\xad\
\x9f\x0c\x1f\x7f\xfe\x23\xdb\x72\x06\x06\x88\xe3\xb7\xbe\xe2\x25\
\x5b\x3f\x51\x31\xc0\xcc\xce\xc5\xc0\x25\x2a\x87\x21\xfe\x94\x81\
\x81\x61\xf2\x43\xb2\xed\xa6\x0a\x18\xf2\x79\x60\xd4\x03\x03\x0d\
\x46\x3d\x30\xd0\x80\xa8\x52\x68\x6e\x80\x0c\x83\x9e\x38\x07\xad\
\xdd\x82\x02\x2e\xbe\xf8\xc1\x90\xb2\xf1\x09\x41\x75\x44\xc5\x00\
\xbd\x1d\xcf\xc0\xc0\xc0\xa0\x2f\x41\x9c\x9d\x23\x23\x09\xa1\x83\
\x0b\x37\xef\x52\xdb\x1d\x0c\x0c\x0c\x0c\x0c\x06\xea\xca\x24\xeb\
\x19\x99\x31\x40\x4e\x48\xd1\x0a\x8c\xcc\x18\x18\xcd\x03\x54\x04\
\xa3\x79\x60\xa0\xc1\x68\x1e\x18\x68\x30\x9a\x07\x06\x1a\x10\xe5\
\x81\x8b\x2f\x7e\xd0\xda\x1d\x18\xe0\x02\x91\x76\x12\x95\x84\x88\
\x69\x97\x0f\x14\x18\xf2\x49\x68\x14\x8c\x82\x51\x30\x0a\x06\x16\
\x00\x00\x4b\xd4\x4c\x8b\x21\x06\x80\xa6\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x7a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x01\x2f\x49\x44\x41\x54\x68\x81\xed\xd8\x31\x4a\
\x03\x41\x14\xc6\xf1\xff\x0e\xb6\xe2\x01\x8c\x8d\x60\xb0\xd3\x23\
\xa4\xb7\x17\x8b\x6d\x44\x14\x24\xa5\x8d\x07\x51\x30\x95\x8d\x82\
\xe8\x01\xc4\x42\xc9\x35\x4c\x61\x95\x56\x24\xb1\x75\x7d\x36\x16\
\x12\x43\x36\x33\x4e\x7c\x2c\x7c\xbf\x72\xd8\x99\x37\x1f\x6f\x66\
\x61\x17\x44\x44\x3c\x15\x39\x16\x29\x8f\x9f\x76\x08\xa1\x07\xac\
\xd6\x3c\x3a\xb4\x82\xa3\xeb\xb3\xce\x7d\x8e\xba\x00\x21\xcf\x2a\
\xe1\x82\xfa\xcd\x03\xb4\x82\xd1\xcb\x52\xf3\xdb\x52\xa6\x75\x5a\
\x00\x57\xe7\x9d\x99\x1d\x2d\xbb\x7d\x33\x58\xcb\x54\x13\xc8\xd5\
\x01\x47\x51\x77\xa0\xec\xf6\x6d\x51\x1b\x81\xfa\x0e\x4e\xd3\xf8\
\x0e\x34\x3e\xc0\x5c\x2d\x5b\xf4\xd1\x99\x14\x73\x94\x1a\xdf\x01\
\x05\xf0\x16\xf5\xda\xba\xbc\x19\xfc\xba\x0b\x83\x97\x11\x00\xed\
\xf5\x95\x3f\x8d\x03\xec\xef\xb5\xf5\x1a\x6d\x1c\x05\xf0\xa6\x00\
\xde\x14\xc0\x9b\x02\x78\x53\x00\x6f\x0a\xe0\x4d\x01\xbc\x29\x80\
\x37\x05\xf0\x16\xfb\x77\xfa\x1d\x58\xfe\x39\x30\xed\xe3\x3c\x65\
\x1c\x18\x45\xee\x05\x88\xee\x40\xf1\x98\x52\x64\x1e\x66\xf6\x90\
\x32\x2f\x2a\xc0\x67\xa8\x4e\x81\xb7\x94\x42\x33\x19\xaf\x55\x15\
\x4e\x52\xa6\x46\x05\x38\xd8\xdd\x7c\x36\x0b\x5b\xc0\x1d\x30\x4e\
\x29\x38\x61\x6c\x66\xb7\x1f\x55\xb1\x7d\x58\x6e\x0c\x33\xac\x27\
\x22\xf2\xcf\xbe\x00\xb5\xdc\x41\x2a\xc6\x97\x9f\x87\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x08\
\x07\xd8\x59\x87\
\x00\x70\
\x00\x61\x00\x67\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x09\x28\xa3\xa7\
\x00\x74\
\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\x90\x5a\xe7\
\x00\x66\
\x00\x6f\x00\x72\x00\x6d\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x04\xb5\xc7\x67\
\x00\x70\
\x00\x72\x00\x65\x00\x62\x00\x75\x00\x69\x00\x6c\x00\x74\x00\x50\x00\x61\x00\x67\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\xc1\x59\x87\
\x00\x6f\
\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x0c\xad\x0f\x07\
\x00\x64\
\x00\x65\x00\x6c\x00\x65\x00\x74\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x06\
\x07\xc3\x57\x47\
\x00\x75\
\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0b\x07\x5a\x27\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\xe1\x5a\x27\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0b\x9c\x8f\xe7\
\x00\x65\
\x00\x6e\x00\x74\x00\x72\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x05\x79\x4e\x27\
\x00\x73\
\x00\x61\x00\x76\x00\x65\x00\x5f\x00\x61\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x09\x00\xca\xc7\
\x00\x64\
\x00\x72\x00\x6f\x00\x70\x00\x5f\x00\x73\x00\x63\x00\x68\x00\x65\x00\x6d\x00\x61\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x08\xd8\xb0\xc7\
\x00\x6d\
\x00\x65\x00\x72\x00\x67\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x08\x97\x84\x87\
\x00\x63\
\x00\x68\x00\x61\x00\x72\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x07\xa7\x57\x87\
\x00\x61\
\x00\x64\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x08\xc8\x58\x67\
\x00\x73\
\x00\x61\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0b\x85\x83\x07\
\x00\x63\
\x00\x6c\x00\x65\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x11\x00\x00\x00\x02\
\x00\x00\x00\x54\x00\x00\x00\x00\x00\x01\x00\x00\x04\xbc\
\x00\x00\x01\x00\x00\x00\x00\x00\x00\x01\x00\x00\x18\x22\
\x00\x00\x00\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x03\x6d\
\x00\x00\x00\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x07\xc6\
\x00\x00\x00\xd2\x00\x00\x00\x00\x00\x01\x00\x00\x15\x57\
\x00\x00\x01\x70\x00\x00\x00\x00\x00\x01\x00\x00\x27\x22\
\x00\x00\x00\xaa\x00\x00\x00\x00\x00\x01\x00\x00\x10\x9e\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x58\x00\x00\x00\x00\x00\x01\x00\x00\x23\x6e\
\x00\x00\x01\x84\x00\x00\x00\x00\x00\x01\x00\x00\x2c\x42\
\x00\x00\x01\x40\x00\x00\x00\x00\x00\x01\x00\x00\x1f\xb8\
\x00\x00\x01\x1c\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xc3\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x01\xec\
\x00\x00\x00\xbc\x00\x00\x00\x00\x00\x01\x00\x00\x12\xb8\
\x00\x00\x01\x9a\x00\x00\x00\x00\x00\x01\x00\x00\x2d\xed\
\x00\x00\x00\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x17\x79\
\x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00\x09\x51\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x11\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x54\x00\x00\x00\x00\x00\x01\x00\x00\x04\xbc\
\x00\x00\x01\x71\x91\xcb\x52\x8e\
\x00\x00\x01\x00\x00\x00\x00\x00\x00\x01\x00\x00\x18\x22\
\x00\x00\x01\x71\x91\xca\x54\x6e\
\x00\x00\x00\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x03\x6d\
\x00\x00\x01\x71\x91\xc9\x5e\xfa\
\x00\x00\x00\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x07\xc6\
\x00\x00\x01\x71\xa6\x88\x6b\xeb\
\x00\x00\x00\xd2\x00\x00\x00\x00\x00\x01\x00\x00\x15\x57\
\x00\x00\x01\x71\x9c\x4f\x0d\x73\
\x00\x00\x01\x70\x00\x00\x00\x00\x00\x01\x00\x00\x27\x22\
\x00\x00\x01\x71\x9c\x4f\xe7\xe7\
\x00\x00\x00\xaa\x00\x00\x00\x00\x00\x01\x00\x00\x10\x9e\
\x00\x00\x01\x71\x9c\x4e\xcf\xd3\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x71\x91\xcc\x32\x42\
\x00\x00\x01\x58\x00\x00\x00\x00\x00\x01\x00\x00\x23\x6e\
\x00\x00\x01\x71\x91\xca\x9b\x46\
\x00\x00\x01\x84\x00\x00\x00\x00\x00\x01\x00\x00\x2c\x42\
\x00\x00\x01\x71\x91\xca\x13\x2e\
\x00\x00\x01\x40\x00\x00\x00\x00\x00\x01\x00\x00\x1f\xb8\
\x00\x00\x01\x71\xa6\x89\x04\xc8\
\x00\x00\x01\x1c\x00\x00\x00\x00\x00\x01\x00\x00\x1b\xc3\
\x00\x00\x01\x71\xa6\xa0\x8b\x8f\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x01\xec\
\x00\x00\x01\x71\x91\xc9\x13\x42\
\x00\x00\x00\xbc\x00\x00\x00\x00\x00\x01\x00\x00\x12\xb8\
\x00\x00\x01\x71\x9c\x53\x85\x7e\
\x00\x00\x01\x9a\x00\x00\x00\x00\x00\x01\x00\x00\x2d\xed\
\x00\x00\x01\x71\xa6\x8d\xb7\x52\
\x00\x00\x00\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x17\x79\
\x00\x00\x01\x71\x91\xcb\xcc\x92\
\x00\x00\x00\x90\x00\x00\x00\x00\x00\x01\x00\x00\x09\x51\
\x00\x00\x01\x71\x9c\x4f\xa9\x1f\
"
# pyrcc emits two resource-table layouts; Qt >= 5.8 expects the v2 format
# (which carries per-entry last-modified timestamps), older Qt the v1 format.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2


def qInitResources():
    """Register the embedded icon data with Qt's resource system (:/icons/...)."""
    QtCore.qRegisterResourceData(
        rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
    )


def qCleanupResources():
    """Unregister the embedded resources registered by qInitResources()."""
    QtCore.qUnregisterResourceData(
        rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
    )


# resources are registered as a side effect of importing this module.
qInitResources()
| StarcoderdataPython |
# standard library
import sys
import re
# internal project modules
from vertex import Vertex
from read_file import read_file, extract
class Graph:
    '''
    this class creates a graph or digraph.
    a graph is like a tree, but allows cycles and loops.
    its methods allow certain checks and passes on the graph.
    '''

    def __init__(self, filepath=None):
        '''
        takes in an optional filepath.
        if none is given, a user can input the filepath.
        by the end of it all, the graph's parameters are found.
        '''
        # check up on the option param.
        if not filepath:
            filepath = input('input the filepath to a graph: ')
        # run extract from read_file.
        # extract() yields the graph kind (compared against 'digraph' in
        # get_edges) and a mapping of vertex key -> Vertex object whose
        # `.edges` dict maps neighbor -> edge weight (or a falsy value
        # when the graph is unweighted).
        self.type, self.graph = extract(filepath)

    def __repr__(self):
        '''
        a graph has many fascets.
        its important to properly represent them all.
        this gives an overview of a graph's properties,
        especially when printed to terminal.
        '''
        # grab variables for our summary.
        graph_type = self.type
        vert_count = self.count_vertices()
        edge_count = self.count_edges()
        edge_list = self.textify_edges()
        # summary is a multi-line output.
        # (adjacent string literals are concatenated across the
        # backslash continuations.)
        summary = '' \
            f'Graph Type: {graph_type}\n' \
            f'# Vertices: {vert_count}\n' \
            f'# Edges: {edge_count}\n' \
            f'Edge List:\n{edge_list}\n'
        return summary

    def count_vertices(self):
        '''
        returns the number of vertices in the graph.
        '''
        return len(self.graph)

    def count_edges(self):
        '''
        returns the number of edges in the graph.
        '''
        return len(self.get_edges())

    def get_edges(self):
        '''
        retrieves every single edge in the graph.
        in a digraph, pointing two ways counts as two edges.
        in a graph, pointing two ways counts as just one edge.
        this is due to the nature of these types of graphs.
        ---
        root_vrtx represents the root vertex
        root_edges lists all edges of root_vrtx
        ngbr_vrtx represents the neighbor vertex
        edge_weight is the weight of the edge
        '''
        edge_list = []
        for vertex in self.graph:
            # root_vrtx is the starting vertex
            root_vrtx = vertex
            # root_edges is the edge list of root_vrtx vertex
            root_edges = self.graph[vertex]
            for ngbr_vrtx in root_edges.edges:
                # check if this vertex combo is worth adding:
                # for an undirected graph, only keep the ordered pair
                # (ngbr >= root) so each edge is listed once; a digraph
                # keeps every directed pair.
                if ngbr_vrtx >= root_vrtx or self.type == 'digraph':
                    # edge_weight is the weight of the edge
                    edge_weight = root_edges.edges[ngbr_vrtx]
                    # check if this graph has weights
                    if edge_weight:
                        edge = [root_vrtx, ngbr_vrtx, edge_weight]
                    else:
                        edge = [root_vrtx, ngbr_vrtx]
                    # add edge to edge_list
                    edge_list.append(edge)
        return edge_list

    def textify_edges(self):
        '''
        this function groups up each edge;
        then represents them in a string.
        ---
        each edge is represented by one of these:
        (start, finish)
        (start, finish, weight)
        ---
        a digraph has both A->B and B->A if it goes two ways.
        a graph has just A<->B to represent a two way edge.
        '''
        # get full list of edges from a helper function.
        edge_list = self.get_edges()
        # initialize empty return value
        final_string = ''
        # loop through all edges and clean them up
        for edge in edge_list:
            # stringifying integers and floats
            edge = list(str(item) for item in edge)
            # adding string information
            edge_string = ','.join(edge)
            final_string += '('
            final_string += edge_string
            final_string += ')\n'
        # return final value without extra end spaces.
        return final_string.strip()

    def shortest_path_bfs(self, start, finish):
        '''
        this finds the shortest path using breadth-first search.
        returns the list of vertices from start to finish (inclusive),
        or an empty list when finish is unreachable.
        ---
        start: given starting node
        finish: given finishing node
        between: arbitrary iterated node
        ---
        queue: an in-order line of vertices to visit
        visited: set of visited vertices
        vertices: every single vertex in the graph
        '''
        vertices = self.graph
        # create vertex queue, and start with vertex start
        # (insert(0, ...) + pop() from the end gives FIFO order;
        # a collections.deque would make both ends O(1))
        queue = [[start]]  # HACK not a real queue
        # create visited set, and start with vertex start
        visited = {start}
        # raise a key error if start or finish is not in the dict.
        if start not in vertices:
            raise KeyError(start)
        if finish not in vertices:
            raise KeyError(finish)
        while queue != []:
            # dequeue first vertex
            # HACK change later for non-array
            start_list = queue.pop()
            start = start_list[-1]
            # check a condition
            if start == finish:
                return start_list
            # add its neighbors to the queue
            for between in vertices[start].edges:
                if between in visited:
                    pass
                else:
                    # visit the vertex
                    visited.add(between)
                    # HACK change later for non-array
                    between_list = start_list[:]
                    between_list.append(between)
                    queue.insert(0, between_list)
        else:
            # while/else: runs only when the queue drains without
            # returning above.
            # if it reaches the end without returning,
            # start is on a graph island from finish.
            return []

    def shortest_path_dfs(self, start, finish, path=None, solutions=None):
        '''
        this returns a boolean based on whether two nodes
        are connected (True) or disconnected (False).
        NOTE(review): despite the docstring above, the code actually
        returns the shortest path found (a list of vertices), or []
        when no path exists -- confirm intended contract.
        '''
        vertices = self.graph
        if not solutions:
            solutions = []
        else:
            # copy so this recursion level does not mutate the caller's
            # accumulator in place.
            solutions = solutions.copy()
        if not path:
            path = [start]
        # raise key error if start/finish are not vertices.
        if start not in vertices:
            raise KeyError(start)
        if finish not in vertices:
            raise KeyError(finish)
        # iterate through neighbors.
        for between in vertices[start].edges:
            # check if between is the finish line.
            if between == finish:
                path.append(between)
                solutions.append(path)
            # check if between is already in path.
            elif between in path:
                pass
            # otherwise recursively call the new path.
            else:
                new_path = path.copy()
                new_path.append(between)
                new_path = self.shortest_path_dfs(
                    between, finish, new_path, solutions)
                # if its not empty, its a success!
                if new_path != []:
                    solutions.append(new_path)
        # we now have an array of our solutions.
        if len(solutions) == 0:
            # there is no solution :(
            return []
        else:
            # we have many solutions.
            # pick the shortest one!
            min_path = min(solutions, key=lambda x: len(x))
            return min_path

    def nth_degree_neighbors(self, A, N):
        '''
        given a node, this function finds every neighbor
        that is no closer than n steps away,
        but is not further than n steps away.
        ---
        its worded this way to show that, if your neighbor
        is next door, you can't say they are 100 miles away
        by driving 100 miles in a circle to their house.
        they are considered just next door.
        ---
        returns the list of BFS paths (each a list of N vertices,
        counting A itself) to those neighbors.
        '''
        N = int(N)
        vertices = self.graph
        # create visited set, and visit vertex A
        visited = {A}
        # create vertex queue, and start with vertex A
        queue = [[A]]  # HACK not a real queue
        # degree trackers:
        # good_degrees holds paths of exactly N vertices,
        # bad_degrees everything shorter or longer.
        good_degrees = []
        bad_degrees = []
        while queue != []:
            # dequeue first vertex
            # HACK change later for non-array
            a_list = queue.pop()
            A = a_list[-1]
            # check a condition
            if len(a_list) == N:
                good_degrees.append(a_list)
            else:
                bad_degrees.append(a_list)
            # add its neighbors to the queue
            for C in vertices[A].edges:
                if C in visited:
                    pass
                else:
                    # visit the vertex
                    visited.add(C)
                    # HACK change later for non-array
                    n_list = a_list[:]
                    n_list.append(C)
                    queue.insert(0, n_list)
        # degree trackers
        bad_set = set()
        good_set = set()
        good_final = []
        # bad stuff: any node reachable in fewer than N vertices is
        # "closer than N steps" and must be excluded below.
        for bad in bad_degrees:
            if len(bad) < N:
                bad_set.add(bad[-1])
        # good stuff
        for good in good_degrees:
            # cant have duplicates
            if good[-1] in good_set:
                pass
            # okay its not a duplicate!
            else:
                good_set.add(good[-1])
                # there's a faster path to this node
                if good[-1] in bad_set:
                    pass
                # this is a good node!
                else:
                    good_final.append(good)
        return good_final

    def find_largest_clique(self):
        '''
        entry point for the recursive clique search: explores every
        maximal set of mutually-adjacent vertices and returns them as
        a set of sorted tuples.
        NOTE(review): the name says "largest" but the result is the
        full collection of maximal cliques found -- confirm intent.
        '''
        # loop through every vertex in graph, named parent
        all_nodes = self.graph
        print(all_nodes)  # NOTE(review): leftover debug output
        result = self.get_deepest_clique(
            all_nodes, set(all_nodes), set())
        # ==HACK== flattens nested results
        return set(tuple(sorted(item)) for item in result)

    def get_deepest_clique(self, neighbors, valid_neighbors, visited):
        '''
        recursive helper for find_largest_clique.
        neighbors: candidate vertices adjacent to the last vertex added.
        valid_neighbors: vertices adjacent to everything in `visited`.
        visited: the clique built so far on this recursion branch.
        returns either a tuple (a finished clique, base case) or a set
        of such tuples gathered from deeper calls.
        '''
        all_nodes = self.graph
        # prepare return value
        cliques = set()
        BASE = True
        # validate: only vertices adjacent to every clique member so far
        # remain candidates.
        new_valid_neighbors = set(neighbors) & valid_neighbors
        # check base case
        # loop through every valid vertex
        for vertex in new_valid_neighbors:
            if vertex in visited:
                # don't redundently visit vertices
                pass
            else:
                # turn off base case
                BASE = False
                # copy visited to alter in fresh function stack
                new_visited = visited.copy()
                # visit the vertex in this timeline
                new_visited.add(vertex)
                # get new neighbors
                # NOTE(review): this local name shadows the imported
                # Vertex class for the rest of this scope.
                Vertex = all_nodes[vertex]
                new_neighbors = Vertex.edges
                # function call
                result = self.get_deepest_clique(
                    new_neighbors, new_valid_neighbors, new_visited)
                # ==HACK==
                # this part is pretty janky
                # just trying to eliminate nested tuples
                if isinstance(result, tuple):
                    if isinstance(result[0], tuple):
                        for real_result in result:
                            cliques.add(real_result)
                    else:
                        cliques.add(result)
                elif isinstance(result, set):
                    for real_result in result:
                        cliques.add(real_result)
                else:
                    cliques.add(result)
        if BASE:
            # no extendable candidate remained: `visited` is a finished
            # clique on this branch.
            visited = tuple(visited)
            print(visited)  # NOTE(review): leftover debug output
            return visited
        else:
            return cliques

    def eulerian_degree(self):
        '''
        this function simply ensures that every
        vertex has an even number of neighbors.
        '''
        # check every vertex in the graph.
        for vertex in self.graph:
            # see if it has an even # of edges.
            if len(self.graph[vertex].edges) % 2 == 0:
                pass
            else:
                # it is not eulerian if it doesnt.
                return False
        else:
            # for/else: if the loop ends, it is eulerian.
            return True
| StarcoderdataPython |
274978 | import os
from glob import glob
from nltk import ToktokTokenizer
import numpy as np
from nltk.translate.bleu_score import SmoothingFunction
from fast_bleu import *
# Build the fast_bleu extension in place before benchmarking.
# NOTE(review): shelling out at import time is a side effect — consider
# guarding this under ``if __name__ == '__main__':``.
os.system("set -x; python setup.py build_ext --build-lib=./")
# min_n = 2
# Highest n-gram order used for BLEU; all orders share a uniform weight.
max_n = 5
weights = np.ones(max_n) / float(max_n)
def nltk_org_bleu(refs, hyps):
    """Score each hypothesis against all references with NLTK's
    ``sentence_bleu`` (method1 smoothing, epsilon=0.1), using the
    module-level uniform ``weights``. Returns a list of floats."""
    from nltk.translate.bleu_score import sentence_bleu
    return [sentence_bleu(refs, hyp, weights=weights, smoothing_function=SmoothingFunction(epsilon=1. / 10).method1)
            for hyp in hyps]
def nltk_bleu(refs, hyps):
    """Score hypotheses with the project's ``old_metrics`` BLEU wrapper,
    configured the same way as ``nltk_org_bleu`` for comparison."""
    from old_metrics.bleu import Bleu
    bleu = Bleu(refs, weights, smoothing_function=SmoothingFunction(epsilon=1. / 10).method1)
    return bleu.get_score(hyps)
    # return [1. for hyp in hyps]
def cpp_bleu(refs, hyps):
    """Score hypotheses with the fast_bleu C++ BLEU implementation.

    Builds uniform weight vectors for n-gram orders 2..5 and returns
    only the scores for the module-level ``max_n`` order (5)."""
    w = {i: list(np.ones(i) / (i)) for i in range(2, 6)}
    bleu = BLEU(refs, w, verbose=True)
    return bleu.get_score(hyps)[max_n]
def nltk_self_bleu(refs, hyps):
    """Compute Self-BLEU over ``refs`` with the old NLTK-based wrapper.

    ``hyps`` is accepted only to match the benchmark call signature;
    Self-BLEU scores the reference corpus against itself."""
    from old_metrics.self_bleu import SelfBleu
    bleu = SelfBleu(refs, weights, smoothing_function=SmoothingFunction(
        epsilon=1. / 10).method1, verbose=False)
    res = bleu.get_score()
    # Free the wrapper eagerly; it can hold large per-sentence state.
    del bleu
    return res
    # return [1. for hyp in hyps]
def cpp_self_bleu(refs, hyps):
    """Compute Self-BLEU over ``refs`` with the fast_bleu C++ backend,
    returning only the ``max_n``-order scores. ``hyps`` is unused but
    kept so the benchmark can call every scorer uniformly."""
    from fast_bleu.__python_wrapper__ import SelfBLEU
    w = {i: list(np.ones(i) / (i)) for i in range(2, 6)}
    bleu = SelfBLEU(refs, w, verbose=True)
    res = bleu.get_score()
    # Free the wrapper eagerly; it can hold large per-sentence state.
    del bleu
    return res[max_n]
def get_execution_time(func, refs=None, hyps=None):
    """
    Time a scoring function and return its result plus wall-clock duration.

    Previously this function silently ignored its parameter list and
    always read the module globals; the inputs are now explicit
    parameters that default to those globals, so existing
    ``get_execution_time(func)`` callers keep working.

    Parameters
    ----------
    func : callable
        Scorer taking ``(refs, hyps)`` and returning a sequence of scores.
    refs : list, optional
        Reference token lists; defaults to the module-level ``ref_tokens``.
    hyps : list, optional
        Hypothesis token lists; defaults to the module-level ``test_tokens``.

    Returns
    -------
    tuple of (numpy.ndarray, float)
        The scores as an array and the elapsed time in seconds.
    """
    import time
    if refs is None:
        refs = ref_tokens
    if hyps is None:
        hyps = test_tokens
    start = time.time()
    result = np.array(func(refs, hyps))
    end = time.time()
    return result, end - start
def compare(nltk_func, cpp_func):
    """Run both scorers on the module-level corpora, print the summed
    absolute score difference and the cpp speedup factor."""
    cpp_result, cpp_time = get_execution_time(cpp_func)
    nltk_result, nltk_time = get_execution_time(nltk_func)
    # Structured array with one row per hypothesis: nltk score, cpp score,
    # and their absolute difference.
    # NOTE(review): ``np.core.records`` is deprecated in recent numpy —
    # ``np.rec.fromarrays`` is the supported spelling; verify before upgrading.
    all_in_one = np.core.records.fromarrays([nltk_result, cpp_result, np.abs(nltk_result - cpp_result)],
                                            names='nltk,cpp,diff')
    # print(all_in_one)
    print('sum diff ' + str(np.sum(all_in_one.diff)))
    print('nltk: {}, cpp: {}, cpp speedup: {}'.format(
        nltk_time, cpp_time, float(nltk_time) / cpp_time))
# Tokenize both corpora once, then run the nltk-vs-cpp comparison.
tokenizer = ToktokTokenizer().tokenize
ref_tokens = []
test_tokens = []
# References. NOTE(review): no encoding= on open(); relies on the
# platform default — confirm the data files are ASCII/UTF-8.
with open('data/t.txt') as file:
    # with open('data/coco60-test.txt') as file:
    lines = file.readlines()
    for line in lines:
        ref_tokens.append(tokenizer(line))
# Hypotheses.
with open('data/g.txt') as file:
    # with open('data/coco60-train.txt') as file:
    lines = file.readlines()
    for line in lines:
        test_tokens.append(tokenizer(line))
print('tokenized!')
compare(nltk_org_bleu, cpp_bleu)
# compare(nltk_bleu, cpp_bleu)
# compare(nltk_self_bleu, cpp_self_bleu)
# res, ti = get_execution_time(cpp_bleu)
# res, ti = get_execution_time(cpp_self_bleu)
# res = np.mean(res)
# print(res, ti) | StarcoderdataPython |
6413427 | """Utility implementations for docstrings.
"""
import inspect
import os
import re
from enum import Enum
from inspect import Signature
from typing import Any
from typing import Callable
from typing import List
from typing import Match
from typing import Optional
from typing import Pattern
from typing import Tuple
from typing import Type
from typing import TypeVar
# Keyword that marks an HTML comment as a docstring-path directive,
# e.g. ``<!-- Docstring: apysc._display.stage.Stage.__init__ -->``.
_DOCSTRING_PATH_COMMENT_KEYWORD: str = 'Docstring:'
# Regex matching a whole docstring-path comment; the ``path`` group
# captures the package path between the keyword and the comment close.
_DOCSTRING_PATH_COMMENT_PATTERN: str = (
    rf'^\<\!\-\-.*?{_DOCSTRING_PATH_COMMENT_KEYWORD}'
    r'(?P<path>.*?)\-\-\>'
)
# Matches the hyphen underline of a NumPy-style docstring section header.
_HYPHENS_LINE_PATTERN: str = r'\s{4}-----'
class _SectionPattern(Enum):
    """Regular-expression patterns matching NumPy-style docstring
    section headers, each indented by four spaces."""
    PARAMETERS = r'\s{4}Parameters$'
    RETURNS = r'\s{4}Returns$'
    YIELDS = r'\s{4}Yields$'
    RECEIVES = r'\s{4}Receives$'
    RAISES = r'\s{4}Raises$'
    WARNS = r'\s{4}Warns$'
    WARNINGS = r'\s{4}Warnings$'
    SEE_ALSO = r'\s{4}See Also$'
    NOTES = r'\s{4}Notes$'
    REFERENCES = r'\s{4}References$'
    EXAMPLES = r'\s{4}Examples$'
    ATTRIBUTES = r'\s{4}Attributes$'
    METHODS = r'\s{4}Methods$'
def get_docstring_src_module_paths(md_file_path: str) -> List[str]:
    """
    Get docstring source module paths from a specified
    markdown file path.

    Parameters
    ----------
    md_file_path : str
        Target markdown file path.

    Returns
    -------
    module_paths : list of str
        Extracted docstring source module paths, as relative
        ``./pkg/module.py`` file paths.
    """
    from apysc._file import file_util
    from apysc._file import module_util
    md_txt: str = file_util.read_txt(file_path=md_file_path)
    lines: List[str] = md_txt.splitlines()
    module_paths: List[str] = []
    pattern: Pattern = re.compile(
        pattern=_DOCSTRING_PATH_COMMENT_PATTERN)
    for line in lines:
        # Cheap pre-filter: directives are always HTML comments.
        if not line.startswith('<!--'):
            continue
        match: Optional[Match] = pattern.search(string=line)
        if match is None:
            continue
        # Drop the trailing callable name, keeping the module/class path.
        package_path: str = match.group(1).rsplit('.', maxsplit=1)[0]
        package_path = package_path.strip()
        module_or_class: Any = module_util.\
            read_module_or_class_from_package_path(
                module_or_class_package_path=package_path)
        # If the path pointed at a class, step up once more to its module.
        if inspect.isclass(module_or_class):
            package_path = package_path.rsplit('.', maxsplit=1)[0]
        module_path: str = package_path.replace('.', '/')
        module_path = f'./{module_path}.py'
        module_paths.append(module_path)
    return module_paths
def reset_replaced_docstring_section(*, md_file_path: str) -> bool:
    """
    Reset converted a markdown's docstring section.

    Parameters
    ----------
    md_file_path : str
        Target markdown document file path.

    Returns
    -------
    is_executed : bool
        Replacing is executed or not (False when the document has no
        docstring-path comments).
    """
    from apysc._file import file_util
    md_txt: str = file_util.read_txt(file_path=md_file_path)
    matches: List[str] = _get_docstring_path_comment_matches(md_txt=md_txt)
    if not matches:
        return False
    md_txt = _remove_replaced_docstring_section_from_md_txt(
        md_txt=md_txt, matches=matches)
    # NOTE(review): no encoding= on open(); presumably the docs are UTF-8
    # and file_util.save_plain_txt is used elsewhere — confirm and unify.
    with open(md_file_path, 'w') as f:
        f.write(md_txt)
    return True
def _remove_replaced_docstring_section_from_md_txt(
        *, md_txt: str, matches: List[str]) -> str:
    """
    Remove replaced docstring from a specified markdown text.

    Every line between a docstring-path comment and the next
    heading (a line starting with ``#``) is dropped; the comment
    itself and the heading are kept.

    Parameters
    ----------
    md_txt : str
        Target markdown text.
    matches : list of str
        Matched docstring path specification comments.

    Returns
    -------
    md_txt : str
        Result markdown text.
    """
    lines: List[str] = md_txt.splitlines()
    result_lines: List[str] = []
    is_reset_section_range: bool = False
    for line in lines:
        if is_reset_section_range:
            # Skip generated lines until the next markdown heading.
            if line.startswith('#'):
                result_lines.append(f'\n{line}')
                is_reset_section_range = False
            continue
        docstring_path_specification_comment: str = \
            _extract_docstring_path_specification_comment_from_line(
                line=line, matches=matches)
        if docstring_path_specification_comment != '':
            # Keep the directive comment and start skipping its output.
            result_lines.append(line)
            is_reset_section_range = True
            continue
        result_lines.append(line)
    md_txt = '\n'.join(result_lines)
    return md_txt
def _extract_docstring_path_specification_comment_from_line(
*, line: str, matches: List[str]) -> str:
"""
Extract a docstring path specification comment
from a specified markdown line text.
Parameters
----------
line : str
Target markdown line text.
matches : list of str
Matched docstring path specification comments.
Returns
-------
docstring_path_specification_comment : str
Extracted comment string.
"""
for match in matches:
if match in line:
return match
return ''
def _get_docstring_path_comment_matches(*, md_txt: str) -> List[str]:
    """
    Get matched docstring path specification comments.

    Parameters
    ----------
    md_txt : str
        Target markdown text.

    Returns
    -------
    matches : list of str
        Matched comments (full matched text of each directive).
    """
    found = re.finditer(
        pattern=_DOCSTRING_PATH_COMMENT_PATTERN,
        string=md_txt,
        flags=re.MULTILINE)
    return [single_match.group(0) for single_match in found]
def replace_docstring_path_specification(*, md_file_path: str) -> None:
    """
    Replace a docstring path specification in a specified
    markdown document by a converted docstring text.

    Each directive comment is kept in place; a blank line and the
    generated markdown are inserted directly after it, then the
    whole document is saved back to the same path.

    Parameters
    ----------
    md_file_path : str
        Target markdown file path.
    """
    from apysc._file import file_util
    md_txt: str = file_util.read_txt(file_path=md_file_path)
    lines: List[str] = md_txt.splitlines()
    result_lines: List[str] = []
    for line in lines:
        match: Optional[Match] = re.search(
            pattern=_DOCSTRING_PATH_COMMENT_PATTERN, string=line)
        if match is not None:
            result_lines.append(line)
            result_lines.append('')
            markdown_format_docstring: str = \
                _convert_docstring_path_comment_to_markdown_format(
                    docstring_path_comment=match.group(0),
                    md_file_path=md_file_path,
                )
            result_lines.append(markdown_format_docstring)
            continue
        result_lines.append(line)
        # NOTE(review): this trailing continue is redundant.
        continue
    md_txt = '\n'.join(result_lines)
    file_util.save_plain_txt(
        txt=md_txt, file_path=md_file_path)
class _DocstringPathNotFoundError(Exception):
    """Raised when a docstring-path comment points at a module or
    class that cannot be imported."""
    pass
class _DocstringCallableNotExistsError(Exception):
    """Raised when the resolved module or class has no attribute with
    the callable name given by a docstring-path comment."""
    pass
def _convert_docstring_path_comment_to_markdown_format(
        *, docstring_path_comment: str,
        md_file_path: str) -> str:
    """
    Convert a specified docstring path comment to a
    markdown format text.

    Parameters
    ----------
    docstring_path_comment : str
        Target docstring path comment.
    md_file_path : str
        Target markdown file path.

    Returns
    -------
    markdown_format_docstring : str
        Converted text (empty string when the target has no docstring).
    """
    module_or_class_package_path: str
    callable_name: str
    module_or_class_package_path, callable_name = \
        _extract_package_path_and_callable_name_from_path(
            docstring_path_comment=docstring_path_comment,
        )
    callable_: Callable = _get_callable_from_package_path_and_callable_name(
        module_or_class_package_path=module_or_class_package_path,
        callable_name=callable_name,
    )
    if callable_.__doc__ is None:
        return ''
    if callable(callable_):
        signature: Optional[Signature] = inspect.signature(callable_)
        callable_name = callable_.__name__
    else:
        # Non-callable attribute (e.g. a property object): no signature
        # line is rendered for it.
        signature = None
        callable_name = ''
    markdown_format_docstring: str = _convert_docstring_to_markdown(
        docstring=callable_.__doc__,
        signature=signature,
        callable_name=callable_name,
        md_file_path=md_file_path)
    return markdown_format_docstring
def _get_callable_from_package_path_and_callable_name(
        *, module_or_class_package_path: str,
        callable_name: str) -> Callable:
    """
    Get a callable object from a specified package path and
    callable name.

    Parameters
    ----------
    module_or_class_package_path : str
        Target module or class package path.
    callable_name : str
        Target callable name.

    Raises
    ------
    _DocstringPathNotFoundError
        If a specified package path's module or class
        does not exist.
    _DocstringCallableNotExistsError
        If a target module or class does not have a specified
        name function or method.

    Returns
    -------
    callable_ : Callable
        Target callable object.
    """
    from apysc._file import module_util
    try:
        module_or_class: Any = \
            module_util.read_module_or_class_from_package_path(
                module_or_class_package_path=module_or_class_package_path)
    except Exception:
        raise _DocstringPathNotFoundError(
            'Could not found module or class of the docstring path.'
            f'\nModule or class package path: {module_or_class_package_path}'
        )
    try:
        callable_: Callable = getattr(module_or_class, callable_name)
    except Exception:
        # Bug fix: the message parts were previously separated by a comma,
        # which passed two arguments to the exception and rendered the
        # message as a tuple fragment. Concatenate into one message.
        raise _DocstringCallableNotExistsError(
            "Specified docstring path's module or class does not have "
            'a target callable attribute.'
            f'\nModule or class package path: {module_or_class_package_path}'
            f'\nCallable name: {callable_name}')
    return callable_
def _convert_docstring_to_markdown(
        *, docstring: str,
        signature: Optional[Signature],
        callable_name: str,
        md_file_path: str) -> str:
    """
    Convert a specified docstring to a markdown format text.

    Parameters
    ----------
    docstring : str
        Target docstring.
    signature : Signature or None
        Target callable's signature. If a target interface
        is property, this argument becomes None.
    callable_name : str
        Target callable name.
    md_file_path : str
        Target markdown file path.

    Returns
    -------
    markdown : str
        Converted markdown text.
    """
    # Extract every docstring section up front...
    summary: str = _extract_summary_from_docstring(docstring=docstring)
    parameters: List[_Parameter] = \
        _extract_param_or_rtn_values_from_docstring(
            target_type=_Parameter, docstring=docstring)
    returns: List[_Return] = _extract_param_or_rtn_values_from_docstring(
        target_type=_Return, docstring=docstring)
    raises: List[_Raise] = _extract_raise_values_from_docstring(
        docstring=docstring)
    notes: str = _extract_notes_from_docstring(docstring=docstring)
    examples: List[_Example] = _extract_example_values_from_docstring(
        docstring=docstring)
    references: List[_Reference] = _extract_reference_values_from_docstring(
        docstring=docstring)
    # ...dropping references that point back at this very document.
    references = _slice_references_by_md_file_path(
        references=references, md_file_path=md_file_path)
    markdown: str = (
        '<span class="inconspicuous-txt">Note: the document '
        'build script generates and updates this '
        'API document section automatically. Maybe this section '
        'is duplicated compared with previous sections.</span>')
    if signature is not None:
        markdown += (
            f'\n\n**[Interface signature]** `{callable_name}{signature}`<hr>'
        )
    # Append each section in the fixed output order; every appender is a
    # no-op when its section is empty.
    markdown = _append_summary_to_markdown(
        markdown=markdown, summary=summary)
    markdown = _append_params_or_rtns_to_markdown(
        markdown=markdown, params_or_rtns=parameters)
    markdown = _append_params_or_rtns_to_markdown(
        markdown=markdown, params_or_rtns=returns)
    markdown = _append_raises_to_markdown(markdown=markdown, raises=raises)
    markdown = _append_notes_to_markdown(markdown=markdown, notes=notes)
    markdown = _append_examples_to_markdown(
        markdown=markdown, examples=examples)
    markdown = _append_references_to_markdown(
        markdown=markdown, references=references)
    # The last appended section leaves a dangling separator.
    markdown = _remove_trailing_hr_tag(markdown=markdown)
    return markdown
def _remove_trailing_hr_tag(*, markdown: str) -> str:
"""
Remove a trailing `<hr>` tag from a specified markdown string.
Parameters
----------
markdown : str
Target markdown string.
Returns
-------
markdown : str
Result markdown string.
"""
markdown = markdown.strip()
hr_tag: str = '<hr>'
if markdown.endswith(hr_tag):
markdown = markdown[:-len(hr_tag)]
markdown = markdown.strip()
return markdown
def _append_summary_to_markdown(*, markdown: str, summary: str) -> str:
"""
Append a interface summary string to a specified markdown string.
Parameters
----------
markdown : str
Target markdown string.
summary : str
Target summary string.
Returns
-------
markdown : str
Result markdown string.
"""
if summary == '':
return markdown
if markdown != '':
markdown += '\n\n'
markdown += f'**[Interface summary]** {summary}<hr>'
return markdown
def _append_notes_to_markdown(*, markdown: str, notes: str) -> str:
"""
Append a notes string to a specified markdown string.
Parameters
----------
markdown : str
Target markdown string.
notes : str
Target notes string.
Returns
-------
markdown : str
Result markdown string.
"""
if notes == '':
return markdown
if markdown != '':
markdown += '\n\n'
markdown += (
'**[Notes]**'
f'\n\n{notes}<hr>'
)
return markdown
def _extract_notes_from_docstring(*, docstring: str) -> str:
    """
    Extract a notes value from a docstring.

    Parameters
    ----------
    docstring : str
        Target docstring.

    Returns
    -------
    notes : str
        Extract notes text value, collapsed to a single line.
    """
    lines: List[str] = docstring.splitlines()
    lines = _remove_blank_lines_from_list(lines=lines)
    is_notes_section_range: bool = False
    # NOTE(review): base_indent_num is computed each iteration but never
    # read in this function — apparently copied from the sibling
    # extractors; consider removing.
    base_indent_num: int = 0
    notes_lines: List[str] = []
    for line in lines:
        base_indent_num = _get_base_indent_num_if_not_set(
            line=line, base_indent_num=base_indent_num)
        if _is_target_section_pattern_line(
                line=line,
                section_pattern=_SectionPattern.NOTES):
            is_notes_section_range = True
            continue
        if _is_skip_target_line(
                is_target_section_range=is_notes_section_range,
                line=line):
            continue
        # Stop at the header of the next docstring section.
        if _is_section_line(line=line):
            break
        notes_lines.append(line)
    notes: str = '\n'.join(notes_lines)
    notes = _remove_line_breaks_and_unnecessary_spaces(text=notes)
    return notes
class _ParamOrRtnBase:
    """Base class for a single documented parameter or return value
    (name, type string, and description).

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes
    instances unhashable — confirm they are never used in sets/dicts.
    """
    # Value name, e.g. 'md_file_path'.
    _name: str
    # Type annotation text, e.g. 'str'.
    _type_str: str
    # Free-form description text.
    _description: str
    def __init__(
            self, *, name: str, type_str: str, description: str) -> None:
        """
        Parameter or return value's base class.

        Parameters
        ----------
        name : str
            Parameter or return value name.
        type_str : str
            Parameter or return value type name.
        description : str
            Parameter or return value description.
        """
        self._name = name
        self._type_str = type_str
        self._description = description
    def __eq__(self, other: Any) -> bool:
        """
        The method for equality comparison.

        Parameters
        ----------
        other : Any
            Other instance to compare with.

        Returns
        -------
        result : bool
            If each attribute is equal to the other, this
            method returns True.
        """
        # NOTE(review): isinstance against the base class means a
        # _Parameter can compare equal to a _Return with the same fields.
        if not isinstance(other, _ParamOrRtnBase):
            return False
        if self.name != other.name:
            return False
        if self.type_str != other.type_str:
            return False
        if self.description != other.description:
            return False
        return True
    @property
    def name(self) -> str:
        """
        Get a parameter or return value name.

        Returns
        -------
        name : str
            A parameter or return value name.
        """
        return self._name
    @property
    def type_str(self) -> str:
        """
        Get a parameter or return value type name.

        Returns
        -------
        type_str : str
            A parameter or return value type name.
        """
        return self._type_str
    @property
    def description(self) -> str:
        """
        Get a parameter or return value description.

        Returns
        -------
        description : str
            A parameter or return value description.
        """
        return self._description
class _Parameter(_ParamOrRtnBase):
    """Parameter value type: one entry of a docstring's
    Parameters section.
    """
class _Return(_ParamOrRtnBase):
    """Return value type: one entry of a docstring's
    Returns section.
    """
class _Raise:
    """Raise value type: one entry of a docstring's Raises section
    (error class name plus the condition description).
    """
    # Error class name, e.g. 'ValueError'.
    _err_class_name: str
    # Description of the condition that raises the error.
    _description: str
    def __init__(self, *, err_class_name: str, description: str) -> None:
        """
        Raise value type.

        Parameters
        ----------
        err_class_name : str
            Target error class name.
        description : str
            Error condition description.
        """
        self._err_class_name = err_class_name
        self._description = description
    @property
    def err_class_name(self) -> str:
        """
        Get a target error class name.

        Returns
        -------
        err_class_name : str
            A target error class name.
        """
        return self._err_class_name
    @property
    def description(self) -> str:
        """
        Get an error condition description.

        Returns
        -------
        description : str
            An error condition description.
        """
        return self._description
    def __eq__(self, other: Any) -> bool:
        """
        The method for equality comparison.

        Parameters
        ----------
        other : Any
            Other value to compare with.

        Returns
        -------
        result : bool
            If each attribute is equal to the other, this
            method returns True.
        """
        if not isinstance(other, _Raise):
            return False
        if self.err_class_name != other.err_class_name:
            return False
        # NOTE(review): compares the private attribute where the sibling
        # classes use the property — harmless but inconsistent.
        if self._description != other._description:
            return False
        return True
class _Reference:
    """Reference value type: one entry of a docstring's References
    section (page label plus URL).
    """
    # Human-readable reference page label.
    _page_label: str
    # Reference page URL.
    _url: str
    def __init__(self, *, page_label: str, url: str) -> None:
        """
        Reference value type.

        Parameters
        ----------
        page_label : str
            Target reference page label.
        url : str
            Target reference page URL.
        """
        self._page_label = page_label
        self._url = url
    @property
    def page_label(self) -> str:
        """
        Get a target reference page label.

        Returns
        -------
        page_label : str
            A target reference page label.
        """
        return self._page_label
    @property
    def url(self) -> str:
        """
        Get a target reference page URL.

        Returns
        -------
        url : str
            A target reference page URL.
        """
        return self._url
    def __eq__(self, other: Any) -> bool:
        """
        The method for equality comparison.

        Parameters
        ----------
        other : Any
            Other value to compare with.

        Returns
        -------
        result : bool
            If each attribute is equal to the other, this
            method returns True.
        """
        if not isinstance(other, _Reference):
            return False
        if self.page_label != other.page_label:
            return False
        if self.url != other.url:
            return False
        return True
class _Example:
    """Example value type: one doctest example (input code block plus
    its optional expected output line).
    """
    # Doctest input lines ('>>> ' / '... ') joined by newlines.
    _input_code_block: str
    # Expected output line; empty string when the example has none.
    _expected_output: str
    def __init__(
            self, *, input_code_block: str,
            expected_output: str = '') -> None:
        """
        Example value type.

        Parameters
        ----------
        input_code_block : str
            Input code block string.
        expected_output : str, default ''
            Expected output string.
        """
        self._input_code_block = input_code_block
        self._expected_output = expected_output
    @property
    def input_code_block(self) -> str:
        """
        Get an input code block string.

        Returns
        -------
        input_code_block : str
            An input code block string.
        """
        return self._input_code_block
    @property
    def expected_output(self) -> str:
        """
        Get an expected output string.

        Returns
        -------
        expected_output : str
            An expected output string.
        """
        return self._expected_output
    def __eq__(self, other: Any) -> bool:
        """
        The method for equality comparison.

        Parameters
        ----------
        other : Any
            Other value to compare with.

        Returns
        -------
        result : bool
            If each attribute is equal to the other, this
            method returns True.
        """
        if not isinstance(other, _Example):
            return False
        if self.input_code_block != other.input_code_block:
            return False
        if self.expected_output != other.expected_output:
            return False
        return True
def _slice_references_by_md_file_path(
references: List[_Reference],
md_file_path: str) -> List[_Reference]:
"""
Slice a specified references list to exclude a same
URL's document file.
Parameters
----------
references : list of _Reference
Target references list to slice.
md_file_path : str
Target markdown file path.
Returns
-------
sliced_references : list of _Reference
Sliced list.
"""
md_file_name: str = os.path.basename(md_file_path)
md_file_name = md_file_name.rsplit('.', maxsplit=1)[0]
sliced_references: List[_Reference] = []
for reference in references:
reference_file_name: str = reference.url.rsplit('/', 1)[-1]
reference_file_name = reference_file_name.rsplit(
'.', maxsplit=1)[0]
if reference_file_name == md_file_name:
continue
sliced_references.append(reference)
return sliced_references
def _append_examples_to_markdown(
*, markdown: str, examples: List[_Example]) -> str:
"""
Append examples to a specified markdown string.
Parameters
----------
markdown : str
Target markdown string.
examples : list of _Example
Examples list value to append to.
Returns
-------
markdown : str
Result markdown string.
"""
if not examples:
return markdown
if markdown != '':
markdown += '\n\n'
markdown += '**[Examples]**\n\n```py'
for i, example in enumerate(examples):
if i != 0:
markdown += '\n'
markdown += (
f'\n{example.input_code_block}'
)
if example.expected_output != '':
markdown += f'\n{example.expected_output}'
markdown += '\n```'
markdown += '\n\n<hr>'
return markdown
def _extract_example_values_from_docstring(
        *, docstring: str) -> List[_Example]:
    """
    Extract example values from a docstring.

    Parameters
    ----------
    docstring : str
        Target docstring.

    Returns
    -------
    example_values : list of _Example
        Extracted example values; a non-prompt line closes the
        current example as its expected output.
    """
    lines: List[str] = docstring.splitlines()
    lines = _remove_blank_lines_from_list(lines=lines)
    is_example_section_range: bool = False
    input_code_block_lines: List[str] = []
    example_values: List[_Example] = []
    for line in lines:
        if _is_target_section_pattern_line(
                line=line,
                section_pattern=_SectionPattern.EXAMPLES):
            is_example_section_range = True
            continue
        if _is_skip_target_line(
                is_target_section_range=is_example_section_range,
                line=line):
            continue
        # Stop at the header of the next docstring section.
        if _is_section_line(line=line):
            break
        if _is_example_output_line(line=line):
            # Output line terminates the accumulated input block.
            _make_example_and_append_to_list(
                example_values=example_values,
                input_code_block_lines=input_code_block_lines,
                expected_output=line)
            continue
        input_code_block_lines.append(line)
    # Flush a trailing example that had no output line.
    _make_example_and_append_to_list(
        example_values=example_values,
        input_code_block_lines=input_code_block_lines,
        expected_output='')
    return example_values
def _make_example_and_append_to_list(
        *,
        example_values: List[_Example],
        input_code_block_lines: List[str],
        expected_output: str) -> None:
    """
    Make an example value and append it to a specified list.

    Notes
    -----
    This function clears a list of input code block lines.

    Parameters
    ----------
    example_values : list of _Example
        A list to append an example value.
    input_code_block_lines : list of str
        A list of input code block lines (no-op when empty).
    expected_output : str
        An expected output string.
    """
    if not input_code_block_lines:
        return
    # Strip per-line indentation before joining into one block.
    stripped_lines = (line.strip() for line in input_code_block_lines)
    code_block: str = '\n'.join(stripped_lines)
    example_values.append(
        _Example(
            input_code_block=code_block,
            expected_output=expected_output.strip()))
    input_code_block_lines.clear()
def _is_example_output_line(*, line: str) -> bool:
"""
Get a boolean indicating whether a specified line is
example section's output line or not.
Parameters
----------
line : str
Target docstring line.
Returns
-------
result : bool
This function return True if a specified line is
example section's output line.
"""
line = line.strip()
if line.startswith('>>>') or line.startswith('...'):
return False
return True
def _append_references_to_markdown(
markdown: str, references: List[_Reference]) -> str:
"""
Append references to a specified markdown string.
Parameters
----------
markdown : str
Target markdown string.
references : list of _Reference
References list value to append to.
Returns
-------
markdown : str
Result markdown string.
"""
if not references:
return markdown
if markdown != '':
markdown += '\n\n'
markdown += '**[References]**\n'
for reference in references:
markdown += (
f'\n- [{reference.page_label}]({reference.url})'
)
markdown += '\n\n<hr>'
return markdown
def _append_raises_to_markdown(
*, markdown: str, raises: List[_Raise]) -> str:
"""
Append raises to a specified markdown string.
Parameters
----------
markdown : str
Target markdown string.
raises : list of _Raise
Raises list value to append to.
Returns
-------
markdown : str
Result markdown string.
"""
if not raises:
return markdown
if markdown != '':
markdown += '\n\n'
markdown += '**[Raises]**\n'
for raise_ in raises:
markdown += (
f'\n- {raise_.err_class_name}: {raise_.description}'
)
markdown += '\n\n<hr>'
return markdown
# Constrained TypeVar: a value that is either a _Parameter or a _Return.
_ParamOrRtn = TypeVar('_ParamOrRtn', _Parameter, _Return)
def _append_params_or_rtns_to_markdown(
        *, markdown: str,
        params_or_rtns: List[_ParamOrRtn]) -> str:
    """
    Append parameters or returns to a specified markdown string.

    Parameters
    ----------
    markdown : str
        Target markdown string.
    params_or_rtns : list of _ParamOrRtn
        Parameters or returns to append to (no-op when empty).

    Returns
    -------
    markdown : str
        Result markdown string.
    """
    if not params_or_rtns:
        return markdown
    # The list is homogeneous, so the first element decides the label.
    is_parameters: bool = isinstance(params_or_rtns[0], _Parameter)
    section_label: str = 'Parameters' if is_parameters else 'Returns'
    parts: List[str] = []
    if markdown != '':
        parts.append(markdown)
        parts.append('\n\n')
    parts.append(f'**[{section_label}]**\n')
    for value in params_or_rtns:
        parts.append(
            f'\n- `{value.name}`: {value.type_str}'
            f'\n - {value.description}')
    parts.append('\n\n<hr>')
    return ''.join(parts)
def _extract_reference_values_from_docstring(
        *, docstring: str) -> List[_Reference]:
    """
    Extract reference values from a docstring.

    Lines at the section's base indent are page labels; deeper-indented
    lines are the corresponding URLs.

    Parameters
    ----------
    docstring : str
        Target docstring.

    Returns
    -------
    reference_values : list of _Reference
        Extracted reference values.
    """
    lines: List[str] = docstring.splitlines()
    lines = _remove_blank_lines_from_list(lines=lines)
    is_references_section_range: bool = False
    page_label: str = ''
    url: str = ''
    base_indent_num: int = 0
    reference_values: List[_Reference] = []
    for line in lines:
        current_indent_num: int = _get_indent_num_from_line(line=line)
        base_indent_num = _get_base_indent_num_if_not_set(
            line=line, base_indent_num=base_indent_num)
        if _is_target_section_pattern_line(
                line=line,
                section_pattern=_SectionPattern.REFERENCES):
            is_references_section_range = True
            continue
        if _is_skip_target_line(
                is_target_section_range=is_references_section_range,
                line=line):
            continue
        # Stop at the header of the next docstring section.
        if _is_section_line(line=line):
            break
        if current_indent_num == base_indent_num:
            # Base-indent line: a new page label.
            page_label = _remove_unnecessary_markdown_list_from_line(
                line=line)
            continue
        # Deeper-indented line: the URL belonging to the current label.
        url = _remove_unnecessary_markdown_list_from_line(line=line)
        _make_reference_and_append_to_list(
            reference_values=reference_values,
            page_label=page_label, url=url)
        url = ''
    # Flush (no-op when url is empty, which is always true here after
    # the loop resets it — kept for safety).
    _make_reference_and_append_to_list(
        reference_values=reference_values,
        page_label=page_label, url=url)
    return reference_values
def _make_reference_and_append_to_list(
        *,
        reference_values: List[_Reference],
        page_label: str,
        url: str) -> None:
    """
    Make a reference value and append it to a specified list.

    Parameters
    ----------
    reference_values : list of _Reference
        A list to append a reference value.
    page_label : str
        Target reference page label.
    url : str
        Target reference page URL (no-op when empty).
    """
    if not url:
        return
    reference_values.append(
        _Reference(page_label=page_label, url=url))
def _remove_unnecessary_markdown_list_from_line(
*, line: str) -> str:
"""
Remove unnecessary markdown list string from a line.
Parameters
----------
line : str
Target docstring line.
Returns
-------
line : str
Result docstring line.
"""
line = line.replace('- ', '', 1)
line = line.strip()
return line
def _extract_raise_values_from_docstring(*, docstring: str) -> List[_Raise]:
    """
    Extract raise values from a docstring.

    Lines at the section's base indent are error class names;
    deeper-indented lines are their descriptions.

    Parameters
    ----------
    docstring : str
        Target docstring.

    Returns
    -------
    raise_values : list of _Raise
        Extracted raise values.
    """
    lines: List[str] = docstring.splitlines()
    lines = _remove_blank_lines_from_list(lines=lines)
    is_raises_section_range: bool = False
    err_class_name: str = ''
    base_indent_num: int = 0
    description_lines: List[str] = []
    raise_values: List[_Raise] = []
    for line in lines:
        current_indent_num: int = _get_indent_num_from_line(line=line)
        base_indent_num = _get_base_indent_num_if_not_set(
            line=line, base_indent_num=base_indent_num)
        if _is_target_section_pattern_line(
                line=line,
                section_pattern=_SectionPattern.RAISES):
            is_raises_section_range = True
            continue
        if _is_skip_target_line(
                is_target_section_range=is_raises_section_range,
                line=line):
            continue
        # Stop at the header of the next docstring section.
        if _is_section_line(line=line):
            break
        if current_indent_num == base_indent_num:
            # New error class name: flush the previous entry first
            # (no-op when no description lines are accumulated).
            _make_raise_description_and_append_to_list(
                raise_values=raise_values,
                err_class_name=err_class_name,
                description_lines=description_lines,
            )
            err_class_name = line.strip()
            continue
        description_lines.append(line)
    # Flush the trailing entry.
    _make_raise_description_and_append_to_list(
        raise_values=raise_values,
        err_class_name=err_class_name,
        description_lines=description_lines,
    )
    return raise_values
def _remove_blank_lines_from_list(*, lines: List[str]) -> List[str]:
"""
Remove blank lines from a list of lines.
Parameters
----------
lines : list of str
Target list of lines.
Returns
-------
result_lines : list of str
A lines list which removed blank lines.
"""
result_lines: List[str] = []
for line in lines:
if line.strip() == '':
continue
result_lines.append(line)
return result_lines
def _get_base_indent_num_if_not_set(
        *, line: str, base_indent_num: int) -> int:
    """
    Get a base indent number from line if it is not set.

    Parameters
    ----------
    line : str
        Target docstring line.
    base_indent_num : int
        Current base indent number.

    Returns
    -------
    base_indent_num : int
        If the base_indent_num argument is zero, this function
        returns the current line indent number. Otherwise, it
        returns the same value of the base_indent_num argument.
    """
    if base_indent_num:
        return base_indent_num
    return _get_indent_num_from_line(line=line)
def _make_raise_description_and_append_to_list(
        *,
        raise_values: List[_Raise],
        err_class_name: str,
        description_lines: List[str]) -> None:
    """
    Make a raise value description from a list of lines and
    append raise value to a specified list.

    Notes
    -----
    This function clears a list of description lines.

    Parameters
    ----------
    raise_values : list of _Raise
        A list to append a raise value.
    err_class_name : str
        Target error class name.
    description_lines : list of str
        A list of description lines (no-op when empty).
    """
    if not description_lines:
        return
    joined_description: str = '\n'.join(description_lines)
    normalized_description: str = \
        _remove_line_breaks_and_unnecessary_spaces(
            text=joined_description)
    raise_values.append(
        _Raise(
            err_class_name=err_class_name,
            description=normalized_description))
    description_lines.clear()
def _extract_param_or_rtn_values_from_docstring(
        *, target_type: Type[_ParamOrRtn],
        docstring: str) -> List[_ParamOrRtn]:
    """
    Extract parameter or return values from a docstring.
    Parameters
    ----------
    target_type : Type
        Target type of the _Parameter or _Return.
    docstring : str
        Target docstring.
    Returns
    -------
    param_or_rtn_values : list of _Parameter or _Return
        Extracted parameter or return values.
    """
    lines: List[str] = docstring.splitlines()
    lines = _remove_blank_lines_from_list(lines=lines)
    # Becomes True once the `Parameters`/`Returns` heading is passed.
    is_param_or_rtn_section_range: bool = False
    value_name: str = ''
    value_type_str: str = ''
    # Indent level of the `name : type` lines; 0 means "not yet known".
    base_indent_num: int = 0
    description_lines: List[str] = []
    param_or_rtn_values: List[_ParamOrRtn] = []
    params_or_rtns_section_pattern: _SectionPattern = \
        _get_params_or_rtns_section_pattern_by_type(target_type=target_type)
    for line in lines:
        current_indent_num: int = _get_indent_num_from_line(line=line)
        base_indent_num = _get_base_indent_num_if_not_set(
            line=line, base_indent_num=base_indent_num)
        if _is_target_section_pattern_line(
                line=line,
                section_pattern=params_or_rtns_section_pattern):
            is_param_or_rtn_section_range = True
            continue
        if _is_skip_target_line(
                is_target_section_range=is_param_or_rtn_section_range,
                line=line):
            continue
        # Any other section heading terminates the target section.
        if _is_section_line(line=line):
            break
        if current_indent_num == base_indent_num:
            # A new `name : type` line: flush the previous value first.
            _make_prm_or_rtn_description_and_append_to_list(
                target_type=target_type,
                param_or_rtn_values=param_or_rtn_values,
                value_name=value_name,
                value_type_str=value_type_str,
                description_lines=description_lines,
            )
            value_name, value_type_str = _get_value_name_and_type_from_line(
                line=line)
            continue
        # Deeper-indented lines belong to the current value's description.
        description_lines.append(line)
    # Flush the last pending value after the loop ends.
    _make_prm_or_rtn_description_and_append_to_list(
        target_type=target_type,
        param_or_rtn_values=param_or_rtn_values,
        value_name=value_name,
        value_type_str=value_type_str,
        description_lines=description_lines,
    )
    return param_or_rtn_values
def _is_skip_target_line(
*, is_target_section_range: bool, line: str) -> bool:
"""
Get a boolean indicating whether a specified line
is skipping target or not.
Parameters
----------
is_target_section_range : bool
A boolean indicating whether a specified line
is in a range of target section.
line : str
Target docstring line.
Returns
-------
result : bool
A boolean indicating whether a specified line
is skipping target or not.
"""
if not is_target_section_range:
return True
if _is_hyphens_line(line=line):
return True
return False
def _get_params_or_rtns_section_pattern_by_type(
        *,
        target_type: Type[_ParamOrRtnBase]) -> _SectionPattern:
    """
    Get the parameters or returns section pattern
    of a specified type.
    Parameters
    ----------
    target_type : _Parameter or _Return
        Target type.
    Returns
    -------
    pattern : _SectionPattern
        Target section pattern.
    Raises
    ------
    ValueError
        If an invalid target type is provided.
    """
    # Dispatch table instead of an if-chain; any unknown type falls
    # through to the ValueError below.
    pattern_by_type = {
        _Parameter: _SectionPattern.PARAMETERS,
        _Return: _SectionPattern.RETURNS,
    }
    if target_type in pattern_by_type:
        return pattern_by_type[target_type]
    raise ValueError(
        f'Invalid type argument is provided: {target_type}')
def _make_prm_or_rtn_description_and_append_to_list(
        *,
        target_type: Type[_ParamOrRtn],
        param_or_rtn_values: List[_ParamOrRtn],
        value_name: str,
        value_type_str: str,
        description_lines: List[str]) -> None:
    """
    Make a parameter or return value description from a list of
    lines and append parameter or return value to a specified list.
    Notes
    -----
    This function clears a list of description lines.
    Parameters
    ----------
    target_type : Type
        Target type of the _Parameter or _Return.
    param_or_rtn_values : list of _ParamOrRtnBase
        A list to append a parameter or return value.
    value_name : str
        Parameter or return value name.
    value_type_str : str
        Parameter or return type name.
    description_lines : list of str
        A list of description lines.
    """
    # Nothing to flush when no description has been collected yet
    # (e.g., on the first `name : type` line of a section).
    if not description_lines:
        return
    description: str = '\n'.join(description_lines)
    description = _remove_line_breaks_and_unnecessary_spaces(
        text=description)
    param_or_rtn: _ParamOrRtn = target_type(
        name=value_name, type_str=value_type_str,
        description=description)
    param_or_rtn_values.append(param_or_rtn)
    # Reset the shared buffer so the caller can reuse it for the next value.
    description_lines.clear()
def _get_indent_num_from_line(*, line: str) -> int:
"""
Get an indent number from a specified docstring line.
Parameters
----------
line : str
Target docstring line.
Returns
-------
indent_num : int
Indent number of a specified docstring line.
"""
spaces: int = 0
for char in line:
if char != ' ':
break
spaces += 1
indent_num: int = spaces // 4
return indent_num
def _get_value_name_and_type_from_line(*, line: str) -> Tuple[str, str]:
"""
Get a parameter or return value and type from
a specified line.
Parameters
----------
line : str
Target docstring line.
Returns
-------
value_name : str
Target parameter or return value name.
type_name : str
Target parameter or return value type name.
"""
if ':' not in line:
return '', ''
splitted: List[str] = line.split(':', maxsplit=1)
value_name: str = splitted[0].strip()
type_name: str = splitted[1].strip()
return value_name, type_name
def _is_hyphens_line(*, line: str) -> bool:
    """
    Get a boolean indicating whether a specified line is
    a hyphens line or not.
    Parameters
    ----------
    line : str
        Target docstring line.
    Returns
    -------
    result : bool
        If a specified line is a hyphens line, this function
        returns True.
    """
    return re.search(
        pattern=_HYPHENS_LINE_PATTERN, string=line) is not None
def _is_target_section_pattern_line(
        *,
        line: str,
        section_pattern: _SectionPattern) -> bool:
    """
    Get a boolean indicating whether a specified line
    is matching with a target section pattern or not.
    Parameters
    ----------
    line : str
        Target docstring line.
    section_pattern : _SectionPattern
        Target section pattern.
    Returns
    -------
    result : bool
        If a specified line matches the target section pattern,
        this function returns True.
    """
    # The enum value holds the regular expression for the heading.
    return re.search(
        pattern=section_pattern.value, string=line) is not None
def _extract_summary_from_docstring(*, docstring: str) -> str:
    """
    Extract a summary text from a docstring.
    Parameters
    ----------
    docstring : str
        Target docstring.
    Returns
    -------
    summary : str
        Extracted summary text.
    Notes
    -----
    This function converts line break to a space.
    """
    summary_lines: List[str] = []
    for docstring_line in docstring.splitlines():
        # The summary ends right before the first section heading
        # (e.g., `Parameters`).
        if _is_section_line(line=docstring_line):
            break
        summary_lines.append(docstring_line)
    return _remove_line_breaks_and_unnecessary_spaces(
        text='\n'.join(summary_lines))
def _remove_line_breaks_and_unnecessary_spaces(*, text: str) -> str:
    """
    Remove line breaks to a single space and unnecessary
    spaces (e.g., double spaces and leading and trailing spaces).
    Parameters
    ----------
    text : str
        Target text.
    Returns
    -------
    text : str
        Converted text.
    """
    # Imported locally, presumably to avoid a circular import at
    # module load time — TODO confirm.
    from apysc._string import string_util
    text = text.strip()
    text = text.replace('\n', ' ')
    text = string_util.replace_double_spaces_to_single_space(
        string=text)
    # Strip again: replacing line breaks with spaces can leave new
    # leading/trailing spaces.
    text = text.strip()
    return text
def _is_section_line(*, line: str) -> bool:
    """
    Get a boolean indicating whether a specified docstring line
    is a section line or not.
    Parameters
    ----------
    line : str
        Target docstring line text.
    Returns
    -------
    result : bool
        If a specified docstring line is section line, this
        function returns True.
    """
    # True if the line matches any known section heading pattern.
    return any(
        re.search(pattern=pattern.value, string=line) is not None
        for pattern in _SectionPattern)
def _extract_package_path_and_callable_name_from_path(
        *, docstring_path_comment: str) -> Tuple[str, str]:
    """
    Extract a module or class package path and callable
    name from a specified path comment.
    Parameters
    ----------
    docstring_path_comment : str
        Target docstring path comment.
    Returns
    -------
    module_or_class_package_path : str
        Extracted module or class package path.
        e.g., 'apy.path' or 'any.path.AnyClass'.
    callable_name : str
        Extracted callable name.
    """
    path: str = _extract_path_from_docstring_comment(
        docstring_path_comment=docstring_path_comment)
    # Split on the right-most dot: everything before it is the package
    # path and the trailing component is the callable name. A dotless
    # path yields a pair of empty strings.
    package_path, separator, callable_name = path.rpartition('.')
    if separator == '':
        return '', ''
    return package_path, callable_name
def _extract_path_from_docstring_comment(
        *, docstring_path_comment: str) -> str:
    """
    Extract a path string from a specified docstring path comment.
    Parameters
    ----------
    docstring_path_comment : str
        Target docstring path comment.
    Returns
    -------
    path : str
        Extracted path string. An empty string is returned when
        the comment does not match the expected pattern.
    """
    matched: Optional[Match] = re.search(
        pattern=_DOCSTRING_PATH_COMMENT_PATTERN,
        string=docstring_path_comment)
    if matched is None:
        return ''
    return matched.group(1).strip()
| StarcoderdataPython |
3493567 | <reponame>CaptainE/lcnn<filename>bts/pytorch/bts_test.py
# Copyright (C) 2019 <NAME>
#
# This file is a part of BTS.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from __future__ import absolute_import, division, print_function
import os
import argparse
import time
import numpy as np
import cv2
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
from bts_dataloader import *
import errno
import matplotlib.pyplot as plt
from tqdm import tqdm
from bts_dataloader import *
def convert_arg_line_to_args(arg_line):
    """Yield the non-blank whitespace-separated tokens of one line
    from an ``@``-prefixed argument file (argparse hook)."""
    yield from (arg for arg in arg_line.split() if arg.strip())
# ---------------------------------------------------------------------------
# Command-line interface. Arguments can also be supplied via an "@file"
# (fromfile_prefix_chars) using the custom line-splitting hook above.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='BTS PyTorch implementation.', fromfile_prefix_chars='@')
parser.convert_arg_line_to_args = convert_arg_line_to_args
parser.add_argument('--model_name', type=str, help='model name', default='bts_nyu_v2')
parser.add_argument('--encoder', type=str, help='type of encoder, vgg or desenet121_bts or densenet161_bts',
                    default='resnet50_bts')
parser.add_argument('--data_path', type=str, help='path to the data', required=True)
parser.add_argument('--filenames_file', type=str, help='path to the filenames text file', required=True)
parser.add_argument('--input_height', type=int, help='input height', default=480)
parser.add_argument('--input_width', type=int, help='input width', default=640)
parser.add_argument('--max_depth', type=float, help='maximum depth in estimation', default=80)
parser.add_argument('--checkpoint_path', type=str, help='path to a specific checkpoint to load', default='')
parser.add_argument('--dataset', type=str, help='dataset to train on, make3d or nyudepthv2', default='nyu')
parser.add_argument('--do_kb_crop', help='if set, crop input images as kitti benchmark images', action='store_true')
parser.add_argument('--save_lpg', help='if set, save outputs from lpg layers', action='store_true')
parser.add_argument('--bts_size', type=int, help='initial num_filters in bts', default=512)
# A single CLI argument is interpreted as an "@file" containing all options.
if sys.argv.__len__() == 2:
    arg_filename_with_prefix = '@' + sys.argv[1]
    args = parser.parse_args([arg_filename_with_prefix])
else:
    args = parser.parse_args()
# Make the checkpoint's directory importable so the model module named by
# --model_name can be loaded from next to the checkpoint.
model_dir = os.path.dirname(args.checkpoint_path)
sys.path.append(model_dir)
# Dynamically import the model module and copy its non-dunder attributes
# into this module's namespace (vars() at module level is globals()).
# NOTE(review): the `test` function below relies on names such as
# BtsModel being injected here — fragile, consider an explicit import.
for key, val in vars(__import__(args.model_name)).items():
    if key.startswith('__') and key.endswith('__'):
        continue
    vars()[key] = val
def get_num_lines(file_path):
    """Return the number of lines in the text file at *file_path*.

    Parameters
    ----------
    file_path : str
        Path of the file whose lines are counted.

    Returns
    -------
    int
        Number of lines in the file.
    """
    # Use a context manager so the handle is closed even if reading
    # raises (the original leaked the handle on error).
    with open(file_path, 'r') as f:
        return len(f.readlines())
def test(params):
    """Test function.

    Runs inference over every sample listed in ``args.filenames_file``
    and writes raw 16-bit depth PNGs plus (optionally) colormapped
    visualisations of the intermediate outputs under
    ``result_<model_name>/``.

    Parameters
    ----------
    params : object
        NOTE(review): unused — the function reads the module-level
        ``args`` namespace instead. Consider removing or honoring it.
    """
    args.mode = 'test'
    dataloader = BtsDataLoader(args, 'test')
    # Build the model and restore weights from the checkpoint.
    model = BtsModel(params=args)
    model = torch.nn.DataParallel(model)
    checkpoint = torch.load(args.checkpoint_path)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    model.cuda()
    num_test_samples = get_num_lines(args.filenames_file)
    with open(args.filenames_file) as f:
        lines = f.readlines()
    print('now testing {} files with {}'.format(num_test_samples, args.checkpoint_path))
    pred_depths = []
    pred_8x8s = []
    pred_4x4s = []
    pred_2x2s = []
    pred_1x1s = []
    start_time = time.time()
    # Inference loop: collect the final depth map and the intermediate
    # lpg-layer outputs (cf. --save_lpg) for every sample.
    with torch.no_grad():
        for _, sample in enumerate(tqdm(dataloader.data)):
            image = Variable(sample['image'].cuda())
            focal = Variable(sample['focal'].cuda())
            # Predict
            lpg8x8, lpg4x4, lpg2x2, reduc1x1, depth_est = model(image, focal)
            pred_depths.append(depth_est.cpu().numpy().squeeze())
            pred_8x8s.append(lpg8x8[0].cpu().numpy().squeeze())
            pred_4x4s.append(lpg4x4[0].cpu().numpy().squeeze())
            pred_2x2s.append(lpg2x2[0].cpu().numpy().squeeze())
            pred_1x1s.append(reduc1x1[0].cpu().numpy().squeeze())
    elapsed_time = time.time() - start_time
    # NOTE(review): "Elapesed" typo kept as-is (runtime output string).
    print('Elapesed time: %s' % str(elapsed_time))
    print('Done.')
    save_name = 'result_' + args.model_name
    print('Saving result pngs..')
    # Create the output directory tree; EEXIST is tolerated so reruns work.
    if not os.path.exists(os.path.dirname(save_name)):
        try:
            os.mkdir(save_name)
            os.mkdir(save_name + '/raw')
            os.mkdir(save_name + '/cmap')
            os.mkdir(save_name + '/rgb')
            os.mkdir(save_name + '/gt')
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    for s in tqdm(range(num_test_samples)):
        # Derive per-dataset output file names from the input list entry.
        if args.dataset == 'kitti':
            date_drive = lines[s].split('/')[1]
            filename_pred_png = save_name + '/raw/' + date_drive + '_' + lines[s].split()[0].split('/')[-1].replace(
                '.jpg', '.png')
            filename_cmap_png = save_name + '/cmap/' + date_drive + '_' + lines[s].split()[0].split('/')[
                -1].replace('.jpg', '.png')
            filename_image_png = save_name + '/rgb/' + date_drive + '_' + lines[s].split()[0].split('/')[-1]
        elif args.dataset == 'kitti_benchmark':
            filename_pred_png = save_name + '/raw/' + lines[s].split()[0].split('/')[-1].replace('.jpg', '.png')
            filename_cmap_png = save_name + '/cmap/' + lines[s].split()[0].split('/')[-1].replace('.jpg', '.png')
            filename_image_png = save_name + '/rgb/' + lines[s].split()[0].split('/')[-1]
        else:
            scene_name = lines[s].split()[0].split('/')[0]
            filename_pred_png = save_name + '/raw/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
                '.jpg', '.png')
            filename_cmap_png = save_name + '/cmap/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
                '.jpg', '.png')
            filename_gt_png = save_name + '/gt/' + scene_name + '_' + lines[s].split()[0].split('/')[1].replace(
                '.jpg', '.png')
            filename_image_png = save_name + '/rgb/' + scene_name + '_' + lines[s].split()[0].split('/')[1]
        rgb_path = os.path.join(args.data_path, './' + lines[s].split()[0])
        image = cv2.imread(rgb_path)
        if args.dataset == 'nyu':
            gt_path = os.path.join(args.data_path, './' + lines[s].split()[1])
            gt = cv2.imread(gt_path, -1).astype(np.float32) / 1000.0  # Visualization purpose only
            gt[gt == 0] = np.amax(gt)
        pred_depth = pred_depths[s]
        pred_8x8 = pred_8x8s[s]
        pred_4x4 = pred_4x4s[s]
        pred_2x2 = pred_2x2s[s]
        pred_1x1 = pred_1x1s[s]
        # Scale float depth into 16-bit PNG range: x256 for KITTI-style
        # datasets, x1000 (presumably millimetres) otherwise.
        if args.dataset == 'kitti' or args.dataset == 'kitti_benchmark':
            pred_depth_scaled = pred_depth * 256.0
        else:
            pred_depth_scaled = pred_depth * 1000.0
        pred_depth_scaled = pred_depth_scaled.astype(np.uint16)
        cv2.imwrite(filename_pred_png, pred_depth_scaled, [cv2.IMWRITE_PNG_COMPRESSION, 0])
        if args.save_lpg:
            # Crop a 10px border before saving visualisations.
            cv2.imwrite(filename_image_png, image[10:-1 - 9, 10:-1 - 9, :])
            if args.dataset == 'nyu':
                plt.imsave(filename_gt_png, np.log10(gt[10:-1 - 9, 10:-1 - 9]), cmap='Greys')
                pred_depth_cropped = pred_depth[10:-1 - 9, 10:-1 - 9]
                plt.imsave(filename_cmap_png, np.log10(pred_depth_cropped), cmap='Greys')
                pred_8x8_cropped = pred_8x8[10:-1 - 9, 10:-1 - 9]
                filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_8x8.png')
                plt.imsave(filename_lpg_cmap_png, np.log10(pred_8x8_cropped), cmap='Greys')
                pred_4x4_cropped = pred_4x4[10:-1 - 9, 10:-1 - 9]
                filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_4x4.png')
                plt.imsave(filename_lpg_cmap_png, np.log10(pred_4x4_cropped), cmap='Greys')
                pred_2x2_cropped = pred_2x2[10:-1 - 9, 10:-1 - 9]
                filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_2x2.png')
                plt.imsave(filename_lpg_cmap_png, np.log10(pred_2x2_cropped), cmap='Greys')
                pred_1x1_cropped = pred_1x1[10:-1 - 9, 10:-1 - 9]
                filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_1x1.png')
                plt.imsave(filename_lpg_cmap_png, np.log10(pred_1x1_cropped), cmap='Greys')
            else:
                plt.imsave(filename_cmap_png, np.log10(pred_depth), cmap='Greys')
                filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_8x8.png')
                plt.imsave(filename_lpg_cmap_png, np.log10(pred_8x8), cmap='Greys')
                filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_4x4.png')
                plt.imsave(filename_lpg_cmap_png, np.log10(pred_4x4), cmap='Greys')
                filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_2x2.png')
                plt.imsave(filename_lpg_cmap_png, np.log10(pred_2x2), cmap='Greys')
                filename_lpg_cmap_png = filename_cmap_png.replace('.png', '_1x1.png')
                plt.imsave(filename_lpg_cmap_png, np.log10(pred_1x1), cmap='Greys')
    return
if __name__ == '__main__':
    # Entry point: run inference using the globally parsed CLI args.
    test(args)
| StarcoderdataPython |
79387 | """
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from textwrap import dedent
import sys
from flexmock import flexmock
import pytest
import atomic_reactor.utils.koji as koji_util
from atomic_reactor import util
from atomic_reactor.utils.cachito import CachitoAPI
from atomic_reactor.constants import (
PLUGIN_BUILD_ORCHESTRATE_KEY,
REMOTE_SOURCE_TARBALL_FILENAME,
REMOTE_SOURCE_JSON_FILENAME,
)
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner, PluginFailedException
from atomic_reactor.plugins import pre_reactor_config
from atomic_reactor.plugins.build_orchestrate_build import (
WORKSPACE_KEY_OVERRIDE_KWARGS, OrchestrateBuildPlugin)
from atomic_reactor.plugins.pre_reactor_config import (
ReactorConfigPlugin, WORKSPACE_CONF_KEY, ReactorConfig)
from atomic_reactor.plugins.pre_resolve_remote_source import ResolveRemoteSourcePlugin
from atomic_reactor.source import SourceConfig
from tests.constants import MOCK_SOURCE
from tests.stubs import StubInsideBuilder, StubSource
# Koji / Cachito endpoints and identifiers shared by the tests below.
KOJI_HUB = 'http://koji.com/hub'
KOJI_TASK_ID = 123
KOJI_TASK_OWNER = 'spam'
CACHITO_URL = 'https://cachito.example.com'
CACHITO_REQUEST_ID = 98765
SECOND_CACHITO_REQUEST_ID = 98766
# Derived Cachito API URLs for the two fake source requests.
CACHITO_REQUEST_DOWNLOAD_URL = '{}/api/v1/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID)
SECOND_CACHITO_REQUEST_DOWNLOAD_URL = '{}/api/v1/{}/download'.format(CACHITO_URL,
                                                                     SECOND_CACHITO_REQUEST_ID)
CACHITO_REQUEST_CONFIG_URL = '{}/api/v1/requests/{}/configuration-files'.format(
    CACHITO_URL,
    CACHITO_REQUEST_ID
)
SECOND_CACHITO_REQUEST_CONFIG_URL = '{}/api/v1/requests/{}/configuration-files'.format(
    CACHITO_URL,
    SECOND_CACHITO_REQUEST_ID
)
CACHITO_ICM_URL = '{}/api/v1/content-manifest?requests={}'.format(
    CACHITO_URL,
    CACHITO_REQUEST_ID
)
SECOND_CACHITO_ICM_URL = '{}/api/v1/content-manifest?requests={}'.format(
    CACHITO_URL,
    SECOND_CACHITO_REQUEST_ID
)
# Fake upstream repositories the remote-source requests point at.
REMOTE_SOURCE_REPO = 'https://git.example.com/team/repo.git'
REMOTE_SOURCE_REF = 'b55c00f45ec3dfee0c766cea3d395d6e21cc2e5a'
REMOTE_SOURCE_PACKAGES = [
{
'name': 'test-package',
'type': 'npm',
'version': '0.0.1'
}
]
SECOND_REMOTE_SOURCE_REPO = 'https://git.example.com/other-team/other-repo.git'
SECOND_REMOTE_SOURCE_REF = 'd55c00f45ec3dfee0c766cea3d395d6e21cc2e5c'
CACHITO_SOURCE_REQUEST = {
'id': CACHITO_REQUEST_ID,
'repo': REMOTE_SOURCE_REPO,
'ref': REMOTE_SOURCE_REF,
'environment_variables': {
'GO111MODULE': 'on',
'GOPATH': 'deps/gomod',
'GOCACHE': 'deps/gomod',
},
'flags': ['enable-confeti', 'enable-party-popper'],
'pkg_managers': ['gomod'],
'dependencies': [
{
'name': 'github.com/op/go-logging',
'type': 'gomod',
'version': 'v0.1.1',
}
],
'packages': [
{
'name': 'github.com/spam/bacon/v2',
'type': 'gomod',
'version': 'v2.0.3'
}
],
'configuration_files': CACHITO_REQUEST_CONFIG_URL,
'content_manifest': CACHITO_ICM_URL,
'extra_cruft': 'ignored',
}
SECOND_CACHITO_SOURCE_REQUEST = {
'id': SECOND_CACHITO_REQUEST_ID,
'repo': SECOND_REMOTE_SOURCE_REPO,
'ref': SECOND_REMOTE_SOURCE_REF,
'environment_variables': {
'PIP_CERT': 'app/package-index-ca.pem',
'PIP_INDEX_URL': 'http://example-pip-index.url/stuff'
},
'flags': [],
'pkg_managers': ['pip'],
'dependencies': [
{
'name': 'click',
'type': 'pip',
'version': '5.0',
}
],
'packages': [
{
'name': 'osbs/cachito-pip-with-deps',
'type': 'pip',
'version': '1.0.0'
}
],
'configuration_files': SECOND_CACHITO_REQUEST_CONFIG_URL,
'content_manifest': SECOND_CACHITO_ICM_URL,
'extra_cruft': 'ignored',
}
REMOTE_SOURCE_JSON = {
'repo': REMOTE_SOURCE_REPO,
'ref': REMOTE_SOURCE_REF,
'environment_variables': {
'GO111MODULE': 'on',
'GOPATH': 'deps/gomod',
'GOCACHE': 'deps/gomod',
},
'flags': ['enable-confeti', 'enable-party-popper'],
'pkg_managers': ['gomod'],
'dependencies': [
{
'name': 'github.com/op/go-logging',
'type': 'gomod',
'version': 'v0.1.1',
}
],
'packages': [
{
'name': 'github.com/spam/bacon/v2',
'type': 'gomod',
'version': 'v2.0.3'
}
],
'configuration_files': CACHITO_REQUEST_CONFIG_URL,
'content_manifest': CACHITO_ICM_URL,
}
SECOND_REMOTE_SOURCE_JSON = {
'repo': SECOND_REMOTE_SOURCE_REPO,
'ref': SECOND_REMOTE_SOURCE_REF,
'environment_variables': {
'PIP_CERT': 'app/package-index-ca.pem',
'PIP_INDEX_URL': 'http://example-pip-index.url/stuff'
},
'flags': [],
'pkg_managers': ['pip'],
'dependencies': [
{
'name': 'click',
'type': 'pip',
'version': '5.0',
}
],
'packages': [
{
'name': 'osbs/cachito-pip-with-deps',
'type': 'pip',
'version': '1.0.0'
}
],
'configuration_files': SECOND_CACHITO_REQUEST_CONFIG_URL,
'content_manifest': SECOND_CACHITO_ICM_URL,
}
CACHITO_ENV_VARS_JSON = {
'GO111MODULE': {'kind': 'literal', 'value': 'on'},
'GOPATH': {'kind': 'path', 'value': 'deps/gomod'},
'GOCACHE': {'kind': 'path', 'value': 'deps/gomod'},
}
# Assert this with the CACHITO_ENV_VARS_JSON
CACHITO_BUILD_ARGS = {
'GO111MODULE': 'on',
'GOPATH': '/remote-source/deps/gomod',
'GOCACHE': '/remote-source/deps/gomod',
}
SECOND_CACHITO_ENV_VARS_JSON = {
'PIP_CERT': {'kind': 'path', 'value': 'app/package-index-ca.pem'},
'PIP_INDEX_URL': {'kind': 'literal', 'value': 'http://example-pip-index.url/stuff'},
}
@pytest.fixture
def workflow(tmpdir, user_params):
    """Build a DockerBuildWorkflow wired with stub source/builder and
    the default repo/reactor/koji mocks used throughout this module."""
    workflow = DockerBuildWorkflow(source=MOCK_SOURCE)
    # Stash the tmpdir in workflow so it can be used later
    workflow._tmpdir = tmpdir
    class MockSource(StubSource):
        # Stub source whose workdir points at the test's tmpdir.
        def __init__(self, workdir):
            super(MockSource, self).__init__()
            self.workdir = workdir
    workflow.source = MockSource(str(tmpdir))
    builder = StubInsideBuilder().for_workflow(workflow)
    builder.set_df_path(str(tmpdir))
    builder.tasker = flexmock()
    workflow.builder = flexmock(builder)
    workflow.buildstep_plugins_conf = [{'name': PLUGIN_BUILD_ORCHESTRATE_KEY}]
    # Default mocks; individual tests override these as needed.
    mock_repo_config(workflow)
    mock_reactor_config(workflow)
    mock_build_json()
    mock_koji()
    return workflow
def mock_reactor_config(workflow, data=None):
    """Install a reactor config (default: Cachito + Koji pointing at the
    test fixtures) into the workflow's plugin workspace."""
    if data is None:
        data = dedent("""\
            version: 1
            cachito:
               api_url: {}
               auth:
                   ssl_certs_dir: {}
            koji:
                hub_url: /
                root_url: ''
                auth: {{}}
            """.format(CACHITO_URL, workflow._tmpdir))
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    # The cert file only has to exist for the ssl_certs_dir validation.
    workflow._tmpdir.join('cert').write('')
    config = util.read_yaml(data, 'schemas/config.json')
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] = ReactorConfig(config)
def mock_build_json(build_json=None):
    """Stub util.get_build_json to return *build_json*; when None, a
    payload carrying the default Koji task ID label is used."""
    default_json = {'metadata': {'labels': {'koji-task-id': str(KOJI_TASK_ID)}}}
    payload = default_json if build_json is None else build_json
    flexmock(util).should_receive('get_build_json').and_return(payload)
def mock_repo_config(workflow, data=None):
    """Write a container.yaml (default: single remote_source entry) and
    reload the workflow's SourceConfig from it."""
    if data is None:
        data = dedent("""\
            remote_source:
                repo: {}
                ref: {}
            """.format(REMOTE_SOURCE_REPO, REMOTE_SOURCE_REF))
    workflow._tmpdir.join('container.yaml').write(data)
    # The repo config is read when SourceConfig is initialized. Force
    # reloading here to make usage easier.
    workflow.source.config = SourceConfig(str(workflow._tmpdir))
def mock_cachito_api_multiple_remote_sources(workflow, user=KOJI_TASK_OWNER):
    """Stub the full CachitoAPI call sequence for a build that declares
    two remote sources (gomod + pip); .ordered() enforces call order."""
    # First remote source: request the gomod repo.
    (
        flexmock(CachitoAPI)
        .should_receive("request_sources")
        .with_args(
            repo=REMOTE_SOURCE_REPO,
            ref=REMOTE_SOURCE_REF,
            user=user,
            dependency_replacements=None,
        )
        .and_return({"id": CACHITO_REQUEST_ID})
        .ordered()
    )
    # Second remote source: request the pip repo.
    (
        flexmock(CachitoAPI)
        .should_receive("request_sources")
        .with_args(
            repo=SECOND_REMOTE_SOURCE_REPO,
            ref=SECOND_REMOTE_SOURCE_REF,
            user=user,
            dependency_replacements=None,
        )
        .and_return({"id": SECOND_CACHITO_REQUEST_ID})
        .ordered()
    )
    # Both requests complete and return their finished source requests.
    (
        flexmock(CachitoAPI)
        .should_receive("wait_for_request")
        .with_args({"id": CACHITO_REQUEST_ID})
        .and_return(CACHITO_SOURCE_REQUEST)
        .ordered()
    )
    (
        flexmock(CachitoAPI)
        .should_receive("wait_for_request")
        .with_args({"id": SECOND_CACHITO_REQUEST_ID})
        .and_return(SECOND_CACHITO_SOURCE_REQUEST)
        .ordered()
    )
    # Download URL + tarball download for the first source.
    (
        flexmock(CachitoAPI)
        .should_receive("assemble_download_url")
        .with_args(CACHITO_SOURCE_REQUEST)
        .and_return(CACHITO_REQUEST_DOWNLOAD_URL)
        .ordered()
    )
    (
        flexmock(CachitoAPI)
        .should_receive("download_sources")
        .with_args(
            CACHITO_SOURCE_REQUEST,
            dest_dir=str(workflow._tmpdir),
            dest_filename="remote-source-gomod.tar.gz",
        )
        .and_return(expected_dowload_path(workflow))
        .ordered()
    )
    # Download URL + tarball download for the second source.
    (
        flexmock(CachitoAPI)
        .should_receive("assemble_download_url")
        .with_args(SECOND_CACHITO_SOURCE_REQUEST)
        .and_return(SECOND_CACHITO_REQUEST_DOWNLOAD_URL)
        .ordered()
    )
    (
        flexmock(CachitoAPI)
        .should_receive("download_sources")
        .with_args(
            SECOND_CACHITO_SOURCE_REQUEST,
            dest_dir=str(workflow._tmpdir),
            dest_filename="remote-source-pip.tar.gz",
        )
        .and_return(expected_dowload_path(workflow))
        .ordered()
    )
    # Environment-variable lookups for both requests.
    (
        flexmock(CachitoAPI)
        .should_receive("get_request_env_vars")
        .with_args(CACHITO_SOURCE_REQUEST["id"])
        .and_return(CACHITO_ENV_VARS_JSON)
        .ordered()
    )
    (
        flexmock(CachitoAPI)
        .should_receive("get_request_env_vars")
        .with_args(SECOND_CACHITO_SOURCE_REQUEST["id"])
        .and_return(SECOND_CACHITO_ENV_VARS_JSON)
        .ordered()
    )
def mock_cachito_api(workflow, user=KOJI_TASK_OWNER, source_request=None,
                     dependency_replacements=None,
                     env_vars_json=None):
    """Stub the CachitoAPI call sequence for a single remote source.

    *source_request* defaults to CACHITO_SOURCE_REQUEST and
    *env_vars_json* to CACHITO_ENV_VARS_JSON.
    """
    if source_request is None:
        source_request = CACHITO_SOURCE_REQUEST
    (flexmock(CachitoAPI)
        .should_receive('request_sources')
        .with_args(
            repo=REMOTE_SOURCE_REPO,
            ref=REMOTE_SOURCE_REF,
            user=user,
            dependency_replacements=dependency_replacements,
        )
        .and_return({'id': CACHITO_REQUEST_ID}))
    (flexmock(CachitoAPI)
        .should_receive('wait_for_request')
        .with_args({'id': CACHITO_REQUEST_ID})
        .and_return(source_request))
    (flexmock(CachitoAPI)
        .should_receive('download_sources')
        .with_args(source_request, dest_dir=str(workflow._tmpdir),
                   dest_filename=REMOTE_SOURCE_TARBALL_FILENAME)
        .and_return(expected_dowload_path(workflow)))
    (flexmock(CachitoAPI)
        .should_receive('assemble_download_url')
        .with_args(source_request)
        .and_return(CACHITO_REQUEST_DOWNLOAD_URL))
    (flexmock(CachitoAPI)
        .should_receive('get_request_env_vars')
        .with_args(source_request['id'])
        .and_return(env_vars_json or CACHITO_ENV_VARS_JSON))
def mock_koji(user=KOJI_TASK_OWNER):
    """Stub the Koji session and make the task-owner lookup return *user*."""
    session = flexmock()
    flexmock(pre_reactor_config).should_receive('get_koji_session').and_return(session)
    flexmock(koji_util).should_receive('get_koji_task_owner').and_return({'name': user})
def expected_dowload_path(workflow):
    # Path the mocked Cachito download resolves to.
    # NOTE(review): the "dowload" typo in the name is kept on purpose —
    # many tests in this module reference this exact identifier.
    return workflow._tmpdir.join('source.tar.gz')
def setup_function(*args):
    # IMPORTANT: This needs to be done to ensure mocks at the module
    # level are reset between test cases.
    if 'pre_resolve_remote_source' in sys.modules:
        del sys.modules['pre_resolve_remote_source']
def teardown_function(*args):
    # IMPORTANT: This needs to be done to ensure mocks at the module
    # level are reset between test cases.
    if 'pre_resolve_remote_source' in sys.modules:
        del sys.modules['pre_resolve_remote_source']
@pytest.mark.parametrize('scratch', (True, False))
@pytest.mark.parametrize('dr_strs, dependency_replacements',
                         ((None, None),
                          (['gomod:foo.bar/project:2'],
                           [{
                               'name': 'foo.bar/project',
                               'type': 'gomod',
                               'version': '2'}]),
                          (['gomod:foo.bar/project:2:newproject'],
                           [{
                               'name': 'foo.bar/project',
                               'type': 'gomod',
                               'new_name': 'newproject',
                               'version': '2'}]),
                          (['gomod:foo.bar/project'], None)))
@pytest.mark.parametrize('env_vars_json, expected_build_args', [
    [CACHITO_ENV_VARS_JSON, CACHITO_BUILD_ARGS],
    [
        {
            'GOPATH': {'kind': 'path', 'value': 'deps/gomod'},
            'GOCACHE': {'kind': 'path', 'value': 'deps/gomod'},
        },
        {
            'GOPATH': '/remote-source/deps/gomod',
            'GOCACHE': '/remote-source/deps/gomod',
        },
    ],
    [
        {'GO111MODULE': {'kind': 'literal', 'value': 'on'}},
        {
            'GO111MODULE': 'on',
        },
    ],
])
def test_resolve_remote_source(workflow, scratch, dr_strs, dependency_replacements,
                               env_vars_json, expected_build_args):
    """Happy path plus dependency-replacement validation for the plugin."""
    build_json = {'metadata': {'labels': {'koji-task-id': str(KOJI_TASK_ID)}}}
    mock_build_json(build_json=build_json)
    mock_cachito_api(workflow,
                     dependency_replacements=dependency_replacements,
                     env_vars_json=env_vars_json)
    workflow.user_params['scratch'] = scratch
    # Dependency replacements are restricted to scratch builds and must
    # have at least three colon-separated fields.
    err = None
    if dr_strs and not scratch:
        err = 'Cachito dependency replacements are only allowed for scratch builds'
    if dr_strs and any(len(dr.split(':')) < 3 for dr in dr_strs):
        err = 'Cachito dependency replacements must be'
    expected_plugin_results = [
        {
            "name": None,
            "url": CACHITO_REQUEST_DOWNLOAD_URL,
            "remote_source_json": {
                "json": REMOTE_SOURCE_JSON,
                "filename": REMOTE_SOURCE_JSON_FILENAME,
            },
            "remote_source_tarball": {
                "filename": REMOTE_SOURCE_TARBALL_FILENAME,
                "path": expected_dowload_path(workflow),
            },
        },
    ]
    expected_worker_params = [{
        'build_args': expected_build_args,
        'configs': CACHITO_REQUEST_CONFIG_URL,
        'request_id': CACHITO_REQUEST_ID,
        'url': CACHITO_REQUEST_DOWNLOAD_URL,
        'name': None,
    }]
    run_plugin_with_args(
        workflow,
        dependency_replacements=dr_strs,
        expect_error=err,
        expected_plugin_results=expected_plugin_results,
        expected_worker_params=expected_worker_params
    )
@pytest.mark.parametrize(
    'env_vars_json',
    [
        {
            'GOPATH': {'kind': 'path', 'value': 'deps/gomod'},
            'GOCACHE': {'kind': 'path', 'value': 'deps/gomod'},
            'GO111MODULE': {'kind': 'literal', 'value': 'on'},
            'GOX': {'kind': 'new', 'value': 'new-kind'},
        },
    ]
)
def test_fail_build_if_unknown_kind(workflow, env_vars_json):
    """An unrecognized env-var 'kind' from Cachito must fail the build."""
    mock_cachito_api(workflow, env_vars_json=env_vars_json)
    run_plugin_with_args(workflow, expect_error=r'.*Unknown kind new got from Cachito')
@pytest.mark.parametrize('build_json', ({}, {'metadata': {}}))
def test_no_koji_user(workflow, build_json, caplog):
    """Missing/invalid Koji metadata only logs a warning; the plugin
    still resolves the remote source using an unknown user."""
    reactor_config = dedent("""\
        version: 1
        cachito:
           api_url: {}
           auth:
               ssl_certs_dir: {}
        koji:
            hub_url: /
            root_url: ''
            auth: {{}}
        """.format(CACHITO_URL, workflow._tmpdir))
    mock_reactor_config(workflow, reactor_config)
    mock_build_json(build_json=build_json)
    mock_cachito_api(workflow, user='unknown_user')
    log_msg = 'No build metadata'
    if build_json:
        log_msg = 'Invalid Koji task ID'
    expected_plugin_results = [
        {
            "name": None,
            "url": CACHITO_REQUEST_DOWNLOAD_URL,
            "remote_source_json": {
                "json": REMOTE_SOURCE_JSON,
                "filename": REMOTE_SOURCE_JSON_FILENAME,
            },
            "remote_source_tarball": {
                "filename": REMOTE_SOURCE_TARBALL_FILENAME,
                "path": expected_dowload_path(workflow),
            },
        },
    ]
    expected_worker_params = [{
        'build_args': CACHITO_BUILD_ARGS,
        'configs': CACHITO_REQUEST_CONFIG_URL,
        'request_id': CACHITO_REQUEST_ID,
        'url': CACHITO_REQUEST_DOWNLOAD_URL,
        'name': None,
    }]
    run_plugin_with_args(workflow, expected_plugin_results=expected_plugin_results,
                         expected_worker_params=expected_worker_params)
    assert log_msg in caplog.text
@pytest.mark.parametrize('pop_key', ('repo', 'ref', 'packages'))
def test_invalid_remote_source_structure(workflow, pop_key):
    """A Cachito source request missing any required key must fail."""
    source_request = {
        'id': CACHITO_REQUEST_ID,
        'repo': REMOTE_SOURCE_REPO,
        'ref': REMOTE_SOURCE_REF,
        'packages': REMOTE_SOURCE_PACKAGES,
    }
    # Drop one required key to make the request invalid.
    source_request.pop(pop_key)
    mock_cachito_api(workflow, source_request=source_request)
    run_plugin_with_args(workflow, expect_error='Received invalid source request')
def test_ignore_when_missing_cachito_config(workflow):
    """Without a cachito section in the reactor config the plugin is a no-op."""
    reactor_config = dedent("""\
        version: 1
        koji:
            hub_url: /
            root_url: ''
            auth: {}
        """)
    mock_reactor_config(workflow, reactor_config)
    result = run_plugin_with_args(workflow, expect_result=False)
    assert result is None
def test_invalid_cert_reference(workflow):
    """A non-existent ssl_certs_dir in the cachito config must abort the build."""
    missing_dir = str(workflow._tmpdir.join('invalid-dir'))
    reactor_config = dedent("""\
        version: 1
        cachito:
            api_url: {}
            auth:
                ssl_certs_dir: {}
        koji:
            hub_url: /
            root_url: ''
            auth: {{}}
        """.format(CACHITO_URL, missing_dir))
    mock_reactor_config(workflow, reactor_config)
    run_plugin_with_args(workflow, expect_error="Cachito ssl_certs_dir doesn't exist")
def test_ignore_when_missing_remote_source_config(workflow):
    """An empty remote source config means there is nothing to resolve."""
    mock_repo_config(workflow, dedent("""---"""))
    assert run_plugin_with_args(workflow, expect_result=False) is None
@pytest.mark.parametrize(('build_json', 'log_entry'), (
    ({}, 'No build metadata'),
    ({'metadata': None}, 'Invalid Koji task ID'),
    ({'metadata': {}}, 'Invalid Koji task ID'),
    ({'metadata': {'labels': {}}}, 'Invalid Koji task ID'),
    ({'metadata': {'labels': {'koji-task-id': None}}}, 'Invalid Koji task ID'),
    ({'metadata': {'labels': {'koji-task-id': 'not-an-int'}}}, 'Invalid Koji task ID'),
))
def test_bad_build_metadata(workflow, build_json, log_entry, caplog):
    """Malformed build metadata is logged but does not fail the plugin.

    Every malformed variant falls back to the default Cachito user, so the
    expected plugin results and worker params are identical across cases.
    """
    mock_build_json(build_json=build_json)
    mock_cachito_api(workflow, user='unknown_user')
    expected_plugin_results = [
        {
            "name": None,
            "url": CACHITO_REQUEST_DOWNLOAD_URL,
            "remote_source_json": {
                "json": REMOTE_SOURCE_JSON,
                "filename": REMOTE_SOURCE_JSON_FILENAME,
            },
            "remote_source_tarball": {
                "filename": REMOTE_SOURCE_TARBALL_FILENAME,
                "path": expected_dowload_path(workflow),
            },
        },
    ]
    expected_worker_params = [{
        'build_args': CACHITO_BUILD_ARGS,
        'configs': CACHITO_REQUEST_CONFIG_URL,
        'request_id': CACHITO_REQUEST_ID,
        'url': CACHITO_REQUEST_DOWNLOAD_URL,
        'name': None,
    }]
    run_plugin_with_args(workflow, expected_plugin_results=expected_plugin_results,
                         expected_worker_params=expected_worker_params)
    assert log_entry in caplog.text
    # The fallback user must also be logged for traceability.
    assert 'unknown_user' in caplog.text
@pytest.mark.parametrize('allow_multiple_remote_sources', [True, False])
def test_allow_multiple_remote_sources(workflow, allow_multiple_remote_sources):
    """Multiple remote sources are honored only when enabled in reactor config.

    With the flag off, the plugin must fail with a clear message telling the
    user to use a single remote source.  With it on, both sources (gomod and
    pip) are resolved, each producing its own result entry and worker params.
    """
    first_remote_source_name = 'gomod'
    first_remote_tarball_filename = 'remote-source-gomod.tar.gz'
    first_remote_json_filename = 'remote-source-gomod.json'
    second_remote_source_name = 'pip'
    second_remote_tarball_filename = 'remote-source-pip.tar.gz'
    second_remote_json_filename = 'remote-source-pip.json'
    # container.yaml declaring two named remote sources.
    container_yaml_config = dedent(
        """\
        remote_sources:
        - name: {}
          remote_source:
            repo: {}
            ref: {}
        - name: {}
          remote_source:
            repo: {}
            ref: {}
        """
    ).format(
        first_remote_source_name,
        REMOTE_SOURCE_REPO,
        REMOTE_SOURCE_REF,
        second_remote_source_name,
        SECOND_REMOTE_SOURCE_REPO,
        SECOND_REMOTE_SOURCE_REF,
    )
    reactor_config = dedent("""\
        version: 1
        cachito:
            api_url: {}
            auth:
                ssl_certs_dir: {}
        koji:
            hub_url: /
            root_url: ''
            auth: {{}}
        allow_multiple_remote_sources: {}
        """.format(CACHITO_URL, workflow._tmpdir, allow_multiple_remote_sources))
    build_json = {'metadata': {'labels': {'koji-task-id': str(KOJI_TASK_ID)}}}
    mock_build_json(build_json=build_json)
    mock_repo_config(workflow, data=container_yaml_config)
    mock_reactor_config(workflow, reactor_config)
    mock_cachito_api_multiple_remote_sources(workflow)
    if not allow_multiple_remote_sources:
        err_msg = (
            "Multiple remote sources are not enabled, "
            "use single remote source in container.yaml"
        )
        result = run_plugin_with_args(workflow, expect_result=False, expect_error=err_msg)
        assert result is None
    else:
        # Per-source build args injected into the worker builds.
        cachito_build_args = {
            'GO111MODULE': 'on',
            'GOPATH': f'/remote-source/{first_remote_source_name}/deps/gomod',
            'GOCACHE': f'/remote-source/{first_remote_source_name}/deps/gomod',
        }
        second_cachito_build_args = {
            'PIP_CERT': f'/remote-source/{second_remote_source_name}/app/package-index-ca.pem',
            'PIP_INDEX_URL': 'http://example-pip-index.url/stuff'
        }
        expected_plugin_results = [
            {
                "name": first_remote_source_name,
                "url": CACHITO_REQUEST_DOWNLOAD_URL,
                "remote_source_json": {
                    "json": REMOTE_SOURCE_JSON,
                    "filename": first_remote_json_filename,
                },
                "remote_source_tarball": {
                    "filename": first_remote_tarball_filename,
                    "path": expected_dowload_path(workflow),
                },
            },
            {
                "name": second_remote_source_name,
                "url": SECOND_CACHITO_REQUEST_DOWNLOAD_URL,
                "remote_source_json": {
                    "json": SECOND_REMOTE_SOURCE_JSON,
                    "filename": second_remote_json_filename,
                },
                "remote_source_tarball": {
                    "filename": second_remote_tarball_filename,
                    "path": expected_dowload_path(workflow),
                },
            },
        ]
        expected_worker_params = [
            {
                "build_args": cachito_build_args,
                "configs": CACHITO_REQUEST_CONFIG_URL,
                "request_id": CACHITO_REQUEST_ID,
                "url": CACHITO_REQUEST_DOWNLOAD_URL,
                "name": first_remote_source_name,
            },
            {
                "build_args": second_cachito_build_args,
                "configs": SECOND_CACHITO_REQUEST_CONFIG_URL,
                "request_id": SECOND_CACHITO_REQUEST_ID,
                "url": SECOND_CACHITO_REQUEST_DOWNLOAD_URL,
                "name": second_remote_source_name,
            },
        ]
        run_plugin_with_args(workflow, expected_plugin_results=expected_plugin_results,
                             expected_worker_params=expected_worker_params)
def test_multiple_remote_sources_non_unique_names(workflow):
    """Duplicate remote source names in container.yaml must be rejected.

    Two of the three declared sources share the name 'same'; the plugin is
    expected to fail listing exactly the duplicated names.
    """
    container_yaml_config = dedent("""\
        remote_sources:
        - name: same
          remote_source:
            repo: https://git.example.com/team/repo.git
            ref: a55c00f45ec3dfee0c766cea3d395d6e21cc2e5a
        - name: same
          remote_source:
            repo: https://git.example.com/team/repo.git
            ref: a55c00f45ec3dfee0c766cea3d395d6e21cc2e5a
        - name: bit-different
          remote_source:
            repo: https://git.example.com/team/repo.git
            ref: a55c00f45ec3dfee0c766cea3d395d6e21cc2e5a
        """)
    reactor_config = dedent("""\
        version: 1
        cachito:
            api_url: {}
            auth:
                ssl_certs_dir: {}
        koji:
            hub_url: /
            root_url: ''
            auth: {{}}
        allow_multiple_remote_sources: True
        """.format(CACHITO_URL, workflow._tmpdir))
    mock_repo_config(workflow, data=container_yaml_config)
    mock_reactor_config(workflow, reactor_config)
    err_msg = (
        r"Provided remote sources parameters contain non unique names: \['same'\]"
    )
    result = run_plugin_with_args(workflow, expect_result=False, expect_error=err_msg)
    assert result is None
def run_plugin_with_args(workflow, dependency_replacements=None, expect_error=None,
                         expect_result=True, expected_plugin_results=None,
                         expected_worker_params=None):
    """Run the resolve_remote_source plugin and verify results and side effects.

    When `expect_error` is given, asserts the runner raises a matching
    PluginFailedException and returns None.  Otherwise returns the plugin's
    results, optionally comparing them to `expected_plugin_results`, and
    always checks the worker override kwargs recorded for orchestration.
    """
    plugin_conf = [{
        "name": ResolveRemoteSourcePlugin.key,
        "args": {"dependency_replacements": dependency_replacements},
    }]
    runner = PreBuildPluginsRunner(workflow.builder.tasker, workflow, plugin_conf)
    if expect_error:
        with pytest.raises(PluginFailedException, match=expect_error):
            runner.run()
        return None
    results = runner.run()[ResolveRemoteSourcePlugin.key]
    if expect_result:
        assert results == expected_plugin_results
    # A successful run must also have recorded the worker override kwargs.
    workspace = workflow.plugin_workspace[OrchestrateBuildPlugin.key]
    worker_params = workspace[WORKSPACE_KEY_OVERRIDE_KWARGS][None]
    assert worker_params["remote_sources"] == expected_worker_params
    return results
| StarcoderdataPython |
4845524 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Packaging script for the `dbsavior` backup/restore bot.
import os
import sys
import setuptools
from dbsavior import __version__ as version
here = os.path.abspath(os.path.dirname(__file__))
# NOTE(review): this path insertion happens AFTER the `from dbsavior import`
# above, so it cannot affect that import -- confirm whether it is still needed.
sys.path.insert(0, here)
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Runtime dependencies are maintained in requirements.txt next to this file.
requirements_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'requirements.txt')
with open(requirements_path) as requirements_file:
    requirements = requirements_file.readlines()
setuptools.setup(
    name="dbsavior",  # Replace with your own package name
    version=version,
    author="<NAME>",
    author_email="<EMAIL>",
    description="This backup restore bot can backup/restore a database and upload/download the backup file to/from"
                " a remote storage engine",
    license='Apache License 2.0',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pengfei99/K8sCronJobPostgresBackup",
    # we need to indicate explicitly which packages will be published, otherwise import will raise module name not found
    packages=setuptools.find_packages(include=['dbsavior']),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    install_requires=requirements,
    python_requires='>=3.8',
)
| StarcoderdataPython |
9667060 | from __future__ import print_function
import sys
import time
import numpy as np
from pyspark import SparkContext
if __name__ == "__main__":
    sc = SparkContext(appName="PythonLR")
    # Defaults: D input features, p output columns, `iterations` gradient
    # steps, N sample rows.  All can be overridden positionally on the CLI.
    D = 10
    p = 4
    iterations = 20
    N = 10
    if len(sys.argv)>1:
        N = int(sys.argv[1])
    if len(sys.argv)>2:
        iterations = int(sys.argv[2])
    if len(sys.argv)>3:
        D = int(sys.argv[3])
    if len(sys.argv)>4:
        p = int(sys.argv[4])
    print("N %d D %d iterations %d p %d" %(N,D,iterations,p))
    # One random (rows, D+p) matrix per partition; the first p columns are
    # targets Y, the remaining D columns are features X.
    # NOTE(review): range(1, N) yields N-1 elements, one fewer row than N
    # suggests -- confirm whether this off-by-one is intentional.
    points = sc.parallelize(range(1,N)).mapPartitions(lambda r: [np.random.ranf(size=(len(list(r)),D+p))])
    # Force materialization so data generation is not counted in the timing.
    a = points.cache().first()
    start = time.time()
    alphaN = 0.01/N
    w = np.zeros(shape=(D,p))
    print("Initial w: " + str(w))
    def gradient(matrix, w):
        # Split a partition's matrix into targets Y and features X, and
        # return the (already learning-rate-scaled) least-squares gradient.
        Y = matrix[:, 0:p]
        X = matrix[:, p:]
        return alphaN * X.T.dot(X.dot(w)-Y)
    def add(x, y):
        # In-place accumulate to avoid allocating a new array per reduce step.
        x += y
        return x
    for i in range(iterations):
        #print("On iteration %i" % (i + 1))
        # `w` is mutated in place, so the closure passed to map sees the
        # updated weights on every iteration.
        w -= points.map(lambda m: gradient(m, w)).reduce(add)
    print("linear regression exec time %f" % (time.time()-start))
    print("Final w: " + str(w))
    sc.stop()
| StarcoderdataPython |
3569048 | """
Authors: <NAME> (<EMAIL>),
<NAME> (<EMAIL>)
Copyright © 2021, United States Government, as represented by the Administrator
of the National Aeronautics and Space Administration. All rights reserved.
The HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed under
the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import annotations
from warnings import warn
import numpy as np
import scipy.linalg
def is_dm(rho: np.ndarray, atol=1e-6) -> bool:
    """
    check if the given input a valid density matrix.
    """
    mat = np.asarray(rho)
    # Any shape with a square total size is accepted; flatten into d x d.
    dim = int(np.sqrt(np.prod(mat.shape)))
    mat = np.reshape(mat, (dim, dim))
    # A density matrix must be Hermitian, ...
    hermitian = np.allclose(mat, mat.conj().T, atol=atol)
    # ... have unit trace, ...
    unit_trace = np.isclose(np.trace(mat), 1, atol=atol)
    # ... and be positive semidefinite (all eigenvalues >= 0 up to tolerance).
    eigenvalues = np.linalg.eigvals(mat)
    psd = all(np.real(e) >= 0 or np.isclose(e, 0, atol=atol)
              for e in eigenvalues)
    return hermitian and unit_trace and psd
def ptrace(state: np.ndarray,
           keep: {int, list[int]},
           dims: {int, list[int]} = None) -> np.ndarray:
    """Partial trace of a pure state (ket) or density matrix.

    Parameters
    ----------
    state: np.ndarray
        One dimensional for a ket (e.g. np.array([1, 0, 0, 0])), or two
        dimensional for a density matrix (e.g. np.array([[1, 0], [0, 0]])).
    keep: int or list of int
        Indices of the qudits to keep; all others are traced out.
    dims: list of int, optional
        Per-qudit dimensions matching the ordering of `state`.  Defaults to
        2 (qubits) for every site.

    Returns
    -------
    np.ndarray
        The reduced density matrix over the kept qudits.
    """
    state = np.asarray(state)
    if state.ndim not in (1, 2):
        raise ValueError('should be pure state (one dimensional) '
                         'or density matrix (two dimensional). '
                         f'Received dimension {len(state.shape)}')
    is_pure = state.ndim == 1
    if not is_pure and state.shape[0] != state.shape[1]:
        raise ValueError('invalid state input.')
    full_dim = np.prod(state.shape[0])
    if dims is not None and full_dim != np.prod(dims):
        raise ValueError('specified dimensions inconsistent with state')
    n_qudits = len(dims) if dims is not None else np.log2(full_dim)
    if not np.isclose(n_qudits, round(n_qudits)):
        raise ValueError('invalid state size')
    n_qudits = int(round(n_qudits))
    keep = [keep] if isinstance(keep, int) else list(keep)
    # All kept axes must be valid, and at least one axis must be traced out.
    if not all(q in range(n_qudits) for q in keep) or len(keep) >= n_qudits:
        raise ValueError('invalid axes')
    if dims is None:
        dims = [2] * n_qudits
    # Dimensions of the subsystem we keep vs. the one we trace away.
    final_dims = [dims[q] for q in keep]
    final_dim = np.prod(final_dims)
    drop_dim = int(round(full_dim / final_dim))
    if is_pure:
        # Move the kept axes to the front, flatten to (kept, dropped), then
        # contract the dropped index: rho = sum_j psi[:, j] psi*[:, j]^T.
        tensor = state.reshape(dims)
        order = keep + [q for q in range(n_qudits) if q not in keep]
        mat = np.transpose(tensor, order).reshape(final_dim, drop_dim)
        return np.einsum('ij,kj->ik', mat, mat.conj())
    # Density matrix: row and column axes are handled in tandem, so the
    # index lists are doubled before permuting.
    double_dims = dims + dims
    keep = keep + [q + n_qudits for q in keep]
    order = keep + [q for q in range(2 * n_qudits) if q not in keep]
    tensor = np.transpose(state.reshape(double_dims), order)
    tensor = tensor.reshape((final_dim, final_dim, drop_dim, drop_dim))
    # Trace over the dropped row/column pair.
    return np.einsum('ijkk->ij', tensor)
def is_channel(channel: SuperGate,
               atol=1e-8,
               order: tuple[any, ...] = None,
               **kwargs) -> bool:
    """Check via the Choi matrix that `channel` is a valid quantum channel.

    A valid channel is a completely positive, trace preserving (CPTP) map.

    Parameters
    ----------
    channel: MatrixSuperGate or KrausSuperGate
        Must have the method 'map()'.
    atol: float, optional
        Absolute tolerance used in all the CPTP checks.
    order: tuple[any, ...], optional
        If provided, Kraus' map is ordered accordingly to `order`.
        See `MatrixChannel.map()`
    kwargs: kwargs for `MatrixChannel.map()`
    """
    choi = choi_matrix(channel, order, **kwargs)
    dim = _channel_dim(channel)
    # Trace preserving: Tr[C] equals the Hilbert-space dimension.
    trace_preserving = np.isclose(choi.trace(), dim, atol=atol)
    # Hermiticity preserving: the Choi matrix is Hermitian.
    hermiticity_preserving = np.allclose(choi, choi.conj().T, atol=atol)
    # Completely positive: all Choi eigenvalues are real and non-negative.
    eigenvalues = np.linalg.eigvals(choi)
    completely_positive = all(
        (np.real(e) >= 0 or np.isclose(e, 0, atol=atol))
        and np.isclose(np.imag(e), 0, atol=atol)
        for e in eigenvalues)
    return trace_preserving and hermiticity_preserving and completely_positive
def choi_matrix(channel: SuperGate,
                order: tuple[any, ...] = None,
                **kwargs) -> np.ndarray:
    """Return the Choi matrix of `channel`, shaped (d**2, d**2) for a
    d-dimensional Hilbert space.

    The channel acts as:

        Lambda(rho) = Tr_0[ (I \otimes rho^T) C ]

    where C is the returned Choi matrix.

    Parameters
    ----------
    channel: MatrixSuperGate or KrausSuperGate
        Must have the method 'map()'.
    order: tuple[any, ...], optional
        If provided, Kraus' map is ordered accordingly to `order`.
        See `MatrixChannel.map()`
    kwargs: kwargs for `MatrixChannel.map()`
    """
    if not hasattr(channel, 'map'):
        raise ValueError("'channel' must have method 'map()'")
    superop = channel.map(order, **kwargs)
    dim = _channel_dim(channel)
    choi = np.zeros((dim**2, dim**2), dtype=complex)
    # C = sum_ij E_ij (x) Lambda(E_ij), built one basis matrix at a time
    # using the vectorized superoperator.
    for idx in range(dim**2):
        basis_vec = np.zeros(dim**2)
        basis_vec[idx] = 1
        mapped = superop @ basis_vec
        choi += np.kron(basis_vec.reshape((dim, dim)),
                        mapped.reshape((dim, dim)))
    return choi
def fidelity(state1: np.ndarray,
             state2: np.ndarray,
             *,
             use_sqrt_def: bool = False,
             atol: float = 1e-8) -> float:
    """Compute the fidelity of two quantum states:

        F(state1, state2) = ( Tr[ sqrt{sqrt(state1) state2 sqrt(state1)} ] )^2

    Parameters
    ----------
    state1, state2: np.ndarray
        Either a ket of shape (d,) or a density matrix of shape (d, d).
        The two states may be of different kinds but must share dimension d.
    use_sqrt_def: bool, optional
        If True, return the square-root definition (without the final square).
    atol: float, optional
        Imaginary parts smaller than this are rounded away.

    Notes
    -----
    To convert a state to a ket use np.reshape(state, (d,)); to a density
    matrix use np.reshape(state, (d, d)).  For two pure states the result is
    |<psi1 | psi2>|^2.
    """
    state1 = np.asarray(state1)
    state2 = np.asarray(state2)

    def _check_shape(state):
        # Accept only kets (d,) and square density matrices (d, d).
        shape = state.shape
        valid = state.ndim in (1, 2)
        if state.ndim == 2 and shape[0] != shape[1]:
            valid = False
        if not valid:
            raise ValueError("Invalid state dimensions. "
                             "Ket type should be 1-dimensional (state.ndim==1)."
                             " Density matrix should be square d x d")

    _check_shape(state1)
    _check_shape(state2)
    dim1, dim2 = state1.shape[0], state2.shape[0]
    if dim1 != dim2:
        raise ValueError(f"state dimensions inconsistent, got {dim1} != {dim2}")

    def _as_real(value):
        # Strip a numerically-zero imaginary part; warn otherwise.
        if np.isclose(np.imag(value), 0, atol=atol):
            return np.real(value)
        warn("Fidelity has non-trivial imaginary component")
        return value

    exponent = 1 if use_sqrt_def else 2
    first_is_ket = state1.ndim == 1
    second_is_ket = state2.ndim == 1

    if first_is_ket and second_is_ket:
        # Two kets: plain overlap.
        return np.abs(np.inner(state1.conj(), state2))**exponent
    if first_is_ket != second_is_ket:
        # One ket, one density matrix: sqrt(<psi| rho |psi>), then square.
        rho, psi = (state2, state1) if first_is_ket else (state1, state2)
        overlap = np.sqrt(np.inner(psi.conj(), rho @ psi))
        return _as_real(overlap)**exponent
    # Two density matrices: since only the trace is needed, sum the square
    # roots of the eigenvalues instead of taking a full matrix square root.
    sqrt_rho = scipy.linalg.sqrtm(state1)
    inner_mat = sqrt_rho @ state2 @ sqrt_rho
    eigenvalues = np.linalg.eigvals(inner_mat)
    total = np.sum([np.sqrt(e) for e in eigenvalues])
    return _as_real(total)**exponent
def reconstruct_dm(pure_states: list[np.ndarray],
                   probs: list[float] = None) -> np.ndarray:
    """Compute the mixture sum_i P[i] |psi_i><psi_i| of pure states.

    Parameters
    ----------
    pure_states: list[np.ndarray]
        Pure states to sum into the density matrix.  Each state may have any
        shape; it is flattened to one dimension (d,) before use.
    probs: list[float], optional
        Mixing probabilities, one per state.  Defaults to a uniform
        1/len(pure_states) for every state.

    Returns
    -------
    np.ndarray
        The (d, d) density matrix.

    Raises
    ------
    ValueError
        If `probs` and `pure_states` differ in length, or if the states do
        not all share the same dimension.
    """
    if probs is None:
        probs = [1 / len(pure_states)] * len(pure_states)
    if len(probs) != len(pure_states):
        raise ValueError("Invalid `probs`: length not consistent.")
    # Flatten every state to shape (d,).
    flat_states = [np.reshape(np.asarray(psi), -1) for psi in pure_states]
    # Check dimensions BEFORE stacking: np.asarray on a ragged list raises
    # numpy's own error, which would hide the intended message below.
    all_dims = set(psi.shape[0] for psi in flat_states)
    if len(all_dims) != 1:
        raise ValueError(f"Received states with inconsistent dimensions. "
                         f"Received {all_dims}.")
    # Fold sqrt(P[i]) into each state so a single einsum yields the mixture:
    # rho[j, k] = sum_i P[i] psi_i[j] psi_i*[k].
    weighted = np.asarray([
        np.sqrt(p) * psi for p, psi in zip(probs, flat_states)
    ])
    return np.einsum('ij,ik', weighted, weighted.conj())
def _channel_dim(channel):
# map() gives the dimension squared of the channel
full_dims = channel.map().shape
assert len(full_dims) == 2
assert full_dims[0] == full_dims[1]
d = np.sqrt(full_dims[0])
if not np.isclose(d, int(d)):
raise ValueError('invalid shape for channel')
return int(d)
| StarcoderdataPython |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-Python.
# @File : coordinate
# @Time : 2019-09-22 16:00
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description : Scrape the coordinate data embedded in a CSDN blog post
#                and save it to a local text file.
import re
import requests
url = 'https://blog.csdn.net/Yellow_python/article/details/81115987'
# Spoof a browser user agent so the blog server accepts the request.
header = {'User-Agent': 'Opera/8.0 (Windows NT 5.1; U; en)'}
r = requests.get(url, headers=header)
# The data lives inside the first <pre><code> block of the article body;
# [\s\S] matches across newlines, and the lazy quantifier stops at the
# first closing tag.
contain = re.findall('<pre><code>([\s\S]+?)</code></pre>', r.text)[0].strip()
with open('coordinate.txt', 'w', encoding='utf-8') as f:
    f.write(contain)
| StarcoderdataPython |
# Only the retinanet lightning adapter is part of this module's public API.
__all__ = ["ModelAdapter"]
from icevision.models.torchvision.lightning_model_adapter import *
from icevision.models.torchvision.retinanet.prediction import *
class ModelAdapter(RCNNModelAdapter):
    """Lightning module specialized for retinanet, with metrics support.

    Inherits `forward`, `training_step`, `validation_step`, and
    `validation_epoch_end` from `RCNNModelAdapter`.

    # Arguments
        model: The pytorch model to use.
        metrics: `Sequence` of metrics to use.

    # Returns
        A `LightningModule`.
    """

    def convert_raw_predictions(self, batch, raw_preds, records):
        # Delegate to the retinanet converter; a threshold of 0.0 keeps
        # every detection so downstream metrics see the full prediction set.
        predictions = convert_raw_predictions(
            batch=batch,
            raw_preds=raw_preds,
            records=records,
            detection_threshold=0.0,
        )
        return predictions
| StarcoderdataPython |
8170253 | """
MIT License
Copyright (c) 2020 ValkyriaKing711
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Union
from discord import PartialEmoji, RawReactionActionEvent, Role
from discord.ext import commands
from discord.ext.commands import Cog, Context
from .utils import utils
# Reaction added to a command invocation message to acknowledge success.
SUCCESS_EMOJI = "<:yes:567019270467223572>"
class Roles(Cog):
    """Cog implementing self-assignable reaction roles.

    Role definitions are persisted in the `roles` DB table and mirrored in
    ``self.role_dict`` (keyed by emoji string) for fast lookups; the
    category messages users react to live in one fixed channel.
    """
    def __init__(self, bot: utils.Bot):
        self.bot = bot
        # Channel holding the role-category messages users react to.
        self.channel = bot.get_channel(797633928152088586)
        # emoji string -> {emoji, role_id, category_id, description}
        self.role_dict = {}
        # Warm the cache asynchronously once the loop is running.
        self.bot.loop.create_task(self.cache_roles())
    async def cache_roles(self):
        """Load the emoji -> role mapping from the database into memory."""
        data = await self.bot.db.fetch("SELECT * FROM roles")
        for i in data:
            self.role_dict[i["emoji"]] = dict(i)
    @commands.group(invoke_without_command=True)
    async def roles(self, ctx: Context):
        """Command group for managing reaction roles and categories."""
        # Non-admins are pointed at the roles channel instead of the help.
        if not ctx.author.guild_permissions.administrator:
            await ctx.send("<#797633928152088586>")
        else:
            await ctx.send_help(self.roles)
    @commands.has_permissions(administrator=True)
    @roles.command(name="add", aliases=["new"])
    async def role_add(self, ctx: Context, category_id: int, role: Union[Role, str],
                       emoji: Union[PartialEmoji, PartialEmoji.from_str], *, text: str):
        """Adds a role to the given category.
        <role> can be an existing role name, id or mention;
        or alternatively a name which a new role will be created as.
        As per usual, all arguments except the last one (for example <role>)
        require "quotes" around them if it's multiple words.
        Example usage:
        egg roles add 797953789826302002 "flying disk" 🥏 Gives access to flying disk channel.
        - In case a role called flying disk doesn't exist, it'll create one on the fly.
        """
        # The bot can only react with custom emoji from guilds it is in.
        if emoji.id and emoji.id not in [e.id for e in self.bot.emojis]:
            return await ctx.send("A custom emoji can only be added if I'm able to see it.")
        message = await self.channel.fetch_message(category_id)
        # Each category message lists one role per line, so strip newlines.
        text = text.replace("\n", " ")
        new_content = message.content + f"\n{emoji}: {text}"
        if isinstance(role, str):
            role = await ctx.guild.create_role(name=role)
        await self.bot.db.execute(
            "INSERT INTO roles (emoji, role_id, category_id, description) VALUES (?, ?, ?, ?)",
            str(emoji), role.id, category_id, text
        )
        # Keep the in-memory cache in sync with the DB row just inserted.
        self.role_dict[str(emoji)] = {
            "category_id": category_id,
            "emoji": str(emoji),
            "role_id": role.id,
            "description": text
        }
        await message.edit(content=new_content)
        await message.add_reaction(emoji)
        await ctx.message.add_reaction(SUCCESS_EMOJI)
    @commands.has_permissions(administrator=True)
    @roles.command(name="delete", aliases=["remove"])
    async def role_delete(self, ctx: Context, *,
                          emoji: Union[PartialEmoji, PartialEmoji.from_str]):
        """Deletes a role from its respective category.
        The command's only parameter is the emoji of the role that's being deleted.
        Example usage:
        egg roles delete 🥏
        """
        role_data = await self.bot.db.fetchone("SELECT * FROM roles WHERE emoji = ?", str(emoji))
        category_id = role_data["category_id"]
        message = await self.channel.fetch_message(category_id)
        # Remove the role's line from the category message body.
        lines = message.content.split("\n")
        lines.remove(f"{emoji}: {role_data['description']}")
        await self.bot.db.execute("DELETE FROM roles WHERE emoji = ?", str(emoji))
        del self.role_dict[str(emoji)]
        await message.edit(content="\n".join(lines))
        await message.clear_reaction(emoji)
        await ctx.message.add_reaction(SUCCESS_EMOJI)
    @commands.has_permissions(administrator=True)
    @roles.group(invoke_without_command=True)
    async def category(self, ctx: Context):
        """Command group for managing role categories."""
        await ctx.send_help(self.category)
    @commands.has_permissions(administrator=True)
    @category.command(name="add", aliases=["new", "create"])
    async def category_add(self, ctx: Context, *, title: str):
        """Adds a new category with the given title."""
        # Strip user-provided markup; the bold/colon styling is re-applied.
        title = title.strip("*:")
        await self.channel.send(f"**{title}:**")
        await ctx.message.add_reaction(SUCCESS_EMOJI)
    @commands.has_permissions(administrator=True)
    @category.command(name="delete", aliases=["remove"])
    async def category_delete(self, ctx: Context, category_id: int):
        """Removes a category."""
        await self.bot.db.execute("DELETE FROM roles WHERE category_id = ?", category_id)
        # Drop all cached roles that belonged to the deleted category.
        self.role_dict = {
            k: v for k, v in self.role_dict.items()
            if v["category_id"] != category_id
        }
        message = await self.channel.fetch_message(category_id)
        await message.delete()
        await ctx.message.add_reaction(SUCCESS_EMOJI)
    @commands.has_permissions(administrator=True)
    @category.command()
    async def title(self, ctx: Context, category_id: int, *, text: str):
        """Edits a category's title.
        Example usage:
        egg roles category title 797953789826302002 Some cool new title
        - Note that the ** and : are added automatically.
        """
        text = text.strip(":*")
        message = await self.channel.fetch_message(category_id)
        lines = message.content.split("\n")
        # Replace the existing title line, or prepend one if it is missing.
        if lines[0].startswith("**"):
            lines[0] = f"**{text}:**"
        else:
            lines.insert(0, f"**{text}:**")
        await message.edit(content="\n".join(lines))
        await ctx.message.add_reaction(SUCCESS_EMOJI)
    @Cog.listener(name="on_raw_reaction_add")
    @Cog.listener(name="on_raw_reaction_remove")
    async def handle_reaction(self, payload: RawReactionActionEvent):
        """Grant or revoke a role when a user (un)reacts in the roles channel."""
        if payload.channel_id != self.channel.id:
            return
        # Ignore the bot's own seed reactions.
        if payload.user_id == self.bot.user.id:
            return
        emoji = payload.emoji
        guild = self.bot.get_guild(payload.guild_id)
        member = guild.get_member(payload.user_id)
        # Unknown emoji: clean up the stray reaction and bail out.
        if str(emoji) not in self.role_dict:
            message = await self.channel.fetch_message(payload.message_id)
            await message.remove_reaction(emoji, member)
            return
        guild = self.bot.get_guild(payload.guild_id)
        role_id = self.role_dict[str(emoji)]["role_id"]
        role = guild.get_role(role_id)
        # Only touch the member's roles when the state actually changes.
        if payload.event_type == "REACTION_ADD" and role not in member.roles:
            await member.add_roles(role)
        elif payload.event_type == "REACTION_REMOVE" and role in member.roles:
            await member.remove_roles(role)
def setup(bot: utils.Bot):
    """Entry point used by discord.py to register this cog on the bot."""
    cog = Roles(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
11214087 | <gh_stars>1-10
import convst
from setuptools import setup, find_packages
from codecs import open
import numpy
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(ROOT, 'README.md'), encoding="utf-8") as f:
README = f.read()
setup(
name="convst",
description="The Convolutional Shapelet Transform algorithm",
long_description_content_type='text/markdown',
long_description=README,
author="<NAME>",
packages=find_packages(),
license='BSD 2',
download_url = 'https://github.com/baraline/convst/archive/v0.1.4.tar.gz',
version=convst.__version__,
keywords = ['shapelets', 'time-series-classification', 'shapelet-transform','convolutional-kernels'],
url="https://github.com/baraline/convst",
author_email="<EMAIL>",
python_requires='>=3.7',
install_requires=[
"matplotlib >= 3.1",
"numba >= 0.50",
"pandas >= 1.1",
"scikit_learn >= 0.24",
"scipy >= 1.5.0",
"seaborn >= 0.11",
"sktime >= 0.5",
"numpy >= 1.18.5",
"sphinx_gallery >= 0.8",
"numpydoc >= 1.0"
],
zip_safe=False
)
| StarcoderdataPython |
from collections import Counter

# Read the two strings (spaces stripped) and tally character occurrences.
available = Counter(input().replace(" ", ""))
required = Counter(input().replace(" ", ""))

# The second string can be assembled from the first iff no character is
# required more often than it is available.  Counter subtraction keeps only
# positive remainders, so an empty difference means every demand is met.
print("YES" if not (required - available) else "NO")
8134755 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""":Mod: test_model
:Synopsis:
:Author:
servilla
:Created:
11/25/18
"""
import os
import sys
import time
import unittest
import daiquiri
import pendulum
from soh.config import Config
from soh.model.soh_db import SohDb
# NOTE(review): this insert executes AFTER the `soh` imports above, so it
# cannot affect them -- confirm whether the ordering is intentional.
sys.path.insert(0, os.path.abspath('../src'))
logger = daiquiri.getLogger('test_model: ' + __name__)
# NOTE(review): module-level global appears unused; the tests create their
# own SohDb instance in setUp -- confirm before removing.
soh_db = None
class TestModel(unittest.TestCase):
    """Exercises the SohDb event/status persistence layer.

    Each test runs against a freshly-connected SohDb; tearDown deletes the
    database file (Config.db) so tests stay independent.
    """
    def setUp(self):
        # Fresh DB connection for every test.
        self._soh_db = SohDb()
        self._soh_db.connect_soh_db()
    def tearDown(self):
        # Remove the database file, if one was created, between tests.
        if os.path.exists(Config.db):
            os.remove(Config.db)
    def test_event(self):
        """Inserting an event and reading it back preserves its timestamp
        (compared at day granularity via toordinal)."""
        now_utc = pendulum.now('UTC')
        self._soh_db.insert_soh_event(timestamp=now_utc)
        event_id = self._soh_db.get_soh_latest_event().event_id
        event_time = self._soh_db.get_soh_event_timestamp(
            event_id=event_id).timestamp
        self.assertEqual(now_utc.toordinal(), event_time.toordinal())
    def test_status(self):
        """A status row inserted for a server is read back unchanged."""
        now_utc = pendulum.now('UTC')
        host = 'test.edirepository.org'
        self._soh_db.insert_soh_event(timestamp=now_utc)
        event_id = self._soh_db.get_soh_latest_event().event_id
        self._soh_db.insert_soh_status(event_id=event_id, server=host,
                                       status=str(Config.UP), timestamp=now_utc)
        status = int(self._soh_db.get_soh_latest_status_by_server(server=host).status)
        self.assertEqual(Config.UP, status)
    def test_get_latest_status_of_server(self):
        """With two status rows for a server, the later one is returned."""
        now_utc = pendulum.now('UTC')
        host = 'test.edirepository.org'
        self._soh_db.insert_soh_event(timestamp=now_utc)
        event_id = self._soh_db.get_soh_latest_event().event_id
        self._soh_db.insert_soh_status(event_id=event_id, server=host,
                                       status=str(Config.UP), timestamp=now_utc)
        # Ensure the second event has a strictly later timestamp.
        time.sleep(2)
        now_utc = pendulum.now('UTC')
        host = 'test.edirepository.org'
        self._soh_db.insert_soh_event(timestamp=now_utc)
        event_id = self._soh_db.get_soh_latest_event().event_id
        self._soh_db.insert_soh_status(event_id=event_id, server=host,
                                       status=str(
                                           Config.assertions['TOMCAT_DOWN']),
                                       timestamp=now_utc)
        status = int(
            self._soh_db.get_soh_latest_status_by_server(server=host).status)
        self.assertEqual(Config.assertions['TOMCAT_DOWN'], status)
if __name__ == '__main__':
    # Allow running this module directly with the stdlib unittest runner.
    unittest.main()
| StarcoderdataPython |
1734319 | import os
import requests
# Run pycodestyle over the package, record the outcome in a log file, update
# the repository's style badge (./stylebadge.svg), and stage both with git.
url_failed = "https://img.shields.io/badge/pycodestyle-failed-red.svg"
url_success = "https://img.shields.io/badge/pycodestyle-success-brightgreen.svg"
with open("./tests/latest.stylelog", "w") as log:
    results = os.popen("pycodestyle ./parameterparser/*.py").read()
    lines = results.strip().splitlines()
    # Any output from pycodestyle means at least one style violation.
    if len(lines) > 0:
        print("Style Failed")
        log.write("Style Failed" + os.linesep)
        log.write(results + os.linesep)
        url = url_failed
    else:
        print("Style Passed")
        log.write("Style Passed" + os.linesep)
        url = url_success
    print("Updating ./stylebadge.svg with: " + url)
    log.write("Updating ./stylebadge.svg with: " + url + os.linesep)
    # Use a context manager so the badge file handle is closed deterministically
    # (the original left the file object dangling).
    with open("./stylebadge.svg", "wb") as badge_file:
        badge_file.write(requests.get(url).content)
    print("Updating git commit with new stylebadge.svg")
    log.write("Updating git commit with new stylebadge.svg" + os.linesep)
    git_add = os.popen("git add ./stylebadge.svg").read()
    print(git_add)
    log.write(git_add + os.linesep)
# .read() waits for the command to finish; the original discarded the pipe
# immediately, so the final `git add` could be left incomplete at exit.
os.popen("git add ./tests/latest.stylelog").read()
| StarcoderdataPython |
3386419 | from selenium.webdriver.common.by import By
class ProductsPageLocator(object):
    """XPath locators for the products (inventory) page.

    The ``PRODUCT_*`` helpers build per-row locators from a 1-based item
    index.  They are declared as ``@staticmethod`` so they work whether
    accessed on the class or on an instance (without the decorator they
    would receive the instance as ``index`` when called on an object).
    """

    IMG_BROKEN = (By.XPATH, '//img[contains(@src, "jpgWithGarbageOnItToBreakTheUrl")]')
    SHOPPING_CART_LABEL = (By.XPATH, '//div[@id="shopping_cart_container"]//span[contains(@class,"shopping_cart_badge")]')
    SHOPPING_CART_ITEM = (By.XPATH, '//div[@id="shopping_cart_container"]')

    # Common XPath prefix selecting the index-th inventory row.
    _PREFIX = '//div[@class="inventory_list"]/div[@class="inventory_item"]['

    @staticmethod
    def PRODUCT_NAME_LABEL(index):
        """Locator for the item name label of row *index* (1-based)."""
        ITEM = ']//div[@class="inventory_item_name"]'
        return (By.XPATH, (ProductsPageLocator._PREFIX + str(index) + ITEM))

    @staticmethod
    def PRODUCT_DESC_LABEL(index):
        """Locator for the item description label of row *index*."""
        ITEM = ']//div[@class="inventory_item_desc"]'
        return (By.XPATH, (ProductsPageLocator._PREFIX + str(index) + ITEM))

    @staticmethod
    def PRODUCT_IMAGE(index):
        """Locator for the item image container of row *index*."""
        ITEM = ']//div[@class="inventory_item_img"]'
        return (By.XPATH, (ProductsPageLocator._PREFIX + str(index) + ITEM))

    @staticmethod
    def PRODUCT_PRICE_LABEL(index):
        """Locator for the item price label of row *index*."""
        ITEM = ']//div[@class="inventory_item_price"]'
        return (By.XPATH, (ProductsPageLocator._PREFIX + str(index) + ITEM))

    @staticmethod
    def PRODUCT_ADD_BUTTON(index):
        """Locator for the ADD TO CART button of row *index*."""
        ITEM = ']//button[text()="ADD TO CART"]'
        return (By.XPATH, (ProductsPageLocator._PREFIX + str(index) + ITEM))

    @staticmethod
    def PRODUCT_REMOVE_BUTTON(index):
        """Locator for the REMOVE button of row *index*."""
        ITEM = ']//button[text()="REMOVE"]'
        return (By.XPATH, (ProductsPageLocator._PREFIX + str(index) + ITEM))
| StarcoderdataPython |
9780677 | """
A one-time parser for dataset "B" containing information about French zones.
"""
import argparse
import csv
import os
import xml.etree.ElementTree as et
import fetch
# Stable download URL for dataset "B" on data.gouv.fr.
DATASET_B_URL = 'https://www.data.gouv.fr/fr/datasets/r/eeebe970-6e2b-47fc-b801-4a38d53fac0d'
# Column order of the produced zone.csv file (must match the row lists
# built by parse_dataset_b).
OUTPUT_HEADER = [
    'zone_id',
    'zone_code',
    'resident_population',
    'resident_population_year',
    'area_km2',
    'organisation_name',
    'multipolygon',
]
def format_polygon_line(indiviual_points):
    """
    Given a list of coordinates [lat1, long1, lat2, long2, lat3, long3, ...], create a string with
    longitude latitude couples separated by commas, and enclosed in parentheses, ex:
    '(long1 lat1,long2 lat2,long3 lat3,...)'

    :param indiviual_points: a flat list of coordinate strings (lat/long pairs).
    :return: a formatted polygon line; '()' for an empty input (the previous
        concat-then-truncate implementation returned ')' in that case).
    """
    # Swap each (lat, long) pair into "long lat" order as required by WKT.
    pairs = [
        indiviual_points[i + 1] + ' ' + indiviual_points[i]
        for i in range(0, len(indiviual_points), 2)
    ]
    return '(' + ','.join(pairs) + ')'
def extract_multipolygon(param):
    """
    Parse a gml:MultiSurface element and extract all polygons in WKT format.

    :param param: a gml:MultiSurface XML element, searched with the
        namespace map ``fetch.NS``.
    :return: a WKT MULTIPOLYGON string; each polygon keeps its exterior ring
        first, followed by any interior rings (holes).
    """
    # NOTE(review): assumes at least one gml:Polygon is present; with none,
    # the trailing-comma strip below would truncate the opening '(' -- confirm
    # the dataset guarantees this.
    multipolygon = 'MULTIPOLYGON ('
    polygon_node_list = param.findall('.//gml:Polygon', fetch.NS)
    for polygon_node in polygon_node_list:
        multipolygon += '('
        # Exterior ring: one whitespace-separated gml:posList of coordinates.
        exterior_points = polygon_node.find('.//gml:exterior', fetch.NS).find('.//gml:posList', fetch.NS).text.split()
        multipolygon += format_polygon_line(exterior_points)
        # Interior rings (holes), if any, are appended after the exterior.
        interior_nodes_list = polygon_node.findall('.//gml:interior', fetch.NS)
        if interior_nodes_list:
            for interior_node in interior_nodes_list:
                multipolygon += ','
                interior_points = interior_node.find('.//gml:posList', fetch.NS).text.split()
                multipolygon += format_polygon_line(interior_points)
        multipolygon += '),'
    # Drop the trailing comma left by the loop above, then close the WKT.
    multipolygon = multipolygon[:-1]
    multipolygon += ')'
    return multipolygon
def parse_dataset_b(xml_content):
    """
    Extract the fields that interest us from the dataset "B" XML document.

    :param xml_content: raw XML string, or None when the fetch failed.
    :return: a list of attribute lists, one per zone (cf OUTPUT_HEADER);
        empty when *xml_content* is None.
    """
    if xml_content is None:
        return []
    xroot = et.fromstring(xml_content)
    zone_node_list = xroot.findall('.//aqd:AQD_Zone', fetch.NS)
    parsed_content = []
    for zone_node in zone_node_list:
        zone_id = zone_node.attrib['{' + fetch.NS['gml'] + '}id']
        zone_code = zone_node.find('.//aqd:zoneCode', fetch.NS).text
        resident_population = zone_node.find('.//aqd:residentPopulation', fetch.NS).text
        resident_population_year_node = zone_node.find('.//aqd:residentPopulationYear', fetch.NS)
        # BUGFIX: ElementTree elements are falsy when they have no children,
        # so a plain `if node:` can wrongly skip a present node.  Test for
        # absence explicitly with `is not None`.
        if resident_population_year_node is not None:
            resident_population_year = resident_population_year_node.find('.//gml:timePosition', fetch.NS).text
        else:
            resident_population_year = ''
        area = zone_node.find('.//aqd:area', fetch.NS).text
        organisation_name = zone_node.find('.//base2:organisationName', fetch.NS).find('.//gco:CharacterString',
                                                                                       fetch.NS).text
        multipolygon = extract_multipolygon(zone_node.find('.//gml:MultiSurface', fetch.NS))
        parsed_content.append([
            zone_id,
            zone_code,
            resident_population,
            resident_population_year,
            area,
            organisation_name,
            multipolygon
        ])
    return parsed_content
def transform_to_csv(outdir):
    """Fetch dataset B, parse it, and write the zones to ``zone.csv``.

    :param outdir: directory in which the CSV file is created.
    """
    parsed_rows = parse_dataset_b(fetch.fetch_file_content(DATASET_B_URL))
    target_path = os.path.join(outdir, 'zone.csv')
    with open(target_path, 'w', newline='') as handle:
        csv_writer = csv.writer(handle)
        csv_writer.writerow(OUTPUT_HEADER)
        csv_writer.writerows(parsed_rows)
if __name__ == '__main__':
    # CLI entry point: parse the output directory and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument('--outdir', help='output directory (default = `data`)', default='data')
    args = parser.parse_args()
    outdir = args.outdir
    # Ensure the output directory exists before writing into it.
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # Consistency: use the already-extracted `outdir` local (the original
    # re-read args.outdir here; same value either way).
    transform_to_csv(outdir)
| StarcoderdataPython |
12815406 | <filename>rtmlparse/elements/readoutnoise.py
from lxml import etree
from .baseelement import BaseElement
from .misc import auto_attr_check
@auto_attr_check
class ReadoutNoise(BaseElement):
    """RTML <ReadoutNoise> element: a noise value (in adu) plus optional
    description, URI and base64 payload.

    The class-level names below declare the expected attribute types that
    the auto_attr_check decorator enforces on assignment.
    """
    Base64Data = str
    Description = str
    Uri = str
    Value = float

    def __init__(self, parent, name=None, uid=None):
        """Create the element under *parent* with optional name/uid."""
        # BaseElement
        BaseElement.__init__(self, 'ReadoutNoise', parent, name=name, uid=uid)
        # Bias
        self.Base64Data = None
        self.Description = None
        self.Uri = None
        self.Value = None

    def to_xml(self, parent, add_children=True):
        """Serialise this element under *parent*.

        Returns the created XML element, or None when the base class decided
        not to emit one.
        """
        # add element
        element = BaseElement.to_xml(self, parent, add_children=add_children)
        if element is None:
            return None
        ns = '{' + self.rtml.namespace + '}'
        # other stuff
        self.add_text_value(element, 'Base64Data', self.Base64Data, namespace=ns)
        self.add_text_value(element, 'Description', self.Description, namespace=ns)
        self.add_text_value(element, 'Uri', self.Uri, namespace=ns)
        # Value is written as a float with an explicit 'adu' unit attribute.
        self.add_text_value(element, 'Value', self.Value, 'f', attrib={'units': 'adu'}, namespace=ns)
        # return base element
        return element

    def from_xml(self, element, rtml):
        """Restore this element's fields from the XML *element*."""
        # base call
        BaseElement.from_xml(self, element, rtml)
        ns = '{' + rtml.namespace + '}'
        # other stuff
        self.Base64Data = self.from_text_value(element, 'Base64Data', str, namespace=ns)
        self.Description = self.from_text_value(element, 'Description', str, namespace=ns)
        self.Uri = self.from_text_value(element, 'Uri', str, namespace=ns)
        self.Value = self.from_text_value(element, 'Value', float, namespace=ns)
| StarcoderdataPython |
1772014 | <reponame>taojy123/PokemonCard
# Generated by Django 2.1.4 on 2019-09-02 08:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: link Player to the auth user model, add a
    `turn` flag, and relax Card's skill FK / status choices."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('game', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='player',
            name='turn',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='player',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='card',
            name='skill',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='game.Skill'),
        ),
        migrations.AlterField(
            model_name='card',
            name='status',
            field=models.IntegerField(choices=[(-1, '弃牌'), (0, '牌堆'), (1, '手牌'), (2, '休养'), (3, '上场')], default=0),
        ),
    ]
| StarcoderdataPython |
349237 | <filename>render/DroneRender.py<gh_stars>10-100
import bpy
from math import radians
import sys
import os
import argparse
import pyexcel
def get_dataset(dataset):
    """Map a dataset identifier to its wrapper object.

    :param dataset: one of 'suncg', 'matterport3d', 'stanford2d3d'.
    :return: the dataset wrapper instance, or None for an unknown name.
    """
    # Use a single elif chain (the original mixed bare `if`s with a trailing
    # if/else, which only worked because every matched branch returned) and
    # make the unknown-name None return explicit.
    if dataset == 'suncg':
        return suncg.SunCG()
    elif dataset == 'matterport3d':
        return matterport3d.Matterport3D()
    elif dataset == 'stanford2d3d':
        return stanford2d3d.Stanford2D3D()
    else:
        # NOTE(review): the CLI choices also allow 'gibson', which is not
        # handled here -- confirm whether it should map to a loader.
        print("!!! - Erroneous dataset selection.")
        return None
def init_script():
    """Work out how the script was launched (Blender text editor vs CLI).

    :return: (argument vector after '--', path of this script/module,
        True when running from inside Blender's text editor).
    """
    arguments_vector = sys.argv
    in_blender = False
    if "--" not in arguments_vector: # running from inside blender
        arguments_vector = [] # as if no args are passed
        module_path = bpy.context.space_data.text.filepath # all modules in same path as the script
        os.chdir(os.path.dirname(module_path))
        in_blender = True
    else: # running as cli-like background blender script
        arguments_vector = arguments_vector[arguments_vector.index("--") + 1:]# get all args after "--"
        module_path = os.path.abspath(__file__) # all modules in same path as the script
    return arguments_vector, module_path, in_blender
def import_modules(module_path, verbose):
    """Import the sibling project modules and force-reload them.

    Reloading matters when iterating from inside Blender's text editor,
    where a stale module would otherwise persist between runs.

    :param module_path: path of this script; its directory is added to sys.path.
    :param verbose: log the path addition when True.
    """
    if verbose:
        print("Adding module path: %s" % module_path)
    sys.path.append(os.path.dirname(module_path))
    import deleters
    import utils
    import engine
    import dataset
    import colour
    import semantics
    import suncg
    import matterport3d
    import stanford2d3d
    import airsim
    import imp # force a reload
    imp.reload(deleters)
    imp.reload(utils)
    imp.reload(engine)
    imp.reload(dataset)
    imp.reload(colour)
    imp.reload(semantics)
    imp.reload(suncg)
    imp.reload(matterport3d)
    imp.reload(stanford2d3d)
    imp.reload(airsim)
def print_arguments(args):
    """Pretty-print the parsed CLI arguments and runtime paths.

    NOTE(review): reads the module-level ``unknown`` list assigned in the
    __main__ block -- confirm before calling this from anywhere else.
    """
    print("Supplied arguments:")
    for arg in vars(args):
        print("\t", arg, ":", getattr(args, arg))
    if len(unknown) > 0:
        print("Unknown arguments:")
        for arg in unknown:
            print("\t", arg,)
    print("Current working directory : %s" % os.getcwd())
    print("Bleder data filepath : %s" % bpy.data.filepath)
def parse_arguments(args):
    """Build the CLI parser and parse *args*.

    :param args: argument vector (already stripped of Blender's own args).
    :return: (parsed Namespace, list of unknown arguments) as produced by
        argparse's parse_known_args.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v','--verbose', help='Log script flow details.', default=True, action='store_true')
    parser.add_argument('--scene_model', help='File containing the 3D scene to be imported', \
        default='..\\..\\Data\\Matterport3D\\Data\\1pXnuDYAj8r\\1pXnuDYAj8r\\matterport_mesh\\2e84c97e728d46babd3270f4e1a0ae3a\\2e84c97e728d46babd3270f4e1a0ae3a.obj')
    parser.add_argument('--output_path', help='The path where the rendered output will be saved at.',\
        default="..\\..\\Data\\test_renders\\renders\\")
    parser.add_argument('--camera_path', help='The path where the camera position will be parsed from.',\
        default="..\\..\\Data\\test_renders\\poses\\test\\1pXnuDYAj8r\\2019-12-24-13-30-23\\airsim_rec_blender.txt")
    parser.add_argument('--samples', help='Number of samples to be used when ray-tracing.', type=int, default=256)
    parser.add_argument('--device_type', help='Compute device type.', default='GPU', choices=['CPU', 'GPU'])
    parser.add_argument('--device_id', help='Compute device ID.', default=1, type=int)
    parser.add_argument('--width', help='Rendered images width.', default=320, type=int)
    parser.add_argument('--height', help='Rendered images height.', default=180, type=int)
    parser.add_argument('--dataset', help='Which dataset this sample belongs to.', \
        default='matterport3d', \
        choices=['matterport3d', 'stanford2d3d', 'gibson'])
    parser.add_argument('--ego_fov_h', help='Egocentric camera horizontal field-of-view (in degrees).', default=85.0, type=float)
    parser.add_argument('--far_dist', help='Convergence (far plane) distance.', default=8.0, type=float)
    # Exactly one primary render pass may be selected (color / raw / normals).
    primary_render_group = parser.add_mutually_exclusive_group()
    primary_render_group.add_argument('-c','--color', help='Render and save color image.', default=False, action='store_true')
    primary_render_group.add_argument('-r','--raw', help='Render and save emission image.', default=True, action='store_true')
    parser.add_argument('-d','--depth', help='Render and save depth map.', default=True, action='store_true')
    primary_render_group.add_argument('-n','--normals', help='Render and save rendered normals.', default=False, action='store_true')
    parser.add_argument('--normal_map', help='Render and save normal map.', default=True, action='store_true')
    parser.add_argument('-f','--flow', help='Render and save optical flow map.', default=True, action='store_true')
    parser.add_argument('-m','--mask', help='Render and save occlusion mask.', default=False, action='store_true')
    parser.add_argument('--log_sheet', help='The path where processing information will be logged at.',\
        default="..\\..\\Data\\test_renders\\drone.xlsx")
    return parser.parse_known_args(args)
if __name__ == "__main__":
    # --- Setup: argument parsing and dynamic module (re)loading -------------
    arguments_vector, module_path, in_blender = init_script()
    args, unknown = parse_arguments(arguments_vector)
    if args.verbose:
        print_arguments(args)
    import_modules(module_path, args.verbose)
    import deleters
    import utils
    import engine
    import dataset
    import colour
    import semantics
    import suncg
    import matterport3d
    import stanford2d3d
    import airsim
    # --- Scene preparation: clear the Blender scene and import the model ---
    deleters.delete_all()
    deleters.delete_materials()
    dataset = get_dataset(args.dataset)
    dataset.import_model(args.scene_model)
    base_filename = dataset.get_instance_name(args.scene_model)
    # Configure the Cycles render engine with the requested passes.
    render_engine = engine.Cycles28(
        args.device_type, args.device_id, args.samples,
        args.depth, args.normals or args.normal_map,
        False, args.raw, args.flow)
    nodes, links, compositor = render_engine.get_scene_nodes()
    # --- Compositor outputs: one output node per requested render pass -----
    output_nodes = []
    if args.depth:
        depth_out = dataset.get_depth_output(args.output_path, base_filename, \
            nodes, links, compositor)
        output_nodes.append(engine.OutputNode(depth_out, base_filename, 'depth'))
    if args.color:
        image_out = dataset.get_color_output(args.output_path, base_filename, \
            nodes, links, compositor)
        output_nodes.append(engine.OutputNode(image_out, base_filename, 'color'))
    elif args.normals:
        normals_out = dataset.get_normals_output(args.output_path, base_filename, \
            nodes, links, compositor)
        output_nodes.append(engine.OutputNode(normals_out, base_filename, 'normals'))
    if args.raw:
        emission_out = dataset.get_emission_output(args.output_path, base_filename, \
            nodes, links, compositor)
        output_nodes.append(engine.OutputNode(emission_out, base_filename, 'emission'))
    if args.normal_map:
        normal_map_out = dataset.get_normal_map_output(args.output_path, base_filename, \
            nodes, links, compositor)
        output_nodes.append(engine.OutputNode(normal_map_out, base_filename, 'normal_map'))
    if args.flow:
        flow_map_out = dataset.get_flow_map_output(args.output_path, base_filename, \
            nodes, links, compositor)
        output_nodes.append(engine.OutputNode(flow_map_out, base_filename, 'flow_map'))
    if in_blender:
        print("Running from inside blender ...")
    # --- Render loop: one camera pose pair (frames t and t+1) per position -
    camera_positions = airsim.load_camera_tuples(args.camera_path)
    trajectory_date = os.path.basename(os.path.dirname(args.camera_path))
    camera_pos_index = 0
    for ego_pos_rot_t, ego_pos_rot_tp1, exo_pos_rot in camera_positions:
        camera = render_engine.get_camera('perspective', args.width, args.height)
        camera.data.stereo.convergence_distance = args.far_dist
        camera.data.clip_start = 0.1
        camera.data.clip_end = args.far_dist
        camera.data.lens_unit = 'FOV'
        camera.data.angle = radians(args.ego_fov_h)
        dataset.set_render_settings()
        camera.rotation_mode = 'QUATERNION'
        # Keyframe pose at frame 0 (time t) and frame 1 (time t+1) so the
        # optical flow pass can be computed between the two frames.
        bpy.context.scene.frame_set(0)
        camera.location = ego_pos_rot_t[0]
        camera.rotation_quaternion = ego_pos_rot_t[1]
        camera.keyframe_insert('location',group="LocRot")
        camera.keyframe_insert('rotation_quaternion',group="LocRot")
        bpy.context.scene.frame_set(1)
        camera.location = ego_pos_rot_tp1[0]
        camera.rotation_quaternion = ego_pos_rot_tp1[1]
        camera.keyframe_insert('location',group="LocRot")
        camera.keyframe_insert('rotation_quaternion',group="LocRot")
        bpy.context.scene.camera = camera
        for fid in range(2):
            for node in output_nodes:
                node.prepare_render("egocentric", fid, trajectory_date, camera_pos_index)
            bpy.context.scene.frame_set(fid)
            bpy.ops.render.render(write_still=True)
        camera_pos_index += 1
        # When testing inside Blender, stop after a few positions.
        if in_blender and camera_pos_index >= 3:
            break
    # --- Bookkeeping: append this trajectory to the processing log sheet ---
    if args.log_sheet:
        print("Updating %s" % args.log_sheet)
        data = pyexcel.get_sheet(file_name=args.log_sheet)
        count = len(data.to_array())
        data[count, 0] = trajectory_date
        data.save_as(args.log_sheet)
1925631 | from .config import ConfigLoader | StarcoderdataPython |
291019 | import datetime
from marshmallow import Schema, fields
class DeadlineSchema(Schema):
    """Marshmallow schema for a deadline; name and date are required."""
    id = fields.Int(dump_only=True)  # server-assigned, never loaded from input
    name = fields.Str(required=True)
    date = fields.DateTime(required=True)
    status = fields.Str()
class DeadlineUpdateSchema(Schema):
    """Partial-update variant of DeadlineSchema: every field is optional."""
    name = fields.Str()
    date = fields.DateTime()
    status = fields.Str()
# Shared, stateless schema instances used by the API handlers.
deadline_schema = DeadlineSchema()
deadlines_schema = DeadlineSchema(many=True)
deadline_update_schema = DeadlineUpdateSchema()
| StarcoderdataPython |
1602166 | <filename>TCP_Connection/server.py
'''
Assignment 2
Server for image classification
Author: fanconic
'''
import base64
from threading import Thread
from queue import Queue
from PIL import Image
import socket, json
from keras.preprocessing import image
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from io import BytesIO
import numpy as np
import time
# Work queue of accepted client sockets, handed from the acceptor thread
# to the classification worker thread.
queue = Queue()
# TCP bind address of the classification server.
PORT = 50002
ADDRESS = 'localhost'
# Listen for incoming connections
def main_thread():
    """Accept TCP connections forever and enqueue each client socket for the
    worker thread to process."""
    # Create a socket
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Bind socket to localhost
    server_socket.bind((ADDRESS, PORT))
    # Listen for incoming connections from clients (backlog of 10)
    server_socket.listen(10)
    while True:
        # Block until a client connects, then hand its socket to the queue
        (client_socket, address) = server_socket.accept()
        queue.put(client_socket)
# Receive data from clients, decode, and feed into neural network
def second_thread():
    """Worker loop: receive a base64 JPEG over TCP, classify it with
    ResNet50, and send the top-5 predictions back as JSON.

    Wire format (both directions): UTF-8 JSON terminated by '##END##'.
    """
    # ResNet50 model pretrained on ImageNet, loaded once for all requests.
    model = ResNet50(weights='imagenet')
    while True:
        client_socket = queue.get()
        # Receive TCP data and check with the end string if everything arrived
        data = ''
        while True:
            part = client_socket.recv(1024)
            data += part.decode('utf-8')
            if '##END##' in data:
                break
        # Load from JSON after stripping the end marker
        data = data.replace('##END##', '')
        data = json.loads(data)
        # Get variables from dict
        chat_id = data['chat_id']
        encoded_image = data['encoded_image']
        img = base64.b64decode(encoded_image)
        # Convert picture from bytes to image
        # https://www.programcreek.com/python/example/89218/keras.preprocessing.image.img_to_array
        img = Image.open(BytesIO(img))
        # Keras ResNet50 expects 224 x 224 inputs
        img = img.resize((224,224))
        # Write the picture to an array
        X = image.img_to_array(img)
        # Add the batch axis
        X = np.expand_dims(X, axis=0)
        # Preprocess the input
        X = preprocess_input(X)
        pred = model.predict(X)
        pred = decode_predictions(pred)
        # Collect the top-5 (label, probability) pairs
        predictions = []
        for i in range (5):
            predictions.append({'Label': pred[0][i][1], 'Probability': str(pred[0][i][2])})
        # Prepare data to be sent back
        data = {
            'chat_id': chat_id,
            'predictions': predictions
        }
        data = json.dumps(data) + '##END##'
        try:
            # Send back data
            client_socket.sendall(data.encode('utf-8'))
        finally:
            # Close socket
            client_socket.close()
if __name__ == "__main__":
    # Start the acceptor and the worker, then keep the main thread alive.
    Thread(target= main_thread).start()
    Thread(target= second_thread).start()
    while True:
        time.sleep(10)
| StarcoderdataPython |
3417821 | from django.test import TestCase
from .preprocess import PreProcessor
from django.conf import settings
from .models import Build
DATA_ROOT_DIRECTORY = settings.DATA_ROOT_DIRECTORY
# Create your tests here.
class PreprocessTestCase(TestCase):
    """Exercises PreProcessor against the on-disk dataset.

    NOTE(review): these "tests" iterate real project data and call exit(-1)
    on failure -- they behave more like batch scripts than unit tests.
    """

    def setUp(self):
        self.processor = PreProcessor(data_root_directory=DATA_ROOT_DIRECTORY)

    def test_view_build_failed_info(self):
        """Smoke test: print build-failure info for one project."""
        self.processor.view_build_failed_info('apache_storm', 'train_set.txt')

    def test_write_data_to_db(self):
        """Import every project's train set into the database, resuming from
        a hard-coded index and aborting on the first failure."""
        projects = sorted(self.processor.get_sub_folders())
        # self.processor.write_data_to_db('sensu_sensu-community-plugins', 'train_set.txt')
        # exit(-1)
        print("Total projects: %s" % len(projects))
        # Resume point from a previous partial run.
        index = 370
        untest_projects = projects[index:]
        for project_name in untest_projects:
            index += 1
            print('\nProject %s: %s' % (index, project_name))
            # Skip hidden files and stray csv/txt files in the data root.
            if project_name.startswith('.') or project_name.endswith('.csv') or project_name.endswith('.txt'):
                continue
            try:
                self.processor.write_data_to_db(project_name, 'train_set.txt')
            except Exception as e:
                print(str(e))
                print(index, project_name)
                exit(-1)
class ModelsTestCase(TestCase):
    """Smoke test for the Build model's project summary."""

    def setUp(self) -> None:
        pass

    def test_describe_projects(self):
        """Print the per-project build statistics."""
        Build.describe_projects()
| StarcoderdataPython |
236502 | from distutils.core import setup
from Cython.Build import cythonize
from setuptools import Extension
from os import path
import numpy as np
# Package name under which the compiled Cython extension is installed.
ext_package = 'boilerplate'
# Cython extension: utils.pyx compiled with the numpy headers available.
cython = [Extension('utils',
                    [path.join(ext_package, 'utils.pyx')],
                    include_dirs=[np.get_include()])
          ]
setup(
    name='boilerplate',
    packages=['boilerplate'],
    version='0.0.2',
    description="Python boilerplate project",
    license=open('LICENSE.rst').read(),
    # Setuptools 18.0 properly handles Cython extensions.
    setup_requires=[
        'setuptools>=18.0',
        'cython',
    ],
    author='<NAME>',
    url='https://github.com/johannesu/cython-wrapper-boilerplate',
    keywords=['boilerplate'],
    long_description=open('README.rst').read(),
    ext_package=ext_package,
    ext_modules=cythonize(cython),
)
| StarcoderdataPython |
248669 | <reponame>Matheus-Henrique-Burey/Curso-de-Python
# Interactive calculator (Portuguese UI): loops over a menu of operations
# on two numbers until the user chooses option 5 to quit.
print('=-' * 15)
print('CALCULADORA')
print('=-' * 15)
opcao = 0
num1 = int(input('Digite um numero: '))
num2 = int(input('Digite outro numero: '))
# Menu loop: repeat until option 5 (quit) is chosen.
while not opcao == 5:
    print('-' * 30)
    print('''Qual operação voce deseja realizar:
    [ 1 ] SOMAR
    [ 2 ] MULTIPLICAR
    [ 3 ] MAIOR
    [ 4 ] NOVOS NUMEROS
    [ 5 ] SAIR DO PROGRAMA''')
    opcao = int(input('Sua escloha: '))
    if opcao == 1:
        # Sum of the two numbers.
        print(f'Soma: {num1} + {num2} = {num2 + num1}')
    elif opcao == 2:
        # Product of the two numbers.
        print(f'Multiplicaçao: {num1} x {num2} = {num1 * num2}')
    elif opcao == 3:
        # Report which of the two numbers is larger.
        if num1 < num2:
            maior = num2
            menor = num1
        else:
            maior = num1
            menor = num2
        print(f'O valor {maior} é maior que {menor}')
    elif opcao == 4:
        # Read a fresh pair of numbers.
        num1 = int(input('Digite um numero: '))
        num2 = int(input('Digite outro numero: '))
    elif opcao == 5:
        print('Volte sempre!!!')
    else:
        print('Comando invalido, digite novamente')
9642311 | #!/usr/bin/python
def outlierCleaner(predictions, ages, net_worths):
    """
    Clean away the 10% of points that have the largest residual errors
    (difference between the prediction and the actual net worth).

    :param predictions: array of predicted net worths.
    :param ages: array of ages (same length as predictions).
    :param net_worths: array of actual net worths (same length).
    :return: list of (age, net_worth, error) tuples for the kept 90%,
        ordered by increasing absolute error.
    """
    residuals = abs(predictions - net_worths)
    # Pair every point with its absolute error and order by that error.
    ranked = sorted(zip(ages, net_worths, residuals), key=lambda entry: entry[2])
    # Keep only the 90% of points with the smallest residuals.
    keep_count = round(len(ranked) * 0.9)
    return ranked[:keep_count]
| StarcoderdataPython |
9664947 | #!/usr/bin/python3
# ____ _ _ ____ _ _ _
# / ___|___ _ __ ___ _ __ ___ _ _ _ __ (_) |_ _ _ / ___|___ _ __ | |_ _ __ ___ | | | ___ _ __
# | | / _ \| '_ ` _ \| '_ ` _ \| | | | '_ \| | __| | | | | / _ \| '_ \| __| '__/ _ \| | |/ _ \ '__|
# | |__| (_) | | | | | | | | | | | |_| | | | | | |_| |_| | |__| (_) | | | | |_| | | (_) | | | __/ |
# \____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|_|\__|\__, |\____\___/|_| |_|\__|_| \___/|_|_|\___|_|
# |___/
#Copyright (c) 2018 CommunityController
#All rights reserved.
#
#This work is licensed under the terms of the MIT license.
#For a copy, see <https://opensource.org/licenses/MIT>.
import os
import re
import json
import socket
import asyncore
import requests
from threading import Thread
from time import sleep, time
from lib.switch_controller import *
# Configuration dicts populated by loadConfig() at startup.
bannedConfig = None
cmmndsConfig = None
serialConfig = None
twitchConfig = None
ccsapiConfig = None
# IRC client handles (only mainClient is currently assigned).
botClient = None
mainClient = None
# Pending controller commands, persisted to config/queue.json.
commandQueue = []
# NOTE(review): these serial constants appear unused once serialConfig is
# loaded from config/serial.json -- confirm before removing.
SERIAL_DEVICE = "COM5"
SERIAL_BAUD = 9600
#Could be useful if we add more ways of inputting commands (Facebook Live, Discord...)
class UserMessage:
message = ""
username = ""
origin = ""
mod = False
sub = False
def loadMessageFromTwitch(self, twitchData: str):
if "PRIVMSG" in twitchData:
self.origin = "Twitch"
if twitchData.find("mod=") != -1:
self.mod = bool(int(twitchData[twitchData.find("mod=")+4]))
if twitchData.find("subscriber=") != -1:
self.sub = bool(int(twitchData[twitchData.find("subscriber=")+11]))
r = re.compile(r"^:([\w\W]{0,}?)![\w\W]{0,}?@[\w\W]{0,}?\.tmi\.twitch\.tv\s([A-Z]{0,}?)\s#([\w\W]{0,}?)\s:([\w\W]{0,}?)$")
matches = r.match(twitchData)
if matches:
self.username = matches.groups()[0]
self.message = matches.groups()[3]
def loadConfig() -> None:
    """Load (or scaffold) every config file under ./config.

    For each missing/invalid file a sample is written and, for the files a
    human must fill in, the process exits so they can be edited.  Note the
    validation trick used below: a bare `raise` inside the `try` (with no
    active exception) raises RuntimeError, which the broad `except` catches
    and treats as "config invalid".
    """
    global twitchConfig
    global serialConfig
    global cmmndsConfig
    global bannedConfig
    global ccsapiConfig
    global commandQueue
    exceptionCount = 0
    os.makedirs("config", exist_ok=True)
    #Loading Twitch config
    try:
        twitchConfig = json.load(open("config/twitch.json", "r"))
        if not all(k in twitchConfig for k in ("host", "port", "mainUsername", "mainPassword")):
            raise
    except:
        twitchConfig = {"host": "irc.chat.twitch.tv", "port": 6667, "mainUsername": "CommunityController", "mainPassword": "<PASSWORD>"}
        json.dump(twitchConfig, open("config/twitch.json", "w"))
        exceptionCount += 1
        print("Twitch config file not found! Sample config created.")
    #Loading Serial config
    try:
        serialConfig = json.load(open("config/serial.json", "r"))
        if not all(k in serialConfig for k in ("device", "baud")):
            raise
    except:
        serialConfig = {"device": "COM5", "baud": 9600}
        json.dump(serialConfig, open("config/serial.json", "w"))
        exceptionCount += 1
        print("Serial config file not found! Sample config created.")
    #Loading Commands config
    try:
        cmmndsConfig = json.load(open("config/commands.json", "r"))
    except:
        cmmndsConfig = {"A": "controller.push_button(BUTTON_A)"}
        json.dump(cmmndsConfig, open("config/commands.json", "w"))
        exceptionCount += 1
        print("Commands config file not found! Sample config created.")
    #Loading Community Controller site API and shadowban config
    # Prefer the live shadowban list from the API; fall back to the local
    # cached copy (or an empty list) when the API is unreachable.
    try:
        ccsapiConfig = json.load(open("config/CommunityControllerAPI.json", "r"))
        r = requests.get(ccsapiConfig["url"] + "/shadowbans", headers={"Accept": "application/json", "Authorization": "Bearer " + ccsapiConfig["token"]})
        bannedConfig = r.json()
        json.dump(bannedConfig, open("config/shadowbans.json", "w"))
    except:
        ccsapiConfig = {"url": "https://communitycontroller.com/api", "token": "<PASSWORD>"}
        json.dump(ccsapiConfig, open("config/CommunityControllerAPI.json", "w"))
        try:
            bannedConfig = json.load(open("config/shadowbans.json", "r"))
        except:
            bannedConfig = {"shadowbans": []}
            json.dump(bannedConfig, open("config/shadowbans.json", "w"))
    # Restore any commands still queued from a previous run.
    try:
        commandQueueJson = json.load(open("config/queue.json", "r"))
        commandQueue = commandQueueJson["queue"]
    except:
        commandQueue = []
        commandQueueJson = {"queue": []}
        json.dump(commandQueueJson, open("config/queue.json", "w"))
    if exceptionCount >= 1:
        print("Please edit the config files and try again.")
        exit(0)
#Copy/Pasted it from V1. Might rewrite it if needed.
def customCommand(single: str) -> None:
command_executed = False
tmpr = single[7:single.find(")")].strip().replace("_", " ") # tmpr == "smthg"
combine = []
if tmpr[0:1] == "[" and tmpr.find("]") > 0: # tmpr == "a[b, ...]c"
combine = tmpr[tmpr.find("[") + 1:tmpr.find("]")].split(";") # combine == ["b", "..."]
tmpr = tmpr[tmpr.find("]") + 1:] # tmpr == "c"
elif tmpr.find(";") > -1: # tmpr == "x,y"
combine = [tmpr[0:tmpr.find(";")]] # combine == ["x"]
else: # tmpr = "x"
combine = [tmpr] # combine == ["x"]
tmpr = ""
tmpr = tmpr[tmpr.find(";") + 1:].strip()
# At this point...
# combine is an array of commands
# tmpr is a string supposedly containing the duration of the custom command
duration = 0.02
try:
duration = float(tmpr)
if duration > 0 and duration <= 1: # the duration has to be between 0 and 1 second
duration = duration
else:
duration = 0.02
except:
0
cmd = [] # array of the commands to execute, again...
for i in combine:
i = i.strip().replace(" ", "_")
if i in ["PLUS", "START"]:
cmd.append(BUTTON_PLUS)
elif i in ["MINUS", "SELECT"]:
cmd.append(BUTTON_MINUS)
elif i == "A":
cmd.append(BUTTON_A)
elif i == "B":
cmd.append(BUTTON_B)
elif i == "X":
cmd.append(BUTTON_X)
elif i == "Y":
cmd.append(BUTTON_Y)
elif i in ["UP", "DUP", "D_UP"]:
cmd.append(DPAD_UP)
elif i in ["DOWN", "DDOWN", "D_DOWN"]:
cmd.append(DPAD_DOWN)
elif i in ["LEFT", "DLEFT", "D_LEFT"]:
cmd.append(DPAD_LEFT)
elif i in ["RIGHT", "DRIGHT", "D_RIGHT"]:
cmd.append(DPAD_RIGHT)
elif i in ["L", "LB"]:
cmd.append(BUTTON_L)
elif i in ["R", "RB"]:
cmd.append(BUTTON_R)
elif i in ["ZL", "LT"]:
cmd.append(BUTTON_ZL)
elif i in ["ZR", "RT"]:
cmd.append(BUTTON_ZR)
elif i in ["LCLICK", "L3"]:
cmd.append(BUTTON_LCLICK)
elif i in ["RCLICK", "R3"]:
cmd.append(BUTTON_RCLICK)
elif i in ["LUP", "L_UP"]:
cmd.append("L_UP")
elif i in ["LDOWN", "L_DOWN"]:
cmd.append("L_DOWN")
elif i in ["LLEFT", "L_LEFT"]:
cmd.append("L_LEFT")
elif i in ["LRIGHT", "L_RIGHT"]:
cmd.append("L_RIGHT")
elif i in ["RUP", "R_UP"]:
cmd.append("R_UP")
elif i in ["RDOWN", "R_DOWN"]:
cmd.append("R_DOWN")
elif i in ["RLEFT", "R_LEFT"]:
cmd.append("R_LEFT")
elif i in ["RRIGHT", "R_RIGHT"]:
cmd.append("R_RIGHT")
elif i == "WAIT":
cmd.append("WAIT")
for i in cmd: # buttons to hold
if i in [BUTTON_PLUS, BUTTON_MINUS, BUTTON_A, BUTTON_B, BUTTON_X, BUTTON_Y, BUTTON_L, BUTTON_R,
BUTTON_ZL, BUTTON_ZR, BUTTON_LCLICK, BUTTON_RCLICK]:
controller.hold_buttons(i)
command_executed = True
elif i in [DPAD_UP, DPAD_DOWN, DPAD_LEFT, DPAD_RIGHT]:
controller.hold_dpad(i)
command_executed = True
elif i == "L_UP":
controller.move_forward(MODE_BACK_VIEW)
command_executed = True
elif i == "L_DOWN":
controller.move_backward(MODE_BACK_VIEW)
command_executed = True
elif i == "L_LEFT":
controller.move_left()
command_executed = True
elif i == "L_RIGHT":
controller.move_right()
command_executed = True
elif i == "R_UP":
controller.look_up()
command_executed = True
elif i == "R_DOWN":
controller.look_down()
command_executed = True
elif i == "R_LEFT":
controller.look_left()
command_executed = True
elif i == "R_RIGHT":
controller.look_right()
command_executed = True
elif i == "WAIT":
command_executed = True
if command_executed: # sleep if any command has been executed
sleep(duration)
for i in cmd: # release the buttons
if i in [BUTTON_PLUS, BUTTON_MINUS, BUTTON_A, BUTTON_B, BUTTON_X, BUTTON_Y, BUTTON_L, BUTTON_R,
BUTTON_ZL, BUTTON_ZR, BUTTON_LCLICK, BUTTON_RCLICK]:
controller.release_buttons(i)
elif i in [DPAD_UP, DPAD_DOWN, DPAD_LEFT, DPAD_RIGHT]:
controller.release_dpad()
elif i in ["L_UP", "L_DOWN", "L_LEFT", "L_RIGHT"]:
controller.release_left_stick()
elif i in ["R_UP", "R_DOWN", "R_LEFT", "R_RIGHT"]:
controller.release_right_stick()
def isUserBanned(username: str):
    """Return True if *username* appears in the shadowban list.

    Reads the module-level ``bannedConfig`` dict loaded by loadConfig().
    """
    # any() replaces the original manual flag-and-loop; it also short-circuits
    # on the first match instead of scanning the whole list.
    return any(entry["user"] == username for entry in bannedConfig["shadowbans"])
def executeCommand(command: str):
    """Execute one named command by exec()-ing its snippet from commands.json.

    SECURITY NOTE: exec() runs arbitrary Python from the config file; only
    trusted operators should be able to edit config/commands.json.
    Unknown command names are silently ignored.
    """
    global cmmndsConfig
    print("executeCommand(" + command + ")")
    if command in cmmndsConfig:
        exec(cmmndsConfig[command])
        # Small debounce between commands.
        sleep(0.1)
def useCommand(command: str):
    """Dispatch one queued chat command.

    CUSTOM(...) commands get the dedicated parser; "A_&_B" style commands
    run their parts simultaneously on separate threads; everything else is
    executed directly.
    """
    #Anarchy mode
    if command[0:7] == "CUSTOM(" and command.find(")") > 7:
        print("Using a custom command!")
        customCommand(command)
    else:
        simultaneousCommands = command.split("_&_")
        if len(simultaneousCommands) > 1:
            # Press all parts at once, then wait for every press to finish.
            threadsArr = []
            for cmd in simultaneousCommands:
                threadsArr.append(Thread(target=executeCommand, args=[cmd]))
                threadsArr[-1].start()
            for t in threadsArr:
                t.join()
        else:
            executeCommand(command)
def addToQueue(command: str):
    """Append *command* to the shared queue and persist it to queue.json so
    pending commands survive a restart."""
    global commandQueue
    commandQueue.append(command)
    commandQueueJson = {"queue": commandQueue}
    json.dump(commandQueueJson, open("config/queue.json", "w"))
def commandQueueThread():
    """Worker loop: pop queued commands and execute them one at a time.

    Persists the remaining queue to config/queue.json after each command so
    pending commands survive a restart.
    """
    global commandQueue
    while True:
        if commandQueue:
            useCommand(commandQueue[0])
            commandQueue.pop(0)
            # Use a context manager so the file handle is closed promptly
            # (json.dump(..., open(...)) leaked a handle per command).
            with open("config/queue.json", "w") as queue_file:
                json.dump({"queue": commandQueue}, queue_file)
        else:
            # Yield while idle instead of busy-waiting at 100% CPU.
            sleep(0.05)
def parseMessage(userMessage):
    """Validate a chat message and enqueue its comma-separated commands.

    Messages from shadow-banned users, or with more parts than the sender's
    limit, are silently dropped (only logged).
    """
    message = userMessage.message.strip().upper()
    if len(message) > 0:
        if message[-1] == ",": #Removes the comma at the end of the message if there's one
            message = message[:-1]
        splitMessage = message.split(",")
        # Per-role command limits.
        # NOTE(review): the subscriber branch re-assigns the same limit (8);
        # possibly intended to be higher -- confirm.
        maxCommands = 8
        if userMessage.sub:
            maxCommands = 8
        if userMessage.mod:
            maxCommands = 10
        print(userMessage.username + " (from " + userMessage.origin + "): " + userMessage.message)
        if len(splitMessage) <= maxCommands and not isUserBanned(userMessage.username):
            for single in splitMessage:
                single = single.strip().replace(" ", "_")
                addToQueue(single)
class TwitchIRC(asyncore.dispatcher):
    """Minimal asyncore-based Twitch IRC client.

    Authenticates with PASS/NICK, joins one channel, answers server PINGs,
    and hands PRIVMSG lines to parseMessage on a worker thread.  Outgoing
    data is accumulated in ``self.buffer`` and flushed by handle_write.
    """
    username = None
    password = None
    channel = None
    authenticated = False

    def __init__(self, username: str, password: str, channel: str) -> None:
        """Connect to Twitch chat and queue the login handshake."""
        assert username is not None, "No username specified!"
        assert password is not None, "No password specified!"
        assert channel is not None, "No channel specified!"
        global twitchConfig
        self.username = username
        self.password = password
        self.channel = channel
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((twitchConfig["host"], twitchConfig["port"]))
        self.buffer = bytes("PASS %s\r\nNICK %s\r\n" % (password, username), "utf8")

    def handle_connect(self):
        pass

    def handle_close(self):
        self.close()

    def handle_read(self):
        """React to incoming IRC traffic: auth welcome, PING, chat lines."""
        data = self.recv(2048).decode("utf8", errors="ignore").rstrip()
        if "Welcome, GLHF!" in data and not self.authenticated:
            self.authenticated = True
            self.buffer += bytes("JOIN #%s\r\n" % (self.channel), "utf8")
            print("Successfully authenticated!")
            print("JOIN #%s\r\n" % (self.channel))
            #self.buffer += bytes("CAP REQ :twitch.tv/tags\r\n", "utf8")
        elif data == "PING :tmi.twitch.tv":
            print("Ping!")
            self.buffer += b"PONG :tmi.twitch.tv\r\n"
            print("Pong!")
        elif "%s.tmi.twitch.tv" % (self.channel) not in data or self.username in data: #chat messages here
            if "PRIVMSG" in data:
                # Parse on a separate thread so slow commands don't block
                # the asyncore event loop.
                message = UserMessage()
                message.loadMessageFromTwitch(data)
                Thread(target=parseMessage, args=[message]).start()

    def readable(self):
        return True

    def writable(self):
        # Only ask for write events while there is pending outgoing data.
        return (len(self.buffer) > 0)

    def handle_write(self):
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]
if __name__ == "__main__":
    loadConfig()
    # Controller presumably manages the controlled input device/emulator --
    # TODO confirm against its definition earlier in the file.
    with Controller() as controller:
        try:
            # Connect to Twitch chat as the main bot account and start the
            # background worker that drains the command queue.
            mainClient = TwitchIRC(twitchConfig["mainUsername"], twitchConfig["mainPassword"], twitchConfig["mainUsername"].lower())
            Thread(target=commandQueueThread).start()
        except KeyboardInterrupt:
            # NOTE(review): this only guards the two setup lines above;
            # Ctrl-C during asyncore.loop() below is not handled here.
            controller.reset.wait()
            exit(0)
        # Drive all asyncore dispatchers (the IRC client) until closed.
        asyncore.loop()
    # (author's squid-emoticon signature removed from this comment)
5023771 | import uuid
import pytest
import requests
from selenium.webdriver.common.action_chains import ActionChains
from baselayer.app.env import load_env
from skyportal.tests import api
env, cfg = load_env()
endpoint = cfg['app.sedm_endpoint']
# Probe the SEDM endpoint once at import time so the tests below can be
# skipped when it is unreachable.  Guard the request: a connection error
# or timeout should mean "offline", not a crash during test collection.
try:
    sedm_isonline = requests.get(endpoint, timeout=5).status_code in [200, 400]
except requests.exceptions.RequestException:
    sedm_isonline = False
def add_telescope_and_instrument(instrument_name, token):
    """Return the instrument named *instrument_name*, creating a fresh
    telescope plus instrument through the API when none exists yet."""
    _, existing = api("GET", f"instrument?name={instrument_name}", token=token)
    if len(existing["data"]) == 1:
        return existing["data"][0]

    # No such instrument yet: create a throw-away robotic telescope first.
    telescope_name = str(uuid.uuid4())
    telescope_payload = {
        "name": telescope_name,
        "nickname": telescope_name,
        "lat": 0.0,
        "lon": 0.0,
        "elevation": 0.0,
        "diameter": 10.0,
        "robotic": True,
    }
    status, data = api("POST", "telescope", data=telescope_payload, token=token)
    assert status == 200
    assert data["status"] == "success"
    telescope_id = data["data"]["id"]

    # Attach an SEDM-capable imager to the new telescope.
    instrument_payload = {
        "name": instrument_name,
        "type": "imager",
        "band": "Optical",
        "telescope_id": telescope_id,
        "filters": ["ztfg"],
        "api_classname": "SEDMAPI",
    }
    status, data = api("POST", "instrument", data=instrument_payload, token=token)
    assert status == 200
    assert data["status"] == "success"
    return data["data"]
def add_allocation(instrument_id, group_id, token):
    """Create a 100-hour allocation on *instrument_id* owned by *group_id*
    and return the created allocation record."""
    payload = {
        "group_id": group_id,
        "instrument_id": instrument_id,
        "hours_allocated": 100,
        "pi": "Ed Hubble",
    }
    status, data = api("POST", "allocation", data=payload, token=token)
    assert status == 200
    assert data["status"] == "success"
    return data["data"]
def add_followup_request_using_frontend_and_verify(
    driver, super_admin_user, public_source, super_admin_token, public_group
):
    """Adds a new followup request and makes sure it renders properly.

    Creates the SEDM instrument and an allocation through the API, then
    drives the source page UI to submit a Mix 'n Match (u + IFU) request
    and waits for the followup-request table to show it as submitted.
    """
    idata = add_telescope_and_instrument("SEDM", super_admin_token)
    add_allocation(idata['id'], public_group.id, super_admin_token)
    driver.get(f"/become_user/{super_admin_user.id}")
    driver.get(f"/source/{public_source.id}")
    # wait for the plots to load
    driver.wait_for_xpath('//div[@class="bk-root"]//span[text()="Flux"]', timeout=20)
    # this waits for the spectroscopy plot by looking for the element Mg
    driver.wait_for_xpath('//div[@class="bk-root"]//label[text()="Mg"]', timeout=20)
    submit_button = driver.wait_for_xpath(
        '//form[@class="rjsf"]//button[@type="submit"]'
    )
    mode_select = driver.wait_for_xpath('//*[@id="root_observation_type"]')
    driver.scroll_to_element(mode_select)
    # The MUI select needs a real pointer sequence (with a pause) to open.
    ActionChains(driver).move_to_element(mode_select).pause(1).click().perform()
    mix_n_match_option = driver.wait_for_xpath('''//li[@data-value="Mix 'n Match"]''')
    driver.scroll_to_element_and_click(mix_n_match_option)
    # Tick the "u" band (choice 0) and "IFU" (choice 4) checkboxes.
    u_band_option = driver.wait_for_xpath('//input[@id="root_observation_choices_0"]')
    driver.scroll_to_element_and_click(u_band_option)
    ifu_option = driver.wait_for_xpath('//input[@id="root_observation_choices_4"]')
    driver.scroll_to_element_and_click(ifu_option)
    driver.scroll_to_element_and_click(submit_button)
    # Verify the new table row renders: mode, choices, priority, status.
    driver.wait_for_xpath(
        f'//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "Mix \'n Match")]'
    )
    driver.wait_for_xpath(
        f'''//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "u,IFU")]'''
    )
    driver.wait_for_xpath(
        f'''//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "1")]'''
    )
    driver.wait_for_xpath(
        f'''//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "submitted")]'''
    )
@pytest.mark.flaky(reruns=2)
@pytest.mark.skipif(not sedm_isonline, reason="SEDM server down")
def test_submit_new_followup_request(
    driver, super_admin_user, public_source, super_admin_token, public_group
):
    """Submitting a new SEDM followup request shows it in the table."""
    add_followup_request_using_frontend_and_verify(
        driver, super_admin_user, public_source, super_admin_token, public_group
    )
@pytest.mark.flaky(reruns=2)
@pytest.mark.skipif(not sedm_isonline, reason="SEDM server down")
def test_edit_existing_followup_request(
    driver, super_admin_user, public_source, super_admin_token, public_group
):
    """Editing a submitted request from Mix 'n Match to IFU updates the
    followup-request table accordingly."""
    add_followup_request_using_frontend_and_verify(
        driver, super_admin_user, public_source, super_admin_token, public_group
    )
    edit_button = driver.wait_for_xpath(f'//button[contains(@name, "editRequest")]')
    driver.scroll_to_element_and_click(edit_button)
    # The edit form opens in a dialog; pick the IFU observation mode.
    mode_select = driver.wait_for_xpath(
        '//div[@role="dialog"]//div[@id="root_observation_type"]'
    )
    ActionChains(driver).move_to_element(mode_select).pause(1).click().perform()
    mix_n_match_option = driver.wait_for_xpath('''//li[@data-value="IFU"]''')
    driver.scroll_to_element_and_click(mix_n_match_option)
    submit_button = driver.wait_for_xpath(
        '//form[@class="rjsf"]//button[@type="submit"]'
    )
    driver.scroll_to_element_and_click(submit_button)
    # The table should now show the edited mode with unchanged status.
    driver.wait_for_xpath(
        '//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "IFU")]'
    )
    driver.wait_for_xpath(
        '''//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "1")]'''
    )
    driver.wait_for_xpath(
        '''//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "submitted")]'''
    )
@pytest.mark.flaky(reruns=2)
@pytest.mark.skipif(not sedm_isonline, reason='SEDM server down')
def test_delete_followup_request(
    driver, super_admin_user, public_source, super_admin_token, public_group
):
    """Deleting a submitted request removes its row from the table."""
    add_followup_request_using_frontend_and_verify(
        driver, super_admin_user, public_source, super_admin_token, public_group
    )
    delete_button = driver.wait_for_xpath(f'//button[contains(@name, "deleteRequest")]')
    driver.scroll_to_element_and_click(delete_button)
    # All cells of the previously-verified row should disappear.
    driver.wait_for_xpath_to_disappear(
        '''//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "u,IFU")]'''
    )
    driver.wait_for_xpath_to_disappear(
        '''//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "1")]'''
    )
    driver.wait_for_xpath_to_disappear(
        '''//table[contains(@data-testid, "followupRequestTable")]//td[contains(., "submitted")]'''
    )
| StarcoderdataPython |
3389384 | <reponame>neelmraman/defopt
from pathlib import Path
import nox
from nox import Session, session
# Interpreter versions for the test matrix.
python_versions = ['3.5', '3.6', '3.7', '3.8', '3.9', '3.10']
# Sessions run by a bare `nox` invocation; reuse virtualenvs for speed.
nox.options.sessions = ['tests', 'docs']
nox.options.reuse_existing_virtualenvs = True
@session(python=python_versions)
@nox.parametrize('old', [False, True])
def tests(session: Session, old: bool) -> None:
    """Run the test suite under coverage.

    Parametrised twice per interpreter: once with current dependencies
    and once (`old=True`) pinned to the oldest supported versions.
    """
    args = session.posargs or ['--buffer']
    session.install('--upgrade', 'pip', 'setuptools', 'wheel', 'coverage')
    session.install('-e', '.')
    if old:
        # Oldest supported versions
        session.install('docutils==0.12', 'sphinxcontrib-napoleon==0.7.0')
        if session.python in ['3.5', '3.6', '3.7']:
            session.install(
                'typing_extensions==3.7.4', 'typing_inspect==0.5.0'
            )
        # Keep the pinned-oldest coverage data in its own file.
        coverage_file = f'.coverage.{session.python}.oldest'
    else:
        coverage_file = f'.coverage.{session.python}'
    try:
        session.run(
            'coverage',
            'run',
            '--module',
            'unittest',
            *args,
            env={'COVERAGE_FILE': coverage_file},
        )
    finally:
        # In interactive runs, queue the coverage report even when the
        # test run failed, so partial data is still reported.
        if session.interactive:
            session.notify('coverage', posargs=[])
@session
def coverage(session: Session) -> None:
    """Produce the coverage report."""
    args = session.posargs or ['report', '--show-missing']
    session.install('coverage')
    # Merge the per-interpreter data files first, but only when no
    # explicit coverage subcommand was passed on the command line.
    if not session.posargs and any(Path().glob('.coverage.*')):
        session.run('coverage', 'combine')
    session.run('coverage', *args)
@session
def docs(session: Session) -> None:
    # Original docstring said "Produce the coverage report" -- a
    # copy-paste error that also showed up in `nox --list`.
    """Build the documentation with Sphinx."""
    args = session.posargs or ['-b', 'html', 'doc/source', 'doc/build']
    session.install('-r', 'doc/requirements.txt')
    session.install('-e', '.')
    session.run('sphinx-build', *args)
| StarcoderdataPython |
3474237 | import numpy as np
from apvisitproc import apVisit2input as ap
import os
# Directory containing this test module; fixture files live alongside it.
DATAPATH = os.path.dirname(__file__)
# Identifier used to locate the test Visitlist file.
KIC = 'testKIC'
FITSFILEPATH = os.path.join(DATAPATH, 'apVisit-r5-7125-56557-285.fits')
# One set of locID, mjd, and fiberID that exist in the test Visitlist
locID = 5215
mjd = 55840
fiberID = 277
def test_load_allvisitinfo():
    """Visitlist parsing yields parallel, non-empty arrays that contain
    the known test visit (locID, mjd, fiberID)."""
    locIDs, mjds, fiberIDs = ap.load_allvisitinfo(DATAPATH, KIC)
    # All three collections are non-empty and the same length.
    assert len(locIDs) > 0
    assert len(locIDs) == len(mjds) == len(fiberIDs)
    # The known visit must appear in each collection (values are floats).
    for expected, collection in ((locID, locIDs), (mjd, mjds), (fiberID, fiberIDs)):
        assert float(expected) in collection
def test_load_apVisit():
    """apVisit loading returns an existing FITS path plus parallel
    wavelength/flux/error arrays (uses apogee module tools)."""
    fitsfilepath, _, wave, flux, fluxerr = ap.load_apVisit(
        DATAPATH, KIC, locID, mjd, fiberID
    )
    assert os.path.isfile(fitsfilepath)
    assert len(wave) > 0
    assert len(wave) == len(flux) == len(fluxerr)
def test_normalize_spec():
    """Normalisation preserves array lengths and pulls the flux median
    toward 1 while flattening the spectrum ends."""
    _, _, wave, flux, fluxerr = ap.load_apVisit(DATAPATH, KIC, locID, mjd, fiberID)
    specnorm, specnormerr = ap.normalize_spec(wave, flux, fluxerr, plot=False)
    assert len(specnorm) == len(specnormerr) == len(wave)
    # The normalised median should be closer to 1 than the raw median...
    assert np.abs(1 - np.median(specnorm)) < np.abs(1 - np.median(flux))
    # ...and the blue/red ends of the spectrum should be more similar
    # after normalisation than before.
    gap_norm = np.median(specnorm[0:50]) - np.median(specnorm[-50:-1])
    gap_raw = np.median(flux[0:50]) - np.median(flux[-50:-1])
    assert gap_norm < gap_raw
def test_make_BFinfile():
    """Header HJD (with the 2400000 offset applied) and BCV values match
    what fitsheader reports for the fixture file."""
    hjd_value, bcv_value = ap.make_BFinfile(FITSFILEPATH)
    assert hjd_value == 2456557.7326138
    assert bcv_value == -10.9548025967
| StarcoderdataPython |
3495622 | <reponame>0mza987/azureml-examples
# description: deploy sklearn ridge model trained on diabetes data to AKS
# imports
import json
import time
import mlflow
import mlflow.azureml
import requests
import pandas as pd
from random import randint
from pathlib import Path
from azureml.core import Workspace
from azureml.core.webservice import AksWebservice

# get workspace
ws = Workspace.from_config()

# get root of git repo
prefix = Path(__file__).parent

# azure ml settings
experiment_name = "sklearn-diabetes-example"

# setup mlflow tracking
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
mlflow.set_experiment(experiment_name)

# get latest completed run of the training
runs_df = mlflow.search_runs()
runs_df = runs_df.loc[runs_df["status"] == "FINISHED"]
runs_df = runs_df.sort_values(by="end_time", ascending=False)
print(runs_df.head())
# Use positional indexing: .at[0, ...] is label-based, so after sorting it
# could pick an arbitrary run instead of the most recent one.
run_id = runs_df.iloc[0]["run_id"]

# create deployment configuration
aks_config = AksWebservice.deploy_configuration(
    compute_target_name="aks-cpu-deploy",
    cpu_cores=2,
    memory_gb=5,
    tags={"data": "diabetes", "method": "sklearn"},
    description="Predict using webservice",
)

# create webservice with a randomised suffix to avoid name collisions
webservice, azure_model = mlflow.azureml.deploy(
    model_uri=f"runs:/{run_id}/model",
    workspace=ws,
    deployment_config=aks_config,
    service_name="sklearn-diabetes-" + str(randint(10000, 99999)),
    model_name=experiment_name,
)

# test webservice: build a one-row query from the dataset (without target)
data = pd.read_csv(prefix.joinpath("data", "diabetes", "diabetes.csv"))
sample = data.drop(["progression"], axis=1).iloc[[0]]
# Parse with json.loads instead of eval(): eval on serialized data is
# unsafe and unnecessary here.
query_input = json.loads(sample.to_json(orient="split"))
query_input.pop("index", None)

# if (key) auth is enabled, retrieve the API keys. AML generates two keys.
key1, key2 = webservice.get_keys()

# # if token auth is enabled, retrieve the token.
# access_token, refresh_after = webservice.get_token()

# If (key) auth is enabled, don't forget to add key to the HTTP header.
headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer " + key1,
}

# # If token auth is enabled, don't forget to add token to the HTTP header.
# headers = {'Content-Type':'application/json', 'Authorization': 'Bearer ' + access_token}

response = requests.post(
    url=webservice.scoring_uri, data=json.dumps(query_input), headers=headers
)
print(response.text)

# delete webservice
time.sleep(5)
webservice.delete()
| StarcoderdataPython |
68754 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Matgenix SRL, All rights reserved.
# Distributed open source for academic and non-profit users.
# Contact Matgenix for commercial usage.
# See LICENSE file for details.
"""Module containing custodian validators for SISSO."""
import os
from custodian.custodian import Validator # type: ignore
class NormalCompletionValidator(Validator):
    """Validator of the normal completion of SISSO."""

    def __init__(
        self,
        output_file: str = "SISSO.out",
        stdout_file: str = "SISSO.log",
        stderr_file: str = "SISSO.err",
    ):
        """Construct NormalCompletionValidator class.

        This validator checks that the standard error file (SISSO.err by
        default) is empty, that the standard output file is not empty and
        that the output file (SISSO.out) is completed, i.e. ends with
        "Have a nice day !".

        Args:
            output_file: Name of the output file (default: SISSO.out).
            stdout_file: Name of the standard output file (default: SISSO.log).
            stderr_file: Name of the standard error file (default: SISSO.err).
        """
        self.output_file = output_file
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file

    def check(self) -> bool:
        """Validate the normal completion of SISSO.

        Returns:
            bool: True (i.e. an error is detected) when the output or
            stdout file is missing, stdout is empty, stderr is non-empty,
            or the output file does not contain "Have a nice day !".
        """
        # Missing output or stdout file means the run did not complete.
        if not os.path.isfile(self.output_file) or not os.path.isfile(self.stdout_file):
            return True
        if os.stat(self.stdout_file).st_size == 0:
            return True
        # A non-empty stderr file signals a failure.
        if os.path.isfile(self.stderr_file) and os.stat(self.stderr_file).st_size != 0:
            return True
        with open(self.output_file, "rb") as f:
            contents = f.read()
        # Completed runs end with the SISSO sign-off line.
        return contents.rfind(b"Have a nice day !") < 0
| StarcoderdataPython |
1821064 | <filename>build.py
import logging
import sys

from mkdocs.config import load_config
from mkdocs.commands import build


def _main() -> None:
    """Configure root logging to stdout and run a full MkDocs build."""
    logging.basicConfig(
        stream=sys.stdout,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        level=0,
    )
    build.build(load_config())


if __name__ == "__main__":
    _main()
| StarcoderdataPython |
139621 | <reponame>luigiberrettini/build-deploy-stats<gh_stars>1-10
#!/usr/bin/env python3
from dateutil import parser
from statsSend.session import Session
from statsSend.utils import print_exception
from statsSend.urlBuilder import UrlBuilder
from statsSend.teamCity.teamCityProject import TeamCityProject
class TeamCityStatisticsSender:
    """Collects build statistics from a TeamCity project and forwards
    them to a reporter object."""

    def __init__(self, settings, reporter):
        """Build the session factory and normalise configuration.

        Args:
            settings: mapping with server_url, api_url_prefix, page_size,
                user, password, project_id and since_timestamp entries.
            reporter: object with report_activity() and optionally
                report_categories().
        """
        page_size = int(settings['page_size'])
        url_builder = UrlBuilder(settings['server_url'], settings['api_url_prefix'], '', page_size)
        headers = {'Accept': 'application/json'}
        user = settings['user']
        password = settings['password']
        # Factory so each phase of send() gets its own session context.
        self.session_factory = lambda: Session(url_builder, headers, user, password)
        self.project_id = settings['project_id']
        # Normalise the configured timestamp to TeamCity's locator format.
        self.since_timestamp = parser.parse(settings['since_timestamp']).strftime('%Y%m%dT%H%M%S%z')
        self.reporter = reporter

    async def send(self):
        """Report build-configuration categories (if the reporter supports
        it), then every build run since the configured timestamp."""
        # hasattr is the idiomatic capability check (was: name in dir()).
        if hasattr(self.reporter, "report_categories"):
            async with self.session_factory() as session:
                try:
                    project = TeamCityProject(session, self.project_id)
                    categories = [build_configuration.to_category() async for build_configuration in project.retrieve_build_configurations()]
                    self.reporter.report_categories(categories)
                except Exception:
                    print_exception('Error sending categories')
        async with self.session_factory() as session:
            try:
                project = TeamCityProject(session, self.project_id)
                async for build_configuration in project.retrieve_build_configurations():
                    async for build_run in build_configuration.retrieve_build_runs_since_timestamp(self.since_timestamp):
                        try:
                            # Report runs one by one so a single bad run
                            # does not abort the whole sweep.
                            activity = build_run.to_activity()
                            self.reporter.report_activity(activity)
                        except Exception:
                            print_exception('Error reporting activity')
            except Exception:
                # The original line ended in stray dataset junk
                # ("| StarcoderdataPython"), which raised NameError
                # whenever this handler actually ran.
                print_exception('Error reporting activities')
1795633 | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:normalizedFunc
Description : 实现几种常用的标准化函数
Email : <EMAIL>
Date:2017/12/10
"""
import numpy as np
# Min-max normalization, applied column-wise (axis 0).
def min_max_normalized(data):
    """Rescale *data* column-wise onto the [0, 1] interval.

    Each column is shifted by its minimum and divided by its range
    (max - min), so the smallest entry maps to 0 and the largest to 1.

    Parameters
    ----------
    :param data: numpy array
        Data to normalize; reductions run along axis 0 (per column).

    Returns
    -------
    :return res:
        The min-max normalized data.

    Examples
    --------
    >>> X = np.array([-2, -3, 0, -1, 2])
    >>> res = min_max_normalized(X)
    >>> print(res)
    [ 0.2  0.   0.6  0.4  1. ]
    """
    lower = np.min(data, axis=0)
    upper = np.max(data, axis=0)
    return (data - lower) / (upper - lower)
| StarcoderdataPython |
11201193 | #coding: utf-8
from __future__ import absolute_import, division, print_function
# numexpr is an optional dependency, only needed by NumexprOperator.
# Catch ImportError specifically: the original bare `except:` would also
# swallow KeyboardInterrupt/SystemExit.
try:
    import numexpr
except ImportError:
    pass
import numpy as np
import pyoperators as po
from .core import (
    BlockColumnOperator, CompositionOperator, ConstantOperator, DiagonalBase,
    IdentityOperator, MultiplicationOperator, Operator, ReductionOperator)
from .flags import (
    idempotent, inplace, real, separable, square, update_output)
from .utils import (
    operation_assignment, operation_symbol, pi, strenum, tointtuple)
from .utils.ufuncs import hard_thresholding, soft_thresholding
__all__ = ['Cartesian2SphericalOperator',
'ClipOperator',
'HardThresholdingOperator',
'MaxOperator',
'MinOperator',
'MinMaxOperator',
'MaximumOperator',
'MinimumOperator',
'NormalizeOperator',
'NumexprOperator',
'PowerOperator',
'ProductOperator',
'ReciprocalOperator',
'RoundOperator',
'SoftThresholdingOperator',
'Spherical2CartesianOperator',
'SqrtOperator',
'SquareOperator',
'To1dOperator',
'ToNdOperator']
@real
class _CartesianSpherical(Operator):
    """Shared base of the cartesian <-> spherical conversion operators:
    validates the angle-convention string and provides the shape and
    simplification helpers used by both directions.
    """
    # Recognised orderings/namings of the two spherical angles.
    CONVENTIONS = ('zenith,azimuth',
                   'azimuth,zenith',
                   'elevation,azimuth',
                   'azimuth,elevation')
    def __init__(self, convention, dtype=float, **keywords):
        # Normalise (strip spaces, lower-case) and validate the convention.
        if not isinstance(convention, str):
            raise TypeError("The input convention '{0}' is not a string.".
                            format(convention))
        convention_ = convention.replace(' ', '').lower()
        if convention_ not in self.CONVENTIONS:
            raise ValueError(
                "Invalid spherical convention '{0}'. Expected values are {1}.".
                format(convention, strenum(self.CONVENTIONS)))
        self.convention = convention_
        Operator.__init__(self, dtype=dtype, **keywords)
    @staticmethod
    def _reshapecartesian(shape):
        # Cartesian (..., 3) input maps to spherical (..., 2) output.
        return shape[:-1] + (2,)
    @staticmethod
    def _reshapespherical(shape):
        # Spherical (..., 2) input maps to cartesian (..., 3) output.
        return shape[:-1] + (3,)
    @staticmethod
    def _validatecartesian(shape):
        # Cartesian vectors must have a last dimension of 3.
        if len(shape) == 0 or shape[-1] != 3:
            raise ValueError('Invalid cartesian shape.')
    @staticmethod
    def _validatespherical(shape):
        # Spherical coordinates must have a last dimension of 2.
        if len(shape) == 0 or shape[-1] != 2:
            raise ValueError('Invalid spherical shape.')
    @staticmethod
    def _rule_identity(s, o):
        # Composing the two conversions with the same convention is a no-op.
        if s.convention == o.convention:
            return IdentityOperator()
class Cartesian2SphericalOperator(_CartesianSpherical):
    """
    Convert cartesian unit vectors into spherical coordinates in radians
    or degrees.
    The spherical coordinate system is defined by:
       - the zenith direction of coordinate (0, 0, 1)
       - the azimuthal reference of coordinate (1, 0, 0)
       - the azimuth signedness: it is counted positively from the X axis
    to the Y axis.
    The last dimension of the operator's output is 2 and it encodes
    the two spherical angles. Four conventions define what these angles are:
       - 'zenith,azimuth': (theta, phi) angles commonly used
       in physics or the (colatitude, longitude) angles used
       in the celestial and geographical coordinate systems
       - 'azimuth,zenith': (longitude, colatitude) convention
       - 'elevation,azimuth: (latitude, longitude) convention
       - 'azimuth,elevation': (longitude, latitude) convention
    """
    def __init__(self, convention, degrees=False, **keywords):
        """
        convention : string
            One of the following spherical coordinate conventions:
            'zenith,azimuth', 'azimuth,zenith', 'elevation,azimuth' and
            'azimuth,elevation'.
        degrees : boolean, optional
            If true, the spherical coordinates are returned in degrees.
        """
        if degrees:
            # Morph this instance into a composition: radian conversion
            # followed by a radians->degrees operator.
            self.__class__ = CompositionOperator
            self.__init__(
                [po.linear.DegreesOperator(),
                 Cartesian2SphericalOperator(convention, **keywords)])
            return
        self.degrees = False
        _CartesianSpherical.__init__(
            self, convention,
            reshapein=self._reshapecartesian,
            reshapeout=self._reshapespherical,
            validatein=self._validatecartesian,
            validateout=self._validatespherical,
            **keywords)
        # Inverse and same-convention round-trip simplification rules.
        self.set_rule('I',
                      lambda s: Spherical2CartesianOperator(s.convention))
        self.set_rule(('.', Spherical2CartesianOperator), self._rule_identity,
                      CompositionOperator)
    def direct(self, input, output):
        # Select output slots per convention: azimuth-first conventions
        # store azimuth in [..., 0] and the polar angle in [..., 1].
        if self.convention.startswith('azimuth'):
            o1, o2 = output[..., 1], output[..., 0]
        else:
            o1, o2 = output[..., 0], output[..., 1]
        # Zenith angle from the z component; elevation = pi/2 - zenith.
        np.arccos(input[..., 2], o1)
        if 'elevation' in self.convention:
            np.subtract(pi(self.dtype) / 2, o1, o1)
        # Azimuth measured from +x toward +y, wrapped into [0, 2*pi).
        np.arctan2(input[..., 1], input[..., 0], o2)
        if o2.ndim > 0:
            o2[o2 < 0] += 2 * pi(self.dtype)
        elif o2 < 0:
            o2 += 2 * pi(self.dtype)
class Spherical2CartesianOperator(_CartesianSpherical):
    """
    Convert spherical coordinates in radians or degrees into unit cartesian
    vectors.
    The spherical coordinate system is defined by:
       - the zenith direction of coordinate (0, 0, 1)
       - the azimuthal reference of coordinate (1, 0, 0)
       - the azimuth signedness: it is counted positively from the X axis
    to the Y axis.
    The last dimension of the operator's input is 2 and it encodes
    the two spherical angles. Four conventions define what these angles are:
       - 'zenith,azimuth': (theta, phi) angles commonly used
       in physics or the (colatitude, longitude) angles used
       in the celestial and geographical coordinate systems
       - 'azimuth,zenith': (longitude, colatitude) convention
       - 'elevation,azimuth: (latitude, longitude) convention
       - 'azimuth,elevation': (longitude, latitude) convention
    """
    def __init__(self, convention, degrees=False, **keywords):
        """
        convention : string
            One of the following spherical coordinate conventions:
            'zenith,azimuth', 'azimuth,zenith', 'elevation,azimuth' and
            'azimuth,elevation'.
        degrees : boolean, optional
            If true, the input spherical coordinates are assumed to be in
            degrees.
        """
        if degrees:
            # Morph this instance into a composition: degrees->radians
            # conversion followed by the radian operator.
            self.__class__ = CompositionOperator
            self.__init__(
                [Spherical2CartesianOperator(convention, **keywords),
                 po.linear.RadiansOperator()])
            return
        self.degrees = False
        _CartesianSpherical.__init__(
            self, convention,
            reshapein=self._reshapespherical,
            reshapeout=self._reshapecartesian,
            validatein=self._validatespherical,
            validateout=self._validatecartesian,
            **keywords)
        # Inverse and same-convention round-trip simplification rules.
        self.set_rule('I',
                      lambda s: Cartesian2SphericalOperator(s.convention))
        self.set_rule(('.', Cartesian2SphericalOperator), self._rule_identity,
                      CompositionOperator)
    def direct(self, input, output):
        # Select input slots per convention: azimuth-first conventions
        # store azimuth in [..., 0] and the polar angle in [..., 1].
        if self.convention.startswith('azimuth'):
            theta, phi = input[..., 1], input[..., 0]
        else:
            theta, phi = input[..., 0], input[..., 1]
        # Convert elevation (latitude) to zenith angle.
        if 'elevation' in self.convention:
            theta = 0.5 * pi(self.dtype) - theta
        sintheta = np.sin(theta)
        np.multiply(sintheta, np.cos(phi), output[..., 0])
        np.multiply(sintheta, np.sin(phi), output[..., 1])
        np.cos(theta, output[..., 2])
@square
@inplace
@separable
class ClipOperator(Operator):
    """
    Clip (limit) the values in an array.
    Given an interval, values outside the interval are clipped to
    the interval edges.  For example, if an interval of ``[0, 1]``
    is specified, values smaller than 0 become 0, and values larger
    than 1 become 1.
    Arguments
    ---------
    minvalue: scalar or array_like
        The minimum limit below which all input values are set to vmin.
    maxvalue: scalar or array_like
        The maximum limit above which all input values are set to vmax.
    Examples
    --------
    >>> C = ClipOperator(0, 1)
    >>> x = np.linspace(-2, 2, 5)
    >>> x
    array([-2., -1.,  0.,  1.,  2.])
    >>> C(x)
    array([ 0.,  0.,  0.,  1.,  1.])
    See also
    --------
    MaximumOperator, MinimumOperator, np.clip
    """
    def __init__(self, minvalue, maxvalue, **keywords):
        # Bounds may be scalars or arrays broadcastable to the input.
        self.minvalue = np.asarray(minvalue)
        self.maxvalue = np.asarray(maxvalue)
        Operator.__init__(self, **keywords)
    def direct(self, input, output):
        np.clip(input, self.minvalue, self.maxvalue, out=output)
    @property
    def nbytes(self):
        # Memory footprint of the operator's own data (the two bounds).
        return self.minvalue.nbytes + self.maxvalue.nbytes
    def __str__(self):
        return 'clip(..., {0}, {1})'.format(self.minvalue, self.maxvalue)
@square
@inplace
@separable
class PowerOperator(Operator):
    """X -> X**n

    Element-wise power.  For special exponents the instance is rebound to
    a dedicated subclass (constant 1, reciprocal, sqrt, identity, square).
    """
    def __init__(self, n, dtype=float, **keywords):
        # Specialise on the exponent by swapping the instance's class and
        # re-running the matching constructor (a pyoperators idiom).
        if np.allclose(n, -1) and not isinstance(self, ReciprocalOperator):
            self.__class__ = ReciprocalOperator
            self.__init__(dtype=dtype, **keywords)
            return
        if n == 0:
            self.__class__ = ConstantOperator
            self.__init__(1, dtype=dtype, **keywords)
            return
        if np.allclose(n, 0.5) and not isinstance(self, SqrtOperator):
            self.__class__ = SqrtOperator
            self.__init__(dtype=dtype, **keywords)
            return
        if np.allclose(n, 1):
            self.__class__ = IdentityOperator
            self.__init__(**keywords)
            return
        if np.allclose(n, 2) and not isinstance(self, SquareOperator):
            self.__class__ = SquareOperator
            self.__init__(dtype=dtype, **keywords)
            return
        self.n = n
        Operator.__init__(self, dtype=dtype, **keywords)
        # Algebraic simplification rules:
        # functional inverse: (x**n)^-1 -> x**(1/n)
        self.set_rule('I', lambda s: PowerOperator(1/s.n))
        # composition: (x**m)**n -> x**(m*n)
        self.set_rule(('.', PowerOperator),
                      lambda s, o: PowerOperator(s.n * o.n),
                      CompositionOperator)
        # pointwise product: x**m * x**n -> x**(m+n)
        self.set_rule(('.', PowerOperator),
                      lambda s, o: PowerOperator(s.n + o.n),
                      MultiplicationOperator)
        # product with a diagonal d: fold into constant(d) * x**(n+1)
        self.set_rule(('.', DiagonalBase),
                      lambda s, o: MultiplicationOperator(
                          [ConstantOperator(o.get_data(),
                                            broadcast=o.broadcast),
                           PowerOperator(s.n + 1)]),
                      MultiplicationOperator)
    def direct(self, input, output):
        np.power(input, self.n, output)
    @property
    def nbytes(self):
        # Memory footprint of the operator's own data (the exponent).
        return self.n.nbytes
    def __str__(self):
        return '...**{0}'.format(self.n)
class ReciprocalOperator(PowerOperator):
    """X -> 1 / X

    Specialisation of PowerOperator with exponent -1, using the dedicated
    np.reciprocal ufunc instead of np.power.
    """
    def __init__(self, **keywords):
        PowerOperator.__init__(self, -1, **keywords)
    def direct(self, input, output):
        np.reciprocal(input, output)
    def __str__(self):
        return '1/...'
class SqrtOperator(PowerOperator):
    """X -> sqrt(X)

    Specialisation of PowerOperator with exponent 0.5, using the dedicated
    np.sqrt ufunc instead of np.power.
    """
    def __init__(self, **keywords):
        PowerOperator.__init__(self, 0.5, **keywords)
    def direct(self, input, output):
        np.sqrt(input, output)
class SquareOperator(PowerOperator):
    """X -> X**2

    Specialisation of PowerOperator with exponent 2, using the dedicated
    np.square ufunc instead of np.power.
    """
    def __init__(self, **keywords):
        PowerOperator.__init__(self, 2, **keywords)
    def direct(self, input, output):
        np.square(input, output)
    def __str__(self):
        # Return text, not bytes: the original u'...²'.encode('utf-8')
        # returned a bytes object, which makes str(op) raise TypeError
        # under Python 3.  (With the file's utf-8 coding declaration this
        # literal is also valid on Python 2.)
        return '...²'
class ProductOperator(ReductionOperator):
    """
    Product-along-axis operator.
    Parameters
    ----------
    axis : integer, optional
        Axis along which the reduction is performed. If None, all dimensions
        are collapsed.
    dtype : dtype, optional
        Reduction data type.
    skipna : boolean, optional
        If this is set to True, the reduction is done as if any NA elements
        were not counted in the array. The default, False, causes the NA values
        to propagate, so if any element in a set of elements being reduced is
        NA, the result will be NA.
    Example
    -------
    >>> op = ProductOperator()
    >>> op([1,2,3])
    array(6)
    """
    def __init__(self, axis=None, dtype=None, skipna=True, **keywords):
        # The reduction applies np.multiply along the requested axis.
        ReductionOperator.__init__(self, np.multiply, axis=axis, dtype=dtype,
                                   skipna=skipna, **keywords)
    def __str__(self):
        return 'product' if self.axis is None \
               else 'product(..., axis={0})'.format(self.axis)
class MaxOperator(ReductionOperator):
    """
    Max-along-axis operator.
    Parameters
    ----------
    axis : integer, optional
        Axis along which the reduction is performed. If None, all dimensions
        are collapsed.
    dtype : dtype, optional
        Reduction data type.
    skipna : boolean, optional
        If this is set to True, the reduction is done as if any NA elements
        were not counted in the array. The default, False, causes the NA values
        to propagate, so if any element in a set of elements being reduced is
        NA, the result will be NA.
    Example
    -------
    >>> op = MaxOperator()
    >>> op([1,2,3])
    array(3)
    """
    def __init__(self, axis=None, dtype=None, skipna=False, **keywords):
        # Lexicographic version gate: numpy 1.x uses nanmax for skipna.
        # NOTE(review): on numpy >= 2 this branch selects np.max even when
        # skipna is requested -- confirm this is the intended behaviour.
        if np.__version__ < '2':
            func = np.nanmax if skipna else np.max
        else:
            func = np.max
        ReductionOperator.__init__(self, func, axis=axis, dtype=dtype,
                                   skipna=skipna, **keywords)
    def __str__(self):
        return 'max' if self.axis is None \
               else 'max(..., axis={0})'.format(self.axis)
class MinOperator(ReductionOperator):
    """
    Min-along-axis operator.
    Parameters
    ----------
    axis : integer, optional
        Axis along which the reduction is performed. If None, all dimensions
        are collapsed.
    dtype : dtype, optional
        Reduction data type.
    skipna : boolean, optional
        If this is set to True, the reduction is done as if any NA elements
        were not counted in the array. The default, False, causes the NA values
        to propagate, so if any element in a set of elements being reduced is
        NA, the result will be NA.
    Example
    -------
    >>> op = MinOperator()
    >>> op([1,2,3])
    array(1)
    """
    def __init__(self, axis=None, dtype=None, skipna=False, **keywords):
        # Lexicographic version gate: numpy 1.x uses nanmin for skipna.
        # NOTE(review): on numpy >= 2 this branch selects np.min even when
        # skipna is requested -- confirm this is the intended behaviour.
        if np.__version__ < '2':
            func = np.nanmin if skipna else np.min
        else:
            func = np.min
        ReductionOperator.__init__(self, func, axis=axis, dtype=dtype,
                                   skipna=skipna, **keywords)
    def __str__(self):
        return 'min' if self.axis is None \
               else 'min(..., axis={0})'.format(self.axis)
class MinMaxOperator(BlockColumnOperator):
    """
    MinMax-along-axis operator.
    Parameters
    ----------
    axis : integer, optional
        Axis along which the reduction is performed. If None, all dimensions
        are collapsed.
    new_axisout : integer, optional
        Axis in which the minimum and maximum values are set.
    dtype : dtype, optional
        Operator data type.
    skipna : boolean, optional
        If this is set to True, the reduction is done as if any NA elements
        were not counted in the array. The default, False, causes the NA values
        to propagate, so if any element in a set of elements being reduced is
        NA, the result will be NA.
    Example
    -------
    >>> op = MinMaxOperator()
    >>> op([1,2,3])
    array([1, 3])
    >>> op = MinMaxOperator(axis=0, new_axisout=0)
    >>> op([[1,2,3],[2,1,4],[0,1,8]])
    array([[0, 1, 3],
           [2, 2, 8]])
    """
    def __init__(self, axis=None, dtype=None, skipna=False, new_axisout=-1,
                 **keywords):
        # Stack the min and max reductions along new_axisout.
        operands = [MinOperator(axis=axis, dtype=dtype, skipna=skipna),
                    MaxOperator(axis=axis, dtype=dtype, skipna=skipna)]
        BlockColumnOperator.__init__(self, operands, new_axisout=new_axisout,
                                     **keywords)
        # __str__ reads self.axis; BlockColumnOperator does not set it, so
        # the original raised AttributeError when the operator was printed.
        self.axis = axis
    def __str__(self):
        return 'minmax' if self.axis is None \
               else 'minmax(..., axis={0})'.format(self.axis)
@square
@inplace
@separable
class MaximumOperator(Operator):
    """
    Set all input array values below a given value to this value.
    (Applies np.maximum(input, value): the output is never smaller than
    the threshold.  The original docstring said "above", which
    contradicted both np.maximum and the example below.)
    Arguments
    ---------
    value: scalar or array_like
        Threshold value to which the input array is compared.
    Example
    -------
    >>> M = MaximumOperator(1)
    >>> x = np.linspace(-2, 2, 5)
    >>> x
    array([-2., -1.,  0.,  1.,  2.])
    >>> M(x)
    array([ 1.,  1.,  1.,  1.,  2.])
    See also
    --------
    ClipOperator, MinimumOperator, np.maximum
    """
    def __init__(self, value, **keywords):
        # Threshold may be a scalar or broadcastable array.
        self.value = np.asarray(value)
        Operator.__init__(self, **keywords)
    def direct(self, input, output):
        np.maximum(input, self.value, output)
    @property
    def nbytes(self):
        # Memory footprint of the operator's own data (the threshold).
        return self.value.nbytes
    def __str__(self):
        return 'maximum(..., {0})'.format(self.value)
@square
@inplace
@separable
class MinimumOperator(Operator):
    """
    Set all input array values above a given value to this value.
    (Applies np.minimum(input, value): the output is never larger than
    the threshold.)
    Arguments
    ---------
    value: scalar, broadcastable array
        The value to which the input array is compared.
    Example
    -------
    >>> M = MinimumOperator(1)
    >>> x = np.linspace(-2, 2, 5)
    >>> x
    array([-2., -1.,  0.,  1.,  2.])
    >>> M(x)
    array([-2., -1.,  0.,  1.,  1.])
    See also
    --------
    ClipOperator, MaximumOperator, np.minimum
    """
    def __init__(self, value, **keywords):
        # Threshold may be a scalar or broadcastable array.
        self.value = np.asarray(value)
        Operator.__init__(self, **keywords)
    def direct(self, input, output):
        np.minimum(input, self.value, output)
    @property
    def nbytes(self):
        # Memory footprint of the operator's own data (the threshold).
        return self.value.nbytes
    def __str__(self):
        return 'minimum(..., {0})'.format(self.value)
@square
@inplace
class NormalizeOperator(Operator):
    """
    Normalize a cartesian vector to unit length.
    The last axis is taken as the coordinate axis.
    Example
    -------
    >>> n = NormalizeOperator()
    >>> n([1, 1])
    array([ 0.70710678,  0.70710678])
    """
    def __init__(self, dtype=float, **keywords):
        Operator.__init__(self, dtype=dtype, **keywords)
    def direct(self, input, output):
        # Euclidean norm along the last axis, broadcast back for division.
        norms = np.sqrt(np.sum(input ** 2, axis=-1))
        np.divide(input, norms[..., None], output)
@square
@inplace
@update_output
class NumexprOperator(Operator):
    """
    Return an operator evaluating an expression using numexpr.
    Parameters
    ----------
    expr : string
        The numexpr expression to be evaluated. It must contain the 'input'
        variable name.
    global_dict : dict
        A dictionary of global variables that are passed to numexpr's
        'evaluate' method.
    Example
    -------
    >>> k = 1.2
    >>> op = NumexprOperator('exp(input+k)', {'k':k})
    >>> print(op(1) == np.exp(2.2))
    True
    """
    def __init__(self, expr, global_dict=None, dtype=float, **keywords):
        # numexpr is an optional dependency: imported locally so the module
        # can be loaded without it.
        import numexpr
        self.expr = expr
        self.global_dict = global_dict
        # NOTE(review): lexicographic version comparison — would misorder a
        # hypothetical numexpr '10.x'; consider a proper version parse.
        if numexpr.__version__ < '2.1':
            keywords['flags'] = self.validate_flags(
                keywords.get('flags', {}), update_output=False)
        Operator.__init__(self, dtype=dtype, **keywords)
    def direct(self, input, output, operation=operation_assignment):
        # BUGFIX: numexpr was only imported inside __init__, so referencing
        # it here raised NameError at evaluation time.
        import numexpr
        if operation is operation_assignment:
            expr = self.expr
        else:
            # In-place update: wrap the expression with the operation symbol,
            # e.g. 'output += (expr)'.
            op = operation_symbol[operation]
            expr = 'output' + op + '(' + self.expr + ')'
        numexpr.evaluate(expr, global_dict=self.global_dict, out=output)
    @property
    def nbytes(self):
        """Bytes referenced by the array-valued globals of the expression."""
        if self.global_dict is None:
            return 0
        # Built-in sum: np.sum over a generator is invalid in modern NumPy.
        return sum(v.nbytes for v in self.global_dict.values()
                   if hasattr(v, 'nbytes'))
    def __str__(self):
        return 'numexpr({0}, ...)'.format(self.expr)
@square
@idempotent
@inplace
@separable
class RoundOperator(Operator):
    """
    Rounding operator.
    The rounding method may be one of the following:
        - rtz : round towards zero (truncation)
        - rti : round towards infinity (Not implemented)
        - rtmi : round towards minus infinity (floor)
        - rtpi : round towards positive infinity (ceil)
        - rhtz : round half towards zero (Not implemented)
        - rhti : round half towards infinity (Not implemented)
        - rhtmi : round half towards minus infinity
        - rhtpi : round half towards positive infinity
        - rhte : round half to even (numpy's round)
        - rhs : round half stochastically (Not implemented)
    """
    def __init__(self, method='rhte', **keywords):
        method = method.lower()
        # Map each implemented method name to its elementwise kernel.
        table = {'rtz': np.trunc,
                 #'rti'
                 'rtmi': np.floor,
                 'rtpi': np.ceil,
                 #'rhtz'
                 #'rhti'
                 'rhtmi': self._direct_rhtmi,
                 'rhtpi': self._direct_rhtpi,
                 'rhte': lambda i, o: np.round(i, 0, o),
                 #'rhs'
                 }
        if method not in table:
            raise ValueError(
                'Invalid rounding method. Expected values are {0}.'.format(
                    strenum(table.keys())))
        Operator.__init__(self, table[method], **keywords)
        self.method = method
    @staticmethod
    def _direct_rhtmi(input, output):
        """ Round half to -inf: ceil(x + 0.5) - 1. """
        np.add(input, 0.5, output)
        np.ceil(output, output)
        np.add(output, -1, output)
    @staticmethod
    def _direct_rhtpi(input, output):
        """ Round half to +inf: floor(x - 0.5) + 1. """
        np.add(input, -0.5, output)
        np.floor(output, output)
        np.add(output, 1, output)
    def __str__(self):
        method = self.method[1:]
        # BUGFIX: 'rtmi' strips to 'tmi', not 'rmi' — the old comparison
        # never matched, so the floor method printed as 'round_tmi'.
        if method == 'tmi':
            method = 'floor'
        elif method == 'tpi':
            method = 'ceil'
        elif method == 'tz':
            method = 'trunc'
        return 'round_{0}'.format(method)
@square
@idempotent
@inplace
@separable
class HardThresholdingOperator(Operator):
    """
    Hard thresholding operator.
    Ha(x) = x if |x| > a,
            0 otherwise.
    Parameter
    ---------
    a : positive float or array
        The hard threshold.
    """
    def __init__(self, a, **keywords):
        a = np.asarray(a)
        if np.any(a < 0):
            raise ValueError('Negative hard threshold.')
        # An array-valued threshold fixes the operator's input shape.
        if a.ndim > 0:
            keywords['shapein'] = a.shape
        if 'dtype' not in keywords:
            keywords['dtype'] = float
        # Zero threshold is a no-op: morph this instance into an identity
        # operator by reassigning its class before initialisation.
        if np.all(a == 0):
            self.__class__ = IdentityOperator
            self.__init__(**keywords)
            return
        Operator.__init__(self, **keywords)
        self.a = a
        # Composing two hard thresholds is a single threshold at the
        # elementwise maximum of the two.
        self.set_rule(('.', HardThresholdingOperator), lambda s, o:
                      HardThresholdingOperator(np.maximum(s.a, o.a)),
                      CompositionOperator)
    def direct(self, input, output):
        hard_thresholding(input, self.a, output)
    @property
    def nbytes(self):
        # Memory footprint of the threshold array.
        return self.a.nbytes
    def __str__(self):
        return 'hardthreshold(..., {0})'.format(self.a)
@square
@inplace
@separable
class SoftThresholdingOperator(Operator):
    """
    Soft thresholding operator.
    Sa(x) = sign(x) [|x| - a]+
    Parameter
    ---------
    a : positive float or array
        The soft threshold.
    """
    def __init__(self, a, **keywords):
        a = np.asarray(a)
        if np.any(a < 0):
            raise ValueError('Negative soft threshold.')
        # An array-valued threshold fixes the operator's input shape.
        if a.ndim > 0:
            keywords['shapein'] = a.shape
        if 'dtype' not in keywords:
            keywords['dtype'] = float
        # Zero threshold is a no-op: morph this instance into an identity
        # operator by reassigning its class before initialisation.
        if np.all(a == 0):
            self.__class__ = IdentityOperator
            self.__init__(**keywords)
            return
        Operator.__init__(self, **keywords)
        self.a = a
    def direct(self, input, output):
        soft_thresholding(input, self.a, output)
    @property
    def nbytes(self):
        # Memory footprint of the threshold array.
        return self.a.nbytes
    def __str__(self):
        return 'softthreshold(..., {0})'.format(self.a)
@separable
class _1dNdOperator(Operator):
    """ Base class for 1d-Nd coordinate mappings. """
    def __init__(self, shape_, order='C', **keywords):
        """
        Parameters
        ----------
        shape_ : tuple of int
            Shape of the array whose coordinates are being mapped.
        order : str
            'C' for row-major or 'F' for column-major indexing.
        """
        shape_ = tointtuple(shape_)
        ndim = len(shape_)
        if ndim == 1:
            raise NotImplementedError('ndim == 1 is not implemented.')
        if order.upper() not in ('C', 'F'):
            raise ValueError("Invalid order '{0}'. Expected order is 'C' or 'F"
                             "'".format(order))
        order = order.upper()
        Operator.__init__(self, **keywords)
        self.shape_ = shape_
        self.order = order
        self.ndim = ndim
        # Mixed-radix weights used to flatten/unflatten N-d indices.
        # BUGFIX: np.cumprod replaces np.cumproduct, an alias that was
        # removed in NumPy 2.0.
        if order == 'C':
            self.coefs = np.cumprod((1,) + shape_[:0:-1])[::-1]
        elif order == 'F':
            self.coefs = np.cumprod((1,) + shape_[:-1])
    def _reshape_to1d(self, shape):
        # The coordinate axis (last dimension) is consumed by flattening.
        return shape[:-1]
    def _reshape_tond(self, shape):
        # Unflattening appends a coordinate axis of length ndim.
        return shape + (self.ndim,)
    def _validate_to1d(self, shape):
        if shape[-1] != self.ndim:
            raise ValueError("Invalid shape '{0}'. The expected last dimension"
                             " is '{1}'.".format(shape, self.ndim))
class To1dOperator(_1dNdOperator):
    """
    Convert an N-dimensional indexing to a 1-dimensional indexing.
    C order:
    ------------------------- -------------
    | (0,0) | (0,1) | (0,2) | | 0 | 1 | 2 |
    ------------------------- => -------------
    | (1,0) | (1,1) | (1,2) | | 3 | 4 | 5 |
    ------------------------- -------------
    Fortran order:
    ------------------------- -------------
    | (0,0) | (0,1) | (0,2) | | 0 | 2 | 4 |
    ------------------------- => -------------
    | (1,0) | (1,1) | (1,2) | | 1 | 3 | 5 |
    ------------------------- -------------
    Parameters
    ----------
    shape : tuple of int
        The shape of the array whose elements' multi-dimensional coordinates
        will be converted into 1-d coordinates.
    order : str
        'C' for row-major and 'F' for column-major 1-d indexing.
    """
    def __init__(self, shape_, order='C', **keywords):
        # Flattening shrinks the shape; the inverse direction restores it.
        if 'reshapein' not in keywords:
            keywords['reshapein'] = self._reshape_to1d
        if 'reshapeout' not in keywords:
            keywords['reshapeout'] = self._reshape_tond
        if 'validatein' not in keywords:
            keywords['validatein'] = self._validate_to1d
        _1dNdOperator.__init__(self, shape_, order=order, **keywords)
        # The inverse mapping is the matching ToNdOperator.
        self.set_rule('I', lambda s: ToNdOperator(s.shape_, order=s.order))
    def direct(self, input, output):
        # Flat index = dot product of the coordinates with the mixed-radix
        # weights computed in _1dNdOperator.__init__.
        np.dot(input, self.coefs, out=output)
class ToNdOperator(_1dNdOperator):
    """
    Convert a 1-dimensional indexing to an N-dimensional indexing.
    C order:
    ------------- -------------------------
    | 0 | 1 | 2 | | (0,0) | (0,1) | (0,2) |
    ------------- => -------------------------
    | 3 | 4 | 5 | | (1,0) | (1,1) | (1,2) |
    ------------- -------------------------
    Fortran order
    ------------- -------------------------
    | 0 | 2 | 4 | | (0,0) | (0,1) | (0,2) |
    ------------- => -------------------------
    | 1 | 3 | 5 | | (1,0) | (1,1) | (1,2) |
    ------------- -------------------------
    Parameters
    ----------
    shape : tuple of int
        The shape of the array whose elements' multi-dimensional coordinates
        will be converted into 1-d coordinates.
    order : str
        'C' for row-major and 'F' for column-major 1-d indexing.
    """
    def __init__(self, shape_, order='C', **keywords):
        # Unflattening grows the shape; the inverse direction shrinks it.
        if 'reshapein' not in keywords:
            keywords['reshapein'] = self._reshape_tond
        if 'reshapeout' not in keywords:
            keywords['reshapeout'] = self._reshape_to1d
        if 'validateout' not in keywords:
            keywords['validateout'] = self._validate_to1d
        _1dNdOperator.__init__(self, shape_, order=order, **keywords)
        # The inverse mapping is the matching To1dOperator.
        self.set_rule('I', lambda s: To1dOperator(
            s.shape_, order=s.order))
    def direct(self, input, output):
        # Per-axis coordinate: integer-divide by the axis weight, then take
        # the result modulo the axis length.
        np.floor_divide(input[..., None], self.coefs, out=output)
        np.mod(output, self.shape_, out=output)
    def __str__(self):
        return 'toNd'
| StarcoderdataPython |
6638777 | <filename>2016/10/part1.py<gh_stars>0
# Advent of Code 2016, day 10, part 1: simulate chip-sorting bots and find
# the bot that ends up comparing the chips 61 and 17.
from pathlib import Path
puzzle_input_raw = (Path(__file__).parent / "input.txt").read_text()
import re
from collections import defaultdict
instructions = puzzle_input_raw.splitlines()
# "value V goes to bot B": seed chip V directly onto bot B.
GIVE_VALUE_PATTERN = re.compile(r"value (\d+) goes to bot (\d+)")
# "bot B gives low to <bot|output> X and high to <bot|output> Y".
BOT_GIVE_PATTERN = re.compile(r"bot (\d+) gives low to (bot|output) (\d+) and high to (bot|output) (\d+)")
bots = defaultdict(set)
outputs = defaultdict(set)
# Dispatch table from the instruction's target keyword to its registry.
targets = {"bot": bots, "output": outputs}
looking_for = {61, 17}
# Process instructions FIFO; a give-instruction whose bot does not yet hold
# two chips is requeued until its inputs have arrived.
while instructions:
    instruction = instructions.pop(0)
    if instruction.startswith("value"):
        v, b = tuple(int(x) for x in GIVE_VALUE_PATTERN.match(instruction).groups())
        bots[b].add(int(v))
    else:
        b, low_type, low_target, high_type, high_target = tuple(int(x) if x.isdigit() else x for x in BOT_GIVE_PATTERN.match(instruction).groups())
        if len(bots[b]) != 2:
            # Not ready yet: defer this instruction to the back of the queue.
            instructions.append(instruction)
        else:
            if bots[b] == looking_for:
                # Puzzle answer: the bot comparing exactly {61, 17}.
                print(b)
                break
            low, high = sorted(bots[b])
            targets[low_type][low_target].add(low)
            targets[high_type][high_target].add(high)
            bots[b].clear()
8090391 | """
This module contains the tests for the automatic noise suppression extension.
"""
import numpy as np
import pytest
from spokestack import utils
from spokestack.context import SpeechContext
from spokestack.nsx.webrtc import AutomaticNoiseSuppression
np.random.seed(42)
def test_construction():
    """The suppressor can be constructed, reset and closed without error."""
    nsx = AutomaticNoiseSuppression(16000, 1)
    nsx.reset()
    nsx.close()
def test_invalid_sample_rate():
    """An unsupported sample rate is rejected at construction time."""
    with pytest.raises(ValueError):
        _ = AutomaticNoiseSuppression(sample_rate=900000, policy=1)
def test_invalid_frame_size():
    """A frame whose length does not match the configured width raises."""
    context = SpeechContext()
    nsx = AutomaticNoiseSuppression(16000, 1)
    # 550 samples is not a valid frame length for 16 kHz processing.
    bad_frame = np.random.rand(550)
    with pytest.raises(ValueError):
        nsx(context, bad_frame)
def test_invalid_frame_dtype():
    """A float frame (instead of int16) is rejected with TypeError."""
    context = SpeechContext()
    nsx = AutomaticNoiseSuppression(16000, 1)
    # Correct length (320 samples) but wrong dtype: float64 instead of int16.
    bad_frame = np.random.rand(320)
    with pytest.raises(TypeError):
        nsx(context, bad_frame)
def test_processing():
    """End-to-end suppression check on clean and noisy sine frames.

    BUGFIX: the original discarded the np.allclose(...) results, so the test
    asserted nothing; the comparisons are now real assertions.
    """
    context = SpeechContext()
    nsx = AutomaticNoiseSuppression(16000, 1)
    # no suppression: a clean tone should keep its level (within 3 dB)
    expect = sin_frame()
    actual = sin_frame()
    assert np.allclose(rms(expect), rms(actual), atol=3)
    nsx(context, utils.float_to_int16(sin_frame()))
    nsx(context, utils.float_to_int16(actual))
    assert np.allclose(rms(expect), rms(actual), atol=3)
    nsx.close()
    # valid suppression: the noisy tone should come back near the clean level
    expect = sin_frame()
    actual = add_noise(sin_frame())
    nsx(context, utils.float_to_int16(sin_frame()))
    nsx(context, utils.float_to_int16(actual))
    assert np.allclose(rms(expect), rms(actual), atol=3)
def sin_frame(sample_rate=16000, frequency=100, frame_width=20):
    """Generate one frame of a pure sine tone.

    :param sample_rate: samples per second
    :param frequency: tone frequency in Hz
    :param frame_width: frame duration in milliseconds
    :return: 1-d float array of ``sample_rate * frame_width // 1000`` samples
    """
    samples_per_frame = sample_rate * frame_width // 1000
    phase = 2 * np.pi * np.arange(sample_rate) / sample_rate
    tone = np.sin(frequency * phase)
    return tone[:samples_per_frame]
def rms(y):
    """Return the signal level in dB SPL (re 20 µPa), floored at 1e-5 RMS."""
    level = np.sqrt(np.mean(np.square(y)))
    return 20 * np.log10(max(level, 1e-5) / 2e-5)
def add_noise(frame):
    """Return *frame* with unit Gaussian noise at -10 dB added, clipped to [-1, 1].

    BUGFIX: the original mutated the input array in place (``frame += noise``)
    as a side effect; the input is now left untouched.
    """
    noise = np.random.normal(size=frame.shape)
    # Scale the noise down by 10 dB.
    noise = noise / np.power(10, 10 / 20.0)
    return np.clip(frame + noise, -1.0, 1.0)
| StarcoderdataPython |
8138983 | <filename>prod2vec_train.py
import json
import gensim
from snowflake_client import SnowflakeClient
def train_product_2_vec_model(sessions, min_c=2, size=48, window=3, iterations=20, ns_exponent=0.75):
    """
    Wrap gensim standard word2vec model, providing sensible parameters from other experiments with prod2vec.
    NOTE(review): uses the gensim 3.x parameter names (``size``, ``iter``);
    gensim 4.x renamed these to ``vector_size`` and ``epochs`` — confirm the
    pinned gensim version.
    :param sessions: list of list of strings
    :param min_c: gensim param
    :param size: gensim param
    :param window: gensim param
    :param iterations: gensim param
    :param ns_exponent: gensim param
    :return: gensim Keyed Vector object after training
    """
    model = gensim.models.Word2Vec(
        sessions,
        min_count=min_c,
        size=size,
        window=window,
        iter=iterations,
        ns_exponent=ns_exponent
    )
    # Only the keyed vectors are needed downstream; the full model is dropped.
    return model.wv
def get_products_in_session_from_snowflake(
    snowflake_client: SnowflakeClient,
    env_id: str,
    start_date: str,
    end_date: str,
    min_size: int=2,
    max_size: int=50
):
    """
    Template function to get products in all sessions from a SQL database. Result is a list of list, each list
    containing the sequence of SKU for the products viewed in a shopping sessions.
    :param snowflake_client: Python class to connect to a remote database
    :param env_id: client id
    :param start_date: start date
    :param end_date: end date
    :param min_size: specify a minimum session length to avoid sessions too short (1 product)
    :param max_size: specify a maximum session length to avoid sessions that are suspiciously long (100 products)
    :return: list of lists of strings, each string is an SKU in a session
    """
    sql_query = """"""
    # BUGFIX: NotImplementedError is the idiomatic exception for an
    # unfinished template hook (was a generic Exception).
    raise NotImplementedError("Need to implement this!")
    # NOTE: the code below is unreachable until the raise above is removed;
    # it documents the intended implementation.
    # get rows
    rows = snowflake_client.fetch_all(
        sql_query,
        params={
            'env_id': env_id,
            'start_date': start_date,
            'end_date': end_date,
            'min_size': min_size,
            'max_size': max_size
        },
        debug=False)
    # need to de-serialize from snowflake
    return [json.loads(s['SKUS']) for s in rows]
def calculate_prod_to_vecs(
    env_id: str,
    train_start: str,
    train_end: str,
    snowflake_client: SnowflakeClient
):
    """Fetch session SKU sequences for the date range and train prod2vec.

    :return: gensim KeyedVectors trained on the client's sessions
    """
    sessions = get_products_in_session_from_snowflake(snowflake_client, env_id, train_start, train_end)
    prod2vec_model = train_product_2_vec_model(sessions)
    return prod2vec_model
11234866 | <gh_stars>10-100
from django.db import models
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
import os
import urllib.parse
import uuid
# Create your models here.
class Subscriber(models.Model):
    """Newsletter subscriber using a double-opt-in confirmation flow.

    NOTE(review): ``reverse`` is imported from ``django.core.urlresolvers``,
    which was removed in Django 2.0 — migrate to ``django.urls`` when
    upgrading.
    """
    email = models.EmailField(max_length=254)
    # Set to True once the subscriber follows the confirmation link.
    active = models.BooleanField(default=False)
    # Random token embedded in the confirmation URL.
    confirm_key = models.UUIDField(primary_key=False, default=uuid.uuid4)
    subscribe_date = models.DateField(auto_now_add=True)
    def confirm_url(self, hostname, secure = False):
        """Build the absolute confirmation URL for this subscriber.

        NOTE(review): the ``secure`` flag is currently unused — the scheme is
        expected to already be part of ``hostname``. Confirm whether it can
        be removed.
        """
        return "{host}{path}?{params}".format(
            host = hostname,
            path = reverse('register_user'),
            params = urllib.parse.urlencode({'k': self.confirm_key})
        )
    def send_subscribe_confirm_email(self):
        """Send the double-opt-in confirmation email to the subscriber."""
        hostname = os.environ.get('BASE_IRI', 'http://www.ospc.org')
        # BUGFIX: removed a leftover debug print(self.email) that leaked
        # subscriber email addresses to stdout/logs.
        send_mail(subject="Thank you for joining the conversation on American tax policy",
                  message = """Welcome!
Thank you for registering with ospc.org. This is the best way to stay up to date on
the latest news from the Open Source Policy Center. We also invite you to try
the TaxBrain webapp.
Please visit {url} to confirm your subscription""".format(url = self.confirm_url(hostname)),
                  from_email = "Open Source Policy Center <<EMAIL>>",
                  recipient_list = [self.email])
| StarcoderdataPython |
321631 | import warnings
from typing import Any
import bs4
from django.utils.functional import cached_property
from tate.legacy.finders import DocumentFinder, ImageFinder, PageFinder
from tate.legacy.utils.classes import CommandBoundObject
from wagtail.images import get_image_model
Image = get_image_model()
class BaseParser(CommandBoundObject):
    """Base class for legacy-content parsers bound to a management command.

    Parsers accumulate human-readable notes in ``self.messages`` while
    parsing, and share PageFinder/DocumentFinder/ImageFinder instances with
    the bound command where possible.
    """
    def parse(self, value: Any) -> Any:
        # Base implementation only resets the per-parse message log;
        # subclasses are expected to extend this and return the parsed value.
        self.messages = []
    @staticmethod
    def get_soup(value: str) -> bs4.BeautifulSoup:
        """
        Return a ``bs4.BeautifulSoup`` instance representing the provided
        ``value``, which should be the contents of a HTML document, or
        more likely, as snippet of HTML as a string.
        NOTE: We're using the 'lxml' parser here, as it's already in the
        project's requirements, is faster, and will produce more
        consistant results than the default.
        """
        soup = bs4.BeautifulSoup(value, features="lxml")
        # Remove body, head and html tags (likely added by bs4)
        for elem in soup.find_all("body"):
            elem.unwrap()
        for elem in soup.find_all("head"):
            elem.unwrap()
        for elem in soup.find_all("html"):
            elem.unwrap()
        return soup
    def get_or_create_finder(self, key: str, finder_class: type):
        """Return the command's shared finder for ``key``, or a private one.

        Falls back (with a warning) to a fresh ``finder_class`` instance when
        the parser is unbound or the command has no finder under ``key``.
        """
        try:
            return self.command.finders[key]
        except AttributeError:
            warnings.warn(
                f"{type(self).__name__} instance is not bound to a command instance with "
                f"a 'finders' attribute, so is creating its own {finder_class} instance. "
                "Did you forget to run bind_to_command()?"
            )
        except KeyError:
            warnings.warn(
                f"The command instance bound to {type(self).__name__} cannot share a "
                f"finder instance matching the key '{key}', so the "
                f"{type(self).__name__} is creating its own {finder_class} instance."
            )
        return finder_class()
    @cached_property
    def page_finder(self) -> PageFinder:
        # Shared (or private) finder for Wagtail pages.
        return self.get_or_create_finder("pages", PageFinder)
    @cached_property
    def document_finder(self) -> DocumentFinder:
        # Shared (or private) finder for Wagtail documents.
        return self.get_or_create_finder("documents", DocumentFinder)
    @cached_property
    def image_finder(self) -> ImageFinder:
        # Shared (or private) finder for Wagtail images.
        return self.get_or_create_finder("images", ImageFinder)
    def find_image(self, value: Any):
        """
        Return a Wagtail image instance matching a supplied 'legacy system ID' value,
        or path/filename string.
        Raises ``django.core.exceptions.ObjectDoesNotExist`` if no such image can be found.
        """
        return self.image_finder.find(value)
    @cached_property
    def fallback_image(self):
        # Arbitrary existing image used when no specific match is required.
        return Image.objects.all().first()
    def find_document(self, value: Any):
        """
        Return a Wagtail document instance matching a supplied 'legacy system ID' value,
        or path/filename string.
        Raises ``django.core.exceptions.ObjectDoesNotExist`` if no such document can be found.
        """
        return self.document_finder.find(value)
    def find_page(self, value: Any):
        """
        Return a Wagtail ``Page`` instance matching a supplied 'legacy system ID' value,
        url or path string.
        Raises ``Page.DoesNotExist`` if no such page can be found.
        """
        return self.page_finder.find(value)
class BaseRichTextContainingParser(BaseParser):
    """Base class for parsers whose payload embeds rich-text (HTML) values.

    Subclasses must set ``richtext_parse_class`` to the parser class used
    for the embedded rich text.
    """
    # Parser class for embedded rich-text values; set by subclasses.
    richtext_parse_class = None
    @cached_property
    def richtext_parser(self):
        # NOTE(review): assumes richtext_parse_class takes the bound command
        # as its only constructor argument — confirm against subclasses.
        return self.richtext_parse_class(self.command)
    def parse_richtext(self, value: str) -> str:
        """Parse *value* with the rich-text parser, collecting its messages."""
        value = self.richtext_parser.parse(value)
        self.messages.extend(self.richtext_parser.messages)
        return value
| StarcoderdataPython |
3424971 | """
aio asynchronous (nonblocking) input output package
"""
from .wiring import WireLog
| StarcoderdataPython |
36257 | <gh_stars>1-10
from cymepy.export_manager.base_definations import ExportManager
from cymepy.common import EXPORT_FILENAME
import json
import os
class Writer(ExportManager):
    """JSON export manager: accumulates per-step results and writes them to
    ``<project_path>/exports/<EXPORT_FILENAME>.json`` in one dump."""
    def __init__(self, sim_instance, solver, options, logger, **kwargs):
        super(Writer, self).__init__(sim_instance, solver, options, logger, **kwargs)
        # One entry per simulation step, appended by update().
        self.results = []
        self.path = os.path.join(
            self.settings["project"]['project_path'],
            'exports',
            f"{EXPORT_FILENAME}.json"
        )
        # Removed two stray `pass` statements and a useless bare `return`
        # left over from scaffolding.
    def update(self):
        """Collect the current step's results from the base manager."""
        results = super().update()
        self.results.append(results)
    def export(self):
        """Write all accumulated results to the export file as pretty JSON."""
        with open(self.path, "w") as write_file:
            json.dump(self.results, write_file, indent=4, sort_keys=True)
| StarcoderdataPython |
6426909 | """
函数参数
实际参数
"""
# Positional parameters: the matching arguments are required.
# Too few arguments:
# TypeError: func01() missing 1 required positional argument: 'p3'
# Too many arguments:
# TypeError: func01() takes 3 positional arguments but 4 were given
def func01(p1, p2, p3):
    """Print the three required positional parameters, one per line."""
    print(p1)
    print(p2)
    print(p3)
# Parameters with defaults: the matching arguments are optional.
# Defaulted parameters must appear contiguously from right to left.
def func02(p1=0, p2="", p3=0.0):
    """Print the three optional parameters, one per line."""
    print(p1)
    print(p2)
    print(p3)
# 1. Positional arguments: matched to parameters by order.
func01(1, 2, 3)
# 2. Keyword arguments: matched to parameters by name.
# Benefit: makes call sites more readable,
# and allows targeting one parameter while skipping the others.
func01(p1=1, p2=2, p3=3)
func01(p2=2, p1=1, p3=3)
func02(p2="b")
func02(p1=100)
func02(100)
func02(100,p3 = 1.2)
6471070 | import pytest
import uuid
from app.models.Connector import PrsConnectorCreate, PrsConnectorEntry
from fastapi import HTTPException
def test_connector_create():
    """A freshly created connector gets a valid UUID id that round-trips."""
    data = PrsConnectorCreate()
    conn = PrsConnectorEntry(data=data)
    # The generated id must parse as a UUID.
    try:
        uuid.UUID(conn.id)
    except ValueError as ex:
        assert False, ex
    # Re-loading the entry by id yields the same id.
    conn_copy = PrsConnectorEntry(id=conn.id)
    assert conn_copy.id == conn.id
def test_wrong_connector_id():
    """Looking up a random, non-existent id raises HTTPException."""
    id = str(uuid.uuid4())
    with pytest.raises(HTTPException):
        # The binding to `conn` is intentionally unused; only the raise matters.
        conn = PrsConnectorEntry(id=id)
| StarcoderdataPython |
3472988 | from hexapod.models import VirtualHexapod
from tests.kinematics_cases import case1, case2
from tests.helpers import assert_hexapod_points_equal
CASES = [case1, case2]
def assert_kinematics(case, assume_ground_targets):
    """Build a hexapod from the case's dimensions, apply its poses, and
    assert every body and leg point matches the expected coordinates."""
    hexapod = VirtualHexapod(case.given_dimensions)
    hexapod.update(case.given_poses, assume_ground_targets)
    assert_hexapod_points_equal(
        hexapod, case.correct_body_points, case.correct_leg_points, case.description
    )
def test_sample_kinematics():
    """Run every sample case both with and without ground-target assumption."""
    for case in CASES:
        for assume_ground_targets in (True, False):
            assert_kinematics(case, assume_ground_targets)
| StarcoderdataPython |
4965698 | import plotly
import pickle
from src.plotting.pages.linkShare import figure_generator as fg
# Case: the user tried to access a shared link with no stored filename set.
def test_incorrect_link_handling():
    # make_graph must still return a Plotly Figure (an error placeholder),
    # never raise or return a different type.
    assert isinstance(fg.make_graph(), plotly.graph_objs._figure.Figure)
def test_fake_file_access_handling():
    """A filename that does not exist on disk still yields a Plotly Figure."""
    fg.filename = "fake_file"
    assert isinstance(fg.make_graph(), plotly.graph_objs._figure.Figure)
def test_dummy_file_access_handling():
    """Pickle a well-formed payload to disk, point the figure generator at
    it, and check a Plotly Figure is produced.

    Fix: the pickle file is now written through a context manager instead of
    an explicit open("wb+")/close pair, so the handle is released even if
    pickling fails.
    """
    data = {
        "df": [
            {
                "X0": 0.105462161,
                "X1": 0.025386467,
            },
            {
                "X0": -0.323946043,
                "X1": -0.180540713,
            },
            {
                "X0": -0.177804072,
                "X1": 0.041125761,
            },
            {
                "X0": -0.265325854,
                "X1": 0.199801246,
            },
            {
                "X0": -0.014840754,
                "X1": 0.126975545,
            },
            {
                "X0": -0.217654183,
                "X1": 0.313129246,
            },
        ]
    }
    with open("test.pkl", "wb") as a_file:
        pickle.dump(data, a_file)
    fg.filename = "test.pkl"
    assert isinstance(fg.make_graph(), plotly.graph_objs._figure.Figure)
| StarcoderdataPython |
6441984 | <reponame>lixiaoy1/nova
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api_sample_tests import test_servers
import nova.tests.functional.api_samples_test_base as astb
def fake_get(*args, **kwargs):
    """Stub: always return the canonical 'default' security group."""
    return {
        'id': 1,
        'description': 'default',
        'name': 'default',
        'project_id': astb.PROJECT_ID,
        'rules': [],
    }
def fake_get_instances_security_groups_bindings(self, context, servers,
                                                detailed=False):
    """Stub: map every server id to a single security group named 'test'."""
    return {server.get('id'): [{'name': 'test'}] for server in servers}
def fake_add_to_instance(self, context, instance, security_group_name):
    """Stub: adding a security group to an instance is a no-op."""
    pass
def fake_remove_from_instance(self, context, instance, security_group_name):
    """Stub: removing a security group from an instance is a no-op."""
    pass
def fake_list(self, context, names=None, ids=None, project=None,
              search_opts=None):
    """Stub: listing security groups always yields the single default group."""
    return [fake_get()]
def fake_get_instance_security_groups(self, context, instance_uuid,
                                      detailed=False):
    """Stub: every instance reports the single default security group."""
    return [fake_get()]
def fake_create_security_group(self, context, name, description):
    """Stub: creating a security group echoes back the default group."""
    return fake_get()
def fake_create_security_group_rule(self, context, security_group, new_rule):
    """Stub: echo back a fixed TCP/22 rule regardless of the requested rule."""
    return dict(
        from_port=22,
        to_port=22,
        cidr='10.0.0.0/24',
        id='00000000-0000-0000-0000-000000000000',
        parent_group_id='11111111-1111-1111-1111-111111111111',
        protocol='tcp',
        group_id=None,
    )
def fake_remove_rules(self, context, security_group, rule_ids):
    """Stub: removing security-group rules is a no-op."""
    pass
def fake_get_rule(self, context, id):
    """Stub: return a minimal rule for *id* under a fixed parent group."""
    rule = {'parent_group_id': '11111111-1111-1111-1111-111111111111'}
    rule['id'] = id
    return rule
class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
    """API-sample tests for the os-security-groups extension.

    All neutron security-group driver calls are stubbed out in setUp, so
    these tests only exercise request/response serialization against the
    stored API samples.
    """
    sample_dir = 'os-security-groups'
    # Force the neutron security-group code path (the one being stubbed).
    USE_NEUTRON = True
    def setUp(self):
        super(SecurityGroupsJsonTest, self).setUp()
        # Replace every SecurityGroupAPI entry point with the module-level
        # fakes so no real neutron calls are made.
        path = 'nova.network.security_group.neutron_driver.SecurityGroupAPI.'
        self.stub_out(path + 'get', fake_get)
        self.stub_out(path + 'get_instances_security_groups_bindings',
                      fake_get_instances_security_groups_bindings)
        self.stub_out(path + 'add_to_instance', fake_add_to_instance)
        self.stub_out(path + 'remove_from_instance', fake_remove_from_instance)
        self.stub_out(path + 'list', fake_list)
        self.stub_out(path + 'get_instance_security_groups',
                      fake_get_instance_security_groups)
        self.stub_out(path + 'create_security_group',
                      fake_create_security_group)
        self.stub_out(path + 'create_security_group_rule',
                      fake_create_security_group_rule)
        self.stub_out(path + 'remove_rules',
                      fake_remove_rules)
        self.stub_out(path + 'get_rule',
                      fake_get_rule)
    def _get_create_subs(self):
        # Template substitutions for the 'create' request/response samples.
        return {
            'group_name': 'default',
            "description": "default",
        }
    def _create_security_group(self):
        """POST a security-group creation request and return the response."""
        subs = self._get_create_subs()
        return self._do_post('os-security-groups',
                             'security-group-post-req', subs)
    def _add_group(self, uuid):
        """POST a server action adding the 'test' group to server *uuid*."""
        subs = {
            'group_name': 'test'
        }
        return self._do_post('servers/%s/action' % uuid,
                             'security-group-add-post-req', subs)
    def test_security_group_create(self):
        """Creation response matches the stored sample."""
        response = self._create_security_group()
        subs = self._get_create_subs()
        self._verify_response('security-groups-create-resp', subs,
                              response, 200)
    def test_security_groups_list(self):
        # Get api sample of security groups get list request.
        response = self._do_get('os-security-groups')
        self._verify_response('security-groups-list-get-resp',
                              {}, response, 200)
    def test_security_groups_get(self):
        # Get api sample of security groups get request.
        security_group_id = '11111111-1111-1111-1111-111111111111'
        response = self._do_get('os-security-groups/%s' % security_group_id)
        self._verify_response('security-groups-get-resp', {}, response, 200)
    def test_security_groups_list_server(self):
        # Get api sample of security groups for a specific server.
        uuid = self._post_server()
        response = self._do_get('servers/%s/os-security-groups' % uuid)
        self._verify_response('server-security-groups-list-resp',
                              {}, response, 200)
    def test_security_groups_add(self):
        """Adding a group to a server returns 202 with an empty body."""
        self._create_security_group()
        uuid = self._post_server()
        response = self._add_group(uuid)
        self.assertEqual(202, response.status_code)
        self.assertEqual('', response.text)
    def test_security_groups_remove(self):
        """Removing a previously added group returns 202 with an empty body."""
        self._create_security_group()
        uuid = self._post_server()
        self._add_group(uuid)
        subs = {
            'group_name': 'test'
        }
        response = self._do_post('servers/%s/action' % uuid,
                                 'security-group-remove-post-req', subs)
        self.assertEqual(202, response.status_code)
        self.assertEqual('', response.text)
    def test_security_group_rules_create(self):
        """Rule creation response matches the stored sample."""
        response = self._do_post('os-security-group-rules',
                                 'security-group-rules-post-req', {})
        self._verify_response('security-group-rules-post-resp', {}, response,
                              200)
    def test_security_group_rules_remove(self):
        """Rule deletion returns 202."""
        response = self._do_delete(
            'os-security-group-rules/00000000-0000-0000-0000-000000000000')
        self.assertEqual(202, response.status_code)
| StarcoderdataPython |
3406794 | <filename>challenge_4/python/sarcodian/src/challenge_4.py
"""
Each root has two subtrees, each subtree may have a root
Each tree has the format ['parent',
['child1',
['child1.1'],['child1.2']],
['child2',
['child2.1'],['child2.2']]]
"""
import itertools
def reverse(tree):
    """Return a new tree that is the mirror image of *tree*.

    A node is ``[value, left, right]``; a child of length < 2 (a leaf like
    ``[x]`` or an empty slot ``[]``) is swapped as-is, an internal child is
    mirrored recursively. The input tree is not modified.

    BUGFIX: the original wrapped the recursed child in an extra list when
    exactly one child was internal (``[reverse(subtree)]``), producing
    malformed nodes; both children are now handled uniformly.
    """
    root, left, right = tree
    mirrored_left = right if len(right) < 2 else reverse(right)
    mirrored_right = left if len(left) < 2 else reverse(left)
    return [root, mirrored_left, mirrored_right]
# Fixture: a complete binary tree of depth 2.
#        4
#      /   \
#     2     7
#    / \   / \
#   1   3 6   9
test = [4,
        [2,
         [1], [3]],
        [7,
         [6], [9]]
        ]
def create_large_tree():
    """
    Create a complete binary tree with 4 levels of nodes to show that the
    implementation scales (uses the 15 node values below).

    Node values are assigned level by level, in itertools.product order
    within each level, so the layout is deterministic.
    """
    value_of_nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'a', 'b', 'c', 'd', 'e']
    tree = ''
    depth = 0
    # `count` indexes the next unused value; it must advance in exactly the
    # same order the (i, j, k) index tuples are generated.
    count = 0
    while depth < 4:
        if depth == 0:
            # Root node; children start as empty placeholders.
            tree = [value_of_nodes[0], [], []]
            depth += 1
            count += 1
        elif depth == 1:
            for i in [1,2]:
                tree[i] = [value_of_nodes[count], [], []]
                count += 1
            depth += 1
        elif depth == 2:
            # All (i, j) child slots at depth 2, in lexicographic order.
            for i,j in itertools.product([1,2], repeat=depth):
                tree[i][j] = [value_of_nodes[count], [], []]
                count += 1
            depth += 1
        elif depth == 3:
            # All (i, j, k) child slots at depth 3; leaves keep empty [] children.
            for i, j, k in itertools.product([1,2], repeat=depth):
                tree[i][j][k] = [value_of_nodes[count], [], []]
                count += 1
            depth += 1
    return tree
# Demo: reversing twice must round-trip back to the original tree.
print(test)
test_rev = reverse(test)
print(test_rev)
print(reverse(test_rev))
# Same round-trip check on the generated 4-level tree.
test2 = create_large_tree()
print(test2)
test_rev2 = reverse(test2)
print(test_rev2)
print(reverse(test_rev2))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.