max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
blog/routers/user.py
|
Royalmayur/fastapi
| 0
|
6627751
|
from fastapi import APIRouter,status,Depends
from blog import schemas,database
from sqlalchemy.orm.session import Session
from blog.repository import user

# For larger applications FastAPI recommends splitting endpoints into
# per-resource router modules (e.g. app/routers/users.py) that main.py
# later includes, instead of defining every route in one file.
# APIRouter behaves like a mini FastAPI app for exactly that purpose.
router = APIRouter(
    prefix='/user',    # every route below is mounted under /user
    tags=["Users"],    # groups these endpoints in the OpenAPI docs
)

# Dependency that yields a database session per request.
get_db = database.get_db


@router.post('/', status_code=status.HTTP_201_CREATED, response_model=schemas.User_show)
def create_user(request:schemas.User, db:Session=Depends(get_db)):
    """Create a new user from the validated request body."""
    return user.create(request,db)


# FastAPI infers parameter kinds automatically: a function parameter whose
# name appears in the path template ({id}) is a path parameter, anything
# else becomes a query parameter.
@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas.User_show)
def get_user(id:int, db:Session=Depends(get_db)):
    """Fetch a single user by primary key."""
    return user.getUser(id,db)
|
from fastapi import APIRouter,status,Depends
from blog import schemas,database
from sqlalchemy.orm.session import Session
from blog.repository import user

# Routes for the /user resource live in their own router module so the
# application can be structured as app/routers/* and included from main.
router = APIRouter(
    prefix='/user',    # URL prefix for all routes in this module
    tags=["Users"],    # OpenAPI documentation grouping
)

# Per-request database session dependency.
get_db = database.get_db


@router.post('/', status_code=status.HTTP_201_CREATED, response_model=schemas.User_show)
def create_user(request:schemas.User, db:Session=Depends(get_db)):
    """Persist a new user built from the request body."""
    return user.create(request,db)


# Parameters named in the path template ({id}) are treated as path
# parameters by FastAPI; all others become query parameters.
@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas.User_show)
def get_user(id:int, db:Session=Depends(get_db)):
    """Return the user with the given primary key."""
    return user.getUser(id,db)
|
en
| 0.696653
|
# If you are building an application or a web API # FastAPI provides a convenience tool to structure your application while keeping all the flexibility. ├── app │ ├── __init__.py │ ├── main.py │ ├── dependencies.py │ └── routers │ │ ├── __init__.py │ │ ├── items.py │ │ └── users.py │ └── internal │ ├── __init__.py │ └── admin.py # That's why we should use external routers directory which is defined every routes file. # for defining Routes, we use APIRouter class for create routes # defined routes Url # Tags are used for catagorized our routes. # "?limit=10&published=true" "?" is query in url but don't need to specify here #here we handle query paramenters ,i proovide default value also # Fast api is smart enough to identify which is query parameter and path parameter ,if path have any params then api check same name params have in path operation function then it make path params otherwise make it query. #route defined ,And its called operation on the path and get is operation #Called, Path operation function
| 2.772372
| 3
|
adb_shell/exceptions.py
|
zeibou/adb_shell
| 1
|
6627752
|
# Copyright (c) 2020 <NAME> and contributors
#
# This file is part of the adb-shell package. It incorporates work
# covered by the following license notice:
#
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ADB-related exceptions.
"""
from . import constants
class AdbCommandFailureException(Exception):
    """Raised when the device answers a command with a ``b'FAIL'`` packet."""
class DeviceAuthError(Exception):
    """Raised when authentication with the device fails."""

    def __init__(self, message, *args):
        # Interpolate the arguments into the message, but also forward the
        # raw args to Exception so callers can still inspect them.
        formatted = message % args
        super(DeviceAuthError, self).__init__(formatted, *args)
class InterleavedDataError(Exception):
    """Raised when packet data arrives interleaved; commands must be sent serially."""


class InvalidChecksumError(Exception):
    """Raised when a packet's checksum does not match the expected value."""
class InvalidCommandError(Exception):
    """Raised when an invalid command is received."""

    def __init__(self, message, response_header, response_data):
        # A FAIL header means the device itself rejected the command;
        # make that explicit in the stored message.
        if response_header == constants.FAIL:
            message = 'Command failed, device said so. (%s)' % message
        super(InvalidCommandError, self).__init__(message, response_header,
                                                  response_data)
class InvalidHandleError(Exception):
    """Raised when the provided handle lacks one of the required methods:
    ``close``, ``connect``, ``bulk_read``, or ``bulk_write``."""


class InvalidResponseError(Exception):
    """Raised when the device returns an invalid response to a command."""


class PushFailedError(Exception):
    """Raised when pushing a file to the device fails."""
class TcpTimeoutException(Exception):
    """Raised when a TCP read/write operation exceeded the allowed time."""
|
# Copyright (c) 2020 <NAME> and contributors
#
# This file is part of the adb-shell package. It incorporates work
# covered by the following license notice:
#
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ADB-related exceptions.
"""
from . import constants
class AdbCommandFailureException(Exception):
    """A ``b'FAIL'`` packet was received."""


class DeviceAuthError(Exception):
    """Device authentication failed."""

    def __init__(self, message, *args):
        # Format the message with args, then pass both along to Exception.
        message %= args
        super(DeviceAuthError, self).__init__(message, *args)


class InterleavedDataError(Exception):
    """Interleaved packet data is unsupported; commands must be sent serially."""


class InvalidChecksumError(Exception):
    """Checksum of data didn't match the expected checksum."""


class InvalidCommandError(Exception):
    """Got an invalid command."""

    def __init__(self, message, response_header, response_data):
        # FAIL header => the device itself rejected the command.
        if response_header == constants.FAIL:
            message = 'Command failed, device said so. (%s)' % message
        super(InvalidCommandError, self).__init__(message, response_header,
                                                  response_data)


class InvalidHandleError(Exception):
    """The provided handle does not implement the necessary methods:
    ``close``, ``connect``, ``bulk_read``, and ``bulk_write``."""


class InvalidResponseError(Exception):
    """Got an invalid response to our command."""


class PushFailedError(Exception):
    """Pushing a file failed for some reason."""


class TcpTimeoutException(Exception):
    """A TCP read/write operation exceeded the allowed time."""
|
en
| 0.888808
|
# Copyright (c) 2020 <NAME> and contributors # # This file is part of the adb-shell package. It incorporates work # covered by the following license notice: # # # Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ADB-related exceptions. A ``b'FAIL'`` packet was received. Device authentication failed. We only support command sent serially. Checksum of data didn't match expected checksum. Got an invalid command. The provided handle does not implement the necessary methods: ``close``, ``connect``, ``bulk_read``, and ``bulk_write``. Got an invalid response to our command. Pushing a file failed for some reason. TCP connection timed read/write operation exceeded the allowed time.
| 2.27723
| 2
|
tests/mock_clients/mock_s3.py
|
kabirkhan/cloudpathlib
| 128
|
6627753
|
<filename>tests/mock_clients/mock_s3.py<gh_stars>100-1000
import collections
from datetime import datetime
from pathlib import Path, PurePosixPath
import shutil
from tempfile import TemporaryDirectory
from boto3.session import Session
from botocore.exceptions import ClientError
from .utils import delete_empty_parents_up_to_root
TEST_ASSETS = Path(__file__).parent.parent / "assets"
# Since we don't control exactly when the filesystem finishes writing a file
# and the test files are super small, we can end up with race conditions in
# the tests where the updated file is modified before the source file,
# which breaks our caching logic
NoSuchKey = Session().client("s3").exceptions.NoSuchKey
def mocked_session_class_factory(test_dir: str):
    """Build a MockBoto3Session class whose instances serve files from a
    temporary copy of the test assets placed under *test_dir*."""

    class MockBoto3Session:
        def __init__(self, *args, **kwargs):
            # Work on a throwaway copy so tests never mutate the real assets.
            self.tmp = TemporaryDirectory()
            self.tmp_path = Path(self.tmp.name) / "test_case_copy"
            shutil.copytree(TEST_ASSETS, self.tmp_path / test_dir)

        def __del__(self):
            self.tmp.cleanup()

        def resource(self, item, endpoint_url, config=None):
            return MockBoto3Resource(self.tmp_path)

        def client(self, item, endpoint_url, config=None):
            return MockBoto3Client(self.tmp_path)

    return MockBoto3Session
class MockBoto3Resource:
    """Stand-in for a boto3 service resource, backed by a local directory."""

    def __init__(self, root):
        self.root = root
        # Recorded by MockBoto3Object so tests can verify that transfer
        # configs were forwarded.
        self.download_config = None
        self.upload_config = None

    def Bucket(self, bucket):
        return MockBoto3Bucket(self.root)

    def ObjectSummary(self, bucket, key):
        return MockBoto3ObjectSummary(self.root, key)

    def Object(self, bucket, key):
        return MockBoto3Object(self.root, key, self)
class MockBoto3Object:
    """Mimics an S3 Object resource using a file under *root*."""

    def __init__(self, root, path, resource):
        self.root = root
        self.path = root / path
        self.resource = resource

    def get(self):
        # Directories are not objects in S3, so treat them as missing keys.
        if not self.path.exists() or self.path.is_dir():
            raise NoSuchKey({}, {})
        return {"key": str(PurePosixPath(self.path))}

    def load(self):
        if not self.path.exists() or self.path.is_dir():
            raise ClientError({}, {})
        return {"key": str(PurePosixPath(self.path))}

    @property
    def key(self):
        """The object's key: the path relative to the bucket root."""
        return str(PurePosixPath(self.path).relative_to(PurePosixPath(self.root)))

    def copy_from(self, CopySource=None, Metadata=None, MetadataDirective=None):
        source_key = CopySource["Key"]
        if source_key == str(self.path.relative_to(self.root)):
            # Copying an object onto itself just refreshes its mtime.
            self.path.touch()
        else:
            self.path.write_bytes((self.root / Path(source_key)).read_bytes())

    def download_file(self, to_path, Config=None):
        Path(to_path).write_bytes(self.path.read_bytes())
        # Record the config so tests can assert it was forwarded.
        self.resource.download_config = Config

    def upload_file(self, from_path, Config=None):
        self.path.parent.mkdir(parents=True, exist_ok=True)
        self.path.write_bytes(Path(from_path).read_bytes())
        self.resource.upload_config = Config

    def delete(self):
        self.path.unlink()
        delete_empty_parents_up_to_root(self.path, self.root)
        return {"ResponseMetadata": {"HTTPStatusCode": 204}}

    def copy(self, source):
        # boto3's Object.copy semantics are "copy from" the given source.
        src = self.root / source["Key"]
        self.path.parent.mkdir(parents=True, exist_ok=True)
        return shutil.copy(str(src), str(self.path))
class MockBoto3ObjectSummary:
    """Mimics an S3 ObjectSummary for a file under *root*."""

    def __init__(self, root, path):
        self.path = root / path

    def get(self):
        # Missing files and directories behave like absent S3 keys.
        if not self.path.exists() or self.path.is_dir():
            raise NoSuchKey({}, {})
        return {
            "LastModified": datetime.fromtimestamp(self.path.stat().st_mtime),
            "ContentLength": None,
            "ETag": hash(str(self.path)),
            "ContentType": None,
            "Metadata": {},
        }
class MockBoto3Bucket:
    """Mimics an S3 Bucket; listings are delegated to MockObjects."""

    def __init__(self, root):
        self.root = root

    @property
    def objects(self):
        return MockObjects(self.root)
class MockObjects:
    """Mimics the ``bucket.objects`` collection accessor."""

    def __init__(self, root):
        self.root = root

    def filter(self, Prefix=""):
        target = self.root / Prefix
        # A prefix naming a single file yields a one-element collection.
        if target.is_file():
            return MockCollection([PurePosixPath(target)], self.root)
        matches = [
            PurePosixPath(entry)
            for entry in target.glob("**/*")
            if entry.is_file() and not entry.name.startswith(".")
        ]
        return MockCollection(matches, self.root)
class MockCollection:
    """Iterable of lightweight (key, bucket_name) records for matched files."""

    def __init__(self, items, root):
        self.root = root
        record = collections.namedtuple("s3_obj", "key bucket_name")
        self.full_paths = items
        self.s3_obj_paths = [
            record(bucket_name="bucket", key=str(item.relative_to(self.root)))
            for item in items
        ]

    def __iter__(self):
        return iter(self.s3_obj_paths)

    def limit(self, n):
        """Return at most the first *n* records."""
        return self.s3_obj_paths[:n]

    def delete(self):
        """Delete every matched file, pruning now-empty parent directories."""
        for full_path in self.full_paths:
            Path(full_path).unlink()
            delete_empty_parents_up_to_root(Path(full_path), self.root)
        return [{"ResponseMetadata": {"HTTPStatusCode": 200}}]
class MockBoto3Client:
    """Mimics the low-level S3 client (pagination and exception access)."""

    def __init__(self, root):
        self.root = root

    def get_paginator(self, api):
        return MockBoto3Paginator(self.root)

    @property
    def exceptions(self):
        # Expose the real NoSuchKey class exactly like boto3 clients do.
        Ex = collections.namedtuple("Ex", "NoSuchKey")
        return Ex(NoSuchKey=NoSuchKey)
class MockBoto3Paginator:
    """Mimics list-objects pagination over a local directory."""

    def __init__(self, root, per_page=2):
        self.root = root
        self.per_page = per_page

    def paginate(self, Bucket=None, Prefix="", Delimiter=None):
        """Yield pages of {"CommonPrefixes": dirs, "Contents": files}."""
        target = self.root / Prefix
        entries = [e for e in target.iterdir() if not e.name.startswith(".")]
        for start in range(0, len(entries), self.per_page):
            page = entries[start:start + self.per_page]
            dirs = [
                {"Prefix": str(e.relative_to(self.root).as_posix())}
                for e in page
                if e.is_dir()
            ]
            files = [
                {"Key": str(e.relative_to(self.root).as_posix())}
                for e in page
                if e.is_file()
            ]
            yield {"CommonPrefixes": dirs, "Contents": files}
|
<filename>tests/mock_clients/mock_s3.py<gh_stars>100-1000
import collections
from datetime import datetime
from pathlib import Path, PurePosixPath
import shutil
from tempfile import TemporaryDirectory
from boto3.session import Session
from botocore.exceptions import ClientError
from .utils import delete_empty_parents_up_to_root
TEST_ASSETS = Path(__file__).parent.parent / "assets"
# Since we don't control exactly when the filesystem finishes writing a file
# and the test files are super small, we can end up with race conditions in
# the tests where the updated file is modified before the source file,
# which breaks our caching logic
NoSuchKey = Session().client("s3").exceptions.NoSuchKey
def mocked_session_class_factory(test_dir: str):
    """Build a MockBoto3Session class serving files from a temp copy of the
    test assets under *test_dir*."""

    class MockBoto3Session:
        def __init__(self, *args, **kwargs):
            # Copy the assets so tests never mutate the originals.
            self.tmp = TemporaryDirectory()
            self.tmp_path = Path(self.tmp.name) / "test_case_copy"
            shutil.copytree(TEST_ASSETS, self.tmp_path / test_dir)

        def __del__(self):
            self.tmp.cleanup()

        def resource(self, item, endpoint_url, config=None):
            return MockBoto3Resource(self.tmp_path)

        def client(self, item, endpoint_url, config=None):
            return MockBoto3Client(self.tmp_path)

    return MockBoto3Session


class MockBoto3Resource:
    """boto3 resource stand-in backed by a local directory."""

    def __init__(self, root):
        self.root = root
        # Recorded so tests can assert transfer configs were passed through.
        self.download_config = None
        self.upload_config = None

    def Bucket(self, bucket):
        return MockBoto3Bucket(self.root)

    def ObjectSummary(self, bucket, key):
        return MockBoto3ObjectSummary(self.root, key)

    def Object(self, bucket, key):
        return MockBoto3Object(self.root, key, self)


class MockBoto3Object:
    """S3 Object stand-in backed by a file under *root*."""

    def __init__(self, root, path, resource):
        self.root = root
        self.path = root / path
        self.resource = resource

    def get(self):
        # Directories are not S3 objects: treat them as missing keys.
        if not self.path.exists() or self.path.is_dir():
            raise NoSuchKey({}, {})
        return {"key": str(PurePosixPath(self.path))}

    def load(self):
        if not self.path.exists() or self.path.is_dir():
            raise ClientError({}, {})
        return {"key": str(PurePosixPath(self.path))}

    @property
    def key(self):
        return str(PurePosixPath(self.path).relative_to(PurePosixPath(self.root)))

    def copy_from(self, CopySource=None, Metadata=None, MetadataDirective=None):
        source_key = CopySource["Key"]
        if source_key == str(self.path.relative_to(self.root)):
            # Same file: just refresh the mtime.
            self.path.touch()
        else:
            self.path.write_bytes((self.root / Path(source_key)).read_bytes())

    def download_file(self, to_path, Config=None):
        Path(to_path).write_bytes(self.path.read_bytes())
        # Track the config so tests can verify it was used.
        self.resource.download_config = Config

    def upload_file(self, from_path, Config=None):
        self.path.parent.mkdir(parents=True, exist_ok=True)
        self.path.write_bytes(Path(from_path).read_bytes())
        self.resource.upload_config = Config

    def delete(self):
        self.path.unlink()
        delete_empty_parents_up_to_root(self.path, self.root)
        return {"ResponseMetadata": {"HTTPStatusCode": 204}}

    def copy(self, source):
        # boto3's Object.copy is effectively "copy from" the given source.
        src = self.root / source["Key"]
        self.path.parent.mkdir(parents=True, exist_ok=True)
        return shutil.copy(str(src), str(self.path))


class MockBoto3ObjectSummary:
    """S3 ObjectSummary stand-in for a file under *root*."""

    def __init__(self, root, path):
        self.path = root / path

    def get(self):
        if not self.path.exists() or self.path.is_dir():
            raise NoSuchKey({}, {})
        return {
            "LastModified": datetime.fromtimestamp(self.path.stat().st_mtime),
            "ContentLength": None,
            "ETag": hash(str(self.path)),
            "ContentType": None,
            "Metadata": {},
        }


class MockBoto3Bucket:
    """S3 Bucket stand-in; listings are delegated to MockObjects."""

    def __init__(self, root):
        self.root = root

    @property
    def objects(self):
        return MockObjects(self.root)


class MockObjects:
    """Stand-in for the ``bucket.objects`` collection accessor."""

    def __init__(self, root):
        self.root = root

    def filter(self, Prefix=""):
        target = self.root / Prefix
        # A prefix naming one file yields a one-element collection.
        if target.is_file():
            return MockCollection([PurePosixPath(target)], self.root)
        matches = [
            PurePosixPath(entry)
            for entry in target.glob("**/*")
            if entry.is_file() and not entry.name.startswith(".")
        ]
        return MockCollection(matches, self.root)


class MockCollection:
    """Iterable of (key, bucket_name) records for the matched files."""

    def __init__(self, items, root):
        self.root = root
        record = collections.namedtuple("s3_obj", "key bucket_name")
        self.full_paths = items
        self.s3_obj_paths = [
            record(bucket_name="bucket", key=str(item.relative_to(self.root)))
            for item in items
        ]

    def __iter__(self):
        return iter(self.s3_obj_paths)

    def limit(self, n):
        return self.s3_obj_paths[:n]

    def delete(self):
        for full_path in self.full_paths:
            Path(full_path).unlink()
            delete_empty_parents_up_to_root(Path(full_path), self.root)
        return [{"ResponseMetadata": {"HTTPStatusCode": 200}}]


class MockBoto3Client:
    """Low-level S3 client stand-in (pagination + exceptions)."""

    def __init__(self, root):
        self.root = root

    def get_paginator(self, api):
        return MockBoto3Paginator(self.root)

    @property
    def exceptions(self):
        # Mirror boto3 clients by exposing the real NoSuchKey class.
        Ex = collections.namedtuple("Ex", "NoSuchKey")
        return Ex(NoSuchKey=NoSuchKey)


class MockBoto3Paginator:
    """List-objects pagination stand-in over a local directory."""

    def __init__(self, root, per_page=2):
        self.root = root
        self.per_page = per_page

    def paginate(self, Bucket=None, Prefix="", Delimiter=None):
        target = self.root / Prefix
        entries = [e for e in target.iterdir() if not e.name.startswith(".")]
        for start in range(0, len(entries), self.per_page):
            page = entries[start:start + self.per_page]
            dirs = [
                {"Prefix": str(e.relative_to(self.root).as_posix())}
                for e in page
                if e.is_dir()
            ]
            files = [
                {"Key": str(e.relative_to(self.root).as_posix())}
                for e in page
                if e.is_file()
            ]
            yield {"CommonPrefixes": dirs, "Contents": files}
|
en
| 0.913182
|
# Since we don't contol exactly when the filesystem finishes writing a file # and the test files are super small, we can end up with race conditions in # the tests where the updated file is modified before the source file, # which breaks our caching logic # copy test assets for reference in tests without affecting assets # same file, touch # track config to make sure it's used in tests # boto3 is more like "copy from"
| 2.14216
| 2
|
webhook/utils.py
|
fbsamples/cp_reference
| 2
|
6627754
|
<gh_stars>1-10
# Copyright 2004-present, Facebook. All Rights Reserved.
import json
from datetime import datetime, timezone
from fb_metadata.models.fb_metadata import FacebookMetadata
from .models import WebhookNotification
from .choices import WebhookEvents
def processWebhookNotification(raw_data):
    """Process the raw JSON payload delivered by a webhook notification.

    params:
        raw_data: raw JSON string received from the webhook
    """
    # Currently we only process setup statuses of commerce accounts.
    payload = json.loads(raw_data)
    topic = payload["object"]
    entry = payload['entry'][0]
    time_sent = entry['time']
    commerce_account_id = entry['id']
    change = entry['changes'][0]
    event = change['field']
    value = change['value']

    # Persist the notification against the store owning this commerce account.
    fb_metadata = FacebookMetadata.objects.filter(
        commerce_account_id__exact=commerce_account_id
    ).first()
    store = fb_metadata.store
    notification = WebhookNotification(
        store=store,
        topic=topic,
        event=event,
        time_sent=datetime.fromtimestamp(time_sent, timezone.utc),
        raw_notification_data=json.dumps(payload),
    )
    notification.save()

    # Setup-status events also update the cached shop statuses.
    if event == WebhookEvents.SETUP_STATUS:
        fb_metadata.fb_shop_setup_status = value.get('shop_setup', '')
        fb_metadata.fb_shop_payment_setup_status = value.get('payment_setup', '')
        fb_metadata.fb_shop_review_status = value.get('review_status', {}).get('status', '')
        fb_metadata.save()
|
# Copyright 2004-present, Facebook. All Rights Reserved.
import json
from datetime import datetime, timezone
from fb_metadata.models.fb_metadata import FacebookMetadata
from .models import WebhookNotification
from .choices import WebhookEvents
def processWebhookNotification(raw_data):
    """Handle a webhook notification payload.

    params:
        raw_data: raw JSON string received from the webhook
    """
    # Only setup statuses of commerce accounts are processed for now.
    payload = json.loads(raw_data)
    topic = payload["object"]
    entry = payload['entry'][0]
    time_sent = entry['time']
    commerce_account_id = entry['id']
    change = entry['changes'][0]
    event = change['field']
    value = change['value']

    # Record the notification on the store owning this commerce account.
    fb_metadata = FacebookMetadata.objects.filter(
        commerce_account_id__exact=commerce_account_id
    ).first()
    WebhookNotification(
        store=fb_metadata.store,
        topic=topic,
        event=event,
        time_sent=datetime.fromtimestamp(time_sent, timezone.utc),
        raw_notification_data=json.dumps(payload),
    ).save()

    # Setup-status events also refresh the cached shop statuses.
    if event == WebhookEvents.SETUP_STATUS:
        fb_metadata.fb_shop_setup_status = value.get('shop_setup', '')
        fb_metadata.fb_shop_payment_setup_status = value.get('payment_setup', '')
        fb_metadata.fb_shop_review_status = value.get('review_status', {}).get('status', '')
        fb_metadata.save()
|
en
| 0.725277
|
# Copyright 2004-present, Facebook. All Rights Reserved. process the raw data provided by a webhook notification params: raw_data: raw data in json format from webhook # currently only processing setup statuses of commerce accounts # noqa: F841 # save notification
| 2.256371
| 2
|
pyrez/api/__init__.py
|
pytheous/Pyrez
| 25
|
6627755
|
from .API import API
from .APIBase import APIBase
from .BaseSmitePaladins import BaseSmitePaladins
from .PaladinsAPI import PaladinsAPI
from .RealmRoyaleAPI import RealmRoyaleAPI
from .SmiteAPI import SmiteAPI
from .StatusPageAPI import StatusPageAPI
# The import order above works around a cyclic import between these modules.
# Public names re-exported by ``from pyrez.api import *``.
# NOTE(review): BaseSmitePaladins is imported above but omitted here —
# confirm whether that omission is intentional.
__all__ = (
"API",
"APIBase",
"PaladinsAPI",
"RealmRoyaleAPI",
"SmiteAPI",
"StatusPageAPI",
)
|
from .API import API
from .APIBase import APIBase
from .BaseSmitePaladins import BaseSmitePaladins
from .PaladinsAPI import PaladinsAPI
from .RealmRoyaleAPI import RealmRoyaleAPI
from .SmiteAPI import SmiteAPI
from .StatusPageAPI import StatusPageAPI
# Import order above works around a cyclic import between these modules.
# Public names re-exported by ``from pyrez.api import *``.
# NOTE(review): BaseSmitePaladins is imported but not listed here —
# confirm whether that omission is intentional.
__all__ = (
"API",
"APIBase",
"PaladinsAPI",
"RealmRoyaleAPI",
"SmiteAPI",
"StatusPageAPI",
)
|
es
| 0.320963
|
#Cyclic import ^
| 1.250098
| 1
|
ovejero/bnn_alexnet.py
|
swagnercarena/ovejero
| 4
|
6627756
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Build the TensorFlow model and loss functions
This module contains the functions needed to build the BNN model used in
ovejero as well as the loss functions for the different posteriors.
See the script model_trainer.py for examples of how to use these functions.
"""
import tensorflow as tf
import numpy as np
from tensorflow.keras import initializers, activations
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D, Input, Dense
from tensorflow.keras.layers import Layer, InputSpec
class AlwaysDropout(Layer):
    """Dropout layer that stays active at inference time as well as during
    training, as required by the MC-dropout BNN methodology."""

    def __init__(self, dropout_rate, **kwargs):
        """Initialize the AlwaysDropout layer.

        Parameters:
            dropout_rate (float): dropout probability in [0, 1); larger
                means more dropout.

        Raises:
            ValueError: if dropout_rate lies outside [0, 1).
        """
        super(AlwaysDropout, self).__init__(**kwargs)
        # Reject invalid rates up front.
        if dropout_rate >= 1.0 or dropout_rate < 0.0:
            raise ValueError('dropout rate of %f not between 0 and 1' % (
                dropout_rate))
        self.dropout_rate = dropout_rate

    def call(self, inputs, training=None):
        """Apply dropout to *inputs*.

        The *training* flag is accepted for Keras compatibility but ignored
        on purpose: dropout is always on.
        """
        return tf.nn.dropout(inputs, self.dropout_rate)

    def get_config(self):
        """Return the configuration dictionary required by Keras."""
        base = super(AlwaysDropout, self).get_config()
        return dict(list(base.items()) + [('dropout_rate', self.dropout_rate)])

    def compute_output_shape(self, input_shape):
        """Dropout does not change the tensor shape."""
        return input_shape
def cd_regularizer(p, kernel, kernel_regularizer, dropout_regularizer,
    input_dim):
    """
    Calculate the regularization term for concrete dropout.

    Parameters:
        p (tf.Tensor): A 1D Tensor containing the p value for dropout (between
            0 and 1).
        kernel (tf.Tensor): A 2D Tensor defining the weights of the Dense
            layer.
        kernel_regularizer (float): The relative strength of the kernel
            regularization term.
        dropout_regularizer (float): The relative strength of the dropout
            regularization term.
        input_dim (int): The dimension of the input to the layer.

    Returns:
        (tf.Tensor): The tensorflow graph to calculate the regularization term.

    Notes:
        This is currently not being used because of issues with the Keras
        framework. Once it updates this will be employed instead of dividing
        the loss into two parts.
    """
    # Bernoulli entropy term: p*log(p) + (1-p)*log(1-p).
    # Bug fix: the second term previously ADDED (1.0 - p) and K.log(1.0 - p)
    # instead of multiplying them, which disagrees with the equivalent
    # p_logit_regularizer closure defined inside ConcreteDropout.build.
    regularizer = p * K.log(p)
    regularizer += (1.0 - p) * K.log(1.0 - p)
    regularizer *= dropout_regularizer * input_dim
    # Weight term scaled by the keep probability.
    regularizer += kernel_regularizer * K.sum(K.square(kernel)) / (1.0 - p)
    return regularizer
class ConcreteDropout(Layer):
"""
This class defines a concrete dropout layer that is built around a
Keras Dense layer. The dropout is parametrized by a weight that is
optimized along with the model's weights themselves. Heavy inspiration
from code for arxiv.1705.07832.
"""
def __init__(self, output_dim, activation=None,
    kernel_initializer='glorot_uniform', bias_initializer='zeros',
    kernel_regularizer=1e-6, dropout_regularizer=1e-5, init_min=0.1,
    init_max=0.1, temp=0.1, random_seed=None, **kwargs):
    """Initialize the concrete-dropout Dense layer.

    Parameters:
        output_dim (int): Number of output parameters.
        activation (str): Activation function name, resolved through
            Keras' activation library.
        kernel_initializer (str): Kernel initializer name, resolved
            through Keras' initializer library.
        bias_initializer (str): Bias initializer name, resolved through
            Keras' initializer library.
        kernel_regularizer (float): Strength of the concrete-dropout
            kernel regularization term.
        dropout_regularizer (float): Strength of the concrete-dropout
            p regularization term.
        init_min (float): Minimum initial dropout rate.
        init_max (float): Maximum initial dropout rate.
        temp (float): Temperature controlling how closely the concrete
            distribution approximates true dropout.
        random_seed (int): Seed for random calls; None means no explicit
            seed.

    Notes:
        The regularization strengths are degenerate with the 1/N_train
        factor, so that factor is not applied here. The initial dropout
        rate is drawn uniformly between init_min and init_max.
    """
    # Mirror Keras' own handling of a legacy input_dim kwarg.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(ConcreteDropout, self).__init__(**kwargs)
    self.output_dim = output_dim
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = kernel_regularizer
    self.dropout_regularizer = dropout_regularizer
    # Store the dropout-rate bounds in logit space so any value the
    # network outputs maps to a valid probability after a sigmoid.
    self.init_min = np.log(init_min) - np.log(1.0 - init_min)
    self.init_max = np.log(init_max) - np.log(1.0 - init_max)
    self.temp = temp
    self.random_seed = random_seed
def build(self, input_shape=None):
    """Create the layer's weights: kernel, bias, and the dropout logit.

    Parameters:
        input_shape ((int,...)): Shape of the input to this Dense layer.
    """
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]
    self.kernel = self.add_weight(shape=(input_dim, self.output_dim),
        initializer=self.kernel_initializer, name='kernel')
    self.bias = self.add_weight(shape=(self.output_dim,),
        initializer=self.bias_initializer, name='bias')
    # p is parameterized in logit space; a sigmoid later maps it to (0, 1).
    self.p_logit = self.add_weight(name='p_logit', shape=(1,),
        initializer=initializers.RandomUniform(self.init_min,
        self.init_max), trainable=True)

    # Keras limitations force these regularizers to be defined as
    # closures over input_dim right here.
    def p_logit_regularizer(p_logit):
        """Entropy regularization term for the dropout probability.

        Parameters:
            p_logit (tf.Tensor): 1D tensor holding the dropout logit.

        Returns:
            (tf.Tensor): Graph computing the p_logit regularization term.
        """
        # Map the logit to a probability, then apply the Bernoulli entropy.
        p = K.sum(K.sigmoid(p_logit))
        reg = p * K.log(p)
        reg += (1.0 - p) * K.log(1.0 - p)
        return reg * self.dropout_regularizer * input_dim

    def kernel_regularizer(kernel):
        """Weight regularization term scaled by the keep probability.

        Parameters:
            kernel (tf.Tensor): 2D tensor holding the Dense kernel.

        Returns:
            (tf.Tensor): Graph computing the kernel regularization term.
        """
        keep_prob = 1.0 - K.sum(K.sigmoid(self.p_logit))
        return self.kernel_regularizer * K.sum(K.square(kernel)) / keep_prob

    # This registration mechanism is supposed to change in later versions.
    self._handle_weight_regularization('p_logit_regularizer', self.p_logit,
        p_logit_regularizer)
    self._handle_weight_regularization('kernel_regularizer', self.kernel,
        kernel_regularizer)
    # Requirement for Keras.
    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
def call(self, inputs, training=None):
    """
    Apply concrete dropout to the inputs and then the Dense computation.

    Parameters:
        inputs (tf.Keras.Layer): The inputs to the Dense layer.
        training (bool): Accepted for Keras API compatibility only.
            Concrete dropout behaves identically at train and test time,
            so this flag has no effect.

    Returns:
        (tf.Keras.Layer): The output of the Dense layer.
    """
    # Small epsilon keeps every log below finite.
    eps = K.cast_to_floatx(K.epsilon())
    # Current dropout probability (p_logit lives in logit space).
    p = K.sigmoid(self.p_logit)
    # Uniform noise drives the concrete (relaxed Bernoulli) sample. This
    # parameterization is differentiable with respect to p.
    uniform = K.random_uniform(shape=K.shape(inputs),
        seed=self.random_seed)
    concrete_logit = (K.log(p + eps) - K.log(1.0 - p + eps) +
        K.log(uniform + eps) - K.log(1.0 - uniform + eps))
    drop_mask = K.sigmoid(concrete_logit / self.temp)
    # Keep each unit with weight (1 - mask) and rescale, mirroring
    # standard (inverted) dropout.
    dropped = inputs * (1.0 - drop_mask) / (1.0 - p)
    # Now the usual Dense layer operations on the dropped-out inputs.
    output = K.bias_add(K.dot(dropped, self.kernel), self.bias,
        data_format='channels_last')
    if self.activation is not None:
        output = self.activation(output)
    return output
def compute_output_shape(self, input_shape):
    """
    Compute the shape of the output given the input. Needed for Keras
    layer.

    Parameters:
        input_shape ((int,...)): The shape of the input to our Dense layer.

    Returns:
        ((int,...)): The input shape with its final (feature) dimension
        replaced by output_dim.
    """
    return tuple(input_shape[:-1]) + (self.output_dim,)
def get_config(self):
    """
    Return the configuration dictionary required by Keras.

    Returns:
        (dict): The base Layer config merged with this layer's constructor
        arguments.

    Notes:
        Config keys must match the __init__ keyword arguments so that
        keras can rebuild the layer via from_config. The previous version
        stored self.output_shape under the key 'output_shape', which
        matched neither the constructor (output_dim) nor the saved
        attribute, breaking serialization round-trips.
    """
    # __init__ stores init_min/init_max in logit space; invert that
    # transform (sigmoid) so the serialized bounds are valid constructor
    # inputs again.
    init_min = 1.0 / (1.0 + np.exp(-self.init_min))
    init_max = 1.0 / (1.0 + np.exp(-self.init_max))
    config = {
        'output_dim': self.output_dim,
        'activation': activations.serialize(self.activation),
        'kernel_initializer': initializers.serialize(
            self.kernel_initializer),
        'bias_initializer': initializers.serialize(
            self.bias_initializer),
        'kernel_regularizer': self.kernel_regularizer,
        'dropout_regularizer': self.dropout_regularizer,
        'init_min': init_min,
        'init_max': init_max,
        'temp': self.temp,
        'random_seed': self.random_seed
    }
    base_config = super(ConcreteDropout, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class SpatialConcreteDropout(Conv2D):
    """
    This class defines a spatial concrete dropout layer that is built around a
    Keras Conv2D layer. The dropout is parametrized by a weight that is
    optimized along with the model's weights themselves. Heavy inspiration
    from code for arxiv.1705.07832.
    """
    def __init__(self, filters, kernel_size, strides=(1,1), padding='valid',
        activation=None, kernel_regularizer=1e-6, dropout_regularizer=1e-5,
        init_min=0.1, init_max=0.1, temp=0.1, random_seed=None, **kwargs):
        """
        Initialize the Spatial Concrete dropout layer. This will initialize
        the Conv2D layer along with the overhead needed for spatial concrete
        dropout.
        Parameters:
            filters (int): The number of filters to use for the Conv2D layer
            kernel_size ((int,int)): The dimensions of the kernel for the
                Conv2D layer
            strides ((int,int)): The stride to take in each direction for the
                Conv2D layer.
            padding (str): What type of padding to use to get the desired
                output dimensions from the Conv2D layer. Either valid or same
            activation (str): The type of activation function to be used. Will
                be passed into tensorflow's activation function library.
            kernel_regularizer (float): The strength of the concrete dropout
                regularization term
            dropout_regularizer (float): The strength of the concrete dropout
                p regularization term
            init_min (float): The minimum initial value of the dropout rate
            init_max (float): The maximum initial value of the dropout rate
            temp (float): The temperature that defines how close the concrete
                distribution will be to true dropout.
            random_seed (int): A seed to use in the random function calls. If
                None no explicit seed will be used.
        Returns:
            (keras.Layer): The initialized SpatialConcreteDropout layer. Must
            still be built.
        Notes:
            Technically the regularization terms must be divided by the number
            of training examples. This is degenerate with the value of the
            regularizers, so we do not specify it here.
            The initial dropout rate will be drawn from a uniform distribution
            with the bounds passed into init.
        """
        super(SpatialConcreteDropout, self).__init__(filters, kernel_size,
            strides=strides, padding=padding, activation=activation, **kwargs)
        # Need to change name to avoid issues with Conv2D (which already
        # defines a kernel_regularizer attribute of its own).
        self.cd_kernel_regularizer = kernel_regularizer
        self.dropout_regularizer = dropout_regularizer
        # Convert the dropout-rate bounds to logit space so any real value
        # produced by the optimizer maps back to a valid probability.
        self.init_min = np.log(init_min) - np.log(1.0 - init_min)
        self.init_max = np.log(init_max) - np.log(1.0 - init_max)
        self.temp = temp
        self.random_seed = random_seed
    def build(self, input_shape=None):
        """
        Build the weights and operations that the network will use.
        Parameters:
            input_shape ((int,...)): The shape of the input to our Conv2D layer.
        Notes:
            The convolution kernel and bias are created by the inherited
            Conv2D build; p_logit is added afterwards, so helper code that
            indexes layer.weights[2] (see p_value) finds the dropout weight.
        """
        super(SpatialConcreteDropout, self).build(input_shape)
        # Channels-last layout: axis 3 is the channel count.
        input_dim = input_shape[3]
        # kernel already set by inherited build function.
        # Although we define p in logit space, we then apply the sigmoid
        # operation to get the desired value between 0 and 1.
        self.p_logit = self.add_weight(name='p_logit',shape=(1,),
            initializer=initializers.RandomUniform(self.init_min,
            self.init_max), trainable=True)
        # Because of issues with Keras, these functions need to be defined
        # here.
        def p_logit_regularizer(p_logit):
            """
            Calculate the regularization term for p_logit.
            Parameters:
                p_logit (tf.Tensor): A 1D Tensor containing the p_logit value
                    for dropout.
            Returns:
                (tf.Tensor): The tensorflow graph to calculate the
                p_logit regularization term.
            """
            # Although we define p in logit space, we then apply the sigmoid
            # operation to get the desired value between 0 and 1.
            p = K.sum(K.sigmoid(p_logit))
            # Negative Bernoulli entropy scaled by the regularization
            # strength and the channel count.
            regularizer = p * K.log(p)
            regularizer += (1.0 - p) * K.log(1.0 - p)
            regularizer *= self.dropout_regularizer * input_dim
            return regularizer
        def kernel_regularizer(kernel):
            """
            Calculate the regularization term for concrete dropout.
            Parameters:
                kernel (tf.Tensor): The kernel tensor for our Conv2D layer
                    computation.
            Returns:
                (tf.Tensor): The tensorflow graph to calculate the
                kernel regularization term.
            """
            # L2 penalty rescaled by 1/(1-p) to compensate for the input
            # rescaling applied in call().
            regularizer = self.cd_kernel_regularizer * K.sum(
                K.square(kernel)) / (1.0 - K.sum(K.sigmoid(self.p_logit)))
            return regularizer
        # This is supposed to change in later versions.
        # NOTE(review): _handle_weight_regularization is a private Keras
        # API; confirm it survives tensorflow upgrades.
        self._handle_weight_regularization('p_logit_regularizer',self.p_logit,
            p_logit_regularizer)
        self._handle_weight_regularization('kernel_regularizer',self.kernel,
            kernel_regularizer)
        self.built = True
    def call(self, inputs, training=None):
        """
        The function that takes the inputs of the layer and conducts the
        Conv2D computation with spatial concrete dropout.
        Parameters:
            inputs (tf.Keras.Layer): The inputs to the Conv2D layer.
            training (bool): A required input for call. Setting training to
                true or false does nothing because concrete dropout behaves the
                same way in both cases.
        Returns:
            (tf.Keras.Layer): The output of the Conv2D layer.
        """
        # Small epsilon parameter needed for stable optimization
        eps = K.cast_to_floatx(K.epsilon())
        # Build the random tensor for dropout from uniform noise. This
        # formulation allows for a derivative with respect to p.
        input_shape = K.shape(inputs)
        # One noise value per (batch, channel): whole feature maps are
        # dropped together, which is what makes the dropout "spatial".
        noise_shape = (input_shape[0], 1, 1, input_shape[3])
        unif_noise = K.random_uniform(shape=noise_shape,
            seed=self.random_seed)
        drop_prob = (K.log(K.sigmoid(self.p_logit)+eps) -
            K.log(1.0-K.sigmoid(self.p_logit)+eps) + K.log(unif_noise + eps)
            - K.log(1.0 - unif_noise + eps))
        drop_prob = K.sigmoid(drop_prob/self.temp)
        # Drop and rescale, as in standard (inverted) dropout.
        inputs *= (1.0 - drop_prob)
        inputs /= (1.0 - K.sigmoid(self.p_logit))
        # Now just carry out the basic operations of a Conv2D layer.
        return super(SpatialConcreteDropout, self).call(inputs)
    def compute_output_shape(self, input_shape):
        """
        Compute the shape of the output given the input. Needed for Keras
        layer.
        Parameters:
            input_shape ((int,...)): The shape of the input to our Conv2D
                layer.
        Returns:
            ((int,...)): The output shape of the layer (dropout does not
            change the shape, so this defers to Conv2D).
        """
        return super(SpatialConcreteDropout, self).compute_output_shape(
            input_shape)
def dropout_alexnet(img_size, num_params, kernel_regularizer=1e-6,
    dropout_rate=0.1,random_seed=None):
    """
    Build the tensorflow graph for the alexnet BNN.

    Parameters:
        img_size ((int,int,int)): A tuple with shape (pix,pix,freq) that
            describes the size of the input images
        num_params (int): The number of lensing parameters to predict
        kernel_regularizer (float): The strength of the l2 norm (associated
            to the strength of the prior on the weights)
        dropout_rate (float): The dropout rate to use for the layers.
        random_seed (int): Accepted for API compatibility but currently
            unused -- AlwaysDropout draws its noise without a seed.

    Returns:
        (tf.Tensor): The model (i.e. the tensorflow graph for the model)
    """
    inputs = Input(shape=img_size)
    # Scale the l2 strength by the keep probability, as required when
    # combining l2 regularization with dropout.
    regularizer = tf.keras.regularizers.l2(kernel_regularizer*(1-dropout_rate))

    def maybe_drop(tensor):
        # Always-on dropout precedes each weight layer unless disabled.
        if dropout_rate > 0:
            return AlwaysDropout(dropout_rate)(tensor)
        return tensor

    # AlexNet convolutional stack: (conv kwargs, pool flag) per layer.
    conv_specs = [
        dict(filters=64, kernel_size=(5,5), strides=(2,2), padding='valid',
            input_shape=img_size),
        dict(filters=192, kernel_size=(5,5), strides=(1,1), padding='same'),
        dict(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'),
        dict(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'),
        dict(filters=256, kernel_size=(3,3), strides=(1,1), padding='same'),
    ]
    # Max pooling follows layers 1, 2 and 5 (indices 0, 1 and 4).
    pool_after = (0, 1, 4)
    x = inputs
    for layer_index, spec in enumerate(conv_specs):
        x = maybe_drop(x)
        x = Conv2D(activation='relu', kernel_regularizer=regularizer,
            **spec)(x)
        if layer_index in pool_after:
            x = MaxPooling2D(pool_size=(3,3), strides=(2,2),
                padding='same')(x)
    # Pass to the fully connected head.
    x = Flatten()(x)
    for _ in range(2):
        x = maybe_drop(x)
        x = Dense(4096, activation='relu',
            kernel_regularizer=regularizer)(x)
    # Output layer (no activation: raw parameter predictions).
    outputs = Dense(num_params,
        kernel_regularizer=regularizer)(maybe_drop(x))
    return Model(inputs=inputs, outputs=outputs)
def concrete_alexnet(img_size, num_params, kernel_regularizer=1e-6,
    dropout_regularizer=1e-5, init_min=0.1, init_max=0.1,
    temp=0.1, random_seed=None):
    """
    Build the tensorflow graph for the concrete dropout alexnet BNN.

    Parameters:
        img_size ((int,int,int)): A tuple with shape (pix,pix,freq) that
            describes the size of the input images
        num_params (int): The number of lensing parameters to predict
        kernel_regularizer (float): The strength of the l2 norm (associated
            to the strength of the prior on the weights)
        dropout_regularizer (float): The stronger it is, the more concrete
            dropout will tend towards larger dropout rates.
        init_min (float): The minimum value that the dropout weight p will
            be initialized to.
        init_max (float): The maximum value that the dropout weight p will
            be initialized to.
        temp (float): The temperature that defines how close the concrete
            distribution will be to true dropout.
        random_seed (int): A seed to use in the random function calls. If
            None no explicit seed will be used.

    Returns:
        (tf.Tensor): The model (i.e. the tensorflow graph for the model)

    Notes:
        While the concrete dropout implementation works, the training of the
        dropout terms is very slow. It's possible that modifying the learning
        rate schedule may help.
    """
    inputs = Input(shape=img_size)
    # Every concrete dropout layer shares the same dropout hyperparameters.
    cd_kwargs = dict(kernel_regularizer=kernel_regularizer,
        dropout_regularizer=dropout_regularizer, init_min=init_min,
        init_max=init_max, temp=temp, random_seed=random_seed)
    # AlexNet convolutional stack, one spec per layer.
    conv_specs = [
        dict(filters=64, kernel_size=(5,5), strides=(2,2), padding='valid',
            input_shape=img_size),
        dict(filters=192, kernel_size=(5,5), strides=(1,1), padding='same'),
        dict(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'),
        dict(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'),
        dict(filters=256, kernel_size=(3,3), strides=(1,1), padding='same'),
    ]
    # Max pooling follows layers 1, 2 and 5 (indices 0, 1 and 4).
    pool_after = (0, 1, 4)
    x = inputs
    for layer_index, spec in enumerate(conv_specs):
        x = SpatialConcreteDropout(activation='relu', **spec,
            **cd_kwargs)(x)
        if layer_index in pool_after:
            x = MaxPooling2D(pool_size=(3,3), strides=(2,2),
                padding='same')(x)
    # Pass to the fully connected head.
    x = Flatten()(x)
    for _ in range(2):
        x = ConcreteDropout(4096, activation='relu', **cd_kwargs)(x)
    # Output layer (no activation: raw parameter predictions).
    outputs = ConcreteDropout(num_params, **cd_kwargs)(x)
    return Model(inputs=inputs, outputs=outputs)
class LensingLossFunctions:
    """
    A class used to generate the loss functions for the three types of bayesian
    nn models we have implemented: diagonal covariance, full covariance,
    and mixture of full covariances. Currently only two gaussians are allowed
    in the mixture.
    """
    def __init__(self,flip_pairs,num_params):
        """
        Initialize the class with the pairs of parameters that must be flipped.
        These are parameters like shear and ellipticity that have been defined
        such that negating both parameters gives the same
        physical definition of the system.
        Parameters:
            flip_pairs ([[int,int,...],...]): A list of pairs of numbers to
                conduct the flip operation on. If empty no flip pairs will be
                used. Note if you also want to consider two sets of parameters
                being flipped at the same time, that must be added to this list.
            num_params (int): The number of parameters to predict.
        """
        self.flip_pairs = flip_pairs
        self.num_params = num_params
        # Calculate the split list for the lower triangular matrix: row i
        # of the Cholesky factor has i+1 non-zero entries.
        self.split_list = []
        for i in range(1,num_params+1):
            self.split_list += [i]
        # Now for each flip pair (including no flip) we will add a flip
        # matrix to our list. The first entry is the identity (no flip).
        self.flip_mat_list = [tf.linalg.diag(tf.constant(np.ones(
            self.num_params),dtype=tf.float32))]
        for flip_pair in self.flip_pairs:
            # Initialize a numpy array since this is the easiest way
            # to flexibly set the tensor.
            const_initializer = np.ones(self.num_params)
            const_initializer[flip_pair] = -1
            self.flip_mat_list.append(tf.linalg.diag(tf.constant(
                const_initializer,dtype=tf.float32)))
    def mse_loss(self, y_true, output):
        """
        Returns the MSE loss of the predicted parameters. Will ignore parameters
        associated with the covariance matrix.
        Parameters:
            y_true (tf.Tensor): The true values of the parameters
            output (tf.Tensor): The predicted values of the lensing parameters.
                This assumes the first num_params entries are the parameter
                predictions; anything after them is ignored.
        Returns:
            (tf.Tensor): The mse loss function.
        Notes:
            This function should never be used as a loss function. It is useful
            as a metric to understand what portion of the reduction in the loss
            function can be attributed to improved parameter accuracy. Also
            note that for the gmm models the output will default to the first
            Gaussian for this metric.
        """
        # Keep only the first num_params outputs; discard covariance terms.
        y_pred, _ = tf.split(output,num_or_size_splits=(self.num_params,-1),
            axis=-1)
        loss_list = []
        # Evaluate the MSE under every allowed parameter flip and keep the
        # smallest, since flipped predictions are physically equivalent.
        for flip_mat in self.flip_mat_list:
            loss_list.append(tf.reduce_mean(tf.square(
                tf.matmul(y_pred,flip_mat)-y_true),axis=-1))
        loss_stack = tf.stack(loss_list,axis=-1)
        return tf.reduce_min(loss_stack,axis=-1)
    def log_gauss_diag(self,y_true,y_pred,std_pred):
        """
        Return the negative log posterior of a Gaussian with diagonal
        covariance matrix
        Parameters:
            y_true (tf.Tensor): The true values of the parameters
            y_pred (tf.Tensor): The predicted value of the parameters
            std_pred (tf.Tensor): The predicted diagonal entries of the
                covariance. Note that std_pred is assumed to be the log of the
                covariance matrix values.
        Returns:
            (tf.Tensor): The TF graph for calculating the nlp
        Notes:
            This loss does not include the constant factor of 1/(2*pi)^(d/2).
        """
        # 0.5 * sum((y-mu)^2 / sigma^2) + 0.5 * sum(log sigma^2), with
        # std_pred = log sigma^2.
        return 0.5*tf.reduce_sum(tf.multiply(tf.square(y_pred-y_true),
            tf.exp(-std_pred)),axis=-1) + 0.5*tf.reduce_sum(
            std_pred,axis=-1)
    def diagonal_covariance_loss(self,y_true,output):
        """
        Return the loss function assuming a diagonal covariance matrix
        Parameters:
            y_true (tf.Tensor): The true values of the lensing parameters
            output (tf.Tensor): The predicted values of the lensing parameters.
                This should include 2*self.num_params parameters to account for
                the diagonal entries of our covariance matrix. Covariance matrix
                values are assumed to be in log space.
        Returns:
            (tf.Tensor): The loss function (i.e. the tensorflow graph for it).
        """
        # First split the data into predicted parameters and covariance matrix
        # element
        y_pred, std_pred = tf.split(output,num_or_size_splits=2,axis=-1)
        # Add each possible flip to the loss list. We will then take the
        # minimum.
        loss_list = []
        for flip_mat in self.flip_mat_list:
            loss_list.append(self.log_gauss_diag(y_true,
                tf.matmul(y_pred,flip_mat),std_pred))
        loss_stack = tf.stack(loss_list,axis=-1)
        return tf.reduce_min(loss_stack,axis=-1)
    def construct_precision_matrix(self,L_mat_elements):
        """
        Take the matrix elements for the log cholesky decomposition and
        convert them to the precision matrix. Also return the value of
        the diagonal elements before exponentiation, since we get that for
        free.
        Parameters:
            L_mat_elements (tf.Tensor): A tensor of length
                num_params*(num_params+1)/2 that define the lower triangular
                matrix elements of the log cholesky decomposition
        Returns:
            ((tf.Tensor,tf.Tensor)): Both the precision matrix and the diagonal
            elements (before exponentiation) of the log cholesky L matrix.
            Note that this second value is important for the posterior
            calculation.
        """
        # First split the tensor into the elements that will populate each row
        cov_elements_split = tf.split(L_mat_elements,
            num_or_size_splits=self.split_list,axis=-1)
        # Before we stack these elements, we have to pad them with zeros
        # (corresponding to the 0s of the lower triangular matrix).
        cov_elements_stack = []
        pad_offset = 1
        for cov_element in cov_elements_split:
            # Use tf pad function since it's likely the fastest option.
            pad = tf.constant([[0,0],[0,self.num_params-pad_offset]])
            cov_elements_stack.append(tf.pad(cov_element,pad))
            pad_offset+=1
        # Stack the tensors to form our matrix. Use axis=-2 to avoid issues
        # with batches of matrices being passed in.
        L_mat = tf.stack(cov_elements_stack,axis=-2)
        # Pull out the diagonal part, and then (since we're using log
        # cholesky) exponentiate the diagonal.
        L_mat_diag = tf.linalg.diag_part(L_mat)
        L_mat = tf.linalg.set_diag(L_mat,tf.exp(L_mat_diag))
        # Calculate the actual precision matrix: P = L L^T.
        prec_mat = tf.matmul(L_mat,tf.transpose(L_mat,perm=[0,2,1]))
        return prec_mat, L_mat_diag, L_mat
    def log_gauss_full(self,y_true,y_pred,prec_mat,L_diag):
        """
        Return the negative log posterior of a Gaussian with full
        covariance matrix
        Parameters:
            y_true (tf.Tensor): The true values of the parameters
            y_pred (tf.Tensor): The predicted value of the parameters
            prec_mat: The precision matrix
            L_diag (tf.Tensor): The diagonal (non exponentiated) values of the
                log cholesky decomposition of the precision matrix
        Returns:
            (tf.Tensor): The TF graph for calculating the nlp
        Notes:
            This loss does not include the constant factor of 1/(2*pi)^(d/2).
        """
        y_dif = y_true - y_pred
        # -sum(L_diag) is -0.5*log|P| because diag(L) = exp(L_diag); the
        # second term is the quadratic form 0.5 * y_dif^T P y_dif.
        return -tf.reduce_sum(L_diag,-1) + 0.5 * tf.reduce_sum(
            tf.multiply(y_dif,tf.reduce_sum(tf.multiply(tf.expand_dims(
            y_dif,-1),prec_mat),axis=-2)),-1)
    def full_covariance_loss(self,y_true,output):
        """
        Return the loss function assuming a full covariance matrix
        Parameters:
            y_true (tf.Tensor): The true values of the lensing parameters
            output (tf.Tensor): The predicted values of the lensing parameters.
                This should include self.num_params parameters for the prediction
                and self.num_params*(self.num_params+1)/2 parameters for the
                lower triangular log cholesky decomposition
        Returns:
            (tf.Tensor): The loss function (i.e. the tensorflow graph for it).
        """
        # Start by dividing the output into the L_elements and the prediction
        # values.
        L_elements_len = int(self.num_params*(self.num_params+1)/2)
        y_pred, L_mat_elements = tf.split(output,
            num_or_size_splits=[self.num_params,L_elements_len],axis=-1)
        # Build the precision matrix and extract the diagonal part
        prec_mat, L_diag, _ = self.construct_precision_matrix(L_mat_elements)
        # Add each possible flip to the loss list. We will then take the
        # minimum.
        loss_list = []
        for flip_mat in self.flip_mat_list:
            loss_list.append(self.log_gauss_full(y_true,
                tf.matmul(y_pred,flip_mat),prec_mat,L_diag))
        loss_stack = tf.stack(loss_list,axis=-1)
        return tf.reduce_min(loss_stack,axis=-1)
    def log_gauss_gm_full(self,y_true,y_preds,prec_mats,L_diags,pis):
        """
        Return the negative log posterior of a GMM with full
        covariance matrix for each GM. Note this code allows for any number
        of GMMs.
        Parameters:
            y_true (tf.Tensor): The true values of the parameters
            y_preds ([tf.Tensor,...]): A list of the predicted value of the
                parameters
            prec_mats ([tf.Tensor,...]): A list of the precision matrices
            L_diags ([tf.Tensor,...]): A list of the diagonal (non exponentiated)
                values of the log cholesky decomposition of the precision
                matrices
            pis ([tf.Tensor,...]): A list of the mixture weights, one per
                Gaussian component.
        Returns:
            (tf.Tensor): The TF graph for calculating the nlp
        Notes:
            This loss does not include the constant factors of 1/(2*pi)^(d/2).
        """
        # Stack together the loss to be able to do the logsumexp trick
        loss_list = []
        for p_i in range(len(y_preds)):
            # Since we're summing the probabilities using a logsumexp,
            # we don't want the negative here. Also note that we add an
            # epsilon to our log operation to avoid nan gradients.
            loss_list.append(-self.log_gauss_full(y_true,y_preds[p_i],
                prec_mats[p_i],L_diags[p_i])+tf.squeeze(tf.math.log(
                pis[p_i]+K.epsilon()),axis=-1))
        # Use tf implementation of logsumexp (numerically stable mixture
        # log-likelihood).
        return -tf.reduce_logsumexp(tf.stack(loss_list,axis=-1),axis=-1)
    def gm_full_covariance_loss(self,y_true,output):
        """
        Return the loss function assuming a mixture of two gaussians each with
        a full covariance matrix
        Parameters:
            y_true (tf.Tensor): The true values of the lensing parameters
            output (tf.Tensor): The predicted values of the lensing parameters.
                This should include 2 gm which consists of self.num_params
                parameters for the prediction and
                self.num_params*(self.num_params+1)/2 parameters for the
                lower triangular log cholesky decomposition of each gm.
                It should also include one final parameter for the ratio
                between the two gms.
        Returns:
            (tf.Tensor): The loss function (i.e. the tensorflow graph for it).
        """
        # Start by separating out the predictions for each gaussian model.
        L_elements_len = int(self.num_params*(self.num_params+1)/2)
        y_pred1, L_mat_elements1, y_pred2, L_mat_elements2, pi_logit = tf.split(
            output,num_or_size_splits=[self.num_params,L_elements_len,
            self.num_params,L_elements_len,1],axis=-1)
        # Set the probability between 0.5 and 1.0. In this parameterization the
        # first Gaussian is always favored.
        pi = 0.5+tf.sigmoid(pi_logit)/2.0
        # Now build the precision matrix for our two models and extract the
        # diagonal components used for the loss calculation
        prec_mat1, L_diag1, _ = self.construct_precision_matrix(L_mat_elements1)
        prec_mat2, L_diag2, _ = self.construct_precision_matrix(L_mat_elements2)
        # Add each possible flip to the loss list. We will then take the
        # minimum.
        loss_list = []
        prec_mats = [prec_mat1,prec_mat2]
        L_diags = [L_diag1,L_diag2]
        pis = [pi,1-pi]
        # Consider every combination of flips across the two components.
        for flip_mat1 in self.flip_mat_list:
            for flip_mat2 in self.flip_mat_list:
                # The y_preds depends on the selected flips
                y_preds = [tf.matmul(y_pred1,flip_mat1),
                    tf.matmul(y_pred2,flip_mat2)]
                loss_list.append(self.log_gauss_gm_full(y_true,y_preds,
                    prec_mats,L_diags,pis))
        loss_stack = tf.stack(loss_list,axis=-1)
        return tf.reduce_min(loss_stack,axis=-1)
def p_value(model):
    """
    Returns the average value of the dropout in each concrete layer.

    Parameters:
        model (keras.Model): A Keras model from which the dropout values
            will be extracted.

    Returns:
        (callable): A Keras-metric-style function of (y_true, y_pred) that
        ignores both arguments and reports the mean dropout probability.

    Notes:
        This is a hack that allows us to easily keep track of the dropout
        value during training. It assumes that every layer whose name
        contains 'dropout' stores p_logit as its third weight (the
        add_weight order used by the concrete dropout layers in this
        module). NOTE(review): a plain AlwaysDropout layer would also match
        the name test but has no weights -- only use this metric on
        concrete dropout models.
    """
    def p_fake_loss(y_true, y_pred):
        # Neither argument is used; we only inspect the model's weights.
        dropout_ps = [tf.sigmoid(layer.weights[2])
            for layer in model.layers if 'dropout' in layer.name]
        return tf.reduce_mean(dropout_ps)
    return p_fake_loss
# NOTE: chunk-boundary artifact; the module content repeats below.
# -*- coding: utf-8 -*-
"""
Build the TensorFlow model and loss functions
This module contains the functions needed to build the BNN model used in
ovejero as well as the loss functions for the different posteriors.
See the script model_trainer.py for examples of how to use these functions.
"""
import tensorflow as tf
import numpy as np
from tensorflow.keras import initializers, activations
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D, Input, Dense
from tensorflow.keras.layers import Layer, InputSpec
class AlwaysDropout(Layer):
    """
    A dropout layer that stays active at inference time as well as during
    training, as required by the BNN (Monte Carlo dropout) methodology.
    """
    def __init__(self, dropout_rate, **kwargs):
        """
        Initialize the AlwaysDropout layer.

        Parameters:
            dropout_rate (float): A number in the range [0,1) that will
                serve as the dropout rate for the layer. A larger rate
                means more dropout.

        Raises:
            ValueError: If dropout_rate falls outside [0,1).
        """
        super(AlwaysDropout, self).__init__(**kwargs)
        # Reject rates outside the valid [0,1) range up front.
        if dropout_rate >= 1.0 or dropout_rate < 0.0:
            raise ValueError('dropout rate of %f not between 0 and 1' % (
                dropout_rate))
        self.dropout_rate = dropout_rate
    def call(self, inputs, training=None):
        """
        Apply dropout to the inputs, regardless of the training flag.

        Parameters:
            inputs (tf.Keras.Layer): The inputs to the layer.
            training (bool): Accepted for Keras API compatibility only;
                dropout is applied in both modes.

        Returns:
            (tf.Keras.Layer): The inputs with dropout applied.
        """
        return tf.nn.dropout(inputs, self.dropout_rate)
    def get_config(self):
        """
        Return the configuration dictionary required by Keras.
        """
        base_config = super(AlwaysDropout, self).get_config()
        return {**base_config, 'dropout_rate': self.dropout_rate}
    def compute_output_shape(self, input_shape):
        """
        Compute the shape of the output given the input. Needed for Keras
        layer.

        Parameters:
            input_shape ((int,...)): The shape of the input to the layer.

        Returns:
            ((int,...)): The same shape -- dropout does not reshape.
        """
        return input_shape
def cd_regularizer(p, kernel, kernel_regularizer, dropout_regularizer,
    input_dim):
    """
    Calculate the regularization term for concrete dropout.

    Parameters:
        p (tf.Tensor): A 1D Tensor containing the p value for dropout
            (between 0 and 1).
        kernel (tf.Tensor): A 2D Tensor defining the weights of the Dense
            layer
        kernel_regularizer (float): The relative strength of the kernel
            regularization term.
        dropout_regularizer (float): The relative strength of the dropout
            regularization term.
        input_dim (int): The dimension of the input to the layer.

    Returns:
        (tf.Tensor): The tensorflow graph to calculate the regularization
        term.

    Notes:
        This is currently not being used because of issues with the Keras
        framework. Once it updates this will be employed instead of dividing
        the loss into two parts.
    """
    # Negative entropy of a Bernoulli(p): p*log(p) + (1-p)*log(1-p). The
    # previous version added K.log(1.0 - p) instead of multiplying it by
    # (1.0 - p), which disagreed with the in-use per-layer regularizers
    # defined inside the layers' build() methods.
    regularizer = p * K.log(p)
    regularizer += (1.0 - p) * K.log(1.0 - p)
    regularizer *= dropout_regularizer * input_dim
    # L2 kernel penalty rescaled by 1/(1-p) to account for the input
    # rescaling that dropout applies.
    regularizer += kernel_regularizer * K.sum(K.square(kernel)) / (1.0 - p)
    return regularizer
class ConcreteDropout(Layer):
"""
This class defines a concrete dropout layer that is built around a
Keras Dense layer. The dropout is parametrized by a weight that is
optimized along with the model's weights themselves. Heavy inspiration
from code for arxiv.1705.07832.
"""
def __init__(self, output_dim, activation=None,
    kernel_initializer='glorot_uniform', bias_initializer='zeros',
    kernel_regularizer=1e-6, dropout_regularizer=1e-5, init_min=0.1,
    init_max=0.1, temp=0.1, random_seed=None, **kwargs):
    """
    Initialize the Concrete dropout Dense layer: the dense layer state plus
    the overhead needed for concrete dropout.

    Parameters:
        output_dim (int): The number of output parameters
        activation (str): The type of activation function to be used. Will
            be passed into tensorflow's activation function library.
        kernel_initializer (str): The type of initializer to use for the
            kernel. Will be passed to tensorflow's initializer library
        bias_initializer (str): The type of initializer to use for the
            bias. Will be passed to tensorflow's initializer library
        kernel_regularizer (float): The strength of the concrete dropout
            regularization term
        dropout_regularizer (float): The strength of the concrete dropout
            p regularization term
        init_min (float): The minimum initial value of the dropout rate
        init_max (float): The maximum initial value of the dropout rate
        temp (float): The temperature that defines how close the concrete
            distribution will be to true dropout.
        random_seed (int): A seed to use in the random function calls. If
            None no explicit seed will be used.

    Returns:
        (keras.Layer): The initialized ConcreteDropout layer. Must still be
        built.

    Notes:
        Technically the regularization terms must be divided by the number
        of training examples. This is degenerate with the value of the
        regularizers, so we do not specify it here.
        The initial dropout rate will be drawn from a uniform distribution
        with the bounds passed into init.
    """
    # Mirror the Keras Dense behavior of promoting input_dim to
    # input_shape.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    # Initialize the base Layer machinery before saving our own state.
    super(ConcreteDropout, self).__init__(**kwargs)
    self.output_dim = output_dim
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = kernel_regularizer
    self.dropout_regularizer = dropout_regularizer

    # Store the dropout-rate bounds in logit space so that any real value
    # the optimizer produces maps back to a valid probability.
    def to_logit(value):
        return np.log(value) - np.log(1.0 - value)

    self.init_min = to_logit(init_min)
    self.init_max = to_logit(init_max)
    self.temp = temp
    self.random_seed = random_seed
	def build(self, input_shape=None):
		"""
		Build the weights and operations that the network will use.

		Parameters:
			input_shape ((int,...)): The shape of the input to our Dense layer.
				Must have at least 2 dimensions (batch dimension plus
				features).
		"""
		assert len(input_shape) >= 2
		input_dim = input_shape[-1]
		# The standard Dense layer weights: kernel and bias.
		self.kernel = self.add_weight(shape=(input_dim, self.output_dim),
			initializer=self.kernel_initializer, name='kernel')
		self.bias = self.add_weight(shape=(self.output_dim,),
			initializer=self.bias_initializer, name='bias')
		# Although we define p in logit space, we then apply the sigmoid
		# operation to get the desired value between 0 and 1.
		self.p_logit = self.add_weight(name='p_logit', shape=(1,),
			initializer=initializers.RandomUniform(self.init_min,
			self.init_max), trainable=True)
		# Because of issues with Keras, these functions need to be defined
		# here. They close over self and input_dim, which the Keras
		# regularizer API does not otherwise give us access to.
		def p_logit_regularizer(p_logit):
			"""
			Calculate the regularization term for p_logit.

			Parameters:
				p_logit (tf.Tensor): A 1D Tensor containing the p_logit value
					for dropout.

			Returns:
				(tf.Tensor): The tensorflow graph to calculate the
				p_logit regularization term.
			"""
			# Although we define p in logit space, we then apply the sigmoid
			# operation to get the desired value between 0 and 1.
			p = K.sum(K.sigmoid(p_logit))
			# p*log(p) + (1-p)*log(1-p), scaled by the dropout regularizer
			# strength and the size of the layer input.
			regularizer = p * K.log(p)
			regularizer += (1.0 - p) * K.log(1.0 - p)
			regularizer *= self.dropout_regularizer * input_dim
			return regularizer
		def kernel_regularizer(kernel):
			"""
			Calculate the regularization term for concrete dropout.

			Parameters:
				kernel (tf.Tensor): A 2D Tensor containing the kernel for our
					Dense layer computation.

			Returns:
				(tf.Tensor): The tensorflow graph to calculate the
				kernel regularization term.
			"""
			# l2 penalty on the kernel divided by the keep probability
			# (1 - p), tying the weight prior to the dropout rate.
			regularizer = self.kernel_regularizer * K.sum(
				K.square(kernel)) / (1.0 - K.sum(K.sigmoid(self.p_logit)))
			return regularizer
		# This is supposed to change in later versions.
		# NOTE(review): _handle_weight_regularization is a private Keras API;
		# verify it still exists when upgrading TensorFlow.
		self._handle_weight_regularization('p_logit_regularizer',self.p_logit,
			p_logit_regularizer)
		self._handle_weight_regularization('kernel_regularizer',self.kernel,
			kernel_regularizer)
		# Requirement for Keras
		self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
		self.built = True
def call(self, inputs, training=None):
"""
The function that takes the inputs of the layer and conducts the
Dense layer multiplication with concrete dropout.
Parameters:
inputs (tf.Keras.Layer): The inputs to the Dense layer.
training (bool): A required input for call. Setting training to
true or false does nothing because concrete dropout behaves
the same way in both cases.
Returns:
(tf.Keras.Layer): The output of the Dense layer.
"""
# Small epsilon parameter needed for stable optimization
eps = K.cast_to_floatx(K.epsilon())
# Build the random tensor for dropout from uniform noise. This
# formulation allows for a derivative with respect to p.
unif_noise = K.random_uniform(shape=K.shape(inputs),
seed=self.random_seed)
drop_prob = (K.log(K.sigmoid(self.p_logit)+eps) - K.log(1.0-
K.sigmoid(self.p_logit) + eps) + K.log(unif_noise + eps) -
K.log(1.0 - unif_noise + eps))
drop_prob = K.sigmoid(drop_prob / self.temp)
inputs *= (1.0 - drop_prob)
inputs /= (1.0 - K.sigmoid(self.p_logit))
# Now just carry out the basic operations of a Dense layer.
output = K.dot(inputs, self.kernel)
output = K.bias_add(output, self.bias, data_format='channels_last')
if self.activation is not None:
output = self.activation(output)
return output
def compute_output_shape(self, input_shape):
"""
Compute the shape of the output given the input. Needed for Keras
layer.
Parameters:
input_shape ((int,...)): The shape of the input to our Dense layer.
Returns:
((int,...)): The output shape of the layer.
"""
output_shape = list(input_shape)
output_shape[-1] = self.output_dim
return tuple(output_shape)
def get_config(self):
"""
Return the configuration dictionary required by Keras.
"""
config = {
'output_shape': self.output_shape,
'activation': activations.serialize(self.activation),
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'bias_initializer': initializers.serialize(
self.bias_initializer),
'kernel_regularizer': self.kernel_regularizer,
'dropout_regularizer': self.dropout_regularizer
}
base_config = super(ConcreteDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SpatialConcreteDropout(Conv2D):
	"""
	This class defines a spatial concrete dropout layer that is built around a
	Keras Conv2D layer. The dropout is parametrized by a weight that is
	optimized along with the model's weights themselves. Heavy inspiration
	from code for arxiv.1705.07832.
	"""
	def __init__(self, filters, kernel_size, strides=(1,1), padding='valid',
		activation=None, kernel_regularizer=1e-6, dropout_regularizer=1e-5,
		init_min=0.1, init_max=0.1, temp=0.1, random_seed=None, **kwargs):
		"""
		Initialize the spatial concrete dropout layer. This will initialize
		the Conv2D layer along with the overhead needed for spatial concrete
		dropout.

		Parameters:
			filters (int): The number of filters to use for the Conv2D layer
			kernel_size ((int,int)): The dimensions of the kernel for the
				Conv2D layer
			strides ((int,int)): The stride to take in each direction for the
				Conv2D layer.
			padding (str): What type of padding to use to get the desired
				output dimensions from the Conv2D layer. Either valid or same
			activation (str): The type of activation function to be used. Will
				be passed into tensorflow's activation function library.
			kernel_regularizer (float): The strength of the concrete dropout
				regularization term
			dropout_regularizer (float): The strength of the concrete dropout
				p regularization term
			init_min (float): The minimum initial value of the dropout rate
			init_max (float): The maximum initial value of the dropout rate
			temp (float): The temperature that defines how close the concrete
				distribution will be to true dropout.
			random_seed (int): A seed to use in the random function calls. If
				None no explicit seed will be used.

		Returns:
			(keras.Layer): The initialized SpatialConcreteDropout layer. Must
			still be built.

		Notes:
			Technically the regularization terms must be divided by the number
			of training examples. This is degenerate with the value of the
			regularizers, so we do not specify it here.
			The initial dropout rate will be drawn from a uniform distribution
			with the bounds passed into init.
		"""
		super(SpatialConcreteDropout, self).__init__(filters, kernel_size,
			strides=strides, padding=padding, activation=activation, **kwargs)
		# Need to change name to avoid issues with Conv2D
		self.cd_kernel_regularizer = kernel_regularizer
		self.dropout_regularizer =dropout_regularizer
		# Convert the initial dropout bounds to logit space (the sigmoid is
		# applied later so any network-produced value is valid).
		self.init_min = np.log(init_min) - np.log(1.0 - init_min)
		self.init_max = np.log(init_max) - np.log(1.0 - init_max)
		self.temp = temp
		self.random_seed = random_seed
	def build(self, input_shape=None):
		"""
		Build the weights and operations that the network will use.

		Parameters:
			input_shape ((int,...)): The shape of the input to our Conv2D
				layer. Assumed channels-last, i.e. axis 3 is the channel
				dimension.
		"""
		super(SpatialConcreteDropout, self).build(input_shape)
		input_dim = input_shape[3]
		# kernel already set by inherited build function.
		# Although we define p in logit space, we then apply the sigmoid
		# operation to get the desired value between 0 and 1.
		self.p_logit = self.add_weight(name='p_logit',shape=(1,),
			initializer=initializers.RandomUniform(self.init_min,
			self.init_max), trainable=True)
		# Because of issues with Keras, these functions need to be defined
		# here. They close over self and input_dim.
		def p_logit_regularizer(p_logit):
			"""
			Calculate the regularization term for p_logit.

			Parameters:
				p_logit (tf.Tensor): A 1D Tensor containing the p_logit value
					for dropout.

			Returns:
				(tf.Tensor): The tensorflow graph to calculate the
				p_logit regularization term.
			"""
			# Although we define p in logit space, we then apply the sigmoid
			# operation to get the desired value between 0 and 1.
			p = K.sum(K.sigmoid(p_logit))
			# p*log(p) + (1-p)*log(1-p), scaled by the dropout regularizer
			# strength and the number of input channels.
			regularizer = p * K.log(p)
			regularizer += (1.0 - p) * K.log(1.0 - p)
			regularizer *= self.dropout_regularizer * input_dim
			return regularizer
		def kernel_regularizer(kernel):
			"""
			Calculate the regularization term for concrete dropout.

			Parameters:
				kernel (tf.Tensor): A Tensor containing the kernel for our
					Conv2D layer computation.

			Returns:
				(tf.Tensor): The tensorflow graph to calculate the
				kernel regularization term.
			"""
			# l2 penalty on the kernel divided by the keep probability.
			regularizer = self.cd_kernel_regularizer * K.sum(
				K.square(kernel)) / (1.0 - K.sum(K.sigmoid(self.p_logit)))
			return regularizer
		# This is supposed to change in later versions.
		# NOTE(review): _handle_weight_regularization is a private Keras API;
		# verify it still exists when upgrading TensorFlow.
		self._handle_weight_regularization('p_logit_regularizer',self.p_logit,
			p_logit_regularizer)
		self._handle_weight_regularization('kernel_regularizer',self.kernel,
			kernel_regularizer)
		self.built = True
	def call(self, inputs, training=None):
		"""
		The function that takes the inputs of the layer and conducts the
		Conv2D computation with spatial concrete dropout.

		Parameters:
			inputs (tf.Keras.Layer): The inputs to the Conv2D layer.
			training (bool): A required input for call. Setting training to
				true or false does nothing because concrete dropout behaves the
				same way in both cases.

		Returns:
			(tf.Keras.Layer): The output of the Conv2D layer.
		"""
		# Small epsilon parameter needed for stable optimization
		eps = K.cast_to_floatx(K.epsilon())
		# Build the random tensor for dropout from uniform noise. This
		# formulation allows for a derivative with respect to p. The noise
		# is shared across the spatial dimensions (one draw per channel),
		# which is what makes the dropout "spatial".
		input_shape = K.shape(inputs)
		noise_shape = (input_shape[0], 1, 1, input_shape[3])
		unif_noise = K.random_uniform(shape=noise_shape,
			seed=self.random_seed)
		drop_prob = (K.log(K.sigmoid(self.p_logit)+eps) -
			K.log(1.0-K.sigmoid(self.p_logit)+eps) + K.log(unif_noise + eps)
			- K.log(1.0 - unif_noise + eps))
		drop_prob = K.sigmoid(drop_prob/self.temp)
		# Soft-zero the dropped channels and rescale by the keep probability.
		inputs *= (1.0 - drop_prob)
		inputs /= (1.0 - K.sigmoid(self.p_logit))
		# Now just carry out the basic operations of a Conv2D layer.
		return super(SpatialConcreteDropout, self).call(inputs)
	def compute_output_shape(self, input_shape):
		"""
		Compute the shape of the output given the input. Needed for Keras
		layer.

		Parameters:
			input_shape ((int,...)): The shape of the input to our Conv2D
				layer.

		Returns:
			((int,...)): The output shape of the layer.
		"""
		# Dropout does not change the shape; defer entirely to Conv2D.
		return super(SpatialConcreteDropout, self).compute_output_shape(
			input_shape)
def dropout_alexnet(img_size, num_params, kernel_regularizer=1e-6,
	dropout_rate=0.1,random_seed=None):
	"""
	Build the tensorflow graph for the alexnet BNN with always-on dropout.

	Parameters:
		img_size ((int,int,int)): A tuple with shape (pix,pix,freq) that
			describes the size of the input images
		num_params (int): The number of lensing parameters to predict
		kernel_regularizer (float): The strength of the l2 norm (associated
			to the strength of the prior on the weights)
		dropout_rate (float): The dropout rate to use for the layers.
		random_seed (int): A seed to use in the random function calls. If
			None no explicit seed will be used.
			NOTE(review): this argument is currently not forwarded to the
			AlwaysDropout layers — confirm whether it should be.

	Returns:
		(tf.Tensor): The model (i.e. the tensorflow graph for the model)
	"""
	# The l2 strength is rescaled by the keep probability, consistent with
	# the dropout-as-approximate-inference prior interpretation.
	regularizer = tf.keras.regularizers.l2(kernel_regularizer*(1-dropout_rate))

	def drop(tensor):
		# Apply dropout (active at both train and test time) when a non-zero
		# rate was requested; otherwise pass the tensor through unchanged.
		if dropout_rate > 0:
			return AlwaysDropout(dropout_rate)(tensor)
		return tensor

	inputs = Input(shape=img_size)
	# Convolutional stack (layers 1-5), each preceded by dropout.
	x = Conv2D(filters=64, kernel_size=(5,5), strides=(2,2),
		padding='valid', activation='relu', input_shape=img_size,
		kernel_regularizer=regularizer)(drop(inputs))
	x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)
	x = Conv2D(filters=192, kernel_size=(5,5), strides=(1,1),
		padding='same', activation='relu',
		kernel_regularizer=regularizer)(drop(x))
	x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)
	x = Conv2D(filters=384, kernel_size=(3,3), strides=(1,1),
		padding='same', activation='relu',
		kernel_regularizer=regularizer)(drop(x))
	x = Conv2D(filters=384, kernel_size=(3,3), strides=(1,1),
		padding='same', activation='relu',
		kernel_regularizer=regularizer)(drop(x))
	x = Conv2D(filters=256, kernel_size=(3,3), strides=(1,1),
		padding='same', activation='relu',
		kernel_regularizer=regularizer)(drop(x))
	x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)
	# Fully connected head (layers 6-7 and output), each preceded by dropout.
	x = Flatten()(x)
	x = Dense(4096, activation='relu',
		kernel_regularizer=regularizer)(drop(x))
	x = Dense(4096, activation='relu',
		kernel_regularizer=regularizer)(drop(x))
	outputs = Dense(num_params,
		kernel_regularizer=regularizer)(drop(x))
	return Model(inputs=inputs, outputs=outputs)
def concrete_alexnet(img_size, num_params, kernel_regularizer=1e-6,
	dropout_regularizer=1e-5, init_min=0.1, init_max=0.1,
	temp=0.1, random_seed=None):
	"""
	Build the tensorflow graph for the concrete dropout alexnet BNN.

	Parameters:
		img_size ((int,int,int)): A tuple with shape (pix,pix,freq) that
			describes the size of the input images
		num_params (int): The number of lensing parameters to predict
		kernel_regularizer (float): The strength of the l2 norm (associated
			to the strength of the prior on the weights)
		dropout_regularizer (float): The stronger it is, the more concrete
			dropout will tend towards larger dropout rates.
		init_min (float): The minimum value that the dropout weight p will
			be initialized to.
		init_max (float): The maximum value that the dropout weight p will
			be initialized to.
		temp (float): The temperature that defines how close the concrete
			distribution will be to true dropout.
		random_seed (int): A seed to use in the random function calls. If
			None no explicit seed will be used.

	Returns:
		(tf.Tensor): The model (i.e. the tensorflow graph for the model)

	Notes:
		While the concrete dropout implementation works, the training of the
		dropout terms is very slow. It's possible that modifying the
		learning rate schedule may help.
	"""
	# Every concrete dropout layer in the network shares the same dropout
	# hyperparameters, so gather them once.
	cd_settings = dict(kernel_regularizer=kernel_regularizer,
		dropout_regularizer=dropout_regularizer, init_min=init_min,
		init_max=init_max, temp=temp, random_seed=random_seed)
	inputs = Input(shape=img_size)
	# Convolutional stack (layers 1-5) with spatial concrete dropout.
	x = SpatialConcreteDropout(filters=64, kernel_size=(5,5), strides=(2,2),
		padding='valid', activation='relu', input_shape=img_size,
		**cd_settings)(inputs)
	x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)
	x = SpatialConcreteDropout(filters=192, kernel_size=(5,5), strides=(1,1),
		padding='same', activation='relu', **cd_settings)(x)
	x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)
	x = SpatialConcreteDropout(filters=384, kernel_size=(3,3), strides=(1,1),
		padding='same', activation='relu', **cd_settings)(x)
	x = SpatialConcreteDropout(filters=384, kernel_size=(3,3), strides=(1,1),
		padding='same', activation='relu', **cd_settings)(x)
	x = SpatialConcreteDropout(filters=256, kernel_size=(3,3), strides=(1,1),
		padding='same', activation='relu', **cd_settings)(x)
	x = MaxPooling2D(pool_size=(3,3), strides=(2,2), padding='same')(x)
	# Fully connected head (layers 6-7 and output) with concrete dropout.
	x = Flatten()(x)
	x = ConcreteDropout(4096, activation='relu', **cd_settings)(x)
	x = ConcreteDropout(4096, activation='relu', **cd_settings)(x)
	outputs = ConcreteDropout(num_params, **cd_settings)(x)
	return Model(inputs=inputs, outputs=outputs)
class LensingLossFunctions:
	"""
	A class used to generate the loss functions for the three types of bayesian
	nn models we have implemented: diagonal covariance, full covariance,
	and mixture of full covariances. Currently only two gaussians are allowed
	in the mixture.
	"""
	def __init__(self,flip_pairs,num_params):
		"""
		Initialize the class with the pairs of parameters that must be flipped.
		These are parameters like shear and ellipticity that have been defined
		such that negating both parameters gives the same
		physical definition of the system.

		Parameters:
			flip_pairs ([[int,int,...],...]): A list of pairs of numbers to
				conduct the flip operation on. If empty no flip pairs will be
				used. Note if you also want to consider two sets of parameters
				being flipped at the same time, that must be added to this list.
			num_params (int): The number of parameters to predict.
		"""
		self.flip_pairs = flip_pairs
		self.num_params = num_params
		# Calculate the split list for the lower triangular matrix: row i of
		# the Cholesky factor has i non-zero elements.
		self.split_list = []
		for i in range(1,num_params+1):
			self.split_list += [i]
		# Now for each flip pair (including no flip) we will add a flip
		# matrix to our list. The first entry is the identity (no flip).
		self.flip_mat_list = [tf.linalg.diag(tf.constant(np.ones(
			self.num_params),dtype=tf.float32))]
		for flip_pair in self.flip_pairs:
			# Initialize a numpy array since this is the easiest way
			# to flexibly set the tensor.
			const_initializer = np.ones(self.num_params)
			const_initializer[flip_pair] = -1
			self.flip_mat_list.append(tf.linalg.diag(tf.constant(
				const_initializer,dtype=tf.float32)))
	def mse_loss(self, y_true, output):
		"""
		Returns the MSE loss of the predicted parameters. Will ignore parameters
		associated with the covariance matrix.

		Parameters:
			y_true (tf.Tensor): The true values of the parameters
			output (tf.Tensor): The predicted values of the lensing parameters.
				This assumes the first num_params entries are the parameter
				predictions; any remaining entries are ignored.

		Returns:
			(tf.Tensor): The mse loss function.

		Notes:
			This function should never be used as a loss function. It is useful
			as a metric to understand what portion of the reduction in the loss
			function can be attributed to improved parameter accuracy. Also
			note that for the gmm models the output will default to the first
			Gaussian for this metric.
		"""
		# Keep only the first num_params entries; the rest (covariance terms)
		# are discarded.
		y_pred, _ = tf.split(output,num_or_size_splits=(self.num_params,-1),
			axis=-1)
		# Evaluate the MSE under every allowed flip and keep the minimum.
		loss_list = []
		for flip_mat in self.flip_mat_list:
			loss_list.append(tf.reduce_mean(tf.square(
				tf.matmul(y_pred,flip_mat)-y_true),axis=-1))
		loss_stack = tf.stack(loss_list,axis=-1)
		return tf.reduce_min(loss_stack,axis=-1)
	def log_gauss_diag(self,y_true,y_pred,std_pred):
		"""
		Return the negative log posterior of a Gaussian with diagonal
		covariance matrix

		Parameters:
			y_true (tf.Tensor): The true values of the parameters
			y_pred (tf.Tensor): The predicted value of the parameters
			std_pred (tf.Tensor): The predicted diagonal entries of the
				covariance. Note that std_pred is assumed to be the log of the
				covariance matrix values.

		Returns:
			(tf.Tensor): The TF graph for calculating the nlp

		Notes:
			This loss does not include the constant factor of 1/(2*pi)^(d/2).
		"""
		# 0.5 * sum((y_pred-y_true)^2 / sigma^2) + 0.5 * sum(log sigma^2),
		# with std_pred = log sigma^2 so exp(-std_pred) = 1/sigma^2.
		return 0.5*tf.reduce_sum(tf.multiply(tf.square(y_pred-y_true),
			tf.exp(-std_pred)),axis=-1) + 0.5*tf.reduce_sum(
			std_pred,axis=-1)
	def diagonal_covariance_loss(self,y_true,output):
		"""
		Return the loss function assuming a diagonal covariance matrix

		Parameters:
			y_true (tf.Tensor): The true values of the lensing parameters
			output (tf.Tensor): The predicted values of the lensing parameters.
				This should include 2*self.num_params parameters to account for
				the diagonal entries of our covariance matrix. Covariance matrix
				values are assumed to be in log space.

		Returns:
			(tf.Tensor): The loss function (i.e. the tensorflow graph for it).
		"""
		# First split the data into predicted parameters and covariance matrix
		# element
		y_pred, std_pred = tf.split(output,num_or_size_splits=2,axis=-1)
		# Add each possible flip to the loss list. We will then take the
		# minimum.
		loss_list = []
		for flip_mat in self.flip_mat_list:
			loss_list.append(self.log_gauss_diag(y_true,
				tf.matmul(y_pred,flip_mat),std_pred))
		loss_stack = tf.stack(loss_list,axis=-1)
		return tf.reduce_min(loss_stack,axis=-1)
	def construct_precision_matrix(self,L_mat_elements):
		"""
		Take the matrix elements for the log cholesky decomposition and
		convert them to the precision matrix. Also return the value of
		the diagonal elements before exponentiation, since we get that for
		free.

		Parameters:
			L_mat_elements (tf.Tensor): A tensor of length
				num_params*(num_params+1)/2 that define the lower triangular
				matrix elements of the log cholesky decomposition

		Returns:
			((tf.Tensor,tf.Tensor)): Both the precision matrix and the diagonal
			elements (before exponentiation) of the log cholesky L matrix.
			Note that this second value is important for the posterior
			calculation.
		"""
		# First split the tensor into the elements that will populate each row
		cov_elements_split = tf.split(L_mat_elements,
			num_or_size_splits=self.split_list,axis=-1)
		# Before we stack these elements, we have to pad them with zeros
		# (corresponding to the 0s of the lower triangular matrix).
		cov_elements_stack = []
		pad_offset = 1
		for cov_element in cov_elements_split:
			# Use tf pad function since it's likely the fastest option.
			pad = tf.constant([[0,0],[0,self.num_params-pad_offset]])
			cov_elements_stack.append(tf.pad(cov_element,pad))
			pad_offset+=1
		# Stack the tensors to form our matrix. Use axis=-2 to avoid issues
		# with batches of matrices being passed in.
		L_mat = tf.stack(cov_elements_stack,axis=-2)
		# Pull out the diagonal part, and then (since we're using log
		# cholesky) exponentiate the diagonal.
		L_mat_diag = tf.linalg.diag_part(L_mat)
		L_mat = tf.linalg.set_diag(L_mat,tf.exp(L_mat_diag))
		# Calculate the actual precision matrix: L @ L^T.
		prec_mat = tf.matmul(L_mat,tf.transpose(L_mat,perm=[0,2,1]))
		return prec_mat, L_mat_diag, L_mat
	def log_gauss_full(self,y_true,y_pred,prec_mat,L_diag):
		"""
		Return the negative log posterior of a Gaussian with full
		covariance matrix

		Parameters:
			y_true (tf.Tensor): The true values of the parameters
			y_pred (tf.Tensor): The predicted value of the parameters
			prec_mat: The precision matrix
			L_diag (tf.Tensor): The diagonal (non exponentiated) values of the
				log cholesky decomposition of the precision matrix

		Returns:
			(tf.Tensor): The TF graph for calculating the nlp

		Notes:
			This loss does not include the constant factor of 1/(2*pi)^(d/2).
		"""
		y_dif = y_true - y_pred
		# -sum(L_diag) is -0.5*log(det(precision)) up to sign conventions of
		# the log cholesky parameterization; the second term is the
		# quadratic form 0.5 * y_dif^T @ prec_mat @ y_dif done batch-wise.
		return -tf.reduce_sum(L_diag,-1) + 0.5 * tf.reduce_sum(
			tf.multiply(y_dif,tf.reduce_sum(tf.multiply(tf.expand_dims(
			y_dif,-1),prec_mat),axis=-2)),-1)
	def full_covariance_loss(self,y_true,output):
		"""
		Return the loss function assuming a full covariance matrix

		Parameters:
			y_true (tf.Tensor): The true values of the lensing parameters
			output (tf.Tensor): The predicted values of the lensing parameters.
				This should include self.num_params parameters for the prediction
				and self.num_params*(self.num_params+1)/2 parameters for the
				lower triangular log cholesky decomposition

		Returns:
			(tf.Tensor): The loss function (i.e. the tensorflow graph for it).
		"""
		# Start by dividing the output into the L_elements and the prediction
		# values.
		L_elements_len = int(self.num_params*(self.num_params+1)/2)
		y_pred, L_mat_elements = tf.split(output,
			num_or_size_splits=[self.num_params,L_elements_len],axis=-1)
		# Build the precision matrix and extract the diagonal part
		prec_mat, L_diag, _ = self.construct_precision_matrix(L_mat_elements)
		# Add each possible flip to the loss list. We will then take the
		# minimum.
		loss_list = []
		for flip_mat in self.flip_mat_list:
			loss_list.append(self.log_gauss_full(y_true,
				tf.matmul(y_pred,flip_mat),prec_mat,L_diag))
		loss_stack = tf.stack(loss_list,axis=-1)
		return tf.reduce_min(loss_stack,axis=-1)
	def log_gauss_gm_full(self,y_true,y_preds,prec_mats,L_diags,pis):
		"""
		Return the negative log posterior of a GMM with full
		covariance matrix for each GM. Note this code allows for any number
		of GMMs.

		Parameters:
			y_true (tf.Tensor): The true values of the parameters
			y_preds ([tf.Tensor,...]): A list of the predicted value of the
				parameters
			prec_mats ([tf.Tensor,...]): A list of the precision matrices
			L_diags ([tf.Tensor,...]): A list of the diagonal (non exponentiated)
				values of the log cholesky decomposition of the precision
				matrices
			pis ([tf.Tensor,...]): A list of the mixture weights for each
				Gaussian; assumed to sum to 1.

		Returns:
			(tf.Tensor): The TF graph for calculating the nlp

		Notes:
			This loss does not include the constant factors of 1/(2*pi)^(d/2).
		"""
		# Stack together the loss to be able to do the logsumexp trick
		loss_list = []
		for p_i in range(len(y_preds)):
			# Since we're summing the probabilities using a logsumexp,
			# we don't want the negative here. Also note that we add an
			# epsilon to our log operation to avoid nan gradients.
			loss_list.append(-self.log_gauss_full(y_true,y_preds[p_i],
				prec_mats[p_i],L_diags[p_i])+tf.squeeze(tf.math.log(
				pis[p_i]+K.epsilon()),axis=-1))
		# Use tf implementation of logsumexp
		return -tf.reduce_logsumexp(tf.stack(loss_list,axis=-1),axis=-1)
	def gm_full_covariance_loss(self,y_true,output):
		"""
		Return the loss function assuming a mixture of two gaussians each with
		a full covariance matrix

		Parameters:
			y_true (tf.Tensor): The true values of the lensing parameters
			output (tf.Tensor): The predicted values of the lensing parameters.
				This should include 2 gm which consists of self.num_params
				parameters for the prediction and
				self.num_params*(self.num_params+1)/2 parameters for the
				lower triangular log cholesky decomposition of each gm.
				It should also include one final parameter for the ratio
				between the two gms.

		Returns:
			(tf.Tensor): The loss function (i.e. the tensorflow graph for it).
		"""
		# Start by seperating out the predictions for each gaussian model.
		L_elements_len = int(self.num_params*(self.num_params+1)/2)
		y_pred1, L_mat_elements1, y_pred2, L_mat_elements2, pi_logit = tf.split(
			output,num_or_size_splits=[self.num_params,L_elements_len,
			self.num_params,L_elements_len,1],axis=-1)
		# Set the probability between 0.5 and 1.0. In this parameterization the
		# first Gaussian is always favored.
		pi = 0.5+tf.sigmoid(pi_logit)/2.0
		# Now build the precision matrix for our two models and extract the
		# diagonal components used for the loss calculation
		prec_mat1, L_diag1, _ = self.construct_precision_matrix(L_mat_elements1)
		prec_mat2, L_diag2, _ = self.construct_precision_matrix(L_mat_elements2)
		# Add each possible flip to the loss list. We will then take the
		# minimum. Note the flips for the two Gaussians are independent, so
		# we loop over all pairs.
		loss_list = []
		prec_mats = [prec_mat1,prec_mat2]
		L_diags = [L_diag1,L_diag2]
		pis = [pi,1-pi]
		for flip_mat1 in self.flip_mat_list:
			for flip_mat2 in self.flip_mat_list:
				# The y_preds depends on the selected flips
				y_preds = [tf.matmul(y_pred1,flip_mat1),
					tf.matmul(y_pred2,flip_mat2)]
				loss_list.append(self.log_gauss_gm_full(y_true,y_preds,
					prec_mats,L_diags,pis))
		loss_stack = tf.stack(loss_list,axis=-1)
		return tf.reduce_min(loss_stack,axis=-1)
def p_value(model):
	"""
	Returns the average value of the dropout in each concrete layer.

	Parameters:
		model (keras.Model): A Keras model from which the dropout values will
			be extracted.

	Returns:
		(function): A Keras-metric-compatible function that ignores its
		arguments and returns the mean dropout rate of the model's dropout
		layers.

	Notes:
		This is a hack that allows us to easily keep track of the dropout value
		during training.
	"""
	def p_fake_loss(y_true,y_pred):
		# y_true and y_pred are ignored entirely; we only inspect the model.
		# weights[2] holds the p_logit weight of the concrete dropout layers,
		# which the sigmoid maps back to a dropout rate in (0, 1).
		rates = [tf.sigmoid(layer.weights[2]) for layer in model.layers
			if 'dropout' in layer.name]
		return tf.reduce_mean(rates)
	return p_fake_loss
|
en
| 0.742409
|
# -*- coding: utf-8 -*- Build the TensorFlow model and loss functions This module contains the functions needed to build the BNN model used in ovejero as well as the loss functions for the different posteriors. See the script model_trainer.py for examples of how to use these functions. This class applies dropout to an input both during training and inference. This is consistent with the BNN methodology. Initialize the AlwaysDropout layer. Parameters: dropout_rate (float): A number in the range [0,1) that will serve as the dropout rate for the layer. A larger rate means more dropout. # Check for a bad dropout input # Save the dropout rate for later. The function that takes the inputs (likely outputs of a previous layer) and conducts dropout. Parameters: inputs (tf.Keras.Layer): The inputs to the Dense layer. training (bool): A required input for call. Setting training to true or false does nothing because always dropout behaves the same way in both cases. Returns: (tf.Keras.Layer): The output of the Dense layer. Return the configuration dictionary required by Keras. Compute the shape of the output given the input. Needed for Keras layer. Parameters: input_shape ((int,...)): The shape of the input to our Dense layer. Returns: ((int,...)): The output shape of the layer. Calculate the regularization term for concrete dropout. Parameters: p (tf.Tensor): A 1D Tensor containing the p value for dropout (between 0 and 1). kernel (tf.Tensor): A 2D Tensor defining the weights of the Dense layer kernel_initializer (float): The relative strength of kernel regularization term. dropout_regularizer (float): The relative strength of the dropout regularization term. input_dim (int): The dimension of the input to the layer. Returns: (tf.Tensor): The tensorflow graph to calculate the regularization term. Notes: This is currently not being used because of issues with the Keras framework. Once it updates this will be employed instead of dividing the loss into two parts. 
This class defines a concrete dropout layer that is built around a Keras Dense layer. The dropout is parametrized by a weight that is optimized along with the model's weights themselves. Heavy inspiration from code for arxiv.1705.07832. Initialize the Concrete dropout Dense layer. This will initialize the dense layer along with the overhead needed for concrete dropout. Parameters: output_dim (int): The number of output parameters activation (str): The type of activation function to be used. Will be passed into tensorflow's activation function library. kernel_initializer (str): The type of initializer to use for the kernel. Will be passed to tensorflow's initializer library bias_initializer (str): The type of initializer to use for the bias. Will be passed to tensorflow's initializer library kernel_regularizer (float): The strength of the concrete dropout regularization term dropout_regularizer (float): The strength of the concrete dropout p regularization term init_min (float): The minimum initial value of the dropout rate init_max (float): The maximum initial value of the dropout rate temp (float): The temperature that defines how close the concrete distribution will be to true dropout. random_seed (int): A seed to use in the random function calls. If None no explicit seed will be used. Returns: (keras.Layer): The initialized ConcreteDropout layer. Must still be built. Notes: Technically the regularization terms must be divided by the number of training examples. This is degenerate with the value of the regularizers, so we do not specify it here. The initial dropout rate will be drawn from a uniform distribution with the bounds passed into init. # We do this because Keras does this # First initialize the properties required by the Dense class # Save everything important to self # Convert to logit space (since we want to parameterize our weights # such that any value outputted by the network is valid). Build the weights and operations that the network will use. 
Parameters: input_shape ((int,...)): The shape of the input to our Dense layer. # Although we define p in logit space, we then apply the sigmoid # operation to get the desired value between 0 and 1. # Because of issues with Keras, these functions need to be defined # here. Calculate the regularization term for p_logit. Parameters: p_logit (tf.Tensor): A 1D Tensor containing the p_logit value for dropout. Returns: (tf.Tensor): The tensorflow graph to calculate the p_logit regularization term. # Although we define p in logit space, we then apply the sigmoid # operation to get the desired value between 0 and 1. Calculate the regularization term for concrete dropout. Parameters: kernel (tf.Tensor): A 2D Tensor containing the kernel for our Dense layer computation. Returns: (tf.Tensor): The tensorflow graph to calculate the kernel regularization term. # This is supposed to change in later versions. # Requirement for Keras The function that takes the inputs of the layer and conducts the Dense layer multiplication with concrete dropout. Parameters: inputs (tf.Keras.Layer): The inputs to the Dense layer. training (bool): A required input for call. Setting training to true or false does nothing because concrete dropout behaves the same way in both cases. Returns: (tf.Keras.Layer): The output of the Dense layer. # Small epsilon parameter needed for stable optimization # Build the random tensor for dropout from uniform noise. This # formulation allows for a derivative with respect to p. # Now just carry out the basic operations of a Dense layer. Compute the shape of the output given the input. Needed for Keras layer. Parameters: input_shape ((int,...)): The shape of the input to our Dense layer. Returns: ((int,...)): The output shape of the layer. Return the configuration dictionary required by Keras. This class defines a spatial concrete dropout layer that is built around a Keras Conv2D layer. 
The dropout is parametrized by a weight that is optimized along with the model's weights themselves. Heavy inspiration from code for arxiv.1705.07832. Initialize the Spatial Concrete dropout Dense layer. This will initialize the Conv2d layer along with the overhead needed for spatial concrete dropout. ParametersL filters (int): The number of filters to use for the Conv2D layer kernel_size ((int,int)): The dimensions of the kernel for the Conv2D layer strides ((int,int)): The stride to take in each direction for the Conv2D layer. padding (str): What type of padding to use to get the desired output dimensions from the Conv2D layer. Either valid or same activation (str): The type of activation function to be used. Will be passed into tensorflow's activation function library. kernel_regularizer (float): The strength of the concrete dropout regularization term dropout_regularizer (float): The strength of the concrete dropout p regularization term init_min (float): The minimum initial value of the dropout rate init_max (float): The maximum initial value of the dropout rate temp (float): The temperature that defines how close the concrete distribution will be to true dropout. random_seed (int): A seed to use in the random function calls. If None no explicit seed will be used. Returns: (keras.Layer): The initialized SpatialConcreteDropout layer. Must still be built. Notes: Technically the regularization terms must be divided by the number of training examples. This is degenerate with the value of the regularizers, so we do not specify it here. The initial dropout rate will be drawn from a uniform distribution with the bounds passed into init. # Need to change name to avoid issues with Conv2D Build the weights and operations that the network will use. Parameters: input_shape ((int,...)): The shape of the input to our Conv2D layer. # kernel already set by inherited build function. 
# Although we define p in logit space, we then apply the sigmoid # operation to get the desired value between 0 and 1. # Because of issues with Keras, these functions need to be defined # here. Calculate the regularization term for p_logit. Parameters: p_logit (tf.Tensor): A 1D Tensor containing the p_logit value for dropout. Returns: (tf.Tensor): The tensorflow graph to calculate the p_logit regularization term. # Although we define p in logit space, we then apply the sigmoid # operation to get the desired value between 0 and 1. Calculate the regularization term for concrete dropout. Parameters: kernel (tf.Tensor): A 2D Tensor containing the kernel for our Dense layer computation. Returns: (tf.Tensor): The tensorflow graph to calculate the kernel regularization term. # This is supposed to change in later versions. The function that takes the inputs of the layer and conducts the Dense layer multiplication with concrete dropout. Parameters: inputs (tf.Keras.Layer): The inputs to the Dense layer. training (bool): A required input for call. Setting training to true or false does nothing because concrete dropout behaves the same way in both cases. Returns: (tf.Keras.Layer): The output of the Dense layer. # Small epsilon parameter needed for stable optimization # Build the random tensor for dropout from uniform noise. This # formulation allows for a derivative with respect to p. # Now just carry out the basic operations of a Dense layer. Compute the shape of the output given the input. Needed for Keras layer. Parameters: input_shape ((int,...)): The shape of the input to our Dense layer. Returns: ((int,...)): The output shape of the layer. Build the tensorflow graph for the alexnet BNN. 
Parameters: img_size ((int,int,int)): A tupe with shape (pix,pix,freq) that describes the size of the input images num_params (int): The number of lensing parameters to predict kernel_regularizer (float): The strength of the l2 norm (associated to the strength of the prior on the weights) dropout_rate (float): The dropout rate to use for the layers. random_seed (int): A seed to use in the random function calls. If None no explicit seed will be used. Returns: (tf.Tensor): The model (i.e. the tensorflow graph for the model) # Initialize model # Layer 1 # model.add(AlwaysDropout(dropout_rate)) # Layer 2 # Layer 3 # Layer 4 # Layer 5 # Pass to fully connected layers # Layer 6 # Layer 7 # Output # Construct model Build the tensorflow graph for the concrete dropout alexnet BNN. Parameters: img_size ((int,int,int)): A tupe with shape (pix,pix,freq) that describes the size of the input images num_params (int): The number of lensing parameters to predict kernel_regularizer (float): The strength of the l2 norm (associated to the strength of the prior on the weights) dropout_regularizer (float): The stronger it is, the more concrete dropout will tend towards larger dropout rates. init_min (float): The minimum value that the dropout weight p will be initialized to. init_max (float): The maximum value that the dropout weight p will be initialized to. temp (float): The temperature that defines how close the concrete distribution will be to true dropout. random_seed (int): A seed to use in the random function calls. If None no explicit seed will be used. Returns: (tf.Tensor): The model (i.e. the tensorflow graph for the model) Notes: While the concrete dropout implementation works, the training of the dropout terms is very slow. It's possible that modifying the learning rate schedule may help. 
# Initialize model # Layer 1 # model.add(AlwaysDropout(dropout_rate)) # Layer 2 # Layer 3 # Layer 4 # Layer 5 # Pass to fully connected layers # Layer 6 # Layer 7 # Output # Construct model A class used to generate the loss functions for the three types of bayesian nn models we have implemented: diagonal covariance, full covariance, and mixture of full covariances. Currently only two gaussians are allowed in the mixture. Initialize the class with the pairs of parameters that must be flipped. These are parameters like shear and ellipticity that have been defined such that negating both parameters gives the same physical definition of the system. Parameters: flip_pairs ([[int,int,...],...]): A list of pairs of numbers to conduct the flip operation on. If empty no flip pairs will be used. Note if you also want to consider two sets of parameters being flipped at the same time, that must be added to this list. num_params (int): The number of parameters to predict. # Calculate the split list for lower traingular matrix # Now for each flip pair (including no flip) we will add a flip # matrix to our list. # Initialize a numpy array since this is the easiest way # to flexibly set the tensor. Returns the MSE loss of the predicted parameters. Will ignore parameters associated with the covariance matrix. Parameters: y_true (tf.Tensor): The true values of the parameters output (tf.Tensor): The predicted values of the lensing parameters. This assumes the first num_params are Returns: (tf.Tensor): The mse loss function. Notes: This function should never be used as a loss function. It is useful as a metric to understand what portion of the reduciton in the loss function can be attributed to improved parameter accuracy. Also note that for the gmm models the output will default to the first Gaussian for this metric. 
Return the negative log posterior of a Gaussian with diagonal covariance matrix Parameters: y_true (tf.Tensor): The true values of the parameters y_pred (tf.Tensor): The predicted value of the parameters std_pred (tf.Tensor): The predicted diagonal entries of the covariance. Note that std_pred is assumed to be the log of the covariance matrix values. Returns: (tf.Tensor): The TF graph for calculating the nlp Notes: This loss does not include the constant factor of 1/(2*pi)^(d/2). Return the loss function assuming a diagonal covariance matrix Parameters: y_true (tf.Tensor): The true values of the lensing parameters output (tf.Tensor): The predicted values of the lensing parameters. This should include 2*self.num_params parameters to account for the diagonal entries of our covariance matrix. Covariance matrix values are assumed to be in log space. Returns: (tf.Tensor): The loss function (i.e. the tensorflow graph for it). # First split the data into predicted parameters and covariance matrix # element # Add each possible flip to the loss list. We will then take the # minimum. Take the matrix elements for the log cholesky decomposition and convert them to the precision matrix. Also return the value of the diagonal elements before exponentiation, since we get that for free. Parameters: L_mat_elements (tf.Tensor): A tensor of length num_params*(num_params+1)/2 that define the lower traingular matrix elements of the log cholesky decomposition Returns: ((tf.Tensor,tf.Tensor)): Both the precision matrix and the diagonal elements (before exponentiation) of the log cholesky L matrix. Note that this second value is important for the posterior calculation. # First split the tensor into the elements that will populate each row # Before we stack these elements, we have to pad them with zeros # (corresponding to the 0s of the lower traingular matrix). # Use tf pad function since it's likely the fastest option. # Stack the tensors to form our matrix. 
Use axis=-2 to avoid issues # with batches of matrices being passed in. # Pull out the diagonal part, and then (since we're using log # cholesky) exponentiate the diagonal. # Calculate the actual precision matrix Return the negative log posterior of a Gaussian with full covariance matrix Parameters: y_true (tf.Tensor): The true values of the parameters y_pred (tf.Tensor): The predicted value of the parameters prec_mat: The precision matrix L_diag (tf.Tensor): The diagonal (non exponentiated) values of the log cholesky decomposition of the precision matrix Returns: (tf.Tensor): The TF graph for calculating the nlp Notes: This loss does not include the constant factor of 1/(2*pi)^(d/2). Return the loss function assuming a full covariance matrix Parameters: y_true (tf.Tensor): The true values of the lensing parameters output (tf.Tensor): The predicted values of the lensing parameters. This should include self.num_params parameters for the prediction and self.num_params*(self.num_params+1)/2 parameters for the lower triangular log cholesky decomposition Returns: (tf.Tensor): The loss function (i.e. the tensorflow graph for it). # Start by dividing the output into the L_elements and the prediction # values. # Build the precision matrix and extract the diagonal part # Add each possible flip to the loss list. We will then take the # minimum. Return the negative log posterior of a GMM with full covariance matrix for each GM. Note this code allows for any number of GMMs. Parameters: y_true (tf.Tensor): The true values of the parameters y_preds ([tf.Tensor,...]): A list of the predicted value of the parameters prec_mats ([tf.Tensor,...]): A list of the precision matrices L_diags ([tf.Tensor,...]): A list of the diagonal (non exponentiated) values of the log cholesky decomposition of the precision matrices Returns: (tf.Tensor): The TF graph for calculating the nlp Notes: This loss does not include the constant factors of 1/(2*pi)^(d/2). 
# Stack together the loss to be able to do the logsumexp trick # Since we're summing the probabilities using a logsumexp, # we don't want the negative here. Also note that we add an # epsilon to our log operation to avoid nan gradients. # Use tf implementation of logsumexp Return the loss function assuming a mixture of two gaussians each with a full covariance matrix Parameters: y_true (tf.Tensor): The true values of the lensing parameters output (tf.Tensor): The predicted values of the lensing parameters. This should include 2 gm which consists of self.num_params parameters for the prediction and self.num_params*(self.num_params+1)/2 parameters for the lower triangular log cholesky decomposition of each gm. It should also include one final parameter for the ratio between the two gms. Returns: (tf.Tensor): The loss function (i.e. the tensorflow graph for it). # Start by seperating out the predictions for each gaussian model. # Set the probability between 0.5 and 1.0. In this parameterization the # first Gaussian is always favored. # Now build the precision matrix for our two models and extract the # diagonal components used for the loss calculation # Add each possible flip to the loss list. We will then take the # minimum. # The y_preds depends on the selected flips Returns the average value of the dropout in each concrete layer. Parameters: model (keras.Model): A Keras model from with the dropout values will be extracted. Notes: This is a hack that allows us to easily keep track of the dropout value during training. # We won't be using either y_true or y_pred
| 3.331306
| 3
|
sevenseconds/config/bastion.py
|
aryszka/sevenseconds
| 0
|
6627757
|
import time
import socket
import yaml
import datetime
import base64
import difflib
import botocore.exceptions
import requests
import json
from copy import deepcopy
from ..helper import info, warning, error, ActionOnExit, substitute_template_vars
from ..helper.aws import filter_subnets, associate_address, get_tag
from .route53 import configure_dns_record, delete_dns_record
from ..config import AccountData
def configure_bastion_host(account: AccountData, vpc: object, region: str, base_ami_id: str):
    """Ensure the 'Odd' SSH bastion host for this account/region is present,
    up to date and reachable.

    Handles both legacy (plain EC2) and CloudFormation-managed bastion hosts:
    stopped instances are terminated, outdated ones are re-deployed when
    requested, and a new CloudFormation stack is created when no bastion
    exists but one is enabled in the account configuration.

    Parameters:
        account (AccountData): Account data (session, config, options, name).
        vpc (object): boto3 VPC resource the bastion lives in.
        region (str): AWS region name.
        base_ami_id (str): Expected (Taupage) AMI id of the bastion instance.
    """
    ec2 = account.session.resource('ec2', region)
    cf = account.session.resource('cloudformation', region)
    cfc = account.session.client('cloudformation', region)
    enable_bastion = account.config.get("enable_odd", False)
    re_deploy = account.config['bastion'].get('re_deploy', account.options.get('redeploy_odd_host'))
    bastion_version = None
    if account.config['bastion'].get('version_url'):
        with ActionOnExit('Get last Tag for Bastion Image...') as act:
            r = requests.get(account.config['bastion'].get('version_url'))
            if r.status_code != 200:
                act.error('Error code: {}'.format(r.status_code))
                act.error('Error msg: {}'.format(r.text))
                return
            # Newest tag first; it becomes the bastion release version.
            tags = sorted(r.json(), key=lambda x: x['created'], reverse=True)
            bastion_version = tags[0]['name']
            act.ok(bastion_version)
    config = substitute_template_vars(account.config['bastion'].get('ami_config'),
                                      {'account_name': account.name,
                                       'vpc_net': str(vpc.cidr_block),
                                       'version': bastion_version})
    user_data = '#taupage-ami-config\n{}'.format(yaml.safe_dump(config)).encode('utf-8')

    # Search all existing hosts (Instances and Cloudformation)
    instance_filter = [
        {'Name': 'tag:Name',
         'Values': ['Odd (SSH Bastion Host)']},
        {'Name': 'instance-state-name',
         'Values': ['running', 'pending', 'stopping', 'stopped']},
    ]
    legacy_instances = list(vpc.instances.filter(Filters=instance_filter))
    for instance in legacy_instances:
        # Terminate old (stopped) Odd Systems
        if instance.state.get('Name') == 'stopped':
            drop_bastionhost(instance)
        else:
            # Verify Running Version (Userdate, FS Parameter)
            inst_user_data = base64.b64decode(instance.describe_attribute(Attribute='userData')['UserData']['Value'])
            if instance.image_id != base_ami_id:
                error('{} use {} instand of {}.'.format(instance.id, instance.image_id, base_ami_id))
                if re_deploy or account.options.get('update_odd_host'):
                    error(' ==> Make re-deploy')
                    re_deploy = True
            if inst_user_data != user_data:
                original = inst_user_data.decode('utf-8')
                new = user_data.decode('utf-8')
                diff = difflib.ndiff(original.splitlines(1), new.splitlines(1))
                error('{} use a different UserData\n{}'.format(instance.id, ''.join(diff)))
                if re_deploy or account.options.get('update_odd_host'):
                    error(' ==> Make re-deploy')
                    re_deploy = True
            launch_time = instance.launch_time
            # Give a freshly launched host 15 minutes of grace before
            # declaring it unreachable and replacing it.
            if (not wait_for_ssh_port(instance.public_ip_address, 60) and
                    datetime.timedelta(minutes=15) < datetime.datetime.now(launch_time.tzinfo) - launch_time):
                error('Bastion Host does not response. Drop Bastionhost and create new one')
                drop_bastionhost(instance)
                legacy_instances = None

    # Start migration
    if legacy_instances and re_deploy:
        for instance in legacy_instances:
            drop_bastionhost(instance)
        legacy_instances = None
    update_needed = False

    # Check Odd Hosts in other vpcs
    cloudformation_filter = [
        {'Name': 'tag:aws:cloudformation:logical-id',
         'Values': ['OddServerInstance']},
        {'Name': 'instance-state-name',
         'Values': ['running', 'pending', 'stopping', 'stopped']},
    ]
    cloudformation_instances = list(vpc.instances.filter(Filters=cloudformation_filter))
    if cloudformation_instances:
        if not enable_bastion:
            info('bastion not enabled and instances found. Start clean up')
            delete_bastion_host(account, region)
            return
        for instance in cloudformation_instances:
            # Terminate old (stopped) Odd Systems
            if instance.state.get('Name') == 'stopped':
                drop_bastionhost(instance)
            else:
                # Verify Running Version (Userdate, FS Parameter)
                oddstack = cf.Stack(get_tag(instance.tags, 'aws:cloudformation:stack-name'))
                used_ami_id = get_tag(oddstack.parameters, 'TaupageId', prefix='Parameter')
                if used_ami_id != base_ami_id:
                    error('{} use {} instand of {}.'.format(oddstack.name, used_ami_id, base_ami_id))
                    if re_deploy or account.options.get('update_odd_host'):
                        error(' ==> prepare change set')
                        update_needed = True
                used_bastion_version = get_tag(oddstack.parameters, 'OddRelease', prefix='Parameter')
                if used_bastion_version != bastion_version:
                    error('{} use {} instand of {}.'.format(oddstack.name, used_bastion_version, bastion_version))
                    if re_deploy or account.options.get('update_odd_host'):
                        error(' ==> prepare change set')
                        update_needed = True
                if update_needed or re_deploy:
                    update_cf_bastion_host(account, vpc, region, oddstack, base_ami_id, bastion_version)
                    if not legacy_instances:
                        info('check old odd security groups')
                        cleanup_old_security_group(account, region, oddstack, vpc)

    if not legacy_instances and not cloudformation_instances and enable_bastion:
        try:
            stack = cf.Stack('Odd')
            info('Stack Status: {}'.format(stack.stack_status))
        except Exception:
            create_cf_bastion_host(account, vpc, region, base_ami_id, bastion_version)
            # BUGFIX: if the stack still does not exist here (creation failed
            # or was skipped, e.g. no DMZ subnet found), the status checks
            # below would re-raise the original lookup error uncaught.
            try:
                stack.reload()
            except botocore.exceptions.ClientError:
                return
        if stack.stack_status in ('UPDATE_IN_PROGRESS', 'CREATE_IN_PROGRESS'):
            if stack.stack_status.startswith('UPDATE_'):
                waiter = cfc.get_waiter('stack_update_complete')
            else:
                waiter = cfc.get_waiter('stack_create_complete')
            with ActionOnExit('Waiting of Stack') as act:
                try:
                    waiter.wait(StackName='Odd')
                except botocore.exceptions.WaiterError as e:
                    act.error('Stack creation failed: {}'.format(e))
                    return
        info('check old odd security groups')
        cleanup_old_security_group(account, region, stack, vpc)
        instance = ec2.Instance(stack.Resource(logical_id='OddServerInstance').physical_resource_id)
        launch_time = instance.launch_time
        if (not wait_for_ssh_port(instance.public_ip_address, 60) and
                datetime.timedelta(minutes=15) < datetime.datetime.now(launch_time.tzinfo) - launch_time):
            error('Bastion Host does not response. Force Update for Bastionhost Stack')
            update_cf_bastion_host(account, vpc, region, stack, base_ami_id, bastion_version)
def cleanup_old_security_group(account: AccountData, region: str, oddstack: object, vpc: object):
    """Delete the legacy 'Odd (SSH Bastion Host)' security groups in *vpc*.

    Before deleting each legacy group, every rule in other security groups
    that still references it is rewritten to point at the stack-managed Odd
    security group instead (otherwise the delete would fail on dependencies).
    """
    ec2 = account.session.resource('ec2', region)
    # The replacement group created by the CloudFormation stack.
    stack_security_group_id = oddstack.Resource(logical_id='OddSecurityGroup').physical_resource_id
    sgs = [x for x in vpc.security_groups.all() if x.group_name == 'Odd (SSH Bastion Host)']
    for sg in sgs:
        # NOTE(review): the inner `with ... as act` below shadows this `act`,
        # so the delete result at the end is reported through whichever inner
        # action ran last — confirm this is intended.
        with ActionOnExit('Found old Odd Security Group {}/{}'.format(sg.id, sg.group_name)) as act:
            # Find every security group that references the old group in a rule.
            for sg_depency in vpc.meta.client.describe_security_groups(Filters=[
                {
                    'Name': 'ip-permission.group-id',
                    'Values': [
                        sg.group_id,
                    ]
                },
            ])['SecurityGroups']:
                sg_depency = ec2.SecurityGroup(sg_depency.get('GroupId'))
                with ActionOnExit(
                        'Found old Odd SG depency in Security Group {}/{}'
                        .format(sg_depency.id, sg_depency.group_name)) as act:
                    # Rewrite both rule directions to reference the new group.
                    for permission in sg_depency.ip_permissions:
                        _change_permission(sg_depency, permission, sg.group_id, stack_security_group_id, 'ingress', act)
                    for permission in sg_depency.ip_permissions_egress:
                        _change_permission(sg_depency, permission, sg.group_id, stack_security_group_id, 'egress', act)
            try:
                sg.delete()
                act.ok('removed')
            except Exception as e:
                act.error('Can\'t cleanup old Odd Stack: {}'.format(e))
def _change_permission(sg, permission, old_group_id, new_group_id, direction, act):
    """Rewrite one ip-permission entry of *sg*, replacing references to
    *old_group_id* with *new_group_id*.

    The pre-rewrite permission is revoked and the rewritten one authorized,
    but only when at least one group reference actually changed.  *direction*
    selects 'ingress' vs 'egress'; *act* receives error messages.  Note that
    ``permission`` is mutated in place.
    """
    snapshot = deepcopy(permission)  # pre-mutation copy, used for the revoke call
    pairs = permission.get('UserIdGroupPairs', [])
    matched = [pair for pair in pairs if pair.get('GroupId') == old_group_id]
    for pair in matched:
        pair['GroupId'] = new_group_id
    if pairs:
        # De-duplicate by GroupId (the rewrite above may have produced
        # duplicates); later entries win, as in the original dict() trick.
        permission['UserIdGroupPairs'] = list(
            {pair['GroupId']: pair for pair in pairs}.values()
        )
    if not matched:
        return
    if direction in ('egress', 'ingress'):
        revoke = getattr(sg, 'revoke_' + direction)
        authorize = getattr(sg, 'authorize_' + direction)
        try:
            revoke(IpPermissions=[snapshot])
        except Exception as e:
            act.error("Can't revoke the Permissions: {}".format(e))
        try:
            authorize(IpPermissions=[permission])
        except Exception as e:
            act.error("Can't authorize the Permissions: {}".format(e))
def create_cf_bastion_host(account: AccountData, vpc: object, region: str, ami_id: str, bastion_version: str):
    """Create the 'Odd' bastion CloudFormation stack in the first DMZ subnet,
    wait for it to complete, and register a DNS record for its elastic IP.
    """
    cf = account.session.resource('cloudformation', region)
    cfc = account.session.client('cloudformation', region)
    ec2c = account.session.client('ec2', region)
    subnet_ids = [subnet.id for subnet in filter_subnets(vpc, 'dmz')]
    if not subnet_ids:
        warning('No DMZ subnet found')
        return
    allocation_id, ip = associate_address(ec2c)
    # Stack parameters as ordered (key, value) pairs.
    parameter_values = (
        ('AccountName', account.name),
        ('DisableApiTermination', 'false'),
        ('EIPAllocation', allocation_id),
        ('OddRelease', bastion_version),
        ('SubnetId', subnet_ids[0]),
        ('TaupageId', ami_id),
        ('VPCNetwork', str(vpc.cidr_block)),
        ('VpcId', vpc.id),
    )
    stack = cf.create_stack(
        StackName='Odd',
        TemplateBody=json.dumps(account.config['bastion'].get('cf_template')),
        Parameters=[{'ParameterKey': key, 'ParameterValue': value}
                    for key, value in parameter_values],
        OnFailure='DELETE',
        Tags=[
            {'Key': 'LastUpdate', 'Value': time.strftime('%Y-%m-%dT%H:%M:%S%z')},
            {'Key': 'InfrastructureComponent', 'Value': 'true'}
        ]
    )
    with ActionOnExit('Wait of stack create complete') as act:
        waiter = cfc.get_waiter('stack_create_complete')
        try:
            waiter.wait(StackName=stack.name)
        except botocore.exceptions.WaiterError as e:
            act.error('Stack creation failed: {}'.format(e))
            return
    info('SSH Bastion instance is running with public IP {}'.format(ip))
    if account.domain is None:
        warning('No DNS domain configured, skipping record creation')
    else:
        configure_dns_record(account, 'odd-{}'.format(region), ip)
def update_cf_bastion_host(account: AccountData, vpc: object, region: str, stack: object, ami_id: str,
                           bastion_version: str):
    """Update the bastion CloudFormation stack and wait for completion.

    The subnet parameter is switched to a different DMZ subnet on every
    update, which forces the instance to be re-initialised from scratch.
    """
    cloudformation = account.session.client('cloudformation', region)
    # switch subnet, every update => force reinitialisation
    current_subnet = get_tag(stack.parameters, 'SubnetId', prefix='Parameter')
    subnet_ids = [subnet.id for subnet in filter_subnets(vpc, 'dmz')]
    if current_subnet in subnet_ids:
        subnet_ids.remove(current_subnet)
    if not subnet_ids:
        warning('No DMZ subnet found')
        return
    # Stack parameters as ordered (key, value) pairs; the EIP allocation is
    # carried over from the existing stack.
    parameter_values = (
        ('AccountName', account.name),
        ('DisableApiTermination', 'false'),
        ('EIPAllocation', get_tag(stack.parameters, 'EIPAllocation', prefix='Parameter')),
        ('OddRelease', bastion_version),
        ('SubnetId', subnet_ids[0]),
        ('TaupageId', ami_id),
        ('VPCNetwork', str(vpc.cidr_block)),
        ('VpcId', vpc.id),
    )
    response = stack.update(
        TemplateBody=json.dumps(account.config['bastion'].get('cf_template')),
        Parameters=[{'ParameterKey': key, 'ParameterValue': value}
                    for key, value in parameter_values],
        Tags=[
            {'Key': 'LastUpdate', 'Value': time.strftime('%Y-%m-%dT%H:%M:%S%z')},
            {'Key': 'InfrastructureComponent', 'Value': 'true'}
        ]
    )
    info(response)
    with ActionOnExit('Wait of stack update complete') as act:
        waiter = cloudformation.get_waiter('stack_update_complete')
        try:
            waiter.wait(StackName=stack.name)
        except botocore.exceptions.WaiterError as e:
            act.error('Stack creation failed: {}'.format(e))
            return
def drop_bastionhost(instance):
    """Terminate a bastion EC2 instance, lifting termination protection first."""
    active_states = ('running', 'pending', 'stopping', 'stopped')
    with ActionOnExit('Terminating SSH Bastion host..'):
        instance.reload()  # refresh the cached state before checking it
        if instance.state.get('Name') not in active_states:
            return
        # API-termination protection must be disabled before terminate().
        instance.modify_attribute(Attribute='disableApiTermination', Value='false')
        instance.terminate()
        instance.wait_until_terminated()
def wait_for_ssh_port(host: str, timeout: int):
    """Poll TCP port 22 on *host* until it accepts connections.

    Parameters:
        host (str): Hostname or IP address to probe.
        timeout (int): Overall number of seconds to keep trying.

    Returns:
        bool: True once port 22 accepts a connection, False on timeout.
    """
    start = time.time()
    with ActionOnExit('Waiting for SSH port of {}..'.format(host)) as act:
        while True:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                result = sock.connect_ex((host, 22))
            except Exception:
                result = -1
            finally:
                # BUGFIX: the original never closed the socket, leaking one
                # file descriptor per probe iteration.
                sock.close()
            if result == 0:
                return True
            if time.time() - start > timeout:
                act.error('TIMEOUT')
                return False
            time.sleep(5)
            act.progress()
def delete_bastion_host(account: AccountData, region: str):
    """Remove every bastion ('Odd') host in *region*.

    Terminates legacy (tag-identified) EC2 instances as well as
    CloudFormation-managed ones, deleting their DNS records and stacks.
    """
    ec2 = account.session.resource('ec2', region)
    cf = account.session.resource('cloudformation', region)
    cfc = account.session.client('cloudformation', region)
    # Legacy instances are identified solely by their Name tag.
    for instance in ec2.instances.all():
        if get_tag(instance.tags, 'Name') == 'Odd (SSH Bastion Host)':
            if instance.state.get('Name') in ('running', 'pending', 'stopping', 'stopped'):
                if account.domain is not None and instance.public_ip_address:
                    try:
                        delete_dns_record(account, 'odd-{}'.format(region), instance.public_ip_address)
                    except Exception:
                        # Best effort: a missing DNS record must not block termination.
                        pass
                drop_bastionhost(instance)
    # CloudFormation-managed instances carry the stack's logical-id tag.
    cloudformation_filter = [
        {'Name': 'tag:aws:cloudformation:logical-id',
         'Values': ['OddServerInstance']},
        {'Name': 'instance-state-name',
         'Values': ['running', 'pending', 'stopping', 'stopped']},
    ]
    for instance in ec2.instances.filter(Filters=cloudformation_filter):
        if account.domain is not None and instance.public_ip_address:
            try:
                delete_dns_record(account, 'odd-{}'.format(region), instance.public_ip_address)
            except Exception as e:
                warning('Can\'t cleanup old Odd host name: {}'.format(e))
        # Deleting the stack also terminates the instance it manages.
        oddstack = cf.Stack(get_tag(instance.tags, 'aws:cloudformation:stack-name'))
        oddstack.delete()
        waiter = cfc.get_waiter('stack_delete_complete')
        with ActionOnExit('Waiting of Stack delete') as act:
            try:
                waiter.wait(StackName=get_tag(instance.tags, 'aws:cloudformation:stack-name'))
            except botocore.exceptions.WaiterError as e:
                act.error('Stack delete failed: {}'.format(e))
|
import time
import socket
import yaml
import datetime
import base64
import difflib
import botocore.exceptions
import requests
import json
from copy import deepcopy
from ..helper import info, warning, error, ActionOnExit, substitute_template_vars
from ..helper.aws import filter_subnets, associate_address, get_tag
from .route53 import configure_dns_record, delete_dns_record
from ..config import AccountData
def configure_bastion_host(account: AccountData, vpc: object, region: str, base_ami_id: str):
    """Ensure a single, up-to-date 'Odd' SSH bastion host exists in *vpc*.

    Reconciles both legacy (Name-tagged) instances and CloudFormation-managed
    instances: stopped or unresponsive hosts are terminated, drifted stacks
    are updated, and a new stack is created when none exists and bastions are
    enabled via the ``enable_odd`` account config flag.
    """
    ec2 = account.session.resource('ec2', region)
    cf = account.session.resource('cloudformation', region)
    cfc = account.session.client('cloudformation', region)
    enable_bastion = account.config.get("enable_odd", False)
    re_deploy = account.config['bastion'].get('re_deploy', account.options.get('redeploy_odd_host'))
    bastion_version = None
    if account.config['bastion'].get('version_url'):
        with ActionOnExit('Get last Tag for Bastion Image...') as act:
            r = requests.get(account.config['bastion'].get('version_url'))
            if r.status_code != 200:
                act.error('Error code: {}'.format(r.status_code))
                act.error('Error msg: {}'.format(r.text))
                return
            # newest first: tags[0] is the latest released bastion version
            tags = sorted(r.json(), key=lambda x: x['created'], reverse=True)
            bastion_version = tags[0]['name']
            act.ok(bastion_version)
    config = substitute_template_vars(account.config['bastion'].get('ami_config'),
                                      {'account_name': account.name,
                                       'vpc_net': str(vpc.cidr_block),
                                       'version': bastion_version})
    user_data = '#taupage-ami-config\n{}'.format(yaml.safe_dump(config)).encode('utf-8')

    # Search all existing hosts (Instances and Cloudformation)
    instance_filter = [
        {'Name': 'tag:Name',
         'Values': ['Odd (SSH Bastion Host)']},
        {'Name': 'instance-state-name',
         'Values': ['running', 'pending', 'stopping', 'stopped']},
    ]
    legacy_instances = list(vpc.instances.filter(Filters=instance_filter))
    for instance in legacy_instances:
        # Terminate old (stopped) Odd Systems
        if instance.state.get('Name') == 'stopped':
            drop_bastionhost(instance)
        else:
            # Verify Running Version (Userdata, FS Parameter)
            inst_user_data = base64.b64decode(instance.describe_attribute(Attribute='userData')['UserData']['Value'])
            if instance.image_id != base_ami_id:
                error('{} use {} instand of {}.'.format(instance.id, instance.image_id, base_ami_id))
                if re_deploy or account.options.get('update_odd_host'):
                    error(' ==> Make re-deploy')
                    re_deploy = True
            if inst_user_data != user_data:
                original = inst_user_data.decode('utf-8')
                new = user_data.decode('utf-8')
                diff = difflib.ndiff(original.splitlines(1), new.splitlines(1))
                error('{} use a different UserData\n{}'.format(instance.id, ''.join(diff)))
                if re_deploy or account.options.get('update_odd_host'):
                    error(' ==> Make re-deploy')
                    re_deploy = True
            launch_time = instance.launch_time
            # only replace an unreachable host once it is older than 15 minutes,
            # to give a freshly booted instance time to come up
            if (not wait_for_ssh_port(instance.public_ip_address, 60) and
                    datetime.timedelta(minutes=15) < datetime.datetime.now(launch_time.tzinfo) - launch_time):
                error('Bastion Host does not response. Drop Bastionhost and create new one')
                drop_bastionhost(instance)
                legacy_instances = None

    # Start migration
    if legacy_instances and re_deploy:
        for instance in legacy_instances:
            drop_bastionhost(instance)
        legacy_instances = None
    update_needed = False

    # Check Odd Hosts in other vpcs
    cloudformation_filter = [
        {'Name': 'tag:aws:cloudformation:logical-id',
         'Values': ['OddServerInstance']},
        {'Name': 'instance-state-name',
         'Values': ['running', 'pending', 'stopping', 'stopped']},
    ]
    cloudformation_instances = list(vpc.instances.filter(Filters=cloudformation_filter))
    if cloudformation_instances and not enable_bastion:
        info('bastion not enabled and instances found. Start clean up')
        delete_bastion_host(account, region)
        return
    for instance in cloudformation_instances:
        # Terminate old (stopped) Odd Systems
        if instance.state.get('Name') == 'stopped':
            drop_bastionhost(instance)
        else:
            # Verify Running Version (Userdata, FS Parameter)
            oddstack = cf.Stack(get_tag(instance.tags, 'aws:cloudformation:stack-name'))
            used_ami_id = get_tag(oddstack.parameters, 'TaupageId', prefix='Parameter')
            if used_ami_id != base_ami_id:
                error('{} use {} instand of {}.'.format(oddstack.name, used_ami_id, base_ami_id))
                if re_deploy or account.options.get('update_odd_host'):
                    error(' ==> prepare change set')
                    update_needed = True
            used_bastion_version = get_tag(oddstack.parameters, 'OddRelease', prefix='Parameter')
            if used_bastion_version != bastion_version:
                error('{} use {} instand of {}.'.format(oddstack.name, used_bastion_version, bastion_version))
                if re_deploy or account.options.get('update_odd_host'):
                    error(' ==> prepare change set')
                    update_needed = True
            if update_needed or re_deploy:
                update_cf_bastion_host(account, vpc, region, oddstack, base_ami_id, bastion_version)
            if not legacy_instances:
                info('check old odd security groups')
                cleanup_old_security_group(account, region, oddstack, vpc)
    if not legacy_instances and not cloudformation_instances and enable_bastion:
        try:
            stack = cf.Stack('Odd')
            info('Stack Status: {}'.format(stack.stack_status))
        except Exception:
            create_cf_bastion_host(account, vpc, region, base_ami_id, bastion_version)
            # fix: the original fell through with 'stack' unbound after creating
            # the stack, raising NameError below; re-read the freshly created stack
            stack = cf.Stack('Odd')
        if stack.stack_status in ('UPDATE_IN_PROGRESS', 'CREATE_IN_PROGRESS'):
            if stack.stack_status.startswith('UPDATE_'):
                waiter = cfc.get_waiter('stack_update_complete')
            else:
                waiter = cfc.get_waiter('stack_create_complete')
            with ActionOnExit('Waiting of Stack') as act:
                try:
                    waiter.wait(StackName='Odd')
                except botocore.exceptions.WaiterError as e:
                    act.error('Stack creation failed: {}'.format(e))
                    return
        info('check old odd security groups')
        cleanup_old_security_group(account, region, stack, vpc)
        instance = ec2.Instance(stack.Resource(logical_id='OddServerInstance').physical_resource_id)
        launch_time = instance.launch_time
        if (not wait_for_ssh_port(instance.public_ip_address, 60) and
                datetime.timedelta(minutes=15) < datetime.datetime.now(launch_time.tzinfo) - launch_time):
            error('Bastion Host does not response. Force Update for Bastionhost Stack')
            update_cf_bastion_host(account, vpc, region, stack, base_ami_id, bastion_version)
def cleanup_old_security_group(account: AccountData, region: str, oddstack: object, vpc: object):
    """Migrate references from the legacy Odd security group to the stack's one.

    For every security group named 'Odd (SSH Bastion Host)': rewrite all
    dependent groups' ingress/egress rules to point at the stack-managed
    security group instead, then delete the legacy group.
    """
    ec2 = account.session.resource('ec2', region)
    stack_security_group_id = oddstack.Resource(logical_id='OddSecurityGroup').physical_resource_id
    sgs = [x for x in vpc.security_groups.all() if x.group_name == 'Odd (SSH Bastion Host)']
    for sg in sgs:
        with ActionOnExit('Found old Odd Security Group {}/{}'.format(sg.id, sg.group_name)) as act:
            # find every group whose rules reference the legacy group
            for sg_depency in vpc.meta.client.describe_security_groups(Filters=[
                {
                    'Name': 'ip-permission.group-id',
                    'Values': [
                        sg.group_id,
                    ]
                },
            ])['SecurityGroups']:
                sg_depency = ec2.SecurityGroup(sg_depency.get('GroupId'))
                # NOTE(review): the inner 'as act' rebinds the outer 'act'; the
                # error report below therefore goes to the last inner context —
                # confirm this is intended.
                with ActionOnExit(
                        'Found old Odd SG depency in Security Group {}/{}'
                        .format(sg_depency.id, sg_depency.group_name)) as act:
                    for permission in sg_depency.ip_permissions:
                        _change_permission(sg_depency, permission, sg.group_id, stack_security_group_id, 'ingress', act)
                    for permission in sg_depency.ip_permissions_egress:
                        _change_permission(sg_depency, permission, sg.group_id, stack_security_group_id, 'egress', act)
            try:
                sg.delete()
                act.ok('removed')
            except Exception as e:
                act.error('Can\'t cleanup old Odd Stack: {}'.format(e))
def _change_permission(sg, permission, old_group_id, new_group_id, direction, act):
old_permission = deepcopy(permission)
replace = False
for user_id_group_pair in permission.get('UserIdGroupPairs', []):
if user_id_group_pair.get('GroupId') == old_group_id:
user_id_group_pair['GroupId'] = new_group_id
replace = True
if permission.get('UserIdGroupPairs'):
permission['UserIdGroupPairs'] = list(
dict(
(v['GroupId'], v) for v in permission['UserIdGroupPairs']
).values()
)
if replace:
try:
if direction == 'egress':
sg.revoke_egress(IpPermissions=[old_permission])
elif direction == 'ingress':
sg.revoke_ingress(IpPermissions=[old_permission])
except Exception as e:
act.error('Can\'t revoke the Permissions: {}'.format(e))
try:
if direction == 'egress':
sg.authorize_egress(IpPermissions=[permission])
elif direction == 'ingress':
sg.authorize_ingress(IpPermissions=[permission])
except Exception as e:
act.error('Can\'t authorize the Permissions: {}'.format(e))
def create_cf_bastion_host(account: AccountData, vpc: object, region: str, ami_id: str, bastion_version: str):
    """Create the 'Odd' CloudFormation stack for a new SSH bastion host.

    Picks the first DMZ subnet, attaches an Elastic IP so the bastion keeps a
    stable public address, waits for stack creation to finish, and registers
    the DNS record 'odd-<region>' when the account has a domain configured.
    """
    cf = account.session.resource('cloudformation', region)
    cfc = account.session.client('cloudformation', region)
    ec2c = account.session.client('ec2', region)
    subnet_ids = [a.id for a in filter_subnets(vpc, 'dmz')]
    if not subnet_ids:
        warning('No DMZ subnet found')
        return
    allocation_id, ip = associate_address(ec2c)
    stackname = 'Odd'
    stack = cf.create_stack(
        StackName=stackname,
        TemplateBody=json.dumps(account.config['bastion'].get('cf_template')),
        Parameters=[
            {
                'ParameterKey': 'AccountName',
                'ParameterValue': account.name
            },
            {
                'ParameterKey': 'DisableApiTermination',
                'ParameterValue': 'false'
            },
            {
                'ParameterKey': 'EIPAllocation',
                'ParameterValue': allocation_id
            },
            {
                'ParameterKey': 'OddRelease',
                'ParameterValue': bastion_version
            },
            {
                'ParameterKey': 'SubnetId',
                'ParameterValue': subnet_ids[0]
            },
            {
                'ParameterKey': 'TaupageId',
                'ParameterValue': ami_id
            },
            {
                'ParameterKey': 'VPCNetwork',
                'ParameterValue': str(vpc.cidr_block)
            },
            {
                'ParameterKey': 'VpcId',
                'ParameterValue': vpc.id
            }
        ],
        # roll back automatically: a half-created bastion is useless
        OnFailure='DELETE',
        Tags=[
            {'Key': 'LastUpdate', 'Value': time.strftime('%Y-%m-%dT%H:%M:%S%z')},
            {'Key': 'InfrastructureComponent', 'Value': 'true'}
        ]
    )
    with ActionOnExit('Wait of stack create complete') as act:
        waiter = cfc.get_waiter('stack_create_complete')
        try:
            waiter.wait(StackName=stack.name)
        except botocore.exceptions.WaiterError as e:
            act.error('Stack creation failed: {}'.format(e))
            return
    info('SSH Bastion instance is running with public IP {}'.format(ip))
    if account.domain is not None:
        configure_dns_record(account, 'odd-{}'.format(region), ip)
    else:
        warning('No DNS domain configured, skipping record creation')
def update_cf_bastion_host(account: AccountData, vpc: object, region: str, stack: object, ami_id: str,
                           bastion_version: str):
    """Update the 'Odd' CloudFormation stack in place and wait for completion.

    The subnet parameter is deliberately moved to a different DMZ subnet on
    every update so the instance is re-initialised; the Elastic IP allocation
    is carried over from the existing stack parameters.
    """
    cloudformation = account.session.client('cloudformation', region)
    # switch subnet, every update => force reinitialisation
    current_subnet = get_tag(stack.parameters, 'SubnetId', prefix='Parameter')
    subnet_ids = [a.id for a in filter_subnets(vpc, 'dmz')]
    if current_subnet in subnet_ids:
        subnet_ids.remove(current_subnet)
    if not subnet_ids:
        warning('No DMZ subnet found')
        return
    response = stack.update(
        TemplateBody=json.dumps(account.config['bastion'].get('cf_template')),
        Parameters=[
            {
                'ParameterKey': 'AccountName',
                'ParameterValue': account.name
            },
            {
                'ParameterKey': 'DisableApiTermination',
                'ParameterValue': 'false'
            },
            {
                'ParameterKey': 'EIPAllocation',
                'ParameterValue': get_tag(stack.parameters, 'EIPAllocation', prefix='Parameter')
            },
            {
                'ParameterKey': 'OddRelease',
                'ParameterValue': bastion_version
            },
            {
                'ParameterKey': 'SubnetId',
                'ParameterValue': subnet_ids[0]
            },
            {
                'ParameterKey': 'TaupageId',
                'ParameterValue': ami_id
            },
            {
                'ParameterKey': 'VPCNetwork',
                'ParameterValue': str(vpc.cidr_block)
            },
            {
                'ParameterKey': 'VpcId',
                'ParameterValue': vpc.id
            }
        ],
        Tags=[
            {'Key': 'LastUpdate', 'Value': time.strftime('%Y-%m-%dT%H:%M:%S%z')},
            {'Key': 'InfrastructureComponent', 'Value': 'true'}
        ]
    )
    info(response)
    with ActionOnExit('Wait of stack update complete') as act:
        waiter = cloudformation.get_waiter('stack_update_complete')
        try:
            waiter.wait(StackName=stack.name)
        except botocore.exceptions.WaiterError as e:
            # fix: message said 'Stack creation failed' on the update path (copy/paste)
            act.error('Stack update failed: {}'.format(e))
            return
def drop_bastionhost(instance):
    """Terminate a bastion EC2 instance after lifting its termination protection."""
    terminatable_states = ('running', 'pending', 'stopping', 'stopped')
    with ActionOnExit('Terminating SSH Bastion host..'):
        instance.reload()
        if instance.state.get('Name') in terminatable_states:
            # API termination protection must be disabled before terminate()
            instance.modify_attribute(Attribute='disableApiTermination', Value='false')
            instance.terminate()
            instance.wait_until_terminated()
def wait_for_ssh_port(host: str, timeout: int):
    """Poll TCP port 22 on *host* until it accepts connections.

    Probes every 5 seconds. Returns True as soon as a connect succeeds,
    False once *timeout* seconds have elapsed without success.
    """
    start = time.time()
    with ActionOnExit('Waiting for SSH port of {}..'.format(host)) as act:
        while True:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                result = sock.connect_ex((host, 22))
            except Exception:
                result = -1
            finally:
                # fix: the original leaked one socket per probe iteration
                sock.close()
            if result == 0:
                return True
            if time.time() - start > timeout:
                act.error('TIMEOUT')
                return False
            time.sleep(5)
            act.progress()
def delete_bastion_host(account: AccountData, region: str):
    """Remove every Odd SSH bastion host in *region*.

    Handles both legacy hosts (identified by their Name tag) and
    CloudFormation-managed hosts (identified by the stack logical-id tag),
    deleting the matching DNS record before terminating/deleting.
    """
    ec2 = account.session.resource('ec2', region)
    cf = account.session.resource('cloudformation', region)
    cfc = account.session.client('cloudformation', region)
    # Legacy hosts: plain instances tagged by name, no stack to delete
    for instance in ec2.instances.all():
        if get_tag(instance.tags, 'Name') == 'Odd (SSH Bastion Host)':
            if instance.state.get('Name') in ('running', 'pending', 'stopping', 'stopped'):
                if account.domain is not None and instance.public_ip_address:
                    try:
                        delete_dns_record(account, 'odd-{}'.format(region), instance.public_ip_address)
                    except Exception:
                        # best effort: a missing DNS record must not block termination
                        pass
                drop_bastionhost(instance)
    # CloudFormation-managed hosts: delete the whole stack, not just the instance
    cloudformation_filter = [
        {'Name': 'tag:aws:cloudformation:logical-id',
         'Values': ['OddServerInstance']},
        {'Name': 'instance-state-name',
         'Values': ['running', 'pending', 'stopping', 'stopped']},
    ]
    for instance in ec2.instances.filter(Filters=cloudformation_filter):
        if account.domain is not None and instance.public_ip_address:
            try:
                delete_dns_record(account, 'odd-{}'.format(region), instance.public_ip_address)
            except Exception as e:
                warning('Can\'t cleanup old Odd host name: {}'.format(e))
        oddstack = cf.Stack(get_tag(instance.tags, 'aws:cloudformation:stack-name'))
        oddstack.delete()
        waiter = cfc.get_waiter('stack_delete_complete')
        with ActionOnExit('Waiting of Stack delete') as act:
            try:
                waiter.wait(StackName=get_tag(instance.tags, 'aws:cloudformation:stack-name'))
            except botocore.exceptions.WaiterError as e:
                act.error('Stack delete failed: {}'.format(e))
|
en
| 0.72893
|
# Search all existing hosts (Instances and Cloudformation) # Terminate old (stopped) Odd Systems # Verify Running Version (Userdate, FS Parameter) # Start migration # Check Odd Hosts in other vpcs # Terminate old (stopped) Odd Systems # Verify Running Version (Userdate, FS Parameter) # switch subnet, every update => force reinitialisation
| 2.022563
| 2
|
Curso_de_Python_ Curso_em_Video/PythonExercicios/ex066.py
|
DanilooSilva/Cursos_de_Python
| 0
|
6627758
|
<reponame>DanilooSilva/Cursos_de_Python<filename>Curso_de_Python_ Curso_em_Video/PythonExercicios/ex066.py
# Read integers until the sentinel value 999 is entered, then report how many
# values were accepted and their total (prompt/output strings are Portuguese).
soma = cont = 0  # soma: running total, cont: number of accepted values
while True:
    valor = int(input('Digite um valor (999 para parar): '))
    if valor == 999:
        # sentinel reached: stop reading; 999 itself is NOT added to the total
        break
    soma += valor
    cont += 1
print(f'A soma do(s) {cont} valor(es) foi {soma}!')
|
Curso_em_Video/PythonExercicios/ex066.py
# Read integers until the sentinel value 999 is entered, then report how many
# values were accepted and their total (prompt/output strings are Portuguese).
soma = cont = 0  # soma: running total, cont: number of accepted values
while True:
    valor = int(input('Digite um valor (999 para parar): '))
    if valor == 999:
        # sentinel reached: stop reading; 999 itself is NOT added to the total
        break
    soma += valor
    cont += 1
print(f'A soma do(s) {cont} valor(es) foi {soma}!')
|
none
| 1
| 3.650723
| 4
|
|
NeverLan CTF/2020/PROG/password_crack/password_crack.py
|
dgsse/CTF-Writeups
| 19
|
6627759
|
<gh_stars>10-100
#!/usr/bin/python
#-*- coding: utf-8 -*-
import hashlib
def main():
    """Brute-force a 'color-year-member' password against a known MD5 digest.

    Tries every combination of color, year (1900-2020) and team member,
    printing the plaintext and exiting as soon as the digest matches.
    """
    HASH = "267530778aa6585019c98985eeda255f"
    colors = ["red", "blue", "yellow", "purple", "green", "black", "white", "gray", "pink", "violet", "brow", "orange", "grey", "dark", "magenta", "lime", "blank"]
    members = ["zestyfe", "durkinza", "purvesta", "s7a73farm"]
    for color in colors:
        for year in range(1900, 2021):
            for member in members:
                hashencode = color + "-" + str(year) + "-" + member
                # fix: hashlib.md5 requires bytes on Python 3; the original
                # passed a str, which raises TypeError.
                result = hashlib.md5(hashencode.encode('utf-8'))
                hash_crack = result.hexdigest()
                if hash_crack == HASH:
                    print(HASH + " is: " + hashencode)
                    exit()


if __name__ == "__main__":
    main()
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import hashlib
def main():
    """Brute-force a 'color-year-member' password against a known MD5 digest.

    Tries every combination of color, year (1900-2020) and team member,
    printing the plaintext and exiting as soon as the digest matches.
    """
    HASH = "267530778aa6585019c98985eeda255f"
    colors = ["red", "blue", "yellow", "purple", "green", "black", "white", "gray", "pink", "violet", "brow", "orange", "grey", "dark", "magenta", "lime", "blank"]
    members = ["zestyfe", "durkinza", "purvesta", "s7a73farm"]
    for color in colors:
        for year in range(1900, 2021):
            for member in members:
                hashencode = color + "-" + str(year) + "-" + member
                # fix: hashlib.md5 requires bytes on Python 3; the original
                # passed a str, which raises TypeError.
                result = hashlib.md5(hashencode.encode('utf-8'))
                hash_crack = result.hexdigest()
                if hash_crack == HASH:
                    print(HASH + " is: " + hashencode)
                    exit()


if __name__ == "__main__":
    main()
|
en
| 0.348434
|
#!/usr/bin/python #-*- coding: utf-8 -*-
| 3.380095
| 3
|
test_type_hinting.py
|
dertilo/coding
| 0
|
6627760
|
<filename>test_type_hinting.py<gh_stars>0
#TODO: to be removed
# import os
# from dataclasses import asdict
# from pprint import pprint
#
# import numpy
# from typeguard.util import TYPEGUARD_CACHE
# from util import data_io
#
# from dummy_package.another_dummy_module import AnotherDummyClass
# from dummy_package.dummy_module import DummyClass, AudioConfig
# from dummy_package.dummy_module_2 import DummyClass2, dummy_fun, dummy_fun_2, \
# DummyChild, generator, build_generator
# from redbaron_type_hinting.adding_type_hints import enrich_pyfiles_by_type_hints
#
# FILE_NAME = "types.jsonl"
#
#
# def test_type_hinting():
# """
# pytest --typeguard-packages=dummy_package
# """
# main_dummy()
# type_logs = list(TYPEGUARD_CACHE.values())
# enrich_pyfiles_by_type_hints(type_logs)
#
#
# def main_dummy():
# x = DummyClass()
# x.bla(numpy.zeros((1, 3)))
# c = DummyClass2()
# y = dummy_fun(x)
# y = dummy_fun(DummyChild())
#
# y = dummy_fun_2(AudioConfig(bitrate=4))
# y = dummy_fun_2(AudioConfig(bitrate=None))
# c.foo = x
# bla = c.dummy_method(x)
# bla = c.dummy_class_method(x)
# bla = c.dummy_static_method(x)
# x = AnotherDummyClass()
# x.bla(numpy.zeros((1, 3)))
#
# x = list(generator((DummyClass() for _ in range(3))))
# x = list(build_generator((DummyClass() for _ in range(3))))
#
#
# # def test_dogfooding(tmp_path):
# # TYPES_JSONL = str(tmp_path / FILE_NAME)
# # enrich_pyfiles_by_type_hints("dummy_types.jsonl")
# #
# # enrich_pyfiles_by_type_hints(TYPES_JSONL)
|
<filename>test_type_hinting.py<gh_stars>0
#TODO: to be removed
# import os
# from dataclasses import asdict
# from pprint import pprint
#
# import numpy
# from typeguard.util import TYPEGUARD_CACHE
# from util import data_io
#
# from dummy_package.another_dummy_module import AnotherDummyClass
# from dummy_package.dummy_module import DummyClass, AudioConfig
# from dummy_package.dummy_module_2 import DummyClass2, dummy_fun, dummy_fun_2, \
# DummyChild, generator, build_generator
# from redbaron_type_hinting.adding_type_hints import enrich_pyfiles_by_type_hints
#
# FILE_NAME = "types.jsonl"
#
#
# def test_type_hinting():
# """
# pytest --typeguard-packages=dummy_package
# """
# main_dummy()
# type_logs = list(TYPEGUARD_CACHE.values())
# enrich_pyfiles_by_type_hints(type_logs)
#
#
# def main_dummy():
# x = DummyClass()
# x.bla(numpy.zeros((1, 3)))
# c = DummyClass2()
# y = dummy_fun(x)
# y = dummy_fun(DummyChild())
#
# y = dummy_fun_2(AudioConfig(bitrate=4))
# y = dummy_fun_2(AudioConfig(bitrate=None))
# c.foo = x
# bla = c.dummy_method(x)
# bla = c.dummy_class_method(x)
# bla = c.dummy_static_method(x)
# x = AnotherDummyClass()
# x.bla(numpy.zeros((1, 3)))
#
# x = list(generator((DummyClass() for _ in range(3))))
# x = list(build_generator((DummyClass() for _ in range(3))))
#
#
# # def test_dogfooding(tmp_path):
# # TYPES_JSONL = str(tmp_path / FILE_NAME)
# # enrich_pyfiles_by_type_hints("dummy_types.jsonl")
# #
# # enrich_pyfiles_by_type_hints(TYPES_JSONL)
|
en
| 0.441986
|
#TODO: to be removed # import os # from dataclasses import asdict # from pprint import pprint # # import numpy # from typeguard.util import TYPEGUARD_CACHE # from util import data_io # # from dummy_package.another_dummy_module import AnotherDummyClass # from dummy_package.dummy_module import DummyClass, AudioConfig # from dummy_package.dummy_module_2 import DummyClass2, dummy_fun, dummy_fun_2, \ # DummyChild, generator, build_generator # from redbaron_type_hinting.adding_type_hints import enrich_pyfiles_by_type_hints # # FILE_NAME = "types.jsonl" # # # def test_type_hinting(): # """ # pytest --typeguard-packages=dummy_package # """ # main_dummy() # type_logs = list(TYPEGUARD_CACHE.values()) # enrich_pyfiles_by_type_hints(type_logs) # # # def main_dummy(): # x = DummyClass() # x.bla(numpy.zeros((1, 3))) # c = DummyClass2() # y = dummy_fun(x) # y = dummy_fun(DummyChild()) # # y = dummy_fun_2(AudioConfig(bitrate=4)) # y = dummy_fun_2(AudioConfig(bitrate=None)) # c.foo = x # bla = c.dummy_method(x) # bla = c.dummy_class_method(x) # bla = c.dummy_static_method(x) # x = AnotherDummyClass() # x.bla(numpy.zeros((1, 3))) # # x = list(generator((DummyClass() for _ in range(3)))) # x = list(build_generator((DummyClass() for _ in range(3)))) # # # # def test_dogfooding(tmp_path): # # TYPES_JSONL = str(tmp_path / FILE_NAME) # # enrich_pyfiles_by_type_hints("dummy_types.jsonl") # # # # enrich_pyfiles_by_type_hints(TYPES_JSONL)
| 2.025185
| 2
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/cnos/cnos_save.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
| 17
|
6627761
|
<reponame>gvashchenkolineate/gvashchenkolineate_infra_trytravis<filename>ansible/venv/lib/python2.7/site-packages/ansible/modules/network/cnos/cnos_save.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to save running config to start up config to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_save
author: "<NAME> (@amuraleedhar)"
short_description: Save the running configuration as the startup configuration
on devices running Lenovo CNOS
description:
- This module allows you to copy the running configuration of a switch over
its startup configuration. It is recommended to use this module shortly
after any major configuration changes so they persist after a switch
restart. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the
playbook is run.
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_save. These are
written in the main.yml file of the tasks directory.
---
- name: Test Save
cnos_save:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_save_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Switch Running Config is Saved to Startup Config"
'''
import sys
import time
import socket
import array
import json
import time
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except Exception:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Ansible entry point: save the switch's running config to startup config.

    Runs 'write memory' on the device, appends the command output to the
    user-supplied outputfile, and reports success/failure via the module
    exit/fail JSON.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=False),
            username=dict(required=False),
            password=dict(required=False, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),),
        supports_check_mode=False)
    command = 'write memory'
    outputfile = module.params['outputfile']
    output = ''
    cmd = [{'command': command, 'prompt': None, 'answer': None}]
    output = output + str(cnos.run_cnos_commands(module, cmd))
    # Save it into the file; 'with' guarantees the handle is closed even on a
    # write error and avoids shadowing the builtin name 'file' (leak fix).
    with open(outputfile, "a") as fh:
        fh.write(output)
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True,
                         msg="Switch Running Config is Saved to Startup Config ")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to save running config to start up config to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_save
author: "<NAME> (@amuraleedhar)"
short_description: Save the running configuration as the startup configuration
on devices running Lenovo CNOS
description:
- This module allows you to copy the running configuration of a switch over
its startup configuration. It is recommended to use this module shortly
after any major configuration changes so they persist after a switch
restart. This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the
playbook is run.
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_save. These are
written in the main.yml file of the tasks directory.
---
- name: Test Save
cnos_save:
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_save_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: str
sample: "Switch Running Config is Saved to Startup Config"
'''
import sys
import time
import socket
import array
import json
import time
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except Exception:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Ansible entry point: save the switch's running config to startup config.

    Runs 'write memory' on the device, appends the command output to the
    user-supplied outputfile, and reports success/failure via the module
    exit/fail JSON.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=False),
            username=dict(required=False),
            password=dict(required=False, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),),
        supports_check_mode=False)
    command = 'write memory'
    outputfile = module.params['outputfile']
    output = ''
    cmd = [{'command': command, 'prompt': None, 'answer': None}]
    output = output + str(cnos.run_cnos_commands(module, cmd))
    # Save it into the file; 'with' guarantees the handle is closed even on a
    # write error and avoids shadowing the builtin name 'file' (leak fix).
    with open(outputfile, "a") as fh:
        fh.write(output)
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True,
                         msg="Switch Running Config is Saved to Startup Config ")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
|
en
| 0.8303
|
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Lenovo, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # Module to save running config to start up config to Lenovo Switches # Lenovo Networking # --- module: cnos_save author: "<NAME> (@amuraleedhar)" short_description: Save the running configuration as the startup configuration on devices running Lenovo CNOS description: - This module allows you to copy the running configuration of a switch over its startup configuration. It is recommended to use this module shortly after any major configuration changes so they persist after a switch restart. This module uses SSH to manage network device configuration. The results of the operation will be placed in a directory named 'results' that must be created by the user in their local directory to where the playbook is run. version_added: "2.3" extends_documentation_fragment: cnos options: {} Tasks : The following are examples of using the module cnos_save. These are written in the main.yml file of the tasks directory. --- - name: Test Save cnos_save: deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" outputfile: "./results/test_save_{{ inventory_hostname }}_output.txt" msg: description: Success or failure message returned: always type: str sample: "Switch Running Config is Saved to Startup Config" # Save it into the file
| 1.620885
| 2
|
yapftests/main_test.py
|
TinkerBoard2-Android/external-yapf
| 12
|
6627762
|
# -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.__init__.main."""
from contextlib import contextmanager
import sys
import unittest
import yapf
from yapf.yapflib import py3compat
class IO(object):
    """IO is a thin wrapper around StringIO.

    This is strictly to wrap the Python 3 StringIO object so that it can supply a
    "buffer" attribute, mimicking sys.stdout/sys.stderr on Python 3.
    """

    class Buffer(object):
        # Stand-in for the TextIOWrapper.buffer byte stream.

        def __init__(self):
            self.string_io = py3compat.StringIO()

        def write(self, s):
            # Accept bytes on Python 3 and transparently decode to text.
            if py3compat.PY3 and isinstance(s, bytes):
                s = str(s, 'utf-8')
            self.string_io.write(s)

        def getvalue(self):
            return self.string_io.getvalue()

    def __init__(self):
        self.buffer = self.Buffer()

    def write(self, s):
        # Text writes and buffer writes land in the same underlying StringIO.
        self.buffer.write(s)

    def getvalue(self):
        return self.buffer.getvalue()
@contextmanager
def captured_output():
    """Temporarily replace sys.stdout/sys.stderr with IO objects.

    Yields the pair (stdout, stderr); the original streams are always
    restored on exit, even if the body raises.
    """
    new_out, new_err = IO(), IO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = new_out, new_err
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = old_out, old_err
@contextmanager
def patched_input(code):
    """Monkey patch code as though it were coming from stdin."""

    def lines():
        # Yield *code* one line at a time, then signal end-of-input.
        for line in code.splitlines():
            yield line
        raise EOFError()

    # The default argument deliberately binds ONE generator instance at
    # definition time, so successive calls consume successive lines.
    def patch_raw_input(lines=lines()):
        return next(lines)

    try:
        orig_raw_import = yapf.py3compat.raw_input
        yapf.py3compat.raw_input = patch_raw_input
        yield
    finally:
        # Always restore the real raw_input, even if the body raises.
        yapf.py3compat.raw_input = orig_raw_import
class RunMainTest(unittest.TestCase):
    """Tests for the yapf.run_main entry point."""

    def testShouldHandleYapfError(self):
        """run_main should handle YapfError and sys.exit(1)."""
        expected_message = 'yapf: Input filenames did not match any python files\n'
        sys.argv = ['yapf', 'foo.c']  # .c files are not python input
        with captured_output() as (out, err):
            with self.assertRaises(SystemExit):
                yapf.run_main()
            # the error goes to stderr only; stdout stays empty
            self.assertEqual(out.getvalue(), '')
            self.assertEqual(err.getvalue(), expected_message)
class MainTest(unittest.TestCase):
    """Tests for yapf.main: argument handling, stdin formatting, help/version."""
    # NOTE(review): assertRaisesRegexp is the deprecated Python 2 alias of
    # assertRaisesRegex; kept for py2 compatibility here.

    def testNoPythonFilesMatched(self):
        with self.assertRaisesRegexp(yapf.errors.YapfError,
                                     'did not match any python files'):
            yapf.main(['yapf', 'foo.c'])

    def testEchoInput(self):
        # already-formatted input must round-trip unchanged
        code = 'a = 1\nb = 2\n'
        with patched_input(code):
            with captured_output() as (out, _):
                ret = yapf.main([])
                self.assertEqual(ret, 0)
                self.assertEqual(out.getvalue(), code)

    def testEchoInputWithStyle(self):
        code = 'def f(a = 1):\n return 2*a\n'
        chromium_code = 'def f(a=1):\n return 2 * a\n'
        with patched_input(code):
            with captured_output() as (out, _):
                # '-' reads from stdin
                ret = yapf.main(['-', '--style=chromium'])
                self.assertEqual(ret, 0)
                self.assertEqual(out.getvalue(), chromium_code)

    def testEchoBadInput(self):
        bad_syntax = ' a = 1\n'
        with patched_input(bad_syntax):
            with captured_output() as (_, _):
                with self.assertRaisesRegexp(SyntaxError, 'unexpected indent'):
                    yapf.main([])

    def testHelp(self):
        with captured_output() as (out, _):
            ret = yapf.main(['-', '--style-help', '--style=pep8'])
            self.assertEqual(ret, 0)
            help_message = out.getvalue()
            self.assertIn('indent_width=4', help_message)
            self.assertIn('The number of spaces required before a trailing comment.',
                          help_message)

    def testVersion(self):
        with captured_output() as (out, _):
            ret = yapf.main(['-', '--version'])
            self.assertEqual(ret, 0)
            version = 'yapf {}\n'.format(yapf.__version__)
            self.assertEqual(version, out.getvalue())
|
# -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.__init__.main."""
from contextlib import contextmanager
import sys
import unittest
import yapf
from yapf.yapflib import py3compat
class IO(object):
"""IO is a thin wrapper around StringIO.
This is strictly to wrap the Python 3 StringIO object so that it can supply a
"buffer" attribute.
"""
class Buffer(object):
def __init__(self):
self.string_io = py3compat.StringIO()
def write(self, s):
if py3compat.PY3 and isinstance(s, bytes):
s = str(s, 'utf-8')
self.string_io.write(s)
def getvalue(self):
return self.string_io.getvalue()
def __init__(self):
self.buffer = self.Buffer()
def write(self, s):
self.buffer.write(s)
def getvalue(self):
return self.buffer.getvalue()
@contextmanager
def captured_output():
new_out, new_err = IO(), IO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
@contextmanager
def patched_input(code):
"""Monkey patch code as though it were coming from stdin."""
def lines():
for line in code.splitlines():
yield line
raise EOFError()
def patch_raw_input(lines=lines()):
return next(lines)
try:
orig_raw_import = yapf.py3compat.raw_input
yapf.py3compat.raw_input = patch_raw_input
yield
finally:
yapf.py3compat.raw_input = orig_raw_import
class RunMainTest(unittest.TestCase):
def testShouldHandleYapfError(self):
"""run_main should handle YapfError and sys.exit(1)."""
expected_message = 'yapf: Input filenames did not match any python files\n'
sys.argv = ['yapf', 'foo.c']
with captured_output() as (out, err):
with self.assertRaises(SystemExit):
yapf.run_main()
self.assertEqual(out.getvalue(), '')
self.assertEqual(err.getvalue(), expected_message)
class MainTest(unittest.TestCase):
def testNoPythonFilesMatched(self):
with self.assertRaisesRegexp(yapf.errors.YapfError,
'did not match any python files'):
yapf.main(['yapf', 'foo.c'])
def testEchoInput(self):
code = 'a = 1\nb = 2\n'
with patched_input(code):
with captured_output() as (out, _):
ret = yapf.main([])
self.assertEqual(ret, 0)
self.assertEqual(out.getvalue(), code)
def testEchoInputWithStyle(self):
code = 'def f(a = 1):\n return 2*a\n'
chromium_code = 'def f(a=1):\n return 2 * a\n'
with patched_input(code):
with captured_output() as (out, _):
ret = yapf.main(['-', '--style=chromium'])
self.assertEqual(ret, 0)
self.assertEqual(out.getvalue(), chromium_code)
def testEchoBadInput(self):
bad_syntax = ' a = 1\n'
with patched_input(bad_syntax):
with captured_output() as (_, _):
with self.assertRaisesRegexp(SyntaxError, 'unexpected indent'):
yapf.main([])
def testHelp(self):
with captured_output() as (out, _):
ret = yapf.main(['-', '--style-help', '--style=pep8'])
self.assertEqual(ret, 0)
help_message = out.getvalue()
self.assertIn('indent_width=4', help_message)
self.assertIn('The number of spaces required before a trailing comment.',
help_message)
def testVersion(self):
with captured_output() as (out, _):
ret = yapf.main(['-', '--version'])
self.assertEqual(ret, 0)
version = 'yapf {}\n'.format(yapf.__version__)
self.assertEqual(version, out.getvalue())
|
en
| 0.855066
|
# -*- coding: utf-8 -*- # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for yapf.__init__.main. IO is a thin wrapper around StringIO. This is strictly to wrap the Python 3 StringIO object so that it can supply a "buffer" attribute. Monkey patch code as though it were coming from stdin. run_main should handle YapfError and sys.exit(1).
| 2.22666
| 2
|
migrations/versions/4b56cde3ebd7_new_fetching_flags.py
|
RobbiNespu/forget
| 157
|
6627763
|
<filename>migrations/versions/4b56cde3ebd7_new_fetching_flags.py<gh_stars>100-1000
"""new fetching flags
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2019-02-24 11:53:29.128983
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Add per-account fetch-progress tracking columns."""
    # Marks how far the current backfill batch has progressed (a post id).
    op.add_column('accounts', sa.Column('fetch_current_batch_end_id', sa.String(), nullable=True))
    # Set once the account's entire post history has been fetched.
    op.add_column('accounts', sa.Column('fetch_history_complete', sa.Boolean(), server_default='FALSE', nullable=False))
    # If the referenced post is deleted, just clear the batch-end marker.
    op.create_foreign_key(op.f('fk_accounts_fetch_current_batch_end_id_posts'), 'accounts', 'posts', ['fetch_current_batch_end_id'], ['id'], ondelete='SET NULL')
def downgrade():
    """Reverse upgrade(): drop the fetch-progress columns and their FK."""
    # Drop the FK first; the column it references is removed below.
    op.drop_constraint(op.f('fk_accounts_fetch_current_batch_end_id_posts'), 'accounts', type_='foreignkey')
    op.drop_column('accounts', 'fetch_history_complete')
    op.drop_column('accounts', 'fetch_current_batch_end_id')
|
<filename>migrations/versions/4b56cde3ebd7_new_fetching_flags.py<gh_stars>100-1000
"""new fetching flags
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2019-02-24 11:53:29.128983
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('accounts', sa.Column('fetch_current_batch_end_id', sa.String(), nullable=True))
op.add_column('accounts', sa.Column('fetch_history_complete', sa.Boolean(), server_default='FALSE', nullable=False))
op.create_foreign_key(op.f('fk_accounts_fetch_current_batch_end_id_posts'), 'accounts', 'posts', ['fetch_current_batch_end_id'], ['id'], ondelete='SET NULL')
def downgrade():
op.drop_constraint(op.f('fk_accounts_fetch_current_batch_end_id_posts'), 'accounts', type_='foreignkey')
op.drop_column('accounts', 'fetch_history_complete')
op.drop_column('accounts', 'fetch_current_batch_end_id')
|
en
| 0.428637
|
new fetching flags Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2019-02-24 11:53:29.128983 # revision identifiers, used by Alembic.
| 1.497458
| 1
|
application.py
|
MoonHyuk/BOJ-statistics
| 62
|
6627764
|
from collections import OrderedDict
import datetime
import json
import os
from multiprocessing import Process
import urllib.request
from bs4 import BeautifulSoup
from flask import Flask, render_template, request, abort, jsonify
from flask_debugtoolbar import DebugToolbarExtension
from flask_sslify import SSLify
import requests
from models import db
from models import User, Submission, AcceptedSubmission, Ranking
# Flask application setup: HTTPS redirection, config from the environment,
# SQLAlchemy binding, and the debug toolbar.
application = Flask(__name__)
sslify = SSLify(application)  # force HTTP requests over to HTTPS
application.config.from_object(os.environ['APP_SETTINGS'])
application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(application)
toolbar = DebugToolbarExtension(application)
# define header for urllib request (BOJ rejects the default Python UA)
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) ' \
             'Chrome/58.0.3029.110 Safari/537.36'
hds = {'User-Agent': user_agent}
hds_json = {'User-Agent': user_agent, 'Content-Type': 'Application/json'}
# constants: BOJ judge result strings in status-table order; their index in
# this list is stored as the numeric Submission.result code.
RESULTS = ["기다리는 중", "재채점을 기다리는 중", "채점 준비중", "채점중", "맞았습니다!!", "출력 형식이 잘못되었습니다",
           "틀렸습니다", "시간 초과", "메모리 초과", "출력 초과", "런타임 에러", "컴파일 에러"]
def is_boj_user(user_id):
    """Return the display name scraped from the BOJ profile page.

    Returns False when the profile does not exist (HTTP error) or the id
    cannot be encoded into a URL.
    """
    url = "https://www.acmicpc.net/user/" + user_id
    try:
        probe = urllib.request.Request(url, headers=hds)
        urllib.request.urlopen(probe, timeout=5)
    except (urllib.error.HTTPError, UnicodeEncodeError):
        return False
    # The profile exists: fetch it again and pull the <h1> heading,
    # which carries the canonical username.
    soup = get_soup_from_url(url)
    return soup.h1.string.strip()
def get_soup_from_url(url):
    """Fetch *url* and return a BeautifulSoup of the response body.

    Retries up to 10 times on any fetch error; if every attempt fails the
    last error is re-raised.  (Previously the code fell through after 10
    failures and crashed with a NameError on the undefined response
    object instead of reporting the real network error.)
    """
    req = urllib.request.Request(url, headers=hds)
    last_error = None
    for _ in range(10):
        try:
            fp = urllib.request.urlopen(req, timeout=10)
            break
        except Exception as err:  # timeouts, HTTP errors, connection resets
            last_error = err
    else:
        # All 10 attempts failed; surface the real cause.
        raise last_error
    try:
        source = fp.read()
    finally:
        fp.close()
    return BeautifulSoup(source, "lxml")
def update_profile(user_id):
    """Re-scrape a user's BOJ profile page and persist any changes.

    Updates the intro text and solved count, and pulls new submissions
    when the solved count changed.  Returns the updated User row.
    """
    user = User.query.filter_by(boj_id=user_id).first()
    soup = get_soup_from_url("https://www.acmicpc.net/user/" + user_id)
    # Profile layout: <blockquote> holds the intro, the second table row's
    # first cell holds the solved count — TODO confirm against current BOJ markup.
    intro = soup.blockquote.string
    solved_num = soup.tbody.find_all('tr')[1].td.string
    if user.intro != intro:
        user.intro = intro
    if user.solved_num != solved_num:
        # Solved count changed, so there must be new submissions to fetch.
        user.solved_num = solved_num
        update_submission(user_id)
    user.update_time = datetime.datetime.utcnow()
    db.session.commit()
    return user
def update_submission(user_id):
    """Scrape the user's BOJ status pages and store new submissions.

    Walks the paginated status table newest-first and stops at either the
    most recent submission already stored or anything older than 14 days.
    Commits all parsed rows in one transaction at the end.
    """
    soup = get_soup_from_url("https://www.acmicpc.net/status?user_id=" + user_id)
    table = soup.find(id="status-table")
    trs = table.tbody.find_all('tr')
    latest_submit_id = 0
    submissions = Submission.query.filter_by(boj_id=user_id)
    if submissions.first() is not None:
        # Highest submit id we already have; scraping stops when reached.
        latest_submit_id = submissions.order_by(Submission.submit_id.desc()).first().submit_id
    i = 0
    while 1:
        # If it's the last submission (ran off the end of all pages), stop.
        try:
            tr = trs[i]
        except LookupError:
            break
        # Parse one status-table row.
        tds = tr.find_all('td')
        submit_id = int(tds[0].string)
        date = tds[8].a.attrs['title']
        # The tooltip carries the full Korean-formatted timestamp.
        date = datetime.datetime.strptime(date, "%Y년 %m월 %d일 %H시 %M분 %S초")
        if submit_id == latest_submit_id or (datetime.datetime.utcnow() - date).days >= 14:
            break
        try:
            problem_id = int(tds[2].a.string)
            problem_name = tds[2].a.attrs['title']
            result = tds[3].span.span.string.replace("\n", "").replace("\t", "")
            # Store the result as its index into the RESULTS constant.
            result = RESULTS.index(result)
            # On a wrong answer, memory and time are stored as 0.
            try:
                memory = int(tds[4].find(text=True, recursive=False))
            except TypeError:
                memory = 0
            try:
                time = int(tds[5].find(text=True, recursive=False))
            except TypeError:
                time = 0
            language = tds[6].string.replace("\n", "").replace("\t", "")
            # Some problems hide the code length; store 0 in that case.
            try:
                code_length = int(tds[7].string[:-2].replace("\n", "").replace("\t", "").split(" ")[0])
            except ValueError:
                code_length = 0
            # Save the parsed submission row.
            submit = Submission(submit_id=submit_id, datetime=date, problem_id=problem_id, problem_name=problem_name,
                                result=result, memory=memory, time=time, language=language, code_length=code_length,
                                boj_id=user_id)
            db.session.add(submit)
        except:
            # NOTE(review): bare except silently drops rows that fail to
            # parse — consider narrowing and logging.
            pass
        # Load the next submission page when the last row is reached.
        if tr == trs[-1]:
            soup = get_soup_from_url("https://www.acmicpc.net/status?user_id=" + user_id + "&top=" + str(submit_id))
            table = soup.find(id="status-table")
            trs = table.tbody.find_all('tr')
            # i resets to 0 then is incremented below, so the first row of
            # the new page is skipped — presumably it repeats the current
            # submission; verify against the &top= paging semantics.
            i = 0
        i += 1
    db.session.commit()
def update_accepted(index=0, batch_num=10):
    """Scrape accepted submissions for one slice of all users.

    The user table is split into *batch_num* equal slices; this worker
    processes slice *index* (the last slice absorbs the remainder).  For
    each user it walks the paginated accepted-only status list, stopping
    at the newest accepted submission already stored.  Designed to run in
    a separate process (see schedule_accepted).
    """
    with application.app_context():
        users = User.query.order_by(-User.id).all()
        count = User.query.count()
        size = count // batch_num
        proc = os.getpid()
        start = index * size
        # The final batch takes all remaining users.
        end = (index + 1) * size if index + 1 != batch_num else count
        for user in users[start:end]:
            user_id = user.boj_id
            print("user {0} start by: {1}".format(user_id, proc))
            # result_id=4 filters the status list to accepted submissions.
            url = "https://www.acmicpc.net/status?user_id=" + user_id + "&result_id=4"
            soup = get_soup_from_url(url)
            table = soup.find(id="status-table")
            trs = table.tbody.find_all('tr')
            latest_submit_id = 0
            submissions = AcceptedSubmission.query.filter_by(boj_id=user_id)
            prev_accepted_ids = [submission.problem_id for submission in submissions]
            new_accepted_ids = []
            if submissions.first() is not None:
                latest_submit_id = submissions.order_by(AcceptedSubmission.submit_id.desc()).first().submit_id
            i = 0
            while 1:
                # If it's the last submission (no more rows), stop.
                try:
                    tr = trs[i]
                except LookupError:
                    break
                # Parse one accepted-status row.
                tds = tr.find_all('td')
                submit_id = int(tds[0].string)
                if submit_id == latest_submit_id:
                    break
                try:
                    problem_id = int(tds[2].a.string)
                    # Only record the first accepted solve per problem.
                    if problem_id not in prev_accepted_ids:
                        date = tds[8].a.attrs['title']
                        date = datetime.datetime.strptime(date, "%Y년 %m월 %d일 %H시 %M분 %S초")
                        # On a wrong answer, memory and time are stored as 0.
                        try:
                            memory = int(tds[4].find(text=True, recursive=False))
                        except TypeError:
                            memory = 0
                        try:
                            time = int(tds[5].find(text=True, recursive=False))
                        except TypeError:
                            time = 0
                        language = tds[6].string.replace("\n", "").replace("\t", "")
                        # Some problems hide the code length; store 0 then.
                        try:
                            code_length = int(tds[7].string[:-2].replace("\n", "").replace("\t", "").split(" ")[0])
                        except ValueError:
                            code_length = 0
                        # Save: update in place if seen earlier in this run
                        # (keeps the oldest submit, newest stats), else insert.
                        if problem_id in new_accepted_ids:
                            db.session.query(AcceptedSubmission).filter_by(boj_id=user_id,
                                                                           problem_id=problem_id).update({
                                "memory": memory, "time": time, "language": language, "code_length": code_length,
                                "datetime": date
                            })
                        else:
                            accepted = AcceptedSubmission(submit_id=submit_id, problem_id=problem_id, datetime=date,
                                                          memory=memory, time=time, language=language,
                                                          code_length=code_length,
                                                          boj_id=user_id)
                            db.session.add(accepted)
                            new_accepted_ids.append(problem_id)
                except AttributeError:
                    pass
                # Load the next page when the last row is reached.
                if tr == trs[-1]:
                    soup = get_soup_from_url(
                        "https://www.acmicpc.net/status?user_id=" + user_id + "&result_id=4&top=" + str(submit_id))
                    table = soup.find(id="status-table")
                    trs = table.tbody.find_all('tr')
                    i = 0
                i += 1
            db.session.commit()
            print("user " + user_id + " done")
        print("Process {0} is done".format(proc))
def schedule_accepted():
    """Run update_accepted over all users in four parallel worker processes."""
    with application.app_context():
        BATCH_NUM = 4
        workers = [
            Process(target=update_accepted, args=(batch, BATCH_NUM))
            for batch in range(BATCH_NUM)
        ]
        for worker in workers:
            worker.start()
        # Block until every slice has been processed.
        for worker in workers:
            worker.join()
        return "OK"
def request_koo_api(api, data):
    """POST *data* as JSON to https://koosa.ga/api/<api>.

    Returns the decoded "result" field of the JSON response.
    """
    req = urllib.request.Request("https://koosa.ga/api/" + api,
                                 data=json.dumps(data).encode("utf-8"),
                                 headers=hds_json)
    fp = urllib.request.urlopen(req)
    try:
        source = fp.read()
    finally:
        # Always close the connection, even if read() raises
        # (the original leaked the response object on error).
        fp.close()
    return json.loads(source.decode("utf-8"))["result"]
def update_rank(event, context):
    """Scrape BOJ ranklist pages and record each user's daily rankings.

    *event* must carry {"kwargs": {"start": int, "end": int}} selecting
    the ranklist page range.  For every listed user with more than 19
    solved problems, stores {date: [boj_rank, koo_rank]} into the
    Ranking table (merging with prior days).  Returns "OK".
    """
    with application.app_context():
        my_kwargs = event.get("kwargs")
        date = datetime.datetime.utcnow().strftime('%Y/%m/%d')
        for page in range(my_kwargs["start"], my_kwargs["end"]):
            soup = get_soup_from_url("https://www.acmicpc.net/ranklist/" + str(page))
            table = soup.find(id='ranklist')
            trs = table.tbody.find_all('tr')
            boj_ids = list()
            boj_ranks = list()
            for tr in trs:
                tds = tr.find_all('td')
                # Rows are rank-ordered; stop at users with <= 19 solved.
                if int(tds[3].a.string.strip()) <= 19:
                    break
                boj_ids.append(''.join(tds[1].find_all(text=True, recursive=True)).strip())
                boj_ranks.append(int(tds[0].string))
            api = request_koo_api("user", boj_ids)
            koo_ranks = list(user["ranking"] for user in api)
            for boj_id, boj_rank, koo_rank_raw in zip(boj_ids, boj_ranks, koo_ranks):
                # Fix: compare to None with "is" (was "== None"); None
                # means unranked -> 0, otherwise shift the 0-based rank.
                koo_rank = 0 if koo_rank_raw is None else koo_rank_raw + 1
                data = {date: [boj_rank, koo_rank]}
                if not Ranking.query.filter_by(boj_id=boj_id).scalar():
                    ranking = Ranking(boj_id=boj_id, ranking=data)
                    db.session.add(ranking)
                    db.session.commit()
                else:
                    # Merge today's entry into the stored date->ranks JSON.
                    user = Ranking.query.filter_by(boj_id=boj_id)
                    new_ranking = user.first().ranking
                    new_ranking.update(data)
                    user.first().ranking = new_ranking
                    db.session.commit()
                print("{0} {1} {2}".format(boj_id, boj_rank, koo_rank))
        return "OK"
@application.route('/')
def render_index():
    """Landing page with all known users, most recently updated first."""
    recent_first = [u.boj_id for u in User.query.order_by(User.update_time).all()][::-1]
    # Ordered keys with None values: the template consumes a JSON object
    # whose key order encodes recency.
    names = OrderedDict((boj_id, None) for boj_id in recent_first)
    return render_template("index.html", user=json.dumps(names))
@application.route('/user')
def get_user():
    """User page: profile, recent submissions, accepted history, rankings.

    Validates the ?id= query against BOJ, auto-registers unknown users,
    and marks the profile stale ("updated" False) when it is older than
    10 minutes so the client can trigger /update_user.
    """
    submissions = []
    accepted_submissions = []
    ranking_date = []
    boj_rank = []
    koo_rank = []
    user_dict = []
    user_id = request.args.get("id")
    # Returns the canonical username (truthy) or False if not a BOJ user.
    acc_user_id = is_boj_user(user_id)
    if acc_user_id:
        if not User.query.filter_by(boj_id=acc_user_id).scalar():
            # First visit for this user: register them.
            user = User(boj_id=acc_user_id)
            db.session.add(user)
            db.session.commit()
    else:
        return render_template("index.html", id=user_id, err=True)
    user = User.query.filter_by(boj_id=acc_user_id).first()
    # Stale if never updated or last update was over 600 seconds ago.
    # NOTE(review): .seconds ignores whole days of a timedelta — a profile
    # exactly N days old can look fresh; .total_seconds() may be intended.
    if user.update_time is None or (datetime.datetime.utcnow() - user.update_time).seconds > 600:
        updated = False
    else:
        updated = True
    two_weeks_ago = datetime.date.today() - datetime.timedelta(days=14)
    submissions = Submission.query.filter_by(boj_id=acc_user_id).filter(Submission.datetime > two_weeks_ago).all()
    accepted_submissions = AcceptedSubmission.query.filter_by(boj_id=acc_user_id).order_by(
        AcceptedSubmission.datetime).all()
    if Ranking.query.filter_by(boj_id=acc_user_id).scalar():
        # ranking JSON maps "YYYY/MM/DD" -> [boj_rank, koo_rank].
        ranking_json = Ranking.query.filter_by(boj_id=acc_user_id).first().ranking
        ranking_date = sorted(list(ranking_json.keys()))
        ranking_values = [ranking_json[i] for i in ranking_date]
        boj_rank = [i[0] for i in ranking_values]
        koo_rank = [i[1] for i in ranking_values]
    # Same recency-ordered user list as the index page, for the nav bar.
    user_ids = [i.boj_id for i in User.query.order_by(User.update_time).all()][::-1]
    user_dict = OrderedDict()
    for i in user_ids:
        user_dict[i] = None
    return render_template("user.html", user=user, updated=updated, submissions=submissions,
                           accepted_submissions=accepted_submissions, ranking_date=ranking_date,
                           boj_rank=boj_rank, koo_rank=koo_rank, user_ids=json.dumps(user_dict))
@application.route('/_get_friend_data')
def get_friend_data():
    """AJAX endpoint: dates (YYYY-MM-DD) of a friend's accepted solves."""
    friend_id = request.args.get("friend_id")
    accepted = (AcceptedSubmission.query
                .filter_by(boj_id=friend_id)
                .order_by(AcceptedSubmission.datetime)
                .all())
    dates = [row.__dict__['datetime'].strftime("%Y-%m-%d") for row in accepted]
    return jsonify(ret=dates)
@application.route('/update_user')
def update_user():
    """AJAX-only endpoint that refreshes one user's scraped profile data."""
    # NOTE(review): request.is_xhr is deprecated/removed in newer Flask —
    # replace with an explicit X-Requested-With header check when upgrading.
    if request.is_xhr:
        user_id = request.args.get('id')
        update_profile(user_id)
        return "OK"
    else:
        # Direct browser navigation is rejected.
        abort(404)
@application.route('/statistics')
def statistics():
    """Render rows of 'ranking.txt' (space-separated fields, one per line)."""
    with open('ranking.txt', 'r') as f:
        rows = [line.strip('\n').split(' ') for line in f.readlines()]
    return render_template("statistics.html", data_list=rows)
if __name__ == "__main__":
    # NOTE(review): reloader disabled deliberately; reason not documented —
    # possibly to avoid double-running module-level setup. Confirm before changing.
    application.run(use_reloader=False)
|
from collections import OrderedDict
import datetime
import json
import os
from multiprocessing import Process
import urllib.request
from bs4 import BeautifulSoup
from flask import Flask, render_template, request, abort, jsonify
from flask_debugtoolbar import DebugToolbarExtension
from flask_sslify import SSLify
import requests
from models import db
from models import User, Submission, AcceptedSubmission, Ranking
application = Flask(__name__)
sslify = SSLify(application)
application.config.from_object(os.environ['APP_SETTINGS'])
application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(application)
toolbar = DebugToolbarExtension(application)
# define header for urllib request
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) ' \
'Chrome/58.0.3029.110 Safari/537.36'
hds = {'User-Agent': user_agent}
hds_json = {'User-Agent': user_agent, 'Content-Type': 'Application/json'}
# constants
RESULTS = ["기다리는 중", "재채점을 기다리는 중", "채점 준비중", "채점중", "맞았습니다!!", "출력 형식이 잘못되었습니다",
"틀렸습니다", "시간 초과", "메모리 초과", "출력 초과", "런타임 에러", "컴파일 에러"]
def is_boj_user(user_id):
url = "https://www.acmicpc.net/user/" + user_id
try:
req = urllib.request.Request(url, headers=hds)
urllib.request.urlopen(req, timeout=5)
except urllib.error.HTTPError:
return False
except UnicodeEncodeError:
return False
else:
soup = get_soup_from_url(url)
return soup.h1.string.strip()
def get_soup_from_url(url):
req = urllib.request.Request(url, headers=hds)
temp = 0
while temp < 10:
try:
fp = urllib.request.urlopen(req, timeout=10)
break
except:
temp += 1
source = fp.read()
fp.close()
return BeautifulSoup(source, "lxml")
def update_profile(user_id):
user = User.query.filter_by(boj_id=user_id).first()
soup = get_soup_from_url("https://www.acmicpc.net/user/" + user_id)
intro = soup.blockquote.string
solved_num = soup.tbody.find_all('tr')[1].td.string
if user.intro != intro:
user.intro = intro
if user.solved_num != solved_num:
user.solved_num = solved_num
update_submission(user_id)
user.update_time = datetime.datetime.utcnow()
db.session.commit()
return user
def update_submission(user_id):
soup = get_soup_from_url("https://www.acmicpc.net/status?user_id=" + user_id)
table = soup.find(id="status-table")
trs = table.tbody.find_all('tr')
latest_submit_id = 0
submissions = Submission.query.filter_by(boj_id=user_id)
if submissions.first() is not None:
latest_submit_id = submissions.order_by(Submission.submit_id.desc()).first().submit_id
i = 0
while 1:
# If it's last submission
try:
tr = trs[i]
except LookupError:
break
# Parse data
tds = tr.find_all('td')
submit_id = int(tds[0].string)
date = tds[8].a.attrs['title']
date = datetime.datetime.strptime(date, "%Y년 %m월 %d일 %H시 %M분 %S초")
if submit_id == latest_submit_id or (datetime.datetime.utcnow() - date).days >= 14:
break
try:
problem_id = int(tds[2].a.string)
problem_name = tds[2].a.attrs['title']
result = tds[3].span.span.string.replace("\n", "").replace("\t", "")
result = RESULTS.index(result)
# 틀렸을 경우 메모리와 시간은 0으로 한다.
try:
memory = int(tds[4].find(text=True, recursive=False))
except TypeError:
memory = 0
try:
time = int(tds[5].find(text=True, recursive=False))
except TypeError:
time = 0
language = tds[6].string.replace("\n", "").replace("\t", "")
# 코드 길이를 감추는 문제들이 있음. 그런 경우 code_length 를 0으로 해준다.
try:
code_length = int(tds[7].string[:-2].replace("\n", "").replace("\t", "").split(" ")[0])
except ValueError:
code_length = 0
# Save data
submit = Submission(submit_id=submit_id, datetime=date, problem_id=problem_id, problem_name=problem_name,
result=result, memory=memory, time=time, language=language, code_length=code_length,
boj_id=user_id)
db.session.add(submit)
except:
pass
# Load next submission page
if tr == trs[-1]:
soup = get_soup_from_url("https://www.acmicpc.net/status?user_id=" + user_id + "&top=" + str(submit_id))
table = soup.find(id="status-table")
trs = table.tbody.find_all('tr')
i = 0
i += 1
db.session.commit()
def update_accepted(index=0, batch_num=10):
with application.app_context():
users = User.query.order_by(-User.id).all()
count = User.query.count()
size = count // batch_num
proc = os.getpid()
start = index * size
end = (index + 1) * size if index + 1 != batch_num else count
for user in users[start:end]:
user_id = user.boj_id
print("user {0} start by: {1}".format(user_id, proc))
url = "https://www.acmicpc.net/status?user_id=" + user_id + "&result_id=4"
soup = get_soup_from_url(url)
table = soup.find(id="status-table")
trs = table.tbody.find_all('tr')
latest_submit_id = 0
submissions = AcceptedSubmission.query.filter_by(boj_id=user_id)
prev_accepted_ids = [submission.problem_id for submission in submissions]
new_accepted_ids = []
if submissions.first() is not None:
latest_submit_id = submissions.order_by(AcceptedSubmission.submit_id.desc()).first().submit_id
i = 0
while 1:
# If it's last submission
try:
tr = trs[i]
except LookupError:
break
# Parse data
tds = tr.find_all('td')
submit_id = int(tds[0].string)
if submit_id == latest_submit_id:
break
try:
problem_id = int(tds[2].a.string)
if problem_id not in prev_accepted_ids:
date = tds[8].a.attrs['title']
date = datetime.datetime.strptime(date, "%Y년 %m월 %d일 %H시 %M분 %S초")
# 틀렸을 경우 메모리와 시간은 0으로 한다.
try:
memory = int(tds[4].find(text=True, recursive=False))
except TypeError:
memory = 0
try:
time = int(tds[5].find(text=True, recursive=False))
except TypeError:
time = 0
language = tds[6].string.replace("\n", "").replace("\t", "")
# 코드 길이를 감추는 문제들이 있음. 그런 경우 code_length 를 0으로 해준다.
try:
code_length = int(tds[7].string[:-2].replace("\n", "").replace("\t", "").split(" ")[0])
except ValueError:
code_length = 0
# Save data
if problem_id in new_accepted_ids:
db.session.query(AcceptedSubmission).filter_by(boj_id=user_id,
problem_id=problem_id).update({
"memory": memory, "time": time, "language": language, "code_length": code_length,
"datetime": date
})
else:
accepted = AcceptedSubmission(submit_id=submit_id, problem_id=problem_id, datetime=date,
memory=memory, time=time, language=language,
code_length=code_length,
boj_id=user_id)
db.session.add(accepted)
new_accepted_ids.append(problem_id)
except AttributeError:
pass
# Load next submission page
if tr == trs[-1]:
soup = get_soup_from_url(
"https://www.acmicpc.net/status?user_id=" + user_id + "&result_id=4&top=" + str(submit_id))
table = soup.find(id="status-table")
trs = table.tbody.find_all('tr')
i = 0
i += 1
db.session.commit()
print("user " + user_id + " done")
print("Process {0} is done".format(proc))
def schedule_accepted():
with application.app_context():
BATCH_NUM = 4
procs = []
for index in range(BATCH_NUM):
proc = Process(target=update_accepted, args=(index, BATCH_NUM))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
return "OK"
def request_koo_api(api, data):
req = urllib.request.Request("https://koosa.ga/api/" + api, data=json.dumps(data).encode("utf-8"), headers=hds_json)
fp = urllib.request.urlopen(req)
source = fp.read()
fp.close()
return json.loads(source.decode("utf-8"))["result"]
def update_rank(event, context):
with application.app_context():
my_kwargs = event.get("kwargs")
date = datetime.datetime.utcnow().strftime('%Y/%m/%d')
for i in range(my_kwargs["start"], my_kwargs["end"]):
url = "https://www.acmicpc.net/ranklist/" + str(i)
soup = get_soup_from_url(url)
table = soup.find(id='ranklist')
trs = table.tbody.find_all('tr')
boj_ids = list()
boj_ranks = list()
for tr in trs:
tds = tr.find_all('td')
if int(tds[3].a.string.strip()) <= 19:
break
boj_ids.append(''.join(tds[1].find_all(text=True, recursive=True)).strip())
boj_ranks.append(int(tds[0].string))
api = request_koo_api("user", boj_ids)
koo_ranks = list(user["ranking"] for user in api)
for _ in range(len(boj_ids)):
boj_id = boj_ids[_]
boj_rank = boj_ranks[_]
if koo_ranks[_] == None:
koo_rank = 0
else:
koo_rank = koo_ranks[_] + 1
data = {date: [boj_rank, koo_rank]}
if not Ranking.query.filter_by(boj_id=boj_id).scalar():
ranking = Ranking(boj_id=boj_id, ranking=data)
db.session.add(ranking)
db.session.commit()
else:
user = Ranking.query.filter_by(boj_id=boj_id)
new_ranking = user.first().ranking
new_ranking.update(data)
user.first().ranking = new_ranking
db.session.commit()
print("{0} {1} {2}".format(boj_id, boj_rank, koo_rank))
return "OK"
@application.route('/')
def render_index():
user = [i.boj_id for i in User.query.order_by(User.update_time).all()][::-1]
user_dict = OrderedDict()
for i in user:
user_dict[i] = None
return render_template("index.html", user = json.dumps(user_dict))
@application.route('/user')
def get_user():
submissions = []
accepted_submissions = []
ranking_date = []
boj_rank = []
koo_rank = []
user_dict = []
user_id = request.args.get("id")
acc_user_id = is_boj_user(user_id)
if acc_user_id:
if not User.query.filter_by(boj_id=acc_user_id).scalar():
user = User(boj_id=acc_user_id)
db.session.add(user)
db.session.commit()
else:
return render_template("index.html", id=user_id, err=True)
user = User.query.filter_by(boj_id=acc_user_id).first()
if user.update_time is None or (datetime.datetime.utcnow() - user.update_time).seconds > 600:
updated = False
else:
updated = True
two_weeks_ago = datetime.date.today() - datetime.timedelta(days=14)
submissions = Submission.query.filter_by(boj_id=acc_user_id).filter(Submission.datetime > two_weeks_ago).all()
accepted_submissions = AcceptedSubmission.query.filter_by(boj_id=acc_user_id).order_by(
AcceptedSubmission.datetime).all()
if Ranking.query.filter_by(boj_id=acc_user_id).scalar():
ranking_json = Ranking.query.filter_by(boj_id=acc_user_id).first().ranking
ranking_date = sorted(list(ranking_json.keys()))
ranking_values = [ranking_json[i] for i in ranking_date]
boj_rank = [i[0] for i in ranking_values]
koo_rank = [i[1] for i in ranking_values]
user_ids = [i.boj_id for i in User.query.order_by(User.update_time).all()][::-1]
user_dict = OrderedDict()
for i in user_ids:
user_dict[i] = None
return render_template("user.html", user=user, updated=updated, submissions=submissions,
accepted_submissions=accepted_submissions, ranking_date=ranking_date,
boj_rank=boj_rank, koo_rank=koo_rank, user_ids=json.dumps(user_dict))
@application.route('/_get_friend_data')
def get_friend_data():
friend_id = request.args.get("friend_id")
friend_accepted = AcceptedSubmission.query.filter_by(boj_id=friend_id).order_by(AcceptedSubmission.datetime).all()
ret = [d.__dict__['datetime'].strftime("%Y-%m-%d") for d in friend_accepted]
return jsonify(ret=ret)
@application.route('/update_user')
def update_user():
if request.is_xhr:
user_id = request.args.get('id')
update_profile(user_id)
return "OK"
else:
abort(404)
@application.route('/statistics')
def statistics():
with open('ranking.txt', 'r') as f:
data_list = []
data_txt = f.readlines()
for data in data_txt:
data_list.append(data.strip('\n').split(' '))
return render_template("statistics.html", data_list=data_list)
if __name__ == "__main__":
application.run(use_reloader=False)
|
ko
| 0.992772
|
# define header for urllib request # constants # If it's last submission # Parse data # 틀렸을 경우 메모리와 시간은 0으로 한다. # 코드 길이를 감추는 문제들이 있음. 그런 경우 code_length 를 0으로 해준다. # Save data # Load next submission page # If it's last submission # Parse data # 틀렸을 경우 메모리와 시간은 0으로 한다. # 코드 길이를 감추는 문제들이 있음. 그런 경우 code_length 를 0으로 해준다. # Save data # Load next submission page
| 2.143552
| 2
|
qwer.py
|
CSID-DGU/-2020-1-OSSP1-ninetynine-2
| 1
|
6627765
|
#image_path ref_path output_path mode (1) gpu
def texture_editing(prn, image_path, ref_path, output_path, mode = 1):
# read image
image = imread(image_path)
[h, w, _] = image.shape
#-- 1. 3d reconstruction -> get texture.
pos = prn.process(image)
vertices = prn.get_vertices(pos)
image = image/255.
texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
#-- 2. Texture Editing
Mode = mode
# change part of texture(for data augumentation/selfie editing. Here modify eyes for example)
if Mode == 0:
# load eye mask
uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_grey=True)/255.
uv_face = imread('Data/uv-data/uv_face.png', as_grey=True)/255.
eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)
# texture from another image or a processed texture
ref_image = imread(args.ref_path)
ref_pos = prn.process(ref_image)
ref_image = ref_image/255.
ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
# modify texture
new_texture = texture*(1 - eye_mask[:,:,np.newaxis]) + ref_texture*eye_mask[:,:,np.newaxis]
# change whole face(face swap)
elif Mode == 1:
# texture from another image or a processed texture
ref_image = imread(ref_path)
ref_pos = prn.process(ref_image)
ref_image = ref_image/255.
ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
ref_vertices = prn.get_vertices(ref_pos)
new_texture = ref_texture#(texture + ref_texture)/2.
else:
print('Wrong Mode! Mode should be 0 or 1.')
exit()
#-- 3. remap to input image.(render)
vis_colors = np.ones((vertices.shape[0], 1))
face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c = 1)
face_mask = np.squeeze(face_mask > 0).astype(np.float32)
new_colors = prn.get_colors_from_texture(new_texture)
new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c = 3)
new_image = image*(1 - face_mask[:,:,np.newaxis]) + new_image*face_mask[:,:,np.newaxis]
# Possion Editing for blending image
vis_ind = np.argwhere(face_mask>0)
vis_min = np.min(vis_ind, 0)
vis_max = np.max(vis_ind, 0)
center = (int((vis_min[1] + vis_max[1])/2+0.5), int((vis_min[0] + vis_max[0])/2+0.5))
output = cv2.seamlessClone((new_image*255).astype(np.uint8), (image*255).astype(np.uint8), (face_mask*255).astype(np.uint8), center, cv2.NORMAL_CLONE)
# save output
imsave(output_path, output)
print('Done.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Texture Editing by PRN')

    parser.add_argument('-i', '--image_path', default='TestImages/AFLW2000/image00081.jpg', type=str,
                        help='path to input image')
    parser.add_argument('-r', '--ref_path', default='TestImages/trump.jpg', type=str,
                        help='path to reference image(texture ref)')
    parser.add_argument('-o', '--output_path', default='TestImages/output.jpg', type=str,
                        help='path to save output')
    parser.add_argument('--mode', default=1, type=int,
                        help='ways to edit texture. 0 for modifying parts, 1 for changing whole')
    parser.add_argument('--gpu', default='0', type=str,
                        help='set gpu id, -1 for CPU')

    args = parser.parse_args()

    # ---- init PRN
    # BUG FIX: CUDA_VISIBLE_DEVICES must be a string (assigning the int -1
    # raised TypeError) and the parsed --gpu flag was being ignored.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU id string, '-1' for CPU
    prn = PRN(is_dlib = True)

    # BUG FIX: texture_editing takes five separate arguments, not the argparse
    # Namespace -- unpack the parsed options explicitly.
    texture_editing(prn, args.image_path, args.ref_path, args.output_path, args.mode)
|
#image_path ref_path output_path mode (1) gpu
def texture_editing(prn, image_path, ref_path, output_path, mode = 1):
# read image
image = imread(image_path)
[h, w, _] = image.shape
#-- 1. 3d reconstruction -> get texture.
pos = prn.process(image)
vertices = prn.get_vertices(pos)
image = image/255.
texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
#-- 2. Texture Editing
Mode = mode
# change part of texture(for data augumentation/selfie editing. Here modify eyes for example)
if Mode == 0:
# load eye mask
uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_grey=True)/255.
uv_face = imread('Data/uv-data/uv_face.png', as_grey=True)/255.
eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)
# texture from another image or a processed texture
ref_image = imread(args.ref_path)
ref_pos = prn.process(ref_image)
ref_image = ref_image/255.
ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
# modify texture
new_texture = texture*(1 - eye_mask[:,:,np.newaxis]) + ref_texture*eye_mask[:,:,np.newaxis]
# change whole face(face swap)
elif Mode == 1:
# texture from another image or a processed texture
ref_image = imread(ref_path)
ref_pos = prn.process(ref_image)
ref_image = ref_image/255.
ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
ref_vertices = prn.get_vertices(ref_pos)
new_texture = ref_texture#(texture + ref_texture)/2.
else:
print('Wrong Mode! Mode should be 0 or 1.')
exit()
#-- 3. remap to input image.(render)
vis_colors = np.ones((vertices.shape[0], 1))
face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c = 1)
face_mask = np.squeeze(face_mask > 0).astype(np.float32)
new_colors = prn.get_colors_from_texture(new_texture)
new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c = 3)
new_image = image*(1 - face_mask[:,:,np.newaxis]) + new_image*face_mask[:,:,np.newaxis]
# Possion Editing for blending image
vis_ind = np.argwhere(face_mask>0)
vis_min = np.min(vis_ind, 0)
vis_max = np.max(vis_ind, 0)
center = (int((vis_min[1] + vis_max[1])/2+0.5), int((vis_min[0] + vis_max[0])/2+0.5))
output = cv2.seamlessClone((new_image*255).astype(np.uint8), (image*255).astype(np.uint8), (face_mask*255).astype(np.uint8), center, cv2.NORMAL_CLONE)
# save output
imsave(output_path, output)
print('Done.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Texture Editing by PRN')

    parser.add_argument('-i', '--image_path', default='TestImages/AFLW2000/image00081.jpg', type=str,
                        help='path to input image')
    parser.add_argument('-r', '--ref_path', default='TestImages/trump.jpg', type=str,
                        help='path to reference image(texture ref)')
    parser.add_argument('-o', '--output_path', default='TestImages/output.jpg', type=str,
                        help='path to save output')
    parser.add_argument('--mode', default=1, type=int,
                        help='ways to edit texture. 0 for modifying parts, 1 for changing whole')
    parser.add_argument('--gpu', default='0', type=str,
                        help='set gpu id, -1 for CPU')

    args = parser.parse_args()

    # ---- init PRN
    # BUG FIX: CUDA_VISIBLE_DEVICES must be a string (assigning the int -1
    # raised TypeError) and the parsed --gpu flag was being ignored.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # GPU id string, '-1' for CPU
    prn = PRN(is_dlib = True)

    # BUG FIX: texture_editing takes five separate arguments, not the argparse
    # Namespace -- unpack the parsed options explicitly.
    texture_editing(prn, args.image_path, args.ref_path, args.output_path, args.mode)
|
en
| 0.616336
|
#image_path ref_path output_path mode (1) gpu # read image #-- 1. 3d reconstruction -> get texture. #-- 2. Texture Editing # change part of texture(for data augumentation/selfie editing. Here modify eyes for example) # load eye mask # texture from another image or a processed texture # modify texture # change whole face(face swap) # texture from another image or a processed texture #(texture + ref_texture)/2. #-- 3. remap to input image.(render) # Possion Editing for blending image # save output #image_path ref_path output_path mode (1) gpu # ---- init PRN # GPU number, -1 for CPU
| 2.611456
| 3
|
taskana_api/entities/tasks.py
|
aK0nshin/taskana-api
| 0
|
6627766
|
<filename>taskana_api/entities/tasks.py<gh_stars>0
from sqlmodel import SQLModel
class TaskBase(SQLModel):
    """Fields shared by all task schemas."""
    # Short human-readable task name.
    title: str
    # Free-form details describing the task.
    description: str
class TaskCreate(TaskBase):
    """Request schema for creating a task (same fields as TaskBase)."""
    pass
class TaskUpdate(TaskBase):
    """Request schema for updating a task (same fields as TaskBase)."""
    pass
|
<filename>taskana_api/entities/tasks.py<gh_stars>0
from sqlmodel import SQLModel
class TaskBase(SQLModel):
    """Fields shared by all task schemas."""
    # Short human-readable task name.
    title: str
    # Free-form details describing the task.
    description: str
class TaskCreate(TaskBase):
    """Request schema for creating a task (same fields as TaskBase)."""
    pass
class TaskUpdate(TaskBase):
    """Request schema for updating a task (same fields as TaskBase)."""
    pass
|
none
| 1
| 1.37976
| 1
|
|
app.py
|
paul-404/WhatCanDataDo-Dashboard-development
| 2
|
6627767
|
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import pandas as pd
import requests
import plotly.graph_objects as go
from dash.dependencies import Input, Output
### Launch app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.config.suppress_callback_exceptions = True # suppress callback errors
server = app.server
app.title="COVID-19 Live Dashboard"
### Import Data from JHU CSSE & create new country dataframe
# Import Data
# NOTE(review): these CSVs are downloaded at import time; a network failure
# here prevents the app from starting -- consider adding error handling.
df_cases_jhu = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
df_recovered_jhu = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv")
df_deaths_jhu = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv")
# Country Dataframe: Create new dataframes with entries per country (sum over province) & rename columns to single words & drop Lat and Long columns
df_cases = df_cases_jhu.rename(columns={"Country/Region": "Country", "Province/State": "Province"}).groupby("Country").sum().reset_index().drop(["Lat", "Long"], axis=1)
df_recovered = df_recovered_jhu.rename(columns={"Country/Region": "Country", "Province/State": "Province"}).groupby("Country").sum().reset_index().drop(["Lat", "Long"], axis=1)
df_deaths = df_deaths_jhu.rename(columns={"Country/Region": "Country", "Province/State": "Province"}).groupby("Country").sum().reset_index().drop(["Lat", "Long"], axis=1)
### Call APIs for live counts world
# NOTE(review): module-level network call with no timeout -- TODO confirm
# startup behavior when the API is unreachable.
live_all = requests.get("https://corona.lmao.ninja/all").json()
### Create Country Selection Lists
countries_top10 = ["US", "Italy", "Spain", "China", "Germany", "France", "Iran", "United Kingdom", "Switzerland", "Turkey"]
countries_top15 = ["US", "Italy", "Spain", "China", "Germany", "France", "Iran", "United Kingdom", "Switzerland", "Turkey", "Belgium", "Netherlands", "Austria", "Korea, South", "Canada"]
countries_top20 = ["US", "Italy", "Spain", "China", "Germany", "France", "Iran", "United Kingdom", "Switzerland", "Turkey", "Belgium", "Netherlands", "Austria", "Korea, South", "Canada", "Portugal", "Brazil", "Israel", "Norway", "Australia"]
countries_asia = ["China", "Korea, South", "Georgia", "Malaysia", "Philippines", "Japan", "Pakistan", "India", "Thailand", "Indonesia"]
countries_europe = ["Italy", "Spain", "Germany", "France",
 "United Kingdom", "Switzerland", "Belgium", "Netherlands", "Austria", "Portugal"]
# Country selection depending on Measures:
countries_mask = ["China", "Korea, South", "Japan", "Singapore", "Taiwan*", "Czechia"]
countries_nomask = ["US", "Italy", "Spain", "Germany", "France", "United Kingdom"]
threshold = 100 # Minimum number of cases on first day for trend plots
### Create Dropdown options
dropdown_options = [{"label" : i, "value" : i} for i in df_cases["Country"].unique()]
### Create Map figure
### Create Map figures (world / Europe / Asia)
# Bubble sizes are scaled relative to the largest country case count on "4/5/20".
scale = df_cases["4/5/20"].max()
# Add deaths to the recovered bubble size: the deceased bubble is drawn on top
# of the recovered one, so recovered+deaths together visually match the
# total-confirmed bubble once an outbreak has ended.
df_recovered_size = df_recovered.iloc[:, -1] + df_deaths.iloc[:, -1]

def _bubble_trace(locations, text, size, color, name):
    """One Scattergeo bubble trace (marker size in 'area' mode)."""
    return go.Scattergeo(
        locationmode='country names',
        locations=locations,
        text=text,
        marker=dict(
            size=size * 1000 / scale,
            line_color='rgb(40,40,40)',
            line_width=0.5,
            sizemode='area',
            color=color,
            opacity=0.7,
        ),
        name=name,
    )

def _make_region_map(scope, title):
    """Build the confirmed/recovered/deceased bubble map for *scope*.

    The three regional maps previously duplicated ~60 lines each; this helper
    builds them from one template so style changes happen in one place.
    """
    fig = go.Figure()
    fig.add_trace(_bubble_trace(df_cases['Country'], df_cases.iloc[:, -1],
                                df_cases.iloc[:, -1], 'red', 'total confirmed'))
    fig.add_trace(_bubble_trace(df_recovered['Country'], df_recovered.iloc[:, -1],
                                df_recovered_size, 'green', 'total recovered'))
    fig.add_trace(_bubble_trace(df_deaths['Country'], df_deaths.iloc[:, -1],
                                df_deaths.iloc[:, -1], 'yellow', 'total deceased'))
    fig.update_layout(
        title=title,
        showlegend=True,
        legend_orientation="h",
        legend=dict(x=0.25, y=0),
        height=400,
        margin={"r": 0, "t": 50, "l": 0, "b": 0},
        geo=dict(
            scope=scope,
            landcolor='rgb(217, 217, 217)',
            showcountries=True,
            countrycolor="white",
            coastlinecolor="white",
            showframe=True,
            projection_type='natural earth',
        ),
    )
    return fig

# World / Europe / Asia maps used by the 'Maps' tab.
fig_world = _make_region_map('world', 'World')
fig_europe = _make_region_map('europe', 'Europe')
fig_asia = _make_region_map('asia', 'Asia')
### App Layout
# Top-level page: a header row, the tab bar, and an empty container that the
# render_content callback fills with the selected tab's content.
app.layout = html.Div([
    html.Div([
        html.H1("COVID-19 Live Dashboard"),
        dcc.Markdown("**Current Status: Under Construction!** Note: Graphs and figures only visualize the number of reported cases and not the actual number of cases. Testing, case definitions and reporting protocols varies between regions and strongly affects the number of reported cases.")
    ], className = "row"),
    dcc.Tabs(
        id="tabs-with-classes",
        value='tab-1',
        parent_className='custom-tabs',
        className='custom-tabs-container',
        children=[
            dcc.Tab(
                label='World',
                value='tab-1',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='Country',
                value='tab-2',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='Trends',
                value='tab-3', className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='Models',
                value='tab-4',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='Maps',
                value='tab-5',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='About',
                value='tab-6',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
        ]),
    # Filled by the render_content callback when the tab selection changes.
    html.Div(id='tabs-content-classes')
], className='ten columns offset-by-one')
### Callbacks
# Callback Dropdown - KPIs
@app.callback(Output('card-cases', 'children'),
              [Input('my-dropdown', 'value')])
def update_children(X):
    """Live total confirmed-case count for the selected country."""
    stats = requests.get("https://corona.lmao.ninja/countries/" + str(X)).json()
    return html.H3(f'{stats["cases"] :,}', className="card-title")
@app.callback(Output('card-recovered', 'children'),
              [Input('my-dropdown', 'value')])
def update_children(X):
    """Live recovered count for the selected country."""
    stats = requests.get("https://corona.lmao.ninja/countries/" + str(X)).json()
    return html.H3(f'{stats["recovered"] :,}', className="card-title")
@app.callback(Output('card-deceased', 'children'),
              [Input('my-dropdown', 'value')])
def update_children(X):
    """Live death count for the selected country."""
    stats = requests.get("https://corona.lmao.ninja/countries/" + str(X)).json()
    return html.H3(f'{stats["deaths"] :,}', className="card-title")
# Callbacks Dropdown - Curves
@app.callback(Output('graph-confirmed', 'figure'),
              [Input('my-dropdown', 'value')])
def update_figure(X):
    """Line chart of cumulative confirmed cases for the selected country."""
    country = str(X)
    trace = dict(
        # First column is 'Country'; the rest are daily date columns.
        x=df_cases.columns[1:],
        y=df_cases[df_cases['Country'] == country].sum()[1:],
        mode='lines+markers',
        opacity=0.7,
        marker={'size': 7, 'line': {'width': 1}},
        line={'width': 5},
        name=country,
    )
    layout = dict(
        xaxis={'type': 'lin'},
        yaxis={'type': 'lin', 'title': 'Total Confirmed Cases'},
        margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
        legend={'x': 1, 'y': 1},
        hovermode='closest',
        title='Total Confirmed Cases in ' + country,
    )
    return {'data': [trace], 'layout': layout}
@app.callback(Output('graph-deceased', 'figure'),
              [Input('my-dropdown', 'value')])
def update_figure(X):
    """Line chart of cumulative deaths for the selected country."""
    country = str(X)
    trace = dict(
        # First column is 'Country'; the rest are daily date columns.
        x=df_deaths.columns[1:],
        y=df_deaths[df_deaths['Country'] == country].sum()[1:],
        mode='lines+markers',
        opacity=0.7,
        marker={'size': 7, 'line': {'width': 1}, 'color': 'orange'},
        line={'width': 5, 'color': 'orange'},
        name=country,
    )
    layout = dict(
        xaxis={'type': 'lin'},
        yaxis={'type': 'lin', 'title': 'Total Deceased'},
        margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
        legend={'x': 1, 'y': 1},
        hovermode='closest',
        title='Total Deceased in ' + country,
    )
    return {'data': [trace], 'layout': layout}
@app.callback(Output('graph-daily', 'figure'),
              [Input('my-dropdown', 'value')])
def update_figure(X):
    """Bar chart of daily new confirmed cases (first differences)."""
    country = str(X)
    trace = dict(
        x=df_cases.columns[1:],
        # diff() turns the cumulative series into daily increments.
        y=df_cases[df_cases['Country'] == country].sum()[1:].diff(),
        type='bar',
        opacity=0.7,
        marker={'size': 7, 'line': {'width': 1}},
        name=country,
    )
    layout = dict(
        xaxis={},
        yaxis={'title': 'Daily New Cases'},
        margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
        legend={'x': 1, 'y': 1},
        hovermode='closest',
        title='Daily New Confirmed Cases in ' + country,
    )
    return {'data': [trace], 'layout': layout}
@app.callback(Output('graph-daily-deceased', 'figure'),
              [Input('my-dropdown', 'value')])
def update_figure(X):
    """Bar chart of daily new deaths (first differences)."""
    country = str(X)
    trace = dict(
        x=df_deaths.columns[1:],
        # diff() turns the cumulative series into daily increments.
        y=df_deaths[df_deaths['Country'] == country].sum()[1:].diff(),
        type='bar',
        opacity=0.7,
        marker={'size': 7, 'line': {'width': 1}, 'color': 'orange'},
        name=country,
    )
    layout = dict(
        xaxis={},
        yaxis={'title': 'Daily New Deceased'},
        margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
        legend={'x': 1, 'y': 1},
        hovermode='closest',
        title='Daily New Deceased in ' + country,
    )
    return {'data': [trace], 'layout': layout}
# Callback tabs
@app.callback(Output('tabs-content-classes', 'children'),
              [Input('tabs-with-classes', 'value')])
def render_content(tab):
    """Return the page content for the selected tab.

    tab-1: world KPI cards + world-wide time-series graphs.
    tab-2: per-country dropdown, KPI cards, and graph placeholders (filled by
        the dropdown callbacks above).
    tab-3: log-scale trend comparisons across countries.
    tab-4: models placeholder (example graphs kept commented out).
    tab-5: bubble maps (world / Europe / Asia).
    tab-6: about/disclaimer text.
    """
    # --- Tab 1: world overview -------------------------------------------
    if tab == 'tab-1':
        return html.Div([
            html.Div([
                # KPI cards built from the live API snapshot fetched at startup.
                html.Div([
                    dbc.Card(
                        [
                            dbc.CardHeader("Total Cases:"),
                            dbc.CardBody(
                                [html.H3(str(f'{live_all["cases"] :,}'), className="card-title")]
                            ),
                        ],
                        style={"width": "30rem"},
                    )
                ], className="three columns"),
                html.Div([
                    dbc.Card(
                        [
                            dbc.CardHeader("Active Cases:"),
                            dbc.CardBody(
                                [
                                    html.H3(str(f'{live_all["active"] :,}'), className="card-title")
                                ]
                            ),
                        ],
                        style={"width": "30rem"},
                    )
                ], className="three columns"),
                html.Div([
                    dbc.Card(
                        [
                            dbc.CardHeader("Total Recovered:"),
                            dbc.CardBody(
                                [
                                    html.H3(str(f'{live_all["recovered"] :,}'), className="card-title")
                                ]
                            ),
                        ],
                        style={"width": "30rem"},
                    )
                ], className="three columns"),
                html.Div([
                    dbc.Card(
                        [
                            dbc.CardHeader("Total Deceased:"),
                            dbc.CardBody(
                                [
                                    html.H3(str(f'{live_all["deaths"] :,}'), className="card-title")
                                ]
                            ),
                        ],
                        style={"width": "30rem"},
                    )
                ], className="three columns"),
            ], className="row"),
            # World-wide time series: summing over all rows gives the global totals.
            html.Div([
                html.Div([
                    dcc.Graph(
                        id='graph-confirmed-world',
                        figure={
                            'data': [
                                dict(
                                    x=df_cases.columns[1:],
                                    y=df_cases.sum()[1:],
                                    mode='lines+markers',
                                    opacity=0.7,
                                    marker={
                                        'size': 7,
                                        'line': {'width': 1}
                                    },
                                    line={
                                        'width': 5
                                    },
                                    name="World"
                                )
                            ],
                            'layout': dict(
                                xaxis={'type': 'lin'},
                                yaxis={'type': 'lin', 'title': 'Total Confirmed Cases'},
                                margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
                                legend={'x': 1, 'y': 1},
                                hovermode='closest',
                                title = 'Total Confirmed Cases'
                                # title="Trend of total confirmed cases"
                            )
                        }
                    )
                ], className="row"),
                html.Div([
                    dcc.Graph(
                        id='graph-daily-world',
                        figure={
                            'data': [
                                dict(
                                    x=df_cases.columns[1:],
                                    y=df_cases.sum()[1:].diff(),
                                    type='bar',
                                    opacity=0.7,
                                    marker={
                                        'size': 7,
                                        'line': {'width': 1},
                                    },
                                    name="World"
                                )
                            ],
                            'layout': dict(
                                xaxis={},
                                yaxis={'title': 'Daily New Cases'},
                                margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
                                legend={'x': 1, 'y': 1},
                                hovermode='closest',
                                title = 'Daily New Confirmed Cases'
                                # title="Trend of total confirmed cases"
                            )
                        }
                    )
                ], className="row"),
                html.Div([
                    dcc.Graph(
                        id='graph-deceased-world',
                        figure={
                            'data': [
                                dict(
                                    x=df_deaths.columns[1:],
                                    y=df_deaths.sum()[1:],
                                    mode='lines+markers',
                                    opacity=0.7,
                                    marker={
                                        'size': 7,
                                        'line': {'width': 1},
                                        'color':'orange'
                                    },
                                    line={
                                        'width': 5,
                                        'color':'orange'
                                    },
                                    name="World"
                                )
                            ],
                            'layout': dict(
                                xaxis={},
                                yaxis={'type': 'lin', 'title': 'Total Deceased'},
                                margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
                                legend={'x': 1, 'y': 1},
                                hovermode='closest',
                                title = 'Total Deceased'
                                # title="Trend of total confirmed cases"
                            )
                        }
                    )
                ], className="row"),
                html.Div([
                    dcc.Graph(
                        id='graph-daily-deceased-world',
                        figure={
                            'data': [
                                dict(
                                    x=df_deaths.columns[1:],
                                    y=df_deaths.sum()[1:].diff(),
                                    type='bar',
                                    opacity=0.7,
                                    marker={
                                        'size': 7,
                                        'line': {'width': 1},
                                        'color':'orange'
                                    },
                                    name="World"
                                )
                            ],
                            'layout': dict(
                                xaxis={},
                                yaxis={'title': 'Daily New Deceased'},
                                margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
                                legend={'x': 1, 'y': 1},
                                hovermode='closest',
                                title = 'Daily New Deceased'
                                # title="Trend of total confirmed cases"
                            )
                        }
                    )
                ], className="row"),
            ])
        ])
    # --- Tab 2: per-country view -----------------------------------------
    # Graphs and cards here are empty shells; the dropdown callbacks above
    # populate them when the selection changes (initial values show Germany).
    elif tab == 'tab-2':
        return html.Div([
            html.Div([
                html.Div([
                    html.Label("Select a country:"),
                    dcc.Dropdown(
                        id="my-dropdown",
                        options=dropdown_options,
                        value="Germany",
                        placeholder="Select a country",
                    ),
                ], className="three columns"),
                html.Div([
                    dbc.Card(
                        [
                            dbc.CardHeader("Total Cases:"),
                            dbc.CardBody(id="card-cases", children=
                                [
                                    html.H3(str(f'{requests.get("https://corona.lmao.ninja/countries/"+"Germany").json()["cases"] :,}'), className="card-title")
                                ]
                            )
                        ],
                        style={"width": "10rem"},
                    )
                ], className="three columns"),
                html.Div([
                    dbc.Card(
                        [
                            dbc.CardHeader("Total Recovered:"),
                            dbc.CardBody(id="card-recovered", children=
                                [
                                    html.H3(str(f'{requests.get("https://corona.lmao.ninja/countries/Germany").json()["recovered"] :,}'), className="card-title")
                                ]
                            ),
                        ],
                        style={"width": "30rem"},
                    )
                ], className="three columns"),
                html.Div([
                    dbc.Card(
                        [
                            dbc.CardHeader("Total Deceased:"),
                            dbc.CardBody(id="card-deceased", children=
                                [
                                    html.H3(str(f'{requests.get("https://corona.lmao.ninja/countries/Germany").json()["deaths"] :,}'), className="card-title")
                                ]
                            ),
                        ],
                        style={"width": "30rem"},
                    )
                ], className="three columns"),
            ], className="row"),
            html.Div([
                html.Div([
                    dcc.Graph(id='graph-confirmed')
                ], className="twelve columns")
            ], className="row"),
            html.Div([
                html.Div([
                    dcc.Graph(id='graph-daily')
                ], className="twelve columns"),
            ], className="row"),
            html.Div([
                html.Div([
                    dcc.Graph(id='graph-deceased')
                ], className="twelve columns"),
            ], className="row"),
            html.Div([
                html.Div([
                    dcc.Graph(id='graph-daily-deceased')
                ], className="twelve columns"),
            ], className="row"),
        ])
    # --- Tab 3: cross-country trend comparison (log scale, aligned on the
    # day each country crossed `threshold` cases) --------------------------
    elif tab == 'tab-3':
        return html.Div([
            dcc.Markdown('To show only one country, double-click on the country in the legend. Single-click on other countries in the legend to add them to the selection. Double-click again to reset the selection.'),
            html.Div([
                dcc.Graph(
                    id='graph-trend-1',
                    figure={
                        'data': [
                            dict(
                                y=df_cases[df_cases['Country'] == i].sum()[1:][df_cases[df_cases['Country'] == i].sum()[1:].gt(threshold)],
                                mode='lines',
                                opacity=0.7,
                                marker={
                                    'size': 5,
                                    'line': {'width': 1},
                                },
                                name=i
                            ) for i in df_cases["Country"].unique()
                        ],
                        'layout': dict(
                            xaxis={'range':[0,120],'type': 'lin', 'title':'''Number of days since >100 cases'''},
                            yaxis={'type': 'log', 'title': 'Total Confirmed Cases'},
                            margin={'l': 100, 'b': 100, 't': 50, 'r': 100},
                            legend={'x': 1, 'y': 1},
                            hovermode='closest',
                            title="Trend of confirmed cases",
                        )
                    }
                )
            ], className="row"),
            html.Div([
                dcc.Graph(
                    id='graph-trend-2',
                    figure={
                        'data': [
                            dict(
                                y=df_cases[df_cases['Country'] == i].sum()[1:][df_cases[df_cases['Country'] == i].sum()[1:].gt(threshold)],
                                mode='lines+markers',
                                opacity=0.7,
                                marker={
                                    'size': 5,
                                    'line': {'width': 1}
                                },
                                name=i
                            ) for i in countries_europe
                        ],
                        'layout': dict(
                            xaxis={'range':[0,120],'type': 'lin', 'title':'''Number of days since >100 cases'''},
                            yaxis={'range':[2,None], 'type': 'log', 'title': 'Total Confirmed Cases'},
                            legend={'x': 1, 'y': 1},
                            hovermode='closest',
                            title="Countries with most cases in Europe"
                        )
                    }
                )
            ], className="row"),
            html.Div([
                dcc.Graph(
                    id='graph-trend-3',
                    figure={
                        'data': [
                            dict(
                                y=df_cases[df_cases['Country'] == i].sum()[1:][df_cases[df_cases['Country'] == i].sum()[1:].gt(threshold)],
                                mode='lines+markers',
                                opacity=0.7,
                                marker={
                                    'size': 5,
                                    'line': {'width': 1}
                                },
                                name=i
                            ) for i in countries_asia
                        ],
                        'layout': dict(
                            xaxis={'range':[0,120],'type': 'lin', 'title':'''Number of days since >100 cases'''},
                            yaxis={'range':[2,None], 'type': 'log', 'title': 'Total Confirmed Cases'},
                            legend={'x': 1, 'y': 1},
                            hovermode='closest',
                            title="Countries with most cases in Asia"
                        )
                    }
                )
            ], className="row")
        ])
    # --- Tab 4: models placeholder (example graphs intentionally disabled) -
    elif tab == 'tab-4':
        return html.Div([
            html.P('''Simulations/Projections by ML supported SEIR Model. Fit to currently available data (confirmed cases, active cases, measures, hospital capacity, ICU beds, ...). Goal: Visualize projections and effects of different measures in a way that can be understood by everybody. Display uncertainty of input data and projections.'''),
            # PRELIMINARY MEASURE EXAMPLE GRAPHS
            # html.Div([
            # html.Div([
            # dcc.Graph(
            # id='graph-trend-2',
            # figure={
            # 'data': [
            # dict(
            # y=df_cases[df_cases['Country'] == i].sum()[1:][df_cases[df_cases['Country'] == i].sum()[1:].gt(threshold)],
            # mode='lines+markers',
            # opacity=0.7,
            # marker={
            # 'size': 5,
            # 'line': {'width': 1}
            # },
            # name=i
            # ) for i in countries_nomask
            # ],
            # 'layout': dict(
            # xaxis={'range':[0,45],'type': 'lin', 'title':'''Number of days since >100 cases'''},
            # yaxis={'range':[2,None], 'type': 'log', 'title': 'Total Confirmed Cases'},
            # legend={'x': 1, 'y': 1},
            # hovermode='closest',
            # title="Countries without widespread public mask usage"
            # )
            # }
            # )
            # ], className="six columns"),
            # html.Div([
            # dcc.Graph(
            # id='graph-trend-3',
            # figure={
            # 'data': [
            # dict(
            # y=df_cases[df_cases['Country'] == i].sum()[1:][df_cases[df_cases['Country'] == i].sum()[1:].gt(threshold)],
            # mode='lines+markers',
            # opacity=0.7,
            # marker={
            # 'size': 5,
            # 'line': {'width': 1}
            # },
            # name=i
            # ) for i in countries_mask
            # ],
            # 'layout': dict(
            # xaxis={'range':[0,45],'type': 'lin', 'title':'''Number of days since >100 cases'''},
            # yaxis={'range':[2,None], 'type': 'log', 'title': 'Total Confirmed Cases'},
            # legend={'x': 1, 'y': 1},
            # hovermode='closest',
            # title="Countries with widespread public mask usage"
            # )
            # }
            # )
            # ], className="six columns")
            # ], className="row")
        ])
    # --- Tab 5: bubble maps built at startup (fig_world/fig_europe/fig_asia)
    elif tab == 'tab-5':
        return html.Div([
            #dcc.Markdown('''Visualization of available data on maps (World, Europe, Germany, ...) to display regional clusters and the spread of the pandemic.'''),
            html.Div([
                dcc.Graph(id='map-world', figure=fig_world),
            ], className='row'),
            html.Div([
                dcc.Graph(id='map-europe', figure=fig_europe),
            ], className='row'),
            html.Div([
                dcc.Graph(id='map-asia', figure=fig_asia),
            ], className='row'),
        ])
    # --- Tab 6: about / disclaimer ----------------------------------------
    elif tab == 'tab-6':
        return html.Div([
            html.Div([
                dcc.Markdown('''
                * __Current Status: Under Construction!__ The main purpose of this dashboard is to provide a simple, interactive tool to visualize publicly available data about the COVID-19 pandemic.
                * __Caution:__ Graphs and figures only display the number of reported cases and **not** the number of actual cases. In addition, the quantity and type of conducted tests, case definitions, reporting structures and protocols may vary between regions and strongly affect the reported numbers. (e.g. regions with low number of conducted tests may have much higher actual case numbers than shown here; some countries may detect and report cases later than others; some countries may focus on testing specific groups of people depending on age, profession, region, preexisting conditions; some countries may conduct a higher percentage of tests post-mortem than others).
                * __Data Source:__ All graphs rely on data collected by the team at [John Hopkins University CSSE](https://github.com/CSSEGISandData/COVID-19). Live counts above the graphs rely on [NovelCovid APIs](https://github.com/NOVELCOVID/API). All Data is continuously updated and is subject to change and errors.
                '''),
            ])
        ])
### Run App
if __name__ == '__main__':
    # Start the Dash development server with the debugger/hot-reload enabled.
    app.run_server(debug=True)
|
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import pandas as pd
import requests
import plotly.graph_objects as go
from dash.dependencies import Input, Output
### Launch app
# Single-page Dash app; tab bodies are rendered lazily by a callback, so
# callback-exception suppression is needed for components not yet in the layout.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.config.suppress_callback_exceptions = True  # components appear only after tab render
server = app.server  # exposed for WSGI deployment (e.g. gunicorn)
app.title="COVID-19 Live Dashboard"
### Import Data from JHU CSSE & create new country dataframe
# Import Data
# Wide-format time series: one row per province/country, one column per date.
# Fetched at import time, so the data is as fresh as the last process start.
df_cases_jhu = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
df_recovered_jhu = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv")
df_deaths_jhu = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv")
# Country Dataframe: Create new dataframes with entries per country (sum over province) & rename columns to single words & drop Lat and Long columns
df_cases = df_cases_jhu.rename(columns={"Country/Region": "Country", "Province/State": "Province"}).groupby("Country").sum().reset_index().drop(["Lat", "Long"], axis=1)
df_recovered = df_recovered_jhu.rename(columns={"Country/Region": "Country", "Province/State": "Province"}).groupby("Country").sum().reset_index().drop(["Lat", "Long"], axis=1)
df_deaths = df_deaths_jhu.rename(columns={"Country/Region": "Country", "Province/State": "Province"}).groupby("Country").sum().reset_index().drop(["Lat", "Long"], axis=1)
### Call APIs for live counts world
# Blocking fetch at import time; world-tab KPI cards read this dict.
live_all = requests.get("https://corona.lmao.ninja/all").json()
### Create Country Selection Lists
# NOTE(review): only countries_asia and countries_europe are used by live code
# below; the top10/top15/top20 and mask/nomask lists appear to serve the
# commented-out "Models" graphs — confirm before removing.
countries_top10 = ["US", "Italy", "Spain", "China", "Germany", "France", "Iran", "United Kingdom", "Switzerland", "Turkey"]
countries_top15 = ["US", "Italy", "Spain", "China", "Germany", "France", "Iran", "United Kingdom", "Switzerland", "Turkey", "Belgium", "Netherlands", "Austria", "Korea, South", "Canada"]
countries_top20 = ["US", "Italy", "Spain", "China", "Germany", "France", "Iran", "United Kingdom", "Switzerland", "Turkey", "Belgium", "Netherlands", "Austria", "Korea, South", "Canada", "Portugal", "Brazil", "Israel", "Norway", "Australia"]
countries_asia = ["China", "Korea, South", "Georgia", "Malaysia", "Philippines", "Japan", "Pakistan", "India", "Thailand", "Indonesia"]
countries_europe = ["Italy", "Spain", "Germany", "France",
                    "United Kingdom", "Switzerland", "Belgium", "Netherlands", "Austria", "Portugal"]
# Country selection depending on Measures:
countries_mask = ["China", "Korea, South", "Japan", "Singapore", "Taiwan*", "Czechia"]
countries_nomask = ["US", "Italy", "Spain", "Germany", "France", "United Kingdom"]
threshold = 100 # Minimum number of cases on first day for trend plots
### Create Dropdown options
# One dropdown entry per distinct country in the JHU data.
dropdown_options = [{"label" : i, "value" : i} for i in df_cases["Country"].unique()]
### Create Map figures
# The World/Europe/Asia maps were three copy-pasted trace stacks differing only
# in geo scope and title; build them with a single helper instead.
scale = df_cases["4/5/20"].max()  # Use max cases in any country on "4/5/20" as bubble-size scaling factor
# Add Deaths to recovered size since deaths are displayed on top. This way at the
# end total confirmed size = total recovered size.
df_recovered_size = df_recovered.iloc[:,-1] + df_deaths.iloc[:,-1]

def _bubble_map(scope, title):
    """Build a go.Figure for `scope` with three stacked bubble layers per country:
    total confirmed (red), total recovered (green) and total deceased (yellow),
    each sized by the latest column of the respective dataframe.

    scope -- plotly geo scope ('world', 'europe', 'asia', ...)
    title -- figure title
    """
    fig = go.Figure()
    # (locations, hover text, bubble-size series, colour, legend name)
    layers = [
        (df_cases['Country'], df_cases.iloc[:,-1], df_cases.iloc[:,-1], 'red', 'total confirmed'),
        (df_recovered['Country'], df_recovered.iloc[:,-1], df_recovered_size, 'green', 'total recovered'),
        (df_deaths['Country'], df_deaths.iloc[:,-1], df_deaths.iloc[:,-1], 'yellow', 'total deceased'),
    ]
    for locations, text, size, color, name in layers:
        fig.add_trace(go.Scattergeo(
            locationmode='country names',
            locations=locations,
            text=text,
            marker=dict(
                size=size * 1000 / scale,
                line_color='rgb(40,40,40)',
                line_width=0.5,
                sizemode='area',
                color=color,
                opacity=0.7,
            ),
            name=name,
        ))
    fig.update_layout(
        title=title,
        showlegend=True,
        legend_orientation="h",
        legend=dict(x=0.25, y=0),
        height=400,
        margin={"r": 0, "t": 50, "l": 0, "b": 0},
        geo=dict(
            scope=scope,
            landcolor='rgb(217, 217, 217)',
            showcountries=True,
            countrycolor="white",
            coastlinecolor="white",
            showframe=True,
            projection_type='natural earth',
        ),
    )
    return fig

fig_world = _bubble_map('world', 'World')
fig_europe = _bubble_map('europe', 'Europe')
fig_asia = _bubble_map('asia', 'Asia')
### App Layout
# Static shell: header row, the tab bar, and an empty container that the
# render_content callback fills with the selected tab's body.
app.layout = html.Div([
    html.Div([
        html.H1("COVID-19 Live Dashboard"),
        dcc.Markdown("**Current Status: Under Construction!** Note: Graphs and figures only visualize the number of reported cases and not the actual number of cases. Testing, case definitions and reporting protocols varies between regions and strongly affects the number of reported cases.")
    ], className = "row"),
    dcc.Tabs(
        id="tabs-with-classes",
        value='tab-1',  # World tab is selected on first load
        parent_className='custom-tabs',
        className='custom-tabs-container',
        children=[
            dcc.Tab(
                label='World',
                value='tab-1',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='Country',
                value='tab-2',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='Trends',
                value='tab-3', className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='Models',
                value='tab-4',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='Maps',
                value='tab-5',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
            dcc.Tab(
                label='About',
                value='tab-6',
                className='custom-tab',
                selected_className='custom-tab--selected'
            ),
    ]),
    # Populated by the render_content callback on tab change.
    html.Div(id='tabs-content-classes')
], className='ten columns offset-by-one')
### Callbacks
# Callback Dropdown - KPIs
# The three KPI cards differ only in output id and JSON field, so register
# them from one factory instead of three copy-pasted callbacks.  The closure
# binds `field` per callback (avoids the late-binding pitfall a plain loop
# over lambdas would have).
def _register_kpi_callback(card_id, field):
    """Register a callback that fills the card body `card_id` with the live
    `field` count (formatted with thousands separators) for the country
    currently selected in the 'my-dropdown' dropdown."""
    @app.callback(Output(card_id, 'children'),
                  [Input('my-dropdown', 'value')])
    def update_children(X):
        live = requests.get("https://corona.lmao.ninja/countries/" + str(X)).json()
        return html.H3(f'{live[field] :,}', className="card-title")
    return update_children

_register_kpi_callback('card-cases', 'cases')
_register_kpi_callback('card-recovered', 'recovered')
_register_kpi_callback('card-deceased', 'deaths')
# Callbacks Dropdown - Curves
# The four per-country graphs differed only in source dataframe, cumulative
# (line) vs. daily (bar of day-over-day diffs) presentation, trace colour and
# titles — register them from one factory instead of four near-identical
# callbacks.
def _register_curve_callback(graph_id, df, cumulative, color, y_title, title_prefix):
    """Register a dropdown-driven time-series callback.

    graph_id     -- id of the dcc.Graph whose 'figure' is the Output
    df           -- wide time-series dataframe (df_cases or df_deaths)
    cumulative   -- True: line chart of running totals; False: bar chart of daily diffs
    color        -- explicit trace colour, or None for the plotly default
    y_title      -- y-axis label
    title_prefix -- graph title prefix; the country name is appended
    """
    @app.callback(Output(graph_id, 'figure'),
                  [Input('my-dropdown', 'value')])
    def update_figure(X):
        country = str(X)
        # Drop the 'Country' column (index 0) and sum any remaining rows.
        series = df[df['Country'] == country].sum()[1:]
        marker = {'size': 7, 'line': {'width': 1}}
        if color:
            marker['color'] = color
        if cumulative:
            line = {'width': 5}
            if color:
                line['color'] = color
            trace = dict(x=df.columns[1:], y=series, mode='lines+markers',
                         opacity=0.7, marker=marker, line=line, name=country)
            xaxis = {'type': 'lin'}
            yaxis = {'type': 'lin', 'title': y_title}
        else:
            trace = dict(x=df.columns[1:], y=series.diff(), type='bar',
                         opacity=0.7, marker=marker, name=country)
            xaxis = {}
            yaxis = {'title': y_title}
        return {
            'data': [trace],
            'layout': dict(
                xaxis=xaxis,
                yaxis=yaxis,
                margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
                legend={'x': 1, 'y': 1},
                hovermode='closest',
                title=title_prefix + country,
            ),
        }
    return update_figure

_register_curve_callback('graph-confirmed', df_cases, True, None,
                         'Total Confirmed Cases', 'Total Confirmed Cases in ')
_register_curve_callback('graph-deceased', df_deaths, True, 'orange',
                         'Total Deceased', 'Total Deceased in ')
_register_curve_callback('graph-daily', df_cases, False, None,
                         'Daily New Cases', 'Daily New Confirmed Cases in ')
_register_curve_callback('graph-daily-deceased', df_deaths, False, 'orange',
                         'Daily New Deceased', 'Daily New Deceased in ')
# Callback tabs
def _kpi_card(header, value, body_id=None, width="30rem"):
    """One KPI card in a three-column cell.

    header  -- card header text
    value   -- numeric value rendered with thousands separators
    body_id -- optional id on the CardBody so a callback can overwrite it later
    width   -- CSS width of the card
    """
    body_kwargs = {'children': [html.H3(f'{value:,}', className="card-title")]}
    if body_id is not None:
        body_kwargs['id'] = body_id
    return html.Div([
        dbc.Card(
            [dbc.CardHeader(header), dbc.CardBody(**body_kwargs)],
            style={"width": width},
        )
    ], className="three columns")

def _world_graph(graph_id, df, cumulative, color, y_title, title):
    """One full-width row with a world-aggregate graph: cumulative line chart
    or bar chart of daily diffs, summed over all countries."""
    series = df.sum()[1:]  # drop 'Country' column, sum across all countries
    marker = {'size': 7, 'line': {'width': 1}}
    if color:
        marker['color'] = color
    if cumulative:
        line = {'width': 5}
        if color:
            line['color'] = color
        trace = dict(x=df.columns[1:], y=series, mode='lines+markers',
                     opacity=0.7, marker=marker, line=line, name="World")
    else:
        trace = dict(x=df.columns[1:], y=series.diff(), type='bar',
                     opacity=0.7, marker=marker, name="World")
    return html.Div([
        dcc.Graph(
            id=graph_id,
            figure={
                'data': [trace],
                'layout': dict(
                    xaxis={},
                    yaxis={'title': y_title},
                    margin={'l': 50, 'b': 100, 't': 50, 'r': 50},
                    legend={'x': 1, 'y': 1},
                    hovermode='closest',
                    title=title,
                ),
            },
        )
    ], className="row")

def _trend_graph(graph_id, countries, title, markers=True, y_range=None, margin=None):
    """Log-scale trend chart, one curve per country, aligned on the day each
    country first exceeded `threshold` confirmed cases."""
    def trend(country):
        totals = df_cases[df_cases['Country'] == country].sum()[1:]
        return totals[totals.gt(threshold)]
    yaxis = {'type': 'log', 'title': 'Total Confirmed Cases'}
    if y_range is not None:
        yaxis['range'] = y_range
    layout = dict(
        xaxis={'range': [0, 120], 'type': 'lin', 'title': 'Number of days since >100 cases'},
        yaxis=yaxis,
        legend={'x': 1, 'y': 1},
        hovermode='closest',
        title=title,
    )
    if margin is not None:
        layout['margin'] = margin
    return html.Div([
        dcc.Graph(
            id=graph_id,
            figure={
                'data': [
                    dict(
                        y=trend(c),
                        mode='lines+markers' if markers else 'lines',
                        opacity=0.7,
                        marker={'size': 5, 'line': {'width': 1}},
                        name=c,
                    ) for c in countries
                ],
                'layout': layout,
            },
        )
    ], className="row")

@app.callback(Output('tabs-content-classes', 'children'),
              [Input('tabs-with-classes', 'value')])
def render_content(tab):
    """Render the body of the selected tab into the 'tabs-content-classes' Div."""
    if tab == 'tab-1':
        # World: live KPI cards (from the import-time `live_all` fetch) plus
        # four world-aggregate time-series graphs.
        return html.Div([
            html.Div([
                _kpi_card("Total Cases:", live_all["cases"]),
                _kpi_card("Active Cases:", live_all["active"]),
                _kpi_card("Total Recovered:", live_all["recovered"]),
                _kpi_card("Total Deceased:", live_all["deaths"]),
            ], className="row"),
            html.Div([
                _world_graph('graph-confirmed-world', df_cases, True, None,
                             'Total Confirmed Cases', 'Total Confirmed Cases'),
                _world_graph('graph-daily-world', df_cases, False, None,
                             'Daily New Cases', 'Daily New Confirmed Cases'),
                _world_graph('graph-deceased-world', df_deaths, True, 'orange',
                             'Total Deceased', 'Total Deceased'),
                _world_graph('graph-daily-deceased-world', df_deaths, False, 'orange',
                             'Daily New Deceased', 'Daily New Deceased'),
            ]),
        ])
    elif tab == 'tab-2':
        # Country: dropdown + KPI cards seeded with Germany's live numbers
        # (one request instead of the previous three) + four empty graphs
        # that the dropdown callbacks fill.
        germany = requests.get("https://corona.lmao.ninja/countries/Germany").json()
        return html.Div([
            html.Div([
                html.Div([
                    html.Label("Select a country:"),
                    dcc.Dropdown(
                        id="my-dropdown",
                        options=dropdown_options,
                        value="Germany",
                        placeholder="Select a country",
                    ),
                ], className="three columns"),
                _kpi_card("Total Cases:", germany["cases"], body_id="card-cases", width="10rem"),
                _kpi_card("Total Recovered:", germany["recovered"], body_id="card-recovered"),
                _kpi_card("Total Deceased:", germany["deaths"], body_id="card-deceased"),
            ], className="row"),
            *[
                html.Div([
                    html.Div([dcc.Graph(id=graph_id)], className="twelve columns"),
                ], className="row")
                for graph_id in ('graph-confirmed', 'graph-daily',
                                 'graph-deceased', 'graph-daily-deceased')
            ],
        ])
    elif tab == 'tab-3':
        # Trends: log-scale comparison curves since each country passed 100 cases.
        return html.Div([
            dcc.Markdown('To show only one country, double-click on the country in the legend. Single-click on other countries in the legend to add them to the selection. Double-click again to reset the selection.'),
            _trend_graph('graph-trend-1', df_cases["Country"].unique(),
                         "Trend of confirmed cases", markers=False,
                         margin={'l': 100, 'b': 100, 't': 50, 'r': 100}),
            _trend_graph('graph-trend-2', countries_europe,
                         "Countries with most cases in Europe", y_range=[2, None]),
            _trend_graph('graph-trend-3', countries_asia,
                         "Countries with most cases in Asia", y_range=[2, None]),
        ])
    elif tab == 'tab-4':
        # Models: placeholder text only (preliminary mask-usage comparison
        # graphs were removed as dead, commented-out code).
        return html.Div([
            html.P('''Simulations/Projections by ML supported SEIR Model. Fit to currently available data (confirmed cases, active cases, measures, hospital capacity, ICU beds, ...). Goal: Visualize projections and effects of different measures in a way that can be understood by everybody. Display uncertainty of input data and projections.'''),
        ])
    elif tab == 'tab-5':
        # Maps: the three bubble maps built once at import time.
        return html.Div([
            html.Div([dcc.Graph(id='map-world', figure=fig_world)], className='row'),
            html.Div([dcc.Graph(id='map-europe', figure=fig_europe)], className='row'),
            html.Div([dcc.Graph(id='map-asia', figure=fig_asia)], className='row'),
        ])
    elif tab == 'tab-6':
        # About: static markdown.
        return html.Div([
            html.Div([
                dcc.Markdown('''
                    * __Current Status: Under Construction!__ The main purpose of this dashboard is to provide a simple, interactive tool to visualize publicly available data about the COVID-19 pandemic.
                    * __Caution:__ Graphs and figures only display the number of reported cases and **not** the number of actual cases. In addition, the quantity and type of conducted tests, case definitions, reporting structures and protocols may vary between regions and strongly affect the reported numbers. (e.g. regions with low number of conducted tests may have much higher actual case numbers than shown here; some countries may detect and report cases later than others; some countries may focus on testing specific groups of people depending on age, profession, region, preexisting conditions; some countries may conduct a higher percentage of tests post-mortem than others).
                    * __Data Source:__ All graphs rely on data collected by the team at [John Hopkins University CSSE](https://github.com/CSSEGISandData/COVID-19). Live counts above the graphs rely on [NovelCovid APIs](https://github.com/NOVELCOVID/API). All Data is continuously updated and is subject to change and errors.
                '''),
            ])
        ])
### Run App
# Start the Dash development server when this file is executed directly
# (debug=True enables hot reload; not for production use).
if __name__ == '__main__':
    app.run_server(debug=True)
|
en
| 0.64107
|
### Launch app # suppress callback errors ### Import Data from JHU CSSE & create new country dataframe # Import Data # Country Dataframe: Create new dataframes with entries per country (sum over province) & rename columns to single words & drop Lat and Long columns ### Call APIs for live counts world ### Create Country Selection Lists # Country selection depending on Measures: # Minimum number of cases on first day for trend plots ### Create Dropdown options ### Create Map figure #World Map # Use max cases in country on "4/5/20" as scaling factor # Add Deaths to recovered size since deaths are displayed on top. This way at the end total confirmed size = total recovered size #lonaxis_range= [ -150, None ], #lataxis_range= [ -60, 90 ], # Europe Map #lonaxis_range= [ -150, None ], #lataxis_range= [ -60, 90 ], # Asia Map #lonaxis_range= [ -150, None ], #lataxis_range= [ -60, 90 ], ### App Layout ### Callbacks # Callback Dropdown - KPIs # Callbacks Dropdown - Curves #paper_bgcolor='lightgrey', # title="Trend of total confirmed cases" # title="Trend of total confirmed cases" # title="Trend of total confirmed cases" # title="Trend of total confirmed cases" # Callback tabs # title="Trend of total confirmed cases" # title="Trend of total confirmed cases" # title="Trend of total confirmed cases" # title="Trend of total confirmed cases" Number of days since >100 cases Number of days since >100 cases Number of days since >100 cases Simulations/Projections by ML supported SEIR Model. Fit to currently available data (confirmed cases, active cases, measures, hospital capacity, ICU beds, ...). Goal: Visualize projections and effects of different measures in a way that can be understood by everybody. Display uncertainty of input data and projections. 
# PRELIMINARY MEASURE EXAMPLE GRAPHS # html.Div([ # html.Div([ # dcc.Graph( # id='graph-trend-2', # figure={ # 'data': [ # dict( # y=df_cases[df_cases['Country'] == i].sum()[1:][df_cases[df_cases['Country'] == i].sum()[1:].gt(threshold)], # mode='lines+markers', # opacity=0.7, # marker={ # 'size': 5, # 'line': {'width': 1} # }, # name=i # ) for i in countries_nomask # ], # 'layout': dict( # xaxis={'range':[0,45],'type': 'lin', 'title':'''Number of days since >100 cases'''}, # yaxis={'range':[2,None], 'type': 'log', 'title': 'Total Confirmed Cases'}, # legend={'x': 1, 'y': 1}, # hovermode='closest', # title="Countries without widespread public mask usage" # ) # } # ) # ], className="six columns"), # html.Div([ # dcc.Graph( # id='graph-trend-3', # figure={ # 'data': [ # dict( # y=df_cases[df_cases['Country'] == i].sum()[1:][df_cases[df_cases['Country'] == i].sum()[1:].gt(threshold)], # mode='lines+markers', # opacity=0.7, # marker={ # 'size': 5, # 'line': {'width': 1} # }, # name=i # ) for i in countries_mask # ], # 'layout': dict( # xaxis={'range':[0,45],'type': 'lin', 'title':'''Number of days since >100 cases'''}, # yaxis={'range':[2,None], 'type': 'log', 'title': 'Total Confirmed Cases'}, # legend={'x': 1, 'y': 1}, # hovermode='closest', # title="Countries with widespread public mask usage" # ) # } # ) # ], className="six columns") # ], className="row") #dcc.Markdown('''Visualization of available data on maps (World, Europe, Germany, ...) to display regional clusters and the spread of the pandemic.'''), * __Current Status: Under Construction!__ The main purpose of this dashboard is to provide a simple, interactive tool to visualize publicly available data about the COVID-19 pandemic. * __Caution:__ Graphs and figures only display the number of reported cases and **not** the number of actual cases. 
In addition, the quantity and type of conducted tests, case definitions, reporting structures and protocols may vary between regions and strongly affect the reported numbers. (e.g. regions with low number of conducted tests may have much higher actual case numbers than shown here; some countries may detect and report cases later than others; some countries may focus on testing specific groups of people depending on age, profession, region, preexisting conditions; some countries may conduct a higher percentage of tests post-mortem than others). * __Data Source:__ All graphs rely on data collected by the team at [John Hopkins University CSSE](https://github.com/CSSEGISandData/COVID-19). Live counts above the graphs rely on [NovelCovid APIs](https://github.com/NOVELCOVID/API). All Data is continuously updated and is subject to change and errors. ### Run App
| 2.727175
| 3
|
medlinkersocial.py
|
danlou/MedLinker-Social
| 2
|
6627768
|
<reponame>danlou/MedLinker-Social
import pickle
from collections import defaultdict
from utils import cui_stys_map
from utils import cui_stys_map
from utils import yake_tokenizer
from utils import replace_variants
from utils import normalize_str
from utils import cui_mfa
from utils import sty_labels
from utils import efcni_cuis
from umls_simstring import SimString_UMLS
import yake
class MedLinkerSocial(object):
    """Entity linker for social-media text: YAKE keyword extraction followed by
    SimString approximate matching of keywords against UMLS concept aliases."""

    def __init__(self,
                 db_path='data/SimString/umls_2020_aa_cat0129_ext.3gram.5toks.db',
                 map_path='data/SimString/umls_2020_aa_cat0129_ext.5toks.alias.map',
                 alpha=0.5,
                 n=5):
        # alpha: minimum string-similarity threshold for accepting a match;
        # n: maximum keyword n-gram length for YAKE.
        self.matcher = None
        self.extractor = None
        self.alpha = alpha
        self.load_matcher(db_path, map_path, self.alpha)
        self.load_extractor(n)

    def load_matcher(self, db_path, map_path, alpha=0.5):
        """Load the SimString UMLS alias matcher."""
        self.matcher = SimString_UMLS(db_path, map_path, alpha=alpha)

    def load_extractor(self, n=5):
        """Load the YAKE keyword extractor (keywords up to n tokens long)."""
        self.extractor = yake.KeywordExtractor(lan='en', n=n, dedupLim=0.9, dedupFunc='seqm', windowsSize=1, top=999)

    def extract_keywords(self, sent_tokens):
        """Extract keywords from a tokenized sentence and locate their token spans.

        Returns a list of (normalized_score, keyword, start, end) tuples.
        Scores are the inverted YAKE scores (YAKE: lower is better) normalized
        to sum to 1.  Keywords that cannot be located in `sent_tokens` are
        silently dropped.
        """
        def locate_sublist(a, b):
            # Return (start, end) of the first occurrence of list `a` as a
            # contiguous sublist of `b`, or None when not found.
            if len(a) > len(b):
                return None
            for i in range(0, len(b) - len(a) + 1):
                if b[i:i+len(a)] == a:
                    return (i, i+len(a))
            return None
        # tokens provided must be tokenized using the same tokenizer as yake
        try:
            kws = self.extractor.extract_keywords(' '.join(sent_tokens))
        except ValueError:
            kws = []  # yake raises ValueError on some degenerate inputs
        # invert scores (yake: lower is better) and normalize them to sum to 1
        kws = [(1/score, kw) for (kw, score) in kws]
        sum_score = sum([score for (score, kw) in kws])
        kws = [(score/sum_score, kw) for (score, kw) in kws]
        # locate extracted kws.  The original wrapped the unpack in a bare
        # `except: continue`, which hid every possible error; locate_sublist
        # signals "not found" with None, so test for that explicitly instead.
        kws_with_idxs = []
        for score, kw in kws:
            loc = locate_sublist(kw.split(), sent_tokens)
            if loc is None:
                continue
            s, e = loc
            kws_with_idxs.append((score, kw, s, e))
        return kws_with_idxs

    def search(self, sentence, alpha=None, add_yake_score=True, overlapping=True):
        """Link `sentence` to UMLS concepts.

        alpha          -- similarity threshold override (defaults to self.alpha)
        add_yake_score -- rank by similarity + keyword score (better for longer
                          sequences) instead of using the score only as tiebreak
        overlapping    -- when False, greedily drop matches whose token spans
                          overlap a higher-ranked match
        Returns a dict with 'sentence', 'tokens' and a ranked 'matches' list.
        """
        if alpha is None:
            alpha = self.alpha
        r = {'sentence': sentence, 'tokens': yake_tokenizer(sentence), 'matches': []}
        for score, kw, s, e in self.extract_keywords(r['tokens']):
            kw = normalize_str(kw)
            kw = replace_variants(kw)
            try:
                cui, alias, sim = self.matcher.match_cuis(kw)[0]
            except IndexError:  # no candidate CUIs for this keyword
                continue
            match = {'keyword': kw, 'cui': cui, 'stys': cui_stys_map[cui], 'alias': alias, 'start': s, 'end': e, 'score': score, 'similarity': sim}
            r['matches'].append(match)
        r['matches'] = [m for m in r['matches'] if m['similarity'] > alpha]
        if add_yake_score:  # better for matching longer sequences
            r['matches'] = sorted(r['matches'], key=lambda m: m['similarity'] + m['score'], reverse=True)
        else:  # yake score just used to settle similarity ties
            r['matches'] = sorted(r['matches'], key=lambda m: (m['similarity'], m['score']), reverse=True)
        if not overlapping:
            # Greedy span filter: keep highest-ranked matches whose token spans
            # do not overlap any already-kept match.
            matched_idxs = set()
            matches_filtered = []
            for m in r['matches']:
                idxs = list(range(m['start'], m['end']))
                if all([i not in matched_idxs for i in idxs]):
                    matches_filtered.append(m)
                    matched_idxs.update(idxs)
            r['matches'] = matches_filtered
        return r

    def get_mfa(self, cui):
        # CUI's Most Frequent Alias according to Reddit Corpus
        if cui in cui_mfa:
            return cui_mfa[cui]
        # NOTE(review): `cui_alias_map` is referenced here and in get_aliases
        # but never imported from utils — this branch raises NameError as
        # written.  Presumably `from utils import cui_alias_map` is missing;
        # confirm against utils before fixing.
        return cui_alias_map[cui][0]

    def get_types(self, cui, include_name=False):
        """Return the semantic types for `cui`, optionally with readable labels."""
        stys = cui_stys_map[cui]
        if include_name:
            stys = ['%s (%s)' % (sty, sty_labels[sty]) for sty in stys]
        return stys

    def get_aliases(self, cui):
        # NOTE(review): depends on the missing `cui_alias_map` import — see get_mfa.
        return cui_alias_map[cui]
if __name__ == "__main__":
    # Demo: link one sample sentence and print the raw result plus each match.
    demo_db = 'data/SimString/umls_2020_aa_cat0129_ext.3gram.5toks.db'
    demo_map = 'data/SimString/umls_2020_aa_cat0129_ext.5toks.alias.map'
    linker = MedLinkerSocial(demo_db, demo_map, alpha=0.5, n=5)
    sentence = "But I often check on her because I'm paranoid and scared of positional asphyxiation ."
    result = linker.search(sentence, alpha=0.7)
    print(result)
    for match in result['matches']:
        print(match)
|
import pickle
from collections import defaultdict
from utils import cui_stys_map
from utils import cui_stys_map
from utils import yake_tokenizer
from utils import replace_variants
from utils import normalize_str
from utils import cui_mfa
from utils import sty_labels
from utils import efcni_cuis
from umls_simstring import SimString_UMLS
import yake
class MedLinkerSocial(object):
    """Entity linker for social-media text: YAKE keyword extraction followed by
    SimString approximate matching of keywords against UMLS concept aliases."""

    def __init__(self,
                 db_path='data/SimString/umls_2020_aa_cat0129_ext.3gram.5toks.db',
                 map_path='data/SimString/umls_2020_aa_cat0129_ext.5toks.alias.map',
                 alpha=0.5,
                 n=5):
        # alpha: minimum string-similarity threshold for accepting a match;
        # n: maximum keyword n-gram length for YAKE.
        self.matcher = None
        self.extractor = None
        self.alpha = alpha
        self.load_matcher(db_path, map_path, self.alpha)
        self.load_extractor(n)

    def load_matcher(self, db_path, map_path, alpha=0.5):
        """Load the SimString UMLS alias matcher."""
        self.matcher = SimString_UMLS(db_path, map_path, alpha=alpha)

    def load_extractor(self, n=5):
        """Load the YAKE keyword extractor (keywords up to n tokens long)."""
        self.extractor = yake.KeywordExtractor(lan='en', n=n, dedupLim=0.9, dedupFunc='seqm', windowsSize=1, top=999)

    def extract_keywords(self, sent_tokens):
        """Extract keywords from a tokenized sentence and locate their token spans.

        Returns a list of (normalized_score, keyword, start, end) tuples.
        Scores are the inverted YAKE scores (YAKE: lower is better) normalized
        to sum to 1.  Keywords that cannot be located in `sent_tokens` are
        silently dropped.
        """
        def locate_sublist(a, b):
            # Return (start, end) of the first occurrence of list `a` as a
            # contiguous sublist of `b`, or None when not found.
            if len(a) > len(b):
                return None
            for i in range(0, len(b) - len(a) + 1):
                if b[i:i+len(a)] == a:
                    return (i, i+len(a))
            return None
        # tokens provided must be tokenized using the same tokenizer as yake
        try:
            kws = self.extractor.extract_keywords(' '.join(sent_tokens))
        except ValueError:
            kws = []  # yake raises ValueError on some degenerate inputs
        # invert scores (yake: lower is better) and normalize them to sum to 1
        kws = [(1/score, kw) for (kw, score) in kws]
        sum_score = sum([score for (score, kw) in kws])
        kws = [(score/sum_score, kw) for (score, kw) in kws]
        # locate extracted kws.  The original wrapped the unpack in a bare
        # `except: continue`, which hid every possible error; locate_sublist
        # signals "not found" with None, so test for that explicitly instead.
        kws_with_idxs = []
        for score, kw in kws:
            loc = locate_sublist(kw.split(), sent_tokens)
            if loc is None:
                continue
            s, e = loc
            kws_with_idxs.append((score, kw, s, e))
        return kws_with_idxs

    def search(self, sentence, alpha=None, add_yake_score=True, overlapping=True):
        """Link `sentence` to UMLS concepts.

        alpha          -- similarity threshold override (defaults to self.alpha)
        add_yake_score -- rank by similarity + keyword score (better for longer
                          sequences) instead of using the score only as tiebreak
        overlapping    -- when False, greedily drop matches whose token spans
                          overlap a higher-ranked match
        Returns a dict with 'sentence', 'tokens' and a ranked 'matches' list.
        """
        if alpha is None:
            alpha = self.alpha
        r = {'sentence': sentence, 'tokens': yake_tokenizer(sentence), 'matches': []}
        for score, kw, s, e in self.extract_keywords(r['tokens']):
            kw = normalize_str(kw)
            kw = replace_variants(kw)
            try:
                cui, alias, sim = self.matcher.match_cuis(kw)[0]
            except IndexError:  # no candidate CUIs for this keyword
                continue
            match = {'keyword': kw, 'cui': cui, 'stys': cui_stys_map[cui], 'alias': alias, 'start': s, 'end': e, 'score': score, 'similarity': sim}
            r['matches'].append(match)
        r['matches'] = [m for m in r['matches'] if m['similarity'] > alpha]
        if add_yake_score:  # better for matching longer sequences
            r['matches'] = sorted(r['matches'], key=lambda m: m['similarity'] + m['score'], reverse=True)
        else:  # yake score just used to settle similarity ties
            r['matches'] = sorted(r['matches'], key=lambda m: (m['similarity'], m['score']), reverse=True)
        if not overlapping:
            # Greedy span filter: keep highest-ranked matches whose token spans
            # do not overlap any already-kept match.
            matched_idxs = set()
            matches_filtered = []
            for m in r['matches']:
                idxs = list(range(m['start'], m['end']))
                if all([i not in matched_idxs for i in idxs]):
                    matches_filtered.append(m)
                    matched_idxs.update(idxs)
            r['matches'] = matches_filtered
        return r

    def get_mfa(self, cui):
        # CUI's Most Frequent Alias according to Reddit Corpus
        if cui in cui_mfa:
            return cui_mfa[cui]
        # NOTE(review): `cui_alias_map` is referenced here and in get_aliases
        # but never imported from utils — this branch raises NameError as
        # written.  Presumably `from utils import cui_alias_map` is missing;
        # confirm against utils before fixing.
        return cui_alias_map[cui][0]

    def get_types(self, cui, include_name=False):
        """Return the semantic types for `cui`, optionally with readable labels."""
        stys = cui_stys_map[cui]
        if include_name:
            stys = ['%s (%s)' % (sty, sty_labels[sty]) for sty in stys]
        return stys

    def get_aliases(self, cui):
        # NOTE(review): depends on the missing `cui_alias_map` import — see get_mfa.
        return cui_alias_map[cui]
if __name__ == "__main__":
    # Smoke test: link concepts in a sample sentence and print the matches.
    db_path='data/SimString/umls_2020_aa_cat0129_ext.3gram.5toks.db'
    map_path='data/SimString/umls_2020_aa_cat0129_ext.5toks.alias.map'
    linker = MedLinkerSocial(db_path, map_path, alpha=0.5, n=5)
    txt = "But I often check on her because I'm paranoid and scared of positional asphyxiation ."
    r = linker.search(txt, alpha=0.7)  # stricter similarity threshold than the default
    print(r)
    for m in r['matches']:
        print(m)
|
en
| 0.868313
|
# tokens provided must be tokenized using the same tokenizer as yake # invert scores # locate extracted kws # better for matching longer sequences # yake score just used to settle similarity ties # CUI's Most Frequent Alias according to Reddit Corpus
| 2.071905
| 2
|
markups.py
|
BrinzaBezrukoff/kmbo_bot
| 1
|
6627769
|
<gh_stars>1-10
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
def get_subjects_markup(subjects):
    """Inline keyboard listing every subject, with a back-to-menu button."""
    markup = InlineKeyboardMarkup(row_width=1)
    buttons = [
        InlineKeyboardButton(subject.name, callback_data=f"subject_{subject.id}")
        for subject in subjects
    ]
    for button in buttons:
        markup.add(button)
    markup.add(InlineKeyboardButton("<<<", callback_data="menu"))
    return markup
def get_deadlines_markup(deadlines):
    """Inline keyboard with one "subject: name (due)" row per deadline."""
    markup = InlineKeyboardMarkup(row_width=1)
    for deadline in deadlines:
        label = f"{deadline.subject.name}: {deadline.name} ({deadline.dead_str})"
        markup.add(InlineKeyboardButton(label, callback_data=f"deadline_{deadline.id}"))
    markup.add(InlineKeyboardButton("<<<", callback_data="menu"))
    return markup
def get_back_markup(data, title=None):
    """Single-button keyboard that sends `data` when pressed.

    The caption falls back to "<<<" when no truthy title is supplied.
    """
    caption = title if title else "<<<"
    markup = InlineKeyboardMarkup(row_width=1)
    markup.add(InlineKeyboardButton(f"{caption}", callback_data=data))
    return markup
def get_main_menu():
    """Main menu keyboard: subjects, deadlines and the editor section."""
    entries = (
        ("Предметы", "all_subjects"),
        ("Дедлайны", "all_deadlines"),
        ("Раздел редактора", "open_editorial"),
    )
    markup = InlineKeyboardMarkup(row_width=1)
    for text, data in entries:
        markup.add(InlineKeyboardButton(text, callback_data=data))
    return markup
def get_editorial_markup():
    """Editor keyboard: one add/delete row each for subjects and deadlines."""
    markup = InlineKeyboardMarkup(row_width=3)
    for entity, add_cb, del_cb in (
        ("Предметы", "add_subject", "del_subject"),
        ("Дедлайны", "add_deadline", "del_deadline"),
    ):
        markup.add(InlineKeyboardButton(entity, callback_data="*"),
                   InlineKeyboardButton("Добавить", callback_data=add_cb),
                   InlineKeyboardButton("Удалить", callback_data=del_cb))
    return markup
|
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
def get_subjects_markup(subjects):
    """Inline keyboard listing all subjects with a back-to-menu button."""
    mk = InlineKeyboardMarkup(row_width=1)
    for v in subjects:
        mk.add(InlineKeyboardButton(v.name, callback_data=f"subject_{v.id}"))
    mk.add(InlineKeyboardButton("<<<", callback_data="menu"))
    return mk
def get_deadlines_markup(deadlines):
    """Inline keyboard with one "subject: name (due)" row per deadline."""
    mk = InlineKeyboardMarkup(row_width=1)
    for v in deadlines:
        mk.add(InlineKeyboardButton(f"{v.subject.name}: {v.name} ({v.dead_str})", callback_data=f"deadline_{v.id}"))
    mk.add(InlineKeyboardButton("<<<", callback_data="menu"))
    return mk
def get_back_markup(data, title=None):
    """Single-button keyboard sending `data`; caption defaults to "<<<"."""
    if not title:
        title = "<<<"
    mk = InlineKeyboardMarkup(row_width=1)
    mk.add(InlineKeyboardButton(f"{title}", callback_data=data))
    return mk
def get_main_menu():
    """Main menu keyboard: subjects, deadlines, editor section."""
    mk = InlineKeyboardMarkup(row_width=1)
    mk.add(InlineKeyboardButton("Предметы", callback_data="all_subjects"))
    mk.add(InlineKeyboardButton("Дедлайны", callback_data="all_deadlines"))
    mk.add(InlineKeyboardButton("Раздел редактора", callback_data="open_editorial"))
    return mk
def get_editorial_markup():
    """Editor keyboard: add/delete button rows for subjects and deadlines."""
    mk = InlineKeyboardMarkup(row_width=3)
    mk.add(InlineKeyboardButton("Предметы", callback_data="*"),
           InlineKeyboardButton("Добавить", callback_data="add_subject"),
           InlineKeyboardButton("Удалить", callback_data="del_subject"))
    mk.add(InlineKeyboardButton("Дедлайны", callback_data="*"),
           InlineKeyboardButton("Добавить", callback_data="add_deadline"),
           InlineKeyboardButton("Удалить", callback_data="del_deadline"))
    return mk
|
none
| 1
| 2.307431
| 2
|
|
pytils/classes/meta/_static.py
|
d33jiang/pytils
| 0
|
6627770
|
<gh_stars>0
from typing import Any, Dict, NoReturn, Tuple
__all__ = [
'StaticMeta'
]
class StaticMeta(type):
    """
    Metaclass for defining static classes.
    The resulting static class cannot be instantiated. If the __init__ method is defined, then it is invoked with None
    as the sole argument when the static class is defined.
    """

    @staticmethod
    def _raise_init():
        # Single failure point shared by every instantiation path.
        raise NotImplementedError('Static classes cannot be instantiated')

    def __init__(cls, name: str, bases: Tuple[type, ...], dct: Dict[str, Any]):
        def on_init(*_args, **_kwargs) -> NoReturn:
            StaticMeta._raise_init()

        # Capture the user-supplied __init__ so it can run exactly once,
        # with None, at class-definition time.
        init_function = dct.get('__init__', lambda _: None)
        # BUG FIX: when the metaclass __init__ runs the class object already
        # exists, so mutating `dct` (as the original code did) had no effect.
        # Install the guards on the class itself so direct __init__/__new__
        # calls are rejected as well, not just ClassName(...) via __call__.
        setattr(cls, '__init__', on_init)
        super(StaticMeta, cls).__init__(name, bases, dct)
        init_function(None)
        setattr(cls, '__new__', on_init)

    def __call__(cls, *args, **kwargs) -> NoReturn:
        # Instantiation via ClassName(...) is always rejected.
        StaticMeta._raise_init()
|
from typing import Any, Dict, NoReturn, Tuple
__all__ = [
'StaticMeta'
]
class StaticMeta(type):
    """
    Metaclass for defining static classes.
    The resulting static class cannot be instantiated. If the __init__ method is defined, then it is invoked with None
    as the sole argument when the static class is defined.
    """
    @staticmethod
    def _raise_init():
        raise NotImplementedError('Static classes cannot be instantiated')
    def __init__(cls, name: str, bases: Tuple[type, ...], dct: Dict[str, Any]):
        def on_init(*_args, **_kwargs) -> NoReturn:
            StaticMeta._raise_init()
        # Capture the user-defined __init__ (if any) so it runs once, with
        # None, at class-definition time.
        init_function = dct.get('__init__', lambda _: None)
        # NOTE(review): the class object already exists when the metaclass
        # __init__ runs, so these `dct` mutations do not change the class;
        # instantiation is only blocked by __call__ below. Use
        # setattr(cls, ...) if the __init__/__new__ guards are intended.
        dct['__init__'] = on_init
        super(StaticMeta, cls).__init__(name, bases, dct)
        init_function(None)
        dct['__new__'] = on_init
    def __call__(cls, *args, **kwargs) -> NoReturn:
        # Every ClassName(...) instantiation attempt lands here.
        StaticMeta._raise_init()
|
en
| 0.866894
|
Metaclass for defining static classes. The resulting static class cannot be instantiated. If the __init__ method is defined, then it is invoked with None as the sole argument when the static class is defined.
| 3.096821
| 3
|
proteus/config/tamucluster.py
|
robertsawko/proteus
| 0
|
6627771
|
from default import *
# MPI build configuration for the TAMU cluster: extend the flags detected
# by get_flags('mpi') with the cluster's OpenMPI 1.6.5 install paths.
PROTEUS_MPI_INCLUDE_DIR, PROTEUS_MPI_LIB_DIR = get_flags('mpi')
PROTEUS_MPI_INCLUDE_DIRS = [PROTEUS_MPI_INCLUDE_DIR,'/apps/openmpi/1.6.5/include']
PROTEUS_MPI_LIB_DIRS = [PROTEUS_MPI_LIB_DIR,'/apps/openmpi/1.6.5/lib64']
PROTEUS_MPI_LIBS =[]
|
from default import *
PROTEUS_MPI_INCLUDE_DIR, PROTEUS_MPI_LIB_DIR = get_flags('mpi')
PROTEUS_MPI_INCLUDE_DIRS = [PROTEUS_MPI_INCLUDE_DIR,'/apps/openmpi/1.6.5/include']
PROTEUS_MPI_LIB_DIRS = [PROTEUS_MPI_LIB_DIR,'/apps/openmpi/1.6.5/lib64']
PROTEUS_MPI_LIBS =[]
|
none
| 1
| 1.171695
| 1
|
|
lrthubcore/ratings/api/views.py
|
xrojan/lrthub-core
| 0
|
6627772
|
<reponame>xrojan/lrthub-core
# Created by <NAME> on 07/07/2018
# @email <EMAIL>
from rest_framework.response import Response
from ..models import Rating
from . import serializers
from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
class RatingList(generics.ListAPIView):
    """List all ratings (read-only; authenticated users only)."""
    permission_classes = (IsAuthenticated,)
    queryset = Rating.objects.all()
    serializer_class = serializers.RatingSerializer
class RatingCreate(generics.CreateAPIView):
    """Create a rating, responding with the project's uniform
    {status_code, message, result} envelope."""
    queryset = Rating.objects.all()
    serializer_class = serializers.RatingSerializer

    def create(self, request, *args, **kwargs):
        """Delegate creation to DRF, then wrap the echoed input data."""
        # BUG FIX: args/kwargs were passed as two positional arguments
        # (a tuple and a dict) instead of being unpacked; DRF's
        # CreateModelMixin.create expects *args, **kwargs.
        super(RatingCreate, self).create(request, *args, **kwargs)
        response = {"status_code": status.HTTP_200_OK, "message": "Successfully created", "result": request.data}
        return Response(response)
class RatingDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single rating, wrapping every
    response in the project's {status_code, message, result} envelope.
    Requires authentication."""
    permission_classes = (IsAuthenticated,)
    queryset = Rating.objects.all()
    serializer_class = serializers.RatingSerializer

    def retrieve(self, request, *args, **kwargs):
        # BUG FIX: unpack args/kwargs instead of passing the tuple/dict
        # positionally to the DRF mixin.
        super(RatingDetail, self).retrieve(request, *args, **kwargs)
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        data = serializer.data
        response = {"status_code": status.HTTP_200_OK, "message": "Successfully retrieved", "result": data}
        return Response(response)

    def patch(self, request, *args, **kwargs):
        # BUG FIX: unpack args/kwargs (same issue as retrieve).
        super(RatingDetail, self).patch(request, *args, **kwargs)
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        data = serializer.data
        response = {"status_code": status.HTTP_200_OK, "message": "Successfully updated", "result": data}
        return Response(response)

    def delete(self, request, *args, **kwargs):
        # BUG FIX: unpack args/kwargs (same issue as retrieve).
        super(RatingDetail, self).delete(request, *args, **kwargs)
        response = {"status_code": status.HTTP_200_OK, "message": "Successfully deleted"}
        return Response(response)
|
# Created by <NAME> on 07/07/2018
# @email <EMAIL>
from rest_framework.response import Response
from ..models import Rating
from . import serializers
from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
class RatingList(generics.ListAPIView):
    """List all ratings; requires authentication."""
    permission_classes = (IsAuthenticated,)
    queryset = Rating.objects.all()
    serializer_class = serializers.RatingSerializer
class RatingCreate(generics.CreateAPIView):
    """Create a rating, responding with a uniform envelope."""
    queryset = Rating.objects.all()
    serializer_class = serializers.RatingSerializer
    def create(self, request, *args, **kwargs):
        # NOTE(review): args/kwargs should be unpacked (`*args, **kwargs`);
        # as written the tuple and dict are passed as two positional args.
        super(RatingCreate, self).create(request, args, kwargs)
        response = {"status_code": status.HTTP_200_OK, "message": "Successfully created", "result": request.data}
        return Response(response)
class RatingDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single rating with enveloped responses."""
    permission_classes = (IsAuthenticated,)
    queryset = Rating.objects.all()
    serializer_class = serializers.RatingSerializer
    def retrieve(self, request, *args, **kwargs):
        # NOTE(review): same args/kwargs unpacking issue as RatingCreate.create.
        super(RatingDetail, self).retrieve(request, args, kwargs)
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        data = serializer.data
        response = {"status_code": status.HTTP_200_OK, "message": "Successfully retrieved", "result": data}
        return Response(response)
    def patch(self, request, *args, **kwargs):
        # NOTE(review): same args/kwargs unpacking issue as RatingCreate.create.
        super(RatingDetail, self).patch(request, args, kwargs)
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        data = serializer.data
        response = {"status_code": status.HTTP_200_OK, "message": "Successfully updated", "result": data}
        return Response(response)
    def delete(self, request, *args, **kwargs):
        # NOTE(review): same args/kwargs unpacking issue as RatingCreate.create.
        super(RatingDetail, self).delete(request, args, kwargs)
        response = {"status_code": status.HTTP_200_OK, "message": "Successfully deleted"}
        return Response(response)
|
en
| 0.660774
|
# Created by <NAME> on 07/07/2018 # @email <EMAIL>
| 1.943849
| 2
|
pyrigate/jobs/job.py
|
pyrigate/pyrigate
| 1
|
6627773
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Base class for all jobs."""
import schedule
import threading
class Job:
    """A periodic job."""

    def __init__(self):
        self._running = False           # True while the job is scheduled/active
        self._runs = 0                  # number of completed runs
        self._event = threading.Event()

    def schedule(self):
        """Schedule this job."""
        raise NotImplementedError()

    def stop(self):
        """Stop this job and cancel its schedule entry."""
        schedule.cancel_job(self)
        self._running = False

    @property
    def running(self):
        """Whether the job is currently running."""
        return self._running

    @property
    def runs(self):
        """How many times this job has run."""
        # BUG FIX: the value was evaluated but never returned, so the
        # property always yielded None.
        return self._runs

    @property
    def tag(self):
        """The tag associated with this job."""
        raise NotImplementedError()

    def task(self):
        """Execute the job."""
        raise NotImplementedError()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Base class for all jobs."""
import schedule
import threading
class Job:
    """A periodic job."""
    def __init__(self):
        # _running: True while the job is scheduled/active.
        # _runs: number of completed runs.
        self._running = False
        self._runs = 0
        self._event = threading.Event()
    def schedule(self):
        """Schedule this job."""
        raise NotImplementedError()
    def stop(self):
        """Stop this job."""
        schedule.cancel_job(self)
        self._running = False
    @property
    def running(self):
        return self._running
    @property
    def runs(self):
        """How many times this job has run."""
        # NOTE(review): missing `return` -- this property always evaluates
        # to None; should be `return self._runs`.
        self._runs
    @property
    def tag(self):
        """The tag associated with this job."""
        raise NotImplementedError()
    def task(self):
        """Execute the job."""
        raise NotImplementedError()
|
en
| 0.949313
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Base class for all jobs. A periodic job. Schedule this job. Stop this job. How many times this job has run. The tag associated with this job. Execute the job.
| 3.182594
| 3
|
edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/PlotEntry.py
|
srcarter3/awips2
| 0
|
6627774
|
<filename>edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/PlotEntry.py
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
import logging
import cPickle as pickle
import Avn, AvnParser, AvnLib
import TafDecoder
import JUtil
import MetarData, EtaData, MosData
_Logger = logging.getLogger(Avn.CATEGORY)
#
# Entry point for Weather Plot data retrieval
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 08/24/09 avarani Initial Creation.
# 04/28/11 8065 rferrel Use cached site objects
#
#
def getNam(siteObj):
    # Unpickle the cached site object and return NAM (eta) model data in a
    # Java-friendly structure, or None when no data is cached.
    # (Python 2 code: cPickle, cmp-style sort elsewhere in this module.)
    model = 'etabuf'
    o = pickle.loads(siteObj)
    siteID = o['siteID']
    etaData = o['data']
    # print 'PlotEntry.getNam: model, siteID:', model, siteID
    if etaData is not None:
        data = [{'data' : eta.data} for eta in etaData]
    else:
        data = None
    return JUtil.pyValToJavaObj(data)
def getMos(siteObj, model):
    # Same as getNam but for the requested MOS model.
    o = pickle.loads(siteObj)
    siteID = o['siteID']
    mosData = o['data']
    # print 'PlotEntry.getMos: model, siteID:', model, siteID
    if mosData is not None:
        data = [{'data' : mos.data} for mos in mosData]
    else:
        data = None
    return JUtil.pyValToJavaObj(data)
def getMetars(siteObj, size=99):
    # Return decoded METARs for the site, newest issue time first.
    # NOTE(review): `size` is unused -- presumably meant to cap the number
    # of returned records; confirm against callers.
    o = pickle.loads(siteObj)
    siteID = o['siteID']
    data = o['data']
    # print 'PlotEntry.getMetars siteID, size:', siteID, size
    if data is not None:
        data = [{'header' : d.header, 'text' : d.text, 'dcd' : d.dcd} for d in data]
        # Python 2 comparator sort: descending by issue-time string.
        data.sort(lambda x, y: cmp(y['dcd']['itime']['str'], x['dcd']['itime']['str']))
    return JUtil.pyValToJavaObj(data)
def decodeTaf(taf, wmoHeader):
    # Decode a raw TAF; the BBB indicator is taken from the 4th token of
    # the WMO header when present, otherwise a single space.
    # print 'plotEntry.decodeTaf: taf<%s>,\nwmoHeader<%s>:' % ( taf, wmoHeader)
    decoder = TafDecoder.Decoder()
    try:
        bbb = wmoHeader.split()[3]
    except IndexError:
        bbb = ' '
    dcd = decoder(taf, bbb)
    tafDict = {'header': wmoHeader, 'text': taf, 'dcd': dcd}
    return JUtil.pyValToJavaObj(tafDict)
|
<filename>edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/PlotEntry.py
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
import logging
import cPickle as pickle
import Avn, AvnParser, AvnLib
import TafDecoder
import JUtil
import MetarData, EtaData, MosData
_Logger = logging.getLogger(Avn.CATEGORY)
#
# Entry point for Weather Plot data retrieval
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 08/24/09 avarani Initial Creation.
# 04/28/11 8065 rferrel Use cached site objects
#
#
def getNam(siteObj):
model = 'etabuf'
o = pickle.loads(siteObj)
siteID = o['siteID']
etaData = o['data']
# print 'PlotEntry.getNam: model, siteID:', model, siteID
if etaData is not None:
data = [{'data' : eta.data} for eta in etaData]
else:
data = None
return JUtil.pyValToJavaObj(data)
def getMos(siteObj, model):
o = pickle.loads(siteObj)
siteID = o['siteID']
mosData = o['data']
# print 'PlotEntry.getMos: model, siteID:', model, siteID
if mosData is not None:
data = [{'data' : mos.data} for mos in mosData]
else:
data = None
return JUtil.pyValToJavaObj(data)
def getMetars(siteObj, size=99):
o = pickle.loads(siteObj)
siteID = o['siteID']
data = o['data']
# print 'PlotEntry.getMetars siteID, size:', siteID, size
if data is not None:
data = [{'header' : d.header, 'text' : d.text, 'dcd' : d.dcd} for d in data]
data.sort(lambda x, y: cmp(y['dcd']['itime']['str'], x['dcd']['itime']['str']))
return JUtil.pyValToJavaObj(data)
def decodeTaf(taf, wmoHeader):
# print 'plotEntry.decodeTaf: taf<%s>,\nwmoHeader<%s>:' % ( taf, wmoHeader)
decoder = TafDecoder.Decoder()
try:
bbb = wmoHeader.split()[3]
except IndexError:
bbb = ' '
dcd = decoder(taf, bbb)
tafDict = {'header': wmoHeader, 'text': taf, 'dcd': dcd}
return JUtil.pyValToJavaObj(tafDict)
|
en
| 0.632417
|
## # This software was developed and / or modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government. # # U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S. persons whether in the United States or abroad requires # an export license or other authorization. # # Contractor Name: <NAME> # Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 # # See the AWIPS II Master Rights File ("Master Rights File.pdf") for # further licensing information. ## ## # This is a base file that is not intended to be overridden. ## # # Entry point for Weather Plot data retrieval # # # SOFTWARE HISTORY # # Date Ticket# Engineer Description # ------------ ---------- ----------- -------------------------- # 08/24/09 avarani Initial Creation. # 04/28/11 8065 rferrel Use cached site objects # # # print 'PlotEntry.getNam: model, siteID:', model, siteID # print 'PlotEntry.getMos: model, siteID:', model, siteID # print 'PlotEntry.getMetars siteID, size:', siteID, size # print 'plotEntry.decodeTaf: taf<%s>,\nwmoHeader<%s>:' % ( taf, wmoHeader)
| 1.738683
| 2
|
dace/frontend/python/decorators.py
|
fthaler/dace
| 0
|
6627775
|
<reponame>fthaler/dace<filename>dace/frontend/python/decorators.py
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
""" Python decorators for DaCe functions. """
from __future__ import print_function
from dace import dtypes
from dace.dtypes import paramdec
from dace.frontend.python import parser
from typing import Callable
#############################################
# Type hint specifically for the @dace.program decorator
paramdec_program: Callable[..., Callable[..., parser.DaceProgram]] = paramdec
@paramdec_program
def program(f, *args, **kwargs) -> parser.DaceProgram:
    """ DaCe program, entry point to a data-centric program. """
    # Parses a python @dace.program function and returns an object that can
    # be translated
    return parser.DaceProgram(f, args, kwargs)
function = program  # alias: @dace.function behaves exactly like @dace.program
# Internal DaCe decorators, these are not actually run, but rewritten
# Dataflow constructs
@paramdec
def map(f, rng):
    # NOTE: intentionally shadows the builtin `map`; this is only a marker
    # the DaCe frontend rewrites -- the body never executes.
    """ A Map is representation of parallel execution, containing
        an integer set (Python range) for which its contents are run
        concurrently.
        :param rng: The map's range.
    """
    pass
@paramdec
def consume(f, stream, pes):
    """ Consume is a scope, like `Map`, that creates parallel execution.
        Unlike `Map`, it creates a producer-consumer relationship between an
        input stream and the contents. The contents are run by the given number
        of processing elements, who will try to pop elements from the input
        stream until a given quiescence condition is reached.
        :param stream: The stream to pop from.
        :param pes: The number of processing elements to use.
    """
    pass
def tasklet(f):
    """ A general procedure that cannot access any memory apart from incoming
        and outgoing memlets. The DaCe framework cannot analyze these tasklets
        for optimization. """
    pass
# Control-flow constructs
@paramdec
def iterate(f, rng):
    """ A decorator version of a for loop, with a range of `rng`.
        :param rng: The range of the for loop.
    """
    pass
@paramdec
def loop(f, cond):
    """ A decorator version of a while loop, with a looping condition `cond`.
        :param cond: The condition of the while loop.
    """
    pass
@paramdec
def conditional(f, cond):
    """ A decorator version of conditional execution, with an if-condition
        `cond`.
        :param cond: The condition of the branch.
    """
    pass
|
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
""" Python decorators for DaCe functions. """
from __future__ import print_function
from dace import dtypes
from dace.dtypes import paramdec
from dace.frontend.python import parser
from typing import Callable
#############################################
# Type hint specifically for the @dace.program decorator
paramdec_program: Callable[..., Callable[..., parser.DaceProgram]] = paramdec
@paramdec_program
def program(f, *args, **kwargs) -> parser.DaceProgram:
""" DaCe program, entry point to a data-centric program. """
# Parses a python @dace.program function and returns an object that can
# be translated
return parser.DaceProgram(f, args, kwargs)
function = program
# Internal DaCe decorators, these are not actually run, but rewritten
# Dataflow constructs
@paramdec
def map(f, rng):
""" A Map is representation of parallel execution, containing
an integer set (Python range) for which its contents are run
concurrently.
:param rng: The map's range.
"""
pass
@paramdec
def consume(f, stream, pes):
""" Consume is a scope, like `Map`, that creates parallel execution.
Unlike `Map`, it creates a producer-consumer relationship between an
input stream and the contents. The contents are run by the given number
of processing elements, who will try to pop elements from the input
stream until a given quiescence condition is reached.
:param stream: The stream to pop from.
:param pes: The number of processing elements to use.
"""
pass
def tasklet(f):
""" A general procedure that cannot access any memory apart from incoming
and outgoing memlets. The DaCe framework cannot analyze these tasklets
for optimization. """
pass
# Control-flow constructs
@paramdec
def iterate(f, rng):
""" A decorator version of a for loop, with a range of `rng`.
:param rng: The range of the for loop.
"""
pass
@paramdec
def loop(f, cond):
""" A decorator version of a while loop, with a looping condition `cond`.
:param cond: The condition of the while loop.
"""
pass
@paramdec
def conditional(f, cond):
""" A decorator version of conditional execution, with an if-condition
`cond`.
:param cond: The condition of the branch.
"""
pass
|
en
| 0.825018
|
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved. Python decorators for DaCe functions. ############################################# # Type hint specifically for the @dace.program decorator DaCe program, entry point to a data-centric program. # Parses a python @dace.program function and returns an object that can # be translated # Internal DaCe decorators, these are not actually run, but rewritten # Dataflow constructs A Map is representation of parallel execution, containing an integer set (Python range) for which its contents are run concurrently. :param rng: The map's range. Consume is a scope, like `Map`, that creates parallel execution. Unlike `Map`, it creates a producer-consumer relationship between an input stream and the contents. The contents are run by the given number of processing elements, who will try to pop elements from the input stream until a given quiescence condition is reached. :param stream: The stream to pop from. :param pes: The number of processing elements to use. A general procedure that cannot access any memory apart from incoming and outgoing memlets. The DaCe framework cannot analyze these tasklets for optimization. # Control-flow constructs A decorator version of a for loop, with a range of `rng`. :param rng: The range of the for loop. A decorator version of a while loop, with a looping condition `cond`. :param cond: The condition of the while loop. A decorator version of conditional execution, with an if-condition `cond`. :param cond: The condition of the branch.
| 2.739018
| 3
|
photonsdi/util/lfsr.py
|
felixheld/photonSDI
| 8
|
6627776
|
<reponame>felixheld/photonSDI
from operator import xor
from migen import *
class LfsrScrambler(Module):
    """Migen module: combinational LFSR scrambler unrolled to process
    datapath_width bits per step.

    Ports: i_data/o_data (datapath_width bits wide) and
    i_last_state/o_state (lfsr_length = highest tap).
    """
    def __init__(self, lfsr_taps, datapath_width):
        assert lfsr_taps
        lfsr_length = max(lfsr_taps)
        self.i_data = Signal(datapath_width)
        self.o_data = Signal(datapath_width)
        self.i_last_state = Signal(lfsr_length)
        self.o_state = Signal(lfsr_length)
        ###
        # Feedback uses every tap except the highest one (the register length).
        feedback_taps = lfsr_taps[:]
        feedback_taps.remove(max(feedback_taps))
        # Unroll one LFSR step per data bit: each new state bit is the XOR
        # of the feedback taps and the incoming data bit.
        # NOTE(review): `reduce` is not imported in this module -- under
        # Python 3 this relies on migen's star import providing it; confirm.
        state = [self.i_last_state[i] for i in range(lfsr_length)]
        for i in range(datapath_width):
            state.append(reduce(xor, [state[tap] for tap in feedback_taps] + [self.i_data[i]]))
            self.comb += [
                self.o_data[i].eq(state.pop(0))
            ]
        # Remaining bits form the state carried into the next word.
        self.comb += [
            self.o_state.eq(Cat(*state[:lfsr_length]))
        ]
class LfsrDescrambler(Module):
    """Migen module: combinational descrambler matching LfsrScrambler --
    each output bit is the XOR of the scrambled stream at the tap offsets."""
    def __init__(self, lfsr_taps, datapath_width):
        assert lfsr_taps
        lfsr_length = max(lfsr_taps)
        self.i_data = Signal(datapath_width)
        self.o_data = Signal(datapath_width)
        self.i_last_state = Signal(lfsr_length)
        self.o_state = Signal(lfsr_length)
        ###
        # Sliding window: previous state bits followed by the new data bits.
        # NOTE(review): `reduce` is not imported here either -- see
        # LfsrScrambler.
        curval = Cat(self.i_last_state, self.i_data)
        for i in range(datapath_width):
            self.comb += [
                self.o_data[i].eq(reduce(xor, [curval[tap + i] for tap in lfsr_taps]))
            ]
        # The last lfsr_length data bits become the next state.
        self.comb += [
            self.o_state.eq(self.i_data[-lfsr_length:])
        ]
|
from operator import xor
from migen import *
class LfsrScrambler(Module):
def __init__(self, lfsr_taps, datapath_width):
assert lfsr_taps
lfsr_length = max(lfsr_taps)
self.i_data = Signal(datapath_width)
self.o_data = Signal(datapath_width)
self.i_last_state = Signal(lfsr_length)
self.o_state = Signal(lfsr_length)
###
feedback_taps = lfsr_taps[:]
feedback_taps.remove(max(feedback_taps))
state = [self.i_last_state[i] for i in range(lfsr_length)]
for i in range(datapath_width):
state.append(reduce(xor, [state[tap] for tap in feedback_taps] + [self.i_data[i]]))
self.comb += [
self.o_data[i].eq(state.pop(0))
]
self.comb += [
self.o_state.eq(Cat(*state[:lfsr_length]))
]
class LfsrDescrambler(Module):
def __init__(self, lfsr_taps, datapath_width):
assert lfsr_taps
lfsr_length = max(lfsr_taps)
self.i_data = Signal(datapath_width)
self.o_data = Signal(datapath_width)
self.i_last_state = Signal(lfsr_length)
self.o_state = Signal(lfsr_length)
###
curval = Cat(self.i_last_state, self.i_data)
for i in range(datapath_width):
self.comb += [
self.o_data[i].eq(reduce(xor, [curval[tap + i] for tap in lfsr_taps]))
]
self.comb += [
self.o_state.eq(self.i_data[-lfsr_length:])
]
|
none
| 1
| 2.450298
| 2
|
|
websites/migrations/0026_auto_20200630_2307.py
|
tsukasa-renato/personal-project-django-system
| 0
|
6627777
|
<reponame>tsukasa-renato/personal-project-django-system<gh_stars>0
# Generated by Django 3.0.7 on 2020-07-01 02:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens Products.title to max_length=200.
    dependencies = [
        ('websites', '0025_auto_20200629_2249'),
    ]
    operations = [
        migrations.AlterField(
            model_name='products',
            name='title',
            field=models.CharField(max_length=200),
        ),
    ]
|
# Generated by Django 3.0.7 on 2020-07-01 02:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('websites', '0025_auto_20200629_2249'),
]
operations = [
migrations.AlterField(
model_name='products',
name='title',
field=models.CharField(max_length=200),
),
]
|
en
| 0.798703
|
# Generated by Django 3.0.7 on 2020-07-01 02:07
| 1.501346
| 2
|
data/swing_equation/IEEE.py
|
yuandugu/AI_Swing
| 4
|
6627778
|
from mpi4py import MPI
import math
import numpy as np
from scipy.integrate import solve_ivp
from scipy.special import comb, perm
import xlrd
import time
import random
import pandas as pd
import timeit
import operator
import h5py
##################### parameters ####################
N = 39 # number of node
omega_s = 100 * math.pi # synchronous angular frequency
baseMVA = 10**8 # power reference value
M = 50000 # mass moments of inertia
alpha = 0.6 # damping
theta = math.pi # range of theta_0
omega = 20 # range of omega_0
step = 0.05 # time step to solve ODE
max_t = 120 # maximum time to sove ODE
t = np.arange(0, max_t, step) # time stream to solve ODE
data_number = 1000 # samping number
interval = False
if interval == True:
cut_out_num = 50 # collect data number, 100 for 14, 50 for 39
else:
cut_out_num = 100
def dmove(t, y, sets):
    """Right-hand side of the swing-equation ODE system.

    y = [theta_1..theta_N, omega_1..omega_N]; `sets` stacks the power
    vector (row 0) on top of the N x N coupling matrix (rows 1..N).
    Returns the 2N-dimensional derivative vector.
    """
    X = np.zeros((N * 2))
    for i in range(N):
        # d(theta_i)/dt = omega_i
        X[i] = y[i + N]
        a = 0
        for j in range(N):
            a += sets[i + 1, j]/16 * math.sin(y[j] - y[i])
        # d(omega_i)/dt = -damping + injected power + sine coupling.
        # NOTE(review): the /16 scaling factor's origin is not shown here --
        # confirm it matches the parameter preprocessing.
        X[i + N] = -alpha * y[i + N] + sets[0, i]/16 + a
    return X
def load_para():
    """Load power, admittance and initial conditions from parameter<N>.xlsx.

    Returns (PY, initial): PY stacks the scaled power vector (row 0) on
    the scaled N x N admittance matrix; initial holds 2N state values
    with angles converted to radians.
    """
    parameter = xlrd.open_workbook('/parameter/parameter%s.xlsx' %(N))
    # power matrix (sheet 0)
    P_sheet1 = parameter.sheet_by_index(0)
    nrows = P_sheet1.nrows
    ncols = P_sheet1.ncols
    P = np.zeros((N))
    for i in range(nrows):
        for j in range(ncols):
            P[i] = P_sheet1.cell_value(i, j)
    P = P * baseMVA
    P = [i - np.sum(P)/N for i in P] # power compensation: shift to zero mean
    P = np.array([i/(M*omega_s) for i in P])
    # admittance matrix (sheet 1)
    Y_sheet1 = parameter.sheet_by_index(1)
    nrows = Y_sheet1.nrows
    ncols = Y_sheet1.ncols
    Y = np.zeros((N, N))
    for i in range(nrows):
        for j in range(ncols):
            Y[i, j] = Y_sheet1.cell_value(i, j)
    Y = np.array([i*baseMVA/(M*omega_s) for i in Y])
    # stack parameters: row 0 = P, rows 1..N = Y
    PY = np.vstack((P, Y))
    # initial conditions (sheet 2; angles stored in degrees)
    theta_sheet1 = parameter.sheet_by_index(2)
    nrows = theta_sheet1.nrows
    ncols = theta_sheet1.ncols
    initial = np.zeros((N * 2))
    for i in range(nrows):
        for j in range(ncols):
            initial[i] = theta_sheet1.cell_value(i, j)
    initial = [i / 180 * math.pi for i in initial] # convert to radians
    print('原始数据导入完毕')
    return PY, initial
def generate_uniform_init_array(Initial, init_num, node_num):
    """Draw `init_num` random (theta, omega) initial conditions for one node.

    Uniform in [theta0 - theta, theta0 + theta] x [-omega, +omega], where
    theta0 = Initial[node_num - 1] and theta/omega are the module-level
    ranges. Seeded deterministically per node for reproducibility.
    Returns a (2, init_num) array: row 0 = angles, row 1 = frequencies.
    """
    np.random.seed(node_num*570)
    init_array = np.random.rand(2, init_num)
    init_array -= 0.5*np.ones((2, init_num))
    init_array[0, :] *= 2 * theta
    init_array[0, :] += Initial[node_num - 1] * np.ones((init_num))
    init_array[1, :] *= 2 * omega
    return init_array
def solve_one_ODE_updated(i):
    """Worker job for node i+1: integrate `data_number` randomly perturbed
    initial conditions and store trajectories plus a convergence label.

    Fixes over the previous version: the shared synchronised-state array is
    copied instead of mutated in place (`init = a` aliased it), the
    `names = locals()` dict-juggling and the dead `length` variable are
    removed, and the two byte-identical h5py.File branches are collapsed.

    :param i: zero-based node index; results are written to /one/<i+1>.h5
              with datasets data_theta, data_omega and Y (labels).
    """
    # IEEE-39 synchronised state: N angles followed by N zero frequencies
    a = np.array([-0.24219997, -0.16992011, -0.21896319, -0.22769395, -0.20274313, -0.18877805,
                  -0.23072831, -0.24088105, -0.25411382, -0.14792818, -0.16214242, -0.16401846,
                  -0.16169114, -0.1933527, -0.20324505, -0.17720979, -0.19711253, -0.21354782,
                  -0.08796499, -0.11204258, -0.13237097, -0.04721098, -0.05117464, -0.1747437,
                  -0.14210796, -0.16254737, -0.20094919, -0.09408921, -0.04086045, -0.12485783,
                  -0.021106, -0.01778558, 0.00184892, -0.02056255, 0.04571267, 0.10145837,
                  -0.01671788, 0.08897803, -0.26130884, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                  0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                  0., 0., 0., 0., 0., 0., 0., 0., 0.])
    # random (theta_0, omega_0) pairs for node i+1
    init_pairs = generate_uniform_init_array(Initial=a, init_num=data_number, node_num=i+1)
    S = []  # per-sample label: 0 = converged, 1 = diverged
    data_theta = np.zeros((data_number, cut_out_num * N))
    data_omega = np.zeros((data_number, cut_out_num * N))
    for j in range(data_number):
        init = a.copy()  # copy so the shared synchronised state is never mutated
        init[i] = init_pairs[0, j]
        init[i+N] = init_pairs[1, j]
        result = solve_ivp(fun=lambda t, y: dmove(t, y, PY), t_span=(0.0, max_t), y0=init, method='RK45', t_eval=t)
        for num in range(N):
            lo = num * cut_out_num
            if interval == True:
                # keep every 4th sample of the trajectory prefix
                data_theta[j, lo:lo+cut_out_num] = result.y[num, 0:4*cut_out_num-3:4]
                data_omega[j, lo:lo+cut_out_num] = result.y[num+N, 0:4*cut_out_num-3:4]
            else:
                # keep the first cut_out_num samples
                data_theta[j, lo:lo+cut_out_num] = result.y[num, 0:cut_out_num]
                data_omega[j, lo:lo+cut_out_num] = result.y[num+N, 0:cut_out_num]
        # converged when every final frequency deviation is small
        if np.amax(abs(result.y[N:, -1])) <= 0.2:
            S.append(0)  # converged
            print(0)
        else:
            S.append(1)  # diverged
            print(1)
        print('第(%s,%s)个ODE计算结束' % (i+1, j+1))
    # both branches of the original if/else opened the same file — one call suffices
    f = h5py.File('/one/%s.h5' % (i+1), 'w')
    f.create_dataset('data_theta', data=data_theta)
    f.create_dataset('data_omega', data=data_omega)
    f.create_dataset('Y', data=np.array(S))
    f.close()
def bigjobMPI_one_updated():
    """Distribute the N per-node sampling jobs over the available MPI ranks
    and run solve_one_ODE_updated for each job assigned to this rank.
    """
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    numjobs = N
    job_content = []  # the collection of job parameters (one node index each)
    for i_cur in range(N):
        job_content.append(i_cur)
    # arrange the works and jobs
    if rank == 0:
        # this is head worker
        # jobs are arranged by this worker
        job_all_idx = list(range(numjobs))
        random.shuffle(job_all_idx)
        # shuffle the job index to make all workers equal
        # for unbalanced jobs
    else:
        job_all_idx = None
    job_all_idx = comm.bcast(job_all_idx, root=0)
    njob_per_worker, res = divmod(numjobs, size)
    # the first `res` ranks each take one extra job so all jobs are covered
    if rank < res:
        this_worker_job = [job_all_idx[x] for x in range(rank*(njob_per_worker+1), (rank + 1)*(njob_per_worker+1))]
    elif rank >= res:
        this_worker_job = [job_all_idx[x] for x in range(rank*njob_per_worker + res, (rank + 1)*njob_per_worker + res)]
    # map the shuffled indices back to job parameters (node indices)
    work_content = [job_content[x] for x in this_worker_job]
    for a_piece_of_work in work_content:
        print('核心数为:%s' %(rank))  # logs which MPI rank runs this job
        solve_one_ODE_updated(a_piece_of_work)
if __name__=="__main__":
    # PY and initial become module globals; solve_one_ODE_updated reads PY.
    PY, initial = load_para()
    bigjobMPI_one_updated()
|
from mpi4py import MPI
import math
import numpy as np
from scipy.integrate import solve_ivp
from scipy.special import comb, perm
import xlrd
import time
import random
import pandas as pd
import timeit
import operator
import h5py
##################### parameters ####################
N = 39  # number of nodes (IEEE-39 test grid)
omega_s = 100 * math.pi  # synchronous angular frequency (rad/s, 50 Hz grid)
baseMVA = 10**8  # power reference value used to rescale the Excel data
M = 50000  # mass moments of inertia (shared by all nodes)
alpha = 0.6  # damping
theta = math.pi  # half-width of the uniform range for theta_0 perturbations
omega = 20  # half-width of the uniform range for omega_0 perturbations
step = 0.05  # time step to solve ODE
max_t = 120  # maximum time to solve ODE
t = np.arange(0, max_t, step)  # evaluation grid handed to solve_ivp
data_number = 1000  # number of random initial conditions sampled per node
interval = False  # when True, keep every 4th trajectory sample instead of a prefix
if interval == True:
    cut_out_num = 50  # collect data number, 100 for 14, 50 for 39
else:
    cut_out_num = 100
def dmove(t, y, sets):
    """Right-hand side of the swing-equation ODE system.

    `y` holds [theta_1..theta_N, omega_1..omega_N]; `sets` stacks the power
    vector (row 0) on top of the N x N coupling matrix (rows 1..N).
    Returns the derivative vector of the same length.
    """
    deriv = np.zeros(N * 2)
    for node in range(N):
        # d(theta_node)/dt = omega_node
        deriv[node] = y[node + N]
        coupling = 0
        for other in range(N):
            coupling += sets[node + 1, other] / 16 * math.sin(y[other] - y[node])
        # d(omega_node)/dt = -alpha*omega + P_node + sine coupling term
        deriv[node + N] = -alpha * y[node + N] + sets[0, node] / 16 + coupling
    return deriv
def load_para():
    """Load grid parameters from the Excel workbook and rescale them into
    per-unit swing-equation form.

    :return: (PY, initial) — PY stacks the power row on top of the N x N
             coupling matrix (the shape consumed by dmove); initial holds
             N angles in radians followed by N zeros.
    """
    # NOTE(review): absolute path — assumes the workbook lives under /parameter; confirm.
    parameter = xlrd.open_workbook('/parameter/parameter%s.xlsx' %(N))
    # power matrix (sheet 0); assumes effectively one column — each j overwrites P[i]
    P_sheet1 = parameter.sheet_by_index(0)
    nrows = P_sheet1.nrows
    ncols = P_sheet1.ncols
    P = np.zeros((N))
    for i in range(nrows):
        for j in range(ncols):
            P[i] = P_sheet1.cell_value(i, j)
    P = P * baseMVA
    P = [i - np.sum(P)/N for i in P]  # power compensation: subtract the mean so powers balance
    P = np.array([i/(M*omega_s) for i in P])
    # admittance (coupling) matrix (sheet 1), same per-unit rescaling
    Y_sheet1 = parameter.sheet_by_index(1)
    nrows = Y_sheet1.nrows
    ncols = Y_sheet1.ncols
    Y = np.zeros((N, N))
    for i in range(nrows):
        for j in range(ncols):
            Y[i, j] = Y_sheet1.cell_value(i, j)
    Y = np.array([i*baseMVA/(M*omega_s) for i in Y])
    # merge parameters: row 0 is P, rows 1..N are Y
    PY = np.vstack((P, Y))
    # initial conditions (sheet 2): N angles; remaining entries stay zero
    theta_sheet1 = parameter.sheet_by_index(2)
    nrows = theta_sheet1.nrows
    ncols = theta_sheet1.ncols
    initial = np.zeros((N * 2))
    for i in range(nrows):
        for j in range(ncols):
            initial[i] = theta_sheet1.cell_value(i, j)
    initial = [i / 180 * math.pi for i in initial]  # convert degrees to radians
    print('原始数据导入完毕')  # "raw data import finished"
    return PY, initial
def generate_uniform_init_array(Initial, init_num, node_num):
    """Draw `init_num` uniform random (theta_0, omega_0) pairs for one node.

    Row 0 is theta_0 ~ U(center - theta, center + theta) centred on the
    node's synchronised angle; row 1 is omega_0 ~ U(-omega, omega).
    The seed is derived from node_num so each node is reproducible.
    """
    np.random.seed(node_num * 570)
    samples = np.random.rand(2, init_num)
    samples -= 0.5 * np.ones((2, init_num))
    # scale and shift the angle row onto the node's synchronised state
    samples[0, :] *= 2 * theta
    samples[0, :] += Initial[node_num - 1] * np.ones(init_num)
    # scale the frequency row
    samples[1, :] *= 2 * omega
    return samples
def solve_one_ODE_updated(i):
    """Worker job for node i+1: integrate `data_number` randomly perturbed
    initial conditions and store trajectories plus a convergence label.

    Fixes over the previous version: the shared synchronised-state array is
    copied instead of mutated in place (`init = a` aliased it), the
    `names = locals()` dict-juggling and the dead `length` variable are
    removed, and the two byte-identical h5py.File branches are collapsed.

    :param i: zero-based node index; results are written to /one/<i+1>.h5
              with datasets data_theta, data_omega and Y (labels).
    """
    # IEEE-39 synchronised state: N angles followed by N zero frequencies
    a = np.array([-0.24219997, -0.16992011, -0.21896319, -0.22769395, -0.20274313, -0.18877805,
                  -0.23072831, -0.24088105, -0.25411382, -0.14792818, -0.16214242, -0.16401846,
                  -0.16169114, -0.1933527, -0.20324505, -0.17720979, -0.19711253, -0.21354782,
                  -0.08796499, -0.11204258, -0.13237097, -0.04721098, -0.05117464, -0.1747437,
                  -0.14210796, -0.16254737, -0.20094919, -0.09408921, -0.04086045, -0.12485783,
                  -0.021106, -0.01778558, 0.00184892, -0.02056255, 0.04571267, 0.10145837,
                  -0.01671788, 0.08897803, -0.26130884, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                  0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                  0., 0., 0., 0., 0., 0., 0., 0., 0.])
    # random (theta_0, omega_0) pairs for node i+1
    init_pairs = generate_uniform_init_array(Initial=a, init_num=data_number, node_num=i+1)
    S = []  # per-sample label: 0 = converged, 1 = diverged
    data_theta = np.zeros((data_number, cut_out_num * N))
    data_omega = np.zeros((data_number, cut_out_num * N))
    for j in range(data_number):
        init = a.copy()  # copy so the shared synchronised state is never mutated
        init[i] = init_pairs[0, j]
        init[i+N] = init_pairs[1, j]
        result = solve_ivp(fun=lambda t, y: dmove(t, y, PY), t_span=(0.0, max_t), y0=init, method='RK45', t_eval=t)
        for num in range(N):
            lo = num * cut_out_num
            if interval == True:
                # keep every 4th sample of the trajectory prefix
                data_theta[j, lo:lo+cut_out_num] = result.y[num, 0:4*cut_out_num-3:4]
                data_omega[j, lo:lo+cut_out_num] = result.y[num+N, 0:4*cut_out_num-3:4]
            else:
                # keep the first cut_out_num samples
                data_theta[j, lo:lo+cut_out_num] = result.y[num, 0:cut_out_num]
                data_omega[j, lo:lo+cut_out_num] = result.y[num+N, 0:cut_out_num]
        # converged when every final frequency deviation is small
        if np.amax(abs(result.y[N:, -1])) <= 0.2:
            S.append(0)  # converged
            print(0)
        else:
            S.append(1)  # diverged
            print(1)
        print('第(%s,%s)个ODE计算结束' % (i+1, j+1))
    # both branches of the original if/else opened the same file — one call suffices
    f = h5py.File('/one/%s.h5' % (i+1), 'w')
    f.create_dataset('data_theta', data=data_theta)
    f.create_dataset('data_omega', data=data_omega)
    f.create_dataset('Y', data=np.array(S))
    f.close()
def bigjobMPI_one_updated():
    """Distribute the N per-node sampling jobs over the available MPI ranks
    and run solve_one_ODE_updated for each job assigned to this rank.
    """
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    numjobs = N
    job_content = []  # the collection of job parameters (one node index each)
    for i_cur in range(N):
        job_content.append(i_cur)
    # arrange the works and jobs
    if rank == 0:
        # this is head worker
        # jobs are arranged by this worker
        job_all_idx = list(range(numjobs))
        random.shuffle(job_all_idx)
        # shuffle the job index to make all workers equal
        # for unbalanced jobs
    else:
        job_all_idx = None
    job_all_idx = comm.bcast(job_all_idx, root=0)
    njob_per_worker, res = divmod(numjobs, size)
    # the first `res` ranks each take one extra job so all jobs are covered
    if rank < res:
        this_worker_job = [job_all_idx[x] for x in range(rank*(njob_per_worker+1), (rank + 1)*(njob_per_worker+1))]
    elif rank >= res:
        this_worker_job = [job_all_idx[x] for x in range(rank*njob_per_worker + res, (rank + 1)*njob_per_worker + res)]
    # map the shuffled indices back to job parameters (node indices)
    work_content = [job_content[x] for x in this_worker_job]
    for a_piece_of_work in work_content:
        print('核心数为:%s' %(rank))  # logs which MPI rank runs this job
        solve_one_ODE_updated(a_piece_of_work)
if __name__=="__main__":
    # PY and initial become module globals; solve_one_ODE_updated reads PY.
    PY, initial = load_para()
    bigjobMPI_one_updated()
|
en
| 0.510896
|
##################### parameters #################### # number of node # synchronous angular frequency # power reference value # mass moments of inertia # damping # range of theta_0 # range of omega_0 # time step to solve ODE # maximum time to sove ODE # time stream to solve ODE # samping number # collect data number, 100 for 14, 50 for 39 定义ODE # 功率矩阵 # 功率补偿 # 导纳矩阵 # 参数合并 # 初始条件 # 转换为弧度制 产生多组单个节点服从均匀分布的随机初始条件 parallel function # IEEE-39的同步状态 # 第i+1个节点的初始条件 # 收敛 # 不收敛 calculate change_two_node data # the collection of parameters [i,j] # arrange the works and jobs # this is head worker # jobs are arranged by this worker # shuffle the job index to make all workers equal # for unbalanced jobs # the number of jobs should be a multiple of the NumProcess[MPI] # map the index to parameterset [eps,anis]
| 2.203194
| 2
|
tests/clpy_tests/opencl_tests/test_ndarray.py
|
fixstars/clpy
| 142
|
6627779
|
<filename>tests/clpy_tests/opencl_tests/test_ndarray.py<gh_stars>100-1000
import unittest
import numpy as np
import clpy
import clpy.backend.memory
# TODO(LWisteria): Merge to core_tests
class TestNdarray(unittest.TestCase):
    """Smoke tests for clpy.ndarray: creation, host<->device transfer and
    NumPy-parity of basic array operations (each checked against numpy)."""
    def test_create(self):
        clpy.ndarray([1, 2])
        # Always OK if no exception when ndarray.__init__
        self.assertTrue(True)
    def test_set(self):
        # host -> device copy must not raise
        src = np.array([0, 1, 2, 3], dtype="float64")
        dst = clpy.ndarray(src.shape)
        dst.set(src)
        self.assertTrue(True)  # Always OK if no exception when ndarray.set
    def test_single_getset(self):
        # round-trip: set() then get() returns the same values
        expected = np.array([0, 1, 2, 3], dtype="float64")
        ar = clpy.ndarray(expected.shape)
        ar.set(expected)
        actual = ar.get()
        self.assertTrue((expected == actual).all())
    def test_multiple_getset(self):
        # two live arrays must not clobber each other's buffers
        expected0 = np.array([0, 1, 2, 3], dtype="float64")
        ar0 = clpy.ndarray(expected0.shape)
        ar0.set(expected0)
        expected1 = np.array([4, 5, 6, 7], dtype="float64")
        ar1 = clpy.ndarray(expected1.shape)
        ar1.set(expected1)
        actual0 = ar0.get()
        actual1 = ar1.get()
        self.assertTrue((expected0 == actual0).all())
        self.assertTrue((expected1 == actual1).all())
    def test_array(self):
        ar = clpy.core.array([
            [1, 2, 3],
            [4, 5, 6]], dtype='float32')
        actual = ar.get()
        expected = np.array([
            [1, 2, 3],
            [4, 5, 6]], dtype='float32')
        self.assertTrue((expected == actual).all())
    def test_data(self):
        # the device allocation is exposed as a backend Buf object
        ar = clpy.ndarray([1, 2])
        self.assertIsInstance(ar.data.buf, clpy.backend.memory.Buf)
    def test_dot(self):
        an_array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        a = clpy.core.array(an_array, dtype='float32')
        b = clpy.core.array(an_array, dtype='float32')
        expected = np.array(an_array).dot(np.array(an_array))
        actual = a.dot(b).get()
        self.assertTrue((expected == actual).all())
    def test_reshape(self):
        an_array = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
        a = clpy.core.array(an_array, dtype='float32')
        expected = np.array(an_array, dtype='float32').reshape((2, 6))
        actual = a.reshape((2, 6)).get()
        self.assertTrue(expected.shape == actual.shape)
        self.assertTrue((expected == actual).all())
    def test_ravel(self):
        an_array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        a = clpy.core.array(an_array, dtype='float32')
        expected = np.array(an_array, dtype='float32').ravel()
        actual = a.ravel().get()
        self.assertTrue((expected == actual).all())
    def test_reduced_view(self):
        # more sophisticated test may be needed
        # per this fixture, reduced_view collapses a (3, 3, 1) array to 1-D
        an_array = [[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]
        a = clpy.core.array(an_array, dtype='float32')
        expected = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float32')
        actual = a.reduced_view().get()
        self.assertTrue(expected.shape == actual.shape)
        self.assertTrue((expected == actual).all())
    def test_fill(self):
        expected = np.ndarray((3, 3), dtype='float32')
        expected.fill(42.0)
        a = clpy.ndarray((3, 3), dtype='float32')
        a.fill(42.0)
        actual = a.get()
        self.assertTrue((expected == actual).all())
    def test_astype(self):
        # float -> int cast must match numpy (truncation toward zero)
        an_array = [[1.3, 2.3, 3.3]]
        a = clpy.array(an_array, dtype='float32')
        expected = np.array(an_array, dtype='float32').astype('int32')
        actual = a.astype('int32').get()
        self.assertTrue(expected.dtype == actual.dtype)
        self.assertTrue((expected == actual).all())
    def test_transpose(self):
        x_np = np.array([[5, 7, 9], [6, 8, 10]], dtype='int8')
        x = clpy.array(x_np)
        expected = x_np.transpose()
        y = x.transpose()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_transpose_float(self):
        x_np = np.array([[1, 3], [2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.transpose()
        y = x.transpose()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_max(self):
        x_np = np.array([[1, 3, 2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.max()
        y = x.max()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_argmax(self):
        x_np = np.array([[1, 3, 2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.argmax()
        y = x.argmax()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_min(self):
        x_np = np.array([[1, 3, 2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.min()
        y = x.min()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_argmin(self):
        x_np = np.array([[4, 3, 1, 2]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.argmin()
        y = x.argmin()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_sum(self):
        x_np = np.array([[1, 3, 2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.sum()
        y = x.sum()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_ellipsis(self):
        # x[...] = scalar must broadcast-assign like numpy
        x_np = np.array([1, 3, 2, 4], dtype='float32')
        x = clpy.array(x_np)
        x_np[...] = np.asarray(0)
        x[...] = clpy.asarray(0)
        expected = x_np
        actual = x.get()
        self.assertTrue(np.all(expected == actual))
# allow running the suite directly: python test_ndarray.py
if __name__ == "__main__":
    unittest.main()
|
<filename>tests/clpy_tests/opencl_tests/test_ndarray.py<gh_stars>100-1000
import unittest
import numpy as np
import clpy
import clpy.backend.memory
# TODO(LWisteria): Merge to core_tests
class TestNdarray(unittest.TestCase):
    """Smoke tests for clpy.ndarray: creation, host<->device transfer and
    NumPy-parity of basic array operations (each checked against numpy)."""
    def test_create(self):
        clpy.ndarray([1, 2])
        # Always OK if no exception when ndarray.__init__
        self.assertTrue(True)
    def test_set(self):
        # host -> device copy must not raise
        src = np.array([0, 1, 2, 3], dtype="float64")
        dst = clpy.ndarray(src.shape)
        dst.set(src)
        self.assertTrue(True)  # Always OK if no exception when ndarray.set
    def test_single_getset(self):
        # round-trip: set() then get() returns the same values
        expected = np.array([0, 1, 2, 3], dtype="float64")
        ar = clpy.ndarray(expected.shape)
        ar.set(expected)
        actual = ar.get()
        self.assertTrue((expected == actual).all())
    def test_multiple_getset(self):
        # two live arrays must not clobber each other's buffers
        expected0 = np.array([0, 1, 2, 3], dtype="float64")
        ar0 = clpy.ndarray(expected0.shape)
        ar0.set(expected0)
        expected1 = np.array([4, 5, 6, 7], dtype="float64")
        ar1 = clpy.ndarray(expected1.shape)
        ar1.set(expected1)
        actual0 = ar0.get()
        actual1 = ar1.get()
        self.assertTrue((expected0 == actual0).all())
        self.assertTrue((expected1 == actual1).all())
    def test_array(self):
        ar = clpy.core.array([
            [1, 2, 3],
            [4, 5, 6]], dtype='float32')
        actual = ar.get()
        expected = np.array([
            [1, 2, 3],
            [4, 5, 6]], dtype='float32')
        self.assertTrue((expected == actual).all())
    def test_data(self):
        # the device allocation is exposed as a backend Buf object
        ar = clpy.ndarray([1, 2])
        self.assertIsInstance(ar.data.buf, clpy.backend.memory.Buf)
    def test_dot(self):
        an_array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        a = clpy.core.array(an_array, dtype='float32')
        b = clpy.core.array(an_array, dtype='float32')
        expected = np.array(an_array).dot(np.array(an_array))
        actual = a.dot(b).get()
        self.assertTrue((expected == actual).all())
    def test_reshape(self):
        an_array = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
        a = clpy.core.array(an_array, dtype='float32')
        expected = np.array(an_array, dtype='float32').reshape((2, 6))
        actual = a.reshape((2, 6)).get()
        self.assertTrue(expected.shape == actual.shape)
        self.assertTrue((expected == actual).all())
    def test_ravel(self):
        an_array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        a = clpy.core.array(an_array, dtype='float32')
        expected = np.array(an_array, dtype='float32').ravel()
        actual = a.ravel().get()
        self.assertTrue((expected == actual).all())
    def test_reduced_view(self):
        # more sophisticated test may be needed
        # per this fixture, reduced_view collapses a (3, 3, 1) array to 1-D
        an_array = [[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]
        a = clpy.core.array(an_array, dtype='float32')
        expected = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='float32')
        actual = a.reduced_view().get()
        self.assertTrue(expected.shape == actual.shape)
        self.assertTrue((expected == actual).all())
    def test_fill(self):
        expected = np.ndarray((3, 3), dtype='float32')
        expected.fill(42.0)
        a = clpy.ndarray((3, 3), dtype='float32')
        a.fill(42.0)
        actual = a.get()
        self.assertTrue((expected == actual).all())
    def test_astype(self):
        # float -> int cast must match numpy (truncation toward zero)
        an_array = [[1.3, 2.3, 3.3]]
        a = clpy.array(an_array, dtype='float32')
        expected = np.array(an_array, dtype='float32').astype('int32')
        actual = a.astype('int32').get()
        self.assertTrue(expected.dtype == actual.dtype)
        self.assertTrue((expected == actual).all())
    def test_transpose(self):
        x_np = np.array([[5, 7, 9], [6, 8, 10]], dtype='int8')
        x = clpy.array(x_np)
        expected = x_np.transpose()
        y = x.transpose()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_transpose_float(self):
        x_np = np.array([[1, 3], [2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.transpose()
        y = x.transpose()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_max(self):
        x_np = np.array([[1, 3, 2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.max()
        y = x.max()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_argmax(self):
        x_np = np.array([[1, 3, 2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.argmax()
        y = x.argmax()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_min(self):
        x_np = np.array([[1, 3, 2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.min()
        y = x.min()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_argmin(self):
        x_np = np.array([[4, 3, 1, 2]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.argmin()
        y = x.argmin()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_sum(self):
        x_np = np.array([[1, 3, 2, 4]], dtype='float32')
        x = clpy.array(x_np)
        expected = x_np.sum()
        y = x.sum()
        actual = y.get()
        self.assertTrue(np.all(expected == actual))
    def test_ellipsis(self):
        # x[...] = scalar must broadcast-assign like numpy
        x_np = np.array([1, 3, 2, 4], dtype='float32')
        x = clpy.array(x_np)
        x_np[...] = np.asarray(0)
        x[...] = clpy.asarray(0)
        expected = x_np
        actual = x.get()
        self.assertTrue(np.all(expected == actual))
# allow running the suite directly: python test_ndarray.py
if __name__ == "__main__":
    unittest.main()
|
en
| 0.438475
|
# TODO(LWisteria): Merge to core_tests test class of ndarray # Always OK if no exception when ndarray.__init__ # Always OK if no exception when ndarray.set # more sophisticated test may be needed
| 2.592175
| 3
|
example/onnx/super_resolution.py
|
coderzbx/seg-mxnet
| 1
|
6627780
|
<filename>example/onnx/super_resolution.py<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing super_resolution model conversion"""
from __future__ import absolute_import as _abs
from __future__ import print_function
from collections import namedtuple
import logging
import numpy as np
from PIL import Image
import mxnet as mx
from mxnet.test_utils import download
import mxnet.contrib.onnx as onnx_mxnet
# set up logger
logging.basicConfig()
LOGGER = logging.getLogger()  # root logger used throughout this example
LOGGER.setLevel(logging.INFO)
def import_onnx():
    """Download the pretrained super-resolution ONNX model and convert it
    to MXNet form.

    :return: (sym, params) as produced by onnx_mxnet.import_model
    """
    model_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_resolution.onnx'
    download(model_url, 'super_resolution.onnx')
    LOGGER.info("Converting onnx format to mxnet's symbol and params...")
    sym, params = onnx_mxnet.import_model('super_resolution.onnx')
    LOGGER.info("Successfully Converted onnx format to mxnet's symbol and params...")
    return sym, params
def get_test_image():
    """Download the sample image and split it into model input plus chroma.

    :return: (input_image, img_cb, img_cr) — the luma channel shaped
             (1, 1, 224, 224) plus the Cb/Cr channels kept for recombination.
    """
    # Load test image
    input_image_dim = 224
    img_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_res_input.jpg'
    download(img_url, 'super_res_input.jpg')
    img = Image.open('super_res_input.jpg').resize((input_image_dim, input_image_dim))
    # the model operates on the luma (Y) channel only
    img_ycbcr = img.convert("YCbCr")
    img_y, img_cb, img_cr = img_ycbcr.split()
    input_image = np.array(img_y)[np.newaxis, np.newaxis, :, :]  # add batch and channel axes
    return input_image, img_cb, img_cr
def perform_inference(sym, params, input_img, img_cb, img_cr):
    """Run the super-resolution model and reassemble a colour image.

    :param sym: MXNet symbol converted from ONNX
    :param params: model arg params
    :param input_img: luma input, shape (1, 1, 224, 224)
    :param img_cb: Cb channel of the downsized input image
    :param img_cr: Cr channel of the downsized input image
    :return: the upscaled RGB PIL image (also written to super_res_output.jpg)
    """
    # create module
    mod = mx.mod.Module(symbol=sym, data_names=['input_0'], label_names=None)
    mod.bind(for_training=False, data_shapes=[('input_0', input_img.shape)])
    mod.set_params(arg_params=params, aux_params=None)
    # run inference
    batch = namedtuple('Batch', ['data'])
    mod.forward(batch([mx.nd.array(input_img)]))
    # Save the result: the network output is the upscaled luma channel
    img_out_y = Image.fromarray(np.uint8(mod.get_outputs()[0][0][0].
                                         asnumpy().clip(0, 255)), mode='L')
    # upsample chroma to match and merge back to RGB
    result_img = Image.merge(
        "YCbCr", [img_out_y,
                  img_cb.resize(img_out_y.size, Image.BICUBIC),
                  img_cr.resize(img_out_y.size, Image.BICUBIC)]).convert("RGB")
    output_img_dim = 672
    assert result_img.size == (output_img_dim, output_img_dim)
    LOGGER.info("Super Resolution example success.")
    result_img.save("super_res_output.jpg")
    return result_img
if __name__ == '__main__':
    # end-to-end example: fetch model + image, then run inference
    MX_SYM, MX_PARAM = import_onnx()
    INPUT_IMG, IMG_CB, IMG_CR = get_test_image()
    perform_inference(MX_SYM, MX_PARAM, INPUT_IMG, IMG_CB, IMG_CR)
|
<filename>example/onnx/super_resolution.py<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing super_resolution model conversion"""
from __future__ import absolute_import as _abs
from __future__ import print_function
from collections import namedtuple
import logging
import numpy as np
from PIL import Image
import mxnet as mx
from mxnet.test_utils import download
import mxnet.contrib.onnx as onnx_mxnet
# set up logger
logging.basicConfig()
LOGGER = logging.getLogger()  # root logger used throughout this example
LOGGER.setLevel(logging.INFO)
def import_onnx():
    """Download the pretrained super-resolution ONNX model and convert it
    to MXNet form.

    :return: (sym, params) as produced by onnx_mxnet.import_model
    """
    model_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_resolution.onnx'
    download(model_url, 'super_resolution.onnx')
    LOGGER.info("Converting onnx format to mxnet's symbol and params...")
    sym, params = onnx_mxnet.import_model('super_resolution.onnx')
    LOGGER.info("Successfully Converted onnx format to mxnet's symbol and params...")
    return sym, params
def get_test_image():
    """Download the sample image and split it into model input plus chroma.

    :return: (input_image, img_cb, img_cr) — the luma channel shaped
             (1, 1, 224, 224) plus the Cb/Cr channels kept for recombination.
    """
    # Load test image
    input_image_dim = 224
    img_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_res_input.jpg'
    download(img_url, 'super_res_input.jpg')
    img = Image.open('super_res_input.jpg').resize((input_image_dim, input_image_dim))
    # the model operates on the luma (Y) channel only
    img_ycbcr = img.convert("YCbCr")
    img_y, img_cb, img_cr = img_ycbcr.split()
    input_image = np.array(img_y)[np.newaxis, np.newaxis, :, :]  # add batch and channel axes
    return input_image, img_cb, img_cr
def perform_inference(sym, params, input_img, img_cb, img_cr):
    """Run the super-resolution model and reassemble a colour image.

    :param sym: MXNet symbol converted from ONNX
    :param params: model arg params
    :param input_img: luma input, shape (1, 1, 224, 224)
    :param img_cb: Cb channel of the downsized input image
    :param img_cr: Cr channel of the downsized input image
    :return: the upscaled RGB PIL image (also written to super_res_output.jpg)
    """
    # create module
    mod = mx.mod.Module(symbol=sym, data_names=['input_0'], label_names=None)
    mod.bind(for_training=False, data_shapes=[('input_0', input_img.shape)])
    mod.set_params(arg_params=params, aux_params=None)
    # run inference
    batch = namedtuple('Batch', ['data'])
    mod.forward(batch([mx.nd.array(input_img)]))
    # Save the result: the network output is the upscaled luma channel
    img_out_y = Image.fromarray(np.uint8(mod.get_outputs()[0][0][0].
                                         asnumpy().clip(0, 255)), mode='L')
    # upsample chroma to match and merge back to RGB
    result_img = Image.merge(
        "YCbCr", [img_out_y,
                  img_cb.resize(img_out_y.size, Image.BICUBIC),
                  img_cr.resize(img_out_y.size, Image.BICUBIC)]).convert("RGB")
    output_img_dim = 672
    assert result_img.size == (output_img_dim, output_img_dim)
    LOGGER.info("Super Resolution example success.")
    result_img.save("super_res_output.jpg")
    return result_img
if __name__ == '__main__':
    # end-to-end example: fetch model + image, then run inference
    MX_SYM, MX_PARAM = import_onnx()
    INPUT_IMG, IMG_CB, IMG_CR = get_test_image()
    perform_inference(MX_SYM, MX_PARAM, INPUT_IMG, IMG_CB, IMG_CR)
|
en
| 0.827745
|
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Testing super_resolution model conversion # set up logger Import the onnx model into mxnet Download and process the test image # Load test image Perform inference on image using mxnet # create module # run inference # Save the result
| 2.182191
| 2
|
util/web.py
|
SmokeyLlama/TinyLlama-GameBot
| 0
|
6627781
|
""" Contains functions to make http GET and http POST with. version 0.0.6 """
import time
import logging
import requests
from requests.utils import quote, unquote
__all__ = ['quote', 'unquote']  # re-export the URL quoting helpers from requests
log = logging.getLogger(__name__)
# A session that all requests will use...apparently not.
# NOTE(review): module-level shared session — cookies persist across every call.
__request_session = requests.session()
def is_cookie_expired(cookie_name):
    """
    Check if a cookie is expired.

    Fixes over the previous version: the cookie jar is scanned in full (the
    old loop's if/else returned None as soon as the *first* cookie did not
    match the name), and the `expires = int` placeholder — which made the
    final comparison `timestamp > int` raise TypeError whenever the cookie
    was absent — is gone.

    :param cookie_name: str the name of the cookie to check.
    :return: True if expired else False or None if no cookie by that name was found.
    """
    if not cookie_name:
        return None
    timestamp = int(time.time())
    for cookie in __request_session.cookies:
        if cookie.name == cookie_name:
            expires = cookie.expires
            if expires is None:
                # NOTE(review): session cookies carry no expiry; treating
                # them as not expired — confirm this is the desired policy.
                return False
            if timestamp > expires:
                log.debug('cookie[\'%s\'] is expired. time stamp: %s, expires: %s' %
                          (cookie_name, timestamp, expires))
                return True
            log.debug('cookie[\'%s\'] is not expired. time stamp: %s, expires: %s' %
                      (cookie_name, timestamp, expires))
            return False
    return None
def delete_cookie(cookie_name):
    """
    Delete a cookie by name.

    :param cookie_name: str the cookie name.
    :return: True if deleted else False
    """
    # guard clause: nothing to do when the session has no such cookie
    if cookie_name not in __request_session.cookies:
        return False
    del __request_session.cookies[cookie_name]
    log.debug('deleting cookie: %s session cookies: %s' % (cookie_name, __request_session.cookies))
    return True
def has_cookie(cookie_name):
    """
    Check a cookie by name to see if it exist.

    :param cookie_name: str the name of the cookie.
    :return: object request.session.cookie[cookie_name] or False if no cookie.
    """
    # guard clause: report and bail out when the cookie is missing
    if cookie_name not in __request_session.cookies:
        log.debug('no cookie named: %s found.' % cookie_name)
        return False
    log.debug('cookie found: %s' % __request_session.cookies[cookie_name])
    return __request_session.cookies[cookie_name]
def http_get(url, **kwargs):
    """HTTP GET through the shared module session.

    Keyword args: json (bool, decode the body as json), proxy (str "ip:port"),
    header (dict merged over the defaults), timeout (seconds, default 20),
    referer (str).

    :return: dict with content/json/cookies/headers/status_code
             (all None when the request failed).
    """
    json = kwargs.get('json', False)
    proxy = kwargs.get('proxy', '')
    header = kwargs.get('header')
    timeout = kwargs.get('timeout', 20)
    referer = kwargs.get('referer')
    default_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/50.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
    }
    if referer is not None:
        default_header['Referer'] = referer
    if header is not None and type(header) is dict:
        default_header.update(header)
    if proxy:
        # NOTE(review): http_post maps the proxy under the 'http' key instead
        # of 'https' — confirm which scheme is actually intended.
        proxy = {'https': 'http://' + proxy}
    # NOTE(review): when no proxy is given the empty string '' is passed as
    # `proxies` below; requests expects a dict or None — verify this works.
    gr = None
    json_response = None
    try:
        gr = __request_session.request(method='GET', url=url, headers=default_header, proxies=proxy, timeout=timeout)
        if json:
            json_response = gr.json()
    except ValueError as ve:
        log.error('error while decoding %s to json: %s' % (url, ve))
    except (requests.ConnectionError, requests.RequestException) as re:
        log.error('http_get error: %s' % re)
    finally:
        # runs on success and failure; logs the session cookie jar
        log.debug('cookies: %s' % __request_session.cookies)
    if gr is None:
        return dict(content=None, json=None,
                    cookies=None, headers=None, status_code=None)
    else:
        return dict(content=gr.text, json=json_response,
                    cookies=gr.cookies, headers=gr.headers, status_code=gr.status_code)
def http_post(post_url, post_data, **kwargs):
    """HTTP POST through the shared module session.

    :param post_url: str target url (required).
    :param post_data: request body passed to requests as `data`.
    Keyword args: json (bool, decode the response as json), proxy (str
    "ip:port"), header (dict merged over the defaults), timeout (seconds,
    default 20), referer (str), is_stream (bool), follow_redirect (bool).

    :return: dict with content/json/cookies/headers/status_code
             (all None when the request failed).
    :raises ValueError: if post_url is empty.
    :raises TypeError: if proxy is given but not a str.
    """
    json = kwargs.get('json', False)
    proxy = kwargs.get('proxy', '')
    header = kwargs.get('header')
    timeout = kwargs.get('timeout', 20)
    referer = kwargs.get('referer')
    stream = kwargs.get('is_stream', False)
    redirect = kwargs.get('follow_redirect', False)
    default_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/50.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
    }
    if referer is not None:
        default_header['Referer'] = referer
    if not post_url:
        raise ValueError('no post_url provided. post_url=%s' % post_url)
    elif proxy and type(proxy) is not str:
        raise TypeError('proxy must be of type str and in the format ip:port. proxy type=%s'
                        % type(proxy))
    else:
        if header is not None and type(header) is dict:
            default_header.update(header)
        if proxy:
            # NOTE(review): http_get maps the proxy under the 'https' key
            # instead of 'http' — confirm which scheme is actually intended.
            proxy = {'http': 'http://' + proxy}
        pr = None
        json_response = None
        try:
            pr = __request_session.request(method='POST', url=post_url, data=post_data, headers=default_header,
                                           allow_redirects=redirect, proxies=proxy, timeout=timeout, stream=stream)
            if json:
                json_response = pr.json()
        except ValueError as ve:
            log.error('error while decoding %s to json: %s' % (post_url, ve))
        except (requests.HTTPError, requests.RequestException) as pe:
            log.error('http_post error %s' % pe)
        finally:
            # runs on success and failure; logs the session cookie jar
            log.debug('cookies: %s' % __request_session.cookies)
        if pr is None:
            return dict(content=None, json=None,
                        cookies=None, headers=None, status_code=None)
        else:
            return dict(content=pr.text, json=json_response,
                        cookies=pr.cookies, headers=pr.headers, status_code=pr.status_code)
|
""" Contains functions to make http GET and http POST with. version 0.0.6 """
import time
import logging
import requests
from requests.utils import quote, unquote
__all__ = ['quote', 'unquote']
log = logging.getLogger(__name__)
# A session that all requests will use...apparently not.
__request_session = requests.session()
def is_cookie_expired(cookie_name):
    """
    Check if a cookie is expired.

    :param cookie_name: str the name of the cookie to check.
    :return: True if expired else False, or None if no cookie by that name was found
             (or if cookie_name is empty).
    """
    # Original implementation had two defects fixed here:
    #  1. `expires = int` bound the *type* int, so the later comparison raised
    #     TypeError whenever the target cookie was absent.
    #  2. `else: return None` inside the loop bailed out on the first
    #     non-matching cookie, so a cookie stored after any other was never found.
    if not cookie_name:
        return None
    timestamp = int(time.time())
    for cookie in __request_session.cookies:
        if cookie.name != cookie_name:
            continue
        expires = cookie.expires
        if expires is None:
            # Session cookie: no expiry timestamp set, treat as not expired.
            return False
        if timestamp > expires:
            log.debug('cookie[\'%s\'] is expired. time stamp: %s, expires: %s' %
                      (cookie_name, timestamp, expires))
            return True
        log.debug('cookie[\'%s\'] is not expired. time stamp: %s, expires: %s' %
                  (cookie_name, timestamp, expires))
        return False
    return None
def delete_cookie(cookie_name):
    """
    Delete a cookie by name.

    :param cookie_name: str the cookie name.
    :return: True if deleted else False
    """
    if cookie_name not in __request_session.cookies:
        return False
    del __request_session.cookies[cookie_name]
    log.debug('deleting cookie: %s session cookies: %s' % (cookie_name, __request_session.cookies))
    return True
def has_cookie(cookie_name):
    """
    Check a cookie by name to see if it exists.

    :param cookie_name: str the name of the cookie.
    :return: the cookie value from the session, or False if no such cookie.
    """
    if cookie_name not in __request_session.cookies:
        log.debug('no cookie named: %s found.' % cookie_name)
        return False
    log.debug('cookie found: %s' % __request_session.cookies[cookie_name])
    return __request_session.cookies[cookie_name]
def http_get(url, **kwargs):
    """
    Send an HTTP GET request through the module-wide session.

    :param url: str the URL to GET.
    :param kwargs:
        json (bool): decode the response body as JSON. Default False.
        proxy (str): proxy in ``ip:port`` form.
        header (dict): extra headers merged over the browser-like defaults.
        timeout (int): request timeout in seconds. Default 20.
        referer (str): value for the Referer header.
    :return: dict with keys content, json, cookies, headers, status_code;
             all values are None if the request itself failed.
    """
    json = kwargs.get('json', False)  # NOTE: local name shadows any `json` module import
    proxy = kwargs.get('proxy', '')
    header = kwargs.get('header')
    timeout = kwargs.get('timeout', 20)
    referer = kwargs.get('referer')
    # Browser-like defaults; caller-supplied headers override these.
    default_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/50.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
    }
    if referer is not None:
        default_header['Referer'] = referer
    if header is not None and type(header) is dict:
        default_header.update(header)
    if proxy:
        # NOTE(review): keyed 'https' here but 'http' in http_post — confirm which is intended.
        proxy = {'https': 'http://' + proxy}
    gr = None
    json_response = None
    try:
        gr = __request_session.request(method='GET', url=url, headers=default_header, proxies=proxy, timeout=timeout)
        if json:
            json_response = gr.json()
    except ValueError as ve:
        # gr.json() raises ValueError when the body is not valid JSON
        log.error('error while decoding %s to json: %s' % (url, ve))
    except (requests.ConnectionError, requests.RequestException) as re:
        log.error('http_get error: %s' % re)
    finally:
        # NOTE: returning from `finally` swallows any exception not caught above.
        log.debug('cookies: %s' % __request_session.cookies)
        if gr is None:
            return dict(content=None, json=None,
                        cookies=None, headers=None, status_code=None)
        else:
            return dict(content=gr.text, json=json_response,
                        cookies=gr.cookies, headers=gr.headers, status_code=gr.status_code)
def http_post(post_url, post_data, **kwargs):
    """
    Send an HTTP POST request through the module-wide session.

    :param post_url: str the URL to POST to (required, must be non-empty).
    :param post_data: the request body, passed to requests as ``data``.
    :param kwargs:
        json (bool): decode the response body as JSON. Default False.
        proxy (str): proxy in ``ip:port`` form, wrapped as an http proxy.
        header (dict): extra headers merged over the browser-like defaults.
        timeout (int): request timeout in seconds. Default 20.
        referer (str): value for the Referer header.
        is_stream (bool): pass-through to requests' ``stream``. Default False.
        follow_redirect (bool): allow redirects. Default False.
    :return: dict with keys content, json, cookies, headers, status_code;
             all values are None if the request itself failed.
    :raises ValueError: if post_url is empty.
    :raises TypeError: if proxy is given but is not a str.
    """
    json = kwargs.get('json', False)  # NOTE: local name shadows any `json` module import
    proxy = kwargs.get('proxy', '')
    header = kwargs.get('header')
    timeout = kwargs.get('timeout', 20)
    referer = kwargs.get('referer')
    stream = kwargs.get('is_stream', False)
    redirect = kwargs.get('follow_redirect', False)
    # Browser-like defaults; caller-supplied headers override these.
    default_header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/50.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
    }
    if referer is not None:
        default_header['Referer'] = referer
    if not post_url:
        raise ValueError('no post_url provided. post_url=%s' % post_url)
    elif proxy and type(proxy) is not str:
        raise TypeError('proxy must be of type str and in the format ip:port. proxy type=%s'
                        % type(proxy))
    else:
        if header is not None and type(header) is dict:
            default_header.update(header)
        if proxy:
            # NOTE(review): keyed 'http' here but 'https' in http_get — confirm which is intended.
            proxy = {'http': 'http://' + proxy}
        pr = None
        json_response = None
        try:
            pr = __request_session.request(method='POST', url=post_url, data=post_data, headers=default_header,
                                           allow_redirects=redirect, proxies=proxy, timeout=timeout, stream=stream)
            if json:
                json_response = pr.json()
        except ValueError as ve:
            # pr.json() raises ValueError when the body is not valid JSON
            log.error('error while decoding %s to json: %s' % (post_url, ve))
        except (requests.HTTPError, requests.RequestException) as pe:
            log.error('http_post error %s' % pe)
        finally:
            # NOTE: returning from `finally` swallows any exception not caught above.
            log.debug('cookies: %s' % __request_session.cookies)
            if pr is None:
                return dict(content=None, json=None,
                            cookies=None, headers=None, status_code=None)
            else:
                return dict(content=pr.text, json=json_response,
                            cookies=pr.cookies, headers=pr.headers, status_code=pr.status_code)
|
en
| 0.834464
|
Contains functions to make http GET and http POST with. version 0.0.6 # A session that all requests will use...apparently not. Check if a cookie is expired. :param cookie_name: str the name of the cookie to check. :return: True if expired else False or None if no cookie by that name was found. Delete a cookie by name. :param cookie_name: str the cookie name. :return: True if deleted else False Check a cookie by name to see if it exist. :param cookie_name: str the name of the cookie. :return: object request.session.cookie[cookie_name] or False if no cookie.
| 3.059462
| 3
|
core/py/__init__.py
|
ZexuanTHU/llmt
| 0
|
6627782
|
import os
import matlab.engine
import sys
import signal

# Bootstrap script: attach to (or start) a MATLAB engine, register the
# project's MATLAB paths, and launch the workflow GUI.
pid = os.getpid()
print(pid)  # process id — handy for killing a stuck engine by hand
root_path = os.path.abspath('..')
print(root_path)
# Shared sessions must have been registered in MATLAB via `matlab.engine.shareEngine`.
names = matlab.engine.find_matlab()
if names:
    print('MATLAB already started...Connecting to {}...'.format(names[0]))
    eng = matlab.engine.connect_matlab(names[0])
    print('Connect to MATLAB')
else:
    print('Starting MATLAB...')  # fixed typo: was 'MATALB'
    eng = matlab.engine.start_matlab()
    print('Done!')
# Register project MATLAB paths, then open the workflow GUI (blocks until done).
add_paths_path = root_path + '/matlab_lib/mine/file/'
eng.cd(add_paths_path)
eng.add_paths(os.path.abspath('../../../'), nargout=0)
eng.mt_workflow_gui(nargout=0)
print('PATH add!')
print('lalala')
|
import os
import matlab.engine
import sys
import signal

# Bootstrap script: attach to (or start) a MATLAB engine, register the
# project's MATLAB paths, and launch the workflow GUI.
pid = os.getpid()
print(pid)  # process id — handy for killing a stuck engine by hand
root_path = os.path.abspath('..')
print(root_path)
# Shared sessions must have been registered in MATLAB via `matlab.engine.shareEngine`.
names = matlab.engine.find_matlab()
if names:
    print('MATLAB already started...Connecting to {}...'.format(names[0]))
    eng = matlab.engine.connect_matlab(names[0])
    print('Connect to MATLAB')
else:
    print('Starting MATLAB...')  # fixed typo: was 'MATALB'
    eng = matlab.engine.start_matlab()
    print('Done!')
# Register project MATLAB paths, then open the workflow GUI (blocks until done).
add_paths_path = root_path + '/matlab_lib/mine/file/'
eng.cd(add_paths_path)
eng.add_paths(os.path.abspath('../../../'), nargout=0)
eng.mt_workflow_gui(nargout=0)
print('PATH add!')
print('lalala')
|
none
| 1
| 2.091232
| 2
|
|
rqalpha/mod/rqalpha_mod_sys_accounts/api/api_stock.py
|
lucifersteph/rqalpha
| 0
|
6627783
|
<reponame>lucifersteph/rqalpha
# -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),
# 您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、
# 本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,
# 否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 <EMAIL> 获取。
import math
import datetime
from itertools import chain
from decimal import Decimal, getcontext
from typing import Dict, List, Union, Optional
import six
import numpy as np
import pandas as pd
from rqalpha.api import export_as_api
from rqalpha.apis.api_base import cal_style, assure_order_book_id, assure_instrument
from rqalpha.apis.api_abstract import (
order_shares, order_value, order_percent, order_target_value, order_target_percent, order, order_to
)
from rqalpha.const import (
DEFAULT_ACCOUNT_TYPE, EXECUTION_PHASE, SIDE, ORDER_TYPE, POSITION_EFFECT, POSITION_DIRECTION,
INSTRUMENT_TYPE
)
from rqalpha.environment import Environment
from rqalpha.execution_context import ExecutionContext
from rqalpha.model.instrument import (
Instrument, IndustryCode as industry_code, IndustryCodeItem, SectorCode as sector_code, SectorCodeItem
)
from rqalpha.model.order import Order, MarketOrder, LimitOrder, OrderStyle
from rqalpha.utils import is_valid_price, INST_TYPE_IN_STOCK_ACCOUNT
from rqalpha.utils.arg_checker import apply_rules, verify_that
from rqalpha.utils.exception import RQInvalidArgument
from rqalpha.utils.i18n import gettext as _
from rqalpha.utils.logger import user_system_log
from rqalpha.utils.datetime_func import to_date
from rqalpha.mod.rqalpha_mod_sys_risk.validators.cash_validator import is_cash_enough
# Use Decimal to avoid float rounding errors in share-quantity arithmetic.
getcontext().prec = 10
export_as_api(industry_code, name='industry_code')
export_as_api(sector_code, name='sector_code')
def _get_account_position_ins(id_or_ins):
    """Resolve *id_or_ins* to (stock account, long position, instrument)."""
    instrument = assure_instrument(id_or_ins)
    stock_account = Environment.get_instance().portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK]
    long_position = stock_account.get_position(instrument.order_book_id, POSITION_DIRECTION.LONG)
    return stock_account, long_position, instrument
def _submit_order(ins, amount, side, position_effect, style, auto_switch_order_value):
    """
    Build and submit a single stock order.

    :param ins: instrument to trade
    :param amount: signed share quantity (BUY quantities are rounded down to a lot)
    :param side: SIDE.BUY or SIDE.SELL
    :param position_effect: POSITION_EFFECT.OPEN / CLOSE
    :param style: order style (MarketOrder / LimitOrder)
    :param auto_switch_order_value: when True and cash is insufficient for a BUY,
        fall back to spending all remaining cash via _order_value
    :return: the submitted Order, or None if creation failed (no market data,
        zero quantity), or the result of the order-value fallback
    :raises RQInvalidArgument: if a LimitOrder carries a non-positive price
    """
    env = Environment.get_instance()
    if isinstance(style, LimitOrder):
        if style.get_limit_price() <= 0:
            raise RQInvalidArgument(_(u"Limit order price should be positive"))
    price = env.data_proxy.get_last_price(ins.order_book_id)
    if not is_valid_price(price):
        user_system_log.warn(
            _(u"Order Creation Failed: [{order_book_id}] No market data").format(order_book_id=ins.order_book_id))
        return
    if side == SIDE.BUY:
        # Buys must be a whole number of lots; round the quantity down.
        round_lot = int(ins.round_lot)
        amount = int(Decimal(amount) / Decimal(round_lot)) * round_lot
    if amount == 0:
        user_system_log.warn(_(u"Order Creation Failed: 0 order quantity"))
        return
    order = Order.__from_create__(ins.order_book_id, abs(amount), side, style, position_effect)
    if order.type == ORDER_TYPE.MARKET:
        # Freeze the last price so market orders can reserve cash deterministically.
        order.set_frozen_price(price)
    if side == SIDE.BUY and auto_switch_order_value:
        account, position, ins = _get_account_position_ins(ins)
        if not is_cash_enough(env, order, account):
            user_system_log.warn(_(
                "insufficient cash, use all remaining cash({}) to create order"
            ).format(account.cash))
            return _order_value(account, position, ins, account.cash, style)
    if env.can_submit_order(order):
        env.broker.submit_order(order)
    return order
def _order_shares(ins, amount, style, auto_switch_order_value):
    """Submit a share order; the sign of *amount* decides buy-open vs sell-close."""
    if amount > 0:
        side, position_effect = SIDE.BUY, POSITION_EFFECT.OPEN
    else:
        side, position_effect = SIDE.SELL, POSITION_EFFECT.CLOSE
    return _submit_order(ins, amount, side, position_effect, style, auto_switch_order_value)
def _order_value(account, position, ins, cash_amount, style):
    """
    Convert a cash amount into a share quantity and submit the order.

    :param account: stock account (used for available-cash capping)
    :param position: long position in the instrument (caps sell quantity)
    :param ins: instrument to trade
    :param cash_amount: signed cash value; positive buys, negative sells
    :param style: order style; LimitOrder price is used directly, otherwise last price
    :return: the submitted Order, or None when no valid order can be built
    """
    env = Environment.get_instance()
    if cash_amount > 0:
        # Never try to spend more than the account actually has.
        cash_amount = min(cash_amount, account.cash)
    if isinstance(style, LimitOrder):
        price = style.get_limit_price()
    else:
        price = env.data_proxy.get_last_price(ins.order_book_id)
        if not is_valid_price(price):
            user_system_log.warn(
                _(u"Order Creation Failed: [{order_book_id}] No market data").format(order_book_id=ins.order_book_id)
            )
            return
    amount = int(Decimal(cash_amount) / Decimal(price))
    if cash_amount > 0:
        # Round the buy quantity down to whole lots, then shrink it lot by lot
        # until price * amount plus the expected transaction cost fits in cash.
        round_lot = int(ins.round_lot)
        amount = int(Decimal(amount) / Decimal(round_lot)) * round_lot
        while amount > 0:
            expected_transaction_cost = env.get_order_transaction_cost(Order.__from_create__(
                ins.order_book_id, amount, SIDE.BUY, LimitOrder(price), POSITION_EFFECT.OPEN
            ))
            if amount * price + expected_transaction_cost <= cash_amount:
                break
            amount -= round_lot
        else:
            # Loop exhausted without break: even one lot is unaffordable.
            user_system_log.warn(_(u"Order Creation Failed: 0 order quantity"))
            return
    if amount < 0:
        # Sells are capped at the closable quantity of the current position.
        amount = max(amount, -position.closable)
    return _order_shares(ins, amount, style, auto_switch_order_value=False)
@order_shares.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_shares(id_or_ins, amount, price=None, style=None):
    # Stock-account implementation of order_shares. auto_switch_order_value
    # (mod config) lets a cash-short BUY fall back to spending all remaining cash.
    auto_switch_order_value = Environment.get_instance().config.mod.sys_accounts.auto_switch_order_value
    return _order_shares(assure_instrument(id_or_ins), amount, cal_style(price, style), auto_switch_order_value)
@order_value.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_value(id_or_ins, cash_amount, price=None, style=None):
    # Stock-account implementation of order_value: trade roughly `cash_amount`
    # worth of the instrument (positive buys, negative sells).
    account, position, ins = _get_account_position_ins(id_or_ins)
    return _order_value(account, position, ins, cash_amount, cal_style(price, style))
@order_percent.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_percent(id_or_ins, percent, price=None, style=None):
    # Stock-account implementation of order_percent: trade a fraction of total
    # account equity (positive buys, negative sells).
    account, position, ins = _get_account_position_ins(id_or_ins)
    return _order_value(account, position, ins, account.total_value * percent, cal_style(price, style))
@order_target_value.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_target_value(id_or_ins, cash_amount, price=None, style=None):
    # Stock-account implementation of order_target_value: trade the difference
    # between the target market value and the position's current market value.
    account, position, ins = _get_account_position_ins(id_or_ins)
    if cash_amount == 0:
        # Target of zero: close out everything that is currently closable.
        return _submit_order(ins, position.closable, SIDE.SELL, POSITION_EFFECT.CLOSE, cal_style(price, style), False)
    return _order_value(account, position, ins, cash_amount - position.market_value, cal_style(price, style))
@order_target_percent.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_target_percent(id_or_ins, percent, price=None, style=None):
    # Stock-account implementation of order_target_percent: trade toward a target
    # weight of total account equity.
    account, position, ins = _get_account_position_ins(id_or_ins)
    if percent == 0:
        # Target weight of zero: close out everything that is currently closable.
        return _submit_order(ins, position.closable, SIDE.SELL, POSITION_EFFECT.CLOSE, cal_style(price, style), False)
    else:
        return _order_value(
            account, position, ins, account.total_value * percent - position.market_value, cal_style(price, style)
        )
@order.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order(order_book_id, quantity, price=None, style=None):
    """Stock-account ``order``: delegate to order_shares and wrap in a list."""
    submitted = stock_order_shares(order_book_id, quantity, price, style)
    return [submitted] if submitted else []
@order_to.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_to(order_book_id, quantity, price=None, style=None):
    """Stock-account ``order_to``: trade the delta to reach the target quantity."""
    current = Environment.get_instance().portfolio.get_position(order_book_id, POSITION_DIRECTION.LONG)
    delta = quantity - current.quantity
    submitted = stock_order_shares(order_book_id, delta, price, style)
    return [submitted] if submitted else []
@export_as_api
@ExecutionContext.enforce_phase(
    EXECUTION_PHASE.OPEN_AUCTION,
    EXECUTION_PHASE.ON_BAR,
    EXECUTION_PHASE.ON_TICK,
    EXECUTION_PHASE.SCHEDULED,
    EXECUTION_PHASE.GLOBAL
)
@apply_rules(verify_that('id_or_ins').is_valid_stock(),
             verify_that('amount').is_number(),
             verify_that('style').is_instance_of((MarketOrder, LimitOrder, type(None))))
def order_lots(id_or_ins, amount, price=None, style=None):
    # type: (Union[str, Instrument], int, Optional[float], Optional[OrderStyle]) -> Optional[Order]
    """
    Send a buy/sell order for the given number of lots. An order style can be
    passed in if needed; when omitted it defaults to a market order.

    :param id_or_ins: the instrument to trade
    :param int amount: number of lots; positive buys, negative sells. The share
        quantity is rounded down to a whole multiple of one lot (e.g. 100 shares
        per lot for China A shares).
    :param float price: order price; None (the default) means a :class:`~MarketOrder`.
        This parameter is a convenience shortcut for `style`.
    :param style: order style, market order by default. Supported styles are
        :class:`~LimitOrder` and :class:`~MarketOrder`.

    :example:

    .. code-block:: python

        # Buy 20 lots of Ping An Bank with a market order:
        order_lots('000001.XSHE', 20)

        # Buy 10 lots of Ping An Bank with a limit order at 10 yuan:
        order_lots('000001.XSHE', 10, style=LimitOrder(10))
    """
    ins = assure_instrument(id_or_ins)
    auto_switch_order_value = Environment.get_instance().config.mod.sys_accounts.auto_switch_order_value
    return _order_shares(ins, amount * int(ins.round_lot), cal_style(price, style), auto_switch_order_value)
@export_as_api
@ExecutionContext.enforce_phase(
    EXECUTION_PHASE.OPEN_AUCTION,
    EXECUTION_PHASE.ON_BAR,
    EXECUTION_PHASE.ON_TICK,
    EXECUTION_PHASE.SCHEDULED,
    EXECUTION_PHASE.GLOBAL
)
def order_target_portfolio(target_portfolio):
    # type: (Dict[Union[str, Instrument], float]) -> List[Order]
    """
    Batch-adjust stock positions by buying/selling so that each instrument's
    market value reaches the given fraction of total account equity.

    :param target_portfolio: mapping from instrument to its target weight

    :example:

    .. code-block:: python

        # Adjust holdings so Ping An Bank and Vanke A weigh 10% and 15%
        order_target_portfolio({
            '000001.XSHE': 0.1,
            '000002.XSHE': 0.15,
        })
    """
    if isinstance(target_portfolio, pd.Series):
        # FIXME: kind of dirty
        total_percent = sum(target_portfolio)
    else:
        total_percent = sum(six.itervalues(target_portfolio))
    if total_percent > 1:
        raise RQInvalidArgument(_(u"total percent should be lower than 1, current: {}").format(total_percent))
    env = Environment.get_instance()
    account = env.portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK]
    account_value = account.total_value
    # Convert target weights into target share quantities at the last price.
    target_quantities = {}
    for id_or_ins, target_percent in target_portfolio.items():
        order_book_id = assure_order_book_id(id_or_ins)
        if target_percent < 0:
            raise RQInvalidArgument(_(u"target percent of {} should between 0 and 1, current: {}").format(
                order_book_id, target_percent
            ))
        price = env.data_proxy.get_last_price(order_book_id)
        if not is_valid_price(price):
            user_system_log.warn(
                _(u"Order Creation Failed: [{order_book_id}] No market data").format(order_book_id=order_book_id)
            )
            continue
        target_quantities[order_book_id] = account_value * target_percent / price
    close_orders, open_orders = [], []
    current_quantities = {
        p.order_book_id: p.quantity for p in account.get_positions() if p.direction == POSITION_DIRECTION.LONG
    }
    # Fully close any long position that is absent from the target portfolio.
    # NOTE(review): membership is tested against the raw target_portfolio keys,
    # which may be Instrument objects rather than order_book_id strings — verify.
    for order_book_id, quantity in current_quantities.items():
        if order_book_id not in target_portfolio:
            close_orders.append(Order.__from_create__(
                order_book_id, quantity, SIDE.SELL, MarketOrder(), POSITION_EFFECT.CLOSE
            ))
    round_lot = 100  # NOTE(review): hard-coded A-share lot size — confirm for all instruments
    for order_book_id, target_quantity in target_quantities.items():
        if order_book_id in current_quantities:
            delta_quantity = target_quantity - current_quantities[order_book_id]
        else:
            delta_quantity = target_quantity
        if delta_quantity >= round_lot:
            # Buys are rounded down to whole lots.
            delta_quantity = math.floor(delta_quantity / round_lot) * round_lot
            open_orders.append(Order.__from_create__(
                order_book_id, delta_quantity, SIDE.BUY, MarketOrder(), POSITION_EFFECT.OPEN
            ))
        elif delta_quantity < -1:
            # Sells may be any whole number of shares.
            delta_quantity = math.floor(delta_quantity)
            close_orders.append(Order.__from_create__(
                order_book_id, abs(delta_quantity), SIDE.SELL, MarketOrder(), POSITION_EFFECT.CLOSE
            ))
    # Submit closes before opens so freed-up cash can fund the buys.
    submit_orders = []
    for order in chain(close_orders, open_orders):
        if env.can_submit_order(order):
            submit_orders.append(order)
            env.broker.submit_order(order)
    return submit_orders
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.OPEN_AUCTION,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('order_book_id').is_valid_instrument(),
             verify_that('count').is_greater_than(0))
def is_suspended(order_book_id, count=1):
    # type: (str, Optional[int]) -> Union[bool, pd.DataFrame]
    """
    Check whether a stock is suspended for the whole trading day.

    :param order_book_id: a single stock's order_book_id or symbol
    :param count: number of trading days to look back; defaults to only the most
        recent data available
    """
    dt = Environment.get_instance().calendar_dt.date()
    order_book_id = assure_order_book_id(order_book_id)
    return Environment.get_instance().data_proxy.is_suspended(order_book_id, dt, count)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.OPEN_AUCTION,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('order_book_id').is_valid_instrument())
def is_st_stock(order_book_id, count=1):
    # type: (str, Optional[int]) -> Union[bool, pd.DataFrame]
    """
    Check whether a stock was an ST stock (ST or *ST) over a period.

    ST stocks are "special treatment" stocks that carry delisting risk; many
    strategies want to screen them out, and they are also banned in the current
    strategy competitions.

    :param order_book_id: a single stock's order_book_id or symbol
    :param count: number of trading days to look back; defaults to only the most
        recent data available
    """
    dt = Environment.get_instance().calendar_dt.date()
    order_book_id = assure_order_book_id(order_book_id)
    return Environment.get_instance().data_proxy.is_st_stock(order_book_id, dt, count)
@export_as_api
@ExecutionContext.enforce_phase(
    EXECUTION_PHASE.ON_INIT,
    EXECUTION_PHASE.BEFORE_TRADING,
    EXECUTION_PHASE.OPEN_AUCTION,
    EXECUTION_PHASE.ON_BAR,
    EXECUTION_PHASE.ON_TICK,
    EXECUTION_PHASE.AFTER_TRADING,
    EXECUTION_PHASE.SCHEDULED,
)
@apply_rules(verify_that("code").is_instance_of((str, IndustryCodeItem)))
def industry(code):
    # type: (str) -> List[str]
    """
    Return the list of all stocks belonging to an industry.

    :param code: industry name or industry code; e.g. agriculture can be given
        as ``industry_code.A01`` or ``'A01'``.

    Industry codes follow the National Bureau of Statistics of China's
    "Industrial Classification for National Economic Activities"
    (http://www.stats.gov.cn/tjsj/tjbz/hyflbz/). Any code from that standard
    can be used. Top-level sections:

    - A01-A05  agriculture, forestry, animal husbandry, fishery and support services
    - B06-B12  mining (coal, oil & gas, ferrous/non-ferrous/non-metal ores, support, other)
    - C13-C43  manufacturing (food, textiles, chemicals, pharmaceuticals, metals,
      machinery, automobiles, electronics, instruments, repair, etc.)
    - D44-D46  utilities: power & heat, gas, water production and supply
    - E47-E50  construction (buildings, civil engineering, installation, decoration)
    - F51-F52  wholesale and retail
    - G53-G60  transportation (rail, road, water, air, pipeline), handling,
      warehousing and postal services
    - H61-H62  lodging and catering
    - I63-I65  telecom/broadcast/satellite transmission, internet services,
      software and IT services
    - J66-J69  finance (monetary services, capital markets, insurance, other)
    - K70      real estate
    - L71-L72  leasing and business services
    - M73-M75  research, professional technical services, technology promotion
    - N76-N78  water conservancy, environment and public-facility management
    - O79-O81  resident services, repair and other services
    - P82      education
    - Q83-Q84  health and social work
    - R85-R89  news & publishing, broadcasting & film, culture & arts,
      sports, entertainment
    - S90      conglomerates

    :example:

    .. code-block:: python3
        :linenos:

        def init(context):
            stock_list = industry('A01')
            logger.info("agriculture stock list: " + str(stock_list))

        #INITINFO agriculture stock list: ['600354.XSHG', '601118.XSHG', '002772.XSHE', '600371.XSHG', '600313.XSHG', '600672.XSHG', '600359.XSHG', '300143.XSHE', '002041.XSHE', '600762.XSHG', '600540.XSHG', '300189.XSHE', '600108.XSHG', '300087.XSHE', '600598.XSHG', '000998.XSHE', '600506.XSHG']
    """
    if isinstance(code, IndustryCodeItem):
        code = code.code
    else:
        # Accept a human-readable industry name and translate it to its code.
        code = to_industry_code(code)
    cs_instruments = Environment.get_instance().data_proxy.all_instruments((INSTRUMENT_TYPE.CS, ))
    return [i.order_book_id for i in cs_instruments if i.industry_code == code]
@export_as_api
@ExecutionContext.enforce_phase(
    EXECUTION_PHASE.ON_INIT,
    EXECUTION_PHASE.BEFORE_TRADING,
    EXECUTION_PHASE.OPEN_AUCTION,
    EXECUTION_PHASE.ON_BAR,
    EXECUTION_PHASE.ON_TICK,
    EXECUTION_PHASE.AFTER_TRADING,
    EXECUTION_PHASE.SCHEDULED,
)
@apply_rules(verify_that("code").is_instance_of((str, SectorCodeItem)))
def sector(code):
    # type: (str) -> List[str]
    """
    Return the list of all stocks belonging to a sector.

    :param code: sector name or sector code; e.g. the energy sector can be given
        as ``'Energy'``, its Chinese name ``'能源'``, or ``sector_code.Energy``.

    Supported sectors (based on MSCI's global industry classification):

    =========================   ==============================
    sector code                 sector name
    =========================   ==============================
    Energy                      energy
    Materials                   materials
    ConsumerDiscretionary       consumer discretionary
    ConsumerStaples             consumer staples
    HealthCare                  health care
    Financials                  financials
    InformationTechnology       information technology
    TelecommunicationServices   telecommunication services
    Utilities                   utilities
    Industrials                 industrials
    =========================   ==============================

    :example:

    .. code-block:: python3
        :linenos:

        def init(context):
            ids1 = sector("consumer discretionary")
            ids2 = sector("非必需消费品")
            ids3 = sector("ConsumerDiscretionary")
            assert ids1 == ids2 and ids1 == ids3
            logger.info(ids1)
        #INIT INFO
        #['002045.XSHE', '603099.XSHG', '002486.XSHE', '002536.XSHE', '300100.XSHE', '600633.XSHG', '002291.XSHE', ..., '600233.XSHG']
    """
    if isinstance(code, SectorCodeItem):
        code = code.name
    else:
        # Accept a Chinese/English display name or canonical name and normalise it.
        code = to_sector_name(code)
    cs_instruments = Environment.get_instance().data_proxy.all_instruments((INSTRUMENT_TYPE.CS,))
    return [i.order_book_id for i in cs_instruments if i.sector_code == code]
@export_as_api
@apply_rules(
    verify_that("order_book_id").is_valid_instrument(),
    verify_that("start_date").is_valid_date(ignore_none=False),
)
def get_dividend(order_book_id, start_date):
    # type: (str, Union[str, datetime.date, datetime.datetime, pd.Timestamp]) -> Optional[np.ndarray]
    """
    Get a stock's dividend records from *start_date* up to the day before the
    strategy's current date (both ends inclusive).

    :param order_book_id: stock code
    :param start_date: start date; must be earlier than the strategy's current date

    ==========================   ===================================================
    fields                       meaning
    ==========================   ===================================================
    announcement_date            dividend announcement date
    book_closure_date            record (share registration) date
    dividend_cash_before_tax     pre-tax cash dividend
    ex_dividend_date             ex-dividend date
    payable_date                 dividend payment date
    round_lot                    minimum dividend unit (shares)
    ==========================   ===================================================

    :example:

    Get Ping An Bank's dividends from 2013-01-04 up to the day before the
    strategy's current date:

    .. code-block:: python3
        :linenos:

        get_dividend('000001.XSHE', start_date='20130104')
        #[Out]
        #array([(20130614, 20130619, 20130620, 20130620, 1.7 , 10),
        #       (20140606, 20140611, 20140612, 20140612, 1.6 , 10),
        #       (20150407, 20150410, 20150413, 20150413, 1.74, 10),
        #       (20160608, 20160615, 20160616, 20160616, 1.53, 10)],
        #      dtype=[('announcement_date', '<u4'), ('book_closure_date', '<u4'), ('ex_dividend_date', '<u4'), ('payable_date', '<u4'), ('dividend_cash_before_tax', '<f8'), ('round_lot', '<u4')])
    """
    # The former `adjusted` parameter is no longer offered for unadjusted-data backtests.
    env = Environment.get_instance()
    dt = env.trading_dt.date() - datetime.timedelta(days=1)
    start_date = to_date(start_date)
    if start_date > dt:
        raise RQInvalidArgument(
            _(
                u"in get_dividend, start_date {} is later than the previous test day {}"
            ).format(start_date, dt)
        )
    order_book_id = assure_order_book_id(order_book_id)
    array = env.data_proxy.get_dividend(order_book_id)
    if array is None:
        return None
    # Dates are stored as yyyymmdd integers in the structured array.
    sd = start_date.year * 10000 + start_date.month * 100 + start_date.day
    ed = dt.year * 10000 + dt.month * 100 + dt.day
    return array[
        (array["announcement_date"] >= sd) & (array["announcement_date"] <= ed)
    ]
def to_industry_code(s):
    """Translate an industry *name* to its code; return *s* unchanged if unknown."""
    items = (v for v in industry_code.__dict__.values() if isinstance(v, IndustryCodeItem))
    for item in items:
        if item.name == s:
            return item.code
    return s
def to_sector_name(s):
    """Normalise a sector's cn/en/canonical name to the canonical name; return *s* if unknown."""
    for candidate in sector_code.__dict__.values():
        if isinstance(candidate, SectorCodeItem):
            if s in (candidate.cn, candidate.en, candidate.name):
                return candidate.name
    # not found
    return s
|
# -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),
# 您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、
# 本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,
# 否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 <EMAIL> 获取。
import math
import datetime
from itertools import chain
from decimal import Decimal, getcontext
from typing import Dict, List, Union, Optional
import six
import numpy as np
import pandas as pd
from rqalpha.api import export_as_api
from rqalpha.apis.api_base import cal_style, assure_order_book_id, assure_instrument
from rqalpha.apis.api_abstract import (
order_shares, order_value, order_percent, order_target_value, order_target_percent, order, order_to
)
from rqalpha.const import (
DEFAULT_ACCOUNT_TYPE, EXECUTION_PHASE, SIDE, ORDER_TYPE, POSITION_EFFECT, POSITION_DIRECTION,
INSTRUMENT_TYPE
)
from rqalpha.environment import Environment
from rqalpha.execution_context import ExecutionContext
from rqalpha.model.instrument import (
Instrument, IndustryCode as industry_code, IndustryCodeItem, SectorCode as sector_code, SectorCodeItem
)
from rqalpha.model.order import Order, MarketOrder, LimitOrder, OrderStyle
from rqalpha.utils import is_valid_price, INST_TYPE_IN_STOCK_ACCOUNT
from rqalpha.utils.arg_checker import apply_rules, verify_that
from rqalpha.utils.exception import RQInvalidArgument
from rqalpha.utils.i18n import gettext as _
from rqalpha.utils.logger import user_system_log
from rqalpha.utils.datetime_func import to_date
from rqalpha.mod.rqalpha_mod_sys_risk.validators.cash_validator import is_cash_enough
# Use Decimal to avoid float rounding errors in share-quantity arithmetic.
getcontext().prec = 10
export_as_api(industry_code, name='industry_code')
export_as_api(sector_code, name='sector_code')
def _get_account_position_ins(id_or_ins):
    """Resolve *id_or_ins* to (stock account, long position, instrument)."""
    instrument = assure_instrument(id_or_ins)
    stock_account = Environment.get_instance().portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK]
    long_position = stock_account.get_position(instrument.order_book_id, POSITION_DIRECTION.LONG)
    return stock_account, long_position, instrument
def _submit_order(ins, amount, side, position_effect, style, auto_switch_order_value):
    """
    Build and submit a single stock order.

    :param ins: instrument to trade
    :param amount: signed share quantity (BUY quantities are rounded down to a lot)
    :param side: SIDE.BUY or SIDE.SELL
    :param position_effect: POSITION_EFFECT.OPEN / CLOSE
    :param style: order style (MarketOrder / LimitOrder)
    :param auto_switch_order_value: when True and cash is insufficient for a BUY,
        fall back to spending all remaining cash via _order_value
    :return: the submitted Order, or None if creation failed (no market data,
        zero quantity), or the result of the order-value fallback
    :raises RQInvalidArgument: if a LimitOrder carries a non-positive price
    """
    env = Environment.get_instance()
    if isinstance(style, LimitOrder):
        if style.get_limit_price() <= 0:
            raise RQInvalidArgument(_(u"Limit order price should be positive"))
    price = env.data_proxy.get_last_price(ins.order_book_id)
    if not is_valid_price(price):
        user_system_log.warn(
            _(u"Order Creation Failed: [{order_book_id}] No market data").format(order_book_id=ins.order_book_id))
        return
    if side == SIDE.BUY:
        # Buys must be a whole number of lots; round the quantity down.
        round_lot = int(ins.round_lot)
        amount = int(Decimal(amount) / Decimal(round_lot)) * round_lot
    if amount == 0:
        user_system_log.warn(_(u"Order Creation Failed: 0 order quantity"))
        return
    order = Order.__from_create__(ins.order_book_id, abs(amount), side, style, position_effect)
    if order.type == ORDER_TYPE.MARKET:
        # Freeze the last price so market orders can reserve cash deterministically.
        order.set_frozen_price(price)
    if side == SIDE.BUY and auto_switch_order_value:
        account, position, ins = _get_account_position_ins(ins)
        if not is_cash_enough(env, order, account):
            user_system_log.warn(_(
                "insufficient cash, use all remaining cash({}) to create order"
            ).format(account.cash))
            return _order_value(account, position, ins, account.cash, style)
    if env.can_submit_order(order):
        env.broker.submit_order(order)
    return order
def _order_shares(ins, amount, style, auto_switch_order_value):
    """Submit a share order; the sign of *amount* decides buy-open vs sell-close."""
    if amount > 0:
        side, position_effect = SIDE.BUY, POSITION_EFFECT.OPEN
    else:
        side, position_effect = SIDE.SELL, POSITION_EFFECT.CLOSE
    return _submit_order(ins, amount, side, position_effect, style, auto_switch_order_value)
def _order_value(account, position, ins, cash_amount, style):
    """
    Convert a cash amount into a share quantity and submit the order.

    :param account: stock account (used for available-cash capping)
    :param position: long position in the instrument (caps sell quantity)
    :param ins: instrument to trade
    :param cash_amount: signed cash value; positive buys, negative sells
    :param style: order style; LimitOrder price is used directly, otherwise last price
    :return: the submitted Order, or None when no valid order can be built
    """
    env = Environment.get_instance()
    if cash_amount > 0:
        # Never try to spend more than the account actually has.
        cash_amount = min(cash_amount, account.cash)
    if isinstance(style, LimitOrder):
        price = style.get_limit_price()
    else:
        price = env.data_proxy.get_last_price(ins.order_book_id)
        if not is_valid_price(price):
            user_system_log.warn(
                _(u"Order Creation Failed: [{order_book_id}] No market data").format(order_book_id=ins.order_book_id)
            )
            return
    amount = int(Decimal(cash_amount) / Decimal(price))
    if cash_amount > 0:
        # Round the buy quantity down to whole lots, then shrink it lot by lot
        # until price * amount plus the expected transaction cost fits in cash.
        round_lot = int(ins.round_lot)
        amount = int(Decimal(amount) / Decimal(round_lot)) * round_lot
        while amount > 0:
            expected_transaction_cost = env.get_order_transaction_cost(Order.__from_create__(
                ins.order_book_id, amount, SIDE.BUY, LimitOrder(price), POSITION_EFFECT.OPEN
            ))
            if amount * price + expected_transaction_cost <= cash_amount:
                break
            amount -= round_lot
        else:
            # Loop exhausted without break: even one lot is unaffordable.
            user_system_log.warn(_(u"Order Creation Failed: 0 order quantity"))
            return
    if amount < 0:
        # Sells are capped at the closable quantity of the current position.
        amount = max(amount, -position.closable)
    return _order_shares(ins, amount, style, auto_switch_order_value=False)
@order_shares.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_shares(id_or_ins, amount, price=None, style=None):
    # Stock-account implementation of order_shares. auto_switch_order_value
    # (mod config) lets a cash-short BUY fall back to spending all remaining cash.
    auto_switch_order_value = Environment.get_instance().config.mod.sys_accounts.auto_switch_order_value
    return _order_shares(assure_instrument(id_or_ins), amount, cal_style(price, style), auto_switch_order_value)
@order_value.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_value(id_or_ins, cash_amount, price=None, style=None):
    # Stock-account implementation of order_value: trade roughly `cash_amount`
    # worth of the instrument (positive buys, negative sells).
    account, position, ins = _get_account_position_ins(id_or_ins)
    return _order_value(account, position, ins, cash_amount, cal_style(price, style))
@order_percent.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_percent(id_or_ins, percent, price=None, style=None):
    # Stock-account implementation of order_percent: trade a fraction of total
    # account equity (positive buys, negative sells).
    account, position, ins = _get_account_position_ins(id_or_ins)
    return _order_value(account, position, ins, account.total_value * percent, cal_style(price, style))
@order_target_value.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_target_value(id_or_ins, cash_amount, price=None, style=None):
    # Order the difference between the desired market value and the current one.
    account, position, instrument = _get_account_position_ins(id_or_ins)
    order_style = cal_style(price, style)
    if cash_amount == 0:
        # A zero target means: sell every closable share.
        return _submit_order(
            instrument, position.closable, SIDE.SELL, POSITION_EFFECT.CLOSE, order_style, False
        )
    delta = cash_amount - position.market_value
    return _order_value(account, position, instrument, delta, order_style)
@order_target_percent.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_target_percent(id_or_ins, percent, price=None, style=None):
    # Adjust the position so its market value reaches the requested fraction
    # of total account equity.
    account, position, instrument = _get_account_position_ins(id_or_ins)
    order_style = cal_style(price, style)
    if percent == 0:
        # Zero target weight: liquidate everything currently closable.
        return _submit_order(instrument, position.closable, SIDE.SELL, POSITION_EFFECT.CLOSE, order_style, False)
    delta = account.total_value * percent - position.market_value
    return _order_value(account, position, instrument, delta, order_style)
@order.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order(order_book_id, quantity, price=None, style=None):
    # Wrap the single resulting order in a list; empty list when no order
    # was created.
    submitted = stock_order_shares(order_book_id, quantity, price, style)
    return [submitted] if submitted else []
@order_to.register(INST_TYPE_IN_STOCK_ACCOUNT)
def stock_order_to(order_book_id, quantity, price=None, style=None):
    # Order the difference between the desired quantity and the current
    # long-position quantity.
    current = Environment.get_instance().portfolio.get_position(order_book_id, POSITION_DIRECTION.LONG)
    delta = quantity - current.quantity
    submitted = stock_order_shares(order_book_id, delta, price, style)
    return [submitted] if submitted else []
@export_as_api
@ExecutionContext.enforce_phase(
    EXECUTION_PHASE.OPEN_AUCTION,
    EXECUTION_PHASE.ON_BAR,
    EXECUTION_PHASE.ON_TICK,
    EXECUTION_PHASE.SCHEDULED,
    EXECUTION_PHASE.GLOBAL
)
@apply_rules(verify_that('id_or_ins').is_valid_stock(),
             verify_that('amount').is_number(),
             verify_that('style').is_instance_of((MarketOrder, LimitOrder, type(None))))
def order_lots(id_or_ins, amount, price=None, style=None):
    # type: (Union[str, Instrument], int, Optional[float], Optional[OrderStyle]) -> Optional[Order]
    """
    Send a buy/sell order sized in board lots. Pass an order style if needed;
    when omitted, a market order is used.

    :param id_or_ins: the instrument to trade
    :param int amount: number of lots; positive buys, negative sells. The
        share count is the lot count times the instrument's ``round_lot``
        (100 shares per lot for China A-shares).
    :param float price: order price; ``None`` (default) means a
        :class:`~MarketOrder`. This parameter is shorthand for ``style``.
    :param style: order style; :class:`~LimitOrder` and :class:`~MarketOrder`
        are currently supported.

    :example:

    .. code-block:: python

        # buy 20 lots of Ping An Bank with a market order:
        order_lots('000001.XSHE', 20)

        # buy 10 lots of Ping An Bank with a limit order at CNY 10:
        order_lots('000001.XSHE', 10, style=LimitOrder(10))
    """
    instrument = assure_instrument(id_or_ins)
    shares = amount * int(instrument.round_lot)
    auto_switch = Environment.get_instance().config.mod.sys_accounts.auto_switch_order_value
    return _order_shares(instrument, shares, cal_style(price, style), auto_switch)
@export_as_api
@ExecutionContext.enforce_phase(
    EXECUTION_PHASE.OPEN_AUCTION,
    EXECUTION_PHASE.ON_BAR,
    EXECUTION_PHASE.ON_TICK,
    EXECUTION_PHASE.SCHEDULED,
    EXECUTION_PHASE.GLOBAL
)
def order_target_portfolio(target_portfolio):
    # type: (Dict[Union[str, Instrument], float]) -> List[Order]
    """
    Batch-adjust positions so that each instrument's market value reaches the
    given share of the account's total equity.

    :param target_portfolio: mapping of instrument -> target weight of total
        equity; weights must be non-negative and sum to at most 1

    :example:

    .. code-block:: python

        # adjust holdings so Ping An Bank is 10% and Vanke A 15% of equity
        order_target_portfolio({
            '000001.XSHE': 0.1,
            '000002.XSHE': 0.15
        })
    """
    if isinstance(target_portfolio, pd.Series):
        # FIXME: kind of dirty
        total_percent = sum(target_portfolio)
    else:
        total_percent = sum(six.itervalues(target_portfolio))
    if total_percent > 1:
        raise RQInvalidArgument(_(u"total percent should be lower than 1, current: {}").format(total_percent))
    env = Environment.get_instance()
    account = env.portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK]
    account_value = account.total_value
    # Translate each weight into a (possibly fractional) share count at the
    # last price; instruments without market data are skipped with a warning.
    target_quantities = {}
    for id_or_ins, target_percent in target_portfolio.items():
        order_book_id = assure_order_book_id(id_or_ins)
        if target_percent < 0:
            raise RQInvalidArgument(_(u"target percent of {} should between 0 and 1, current: {}").format(
                order_book_id, target_percent
            ))
        price = env.data_proxy.get_last_price(order_book_id)
        if not is_valid_price(price):
            user_system_log.warn(
                _(u"Order Creation Failed: [{order_book_id}] No market data").format(order_book_id=order_book_id)
            )
            continue
        target_quantities[order_book_id] = account_value * target_percent / price
    close_orders, open_orders = [], []
    current_quantities = {
        p.order_book_id: p.quantity for p in account.get_positions() if p.direction == POSITION_DIRECTION.LONG
    }
    # Fully close long positions that are absent from the target portfolio.
    # NOTE(review): membership is tested against the raw target_portfolio keys;
    # if callers pass Instrument objects, comparing against order_book_id
    # strings would never match — verify key normalization upstream.
    for order_book_id, quantity in current_quantities.items():
        if order_book_id not in target_portfolio:
            close_orders.append(Order.__from_create__(
                order_book_id, quantity, SIDE.SELL, MarketOrder(), POSITION_EFFECT.CLOSE
            ))
    round_lot = 100  # NOTE(review): board lot hard-coded to 100 shares (A-share convention) — confirm for other markets
    for order_book_id, target_quantity in target_quantities.items():
        if order_book_id in current_quantities:
            delta_quantity = target_quantity - current_quantities[order_book_id]
        else:
            delta_quantity = target_quantity
        if delta_quantity >= round_lot:
            # Buys are rounded down to whole board lots.
            delta_quantity = math.floor(delta_quantity / round_lot) * round_lot
            open_orders.append(Order.__from_create__(
                order_book_id, delta_quantity, SIDE.BUY, MarketOrder(), POSITION_EFFECT.OPEN
            ))
        elif delta_quantity < -1:
            # Sells are rounded down to whole shares (no lot constraint here).
            delta_quantity = math.floor(delta_quantity)
            close_orders.append(Order.__from_create__(
                order_book_id, abs(delta_quantity), SIDE.SELL, MarketOrder(), POSITION_EFFECT.CLOSE
            ))
    submit_orders = []
    # Close orders are submitted before open orders (presumably so freed-up
    # cash can fund the buys — confirm against broker semantics).
    for order in chain(close_orders, open_orders):
        if env.can_submit_order(order):
            submit_orders.append(order)
            env.broker.submit_order(order)
    return submit_orders
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.OPEN_AUCTION,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('order_book_id').is_valid_instrument(),
             verify_that('count').is_greater_than(0))
def is_suspended(order_book_id, count=1):
    # type: (str, Optional[int]) -> Union[bool, pd.DataFrame]
    """
    Check whether a stock is suspended for the whole trading day.

    :param order_book_id: the stock's order_book_id or symbol
    :param count: how many of the most recent data points to look back over;
        defaults to the latest available one
    """
    # Hoisted: fetch the Environment singleton once instead of twice.
    env = Environment.get_instance()
    dt = env.calendar_dt.date()
    order_book_id = assure_order_book_id(order_book_id)
    return env.data_proxy.is_suspended(order_book_id, dt, count)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.OPEN_AUCTION,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('order_book_id').is_valid_instrument())
def is_st_stock(order_book_id, count=1):
    # type: (str, Optional[int]) -> Union[bool, pd.DataFrame]
    """
    Check whether a stock is an ST stock (including ST and *ST) over a period.

    ST stocks carry delisting risk, so strategies often want to screen them
    out; strategy competitions may forbid trading them altogether.

    :param order_book_id: the stock's order_book_id or symbol
    :param count: how many of the most recent data points to look back over;
        defaults to the latest available one
    """
    # Hoisted: fetch the Environment singleton once instead of twice.
    env = Environment.get_instance()
    dt = env.calendar_dt.date()
    order_book_id = assure_order_book_id(order_book_id)
    return env.data_proxy.is_st_stock(order_book_id, dt, count)
@export_as_api
@ExecutionContext.enforce_phase(
    EXECUTION_PHASE.ON_INIT,
    EXECUTION_PHASE.BEFORE_TRADING,
    EXECUTION_PHASE.OPEN_AUCTION,
    EXECUTION_PHASE.ON_BAR,
    EXECUTION_PHASE.ON_TICK,
    EXECUTION_PHASE.AFTER_TRADING,
    EXECUTION_PHASE.SCHEDULED,
)
@apply_rules(verify_that("code").is_instance_of((str, IndustryCodeItem)))
def industry(code):
    # type: (str) -> List[str]
    """
    Return the order_book_ids of all stocks belonging to one industry.

    :param code: industry name or industry code — e.g. ``industry_code.A01``
        or ``'A01'`` for agriculture. Codes follow the National Bureau of
        Statistics of China "Industrial Classification for National Economic
        Activities" (categories A01 ... S90); the full table is available at
        http://www.stats.gov.cn/tjsj/tjbz/hyflbz/

    :example:

    .. code-block:: python3
        :linenos:

        def init(context):
            stock_list = industry('A01')
            logger.info("agriculture stocks: " + str(stock_list))
    """
    # Normalize the argument to a bare industry code string.
    if isinstance(code, IndustryCodeItem):
        target_code = code.code
    else:
        target_code = to_industry_code(code)
    data_proxy = Environment.get_instance().data_proxy
    matches = []
    for instrument in data_proxy.all_instruments((INSTRUMENT_TYPE.CS,)):
        if instrument.industry_code == target_code:
            matches.append(instrument.order_book_id)
    return matches
@export_as_api
@ExecutionContext.enforce_phase(
    EXECUTION_PHASE.ON_INIT,
    EXECUTION_PHASE.BEFORE_TRADING,
    EXECUTION_PHASE.OPEN_AUCTION,
    EXECUTION_PHASE.ON_BAR,
    EXECUTION_PHASE.ON_TICK,
    EXECUTION_PHASE.AFTER_TRADING,
    EXECUTION_PHASE.SCHEDULED,
)
@apply_rules(verify_that("code").is_instance_of((str, SectorCodeItem)))
def sector(code):
    # type: (str) -> List[str]
    """
    Return the order_book_ids of all stocks belonging to one sector.

    :param code: sector name or sector code — e.g. ``'Energy'``, its localized
        name, or ``sector_code.Energy``. The supported sectors follow the
        MSCI global industry classification standard:

        Energy, Materials, ConsumerDiscretionary, ConsumerStaples,
        HealthCare, Financials, InformationTechnology,
        TelecommunicationServices, Utilities, Industrials

    :example:

    .. code-block:: python3
        :linenos:

        def init(context):
            ids1 = sector("consumer discretionary")
            ids2 = sector("ConsumerDiscretionary")
            assert ids1 == ids2
            logger.info(ids1)
    """
    # Normalize the argument to the canonical sector name.
    if isinstance(code, SectorCodeItem):
        target_name = code.name
    else:
        target_name = to_sector_name(code)
    data_proxy = Environment.get_instance().data_proxy
    matches = []
    for instrument in data_proxy.all_instruments((INSTRUMENT_TYPE.CS,)):
        if instrument.sector_code == target_name:
            matches.append(instrument.order_book_id)
    return matches
@export_as_api
@apply_rules(
    verify_that("order_book_id").is_valid_instrument(),
    verify_that("start_date").is_valid_date(ignore_none=False),
)
def get_dividend(order_book_id, start_date):
    # type: (str, Union[str, datetime.date, datetime.datetime, pd.Timestamp]) -> Optional[np.ndarray]
    """
    Get a stock's dividend records from ``start_date`` up to the day before
    the current strategy date (both ends inclusive).

    :param order_book_id: stock code
    :param start_date: start date; must be earlier than the current strategy date

    Returned record fields: ``announcement_date``, ``book_closure_date``,
    ``dividend_cash_before_tax``, ``ex_dividend_date``, ``payable_date``,
    ``round_lot``.

    :example:

    .. code-block:: python3
        :linenos:

        get_dividend('000001.XSHE', start_date='20130104')
    """
    # The 'adjusted' parameter is no longer offered when backtesting
    # unadjusted data.
    env = Environment.get_instance()
    prev_day = env.trading_dt.date() - datetime.timedelta(days=1)
    start_date = to_date(start_date)
    if start_date > prev_day:
        raise RQInvalidArgument(
            _(
                u"in get_dividend, start_date {} is later than the previous test day {}"
            ).format(start_date, prev_day)
        )
    order_book_id = assure_order_book_id(order_book_id)
    dividends = env.data_proxy.get_dividend(order_book_id)
    if dividends is None:
        return None
    # Dates are stored as yyyymmdd integers in the record array.
    lower = start_date.year * 10000 + start_date.month * 100 + start_date.day
    upper = prev_day.year * 10000 + prev_day.month * 100 + prev_day.day
    in_range = (dividends["announcement_date"] >= lower) & (dividends["announcement_date"] <= upper)
    return dividends[in_range]
def to_industry_code(s):
    # Map an industry display name to its code; unknown names pass through
    # unchanged (the input may already be a code).
    for item in industry_code.__dict__.values():
        if isinstance(item, IndustryCodeItem) and item.name == s:
            return item.code
    return s
def to_sector_name(s):
    # Map a Chinese name, English name, or canonical name to the canonical
    # sector name; unknown inputs pass through unchanged.
    for item in sector_code.__dict__.values():
        if isinstance(item, SectorCodeItem):
            if s in (item.cn, item.en, item.name):
                return item.name
    # not found
    return s
|
zh
| 0.806531
|
# -*- coding: utf-8 -*- # 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”) # # 除非遵守当前许可,否则不得使用本软件。 # # * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件): # 遵守 Apache License 2.0(下称“Apache 2.0 许可”), # 您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。 # 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。 # # * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件): # 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、 # 本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件, # 否则米筐科技有权追究相应的知识产权侵权责任。 # 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。 # 详细的授权流程,请联系 <EMAIL> 获取。 # 使用Decimal 解决浮点数运算精度问题 # type: (Union[str, Instrument], int, Optional[float], Optional[OrderStyle]) -> Optional[Order] 指定手数发送买/卖单。如有需要落单类型当做一个参量传入,如果忽略掉落单类型,那么默认是市价单(market order)。 :param id_or_ins: 下单标的物 :param int amount: 下单量, 正数代表买入,负数代表卖出。将会根据一手xx股来向下调整到一手的倍数,比如中国A股就是调整成100股的倍数。 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :example: .. code-block:: python #买入20手的平安银行股票,并且发送市价单: order_lots('000001.XSHE', 20) #买入10手平安银行股票,并且发送限价单,价格为¥10: order_lots('000001.XSHE', 10, style=LimitOrder(10)) # type: (Dict[Union[str, Instrument], float]) -> List[Order] 买入/卖出证券以批量调整证券的仓位,以期使其持仓市值占账户总权益的比重达到指定值。 :param target_portfolio: 下单标的物及其目标市值占比的字典 :example: .. 
code-block:: python # 调整仓位,以使平安银行和万科 A 的持仓占比分别达到 10% 和 15% order_target_portfolio({ '000001.XSHE': 0.1 '000002.XSHE': 0.15 }) # FIXME: kind of dirty # type: (str, Optional[int]) -> Union[bool, pd.DataFrame] 判断某只股票是否全天停牌。 :param order_book_id: 某只股票的代码或股票代码,可传入单只股票的order_book_id, symbol :param count: 回溯获取的数据个数。默认为当前能够获取到的最近的数据 # type: (str, Optional[int]) -> Union[bool, pd.DataFrame] 判断股票在一段时间内是否为ST股(包括ST与*ST)。 ST股是有退市风险因此风险比较大的股票,很多时候您也会希望判断自己使用的股票是否是'ST'股来避开这些风险大的股票。另外,我们目前的策略比赛也禁止了使用'ST'股。 :param order_book_id: 某只股票的代码,可传入单只股票的order_book_id, symbol :param count: 回溯获取的数据个数。默认为当前能够获取到的最近的数据 # type: (str) -> List[str] 获得属于某一行业的所有股票列表。 :param code: 行业名称或行业代码。例如,农业可填写industry_code.A01 或 'A01' 我们目前使用的行业分类来自于中国国家统计局的 `国民经济行业分类 <http://www.stats.gov.cn/tjsj/tjbz/hyflbz/>`_ ,可以使用这里的任何一个行业代码来调用行业的股票列表: ========================= =================================================== 行业代码 行业名称 ========================= =================================================== A01 农业 A02 林业 A03 畜牧业 A04 渔业 A05 农、林、牧、渔服务业 B06 煤炭开采和洗选业 B07 石油和天然气开采业 B08 黑色金属矿采选业 B09 有色金属矿采选业 B10 非金属矿采选业 B11 开采辅助活动 B12 其他采矿业 C13 农副食品加工业 C14 食品制造业 C15 酒、饮料和精制茶制造业 C16 烟草制品业 C17 纺织业 C18 纺织服装、服饰业 C19 皮革、毛皮、羽毛及其制品和制鞋业 C20 木材加工及木、竹、藤、棕、草制品业 C21 家具制造业 C22 造纸及纸制品业 C23 印刷和记录媒介复制业 C24 文教、工美、体育和娱乐用品制造业 C25 石油加工、炼焦及核燃料加工业 C26 化学原料及化学制品制造业 C27 医药制造业 C28 化学纤维制造业 C29 橡胶和塑料制品业 C30 非金属矿物制品业 C31 黑色金属冶炼及压延加工业 C32 有色金属冶炼和压延加工业 C33 金属制品业 C34 通用设备制造业 C35 专用设备制造业 C36 汽车制造业 C37 铁路、船舶、航空航天和其它运输设备制造业 C38 电气机械及器材制造业 C39 计算机、通信和其他电子设备制造业 C40 仪器仪表制造业 C41 其他制造业 C42 废弃资源综合利用业 C43 金属制品、机械和设备修理业 D44 电力、热力生产和供应业 D45 燃气生产和供应业 D46 水的生产和供应业 E47 房屋建筑业 E48 土木工程建筑业 E49 建筑安装业 E50 建筑装饰和其他建筑业 F51 批发业 F52 零售业 G53 铁路运输业 G54 道路运输业 G55 水上运输业 G56 航空运输业 G57 管道运输业 G58 装卸搬运和运输代理业 G59 仓储业 G60 邮政业 H61 住宿业 H62 餐饮业 I63 电信、广播电视和卫星传输服务 I64 互联网和相关服务 I65 软件和信息技术服务业 J66 货币金融服务 J67 资本市场服务 J68 保险业 J69 其他金融业 K70 房地产业 L71 租赁业 L72 商务服务业 M73 研究和试验发展 M74 专业技术服务业 M75 科技推广和应用服务业 N76 水利管理业 N77 生态保护和环境治理业 N78 公共设施管理业 O79 居民服务业 O80 机动车、电子产品和日用产品修理业 O81 其他服务业 P82 教育 Q83 卫生 Q84 
社会工作 R85 新闻和出版业 R86 广播、电视、电影和影视录音制作业 R87 文化艺术业 R88 体育 R89 娱乐业 S90 综合 ========================= =================================================== :example: .. code-block:: python3 :linenos: def init(context): stock_list = industry('A01') logger.info("农业股票列表:" + str(stock_list)) #INITINFO 农业股票列表:['600354.XSHG', '601118.XSHG', '002772.XSHE', '600371.XSHG', '600313.XSHG', '600672.XSHG', '600359.XSHG', '300143.XSHE', '002041.XSHE', '600762.XSHG', '600540.XSHG', '300189.XSHE', '600108.XSHG', '300087.XSHE', '600598.XSHG', '000998.XSHE', '600506.XSHG'] # type: (str) -> List[str] 获得属于某一板块的所有股票列表。 :param code: 板块名称或板块代码。例如,能源板块可填写'Energy'、'能源'或sector_code.Energy 目前支持的板块分类如下,其取值参考自MSCI发布的全球行业标准分类: ========================= ========================= ============================================================================== 板块代码 中文板块名称 英文板块名称 ========================= ========================= ============================================================================== Energy 能源 energy Materials 原材料 materials ConsumerDiscretionary 非必需消费品 consumer discretionary ConsumerStaples 必需消费品 consumer staples HealthCare 医疗保健 health care Financials 金融 financials InformationTechnology 信息技术 information technology TelecommunicationServices 电信服务 telecommunication services Utilities 公共服务 utilities Industrials 工业 industrials ========================= ========================= ============================================================================== :example: .. 
code-block:: python3 :linenos: def init(context): ids1 = sector("consumer discretionary") ids2 = sector("非必需消费品") ids3 = sector("ConsumerDiscretionary") assert ids1 == ids2 and ids1 == ids3 logger.info(ids1) #INIT INFO #['002045.XSHE', '603099.XSHG', '002486.XSHE', '002536.XSHE', '300100.XSHE', '600633.XSHG', '002291.XSHE', ..., '600233.XSHG'] # type: (str, Union[str, datetime.date, datetime.datetime, pd.Timestamp]) -> Optional[np.ndarray] 获取某只股票到策略当前日期前一天的分红情况(包含起止日期)。 :param order_book_id: 股票代码 :param start_date: 开始日期,需要早于策略当前日期 ========================= =================================================== fields 字段名 ========================= =================================================== announcement_date 分红宣布日 book_closure_date 股权登记日 dividend_cash_before_tax 税前分红 ex_dividend_date 除权除息日 payable_date 分红到帐日 round_lot 分红最小单位 ========================= =================================================== :example: 获取平安银行2013-01-04 到策略当前日期前一天的分红数据: .. code-block:: python3 :linenos: get_dividend('000001.XSHE', start_date='20130104') #[Out] #array([(20130614, 20130619, 20130620, 20130620, 1.7 , 10), # (20140606, 20140611, 20140612, 20140612, 1.6 , 10), # (20150407, 20150410, 20150413, 20150413, 1.74, 10), # (20160608, 20160615, 20160616, 20160616, 1.53, 10)], # dtype=[('announcement_date', '<u4'), ('book_closure_date', '<u4'), ('ex_dividend_date', '<u4'), ('payable_date', '<u4'), ('dividend_cash_before_tax', '<f8'), ('round_lot', '<u4')]) # adjusted 参数在不复权数据回测时不再提供 # not found
| 2.411117
| 2
|
wagtailmetadata/tests/test_mixin.py
|
boltaffect/wagtail-metadata-mixin
| 0
|
6627784
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.utils import timezone
from django.test.utils import override_settings
from django.conf import settings
from wagtail.core.models import Site
from wagtail.images.models import Image
from meta import settings as meta_settings
from wagtail.images.tests.utils import get_test_image_file
from wagtailmetadata.tests.testapp.models import SimplePage
class TestMetadataPageMixin(TestCase):
    """Exercise the get_meta_* hooks MetadataPageMixin adds to a wagtail Page.

    Each test first checks the default value derived from the page/site, then
    mutates the page, site, or django-meta settings and checks the override.
    """

    def setUp(self):
        # One site, one image, and one child page cover every test below.
        self.site = Site.objects.first()
        self.site.site_name = 'Example'
        self.site.hostname = 'example.com'
        self.site.save()
        self.image = Image.objects.create(
            title='Image',
            file=get_test_image_file(),
        )
        self.page = self.site.root_page.add_child(instance=SimplePage(
            title='Simple Page',
        ))

    def test_published_time(self):
        # go_live_at takes precedence over first_published_at once set.
        self.assertEqual(self.page.published_time, self.page.first_published_at)
        self.page.go_live_at = timezone.now()
        self.assertEqual(self.page.published_time, self.page.go_live_at)

    def test_get_meta_title(self):
        # seo_title overrides the plain page title.
        self.assertEqual(self.page.get_meta_title(), 'Simple Page')
        self.page.seo_title = 'Lorem ipsum...'
        self.assertEqual(self.page.get_meta_title(), 'Lorem ipsum...')

    def test_get_meta_description(self):
        self.assertEqual(self.page.get_meta_description(), '')
        self.page.search_description = 'Lorem ipsum dolor sit amet...'
        self.assertEqual(self.page.get_meta_description(), 'Lorem ipsum dolor sit amet...')

    def test_get_meta_keywords(self):
        self.assertEqual(self.page.get_meta_keywords(), [])

    def test_get_meta_url(self):
        self.assertEqual(
            self.page.get_meta_url(), self.page.build_absolute_uri('/simple-page/'))

    def test_get_meta_image(self):
        # search_image is rendered at fill-800x450 and returned as an absolute URL.
        self.assertEqual(self.page.get_meta_image(), None)
        self.page.search_image = self.image
        self.assertEqual(
            self.page.get_meta_image(), self.page.build_absolute_uri(self.image.get_rendition('fill-800x450').url))

    def test_get_meta_image_with_settings(self):
        # django-meta's DEFAULT_IMAGE is used when the page has no search_image.
        # NOTE(review): mutates module-level settings, restored manually below;
        # a failed assert would leak the change into other tests.
        self.assertEqual(self.page.get_meta_image(), None)
        old_DEFAULT_IMAGE = meta_settings.DEFAULT_IMAGE
        meta_settings.DEFAULT_IMAGE = 'image.png'
        self.assertEqual(
            self.page.get_meta_image(), self.page.build_absolute_uri('image.png'))
        meta_settings.DEFAULT_IMAGE = old_DEFAULT_IMAGE

    def test_get_meta_object_type(self):
        self.assertEqual(self.page.get_meta_object_type(), None)
        self.page.object_type = 'article'
        self.assertEqual(self.page.get_meta_object_type(), 'article')

    def test_get_meta_site_name(self):
        self.assertEqual(self.page.get_meta_site_name(), 'Example')
        self.site.site_name = "Site Name"
        self.site.save()
        self.assertEqual(self.page.get_meta_site_name(), 'Site Name')

    def test_get_meta_site_name_with_settings(self):
        # Falls back to WAGTAIL_SITE_NAME when the site has no site_name.
        self.assertEqual(self.page.get_meta_site_name(), 'Example')
        self.site.site_name = ''  # for testing purpose
        self.site.save()
        with self.settings(WAGTAIL_SITE_NAME='Site Name'):
            self.assertEqual(self.page.get_meta_site_name(), 'Site Name')

    def test_get_meta_twitter_site(self):
        self.assertEqual(self.page.get_meta_twitter_site(), '')
        old_TWITTER_SITE = meta_settings.TWITTER_SITE
        meta_settings.TWITTER_SITE = "@site"
        self.assertEqual(self.page.get_meta_twitter_site(), '@site')
        meta_settings.TWITTER_SITE = old_TWITTER_SITE

    def test_get_meta_twitter_creator(self):
        self.assertEqual(self.page.get_meta_twitter_creator(), '')
        old_TWITTER_AUTHOR = meta_settings.TWITTER_AUTHOR
        meta_settings.TWITTER_AUTHOR = '@author'
        self.assertEqual(self.page.get_meta_twitter_creator(), '@author')
        meta_settings.TWITTER_AUTHOR = old_TWITTER_AUTHOR

    def test_get_meta_twitter_card(self):
        # Card type upgrades to summary_large_image once an image is present.
        self.assertEqual(self.page.get_meta_twitter_card(), 'summary')
        self.page.search_image = self.image
        self.assertEqual(self.page.get_meta_twitter_card(), 'summary_large_image')

    def test_get_meta_locale(self):
        self.assertEqual(self.page.get_meta_locale(), getattr(settings, 'LANGUAGE_CODE', 'en_US'))
        with self.settings(LANGUAGE_CODE='ru_RU'):
            self.assertEqual(self.page.get_meta_locale(), 'ru_RU')

    def test_get_meta_custom_namespace(self):
        self.assertEqual(self.page.get_meta_custom_namespace(), None)
        self.page.custom_namespace = 'website'
        self.assertEqual(self.page.get_meta_custom_namespace(), 'website')

    def test_get_meta_custom_namespace_with_settings(self):
        self.assertEqual(self.page.get_meta_custom_namespace(), None)
        old_OG_NAMESPACES = meta_settings.OG_NAMESPACES
        meta_settings.OG_NAMESPACES = ['foo', 'bar']
        self.assertEqual(self.page.get_meta_custom_namespace(), ['foo', 'bar'])
        meta_settings.OG_NAMESPACES = old_OG_NAMESPACES

    def test_get_domain(self):
        self.assertEqual(self.page.get_domain(), 'example.com')
        self.site.hostname = "domain.com"
        self.site.save()
        self.assertEqual(self.page.get_domain(), 'domain.com')
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.utils import timezone
from django.test.utils import override_settings
from django.conf import settings
from wagtail.core.models import Site
from wagtail.images.models import Image
from meta import settings as meta_settings
from wagtail.images.tests.utils import get_test_image_file
from wagtailmetadata.tests.testapp.models import SimplePage
class TestMetadataPageMixin(TestCase):
    """Exercise the get_meta_* hooks MetadataPageMixin adds to a wagtail Page.

    Each test first checks the default value derived from the page/site, then
    mutates the page, site, or django-meta settings and checks the override.
    """

    def setUp(self):
        # One site, one image, and one child page cover every test below.
        self.site = Site.objects.first()
        self.site.site_name = 'Example'
        self.site.hostname = 'example.com'
        self.site.save()
        self.image = Image.objects.create(
            title='Image',
            file=get_test_image_file(),
        )
        self.page = self.site.root_page.add_child(instance=SimplePage(
            title='Simple Page',
        ))

    def test_published_time(self):
        # go_live_at takes precedence over first_published_at once set.
        self.assertEqual(self.page.published_time, self.page.first_published_at)
        self.page.go_live_at = timezone.now()
        self.assertEqual(self.page.published_time, self.page.go_live_at)

    def test_get_meta_title(self):
        # seo_title overrides the plain page title.
        self.assertEqual(self.page.get_meta_title(), 'Simple Page')
        self.page.seo_title = 'Lorem ipsum...'
        self.assertEqual(self.page.get_meta_title(), 'Lorem ipsum...')

    def test_get_meta_description(self):
        self.assertEqual(self.page.get_meta_description(), '')
        self.page.search_description = 'Lorem ipsum dolor sit amet...'
        self.assertEqual(self.page.get_meta_description(), 'Lorem ipsum dolor sit amet...')

    def test_get_meta_keywords(self):
        self.assertEqual(self.page.get_meta_keywords(), [])

    def test_get_meta_url(self):
        self.assertEqual(
            self.page.get_meta_url(), self.page.build_absolute_uri('/simple-page/'))

    def test_get_meta_image(self):
        # search_image is rendered at fill-800x450 and returned as an absolute URL.
        self.assertEqual(self.page.get_meta_image(), None)
        self.page.search_image = self.image
        self.assertEqual(
            self.page.get_meta_image(), self.page.build_absolute_uri(self.image.get_rendition('fill-800x450').url))

    def test_get_meta_image_with_settings(self):
        # django-meta's DEFAULT_IMAGE is used when the page has no search_image.
        # NOTE(review): mutates module-level settings, restored manually below;
        # a failed assert would leak the change into other tests.
        self.assertEqual(self.page.get_meta_image(), None)
        old_DEFAULT_IMAGE = meta_settings.DEFAULT_IMAGE
        meta_settings.DEFAULT_IMAGE = 'image.png'
        self.assertEqual(
            self.page.get_meta_image(), self.page.build_absolute_uri('image.png'))
        meta_settings.DEFAULT_IMAGE = old_DEFAULT_IMAGE

    def test_get_meta_object_type(self):
        self.assertEqual(self.page.get_meta_object_type(), None)
        self.page.object_type = 'article'
        self.assertEqual(self.page.get_meta_object_type(), 'article')

    def test_get_meta_site_name(self):
        self.assertEqual(self.page.get_meta_site_name(), 'Example')
        self.site.site_name = "Site Name"
        self.site.save()
        self.assertEqual(self.page.get_meta_site_name(), 'Site Name')

    def test_get_meta_site_name_with_settings(self):
        # Falls back to WAGTAIL_SITE_NAME when the site has no site_name.
        self.assertEqual(self.page.get_meta_site_name(), 'Example')
        self.site.site_name = ''  # for testing purpose
        self.site.save()
        with self.settings(WAGTAIL_SITE_NAME='Site Name'):
            self.assertEqual(self.page.get_meta_site_name(), 'Site Name')

    def test_get_meta_twitter_site(self):
        self.assertEqual(self.page.get_meta_twitter_site(), '')
        old_TWITTER_SITE = meta_settings.TWITTER_SITE
        meta_settings.TWITTER_SITE = "@site"
        self.assertEqual(self.page.get_meta_twitter_site(), '@site')
        meta_settings.TWITTER_SITE = old_TWITTER_SITE

    def test_get_meta_twitter_creator(self):
        self.assertEqual(self.page.get_meta_twitter_creator(), '')
        old_TWITTER_AUTHOR = meta_settings.TWITTER_AUTHOR
        meta_settings.TWITTER_AUTHOR = '@author'
        self.assertEqual(self.page.get_meta_twitter_creator(), '@author')
        meta_settings.TWITTER_AUTHOR = old_TWITTER_AUTHOR

    def test_get_meta_twitter_card(self):
        # Card type upgrades to summary_large_image once an image is present.
        self.assertEqual(self.page.get_meta_twitter_card(), 'summary')
        self.page.search_image = self.image
        self.assertEqual(self.page.get_meta_twitter_card(), 'summary_large_image')

    def test_get_meta_locale(self):
        self.assertEqual(self.page.get_meta_locale(), getattr(settings, 'LANGUAGE_CODE', 'en_US'))
        with self.settings(LANGUAGE_CODE='ru_RU'):
            self.assertEqual(self.page.get_meta_locale(), 'ru_RU')

    def test_get_meta_custom_namespace(self):
        self.assertEqual(self.page.get_meta_custom_namespace(), None)
        self.page.custom_namespace = 'website'
        self.assertEqual(self.page.get_meta_custom_namespace(), 'website')

    def test_get_meta_custom_namespace_with_settings(self):
        self.assertEqual(self.page.get_meta_custom_namespace(), None)
        old_OG_NAMESPACES = meta_settings.OG_NAMESPACES
        meta_settings.OG_NAMESPACES = ['foo', 'bar']
        self.assertEqual(self.page.get_meta_custom_namespace(), ['foo', 'bar'])
        meta_settings.OG_NAMESPACES = old_OG_NAMESPACES

    def test_get_domain(self):
        self.assertEqual(self.page.get_domain(), 'example.com')
        self.site.hostname = "domain.com"
        self.site.save()
        self.assertEqual(self.page.get_domain(), 'domain.com')
|
en
| 0.826538
|
# -*- coding: utf-8 -*- # for testing purpose
| 1.978923
| 2
|
Bottleneck Based Gridlock Prediction in Urban Road Network Using Long Short-Term Memory/retrieveDatawithRandomVehicles.py
|
Sinadalee/Smart-Mobility-Chula
| 1
|
6627785
|
import os, sys

# Make SUMO's bundled python tools (traci, sumolib) importable via the
# SUMO_HOME environment variable; abort early if it is not set.
if 'SUMO_HOME' in os.environ:
    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
    sys.path.append(tools)
else:
    sys.exit("please declare environment variable 'SUMO_HOME'")
# Hard-coded placeholder path; a harmless no-op if the directory is absent.
sys.path.append(os.path.join('c:', os.sep, 'whatever', 'path', 'to', 'sumo', 'tools'))
import traci
import sumolib
import csv
import pathlib
import glob
import pandas as pd
import random
from datetime import datetime, timedelta
import os  # NOTE(review): duplicate of the import at the top of this file
from random import randint as r
def createFile(POIEdges, percentage, frequency, outputFile):
    """Create one header-only CSV per (road, frequency, percentage) combination.

    Files are written to ``<cwd>/<outputFile>/<road>_<freq>_<pcent>.csv``;
    the caller is expected to have created the directory already.

    :param POIEdges: mapping of road name -> list of SUMO edge ids
    :param percentage: sampling-percentage labels, e.g. ``['5%', '10%']``
    :param frequency: sampling frequencies in seconds
    :param outputFile: name of the output directory under the current directory
    """
    # to get the current directory
    dirpath = os.getcwd()
    for freq in frequency:
        for pcent in percentage:
            for road, links in POIEdges.items():
                path = dirpath + '/' + outputFile + '/' + road + '_' + str(freq) + '_' + pcent + '.csv'
                heading = ["Time", "Time(s)", *links, "Total Vehicles", "Mean Speed (km/h)",
                           "Low Mean Speed", "Persistently Low Mean Speed Indicator"]
                # 'with' guarantees the handle is closed even if the write fails
                # (the original left the file open on error).
                with open(path, 'w', newline='') as csv_file:
                    csv.writer(csv_file).writerow(heading)
def parseFloat(str):  # noqa: A002 - parameter name kept to avoid breaking keyword callers
    """Parse a plain number or a percentage string.

    ``'1.5'`` -> 1.5, ``'25%'`` -> 0.25. Raises ``Exception`` for anything
    that is neither a number nor a percentage.
    """
    try:
        return float(str)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
        # unrelated bugs are no longer swallowed.
        text = str.strip()
        if text.endswith("%"):
            return float(text.strip("%").strip()) / 100
        raise Exception("Don't know how to parse %s" % str)
#this function is to get the time string like h:m:s
#========================================================================================
def getTime(time):
time=time%(24*3600)
hours=time//3600
time%=3600
minutes=time//60
time%=60
seconds=time
periods=[('hours',int(hours)),('minutes',int(minutes)),('seconds',int(seconds))]
time_string=':'.join('{}'.format(value) for name,value in periods)
return time_string
#========================================================================================
#main functin
def main():
    """Run 100 randomly seeded SUMO simulations of 06:00-09:00 and record
    sampled vehicle mean speeds per (road, sampling frequency, penetration
    percentage) into per-run CSV files.

    For each run a fresh seed is drawn and persisted to random.csv so no
    seed is reused across runs, a numbered output directory is created and
    pre-filled with header-only CSVs, and SUMO is driven step by step via
    TraCI while speed samples are appended to those CSVs.
    """
    # Roads of interest -> SUMO edge ids that compose each road.
    POIEdges = { 'Sathorn_Thai_1':['L197#1','L197#2'],
                 'Sathorn_Thai_2': ['L30', 'L58#1','L58#2'],
                 'Charoenkrung_1': ['L30032'],
                 'Charoenkrung_2': ['L60', 'L73', 'L10149#1','L10149#2'],
                 'Charoenkrung_3': ['L67'],
                 'Silom_1': ['L138'],
                 'Silom_2': ['L133.25'],
                 'Silom_3': ['L49'],
                 'Mehasak':['L64'],
                 'Surasak': ['L10130', 'L10189'],
                 'Charoen_Rat': ['L40']
                 }
    # Probe-vehicle penetration rates and sampling intervals (sim seconds).
    percentage = ['1%', '5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%','100%']
    frequency = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60]
    randomlist = []
    dirpath = os.getcwd()
    # NOTE(review): there is no path separator between the cwd and the
    # directory name, so this resolves to '<cwd>RetrieveOnly...' -- confirm
    # this is the intended location.
    path = dirpath + 'RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID/random.csv'
    # 'w' truncates the seed log, discarding seeds from earlier invocations.
    myfile1 = open(path,'w', newline='')
    writer1 = csv.writer(myfile1)
    heading = ["Output File","random seed"]
    writer1.writerow(heading)
    myfile1.close()
    for outputFile in range(0,100):
        random_number = random.randint(50, 23423)
        #################### this code block is to keep random seed number permanently####################
        random_df = pd.read_csv(path)
        # print(random_df.columns)
        randomlist = random_df['random seed'].values.tolist()
        # NOTE(review): 'len(randomlist) >= 0' is always true; the net
        # effect is simply to re-draw until the seed is unused.
        if len(randomlist) >=0:
            while random_number in randomlist:
                random_number = random.randint(50, 23423)
        #randomlist.append(r)
        myfile = open(path, 'a', newline='')
        writer = csv.writer(myfile)
        with myfile:
            writer.writerow([(outputFile+1),random_number])
        myfile.close()
        ##################################################################################################
        print(randomlist)
        print('Random Seed Number : ', random_number)
        # One numbered directory per run, pre-filled with empty CSVs
        # (header row only) by createFile().
        os.mkdir(dirpath + '/'+ str(outputFile+1))
        createFile(POIEdges, percentage, frequency, str(outputFile+1))
        # sumoBinary = sumolib.checkBinary('sumo-gui')
        sumoBinary = sumolib.checkBinary('sumo')
        sumoCmd = [sumoBinary,
                   "--no-internal-links", 'true',
                   "--ignore-junction-blocker", '1',
                   '--start', 'true',
                   '--quit-on-end', 'true',
                   # "--random",'true',
                   "-c", "sathorn_w.sumo.cfg",
                   # '-a',"sathon_wide_tls_20160418_edited.add(upperSurasak)_withoutLaneclose.xml",
                   '-a', "sathon_wide_tls_20160418_edited.add.xml",
                   '--time-to-teleport', "-1",
                   '--seed', str(random_number),
                   '--no-warnings','true'
                   ]
        # sumoCmd = [sumoBinary, "-c", "sathorn_w.sumo.cfg",'-a',"sathon_wide_tls_20160418_edited.add.xml",'--time-to-teleport',"-1"]
        # sumoCmd = [sumoBinary, "-c", "sathorn_w.sumo.cfg"]
        traci.start(sumoCmd)
        # Simulate 06:00 (21600 s) through 09:00 (32400 s) inclusive.
        step = 21600
        import time
        start_time = time.time()
        while step <= 32400:
            traci.simulationStep()
            for freq in frequency:
                if step % freq == 0:
                    for pcent in percentage:
                        percent = parseFloat(pcent)
                        for road, links in POIEdges.items():
                            temp = []
                            vList = []
                            # Collect all vehicle ids currently on the
                            # road's edges plus the per-edge counts.
                            for link in links:
                                IDs = list(traci.edge.getLastStepVehicleIDs(link))
                                vList.extend(IDs)
                                # print(vList)
                                temp.append(len(list(traci.edge.getLastStepVehicleIDs(link))))
                            ###### This code segment has issues about replicated vehicle IDs##########
                            # random_v = []
                            # g = (r(0, len(vList) - 1) for _ in range(int(len(vList) * (percent))))
                            # for i in g:
                            #     random_v.append(vList[i])
                            # print(percent,random_v)
                            ##########################################################################
                            # Sample WITHOUT replacement -> no duplicate ids
                            # (fixes the commented-out block above).
                            random_v = random.sample(vList, int(len(vList) * (percent)))
                            # print(percent,random_v)
                            totalSpeed = 0.0
                            for v in random_v:
                                totalSpeed += float(traci.vehicle.getSpeed(v))
                            # -1 marks "no vehicles sampled" in the output.
                            if (len(random_v)) > 0:
                                meanSpeed = float(totalSpeed / int(len(random_v)))
                            else:
                                meanSpeed = -1.00
                            format_time = datetime.strptime(getTime(float(step)), '%H:%M:%S')
                            # NOTE(review): this rebinds the name 'time' and
                            # shadows the module imported above -- that is
                            # why 'import time' is repeated after the loop.
                            time = format_time.time()
                            # The * 3.6 suggests meanSpeed is in m/s (TraCI
                            # getSpeed) and 5 km/h is the threshold, yet the
                            # CSV header labels the column km/h -- confirm
                            # the intended unit.
                            if meanSpeed * 3.6 <= 5 and meanSpeed >= 0:
                                low_meanSpeed = 1
                            else:
                                low_meanSpeed = 0
                            # Placeholder: persistence is computed elsewhere.
                            persistent_low_meanSpeed = 0
                            myfile = open(
                                dirpath + '/' + str(
                                    outputFile + 1) + '/' + road + '_' + str(
                                    freq) + '_' + pcent + '.csv', 'a', newline='')
                            writer = csv.writer(myfile)
                            with myfile:
                                writer.writerow(
                                    [time, step, *temp, int(len(random_v)), meanSpeed, low_meanSpeed,
                                     persistent_low_meanSpeed])
                            myfile.close()
            step += 1
        traci.close()
        # Re-import because the name 'time' was shadowed inside the loop.
        import time
        print("--- %s seconds ---" % (time.time() - start_time))
if __name__=="__main__":
main()
|
import os, sys
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
sys.path.append(os.path.join('c:', os.sep, 'whatever', 'path', 'to', 'sumo', 'tools'))
import traci
import sumolib
import csv
import pathlib
import glob
import pandas as pd
import random
from datetime import datetime, timedelta
import os
from random import randint as r
def createFile(POIEdges, percentage, frequency, outputFile):
    """Create one header-only CSV per (road, frequency, percentage) combo.

    Args:
        POIEdges: mapping of road name -> list of SUMO edge ids; each edge
            id becomes a per-edge vehicle-count column in the header.
        percentage: list of percentage strings (e.g. '5%') used in filenames.
        frequency: list of sampling frequencies (seconds) used in filenames.
        outputFile: name of the run subdirectory (relative to the current
            working directory) that must already exist.
    """
    # to get the current directory
    dirpath = os.getcwd()
    for freq in frequency:
        for pcent in percentage:
            for road, links in POIEdges.items():
                filename = road + '_' + str(freq) + '_' + pcent + '.csv'
                # 'with' guarantees the handle is closed even if the
                # header write fails partway through the triple loop
                with open(os.path.join(dirpath, outputFile, filename),
                          'w', newline='') as csv_file:
                    writer = csv.writer(csv_file)
                    writer.writerow(
                        ["Time", "Time(s)", *links, "Total Vehicles",
                         "Mean Speed (km/h)", "Low Mean Speed",
                         "Persistently Low Mean Speed Indicator"])
def parseFloat(str):
    """Parse a numeric string, accepting an optional trailing percent sign.

    Plain numbers ("0.25") are returned as floats; percentage strings
    ("25%") are converted to their fractional value (0.25).

    Raises:
        ValueError: if the string is neither a number nor a percentage
            (subclass of Exception, so existing broad handlers still work).
    """
    # Keep the original parameter name for compatibility; alias it locally
    # so the builtin ``str`` is not shadowed in the body.
    text = str
    try:
        return float(text)
    except (TypeError, ValueError):
        text = text.strip()
        if text.endswith("%"):
            # "25 %" -> "25" -> 0.25
            return float(text[:-1].strip()) / 100
        raise ValueError("Don't know how to parse %s" % text)
#this function is to get the time string like h:m:s
#========================================================================================
def getTime(time):
    """Convert a second count into an ``h:m:s`` string (no zero padding).

    The input is first wrapped into a single day (mod 24h); fractional
    seconds are truncated.
    """
    seconds_of_day = time % (24 * 3600)
    hours, remainder = divmod(seconds_of_day, 3600)
    minutes, seconds = divmod(remainder, 60)
    # %d truncates floats exactly like the original int() casts did
    return '%d:%d:%d' % (hours, minutes, seconds)
#========================================================================================
#main functin
def main():
POIEdges = { 'Sathorn_Thai_1':['L197#1','L197#2'],
'Sathorn_Thai_2': ['L30', 'L58#1','L58#2'],
'Charoenkrung_1': ['L30032'],
'Charoenkrung_2': ['L60', 'L73', 'L10149#1','L10149#2'],
'Charoenkrung_3': ['L67'],
'Silom_1': ['L138'],
'Silom_2': ['L133.25'],
'Silom_3': ['L49'],
'Mehasak':['L64'],
'Surasak': ['L10130', 'L10189'],
'Charoen_Rat': ['L40']
}
percentage = ['1%', '5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%', '45%', '50%','100%']
frequency = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60]
randomlist = []
dirpath = os.getcwd()
path = dirpath + 'RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID/random.csv'
myfile1 = open(path,'w', newline='')
writer1 = csv.writer(myfile1)
heading = ["Output File","random seed"]
writer1.writerow(heading)
myfile1.close()
for outputFile in range(0,100):
random_number = random.randint(50, 23423)
#################### this code block is to keep random seed number permanently####################
random_df = pd.read_csv(path)
# print(random_df.columns)
randomlist = random_df['random seed'].values.tolist()
if len(randomlist) >=0:
while random_number in randomlist:
random_number = random.randint(50, 23423)
#randomlist.append(r)
myfile = open(path, 'a', newline='')
writer = csv.writer(myfile)
with myfile:
writer.writerow([(outputFile+1),random_number])
myfile.close()
##################################################################################################
print(randomlist)
print('Random Seed Number : ', random_number)
os.mkdir(dirpath + '/'+ str(outputFile+1))
createFile(POIEdges, percentage, frequency, str(outputFile+1))
# sumoBinary = sumolib.checkBinary('sumo-gui')
sumoBinary = sumolib.checkBinary('sumo')
sumoCmd = [sumoBinary,
"--no-internal-links", 'true',
"--ignore-junction-blocker", '1',
'--start', 'true',
'--quit-on-end', 'true',
# "--random",'true',
"-c", "sathorn_w.sumo.cfg",
# '-a',"sathon_wide_tls_20160418_edited.add(upperSurasak)_withoutLaneclose.xml",
'-a', "sathon_wide_tls_20160418_edited.add.xml",
'--time-to-teleport', "-1",
'--seed', str(random_number),
'--no-warnings','true'
]
# sumoCmd = [sumoBinary, "-c", "sathorn_w.sumo.cfg",'-a',"sathon_wide_tls_20160418_edited.add.xml",'--time-to-teleport',"-1"]
# sumoCmd = [sumoBinary, "-c", "sathorn_w.sumo.cfg"]
traci.start(sumoCmd)
step = 21600
import time
start_time = time.time()
while step <= 32400:
traci.simulationStep()
for freq in frequency:
if step % freq == 0:
for pcent in percentage:
percent = parseFloat(pcent)
for road, links in POIEdges.items():
temp = []
vList = []
for link in links:
IDs = list(traci.edge.getLastStepVehicleIDs(link))
vList.extend(IDs)
# print(vList)
temp.append(len(list(traci.edge.getLastStepVehicleIDs(link))))
###### This code segment has issues about replicated vehicle IDs##########
# random_v = []
# g = (r(0, len(vList) - 1) for _ in range(int(len(vList) * (percent))))
# for i in g:
# random_v.append(vList[i])
# print(percent,random_v)
##########################################################################
random_v = random.sample(vList, int(len(vList) * (percent)))
# print(percent,random_v)
totalSpeed = 0.0
for v in random_v:
totalSpeed += float(traci.vehicle.getSpeed(v))
if (len(random_v)) > 0:
meanSpeed = float(totalSpeed / int(len(random_v)))
else:
meanSpeed = -1.00
format_time = datetime.strptime(getTime(float(step)), '%H:%M:%S')
time = format_time.time()
if meanSpeed * 3.6 <= 5 and meanSpeed >= 0:
low_meanSpeed = 1
else:
low_meanSpeed = 0
persistent_low_meanSpeed = 0
myfile = open(
dirpath + '/' + str(
outputFile + 1) + '/' + road + '_' + str(
freq) + '_' + pcent + '.csv', 'a', newline='')
writer = csv.writer(myfile)
with myfile:
writer.writerow(
[time, step, *temp, int(len(random_v)), meanSpeed, low_meanSpeed,
persistent_low_meanSpeed])
myfile.close()
step += 1
traci.close()
import time
print("--- %s seconds ---" % (time.time() - start_time))
if __name__=="__main__":
main()
|
en
| 0.414793
|
# to get the current directory #this function is to get the time string like h:m:s #======================================================================================== #======================================================================================== #main functin #1','L197#2'], #1','L58#2'], #1','L10149#2'], #################### this code block is to keep random seed number permanently#################### # print(random_df.columns) #randomlist.append(r) ################################################################################################## # sumoBinary = sumolib.checkBinary('sumo-gui') # "--random",'true', # '-a',"sathon_wide_tls_20160418_edited.add(upperSurasak)_withoutLaneclose.xml", # sumoCmd = [sumoBinary, "-c", "sathorn_w.sumo.cfg",'-a',"sathon_wide_tls_20160418_edited.add.xml",'--time-to-teleport',"-1"] # sumoCmd = [sumoBinary, "-c", "sathorn_w.sumo.cfg"] # print(vList) ###### This code segment has issues about replicated vehicle IDs########## # random_v = [] # g = (r(0, len(vList) - 1) for _ in range(int(len(vList) * (percent)))) # for i in g: # random_v.append(vList[i]) # print(percent,random_v) ########################################################################## # print(percent,random_v)
| 2.556447
| 3
|
tests/test_settings.py
|
fullstack-commit/django-bootstarp-admin
| 1
|
6627786
|
# -*- coding: utf-8 -*-
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import override_settings, TestCase
from admin_interface.settings import check_installed_apps
class AdminInterfaceSettingsTestCase(TestCase):
    """Verify that check_installed_apps() accepts or rejects INSTALLED_APPS
    combinations according to the running Django version."""
    DJANGO_APPS = [
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.messages',
        'django.contrib.sessions',
    ]
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def __test_installed_apps(self):
        # A configuration is invalid when colorfield is absent, or when the
        # presence of the 'flat' / 'flat_responsive' compatibility apps does
        # not match the Django version range that needs them.
        version = django.VERSION
        apps = settings.INSTALLED_APPS
        expect_error = (
            'colorfield' not in apps
            or ('flat' not in apps and version < (1, 9))
            or ('flat' in apps and version >= (1, 9))
            or ('flat_responsive' not in apps and version < (2, 0))
            or ('flat_responsive' in apps and version >= (2, 0))
        )
        if expect_error:
            self.assertRaises(ImproperlyConfigured, check_installed_apps)
        else:
            check_installed_apps()
    @override_settings(
        INSTALLED_APPS=[
            'admin_interface',
            'colorfield',
            'flat',
            'flat_responsive',
        ] + DJANGO_APPS
    )
    def test_installed_apps_all(self):
        self.__test_installed_apps()
    @override_settings(
        INSTALLED_APPS=[
            'admin_interface',
            # 'colorfield',
            'flat',
            'flat_responsive',
        ] + DJANGO_APPS
    )
    def test_installed_apps_no_colorfield(self):
        self.__test_installed_apps()
    @override_settings(
        INSTALLED_APPS=[
            'admin_interface',
            'colorfield',
            # 'flat',
            'flat_responsive',
        ] + DJANGO_APPS
    )
    def test_installed_apps_no_flat(self):
        self.__test_installed_apps()
    @override_settings(
        INSTALLED_APPS=[
            'admin_interface',
            'colorfield',
            'flat',
            # 'flat_responsive',
        ] + DJANGO_APPS
    )
    def test_installed_apps_no_flat_responsive(self):
        self.__test_installed_apps()
|
# -*- coding: utf-8 -*-
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import override_settings, TestCase
from admin_interface.settings import check_installed_apps
class AdminInterfaceSettingsTestCase(TestCase):
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
]
def setUp(self):
pass
def tearDown(self):
pass
def __test_installed_apps(self):
dj_version = django.VERSION
installed_apps = settings.INSTALLED_APPS
if 'colorfield' not in installed_apps:
self.assertRaises(ImproperlyConfigured, check_installed_apps)
elif 'flat' not in installed_apps and dj_version < (1, 9):
self.assertRaises(ImproperlyConfigured, check_installed_apps)
elif 'flat' in installed_apps and dj_version >= (1, 9):
self.assertRaises(ImproperlyConfigured, check_installed_apps)
elif 'flat_responsive' not in installed_apps and dj_version < (2, 0):
self.assertRaises(ImproperlyConfigured, check_installed_apps)
elif 'flat_responsive' in installed_apps and dj_version >= (2, 0):
self.assertRaises(ImproperlyConfigured, check_installed_apps)
else:
check_installed_apps()
@override_settings(
INSTALLED_APPS = [
'admin_interface',
'colorfield',
'flat',
'flat_responsive',
] + DJANGO_APPS
)
def test_installed_apps_all(self):
self.__test_installed_apps()
@override_settings(
INSTALLED_APPS = [
'admin_interface',
# 'colorfield',
'flat',
'flat_responsive',
] + DJANGO_APPS
)
def test_installed_apps_no_colorfield(self):
self.__test_installed_apps()
@override_settings(
INSTALLED_APPS = [
'admin_interface',
'colorfield',
# 'flat',
'flat_responsive',
] + DJANGO_APPS
)
def test_installed_apps_no_flat(self):
self.__test_installed_apps()
@override_settings(
INSTALLED_APPS = [
'admin_interface',
'colorfield',
'flat',
# 'flat_responsive',
] + DJANGO_APPS
)
def test_installed_apps_no_flat_responsive(self):
self.__test_installed_apps()
|
en
| 0.357801
|
# -*- coding: utf-8 -*- # 'colorfield', # 'flat', # 'flat_responsive',
| 2.197598
| 2
|
model.py
|
BergesIrani/BehavioralClone
| 0
|
6627787
|
import csv
import cv2
import numpy as np
import argparse
import sklearn
import tensorflow as tf
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, Lambda
from keras.layers import Convolution2D, Cropping2D, MaxPooling2D
from math import ceil
def get_generator(folder):
    """Build a batch-generator factory over a driving log.

    Reads ``<folder>/driving_log.csv`` (skipping the header row), splits the
    rows 80/20 into train/validation samples and returns the ``generator``
    factory plus the two sample lists.

    Each yielded batch contains, per log row, the center/left/right camera
    images with a steering-angle correction applied to the side cameras,
    plus a horizontally flipped copy of every image with the negated angle
    (data augmentation), as shuffled numpy arrays (X, y).
    """
    lines = []
    with open(folder + '/driving_log.csv') as csvfile:
        reader = csv.reader(csvfile)
        next(reader, None)  # skip the CSV header row
        for line in reader:
            lines.append(line)
    train_samples, validation_samples = train_test_split(lines, test_size=0.2)
    print(len(lines))
    correction = 0.2  # steering offset for the left/right camera images

    def generator(samples, batch_size=32):
        # BUG FIX: the original body iterated over ``lines`` (the full data
        # set) instead of the ``samples`` argument, so the train/validation
        # split was silently ignored by both generators.
        num_samples = len(samples)
        while True:  # loop forever so the generator never terminates
            shuffle(samples)
            for offset in range(0, num_samples, batch_size):
                batch_samples = samples[offset:offset + batch_size]
                images = []
                angles = []
                for batch_sample in batch_samples:
                    for i in range(3):  # 0=center, 1=left, 2=right camera
                        name = folder + '/IMG/' + batch_sample[i].split('\\')[-1]
                        image = cv2.imread(name)
                        angle = float(batch_sample[3])
                        images.append(image)
                        if i == 1:
                            angle += correction
                        elif i == 2:
                            angle -= correction
                        angles.append(angle)
                        # augment: mirror the image, negate the angle
                        image_flipped = np.fliplr(image)
                        angle_flipped = -angle
                        images.append(image_flipped)
                        angles.append(angle_flipped)
                # trim image to only see section with road
                X_train = np.array(images)
                y_train = np.array(angles)
                yield shuffle(X_train, y_train)

    return generator, train_samples, validation_samples
def get_model(generator, train_samples, validation_samples):
    """Build and compile the NVIDIA-style CNN for steering-angle regression.

    The ``generator``/sample arguments are unused here but kept so the
    call signature matches the caller.
    """
    model = Sequential()
    # normalise pixels to [-0.5, 0.5], then crop sky (top 70) / hood (bottom 25)
    model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape=(160, 320, 3)))
    # convolutional feature extractor: (filters, kernel size, stride)
    conv_specs = (
        (24, 5, (2, 2)),
        (36, 5, (2, 2)),
        (48, 5, (2, 2)),
        (64, 3, (1, 1)),
        (64, 3, (1, 1)),
    )
    for filters, kernel, stride in conv_specs:
        model.add(Convolution2D(filters, kernel, kernel, subsample=stride,
                                activation="relu", border_mode='same'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
    model.add(Flatten())
    # fully connected regression head ending in a single steering value
    for units in (100, 50, 10, 1):
        model.add(Dense(units))
    model.compile(loss='mse', optimizer='adam')
    return model
if __name__ == '__main__':
    # CLI: model path (required), optional image folder, and --update to
    # continue training an existing saved model instead of building new.
    parser = argparse.ArgumentParser(description='Model Training')
    parser.add_argument(
        'model',
        type=str,
        help='Path to model h5 file. Model should be on the same path.'
    )
    parser.add_argument(
        'image_folder',
        type=str,
        nargs='?',
        default='',
        help='Path to image folder. This is where the images for training will be pulled from.'
    )
    parser.add_argument(
        '--update',
        action='store_true')
    args = parser.parse_args()
    generator, train_samples, validation_samples = get_generator(args.image_folder)
    print("created generator")
    if args.update:
        model = load_model(args.model)
        print("model loaded")
    else:
        model = get_model(generator, train_samples, validation_samples)
        print("model generated")
    # Set our batch size
    batch_size=32
    train_generator = generator(train_samples, batch_size=batch_size)
    validation_generator = generator(validation_samples, batch_size=batch_size)
    # NOTE(review): this mixes Keras 1 argument names (samples_per_epoch,
    # nb_val_samples) with the Keras 2 name `epochs` -- confirm the targeted
    # Keras version; on Keras 2 these should be steps_per_epoch /
    # validation_steps and are counted in batches, not samples.
    model.fit_generator(train_generator,
                        samples_per_epoch=len(train_samples),
                        validation_data=validation_generator,
                        nb_val_samples=len(validation_samples),
                        epochs=5, verbose=1)
    model.save(args.model)
    print("model saved as " + args.model)
|
import csv
import cv2
import numpy as np
import argparse
import sklearn
import tensorflow as tf
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, Lambda
from keras.layers import Convolution2D, Cropping2D, MaxPooling2D
from math import ceil
def get_generator(folder):
lines = []
with open(folder + '/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader, None)
for line in reader:
lines.append(line)
train_samples, validation_samples = train_test_split(lines, test_size=0.2)
print(len(lines))
correction = 0.2
def generator(samples, batch_size=32):
num_samples = len(lines)
while 1: # Loop forever so the generator never terminates
shuffle(lines)
for offset in range(0, num_samples, batch_size):
batch_samples = lines[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
for i in range(3):
name = folder + '/IMG/'+batch_sample[i].split('\\')[-1]
image = cv2.imread(name)
angle = float(batch_sample[3])
images.append(image)
if i == 1:
angle += correction
elif i == 2:
angle -= correction
angles.append(angle)
image_flipped = np.fliplr(image)
angle_flipped = -angle
images.append(image_flipped)
angles.append(angle_flipped)
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(angles)
yield shuffle(X_train, y_train)
return generator, train_samples, validation_samples
def get_model(generator, train_samples, validation_samples):
model = Sequential()
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25), (0,0)), input_shape=(160,320,3)))
model.add(Convolution2D(24,5,5,subsample=(2,2),activation="relu", border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(36,5,5,subsample=(2,2),activation="relu", border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(48,5,5,subsample=(2,2),activation="relu", border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(64,3,3,activation="relu", border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(64,3,3,activation="relu", border_mode='same'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
return model
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Model Training')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images for training will be pulled from.'
)
parser.add_argument(
'--update',
action='store_true')
args = parser.parse_args()
generator, train_samples, validation_samples = get_generator(args.image_folder)
print("created generator")
if args.update:
model = load_model(args.model)
print("model loaded")
else:
model = get_model(generator, train_samples, validation_samples)
print("model generated")
# Set our batch size
batch_size=32
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
model.fit_generator(train_generator,
samples_per_epoch=len(train_samples),
validation_data=validation_generator,
nb_val_samples=len(validation_samples),
epochs=5, verbose=1)
model.save(args.model)
print("model saved as " + args.model)
|
en
| 0.77605
|
# Loop forever so the generator never terminates # trim image to only see section with road # Set our batch size
| 2.785485
| 3
|
fytnet/migrations/0022_auto_20210327_1607.py
|
Code-Institute-Submissions/danielboots-fytletic
| 1
|
6627788
|
# Generated by Django 3.1.6 on 2021-03-27 16:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.6, 2021-03-27).

    Renames Fighter.video to video_1, adds four more optional video URL
    fields (video_2..video_5), and alters Fighter.user to a cascading
    OneToOneField against the configured user model.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('fytnet', '0021_auto_20210327_1538'),
    ]
    operations = [
        migrations.RenameField(
            model_name='fighter',
            old_name='video',
            new_name='video_1',
        ),
        migrations.AddField(
            model_name='fighter',
            name='video_2',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='fighter',
            name='video_3',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='fighter',
            name='video_4',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='fighter',
            name='video_5',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='fighter',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# Generated by Django 3.1.6 on 2021-03-27 16:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('fytnet', '0021_auto_20210327_1538'),
]
operations = [
migrations.RenameField(
model_name='fighter',
old_name='video',
new_name='video_1',
),
migrations.AddField(
model_name='fighter',
name='video_2',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='fighter',
name='video_3',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='fighter',
name='video_4',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='fighter',
name='video_5',
field=models.URLField(blank=True, null=True),
),
migrations.AlterField(
model_name='fighter',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
en
| 0.830193
|
# Generated by Django 3.1.6 on 2021-03-27 16:07
| 1.556539
| 2
|
Nova pasta (2)/maian.py
|
cristest/python
| 0
|
6627789
|
from alunos import Aluno
from pessoas import Pessoa
from professores import Professor
from diciplinas import Disciplina
# Exercise the Aluno/Professor/Disciplina classes end to end.
# NOTE(review): some names used below (Matricola, adiciona_Diciplina) are
# never imported/defined in the modules above -- confirm against the actual
# class definitions before running.
aluno1 = Aluno()
aluno1.altera_celular('55555555555')
aluno1.nome = 'Cristopher'
celular_aluno = aluno1.retorna_celular()
print(celular_aluno)

professor1 = Professor()
professor1.nome = 'Cristopher'

lp2 = Disciplina()
lp2.altera_nome('linguagem de programação 2')
# TODO(review): 'adiciona_Diciplina' (capital D) is inconsistent with the
# lower-case 'adiciona_diciplina' call below -- one of the two is a typo.
professor1.adiciona_Diciplina(lp2)

lp1 = Disciplina()
lp1.altera_nome("limguagem de programação 1")
professor1.adiciona_diciplina(lp1)

lista_disciplinas = professor1.disciplinas_professor()
for disciplina in lista_disciplinas:
    print(disciplina.retorna_nome())

disciplina_remover = Disciplina()
disciplina_remover.altera_nome('Linguagem de Programação 1')
# BUG FIX: the original passed the undefined name 'disciplinas' here.
professor1.remove_disciplina(disciplina_remover)
# BUG FIX: 'prinrt' was a typo for 'print'.
print(disciplina == lp1)
# (a stray, broken module-level 'def remove_disciplina(self, ...)' that
# called a Disciplina instance as a function was removed here)

# TODO(review): Matricola is never imported -- add the proper import.
matricola = Matricola()
matricola.altera_aluno(aluno1)
matricola.altera_disciplina(lp2)
# BUG FIX: the accessor methods must be CALLED; the original printed the
# bound method objects instead of their return values.
disciplinaAluno = matricola.retorna_value()
print('Disciplina aluno: ', disciplinaAluno.retorna_nome())
|
from alunos import Aluno
from pessoas import Pessoa
from professores import Professor
from diciplinas import Disciplina
aluno1 = Aluno()
aluno1.altera_celular('55555555555')
aluno1.nome = 'Cristopher'
celular_aluno = aluno1.retorna_celular()
print(celular_aluno)
professor1 = Professor()
professor1.nome = 'Cristopher'
lp2 = Disciplina()
lp2.altera_nome('linguagem de programação 2')
professor1.adiciona_Diciplina(lp2)
lp1 = Disciplina()
lp1.altera_nome("limguagem de programação 1")
professor1.adiciona_diciplina(lp1)
lista_disciplinas = professor1.disciplinas_professor()
for disciplina in lista_disciplinas:
print(disciplina.retorna_nome())
disciplina_remover = Disciplina()
disciplina_remover.altera_nome('Linguagem de Programação 1')
professor1.remove_disciplina(disciplinas)
prinrt(disciplina == lp1)
def remove_disciplina(self, disciplinas):
disciplina_remover(disciplinas)
matricola = Matricola()
matricola.altera_aluno(aluno1)
matricola.altera_disciplina(lp2)
disciplinaAluno = matricola.retorna_value
print('Disciplina aluno: ', disciplinaAluno.retorna_nome)
|
none
| 1
| 3.113082
| 3
|
|
ribosome/util/menu/codes.py
|
tek/ribosome-py
| 0
|
6627790
|
from amino import List, Map
# Vim terminal key codes -- raw byte sequences (0x80-prefixed termcap codes)
# or plain ASCII ints -- mapped to the key-notation names used by the menu.
special_codes = Map({
    b'\x80\xffX': 'c-@',
    b'\x80kb': 'bs',
    9: 'tab',
    b'\x80kB': 's-tab',
    10: 'c-j',
    11: 'c-k',
    12: 'fe',
    13: 'cr',
    27: 'esc',
    32: 'space',
    60: 'lt',
    92: 'bslash',
    124: 'bar',
    b'\x0b': 'c-k',
    b'\x80kD': 'del',
    b'\x9B': 'csi',
    b'\x80\xfdP': 'xcsi',
    b'\x80ku': 'up',
    b'\x80kd': 'down',
    b'\x80kl': 'left',
    b'\x80kr': 'right',
    # NOTE(review): the next two entries use the IDENTICAL key b'\x80\xfd',
    # so the 's-up' mapping is silently overwritten by 's-down'. Both appear
    # to be missing their trailing code byte -- confirm the correct
    # sequences against Vim's keymap.h before fixing.
    b'\x80\xfd': 's-up',
    b'\x80\xfd': 's-down',
    b'\x80#4': 's-left',
    b'\x80%i': 's-right',
    b'\x80\xfdT': 'c-left',
    b'\x80\xfdU': 'c-right',
    # function keys F1-F12
    b'\x80k1': 'f1',
    b'\x80k2': 'f2',
    b'\x80k3': 'f3',
    b'\x80k4': 'f4',
    b'\x80k5': 'f5',
    b'\x80k6': 'f6',
    b'\x80k7': 'f7',
    b'\x80k8': 'f8',
    b'\x80k9': 'f9',
    b'\x80k;': 'f10',
    b'\x80F1': 'f11',
    b'\x80F2': 'f12',
    # shifted function keys
    b'\x80\xfd\x06': 's-f1',
    b'\x80\xfd\x07': 's-f2',
    b'\x80\xfd\x08': 's-f3',
    b'\x80\xfd\x09': 's-f4',
    b'\x80\xfd\x0A': 's-f5',
    b'\x80\xfd\x0B': 's-f6',
    b'\x80\xfd\x0C': 's-f7',
    b'\x80\xfd\x0D': 's-f8',
    b'\x80\xfd\x0E': 's-f9',
    b'\x80\xfd\x0F': 's-f10',
    b'\x80\xfd\x10': 's-f11',
    b'\x80\xfd\x11': 's-f12',
    b'\x80%1': 'help',
    b'\x80&8': 'undo',
    b'\x80kI': 'insert',
    b'\x80kh': 'home',
    b'\x80@7': 'end',
    b'\x80kP': 'pageup',
    b'\x80kN': 'pagedown',
    # keypad navigation and operator keys
    b'\x80K1': 'khome',
    b'\x80K4': 'kend',
    b'\x80K3': 'kpageup',
    b'\x80K5': 'kpagedown',
    b'\x80K6': 'kplus',
    b'\x80K7': 'kminus',
    b'\x80K9': 'kmultiply',
    b'\x80K8': 'kdivide',
    b'\x80KA': 'kenter',
    b'\x80KB': 'kpoint',
    # keypad digits 0-9
    b'\x80KC': 'k0',
    b'\x80KD': 'k1',
    b'\x80KE': 'k2',
    b'\x80KF': 'k3',
    b'\x80KG': 'k4',
    b'\x80KH': 'k5',
    b'\x80KI': 'k6',
    b'\x80KJ': 'k7',
    b'\x80KK': 'k8',
    b'\x80KL': 'k9',
})
# Modifier bitmask values (as sent in modifier escape sequences) -> names.
modifier_codes = List(
    (2, 'shift'),
    (4, 'control'),
    (8, 'alt'),
    (16, 'meta'),
    (32, 'mouse_double'),
    (64, 'mouse_triple'),
    (96, 'mouse_quadruple'),
    (128, 'command'),
)
__all__ = ('special_codes', 'modifier_codes',)
|
from amino import List, Map
special_codes = Map({
b'\x80\xffX': 'c-@',
b'\x80kb': 'bs',
9: 'tab',
b'\x80kB': 's-tab',
10: 'c-j',
11: 'c-k',
12: 'fe',
13: 'cr',
27: 'esc',
32: 'space',
60: 'lt',
92: 'bslash',
124: 'bar',
b'\x0b': 'c-k',
b'\x80kD': 'del',
b'\x9B': 'csi',
b'\x80\xfdP': 'xcsi',
b'\x80ku': 'up',
b'\x80kd': 'down',
b'\x80kl': 'left',
b'\x80kr': 'right',
b'\x80\xfd': 's-up',
b'\x80\xfd': 's-down',
b'\x80#4': 's-left',
b'\x80%i': 's-right',
b'\x80\xfdT': 'c-left',
b'\x80\xfdU': 'c-right',
b'\x80k1': 'f1',
b'\x80k2': 'f2',
b'\x80k3': 'f3',
b'\x80k4': 'f4',
b'\x80k5': 'f5',
b'\x80k6': 'f6',
b'\x80k7': 'f7',
b'\x80k8': 'f8',
b'\x80k9': 'f9',
b'\x80k;': 'f10',
b'\x80F1': 'f11',
b'\x80F2': 'f12',
b'\x80\xfd\x06': 's-f1',
b'\x80\xfd\x07': 's-f2',
b'\x80\xfd\x08': 's-f3',
b'\x80\xfd\x09': 's-f4',
b'\x80\xfd\x0A': 's-f5',
b'\x80\xfd\x0B': 's-f6',
b'\x80\xfd\x0C': 's-f7',
b'\x80\xfd\x0D': 's-f8',
b'\x80\xfd\x0E': 's-f9',
b'\x80\xfd\x0F': 's-f10',
b'\x80\xfd\x10': 's-f11',
b'\x80\xfd\x11': 's-f12',
b'\x80%1': 'help',
b'\x80&8': 'undo',
b'\x80kI': 'insert',
b'\x80kh': 'home',
b'\x80@7': 'end',
b'\x80kP': 'pageup',
b'\x80kN': 'pagedown',
b'\x80K1': 'khome',
b'\x80K4': 'kend',
b'\x80K3': 'kpageup',
b'\x80K5': 'kpagedown',
b'\x80K6': 'kplus',
b'\x80K7': 'kminus',
b'\x80K9': 'kmultiply',
b'\x80K8': 'kdivide',
b'\x80KA': 'kenter',
b'\x80KB': 'kpoint',
b'\x80KC': 'k0',
b'\x80KD': 'k1',
b'\x80KE': 'k2',
b'\x80KF': 'k3',
b'\x80KG': 'k4',
b'\x80KH': 'k5',
b'\x80KI': 'k6',
b'\x80KJ': 'k7',
b'\x80KK': 'k8',
b'\x80KL': 'k9',
})
modifier_codes = List(
(2, 'shift'),
(4, 'control'),
(8, 'alt'),
(16, 'meta'),
(32, 'mouse_double'),
(64, 'mouse_triple'),
(96, 'mouse_quadruple'),
(128, 'command'),
)
__all__ = ('special_codes', 'modifier_codes',)
|
pt
| 0.278865
|
#4': 's-left',
| 1.726244
| 2
|
lquery/extras/mongodb/core.py
|
Cologler/lquery-python
| 2
|
6627791
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018~2999 - Cologler <<EMAIL>>
# ----------
#
# ----------
import copy
from ...queryable import AbstractQueryable, ReduceInfo
from ...funcs import LinqQuery
from ...iterable import IterableQueryProvider
from ...expr import Make
from ...empty import EmptyQuery
from .._common import NotSupportError, AlwaysEmptyError
from .options import QueryOptions
from .visitors import QueryOptionsRootExprVisitor
class NextMongoDbQuery(AbstractQueryable):
    """A queryable whose expression tree has been translated into MongoDB
    query options executed against a pymongo collection."""
    def __init__(self, expr, collection, query_options):
        # query_options may be None/falsy; fall back to empty options
        super().__init__(expr, PROVIDER)
        self._collection = collection
        self._query_options = query_options or QueryOptions()
    def __str__(self):
        return f'IQueryable({self._collection})'
    def get_cursor(self):
        """Open a cursor on the collection configured by the accumulated
        query options."""
        cursor = self._query_options.get_cursor(self._collection)
        return cursor
    def __iter__(self):
        # iterate results straight off a fresh cursor
        yield from self.get_cursor()
    @property
    def collection(self):
        # the underlying pymongo collection (read-only)
        return self._collection
    @property
    def query_options(self):
        # the accumulated, database-side query options (read-only)
        return self._query_options
    def update_reduce_info(self, reduce_info: ReduceInfo):
        # this node's expression was reduced to a database-side query
        reduce_info.add_node(ReduceInfo.TYPE_SQL, self.expr)
class MongoDbQuery(NextMongoDbQuery):
    """Root queryable wrapping a raw collection with empty query options."""
    def __init__(self, collection):
        super().__init__(Make.ref(self), collection, QueryOptions())
    def update_reduce_info(self, reduce_info: ReduceInfo):
        # the root node is the data source itself, not a reduced query
        reduce_info.add_node(ReduceInfo.TYPE_SRC, self.expr)
class MongoDbQueryProvider(IterableQueryProvider):
    """Provider that tries to push where/skip/take operations into MongoDB
    query options, falling back to in-memory iteration when it cannot."""
    def create_query(self, expr):
        if expr.func.resolve_value() in (LinqQuery.where, LinqQuery.skip, LinqQuery.take):
            queryable = expr.args[0].value
            # deep-copy so a failed/partial translation cannot corrupt the
            # options of the upstream queryable
            query_options = copy.deepcopy(queryable.query_options)
            visitor = QueryOptionsRootExprVisitor(query_options)
            try:
                expr.accept(visitor)
                return NextMongoDbQuery(expr, queryable.collection, query_options)
            except AlwaysEmptyError as always_empty:
                # the predicate can never match -> short-circuit to empty
                return EmptyQuery(expr, always_empty.reason)
            except NotSupportError:
                # not translatable -> fall through to in-memory evaluation
                pass
        return super().create_query(expr)
# module-level singleton shared by every NextMongoDbQuery instance
PROVIDER = MongoDbQueryProvider()
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018~2999 - Cologler <<EMAIL>>
# ----------
#
# ----------
import copy
from ...queryable import AbstractQueryable, ReduceInfo
from ...funcs import LinqQuery
from ...iterable import IterableQueryProvider
from ...expr import Make
from ...empty import EmptyQuery
from .._common import NotSupportError, AlwaysEmptyError
from .options import QueryOptions
from .visitors import QueryOptionsRootExprVisitor
class NextMongoDbQuery(AbstractQueryable):
    """A queryable over a MongoDB collection, carrying the query options
    accumulated so far along the expression chain.
    """

    def __init__(self, expr, collection, query_options):
        super().__init__(expr, PROVIDER)
        self._collection = collection
        # Fall back to fresh, empty options when none were supplied
        self._query_options = query_options if query_options else QueryOptions()

    def __str__(self):
        return 'IQueryable({})'.format(self._collection)

    def get_cursor(self):
        """Builds a MongoDB cursor from the accumulated query options."""
        return self._query_options.get_cursor(self._collection)

    def __iter__(self):
        # Open the cursor lazily, on first iteration
        for doc in self.get_cursor():
            yield doc

    @property
    def collection(self):
        """The underlying MongoDB collection."""
        return self._collection

    @property
    def query_options(self):
        """The query options accumulated so far."""
        return self._query_options

    def update_reduce_info(self, reduce_info: ReduceInfo):
        # This node was reduced server-side, so tag it as a SQL-style step
        reduce_info.add_node(ReduceInfo.TYPE_SQL, self.expr)
class MongoDbQuery(NextMongoDbQuery):
    """The root queryable: wraps a bare collection with empty query options."""

    def __init__(self, collection):
        super().__init__(Make.ref(self), collection, QueryOptions())

    def update_reduce_info(self, reduce_info: ReduceInfo):
        # The root is the data source itself, not a reducible query step
        reduce_info.add_node(ReduceInfo.TYPE_SRC, self.expr)
class MongoDbQueryProvider(IterableQueryProvider):
    """Query provider that pushes where/skip/take onto the MongoDB server
    when translatable, falling back to in-memory iteration otherwise.
    """

    def create_query(self, expr):
        reducible = (LinqQuery.where, LinqQuery.skip, LinqQuery.take)
        if expr.func.resolve_value() not in reducible:
            # Not an op we can translate; evaluate it in memory
            return super().create_query(expr)
        source = expr.args[0].value
        # Deep-copy so the upstream queryable's options stay untouched
        options = copy.deepcopy(source.query_options)
        try:
            expr.accept(QueryOptionsRootExprVisitor(options))
        except AlwaysEmptyError as always_empty:
            # The predicate can never match; short-circuit to an empty query
            return EmptyQuery(expr, always_empty.reason)
        except NotSupportError:
            # Expression cannot be translated; fall back to in-memory
            return super().create_query(expr)
        return NextMongoDbQuery(expr, source.collection, options)


# Module-level singleton provider shared by all queryables in this module
PROVIDER = MongoDbQueryProvider()
|
en
| 0.454365
|
# -*- coding: utf-8 -*- # # Copyright (c) 2018~2999 - Cologler <<EMAIL>> # ---------- # # ----------
| 1.90782
| 2
|
fiftyone/utils/cityscapes.py
|
seantrue/fiftyone
| 0
|
6627792
|
"""
Utilities for working with the
`Cityscapes dataset <https://www.cityscapes-dataset.com>`_.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import logging
import os
import eta.core.serial as etas
import eta.core.utils as etau
import fiftyone.core.dataset as fod
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
import fiftyone.core.sample as fos
import fiftyone.core.utils as fou
import fiftyone.utils.data as foud
logger = logging.getLogger(__name__)
_IMAGES_ZIP = "leftImg8bit_trainvaltest.zip"
_FINE_ANNOS_ZIP = "gtFine_trainvaltest.zip"
_COARSE_ANNOS_ZIP = "gtCoarse.zip"
_PERSON_ANNOS_ZIP = "gtBbox_cityPersons_trainval.zip"
def parse_cityscapes_dataset(
    source_dir,
    dataset_dir,
    scratch_dir,
    splits,
    fine_annos=None,
    coarse_annos=None,
    person_annos=None,
):
    """Parses the Cityscapes archive(s) in the specified directory and writes
    the requested splits in subdirectories of ``dataset_dir`` in
    :class:`fiftyone.types.dataset_types.FiftyOneDataset` format.

    The archives must have been manually downloaded into the directory before
    this method is called.

    The ``source_dir`` should contain the following files::

        source_dir/
            leftImg8bit_trainvaltest.zip
            gtFine_trainvaltest.zip           # optional
            gtCoarse.zip                      # optional
            gtBbox_cityPersons_trainval.zip   # optional

    Args:
        source_dir: the directory containing the manually downloaded
            Cityscapes files
        dataset_dir: the directory in which to build the output dataset
        scratch_dir: a scratch directory to use for temporary files
        splits: a list of splits to parse. Supported values are
            ``(train, test, validation)``
        fine_annos (None): whether to load the fine annotations (True), or not
            (False), or only if the ZIP file exists (None)
        coarse_annos (None): whether to load the coarse annotations (True), or
            not (False), or only if the ZIP file exists (None)
        person_annos (None): whether to load the person detections (True), or
            not (False), or only if the ZIP file exists (None)

    Raises:
        OSError: if any required source files are not present
    """
    # Locate all archives up front so a missing required file fails fast
    (
        images_zip_path,
        fine_annos_zip_path,
        coarse_annos_zip_path,
        person_annos_zip_path,
    ) = _parse_source_dir(source_dir, fine_annos, coarse_annos, person_annos)
    # Map user-facing split names to on-disk names ("validation" -> "val")
    _splits = [_parse_split(s) for s in splits]
    images_dir = _extract_images(images_zip_path, scratch_dir)
    if fine_annos_zip_path:
        fine_annos_dir = _extract_fine_annos(fine_annos_zip_path, scratch_dir)
    else:
        fine_annos_dir = None
    if coarse_annos_zip_path:
        coarse_annos_dir = _extract_coarse_annos(
            coarse_annos_zip_path, scratch_dir
        )
    else:
        coarse_annos_dir = None
    if person_annos_zip_path:
        person_annos_dir = _extract_person_annos(
            person_annos_zip_path, scratch_dir
        )
    else:
        person_annos_dir = None
    for split, _split in zip(splits, _splits):
        # Output dir uses the user-facing name; parsing uses the on-disk name
        split_dir = os.path.join(dataset_dir, split)
        _export_split(
            _split,
            split_dir,
            images_dir,
            fine_annos_dir,
            coarse_annos_dir,
            person_annos_dir,
        )
def _parse_source_dir(source_dir, fine_annos, coarse_annos, person_annos):
    """Locates the Cityscapes archives within ``source_dir``.

    Args:
        source_dir: the directory containing the manually downloaded archives
        fine_annos: whether the fine annotations ZIP is required (True),
            skipped (False), or used only if present (None)
        coarse_annos: the same tri-state flag for the coarse annotations ZIP
        person_annos: the same tri-state flag for the person annotations ZIP

    Returns:
        a tuple of (images, fine, coarse, person) ZIP paths, where each of
        the three optional entries is None when not requested/found

    Raises:
        OSError: if a required archive is missing
    """
    if source_dir is None:
        _raise_cityscapes_error(
            "You must provide a `source_dir` in order to load the Cityscapes "
            "dataset."
        )
    if not os.path.isdir(source_dir):
        _raise_cityscapes_error(
            "Source directory '%s' does not exist." % source_dir
        )
    files = etau.list_files(source_dir)
    # The images archive is always required
    if _IMAGES_ZIP not in files:
        _raise_cityscapes_error(
            "Images zip '%s' not found within '%s'."
            % (_IMAGES_ZIP, source_dir)
        )
    images_zip_path = os.path.join(source_dir, _IMAGES_ZIP)
    # The three annotation archives share identical tri-state resolution
    # logic, so handle them with a single helper instead of three copies
    fine_annos_zip_path = _find_optional_zip(
        source_dir, files, _FINE_ANNOS_ZIP, fine_annos, "Fine"
    )
    coarse_annos_zip_path = _find_optional_zip(
        source_dir, files, _COARSE_ANNOS_ZIP, coarse_annos, "Coarse"
    )
    person_annos_zip_path = _find_optional_zip(
        source_dir, files, _PERSON_ANNOS_ZIP, person_annos, "Person"
    )
    return (
        images_zip_path,
        fine_annos_zip_path,
        coarse_annos_zip_path,
        person_annos_zip_path,
    )


def _find_optional_zip(source_dir, files, zip_name, flag, label):
    """Resolves one optional annotations archive.

    ``flag`` is tri-state: True (required; raises if absent), False
    (skipped), or None (used only if ``zip_name`` appears in ``files``).
    Returns the full archive path, or None when the archive is skipped.
    """
    if flag is None:
        flag = zip_name in files
    if not flag:
        return None
    if zip_name not in files:
        _raise_cityscapes_error(
            "%s annotations zip '%s' not found within '%s'."
            % (label, zip_name, source_dir)
        )
    return os.path.join(source_dir, zip_name)
def _raise_cityscapes_error(msg):
raise OSError(
"\n\n"
+ msg
+ "\n\n"
+ "You must download the source files for the Cityscapes dataset "
"manually."
+ "\n\n"
+ "Run `fiftyone zoo info cityscapes` for more information"
)
def _parse_split(split):
if split == "validation":
return "val"
if split not in ("test", "train"):
raise ValueError(
"Invalid split '%s''; supported values are %s"
% (split, ("train", "test", "validation"))
)
return split
def _export_split(
    split,
    split_dir,
    images_dir,
    fine_annos_dir,
    coarse_annos_dir,
    person_annos_dir,
):
    """Writes one split to ``split_dir`` in FiftyOneDataset format.

    ``split`` is the on-disk Cityscapes split name ("train"/"test"/"val").
    Each annotation dir may be None, in which case that label field is
    omitted from the exported samples.
    """
    images_map = _parse_images(images_dir, split)
    if fine_annos_dir:
        fine_annos_map = _parse_fine_annos(fine_annos_dir, split)
    else:
        fine_annos_map = {}
    if coarse_annos_dir:
        coarse_annos_map = _parse_coarse_annos(coarse_annos_dir, split)
    else:
        coarse_annos_map = {}
    if person_annos_dir:
        person_annos_map = _parse_person_annos(person_annos_dir, split)
    else:
        person_annos_map = {}
    # Temporary dataset used only to declare the sample schema that the
    # exporter logs via `log_collection()`; it is deleted at the end
    dataset = fod.Dataset()
    dataset.media_type = fom.IMAGE
    has_fine_annos = bool(fine_annos_map)
    has_coarse_annos = bool(coarse_annos_map)
    has_person_annos = bool(person_annos_map)
    if has_fine_annos:
        dataset.add_sample_field(
            "gt_fine",
            fof.EmbeddedDocumentField,
            embedded_doc_type=fol.Polylines,
        )
    if has_coarse_annos:
        dataset.add_sample_field(
            "gt_coarse",
            fof.EmbeddedDocumentField,
            embedded_doc_type=fol.Polylines,
        )
    if has_person_annos:
        dataset.add_sample_field(
            "gt_person",
            fof.EmbeddedDocumentField,
            embedded_doc_type=fol.Detections,
        )
    # Export in sorted UUID order for deterministic output
    uuids = sorted(images_map.keys())
    logger.info("Finalizing split '%s'...", split)
    exporter = foud.FiftyOneDatasetExporter(split_dir, move_media=False)
    pb = fou.ProgressBar()
    with exporter, pb:
        exporter.log_collection(dataset)
        for uuid in pb(uuids):
            sample = fos.Sample(filepath=images_map[uuid])
            # An image with no matching annotation file gets a None label
            if has_fine_annos:
                sample["gt_fine"] = fine_annos_map.get(uuid, None)
            if has_coarse_annos:
                sample["gt_coarse"] = coarse_annos_map.get(uuid, None)
            if has_person_annos:
                sample["gt_person"] = person_annos_map.get(uuid, None)
            exporter.export_sample(sample)
    # The temporary dataset was only needed for schema bookkeeping
    dataset.delete()
def _extract_images(images_zip_path, scratch_dir):
    """Extracts the images archive into the scratch area and returns the
    resulting ``leftImg8bit`` directory (skips extraction if already done).
    """
    out_dir = os.path.join(scratch_dir, "images")
    images_dir = os.path.join(out_dir, "leftImg8bit")
    if os.path.isdir(images_dir):
        # Already extracted by a previous run; reuse it
        return images_dir
    logger.info("Extracting images...")
    etau.extract_zip(images_zip_path, outdir=out_dir, delete_zip=False)
    return images_dir
def _extract_fine_annos(fine_annos_zip_path, scratch_dir):
    """Extracts the fine annotations archive and returns its ``gtFine``
    directory (skips extraction if already done).
    """
    out_dir = os.path.join(scratch_dir, "fine-annos")
    fine_annos_dir = os.path.join(out_dir, "gtFine")
    if os.path.isdir(fine_annos_dir):
        # Already extracted by a previous run; reuse it
        return fine_annos_dir
    logger.info("Extracting fine annotations...")
    etau.extract_zip(fine_annos_zip_path, outdir=out_dir, delete_zip=False)
    return fine_annos_dir
def _extract_coarse_annos(coarse_annos_zip_path, scratch_dir):
    """Extracts the coarse annotations archive and returns its ``gtCoarse``
    directory (skips extraction if already done).
    """
    out_dir = os.path.join(scratch_dir, "coarse-annos")
    coarse_annos_dir = os.path.join(out_dir, "gtCoarse")
    if os.path.isdir(coarse_annos_dir):
        # Already extracted by a previous run; reuse it
        return coarse_annos_dir
    logger.info("Extracting coarse annotations...")
    etau.extract_zip(coarse_annos_zip_path, outdir=out_dir, delete_zip=False)
    return coarse_annos_dir
def _extract_person_annos(person_annos_zip_path, scratch_dir):
    """Extracts the person annotations archive and returns its
    ``gtBboxCityPersons`` directory (skips extraction if already done).
    """
    out_dir = os.path.join(scratch_dir, "person-annos")
    person_annos_dir = os.path.join(out_dir, "gtBboxCityPersons")
    if os.path.isdir(person_annos_dir):
        # Already extracted by a previous run; reuse it
        return person_annos_dir
    logger.info("Extracting person annotations...")
    etau.extract_zip(person_annos_zip_path, outdir=out_dir, delete_zip=False)
    return person_annos_dir
def _parse_images(images_dir, split):
    """Returns a dict mapping image UUIDs to image paths for ``split``."""
    pattern = os.path.join(images_dir, split, "*", "*")
    # Filenames look like "<uuid>_leftImg8bit.<ext>"; strip the suffix
    # marker from the stem to recover the UUID
    return {
        os.path.splitext(os.path.basename(p))[0][: -len("_leftImg8bit")]: p
        for p in etau.get_glob_matches(pattern)
    }
def _parse_fine_annos(fine_annos_dir, split):
    """Parses the fine polygon annotations for ``split`` into a UUID map."""
    return _parse_polygon_annos(
        os.path.join(fine_annos_dir, split, "*", "*.json"),
        split,
        "fine",
        "_gtFine_polygons",
    )
def _parse_coarse_annos(coarse_annos_dir, split):
    """Parses the coarse polygon annotations for ``split`` into a UUID map."""
    return _parse_polygon_annos(
        os.path.join(coarse_annos_dir, split, "*", "*.json"),
        split,
        "coarse",
        "_gtCoarse_polygons",
    )
def _parse_polygon_annos(glob_patt, split, anno_type, suffix):
    """Parses all polygon annotation files matching ``glob_patt`` into a
    dict mapping image UUIDs to Polylines.

    ``suffix`` is the filename marker (e.g. "_gtFine_polygons") stripped
    from each JSON file's stem to recover the image UUID.
    """
    anno_paths = etau.get_glob_matches(glob_patt)
    if not anno_paths:
        return {}
    logger.info("Parsing %s annotations for split '%s'...", anno_type, split)
    trim = len(suffix)
    annos_map = {}
    with fou.ProgressBar() as pb:
        for path in pb(anno_paths):
            stem = os.path.splitext(os.path.basename(path))[0]
            annos_map[stem[:-trim]] = _parse_polygons_file(path)
    return annos_map
def _parse_person_annos(person_annos_dir, split):
    """Parses the person bbox annotations for ``split`` into a dict mapping
    image UUIDs to Detections.
    """
    pattern = os.path.join(person_annos_dir, split, "*", "*.json")
    anno_paths = etau.get_glob_matches(pattern)
    if not anno_paths:
        return {}
    logger.info("Parsing person annotations for split '%s'...", split)
    trim = len("_gtBboxCityPersons")
    detections_map = {}
    with fou.ProgressBar() as pb:
        for path in pb(anno_paths):
            # Strip the "_gtBboxCityPersons" marker to recover the UUID
            stem = os.path.splitext(os.path.basename(path))[0]
            detections_map[stem[:-trim]] = _parse_bbox_file(path)
    return detections_map
def _parse_polygons_file(json_path):
    """Loads a Cityscapes polygons JSON file as Polylines with coordinates
    normalized to [0, 1] by the image dimensions.
    """
    d = etas.load_json(json_path)
    w, h = d["imgWidth"], d["imgHeight"]
    polylines = [
        fol.Polyline(
            label=obj["label"],
            # One closed, filled shell per object, in relative coordinates
            points=[[(x / w, y / h) for x, y in obj["polygon"]]],
            closed=True,
            filled=True,
        )
        for obj in d.get("objects", [])
    ]
    return fol.Polylines(polylines=polylines)
def _parse_bbox_file(json_path):
    """Loads a Cityscapes person-bbox JSON file as Detections with bounding
    boxes normalized to [0, 1] by the image dimensions.
    """
    d = etas.load_json(json_path)
    w, h = d["imgWidth"], d["imgHeight"]
    detections = []
    for obj in d.get("objects", []):
        x, y, bw, bh = obj["bbox"]
        detections.append(
            fol.Detection(
                label=obj["label"],
                # Convert absolute pixel coords to relative [x, y, w, h]
                bounding_box=[x / w, y / h, bw / w, bh / h],
            )
        )
    return fol.Detections(detections=detections)
|
"""
Utilities for working with the
`Cityscapes dataset <https://www.cityscapes-dataset.com>`_.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import logging
import os
import eta.core.serial as etas
import eta.core.utils as etau
import fiftyone.core.dataset as fod
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
import fiftyone.core.sample as fos
import fiftyone.core.utils as fou
import fiftyone.utils.data as foud
logger = logging.getLogger(__name__)
_IMAGES_ZIP = "leftImg8bit_trainvaltest.zip"
_FINE_ANNOS_ZIP = "gtFine_trainvaltest.zip"
_COARSE_ANNOS_ZIP = "gtCoarse.zip"
_PERSON_ANNOS_ZIP = "gtBbox_cityPersons_trainval.zip"
def parse_cityscapes_dataset(
source_dir,
dataset_dir,
scratch_dir,
splits,
fine_annos=None,
coarse_annos=None,
person_annos=None,
):
"""Parses the Cityscapes archive(s) in the specified directory and writes
the requested splits in subdirectories of ``dataset_dir`` in
:class:`fiftyone.types.dataset_types.FiftyOneDataset` format.
The archives must have been manually downloaded into the directory before
this method is called.
The ``source_dir`` should contain the following files::
source_dir/
leftImg8bit_trainvaltest.zip
gtFine_trainvaltest.zip # optional
gtCoarse.zip # optional
gtBbox_cityPersons_trainval.zip # optional
Args:
source_dir: the directory continaining the manually downloaded
Cityscapes files
dataset_dir: the directory in which to build the output dataset
scratch_dir: a scratch directory to use for temporary files
splits: a list of splits to parse. Supported values are
``(train, test, validation)``
fine_annos (None): whether to load the fine annotations (True), or not
(False), or only if the ZIP file exists (None)
coarse_annos (None): whether to load the coarse annotations (True), or
not (False), or only if the ZIP file exists (None)
person_annos (None): whether to load the personn detections (True), or
not (False), or only if the ZIP file exists (None)
Raises:
OSError: if any required source files are not present
"""
(
images_zip_path,
fine_annos_zip_path,
coarse_annos_zip_path,
person_annos_zip_path,
) = _parse_source_dir(source_dir, fine_annos, coarse_annos, person_annos)
_splits = [_parse_split(s) for s in splits]
images_dir = _extract_images(images_zip_path, scratch_dir)
if fine_annos_zip_path:
fine_annos_dir = _extract_fine_annos(fine_annos_zip_path, scratch_dir)
else:
fine_annos_dir = None
if coarse_annos_zip_path:
coarse_annos_dir = _extract_coarse_annos(
coarse_annos_zip_path, scratch_dir
)
else:
coarse_annos_dir = None
if person_annos_zip_path:
person_annos_dir = _extract_person_annos(
person_annos_zip_path, scratch_dir
)
else:
person_annos_dir = None
for split, _split in zip(splits, _splits):
split_dir = os.path.join(dataset_dir, split)
_export_split(
_split,
split_dir,
images_dir,
fine_annos_dir,
coarse_annos_dir,
person_annos_dir,
)
def _parse_source_dir(source_dir, fine_annos, coarse_annos, person_annos):
if source_dir is None:
_raise_cityscapes_error(
"You must provide a `source_dir` in order to load the Cityscapes "
"dataset."
)
if not os.path.isdir(source_dir):
_raise_cityscapes_error(
"Source directory '%s' does not exist." % source_dir
)
files = etau.list_files(source_dir)
if _IMAGES_ZIP not in files:
_raise_cityscapes_error(
"Images zip '%s' not found within '%s'."
% (_IMAGES_ZIP, source_dir)
)
images_zip_path = os.path.join(source_dir, _IMAGES_ZIP)
if fine_annos is None:
fine_annos = _FINE_ANNOS_ZIP in files
if fine_annos:
if _FINE_ANNOS_ZIP not in files:
_raise_cityscapes_error(
"Fine annotations zip '%s' not found within '%s'."
% (_FINE_ANNOS_ZIP, source_dir)
)
fine_annos_zip_path = os.path.join(source_dir, _FINE_ANNOS_ZIP)
else:
fine_annos_zip_path = None
if coarse_annos is None:
coarse_annos = _COARSE_ANNOS_ZIP in files
if coarse_annos:
if _COARSE_ANNOS_ZIP not in files:
_raise_cityscapes_error(
"Coarse annotations zip '%s' not found within '%s'."
% (_COARSE_ANNOS_ZIP, source_dir)
)
coarse_annos_zip_path = os.path.join(source_dir, _COARSE_ANNOS_ZIP)
else:
coarse_annos_zip_path = None
if person_annos is None:
person_annos = _PERSON_ANNOS_ZIP in files
if person_annos:
if _PERSON_ANNOS_ZIP not in files:
_raise_cityscapes_error(
"Person annotations zip '%s' not found within '%s'."
% (_PERSON_ANNOS_ZIP, source_dir)
)
person_annos_zip_path = os.path.join(source_dir, _PERSON_ANNOS_ZIP)
else:
person_annos_zip_path = None
return (
images_zip_path,
fine_annos_zip_path,
coarse_annos_zip_path,
person_annos_zip_path,
)
def _raise_cityscapes_error(msg):
raise OSError(
"\n\n"
+ msg
+ "\n\n"
+ "You must download the source files for the Cityscapes dataset "
"manually."
+ "\n\n"
+ "Run `fiftyone zoo info cityscapes` for more information"
)
def _parse_split(split):
if split == "validation":
return "val"
if split not in ("test", "train"):
raise ValueError(
"Invalid split '%s''; supported values are %s"
% (split, ("train", "test", "validation"))
)
return split
def _export_split(
split,
split_dir,
images_dir,
fine_annos_dir,
coarse_annos_dir,
person_annos_dir,
):
images_map = _parse_images(images_dir, split)
if fine_annos_dir:
fine_annos_map = _parse_fine_annos(fine_annos_dir, split)
else:
fine_annos_map = {}
if coarse_annos_dir:
coarse_annos_map = _parse_coarse_annos(coarse_annos_dir, split)
else:
coarse_annos_map = {}
if person_annos_dir:
person_annos_map = _parse_person_annos(person_annos_dir, split)
else:
person_annos_map = {}
dataset = fod.Dataset()
dataset.media_type = fom.IMAGE
has_fine_annos = bool(fine_annos_map)
has_coarse_annos = bool(coarse_annos_map)
has_person_annos = bool(person_annos_map)
if has_fine_annos:
dataset.add_sample_field(
"gt_fine",
fof.EmbeddedDocumentField,
embedded_doc_type=fol.Polylines,
)
if has_coarse_annos:
dataset.add_sample_field(
"gt_coarse",
fof.EmbeddedDocumentField,
embedded_doc_type=fol.Polylines,
)
if has_person_annos:
dataset.add_sample_field(
"gt_person",
fof.EmbeddedDocumentField,
embedded_doc_type=fol.Detections,
)
uuids = sorted(images_map.keys())
logger.info("Finalizing split '%s'...", split)
exporter = foud.FiftyOneDatasetExporter(split_dir, move_media=False)
pb = fou.ProgressBar()
with exporter, pb:
exporter.log_collection(dataset)
for uuid in pb(uuids):
sample = fos.Sample(filepath=images_map[uuid])
if has_fine_annos:
sample["gt_fine"] = fine_annos_map.get(uuid, None)
if has_coarse_annos:
sample["gt_coarse"] = coarse_annos_map.get(uuid, None)
if has_person_annos:
sample["gt_person"] = person_annos_map.get(uuid, None)
exporter.export_sample(sample)
dataset.delete()
def _extract_images(images_zip_path, scratch_dir):
tmp_dir = os.path.join(scratch_dir, "images")
images_dir = os.path.join(tmp_dir, "leftImg8bit")
if not os.path.isdir(images_dir):
logger.info("Extracting images...")
etau.extract_zip(images_zip_path, outdir=tmp_dir, delete_zip=False)
return images_dir
def _extract_fine_annos(fine_annos_zip_path, scratch_dir):
tmp_dir = os.path.join(scratch_dir, "fine-annos")
fine_annos_dir = os.path.join(tmp_dir, "gtFine")
if not os.path.isdir(fine_annos_dir):
logger.info("Extracting fine annotations...")
etau.extract_zip(fine_annos_zip_path, outdir=tmp_dir, delete_zip=False)
return fine_annos_dir
def _extract_coarse_annos(coarse_annos_zip_path, scratch_dir):
tmp_dir = os.path.join(scratch_dir, "coarse-annos")
coarse_annos_dir = os.path.join(tmp_dir, "gtCoarse")
if not os.path.isdir(coarse_annos_dir):
logger.info("Extracting coarse annotations...")
etau.extract_zip(
coarse_annos_zip_path, outdir=tmp_dir, delete_zip=False
)
return coarse_annos_dir
def _extract_person_annos(person_annos_zip_path, scratch_dir):
tmp_dir = os.path.join(scratch_dir, "person-annos")
person_annos_dir = os.path.join(tmp_dir, "gtBboxCityPersons")
if not os.path.isdir(person_annos_dir):
logger.info("Extracting person annotations...")
etau.extract_zip(
person_annos_zip_path, outdir=tmp_dir, delete_zip=False
)
return person_annos_dir
def _parse_images(images_dir, split):
paths_patt = os.path.join(images_dir, split, "*", "*")
images_map = {}
for image_path in etau.get_glob_matches(paths_patt):
uuid = os.path.splitext(os.path.basename(image_path))[0][
: -len("_leftImg8bit")
]
images_map[uuid] = image_path
return images_map
def _parse_fine_annos(fine_annos_dir, split):
glob_patt = os.path.join(fine_annos_dir, split, "*", "*.json")
return _parse_polygon_annos(glob_patt, split, "fine", "_gtFine_polygons")
def _parse_coarse_annos(coarse_annos_dir, split):
glob_patt = os.path.join(coarse_annos_dir, split, "*", "*.json")
return _parse_polygon_annos(
glob_patt, split, "coarse", "_gtCoarse_polygons"
)
def _parse_polygon_annos(glob_patt, split, anno_type, suffix):
anno_paths = etau.get_glob_matches(glob_patt)
if not anno_paths:
return {}
logger.info("Parsing %s annotations for split '%s'...", anno_type, split)
annos_map = {}
with fou.ProgressBar() as pb:
for anno_path in pb(anno_paths):
uuid = os.path.splitext(os.path.basename(anno_path))[0][
: -len(suffix)
]
annos_map[uuid] = _parse_polygons_file(anno_path)
return annos_map
def _parse_person_annos(person_annos_dir, split):
paths_patt = os.path.join(person_annos_dir, split, "*", "*.json")
anno_paths = etau.get_glob_matches(paths_patt)
if not anno_paths:
return {}
logger.info("Parsing person annotations for split '%s'...", split)
detections_map = {}
with fou.ProgressBar() as pb:
for anno_path in pb(anno_paths):
uuid = os.path.splitext(os.path.basename(anno_path))[0][
: -len("_gtBboxCityPersons")
]
detections_map[uuid] = _parse_bbox_file(anno_path)
return detections_map
def _parse_polygons_file(json_path):
d = etas.load_json(json_path)
width = d["imgWidth"]
height = d["imgHeight"]
polylines = []
for obj in d.get("objects", []):
label = obj["label"]
points = [(x / width, y / height) for x, y in obj["polygon"]]
polyline = fol.Polyline(
label=label, points=[points], closed=True, filled=True
)
polylines.append(polyline)
return fol.Polylines(polylines=polylines)
def _parse_bbox_file(json_path):
d = etas.load_json(json_path)
width = d["imgWidth"]
height = d["imgHeight"]
detections = []
for obj in d.get("objects", []):
label = obj["label"]
x, y, w, h = obj["bbox"]
bounding_box = [x / width, y / height, w / width, h / height]
detection = fol.Detection(label=label, bounding_box=bounding_box)
detections.append(detection)
return fol.Detections(detections=detections)
|
en
| 0.658476
|
Utilities for working with the `Cityscapes dataset <https://www.cityscapes-dataset.com>`_. | Copyright 2017-2020, Voxel51, Inc. | `voxel51.com <https://voxel51.com/>`_ | Parses the Cityscapes archive(s) in the specified directory and writes the requested splits in subdirectories of ``dataset_dir`` in :class:`fiftyone.types.dataset_types.FiftyOneDataset` format. The archives must have been manually downloaded into the directory before this method is called. The ``source_dir`` should contain the following files:: source_dir/ leftImg8bit_trainvaltest.zip gtFine_trainvaltest.zip # optional gtCoarse.zip # optional gtBbox_cityPersons_trainval.zip # optional Args: source_dir: the directory continaining the manually downloaded Cityscapes files dataset_dir: the directory in which to build the output dataset scratch_dir: a scratch directory to use for temporary files splits: a list of splits to parse. Supported values are ``(train, test, validation)`` fine_annos (None): whether to load the fine annotations (True), or not (False), or only if the ZIP file exists (None) coarse_annos (None): whether to load the coarse annotations (True), or not (False), or only if the ZIP file exists (None) person_annos (None): whether to load the personn detections (True), or not (False), or only if the ZIP file exists (None) Raises: OSError: if any required source files are not present
| 2.610003
| 3
|
openstack_dashboard/dashboards/admin/backups/views.py
|
stackhpc/horizon
| 930
|
6627793
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.backups \
import forms as admin_forms
from openstack_dashboard.dashboards.admin.backups \
import tables as admin_tables
from openstack_dashboard.dashboards.admin.backups \
import tabs as admin_tabs
from openstack_dashboard.dashboards.project.backups \
import views as project_views
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
LOG = logging.getLogger(__name__)
class AdminBackupsView(tables.PagedTableWithPageMenu, tables.DataTableView,
                       volumes_views.VolumeTableMixIn):
    """Admin panel listing volume backups across all projects."""

    table_class = admin_tables.AdminBackupsTable
    page_title = _("Volume Backups")

    def allowed(self, request):
        # Panel is only shown when the Cinder backend supports backups.
        # NOTE(review): checks self.request rather than the `request`
        # argument — presumably equivalent for a view; confirm before
        # changing
        return api.cinder.volume_backup_supported(self.request)

    def get_data(self):
        """Returns the current page of backups across all tenants, each
        decorated with its source volume, snapshot, and owning project name.
        """
        try:
            search_opts = {'all_tenants': 1}
            self._current_page = self._get_page_number()
            (backups, self._page_size, self._total_of_entries,
             self._number_of_pages) = \
                api.cinder.volume_backup_list_paged_with_page_menu(
                    self.request, page_number=self._current_page,
                    all_tenants=True)
        except Exception as e:
            LOG.exception(e)
            backups = []
            exceptions.handle(self.request, _("Unable to retrieve "
                                              "volume backups."))
        # Empty page: skip the extra Cinder/Keystone lookups entirely
        if not backups:
            return backups
        # Index volumes and snapshots by ID for O(1) correlation below
        volumes = api.cinder.volume_list(self.request, search_opts=search_opts)
        volumes = dict((v.id, v) for v in volumes)
        snapshots = api.cinder.volume_snapshot_list(self.request,
                                                    search_opts=search_opts)
        snapshots = dict((s.id, s) for s in snapshots)
        # Gather our tenants to correlate against Backup IDs
        try:
            tenants, has_more = api.keystone.tenant_list(self.request)
        except Exception:
            # Best-effort: backups still render without project names
            tenants = []
            msg = _('Unable to retrieve volume backup project information.')
            exceptions.handle(self.request, msg)
        tenant_dict = dict((t.id, t) for t in tenants)
        for backup in backups:
            # Attach related objects; unresolved references become None
            backup.volume = volumes.get(backup.volume_id)
            backup.snapshot = snapshots.get(backup.snapshot_id)
            tenant_id = getattr(backup, "project_id", None)
            tenant = tenant_dict.get(tenant_id)
            backup.tenant_name = getattr(tenant, "name", None)
        return backups
class UpdateStatusView(forms.ModalFormView):
    """Modal form allowing an admin to force-reset a backup's status."""

    form_class = admin_forms.UpdateStatus
    modal_id = "update_backup_status_modal"
    template_name = 'admin/backups/update_status.html'
    submit_label = _("Update Status")
    submit_url = "horizon:admin:backups:update_status"
    success_url = reverse_lazy('horizon:admin:backups:index')
    page_title = _("Update Volume backup Status")

    def get_context_data(self, **kwargs):
        """Adds the backup ID and the ID-specific submit URL to the context."""
        context = super().get_context_data(**kwargs)
        context["backup_id"] = self.kwargs['backup_id']
        args = (self.kwargs['backup_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def get_data(self):
        """Fetches the backup being edited (memoized for this view)."""
        try:
            backup_id = self.kwargs['backup_id']
            backup = cinder.volume_backup_get(self.request, backup_id)
        except Exception:
            # NOTE(review): exceptions.handle() with `redirect=` is expected
            # to raise, so `backup` is never read unbound on failure — the
            # standard Horizon pattern; confirm if touching this
            exceptions.handle(self.request,
                              _('Unable to retrieve volume backup details.'),
                              redirect=self.success_url)
        return backup

    def get_initial(self):
        # Pre-populate the form with the backup's current status
        backup = self.get_data()
        return {'backup_id': self.kwargs["backup_id"],
                'status': backup.status}
class AdminBackupDetailView(project_views.BackupDetailView):
    """Backup detail page, reusing the project-level view with admin tabs."""

    tab_group_class = admin_tabs.AdminBackupDetailTabs

    def get_context_data(self, **kwargs):
        """Adds the rendered admin row actions for this backup to the
        context.
        """
        context = super().get_context_data(**kwargs)
        table = admin_tables.AdminBackupsTable(self.request)
        context["actions"] = table.render_row_actions(context["backup"])
        return context

    @staticmethod
    def get_redirect_url():
        # Admin detail pages redirect back to the admin backups index
        return reverse('horizon:admin:backups:index')
class AdminRestoreBackupView(project_views.RestoreBackupView):
    """Restore-backup form for admins; on success, redirects to the admin
    volumes index rather than the project one.
    """

    form_class = admin_forms.AdminRestoreBackupForm
    submit_url = "horizon:admin:backups:restore"
    success_url = reverse_lazy('horizon:admin:volumes:index')
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.backups \
import forms as admin_forms
from openstack_dashboard.dashboards.admin.backups \
import tables as admin_tables
from openstack_dashboard.dashboards.admin.backups \
import tabs as admin_tabs
from openstack_dashboard.dashboards.project.backups \
import views as project_views
from openstack_dashboard.dashboards.project.volumes \
import views as volumes_views
LOG = logging.getLogger(__name__)
class AdminBackupsView(tables.PagedTableWithPageMenu, tables.DataTableView,
volumes_views.VolumeTableMixIn):
table_class = admin_tables.AdminBackupsTable
page_title = _("Volume Backups")
def allowed(self, request):
return api.cinder.volume_backup_supported(self.request)
def get_data(self):
try:
search_opts = {'all_tenants': 1}
self._current_page = self._get_page_number()
(backups, self._page_size, self._total_of_entries,
self._number_of_pages) = \
api.cinder.volume_backup_list_paged_with_page_menu(
self.request, page_number=self._current_page,
all_tenants=True)
except Exception as e:
LOG.exception(e)
backups = []
exceptions.handle(self.request, _("Unable to retrieve "
"volume backups."))
if not backups:
return backups
volumes = api.cinder.volume_list(self.request, search_opts=search_opts)
volumes = dict((v.id, v) for v in volumes)
snapshots = api.cinder.volume_snapshot_list(self.request,
search_opts=search_opts)
snapshots = dict((s.id, s) for s in snapshots)
# Gather our tenants to correlate against Backup IDs
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _('Unable to retrieve volume backup project information.')
exceptions.handle(self.request, msg)
tenant_dict = dict((t.id, t) for t in tenants)
for backup in backups:
backup.volume = volumes.get(backup.volume_id)
backup.snapshot = snapshots.get(backup.snapshot_id)
tenant_id = getattr(backup, "project_id", None)
tenant = tenant_dict.get(tenant_id)
backup.tenant_name = getattr(tenant, "name", None)
return backups
class UpdateStatusView(forms.ModalFormView):
form_class = admin_forms.UpdateStatus
modal_id = "update_backup_status_modal"
template_name = 'admin/backups/update_status.html'
submit_label = _("Update Status")
submit_url = "horizon:admin:backups:update_status"
success_url = reverse_lazy('horizon:admin:backups:index')
page_title = _("Update Volume backup Status")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["backup_id"] = self.kwargs['backup_id']
args = (self.kwargs['backup_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_data(self):
try:
backup_id = self.kwargs['backup_id']
backup = cinder.volume_backup_get(self.request, backup_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume backup details.'),
redirect=self.success_url)
return backup
def get_initial(self):
backup = self.get_data()
return {'backup_id': self.kwargs["backup_id"],
'status': backup.status}
class AdminBackupDetailView(project_views.BackupDetailView):
    """Admin variant of the backup detail view using the admin tab group."""

    tab_group_class = admin_tabs.AdminBackupDetailTabs

    def get_context_data(self, **kwargs):
        """Attach the rendered admin row actions for the shown backup."""
        ctx = super().get_context_data(**kwargs)
        actions_table = admin_tables.AdminBackupsTable(self.request)
        ctx["actions"] = actions_table.render_row_actions(ctx["backup"])
        return ctx

    @staticmethod
    def get_redirect_url():
        """Target URL used when leaving the detail view."""
        return reverse('horizon:admin:backups:index')
class AdminRestoreBackupView(project_views.RestoreBackupView):
    # Admin flavour of the restore-backup workflow: reuses the project-level
    # view's behaviour but swaps in the admin form and admin URL routes.
    form_class = admin_forms.AdminRestoreBackupForm
    submit_url = "horizon:admin:backups:restore"
    success_url = reverse_lazy('horizon:admin:volumes:index')
|
en
| 0.86785
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Gather our tenants to correlate against Backup IDs
| 1.652914
| 2
|
psql2mysql/__init__.py
|
matthewoliver/psql2mysql
| 0
|
6627794
|
# (c) Copyright 2018, SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import re
import six
import sys
import warnings
import yaml
from oslo_config import cfg
from oslo_log import log as logging
from prettytable import PrettyTable
from rfc3986 import uri_reference
from sqlalchemy import create_engine, MetaData, or_, text, types
from sqlalchemy import exc as sa_exc
from datetime2decimal import PreciseTimestamp
# Module-level logger for this migration tool.
LOG = logging.getLogger(__name__)
# Threshold used by the long-TEXT precheck (scanTableForLongTexts); values
# longer than this are reported as incompatible with the MariaDB setup.
MAX_TEXT_LEN = 65536
# Matches any character outside the Basic Multilingual Plane (code points
# that need 4 bytes in UTF-8), which MySQL's 3-byte 'utf8' charset rejects.
regex = re.compile(
    six.u(r'[\U00010000-\U0010ffff]')
)
# Registry of (source type, target type) pairs that need a SQLAlchemy
# TypeDecorator to convert values during migration; see lookupTypeDecorator.
typeConversions = [
    {"src": types.DateTime,
     "dest": types.DECIMAL,
     "decorator": PreciseTimestamp}
]
def lookupTypeDecorator(srcType, destType):
    """Return the TypeDecorator registered for this (src, dest) type pair.

    Walks the typeConversions registry and returns the first matching
    decorator class; None when no conversion is registered.
    """
    for entry in typeConversions:
        src_matches = isinstance(srcType, entry["src"])
        dest_matches = isinstance(destType, entry["dest"])
        if src_matches and dest_matches:
            return entry["decorator"]
    return None
class DbWrapper(object):
    """Wrapper around one SQLAlchemy engine/connection.

    Bundles the schema reflection, precheck scans and bulk read/write
    primitives used by the PostgreSQL -> MariaDB migration.
    """

    def __init__(self, uri=""):
        # Connection URI; engine/connection are created lazily in connect().
        self.uri = uri

    def connect(self):
        """Create the engine and open a connection to the database."""
        self.engine = create_engine(self.uri)
        self.connection = self.engine.connect()

    def getSortedTables(self):
        """Return all reflected tables sorted by foreign-key dependencies."""
        metadata = MetaData(bind=self.engine)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)
            metadata.reflect()
        return metadata.sorted_tables

    def getTables(self):
        """Return the reflected table-name -> Table mapping."""
        metadata = MetaData(bind=self.engine)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)
            metadata.reflect()
        return metadata.tables

    # adapt given query so it excludes deleted items
    def _exclude_deleted(self, table, query):
        if not cfg.CONF.exclude_deleted:
            return query
        if "deleted" not in table.columns:
            return query
        # The "deleted" marker column's type differs between services, so
        # match the comparison value to the column type.
        if isinstance(table.columns["deleted"].type, types.INTEGER):
            return query.where(table.c.deleted == 0)
        if isinstance(table.columns["deleted"].type, types.VARCHAR):
            return query.where(table.c.deleted == 'False')
        return query.where(table.c.deleted.is_(False))

    def getTextColumns(self, table):
        """Return the names of all TEXT columns of *table*."""
        columns = table.columns
        return [c.name for c in columns if str(c.type) == 'TEXT']

    def _query_long_text_rows(self, table, textColumns):
        filters = [
            text("length(\"%s\") > %i" % (x, MAX_TEXT_LEN))
            for x in textColumns
        ]
        # BUG FIX: or_() takes its clauses as positional arguments; the
        # original handed it a single generator object instead.
        q = table.select().where(or_(*filters))
        rows = q.execute()
        return rows

    def scanTableForLongTexts(self, table):
        """Return descriptors of rows whose TEXT values exceed MAX_TEXT_LEN."""
        textColumns = self.getTextColumns(table)
        if not textColumns:
            return []
        LOG.debug("Scanning Table %s (columns: %s) for too long TEXT values ",
                  table.name, textColumns)
        rows = self._query_long_text_rows(table, textColumns)
        long_values = []
        primary_keys = []
        if table.primary_key:
            primary_keys = list(table.primary_key)
        for row in rows:
            for col in textColumns:
                if not isinstance(row[col], six.string_types):
                    continue
                if len(row[col]) > MAX_TEXT_LEN:
                    long_values.append({
                        "column": col,
                        "primary": ["%s=%s" % (k.name, row[k.name])
                                    for k in primary_keys]
                    })
        return long_values

    def getStringColumns(self, table):
        """Return the names of all VARCHAR and TEXT columns of *table*."""
        columns = table.columns
        textColumns = [
            c.name for c in columns if str(c.type).startswith('VARCHAR') or
            str(c.type) == 'TEXT'
        ]
        return textColumns

    def _query_utf8mb4_rows(self, table, stringColumns):
        # Need a raw text filter here as SQLalchemy doesn't seem to provide
        # abstractions for regex
        filters = [
            text("\"%s\" ~ '[\\x10000-\\x10ffff]'" % x) for x in stringColumns
        ]
        # BUG FIX: pass the clauses positionally instead of as a generator.
        q = table.select().where(or_(*filters))
        result = self._exclude_deleted(table, q).execute()
        return result

    def scanTablefor4ByteUtf8Char(self, table):
        """Return descriptors of rows containing 4-byte UTF-8 characters."""
        stringColumns = self.getStringColumns(table)
        # Guard against tables without any string columns (consistent with
        # scanTableForLongTexts) instead of emitting a degenerate query.
        if not stringColumns:
            return []
        LOG.debug("Scanning Table %s (columns: %s) for problematic UTF8 "
                  "characters", table.name, stringColumns)
        rows = self._query_utf8mb4_rows(table, stringColumns)
        incompatible = []
        primary_keys = []
        if table.primary_key:
            primary_keys = list(table.primary_key)
        for row in rows:
            for col in stringColumns:
                if not isinstance(row[col], six.string_types):
                    continue
                if regex.search(row[col]):
                    incompatible.append({
                        "column": col,
                        "value": row[col],
                        "primary": ["%s=%s" % (k.name, row[k.name])
                                    for k in primary_keys]
                    })
        return incompatible

    def readTableRows(self, table):
        """Return a result iterator over all (non-deleted) rows of *table*."""
        return self.engine.execute(self._exclude_deleted(table,
                                                         table.select()))

    # FIXME move this to a MariaDB specific class?
    def disable_constraints(self):
        """Disable check and foreign-key constraints for this session."""
        self.connection.execute(
            "SET SESSION check_constraint_checks='OFF'"
        )
        self.connection.execute(
            "SET SESSION foreign_key_checks='OFF'"
        )

    def writeTableRows(self, table, rows):
        """Insert every remaining row of the *rows* result into *table*."""
        # FIXME: Allow to process this in batches instead one possibly
        # huge transcation?
        self.connection.execute(
            table.insert(),
            rows.fetchall()
        )

    def clearTable(self, table):
        """Delete all rows of *table* on this connection."""
        self.connection.execute(table.delete())
class SourceDatabaseEmpty(Exception):
    """Raised when the source database contains no tables to migrate."""
    pass
class TargetDatabaseEmpty(Exception):
    """Raised when the target database has no tables (schema not created)."""
    pass
class DbDataMigrator(object):
    """Copies all rows of all tables from the source DB to the target DB."""

    def __init__(self, config, source, target):
        self.cfg = config
        # Explicit arguments win over the values from the config/CLI.
        self.src_uri = source if source else cfg.CONF.source
        self.target_uri = target if target else cfg.CONF.target

    def setup(self):
        """Open connections to both the source and the target database."""
        self.src_db = DbWrapper(self.src_uri)
        self.src_db.connect()
        self.target_db = DbWrapper(self.target_uri)
        self.target_db.connect()

    def migrate(self):
        """Migrate all table data from the source to the target database.

        Raises:
            SourceDatabaseEmpty: the source database has no tables.
            TargetDatabaseEmpty: the target database has no tables.
            Exception: a source table is missing in the target database.
        """
        source_tables = self.src_db.getSortedTables()
        target_tables = self.target_db.getTables()
        if not source_tables:
            raise SourceDatabaseEmpty()
        if not target_tables:
            raise TargetDatabaseEmpty()
        # disable constraints on the MariaDB side for the duration of
        # the migration
        LOG.info("Disabling constraints on target DB for the migration")
        self.target_db.disable_constraints()
        # FIXME: Make this optional
        for table in self.target_db.getSortedTables():
            if (table.name == "migrate_version" or
                    table.name.startswith("alembic_")):
                continue
            self.target_db.clearTable(table)
        for table in source_tables:
            LOG.info("Migrating table: '%s'" % table.name)
            # BUG FIX: check for the table's existence *before* indexing
            # target_tables[table.name]; the original order raised a bare
            # KeyError instead of the intended error message.
            if table.name not in target_tables:
                raise Exception(
                    "Table '%s' does not exist in target database" %
                    table.name)
            # skip the schema migration related tables
            # FIXME: Should we put this into a config setting
            # (e.g. --skiptables?)
            if (table.name == "migrate_version" or
                    table.name.startswith("alembic_")):
                continue
            self.setupTypeDecorators(table, target_tables[table.name])
            result = self.src_db.readTableRows(table)
            if result.returns_rows and result.rowcount > 0:
                LOG.info("Rowcount %s" % result.rowcount)
                # FIXME: Allow to process this in batches instead one possibly
                # huge transcation?
                self.target_db.writeTableRows(target_tables[table.name],
                                              result)
            else:
                LOG.debug("Table '%s' is empty" % table.name)

    def setupTypeDecorators(self, srcTable, targetTable):
        """
        Compare the types of all columns in srcTable and targetTable. If
        they do not match, try to figure out if we have a TypeDecorator
        configured that could be used for converting the values. If there
        is one, change the type of the targetTable's columns accordingly.
        FIXME: Currently we ignore the case where no TypeDecorator is found,
        optimistically assuming that SQLAlchemy will do the right thing,
        e.g. as for converting from Boolean (PostgreSQL) to TinyInt (MySQL);
        ideally we should make this more explicit by adding a proper precheck
        and defining supported conversions and raising Errors/Warnings if a
        non-supported combination of types occurs.
        """
        srcColumns = srcTable.columns
        for col in srcColumns:
            targetCol = targetTable.c[col.name]
            if not isinstance(targetCol.type, col.type.__class__):
                decorator = lookupTypeDecorator(col.type, targetCol.type)
                if decorator:
                    LOG.info("Converting values in column '%s' from type "
                             "'%s' to type '%s' using TypeDecorator '%s'",
                             col.name, col.type, targetCol.type,
                             decorator.__name__)
                    targetTable.c[targetCol.name].type = decorator
def add_subcommands(subparsers):
    """Register the 'precheck' and 'migrate' subcommands on *subparsers*."""
    parser = subparsers.add_parser('precheck',
                                   help='Run prechecks on the PostgreSQL '
                                        'database')
    # NOTE(review): spelled as an optional flag ("--mariadb-utf8"); the
    # original bare positional name combined with store_true cannot work as
    # a boolean switch. default=True is preserved, so the check still runs
    # unless callers change it.
    parser.add_argument("--mariadb-utf8",
                        action='store_true',
                        default=True,
                        help='Check all string columns for incompatibilities '
                             'with mysql\'s utf8 encoding.')
    parser.set_defaults(func=do_prechecks)
    parser = subparsers.add_parser(
        'migrate',
        help='Migrate data from PostgreSQL to MariaDB')
    parser.set_defaults(func=do_migration)
def do_prechecks(config, source, target):
    """Scan every source table for data MariaDB cannot store.

    Reports rows containing 4-byte UTF-8 characters (incompatible with
    MySQL's 3-byte 'utf8' charset) and TEXT values over MAX_TEXT_LEN.
    """
    src_uri = source if source else cfg.CONF.source
    db = DbWrapper(src_uri)
    db.connect()
    tables = db.getSortedTables()
    prechecks_ok = True
    for table in tables:
        incompatibles = db.scanTablefor4ByteUtf8Char(table)
        if incompatibles:
            print("Table '%s' contains 4 Byte UTF8 characters which are "
                  "incompatible with the 'utf8' encoding used by MariaDB" %
                  table.name)
            print("The following rows are affected:")
            output_table = PrettyTable()
            output_table.field_names = [
                "Primary Key",
                "Affected Column",
                "Value"
            ]
            for item in incompatibles:
                output_table.add_row([', '.join(item["primary"]),
                                      item['column'],
                                      item['value']])
            print(output_table)
            print("Error during prechecks. "
                  "4 Byte UTF8 characters found in the source database.")
            prechecks_ok = False
        long_values = db.scanTableForLongTexts(table)
        if long_values:
            # BUG FIX: the format arguments were passed to print() as extra
            # positional parameters instead of being %-interpolated.
            print("Table '%s' contains TEXT values that are more than %s "
                  "characters long. This is incompatible with MariaDB setup."
                  % (table.name, MAX_TEXT_LEN))
            print("The following rows are affected:")
            output_table = PrettyTable()
            output_table.field_names = [
                "Primary Key",
                "Affected Column"
            ]
            for item in long_values:
                output_table.add_row([', '.join(item["primary"]),
                                      item['column']])
            print(output_table)
            print("Error during prechecks. "
                  "Too long text values found in the source database.")
            prechecks_ok = False
    if prechecks_ok:
        print("Success. No errors found during prechecks.")
def do_migration(config, source, target):
    """Run the full data migration, translating empty-database errors
    into user-facing messages."""
    worker = DbDataMigrator(config, source, target)
    worker.setup()
    try:
        worker.migrate()
    except SourceDatabaseEmpty:
        # Nothing to copy; not treated as a fatal error.
        print("The source database doesn't contain any Tables. "
              "Nothing to migrate.")
    except TargetDatabaseEmpty:
        print("Error: The target database doesn't contain any Tables. Make "
              "sure to create the Schema in the target database before "
              "starting the migration.")
        sys.exit(1)
# restrict the source database to postgresql for now
def check_source_schema(source):
    """Abort with an error message unless *source* is a postgresql:// URL."""
    scheme = uri_reference(source).scheme
    if scheme == "postgresql":
        return
    print('Error: Only "postgresql" is supported as the source database '
          'currently', file=sys.stderr)
    sys.exit(1)
# restrict the target database to mysql+pymysql
def check_target_schema(target):
    """Abort unless *target* is a mysql+pymysql:// URL with charset=utf8."""
    uri = uri_reference(target)
    if uri.scheme != "mysql+pymysql":
        print('Error: Only "mysql" with the "pymysql" driver is supported '
              'as the target database currently',
              file=sys.stderr)
        sys.exit(1)
    query = uri.query
    if query is None or "charset=utf8" not in query:
        print('Error: The target connection is missing the "charset=utf8" '
              'parameter.', file=sys.stderr)
        sys.exit(1)
def main():
    """CLI entry point: parse options, then dispatch the chosen subcommand."""
    # FIXME: Split these up into separate components?
    # host, port, username, password, database
    cli_opts = [
        cfg.SubCommandOpt('command',
                          title="Commands",
                          help="Available commands",
                          handler=add_subcommands),
        cfg.URIOpt('source',
                   required=False,
                   help='connection URL to the src DB server'),
        cfg.URIOpt('target',
                   required=False,
                   help='connection URL to the target server'),
        cfg.StrOpt('batch',
                   required=False,
                   help='YAML file containing connection URLs'),
        cfg.BoolOpt('exclude-deleted',
                    default=True,
                    help='Exclude table rows marked as deleted. '
                         'True by default.')
    ]
    cfg.CONF.register_cli_opts(cli_opts)
    logging.register_options(cfg.CONF)
    logging.set_defaults()
    # read config and initialize logging
    cfg.CONF(project='pg2my')
    # cfg.CONF.set_override("use_stderr", True)
    logging.setup(cfg.CONF, 'pg2my')
    # We expect batch file with this syntax:
    #
    # keystone:
    #   source: postgresql://keystone:p@192.168.243.86/keystone
    #   target: mysql+pymysql://keystone:p@192.168.243.87/keystone?charset=utf8
    # cinder:
    #   source: postgresql://cinder:idRll2gJPodv@192.168.243.86/cinder
    #   target:
    if cfg.CONF.batch:
        try:
            with open(cfg.CONF.batch, 'r') as f:
                # BUG FIX: iteritems() is Python-2-only in a six-using
                # codebase; safe_load avoids yaml.load's arbitrary object
                # construction for a plain data file.
                for db_name, db in yaml.safe_load(f).items():
                    print('Processing database "%s"... ' % db_name)
                    check_source_schema(db['source'])
                    if db['target']:
                        check_target_schema(db['target'])
                    cfg.CONF.command.func(cfg, db['source'], db['target'])
        except IOError:
            print('Batch file "%s" does not exist or cannot be read'
                  % cfg.CONF.batch)
            sys.exit(2)
        print("Batch processing done.")
        sys.exit(0)
    check_source_schema(cfg.CONF.source)
    if cfg.CONF.target:
        check_target_schema(cfg.CONF.target)
    cfg.CONF.command.func(cfg, cfg.CONF.source, cfg.CONF.target)
|
# (c) Copyright 2018, SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import re
import six
import sys
import warnings
import yaml
from oslo_config import cfg
from oslo_log import log as logging
from prettytable import PrettyTable
from rfc3986 import uri_reference
from sqlalchemy import create_engine, MetaData, or_, text, types
from sqlalchemy import exc as sa_exc
from datetime2decimal import PreciseTimestamp
# Module-level logger for this migration tool.
LOG = logging.getLogger(__name__)
# Threshold used by the long-TEXT precheck (scanTableForLongTexts); values
# longer than this are reported as incompatible with the MariaDB setup.
MAX_TEXT_LEN = 65536
# Matches any character outside the Basic Multilingual Plane (code points
# that need 4 bytes in UTF-8), which MySQL's 3-byte 'utf8' charset rejects.
regex = re.compile(
    six.u(r'[\U00010000-\U0010ffff]')
)
# Registry of (source type, target type) pairs that need a SQLAlchemy
# TypeDecorator to convert values during migration; see lookupTypeDecorator.
typeConversions = [
    {"src": types.DateTime,
     "dest": types.DECIMAL,
     "decorator": PreciseTimestamp}
]
def lookupTypeDecorator(srcType, destType):
    """Return the TypeDecorator registered for this (src, dest) type pair.

    Walks the typeConversions registry and returns the first matching
    decorator class; None when no conversion is registered.
    """
    for entry in typeConversions:
        src_matches = isinstance(srcType, entry["src"])
        dest_matches = isinstance(destType, entry["dest"])
        if src_matches and dest_matches:
            return entry["decorator"]
    return None
class DbWrapper(object):
    """Wrapper around one SQLAlchemy engine/connection.

    Bundles the schema reflection, precheck scans and bulk read/write
    primitives used by the PostgreSQL -> MariaDB migration.
    """

    def __init__(self, uri=""):
        # Connection URI; engine/connection are created lazily in connect().
        self.uri = uri

    def connect(self):
        """Create the engine and open a connection to the database."""
        self.engine = create_engine(self.uri)
        self.connection = self.engine.connect()

    def getSortedTables(self):
        """Return all reflected tables sorted by foreign-key dependencies."""
        metadata = MetaData(bind=self.engine)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)
            metadata.reflect()
        return metadata.sorted_tables

    def getTables(self):
        """Return the reflected table-name -> Table mapping."""
        metadata = MetaData(bind=self.engine)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)
            metadata.reflect()
        return metadata.tables

    # adapt given query so it excludes deleted items
    def _exclude_deleted(self, table, query):
        if not cfg.CONF.exclude_deleted:
            return query
        if "deleted" not in table.columns:
            return query
        # The "deleted" marker column's type differs between services, so
        # match the comparison value to the column type.
        if isinstance(table.columns["deleted"].type, types.INTEGER):
            return query.where(table.c.deleted == 0)
        if isinstance(table.columns["deleted"].type, types.VARCHAR):
            return query.where(table.c.deleted == 'False')
        return query.where(table.c.deleted.is_(False))

    def getTextColumns(self, table):
        """Return the names of all TEXT columns of *table*."""
        columns = table.columns
        return [c.name for c in columns if str(c.type) == 'TEXT']

    def _query_long_text_rows(self, table, textColumns):
        filters = [
            text("length(\"%s\") > %i" % (x, MAX_TEXT_LEN))
            for x in textColumns
        ]
        # BUG FIX: or_() takes its clauses as positional arguments; the
        # original handed it a single generator object instead.
        q = table.select().where(or_(*filters))
        rows = q.execute()
        return rows

    def scanTableForLongTexts(self, table):
        """Return descriptors of rows whose TEXT values exceed MAX_TEXT_LEN."""
        textColumns = self.getTextColumns(table)
        if not textColumns:
            return []
        LOG.debug("Scanning Table %s (columns: %s) for too long TEXT values ",
                  table.name, textColumns)
        rows = self._query_long_text_rows(table, textColumns)
        long_values = []
        primary_keys = []
        if table.primary_key:
            primary_keys = list(table.primary_key)
        for row in rows:
            for col in textColumns:
                if not isinstance(row[col], six.string_types):
                    continue
                if len(row[col]) > MAX_TEXT_LEN:
                    long_values.append({
                        "column": col,
                        "primary": ["%s=%s" % (k.name, row[k.name])
                                    for k in primary_keys]
                    })
        return long_values

    def getStringColumns(self, table):
        """Return the names of all VARCHAR and TEXT columns of *table*."""
        columns = table.columns
        textColumns = [
            c.name for c in columns if str(c.type).startswith('VARCHAR') or
            str(c.type) == 'TEXT'
        ]
        return textColumns

    def _query_utf8mb4_rows(self, table, stringColumns):
        # Need a raw text filter here as SQLalchemy doesn't seem to provide
        # abstractions for regex
        filters = [
            text("\"%s\" ~ '[\\x10000-\\x10ffff]'" % x) for x in stringColumns
        ]
        # BUG FIX: pass the clauses positionally instead of as a generator.
        q = table.select().where(or_(*filters))
        result = self._exclude_deleted(table, q).execute()
        return result

    def scanTablefor4ByteUtf8Char(self, table):
        """Return descriptors of rows containing 4-byte UTF-8 characters."""
        stringColumns = self.getStringColumns(table)
        # Guard against tables without any string columns (consistent with
        # scanTableForLongTexts) instead of emitting a degenerate query.
        if not stringColumns:
            return []
        LOG.debug("Scanning Table %s (columns: %s) for problematic UTF8 "
                  "characters", table.name, stringColumns)
        rows = self._query_utf8mb4_rows(table, stringColumns)
        incompatible = []
        primary_keys = []
        if table.primary_key:
            primary_keys = list(table.primary_key)
        for row in rows:
            for col in stringColumns:
                if not isinstance(row[col], six.string_types):
                    continue
                if regex.search(row[col]):
                    incompatible.append({
                        "column": col,
                        "value": row[col],
                        "primary": ["%s=%s" % (k.name, row[k.name])
                                    for k in primary_keys]
                    })
        return incompatible

    def readTableRows(self, table):
        """Return a result iterator over all (non-deleted) rows of *table*."""
        return self.engine.execute(self._exclude_deleted(table,
                                                         table.select()))

    # FIXME move this to a MariaDB specific class?
    def disable_constraints(self):
        """Disable check and foreign-key constraints for this session."""
        self.connection.execute(
            "SET SESSION check_constraint_checks='OFF'"
        )
        self.connection.execute(
            "SET SESSION foreign_key_checks='OFF'"
        )

    def writeTableRows(self, table, rows):
        """Insert every remaining row of the *rows* result into *table*."""
        # FIXME: Allow to process this in batches instead one possibly
        # huge transcation?
        self.connection.execute(
            table.insert(),
            rows.fetchall()
        )

    def clearTable(self, table):
        """Delete all rows of *table* on this connection."""
        self.connection.execute(table.delete())
class SourceDatabaseEmpty(Exception):
    """Raised when the source database contains no tables to migrate."""
    pass
class TargetDatabaseEmpty(Exception):
    """Raised when the target database has no tables (schema not created)."""
    pass
class DbDataMigrator(object):
    """Copies all rows of all tables from the source DB to the target DB."""

    def __init__(self, config, source, target):
        self.cfg = config
        # Explicit arguments win over the values from the config/CLI.
        self.src_uri = source if source else cfg.CONF.source
        self.target_uri = target if target else cfg.CONF.target

    def setup(self):
        """Open connections to both the source and the target database."""
        self.src_db = DbWrapper(self.src_uri)
        self.src_db.connect()
        self.target_db = DbWrapper(self.target_uri)
        self.target_db.connect()

    def migrate(self):
        """Migrate all table data from the source to the target database.

        Raises:
            SourceDatabaseEmpty: the source database has no tables.
            TargetDatabaseEmpty: the target database has no tables.
            Exception: a source table is missing in the target database.
        """
        source_tables = self.src_db.getSortedTables()
        target_tables = self.target_db.getTables()
        if not source_tables:
            raise SourceDatabaseEmpty()
        if not target_tables:
            raise TargetDatabaseEmpty()
        # disable constraints on the MariaDB side for the duration of
        # the migration
        LOG.info("Disabling constraints on target DB for the migration")
        self.target_db.disable_constraints()
        # FIXME: Make this optional
        for table in self.target_db.getSortedTables():
            if (table.name == "migrate_version" or
                    table.name.startswith("alembic_")):
                continue
            self.target_db.clearTable(table)
        for table in source_tables:
            LOG.info("Migrating table: '%s'" % table.name)
            # BUG FIX: check for the table's existence *before* indexing
            # target_tables[table.name]; the original order raised a bare
            # KeyError instead of the intended error message.
            if table.name not in target_tables:
                raise Exception(
                    "Table '%s' does not exist in target database" %
                    table.name)
            # skip the schema migration related tables
            # FIXME: Should we put this into a config setting
            # (e.g. --skiptables?)
            if (table.name == "migrate_version" or
                    table.name.startswith("alembic_")):
                continue
            self.setupTypeDecorators(table, target_tables[table.name])
            result = self.src_db.readTableRows(table)
            if result.returns_rows and result.rowcount > 0:
                LOG.info("Rowcount %s" % result.rowcount)
                # FIXME: Allow to process this in batches instead one possibly
                # huge transcation?
                self.target_db.writeTableRows(target_tables[table.name],
                                              result)
            else:
                LOG.debug("Table '%s' is empty" % table.name)

    def setupTypeDecorators(self, srcTable, targetTable):
        """
        Compare the types of all columns in srcTable and targetTable. If
        they do not match, try to figure out if we have a TypeDecorator
        configured that could be used for converting the values. If there
        is one, change the type of the targetTable's columns accordingly.
        FIXME: Currently we ignore the case where no TypeDecorator is found,
        optimistically assuming that SQLAlchemy will do the right thing,
        e.g. as for converting from Boolean (PostgreSQL) to TinyInt (MySQL);
        ideally we should make this more explicit by adding a proper precheck
        and defining supported conversions and raising Errors/Warnings if a
        non-supported combination of types occurs.
        """
        srcColumns = srcTable.columns
        for col in srcColumns:
            targetCol = targetTable.c[col.name]
            if not isinstance(targetCol.type, col.type.__class__):
                decorator = lookupTypeDecorator(col.type, targetCol.type)
                if decorator:
                    LOG.info("Converting values in column '%s' from type "
                             "'%s' to type '%s' using TypeDecorator '%s'",
                             col.name, col.type, targetCol.type,
                             decorator.__name__)
                    targetTable.c[targetCol.name].type = decorator
def add_subcommands(subparsers):
    """Register the 'precheck' and 'migrate' subcommands on *subparsers*."""
    parser = subparsers.add_parser('precheck',
                                   help='Run prechecks on the PostgreSQL '
                                        'database')
    # NOTE(review): spelled as an optional flag ("--mariadb-utf8"); the
    # original bare positional name combined with store_true cannot work as
    # a boolean switch. default=True is preserved, so the check still runs
    # unless callers change it.
    parser.add_argument("--mariadb-utf8",
                        action='store_true',
                        default=True,
                        help='Check all string columns for incompatibilities '
                             'with mysql\'s utf8 encoding.')
    parser.set_defaults(func=do_prechecks)
    parser = subparsers.add_parser(
        'migrate',
        help='Migrate data from PostgreSQL to MariaDB')
    parser.set_defaults(func=do_migration)
def do_prechecks(config, source, target):
    """Scan every source table for data MariaDB cannot store.

    Reports rows containing 4-byte UTF-8 characters (incompatible with
    MySQL's 3-byte 'utf8' charset) and TEXT values over MAX_TEXT_LEN.
    """
    src_uri = source if source else cfg.CONF.source
    db = DbWrapper(src_uri)
    db.connect()
    tables = db.getSortedTables()
    prechecks_ok = True
    for table in tables:
        incompatibles = db.scanTablefor4ByteUtf8Char(table)
        if incompatibles:
            print("Table '%s' contains 4 Byte UTF8 characters which are "
                  "incompatible with the 'utf8' encoding used by MariaDB" %
                  table.name)
            print("The following rows are affected:")
            output_table = PrettyTable()
            output_table.field_names = [
                "Primary Key",
                "Affected Column",
                "Value"
            ]
            for item in incompatibles:
                output_table.add_row([', '.join(item["primary"]),
                                      item['column'],
                                      item['value']])
            print(output_table)
            print("Error during prechecks. "
                  "4 Byte UTF8 characters found in the source database.")
            prechecks_ok = False
        long_values = db.scanTableForLongTexts(table)
        if long_values:
            # BUG FIX: the format arguments were passed to print() as extra
            # positional parameters instead of being %-interpolated.
            print("Table '%s' contains TEXT values that are more than %s "
                  "characters long. This is incompatible with MariaDB setup."
                  % (table.name, MAX_TEXT_LEN))
            print("The following rows are affected:")
            output_table = PrettyTable()
            output_table.field_names = [
                "Primary Key",
                "Affected Column"
            ]
            for item in long_values:
                output_table.add_row([', '.join(item["primary"]),
                                      item['column']])
            print(output_table)
            print("Error during prechecks. "
                  "Too long text values found in the source database.")
            prechecks_ok = False
    if prechecks_ok:
        print("Success. No errors found during prechecks.")
def do_migration(config, source, target):
    """Run the full data migration, translating empty-database errors
    into user-facing messages."""
    worker = DbDataMigrator(config, source, target)
    worker.setup()
    try:
        worker.migrate()
    except SourceDatabaseEmpty:
        # Nothing to copy; not treated as a fatal error.
        print("The source database doesn't contain any Tables. "
              "Nothing to migrate.")
    except TargetDatabaseEmpty:
        print("Error: The target database doesn't contain any Tables. Make "
              "sure to create the Schema in the target database before "
              "starting the migration.")
        sys.exit(1)
# restrict the source database to postgresql for now
def check_source_schema(source):
    """Abort with an error message unless *source* is a postgresql:// URL."""
    scheme = uri_reference(source).scheme
    if scheme == "postgresql":
        return
    print('Error: Only "postgresql" is supported as the source database '
          'currently', file=sys.stderr)
    sys.exit(1)
# restrict the target database to mysql+pymysql
def check_target_schema(target):
    """Abort unless *target* is a mysql+pymysql:// URL with charset=utf8."""
    uri = uri_reference(target)
    if uri.scheme != "mysql+pymysql":
        print('Error: Only "mysql" with the "pymysql" driver is supported '
              'as the target database currently',
              file=sys.stderr)
        sys.exit(1)
    query = uri.query
    if query is None or "charset=utf8" not in query:
        print('Error: The target connection is missing the "charset=utf8" '
              'parameter.', file=sys.stderr)
        sys.exit(1)
def main():
    """CLI entry point: parse options, then dispatch the chosen subcommand."""
    # FIXME: Split these up into separate components?
    # host, port, username, password, database
    cli_opts = [
        cfg.SubCommandOpt('command',
                          title="Commands",
                          help="Available commands",
                          handler=add_subcommands),
        cfg.URIOpt('source',
                   required=False,
                   help='connection URL to the src DB server'),
        cfg.URIOpt('target',
                   required=False,
                   help='connection URL to the target server'),
        cfg.StrOpt('batch',
                   required=False,
                   help='YAML file containing connection URLs'),
        cfg.BoolOpt('exclude-deleted',
                    default=True,
                    help='Exclude table rows marked as deleted. '
                         'True by default.')
    ]
    cfg.CONF.register_cli_opts(cli_opts)
    logging.register_options(cfg.CONF)
    logging.set_defaults()
    # read config and initialize logging
    cfg.CONF(project='pg2my')
    # cfg.CONF.set_override("use_stderr", True)
    logging.setup(cfg.CONF, 'pg2my')
    # We expect batch file with this syntax:
    #
    # keystone:
    #   source: postgresql://keystone:p@192.168.243.86/keystone
    #   target: mysql+pymysql://keystone:p@192.168.243.87/keystone?charset=utf8
    # cinder:
    #   source: postgresql://cinder:idRll2gJPodv@192.168.243.86/cinder
    #   target:
    if cfg.CONF.batch:
        try:
            with open(cfg.CONF.batch, 'r') as f:
                # BUG FIX: iteritems() is Python-2-only in a six-using
                # codebase; safe_load avoids yaml.load's arbitrary object
                # construction for a plain data file.
                for db_name, db in yaml.safe_load(f).items():
                    print('Processing database "%s"... ' % db_name)
                    check_source_schema(db['source'])
                    if db['target']:
                        check_target_schema(db['target'])
                    cfg.CONF.command.func(cfg, db['source'], db['target'])
        except IOError:
            print('Batch file "%s" does not exist or cannot be read'
                  % cfg.CONF.batch)
            sys.exit(2)
        print("Batch processing done.")
        sys.exit(0)
    check_source_schema(cfg.CONF.source)
    if cfg.CONF.target:
        check_target_schema(cfg.CONF.target)
    cfg.CONF.command.func(cfg, cfg.CONF.source, cfg.CONF.target)
|
en
| 0.803199
|
# (c) Copyright 2018, SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # adapt given query so it excludes deleted items # Need a raw text filter here as SQLalchemy doesn't seem to provide # abstractions for regex # FIXME move this to a MariaDB specific class? # FIXME: Allow to process this in batches instead one possibly # huge transcation? # disable constraints on the MariaDB side for the duration of # the migration # FIXME: Make this optional # skip the schema migration related tables # FIXME: Should we put this into a config setting # (e.g. --skiptables?) # FIXME: Allow to process this in batches instead one possibly # huge transcation? Compare the types of all columns in srcTable and targetTable. If they do not match, try to figure out if we have a TypeDecorator configured that could be used for converting the values. If there is one, change the type of the targetTable's columns accordingly. FIXME: Currently we ignore case where on TypeDecorator is found optimistically assuming that SQLalchemy will do the right thing, e.g. as for converting from Boolean (PostgreSQL) to TinyInt (MySQL) ideally we should makes this more explicit by adding a proper precheck and defining supported conversion and raising Errors/Warnings if a non supported combination of Types occurs) # restrict the source database to postgresql for now # restrict the target database to mysql+pymsql # FIXME: Split these up into separate components? 
# host, port, username, password, database # read config and initialize logging # cfg.CONF.set_override("use_stderr", True) # We expect batch file with this syntax: # # keystone: # source: postgresql://keystone:p@192.168.243.86/keystone # target: mysql+pymysql://keystone:p@192.168.243.87/keystone?charset=utf8 # cinder: # source: postgresql://cinder:idRll2gJPodv@192.168.243.86/cinder # target:
| 1.894235
| 2
|
examples/nlp/text_extraction_with_bert.py
|
lsgrep/keras-io
| 1
|
6627795
|
# examples/nlp/text_extraction_with_bert.py
"""
Title: BERT (from HuggingFace Transformers) for Text Extraction
Author: [<NAME>](https://twitter.com/NandanApoorv)
Date created: 2020/05/23
Last modified: 2020/05/23
Description: Fine tune pretrained BERT from HuggingFace Transformers on SQuAD.
"""
"""
## Introduction
This demonstration uses SQuAD (Stanford Question-Answering Dataset).
In SQuAD, an input consists of a question, and a paragraph for context.
The goal is to find the span of text in the paragraph that answers the question.
We evaluate our performance on this data with the "Exact Match" metric,
which measures the percentage of predictions that exactly match any one of the
ground-truth answers.
We fine-tune a BERT model to perform this task as follows:
1. Feed the context and the question as inputs to BERT.
2. Take two vectors S and T with dimensions equal to that of
hidden states in BERT.
3. Compute the probability of each token being the start and end of
the answer span. The probability of a token being the start of
the answer is given by a dot product between S and the representation
of the token in the last layer of BERT, followed by a softmax over all tokens.
The probability of a token being the end of the answer is computed
similarly with the vector T.
4. Fine-tune BERT and learn S and T along the way.
**References:**
- [BERT](https://arxiv.org/pdf/1810.04805.pdf)
- [SQuAD](https://arxiv.org/abs/1606.05250)
"""
"""
## Setup
"""
import os
import re
import json
import string
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tokenizers import BertWordPieceTokenizer
from transformers import BertTokenizer, TFBertModel, BertConfig
# Maximum combined length (context + question tokens) fed to BERT;
# longer examples are skipped during preprocessing.
max_len = 384
configuration = BertConfig() # default parameters and configuration for BERT
"""
## Set-up BERT tokenizer
"""
# Save the slow pretrained tokenizer
slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
save_path = "bert_base_uncased/"
if not os.path.exists(save_path):
    os.makedirs(save_path)
slow_tokenizer.save_pretrained(save_path)
# Load the fast tokenizer from saved file
# (the fast tokenizer exposes per-token character offsets, used below to
# map the answer's character span onto token indices)
tokenizer = BertWordPieceTokenizer("bert_base_uncased/vocab.txt", lowercase=True)
"""
## Load the data
"""
# SQuAD v1.1 train/dev splits, downloaded and cached by keras.utils.get_file.
train_data_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json"
train_path = keras.utils.get_file("train.json", train_data_url)
eval_data_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"
eval_path = keras.utils.get_file("eval.json", eval_data_url)
"""
## Preprocess the data
1. Go through the JSON file and store every record as a `SquadExample` object.
2. Go through each `SquadExample` and create `x_train, y_train, x_eval, y_eval`.
"""
class SquadExample:
    """One SQuAD record plus (after ``preprocess()``) its BERT-ready encoding.

    Records that cannot be encoded -- answer end past the context, answer
    lost during tokenization, or inputs longer than ``max_len`` -- are
    flagged with ``skip = True`` instead of raising.
    """

    def __init__(self, question, context, start_char_idx, answer_text, all_answers):
        self.question = question
        self.context = context
        # Character offset of the answer inside the context paragraph.
        self.start_char_idx = start_char_idx
        self.answer_text = answer_text
        # All ground-truth answer variants (used for Exact Match evaluation).
        self.all_answers = all_answers
        self.skip = False

    def preprocess(self):
        """Tokenize context+question and build the model inputs/targets.

        Relies on the module-level ``tokenizer`` and ``max_len``. Sets
        input_ids, token_type_ids, attention_mask, start_token_idx,
        end_token_idx and context_token_to_char on success.
        """
        context = self.context
        question = self.question
        answer_text = self.answer_text
        start_char_idx = self.start_char_idx
        # Clean context, answer and question (collapse all whitespace runs)
        context = " ".join(str(context).split())
        question = " ".join(str(question).split())
        answer = " ".join(str(answer_text).split())
        # Find end character index of answer in context
        end_char_idx = start_char_idx + len(answer)
        if end_char_idx >= len(context):
            self.skip = True
            return
        # Mark the character indexes in context that are in answer
        is_char_in_ans = [0] * len(context)
        for idx in range(start_char_idx, end_char_idx):
            is_char_in_ans[idx] = 1
        # Tokenize context
        tokenized_context = tokenizer.encode(context)
        # Find tokens that were created from answer characters, using the
        # fast tokenizer's per-token character offsets
        ans_token_idx = []
        for idx, (start, end) in enumerate(tokenized_context.offsets):
            if sum(is_char_in_ans[start:end]) > 0:
                ans_token_idx.append(idx)
        if len(ans_token_idx) == 0:
            self.skip = True
            return
        # Find start and end token index for tokens from answer
        start_token_idx = ans_token_idx[0]
        end_token_idx = ans_token_idx[-1]
        # Tokenize question
        tokenized_question = tokenizer.encode(question)
        # Create inputs: context tokens followed by the question tokens;
        # the question encoding's leading special token is dropped via [1:]
        input_ids = tokenized_context.ids + tokenized_question.ids[1:]
        token_type_ids = [0] * len(tokenized_context.ids) + [1] * len(
            tokenized_question.ids[1:]
        )
        attention_mask = [1] * len(input_ids)
        # Pad and create attention masks.
        # Skip if truncation is needed
        padding_length = max_len - len(input_ids)
        if padding_length > 0: # pad
            input_ids = input_ids + ([0] * padding_length)
            attention_mask = attention_mask + ([0] * padding_length)
            token_type_ids = token_type_ids + ([0] * padding_length)
        elif padding_length < 0: # skip
            self.skip = True
            return
        self.input_ids = input_ids
        self.token_type_ids = token_type_ids
        self.attention_mask = attention_mask
        self.start_token_idx = start_token_idx
        self.end_token_idx = end_token_idx
        # Kept so predictions (token spans) can be mapped back to text.
        self.context_token_to_char = tokenized_context.offsets
# Load the raw SQuAD JSON for the train and dev splits downloaded above.
with open(train_path) as f:
    raw_train_data = json.load(f)
with open(eval_path) as f:
    raw_eval_data = json.load(f)
def create_squad_examples(raw_data):
    """Convert raw SQuAD JSON into a list of preprocessed SquadExample objects.

    Every question/answer pair in every paragraph becomes one example; each
    example is preprocessed immediately (which may flag it as skipped).
    """
    examples = []
    for article in raw_data["data"]:
        for paragraph in article["paragraphs"]:
            context = paragraph["context"]
            for qa in paragraph["qas"]:
                answers = qa["answers"]
                example = SquadExample(
                    qa["question"],
                    context,
                    answers[0]["answer_start"],
                    answers[0]["text"],
                    [ans["text"] for ans in answers],
                )
                example.preprocess()
                examples.append(example)
    return examples
def create_inputs_targets(squad_examples):
    """Stack non-skipped SquadExamples into model inputs and targets.

    Parameters
    ----------
    squad_examples : list
        Preprocessed examples; items whose ``skip`` flag is set are excluded.

    Returns
    -------
    tuple
        ``x`` is ``[input_ids, token_type_ids, attention_mask]`` numpy
        arrays; ``y`` is ``[start_token_idx, end_token_idx]`` numpy arrays.
    """
    keys = [
        "input_ids",
        "token_type_ids",
        "attention_mask",
        "start_token_idx",
        "end_token_idx",
    ]
    # Gather each field across all usable examples and stack into an array.
    # (`not item.skip` replaces the non-idiomatic `item.skip == False`.)
    dataset_dict = {
        key: np.array(
            [getattr(item, key) for item in squad_examples if not item.skip]
        )
        for key in keys
    }
    x = [
        dataset_dict["input_ids"],
        dataset_dict["token_type_ids"],
        dataset_dict["attention_mask"],
    ]
    y = [dataset_dict["start_token_idx"], dataset_dict["end_token_idx"]]
    return x, y
# Build examples and stacked numpy inputs/targets for both splits.
# Note: the printed counts include examples flagged `skip`, which are
# excluded from x_train/x_eval by create_inputs_targets.
train_squad_examples = create_squad_examples(raw_train_data)
x_train, y_train = create_inputs_targets(train_squad_examples)
print(f"{len(train_squad_examples)} training points created.")
eval_squad_examples = create_squad_examples(raw_eval_data)
x_eval, y_eval = create_inputs_targets(eval_squad_examples)
print(f"{len(eval_squad_examples)} evaluation points created.")
"""
Create the Question-Answering Model using BERT and Functional API
"""
def create_model():
    """Build the BERT span-extraction QA model.

    Inputs: `input_ids`, `token_type_ids`, `attention_mask`, each of shape
    ``(max_len,)``. Outputs: two per-token softmax distributions over the
    sequence — one for the answer start position, one for the end position.
    The pretrained encoder is fine-tuned together with the two span heads.
    """
    ## BERT encoder
    encoder = TFBertModel.from_pretrained("bert-base-uncased")
    ## QA Model
    input_ids = layers.Input(shape=(max_len,), dtype=tf.int32)
    token_type_ids = layers.Input(shape=(max_len,), dtype=tf.int32)
    attention_mask = layers.Input(shape=(max_len,), dtype=tf.int32)
    # Last-layer hidden states (first element of the encoder output tuple).
    embedding = encoder(
        input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
    )[0]
    # One scalar logit per token for each head; the bias-free Dense(1)
    # weights play the role of the S and T vectors from the BERT paper.
    start_logits = layers.Dense(1, name="start_logit", use_bias=False)(embedding)
    start_logits = layers.Flatten()(start_logits)
    end_logits = layers.Dense(1, name="end_logit", use_bias=False)(embedding)
    end_logits = layers.Flatten()(end_logits)
    start_probs = layers.Activation(keras.activations.softmax)(start_logits)
    end_probs = layers.Activation(keras.activations.softmax)(end_logits)
    model = keras.Model(
        inputs=[input_ids, token_type_ids, attention_mask],
        outputs=[start_probs, end_probs],
    )
    # Outputs are already softmax probabilities, hence from_logits=False.
    loss = keras.losses.SparseCategoricalCrossentropy(from_logits=False)
    # `learning_rate` replaces the deprecated `lr` keyword argument.
    optimizer = keras.optimizers.Adam(learning_rate=5e-5)
    model.compile(optimizer=optimizer, loss=[loss, loss])
    return model
"""
This code should preferably be run on Google Colab TPU runtime.
With Colab TPUs, each epoch will take 5-6 minutes.
"""
# Toggle for Colab TPU training; set False to build the model on CPU/GPU.
use_tpu = True
if use_tpu:
    # Create distribution strategy (resolver assumes a Colab-style TPU
    # runtime is attached -- TODO confirm before running elsewhere).
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
    # Create model inside the strategy scope so its weights are placed on
    # the TPU replicas.
    with strategy.scope():
        model = create_model()
else:
    model = create_model()
model.summary()
"""
## Create evaluation Callback
This callback will compute the exact match score using the validation data
after every epoch.
"""
def normalize_text(text):
    """Normalize an answer string for exact-match comparison.

    Lower-cases, strips all punctuation, removes the articles a/an/the,
    and collapses whitespace runs to single spaces.
    """
    lowered = text.lower()
    # Strip every punctuation character in one translation pass.
    no_punct = lowered.translate(str.maketrans("", "", string.punctuation))
    # Drop standalone articles (word-boundary anchored).
    no_articles = re.sub(r"\b(a|an|the)\b", " ", no_punct, flags=re.UNICODE)
    # Collapse whitespace.
    return " ".join(no_articles.split())
class ExactMatch(keras.callbacks.Callback):
    """
    Each `SquadExample` object contains the character level offsets for each token
    in its input paragraph. We use them to get back the span of text corresponding
    to the tokens between our predicted start and end tokens.
    All the ground-truth answers are also present in each `SquadExample` object.
    We calculate the percentage of data points where the span of text obtained
    from model predictions matches one of the ground-truth answers.
    """
    def __init__(self, x_eval, y_eval):
        self.x_eval = x_eval
        self.y_eval = y_eval
    def on_epoch_end(self, epoch, logs=None):
        # Predicted start/end probability distributions, one row per example.
        pred_start, pred_end = self.model.predict(self.x_eval)
        count = 0
        # NOTE(review): relies on the module-level `eval_squad_examples`
        # having the same order as the rows of self.x_eval -- both come from
        # create_inputs_targets, which preserves input order.
        eval_examples_no_skip = [_ for _ in eval_squad_examples if _.skip == False]
        for idx, (start, end) in enumerate(zip(pred_start, pred_end)):
            squad_eg = eval_examples_no_skip[idx]
            offsets = squad_eg.context_token_to_char
            # Most likely start/end token positions (argmax over the sequence).
            start = np.argmax(start)
            end = np.argmax(end)
            if start >= len(offsets):
                # Predicted start falls in the question/padding region; this
                # counts as a miss since the denominator below is fixed.
                continue
            pred_char_start = offsets[start][0]
            if end < len(offsets):
                pred_char_end = offsets[end][1]
                pred_ans = squad_eg.context[pred_char_start:pred_char_end]
            else:
                # End token beyond the context: take everything to the end.
                pred_ans = squad_eg.context[pred_char_start:]
            # Exact match after SQuAD-style normalization, against any of the
            # ground-truth answers.
            normalized_pred_ans = normalize_text(pred_ans)
            normalized_true_ans = [normalize_text(_) for _ in squad_eg.all_answers]
            if normalized_pred_ans in normalized_true_ans:
                count += 1
        acc = count / len(self.y_eval[0])
        print(f"\nepoch={epoch+1}, exact match score={acc:.2f}")
"""
## Train and Evaluate
"""
# Train, reporting the exact-match score on the dev split after each epoch.
exact_match_callback = ExactMatch(x_eval, y_eval)
model.fit(
    x_train,
    y_train,
    epochs=1,  # For demonstration, 3 epochs are recommended
    verbose=2,
    batch_size=64,
    callbacks=[exact_match_callback],
)
|
<filename>examples/nlp/text_extraction_with_bert.py
"""
Title: BERT (from HuggingFace Transformers) for Text Extraction
Author: [<NAME>](https://twitter.com/NandanApoorv)
Date created: 2020/05/23
Last modified: 2020/05/23
Description: Fine tune pretrained BERT from HuggingFace Transformers on SQuAD.
"""
"""
## Introduction
This demonstration uses SQuAD (Stanford Question-Answering Dataset).
In SQuAD, an input consists of a question, and a paragraph for context.
The goal is to find the span of text in the paragraph that answers the question.
We evaluate our performance on this data with the "Exact Match" metric,
which measures the percentage of predictions that exactly match any one of the
ground-truth answers.
We fine-tune a BERT model to perform this task as follows:
1. Feed the context and the question as inputs to BERT.
2. Take two vectors S and T with dimensions equal to that of
hidden states in BERT.
3. Compute the probability of each token being the start and end of
the answer span. The probability of a token being the start of
the answer is given by a dot product between S and the representation
of the token in the last layer of BERT, followed by a softmax over all tokens.
The probability of a token being the end of the answer is computed
similarly with the vector T.
4. Fine-tune BERT and learn S and T along the way.
**References:**
- [BERT](https://arxiv.org/pdf/1810.04805.pdf)
- [SQuAD](https://arxiv.org/abs/1606.05250)
"""
"""
## Setup
"""
import os
import re
import json
import string
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tokenizers import BertWordPieceTokenizer
from transformers import BertTokenizer, TFBertModel, BertConfig
max_len = 384
configuration = BertConfig() # default parameters and configuration for BERT
"""
## Set-up BERT tokenizer
"""
# Save the slow pretrained tokenizer
slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
save_path = "bert_base_uncased/"
if not os.path.exists(save_path):
os.makedirs(save_path)
slow_tokenizer.save_pretrained(save_path)
# Load the fast tokenizer from saved file
tokenizer = BertWordPieceTokenizer("bert_base_uncased/vocab.txt", lowercase=True)
"""
## Load the data
"""
train_data_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json"
train_path = keras.utils.get_file("train.json", train_data_url)
eval_data_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"
eval_path = keras.utils.get_file("eval.json", eval_data_url)
"""
## Preprocess the data
1. Go through the JSON file and store every record as a `SquadExample` object.
2. Go through each `SquadExample` and create `x_train, y_train, x_eval, y_eval`.
"""
class SquadExample:
def __init__(self, question, context, start_char_idx, answer_text, all_answers):
self.question = question
self.context = context
self.start_char_idx = start_char_idx
self.answer_text = answer_text
self.all_answers = all_answers
self.skip = False
def preprocess(self):
context = self.context
question = self.question
answer_text = self.answer_text
start_char_idx = self.start_char_idx
# Clean context, answer and question
context = " ".join(str(context).split())
question = " ".join(str(question).split())
answer = " ".join(str(answer_text).split())
# Find end character index of answer in context
end_char_idx = start_char_idx + len(answer)
if end_char_idx >= len(context):
self.skip = True
return
# Mark the character indexes in context that are in answer
is_char_in_ans = [0] * len(context)
for idx in range(start_char_idx, end_char_idx):
is_char_in_ans[idx] = 1
# Tokenize context
tokenized_context = tokenizer.encode(context)
# Find tokens that were created from answer characters
ans_token_idx = []
for idx, (start, end) in enumerate(tokenized_context.offsets):
if sum(is_char_in_ans[start:end]) > 0:
ans_token_idx.append(idx)
if len(ans_token_idx) == 0:
self.skip = True
return
# Find start and end token index for tokens from answer
start_token_idx = ans_token_idx[0]
end_token_idx = ans_token_idx[-1]
# Tokenize question
tokenized_question = tokenizer.encode(question)
# Create inputs
input_ids = tokenized_context.ids + tokenized_question.ids[1:]
token_type_ids = [0] * len(tokenized_context.ids) + [1] * len(
tokenized_question.ids[1:]
)
attention_mask = [1] * len(input_ids)
# Pad and create attention masks.
# Skip if truncation is needed
padding_length = max_len - len(input_ids)
if padding_length > 0: # pad
input_ids = input_ids + ([0] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
token_type_ids = token_type_ids + ([0] * padding_length)
elif padding_length < 0: # skip
self.skip = True
return
self.input_ids = input_ids
self.token_type_ids = token_type_ids
self.attention_mask = attention_mask
self.start_token_idx = start_token_idx
self.end_token_idx = end_token_idx
self.context_token_to_char = tokenized_context.offsets
with open(train_path) as f:
raw_train_data = json.load(f)
with open(eval_path) as f:
raw_eval_data = json.load(f)
def create_squad_examples(raw_data):
squad_examples = []
for item in raw_data["data"]:
for para in item["paragraphs"]:
context = para["context"]
for qa in para["qas"]:
question = qa["question"]
answer_text = qa["answers"][0]["text"]
all_answers = [_["text"] for _ in qa["answers"]]
start_char_idx = qa["answers"][0]["answer_start"]
squad_eg = SquadExample(
question, context, start_char_idx, answer_text, all_answers
)
squad_eg.preprocess()
squad_examples.append(squad_eg)
return squad_examples
def create_inputs_targets(squad_examples):
dataset_dict = {
"input_ids": [],
"token_type_ids": [],
"attention_mask": [],
"start_token_idx": [],
"end_token_idx": [],
}
for item in squad_examples:
if item.skip == False:
for key in dataset_dict:
dataset_dict[key].append(getattr(item, key))
for key in dataset_dict:
dataset_dict[key] = np.array(dataset_dict[key])
x = [
dataset_dict["input_ids"],
dataset_dict["token_type_ids"],
dataset_dict["attention_mask"],
]
y = [dataset_dict["start_token_idx"], dataset_dict["end_token_idx"]]
return x, y
train_squad_examples = create_squad_examples(raw_train_data)
x_train, y_train = create_inputs_targets(train_squad_examples)
print(f"{len(train_squad_examples)} training points created.")
eval_squad_examples = create_squad_examples(raw_eval_data)
x_eval, y_eval = create_inputs_targets(eval_squad_examples)
print(f"{len(eval_squad_examples)} evaluation points created.")
"""
Create the Question-Answering Model using BERT and Functional API
"""
def create_model():
## BERT encoder
encoder = TFBertModel.from_pretrained("bert-base-uncased")
## QA Model
input_ids = layers.Input(shape=(max_len,), dtype=tf.int32)
token_type_ids = layers.Input(shape=(max_len,), dtype=tf.int32)
attention_mask = layers.Input(shape=(max_len,), dtype=tf.int32)
embedding = encoder(
input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)[0]
start_logits = layers.Dense(1, name="start_logit", use_bias=False)(embedding)
start_logits = layers.Flatten()(start_logits)
end_logits = layers.Dense(1, name="end_logit", use_bias=False)(embedding)
end_logits = layers.Flatten()(end_logits)
start_probs = layers.Activation(keras.activations.softmax)(start_logits)
end_probs = layers.Activation(keras.activations.softmax)(end_logits)
model = keras.Model(
inputs=[input_ids, token_type_ids, attention_mask],
outputs=[start_probs, end_probs],
)
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=False)
optimizer = keras.optimizers.Adam(lr=5e-5)
model.compile(optimizer=optimizer, loss=[loss, loss])
return model
"""
This code should preferably be run on Google Colab TPU runtime.
With Colab TPUs, each epoch will take 5-6 minutes.
"""
use_tpu = True
if use_tpu:
# Create distribution strategy
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
# Create model
with strategy.scope():
model = create_model()
else:
model = create_model()
model.summary()
"""
## Create evaluation Callback
This callback will compute the exact match score using the validation data
after every epoch.
"""
def normalize_text(text):
text = text.lower()
# Remove punctuations
exclude = set(string.punctuation)
text = "".join(ch for ch in text if ch not in exclude)
# Remove articles
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
text = re.sub(regex, " ", text)
# Remove extra white space
text = " ".join(text.split())
return text
class ExactMatch(keras.callbacks.Callback):
"""
Each `SquadExample` object contains the character level offsets for each token
in its input paragraph. We use them to get back the span of text corresponding
to the tokens between our predicted start and end tokens.
All the ground-truth answers are also present in each `SquadExample` object.
We calculate the percentage of data points where the span of text obtained
from model predictions matches one of the ground-truth answers.
"""
def __init__(self, x_eval, y_eval):
self.x_eval = x_eval
self.y_eval = y_eval
def on_epoch_end(self, epoch, logs=None):
pred_start, pred_end = self.model.predict(self.x_eval)
count = 0
eval_examples_no_skip = [_ for _ in eval_squad_examples if _.skip == False]
for idx, (start, end) in enumerate(zip(pred_start, pred_end)):
squad_eg = eval_examples_no_skip[idx]
offsets = squad_eg.context_token_to_char
start = np.argmax(start)
end = np.argmax(end)
if start >= len(offsets):
continue
pred_char_start = offsets[start][0]
if end < len(offsets):
pred_char_end = offsets[end][1]
pred_ans = squad_eg.context[pred_char_start:pred_char_end]
else:
pred_ans = squad_eg.context[pred_char_start:]
normalized_pred_ans = normalize_text(pred_ans)
normalized_true_ans = [normalize_text(_) for _ in squad_eg.all_answers]
if normalized_pred_ans in normalized_true_ans:
count += 1
acc = count / len(self.y_eval[0])
print(f"\nepoch={epoch+1}, exact match score={acc:.2f}")
"""
## Train and Evaluate
"""
exact_match_callback = ExactMatch(x_eval, y_eval)
model.fit(
x_train,
y_train,
epochs=1, # For demonstration, 3 epochs are recommended
verbose=2,
batch_size=64,
callbacks=[exact_match_callback],
)
|
en
| 0.849307
|
Title: BERT (from HuggingFace Transformers) for Text Extraction Author: [<NAME>](https://twitter.com/NandanApoorv) Date created: 2020/05/23 Last modified: 2020/05/23 Description: Fine tune pretrained BERT from HuggingFace Transformers on SQuAD. ## Introduction This demonstration uses SQuAD (Stanford Question-Answering Dataset). In SQuAD, an input consists of a question, and a paragraph for context. The goal is to find the span of text in the paragraph that answers the question. We evaluate our performance on this data with the "Exact Match" metric, which measures the percentage of predictions that exactly match any one of the ground-truth answers. We fine-tune a BERT model to perform this task as follows: 1. Feed the context and the question as inputs to BERT. 2. Take two vectors S and T with dimensions equal to that of hidden states in BERT. 3. Compute the probability of each token being the start and end of the answer span. The probability of a token being the start of the answer is given by a dot product between S and the representation of the token in the last layer of BERT, followed by a softmax over all tokens. The probability of a token being the end of the answer is computed similarly with the vector T. 4. Fine-tune BERT and learn S and T along the way. **References:** - [BERT](https://arxiv.org/pdf/1810.04805.pdf) - [SQuAD](https://arxiv.org/abs/1606.05250) ## Setup # default parameters and configuration for BERT ## Set-up BERT tokenizer # Save the slow pretrained tokenizer # Load the fast tokenizer from saved file ## Load the data ## Preprocess the data 1. Go through the JSON file and store every record as a `SquadExample` object. 2. Go through each `SquadExample` and create `x_train, y_train, x_eval, y_eval`. 
# Clean context, answer and question # Find end character index of answer in context # Mark the character indexes in context that are in answer # Tokenize context # Find tokens that were created from answer characters # Find start and end token index for tokens from answer # Tokenize question # Create inputs # Pad and create attention masks. # Skip if truncation is needed # pad # skip Create the Question-Answering Model using BERT and Functional API ## BERT encoder ## QA Model This code should preferably be run on Google Colab TPU runtime. With Colab TPUs, each epoch will take 5-6 minutes. # Create distribution strategy # Create model ## Create evaluation Callback This callback will compute the exact match score using the validation data after every epoch. # Remove punctuations # Remove articles # Remove extra white space Each `SquadExample` object contains the character level offsets for each token in its input paragraph. We use them to get back the span of text corresponding to the tokens between our predicted start and end tokens. All the ground-truth answers are also present in each `SquadExample` object. We calculate the percentage of data points where the span of text obtained from model predictions matches one of the ground-truth answers. ## Train and Evaluate # For demonstration, 3 epochs are recommended
| 2.791757
| 3
|
projects/capstone/open_projects/robot_motion_planning/showmaze.py
|
anandsaha/ml-nanodegree
| 2
|
6627796
|
from maze import Maze
import turtle
import sys
if __name__ == '__main__':
    '''
    This function uses Python's turtle library to draw a picture of the maze
    given as an argument when running the script.
    '''
    # Build the maze from the file named as the first CLI argument.
    testmaze = Maze( str(sys.argv[1]) )
    # Initialize the window and a hidden, fastest-speed drawing turtle.
    window = turtle.Screen()
    pen = turtle.Turtle()
    pen.speed(0)
    pen.hideturtle()
    pen.penup()
    # Maze is centered on (0, 0); each square is sq_size units on a side.
    sq_size = 20
    origin = testmaze.dim * sq_size / -2

    def draw_wall(x, y, heading):
        """Draw one wall segment of length sq_size from (x, y) along heading."""
        pen.goto(x, y)
        pen.setheading(heading)
        pen.pendown()
        pen.forward(sq_size)
        pen.penup()

    # Walk every cell and draw a wall for each blocked direction.
    for x in range(testmaze.dim):
        for y in range(testmaze.dim):
            if not testmaze.is_permissible([x, y], 'up'):
                draw_wall(origin + sq_size * x, origin + sq_size * (y + 1), 0)
            if not testmaze.is_permissible([x, y], 'right'):
                draw_wall(origin + sq_size * (x + 1), origin + sq_size * y, 90)
            # Bottom walls only need checking on the lowest row...
            if y == 0 and not testmaze.is_permissible([x, y], 'down'):
                draw_wall(origin + sq_size * x, origin, 0)
            # ...and left walls only on the leftmost column.
            if x == 0 and not testmaze.is_permissible([x, y], 'left'):
                draw_wall(origin, origin + sq_size * y, 90)
    window.exitonclick()
|
from maze import Maze
import turtle
import sys
if __name__ == '__main__':
'''
This function uses Python's turtle library to draw a picture of the maze
given as an argument when running the script.
'''
# Create a maze based on input argument on command line.
testmaze = Maze( str(sys.argv[1]) )
# Intialize the window and drawing turtle.
window = turtle.Screen()
wally = turtle.Turtle()
wally.speed(0)
wally.hideturtle()
wally.penup()
# maze centered on (0,0), squares are 20 units in length.
sq_size = 20
origin = testmaze.dim * sq_size / -2
# iterate through squares one by one to decide where to draw walls
for x in range(testmaze.dim):
for y in range(testmaze.dim):
if not testmaze.is_permissible([x,y], 'up'):
wally.goto(origin + sq_size * x, origin + sq_size * (y+1))
wally.setheading(0)
wally.pendown()
wally.forward(sq_size)
wally.penup()
if not testmaze.is_permissible([x,y], 'right'):
wally.goto(origin + sq_size * (x+1), origin + sq_size * y)
wally.setheading(90)
wally.pendown()
wally.forward(sq_size)
wally.penup()
# only check bottom wall if on lowest row
if y == 0 and not testmaze.is_permissible([x,y], 'down'):
wally.goto(origin + sq_size * x, origin)
wally.setheading(0)
wally.pendown()
wally.forward(sq_size)
wally.penup()
# only check left wall if on leftmost column
if x == 0 and not testmaze.is_permissible([x,y], 'left'):
wally.goto(origin, origin + sq_size * y)
wally.setheading(90)
wally.pendown()
wally.forward(sq_size)
wally.penup()
window.exitonclick()
|
en
| 0.85433
|
This function uses Python's turtle library to draw a picture of the maze given as an argument when running the script. # Create a maze based on input argument on command line. # Intialize the window and drawing turtle. # maze centered on (0,0), squares are 20 units in length. # iterate through squares one by one to decide where to draw walls # only check bottom wall if on lowest row # only check left wall if on leftmost column
| 4.22191
| 4
|
tests/app/plugins/only-endpoint/__init__.py
|
dumpmemory/flask-plugin
| 29
|
6627797
|
<filename>tests/app/plugins/only-endpoint/__init__.py
from src import Plugin
# Minimal fixture plugin: registers an endpoint handler first, then binds
# the URL rule to it by endpoint name (instead of using a route decorator).
plugin = Plugin()
@plugin.endpoint('index')
def index():
    # Plain-text body checked by the test suite.
    return 'index'
plugin.add_url_rule('/', endpoint='index', methods=['GET'])
|
<filename>tests/app/plugins/only-endpoint/__init__.py
from src import Plugin
plugin = Plugin()
@plugin.endpoint('index')
def index():
return 'index'
plugin.add_url_rule('/', endpoint='index', methods=['GET'])
|
none
| 1
| 1.69382
| 2
|
|
midonet/neutron/tests/unit/neutronclient_ext/test_cli20.py
|
NeCTAR-RC/networking-midonet
| 0
|
6627798
|
<gh_stars>0
# Copyright (C) 2016 <NAME>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
TOKEN = test_cli20.TOKEN
class MyResp(test_cli20.MyResp):
    # Local alias of the upstream fake HTTP response; subclassed here
    # presumably so this module can extend it later -- no behavior is added.
    pass
class MyApp(test_cli20.MyApp):
    # Local alias of the upstream fake CLI application shim.
    pass
class MyComparator(test_cli20.MyComparator):
    # Local alias of the upstream request-body comparator used when
    # asserting mocked HTTP requests below.
    pass
class CLIExtTestV20Base(test_cli20.CLITestV20Base):
    """Base class for neutronclient extension CLI tests.

    Provides helpers that run an update/show/delete extension command
    through the neutronclient shell with the HTTP layer mocked out, then
    assert the issued request (URL, method, body, auth header) and the
    CLI output.
    """
    def setUp(self, plurals=None):
        super(CLIExtTestV20Base, self).setUp(plurals=plurals)
    def _setup_mock_patch(self, name):
        # Start a patch and hand back the mock. NOTE(review): the patcher
        # is never stopped here -- presumably cleanup is handled by the
        # test base class; confirm so patches don't leak between tests.
        patcher = mock.patch(name)
        thing = patcher.start()
        return thing
    def _mock_load_extensions(self, resource):
        # Force client extension discovery to yield only `resource`.
        load_method = ('neutronclient.common.extension.' +
                       '_discover_via_entry_points')
        load_ext_mock = self._setup_mock_patch(load_method)
        load_ext_mock.return_value = [resource]
        return load_ext_mock
    def _test_update_ext_resource(self, resource, cmd, myid, args,
                                  extrafields,
                                  cmd_resource=None, parent_id=None):
        """Run an update command and assert it issues the expected PUT."""
        if not cmd_resource:
            cmd_resource = resource
        body = {resource: extrafields}
        # Resolve the URL template from the client, filling in the parent
        # id (if any) and then the resource id.
        path = getattr(self.client, cmd_resource + "_path")
        if parent_id:
            path = path % parent_id
        path = path % myid
        mock_body = MyComparator(body, self.client)
        resp = (MyResp(204), None)
        cmd_parser = cmd.get_parser("update_" + cmd_resource)
        with mock.patch.object(cmd, 'get_client',
                               return_value=self.client)as mock_get_client, \
                mock.patch.object(self.client.httpclient, 'request',
                                  return_value=resp) as mock_request:
            shell.run_command(cmd, cmd_parser, args)
        self.assert_mock_multiple_calls_with_same_arguments(
            mock_get_client, mock.call(), None)
        mock_request.assert_called_once_with(
            test_cli20.end_url(path),
            'PUT',
            body=mock_body,
            headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
        # The command should echo the resource id on stdout.
        _str = self.fake_stdout.make_string()
        self.assertIn(myid, _str)
    def _test_show_ext_resource(self, resource, cmd, myid, args, fields=(),
                                cmd_resource=None, parent_id=None):
        """Run a show command and assert it issues the expected GET."""
        if not cmd_resource:
            cmd_resource = resource
        # Optional field selection becomes a "fields=..." query string.
        query = "&".join(["fields=%s" % field for field in fields])
        expected_res = {resource:
                        {self.id_field: myid,
                         'name': 'myname', }, }
        resstr = self.client.serialize(expected_res)
        path = getattr(self.client, cmd_resource + "_path")
        if parent_id:
            path = path % parent_id
        path = path % myid
        cmd_parser = cmd.get_parser("show_" + cmd_resource)
        resp = (MyResp(200), resstr)
        with mock.patch.object(cmd, 'get_client',
                               return_value=self.client)as mock_get_client, \
                mock.patch.object(self.client.httpclient, 'request',
                                  return_value=resp) as mock_request:
            shell.run_command(cmd, cmd_parser, args)
        self.assert_mock_multiple_calls_with_same_arguments(
            mock_get_client, mock.call(), None)
        mock_request.assert_called_once_with(
            test_cli20.end_url(path, query),
            'GET',
            body=None,
            headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
        # Both the id and the canned name should appear in the CLI output.
        _str = self.fake_stdout.make_string()
        self.assertIn(myid, _str)
        self.assertIn('myname', _str)
    def _test_delete_ext_resource(self, resource, cmd, myid, args,
                                  cmd_resource=None, parent_id=None):
        """Run a delete command and assert it issues the expected DELETE."""
        if not cmd_resource:
            cmd_resource = resource
        path = getattr(self.client, cmd_resource + "_path")
        if parent_id:
            path = path % parent_id
        path = path % myid
        cmd_parser = cmd.get_parser("delete_" + cmd_resource)
        resp = (MyResp(204), None)
        with mock.patch.object(cmd, 'get_client',
                               return_value=self.client)as mock_get_client, \
                mock.patch.object(self.client.httpclient, 'request',
                                  return_value=resp) as mock_request:
            shell.run_command(cmd, cmd_parser, args)
        self.assert_mock_multiple_calls_with_same_arguments(
            mock_get_client, mock.call(), None)
        mock_request.assert_called_once_with(
            test_cli20.end_url(path),
            'DELETE',
            body=None,
            headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
        _str = self.fake_stdout.make_string()
        self.assertIn(myid, _str)
|
# Copyright (C) 2016 <NAME>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
TOKEN = test_cli20.TOKEN
class MyResp(test_cli20.MyResp):
pass
class MyApp(test_cli20.MyApp):
pass
class MyComparator(test_cli20.MyComparator):
pass
class CLIExtTestV20Base(test_cli20.CLITestV20Base):
def setUp(self, plurals=None):
super(CLIExtTestV20Base, self).setUp(plurals=plurals)
def _setup_mock_patch(self, name):
patcher = mock.patch(name)
thing = patcher.start()
return thing
def _mock_load_extensions(self, resource):
load_method = ('neutronclient.common.extension.' +
'_discover_via_entry_points')
load_ext_mock = self._setup_mock_patch(load_method)
load_ext_mock.return_value = [resource]
return load_ext_mock
def _test_update_ext_resource(self, resource, cmd, myid, args,
extrafields,
cmd_resource=None, parent_id=None):
if not cmd_resource:
cmd_resource = resource
body = {resource: extrafields}
path = getattr(self.client, cmd_resource + "_path")
if parent_id:
path = path % parent_id
path = path % myid
mock_body = MyComparator(body, self.client)
resp = (MyResp(204), None)
cmd_parser = cmd.get_parser("update_" + cmd_resource)
with mock.patch.object(cmd, 'get_client',
return_value=self.client)as mock_get_client, \
mock.patch.object(self.client.httpclient, 'request',
return_value=resp) as mock_request:
shell.run_command(cmd, cmd_parser, args)
self.assert_mock_multiple_calls_with_same_arguments(
mock_get_client, mock.call(), None)
mock_request.assert_called_once_with(
test_cli20.end_url(path),
'PUT',
body=mock_body,
headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
_str = self.fake_stdout.make_string()
self.assertIn(myid, _str)
def _test_show_ext_resource(self, resource, cmd, myid, args, fields=(),
cmd_resource=None, parent_id=None):
if not cmd_resource:
cmd_resource = resource
query = "&".join(["fields=%s" % field for field in fields])
expected_res = {resource:
{self.id_field: myid,
'name': 'myname', }, }
resstr = self.client.serialize(expected_res)
path = getattr(self.client, cmd_resource + "_path")
if parent_id:
path = path % parent_id
path = path % myid
cmd_parser = cmd.get_parser("show_" + cmd_resource)
resp = (MyResp(200), resstr)
with mock.patch.object(cmd, 'get_client',
return_value=self.client)as mock_get_client, \
mock.patch.object(self.client.httpclient, 'request',
return_value=resp) as mock_request:
shell.run_command(cmd, cmd_parser, args)
self.assert_mock_multiple_calls_with_same_arguments(
mock_get_client, mock.call(), None)
mock_request.assert_called_once_with(
test_cli20.end_url(path, query),
'GET',
body=None,
headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
_str = self.fake_stdout.make_string()
self.assertIn(myid, _str)
self.assertIn('myname', _str)
def _test_delete_ext_resource(self, resource, cmd, myid, args,
                              cmd_resource=None, parent_id=None):
    """Run an extension-resource *delete* command and verify the DELETE call.

    Parameters as in :meth:`_test_update_ext_resource`.
    """
    if not cmd_resource:
        cmd_resource = resource
    path = getattr(self.client, cmd_resource + "_path")
    if parent_id:
        path = path % parent_id
    path = path % myid
    cmd_parser = cmd.get_parser("delete_" + cmd_resource)
    resp = (MyResp(204), None)  # 204: deleted, no response body
    with mock.patch.object(cmd, 'get_client',
                           return_value=self.client) as mock_get_client, \
            mock.patch.object(self.client.httpclient, 'request',
                              return_value=resp) as mock_request:
        shell.run_command(cmd, cmd_parser, args)
        self.assert_mock_multiple_calls_with_same_arguments(
            mock_get_client, mock.call(), None)
        mock_request.assert_called_once_with(
            test_cli20.end_url(path),
            'DELETE',
            body=None,
            headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
    _str = self.fake_stdout.make_string()
    # Deletion message should reference the deleted id.
    self.assertIn(myid, _str)
|
en
| 0.851946
|
# Copyright (C) 2016 <NAME> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
| 2.037127
| 2
|
Python3/no86_Partition_List.py
|
mistwave/leetcode
| 0
|
6627799
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """LeetCode 86 -- Partition List."""

    def partition(self, head, x):
        """Stably partition a linked list around the value ``x``.

        All nodes with value < x come first, followed by all nodes with
        value >= x; the relative order inside each group is preserved.

        :type head: ListNode
        :type x: int
        :rtype: ListNode
        """
        if head is None or head.next is None:
            return head
        # Thread the existing nodes onto two dummy-headed chains
        # (no new data nodes are created), then splice them together.
        small_dummy = ListNode(-1)
        large_dummy = ListNode(-1)
        small_tail = small_dummy
        large_tail = large_dummy
        node = head
        while node is not None:
            if node.val < x:
                small_tail.next = node
                small_tail = node
            else:
                large_tail.next = node
                large_tail = node
            node = node.next
        large_tail.next = None  # terminate the >= x chain
        small_tail.next = large_dummy.next
        return small_dummy.next
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def partition(self, head, x):
"""
:type head: ListNode
:type x: int
:rtype: ListNode
"""
if head is None or head.next is None:
return head
newhead = ListNode(-1)
newhead.next = head
pre = newhead
precur = newhead
cur = head
while cur is not None:
if cur.val >= x:
cur, precur = cur.next, precur.next
else: # cur.val < x
if pre.next is cur:
pre, cur, precur = pre.next, cur.next, precur.next
else: # pre.next is not cur, need to be replaced
# change the order
precur.next = cur.next
cur.next = pre.next
pre.next = cur
# continue the iteration
pre = pre.next
cur = precur.next
return newhead.next
|
en
| 0.593619
|
# Definition for singly-linked list. # class ListNode(object): # def __init__(self, x): # self.val = x # self.next = None :type head: ListNode :type x: int :rtype: ListNode # cur.val < x # pre.next is not cur, need to be replaced # change the order # continue the iteration
| 3.843296
| 4
|
models/Unet_nested/layers.py
|
emrecanaltinsoy/chromosome-semantic-segmentation
| 2
|
6627800
|
import torch
import torch.nn as nn
from models.Unet_nested.utils import init_weights
class unetConv2(nn.Module):
    """A stack of ``n`` Conv2d(+optional BatchNorm)+ReLU layers applied in order."""

    def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
        """Build conv1..convn.

        :param in_size: input channel count (only the first conv sees it)
        :param out_size: output channel count of every conv
        :param is_batchnorm: insert BatchNorm2d between conv and ReLU
        :param n: number of conv blocks
        :param ks: kernel size; ``stride``/``padding`` as for nn.Conv2d
        """
        super().__init__()
        self.n = n
        self.ks = ks
        self.stride = stride
        self.padding = padding
        s = stride
        p = padding
        # Register conv1..convn as attributes; after the first block the
        # channel count stays at out_size.
        for i in range(1, n + 1):
            if is_batchnorm:
                conv = nn.Sequential(
                    nn.Conv2d(in_size, out_size, ks, s, p),
                    nn.BatchNorm2d(out_size),
                    nn.ReLU(inplace=True),
                )
            else:
                conv = nn.Sequential(
                    nn.Conv2d(in_size, out_size, ks, s, p),
                    nn.ReLU(inplace=True),
                )
            setattr(self, "conv%d" % i, conv)
            in_size = out_size
        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type="kaiming")

    def forward(self, inputs):
        """Apply conv1..convn sequentially to *inputs*."""
        x = inputs
        for i in range(1, self.n + 1):
            conv = getattr(self, "conv%d" % i)
            x = conv(x)
        return x
class unetUp(nn.Module):
    """Upsample a high-level feature map and fuse it with skip features via conv."""

    def __init__(self, in_size, out_size, is_deconv, n_concat=2):
        """:param is_deconv: use transposed conv for upsampling instead of
        bilinear-upsample + 1x1 conv.
        :param n_concat: total number of feature maps concatenated before
        the conv (the upsampled one plus n_concat-1 skip connections).
        """
        super().__init__()
        # Conv input channels account for the extra concatenated skip maps.
        self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(
                in_size, out_size, kernel_size=2, stride=2, padding=0
            )
        else:
            self.up = nn.Sequential(
                nn.UpsamplingBilinear2d(scale_factor=2), nn.Conv2d(in_size, out_size, 1)
            )
        # initialise the blocks (unetConv2 initialises itself already)
        for m in self.children():
            if m.__class__.__name__.find("unetConv2") != -1:
                continue
            init_weights(m, init_type="kaiming")

    def forward(self, high_feature, *low_feature):
        """Upsample *high_feature*, concat all *low_feature* maps, convolve."""
        outputs0 = self.up(high_feature)
        for feature in low_feature:
            outputs0 = torch.cat([outputs0, feature], 1)
        return self.conv(outputs0)
|
import torch
import torch.nn as nn
from models.Unet_nested.utils import init_weights
class unetConv2(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
super().__init__()
self.n = n
self.ks = ks
self.stride = stride
self.padding = padding
s = stride
p = padding
for i in range(1, n + 1):
if is_batchnorm:
conv = nn.Sequential(
nn.Conv2d(in_size, out_size, ks, s, p),
nn.BatchNorm2d(out_size),
nn.ReLU(inplace=True),
)
else:
conv = nn.Sequential(
nn.Conv2d(in_size, out_size, ks, s, p),
nn.ReLU(inplace=True),
)
setattr(self, "conv%d" % i, conv)
in_size = out_size
# initialise the blocks
for m in self.children():
init_weights(m, init_type="kaiming")
def forward(self, inputs):
x = inputs
for i in range(1, self.n + 1):
conv = getattr(self, "conv%d" % i)
x = conv(x)
return x
class unetUp(nn.Module):
def __init__(self, in_size, out_size, is_deconv, n_concat=2):
super().__init__()
self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
if is_deconv:
self.up = nn.ConvTranspose2d(
in_size, out_size, kernel_size=2, stride=2, padding=0
)
else:
self.up = nn.Sequential(
nn.UpsamplingBilinear2d(scale_factor=2), nn.Conv2d(in_size, out_size, 1)
)
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find("unetConv2") != -1:
continue
init_weights(m, init_type="kaiming")
def forward(self, high_feature, *low_feature):
outputs0 = self.up(high_feature)
for feature in low_feature:
outputs0 = torch.cat([outputs0, feature], 1)
return self.conv(outputs0)
|
en
| 0.411353
|
# initialise the blocks # initialise the blocks
| 2.427201
| 2
|
cras/tools/create_volume_curve.py
|
mtk09422/chromiumos-third_party-adhd
| 1
|
6627801
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
def GenerateSimpleStep(name, max_volume, step_size):
    """Print a [name] config section for a fixed-step volume curve.

    NOTE: this file is Python 2 (print statements, raw_input).
    """
    print '[%s]' % name
    print ' ; Generated by create_volume_curve.py'
    print ' ; simple_step curve, max %d, step %d' % (max_volume, step_size)
    print ' volume_curve = simple_step'
    print ' volume_step = %d' % step_size
    print ' max_volume = %d' % max_volume
def WriteExplicitCurveVal(step, value):
    """Print one 'db_at_<step> = <value>' line of an explicit curve."""
    print ' db_at_%d = %d' % (step, value)
def GenerateExplicit(name):
    """Print an explicit curve section, prompting the user for each of the
    101 levels (steps 100 down to 0)."""
    print '[%s]' % name
    print ' ; Generated by create_volume_curve.py'
    print ' ; explicit curve'
    print ' volume_curve = explicit'
    # Prompt for steps 100 down to 1.
    for i in range(100):
        print 'Level at step %d:' % (100 - i)
        level = int(raw_input(">"))
        WriteExplicitCurveVal(100 - i, level)
    # Step 0 is prompted separately.
    print 'Level at step 0:'
    level = int(raw_input(">"))
    WriteExplicitCurveVal(0, level)
def GenerateTwoSlope(name, max_volume, step_1, step_2, pivot_point):
    """Print an explicit curve built from two linear segments.

    Steps 100..pivot_point decrease by step_1 per step; steps below the
    pivot decrease by step_2 per step, down to step 0.
    """
    print '[%s]' % name
    print ' ; Generated by create_volume_curve.py'
    print (' ; two_slope, max = %d, pivot = %d, steps %d, %d' %
           (max_volume, pivot_point, step_1, step_2))
    print ' volume_curve = explicit'
    # Upper segment: slope step_1 from step 100 down to (but excluding) pivot.
    for i in range(0, (100 - pivot_point)):
        WriteExplicitCurveVal(100 - i, max_volume - step_1 * i)
    pivot_dB_val = max_volume - step_1 * (100 - pivot_point)
    WriteExplicitCurveVal(pivot_point, max_volume - step_1 * (100 - pivot_point))
    # Lower segment: slope step_2 from just below the pivot down to step 1.
    for i in range(1, pivot_point):
        WriteExplicitCurveVal(pivot_point - i, pivot_dB_val - step_2 * i)
    WriteExplicitCurveVal(0, pivot_dB_val - pivot_point * step_2)
def main():
    """Interactively ask for a curve type and its parameters, then print
    the generated config section to stdout."""
    print 'What is the name of the jack or output to generate a curve for?'
    jack_name = raw_input(">");
    print 'Which type of curve? (simple_step, explicit, two_slope): '
    curve_type = raw_input(">");
    if curve_type == 'simple_step':
        print 'max volume (dBFS * 100):'
        max_volume = int(raw_input(">"))
        print 'step size (in dBFS * 100)'
        step_size = int(raw_input(">"))
        GenerateSimpleStep(jack_name, max_volume, step_size)
    elif curve_type == 'explicit':
        GenerateExplicit(jack_name)
    elif curve_type == 'two_slope':
        print 'max volume (dBFS * 100):'
        max_volume = int(raw_input(">"))
        print 'Volume step where slope changes:'
        pivot_point = int(raw_input(">"))
        print 'step size 100 to %d(in dBFS * 100)' % pivot_point
        step_1 = int(raw_input(">"))
        print 'step size %d to 0(in dBFS * 100)' % pivot_point
        step_2 = int(raw_input(">"))
        GenerateTwoSlope(jack_name, max_volume, step_1, step_2, pivot_point)
    # NOTE(review): an unrecognized curve type falls through silently.

if __name__ == '__main__':
    main()
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
def GenerateSimpleStep(name, max_volume, step_size):
print '[%s]' % name
print ' ; Generated by create_volume_curve.py'
print ' ; simple_step curve, max %d, step %d' % (max_volume, step_size)
print ' volume_curve = simple_step'
print ' volume_step = %d' % step_size
print ' max_volume = %d' % max_volume
def WriteExplicitCurveVal(step, value):
print ' db_at_%d = %d' % (step, value)
def GenerateExplicit(name):
print '[%s]' % name
print ' ; Generated by create_volume_curve.py'
print ' ; explicit curve'
print ' volume_curve = explicit'
for i in range(100):
print 'Level at step %d:' % (100 - i)
level = int(raw_input(">"))
WriteExplicitCurveVal(100 - i, level)
print 'Level at step 0:'
level = int(raw_input(">"))
WriteExplicitCurveVal(0, level)
def GenerateTwoSlope(name, max_volume, step_1, step_2, pivot_point):
print '[%s]' % name
print ' ; Generated by create_volume_curve.py'
print (' ; two_slope, max = %d, pivot = %d, steps %d, %d' %
(max_volume, pivot_point, step_1, step_2))
print ' volume_curve = explicit'
for i in range(0, (100 - pivot_point)):
WriteExplicitCurveVal(100 - i, max_volume - step_1 * i)
pivot_dB_val = max_volume - step_1 * (100 - pivot_point)
WriteExplicitCurveVal(pivot_point, max_volume - step_1 * (100 - pivot_point))
for i in range(1, pivot_point):
WriteExplicitCurveVal(pivot_point - i, pivot_dB_val - step_2 * i)
WriteExplicitCurveVal(0, pivot_dB_val - pivot_point * step_2)
def main():
print 'What is the name of the jack or output to generate a curve for?'
jack_name = raw_input(">");
print 'Which type of curve? (simple_step, explicit, two_slope): '
curve_type = raw_input(">");
if curve_type == 'simple_step':
print 'max volume (dBFS * 100):'
max_volume = int(raw_input(">"))
print 'step size (in dBFS * 100)'
step_size = int(raw_input(">"))
GenerateSimpleStep(jack_name, max_volume, step_size)
elif curve_type == 'explicit':
GenerateExplicit(jack_name)
elif curve_type == 'two_slope':
print 'max volume (dBFS * 100):'
max_volume = int(raw_input(">"))
print 'Volume step where slope changes:'
pivot_point = int(raw_input(">"))
print 'step size 100 to %d(in dBFS * 100)' % pivot_point
step_1 = int(raw_input(">"))
print 'step size %d to 0(in dBFS * 100)' % pivot_point
step_2 = int(raw_input(">"))
GenerateTwoSlope(jack_name, max_volume, step_1, step_2, pivot_point)
if __name__ == '__main__':
main()
|
en
| 0.886373
|
#!/usr/bin/python # # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file.
| 2.733974
| 3
|
saleor/order/migrations/0037_auto_20180228_0450.py
|
dedhio/bellastore
| 3
|
6627802
|
<filename>saleor/order/migrations/0037_auto_20180228_0450.py
# Generated by Django 2.0.2 on 2018-02-28 10:50
from django.conf import settings
from django.db import migrations
import django_prices.models
class Migration(migrations.Migration):
    # Splits Order shipping price into gross/net: renames the existing
    # shipping_price column to shipping_price_gross and adds a matching
    # shipping_price_net column with the same money-field definition.

    dependencies = [("order", "0036_remove_order_total_tax")]

    operations = [
        migrations.RenameField(
            model_name="order",
            old_name="shipping_price",
            new_name="shipping_price_gross",
        ),
        migrations.AlterField(
            model_name="order",
            name="shipping_price_gross",
            field=django_prices.models.MoneyField(
                currency=settings.DEFAULT_CURRENCY,
                decimal_places=2,
                default=0,
                editable=False,
                max_digits=12,
            ),
        ),
        migrations.AddField(
            model_name="order",
            name="shipping_price_net",
            field=django_prices.models.MoneyField(
                currency=settings.DEFAULT_CURRENCY,
                decimal_places=2,
                default=0,
                editable=False,
                max_digits=12,
            ),
        ),
    ]
|
<filename>saleor/order/migrations/0037_auto_20180228_0450.py
# Generated by Django 2.0.2 on 2018-02-28 10:50
from django.conf import settings
from django.db import migrations
import django_prices.models
class Migration(migrations.Migration):
dependencies = [("order", "0036_remove_order_total_tax")]
operations = [
migrations.RenameField(
model_name="order",
old_name="shipping_price",
new_name="shipping_price_gross",
),
migrations.AlterField(
model_name="order",
name="shipping_price_gross",
field=django_prices.models.MoneyField(
currency=settings.DEFAULT_CURRENCY,
decimal_places=2,
default=0,
editable=False,
max_digits=12,
),
),
migrations.AddField(
model_name="order",
name="shipping_price_net",
field=django_prices.models.MoneyField(
currency=settings.DEFAULT_CURRENCY,
decimal_places=2,
default=0,
editable=False,
max_digits=12,
),
),
]
|
en
| 0.830012
|
# Generated by Django 2.0.2 on 2018-02-28 10:50
| 1.533122
| 2
|
actors/SampleActor.py
|
RobbieMcKinstry/simulation-skeleton
| 0
|
6627803
|
class Actor:
    """Minimal sample actor used by the simulation skeleton."""

    def say_hello(self):
        """Write the greeting ``Hello`` to standard output."""
        greeting = 'Hello'
        print(greeting)
|
class Actor:
def say_hello(self):
print('Hello')
|
none
| 1
| 2.042818
| 2
|
|
traffic_sign/syndata-generation/pb.py
|
chrmertz/synth_train_data
| 24
|
6627804
|
<gh_stars>10-100
"""
pb: Poisson Image Blending implemented by Python
"""
import numpy as np
from skimage import data, io
import scipy.sparse
from scipy.sparse import coo_matrix
import pyamg
import matplotlib.pyplot as plt
import pdb
def create_mask(img_mask, img_target, img_src, offset=(0, 0)):
    """Binarize the grayscale mask and crop mask/source to fit the target.

    Returns ``(mask, src, offset_adj)`` where ``mask`` is a float 0/1
    array whose outer border is forced to 0 (so neighbour lookups never
    need an edge check), ``src`` is the matching crop of ``img_src``, and
    ``offset_adj`` is ``offset`` clamped to non-negative components.
    """
    mask_h, mask_w = img_mask.shape
    tgt_h, tgt_w, _ = img_target.shape
    # Crop bounds so the (offset) mask region lies inside the target image.
    row0 = max(0, -offset[0])
    col0 = max(0, -offset[1])
    row1 = mask_h - max(mask_h + offset[0] - tgt_h, 0)
    col1 = mask_w - max(mask_w + offset[1] - tgt_w, 0)
    # Binarize: any strictly positive pixel joins the blend region.
    mask = (img_mask > 0).astype(np.float64)
    mask = mask[row0:row1, col0:col1]
    src = img_src[row0:row1, col0:col1]
    # Negative offset components were absorbed into the crop above.
    offset_adj = (max(offset[0], 0), max(offset[1], 0))
    # Zero the border of the cropped mask.
    mask[:, -1] = 0
    mask[:, 0] = 0
    mask[-1, :] = 0
    mask[0, :] = 0
    return mask, src, offset_adj
def get_gradient_sum(img, i, j, h, w):
    """Return the (negative) discrete Laplacian of *img* at pixel (i, j).

    Computes ``4*img[i,j]`` minus the four axis-aligned neighbours, per
    channel (3-vector for RGB). ``h`` and ``w`` are accepted for interface
    compatibility but unused; callers must guarantee (i, j) is interior.
    """
    grad = img[i, j] * 4
    # Subtract neighbours one by one, preserving the original fp order.
    for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        grad = grad - img[i + di, j + dj]
    return grad
def get_mixed_gradient_sum(img_src, img_target, i, j, h, w, ofs,
                           c=1.0):
    """Return the mixed-gradient sum at pixel (i, j) for Poisson blending.

    For each of the 4 axis-aligned neighbours, picks per channel whichever
    gradient has the larger magnitude: the source gradient ``fp`` (scaled
    by ``c`` for the comparison only) or the target gradient ``gp``.

    * 3D array for RGB
    c (>= 0): larger values make the *source* gradient win the comparison
    more often (the code tests ``abs(fp * c) > abs(gp)``; the original
    comment claimed the opposite).
    ``h``/``w`` are unused; (i, j) must be an interior pixel.
    """
    v_sum = np.array([0.0, 0.0, 0.0])
    nb = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])
    for kk in range(4):
        # Source-image gradient toward this neighbour.
        fp = img_src[i, j] - img_src[i + nb[kk, 0], j + nb[kk, 1]]
        # Target-image gradient at the offset-adjusted position.
        gp = img_target[i + ofs[0], j + ofs[1]] \
            - img_target[i + nb[kk, 0] + ofs[0], j + nb[kk, 1] + ofs[1]]
        # if np.linalg.norm(fp) > np.linalg.norm(gp):
        #     v_sum += fp
        # else:
        #     v_sum += gp
        # Per-channel selection of the stronger gradient.
        v_sum += np.array([fp[0] if abs(fp[0] * c) > abs(gp[0]) else gp[0],
                           fp[1] if abs(fp[1] * c) > abs(gp[1]) else gp[1],
                           fp[2] if abs(fp[2] * c) > abs(gp[2]) else gp[2]])
    return v_sum
def poisson_blend(img_mask, img_src, img_target, method='mix', c=1.0,
                  offset_adj=(0, 0)):
    """Blend ``img_src`` into ``img_target`` over the masked region.

    Assembles and solves one sparse linear system ``A f = F`` per colour
    channel (discrete Poisson equation with Dirichlet boundary values
    taken from the target image), then writes the solution back into a
    uint8 copy of the target.

    :param img_mask: 2D 0/1 mask (border must be 0, see create_mask)
    :param method: 'src' / 'target' copy pixels directly (the system
        degenerates to the identity); 'mix' uses the mixed gradient;
        anything else uses the pure source gradient.
    :param c: forwarded to get_mixed_gradient_sum when method == 'mix'
    :param offset_adj: non-negative offset of the mask region in the target
    :returns: uint8 image of the same shape as img_target
    """
    hm, wm = img_mask.shape
    region_size = hm * wm
    F = np.zeros((region_size, 3))  # right-hand side, one column per channel
    A = scipy.sparse.identity(region_size, format='lil')
    # Column-major linear index of pixel (i, j) inside the mask region.
    get_k = lambda i, j: i + j * hm
    # plane insertion: no Poisson solve needed, F holds the pixel values
    if method in ['target', 'src']:
        for i in range(hm):
            for j in range(wm):
                k = get_k(i, j)
                # ignore the edge case (# of neighboor is always 4)
                if img_mask[i, j] == 1:
                    if method == 'target':
                        F[k] = img_target[i + offset_adj[0], j + offset_adj[1]]
                    elif method == 'src':
                        F[k] = img_src[i, j]
                else:
                    F[k] = img_target[i + offset_adj[0], j + offset_adj[1]]
    # poisson blending
    else:
        if method == 'mix':
            grad_func = lambda ii, jj: get_mixed_gradient_sum(
                img_src, img_target, ii, jj, hm, wm, offset_adj, c=c)
        else:
            grad_func = lambda ii, jj: get_gradient_sum(
                img_src, ii, jj, hm, wm)
        for i in range(hm):
            for j in range(wm):
                k = get_k(i, j)
                # ignore the edge case (# of neighboor is always 4)
                if img_mask[i, j] == 1:
                    # f_star accumulates Dirichlet boundary values from
                    # neighbours that lie outside the mask.
                    f_star = np.array([0.0, 0.0, 0.0])
                    if img_mask[i - 1, j] == 1:
                        A[k, k - 1] = -1
                    else:
                        f_star += img_target[i - 1 +
                                             offset_adj[0], j + offset_adj[1]]
                    if img_mask[i + 1, j] == 1:
                        A[k, k + 1] = -1
                    else:
                        f_star += img_target[i + 1 +
                                             offset_adj[0], j + offset_adj[1]]
                    if img_mask[i, j - 1] == 1:
                        A[k, k - hm] = -1
                    else:
                        f_star += img_target[i +
                                             offset_adj[0], j - 1 + offset_adj[1]]
                    if img_mask[i, j + 1] == 1:
                        A[k, k + hm] = -1
                    else:
                        f_star += img_target[i +
                                             offset_adj[0], j + 1 + offset_adj[1]]
                    A[k, k] = 4  # 4-neighbour Laplacian diagonal
                    F[k] = grad_func(i, j) + f_star
                else:
                    # Outside the mask the row stays identity: f = target.
                    F[k] = img_target[i + offset_adj[0], j + offset_adj[1]]
    A = A.tocsr()  # CSR for the sparse solve
    img_pro = np.empty_like(img_target.astype(np.uint8))
    img_pro[:] = img_target.astype(np.uint8)
    # Solve each colour channel independently and clip to valid uint8 range.
    for l in range(3):
        # x = pyamg.solve(A, F[:, l], verb=True, tol=1e-15, maxiter=100)
        x = scipy.sparse.linalg.spsolve(A, F[:, l])
        x[x > 255] = 255
        x[x < 0] = 0
        x = np.array(x, img_pro.dtype)
        # order='F' matches the column-major get_k indexing above.
        img_pro[offset_adj[0]:offset_adj[0] + hm,
                offset_adj[1]:offset_adj[1] + wm, l]\
            = x.reshape(hm, wm, order='F')
    return img_pro
if __name__ == "__main__":
    # Demo driver: blend a test image into itself with a hard-coded mask
    # and offset, show the result and save it to disk.
    offset = (40, -30)
    # NOTE(review): hard-coded absolute path; `as_grey` is the old
    # scikit-image spelling (renamed `as_gray` later) -- confirm the
    # installed skimage version.
    img_mask = io.imread('/Users/ysakamoto/Projects/sccomp/mask.png', as_grey=True)
    img_src = io.imread('./testimages/0.png').astype(np.float64)
    img_target = io.imread('./testimages/0.png')
    # img_src = io.imread('./testimages/test1_src.png').astype(np.float64)
    # img_target = io.imread('./testimages/test1_target.png')
    # img_mask = io.imread('./testimages/test1_mask.png', as_grey=True)
    # resize src and mask images
    # import skimage.transform
    # from skimage import color
    # fac = 3
    # img_src = skimage.transform.resize(img_src, (np.array(img_src.shape)//fac)[:2])
    # img_mask = io.imread('/Users/ysakamoto/Desktop/mask.png', as_grey=True)
    # img_mask = skimage.transform.resize(img_mask, (np.array(img_mask.shape)//fac)[:2])
    # img_mask = color.rgb2grey(img_mask)
    img_mask, img_src, offset_adj \
        = create_mask(img_mask.astype(np.float64),
                      img_target, img_src, offset=offset)
    # 'normal' is not 'mix'/'src'/'target', so the pure source gradient is used.
    img_pro = poisson_blend(img_mask, img_src, img_target,
                            method='normal', offset_adj=offset_adj)
    plt.imshow(img_pro)
    plt.show()
    io.imsave('./testimages/poisson_normal.png', img_pro)
    import pdb
    # pdb.set_trace()
    # i=14
    # for c in np.linspace(10.0, 50.0, 5):
    #     i+=1
    #     img_pro = poisson_blend(img_mask, img_src, img_target, method='mix', c=c)
    #     plt.imshow(img_pro)
    #     plt.show()
    #     io.imsave('./testimages/poisson_mix_%d.png' %i, img_pro)
    # img_pro = poisson_blend(img_mask, img_src, img_target, method='src')
    # io.imsave('./testimages/poisson_src.png', img_pro)
    # img_pro = poisson_blend(img_mask, img_src, img_target, method='target')
    # io.imsave('./testimages/poisson_target.png', img_pro)
    # def plot_coo_matrix(m):
    #     if not isinstance(m, coo_matrix):
    #         m = coo_matrix(m)
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111, axisbg='white')
    #     ax.plot(m.col, m.row, 's', color='black', ms=1)
    #     ax.set_xlim(0, m.shape[1])
    #     ax.set_ylim(0, m.shape[0])
    #     ax.set_aspect('equal')
    #     for spine in ax.spines.values():
    #         spine.set_visible(False)
    #     ax.invert_yaxis()
    #     ax.set_aspect('equal')
    #     ax.set_xticks([])
    #     ax.set_yticks([])
    #     return ax
    # B = A.tocoo()
    # plot_coo_matrix(B)
    # plt.show()
|
"""
pb: Poisson Image Blending implemented by Python
"""
import numpy as np
from skimage import data, io
import scipy.sparse
from scipy.sparse import coo_matrix
import pyamg
import matplotlib.pyplot as plt
import pdb
def create_mask(img_mask, img_target, img_src, offset=(0, 0)):
'''
Takes the np.array from the grayscale image
'''
# crop img_mask and img_src to fit to the img_target
hm, wm = img_mask.shape
ht, wt, nl = img_target.shape
hd0 = max(0, -offset[0])
wd0 = max(0, -offset[1])
hd1 = hm - max(hm + offset[0] - ht, 0)
wd1 = wm - max(wm + offset[1] - wt, 0)
mask = np.zeros((hm, wm))
mask[img_mask > 0] = 1
mask[img_mask == 0] = 0
mask = mask[hd0:hd1, wd0:wd1]
src = img_src[hd0:hd1, wd0:wd1]
# fix offset
offset_adj = (max(offset[0], 0), max(offset[1], 0))
# remove edge from the mask so that we don't have to check the
# edge condition
mask[:, -1] = 0
mask[:, 0] = 0
mask[-1, :] = 0
mask[0, :] = 0
return mask, src, offset_adj
def get_gradient_sum(img, i, j, h, w):
"""
Return the sum of the gradient of the source imgae.
* 3D array for RGB
"""
v_sum = np.array([0.0, 0.0, 0.0])
v_sum = img[i, j] * 4 \
- img[i + 1, j] - img[i - 1, j] - img[i, j + 1] - img[i, j - 1]
return v_sum
def get_mixed_gradient_sum(img_src, img_target, i, j, h, w, ofs,
c=1.0):
"""
Return the sum of the gradient of the source imgae.
* 3D array for RGB
c(>=0): larger, the more important the target image gradient is
"""
v_sum = np.array([0.0, 0.0, 0.0])
nb = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])
for kk in range(4):
fp = img_src[i, j] - img_src[i + nb[kk, 0], j + nb[kk, 1]]
gp = img_target[i + ofs[0], j + ofs[1]] \
- img_target[i + nb[kk, 0] + ofs[0], j + nb[kk, 1] + ofs[1]]
# if np.linalg.norm(fp) > np.linalg.norm(gp):
# v_sum += fp
# else:
# v_sum += gp
v_sum += np.array([fp[0] if abs(fp[0] * c) > abs(gp[0]) else gp[0],
fp[1] if abs(fp[1] * c) > abs(gp[1]) else gp[1],
fp[2] if abs(fp[2] * c) > abs(gp[2]) else gp[2]])
return v_sum
def poisson_blend(img_mask, img_src, img_target, method='mix', c=1.0,
offset_adj=(0,0)):
hm, wm = img_mask.shape
region_size = hm * wm
F = np.zeros((region_size, 3))
A = scipy.sparse.identity(region_size, format='lil')
get_k = lambda i, j: i + j * hm
# plane insertion
if method in ['target', 'src']:
for i in range(hm):
for j in range(wm):
k = get_k(i, j)
# ignore the edge case (# of neighboor is always 4)
if img_mask[i, j] == 1:
if method == 'target':
F[k] = img_target[i + offset_adj[0], j + offset_adj[1]]
elif method == 'src':
F[k] = img_src[i, j]
else:
F[k] = img_target[i + offset_adj[0], j + offset_adj[1]]
# poisson blending
else:
if method == 'mix':
grad_func = lambda ii, jj: get_mixed_gradient_sum(
img_src, img_target, ii, jj, hm, wm, offset_adj, c=c)
else:
grad_func = lambda ii, jj: get_gradient_sum(
img_src, ii, jj, hm, wm)
for i in range(hm):
for j in range(wm):
k = get_k(i, j)
# ignore the edge case (# of neighboor is always 4)
if img_mask[i, j] == 1:
f_star = np.array([0.0, 0.0, 0.0])
if img_mask[i - 1, j] == 1:
A[k, k - 1] = -1
else:
f_star += img_target[i - 1 +
offset_adj[0], j + offset_adj[1]]
if img_mask[i + 1, j] == 1:
A[k, k + 1] = -1
else:
f_star += img_target[i + 1 +
offset_adj[0], j + offset_adj[1]]
if img_mask[i, j - 1] == 1:
A[k, k - hm] = -1
else:
f_star += img_target[i +
offset_adj[0], j - 1 + offset_adj[1]]
if img_mask[i, j + 1] == 1:
A[k, k + hm] = -1
else:
f_star += img_target[i +
offset_adj[0], j + 1 + offset_adj[1]]
A[k, k] = 4
F[k] = grad_func(i, j) + f_star
else:
F[k] = img_target[i + offset_adj[0], j + offset_adj[1]]
A = A.tocsr()
img_pro = np.empty_like(img_target.astype(np.uint8))
img_pro[:] = img_target.astype(np.uint8)
for l in range(3):
# x = pyamg.solve(A, F[:, l], verb=True, tol=1e-15, maxiter=100)
x = scipy.sparse.linalg.spsolve(A, F[:, l])
x[x > 255] = 255
x[x < 0] = 0
x = np.array(x, img_pro.dtype)
img_pro[offset_adj[0]:offset_adj[0] + hm,
offset_adj[1]:offset_adj[1] + wm, l]\
= x.reshape(hm, wm, order='F')
return img_pro
if __name__ == "__main__":
offset = (40, -30)
img_mask = io.imread('/Users/ysakamoto/Projects/sccomp/mask.png', as_grey=True)
img_src = io.imread('./testimages/0.png').astype(np.float64)
img_target = io.imread('./testimages/0.png')
# img_src = io.imread('./testimages/test1_src.png').astype(np.float64)
# img_target = io.imread('./testimages/test1_target.png')
# img_mask = io.imread('./testimages/test1_mask.png', as_grey=True)
# resize src and mask images
# import skimage.transform
# from skimage import color
# fac = 3
# img_src = skimage.transform.resize(img_src, (np.array(img_src.shape)//fac)[:2])
# img_mask = io.imread('/Users/ysakamoto/Desktop/mask.png', as_grey=True)
# img_mask = skimage.transform.resize(img_mask, (np.array(img_mask.shape)//fac)[:2])
# img_mask = color.rgb2grey(img_mask)
img_mask, img_src, offset_adj \
= create_mask(img_mask.astype(np.float64),
img_target, img_src, offset=offset)
img_pro = poisson_blend(img_mask, img_src, img_target,
method='normal', offset_adj=offset_adj)
plt.imshow(img_pro)
plt.show()
io.imsave('./testimages/poisson_normal.png', img_pro)
import pdb
# pdb.set_trace()
# i=14
# for c in np.linspace(10.0, 50.0, 5):
# i+=1
# img_pro = poisson_blend(img_mask, img_src, img_target, method='mix', c=c)
# plt.imshow(img_pro)
# plt.show()
# io.imsave('./testimages/poisson_mix_%d.png' %i, img_pro)
# img_pro = poisson_blend(img_mask, img_src, img_target, method='src')
# io.imsave('./testimages/poisson_src.png', img_pro)
# img_pro = poisson_blend(img_mask, img_src, img_target, method='target')
# io.imsave('./testimages/poisson_target.png', img_pro)
# def plot_coo_matrix(m):
# if not isinstance(m, coo_matrix):
# m = coo_matrix(m)
# fig = plt.figure()
# ax = fig.add_subplot(111, axisbg='white')
# ax.plot(m.col, m.row, 's', color='black', ms=1)
# ax.set_xlim(0, m.shape[1])
# ax.set_ylim(0, m.shape[0])
# ax.set_aspect('equal')
# for spine in ax.spines.values():
# spine.set_visible(False)
# ax.invert_yaxis()
# ax.set_aspect('equal')
# ax.set_xticks([])
# ax.set_yticks([])
# return ax
# B = A.tocoo()
# plot_coo_matrix(B)
# plt.show()
|
en
| 0.294352
|
pb: Poisson Image Blending implemented by Python Takes the np.array from the grayscale image # crop img_mask and img_src to fit to the img_target # fix offset # remove edge from the mask so that we don't have to check the # edge condition Return the sum of the gradient of the source imgae. * 3D array for RGB Return the sum of the gradient of the source imgae. * 3D array for RGB c(>=0): larger, the more important the target image gradient is # if np.linalg.norm(fp) > np.linalg.norm(gp): # v_sum += fp # else: # v_sum += gp # plane insertion # ignore the edge case (# of neighboor is always 4) # poisson blending # ignore the edge case (# of neighboor is always 4) # x = pyamg.solve(A, F[:, l], verb=True, tol=1e-15, maxiter=100) # img_src = io.imread('./testimages/test1_src.png').astype(np.float64) # img_target = io.imread('./testimages/test1_target.png') # img_mask = io.imread('./testimages/test1_mask.png', as_grey=True) # resize src and mask images # import skimage.transform # from skimage import color # fac = 3 # img_src = skimage.transform.resize(img_src, (np.array(img_src.shape)//fac)[:2]) # img_mask = io.imread('/Users/ysakamoto/Desktop/mask.png', as_grey=True) # img_mask = skimage.transform.resize(img_mask, (np.array(img_mask.shape)//fac)[:2]) # img_mask = color.rgb2grey(img_mask) # pdb.set_trace() # i=14 # for c in np.linspace(10.0, 50.0, 5): # i+=1 # img_pro = poisson_blend(img_mask, img_src, img_target, method='mix', c=c) # plt.imshow(img_pro) # plt.show() # io.imsave('./testimages/poisson_mix_%d.png' %i, img_pro) # img_pro = poisson_blend(img_mask, img_src, img_target, method='src') # io.imsave('./testimages/poisson_src.png', img_pro) # img_pro = poisson_blend(img_mask, img_src, img_target, method='target') # io.imsave('./testimages/poisson_target.png', img_pro) # def plot_coo_matrix(m): # if not isinstance(m, coo_matrix): # m = coo_matrix(m) # fig = plt.figure() # ax = fig.add_subplot(111, axisbg='white') # ax.plot(m.col, m.row, 's', color='black', ms=1) # 
ax.set_xlim(0, m.shape[1]) # ax.set_ylim(0, m.shape[0]) # ax.set_aspect('equal') # for spine in ax.spines.values(): # spine.set_visible(False) # ax.invert_yaxis() # ax.set_aspect('equal') # ax.set_xticks([]) # ax.set_yticks([]) # return ax # B = A.tocoo() # plot_coo_matrix(B) # plt.show()
| 2.715762
| 3
|
figSIevol/figure-SIevol.py
|
andim/evolimmune
| 7
|
6627805
|
<reponame>andim/evolimmune
# coding: utf-8
# # Figure S3: Finite population size simulations
# Prerequisites: opt.npz from Figure SIopt, and finite population size simulations results generated with:
#
#     make run
#     make agg
# Import packages.
# In[1]:
import sys
sys.path.append('../lib')
from cycler import cycler
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import palettable
import plotting
import analysis
from evolimmune import varname_to_tex, derived_quantities
plt.style.use(['paper'])
# Import results for population of infinite size.
# In[2]:
dfinf = analysis.loadnpz('../figSIopt/data/opt.npz')
derived_quantities(dfinf)
analysis.printunique(dfinf)
# Import results from finite population size simulations.
# In[3]:
df = analysis.loadnpz('data/scan.npz')
derived_quantities(df)
analysis.printunique(df)
# In[4]:
# number of runs per parameter
df.groupby(by=['nind', 'tauenv', 'ngen', 'pienv']).count().max().max()
# Putting things together to produce final plot
# In[5]:
median = True  # plot median + IQR if True, else mean +/- std
plt.rc('axes', prop_cycle=cycler('color', palettable.colorbrewer.qualitative.Dark2_6.mpl_colors))
black = matplotlib.rcParams['text.color']
linewidth = matplotlib.rcParams['lines.linewidth']
columns = sorted(df.pienv.unique())      # one subplot column per pienv value
variables = ['cconstitutive', 'q', 'p', 'pup']  # one subplot row per variable
ymargin = 0.05
xmargin = 0.02
plotkwargs = dict()
lims = dict(pup=(0, 0.2), q=(0, 0.2))    # custom y-limits; default is (0, 1)
fig, axes = plt.subplots(ncols=len(columns), nrows=len(variables), figsize=(7.0, 1.0+3.5*len(variables)/len(columns)))
for i, val in enumerate(columns):
    for j, var in enumerate(variables):
        ax = axes[j, i]
        lim = lims[var] if var in lims else (0, 1)
        dlim = lim[1]-lim[0]
        # Infinite-population reference curve at the closest pienv value.
        # NOTE(review): DataFrame.ix is deprecated/removed in modern
        # pandas -- replace with .loc/.iloc when upgrading.
        closestval = dfinf.ix[(dfinf.pienv-val).abs().argmin()]['pienv']
        dfsub = dfinf[np.abs(dfinf.pienv-closestval)<1e-3]
        # NOTE(review): inplace sort on a boolean-indexed slice may raise
        # SettingWithCopyWarning on newer pandas.
        dfsub.sort_values(by='tauenv', inplace=True)
        x, y = dfsub.tauenv, dfsub[var]
        ax.plot(x, y, '-', label=r'$\infty$', c=black, lw=linewidth*2, **plotkwargs)
        # One curve per finite population size.
        for nind, dfg in sorted(df.groupby(by='nind')):
            # NOTE(review): the mask is built from the full frame `df`,
            # not the group `dfg` -- relies on index alignment; confirm
            # this is intended.
            dfgg = dfg[df.pienv==val].groupby(by='tauenv', as_index=False)
            dfgg_tauenv = dfgg[['tauenv']].mean()['tauenv']
            if median:
                dfggm = dfgg[[var]].median()[var]
            else:
                dfggm = dfgg[[var]].mean()[var]
            x, y = dfgg_tauenv, dfggm
            line, = ax.plot(x, y, label='%i'%nind, **plotkwargs)
            # Shaded band: IQR (median mode) or +/- 1 std (mean mode).
            if median:
                dfggu = dfgg[[var]].quantile(0.75)[var]
                dfggl = dfgg[[var]].quantile(0.25)[var]
            else:
                dfggs = dfgg[[var]].std(ddof=1)[var]
                dfggu = dfggm + dfggs
                dfggl = dfggm - dfggs
            ax.fill_between(dfgg_tauenv, dfggl, dfggu,
                            facecolor=line.get_color(), edgecolor='none', alpha=0.5)
        ax.set_ylim(lim[0]-ymargin*dlim, lim[1]+ymargin*dlim)
        ax.set_xlim(0.09, 11.0)
        ax.set_xscale('log')
        ax.margins(x=xmargin, y=ymargin*dlim)
        plotting.despine(ax, spines='all')
        ax.grid()
        ax.locator_params(axis='y', nbins=5)
# Shared legend (population sizes) above the panel grid.
ax.legend(loc='upper center', title='population size', bbox_to_anchor=(0.54, 1),
          bbox_transform=plt.gcf().transFigure, ncol=4)
# Hide redundant tick labels on interior panels.
for ax in analysis.flatten(axes[:-1, :]):
    plt.setp(ax.get_xticklabels(), visible=False)
for ax in analysis.flatten(axes[:, 1:]):
    plt.setp(ax.get_yticklabels(), visible=False)
for ax in axes[-1, :]:
    ax.set_xlabel(varname_to_tex['tauenv'])
for j, var in enumerate(variables):
    axes[j, 0].set_ylabel(varname_to_tex[var])
plotting.label_axes(axes[0, :], labels=[(varname_to_tex['pienv'][1:-1] + r'\, = \, %s' % val) for val in columns],
                    labelstyle='$%s$',
                    xy=(.5, 0.9), xycoords=('axes fraction', 'figure fraction'), fontweight = 'bold', fontsize='medium',
                    verticalalignment='top', horizontalalignment='center')
fig.tight_layout(h_pad=1.5, w_pad=1.0, rect=(0.0, 0.0, 1.0, 0.87), pad=0.25)
fig.savefig('SIevol.pdf')
fig.savefig('SIevol.svg')
# **Influence of finite population size on optimal immune strategies from an agent-based simulation with evolving strategy parameters (switching rates and degree of adaptability) as described in the text.**
# For the infinite population, $p$ is only shown for $q > 0$, because for $q = 0$ the value of $p$ is not constrained other than being positive.
# Subplots show the median (solid line) and interquartile range (shaded area) of the strategy parameters at the end of a simulation of $100000$ generations length.
# Both are calculated from 500 independent simulations.
# In each simulation, the strategy parameters evolve from a random initial distribution via mutation and selection.
# Mutations take place with a rate $0.01 \exp(-t/10000)$ per generation and are normally distributed with mean zero and standard deviation $0.25 \exp(-t/10000)$.
# The bound constraints on the parameters were enforced by setting the strategy parameters to the boundary value if outside after a mutation.
# Costs of different immune states as in Fig. 2.
# In[ ]:
|
# coding: utf-8

# # Figure S3: Finite population size simulations
# Prerequisites: opt.npz from Figure SIopt, and finite population size
# simulations results generated with:
#
#     make run
#     make agg

# Import packages.
import sys
sys.path.append('../lib')
from cycler import cycler
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import palettable
import plotting
import analysis
from evolimmune import varname_to_tex, derived_quantities
plt.style.use(['paper'])

# Import results for population of infinite size.
dfinf = analysis.loadnpz('../figSIopt/data/opt.npz')
derived_quantities(dfinf)
analysis.printunique(dfinf)

# Import results from finite population size simulations.
df = analysis.loadnpz('data/scan.npz')
derived_quantities(df)
analysis.printunique(df)

# Number of runs per parameter combination.
df.groupby(by=['nind', 'tauenv', 'ngen', 'pienv']).count().max().max()

# Putting things together to produce the final plot.
median = True  # plot median/IQR if True, mean/std if False
plt.rc('axes', prop_cycle=cycler('color', palettable.colorbrewer.qualitative.Dark2_6.mpl_colors))
black = matplotlib.rcParams['text.color']
linewidth = matplotlib.rcParams['lines.linewidth']
columns = sorted(df.pienv.unique())  # one subplot column per pienv value
variables = ['cconstitutive', 'q', 'p', 'pup']  # one subplot row per strategy parameter
ymargin = 0.05
xmargin = 0.02
plotkwargs = dict()
lims = dict(pup=(0, 0.2), q=(0, 0.2))  # restricted y-ranges for selected variables
fig, axes = plt.subplots(ncols=len(columns), nrows=len(variables), figsize=(7.0, 1.0+3.5*len(variables)/len(columns)))
for i, val in enumerate(columns):
    for j, var in enumerate(variables):
        ax = axes[j, i]
        lim = lims[var] if var in lims else (0, 1)
        dlim = lim[1]-lim[0]
        # Row of dfinf whose pienv is closest to this column's value.
        # (.loc + idxmin() replaces the .ix + argmin() label indexing that
        # was removed in pandas 1.0.)
        closestval = dfinf.loc[(dfinf.pienv-val).abs().idxmin(), 'pienv']
        # Sort a fresh copy instead of sorting a filtered slice in place
        # (avoids pandas SettingWithCopy issues).
        dfsub = dfinf[np.abs(dfinf.pienv-closestval) < 1e-3].sort_values(by='tauenv')
        x, y = dfsub.tauenv, dfsub[var]
        ax.plot(x, y, '-', label=r'$\infty$', c=black, lw=linewidth*2, **plotkwargs)
        for nind, dfg in sorted(df.groupby(by='nind')):
            # Use the group's own pienv column for the mask; the previous
            # df.pienv mask only worked through index alignment.
            dfgg = dfg[dfg.pienv == val].groupby(by='tauenv', as_index=False)
            dfgg_tauenv = dfgg[['tauenv']].mean()['tauenv']
            if median:
                dfggm = dfgg[[var]].median()[var]
            else:
                dfggm = dfgg[[var]].mean()[var]
            x, y = dfgg_tauenv, dfggm
            line, = ax.plot(x, y, label='%i' % nind, **plotkwargs)
            if median:
                # Interquartile range as the uncertainty band.
                dfggu = dfgg[[var]].quantile(0.75)[var]
                dfggl = dfgg[[var]].quantile(0.25)[var]
            else:
                # One standard deviation as the uncertainty band.
                dfggs = dfgg[[var]].std(ddof=1)[var]
                dfggu = dfggm + dfggs
                dfggl = dfggm - dfggs
            ax.fill_between(dfgg_tauenv, dfggl, dfggu,
                            facecolor=line.get_color(), edgecolor='none', alpha=0.5)
        ax.set_ylim(lim[0]-ymargin*dlim, lim[1]+ymargin*dlim)
        ax.set_xlim(0.09, 11.0)
        ax.set_xscale('log')
        ax.margins(x=xmargin, y=ymargin*dlim)
        plotting.despine(ax, spines='all')
        ax.grid()
        ax.locator_params(axis='y', nbins=5)
# Single figure-level legend (last axis handles, figure coordinates).
ax.legend(loc='upper center', title='population size', bbox_to_anchor=(0.54, 1),
          bbox_transform=plt.gcf().transFigure, ncol=4)
# Hide tick labels on interior subplots; label only the outer edges.
for ax in analysis.flatten(axes[:-1, :]):
    plt.setp(ax.get_xticklabels(), visible=False)
for ax in analysis.flatten(axes[:, 1:]):
    plt.setp(ax.get_yticklabels(), visible=False)
for ax in axes[-1, :]:
    ax.set_xlabel(varname_to_tex['tauenv'])
for j, var in enumerate(variables):
    axes[j, 0].set_ylabel(varname_to_tex[var])
plotting.label_axes(axes[0, :], labels=[(varname_to_tex['pienv'][1:-1] + r'\, = \, %s' % val) for val in columns],
                    labelstyle='$%s$',
                    xy=(.5, 0.9), xycoords=('axes fraction', 'figure fraction'), fontweight='bold', fontsize='medium',
                    verticalalignment='top', horizontalalignment='center')
fig.tight_layout(h_pad=1.5, w_pad=1.0, rect=(0.0, 0.0, 1.0, 0.87), pad=0.25)
fig.savefig('SIevol.pdf')
fig.savefig('SIevol.svg')
# **Influence of finite population size on optimal immune strategies from an agent-based simulation with evolving strategy parameters (switching rates and degree of adaptability) as described in the text.**
# For the infinite population, $p$ is only shown for $q > 0$, because for $q = 0$ the value of $p$ is not constrained other than being positive.
# Subplots show the median (solid line) and interquartile range (shaded area) of the strategy parameters at the end of a simulation of $100000$ generations length.
# Both are calculated from 500 independent simulations.
# In each simulation, the strategy parameters evolve from a random initial distribution via mutation and selection.
# Mutations take place with a rate $0.01 \exp(-t/10000)$ per generation and are normally distributed with mean zero and standard deviation $0.25 \exp(-t/10000)$.
# The bound constraints on the parameters were enforced by setting the strategy parameters to the boundary value if outside after a mutation.
# Costs of different immune states as in Fig. 2.
# In[ ]:
|
en
| 0.815736
|
# coding: utf-8 # # Figure S3: Finite population size simulations # Prerequisites: opt.npz from Figure SIopt, and finite population size simulations results generated with: # # make run # make agg # Import packages. # In[1]: # Import results for population of infinite size. # In[2]: # Import results from finite population size simulations. # In[3]: # In[4]: # number of runs per parameter # Putting things together to produce final plot # In[5]: # **Influence of finite population size on optimal immune strategies from an agent-based simulation with evolving strategy parameters (switching rates and degree of adaptability) as described in the text.** # For the infinite population, $p$ is only shown for $q > 0$, because for $q = 0$ the value of $p$ is not constrained other than being positive. # Subplots show the median (solid line) and interquartile range (shaded area) of the strategy parameters at the end of a simulation of $100000$ generations length. # Both are calculated from 500 independent simulations. # In each simulation, the strategy parameters evolve from a random initial distribution via mutation and selection. # Mutations take place with a rate $0.01 \exp(-t/10000)$ per generation and are normally distributed with mean zero and standard deviation $0.25 \exp(-t/10000)$. # The bound constraints on the parameters were enforced by setting the strategy parameters to the boundary value if outside after a mutation. # Costs of different immune states as in Fig. 2. # In[ ]:
| 2.406948
| 2
|
src/openprocurement/api/views/health.py
|
JrooTJunior/openprocurement.api
| 102
|
6627806
|
# -*- coding: utf-8 -*-
from cornice.service import Service
from pyramid.response import Response

health = Service(name='health', path='/health', renderer='json')

# Aggregation strategies for the per-replication lag checks: 'all' requires
# every replication to be within the threshold, 'any' at least one.
HEALTH_THRESHOLD_FUNCTIONS = {
    'any': any,
    'all': all
}


@health.get()
def get_spore(request):
    """Report CouchDB replication health.

    Returns a mapping of replication_id -> progress for all running
    replication tasks.  Responds with HTTP 503 when there are no replication
    tasks, or when the threshold function decides the replication lag
    (source_seq - checkpointed_source_seq) exceeds the health threshold.
    Both the threshold value and the aggregation function can be overridden
    via the 'health_threshold' / 'health_threshold_func' query parameters.
    """
    # Prefer the admin server handle when configured; fall back to the
    # regular couchdb_server.
    tasks = getattr(request.registry, 'admin_couchdb_server', request.registry.couchdb_server).tasks()
    output = {task['replication_id']: task['progress'] for task in tasks if 'type' in task and task['type'] == 'replication'}
    try:
        health_threshold = float(request.params.get('health_threshold', request.registry.health_threshold))
    except ValueError:
        # Non-numeric query parameter: fall back to the configured default.
        # (Was `except ValueError, e` -- Python-2-only syntax with an unused
        # binding; this form is valid on both Python 2 and 3.)
        health_threshold = request.registry.health_threshold
    health_threshold_func_name = request.params.get('health_threshold_func', request.registry.health_threshold_func)
    health_threshold_func = HEALTH_THRESHOLD_FUNCTIONS.get(health_threshold_func_name, all)
    if not (output and health_threshold_func(
        [(task['source_seq'] - task['checkpointed_source_seq']) <= health_threshold
         for task in tasks if 'type' in task and task['type'] == 'replication']
    )):
        return Response(json_body=output, status=503)
    return output
|
# -*- coding: utf-8 -*-
from cornice.service import Service
from pyramid.response import Response
health = Service(name='health', path='/health', renderer='json')
HEALTH_THRESHOLD_FUNCTIONS = {
'any': any,
'all': all
}
@health.get()
def get_spore(request):
tasks = getattr(request.registry, 'admin_couchdb_server', request.registry.couchdb_server).tasks()
output = {task['replication_id']: task['progress'] for task in tasks if 'type' in task and task['type'] == 'replication'}
try:
health_threshold = float(request.params.get('health_threshold', request.registry.health_threshold))
except ValueError, e:
health_threshold = request.registry.health_threshold
health_threshold_func_name = request.params.get('health_threshold_func', request.registry.health_threshold_func)
health_threshold_func = HEALTH_THRESHOLD_FUNCTIONS.get(health_threshold_func_name, all)
if not(output and health_threshold_func(
[True if (task['source_seq'] - task['checkpointed_source_seq']) <= health_threshold else False
for task in tasks if 'type' in task and task['type'] == 'replication']
)):
return Response(json_body=output, status=503)
return output
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.265143
| 2
|
django_covid19/management/commands/crawl.py
|
zhangguoyuanshuai/Python-Covid19API
| 103
|
6627807
|
import os
import sys
import django_covid19
app_dir = os.path.dirname(django_covid19.__file__)
sys.path.insert(0, os.path.join(app_dir, 'spider'))
from nCoV.spiders.dxy import DXYSpider
from nCoV.spiders.covidtracking import CovidTrackingSpider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_lazy as _
class Scraper:
    """Thin wrapper around a Scrapy CrawlerProcess for the bundled spiders."""

    def __init__(self):
        # Point Scrapy at the bundled project settings before the process
        # reads them (setdefault: an externally set module wins).
        os.environ.setdefault('SCRAPY_SETTINGS_MODULE', 'nCoV.settings')
        self.process = CrawlerProcess(get_project_settings())
        self.spider = DXYSpider
        self.covidtracking_spider = CovidTrackingSpider

    def run_spiders(self, spider):
        """Schedule the spider selected by name and block until it finishes.

        'covidtracking' selects the CovidTracking spider; any other value
        falls back to the default DXY spider.
        """
        chosen = self.covidtracking_spider if spider == 'covidtracking' else self.spider
        self.process.crawl(chosen)
        self.process.start()
class Command(BaseCommand):
    # Django management command: `manage.py crawl <spider>` runs one of the
    # bundled Scrapy spiders via the Scraper wrapper.
    help = _('Crawl data from DingXiangYuan.')
    def add_arguments(self, parser):
        # One positional argument selecting which spider to run
        # ('covidtracking' or anything else for the default DXY spider).
        parser.add_argument('spider', type=str, help='spider name')
    def handle(self, *args, **options):
        # Entry point invoked by Django; delegates to Scraper.run_spiders.
        spider = options['spider']
        scraper = Scraper()
        scraper.run_spiders(spider)
|
import os
import sys
import django_covid19
app_dir = os.path.dirname(django_covid19.__file__)
sys.path.insert(0, os.path.join(app_dir, 'spider'))
from nCoV.spiders.dxy import DXYSpider
from nCoV.spiders.covidtracking import CovidTrackingSpider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_lazy as _
class Scraper:
def __init__(self):
settings_file_path = 'nCoV.settings'
os.environ.setdefault('SCRAPY_SETTINGS_MODULE', settings_file_path)
self.process = CrawlerProcess(get_project_settings())
self.spider = DXYSpider
self.covidtracking_spider = CovidTrackingSpider
def run_spiders(self, spider):
if spider == 'covidtracking':
self.process.crawl(self.covidtracking_spider)
else:
self.process.crawl(self.spider)
self.process.start()
class Command(BaseCommand):
help = _('Crawl data from DingXiangYuan.')
def add_arguments(self, parser):
parser.add_argument('spider', type=str, help='spider name')
def handle(self, *args, **options):
spider = options['spider']
scraper = Scraper()
scraper.run_spiders(spider)
|
none
| 1
| 2.066168
| 2
|
|
src/lesson_application_building_blocks/argparse_short.py
|
jasonwee/asus-rt-n14uhp-mrtg
| 3
|
6627808
|
<reponame>jasonwee/asus-rt-n14uhp-mrtg
import argparse

# Minimal argparse demo: a boolean flag, a string option, and an int option,
# parsed against a hard-coded argument vector.
parser = argparse.ArgumentParser(description='Short sample app')
for flag, kwargs in (
        ('-a', dict(action="store_true", default=False)),
        ('-b', dict(action="store", dest="b")),
        ('-c', dict(action="store", dest="c", type=int)),
):
    parser.add_argument(flag, **kwargs)
print(parser.parse_args(['-a', '-bval', '-c', '3']))
|
import argparse
parser = argparse.ArgumentParser(description='Short sample app')
parser.add_argument('-a', action="store_true", default=False)
parser.add_argument('-b', action="store", dest="b")
parser.add_argument('-c', action="store", dest="c", type=int)
print(parser.parse_args(['-a', '-bval', '-c', '3']))
|
none
| 1
| 3.037551
| 3
|
|
tests/unit/test_advanced_collectible.py
|
szaboako/nft
| 0
|
6627809
|
<filename>tests/unit/test_advanced_collectible.py
from brownie import network, AdvancedCollectible
from scripts.helpful_scripts import (
get_account,
get_contract,
LOCAL_BLOCKCHAIN_ENVIRONMENTS,
)
from scripts.advanced_collectible.deploy_and_create import deploy_and_create_nft
import pytest
def test_can_create_advanced_collectible():
    """Deploy the NFT contract, feed the VRF callback a fixed random number,
    and check that exactly one token with the expected breed was minted."""
    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
        pytest.skip("Only for local testing")
    #Act
    advanced_collectible, creation_transaction = deploy_and_create_nft()
    requestId = creation_transaction.events["requestedCollectible"]["requestId"]
    random_number = 777
    # Simulate the mock VRF coordinator answering the randomness request.
    get_contract("vrf_coordinator").callBackWithRandomness(requestId, random_number, advanced_collectible.address, {"from": get_account()})
    # Assert
    assert advanced_collectible.tokenCounter() == 1
    # Breed index is the random number modulo 3 (number of breeds, per the
    # contract -- TODO confirm against AdvancedCollectible source).
    assert advanced_collectible.tokenIdToBreed(0) == random_number % 3
|
<filename>tests/unit/test_advanced_collectible.py
from brownie import network, AdvancedCollectible
from scripts.helpful_scripts import (
get_account,
get_contract,
LOCAL_BLOCKCHAIN_ENVIRONMENTS,
)
from scripts.advanced_collectible.deploy_and_create import deploy_and_create_nft
import pytest
def test_can_create_advanced_collectible():
if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
pytest.skip("Only for local testing")
#Act
advanced_collectible, creation_transaction = deploy_and_create_nft()
requestId = creation_transaction.events["requestedCollectible"]["requestId"]
random_number = 777
get_contract("vrf_coordinator").callBackWithRandomness(requestId, random_number, advanced_collectible.address, {"from": get_account()})
# Assert
assert advanced_collectible.tokenCounter() == 1
assert advanced_collectible.tokenIdToBreed(0) == random_number % 3
|
en
| 0.423515
|
#Act # Assert
| 2.211683
| 2
|
src/refinement/config.py
|
lenaWitterauf/Domain-Guided-Monitoring
| 1
|
6627810
|
import dataclass_cli
import dataclasses
from pathlib import Path
@dataclass_cli.add
@dataclasses.dataclass
class RefinementConfig:
    # Configuration for the knowledge-refinement loop; dataclass_cli.add
    # exposes these fields as command-line options.
    num_refinements: int = 1  # number of refinement iterations to run
    min_edge_weight: float = 0.8
    max_train_examples: int = 10
    refinement_metric: str = "mean_outlier_score"
    refinement_metric_maxrank: int = -1  # -1 presumably means "no cap" -- TODO confirm
    max_edges_to_remove: int = 10
    max_refinement_metric: int = -1
    original_file_knowledge: Path = Path("data/original_file_knowledge.json")
    edges_to_add: float = -1
    reference_file_knowledge: Path = Path("data/reference_file_knowledge.json")
    mlflow_dir: str = "mlruns/1/"  # location of the mlflow experiment runs
|
import dataclass_cli
import dataclasses
from pathlib import Path
@dataclass_cli.add
@dataclasses.dataclass
class RefinementConfig:
num_refinements: int = 1
min_edge_weight: float = 0.8
max_train_examples: int = 10
refinement_metric: str = "mean_outlier_score"
refinement_metric_maxrank: int = -1
max_edges_to_remove: int = 10
max_refinement_metric: int = -1
original_file_knowledge: Path = Path("data/original_file_knowledge.json")
edges_to_add: float = -1
reference_file_knowledge: Path = Path("data/reference_file_knowledge.json")
mlflow_dir: str = "mlruns/1/"
|
none
| 1
| 2.257454
| 2
|
|
SQL_obj_new/DB_interaction_DDI_sql_new.py
|
diogo1790team/inphinity_DM
| 1
|
6627811
|
# -*- coding: utf-8 -*-
"""
Created on Wen Apr 11 10:38:22 2018
@author: <NAME>
"""
from DAL import *
from configuration.configuration_data import *
class _DB_interaction_DDI_SQL(object):
    """
    This class manipulates the DB_INTERACTIONS_DDI table in the database.
    It is used to know the sources that give the information of a DDI.
    The FKs are manipulated in the last positions of the parameters.
    """
    def __init__(self):
        self.db_name = self.get_database_name()

    def get_database_name(self):
        """
        Get the database name used in factory.
        :return: database name
        :rtype string
        """
        conf_data_obj = Configuration_data('INPHINITY')
        db_name = conf_data_obj.get_database_name()
        return db_name

    def select_all_sources_DDI_name(self):
        """
        Return all the sources of DDIs.
        :return: cursor with all sources DDI
        :rtype Cursor list
        """
        sql_string = "SELECT id_db_int_DBI, designation_DBI FROM DB_INTERACTIONS_DDI"
        dalObj = DAL(self.db_name, sql_string)
        results = dalObj.executeSelect()
        return results

    def insert_DDI_source_return_id(self, DDI_source):
        """
        Insert a ddi_source WITHOUT ANY VERIFICATION.
        :param DDI_source: designation of source
        :type DDI_source: string - required
        :return: id of the row inserted
        :rtype int
        """
        sqlObj = "INSERT INTO DB_INTERACTIONS_DDI (designation_DBI) VALUES (%s)"
        params = [DDI_source]
        dalObj = DAL(self.db_name, sqlObj, params)
        results = dalObj.executeInsert()
        return results.lastrowid

    def insert_DDI_source_return_id_if_not_exists(self, DDI_source):
        """
        Insert a ddi_source if it does not exist, otherwise return its id.
        :param DDI_source: designation of source
        :type DDI_source: string - required
        :return: id of the DDI_source inserted
        :rtype int
        """
        id_DDI_source = self.get_id_DDI_source_by_name(DDI_source)
        if id_DDI_source == -1:
            # Not found: delegate to the plain insert (previously duplicated
            # the INSERT code inline).
            return self.insert_DDI_source_return_id(DDI_source)
        return id_DDI_source

    def get_id_DDI_source_by_name(self, DDI_source):
        """
        Return the id of a DDI source.
        :param DDI_source: designation of DDI source
        :type DDI_source: string - required
        :return: id of the source or -1 if it does not exist
        :rtype int
        """
        # Parameterized query: the previous version concatenated DDI_source
        # directly into the SQL string, which allowed SQL injection.
        # NOTE(review): assumes DAL.executeSelect honors bound params the
        # same way executeInsert does -- confirm against the DAL class.
        sql_string = "SELECT id_db_int_DBI FROM DB_INTERACTIONS_DDI WHERE designation_DBI LIKE %s"
        params = [DDI_source]
        dalObj = DAL(self.db_name, sql_string, params)
        results = dalObj.executeSelect()
        # `len(results) is 0` compared identity with an int literal; use a
        # plain emptiness check instead.
        if not results:
            return -1
        return results[0][0]
|
# -*- coding: utf-8 -*-
"""
Created on Wen Apr 11 10:38:22 2018
@author: <NAME>
"""
from DAL import *
from configuration.configuration_data import *
class _DB_interaction_DDI_SQL(object):
"""
This class manipulate the DB_INTERACTIONS_DDI table in the database. It used to know the sources that give the information of a DDI
The FK are manipulated in the lasts positions of the parameters
"""
def __init__(self):
self.db_name = self.get_database_name()
def get_database_name(self):
"""
This method is used to get the database name used in factory
:return: database name
:rtype string
"""
conf_data_obj = Configuration_data('INPHINITY')
db_name = conf_data_obj.get_database_name()
return db_name
def select_all_sources_DDI_name(self):
"""
return all the sources of DDIs
:return: cursor with all sources DDI
:rtype Cursor list
"""
sql_string = "SELECT id_db_int_DBI, designation_DBI FROM DB_INTERACTIONS_DDI"
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
return results
def insert_DDI_source_return_id(self, DDI_source):
"""
Insert a ddi_source WITHOUT ANY VERIFICATION.
:param DDI_source: designation of source
:type DDI_source: string - required
:return: id of the domain inserted
:rtype int
"""
sqlObj = "INSERT INTO DB_INTERACTIONS_DDI (designation_DBI) VALUES (%s)"
params = [DDI_source]
dalObj = DAL(self.db_name, sqlObj, params)
results = dalObj.executeInsert()
return results.lastrowid
def insert_DDI_source_return_id_if_not_exists(self, DDI_source):
"""
Insert a ddi_source if not exist else return its id
:param DDI_source: designation of source
:type DDI_source: string - required
:return: id of the DDI_source inserted
:rtype int
"""
id_DDI_source = self.get_id_DDI_source_by_name(DDI_source)
if id_DDI_source == -1:
sqlObj = "INSERT INTO DB_INTERACTIONS_DDI (designation_DBI) VALUES (%s)"
params = [DDI_source]
dalObj = DAL(self.db_name, sqlObj, params)
results = dalObj.executeInsert()
return results.lastrowid
else:
return id_DDI_source
def get_id_DDI_source_by_name(self, DDI_source):
"""
Return the id o a DDI source
:param DDI_source: designation of DDI source
:type DDI_source: string - required
:return: id of the interaction or -1 i don't exists
:rtype int
"""
sql_string = "SELECT id_db_int_DBI FROM DB_INTERACTIONS_DDI WHERE designation_DBI LIKE '" + str(DDI_source) + "'"
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
if len(results) is 0:
return -1
else:
return results[0][0]
|
en
| 0.626681
|
# -*- coding: utf-8 -*- Created on Wen Apr 11 10:38:22 2018 @author: <NAME> This class manipulate the DB_INTERACTIONS_DDI table in the database. It used to know the sources that give the information of a DDI The FK are manipulated in the lasts positions of the parameters This method is used to get the database name used in factory :return: database name :rtype string return all the sources of DDIs :return: cursor with all sources DDI :rtype Cursor list Insert a ddi_source WITHOUT ANY VERIFICATION. :param DDI_source: designation of source :type DDI_source: string - required :return: id of the domain inserted :rtype int Insert a ddi_source if not exist else return its id :param DDI_source: designation of source :type DDI_source: string - required :return: id of the DDI_source inserted :rtype int Return the id o a DDI source :param DDI_source: designation of DDI source :type DDI_source: string - required :return: id of the interaction or -1 i don't exists :rtype int
| 2.71191
| 3
|
Statistical Mechanics - Coursera/Lecture 1 Programs/pebble_basic_movie.py
|
samisaf/Learning-Data-Science
| 0
|
6627812
|
<filename>Statistical Mechanics - Coursera/Lecture 1 Programs/pebble_basic_movie.py
import random, pylab
# Random walk of a pebble on a 3x3 grid; one PNG frame is saved per step.
sigma = 0.4 # sigma and s_map are needed for the graphical output
# s_map[site] -> (x, y) plot coordinates of each of the 9 grid sites.
s_map = [(1.0, 1.0), (2.0, 1.0), (3.0, 1.0),
         (1.0, 2.0), (2.0, 2.0), (3.0, 2.0),
         (1.0, 3.0), (2.0, 3.0), (3.0, 3.0)]
# neighbor[site] lists the 4 neighbouring sites (moves off the edge map back
# to the same site, so all sites have exactly 4 entries).
neighbor = [[1, 3, 0, 0], [2, 4, 0, 1], [2, 5, 1, 2],
            [4, 6, 3, 0], [5, 7, 3, 1], [5, 8, 4, 2],
            [7, 6, 6, 3], [8, 7, 6, 4], [8, 8, 7, 5]]
site = 8  # start in the top-right corner
N_runs = 10
for run in range(N_runs):
    # Zero-pad single-digit frame numbers so filenames sort correctly.
    if run < 10: number_string = '0'+str(run)
    else: number_string = str(run)
    # Begin of graphical output
    cir = pylab.Circle(s_map[site], radius=sigma, fc='r')
    pylab.gca().add_patch(cir)
    # Grid lines separating the 3x3 cells.
    pylab.plot([0.5, 3.5], [1.5, 1.5], 'b')
    pylab.plot([0.5, 3.5], [2.5, 2.5], 'b')
    pylab.plot([1.5, 1.5], [0.5, 3.5], 'b')
    pylab.plot([2.5, 2.5], [0.5, 3.5], 'b')
    pylab.title('t = '+ number_string)
    pylab.axis('scaled')
    pylab.axis([0.5, 3.5, 0.5, 3.5])
    pylab.xticks([])
    pylab.yticks([])
    pylab.savefig('pebble_basic_movie_'+number_string+'.png', transparent=False)
    pylab.show()
    pylab.clf()
    # End of graphical output
    # Move to a uniformly chosen neighbour (staying put at edges).
    site = neighbor[site][ random.randint(0, 3)]
|
<filename>Statistical Mechanics - Coursera/Lecture 1 Programs/pebble_basic_movie.py
import random, pylab
sigma = 0.4 # sigma and s_map are needed for the graphical output
s_map = [(1.0, 1.0), (2.0, 1.0), (3.0, 1.0),
(1.0, 2.0), (2.0, 2.0), (3.0, 2.0),
(1.0, 3.0), (2.0, 3.0), (3.0, 3.0)]
neighbor = [[1, 3, 0, 0], [2, 4, 0, 1], [2, 5, 1, 2],
[4, 6, 3, 0], [5, 7, 3, 1], [5, 8, 4, 2],
[7, 6, 6, 3], [8, 7, 6, 4], [8, 8, 7, 5]]
site = 8
N_runs = 10
for run in range(N_runs):
if run < 10: number_string = '0'+str(run)
else: number_string = str(run)
# Begin of graphical output
cir = pylab.Circle(s_map[site], radius=sigma, fc='r')
pylab.gca().add_patch(cir)
pylab.plot([0.5, 3.5], [1.5, 1.5], 'b')
pylab.plot([0.5, 3.5], [2.5, 2.5], 'b')
pylab.plot([1.5, 1.5], [0.5, 3.5], 'b')
pylab.plot([2.5, 2.5], [0.5, 3.5], 'b')
pylab.title('t = '+ number_string)
pylab.axis('scaled')
pylab.axis([0.5, 3.5, 0.5, 3.5])
pylab.xticks([])
pylab.yticks([])
pylab.savefig('pebble_basic_movie_'+number_string+'.png', transparent=False)
pylab.show()
pylab.clf()
# End of graphical output
site = neighbor[site][ random.randint(0, 3)]
|
en
| 0.756274
|
# sigma and s_map are needed for the graphical output # Begin of graphical output # End of graphical output
| 3.922105
| 4
|
lecture_10/hello_flask.py
|
darinabird/python_developer
| 20
|
6627813
|
<filename>lecture_10/hello_flask.py
from flask import Flask
from werkzeug.serving import run_simple
# Minimal Flask demo application with a single root endpoint.
app = Flask(__name__)
@app.route('/')
def index():
    # Root endpoint: return a plain-text greeting.
    return 'Hello World!'
if __name__ == '__main__':
    # Serve via werkzeug's development server on http://localhost:5000.
    run_simple('localhost', 5000, app)
|
<filename>lecture_10/hello_flask.py
from flask import Flask
from werkzeug.serving import run_simple
app = Flask(__name__)
@app.route('/')
def index():
return 'Hello World!'
if __name__ == '__main__':
run_simple('localhost', 5000, app)
|
none
| 1
| 2.863489
| 3
|
|
salt/modules/slsutil.py
|
AsocPro/salt
| 1
|
6627814
|
# -*- coding: utf-8 -*-
"""
Utility functions for use with or in SLS files
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import textwrap
# Import Salt libs
import salt.exceptions
import salt.loader
import salt.template
import salt.utils.args
import salt.utils.dictupdate
def update(dest, upd, recursive_update=True, merge_lists=False):
    """
    Merge ``upd`` recursively into ``dest``

    If ``merge_lists=True``, will aggregate list object types instead of
    replacing. This behavior is only activated when ``recursive_update=True``.

    :param dest: Data structure to be updated.
    :param upd: Data structure merged into ``dest``.
    :param recursive_update: Recurse into nested dictionaries instead of
        replacing them wholesale.
    :param merge_lists: Aggregate lists instead of replacing them (only
        effective when ``recursive_update=True``).

    CLI Example:

    .. code-block:: shell

        salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}'
    """
    # Thin wrapper; all merge logic lives in salt.utils.dictupdate.
    return salt.utils.dictupdate.update(dest, upd, recursive_update, merge_lists)
def merge(obj_a, obj_b, strategy="smart", renderer="yaml", merge_lists=False):
    """
    Merge a data structure into another by choosing a merge strategy

    Strategies:

    * aggregate
    * list
    * overwrite
    * recurse
    * smart

    :param obj_a: Base data structure.
    :param obj_b: Data structure merged into ``obj_a``.
    :param strategy: One of the strategies listed above.
    :param renderer: Renderer type; used to pick a strategy when ``smart``.
    :param merge_lists: Aggregate lists instead of replacing them.

    CLI Example:

    .. code-block:: shell

        salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
    """
    # Thin wrapper; strategy dispatch lives in salt.utils.dictupdate.
    return salt.utils.dictupdate.merge(obj_a, obj_b, strategy, renderer, merge_lists)
def merge_all(lst, strategy="smart", renderer="yaml", merge_lists=False):
    """
    .. versionadded:: 2019.2.0

    Merge a list of objects into each other in order

    :type lst: Iterable
    :param lst: List of objects to be merged.
    :type strategy: String
    :param strategy: Merge strategy. See utils.dictupdate.
    :type renderer: String
    :param renderer:
        Renderer type. Used to determine strategy when strategy is 'smart'.
    :type merge_lists: Bool
    :param merge_lists: Defines whether to merge embedded object lists.

    CLI Example:

    .. code-block:: shell

        $ salt-call --output=txt slsutil.merge_all '[{foo: Foo}, {foo: Bar}]'
        local: {u'foo': u'Bar'}
    """
    # Fold the sequence left-to-right; later objects win on conflicts.
    merged = {}
    for item in lst:
        merged = salt.utils.dictupdate.merge(merged, item, strategy, renderer, merge_lists)
    return merged
def renderer(path=None, string=None, default_renderer="jinja|yaml", **kwargs):
    """
    Parse a string or file through Salt's renderer system

    .. versionchanged:: 2018.3.0
       Add support for Salt fileserver URIs.

    This is an open-ended function and can be used for a variety of tasks. It
    makes use of Salt's "renderer pipes" system to run a string or file through
    a pipe of any of the loaded renderer modules.

    :param path: The path to a file on Salt's fileserver (any URIs supported by
        :py:func:`cp.get_url <salt.modules.cp.get_url>`) or on the local file
        system.
    :param string: An inline string to be used as the file to send through the
        renderer system. Note, not all renderer modules can work with strings;
        the 'py' renderer requires a file, for example.
    :param default_renderer: The renderer pipe to send the file through; this
        is overridden by a "she-bang" at the top of the file.
    :param kwargs: Keyword args to pass to Salt's compile_template() function.

    Keep in mind the goal of each renderer when choosing a render-pipe; for
    example, the Jinja renderer processes a text file and produces a string,
    however the YAML renderer processes a text file and produces a data
    structure.

    One possible use is to allow writing "map files", as are commonly seen in
    Salt formulas, but without tying the renderer of the map file to the
    renderer used in the other sls files. In other words, a map file could use
    the Python renderer and still be included and used by an sls file that uses
    the default 'jinja|yaml' renderer.

    For example, the two following map files produce identical results but one
    is written using the normal 'jinja|yaml' and the other is using 'py':

    .. code-block:: jinja

        #!jinja|yaml
        {% set apache = salt.grains.filter_by({
            ...normal jinja map file here...
        }, merge=salt.pillar.get('apache:lookup')) %}
        {{ apache | yaml() }}

    .. code-block:: python

        #!py
        def run():
            apache = __salt__.grains.filter_by({
                ...normal map here but as a python dict...
            }, merge=__salt__.pillar.get('apache:lookup'))
            return apache

    Regardless of which of the above map files is used, it can be accessed from
    any other sls file by calling this function. The following is a usage
    example in Jinja:

    .. code-block:: jinja

        {% set apache = salt.slsutil.renderer('map.sls') %}

    CLI Example:

    .. code-block:: bash

        salt '*' slsutil.renderer salt://path/to/file
        salt '*' slsutil.renderer /path/to/file
        salt '*' slsutil.renderer /path/to/file.jinja 'jinja'
        salt '*' slsutil.renderer /path/to/file.sls 'jinja|yaml'
        salt '*' slsutil.renderer string='Inline template! {{ saltenv }}'
        salt '*' slsutil.renderer string='Hello, {{ name }}.' name='world'
    """
    # At least one input source is mandatory.
    if not path and not string:
        raise salt.exceptions.SaltInvocationError("Must pass either path or string")

    renderers = salt.loader.render(__opts__, __salt__)

    if path:
        # Resolve fileserver URIs (or local paths) to a local file path.
        path_or_string = __salt__["cp.get_url"](
            path, saltenv=kwargs.get("saltenv", "base")
        )
    elif string:
        # Sentinel path telling compile_template to read from input_data.
        path_or_string = ":string:"
        kwargs["input_data"] = string

    ret = salt.template.compile_template(
        path_or_string,
        renderers,
        default_renderer,
        __opts__["renderer_blacklist"],
        __opts__["renderer_whitelist"],
        **kwargs
    )
    # compile_template may return a StringIO-like object; unwrap it.
    return ret.read() if __utils__["stringio.is_readable"](ret) else ret
def _get_serialize_fn(serializer, fn_name):
    """Resolve ``fn_name`` on the named serializer module.

    Raises CommandExecutionError when the serializer does not exist or does
    not implement the requested function.
    """
    available = salt.loader.serializers(__opts__)
    module = getattr(available, serializer, None)
    func = getattr(module, fn_name, None)
    if not module:
        raise salt.exceptions.CommandExecutionError(
            "Serializer '{0}' not found.".format(serializer)
        )
    if not func:
        raise salt.exceptions.CommandExecutionError(
            "Serializer '{0}' does not implement {1}.".format(serializer, fn_name)
        )
    return func
def serialize(serializer, obj, **mod_kwargs):
    """
    Serialize a Python object using one of the available
    :ref:`all-salt.serializers`.

    CLI Example:

    .. code-block:: bash

        salt '*' --no-parse=obj slsutil.serialize 'json' obj="{'foo': 'Foo!'}

    Jinja Example:

    .. code-block:: jinja

        {% set json_string = salt.slsutil.serialize('json',
            {'foo': 'Foo!'}) %}
    """
    # clean_kwargs presumably strips Salt-internal keyword arguments before
    # dispatch -- see salt.utils.args.
    kwargs = salt.utils.args.clean_kwargs(**mod_kwargs)
    return _get_serialize_fn(serializer, "serialize")(obj, **kwargs)
def deserialize(serializer, stream_or_string, **mod_kwargs):
    """
    Deserialize a Python object using one of the available
    :ref:`all-salt.serializers`.

    CLI Example:

    .. code-block:: bash

        salt '*' slsutil.deserialize 'json' '{"foo": "Foo!"}'

        salt '*' --no-parse=stream_or_string slsutil.deserialize 'json' \\
            stream_or_string='{"foo": "Foo!"}'

    Jinja Example:

    .. code-block:: jinja

        {% set python_object = salt.slsutil.deserialize('json',
            '{"foo": "Foo!"}') %}
    """
    # clean_kwargs presumably strips Salt-internal keyword arguments before
    # dispatch -- see salt.utils.args.
    kwargs = salt.utils.args.clean_kwargs(**mod_kwargs)
    return _get_serialize_fn(serializer, "deserialize")(stream_or_string, **kwargs)
def banner(
    width=72,
    commentchar="#",
    borderchar="#",
    blockstart=None,
    blockend=None,
    title=None,
    text=None,
    newline=False,
):
    """
    Build a standardized comment banner for inclusion in a templated file.

    Configuration management commonly embeds a warning block in managed
    files telling users not to edit them by hand; this helper renders such
    a block in a consistent shape.

    :param width: Total banner width in characters. Default is 72.
    :param commentchar: Character(s) placed at the start of each line; must
        be a valid line-comment introducer for the target file's syntax.
        Multi-character sequences such as ``//`` are supported. For
        syntaxes without line comments (e.g. XML) use the ``blockstart``
        and ``blockend`` options.
    :param borderchar: Single character used in the top and bottom border
        of the comment box.
    :param blockstart: Sequence emitted before the banner (block-comment
        opener); use together with ``blockend``.
    :param blockend: Sequence emitted after the banner (block-comment
        closer); use together with ``blockstart``.
    :param title: First field of the comment block, centered at the top of
        the box.
    :param text: Second field of the comment block, left-justified at the
        bottom of the box.
    :param newline: When ``True`` append a trailing newline to the result.
        Default is ``False``.

    Usage examples:

    .. code-block:: jinja

        {{ salt['slsutil.banner']() }}
        {{ salt['slsutil.banner'](commentchar=' *', borderchar='*',
                                  blockstart='/**', blockend=' */') }}
        {{ salt['slsutil.banner'](title='Copyright 2019 SaltStack, Inc.',
                                  text=copyright, width=60) }}
    """
    if title is None:
        title = "THIS FILE IS MANAGED BY SALT - DO NOT EDIT"
    if text is None:
        text = (
            "The contents of this file are managed by Salt. "
            "Any changes to this file may be overwritten "
            "automatically and without warning."
        )
    # Typesetting helpers: left/right gutters and the writable inner width.
    left_edge = commentchar.rstrip()
    right_edge = commentchar.strip()
    left_gutter = left_edge + " "
    right_gutter = " " + right_edge
    inner_width = width - len(left_gutter) - len(right_gutter)
    # Refuse widths that leave no room for any text.
    if inner_width <= 0:
        raise salt.exceptions.ArgumentValueError("Width is too small to render banner")
    # Static rows: the border and the all-blank spacer.
    top_bottom = (
        commentchar + borderchar[:1] * (width - len(left_edge) - len(right_edge)) + right_edge
    )
    blank_row = commentchar + " " * (width - len(commentchar) * 2) + commentchar
    # Assemble the banner row by row.
    wrapper = textwrap.TextWrapper(width=inner_width)
    rows = []
    if blockstart is not None:
        rows.append(blockstart)
    rows.append(top_bottom)
    rows.append(blank_row)
    rows.extend(
        left_gutter + chunk.center(inner_width) + right_gutter
        for chunk in wrapper.wrap(title)
    )
    rows.append(blank_row)
    rows.extend(
        left_gutter + chunk.ljust(inner_width) + right_gutter
        for chunk in wrapper.wrap(text)
    )
    rows.append(top_bottom)
    if blockend is not None:
        rows.append(blockend)
    result = os.linesep.join(rows)
    return result + os.linesep if newline else result
def boolstr(value, true="true", false="false"):
    """
    Render a boolean as one of two strings.

    Intended for use inside file templates: boolean values held in Pillars
    or Grains often need to be written out in the appropriate syntax for
    the particular file being templated (``yes``/``no``, ``on``/``off``,
    and so on).

    :param value: The boolean (truthy/falsy) value to convert.
    :param true: The string returned when ``value`` is truthy.
    :param false: The string returned when ``value`` is falsy.

    Example -- a pillar ``smtp:encrypted`` holds a boolean but the
    template needs ``yes`` or ``no``:

    .. code-block:: jinja

        {% set encrypted = salt[pillar.get]('smtp:encrypted', false) %}
        use_tls: {{ salt['slsutil.boolstr'](encrypted, 'yes', 'no') }}

    Result (assuming the value is ``True``):

    .. code-block:: none

        use_tls: yes
    """
    return true if value else false
|
# -*- coding: utf-8 -*-
"""
Utility functions for use with or in SLS files
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import textwrap
# Import Salt libs
import salt.exceptions
import salt.loader
import salt.template
import salt.utils.args
import salt.utils.dictupdate
def update(dest, upd, recursive_update=True, merge_lists=False):
    """
    Merge ``upd`` recursively into ``dest``

    :param dest: The data structure merged into.
    :param upd: The data structure merged on top of ``dest``.
    :param recursive_update: When ``True`` (the default) nested structures
        are merged recursively rather than replaced wholesale.
    :param merge_lists: If ``True``, will aggregate list object types
        instead of replacing. This behavior is only activated when
        ``recursive_update=True``.

    CLI Example:

    .. code-block:: shell

        salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}'
    """
    # Thin wrapper -- the merge logic lives in salt.utils.dictupdate.
    return salt.utils.dictupdate.update(dest, upd, recursive_update, merge_lists)
def merge(obj_a, obj_b, strategy="smart", renderer="yaml", merge_lists=False):
    """
    Merge a data structure into another by choosing a merge strategy

    :param obj_a: The object merged into.
    :param obj_b: The object merged on top of ``obj_a``.
    :param strategy: Merge strategy; one of the names listed below.
    :param renderer: Renderer type, used to determine the strategy when
        ``strategy`` is 'smart'.
    :param merge_lists: Whether embedded object lists are merged or replaced.

    Strategies:

    * aggregate
    * list
    * overwrite
    * recurse
    * smart

    CLI Example:

    .. code-block:: shell

        salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
    """
    # Thin wrapper -- the merge logic lives in salt.utils.dictupdate.
    return salt.utils.dictupdate.merge(obj_a, obj_b, strategy, renderer, merge_lists)
def merge_all(lst, strategy="smart", renderer="yaml", merge_lists=False):
    """
    .. versionadded:: 2019.2.0

    Fold a list of objects together, merging each item into the running
    result in order (later items win per the chosen strategy).

    :type lst: Iterable
    :param lst: List of objects to be merged, applied first-to-last.
    :type strategy: String
    :param strategy: Merge strategy. See utils.dictupdate.
    :type renderer: String
    :param renderer:
        Renderer type. Used to determine strategy when strategy is 'smart'.
    :type merge_lists: Bool
    :param merge_lists: Defines whether to merge embedded object lists.

    CLI Example:

    .. code-block:: shell

        $ salt-call --output=txt slsutil.merge_all '[{foo: Foo}, {foo: Bar}]'
        local: {u'foo': u'Bar'}
    """
    merged = {}
    for item in lst:
        merged = salt.utils.dictupdate.merge(merged, item, strategy, renderer, merge_lists)
    return merged
def renderer(path=None, string=None, default_renderer="jinja|yaml", **kwargs):
    """
    Parse a string or file through Salt's renderer system

    .. versionchanged:: 2018.3.0
       Add support for Salt fileserver URIs.

    This is an open-ended function and can be used for a variety of tasks. It
    makes use of Salt's "renderer pipes" system to run a string or file through
    a pipe of any of the loaded renderer modules.

    :param path: The path to a file on Salt's fileserver (any URIs supported by
        :py:func:`cp.get_url <salt.modules.cp.get_url>`) or on the local file
        system.
    :param string: An inline string to be used as the file to send through the
        renderer system. Note, not all renderer modules can work with strings;
        the 'py' renderer requires a file, for example.
    :param default_renderer: The renderer pipe to send the file through; this
        is overridden by a "she-bang" at the top of the file.
    :param kwargs: Keyword args to pass to Salt's compile_template() function.

    Keep in mind the goal of each renderer when choosing a render-pipe; for
    example, the Jinja renderer processes a text file and produces a string,
    however the YAML renderer processes a text file and produces a data
    structure.

    One possible use is to allow writing "map files", as are commonly seen in
    Salt formulas, but without tying the renderer of the map file to the
    renderer used in the other sls files. In other words, a map file could use
    the Python renderer and still be included and used by an sls file that uses
    the default 'jinja|yaml' renderer.

    For example, the two following map files produce identical results but one
    is written using the normal 'jinja|yaml' and the other is using 'py':

    .. code-block:: jinja

        #!jinja|yaml
        {% set apache = salt.grains.filter_by({
            ...normal jinja map file here...
        }, merge=salt.pillar.get('apache:lookup')) %}
        {{ apache | yaml() }}

    .. code-block:: python

        #!py
        def run():
            apache = __salt__.grains.filter_by({
                ...normal map here but as a python dict...
            }, merge=__salt__.pillar.get('apache:lookup'))
            return apache

    Regardless of which of the above map files is used, it can be accessed from
    any other sls file by calling this function. The following is a usage
    example in Jinja:

    .. code-block:: jinja

        {% set apache = salt.slsutil.renderer('map.sls') %}

    CLI Example:

    .. code-block:: bash

        salt '*' slsutil.renderer salt://path/to/file
        salt '*' slsutil.renderer /path/to/file
        salt '*' slsutil.renderer /path/to/file.jinja 'jinja'
        salt '*' slsutil.renderer /path/to/file.sls 'jinja|yaml'
        salt '*' slsutil.renderer string='Inline template! {{ saltenv }}'
        salt '*' slsutil.renderer string='Hello, {{ name }}.' name='world'
    """
    # Exactly one of path/string must be supplied (path wins if both are).
    if not path and not string:
        raise salt.exceptions.SaltInvocationError("Must pass either path or string")
    # Load all renderer modules available to this minion.
    renderers = salt.loader.render(__opts__, __salt__)
    if path:
        # Fetch the file via cp.get_url (supports salt:// and other URIs as
        # well as local paths) and render the resulting local file.
        path_or_string = __salt__["cp.get_url"](
            path, saltenv=kwargs.get("saltenv", "base")
        )
    elif string:
        # ':string:' is the placeholder path handed to compile_template when
        # the template body is supplied inline via input_data.
        path_or_string = ":string:"
        kwargs["input_data"] = string
    ret = salt.template.compile_template(
        path_or_string,
        renderers,
        default_renderer,
        __opts__["renderer_blacklist"],
        __opts__["renderer_whitelist"],
        **kwargs
    )
    # compile_template may return a StringIO-like object; unwrap it to its
    # contents, otherwise return the rendered data structure as-is.
    return ret.read() if __utils__["stringio.is_readable"](ret) else ret
def _get_serialize_fn(serializer, fn_name):
    """
    Look up ``fn_name`` (``serialize`` or ``deserialize``) on the named
    serializer module.

    :param serializer: Name of the serializer (e.g. ``json``, ``yaml``).
    :param fn_name: Name of the attribute to fetch from the serializer module.
    :return: The requested callable.
    :raises CommandExecutionError: If the serializer does not exist, or does
        not implement ``fn_name``.
    """
    serializers = salt.loader.serializers(__opts__)
    fns = getattr(serializers, serializer, None)
    # Check that the serializer exists *before* probing it for the function,
    # so a missing serializer is reported as such rather than falling through
    # to the "does not implement" error path.
    if not fns:
        raise salt.exceptions.CommandExecutionError(
            "Serializer '{0}' not found.".format(serializer)
        )
    fn = getattr(fns, fn_name, None)
    if not fn:
        raise salt.exceptions.CommandExecutionError(
            "Serializer '{0}' does not implement {1}.".format(serializer, fn_name)
        )
    return fn
def serialize(serializer, obj, **mod_kwargs):
    """
    Serialize a Python object with one of the available
    :ref:`all-salt.serializers`.

    CLI Example:

    .. code-block:: bash

        salt '*' --no-parse=obj slsutil.serialize 'json' obj="{'foo': 'Foo!'}

    Jinja Example:

    .. code-block:: jinja

        {% set json_string = salt.slsutil.serialize('json',
            {'foo': 'Foo!'}) %}
    """
    # clean_kwargs filters out Salt-internal keyword arguments before they
    # reach the serializer implementation
    cleaned_kwargs = salt.utils.args.clean_kwargs(**mod_kwargs)
    serialize_fn = _get_serialize_fn(serializer, "serialize")
    return serialize_fn(obj, **cleaned_kwargs)
def deserialize(serializer, stream_or_string, **mod_kwargs):
    """
    Deserialize a Python object with one of the available
    :ref:`all-salt.serializers`.

    CLI Example:

    .. code-block:: bash

        salt '*' slsutil.deserialize 'json' '{"foo": "Foo!"}'
        salt '*' --no-parse=stream_or_string slsutil.deserialize 'json' \\
            stream_or_string='{"foo": "Foo!"}'

    Jinja Example:

    .. code-block:: jinja

        {% set python_object = salt.slsutil.deserialize('json',
            '{"foo": "Foo!"}') %}
    """
    # clean_kwargs filters out Salt-internal keyword arguments before they
    # reach the serializer implementation
    cleaned_kwargs = salt.utils.args.clean_kwargs(**mod_kwargs)
    deserialize_fn = _get_serialize_fn(serializer, "deserialize")
    return deserialize_fn(stream_or_string, **cleaned_kwargs)
def banner(
    width=72,
    commentchar="#",
    borderchar="#",
    blockstart=None,
    blockend=None,
    title=None,
    text=None,
    newline=False,
):
    """
    Build a standardized comment banner for inclusion in a templated file.

    Configuration management commonly embeds a warning block in managed
    files telling users not to edit them by hand; this helper renders such
    a block in a consistent shape.

    :param width: Total banner width in characters. Default is 72.
    :param commentchar: Character(s) placed at the start of each line; must
        be a valid line-comment introducer for the target file's syntax.
        Multi-character sequences such as ``//`` are supported. For
        syntaxes without line comments (e.g. XML) use the ``blockstart``
        and ``blockend`` options.
    :param borderchar: Single character used in the top and bottom border
        of the comment box.
    :param blockstart: Sequence emitted before the banner (block-comment
        opener); use together with ``blockend``.
    :param blockend: Sequence emitted after the banner (block-comment
        closer); use together with ``blockstart``.
    :param title: First field of the comment block, centered at the top of
        the box.
    :param text: Second field of the comment block, left-justified at the
        bottom of the box.
    :param newline: When ``True`` append a trailing newline to the result.
        Default is ``False``.

    Usage examples:

    .. code-block:: jinja

        {{ salt['slsutil.banner']() }}
        {{ salt['slsutil.banner'](commentchar=' *', borderchar='*',
                                  blockstart='/**', blockend=' */') }}
        {{ salt['slsutil.banner'](title='Copyright 2019 SaltStack, Inc.',
                                  text=copyright, width=60) }}
    """
    if title is None:
        title = "THIS FILE IS MANAGED BY SALT - DO NOT EDIT"
    if text is None:
        text = (
            "The contents of this file are managed by Salt. "
            "Any changes to this file may be overwritten "
            "automatically and without warning."
        )
    # Typesetting helpers: left/right gutters and the writable inner width.
    left_edge = commentchar.rstrip()
    right_edge = commentchar.strip()
    left_gutter = left_edge + " "
    right_gutter = " " + right_edge
    inner_width = width - len(left_gutter) - len(right_gutter)
    # Refuse widths that leave no room for any text.
    if inner_width <= 0:
        raise salt.exceptions.ArgumentValueError("Width is too small to render banner")
    # Static rows: the border and the all-blank spacer.
    top_bottom = (
        commentchar + borderchar[:1] * (width - len(left_edge) - len(right_edge)) + right_edge
    )
    blank_row = commentchar + " " * (width - len(commentchar) * 2) + commentchar
    # Assemble the banner row by row.
    wrapper = textwrap.TextWrapper(width=inner_width)
    rows = []
    if blockstart is not None:
        rows.append(blockstart)
    rows.append(top_bottom)
    rows.append(blank_row)
    rows.extend(
        left_gutter + chunk.center(inner_width) + right_gutter
        for chunk in wrapper.wrap(title)
    )
    rows.append(blank_row)
    rows.extend(
        left_gutter + chunk.ljust(inner_width) + right_gutter
        for chunk in wrapper.wrap(text)
    )
    rows.append(top_bottom)
    if blockend is not None:
        rows.append(blockend)
    result = os.linesep.join(rows)
    return result + os.linesep if newline else result
def boolstr(value, true="true", false="false"):
    """
    Render a boolean as one of two strings.

    Intended for use inside file templates: boolean values held in Pillars
    or Grains often need to be written out in the appropriate syntax for
    the particular file being templated (``yes``/``no``, ``on``/``off``,
    and so on).

    :param value: The boolean (truthy/falsy) value to convert.
    :param true: The string returned when ``value`` is truthy.
    :param false: The string returned when ``value`` is falsy.

    Example -- a pillar ``smtp:encrypted`` holds a boolean but the
    template needs ``yes`` or ``no``:

    .. code-block:: jinja

        {% set encrypted = salt[pillar.get]('smtp:encrypted', false) %}
        use_tls: {{ salt['slsutil.boolstr'](encrypted, 'yes', 'no') }}

    Result (assuming the value is ``True``):

    .. code-block:: none

        use_tls: yes
    """
    return true if value else false
|
en
| 0.605953
|
# -*- coding: utf-8 -*- Utility functions for use with or in SLS files # Import Python libs # Import Salt libs Merge ``upd`` recursively into ``dest`` If ``merge_lists=True``, will aggregate list object types instead of replacing. This behavior is only activated when ``recursive_update=True``. CLI Example: .. code-block:: shell salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}' Merge a data structure into another by choosing a merge strategy Strategies: * aggregate * list * overwrite * recurse * smart CLI Example: .. code-block:: shell salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}' .. versionadded:: 2019.2.0 Merge a list of objects into each other in order :type lst: Iterable :param lst: List of objects to be merged. :type strategy: String :param strategy: Merge strategy. See utils.dictupdate. :type renderer: String :param renderer: Renderer type. Used to determine strategy when strategy is 'smart'. :type merge_lists: Bool :param merge_lists: Defines whether to merge embedded object lists. CLI Example: .. code-block:: shell $ salt-call --output=txt slsutil.merge_all '[{foo: Foo}, {foo: Bar}]' local: {u'foo': u'Bar'} Parse a string or file through Salt's renderer system .. versionchanged:: 2018.3.0 Add support for Salt fileserver URIs. This is an open-ended function and can be used for a variety of tasks. It makes use of Salt's "renderer pipes" system to run a string or file through a pipe of any of the loaded renderer modules. :param path: The path to a file on Salt's fileserver (any URIs supported by :py:func:`cp.get_url <salt.modules.cp.get_url>`) or on the local file system. :param string: An inline string to be used as the file to send through the renderer system. Note, not all renderer modules can work with strings; the 'py' renderer requires a file, for example. :param default_renderer: The renderer pipe to send the file through; this is overridden by a "she-bang" at the top of the file. 
:param kwargs: Keyword args to pass to Salt's compile_template() function. Keep in mind the goal of each renderer when choosing a render-pipe; for example, the Jinja renderer processes a text file and produces a string, however the YAML renderer processes a text file and produces a data structure. One possible use is to allow writing "map files", as are commonly seen in Salt formulas, but without tying the renderer of the map file to the renderer used in the other sls files. In other words, a map file could use the Python renderer and still be included and used by an sls file that uses the default 'jinja|yaml' renderer. For example, the two following map files produce identical results but one is written using the normal 'jinja|yaml' and the other is using 'py': .. code-block:: jinja #!jinja|yaml {% set apache = salt.grains.filter_by({ ...normal jinja map file here... }, merge=salt.pillar.get('apache:lookup')) %} {{ apache | yaml() }} .. code-block:: python #!py def run(): apache = __salt__.grains.filter_by({ ...normal map here but as a python dict... }, merge=__salt__.pillar.get('apache:lookup')) return apache Regardless of which of the above map files is used, it can be accessed from any other sls file by calling this function. The following is a usage example in Jinja: .. code-block:: jinja {% set apache = salt.slsutil.renderer('map.sls') %} CLI Example: .. code-block:: bash salt '*' slsutil.renderer salt://path/to/file salt '*' slsutil.renderer /path/to/file salt '*' slsutil.renderer /path/to/file.jinja 'jinja' salt '*' slsutil.renderer /path/to/file.sls 'jinja|yaml' salt '*' slsutil.renderer string='Inline template! {{ saltenv }}' salt '*' slsutil.renderer string='Hello, {{ name }}.' name='world' Serialize a Python object using one of the available :ref:`all-salt.serializers`. CLI Example: .. code-block:: bash salt '*' --no-parse=obj slsutil.serialize 'json' obj="{'foo': 'Foo!'} Jinja Example: .. 
code-block:: jinja {% set json_string = salt.slsutil.serialize('json', {'foo': 'Foo!'}) %} Deserialize a Python object using one of the available :ref:`all-salt.serializers`. CLI Example: .. code-block:: bash salt '*' slsutil.deserialize 'json' '{"foo": "Foo!"}' salt '*' --no-parse=stream_or_string slsutil.deserialize 'json' \\ stream_or_string='{"foo": "Foo!"}' Jinja Example: .. code-block:: jinja {% set python_object = salt.slsutil.deserialize('json', '{"foo": "Foo!"}') %} Create a standardized comment block to include in a templated file. A common technique in configuration management is to include a comment block in managed files, warning users not to modify the file. This function simplifies and standardizes those comment blocks. :param width: The width, in characters, of the banner. Default is 72. :param commentchar: The character to be used in the starting position of each line. This value should be set to a valid line comment character for the syntax of the file in which the banner is being inserted. Multiple character sequences, like '//' are supported. If the file's syntax does not support line comments (such as XML), use the ``blockstart`` and ``blockend`` options. :param borderchar: The character to use in the top and bottom border of the comment box. Must be a single character. :param blockstart: The character sequence to use at the beginning of a block comment. Should be used in conjunction with ``blockend`` :param blockend: The character sequence to use at the end of a block comment. Should be used in conjunction with ``blockstart`` :param title: The first field of the comment block. This field appears centered at the top of the box. :param text: The second filed of the comment block. This field appears left-justifed at the bottom of the box. :param newline: Boolean value to indicate whether the comment block should end with a newline. Default is ``False``. **Example 1 - the default banner:** .. code-block:: jinja {{ salt['slsutil.banner']() }} .. 
code-block:: none ######################################################################## # # # THIS FILE IS MANAGED BY SALT - DO NOT EDIT # # # # The contents of this file are managed by Salt. Any changes to this # # file may be overwritten automatically and without warning. # ######################################################################## **Example 2 - a Javadoc-style banner:** .. code-block:: jinja {{ salt['slsutil.banner'](commentchar=' *', borderchar='*', blockstart='/**', blockend=' */') }} .. code-block:: none /** *********************************************************************** * * * THIS FILE IS MANAGED BY SALT - DO NOT EDIT * * * * The contents of this file are managed by Salt. Any changes to this * * file may be overwritten automatically and without warning. * *********************************************************************** */ **Example 3 - custom text:** .. code-block:: jinja {{ set copyright='This file may not be copied or distributed without permission of SaltStack, Inc.' }} {{ salt['slsutil.banner'](title='Copyright 2019 SaltStack, Inc.', text=copyright, width=60) }} .. code-block:: none ############################################################ # # # Copyright 2019 SaltStack, Inc. # # # # This file may not be copied or distributed without # # permission of SaltStack, Inc. # ############################################################ # Set up some typesetting variables # Check the width # Define the static elements # Create the banner # Convert list to multi-line string # Add a newline character to the end of the banner Convert a boolean value into a string. This function is intended to be used from within file templates to provide an easy way to take boolean values stored in Pillars or Grains, and write them out in the apprpriate syntax for a particular file template. 
:param value: The boolean value to be converted :param true: The value to return if ``value`` is ``True`` :param false: The value to return if ``value`` is ``False`` In this example, a pillar named ``smtp:encrypted`` stores a boolean value, but the template that uses that value needs ``yes`` or ``no`` to be written, based on the boolean value. *Note: this is written on two lines for clarity. The same result could be achieved in one line.* .. code-block:: jinja {% set encrypted = salt[pillar.get]('smtp:encrypted', false) %} use_tls: {{ salt['slsutil.boolstr'](encrypted, 'yes', 'no') }} Result (assuming the value is ``True``): .. code-block:: none use_tls: yes
| 2.387042
| 2
|
positions.py
|
PRASAD-DANGARE/PYTHON
| 1
|
6627815
|
# Python Program To Display All Positions Of A Sub String In A Given Main String
'''
Function Name : Display All Position Of A Sub String In Main String .
Function Date : 2 Sep 2020
Function Author : <NAME>
Input : String
Output : Integer
'''
# NOTE: renamed 'str' -> 'main_str' so the builtin str type is not shadowed.
main_str = input('Enter Main String : ')
print("\n")
sub = input('Enter Sub String : ')
print("\n")
i = 0
found = False  # becomes True once at least one occurrence is reported
n = len(main_str)
while i < n:  # scan positions 0..n-1 of the main string
    pos = main_str.find(sub, i, n)
    if pos == -1:
        # No further occurrences -- stop instead of re-scanning one index
        # at a time as the original did.
        break
    print('Found At Position : ', pos + 1)  # report 1-based position
    print("\n")
    found = True
    i = pos + 1  # resume just past this match so overlapping matches are found
if not found:
    print('Sub String Not Found')
    print("\n")
|
# Python Program To Display All Positions Of A Sub String In A Given Main String
'''
Function Name : Display All Position Of A Sub String In Main String .
Function Date : 2 Sep 2020
Function Author : <NAME>
Input : String
Output : Integer
'''
# NOTE: renamed 'str' -> 'main_str' so the builtin str type is not shadowed.
main_str = input('Enter Main String : ')
print("\n")
sub = input('Enter Sub String : ')
print("\n")
i = 0
found = False  # becomes True once at least one occurrence is reported
n = len(main_str)
while i < n:  # scan positions 0..n-1 of the main string
    pos = main_str.find(sub, i, n)
    if pos == -1:
        # No further occurrences -- stop instead of re-scanning one index
        # at a time as the original did.
        break
    print('Found At Position : ', pos + 1)  # report 1-based position
    print("\n")
    found = True
    i = pos + 1  # resume just past this match so overlapping matches are found
if not found:
    print('Sub String Not Found')
    print("\n")
|
en
| 0.495601
|
# Python Program To Display All Positions Of A Sub String In A Given Main String Function Name : Display All Position Of A Sub String In Main String .
Function Date : 2 Sep 2020
Function Author : <NAME>
Input : String
Output : Integer # Becomes True If String Is Found # Repeat From 0th To nth Characters # If Found Display Its Position # Search From pos+1 Position Onwards # Search From Next Characters Onwards
| 3.979868
| 4
|
OpenCV 104/Histograms/opencv-histogram-equalization/simple_equalization_practice.py
|
jjaramillo34/pyimagesearchuniversity_course
| 1
|
6627816
|
# USAGE
# python simple_equalization_practice.py --image images/moon.png
# import the necessary packages
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, required=True,
    help="path to the input image")
args = vars(ap.parse_args())
# load the input image from disk and convert it to grayscale
print("[INFO] loading input image...")
image = cv2.imread(args["image"])
# cv2.imread returns None (no exception) when the path is bad -- fail with a
# clear message instead of a cryptic cvtColor crash
if image is None:
    raise SystemExit("[ERROR] could not read image: {}".format(args["image"]))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# apply histogram equalization to spread out the pixel intensities
print("[INFO] performing histogram equalization...")
equalized = cv2.equalizeHist(gray)
# show the original grayscale image and the equalized image
# (also fixes the "Histrogram" typo in the user-visible window title)
cv2.imshow("Input", gray)
cv2.imshow("Histogram Equalization", equalized)
cv2.waitKey(0)
|
# USAGE
# python simple_equalization_practice.py --image images/moon.png
# import the necessary packages
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, required=True,
    help="path to the input image")
args = vars(ap.parse_args())
# load the input image from disk and convert it to grayscale
print("[INFO] loading input image...")
image = cv2.imread(args["image"])
# cv2.imread returns None (no exception) when the path is bad -- fail with a
# clear message instead of a cryptic cvtColor crash
if image is None:
    raise SystemExit("[ERROR] could not read image: {}".format(args["image"]))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# apply histogram equalization to spread out the pixel intensities
print("[INFO] performing histogram equalization...")
equalized = cv2.equalizeHist(gray)
# show the original grayscale image and the equalized image
# (also fixes the "Histrogram" typo in the user-visible window title)
cv2.imshow("Input", gray)
cv2.imshow("Histogram Equalization", equalized)
cv2.waitKey(0)
|
en
| 0.502536
|
# USAGE # python simple_equalization_practice.py --image images/moon.png # import the necessary packages # construct the argument parser and the arguments # load the input image from disk and convert it to grayscale # apply histogram equalization # show the original grayscle image and equalized image
| 3.679477
| 4
|
scrapli/driver/network/base_driver.py
|
verbosemode/scrapli
| 0
|
6627817
|
"""scrapli.driver.network.base_driver"""
import re
from collections import defaultdict
from datetime import datetime
from enum import Enum
from functools import lru_cache
from logging import LoggerAdapter
from typing import DefaultDict, Dict, List, Optional, Set, Tuple, Union
from scrapli.exceptions import ScrapliPrivilegeError, ScrapliTypeError
from scrapli.helper import user_warning
from scrapli.response import MultiResponse, Response
class PrivilegeLevel:
    __slots__ = (
        "pattern",
        "name",
        "previous_priv",
        "deescalate",
        "escalate",
        "escalate_auth",
        "escalate_prompt",
        "not_contains",
    )
    def __init__(
        self,
        pattern: str,
        name: str,
        previous_priv: str,
        deescalate: str,
        escalate: str,
        escalate_auth: bool,
        escalate_prompt: str,
        not_contains: Optional[List[str]] = None,
    ):
        """
        Describe a single privilege level and how to move in and out of it

        Args:
            pattern: regex matched against the prompt to recognize this privilege level
            name: friendly name of this privilege level
            previous_priv: name of the lower/previous privilege level
            deescalate: how to deescalate *from* this level (to the lower/previous priv)
            escalate: how to escalate *to* this level (from the lower/previous priv)
            escalate_auth: True/False escalation requires authentication
            escalate_prompt: prompt pattern searched for during escalation when
                escalate_auth is True
            not_contains: substrings that must *not* appear in a prompt for this
                privilege level

        Returns:
            None

        Raises:
            N/A

        """
        self.name = name
        self.pattern = pattern
        self.previous_priv = previous_priv
        self.escalate = escalate
        self.escalate_auth = escalate_auth
        self.escalate_prompt = escalate_prompt
        self.deescalate = deescalate
        # default to an empty list so consumers can always iterate this field
        self.not_contains: List[str] = not_contains or []
# Sentinel privilege level assigned before the driver has actually
# determined the current privilege level.
DUMMY_PRIV_LEVEL = PrivilegeLevel("", "DUMMY", "", "", "", False, "")
# Mapping of privilege level name -> PrivilegeLevel; empty here, presumably
# populated by platform-specific drivers -- confirm against callers.
PRIVS: Dict[str, PrivilegeLevel] = {}
class PrivilegeAction(Enum):
    """Action required to move from the current privilege level to a target one."""
    NO_ACTION = "no action"
    ESCALATE = "escalate"
    DEESCALATE = "deescalate"
class BaseNetworkDriver:
    # BaseNetworkDriver Mixin vars for typing/linting purposes -- the concrete
    # driver class this mixin is combined with is expected to provide these.
    logger: LoggerAdapter
    # presumably the secondary (escalation) authentication secret -- confirm
    # against the concrete driver
    auth_secondary: str
    failed_when_contains: List[str]
    # platform identifiers, presumably handed to textfsm/genie output parsing
    textfsm_platform: str
    genie_platform: str
    # privilege level name -> PrivilegeLevel definition
    privilege_levels: Dict[str, PrivilegeLevel]
    # combined prompt pattern built from all privilege level patterns
    # (see _generate_comms_prompt_pattern)
    comms_prompt_pattern: str
    # starts as the dummy level until the real privilege level is determined
    _current_priv_level = DUMMY_PRIV_LEVEL
    # privilege name -> set of reachable privilege names (see _build_priv_graph)
    _priv_graph: DefaultDict[str, Set[str]]
def _generate_comms_prompt_pattern(self) -> None:
"""
Generate the `comms_prompt_pattern` from the currently assigned privilege levels
Args:
N/A
Returns:
None
Raises:
N/A
"""
self.logger.debug("generating combined network comms prompt pattern")
self.comms_prompt_pattern = r"|".join(
rf"({priv_level_data.pattern})" for priv_level_data in self.privilege_levels.values()
)
@lru_cache()
def _determine_current_priv(self, current_prompt: str) -> List[str]:
"""
Determine current privilege level from prompt string
Args:
current_prompt: string of current prompt
Returns:
list: list of string names of matching privilege levels
Raises:
ScrapliPrivilegeError: if privilege level cannot be determined
"""
matching_priv_levels = []
for priv_level in self.privilege_levels.values():
if priv_level.not_contains:
# starting at 2021.07.30 the `not_contains` field was added to privilege levels
# (defaulting to an empty tuple) -- this helps us to simplify the priv patterns
# greatly, as well as have no reliance on look arounds which makes the "normal"
# scrapli privilege levels more go friendly -- useful for scrapligo!
if any(not_contains in current_prompt for not_contains in priv_level.not_contains):
continue
search_result = re.search(
pattern=priv_level.pattern, string=current_prompt, flags=re.M | re.I
)
if not search_result:
continue
matching_priv_levels.append(priv_level.name)
if not matching_priv_levels:
msg = f"could not determine privilege level from provided prompt: '{current_prompt}'"
self.logger.critical(msg)
raise ScrapliPrivilegeError(msg)
self.logger.debug(f"determined current privilege level is one of '{matching_priv_levels}'")
return matching_priv_levels
def _build_priv_graph(self) -> None:
"""
Build a graph of privilege levels
`_priv_graph` is a "graph" of all privilege levels and how to acquire them from any given
priv level. This is probably not very efficient but we should never have more than a
handful of priv levels so this should never be a big issue.
While at the moment priv levels are always... "linear" in that there is only ever one "up"
and one "down" privilege from any given priv, we still have "forks" in the road -- for
example, in IOSXR we can go from privilege exec to configuration or configuration exclusive.
This method builds a graph that allows us to make intelligent decisions about how to get
from where we are to where we want to be!
Args:
N/A
Returns:
None
Raises:
N/A
"""
self._priv_graph = defaultdict(set)
privilege_levels = self.privilege_levels.values()
for privilege_level in privilege_levels:
if privilege_level.previous_priv:
self._priv_graph[privilege_level.name].add(privilege_level.previous_priv)
else:
self._priv_graph[privilege_level.name] = set()
for higher_privilege_level, privilege_level_list in self._priv_graph.items():
for privilege_level_name in privilege_level_list:
self._priv_graph[privilege_level_name].add(higher_privilege_level)
def _build_priv_change_map(
self,
starting_priv_name: str,
destination_priv_name: str,
priv_change_map: Optional[List[str]] = None,
) -> List[str]:
"""
Generate a list of priv levels from starting priv to destination priv
Args:
starting_priv_name: name of starting priv
destination_priv_name: name of destination priv
priv_change_map: current priv_change_map; should only be passed when this function
calls itself
Returns:
list: list of strings of priv names to get from starting to destination priv level
Raises:
N/A
"""
if priv_change_map is None:
priv_change_map = []
priv_change_map = priv_change_map + [starting_priv_name]
if starting_priv_name == destination_priv_name:
return priv_change_map
for privilege_name in self._priv_graph[starting_priv_name]:
if privilege_name not in priv_change_map:
updated_priv_change_map = self._build_priv_change_map(
starting_priv_name=privilege_name,
destination_priv_name=destination_priv_name,
priv_change_map=priv_change_map,
)
if updated_priv_change_map:
return updated_priv_change_map
# shouldnt ever get to this i dont think... putting here to appease pylint and ignoring cov
return [] # pragma: nocover
def update_privilege_levels(self) -> None:
"""
Re-generate the privilege graph, and update the comms prompt pattern
Args:
N/A
Returns:
None
Raises:
N/A
"""
# build/update the priv graph
self._build_priv_graph()
# build/update the joined comms prompt pattern
self._generate_comms_prompt_pattern()
# ensure the channel has the updated prompt pattern so it knows how to match any newly
# updated priv levels (such as registered configuration sessions)
self.channel.comms_prompt_pattern = ( # type: ignore # pylint: disable=E1101
self.comms_prompt_pattern
)
# finally, clear the lru caches as patterns may have been updated
self._determine_current_priv.cache_clear()
def _validate_privilege_level_name(self, privilege_level_name: str) -> None:
"""
Get privilege level name if provided privilege is valid
Args:
privilege_level_name: string name of desired privilege level
Returns:
None
Raises:
ScrapliPrivilegeError: if attempting to acquire an unknown priv
"""
desired_privilege_level = self.privilege_levels.get(privilege_level_name)
if desired_privilege_level is None:
msg = (
f"requested privilege level '{privilege_level_name}' not a valid privilege level of"
f" '{self.__class__.__name__}'"
)
self.logger.critical(msg)
raise ScrapliPrivilegeError(msg)
def _pre_escalate(self, escalate_priv: PrivilegeLevel) -> None:
"""
Handle pre "_escalate" tasks for consistency between sync/async versions
Args:
escalate_priv: privilege level to escalate to
Returns:
None
Raises:
N/A
"""
if escalate_priv.escalate_auth is True and not self.auth_secondary:
title = "Authentication Warning!"
message = (
"scrapli will try to escalate privilege without entering a password but may "
"fail.\nSet an 'auth_secondary' password if your device requires a password to "
"increase privilege, otherwise ignore this message."
)
user_warning(title=title, message=message)
def _process_acquire_priv(
self,
destination_priv: str,
current_prompt: str,
) -> Tuple[PrivilegeAction, PrivilegeLevel]:
"""
Handle non channel "acquire_priv" tasks for consistency between sync/async versions
Args:
destination_priv: string name of desired privilege level
current_prompt: string of the current prompt
Returns:
Tuple[PrivilegeAction, PrivilegeLevel]: enum set to appropriate value for no action,
escalate or deescalate and privilege level object to pass to either escalate or
deescalate method
Raises:
N/A
"""
self.logger.info(f"attempting to acquire '{destination_priv}' privilege level")
# decide if we are already at the desired priv, then we don't need to do any thing!
current_priv_patterns = self._determine_current_priv(current_prompt=current_prompt)
if self._current_priv_level.name in current_priv_patterns:
current_priv = self.privilege_levels[self._current_priv_level.name]
elif destination_priv in current_priv_patterns:
current_priv = self.privilege_levels[destination_priv]
else:
# if multiple patterns match pick the zeroith... hopefully this never happens though...
# and it *shouldn't* because right now the only way to have the same priv patterns is
# to be *basically* the same privilege level -- i.e. configuration and configuration
# exclusive for iosxr
current_priv = self.privilege_levels[current_priv_patterns[0]]
if current_priv.name == destination_priv:
self.logger.debug(
"determined current privilege level is target privilege level, no action needed"
)
self._current_priv_level = self.privilege_levels[destination_priv]
return PrivilegeAction.NO_ACTION, self.privilege_levels[destination_priv]
map_to_destination_priv = self._build_priv_change_map(
starting_priv_name=current_priv.name, destination_priv_name=destination_priv
)
# at this point we basically dont *know* the privilege leve we are at (or we wont/cant after
# we do an escalation or deescalation, so we reset to the dummy priv level
self._current_priv_level = DUMMY_PRIV_LEVEL
if self.privilege_levels[map_to_destination_priv[1]].previous_priv != current_priv.name:
self.logger.debug("determined privilege deescalation necessary")
return PrivilegeAction.DEESCALATE, current_priv
self.logger.debug("determined privilege escalation necessary")
return PrivilegeAction.ESCALATE, self.privilege_levels[map_to_destination_priv[1]]
@property
def _generic_driver_mode(self) -> bool:
"""
Getter for `_generic_driver_mode` attribute
Args:
N/A
Returns:
bool: _generic_driver_mode value
Raises:
N/A
"""
try:
return self.__generic_driver_mode
except AttributeError:
return False
@_generic_driver_mode.setter
def _generic_driver_mode(self, value: bool) -> None:
"""
Setter for `_generic_driver_mode` attribute
Args:
value: bool value for _generic_driver_mode
Returns:
None
Raises:
ScrapliTypeError: if value is not of type bool
"""
self.logger.debug(f"setting '_generic_driver_mode' value to '{value}'")
if not isinstance(value, bool):
raise ScrapliTypeError
if value is True:
# if we are setting ingore priv level we reset current priv to the dummy priv so that
# once (if) a user turns ignore priv back off we know we need to reset/reacquire priv
# as the user coulda done pretty much anything and we could end up at who knows what
# priv level
self._current_priv_level = DUMMY_PRIV_LEVEL
self.__generic_driver_mode = value
def _update_response(self, response: Response) -> None:
"""
Update response with network driver specific data
This happens here as the underlying channel provides a response object but is unaware of any
of the network/platform specific attributes that may need to get updated
Args:
response: response to update
Returns:
None
Raises:
N/A
"""
response.textfsm_platform = self.textfsm_platform
response.genie_platform = self.genie_platform
@staticmethod
def _pre_send_config(config: str) -> List[str]:
"""
Handle pre "send_config" tasks for consistency between sync/async versions
Args:
config: string configuration to send to the device, supports sending multi-line strings
Returns:
list: list of config lines from provided "config" input
Raises:
ScrapliTypeError: if anything but a string is provided for `file`
"""
if not isinstance(config, str):
raise ScrapliTypeError(
f"'send_config' expects a single string, got {type(config)}, "
"to send a list of configs use the 'send_configs' method instead."
)
# in order to handle multi-line strings, we split lines
split_config = config.splitlines()
return split_config
def _post_send_config(
self,
config: str,
multi_response: MultiResponse,
) -> Response:
"""
Handle post "send_config" tasks for consistency between sync/async versions
Args:
config: string configuration to send to the device, supports sending multi-line strings
multi_response: multi_response object send_config got from calling self.send_configs;
we need this to parse out the multi_response back into a single Response object
Returns:
Response: Unified response object
Raises:
N/A
"""
# capture failed_when_contains and host from zeroith multi_response element (there should
# always be at least a zeroith element here!); getting host just lets us keep the mixin
# class a little cleaner without having to deal with sync vs async transport classes from
# a typing perspective
failed_when_contains = multi_response[0].failed_when_contains
host = multi_response[0].host
# create a new unified response object
response = Response(
host=host,
channel_input=config,
failed_when_contains=failed_when_contains,
)
response.start_time = multi_response[0].start_time
response.finish_time = datetime.now()
response.elapsed_time = (response.finish_time - response.start_time).total_seconds()
# join all the results together into a single final result
response.result = "\n".join(response.result for response in multi_response)
response.failed = False
if any(r.failed for r in multi_response):
response.failed = True
self._update_response(response=response)
return response
def _pre_send_configs(
self,
configs: List[str],
failed_when_contains: Optional[Union[str, List[str]]] = None,
privilege_level: str = "",
) -> Tuple[str, Union[str, List[str]]]:
"""
Handle pre "send_configs" tasks for consistency between sync/async versions
Args:
configs: list of strings to send to device in config mode
failed_when_contains: string or list of strings indicating failure if found in response
privilege_level: name of configuration privilege level/type to acquire; this is platform
dependent, so check the device driver for specifics. Examples of privilege_name
would be "configuration_exclusive" for IOSXRDriver, or "configuration_private" for
JunosDriver. You can also pass in a name of a configuration session such as
"my-config-session" if you have registered a session using the
"register_config_session" method of the EOSDriver or NXOSDriver.
Returns:
Tuple[str, Union[str, List[str]]]: string of resolved privilege level name, and failed
when contains which may be a string or list of strings
Raises:
ScrapliTypeError: if configs is anything but a list
ScrapliPrivilegeError: if connection is in 'generic_driver_mode' -- this should be a
non-standard use case so there is no reason to complicate the config(s) methods
with supporting generic driver mode (plus if there was config modes in generic
driver mode that wouldn't be very generic driver like, would it!)
"""
if not isinstance(configs, list):
raise ScrapliTypeError(
f"'send_configs' expects a list of strings, got {type(configs)}, "
"to send a single configuration line/string use the 'send_config' method instead."
)
if self._generic_driver_mode is True:
raise ScrapliPrivilegeError(
"connection is in 'generic_driver_mode', send config(s|s_from_file) is disabled"
)
if failed_when_contains is None:
final_failed_when_contains = self.failed_when_contains
elif isinstance(failed_when_contains, str):
final_failed_when_contains = [failed_when_contains]
else:
final_failed_when_contains = failed_when_contains
if privilege_level:
self._validate_privilege_level_name(privilege_level_name=privilege_level)
resolved_privilege_level = privilege_level
else:
resolved_privilege_level = "configuration"
return resolved_privilege_level, final_failed_when_contains
def _post_send_configs(self, responses: MultiResponse) -> MultiResponse:
"""
Handle post "send_configs" tasks for consistency between sync/async versions
Args:
responses: multi_response object to update
Returns:
MultiResponse: Unified response object
Raises:
N/A
"""
for response in responses:
self._update_response(response=response)
return responses
|
"""scrapli.driver.network.base_driver"""
import re
from collections import defaultdict
from datetime import datetime
from enum import Enum
from functools import lru_cache
from logging import LoggerAdapter
from typing import DefaultDict, Dict, List, Optional, Set, Tuple, Union
from scrapli.exceptions import ScrapliPrivilegeError, ScrapliTypeError
from scrapli.helper import user_warning
from scrapli.response import MultiResponse, Response
class PrivilegeLevel:
    """
    Container describing a single device privilege level.

    Holds the regex used to recognize the level from a device prompt, the
    friendly name of the level, its relationship to the next-lower level, and
    the commands/prompts involved in moving into or out of it.
    """

    __slots__ = (
        "pattern",
        "name",
        "previous_priv",
        "deescalate",
        "escalate",
        "escalate_auth",
        "escalate_prompt",
        "not_contains",
    )

    def __init__(
        self,
        pattern: str,
        name: str,
        previous_priv: str,
        deescalate: str,
        escalate: str,
        escalate_auth: bool,
        escalate_prompt: str,
        not_contains: Optional[List[str]] = None,
    ):
        """
        PrivilegeLevel Object

        Args:
            pattern: regex pattern used to identify this privilege level from the prompt
            name: friendly name of this privilege level
            previous_priv: name of the lower/previous privilege level
            deescalate: how to deescalate *from* this privilege level (to the lower/previous priv)
            escalate: how to escalate *to* this privilege level (from the lower/previous priv)
            escalate_auth: True/False escalation requires authentication
            escalate_prompt: prompt pattern to search for during escalation if escalate auth is
                True
            not_contains: list of substrings that should *not* be seen in a prompt for this
                privilege level

        Returns:
            None

        Raises:
            N/A

        """
        self.pattern = pattern
        self.name = name
        self.previous_priv = previous_priv
        self.deescalate = deescalate
        self.escalate = escalate
        self.escalate_auth = escalate_auth
        self.escalate_prompt = escalate_prompt
        # normalize a None/empty value to a fresh empty list (same object is kept when a
        # truthy list is provided, matching the original aliasing behavior)
        self.not_contains: List[str] = not_contains if not_contains else []
# sentinel privilege level representing an *unknown* privilege level; assigned to
# `_current_priv_level` whenever the driver cannot be sure what priv it is at
DUMMY_PRIV_LEVEL = PrivilegeLevel("", "DUMMY", "", "", "", False, "")
# name -> PrivilegeLevel mapping; empty here -- NOTE(review): presumably populated by
# the platform-specific driver modules, confirm against those modules
PRIVS: Dict[str, PrivilegeLevel] = {}
class PrivilegeAction(Enum):
    """Action required to move from the current privilege level toward a target level."""

    NO_ACTION = "no action"
    ESCALATE = "escalate"
    DEESCALATE = "deescalate"
class BaseNetworkDriver:
    """
    Privilege-level handling mixin shared by sync and async network drivers.

    Only *declares* (for typing/linting purposes) the attributes its methods rely
    on; NOTE(review): the concrete sync/async driver classes are expected to assign
    these -- confirm against those classes.
    """

    # BaseNetworkDriver Mixin vars for typing/linting purposes
    logger: LoggerAdapter
    auth_secondary: str
    failed_when_contains: List[str]
    textfsm_platform: str
    genie_platform: str
    privilege_levels: Dict[str, PrivilegeLevel]
    comms_prompt_pattern: str
    # start at the DUMMY (unknown) priv level until a real one is determined
    _current_priv_level = DUMMY_PRIV_LEVEL
    _priv_graph: DefaultDict[str, Set[str]]
def _generate_comms_prompt_pattern(self) -> None:
"""
Generate the `comms_prompt_pattern` from the currently assigned privilege levels
Args:
N/A
Returns:
None
Raises:
N/A
"""
self.logger.debug("generating combined network comms prompt pattern")
self.comms_prompt_pattern = r"|".join(
rf"({priv_level_data.pattern})" for priv_level_data in self.privilege_levels.values()
)
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps the instance
    # alive for the cache's lifetime; the cache is explicitly cleared via `cache_clear` in
    # `update_privilege_levels`, so this appears intentional -- confirm
    @lru_cache()
    def _determine_current_priv(self, current_prompt: str) -> List[str]:
        """
        Determine current privilege level from prompt string

        Args:
            current_prompt: string of current prompt

        Returns:
            list: list of string names of matching privilege levels

        Raises:
            ScrapliPrivilegeError: if privilege level cannot be determined

        """
        matching_priv_levels = []
        for priv_level in self.privilege_levels.values():
            if priv_level.not_contains:
                # starting at 2021.07.30 the `not_contains` field was added to privilege levels
                # (defaulting to an empty list) -- this helps us to simplify the priv patterns
                # greatly, as well as have no reliance on look arounds which makes the "normal"
                # scrapli privilege levels more go friendly -- useful for scrapligo!
                if any(not_contains in current_prompt for not_contains in priv_level.not_contains):
                    continue

            # multiline + case-insensitive search of this priv's prompt pattern
            search_result = re.search(
                pattern=priv_level.pattern, string=current_prompt, flags=re.M | re.I
            )
            if not search_result:
                continue

            matching_priv_levels.append(priv_level.name)

        if not matching_priv_levels:
            msg = f"could not determine privilege level from provided prompt: '{current_prompt}'"
            self.logger.critical(msg)
            raise ScrapliPrivilegeError(msg)

        self.logger.debug(f"determined current privilege level is one of '{matching_priv_levels}'")

        return matching_priv_levels
def _build_priv_graph(self) -> None:
"""
Build a graph of privilege levels
`_priv_graph` is a "graph" of all privilege levels and how to acquire them from any given
priv level. This is probably not very efficient but we should never have more than a
handful of priv levels so this should never be a big issue.
While at the moment priv levels are always... "linear" in that there is only ever one "up"
and one "down" privilege from any given priv, we still have "forks" in the road -- for
example, in IOSXR we can go from privilege exec to configuration or configuration exclusive.
This method builds a graph that allows us to make intelligent decisions about how to get
from where we are to where we want to be!
Args:
N/A
Returns:
None
Raises:
N/A
"""
self._priv_graph = defaultdict(set)
privilege_levels = self.privilege_levels.values()
for privilege_level in privilege_levels:
if privilege_level.previous_priv:
self._priv_graph[privilege_level.name].add(privilege_level.previous_priv)
else:
self._priv_graph[privilege_level.name] = set()
for higher_privilege_level, privilege_level_list in self._priv_graph.items():
for privilege_level_name in privilege_level_list:
self._priv_graph[privilege_level_name].add(higher_privilege_level)
    def _build_priv_change_map(
        self,
        starting_priv_name: str,
        destination_priv_name: str,
        priv_change_map: Optional[List[str]] = None,
    ) -> List[str]:
        """
        Generate a list of priv levels from starting priv to destination priv

        Recursive depth-first search over `_priv_graph`; returns the first path
        found from the starting priv to the destination priv, inclusive of both
        endpoints.

        Args:
            starting_priv_name: name of starting priv
            destination_priv_name: name of destination priv
            priv_change_map: current priv_change_map; should only be passed when this function
                calls itself

        Returns:
            list: list of strings of priv names to get from starting to destination priv level

        Raises:
            N/A

        """
        if priv_change_map is None:
            priv_change_map = []

        # build a *new* list rather than appending in place so that exploring a dead-end
        # branch cannot mutate the caller's path
        priv_change_map = priv_change_map + [starting_priv_name]

        if starting_priv_name == destination_priv_name:
            return priv_change_map

        # recurse into each unvisited neighbor; first non-empty result is the path
        for privilege_name in self._priv_graph[starting_priv_name]:
            if privilege_name not in priv_change_map:
                updated_priv_change_map = self._build_priv_change_map(
                    starting_priv_name=privilege_name,
                    destination_priv_name=destination_priv_name,
                    priv_change_map=priv_change_map,
                )
                if updated_priv_change_map:
                    return updated_priv_change_map

        # shouldnt ever get to this i dont think... putting here to appease pylint and ignoring cov
        return []  # pragma: nocover
    def update_privilege_levels(self) -> None:
        """
        Re-generate the privilege graph, and update the comms prompt pattern

        Call this after modifying `privilege_levels` (such as registering a
        configuration session) so all derived state stays in sync.

        Args:
            N/A

        Returns:
            None

        Raises:
            N/A

        """
        # build/update the priv graph
        self._build_priv_graph()
        # build/update the joined comms prompt pattern
        self._generate_comms_prompt_pattern()
        # ensure the channel has the updated prompt pattern so it knows how to match any newly
        # updated priv levels (such as registered configuration sessions)
        self.channel.comms_prompt_pattern = (  # type: ignore # pylint: disable=E1101
            self.comms_prompt_pattern
        )
        # finally, clear the lru caches as patterns may have been updated; without this the
        # cached priv determinations could reflect stale patterns
        self._determine_current_priv.cache_clear()
def _validate_privilege_level_name(self, privilege_level_name: str) -> None:
"""
Get privilege level name if provided privilege is valid
Args:
privilege_level_name: string name of desired privilege level
Returns:
None
Raises:
ScrapliPrivilegeError: if attempting to acquire an unknown priv
"""
desired_privilege_level = self.privilege_levels.get(privilege_level_name)
if desired_privilege_level is None:
msg = (
f"requested privilege level '{privilege_level_name}' not a valid privilege level of"
f" '{self.__class__.__name__}'"
)
self.logger.critical(msg)
raise ScrapliPrivilegeError(msg)
def _pre_escalate(self, escalate_priv: PrivilegeLevel) -> None:
"""
Handle pre "_escalate" tasks for consistency between sync/async versions
Args:
escalate_priv: privilege level to escalate to
Returns:
None
Raises:
N/A
"""
if escalate_priv.escalate_auth is True and not self.auth_secondary:
title = "Authentication Warning!"
message = (
"scrapli will try to escalate privilege without entering a password but may "
"fail.\nSet an 'auth_secondary' password if your device requires a password to "
"increase privilege, otherwise ignore this message."
)
user_warning(title=title, message=message)
    def _process_acquire_priv(
        self,
        destination_priv: str,
        current_prompt: str,
    ) -> Tuple[PrivilegeAction, PrivilegeLevel]:
        """
        Handle non channel "acquire_priv" tasks for consistency between sync/async versions

        Args:
            destination_priv: string name of desired privilege level
            current_prompt: string of the current prompt

        Returns:
            Tuple[PrivilegeAction, PrivilegeLevel]: enum set to appropriate value for no action,
                escalate or deescalate and privilege level object to pass to either escalate or
                deescalate method

        Raises:
            N/A

        """
        self.logger.info(f"attempting to acquire '{destination_priv}' privilege level")

        # decide if we are already at the desired priv, then we don't need to do any thing!
        current_priv_patterns = self._determine_current_priv(current_prompt=current_prompt)

        # when multiple priv patterns match, prefer interpreting the prompt as the last known
        # current priv, then as the destination priv, before falling back to the first match
        if self._current_priv_level.name in current_priv_patterns:
            current_priv = self.privilege_levels[self._current_priv_level.name]
        elif destination_priv in current_priv_patterns:
            current_priv = self.privilege_levels[destination_priv]
        else:
            # if multiple patterns match pick the zeroith... hopefully this never happens though...
            # and it *shouldn't* because right now the only way to have the same priv patterns is
            # to be *basically* the same privilege level -- i.e. configuration and configuration
            # exclusive for iosxr
            current_priv = self.privilege_levels[current_priv_patterns[0]]

        if current_priv.name == destination_priv:
            self.logger.debug(
                "determined current privilege level is target privilege level, no action needed"
            )
            self._current_priv_level = self.privilege_levels[destination_priv]
            return PrivilegeAction.NO_ACTION, self.privilege_levels[destination_priv]

        map_to_destination_priv = self._build_priv_change_map(
            starting_priv_name=current_priv.name, destination_priv_name=destination_priv
        )

        # at this point we basically dont *know* the privilege level we are at (or we wont/cant
        # after we do an escalation or deescalation), so we reset to the dummy priv level
        self._current_priv_level = DUMMY_PRIV_LEVEL

        # element 1 of the change map is the next hop from the current priv; if the current priv
        # is not that hop's previous (lower) priv, we must be moving "down" -- deescalate
        if self.privilege_levels[map_to_destination_priv[1]].previous_priv != current_priv.name:
            self.logger.debug("determined privilege deescalation necessary")
            return PrivilegeAction.DEESCALATE, current_priv

        self.logger.debug("determined privilege escalation necessary")
        return PrivilegeAction.ESCALATE, self.privilege_levels[map_to_destination_priv[1]]
    @property
    def _generic_driver_mode(self) -> bool:
        """
        Getter for `_generic_driver_mode` attribute

        Args:
            N/A

        Returns:
            bool: _generic_driver_mode value

        Raises:
            N/A

        """
        try:
            return self.__generic_driver_mode
        except AttributeError:
            # the (name-mangled) backing attribute is only created once the setter runs,
            # so an unset attribute simply means "not in generic driver mode"
            return False
    @_generic_driver_mode.setter
    def _generic_driver_mode(self, value: bool) -> None:
        """
        Setter for `_generic_driver_mode` attribute

        Args:
            value: bool value for _generic_driver_mode

        Returns:
            None

        Raises:
            ScrapliTypeError: if value is not of type bool

        """
        self.logger.debug(f"setting '_generic_driver_mode' value to '{value}'")

        if not isinstance(value, bool):
            raise ScrapliTypeError

        if value is True:
            # if we are setting ignore priv level we reset current priv to the dummy priv so that
            # once (if) a user turns ignore priv back off we know we need to reset/reacquire priv
            # as the user coulda done pretty much anything and we could end up at who knows what
            # priv level
            self._current_priv_level = DUMMY_PRIV_LEVEL

        self.__generic_driver_mode = value
def _update_response(self, response: Response) -> None:
"""
Update response with network driver specific data
This happens here as the underlying channel provides a response object but is unaware of any
of the network/platform specific attributes that may need to get updated
Args:
response: response to update
Returns:
None
Raises:
N/A
"""
response.textfsm_platform = self.textfsm_platform
response.genie_platform = self.genie_platform
@staticmethod
def _pre_send_config(config: str) -> List[str]:
"""
Handle pre "send_config" tasks for consistency between sync/async versions
Args:
config: string configuration to send to the device, supports sending multi-line strings
Returns:
list: list of config lines from provided "config" input
Raises:
ScrapliTypeError: if anything but a string is provided for `file`
"""
if not isinstance(config, str):
raise ScrapliTypeError(
f"'send_config' expects a single string, got {type(config)}, "
"to send a list of configs use the 'send_configs' method instead."
)
# in order to handle multi-line strings, we split lines
split_config = config.splitlines()
return split_config
def _post_send_config(
self,
config: str,
multi_response: MultiResponse,
) -> Response:
"""
Handle post "send_config" tasks for consistency between sync/async versions
Args:
config: string configuration to send to the device, supports sending multi-line strings
multi_response: multi_response object send_config got from calling self.send_configs;
we need this to parse out the multi_response back into a single Response object
Returns:
Response: Unified response object
Raises:
N/A
"""
# capture failed_when_contains and host from zeroith multi_response element (there should
# always be at least a zeroith element here!); getting host just lets us keep the mixin
# class a little cleaner without having to deal with sync vs async transport classes from
# a typing perspective
failed_when_contains = multi_response[0].failed_when_contains
host = multi_response[0].host
# create a new unified response object
response = Response(
host=host,
channel_input=config,
failed_when_contains=failed_when_contains,
)
response.start_time = multi_response[0].start_time
response.finish_time = datetime.now()
response.elapsed_time = (response.finish_time - response.start_time).total_seconds()
# join all the results together into a single final result
response.result = "\n".join(response.result for response in multi_response)
response.failed = False
if any(r.failed for r in multi_response):
response.failed = True
self._update_response(response=response)
return response
def _pre_send_configs(
self,
configs: List[str],
failed_when_contains: Optional[Union[str, List[str]]] = None,
privilege_level: str = "",
) -> Tuple[str, Union[str, List[str]]]:
"""
Handle pre "send_configs" tasks for consistency between sync/async versions
Args:
configs: list of strings to send to device in config mode
failed_when_contains: string or list of strings indicating failure if found in response
privilege_level: name of configuration privilege level/type to acquire; this is platform
dependent, so check the device driver for specifics. Examples of privilege_name
would be "configuration_exclusive" for IOSXRDriver, or "configuration_private" for
JunosDriver. You can also pass in a name of a configuration session such as
"my-config-session" if you have registered a session using the
"register_config_session" method of the EOSDriver or NXOSDriver.
Returns:
Tuple[str, Union[str, List[str]]]: string of resolved privilege level name, and failed
when contains which may be a string or list of strings
Raises:
ScrapliTypeError: if configs is anything but a list
ScrapliPrivilegeError: if connection is in 'generic_driver_mode' -- this should be a
non-standard use case so there is no reason to complicate the config(s) methods
with supporting generic driver mode (plus if there was config modes in generic
driver mode that wouldn't be very generic driver like, would it!)
"""
if not isinstance(configs, list):
raise ScrapliTypeError(
f"'send_configs' expects a list of strings, got {type(configs)}, "
"to send a single configuration line/string use the 'send_config' method instead."
)
if self._generic_driver_mode is True:
raise ScrapliPrivilegeError(
"connection is in 'generic_driver_mode', send config(s|s_from_file) is disabled"
)
if failed_when_contains is None:
final_failed_when_contains = self.failed_when_contains
elif isinstance(failed_when_contains, str):
final_failed_when_contains = [failed_when_contains]
else:
final_failed_when_contains = failed_when_contains
if privilege_level:
self._validate_privilege_level_name(privilege_level_name=privilege_level)
resolved_privilege_level = privilege_level
else:
resolved_privilege_level = "configuration"
return resolved_privilege_level, final_failed_when_contains
def _post_send_configs(self, responses: MultiResponse) -> MultiResponse:
"""
Handle post "send_configs" tasks for consistency between sync/async versions
Args:
responses: multi_response object to update
Returns:
MultiResponse: Unified response object
Raises:
N/A
"""
for response in responses:
self._update_response(response=response)
return responses
|
en
| 0.814508
|
scrapli.driver.network.base_driver PrivilegeLevel Object Args: pattern: regex pattern to use to identify this privilege level by the prompt name: friendly name of this privilege level previous_priv: name of the lower/previous privilege level deescalate: how to deescalate *from* this privilege level (to the lower/previous priv) escalate: how to escalate *to* this privilege level (from the lower/previous priv) escalate_auth: True/False escalation requires authentication escalate_prompt: prompt pattern to search for during escalation if escalate auth is True not_contains: list of substrings that should *not* be seen in a prompt for this privilege level Returns: None Raises: N/A # BaseNetworkDriver Mixin vars for typing/linting purposes Generate the `comms_prompt_pattern` from the currently assigned privilege levels Args: N/A Returns: None Raises: N/A Determine current privilege level from prompt string Args: current_prompt: string of current prompt Returns: list: list of string names of matching privilege levels Raises: ScrapliPrivilegeError: if privilege level cannot be determined # starting at 2021.07.30 the `not_contains` field was added to privilege levels # (defaulting to an empty tuple) -- this helps us to simplify the priv patterns # greatly, as well as have no reliance on look arounds which makes the "normal" # scrapli privilege levels more go friendly -- useful for scrapligo! Build a graph of privilege levels `_priv_graph` is a "graph" of all privilege levels and how to acquire them from any given priv level. This is probably not very efficient but we should never have more than a handful of priv levels so this should never be a big issue. While at the moment priv levels are always... "linear" in that there is only ever one "up" and one "down" privilege from any given priv, we still have "forks" in the road -- for example, in IOSXR we can go from privilege exec to configuration or configuration exclusive. 
This method builds a graph that allows us to make intelligent decisions about how to get from where we are to where we want to be! Args: N/A Returns: None Raises: N/A Generate a list of priv levels from starting priv to destination priv Args: starting_priv_name: name of starting priv destination_priv_name: name of destination priv priv_change_map: current priv_change_map; should only be passed when this function calls itself Returns: list: list of strings of priv names to get from starting to destination priv level Raises: N/A # shouldnt ever get to this i dont think... putting here to appease pylint and ignoring cov # pragma: nocover Re-generate the privilege graph, and update the comms prompt pattern Args: N/A Returns: None Raises: N/A # build/update the priv graph # build/update the joined comms prompt pattern # ensure the channel has the updated prompt pattern so it knows how to match any newly # updated priv levels (such as registered configuration sessions) # type: ignore # pylint: disable=E1101 # finally, clear the lru caches as patterns may have been updated Get privilege level name if provided privilege is valid Args: privilege_level_name: string name of desired privilege level Returns: None Raises: ScrapliPrivilegeError: if attempting to acquire an unknown priv Handle pre "_escalate" tasks for consistency between sync/async versions Args: escalate_priv: privilege level to escalate to Returns: None Raises: N/A Handle non channel "acquire_priv" tasks for consistency between sync/async versions Args: destination_priv: string name of desired privilege level current_prompt: string of the current prompt Returns: Tuple[PrivilegeAction, PrivilegeLevel]: enum set to appropriate value for no action, escalate or deescalate and privilege level object to pass to either escalate or deescalate method Raises: N/A # decide if we are already at the desired priv, then we don't need to do any thing! # if multiple patterns match pick the zeroith... 
hopefully this never happens though... # and it *shouldn't* because right now the only way to have the same priv patterns is # to be *basically* the same privilege level -- i.e. configuration and configuration # exclusive for iosxr # at this point we basically dont *know* the privilege leve we are at (or we wont/cant after # we do an escalation or deescalation, so we reset to the dummy priv level Getter for `_generic_driver_mode` attribute Args: N/A Returns: bool: _generic_driver_mode value Raises: N/A Setter for `_generic_driver_mode` attribute Args: value: bool value for _generic_driver_mode Returns: None Raises: ScrapliTypeError: if value is not of type bool # if we are setting ingore priv level we reset current priv to the dummy priv so that # once (if) a user turns ignore priv back off we know we need to reset/reacquire priv # as the user coulda done pretty much anything and we could end up at who knows what # priv level Update response with network driver specific data This happens here as the underlying channel provides a response object but is unaware of any of the network/platform specific attributes that may need to get updated Args: response: response to update Returns: None Raises: N/A Handle pre "send_config" tasks for consistency between sync/async versions Args: config: string configuration to send to the device, supports sending multi-line strings Returns: list: list of config lines from provided "config" input Raises: ScrapliTypeError: if anything but a string is provided for `file` # in order to handle multi-line strings, we split lines Handle post "send_config" tasks for consistency between sync/async versions Args: config: string configuration to send to the device, supports sending multi-line strings multi_response: multi_response object send_config got from calling self.send_configs; we need this to parse out the multi_response back into a single Response object Returns: Response: Unified response object Raises: N/A # capture 
failed_when_contains and host from zeroith multi_response element (there should # always be at least a zeroith element here!); getting host just lets us keep the mixin # class a little cleaner without having to deal with sync vs async transport classes from # a typing perspective # create a new unified response object # join all the results together into a single final result Handle pre "send_configs" tasks for consistency between sync/async versions Args: configs: list of strings to send to device in config mode failed_when_contains: string or list of strings indicating failure if found in response privilege_level: name of configuration privilege level/type to acquire; this is platform dependent, so check the device driver for specifics. Examples of privilege_name would be "configuration_exclusive" for IOSXRDriver, or "configuration_private" for JunosDriver. You can also pass in a name of a configuration session such as "my-config-session" if you have registered a session using the "register_config_session" method of the EOSDriver or NXOSDriver. Returns: Tuple[str, Union[str, List[str]]]: string of resolved privilege level name, and failed when contains which may be a string or list of strings Raises: ScrapliTypeError: if configs is anything but a list ScrapliPrivilegeError: if connection is in 'generic_driver_mode' -- this should be a non-standard use case so there is no reason to complicate the config(s) methods with supporting generic driver mode (plus if there was config modes in generic driver mode that wouldn't be very generic driver like, would it!) Handle post "send_configs" tasks for consistency between sync/async versions Args: responses: multi_response object to update Returns: MultiResponse: Unified response object Raises: N/A
| 2.196244
| 2
|
pacote-download/ex047.py
|
LeticiaTr/Exerc-cios-em-Python
| 0
|
6627818
|
<filename>pacote-download/ex047.py
# Print every even number in the interval 1..50.
# range(2, 51, 2) starts at the first even value and steps by two, so it
# yields the even numbers directly: no parity test (num % 2 == 0) is needed
# and no odd numbers are generated at all.
for num in range(2, 51, 2):
    print(num, end=' ')
|
<filename>pacote-download/ex047.py
# Show the even numbers between 1 and 50 on one line.
# Starting at 2 and stepping by two visits only the even values, touching
# half as many numbers as filtering range(1, 51) with an `if num % 2 == 0`.
for num in range(2, 51, 2):
    print(num, end=' ')
|
pt
| 0.817132
|
#Crie um programa que mostre na tela todos os números pares que estão no intervalo entre 1 e 50. for c in range (1,51): # Laço C no intervalo(range) de (1 até 51) if c % 2 == 0: print(c, end= ' ') #MELHOR SOLUÇÃO QUE USA MENOS ESPAÇO NA MEMÓRIA
| 3.685834
| 4
|
tests/core/commands/test_doctor.py
|
tcchrist/renku-python
| 0
|
6627819
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku doctor tests."""
from pathlib import Path
from renku.cli import cli
def test_new_project_is_ok(runner, project):
    """A freshly initialized project passes every doctor check."""
    # Nothing has been touched yet, so doctor reports a clean bill of health.
    res = runner.invoke(cli, ["doctor"])
    assert res.exit_code == 0
    assert "Everything seems to be ok." in res.output
def test_git_hooks_not_available(runner, project):
    """Doctor flags a project whose git hooks were removed."""
    res = runner.invoke(cli, ["githooks", "uninstall"])
    assert res.exit_code == 0
    # With the hooks gone, doctor must fail and name the problem.
    res = runner.invoke(cli, ["doctor"])
    assert res.exit_code == 1
    assert "Git hooks are not installed." in res.output
def test_git_hooks_modified(runner, project):
    """Doctor tolerates extra hook commands but flags edits to the Renku hook."""
    res = runner.invoke(cli, ["githooks", "install", "--force"])
    assert res.exit_code == 0
    hook_path = Path(project).joinpath(".git", "hooks", "pre-commit")
    original_lines = hook_path.read_text().split("\n")
    # Commands appended after the Renku-managed section are harmless.
    hook_path.write_text("\n".join(original_lines + ["# Some more commands", "ls"]))
    res = runner.invoke(cli, ["doctor"])
    assert res.exit_code == 0
    assert "Everything seems to be ok." in res.output
    # Dropping the end-marker line counts as tampering with the managed hook.
    tampered = [line for line in original_lines if "# END RENKU HOOK." not in line]
    hook_path.write_text("\n".join(tampered))
    res = runner.invoke(cli, ["doctor"])
    assert res.exit_code == 1
    assert "Git hooks are outdated or not installed." in res.output
def test_lfs_broken_history(runner, client, tmp_path):
    """Doctor warns about large files committed outside LFS and honors .renkulfsignore."""
    big_file = tmp_path / "big-file.bin"
    # Seek past the LFS size threshold before writing, so the file is "large"
    # without actually materializing that many bytes on most filesystems.
    with open(big_file, "w") as file_:
        file_.seek(client.minimum_lfs_file_size)
        file_.write("some-data")
    # Commit the file while external storage (LFS) is disabled.
    res = runner.invoke(
        cli,
        ["--no-external-storage", "dataset", "add", "--create", "new-dataset", str(big_file)],
        catch_exceptions=False,
    )
    assert res.exit_code == 0
    res = runner.invoke(cli, ["doctor"])
    assert res.exit_code == 1
    assert "Git history contains large files" in res.output
    assert "*.bin" in res.output
    # Excluding *.bin via .renkulfsignore silences the large-file warning.
    (client.path / client.RENKU_LFS_IGNORE_PATH).write_text("\n".join(["*swp", "*.bin", ".DS_Store"]))
    res = runner.invoke(cli, ["doctor"])
    assert res.exit_code == 0
    assert "Git history contains large files" not in res.output
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku doctor tests."""
from pathlib import Path
from renku.cli import cli
def test_new_project_is_ok(runner, project):
    """Doctor reports no problems on a brand-new project."""
    outcome = runner.invoke(cli, ["doctor"])
    # Exit code 0 plus the all-clear message means every check passed.
    assert outcome.exit_code == 0
    assert "Everything seems to be ok." in outcome.output
def test_git_hooks_not_available(runner, project):
    """Uninstalling the git hooks makes doctor fail with a clear message."""
    outcome = runner.invoke(cli, ["githooks", "uninstall"])
    assert outcome.exit_code == 0
    outcome = runner.invoke(cli, ["doctor"])
    assert outcome.exit_code == 1
    assert "Git hooks are not installed." in outcome.output
def test_git_hooks_modified(runner, project):
    """Only changes inside the Renku-managed hook section trip the doctor check."""
    outcome = runner.invoke(cli, ["githooks", "install", "--force"])
    assert outcome.exit_code == 0
    hook_path = Path(project) / ".git" / "hooks" / "pre-commit"
    pristine = hook_path.read_text().split("\n")
    # Appending unrelated commands leaves the managed section intact.
    hook_path.write_text("\n".join(pristine + ["# Some more commands", "ls"]))
    outcome = runner.invoke(cli, ["doctor"])
    assert outcome.exit_code == 0
    assert "Everything seems to be ok." in outcome.output
    # Removing the Renku end-marker modifies the managed section itself.
    without_marker = [line for line in pristine if "# END RENKU HOOK." not in line]
    hook_path.write_text("\n".join(without_marker))
    outcome = runner.invoke(cli, ["doctor"])
    assert outcome.exit_code == 1
    assert "Git hooks are outdated or not installed." in outcome.output
def test_lfs_broken_history(runner, client, tmp_path):
    """A large file committed without LFS triggers the doctor warning until ignored."""
    big_file = tmp_path / "big-file.bin"
    with open(big_file, "w") as file_:
        # Seeking past the threshold makes the file exceed the LFS size limit
        # cheaply (sparse write on most filesystems).
        file_.seek(client.minimum_lfs_file_size)
        file_.write("some-data")
    # Add the file to a dataset with external storage (LFS) turned off.
    outcome = runner.invoke(
        cli,
        ["--no-external-storage", "dataset", "add", "--create", "new-dataset", str(big_file)],
        catch_exceptions=False,
    )
    assert outcome.exit_code == 0
    outcome = runner.invoke(cli, ["doctor"])
    assert outcome.exit_code == 1
    assert "Git history contains large files" in outcome.output
    assert "*.bin" in outcome.output
    # Listing the *.bin pattern in .renkulfsignore suppresses the warning.
    (client.path / client.RENKU_LFS_IGNORE_PATH).write_text("\n".join(["*swp", "*.bin", ".DS_Store"]))
    outcome = runner.invoke(cli, ["doctor"])
    assert outcome.exit_code == 0
    assert "Git history contains large files" not in outcome.output
|
en
| 0.824827
|
# -*- coding: utf-8 -*- # # Copyright 2017-2020 - Swiss Data Science Center (SDSC) # A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and # Eidgenössische Technische Hochschule Zürich (ETHZ). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Renku doctor tests. Test renku doctor initially is OK on a new project. # Initially, every thing is OK Test detection of not-installed git hooks. Test detection of modified git hooks. # Append some more commands # Check passes as long as Renku hook is not modified # Modify Renku hook Test lfs migrate info check on a broken history. # Add a file without adding it to LFS # Exclude *.ipynb files from LFS in .renkulfsignore
| 2.068122
| 2
|
metaopt/tests/unit/core/return/returnspec.py
|
cigroup-ol/metaopt
| 8
|
6627820
|
<reponame>cigroup-ol/metaopt<gh_stars>1-10
# -*- coding: utf-8 -*-
# -!- coding: utf-8 -!-
"""
Test for ReturnSpec
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Third Party
import nose
from nose.tools import raises
# First Party
from metaopt.core.returnspec.returnspec import ReturnSpec
from metaopt.core.returnspec.util. \
exception import MultiObjectivesNotSupportedError
from metaopt.core.returnspec.util.wrapper import ReturnValuesWrapper
class TestRetunspec(object):
    """Behavioral tests for ReturnSpec and ReturnValuesWrapper ordering."""

    def test_return_spec_maximize(self):
        """Under a maximized objective, the smaller raw value compares greater."""
        spec = ReturnSpec()
        spec.maximize("y")
        wrapped_one = ReturnValuesWrapper(spec, 1)
        wrapped_two = ReturnValuesWrapper(spec, 2)
        assert wrapped_one > wrapped_two

    def test_return_spec_minimize(self):
        """Under a minimized objective, the smaller raw value compares smaller."""
        spec = ReturnSpec()
        spec.minimize("y")
        wrapped_one = ReturnValuesWrapper(spec, 1)
        wrapped_two = ReturnValuesWrapper(spec, 2)
        assert wrapped_one < wrapped_two

    @raises(MultiObjectivesNotSupportedError)
    def test_return_spec_multiple_objective_raises_error(self):
        """Registering a second objective is rejected."""
        spec = ReturnSpec()
        spec.minimize("y")
        spec.minimize("z")

    def test_return_spec_given_function_create_default_return_values(self):
        """A plain function yields one default objective: minimized 'Fitness'."""
        def identity(a):
            return a
        spec = ReturnSpec(identity)
        default = spec.return_values[0]
        assert default["name"] == "Fitness"
        assert default["minimize"] == True

    def test_is_minimization_without_return_spec(self):
        """Without a spec, wrappers fall back to minimization ordering."""
        wrapped_one = ReturnValuesWrapper(None, 1)
        wrapped_two = ReturnValuesWrapper(None, 2)
        assert wrapped_one < wrapped_two

    def test_raw_values(self):
        """raw_values exposes the value that was originally wrapped."""
        assert ReturnValuesWrapper(None, 1).raw_values == 1
# Allow running this test module directly; nose collects and runs its tests.
if __name__ == '__main__':
    nose.runmodule()
|
# -*- coding: utf-8 -*-
# -!- coding: utf-8 -!-
"""
Test for ReturnSpec
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Third Party
import nose
from nose.tools import raises
# First Party
from metaopt.core.returnspec.returnspec import ReturnSpec
from metaopt.core.returnspec.util. \
exception import MultiObjectivesNotSupportedError
from metaopt.core.returnspec.util.wrapper import ReturnValuesWrapper
class TestRetunspec(object):
    """Exercises ReturnSpec objectives and the comparison semantics of
    ReturnValuesWrapper."""

    def test_return_spec_maximize(self):
        """Maximizing inverts the natural ordering of wrapped values."""
        rspec = ReturnSpec()
        rspec.maximize("y")
        low = ReturnValuesWrapper(rspec, 1)
        high = ReturnValuesWrapper(rspec, 2)
        assert low > high

    def test_return_spec_minimize(self):
        """Minimizing keeps the natural ordering of wrapped values."""
        rspec = ReturnSpec()
        rspec.minimize("y")
        low = ReturnValuesWrapper(rspec, 1)
        high = ReturnValuesWrapper(rspec, 2)
        assert low < high

    @raises(MultiObjectivesNotSupportedError)
    def test_return_spec_multiple_objective_raises_error(self):
        """Adding more than one objective raises MultiObjectivesNotSupportedError."""
        rspec = ReturnSpec()
        rspec.minimize("y")
        rspec.minimize("z")

    def test_return_spec_given_function_create_default_return_values(self):
        """Constructing a spec from a function installs a minimized 'Fitness'
        objective as the default."""
        def passthrough(a):
            return a
        rspec = ReturnSpec(passthrough)
        first = rspec.return_values[0]
        assert first["name"] == "Fitness"
        assert first["minimize"] == True

    def test_is_minimization_without_return_spec(self):
        """With no spec at all, comparison defaults to minimization."""
        low = ReturnValuesWrapper(None, 1)
        high = ReturnValuesWrapper(None, 2)
        assert low < high

    def test_raw_values(self):
        """The wrapper keeps the original value accessible via raw_values."""
        wrapped = ReturnValuesWrapper(None, 1)
        assert wrapped.raw_values == 1
# Entry point for running this file as a script: delegate to nose's runner.
if __name__ == '__main__':
    nose.runmodule()
|
en
| 0.77253
|
# -*- coding: utf-8 -*- # -!- coding: utf-8 -!- Test for ReturnSpc # Future # Third Party # First Party
| 2.162085
| 2
|
qm_detection/tfjs_convert.py
|
hqbao/dlp_tf
| 0
|
6627821
|
import tensorflow as tf
import tensorflowjs as tfjs
from models import build_infer_model
from utils import genanchors
from datetime import datetime
# Export the trained detection model as a TF SavedModel so it can be
# translated to TensorFlow.js (converter command at the bottom).
print('tensorflow version: {}'.format(tf.__version__))

# --- Model / anchor configuration --------------------------------------
ishape = [240, 200, 3]
ssize = [60, 50]
asizes = [[8, 8]]
resnet_settings = [[5, 5, 20], [2, [1, 1]], [8, [2, 2]]]
total_classes = 2
output_path = 'output'

# Non-maximum-suppression parameters for the inference head.
nsm_iou_threshold = 0.1
nsm_score_threshold = 0.9
nsm_max_output_size = 330

# Build the anchor boxes once and flatten them so each row carries the
# 4 values describing one anchor.
anchor_grid = genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes)
abox_2dtensor = tf.reshape(
    tensor=tf.constant(value=anchor_grid, dtype='float32'),
    shape=[-1, 4])

model = build_infer_model(
    ishape=ishape,
    resnet_settings=resnet_settings,
    k=len(asizes),
    total_classes=total_classes,
    abox_2dtensor=abox_2dtensor,
    nsm_iou_threshold=nsm_iou_threshold,
    nsm_score_threshold=nsm_score_threshold,
    nsm_max_output_size=nsm_max_output_size)
# model.summary()

# Load the best-recall checkpoint and export as a SavedModel directory.
model.load_weights(output_path+'/weights_best_recall.h5', by_name=True)
model.save(output_path+'/model')
# Then run this command under output folder
# > tensorflowjs_converter --input_format=tf_saved_model model/ tfjs/
|
import tensorflow as tf
import tensorflowjs as tfjs
from models import build_infer_model
from utils import genanchors
from datetime import datetime
# Report the TF version used for the export; the SavedModel must be
# converted with a compatible tensorflowjs_converter release.
print('tensorflow version: {}'.format(tf.__version__))

# Configuration of the inference model.
ishape = [240, 200, 3]
ssize = [60, 50]
asizes = [[8, 8]]
resnet_settings = [[5, 5, 20], [2, [1, 1]], [8, [2, 2]]]
total_classes = 2
output_path = 'output'
nsm_iou_threshold = 0.1
nsm_score_threshold = 0.9
nsm_max_output_size = 330

# Anchor boxes: generate over the feature grid, then reshape so every row
# holds the 4 coordinates of a single anchor.
abox_2dtensor = tf.reshape(
    tensor=tf.constant(
        value=genanchors(isize=ishape[:2], ssize=ssize, asizes=asizes),
        dtype='float32'),
    shape=[-1, 4])

model = build_infer_model(
    ishape=ishape,
    resnet_settings=resnet_settings,
    k=len(asizes),
    total_classes=total_classes,
    abox_2dtensor=abox_2dtensor,
    nsm_iou_threshold=nsm_iou_threshold,
    nsm_score_threshold=nsm_score_threshold,
    nsm_max_output_size=nsm_max_output_size)
# model.summary()

# Restore the best-recall weights, then export the SavedModel.
model.load_weights(output_path+'/weights_best_recall.h5', by_name=True)
model.save(output_path+'/model')
# Then run this command under output folder
# > tensorflowjs_converter --input_format=tf_saved_model model/ tfjs/
|
en
| 0.301682
|
# model.summary() # Then run this command under output folder # > tensorflowjs_converter --input_format=tf_saved_model model/ tfjs/
| 2.172333
| 2
|
conary_test/libtest/graphtest.py
|
sassoftware/conary
| 43
|
6627822
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testhelp
from StringIO import StringIO
#conary
from conary.lib import graph
#test
class GraphTest(testhelp.TestCase):
    """Unit tests for conary.lib.graph.DirectedGraph.

    NOTE: this module is Python 2 only — it imports StringIO from the
    StringIO module and uses the `cmp` builtin below.
    """

    def testDFS(self):
        """Depth-first search returns discovery/finish orderings and a forest.

        Graph under test: a -> b <-> c -> d (b and c form a 2-cycle).
        """
        g = graph.DirectedGraph()
        a = g.addNode('a')
        b = g.addNode('b')
        c = g.addNode('c')
        d = g.addNode('d')
        g.addEdge('a','b')
        g.addEdge('b','c')
        g.addEdge('c','b')
        g.addEdge('c','d')
        # Starting from 'a' every node is reachable: a starts first,
        # finishes last; d starts last, finishes first. One DFS tree.
        starts, finishes, trees = g.doDFS(start='a')
        assert(max(finishes.values()) == finishes[a])
        assert(min(finishes.values()) == finishes[d])
        assert(min(starts.values()) == starts[a])
        assert(max(starts.values()) == starts[d])
        assert(len(trees) == 1)
        # Starting from 'b', node 'a' is unreachable from the start node,
        # so the DFS forest has two trees: {a} and {b, c, d}.
        starts, finishes, trees = g.doDFS(start='b')
        assert(max(finishes.values()) == finishes[a])
        assert(min(finishes.values()) == finishes[d])
        assert(min(starts.values()) == starts[b])
        assert(max(starts.values()) == starts[a])
        assert(len(trees) == 2)
        assert(len(trees[a]) == 1)
        assert(len(trees[b]) == 3)

    def testBFS(self):
        """Breadth-first search: start/finish timestamps, forest size, depths."""
        g = graph.DirectedGraph()
        a = g.addNode('a')
        b = g.addNode('b')
        c = g.addNode('c')
        d = g.addNode('d')
        g.addEdge('a','b')
        g.addEdge('b','c')
        g.addEdge('c','b')
        g.addEdge('c','d')
        starts, finishes, trees, pred, depth = g.doBFS(start='a')
        # Exact timestamp values pin the visitation order of the BFS queue.
        self.assertEqual([ starts[x] for x in [ a, b, c, d ] ],
                         [0, 1, 3, 5])
        self.assertEqual([ finishes[x] for x in [ a, b, c, d ] ],
                         [2, 4, 6, 7])
        assert(len(trees) == 1)
        # Depth equals edge distance from the start node.
        self.assertEqual(depth[a], 0)
        self.assertEqual(depth[b], 1)
        self.assertEqual(depth[c], 2)
        self.assertEqual(depth[d], 3)
        # Starting from 'b', node 'a' is only reached as the root of a
        # second tree, so the forest has two trees and 'a' has depth 0.
        starts, finishes, trees, pred, depth = g.doBFS(start='b')
        self.assertEqual([ starts[x] for x in [ a, b, c, d ] ],
                         [6, 0, 1, 3])
        self.assertEqual([ finishes[x] for x in [ a, b, c, d ] ],
                         [7, 2, 4, 5])
        assert(len(trees) == 2)
        assert(len(trees[a]) == 1)
        assert(len(trees[b]) == 3)
        self.assertEqual(depth[a], 0)
        self.assertEqual(depth[b], 0)
        self.assertEqual(depth[c], 1)
        self.assertEqual(depth[d], 2)

    def testDynamicBFS(self):
        # Dynamic graphs (the graph structure is not known in advance)
        g = graph.DirectedGraph()
        a = g.addNode('a')
        initialized = {}
        def getChildrenCallback(nodeIdx):
            # Lazily create each node's outgoing edges the first time BFS
            # asks for its children; `initialized` guards against re-adding.
            node = g.get(nodeIdx)
            if nodeIdx not in initialized:
                if node == 'a':
                    for toIdx in ['b', 'c', 'd']:
                        g.addEdge(node, toIdx)
                elif node == 'b':
                    for toIdx in ['d', 'e', 'f']:
                        g.addEdge(node, toIdx)
                elif node in [ 'c', 'd' ]:
                    for toIdx in ['g', 'h']:
                        g.addEdge(node, toIdx)
                elif node == 'e':
                    for toIdx in ['i']:
                        g.addEdge(node, toIdx)
                elif node == 'i':
                    for toIdx in ['j']:
                        g.addEdge(node, toIdx)
                elif node == 'j':
                    for toIdx in ['k']:
                        g.addEdge(node, toIdx)
                initialized[nodeIdx] = True
            return g.edges[nodeIdx]
        starts, finishes, trees, pred, depth = g.doBFS(start='a',
            getChildrenCallback = getChildrenCallback)
        self.assertTrue(len([ x for x in g.iterNodes()]), 13)
        # The predecessor map records which node first discovered each node;
        # the start node has no predecessor.
        self.assertFalse(g.getIndex('a') in pred)
        self.assertEqual(pred[g.getIndex('b')], g.getIndex('a'))
        self.assertEqual(pred[g.getIndex('c')], g.getIndex('a'))
        self.assertEqual(pred[g.getIndex('d')], g.getIndex('a'))
        self.assertEqual(pred[g.getIndex('e')], g.getIndex('b'))
        self.assertEqual(pred[g.getIndex('f')], g.getIndex('b'))
        self.assertEqual(pred[g.getIndex('g')], g.getIndex('c'))
        self.assertEqual(pred[g.getIndex('h')], g.getIndex('c'))
        self.assertEqual(pred[g.getIndex('i')], g.getIndex('e'))
        self.assertEqual(pred[g.getIndex('j')], g.getIndex('i'))
        self.assertEqual(pred[g.getIndex('k')], g.getIndex('j'))
        self.assertEqual(depth[g.getIndex('a')], 0)
        self.assertEqual(depth[g.getIndex('b')], 1)
        self.assertEqual(depth[g.getIndex('c')], 1)
        for i in ['e', 'f', 'g', 'h']:
            self.assertEqual(depth[g.getIndex(i)], 2)
        self.assertEqual(depth[g.getIndex('i')], 3)
        self.assertEqual(depth[g.getIndex('j')], 4)
        self.assertEqual(depth[g.getIndex('k')], 5)
        # Same thing, but limit the depth
        initialized.clear()
        starts, finishes, trees, pred, depth = g.doBFS(start='a',
            getChildrenCallback = getChildrenCallback, depthLimit = 3)
        self.assertEqual(len(trees), 1)
        self.assertEqual(depth[g.getIndex('a')], 0)
        self.assertEqual(depth[g.getIndex('b')], 1)
        self.assertEqual(depth[g.getIndex('c')], 1)
        for i in ['e', 'f', 'g', 'h']:
            self.assertEqual(depth[g.getIndex(i)], 2)
        self.assertEqual(depth[g.getIndex('i')], 3)
        # Nodes beyond depthLimit are never discovered, so they gain no
        # predecessor entry.
        self.assertFalse(g.getIndex('j') in pred)
        self.assertFalse(g.getIndex('k') in pred)

    def testSCC(self):
        """Strongly connected components of a graph with one 2-cycle."""
        g = graph.DirectedGraph()
        a = g.addNode('a')
        b = g.addNode('b')
        c = g.addNode('c')
        d = g.addNode('d')
        g.addEdge('a', 'b')
        g.addEdge('b', 'c')
        g.addEdge('c', 'b')
        g.addEdge('c', 'd')
        # b <-> c is the only non-trivial component.
        components = g.getStronglyConnectedComponents()
        assert(components == [set(['a']), set(['b', 'c']), set(['d'])])
        # Closing the loop d -> a merges everything into one component.
        g.addEdge('d', 'a')
        components = g.getStronglyConnectedComponents()
        assert(components == [set(['a', 'b', 'c', 'd'])])

    def testTotalOrdering(self):
        """Topological total ordering, with ties broken by nodeSort."""
        g = graph.DirectedGraph()
        a = g.addNode('a')
        b = g.addNode('b')
        c = g.addNode('c')
        d = g.addNode('d')
        d = g.addNode('e')
        g.addEdge('a', 'b')
        g.addEdge('a', 'c')
        g.addEdge('a', 'd')
        g.addEdge('a', 'e')
        g.addEdge('b', 'e')
        g.addEdge('c', 'e')
        g.addEdge('d', 'e')
        def nodeSort(a, b):
            # Tie-breaker: compare nodes by the character at index 1.
            # `cmp` is a Python 2 builtin.
            return cmp(ord(a[1]), ord(b[1]))
        assert(g.getTotalOrdering(nodeSort) == ['a', 'b', 'c', 'd', 'e'])
        # add back edge
        g.addNode('f')
        g.addEdge('e', 'f')
        g.addEdge('f', 'a')
        # A cycle makes a total ordering impossible.
        self.assertRaises(graph.BackEdgeError, g.getTotalOrdering, nodeSort)
        # Deleting nodes one at a time keeps the remaining order consistent.
        g.delete('f')
        g.delete('d')
        assert(g.getTotalOrdering(nodeSort) == ['a', 'b', 'c', 'e'])
        g.delete('a')
        assert(g.getTotalOrdering(nodeSort) == ['b', 'c', 'e'])
        g.delete('c')
        assert(g.getTotalOrdering(nodeSort) == ['b', 'e'])
        g.delete('e')
        assert(g.getTotalOrdering(nodeSort) == ['b'])
        assert(not g.isEmpty())
        g.delete('b')
        assert(g.getTotalOrdering(nodeSort) == [])
        assert(g.isEmpty())

    def testFlatten(self):
        """flatten() keeps direct children; 'a' gains no new edges here since
        it already points at every other node."""
        g = graph.DirectedGraph()
        a = g.addNode('a')
        b = g.addNode('b')
        c = g.addNode('c')
        d = g.addNode('d')
        d = g.addNode('e')
        g.addEdge('a', 'b')
        g.addEdge('a', 'c')
        g.addEdge('a', 'd')
        g.addEdge('a', 'e')
        g.addEdge('b', 'e')
        g.addEdge('c', 'e')
        g.addEdge('d', 'e')
        g.flatten()
        assert(sorted(g.iterChildren('a')) == ['b', 'c', 'd', 'e'])
        assert(sorted(g.iterChildren('b')) == ['e'])
        assert(sorted(g.iterChildren('c')) == ['e'])
        assert(sorted(g.iterChildren('d')) == ['e'])
        assert(sorted(g.iterChildren('e')) == [])

    def testGetDisconnected(self):
        """getDisconnected() reports nodes with no edges at all."""
        g = graph.DirectedGraph()
        g.addNode('a')
        assert(sorted(g.getDisconnected()) == ['a'])
        g.addNode('b')
        assert(sorted(g.getDisconnected()) == ['a', 'b'])
        g.addEdge('a', 'b')
        assert(sorted(g.getDisconnected()) == [])
        g.addNode('c')
        g.addNode('d')
        assert(sorted(g.getDisconnected()) == ['c', 'd'])
        g.addEdge('a', 'c')
        assert(sorted(g.getDisconnected()) == ['d'])

    def testCreateDotFile(self):
        """generateDotFile() emits graphviz output, with optional label
        formatters for nodes and edges."""
        # NOTE(review): leading whitespace inside the expected dot output may
        # have been lost in extraction — confirm against generateDotFile's
        # actual line formatting before relying on these literals.
        g = graph.DirectedGraph()
        s = StringIO()
        g.addNode('a')
        g.addNode('b')
        g.addEdge('a', 'b')
        g.generateDotFile(s)
        s.seek(0)
        self.assertEquals(s.read(), """\
digraph graphName {
n0 [label="a"]
n1 [label="b"]
n0 -> n1
}
""")
        # With custom formatters the node and edge labels are rewritten.
        s = StringIO()
        g.generateDotFile(s, lambda x: 'Node %s' % x,
            lambda fromNode, toNode, value: '%s -> %s: %s' % (fromNode, toNode, value))
        s.seek(0)
        self.assertEquals(s.read(), """\
digraph graphName {
n0 [label="Node a"]
n1 [label="Node b"]
n0 -> n1 [label="a -> b: 1"]
}
""")
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testhelp
from StringIO import StringIO
#conary
from conary.lib import graph
#test
class GraphTest(testhelp.TestCase):
def testDFS(self):
g = graph.DirectedGraph()
a = g.addNode('a')
b = g.addNode('b')
c = g.addNode('c')
d = g.addNode('d')
g.addEdge('a','b')
g.addEdge('b','c')
g.addEdge('c','b')
g.addEdge('c','d')
starts, finishes, trees = g.doDFS(start='a')
assert(max(finishes.values()) == finishes[a])
assert(min(finishes.values()) == finishes[d])
assert(min(starts.values()) == starts[a])
assert(max(starts.values()) == starts[d])
assert(len(trees) == 1)
starts, finishes, trees = g.doDFS(start='b')
assert(max(finishes.values()) == finishes[a])
assert(min(finishes.values()) == finishes[d])
assert(min(starts.values()) == starts[b])
assert(max(starts.values()) == starts[a])
assert(len(trees) == 2)
assert(len(trees[a]) == 1)
assert(len(trees[b]) == 3)
def testBFS(self):
g = graph.DirectedGraph()
a = g.addNode('a')
b = g.addNode('b')
c = g.addNode('c')
d = g.addNode('d')
g.addEdge('a','b')
g.addEdge('b','c')
g.addEdge('c','b')
g.addEdge('c','d')
starts, finishes, trees, pred, depth = g.doBFS(start='a')
self.assertEqual([ starts[x] for x in [ a, b, c, d ] ],
[0, 1, 3, 5])
self.assertEqual([ finishes[x] for x in [ a, b, c, d ] ],
[2, 4, 6, 7])
assert(len(trees) == 1)
self.assertEqual(depth[a], 0)
self.assertEqual(depth[b], 1)
self.assertEqual(depth[c], 2)
self.assertEqual(depth[d], 3)
starts, finishes, trees, pred, depth = g.doBFS(start='b')
self.assertEqual([ starts[x] for x in [ a, b, c, d ] ],
[6, 0, 1, 3])
self.assertEqual([ finishes[x] for x in [ a, b, c, d ] ],
[7, 2, 4, 5])
assert(len(trees) == 2)
assert(len(trees[a]) == 1)
assert(len(trees[b]) == 3)
self.assertEqual(depth[a], 0)
self.assertEqual(depth[b], 0)
self.assertEqual(depth[c], 1)
self.assertEqual(depth[d], 2)
def testDynamicBFS(self):
# Dynamic graphs (the graph structure is not known in advance)
g = graph.DirectedGraph()
a = g.addNode('a')
initialized = {}
def getChildrenCallback(nodeIdx):
node = g.get(nodeIdx)
if nodeIdx not in initialized:
if node == 'a':
for toIdx in ['b', 'c', 'd']:
g.addEdge(node, toIdx)
elif node == 'b':
for toIdx in ['d', 'e', 'f']:
g.addEdge(node, toIdx)
elif node in [ 'c', 'd' ]:
for toIdx in ['g', 'h']:
g.addEdge(node, toIdx)
elif node == 'e':
for toIdx in ['i']:
g.addEdge(node, toIdx)
elif node == 'i':
for toIdx in ['j']:
g.addEdge(node, toIdx)
elif node == 'j':
for toIdx in ['k']:
g.addEdge(node, toIdx)
initialized[nodeIdx] = True
return g.edges[nodeIdx]
starts, finishes, trees, pred, depth = g.doBFS(start='a',
getChildrenCallback = getChildrenCallback)
self.assertTrue(len([ x for x in g.iterNodes()]), 13)
self.assertFalse(g.getIndex('a') in pred)
self.assertEqual(pred[g.getIndex('b')], g.getIndex('a'))
self.assertEqual(pred[g.getIndex('c')], g.getIndex('a'))
self.assertEqual(pred[g.getIndex('d')], g.getIndex('a'))
self.assertEqual(pred[g.getIndex('e')], g.getIndex('b'))
self.assertEqual(pred[g.getIndex('f')], g.getIndex('b'))
self.assertEqual(pred[g.getIndex('g')], g.getIndex('c'))
self.assertEqual(pred[g.getIndex('h')], g.getIndex('c'))
self.assertEqual(pred[g.getIndex('i')], g.getIndex('e'))
self.assertEqual(pred[g.getIndex('j')], g.getIndex('i'))
self.assertEqual(pred[g.getIndex('k')], g.getIndex('j'))
self.assertEqual(depth[g.getIndex('a')], 0)
self.assertEqual(depth[g.getIndex('b')], 1)
self.assertEqual(depth[g.getIndex('c')], 1)
for i in ['e', 'f', 'g', 'h']:
self.assertEqual(depth[g.getIndex(i)], 2)
self.assertEqual(depth[g.getIndex('i')], 3)
self.assertEqual(depth[g.getIndex('j')], 4)
self.assertEqual(depth[g.getIndex('k')], 5)
# Same thing, but limit the depth
initialized.clear()
starts, finishes, trees, pred, depth = g.doBFS(start='a',
getChildrenCallback = getChildrenCallback, depthLimit = 3)
self.assertEqual(len(trees), 1)
self.assertEqual(depth[g.getIndex('a')], 0)
self.assertEqual(depth[g.getIndex('b')], 1)
self.assertEqual(depth[g.getIndex('c')], 1)
for i in ['e', 'f', 'g', 'h']:
self.assertEqual(depth[g.getIndex(i)], 2)
self.assertEqual(depth[g.getIndex('i')], 3)
self.assertFalse(g.getIndex('j') in pred)
self.assertFalse(g.getIndex('k') in pred)
def testSCC(self):
    """Strongly-connected components: b<->c is one SCC; adding the
    d->a edge collapses the whole graph into a single component."""
    g = graph.DirectedGraph()
    for label in ('a', 'b', 'c', 'd'):
        g.addNode(label)
    for src, dst in [('a', 'b'), ('b', 'c'), ('c', 'b'), ('c', 'd')]:
        g.addEdge(src, dst)
    # Only b and c are mutually reachable; a and d are singletons.
    components = g.getStronglyConnectedComponents()
    assert(components == [set(['a']), set(['b', 'c']), set(['d'])])
    # Closing the cycle d -> a merges everything into one SCC.
    g.addEdge('d', 'a')
    components = g.getStronglyConnectedComponents()
    assert(components == [set(['a', 'b', 'c', 'd'])])
def testTotalOrdering(self):
    """Topological ordering of a DAG with ties broken by nodeSort; a
    back edge must raise graph.BackEdgeError, and deleting nodes keeps
    the ordering consistent down to the empty graph.

    Fix: the original bound every addNode() return value to throwaway
    locals and rebound 'd' when adding node 'e' — the unused, shadowed
    bindings are removed.
    """
    g = graph.DirectedGraph()
    for label in ('a', 'b', 'c', 'd', 'e'):
        g.addNode(label)
    g.addEdge('a', 'b')
    g.addEdge('a', 'c')
    g.addEdge('a', 'd')
    g.addEdge('a', 'e')
    g.addEdge('b', 'e')
    g.addEdge('c', 'e')
    g.addEdge('d', 'e')
    def nodeSort(a, b):
        # Tie-break alphabetically by the node's label character.
        return cmp(ord(a[1]), ord(b[1]))
    assert(g.getTotalOrdering(nodeSort) == ['a', 'b', 'c', 'd', 'e'])
    # add back edge: e -> f -> a makes a cycle, so no ordering exists
    g.addNode('f')
    g.addEdge('e', 'f')
    g.addEdge('f', 'a')
    self.assertRaises(graph.BackEdgeError, g.getTotalOrdering, nodeSort)
    g.delete('f')
    g.delete('d')
    assert(g.getTotalOrdering(nodeSort) == ['a', 'b', 'c', 'e'])
    g.delete('a')
    assert(g.getTotalOrdering(nodeSort) == ['b', 'c', 'e'])
    g.delete('c')
    assert(g.getTotalOrdering(nodeSort) == ['b', 'e'])
    g.delete('e')
    assert(g.getTotalOrdering(nodeSort) == ['b'])
    assert(not g.isEmpty())
    g.delete('b')
    assert(g.getTotalOrdering(nodeSort) == [])
    assert(g.isEmpty())
def testFlatten(self):
    """flatten() leaves every reachable node as a direct child: 'a'
    reaches 'e' both directly and via b/c/d, so after flattening the
    child sets are the transitive closures.

    Fix: drop the unused addNode() bindings (the original rebound 'd'
    when adding node 'e').
    """
    g = graph.DirectedGraph()
    for label in ('a', 'b', 'c', 'd', 'e'):
        g.addNode(label)
    for src, dst in [('a', 'b'), ('a', 'c'), ('a', 'd'), ('a', 'e'),
                     ('b', 'e'), ('c', 'e'), ('d', 'e')]:
        g.addEdge(src, dst)
    g.flatten()
    assert(sorted(g.iterChildren('a')) == ['b', 'c', 'd', 'e'])
    assert(sorted(g.iterChildren('b')) == ['e'])
    assert(sorted(g.iterChildren('c')) == ['e'])
    assert(sorted(g.iterChildren('d')) == ['e'])
    assert(sorted(g.iterChildren('e')) == [])
def testGetDisconnected(self):
    """getDisconnected() lists only nodes with no incident edges."""
    g = graph.DirectedGraph()
    # Each step: a batch of mutations, then the expected disconnected set.
    script = [
        ([('node', 'a')], ['a']),
        ([('node', 'b')], ['a', 'b']),
        ([('edge', 'a', 'b')], []),
        ([('node', 'c'), ('node', 'd')], ['c', 'd']),
        ([('edge', 'a', 'c')], ['d']),
    ]
    for actions, expected in script:
        for action in actions:
            if action[0] == 'node':
                g.addNode(action[1])
            else:
                g.addEdge(action[1], action[2])
        assert(sorted(g.getDisconnected()) == expected)
def testCreateDotFile(self):
    """generateDotFile() emits graphviz "dot" text; by default nodes are
    labelled with their value, and optional callbacks customise node and
    edge labels."""
    g = graph.DirectedGraph()
    s = StringIO()
    g.addNode('a')
    g.addNode('b')
    g.addEdge('a', 'b')
    # Default rendering: node labels only, bare edges.
    g.generateDotFile(s)
    s.seek(0)
    self.assertEquals(s.read(), """\
digraph graphName {
n0 [label="a"]
n1 [label="b"]
n0 -> n1
}
""")
    # Custom callbacks: node formatter and (from, to, edge-value) edge
    # formatter; the edge value here defaults to 1.
    s = StringIO()
    g.generateDotFile(s, lambda x: 'Node %s' % x,
        lambda fromNode, toNode, value: '%s -> %s: %s' % (fromNode, toNode, value))
    s.seek(0)
    self.assertEquals(s.read(), """\
digraph graphName {
n0 [label="Node a"]
n1 [label="Node b"]
n0 -> n1 [label="a -> b: 1"]
}
""")
|
en
| 0.829643
|
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #conary #test # Dynamic graphs (the graph structure is not known in advance) # Same thing, but limit the depth # add back edge \ digraph graphName { n0 [label="a"] n1 [label="b"] n0 -> n1 } \ digraph graphName { n0 [label="Node a"] n1 [label="Node b"] n0 -> n1 [label="a -> b: 1"] }
| 2.583697
| 3
|
dataprofiler/tests/profilers/test_text_options.py
|
ChrisWallace2020/DataProfiler
| 0
|
6627823
|
<reponame>ChrisWallace2020/DataProfiler<filename>dataprofiler/tests/profilers/test_text_options.py
from dataprofiler.profilers.profiler_options import TextOptions
from dataprofiler.tests.profilers.test_numerical_options import TestNumericalOptions
class TestTextOptions(TestNumericalOptions):
    """Run the inherited numerical-options test suite against TextOptions.

    The parent tests parameterise on ``option_class`` and ``keys``, so
    overriding them below is enough to exercise TextOptions, which adds
    the "vocab" option on top of the numeric option keys.
    """

    # Option class exercised by the inherited tests.
    option_class = TextOptions
    # All option keys TextOptions is expected to expose.
    keys = TestNumericalOptions.keys + ["vocab"]

    def test_init(self):
        # Delegated: parent logic runs with the overridden class/keys.
        super().test_init()

    def test_set_helper(self):
        super().test_set_helper()

    def test_set(self):
        super().test_set()

    def test_validate_helper(self):
        super().test_validate_helper()

    def test_validate(self):
        super().test_validate()

    def test_is_numeric_stats_enabled(self):
        super().test_is_numeric_stats_enabled()
|
from dataprofiler.profilers.profiler_options import TextOptions
from dataprofiler.tests.profilers.test_numerical_options import TestNumericalOptions
class TestTextOptions(TestNumericalOptions):
option_class = TextOptions
keys = TestNumericalOptions.keys + ["vocab"]
def test_init(self):
super().test_init()
def test_set_helper(self):
super().test_set_helper()
def test_set(self):
super().test_set()
def test_validate_helper(self):
super().test_validate_helper()
def test_validate(self):
super().test_validate()
def test_is_numeric_stats_enabled(self):
super().test_is_numeric_stats_enabled()
|
none
| 1
| 2.080865
| 2
|
|
var/spack/repos/builtin/packages/lhapdf/package.py
|
carlabguillen/spack
| 1
|
6627824
|
<reponame>carlabguillen/spack
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lhapdf(AutotoolsPackage):
    """LHAPDF is a general purpose C++ interpolator,
    used for evaluating PDFs from discretised data files. """

    homepage = "https://lhapdf.hepforge.org/"
    url = "https://lhapdf.hepforge.org/downloads/?f=LHAPDF-6.2.3.tar.gz"

    version('6.3.0', sha256='ed4d8772b7e6be26d1a7682a13c87338d67821847aa1640d78d67d2cef8b9b5d')
    version('6.2.3', sha256='d6e63addc56c57b6286dc43ffc56d901516f4779a93a0f1547e14b32cfd82dd1')

    # Autotools toolchain needed to (re)generate the build system.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    # Python bindings: built with cython/setuptools, importable at runtime.
    depends_on('python', type=('build', 'run'))
    depends_on('py-cython', type='build')
    depends_on('py-setuptools', type='build')
    depends_on('boost', type='build')
    # Only the 6.1.x series and earlier needs an external yaml-cpp.
    depends_on('yaml-cpp', type='build', when='@:6.1.5')

    def configure_args(self):
        """Return ./configure arguments: boost location plus -O3 flags."""
        args = ['--with-boost=' + self.spec['boost'].prefix,
                'FCFLAGS=-O3', 'CFLAGS=-O3', 'CXXFLAGS=-O3']
        # Older releases (<= 6.1.5) must also be pointed at yaml-cpp.
        if self.spec.satisfies('@:6.1.5'):
            args.append('--with-yaml-cpp=' + self.spec['yaml-cpp'].prefix)
        return args
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lhapdf(AutotoolsPackage):
"""LHAPDF is a general purpose C++ interpolator,
used for evaluating PDFs from discretised data files. """
homepage = "https://lhapdf.hepforge.org/"
url = "https://lhapdf.hepforge.org/downloads/?f=LHAPDF-6.2.3.tar.gz"
version('6.3.0', sha256='ed4d8772b7e6be26d1a7682a13c87338d67821847aa1640d78d67d2cef8b9b5d')
version('6.2.3', sha256='d6e63addc56c57b6286dc43ffc56d901516f4779a93a0f1547e14b32cfd82dd1')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('python', type=('build', 'run'))
depends_on('py-cython', type='build')
depends_on('py-setuptools', type='build')
depends_on('boost', type='build')
depends_on('yaml-cpp', type='build', when='@:6.1.5')
def configure_args(self):
args = ['--with-boost=' + self.spec['boost'].prefix,
'FCFLAGS=-O3', 'CFLAGS=-O3', 'CXXFLAGS=-O3']
if self.spec.satisfies('@:6.1.5'):
args.append('--with-yaml-cpp=' + self.spec['yaml-cpp'].prefix)
return args
|
en
| 0.681475
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) LHAPDF is a general purpose C++ interpolator, used for evaluating PDFs from discretised data files.
| 1.828739
| 2
|
test/profiling/memory_tests.py
|
grcanosa/Fast-RTPS
| 2
|
6627825
|
# Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shlex, subprocess, time, os, socket, sys, threading
# Configuration comes from the environment: PROFILING_BINS is a
# semicolon-separated list of test binaries to profile.
if os.environ.get("PROFILING_BINS"):
    binaries = os.environ.get("PROFILING_BINS").split(';')
valgrind = os.environ.get("VALGRIND_BIN")
certs_path = os.environ.get("CERTS_PATH")
# Default test duration, kept as a string because it is passed straight
# through on the profiled binary's command line.
test_time = "10"
if not valgrind:
    # No explicit binary configured -- rely on "valgrind" from PATH.
    valgrind = "valgrind"
def start_test(command, pubsub, time):
    """Profile `command` running as `pubsub` ("publisher"/"subscriber")
    under valgrind massif, once in best-effort mode and once in reliable
    mode, writing snapshots to ./output and post-processing each run into
    a CSV via memory_analysis.py.

    NOTE(review): the `time` parameter (duration in seconds, as a string)
    shadows the imported `time` module inside this function.
    """
    os.system("mkdir -p output")
    # One massif output file per reliability mode.
    valgrind_command_rel = [valgrind, "--tool=massif", "--stacks=yes", "--detailed-freq=1", "--max-snapshots=1000", "--massif-out-file=./output/consumption_" + pubsub + "_rel.out"]
    valgrind_command_be = [valgrind, "--tool=massif", "--stacks=yes", "--detailed-freq=1", "--max-snapshots=1000", "--massif-out-file=./output/consumption_" + pubsub + "_be.out"]
    options = ["--time=" + time]
    if certs_path:
        # Enable the security test variant when certificates are available.
        options.extend(["--security=true", "--certs=" + certs_path])
    # Best effort
    print(valgrind_command_be +
        [command, pubsub] +
        options)
    proc = subprocess.Popen(valgrind_command_be +
        [command, pubsub] +
        options)
    proc.communicate()
    # Convert the massif snapshots to CSV (fire-and-forget subprocess).
    py_command = "python3 ./memory_analysis.py ./output/consumption_" + pubsub + "_be.out ./output/MemoryTest_" + pubsub + "_be.csv"
    p = subprocess.Popen(py_command, shell=True)
    # Reliable
    proc = subprocess.Popen(valgrind_command_rel +
        [command, pubsub, "-r", "reliable"] +
        options)
    proc.communicate()
    py_command = "python3 ./memory_analysis.py ./output/consumption_" + pubsub + "_rel.out ./output/MemoryTest_" + pubsub + "_rel.csv"
    # print("Command: " + py_command)
    p = subprocess.Popen(py_command, shell=True)
# Optional positional overrides: argv[1] = role (publisher/subscriber),
# argv[2] = a single binary to test, argv[3] = test duration in seconds.
# NOTE(review): `binaries` is undefined when PROFILING_BINS is unset and
# no binary argument is supplied -- the loop below would raise NameError.
if len(sys.argv) >= 4:
    test_time = sys.argv[3]
if len(sys.argv) >= 3:
    binaries = [sys.argv[2]]
for command in binaries:
    if len(sys.argv) >= 2:
        # A role was requested: profile only that side, serially.
        pubsub = sys.argv[1]
        start_test(command, pubsub, test_time)
    else:
        # No role given: profile publisher and subscriber concurrently.
        tpub = threading.Thread(target=start_test, args=(command, "publisher", test_time))
        tpub.start()
        tsub = threading.Thread(target=start_test, args=(command, "subscriber", test_time))
        tsub.start()
quit()
|
# Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shlex, subprocess, time, os, socket, sys, threading
if os.environ.get("PROFILING_BINS"):
binaries = os.environ.get("PROFILING_BINS").split(';')
valgrind = os.environ.get("VALGRIND_BIN")
certs_path = os.environ.get("CERTS_PATH")
test_time = "10"
if not valgrind:
valgrind = "valgrind"
def start_test(command, pubsub, time):
os.system("mkdir -p output")
valgrind_command_rel = [valgrind, "--tool=massif", "--stacks=yes", "--detailed-freq=1", "--max-snapshots=1000", "--massif-out-file=./output/consumption_" + pubsub + "_rel.out"]
valgrind_command_be = [valgrind, "--tool=massif", "--stacks=yes", "--detailed-freq=1", "--max-snapshots=1000", "--massif-out-file=./output/consumption_" + pubsub + "_be.out"]
options = ["--time=" + time]
if certs_path:
options.extend(["--security=true", "--certs=" + certs_path])
# Best effort
print(valgrind_command_be +
[command, pubsub] +
options)
proc = subprocess.Popen(valgrind_command_be +
[command, pubsub] +
options)
proc.communicate()
py_command = "python3 ./memory_analysis.py ./output/consumption_" + pubsub + "_be.out ./output/MemoryTest_" + pubsub + "_be.csv"
p = subprocess.Popen(py_command, shell=True)
# Reliable
proc = subprocess.Popen(valgrind_command_rel +
[command, pubsub, "-r", "reliable"] +
options)
proc.communicate()
py_command = "python3 ./memory_analysis.py ./output/consumption_" + pubsub + "_rel.out ./output/MemoryTest_" + pubsub + "_rel.csv"
# print("Command: " + py_command)
p = subprocess.Popen(py_command, shell=True)
if len(sys.argv) >= 4:
test_time = sys.argv[3]
if len(sys.argv) >= 3:
binaries = [sys.argv[2]]
for command in binaries:
if len(sys.argv) >= 2:
pubsub = sys.argv[1]
start_test(command, pubsub, test_time)
else:
tpub = threading.Thread(target=start_test, args=(command, "publisher", test_time))
tpub.start()
tsub = threading.Thread(target=start_test, args=(command, "subscriber", test_time))
tsub.start()
quit()
|
en
| 0.758723
|
# Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Best effort # Reliable # print("Command: " + py_command)
| 1.937194
| 2
|
sdk/tests/tutorials/properties/test_transaction_properties.py
|
slemasne/lusid-sdk-python-preview
| 5
|
6627826
|
<reponame>slemasne/lusid-sdk-python-preview
# import general python packages
import unittest
import json
from datetime import datetime
import pytz
import logging
# import lusid specific packages
import lusid
import lusid.models as models
from utilities import InstrumentLoader
from utilities import TestDataUtilities
class TransactionProperty(unittest.TestCase):
    """End-to-end LUSID test: define a custom Transaction property, book a
    transaction carrying serialised tax details in that property, then read
    the transaction back and verify the value round-trips intact."""

    @classmethod
    def setUpClass(cls):
        # setup logging configuration
        cls.root_logger = logging.getLogger(__name__)
        cls.root_logger.setLevel(logging.INFO)
        # create a configured API client and the API facades used below
        api_client = TestDataUtilities.api_client()
        cls.property_definitions_api = lusid.PropertyDefinitionsApi(api_client)
        cls.instruments_api = lusid.InstrumentsApi(api_client)
        cls.transaction_portfolios_api = lusid.TransactionPortfoliosApi(api_client)
        # load instruments from InstrumentLoader
        instrument_loader = InstrumentLoader(cls.instruments_api)
        cls.instrument_ids = instrument_loader.load_instruments()
        # set test scope and code (shared by the property, portfolio and txn)
        cls.scope = "TransactionProperty"
        cls.code = "TransactionTaxDetail"

    def create_transaction_property(self):
        """Create the Transaction/{scope}/{code} string property definition;
        a definition left over from an earlier run is logged and ignored."""
        # Details of the property
        property_definition = models.CreatePropertyDefinitionRequest(
            domain="Transaction",
            scope=self.scope,
            code=self.code,
            display_name=self.code,
            data_type_id=lusid.ResourceId(scope="system", code="string"),
        )
        # create property definition
        try:
            self.property_definitions_api.create_property_definition(
                create_property_definition_request=property_definition
            )
        except lusid.ApiException as e:
            # NOTE(review): ApiExceptions other than "already exists" are
            # silently swallowed here.
            if json.loads(e.body)["name"] == "PropertyAlreadyExists":
                self.root_logger.info(
                    f"Property {property_definition.domain}/{property_definition.scope}/{property_definition.code} already exists"
                )

    def create_portfolio(self):
        """Create the GBP transaction portfolio used by the test; a
        portfolio left over from an earlier run is logged and ignored."""
        # Details of new portfolio to be created
        effective_date = datetime(2020, 12, 1, 0, 0, tzinfo=pytz.utc)
        create_portfolio_request = models.CreateTransactionPortfolioRequest(
            code=self.code,
            display_name=self.code,
            base_currency="GBP",
            created=effective_date,
        )
        # create portfolio
        try:
            self.transaction_portfolios_api.create_portfolio(
                scope=self.scope,
                create_transaction_portfolio_request=create_portfolio_request,
            )
        except lusid.ApiException as e:
            # NOTE(review): ApiExceptions other than "already exists" are
            # silently swallowed here.
            if json.loads(e.body)["name"] == "PortfolioWithIdAlreadyExists":
                self.root_logger.info(
                    f"Portfolio {create_portfolio_request.code} already exists"
                )

    def create_txn_with_property(self, instrument_id, property_value):
        """Upsert a single Buy transaction (FIGI `instrument_id`) whose
        custom transaction property carries `property_value` as a label
        value; returns the upsert response."""
        # setup the transaction
        effective_date = datetime(2020, 12, 1, 0, 0, tzinfo=pytz.utc)
        txn = models.TransactionRequest(
            transaction_id="TXN001",
            type="Buy",
            instrument_identifiers={"Instrument/default/Figi": instrument_id},
            transaction_date=effective_date,
            settlement_date=effective_date,
            units=1000,
            transaction_price=models.TransactionPrice(price=100, type="Price"),
            total_consideration=models.CurrencyAndAmount(amount=1, currency="GBP"),
            exchange_rate=1,
            transaction_currency="GBP",
            properties={
                f"Transaction/{self.scope}/{self.code}": lusid.PerpetualProperty(
                    key=f"Transaction/{self.scope}/{self.code}",
                    value=lusid.PropertyValue(label_value=property_value),
                )
            },
        )
        return self.transaction_portfolios_api.upsert_transactions(
            scope=self.scope, code=self.code, transaction_request=[txn]
        )

    def get_transaction(self, scope, code):
        """Fetch the transactions of the portfolio identified by scope/code."""
        return self.transaction_portfolios_api.get_transactions(scope=scope, code=code)

    def test_transaction_property(self):
        """Round-trip a JSON tax blob through the custom property."""
        # Value for our property
        transaction_tax_data = {"Tax": 1.0, "Rate": 0.01, "Schedule": "A"}
        # Convert property to string representation
        transaction_tax_string = json.dumps(transaction_tax_data)
        # Setup property and portfolio
        self.create_transaction_property()
        self.create_portfolio()
        # Setup transaction with txn tax details as the property value
        response = self.create_txn_with_property("BBG00KTDTF73", transaction_tax_string)
        self.assertIsNotNone(response)
        # Get transaction with property
        txn_response = self.get_transaction(scope=self.scope, code=self.code)
        self.assertIsNotNone(txn_response)
        # Parse property value from transaction and assert is equal to original string object
        queried_property_string = (
            txn_response.values[0]
            .properties[f"Transaction/{self.scope}/{self.code}"]
            .value.label_value
        )
        self.assertIsNotNone(queried_property_string)
        self.assertEqual(queried_property_string, transaction_tax_string)
        # Test individual key-value pairs against original data
        queried_property_dict = json.loads(queried_property_string)
        self.assertEqual(transaction_tax_data["Tax"], queried_property_dict["Tax"])
        self.assertEqual(transaction_tax_data["Rate"], queried_property_dict["Rate"])
        self.assertEqual(
            transaction_tax_data["Schedule"], queried_property_dict["Schedule"]
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
# import general python packages
import unittest
import json
from datetime import datetime
import pytz
import logging
# import lusid specific packages
import lusid
import lusid.models as models
from utilities import InstrumentLoader
from utilities import TestDataUtilities
class TransactionProperty(unittest.TestCase):
@classmethod
def setUpClass(cls):
# setup logging configuration
cls.root_logger = logging.getLogger(__name__)
cls.root_logger.setLevel(logging.INFO)
# create a configured API client
api_client = TestDataUtilities.api_client()
cls.property_definitions_api = lusid.PropertyDefinitionsApi(api_client)
cls.instruments_api = lusid.InstrumentsApi(api_client)
cls.transaction_portfolios_api = lusid.TransactionPortfoliosApi(api_client)
# load instruments from InstrumentLoader
instrument_loader = InstrumentLoader(cls.instruments_api)
cls.instrument_ids = instrument_loader.load_instruments()
# set test scope and code
cls.scope = "TransactionProperty"
cls.code = "TransactionTaxDetail"
def create_transaction_property(self):
# Details of the property
property_definition = models.CreatePropertyDefinitionRequest(
domain="Transaction",
scope=self.scope,
code=self.code,
display_name=self.code,
data_type_id=lusid.ResourceId(scope="system", code="string"),
)
# create property definition
try:
self.property_definitions_api.create_property_definition(
create_property_definition_request=property_definition
)
except lusid.ApiException as e:
if json.loads(e.body)["name"] == "PropertyAlreadyExists":
self.root_logger.info(
f"Property {property_definition.domain}/{property_definition.scope}/{property_definition.code} already exists"
)
def create_portfolio(self):
# Details of new portfolio to be created
effective_date = datetime(2020, 12, 1, 0, 0, tzinfo=pytz.utc)
create_portfolio_request = models.CreateTransactionPortfolioRequest(
code=self.code,
display_name=self.code,
base_currency="GBP",
created=effective_date,
)
# create portfolio
try:
self.transaction_portfolios_api.create_portfolio(
scope=self.scope,
create_transaction_portfolio_request=create_portfolio_request,
)
except lusid.ApiException as e:
if json.loads(e.body)["name"] == "PortfolioWithIdAlreadyExists":
self.root_logger.info(
f"Portfolio {create_portfolio_request.code} already exists"
)
def create_txn_with_property(self, instrument_id, property_value):
# setup the transaction
effective_date = datetime(2020, 12, 1, 0, 0, tzinfo=pytz.utc)
txn = models.TransactionRequest(
transaction_id="TXN001",
type="Buy",
instrument_identifiers={"Instrument/default/Figi": instrument_id},
transaction_date=effective_date,
settlement_date=effective_date,
units=1000,
transaction_price=models.TransactionPrice(price=100, type="Price"),
total_consideration=models.CurrencyAndAmount(amount=1, currency="GBP"),
exchange_rate=1,
transaction_currency="GBP",
properties={
f"Transaction/{self.scope}/{self.code}": lusid.PerpetualProperty(
key=f"Transaction/{self.scope}/{self.code}",
value=lusid.PropertyValue(label_value=property_value),
)
},
)
return self.transaction_portfolios_api.upsert_transactions(
scope=self.scope, code=self.code, transaction_request=[txn]
)
def get_transaction(self, scope, code):
return self.transaction_portfolios_api.get_transactions(scope=scope, code=code)
def test_transaction_property(self):
# Value for our property
transaction_tax_data = {"Tax": 1.0, "Rate": 0.01, "Schedule": "A"}
# Convert property to string representation
transaction_tax_string = json.dumps(transaction_tax_data)
# Setup property and portfolio
self.create_transaction_property()
self.create_portfolio()
# Setup transaction with txn tax details as the property value
response = self.create_txn_with_property("BBG00KTDTF73", transaction_tax_string)
self.assertIsNotNone(response)
# Get transaction with property
txn_response = self.get_transaction(scope=self.scope, code=self.code)
self.assertIsNotNone(txn_response)
# Parse property value from transaction and assert is equal to original string object
queried_property_string = (
txn_response.values[0]
.properties[f"Transaction/{self.scope}/{self.code}"]
.value.label_value
)
self.assertIsNotNone(queried_property_string)
self.assertEqual(queried_property_string, transaction_tax_string)
# Test individual key-value pairs against original data
queried_property_dict = json.loads(queried_property_string)
self.assertEqual(transaction_tax_data["Tax"], queried_property_dict["Tax"])
self.assertEqual(transaction_tax_data["Rate"], queried_property_dict["Rate"])
self.assertEqual(
transaction_tax_data["Schedule"], queried_property_dict["Schedule"]
)
if __name__ == "__main__":
unittest.main()
|
en
| 0.693152
|
# import general python packages # import lusid specific packages # setup logging configuration # create a configured API client # load instruments from InstrumentLoader # set test scope and code # Details of the property # create property definition # Details of new portfolio to be created # create portfolio # setup the transaction # Value for our property # Convert property to string representation # Setup property and portfolio # Setup transaction with txn tax details as the property value # Get transaction with property # Parse property value from transaction and assert is equal to original string object # Test individual key-value pairs against original data
| 2.550003
| 3
|
experiments/murtaza/multiworld/reset_free/sawyer_push/sawyer_push_her_td3_count_based_goal_sampling.py
|
Asap7772/railrl_evalsawyer
| 0
|
6627827
|
import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env_reset_full_goal import SawyerPushAndReachXYEnv
from rlkit.data_management.obs_dict_replay_buffer import \
ObsDictRelabelingBuffer
from rlkit.exploration_strategies.count_based.count_based_goal_sampling_env import CountBasedGoalSamplingEnv
from rlkit.images.camera import sawyer_init_camera_zoomed_in_fixed
from rlkit.launchers.launcher_util import run_experiment
import rlkit.torch.pytorch_util as ptu
from rlkit.exploration_strategies.base import (
PolicyWrappedWithExplorationStrategy
)
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.exploration_strategies.gaussian_strategy import GaussianStrategy
from rlkit.exploration_strategies.ou_strategy import OUStrategy
from rlkit.torch.grill.launcher import get_video_save_func
from rlkit.torch.her.her_td3 import HerTd3
from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy
import rlkit.samplers.rollout_functions as rf
def her_td3_experiment(variant):
    """Build and train HER + TD3 on a goal-conditioned environment.

    `variant` is a nested hyperparameter dict: env class/kwargs, replay
    buffer class/kwargs, exploration settings, network sizes and algorithm
    kwargs. The base env is wrapped so that goals are sampled with a
    count-based scheme drawing from the replay buffer.
    """
    env = variant['env_class'](**variant['env_kwargs'])
    observation_key = variant['observation_key']
    desired_goal_key = variant['desired_goal_key']
    # e.g. 'state_desired_goal' -> 'state_achieved_goal'
    achieved_goal_key = desired_goal_key.replace("desired", "achieved")
    variant['algo_kwargs']['her_kwargs']['observation_key'] = observation_key
    variant['algo_kwargs']['her_kwargs']['desired_goal_key'] = desired_goal_key
    replay_buffer = variant['replay_buffer_class'](
        env=env,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
        achieved_goal_key=achieved_goal_key,
        **variant['replay_buffer_kwargs']
    )
    # The goal sampler draws candidate goals from the replay buffer.
    variant['count_based_sampler_kwargs']['replay_buffer'] = replay_buffer
    env = CountBasedGoalSamplingEnv(wrapped_env=env, **variant['count_based_sampler_kwargs'])
    obs_dim = env.observation_space.spaces['observation'].low.size
    action_dim = env.action_space.low.size
    goal_dim = env.observation_space.spaces['desired_goal'].low.size
    # Exploration noise: OU process, Gaussian, or epsilon-greedy.
    exploration_type = variant['exploration_type']
    if exploration_type == 'ou':
        es = OUStrategy(
            action_space=env.action_space,
            **variant['es_kwargs']
        )
    elif exploration_type == 'gaussian':
        es = GaussianStrategy(
            action_space=env.action_space,
            **variant['es_kwargs'],
        )
    elif exploration_type == 'epsilon':
        es = EpsilonGreedy(
            action_space=env.action_space,
            **variant['es_kwargs'],
        )
    else:
        raise Exception("Invalid type: " + exploration_type)
    # Twin Q-functions (TD3) conditioned on obs + action + goal.
    qf1 = ConcatMlp(
        input_size=obs_dim + action_dim + goal_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    qf2 = ConcatMlp(
        input_size=obs_dim + action_dim + goal_dim,
        output_size=1,
        **variant['qf_kwargs']
    )
    # Deterministic goal-conditioned policy.
    policy = TanhMlpPolicy(
        input_size=obs_dim + goal_dim,
        output_size=action_dim,
        **variant['policy_kwargs']
    )
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=es,
        policy=policy,
    )
    algorithm = HerTd3(
        env,
        qf1=qf1,
        qf2=qf2,
        policy=policy,
        training_env=env,
        exploration_policy=exploration_policy,
        replay_buffer=replay_buffer,
        **variant['algo_kwargs']
    )
    # Optionally record evaluation rollouts as video after each epoch.
    if variant.get("save_video", False):
        rollout_function = rf.create_rollout_function(
            rf.multitask_rollout,
            max_path_length=algorithm.max_path_length,
            observation_key=algorithm.observation_key,
            desired_goal_key=algorithm.desired_goal_key,
        )
        video_func = get_video_save_func(
            rollout_function,
            env,
            policy,
            variant,
        )
        algorithm.post_epoch_funcs.append(video_func)
    if ptu.gpu_enabled():
        algorithm.cuda()
    algorithm.train()
if __name__ == "__main__":
    # Base hyperparameter configuration; the sweep below overrides a few
    # of these keys per run.
    # noinspection PyTypeChecker
    variant = dict(
        algo_kwargs=dict(
            base_kwargs=dict(
                num_epochs=5003,
                num_steps_per_epoch=1000,
                num_steps_per_eval=1000,
                max_path_length=100,
                num_updates_per_env_step=1,
                batch_size=128,
                discount=0.99,
                min_num_steps_before_training=128,
                reward_scale=100,
            ),
            her_kwargs=dict(),
            td3_kwargs=dict(),
        ),
        env_class=SawyerPushAndReachXYEnv,
        env_kwargs=dict(
            reward_type='puck_distance',
            reset_free=False,
            action_scale=.02,
            # hand_low=(-0.275, 0.275, 0.02),
            # hand_high=(0.275, 0.825, .02),
            # puck_low=(-0.25, 0.3),
            # puck_high=(0.25, 0.8),
            # goal_low=(-0.25, 0.3),
            # goal_high=(0.25, 0.8),
            hand_low=(-0.275, 0.275, 0.02),
            hand_high=(0.275, 0.825, .02),
            puck_low=(-0.25, 0.3),
            puck_high=(0.25, 0.8),
            # Full goal: (hand x, hand y, hand z, puck x, puck y).
            goal_low=(-0.25, 0.3, 0.02, -0.25, 0.3),
            goal_high=(0.25, 0.8, .02, 0.25, 0.8),
        ),
        replay_buffer_class=ObsDictRelabelingBuffer,
        replay_buffer_kwargs=dict(
            max_size=int(1E6),
            fraction_goals_are_rollout_goals=0.5,
            fraction_resampled_goals_are_env_goals=0.5,
            ob_keys_to_save=['state_achieved_goal']
        ),
        qf_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        policy_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        normalize=False,
        algorithm='HER-TD3',
        version='normal',
        es_kwargs=dict(
            max_sigma=.8,
        ),
        observation_key='state_observation',
        desired_goal_key='state_desired_goal',
        exploration_type='ou',
        save_video_period=500,
        do_state_exp=True,
        init_camera=sawyer_pusher_camera_upright,
        save_video=True,
        count_based_sampler_kwargs=dict(
            num_samples=1000,
            obs_key='state_achieved_goal',
            goal_key='state_desired_goal',
            use_count_based_goal=True,
            theta=1,
            hash_dim=16,
            use_softmax=True,
        )
    )
    # Grid sweep over reset-free mode, sampler temperature and reward type.
    search_space = {
        'env_kwargs.reset_free':[True, False],
        'count_based_sampler_kwargs.theta':[10, 1, 1/10],
        'env_kwargs.reward_type': ['puck_distance', 'state_distance'],
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    # n_seeds= 1
    # mode='local'
    # exp_prefix= 'test'
    n_seeds=2
    mode = 'ec2'
    exp_prefix = 'sawyer_push_env_her_td3_count_based_goal_sampling_from_buffer_full_goal'
    for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
        # theta only matters under softmax sampling; skip redundant combos.
        if variant['count_based_sampler_kwargs']['use_softmax'] == False and variant['count_based_sampler_kwargs']['theta'] != 1:
            continue
        for i in range(n_seeds):
            run_experiment(
                her_td3_experiment,
                exp_prefix=exp_prefix,
                mode=mode,
                variant=variant,
            )
|
import rlkit.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env_reset_full_goal import SawyerPushAndReachXYEnv
from rlkit.data_management.obs_dict_replay_buffer import \
ObsDictRelabelingBuffer
from rlkit.exploration_strategies.count_based.count_based_goal_sampling_env import CountBasedGoalSamplingEnv
from rlkit.images.camera import sawyer_init_camera_zoomed_in_fixed
from rlkit.launchers.launcher_util import run_experiment
import rlkit.torch.pytorch_util as ptu
from rlkit.exploration_strategies.base import (
PolicyWrappedWithExplorationStrategy
)
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.exploration_strategies.gaussian_strategy import GaussianStrategy
from rlkit.exploration_strategies.ou_strategy import OUStrategy
from rlkit.torch.grill.launcher import get_video_save_func
from rlkit.torch.her.her_td3 import HerTd3
from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy
import rlkit.samplers.rollout_functions as rf
def her_td3_experiment(variant):
    """Build and train HER + TD3 on the environment described by ``variant``.

    ``variant`` supplies the env class/kwargs, replay-buffer class/kwargs,
    network kwargs, exploration settings and algorithm kwargs. The env is
    wrapped in a ``CountBasedGoalSamplingEnv`` that resamples goals from the
    replay buffer using count-based weights.
    """
    base_env = variant['env_class'](**variant['env_kwargs'])

    obs_key = variant['observation_key']
    goal_key = variant['desired_goal_key']
    achieved_key = goal_key.replace("desired", "achieved")

    # HER needs to know which dict-observation keys to relabel on.
    her_kwargs = variant['algo_kwargs']['her_kwargs']
    her_kwargs['observation_key'] = obs_key
    her_kwargs['desired_goal_key'] = goal_key

    replay_buffer = variant['replay_buffer_class'](
        env=base_env,
        observation_key=obs_key,
        desired_goal_key=goal_key,
        achieved_goal_key=achieved_key,
        **variant['replay_buffer_kwargs']
    )
    # The count-based goal sampler draws candidate goals from the buffer.
    variant['count_based_sampler_kwargs']['replay_buffer'] = replay_buffer
    env = CountBasedGoalSamplingEnv(wrapped_env=base_env, **variant['count_based_sampler_kwargs'])

    spaces = env.observation_space.spaces
    obs_dim = spaces['observation'].low.size
    goal_dim = spaces['desired_goal'].low.size
    action_dim = env.action_space.low.size

    # Exploration strategy is chosen by name via a dispatch table.
    exploration_type = variant['exploration_type']
    strategy_classes = {
        'ou': OUStrategy,
        'gaussian': GaussianStrategy,
        'epsilon': EpsilonGreedy,
    }
    if exploration_type not in strategy_classes:
        raise Exception("Invalid type: " + exploration_type)
    es = strategy_classes[exploration_type](
        action_space=env.action_space,
        **variant['es_kwargs']
    )

    def make_qf():
        # Twin Q-networks (TD3) conditioned on observation, action and goal.
        return ConcatMlp(
            input_size=obs_dim + action_dim + goal_dim,
            output_size=1,
            **variant['qf_kwargs']
        )

    qf1 = make_qf()
    qf2 = make_qf()
    policy = TanhMlpPolicy(
        input_size=obs_dim + goal_dim,
        output_size=action_dim,
        **variant['policy_kwargs']
    )
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=es,
        policy=policy,
    )
    algorithm = HerTd3(
        env,
        qf1=qf1,
        qf2=qf2,
        policy=policy,
        training_env=env,
        exploration_policy=exploration_policy,
        replay_buffer=replay_buffer,
        **variant['algo_kwargs']
    )

    if variant.get("save_video", False):
        # Periodically record evaluation rollouts to video.
        rollout_function = rf.create_rollout_function(
            rf.multitask_rollout,
            max_path_length=algorithm.max_path_length,
            observation_key=algorithm.observation_key,
            desired_goal_key=algorithm.desired_goal_key,
        )
        video_func = get_video_save_func(
            rollout_function,
            env,
            policy,
            variant,
        )
        algorithm.post_epoch_funcs.append(video_func)

    if ptu.gpu_enabled():
        algorithm.cuda()
    algorithm.train()
if __name__ == "__main__":
    # Launch a hyperparameter sweep of HER-TD3 with count-based goal sampling
    # on the Sawyer push-and-reach environment.
    # noinspection PyTypeChecker
    variant = dict(
        algo_kwargs=dict(
            base_kwargs=dict(
                num_epochs=5003,
                num_steps_per_epoch=1000,
                num_steps_per_eval=1000,
                max_path_length=100,
                num_updates_per_env_step=1,
                batch_size=128,
                discount=0.99,
                min_num_steps_before_training=128,
                reward_scale=100,
            ),
            her_kwargs=dict(),
            td3_kwargs=dict(),
        ),
        env_class=SawyerPushAndReachXYEnv,
        env_kwargs=dict(
            reward_type='puck_distance',
            reset_free=False,
            action_scale=.02,
            hand_low=(-0.275, 0.275, 0.02),
            hand_high=(0.275, 0.825, .02),
            puck_low=(-0.25, 0.3),
            puck_high=(0.25, 0.8),
            # Full goal = (hand x, hand y, hand z, puck x, puck y).
            goal_low=(-0.25, 0.3, 0.02, -0.25, 0.3),
            goal_high=(0.25, 0.8, .02, 0.25, 0.8),
        ),
        replay_buffer_class=ObsDictRelabelingBuffer,
        replay_buffer_kwargs=dict(
            max_size=int(1E6),
            fraction_goals_are_rollout_goals=0.5,
            fraction_resampled_goals_are_env_goals=0.5,
            ob_keys_to_save=['state_achieved_goal']
        ),
        qf_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        policy_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        normalize=False,
        algorithm='HER-TD3',
        version='normal',
        es_kwargs=dict(
            max_sigma=.8,
        ),
        observation_key='state_observation',
        desired_goal_key='state_desired_goal',
        exploration_type='ou',
        save_video_period=500,
        do_state_exp=True,
        init_camera=sawyer_pusher_camera_upright,
        save_video=True,
        count_based_sampler_kwargs=dict(
            num_samples=1000,
            obs_key='state_achieved_goal',
            goal_key='state_desired_goal',
            use_count_based_goal=True,
            theta=1,
            hash_dim=16,
            use_softmax=True,
        )
    )
    # Dotted keys index into the nested dicts of `variant`.
    search_space = {
        'env_kwargs.reset_free': [True, False],
        'count_based_sampler_kwargs.theta': [10, 1, 1 / 10],
        'env_kwargs.reward_type': ['puck_distance', 'state_distance'],
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    # Local debugging settings:
    # n_seeds = 1
    # mode = 'local'
    # exp_prefix = 'test'
    n_seeds = 2
    mode = 'ec2'
    exp_prefix = 'sawyer_push_env_her_td3_count_based_goal_sampling_from_buffer_full_goal'
    for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
        # Without softmax the temperature `theta` has no effect, so run only
        # one representative theta value (theta == 1) for that setting.
        sampler_kwargs = variant['count_based_sampler_kwargs']
        if not sampler_kwargs['use_softmax'] and sampler_kwargs['theta'] != 1:
            continue
        for i in range(n_seeds):
            run_experiment(
                her_td3_experiment,
                exp_prefix=exp_prefix,
                mode=mode,
                variant=variant,
            )
|
en
| 0.388704
|
# noinspection PyTypeChecker # hand_low=(-0.275, 0.275, 0.02), # hand_high=(0.275, 0.825, .02), # puck_low=(-0.25, 0.3), # puck_high=(0.25, 0.8), # goal_low=(-0.25, 0.3), # goal_high=(0.25, 0.8), # n_seeds= 1 # mode='local' # exp_prefix= 'test'
| 1.599877
| 2
|
espider/__init__.py
|
gitduk/espider
| 0
|
6627828
|
import os
import sys
import time
import json
import random
import os.path
import logging
import aiohttp
import asyncio
from w3lib.url import canonicalize_url
from espider.request import Request
from espider.utils import (
get_md5, human_time, pretty_error
)
from espider.response import Response
from espider.settings import SpiderSetting
from espider._utils._colorlog import ColoredFormatter
from inspect import isgenerator
from collections.abc import Iterable, Coroutine, Generator
logger = logging.getLogger(__name__)
class Spider(object):
    """Asynchronous crawler base class built on aiohttp + asyncio.

    Subclasses typically override ``start_requests`` (yield ``Request``
    objects built with ``self.request``), ``parse`` (default callback) and
    optionally ``prepare``/``close``/``pipeline``.  Configuration (queues,
    middlewares, pipelines, Redis clients, timeouts) lives in
    ``self.setting`` (a ``SpiderSetting``).  Runtime statistics are kept in
    ``self._msg`` and optionally mirrored to Redis.
    """

    def __init__(self, name=None):
        # Spider name defaults to the subclass name; it is used as the logger
        # name and (elsewhere) as the key prefix for Redis queues/filters.
        self.name = name or self.__class__.__name__
        self.setting = SpiderSetting(self)
        # Maps callback name -> auto-assigned priority in first-seen order.
        self._priority_callback_map = {}
        self._next_priority_index = 1
        # Class-level `headers`/`cookies` declared on a subclass are promoted
        # to instance attributes; otherwise empty dicts are used.
        self.headers = self.headers if hasattr(self, 'headers') else {}
        self.cookies = self.cookies if hasattr(self, 'cookies') else {}
        self._start_time = time.time()
        self.logger = logging.getLogger(self.name)
        # Outstanding-work counter; the spider shuts down once the `stop`
        # property (which reads this) becomes true.
        self._stop = 1
        self._loop = asyncio.get_event_loop()
        self.logger.setLevel(self.setting.log_level or logging.DEBUG)
        if not self.logger.handlers:
            # Attach a colored stream handler only once per logger name.
            sh = logging.StreamHandler()
            sh.setLevel(self.setting.log_level or logging.DEBUG)
            formatter = ColoredFormatter(fmt=self.setting.log_format, datefmt=self.setting.log_datefmt)
            sh.setFormatter(formatter)
            self.logger.addHandler(sh)
        # Runtime statistics; see the `msg` property for the public view.
        self._msg = {
            'requests': 0,
            'requesting': 0,
            'request_speed': 0,
            'downloaded': 0,
            'download_failed': 0,
            'runtime': 0.0,
            'items': 0,
            'item_speed': 0,
            'item_dropped': 0,
            'download_speed': 0,
            'request_dropped': 0,
            'response_dropped': 0,
            'yield_item_map': {},
            'yield_request_map': {},
            'callback_runtime_map': {},
        }

    @property
    def loop(self):
        """Return (and lazily re-acquire) the event loop used by ``start``."""
        if not self._loop:
            try:
                # NOTE(review): bare `except:` below also swallows
                # KeyboardInterrupt/SystemExit — consider `except Exception`.
                self._loop = asyncio.get_event_loop()
            except:
                self._loop = asyncio.get_running_loop()
        return self._loop

    def start(self):
        """Blocking entry point: run the download loop until completion.

        On Ctrl-C the spider is closed and the process exits immediately;
        other exceptions are pretty-printed and the spider is closed.
        """
        try:
            self.loop.run_until_complete(self._downloader())
        except KeyboardInterrupt:
            self.logger.warning('KeyboardInterrupt')
            self._close()
            try:
                sys.exit(0)
            except SystemExit:
                # os._exit skips cleanup handlers; used to force-kill pending
                # asyncio tasks after Ctrl-C.
                os._exit(0)
        except Exception as e:
            pretty_error(e, self.logger)
            self._close()

    async def _init_(self):
        """Validate settings and seed the request queue from start_requests."""
        # assert params
        if callable(self.setting.item_pipelines):
            # A single pipeline function is promoted to a one-element list.
            self.setting.item_pipelines = [self.setting.item_pipelines]
        assert isinstance(self.setting.item_pipelines, Iterable), \
            'ITEM_PIPELINE type error: except function or function list, get {}.'.format(self.setting.item_pipelines)
        for pipe in self.setting.item_pipelines:
            assert callable(pipe), 'ITEM_PIPELINE({}) not callable'.format(pipe)
        if self.setting.request_middlewares is not None:
            if callable(self.setting.request_middlewares):
                self.setting.request_middlewares = [self.setting.request_middlewares]
            self._check_middlewares(self.setting.request_middlewares)
        if self.setting.response_middlewares is not None:
            if callable(self.setting.response_middlewares):
                self.setting.response_middlewares = [self.setting.response_middlewares]
            self._check_middlewares(self.setting.response_middlewares)
        # init request queue
        self._msg['callback_runtime_map'][self.start_requests.__name__] = (time.time(), 0)
        try:
            request_list = self.start_requests()
        except Exception as e:
            # NOTE(review): other call sites pass `self.logger` here, not
            # `self` — likely a typo; confirm pretty_error's signature.
            pretty_error(e, self)
        else:
            if not request_list: return
            if not isinstance(request_list, Iterable): request_list = [request_list]
            for r in request_list:
                if not r: continue
                await self._process_return(self.start_requests.__name__, r)
            # Record (start, end, human-readable duration) for start_requests.
            start_time = self._msg['callback_runtime_map'].get(self.start_requests.__name__)[0]
            end_time = time.time()
            self._msg['callback_runtime_map'][self.start_requests.__name__] = (
                start_time, end_time, human_time(end_time - start_time)
            )

    async def _downloader(self):
        """Main scheduling loop: pop requests, fetch in batches, dispatch responses."""
        try:
            self.prepare()
            await self._init_()
            req_list = []
            while not self.stop:
                req = await self.setting.request_queue.pop()
                if req: req_list.append(self.async_request(req))
                if len(req_list) >= self.setting.request_batch_size or await self.setting.request_queue.empty():
                    # Fire the batch of requests concurrently.
                    resp_list = await asyncio.gather(*req_list)
                    # Dispatch each non-None response to its callback chain.
                    await asyncio.gather(*[self._process_response(resp) for resp in resp_list if resp is not None])
                    req_list.clear()
                if await self.setting.request_queue.empty(): self._stop -= 1
        except Exception as e:
            pretty_error(e, self.logger)
        finally:
            # Best-effort teardown: close the HTTP session, flush stats,
            # optionally clear the Redis dedup filter, then close the spider.
            if self.setting.aiohttp_clientsession: await self.setting.aiohttp_clientsession.close()
            if self.setting.redis_msg: await self._write_msg_to_redis()
            if self.setting.clear_filter: await self._clear_filter()
            self._close()

    @staticmethod
    def _check_middlewares(middlewares):
        """Assert that ``middlewares`` is an iterable of callables."""
        assert isinstance(middlewares, Iterable), \
            'MIDDLEWARES type error: except function or function list, get {}.'.format(middlewares)
        for mid in middlewares:
            assert callable(mid), 'Middleware {} not callable.'.format(mid)

    def request(self, url=None, method=None, data=None, json=None, headers=None, cookies=None, callback=None,
                error_callback=None, cb_args=None, cb_kwargs=None, priority=None, allow_redirects=True, **kwargs):
        """Build a ``Request``.

        The callback defaults to ``self.parse``.  If no explicit priority is
        given, each distinct callback gets a stable auto-assigned priority in
        the order callbacks are first seen.  Extra ``**kwargs`` are forwarded
        to the ``Request`` constructor untouched.
        """
        if callback is None: callback = self.parse
        if callback.__name__ not in self._priority_callback_map.keys():
            self._priority_callback_map[callback.__name__] = self._next_priority_index
            self._next_priority_index += 1
        if priority is None: priority = self._priority_callback_map.get(callback.__name__)
        request_params = {
            'url': url,
            'method': method or 'GET',
            'headers': headers or self.headers or {'User-Agent': random.choice(self.setting.user_agent_list)},
            'data': data,
            'json': json,
            'cookies': cookies or self.cookies or {},
            'allow_redirects': allow_redirects,
            'priority': priority,
            'callback': callback,
            'error_callback': error_callback,
            'cb_args': cb_args,
            'cb_kwargs': cb_kwargs,
            **kwargs,
        }
        return Request(**request_params)

    @property
    def client_session(self):
        """Return a shared aiohttp ClientSession, recreating it if closed."""
        if not self.setting.aiohttp_clientsession or self.setting.aiohttp_clientsession.closed:
            self.setting.aiohttp_clientsession = aiohttp.ClientSession(
                connector=aiohttp.TCPConnector(limit=self.setting.request_batch_size * 2),
                timeout=aiohttp.ClientTimeout(total=self.setting.request_timeout)
            )
        return self.setting.aiohttp_clientsession

    async def async_request(self, req):
        """Fetch one request and return a ``Response`` (or None if dropped/failed).

        On failure the (per-request or spider-level) error callback runs; its
        return value may be re-queued as a Request, returned as a Response, or
        fed to the item pipelines.
        """
        # Run request middlewares; a None result drops the request.
        req = await self._process_request(req)
        if req is None: return
        try:
            msg = self._collect_msg(req.callback.__name__, req)
            self.logger.info(msg)
            self._msg['requesting'] += 1
            self._msg['requests'] += 1
            self._msg['request_speed'] = self._msg['requests'] / (self._msg['runtime'] or 1)
            if self.setting.redis_msg: await self._write_msg_to_redis()
            # Only the keys aiohttp understands are forwarded from the Request.
            async with self.client_session.request(
                    **{k: v for k, v in req.__dict__.items() if k in self.setting.request_keys}
            ) as _resp:
                data = await _resp.read()
                resp = Response(_resp)
                resp.content = data
                resp.request = req
        except Exception as e:
            self._msg['requesting'] -= 1
            self._msg['download_failed'] += 1
            if self.setting.redis_msg: await self._write_msg_to_redis()
            result, cb_name = (req.error_callback(req, e), req.error_callback.__name__) if callable(
                req.error_callback) else (self.error_callback(req, e), self.error_callback.__name__)
            if result is None: return
            if isinstance(result, Coroutine): result = await result
            if isinstance(result, Request):
                # NOTE(review): `push` is awaited in _process_return; calling
                # it un-awaited here may produce an un-run coroutine — confirm.
                self.setting.request_queue.push(result)
            elif isinstance(result, Response):
                return result
            else:
                if not isinstance(result, Generator): result = [result]
                for r in result:
                    await self._process_item(cb_name, r)
        else:
            # Success: update spider stats and hand the response back.
            self._msg['requesting'] -= 1
            await self._update_msg(req.callback.__name__)
            return resp

    async def _process_request(self, req):
        """Run request middlewares; return the (possibly replaced) request or None."""
        req = await self._process_middleware(req, self.setting.request_middlewares)
        if req is None:
            self._msg['request_dropped'] += 1
            return
        return req

    async def _process_response(self, resp):
        """Run response middlewares, then the callback, then dispatch its yields."""
        resp = await self._process_middleware(resp, self.setting.response_middlewares)
        if resp is None:
            self._msg['response_dropped'] += 1
            return
        else:
            try:
                for r in await self._process_callback(resp):
                    await self._process_return(resp.request.callback.__name__, r)
            except Exception as e:
                pretty_error(e, self.logger)
            finally:
                # One unit of outstanding work has been consumed.
                self._stop -= 1

    async def _process_middleware(self, resq, middlewares):
        """Pipe ``resq`` through each middleware; a falsy result drops it."""
        if not middlewares: return resq
        try:
            for mid in middlewares:
                resq = mid(resq)
                # Middlewares may be plain functions or coroutines.
                if isinstance(resq, Coroutine): resq = await resq
                if not resq: return
        except Exception as e:
            pretty_error(e, self.logger)
        else:
            return resq

    async def _process_callback(self, resp):
        """Invoke the response callback; always return an iterable of its yields.

        A list of responses (batched sub-requests) is passed whole to the
        first response's callback; a single response is called with its own
        ``cb_args``/``cb_kwargs``.  Errors are logged and yield an empty list.
        """
        try:
            if isinstance(resp, list):
                result = resp[0].request.callback(resp)
            else:
                result = resp.request.callback(resp, *resp.request.cb_args, **resp.request.cb_kwargs)
        except Exception as e:
            pretty_error(e, self.logger)
            return []
        else:
            if isinstance(result, Coroutine): result = await result
            if not result: return []
            if not isgenerator(result): result = [result]
            return result

    async def _process_return(self, cb_name, r):
        """Route one value yielded by a callback.

        A ``Request`` is queued; a list consisting entirely of Requests is
        fetched immediately as a batch and its callback results recursed on;
        anything else goes to the item pipelines.
        """
        if isinstance(r, Request):
            await self.setting.request_queue.push(r)
        else:
            if isinstance(r, list):
                for i in r:
                    if isinstance(i, Request):
                        continue
                    else:
                        break
                else:
                    # for/else: every element was a Request -> batch-fetch now.
                    resp_list = await asyncio.gather(*[self.async_request(_) for _ in r if _])
                    # NOTE(review): the loop variable below shadows the outer
                    # `r` (the list) — works, but worth renaming.
                    for r in await self._process_callback(resp_list):
                        await self._process_return(resp_list[0].request.callback.__name__, r)
                    self._stop -= len(resp_list)
                    return
            await self._process_item(cb_name, r)

    async def _process_item(self, cb_name, item):
        """Run the item pipelines over ``item`` and update item statistics."""
        try:
            for pipe in self.setting.item_pipelines:
                item = pipe(item)
                if isinstance(item, Coroutine): item = await item
                if item is None:
                    # A pipeline returning None counts the item as dropped
                    # (subsequent pipelines still receive None).
                    self._msg['item_dropped'] += 1
        except Exception as e:
            pretty_error(e, self.logger)
        else:
            # Update item stats.
            self._msg['items'] += 1
            self._msg['item_speed'] = self._msg['items'] / (self._msg['runtime'] or 1)
            if cb_name not in self._msg['yield_item_map'].keys(): self._msg['yield_item_map'][cb_name] = 0
            self._msg['yield_item_map'][cb_name] += 1
            if self.setting.redis_msg: await self._write_msg_to_redis()

    @property
    def stop(self):
        # Dropped responses never decrement _stop themselves, so they are
        # subtracted here to keep the outstanding-work accounting balanced.
        return self._stop - self._msg['response_dropped'] <= 0

    def _close(self):
        """Run the user ``close`` hook, then print the final stats summary."""
        try:
            self.close()
        except Exception as e:
            pretty_error(e, self.logger)
        finally:
            self._close_msg()

    async def request_finger(self, req):
        """Deduplicate ``req`` by fingerprint; return it only if unseen.

        The fingerprint is an md5 over the canonicalized URL plus any
        body/auth-related fields present on the request.  The filter backend
        may be an in-memory set or a Redis client exposing ``sadd``.
        """
        url = req.url
        try:
            args = [canonicalize_url(url)]
            for arg in ('data', 'files', 'auth', 'cert', 'json', 'cookies'):
                if req.__dict__.get(arg):
                    args.append(req.__dict__.get(arg))
            finger = get_md5(*args)
        except Exception as e:
            pretty_error(e, self.logger)
        else:
            if isinstance(self.setting.request_filter, set):
                if finger not in self.setting.request_filter:
                    self.setting.request_filter.add(finger)
                    return req
                else:
                    self.logger.warning("filter: {}".format(req))
            elif hasattr(self.setting.request_filter, 'sadd'):
                # Redis SADD returns 1 only when the member is new.
                if await self.setting.request_filter.sadd(self.setting.request_filter_key, finger):
                    return req
                else:
                    self.logger.warning("filter: {}".format(req))
            else:
                self.logger.warning('Invalid request filter type: {}'.format(self.setting.request_filter))

    def response_filter(self, resp):
        # NOTE(review): this logs matching responses but still returns them
        # unchanged — confirm whether filtered responses should be dropped.
        if resp.status_code in self.setting.response_filter_code:
            self.logger.warning("filter: {}".format(resp))
        return resp

    @staticmethod
    def __split_result(result):
        """Split a callback result into (item[, args[, kwargs]], tag).

        The trailing integer tags the shape: 1 = item only, 2 = item + args,
        3 = item + args + kwargs.  NOTE(review): appears unused within this
        class — possibly dead code or used by subclasses.
        """
        if isinstance(result, (dict, list, str)):
            return result, 1
        if isinstance(result, tuple):
            if len(result) == 1:
                return result[0], 1
            else:
                if isinstance(result[-1], dict):
                    item, *args, kwargs = result
                    return item, args, kwargs, 3
                else:
                    item, *args = result
                    return item, args, 2
        else:
            return result, 1

    def prepare(self):
        """Hook called before the download loop starts; override as needed."""
        pass

    def start_requests(self):
        """Yield the initial ``Request`` objects; subclasses must override."""
        yield ...

    def parse(self, response, *args, **kwargs):
        """Default response callback; subclasses override to extract data."""
        pass

    def pipeline(self, item):
        """Default item pipeline: log the item and pass it through."""
        self.logger.debug('Pipeline: {}'.format(item))
        return item

    def error_callback(self, req, error):
        """Default download-error handler: log and drop the request."""
        pretty_error(error, self.logger)
        if error.__class__.__name__ == 'TimeoutError': self.logger.warning('RequestTimeout: {}'.format(req))
        return None

    def close(self):
        """Hook called when the spider shuts down; override as needed."""
        pass

    async def _update_msg(self, cb_name):
        """Update download/runtime stats after a successful fetch for ``cb_name``."""
        current_time = time.time()
        self._msg['runtime'] = current_time - self._start_time
        self._msg['downloaded'] += 1
        self._msg['download_speed'] = self._msg['downloaded'] / (self._msg['runtime'] or 1)
        # Each delivered response adds one unit of pending callback work.
        self._stop += 1
        if cb_name not in self._msg['yield_request_map'].keys():
            self._msg['yield_request_map'][cb_name] = 0
        self._msg['yield_request_map'][cb_name] += 1
        if cb_name not in self._msg['callback_runtime_map'].keys():
            self._msg['callback_runtime_map'][cb_name] = (current_time, 0)
        # Store (first_seen, last_seen, human-readable elapsed) per callback.
        self._msg['callback_runtime_map'][cb_name] = (
            self._msg['callback_runtime_map'].get(cb_name)[0], current_time,
            human_time(current_time - self._msg['callback_runtime_map'].get(cb_name)[0]))
        if self.setting.redis_msg: await self._write_msg_to_redis()

    async def _write_msg_to_redis(self):
        """Mirror the stats snapshot to Redis under ``Spider:<name>``."""
        try:
            await self.setting.redis_client.set('Spider:{}'.format(self.name), json.dumps(self.msg))
        except Exception as e:
            if e.__class__.__name__ == 'ConnectionError':
                logger.warning('Redis 连接失败')
            else:
                pretty_error(e, self.logger)

    async def _clear_filter(self):
        """Delete the Redis dedup-filter set for this spider."""
        await self.setting.redis_client.delete('{}:filter'.format(self.name))

    async def _drop_spider(self):
        """Stop immediately and discard the Redis request queue and filter."""
        self._stop = 0
        if not self.setting.clear_filter: self.setting.clear_filter = True
        await self.setting.redis_client.delete('{}:requests'.format(self.name))
        if self.setting.aiohttp_clientsession: await self.setting.aiohttp_clientsession.close()
        self._close_msg()

    async def _stop_spider(self):
        """Stop gracefully, keeping the dedup filter for a later resume."""
        self._stop = 0
        if self.setting.clear_filter: self.setting.clear_filter = False
        if self.setting.aiohttp_clientsession: await self.setting.aiohttp_clientsession.close()
        self._close_msg()

    def clear_filter(self):
        """Synchronous wrapper around :meth:`_clear_filter`."""
        self.exec_coro(self._clear_filter())

    def stop_spider(self):
        """Synchronous wrapper around :meth:`_stop_spider`."""
        self.exec_coro(self._stop_spider())

    def drop_spider(self):
        """Synchronous wrapper around :meth:`_drop_spider`."""
        self.exec_coro(self._drop_spider())

    def exec_coro(self, coro):
        """Drive a coroutine synchronously via ``send(None)``.

        NOTE(review): this only completes coroutines that never await a
        pending future (e.g. real network I/O would raise/park) — confirm the
        wrapped coroutines are safe to run this way.
        """
        try:
            coro.send(None)
        except StopIteration as e:
            return e.value

    def _collect_msg(self, cb_name, req):
        """Format the one-line progress string logged before each request."""
        return '[R {}/{:.2f}] [D {}/{:.2f}] [I {}/{:.2f}] [{:.2f}] {} -> [{}] {} '.format(
            self._msg['requests'],
            self._msg['request_speed'],
            self._msg['downloaded'],
            self._msg['download_speed'],
            self._msg['items'],
            self._msg['item_speed'],
            self._msg['runtime'],
            req.__dict__.get('method'),
            cb_name,
            req.__dict__.get('url'),
        )

    @property
    def msg(self):
        """Public, JSON-serializable snapshot of the spider's statistics."""
        return {
            'status': 'Closed' if self._stop <= 0 else 'Running',
            'item': self._msg['items'],
            'requests': self._msg['requests'],
            'requesting': self._msg['requesting'],
            'request_speed': self._msg['request_speed'],
            'downloaded': self._msg['downloaded'],
            'runtime': round(self._msg['runtime'], 2),
            'item_speed': round(self._msg['item_speed'], 2),
            'download_speed': round(self._msg['download_speed'], 2),
            'download_failed': self._msg['download_failed'],
            'item_dropped': self._msg['item_dropped'],
            'request_dropped': self._msg['request_dropped'],
            'response_dropped': self._msg['response_dropped'],
            'yield_item': self._msg['yield_item_map'],
            'yield_request': self._msg['yield_request_map'],
            'item_pipelines': [_.__name__ for _ in self.setting.item_pipelines],
            'request_middlewares': [_.__name__ for _ in self.setting.request_middlewares],
            'response_middlewares': [_.__name__ for _ in self.setting.response_middlewares],
            'callback_runtime': self._msg['callback_runtime_map'],
        }

    def _close_msg(self):
        """Log the final shutdown summary (drops, counts, speeds, runtimes)."""
        if self._stop > 0: self._stop = 0
        self.logger.info('')
        fmt = '{:21s}: {}'
        self.logger.info(fmt.format('Dropped', {k: v for k, v in self.msg.items() if 'dropped' in k}))
        # NOTE(review): 'request_failed' is not a key of `msg` (the stat is
        # 'download_failed'), so it is silently omitted here — confirm intent.
        self.logger.info(fmt.format(
            'Download', {k: v for k, v in self.msg.items() if k in ['item', 'requests', 'downloaded', 'request_failed']}
        ))
        self.logger.info(fmt.format('Speed', {k: v for k, v in self.msg.items() if 'speed' in k}))
        self.logger.info(fmt.format('Received Response', self._msg['yield_request_map']))
        self.logger.info(fmt.format('Yield Item', self._msg['yield_item_map']))
        self.logger.info(fmt.format('Item Pipelines', [_.__name__ for _ in self.setting.item_pipelines]))
        self.logger.info(fmt.format('Request Middlewares', [_.__name__ for _ in self.setting.request_middlewares]))
        self.logger.info(fmt.format('Response Middlewares', [_.__name__ for _ in self.setting.response_middlewares]))
        self.logger.info(fmt.format(
            'Runtime', {
                'total': human_time(self._msg['runtime']),
                **{k: v[-1] for k, v in self._msg['callback_runtime_map'].items()}
            }
        ))
        self.logger.info(f' {self.name} Closed '.center(100, '='))
|
import os
import sys
import time
import json
import random
import os.path
import logging
import aiohttp
import asyncio
from w3lib.url import canonicalize_url
from espider.request import Request
from espider.utils import (
get_md5, human_time, pretty_error
)
from espider.response import Response
from espider.settings import SpiderSetting
from espider._utils._colorlog import ColoredFormatter
from inspect import isgenerator
from collections.abc import Iterable, Coroutine, Generator
logger = logging.getLogger(__name__)
class Spider(object):
def __init__(self, name=None):
self.name = name or self.__class__.__name__
self.setting = SpiderSetting(self)
self._priority_callback_map = {}
self._next_priority_index = 1
self.headers = self.headers if hasattr(self, 'headers') else {}
self.cookies = self.cookies if hasattr(self, 'cookies') else {}
self._start_time = time.time()
self.logger = logging.getLogger(self.name)
self._stop = 1
self._loop = asyncio.get_event_loop()
self.logger.setLevel(self.setting.log_level or logging.DEBUG)
if not self.logger.handlers:
sh = logging.StreamHandler()
sh.setLevel(self.setting.log_level or logging.DEBUG)
formatter = ColoredFormatter(fmt=self.setting.log_format, datefmt=self.setting.log_datefmt)
sh.setFormatter(formatter)
self.logger.addHandler(sh)
self._msg = {
'requests': 0,
'requesting': 0,
'request_speed': 0,
'downloaded': 0,
'download_failed': 0,
'runtime': 0.0,
'items': 0,
'item_speed': 0,
'item_dropped': 0,
'download_speed': 0,
'request_dropped': 0,
'response_dropped': 0,
'yield_item_map': {},
'yield_request_map': {},
'callback_runtime_map': {},
}
@property
def loop(self):
if not self._loop:
try:
self._loop = asyncio.get_event_loop()
except:
self._loop = asyncio.get_running_loop()
return self._loop
def start(self):
try:
self.loop.run_until_complete(self._downloader())
except KeyboardInterrupt:
self.logger.warning('KeyboardInterrupt')
self._close()
try:
sys.exit(0)
except SystemExit:
os._exit(0)
except Exception as e:
pretty_error(e, self.logger)
self._close()
async def _init_(self):
# assert params
if callable(self.setting.item_pipelines):
self.setting.item_pipelines = [self.setting.item_pipelines]
assert isinstance(self.setting.item_pipelines, Iterable), \
'ITEM_PIPELINE type error: except function or function list, get {}.'.format(self.setting.item_pipelines)
for pipe in self.setting.item_pipelines:
assert callable(pipe), 'ITEM_PIPELINE({}) not callable'.format(pipe)
if self.setting.request_middlewares is not None:
if callable(self.setting.request_middlewares):
self.setting.request_middlewares = [self.setting.request_middlewares]
self._check_middlewares(self.setting.request_middlewares)
if self.setting.response_middlewares is not None:
if callable(self.setting.response_middlewares):
self.setting.response_middlewares = [self.setting.response_middlewares]
self._check_middlewares(self.setting.response_middlewares)
# init request queue
self._msg['callback_runtime_map'][self.start_requests.__name__] = (time.time(), 0)
try:
request_list = self.start_requests()
except Exception as e:
pretty_error(e, self)
else:
if not request_list: return
if not isinstance(request_list, Iterable): request_list = [request_list]
for r in request_list:
if not r: continue
await self._process_return(self.start_requests.__name__, r)
start_time = self._msg['callback_runtime_map'].get(self.start_requests.__name__)[0]
end_time = time.time()
self._msg['callback_runtime_map'][self.start_requests.__name__] = (
start_time, end_time, human_time(end_time - start_time)
)
async def _downloader(self):
"""
请求调度函数
"""
try:
self.prepare()
await self._init_()
req_list = []
while not self.stop:
req = await self.setting.request_queue.pop()
if req: req_list.append(self.async_request(req))
if len(req_list) >= self.setting.request_batch_size or await self.setting.request_queue.empty():
# 异步请求
resp_list = await asyncio.gather(*req_list)
# 处理响应
await asyncio.gather(*[self._process_response(resp) for resp in resp_list if resp is not None])
req_list.clear()
if await self.setting.request_queue.empty(): self._stop -= 1
except Exception as e:
pretty_error(e, self.logger)
finally:
if self.setting.aiohttp_clientsession: await self.setting.aiohttp_clientsession.close()
if self.setting.redis_msg: await self._write_msg_to_redis()
if self.setting.clear_filter: await self._clear_filter()
self._close()
@staticmethod
def _check_middlewares(middlewares):
assert isinstance(middlewares, Iterable), \
'MIDDLEWARES type error: except function or function list, get {}.'.format(middlewares)
for mid in middlewares:
assert callable(mid), 'Middleware {} not callable.'.format(mid)
def request(self, url=None, method=None, data=None, json=None, headers=None, cookies=None, callback=None,
error_callback=None, cb_args=None, cb_kwargs=None, priority=None, allow_redirects=True, **kwargs):
"""
请求创建函数
"""
if callback is None: callback = self.parse
if callback.__name__ not in self._priority_callback_map.keys():
self._priority_callback_map[callback.__name__] = self._next_priority_index
self._next_priority_index += 1
if priority is None: priority = self._priority_callback_map.get(callback.__name__)
request_params = {
'url': url,
'method': method or 'GET',
'data': data,
'json': json,
'headers': headers or self.headers or {'User-Agent': random.choice(self.setting.user_agent_list)},
'cookies': cookies or self.cookies or {},
'allow_redirects': allow_redirects,
'priority': priority,
'callback': callback,
'error_callback': error_callback,
'cb_args': cb_args,
'cb_kwargs': cb_kwargs,
**kwargs,
}
return Request(**request_params)
@property
def client_session(self):
if not self.setting.aiohttp_clientsession or self.setting.aiohttp_clientsession.closed:
self.setting.aiohttp_clientsession = aiohttp.ClientSession(
connector=aiohttp.TCPConnector(limit=self.setting.request_batch_size * 2),
timeout=aiohttp.ClientTimeout(total=self.setting.request_timeout)
)
return self.setting.aiohttp_clientsession
async def async_request(self, req):
"""
异步请求
"""
# 处理请求
req = await self._process_request(req)
if req is None: return
try:
msg = self._collect_msg(req.callback.__name__, req)
self.logger.info(msg)
self._msg['requesting'] += 1
self._msg['requests'] += 1
self._msg['request_speed'] = self._msg['requests'] / (self._msg['runtime'] or 1)
if self.setting.redis_msg: await self._write_msg_to_redis()
async with self.client_session.request(
**{k: v for k, v in req.__dict__.items() if k in self.setting.request_keys}
) as _resp:
data = await _resp.read()
resp = Response(_resp)
resp.content = data
resp.request = req
except Exception as e:
self._msg['requesting'] -= 1
self._msg['download_failed'] += 1
if self.setting.redis_msg: await self._write_msg_to_redis()
result, cb_name = (req.error_callback(req, e), req.error_callback.__name__) if callable(
req.error_callback) else (self.error_callback(req, e), self.error_callback.__name__)
if result is None: return
if isinstance(result, Coroutine): result = await result
if isinstance(result, Request):
self.setting.request_queue.push(result)
elif isinstance(result, Response):
return result
else:
if not isinstance(result, Generator): result = [result]
for r in result:
await self._process_item(cb_name, r)
else:
# 更新爬虫信息
self._msg['requesting'] -= 1
await self._update_msg(req.callback.__name__)
return resp
async def _process_request(self, req):
# 调用请求中间件
req = await self._process_middleware(req, self.setting.request_middlewares)
if req is None:
self._msg['request_dropped'] += 1
return
return req
async def _process_response(self, resp):
# 调用响应中间件
resp = await self._process_middleware(resp, self.setting.response_middlewares)
if resp is None:
self._msg['response_dropped'] += 1
return
else:
try:
for r in await self._process_callback(resp):
await self._process_return(resp.request.callback.__name__, r)
except Exception as e:
pretty_error(e, self.logger)
finally:
self._stop -= 1
async def _process_middleware(self, resq, middlewares):
if not middlewares: return resq
try:
for mid in middlewares:
resq = mid(resq)
if isinstance(resq, Coroutine): resq = await resq
if not resq: return
except Exception as e:
pretty_error(e, self.logger)
else:
return resq
async def _process_callback(self, resp):
"""
处理回调函数
"""
try:
if isinstance(resp, list):
result = resp[0].request.callback(resp)
else:
result = resp.request.callback(resp, *resp.request.cb_args, **resp.request.cb_kwargs)
except Exception as e:
pretty_error(e, self.logger)
return []
else:
if isinstance(result, Coroutine): result = await result
if not result: return []
if not isgenerator(result): result = [result]
return result
async def _process_return(self, cb_name, r):
if isinstance(r, Request):
await self.setting.request_queue.push(r)
else:
if isinstance(r, list):
for i in r:
if isinstance(i, Request):
continue
else:
break
else:
resp_list = await asyncio.gather(*[self.async_request(_) for _ in r if _])
for r in await self._process_callback(resp_list):
await self._process_return(resp_list[0].request.callback.__name__, r)
self._stop -= len(resp_list)
return
await self._process_item(cb_name, r)
async def _process_item(self, cb_name, item):
"""
处理数据管道
"""
try:
for pipe in self.setting.item_pipelines:
item = pipe(item)
if isinstance(item, Coroutine): item = await item
if item is None:
self._msg['item_dropped'] += 1
except Exception as e:
pretty_error(e, self.logger)
else:
# 更新 Item 信息
self._msg['items'] += 1
self._msg['item_speed'] = self._msg['items'] / (self._msg['runtime'] or 1)
if cb_name not in self._msg['yield_item_map'].keys(): self._msg['yield_item_map'][cb_name] = 0
self._msg['yield_item_map'][cb_name] += 1
if self.setting.redis_msg: await self._write_msg_to_redis()
@property
def stop(self):
return self._stop - self._msg['response_dropped'] <= 0
def _close(self):
try:
self.close()
except Exception as e:
pretty_error(e, self.logger)
finally:
self._close_msg()
async def request_finger(self, req):
url = req.url
try:
args = [canonicalize_url(url)]
for arg in ('data', 'files', 'auth', 'cert', 'json', 'cookies'):
if req.__dict__.get(arg):
args.append(req.__dict__.get(arg))
finger = get_md5(*args)
except Exception as e:
pretty_error(e, self.logger)
else:
if isinstance(self.setting.request_filter, set):
if finger not in self.setting.request_filter:
self.setting.request_filter.add(finger)
return req
else:
self.logger.warning("filter: {}".format(req))
elif hasattr(self.setting.request_filter, 'sadd'):
if await self.setting.request_filter.sadd(self.setting.request_filter_key, finger):
return req
else:
self.logger.warning("filter: {}".format(req))
else:
self.logger.warning('Invalid request filter type: {}'.format(self.setting.request_filter))
def response_filter(self, resp):
if resp.status_code in self.setting.response_filter_code:
self.logger.warning("filter: {}".format(resp))
return resp
@staticmethod
def __split_result(result):
if isinstance(result, (dict, list, str)):
return result, 1
if isinstance(result, tuple):
if len(result) == 1:
return result[0], 1
else:
if isinstance(result[-1], dict):
item, *args, kwargs = result
return item, args, kwargs, 3
else:
item, *args = result
return item, args, 2
else:
return result, 1
def prepare(self):
    """Hook run before crawling starts; override to set up state."""
    pass
def start_requests(self):
    """Yield the initial requests; subclasses must override."""
    yield ...
def parse(self, response, *args, **kwargs):
    """Default response callback; override to extract items/requests."""
    pass
def pipeline(self, item):
    """Default item pipeline: log the item at debug level and pass it through unchanged."""
    message = 'Pipeline: {}'.format(item)
    self.logger.debug(message)
    return item
def error_callback(self, req, error):
    """Default error handler: log the error and drop the request (returns None)."""
    pretty_error(error, self.logger)
    # string-compare on the class name so asyncio/builtin TimeoutError both match
    if type(error).__name__ == 'TimeoutError':
        self.logger.warning('RequestTimeout: {}'.format(req))
    return None
def close(self):
    """Hook run when the spider closes; override for cleanup."""
    pass
async def _update_msg(self, cb_name):
    """Update runtime/download counters after a request finishes for callback *cb_name*."""
    current_time = time.time()
    self._msg['runtime'] = current_time - self._start_time
    self._msg['downloaded'] += 1
    # `or 1` guards against division by zero on the very first sample
    self._msg['download_speed'] = self._msg['downloaded'] / (self._msg['runtime'] or 1)
    self._stop += 1
    if cb_name not in self._msg['yield_request_map'].keys():
        self._msg['yield_request_map'][cb_name] = 0
    self._msg['yield_request_map'][cb_name] += 1
    if cb_name not in self._msg['callback_runtime_map'].keys():
        self._msg['callback_runtime_map'][cb_name] = (current_time, 0)
    # per-callback record: (first_seen, last_seen, human-readable elapsed)
    self._msg['callback_runtime_map'][cb_name] = (
        self._msg['callback_runtime_map'].get(cb_name)[0], current_time,
        human_time(current_time - self._msg['callback_runtime_map'].get(cb_name)[0]))
    if self.setting.redis_msg: await self._write_msg_to_redis()
async def _write_msg_to_redis(self):
    """Persist the current stats snapshot to Redis under ``Spider:<name>``."""
    try:
        await self.setting.redis_client.set('Spider:{}'.format(self.name), json.dumps(self.msg))
    except Exception as e:
        if e.__class__.__name__ == 'ConnectionError':
            # fixed: the original used a bare module-level `logger`; use the
            # spider's own logger for consistency with the rest of the class
            self.logger.warning('Redis 连接失败')
        else:
            pretty_error(e, self.logger)
async def _clear_filter(self):
    """Delete the Redis-side request-dedup set for this spider."""
    await self.setting.redis_client.delete('{}:filter'.format(self.name))
async def _drop_spider(self):
    """Abort the crawl: discard queued requests, request filter clearing, close the session."""
    self._stop = 0
    if not self.setting.clear_filter: self.setting.clear_filter = True
    await self.setting.redis_client.delete('{}:requests'.format(self.name))
    if self.setting.aiohttp_clientsession: await self.setting.aiohttp_clientsession.close()
    self._close_msg()
async def _stop_spider(self):
    """Stop the crawl but keep the dedup filter (queued requests survive for resume)."""
    self._stop = 0
    if self.setting.clear_filter: self.setting.clear_filter = False
    if self.setting.aiohttp_clientsession: await self.setting.aiohttp_clientsession.close()
    self._close_msg()
def clear_filter(self):
    """Synchronously clear the Redis request filter."""
    self.exec_coro(self._clear_filter())

def stop_spider(self):
    """Synchronously stop the crawl, keeping the dedup filter."""
    self.exec_coro(self._stop_spider())

def drop_spider(self):
    """Synchronously abort the crawl and discard pending state."""
    self.exec_coro(self._drop_spider())
def exec_coro(self, coro):
    """Drive *coro* to completion synchronously and return its result.

    Fixed: the original called ``send(None)`` exactly once, which silently
    abandoned any coroutine that suspends (awaits) before finishing and
    returned None.  Loop until StopIteration so multi-step coroutines run
    to completion.
    """
    while True:
        try:
            coro.send(None)
        except StopIteration as e:
            return e.value
def _collect_msg(self, cb_name, req):
    """One-line progress summary: [R requests/speed] [D downloaded/speed] [I items/speed] [runtime] method -> [callback] url."""
    return '[R {}/{:.2f}] [D {}/{:.2f}] [I {}/{:.2f}] [{:.2f}] {} -> [{}] {} '.format(
        self._msg['requests'],
        self._msg['request_speed'],
        self._msg['downloaded'],
        self._msg['download_speed'],
        self._msg['items'],
        self._msg['item_speed'],
        self._msg['runtime'],
        req.__dict__.get('method'),
        cb_name,
        req.__dict__.get('url'),
    )
@property
def msg(self):
    """Snapshot of crawl statistics as a plain dict (also what gets persisted to Redis)."""
    return {
        'status': 'Closed' if self._stop <= 0 else 'Running',
        'item': self._msg['items'],
        'requests': self._msg['requests'],
        'requesting': self._msg['requesting'],
        'request_speed': self._msg['request_speed'],
        'downloaded': self._msg['downloaded'],
        # floats rounded for display/serialization
        'runtime': round(self._msg['runtime'], 2),
        'item_speed': round(self._msg['item_speed'], 2),
        'download_speed': round(self._msg['download_speed'], 2),
        'download_failed': self._msg['download_failed'],
        'item_dropped': self._msg['item_dropped'],
        'request_dropped': self._msg['request_dropped'],
        'response_dropped': self._msg['response_dropped'],
        'yield_item': self._msg['yield_item_map'],
        'yield_request': self._msg['yield_request_map'],
        'item_pipelines': [_.__name__ for _ in self.setting.item_pipelines],
        'request_middlewares': [_.__name__ for _ in self.setting.request_middlewares],
        'response_middlewares': [_.__name__ for _ in self.setting.response_middlewares],
        'callback_runtime': self._msg['callback_runtime_map'],
    }
def _close_msg(self):
    """Log the final statistics summary and mark the spider closed."""
    if self._stop > 0: self._stop = 0
    self.logger.info('')
    fmt = '{:21s}: {}'
    self.logger.info(fmt.format('Dropped', {k: v for k, v in self.msg.items() if 'dropped' in k}))
    # fixed: the filter listed 'request_failed', which is not a key of
    # self.msg — the actual key is 'download_failed', so failure counts
    # were silently omitted from the summary
    self.logger.info(fmt.format(
        'Download', {k: v for k, v in self.msg.items() if k in ['item', 'requests', 'downloaded', 'download_failed']}
    ))
    self.logger.info(fmt.format('Speed', {k: v for k, v in self.msg.items() if 'speed' in k}))
    self.logger.info(fmt.format('Received Response', self._msg['yield_request_map']))
    self.logger.info(fmt.format('Yield Item', self._msg['yield_item_map']))
    self.logger.info(fmt.format('Item Pipelines', [_.__name__ for _ in self.setting.item_pipelines]))
    self.logger.info(fmt.format('Request Middlewares', [_.__name__ for _ in self.setting.request_middlewares]))
    self.logger.info(fmt.format('Response Middlewares', [_.__name__ for _ in self.setting.response_middlewares]))
    self.logger.info(fmt.format(
        'Runtime', {
            'total': human_time(self._msg['runtime']),
            **{k: v[-1] for k, v in self._msg['callback_runtime_map'].items()}
        }
    ))
    self.logger.info(f' {self.name} Closed '.center(100, '='))
|
zh
| 0.986937
|
# assert params # init request queue 请求调度函数 # 异步请求 # 处理响应 请求创建函数 异步请求 # 处理请求 # 更新爬虫信息 # 调用请求中间件 # 调用响应中间件 处理回调函数 处理数据管道 # 更新 Item 信息
| 1.993469
| 2
|
number digits to words.py
|
roseler/python
| 2
|
6627829
|
# Digit -> word parts.  The optional second entry is the stem used to build
# "-teen" / "-ty" forms (e.g. "twen" -> "twenty", "fif" -> "fifteen").
one_digit_words = {
    '0': ["zero"],
    '1': ["one"],
    '2': ["two", "twen"],
    '3': ["three", "thir"],
    '4': ["four", "for"],
    '5': ["five", "fif"],
    '6': ["six"],
    '7': ["seven"],
    '8': ["eight"],
    '9': ["nine"],
}
# Irregular names for 10-12; 13-19 are derived from the stems above.
two_digit_words = ["ten", "eleven", "twelve"]
hundred = "hundred"
# Scale words for successive 3-digit groups, smallest first.
large_sum_words = ["thousand", "million", "billion", "trillion", "quadrillion", "quintillion", "sextillion", "septillion", "octillion", "nonillion"]
def converter(n):
    """Convert a decimal integer string *n* (optionally negative) to English words.

    NOTE(review): indentation was reconstructed from a whitespace-stripped dump;
    the grouping below follows the apparent control flow — verify against upstream.
    """
    word = []
    if n.startswith('-'):
        word.append("(negative)")
        n = n[1:]
    # pad with leading zeros so the digits split evenly into 3-digit groups
    if len(n) % 3 != 0 and len(n) > 3:
        n = n.zfill(3 * (((len(n)-1) // 3) + 1))
    sum_list = [n[i:i + 3] for i in range(0, len(n), 3)]
    skip = False  # True right after a scale word (thousand/million/...) was emitted
    for i, num in enumerate(sum_list):
        if num != '000': skip = False
        for _ in range(len(num)):
            num = num.lstrip('0')
            if len(num) == 1:
                # "and" joins the final group to a preceding hundred/scale word
                if (len(sum_list) > 1 or (len(sum_list) == 1 and len(sum_list[0]) == 3)) and i == len(sum_list) - 1 and (word[-1] in large_sum_words or hundred in word[-1]):
                    word.append("and")
                word.append(one_digit_words[num][0])
                num = num[1:]
                break
            if len(num) == 2:
                if num[0] != '0':
                    if (len(sum_list) > 1 or (len(sum_list) == 1 and len(sum_list[0]) == 3)) and i == len(sum_list) - 1:
                        word.append("and")
                    if num.startswith('1'):
                        # 10-19: irregular names for 10-12, "-teen" forms otherwise
                        if int(num[1]) in range(3):
                            word.append(two_digit_words[int(num[1])])
                        else:
                            number = one_digit_words[num[1]][1 if int(num[1]) in range(3, 6, 2) else 0]
                            word.append(number + ("teen" if not number[-1] == 't' else "een"))
                    else:
                        # 20-99: tens stem + "ty"/"y" + optional units word
                        word.append(one_digit_words[num[0]][1 if int(num[0]) in range(2, 6) else 0] + ("ty " if num[0] != '8' else 'y ') + (one_digit_words[num[1]][0] if num[1] != '0' else ""))
                    break
                else:
                    num = num[1:]
                    continue
            if len(num) == 3:
                if num[0] != '0':
                    word.append(one_digit_words[num[0]][0] + " " + hundred)
                    if num[1:] == '00': break
                num = num[1:]
        # append the scale word for this group unless one was just emitted
        if len(sum_list[i:]) > 1 and not skip:
            word.append(large_sum_words[len(sum_list[i:]) - 2])
            skip = True
    word = " ".join(map(str.strip, word))
    return word[0].lstrip().upper() + word[1:].rstrip().lower() if "negative" not in word else word[:11].lstrip() + word[11].upper() + word[12:].rstrip().lower()
if __name__ == "__main__":
    # Interactive REPL: convert numbers until the user types "exit".
    prompt = "Enter any number to convert it into words or 'exit' to stop: "
    while True:
        try:
            raw = input(prompt)
            if raw == "exit":
                break
            int(raw)  # validate before converting
            print(raw, "-->", converter(raw))
        except ValueError:
            print("Error: Invalid Number!")
|
# Digit -> word parts.  The optional second entry is the stem used to build
# "-teen" / "-ty" forms (e.g. "twen" -> "twenty", "fif" -> "fifteen").
one_digit_words = {
    '0': ["zero"],
    '1': ["one"],
    '2': ["two", "twen"],
    '3': ["three", "thir"],
    '4': ["four", "for"],
    '5': ["five", "fif"],
    '6': ["six"],
    '7': ["seven"],
    '8': ["eight"],
    '9': ["nine"],
}
# Irregular names for 10-12; 13-19 are derived from the stems above.
two_digit_words = ["ten", "eleven", "twelve"]
hundred = "hundred"
# Scale words for successive 3-digit groups, smallest first.
large_sum_words = ["thousand", "million", "billion", "trillion", "quadrillion", "quintillion", "sextillion", "septillion", "octillion", "nonillion"]
def converter(n):
    """Convert a decimal integer string *n* (optionally negative) to English words.

    NOTE(review): indentation was reconstructed from a whitespace-stripped dump;
    the grouping below follows the apparent control flow — verify against upstream.
    """
    word = []
    if n.startswith('-'):
        word.append("(negative)")
        n = n[1:]
    # pad with leading zeros so the digits split evenly into 3-digit groups
    if len(n) % 3 != 0 and len(n) > 3:
        n = n.zfill(3 * (((len(n)-1) // 3) + 1))
    sum_list = [n[i:i + 3] for i in range(0, len(n), 3)]
    skip = False  # True right after a scale word (thousand/million/...) was emitted
    for i, num in enumerate(sum_list):
        if num != '000': skip = False
        for _ in range(len(num)):
            num = num.lstrip('0')
            if len(num) == 1:
                # "and" joins the final group to a preceding hundred/scale word
                if (len(sum_list) > 1 or (len(sum_list) == 1 and len(sum_list[0]) == 3)) and i == len(sum_list) - 1 and (word[-1] in large_sum_words or hundred in word[-1]):
                    word.append("and")
                word.append(one_digit_words[num][0])
                num = num[1:]
                break
            if len(num) == 2:
                if num[0] != '0':
                    if (len(sum_list) > 1 or (len(sum_list) == 1 and len(sum_list[0]) == 3)) and i == len(sum_list) - 1:
                        word.append("and")
                    if num.startswith('1'):
                        # 10-19: irregular names for 10-12, "-teen" forms otherwise
                        if int(num[1]) in range(3):
                            word.append(two_digit_words[int(num[1])])
                        else:
                            number = one_digit_words[num[1]][1 if int(num[1]) in range(3, 6, 2) else 0]
                            word.append(number + ("teen" if not number[-1] == 't' else "een"))
                    else:
                        # 20-99: tens stem + "ty"/"y" + optional units word
                        word.append(one_digit_words[num[0]][1 if int(num[0]) in range(2, 6) else 0] + ("ty " if num[0] != '8' else 'y ') + (one_digit_words[num[1]][0] if num[1] != '0' else ""))
                    break
                else:
                    num = num[1:]
                    continue
            if len(num) == 3:
                if num[0] != '0':
                    word.append(one_digit_words[num[0]][0] + " " + hundred)
                    if num[1:] == '00': break
                num = num[1:]
        # append the scale word for this group unless one was just emitted
        if len(sum_list[i:]) > 1 and not skip:
            word.append(large_sum_words[len(sum_list[i:]) - 2])
            skip = True
    word = " ".join(map(str.strip, word))
    return word[0].lstrip().upper() + word[1:].rstrip().lower() if "negative" not in word else word[:11].lstrip() + word[11].upper() + word[12:].rstrip().lower()
if __name__ == "__main__":
    # Interactive REPL: convert numbers until the user types "exit".
    prompt = "Enter any number to convert it into words or 'exit' to stop: "
    while True:
        try:
            raw = input(prompt)
            if raw == "exit":
                break
            int(raw)  # validate before converting
            print(raw, "-->", converter(raw))
        except ValueError:
            print("Error: Invalid Number!")
|
none
| 1
| 3.537859
| 4
|
|
utils/metrics/CD/unit_test.py
|
Dizzy-cell/HOUV
| 99
|
6627830
|
import torch, time
import chamfer2D.dist_chamfer_2D
import chamfer3D.dist_chamfer_3D
import chamfer5D.dist_chamfer_5D
import chamfer_python

# CUDA-backed Chamfer distance operators for 2/3/5-dimensional point clouds
cham2D = chamfer2D.dist_chamfer_2D.chamfer_2DDist()
cham3D = chamfer3D.dist_chamfer_3D.chamfer_3DDist()
cham5D = chamfer5D.dist_chamfer_5D.chamfer_5DDist()

from torch.autograd import Variable
from fscore import fscore
def test_chamfer(distChamfer, dim):
    """Check the CUDA chamfer kernel against the reference Python implementation."""
    points1 = torch.rand(4, 100, dim).cuda()
    points2 = torch.rand(4, 200, dim, requires_grad=True).cuda()
    dist1, dist2, idx1, idx2 = distChamfer(points1, points2)
    torch.sum(dist1).backward()  # exercise the backward pass too
    ref_d1, ref_d2, ref_i1, ref_i2 = chamfer_python.distChamfer(points1, points2)
    sq_err = torch.mean((dist1 - ref_d1) ** 2) + torch.mean((dist2 - ref_d2) ** 2)
    assert (
        sq_err < 0.00000001
    ), "chamfer cuda and chamfer normal are not giving the same results"
    idx_diff = torch.norm((idx1 - ref_i1).float()) + torch.norm((idx2 - ref_i2).float())
    assert (
        idx_diff == 0
    ), "chamfer cuda and chamfer normal are not giving the same results"
    print(f"fscore :", fscore(dist1, dist2))
    print("Unit test passed")
def timings(distChamfer, dim):
    """Benchmark forward+backward for the CUDA kernel vs the pure-Python reference."""
    p1 = torch.rand(32, 2000, dim).cuda()
    p2 = torch.rand(32, 1000, dim).cuda()
    num_it = 100

    def bench(chamfer_fn):
        # average one forward+backward over num_it iterations
        start = time.time()
        for _ in range(num_it):
            pts1 = Variable(p1, requires_grad=True)
            pts2 = Variable(p2)
            d1, d2, i1, i2 = chamfer_fn(pts1, pts2)
            torch.sum(d1).backward()
        print(f"Ellapsed time forward backward is {(time.time() - start)/num_it} seconds.")

    print("Timings : Start CUDA version")
    bench(distChamfer)
    print("Timings : Start Pythonic version")
    bench(chamfer_python.distChamfer)
# Run correctness + timing checks for each supported dimensionality.
dims = [2, 3, 5]
for dim, cham in zip(dims, [cham2D, cham3D, cham5D]):
    print(f"testing Chamfer {dim}D")
    test_chamfer(cham, dim)
    timings(cham, dim)
|
import torch, time
import chamfer2D.dist_chamfer_2D
import chamfer3D.dist_chamfer_3D
import chamfer5D.dist_chamfer_5D
import chamfer_python

# CUDA-backed Chamfer distance operators for 2/3/5-dimensional point clouds
cham2D = chamfer2D.dist_chamfer_2D.chamfer_2DDist()
cham3D = chamfer3D.dist_chamfer_3D.chamfer_3DDist()
cham5D = chamfer5D.dist_chamfer_5D.chamfer_5DDist()

from torch.autograd import Variable
from fscore import fscore
def test_chamfer(distChamfer, dim):
    """Check the CUDA chamfer kernel against the reference Python implementation."""
    points1 = torch.rand(4, 100, dim).cuda()
    points2 = torch.rand(4, 200, dim, requires_grad=True).cuda()
    dist1, dist2, idx1, idx2 = distChamfer(points1, points2)
    torch.sum(dist1).backward()  # exercise the backward pass too
    ref_d1, ref_d2, ref_i1, ref_i2 = chamfer_python.distChamfer(points1, points2)
    sq_err = torch.mean((dist1 - ref_d1) ** 2) + torch.mean((dist2 - ref_d2) ** 2)
    assert (
        sq_err < 0.00000001
    ), "chamfer cuda and chamfer normal are not giving the same results"
    idx_diff = torch.norm((idx1 - ref_i1).float()) + torch.norm((idx2 - ref_i2).float())
    assert (
        idx_diff == 0
    ), "chamfer cuda and chamfer normal are not giving the same results"
    print(f"fscore :", fscore(dist1, dist2))
    print("Unit test passed")
def timings(distChamfer, dim):
    """Benchmark forward+backward for the CUDA kernel vs the pure-Python reference."""
    p1 = torch.rand(32, 2000, dim).cuda()
    p2 = torch.rand(32, 1000, dim).cuda()
    num_it = 100

    def bench(chamfer_fn):
        # average one forward+backward over num_it iterations
        start = time.time()
        for _ in range(num_it):
            pts1 = Variable(p1, requires_grad=True)
            pts2 = Variable(p2)
            d1, d2, i1, i2 = chamfer_fn(pts1, pts2)
            torch.sum(d1).backward()
        print(f"Ellapsed time forward backward is {(time.time() - start)/num_it} seconds.")

    print("Timings : Start CUDA version")
    bench(distChamfer)
    print("Timings : Start Pythonic version")
    bench(chamfer_python.distChamfer)
# Run correctness + timing checks for each supported dimensionality.
dims = [2, 3, 5]
for dim, cham in zip(dims, [cham2D, cham3D, cham5D]):
    print(f"testing Chamfer {dim}D")
    test_chamfer(cham, dim)
    timings(cham, dim)
|
none
| 1
| 2.335685
| 2
|
|
databuilder/models/hive_watermark.py
|
feng-tao/amundsendatabuilder
| 0
|
6627831
|
<reponame>feng-tao/amundsendatabuilder
from typing import Any, Dict, List, Union # noqa: F401
from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable, NODE_KEY, \
NODE_LABEL, RELATION_START_KEY, RELATION_START_LABEL, RELATION_END_KEY, \
RELATION_END_LABEL, RELATION_TYPE, RELATION_REVERSE_TYPE
class HiveWatermark(Neo4jCsvSerializable):
    """
    Hive table watermark result model.
    Each instance represents one row of hive watermark result.
    """
    LABEL = 'Watermark'
    # key shape: hive://<cluster>.<schema>/<table>/<part_type>/
    KEY_FORMAT = 'hive://{cluster}.{schema}' \
                 '/{table}/{part_type}/'
    WATERMARK_TABLE_RELATION_TYPE = 'BELONG_TO_TABLE'
    TABLE_WATERMARK_RELATION_TYPE = 'WATERMARK'

    def __init__(self,
                 create_time,  # type: str
                 schema_name,  # type: str
                 table_name,  # type: str
                 part_name,  # type: str  e.g. 'ds=2020-01-01'
                 part_type='high_watermark',  # type: str
                 cluster='gold',  # type: str
                 ):
        # type: (...) -> None
        self.create_time = create_time
        self.schema = schema_name.lower()
        self.table = table_name.lower()
        self.parts = []  # type: list
        if '=' not in part_name:
            raise Exception('Only partition table has high watermark')
        # currently we don't consider nested partitions
        idx = part_name.find('=')
        name, value = part_name.lower()[:idx], part_name.lower()[idx + 1:]
        self.parts = [(name, value)]
        self.part_type = part_type.lower()
        self.cluster = cluster.lower()
        # iterators drained one record at a time by create_next_node/relation
        self._node_iter = iter(self.create_nodes())
        self._relation_iter = iter(self.create_relation())

    def create_next_node(self):
        # type: (...) -> Union[Dict[str, Any], None]
        """Return the next node record, or None when exhausted."""
        try:
            return next(self._node_iter)
        except StopIteration:
            return None

    def create_next_relation(self):
        # type: (...) -> Union[Dict[str, Any], None]
        """Return the next relation record, or None when exhausted."""
        try:
            return next(self._relation_iter)
        except StopIteration:
            return None

    def get_watermark_model_key(self):
        # type: (...) -> str
        """Neo4j key of the watermark node itself."""
        return HiveWatermark.KEY_FORMAT.format(cluster=self.cluster,
                                               schema=self.schema,
                                               table=self.table,
                                               part_type=self.part_type)

    def get_metadata_model_key(self):
        # type: (...) -> str
        """Neo4j key of the hive table this watermark belongs to."""
        return 'hive://{cluster}.{schema}/{table}'.format(cluster=self.cluster,
                                                          schema=self.schema,
                                                          table=self.table)

    def create_nodes(self):
        # type: () -> List[Dict[str, Any]]
        """
        Create a list of Neo4j node records
        :return:
        """
        results = []
        for part in self.parts:
            results.append({
                NODE_KEY: self.get_watermark_model_key(),
                NODE_LABEL: HiveWatermark.LABEL,
                'partition_key': part[0],
                'partition_value': part[1],
                'create_time': self.create_time
            })
        return results

    def create_relation(self):
        # type: () -> List[Dict[str, Any]]
        """
        Create a list of relation map between watermark record with original hive table
        :return:
        """
        results = [{
            RELATION_START_KEY: self.get_watermark_model_key(),
            RELATION_START_LABEL: HiveWatermark.LABEL,
            RELATION_END_KEY: self.get_metadata_model_key(),
            RELATION_END_LABEL: 'Table',
            RELATION_TYPE: HiveWatermark.WATERMARK_TABLE_RELATION_TYPE,
            RELATION_REVERSE_TYPE: HiveWatermark.TABLE_WATERMARK_RELATION_TYPE
        }]
        return results
|
from typing import Any, Dict, List, Union # noqa: F401
from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable, NODE_KEY, \
NODE_LABEL, RELATION_START_KEY, RELATION_START_LABEL, RELATION_END_KEY, \
RELATION_END_LABEL, RELATION_TYPE, RELATION_REVERSE_TYPE
class HiveWatermark(Neo4jCsvSerializable):
    """
    Hive table watermark result model.
    Each instance represents one row of hive watermark result.
    """
    LABEL = 'Watermark'
    # key shape: hive://<cluster>.<schema>/<table>/<part_type>/
    KEY_FORMAT = 'hive://{cluster}.{schema}' \
                 '/{table}/{part_type}/'
    WATERMARK_TABLE_RELATION_TYPE = 'BELONG_TO_TABLE'
    TABLE_WATERMARK_RELATION_TYPE = 'WATERMARK'

    def __init__(self,
                 create_time,  # type: str
                 schema_name,  # type: str
                 table_name,  # type: str
                 part_name,  # type: str  e.g. 'ds=2020-01-01'
                 part_type='high_watermark',  # type: str
                 cluster='gold',  # type: str
                 ):
        # type: (...) -> None
        self.create_time = create_time
        self.schema = schema_name.lower()
        self.table = table_name.lower()
        self.parts = []  # type: list
        if '=' not in part_name:
            raise Exception('Only partition table has high watermark')
        # currently we don't consider nested partitions
        idx = part_name.find('=')
        name, value = part_name.lower()[:idx], part_name.lower()[idx + 1:]
        self.parts = [(name, value)]
        self.part_type = part_type.lower()
        self.cluster = cluster.lower()
        # iterators drained one record at a time by create_next_node/relation
        self._node_iter = iter(self.create_nodes())
        self._relation_iter = iter(self.create_relation())

    def create_next_node(self):
        # type: (...) -> Union[Dict[str, Any], None]
        """Return the next node record, or None when exhausted."""
        try:
            return next(self._node_iter)
        except StopIteration:
            return None

    def create_next_relation(self):
        # type: (...) -> Union[Dict[str, Any], None]
        """Return the next relation record, or None when exhausted."""
        try:
            return next(self._relation_iter)
        except StopIteration:
            return None

    def get_watermark_model_key(self):
        # type: (...) -> str
        """Neo4j key of the watermark node itself."""
        return HiveWatermark.KEY_FORMAT.format(cluster=self.cluster,
                                               schema=self.schema,
                                               table=self.table,
                                               part_type=self.part_type)

    def get_metadata_model_key(self):
        # type: (...) -> str
        """Neo4j key of the hive table this watermark belongs to."""
        return 'hive://{cluster}.{schema}/{table}'.format(cluster=self.cluster,
                                                          schema=self.schema,
                                                          table=self.table)

    def create_nodes(self):
        # type: () -> List[Dict[str, Any]]
        """
        Create a list of Neo4j node records
        :return:
        """
        results = []
        for part in self.parts:
            results.append({
                NODE_KEY: self.get_watermark_model_key(),
                NODE_LABEL: HiveWatermark.LABEL,
                'partition_key': part[0],
                'partition_value': part[1],
                'create_time': self.create_time
            })
        return results

    def create_relation(self):
        # type: () -> List[Dict[str, Any]]
        """
        Create a list of relation map between watermark record with original hive table
        :return:
        """
        results = [{
            RELATION_START_KEY: self.get_watermark_model_key(),
            RELATION_START_LABEL: HiveWatermark.LABEL,
            RELATION_END_KEY: self.get_metadata_model_key(),
            RELATION_END_LABEL: 'Table',
            RELATION_TYPE: HiveWatermark.WATERMARK_TABLE_RELATION_TYPE,
            RELATION_REVERSE_TYPE: HiveWatermark.TABLE_WATERMARK_RELATION_TYPE
        }]
        return results
|
en
| 0.54238
|
# noqa: F401 # type: (...) -> None Hive table watermark result model. Each instance represents one row of hive watermark result. # type: str # type: str # type: str # type: str # type: str # type: str # type: (...) -> None # type: list # currently we don't consider nested partitions # type: (...) -> Union[Dict[str, Any], None] # return the string representation of the data # type: (...) -> Union[Dict[str, Any], None] # type: (...) -> str # type: (...) -> str # type: () -> List[Dict[str, Any]] Create a list of Neo4j node records :return: # type: () -> List[Dict[str, Any]] Create a list of relation map between watermark record with original hive table :return:
| 2.414249
| 2
|
ecommerce_app/models.py
|
raonyguimaraes/mendelmd
| 33
|
6627832
|
<reponame>raonyguimaraes/mendelmd
from enum import Enum
from django.db import models
from django.contrib.auth.models import User
class OrderType(Enum):
    """Kinds of orders the shop supports."""
    SUBSCRIPTION = 'Subscription'
    PRODUCT = 'Product'

    def __str__(self):
        return str(self.value)

    @classmethod
    def choices(cls):
        """(value, name) pairs suitable for a Django ``choices`` option."""
        return [(member.value, member.name) for member in cls]
class PaymentStatus(Enum):
    """Lifecycle states of an order's payment."""
    PROCESSING = 'Processing'
    PAID = 'Paid'
    REFUSED = 'Refused'
    CANCELED = 'Canceled'

    def __str__(self):
        return str(self.value)

    @classmethod
    def choices(cls):
        """(value, name) pairs suitable for a Django ``choices`` option."""
        return [(member.value, member.name) for member in cls]
class Product(models.Model):
    """A purchasable product; may represent a recurring subscription."""
    name = models.CharField(max_length=191)
    price = models.DecimalField(max_digits=7, decimal_places=2)
    slug = models.SlugField()
    description = models.TextField()
    image = models.ImageField(upload_to='products_images/', blank=True)  # optional
    is_subscription = models.BooleanField(default=True)

    def __str__(self):
        return self.name
class CartItem(models.Model):
    """A product line held in a pre-checkout shopping cart, grouped by cart_id."""
    cart_id = models.CharField(max_length=50)
    price = models.DecimalField(max_digits=7, decimal_places=2)  # unit price when added
    quantity = models.IntegerField()
    date_added = models.DateTimeField(auto_now_add=True)
    # PROTECT: a product referenced by a cart cannot be deleted
    product = models.ForeignKey(Product, on_delete=models.PROTECT)

    def __str__(self):
        return "{}:{}".format(self.product.name, self.id)

    def update_quantity(self, quantity):
        """Add *quantity* to the current amount and persist the row."""
        self.quantity = self.quantity + quantity
        self.save()

    def total_cost(self):
        """Line total: quantity times unit price."""
        return self.quantity * self.price
class Order(models.Model):
    """A placed order; its lines live in the related LineItem set."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    date = models.DateTimeField(auto_now_add=True)
    paid = models.BooleanField(default=False)
    payment_status = models.CharField(max_length=20,
                                      choices=PaymentStatus.choices(),
                                      default=PaymentStatus.PROCESSING.__str__())
    order_type = models.CharField(max_length=20,
                                  choices=OrderType.choices(),
                                  default=OrderType.SUBSCRIPTION.__str__())

    def __str__(self):
        return "{}:{}:{}".format(self.id, self.order_type, self.user.email)

    def total_cost(self):
        """Sum of all line-item costs for this order."""
        return sum([li.cost() for li in self.lineitem_set.all()])
class LineItem(models.Model):
    """One product line of a placed Order, with the price frozen at purchase time."""
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    price = models.DecimalField(max_digits=7, decimal_places=2)  # unit price at purchase
    quantity = models.IntegerField()
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return "{}:{}".format(self.product.name, self.id)

    def cost(self):
        """Line total: unit price times quantity."""
        return self.price * self.quantity
|
from enum import Enum
from django.db import models
from django.contrib.auth.models import User
class OrderType(Enum):
    """Kinds of orders the shop supports."""
    SUBSCRIPTION = 'Subscription'
    PRODUCT = 'Product'

    def __str__(self):
        return str(self.value)

    @classmethod
    def choices(cls):
        """(value, name) pairs suitable for a Django ``choices`` option."""
        return [(member.value, member.name) for member in cls]
class PaymentStatus(Enum):
    """Lifecycle states of an order's payment."""
    PROCESSING = 'Processing'
    PAID = 'Paid'
    REFUSED = 'Refused'
    CANCELED = 'Canceled'

    def __str__(self):
        return str(self.value)

    @classmethod
    def choices(cls):
        """(value, name) pairs suitable for a Django ``choices`` option."""
        return [(member.value, member.name) for member in cls]
class Product(models.Model):
    """A purchasable product; may represent a recurring subscription."""
    name = models.CharField(max_length=191)
    price = models.DecimalField(max_digits=7, decimal_places=2)
    slug = models.SlugField()
    description = models.TextField()
    image = models.ImageField(upload_to='products_images/', blank=True)  # optional
    is_subscription = models.BooleanField(default=True)

    def __str__(self):
        return self.name
class CartItem(models.Model):
    """A product line held in a pre-checkout shopping cart, grouped by cart_id."""
    cart_id = models.CharField(max_length=50)
    price = models.DecimalField(max_digits=7, decimal_places=2)  # unit price when added
    quantity = models.IntegerField()
    date_added = models.DateTimeField(auto_now_add=True)
    # PROTECT: a product referenced by a cart cannot be deleted
    product = models.ForeignKey(Product, on_delete=models.PROTECT)

    def __str__(self):
        return "{}:{}".format(self.product.name, self.id)

    def update_quantity(self, quantity):
        """Add *quantity* to the current amount and persist the row."""
        self.quantity = self.quantity + quantity
        self.save()

    def total_cost(self):
        """Line total: quantity times unit price."""
        return self.quantity * self.price
class Order(models.Model):
    """A placed order; its lines live in the related LineItem set."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    date = models.DateTimeField(auto_now_add=True)
    paid = models.BooleanField(default=False)
    payment_status = models.CharField(max_length=20,
                                      choices=PaymentStatus.choices(),
                                      default=PaymentStatus.PROCESSING.__str__())
    order_type = models.CharField(max_length=20,
                                  choices=OrderType.choices(),
                                  default=OrderType.SUBSCRIPTION.__str__())

    def __str__(self):
        return "{}:{}:{}".format(self.id, self.order_type, self.user.email)

    def total_cost(self):
        """Sum of all line-item costs for this order."""
        return sum([li.cost() for li in self.lineitem_set.all()])
class LineItem(models.Model):
    """One product line of a placed Order, with the price frozen at purchase time."""
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    price = models.DecimalField(max_digits=7, decimal_places=2)  # unit price at purchase
    quantity = models.IntegerField()
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return "{}:{}".format(self.product.name, self.id)

    def cost(self):
        """Line total: unit price times quantity."""
        return self.price * self.quantity
|
none
| 1
| 2.186939
| 2
|
|
secretcrypt/tests/test_password.py
|
Zemanta/secretcrypt
| 49
|
6627833
|
<gh_stars>10-100
import getpass
import mock
import unittest
from secretcrypt import password
class TestPassword(unittest.TestCase):
    """Round-trip test for password-based encryption."""

    @mock.patch.object(getpass, 'getpass')
    def test_encrypt_decrypt(self, mock_getpass):
        # patch the interactive password prompt so the test runs unattended
        mock_getpass.return_value = 'testpass'
        plaintext = b'myplaintext'
        ciphertext, decrypt_params = password.encrypt(plaintext)
        # decrypt with the params produced by encrypt must restore the plaintext
        self.assertEqual(plaintext, password.decrypt(ciphertext, **decrypt_params))
|
import getpass
import mock
import unittest
from secretcrypt import password
class TestPassword(unittest.TestCase):
    """Round-trip test for password-based encryption."""

    @mock.patch.object(getpass, 'getpass')
    def test_encrypt_decrypt(self, mock_getpass):
        # patch the interactive password prompt so the test runs unattended
        mock_getpass.return_value = 'testpass'
        plaintext = b'myplaintext'
        ciphertext, decrypt_params = password.encrypt(plaintext)
        # decrypt with the params produced by encrypt must restore the plaintext
        self.assertEqual(plaintext, password.decrypt(ciphertext, **decrypt_params))
|
none
| 1
| 3.113997
| 3
|
|
core/migrations/0003_auto_20161007_1830.py
|
kiloreven/challenge
| 0
|
6627834
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-07 18:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add score-tracking fields to UserProfile (auto-generated; do not edit by hand)."""

    dependencies = [
        ('core', '0002_userprofile'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='latest_correct_answer',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='score',
            field=models.IntegerField(default=0),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-07 18:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add score-tracking fields to UserProfile (auto-generated; do not edit by hand)."""

    dependencies = [
        ('core', '0002_userprofile'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='latest_correct_answer',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='score',
            field=models.IntegerField(default=0),
        ),
    ]
|
en
| 0.844601
|
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-10-07 18:30
| 1.742552
| 2
|
bills/models.py
|
xNovax/RoomScout
| 24
|
6627835
|
from django.db import models
from accounts.models import User
from houses.models import House
class BillSet(models.Model):
    """One month's worth of bills for a house."""
    month = models.IntegerField(default=-1)  # 1-12; -1 means unset
    year = models.IntegerField(default=-1)
    house = models.ForeignKey(House, on_delete=models.CASCADE)

    def __str__(self):
        return self.get_month_name() + ' ' + str(self.year)

    def get_month_name(self):
        """Return the English month name for ``self.month``, or 'Error' when out of range."""
        months = ["Unknown",
                  "January",
                  "February",  # fixed typo: was "Febuary"
                  "March",
                  "April",
                  "May",
                  "June",
                  "July",
                  "August",
                  "September",
                  "October",
                  "November",
                  "December"]
        if self.month < 1 or self.month > 12:
            return 'Error'
        return months[self.month]

    def get_total(self):
        """Sum of all bill amounts in this set."""
        total = 0
        bills = Bill.objects.filter(set=self)
        for bill in bills:
            total += bill.amount
        return total

    # TODO: This needs to be reworked to account for the owner if living in house and if members haven't registered yet. Maybe use a number in the house for the bill split multiplier
    def get_total_per_person(self):
        """Even split of the set's total across registered house members."""
        return self.get_total() / self.house.members.count()

    class Meta:
        ordering = ['year', 'month']
class Bill(models.Model):
    """A single utility bill belonging to a monthly BillSet."""
    TYPE_CHOICES = [('ELEC', 'Electricity'), ('WATER', 'Water'), ('GAS', 'Gas'), ('INTER', 'Internet'), ('OTHER', 'Other')]
    set = models.ForeignKey(BillSet, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # who added the bill
    created_at = models.DateTimeField(auto_now_add=True)
    type = models.CharField(choices=TYPE_CHOICES, max_length=5)
    date = models.DateField()  # billing date, used for ordering
    amount = models.DecimalField(max_digits=19, decimal_places=2, default=0.00)

    class Meta:
        ordering = ['date']
|
from django.db import models
from accounts.models import User
from houses.models import House
class BillSet(models.Model):
    """One month's worth of bills for a house."""
    month = models.IntegerField(default=-1)  # 1-12; -1 means unset
    year = models.IntegerField(default=-1)
    house = models.ForeignKey(House, on_delete=models.CASCADE)

    def __str__(self):
        return self.get_month_name() + ' ' + str(self.year)

    def get_month_name(self):
        """Return the English month name for ``self.month``, or 'Error' when out of range."""
        months = ["Unknown",
                  "January",
                  "February",  # fixed typo: was "Febuary"
                  "March",
                  "April",
                  "May",
                  "June",
                  "July",
                  "August",
                  "September",
                  "October",
                  "November",
                  "December"]
        if self.month < 1 or self.month > 12:
            return 'Error'
        return months[self.month]

    def get_total(self):
        """Sum of all bill amounts in this set."""
        total = 0
        bills = Bill.objects.filter(set=self)
        for bill in bills:
            total += bill.amount
        return total

    # TODO: This needs to be reworked to account for the owner if living in house and if members haven't registered yet. Maybe use a number in the house for the bill split multiplier
    def get_total_per_person(self):
        """Even split of the set's total across registered house members."""
        return self.get_total() / self.house.members.count()

    class Meta:
        ordering = ['year', 'month']
class Bill(models.Model):
    """A single utility bill belonging to a monthly BillSet."""
    TYPE_CHOICES = [('ELEC', 'Electricity'), ('WATER', 'Water'), ('GAS', 'Gas'), ('INTER', 'Internet'), ('OTHER', 'Other')]
    set = models.ForeignKey(BillSet, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # who added the bill
    created_at = models.DateTimeField(auto_now_add=True)
    type = models.CharField(choices=TYPE_CHOICES, max_length=5)
    date = models.DateField()  # billing date, used for ordering
    amount = models.DecimalField(max_digits=19, decimal_places=2, default=0.00)

    class Meta:
        ordering = ['date']
|
en
| 0.927149
|
# TODO: This needs to be reworked to account for the owner if living in house and if members haven't registered yet. Maybe use a number in the house for the bill split multiplier
| 2.464127
| 2
|
build_package.py
|
smlerman/emunah-hanukiah
| 0
|
6627836
|
#!/usr/bin/python
"""Stage packaging files into a clean temp directory and build the .deb."""
import argparse
import os
import shutil
import subprocess

cli = argparse.ArgumentParser(description="Build a Raspbian .deb package for the Emunah Menorah")
cli.add_argument("-d", "--tempdir", dest="tempdir", required=True, help="Temporary working directory for building the package; the directory must not exist")
opts = cli.parse_args()

# The staging directory must start out clean — refuse to clobber anything.
if os.path.exists(opts.tempdir):
    raise Exception("The temp directory must be empty or not exist")

# Stage the packaging files, then build from inside the staging directory.
shutil.copytree("package_files", opts.tempdir, symlinks=True)
os.chdir(opts.tempdir)
subprocess.check_call(["dpkg-buildpackage", "-A", "-us", "-uc"])
|
#!/usr/bin/python
"""Stage packaging files into a clean temp directory and build the .deb."""
import argparse
import os
import shutil
import subprocess

cli = argparse.ArgumentParser(description="Build a Raspbian .deb package for the Emunah Menorah")
cli.add_argument("-d", "--tempdir", dest="tempdir", required=True, help="Temporary working directory for building the package; the directory must not exist")
opts = cli.parse_args()

# The staging directory must start out clean — refuse to clobber anything.
if os.path.exists(opts.tempdir):
    raise Exception("The temp directory must be empty or not exist")

# Stage the packaging files, then build from inside the staging directory.
shutil.copytree("package_files", opts.tempdir, symlinks=True)
os.chdir(opts.tempdir)
subprocess.check_call(["dpkg-buildpackage", "-A", "-us", "-uc"])
|
en
| 0.739784
|
#!/usr/bin/python # Check that the temp working directory is clean # Copy files to the temp directory # Build the package
| 2.989536
| 3
|
src/argument_parser.py
|
SzymonZos/dwarf-parser
| 0
|
6627837
|
from argparse import ArgumentParser
def create_parser():
    """Build and return the command-line parser for the dwarf extractor."""
    arg_parser = ArgumentParser(description="Extract dwarf info")
    arg_parser.add_argument(
        "-e",
        "--elf",
        type=str,
        action="store",
        help="Select elf file",
    )
    return arg_parser
|
from argparse import ArgumentParser
def create_parser():
    """Return an ArgumentParser that accepts an optional -e/--elf file path."""
    parser = ArgumentParser(description="Extract dwarf info")
    # --elf is optional; args.elf is None when the flag is omitted.
    parser.add_argument("-e", "--elf", type=str, action="store",
                        help="Select elf file")
    return parser
|
none
| 1
| 2.850627
| 3
|
|
python/network/Foundations-of-Python-Network-Programming/foundations-of-python-network-programming-14/source/chapter05/blocks.py
|
bosserbosser/codetest
| 0
|
6627838
|
#!/usr/bin/env python3
# Foundations of Python Network Programming, Third Edition
# https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter05/blocks.py
# Sending data over a stream but delimited as length-prefixed blocks.
import socket, struct
from argparse import ArgumentParser
header_struct = struct.Struct('!I') # messages up to 2**32 - 1 in length
def recvall(sock, length):
    """Receive exactly `length` bytes from `sock`.

    Loops over sock.recv() until the requested number of bytes has arrived.

    Raises:
        EOFError: if the peer closes the connection before `length` bytes
            have been received.
    """
    blocks = []
    while length:
        block = sock.recv(length)
        if not block:
            # Bug fix: the original used a '%d' placeholder with
            # str.format(), so the remaining-byte count was never
            # interpolated into the message.
            raise EOFError('socket closed with {} bytes left'
                           ' in this block'.format(length))
        length -= len(block)
        blocks.append(block)
    return b''.join(blocks)
def get_block(sock):
    """Read and return one length-prefixed block from the socket."""
    header = recvall(sock, header_struct.size)
    (payload_length,) = header_struct.unpack(header)
    return recvall(sock, payload_length)
def put_block(sock, message):
    """Send `message` over the socket, prefixed with its 4-byte length.

    Bug fix: socket.send() may transmit fewer bytes than requested on a
    stream socket; sendall() retries until the whole header and payload
    have been sent.
    """
    block_length = len(message)
    sock.sendall(header_struct.pack(block_length))
    sock.sendall(message)
def server(address):
    """Accept a single TCP connection and print each block the client sends."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts of the server on the same port.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(address)
    sock.listen(1)
    print('Run this script in another window with "-c" to connect')
    print('Listening at', sock.getsockname())
    sc, sockname = sock.accept()
    print('Accepted connection from', sockname)
    # This side only receives, so close the write direction of the stream.
    sc.shutdown(socket.SHUT_WR)
    while True:
        block = get_block(sc)
        if not block:
            # An empty block is the client's end-of-stream marker.
            break
        print('Block says:', repr(block))
    sc.close()
    sock.close()
def client(address):
    """Connect to the server and transmit a few demo blocks, then quit."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(address)
    # This side only sends, so close the read direction of the stream.
    sock.shutdown(socket.SHUT_RD)
    messages = (
        b'Beautiful is better than ugly.',
        b'Explicit is better than implicit.',
        b'Simple is better than complex.',
        b'',  # empty block signals end-of-stream to the server
    )
    for message in messages:
        put_block(sock, message)
    sock.close()
if __name__ == '__main__':
    parser = ArgumentParser(description='Transmit & receive blocks over TCP')
    parser.add_argument('hostname', nargs='?', default='127.0.0.1',
                        help='IP address or hostname (default: %(default)s)')
    parser.add_argument('-c', action='store_true', help='run as the client')
    parser.add_argument('-p', type=int, metavar='port', default=1060,
                        help='TCP port number (default: %(default)s)')
    args = parser.parse_args()
    # The same script serves both roles: -c selects the client, otherwise
    # it runs as the server.
    function = client if args.c else server
    function((args.hostname, args.p))
|
#!/usr/bin/env python3
# Foundations of Python Network Programming, Third Edition
# https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter05/blocks.py
# Sending data over a stream but delimited as length-prefixed blocks.
import socket, struct
from argparse import ArgumentParser
header_struct = struct.Struct('!I') # messages up to 2**32 - 1 in length
def recvall(sock, length):
blocks = []
while length:
block = sock.recv(length)
if not block:
raise EOFError('socket closed with %d bytes left'
' in this block'.format(length))
length -= len(block)
blocks.append(block)
return b''.join(blocks)
def get_block(sock):
data = recvall(sock, header_struct.size)
(block_length,) = header_struct.unpack(data)
return recvall(sock, block_length)
def put_block(sock, message):
block_length = len(message)
sock.send(header_struct.pack(block_length))
sock.send(message)
def server(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(1)
print('Run this script in another window with "-c" to connect')
print('Listening at', sock.getsockname())
sc, sockname = sock.accept()
print('Accepted connection from', sockname)
sc.shutdown(socket.SHUT_WR)
while True:
block = get_block(sc)
if not block:
break
print('Block says:', repr(block))
sc.close()
sock.close()
def client(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
sock.shutdown(socket.SHUT_RD)
put_block(sock, b'Beautiful is better than ugly.')
put_block(sock, b'Explicit is better than implicit.')
put_block(sock, b'Simple is better than complex.')
put_block(sock, b'')
sock.close()
if __name__ == '__main__':
parser = ArgumentParser(description='Transmit & receive blocks over TCP')
parser.add_argument('hostname', nargs='?', default='127.0.0.1',
help='IP address or hostname (default: %(default)s)')
parser.add_argument('-c', action='store_true', help='run as the client')
parser.add_argument('-p', type=int, metavar='port', default=1060,
help='TCP port number (default: %(default)s)')
args = parser.parse_args()
function = client if args.c else server
function((args.hostname, args.p))
|
en
| 0.707205
|
#!/usr/bin/env python3 # Foundations of Python Network Programming, Third Edition # https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter05/blocks.py # Sending data over a stream but delimited as length-prefixed blocks. # messages up to 2**32 - 1 in length
| 3.552656
| 4
|
trac/util/tests/presentation.py
|
lelit/trac
| 1
|
6627839
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2013 Edgewall Software
# Copyright (C) 2006 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import doctest
import unittest
from trac.util import presentation
class ToJsonTestCase(unittest.TestCase):
    """Unit tests for trac.util.presentation.to_json."""

    def test_simple_types(self):
        # Scalars map straight onto JSON literals.
        self.assertEqual('42', presentation.to_json(42))
        self.assertEqual('123.456', presentation.to_json(123.456))
        self.assertEqual('true', presentation.to_json(True))
        self.assertEqual('false', presentation.to_json(False))
        self.assertEqual('null', presentation.to_json(None))
        self.assertEqual('"String"', presentation.to_json('String'))
        self.assertEqual(r'"a \" quote"', presentation.to_json('a " quote'))
        self.assertEqual('''"a ' single quote"''',
                         presentation.to_json("a ' single quote"))
        # HTML-sensitive characters are \u-escaped in the output.
        self.assertEqual(r'"\u003cb\u003e\u0026\u003c/b\u003e"',
                         presentation.to_json('<b>&</b>'))
        # U+2028/U+2029 are escaped too; they are valid in JSON but illegal
        # inside JavaScript string literals.
        self.assertEqual(r'"\n\r\u2028\u2029"',
                         presentation.to_json(u'\x0a\x0d\u2028\u2029'))

    def test_compound_types(self):
        self.assertEqual('[1,2,[true,false]]',
                         presentation.to_json([1, 2, [True, False]]))
        # Note the expected output lists the keys in sorted order with the
        # same escaping rules as the scalar cases above.
        self.assertEqual(r'{"one":1,"other":[null,0],'
                         r'''"three":[3,"\u0026\u003c\u003e'"],'''
                         r'"two":2,"\u2028\n":"\u2029\r"}',
                         presentation.to_json({"one": 1, "two": 2,
                                               "other": [None, 0],
                                               "three": [3, "&<>'"],
                                               u"\u2028\x0a": u"\u2029\x0d"}))
def suite():
    """Aggregate the doctests and unit tests for trac.util.presentation."""
    suite = unittest.TestSuite()
    suite.addTest(doctest.DocTestSuite(presentation))
    # unittest.makeSuite() is deprecated since Python 3.2 and removed in
    # 3.13; load the TestCase through the default loader instead.
    suite.addTest(
        unittest.defaultTestLoader.loadTestsFromTestCase(ToJsonTestCase))
    return suite
# Running the module directly executes the combined suite defined above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2013 Edgewall Software
# Copyright (C) 2006 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import doctest
import unittest
from trac.util import presentation
class ToJsonTestCase(unittest.TestCase):
def test_simple_types(self):
self.assertEqual('42', presentation.to_json(42))
self.assertEqual('123.456', presentation.to_json(123.456))
self.assertEqual('true', presentation.to_json(True))
self.assertEqual('false', presentation.to_json(False))
self.assertEqual('null', presentation.to_json(None))
self.assertEqual('"String"', presentation.to_json('String'))
self.assertEqual(r'"a \" quote"', presentation.to_json('a " quote'))
self.assertEqual('''"a ' single quote"''',
presentation.to_json("a ' single quote"))
self.assertEqual(r'"\u003cb\u003e\u0026\u003c/b\u003e"',
presentation.to_json('<b>&</b>'))
self.assertEqual(r'"\n\r\u2028\u2029"',
presentation.to_json(u'\x0a\x0d\u2028\u2029'))
def test_compound_types(self):
self.assertEqual('[1,2,[true,false]]',
presentation.to_json([1, 2, [True, False]]))
self.assertEqual(r'{"one":1,"other":[null,0],'
r'''"three":[3,"\u0026\u003c\u003e'"],'''
r'"two":2,"\u2028\n":"\u2029\r"}',
presentation.to_json({"one": 1, "two": 2,
"other": [None, 0],
"three": [3, "&<>'"],
u"\u2028\x0a": u"\u2029\x0d"}))
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(presentation))
suite.addTest(unittest.makeSuite(ToJsonTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
en
| 0.929285
|
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2013 Edgewall Software # Copyright (C) 2006 <NAME> <<EMAIL>> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/log/. "a ' single quote" "three":[3,"\u0026\u003c\u003e'"],
| 2.14459
| 2
|
pycatia/navigator_interfaces/annotated_views.py
|
evereux/catia_python
| 90
|
6627840
|
<gh_stars>10-100
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from typing import Iterator
from pycatia.in_interfaces.viewpoint_3d import Viewpoint3D
from pycatia.navigator_interfaces.annotated_view import AnnotatedView
from pycatia.system_interfaces.collection import Collection
from pycatia.types.general import cat_variant
class AnnotatedViews(Collection):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.Collection
| AnnotatedViews
|
| A collection of AnnotatedView objects.
|
| The method Product.GetTechnologicalObject ("AnnotatedViews") on the root
| product retrieves this collection.
"""
def __init__(self, com_object):
    """Wrap the raw AnnotatedViews COM collection object."""
    super().__init__(com_object)
    # Keep a direct handle to the COM collection for the method calls below.
    self.annotated_views = com_object
def add(self) -> AnnotatedView:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func Add() As AnnotatedView
|
| Creates an annotated view using the current viewpoint and adds it to the
| AnnotatedView collection.
|
| Returns:
| The created AnnotatedView
| Example:
|
| This example creates a new AnnotatedView in the TheAnnotatedViews
| collection.
|
|
| Dim NewAnnotatedView As AnnotatedView
| Set NewAnnotatedView = TheAnnotatedViews.Add
:return: AnnotatedView
:rtype: AnnotatedView
"""
return AnnotatedView(self.annotated_views.Add())
def add_from_viewpoint(self, i_viewpoint: Viewpoint3D) -> AnnotatedView:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func AddFromViewpoint(Viewpoint3D iViewpoint) As
| AnnotatedView
|
| Creates an annotated view using a given viewpoint and adds it to the
| AnnotatedView collection.
|
| Parameters:
|
| iViewpoint
| The viewpoint.
|
| Returns:
| The created AnnotatedView
| Example:
|
| This example creates a new AnnotatedView in the TheAnnotatedViews
| collection using a
| AViewpoint viewpoint object.
|
|
| Dim NewAnnotatedView As AnnotatedView
| Set NewAnnotatedView = TheAnnotatedViews.AddFromViewpoint(AViewpoint)
:param Viewpoint3D i_viewpoint:
:return: AnnotatedView
:rtype: AnnotatedView
"""
return AnnotatedView(self.annotated_views.AddFromViewpoint(i_viewpoint.com_object))
def item(self, i_index: cat_variant) -> AnnotatedView:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func Item(CATVariant iIndex) As AnnotatedView
|
| Returns an annotated view using its index or its name from the
| AnnotatedViews collection.
|
| Parameters:
|
| iIndex
| The index or the name of the AnnotatedView to retrieve from the
| collection of AnnotatedViews. As a numerics, this index is the rank of the
| AnnotatedView in the collection. The index of the first AnnotatedView in the
| collection is 1, and the index of the last AnnotatedView is Count. As a string,
| it is the name you assigned to the AnnotatedView.
|
| Returns:
| The retrieved AnnotatedView
| Example:
|
| This example retrieves in ThisAnnotatedView the ninth
| AnnotatedView,
| and in ThatAnnotatedView the AnnotatedView named
| AnnotatedView3 from the TheAnnotatedViews collection.
|
|
|
| Dim ThisAnnotatedView As AnnotatedView
| Set ThisAnnotatedView = TheAnnotatedViews.Item(9)
| Dim ThatAnnotatedView As AnnotatedView
| Set ThatAnnotatedView = TheAnnotatedViews.Item("AnnotatedView3")
:param cat_variant i_index:
:return: AnnotatedView
:rtype: AnnotatedView
"""
return AnnotatedView(self.annotated_views.Item(i_index))
def remove(self, i_index: cat_variant) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub Remove(CATVariant iIndex)
|
| Removes an annotated view from the AnnotatedViews
| collection.
|
| Parameters:
|
| iIndex
                |             The index or the name of the AnnotatedView to retrieve from the
| collection of AnnotatedViews. As a numerics, this index is the rank of the
| AnnotatedView in the collection. The index of the first AnnotatedView in the
| collection is 1, and the index of the last AnnotatedView is Count. As a string,
| it is the name you assigned to the AnnotatedView.
|
| Example:
|
| The following example removes the tenth AnnotatedView and the
| AnnotatedView named
| AnnotatedView2 from the TheAnnotatedViews
| collection.
|
|
| TheAnnotatedViews.Remove(10)
| TheAnnotatedViews.Remove("AnnotatedView2")
:param cat_variant i_index:
:return: None
:rtype: None
"""
return self.annotated_views.Remove(i_index)
def __getitem__(self, n: int) -> AnnotatedView:
    """Return the n-th AnnotatedView (zero-based index).

    Raises:
        IndexError: if `n` is past the end of the collection.
    """
    if (n + 1) > self.count:
        # Bug fix: __getitem__ previously raised StopIteration, which is the
        # iterator-protocol signal; the sequence protocol expects IndexError
        # for an out-of-range index (and a for-loop still terminates on it).
        raise IndexError(f'index {n} out of range')
    # COM collections are 1-indexed.
    return AnnotatedView(self.annotated_views.item(n + 1))
def __iter__(self) -> Iterator[AnnotatedView]:
    """Yield each AnnotatedView in the collection, in COM order."""
    for i in range(self.count):
        # COM collections are 1-indexed.
        yield self.child_object(self.com_object.item(i + 1))
def __repr__(self):
    """Debug representation showing the collection's name."""
    return 'AnnotatedViews(name="{}")'.format(self.name)
|
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from typing import Iterator
from pycatia.in_interfaces.viewpoint_3d import Viewpoint3D
from pycatia.navigator_interfaces.annotated_view import AnnotatedView
from pycatia.system_interfaces.collection import Collection
from pycatia.types.general import cat_variant
class AnnotatedViews(Collection):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.Collection
| AnnotatedViews
|
| A collection of AnnotatedView objects.
|
| The method Product.GetTechnologicalObject ("AnnotatedViews") on the root
| product retrieves this collection.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.annotated_views = com_object
def add(self) -> AnnotatedView:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func Add() As AnnotatedView
|
| Creates an annotated view using the current viewpoint and adds it to the
| AnnotatedView collection.
|
| Returns:
| The created AnnotatedView
| Example:
|
| This example creates a new AnnotatedView in the TheAnnotatedViews
| collection.
|
|
| Dim NewAnnotatedView As AnnotatedView
| Set NewAnnotatedView = TheAnnotatedViews.Add
:return: AnnotatedView
:rtype: AnnotatedView
"""
return AnnotatedView(self.annotated_views.Add())
def add_from_viewpoint(self, i_viewpoint: Viewpoint3D) -> AnnotatedView:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func AddFromViewpoint(Viewpoint3D iViewpoint) As
| AnnotatedView
|
| Creates an annotated view using a given viewpoint and adds it to the
| AnnotatedView collection.
|
| Parameters:
|
| iViewpoint
| The viewpoint.
|
| Returns:
| The created AnnotatedView
| Example:
|
| This example creates a new AnnotatedView in the TheAnnotatedViews
| collection using a
| AViewpoint viewpoint object.
|
|
| Dim NewAnnotatedView As AnnotatedView
| Set NewAnnotatedView = TheAnnotatedViews.AddFromViewpoint(AViewpoint)
:param Viewpoint3D i_viewpoint:
:return: AnnotatedView
:rtype: AnnotatedView
"""
return AnnotatedView(self.annotated_views.AddFromViewpoint(i_viewpoint.com_object))
def item(self, i_index: cat_variant) -> AnnotatedView:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func Item(CATVariant iIndex) As AnnotatedView
|
| Returns an annotated view using its index or its name from the
| AnnotatedViews collection.
|
| Parameters:
|
| iIndex
| The index or the name of the AnnotatedView to retrieve from the
| collection of AnnotatedViews. As a numerics, this index is the rank of the
| AnnotatedView in the collection. The index of the first AnnotatedView in the
| collection is 1, and the index of the last AnnotatedView is Count. As a string,
| it is the name you assigned to the AnnotatedView.
|
| Returns:
| The retrieved AnnotatedView
| Example:
|
| This example retrieves in ThisAnnotatedView the ninth
| AnnotatedView,
| and in ThatAnnotatedView the AnnotatedView named
| AnnotatedView3 from the TheAnnotatedViews collection.
|
|
|
| Dim ThisAnnotatedView As AnnotatedView
| Set ThisAnnotatedView = TheAnnotatedViews.Item(9)
| Dim ThatAnnotatedView As AnnotatedView
| Set ThatAnnotatedView = TheAnnotatedViews.Item("AnnotatedView3")
:param cat_variant i_index:
:return: AnnotatedView
:rtype: AnnotatedView
"""
return AnnotatedView(self.annotated_views.Item(i_index))
def remove(self, i_index: cat_variant) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub Remove(CATVariant iIndex)
|
| Removes an annotated view from the AnnotatedViews
| collection.
|
| Parameters:
|
| iIndex
| The index or the name of the AnnotatedView to retrieve from he
| collection of AnnotatedViews. As a numerics, this index is the rank of the
| AnnotatedView in the collection. The index of the first AnnotatedView in the
| collection is 1, and the index of the last AnnotatedView is Count. As a string,
| it is the name you assigned to the AnnotatedView.
|
| Example:
|
| The following example removes the tenth AnnotatedView and the
| AnnotatedView named
| AnnotatedView2 from the TheAnnotatedViews
| collection.
|
|
| TheAnnotatedViews.Remove(10)
| TheAnnotatedViews.Remove("AnnotatedView2")
:param cat_variant i_index:
:return: None
:rtype: None
"""
return self.annotated_views.Remove(i_index)
def __getitem__(self, n: int) -> AnnotatedView:
if (n + 1) > self.count:
raise StopIteration
return AnnotatedView(self.annotated_views.item(n + 1))
def __iter__(self) -> Iterator[AnnotatedView]:
for i in range(self.count):
yield self.child_object(self.com_object.item(i + 1))
def __repr__(self):
return f'AnnotatedViews(name="{self.name}")'
|
en
| 0.736065
|
#! usr/bin/python3.6 Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445 .. warning:: The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only. They are there as a guide as to how the visual basic / catscript functions work and thus help debugging in pycatia. .. note:: :class: toggle CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445) | System.IUnknown | System.IDispatch | System.CATBaseUnknown | System.CATBaseDispatch | System.Collection | AnnotatedViews | | A collection of AnnotatedView objects. | | The method Product.GetTechnologicalObject ("AnnotatedViews") on the root | product retrieves this collection. .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)) | o Func Add() As AnnotatedView | | Creates an annotated view using the current viewpoint and adds it to the | AnnotatedView collection. | | Returns: | The created AnnotatedView | Example: | | This example creates a new AnnotatedView in the TheAnnotatedViews | collection. | | | Dim NewAnnotatedView As AnnotatedView | Set NewAnnotatedView = TheAnnotatedViews.Add :return: AnnotatedView :rtype: AnnotatedView .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)) | o Func AddFromViewpoint(Viewpoint3D iViewpoint) As | AnnotatedView | | Creates an annotated view using a given viewpoint and adds it to the | AnnotatedView collection. | | Parameters: | | iViewpoint | The viewpoint. | | Returns: | The created AnnotatedView | Example: | | This example creates a new AnnotatedView in the TheAnnotatedViews | collection using a | AViewpoint viewpoint object. | | | Dim NewAnnotatedView As AnnotatedView | Set NewAnnotatedView = TheAnnotatedViews.AddFromViewpoint(AViewpoint) :param Viewpoint3D i_viewpoint: :return: AnnotatedView :rtype: AnnotatedView .. 
note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)) | o Func Item(CATVariant iIndex) As AnnotatedView | | Returns an annotated view using its index or its name from the | AnnotatedViews collection. | | Parameters: | | iIndex | The index or the name of the AnnotatedView to retrieve from the | collection of AnnotatedViews. As a numerics, this index is the rank of the | AnnotatedView in the collection. The index of the first AnnotatedView in the | collection is 1, and the index of the last AnnotatedView is Count. As a string, | it is the name you assigned to the AnnotatedView. | | Returns: | The retrieved AnnotatedView | Example: | | This example retrieves in ThisAnnotatedView the ninth | AnnotatedView, | and in ThatAnnotatedView the AnnotatedView named | AnnotatedView3 from the TheAnnotatedViews collection. | | | | Dim ThisAnnotatedView As AnnotatedView | Set ThisAnnotatedView = TheAnnotatedViews.Item(9) | Dim ThatAnnotatedView As AnnotatedView | Set ThatAnnotatedView = TheAnnotatedViews.Item("AnnotatedView3") :param cat_variant i_index: :return: AnnotatedView :rtype: AnnotatedView .. note:: :class: toggle CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)) | o Sub Remove(CATVariant iIndex) | | Removes an annotated view from the AnnotatedViews | collection. | | Parameters: | | iIndex | The index or the name of the AnnotatedView to retrieve from he | collection of AnnotatedViews. As a numerics, this index is the rank of the | AnnotatedView in the collection. The index of the first AnnotatedView in the | collection is 1, and the index of the last AnnotatedView is Count. As a string, | it is the name you assigned to the AnnotatedView. | | Example: | | The following example removes the tenth AnnotatedView and the | AnnotatedView named | AnnotatedView2 from the TheAnnotatedViews | collection. | | | TheAnnotatedViews.Remove(10) | TheAnnotatedViews.Remove("AnnotatedView2") :param cat_variant i_index: :return: None :rtype: None
| 1.959887
| 2
|
salt/modules/glance.py
|
yuriks/salt
| 1
|
6627841
|
<reponame>yuriks/salt
# -*- coding: utf-8 -*-
'''
Module for handling openstack glance calls.
:optdepends: - glanceclient Python adapter
:configuration: This module is not usable until the following are specified
either in a pillar or in the minion's config file::
keystone.user: admin
keystone.password: <PASSWORD>
keystone.tenant: admin
keystone.insecure: False #(optional)
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
If configuration for multiple openstack accounts is required, they can be
set up as different configuration profiles:
For example::
openstack1:
keystone.user: admin
keystone.password: <PASSWORD>
keystone.tenant: admin
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
openstack2:
keystone.user: admin
keystone.password: <PASSWORD>
keystone.tenant: admin
keystone.auth_url: 'http://127.0.0.2:5000/v2.0/'
With this configuration in place, any of the glance functions can
make use of a configuration profile by declaring it explicitly.
For example::
salt '*' glance.image_list profile=openstack1
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import re
# Import salt libs
from salt.exceptions import (
SaltInvocationError
)
from salt.ext import six
# pylint: disable=import-error
HAS_GLANCE = False
try:
from glanceclient import client
from glanceclient import exc
HAS_GLANCE = True
except ImportError:
pass
# Workaround, as the Glance API v2 requires you to
# already have a keystone session token
HAS_KEYSTONE = False
try:
from keystoneclient.v2_0 import client as kstone
#import keystoneclient.apiclient.exceptions as kstone_exc
HAS_KEYSTONE = True
except ImportError:
pass
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
import pprint
def __virtual__():
    '''
    Only load this module if glance
    is installed on this minion.
    '''
    # Guard clause: without the client library the module is unusable.
    if not HAS_GLANCE:
        return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.')
    return 'glance'
__opts__ = {}
def _auth(profile=None, api_version=2, **connection_args):
'''
Set up glance credentials, returns
`glanceclient.client.Client`. Optional parameter
"api_version" defaults to 2.
Only intended to be used within glance-enabled modules
'''
__utils__['versions.warn_until'](
'Aluminium',
(
'The glance module has been deprecated and will be removed in {version}. '
'Please update to using the glanceng module'
),
)
if profile:
prefix = profile + ":keystone."
else:
prefix = "keystone."
def get(key, default=None):
'''
Checks connection_args, then salt-minion config,
falls back to specified default value.
'''
return connection_args.get('connection_' + key,
__salt__['config.get'](prefix + key, default))
user = get('user', 'admin')
password = get('password', None)
tenant = get('tenant', 'admin')
tenant_id = get('tenant_id')
auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')
insecure = get('insecure', False)
admin_token = get('token')
region = get('region')
ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')
g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile)
    # The trailing 'v2' causes URLs like this one:
# http://127.0.0.1:9292/v2/v1/images
g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])
if admin_token and api_version != 1 and not password:
# If we had a password we could just
# ignore the admin-token and move on...
raise SaltInvocationError('Only can use keystone admin token ' +
'with Glance API v1')
elif password:
# Can't use the admin-token anyway
kwargs = {'username': user,
'password': password,
'tenant_id': tenant_id,
'auth_url': auth_url,
'endpoint_url': g_endpoint_url,
'region_name': region,
'tenant_name': tenant}
# 'insecure' keyword not supported by all v2.0 keystone clients
# this ensures it's only passed in when defined
if insecure:
kwargs['insecure'] = True
elif api_version == 1 and admin_token:
kwargs = {'token': admin_token,
'auth_url': auth_url,
'endpoint_url': g_endpoint_url}
else:
raise SaltInvocationError('No credentials to authenticate with.')
if HAS_KEYSTONE:
log.debug(
'Calling keystoneclient.v2_0.client.Client(%s, **%s)',
ks_endpoint, kwargs
)
keystone = kstone.Client(**kwargs)
kwargs['token'] = keystone.get_token(keystone.session)
            # This doesn't really prevent the password from showing up
# in the minion log as keystoneclient.session is
# logging it anyway when in debug-mode
kwargs.pop('password')
log.debug(
'Calling glanceclient.client.Client(%s, %s, **%s)',
api_version,
g_endpoint_url,
kwargs
)
# may raise exc.HTTPUnauthorized, exc.HTTPNotFound
# but we deal with those elsewhere
return client.Client(api_version, g_endpoint_url, **kwargs)
else:
raise NotImplementedError(
"Can't retrieve a auth_token without keystone")
def _add_image(collection, image):
'''
Add image to given dictionary
'''
image_prep = {
'id': image.id,
'name': image.name,
'created_at': image.created_at,
'file': image.file,
'min_disk': image.min_disk,
'min_ram': image.min_ram,
'owner': image.owner,
'protected': image.protected,
'status': image.status,
'tags': image.tags,
'updated_at': image.updated_at,
'visibility': image.visibility,
}
# Those cause AttributeErrors in Icehouse' glanceclient
for attr in ['container_format', 'disk_format', 'size']:
if attr in image:
image_prep[attr] = image[attr]
if type(collection) is dict:
collection[image.name] = image_prep
elif type(collection) is list:
collection.append(image_prep)
else:
msg = '"collection" is {0}'.format(type(collection)) +\
'instead of dict or list.'
log.error(msg)
raise TypeError(msg)
return collection
def image_create(name,
location=None,
profile=None,
visibility=None,
container_format='bare',
disk_format='raw',
protected=None,):
'''
Create an image (glance image-create)
CLI Example, old format:
.. code-block:: bash
salt '*' glance.image_create name=f16-jeos \\
disk_format=qcow2 container_format=ovf
CLI Example, new format resembling Glance API v2:
.. code-block:: bash
salt '*' glance.image_create name=f16-jeos visibility=public \\
disk_format=qcow2 container_format=ovf
The parameter 'visibility' defaults to 'public' if not specified.
'''
kwargs = {}
# valid options for "visibility":
v_list = ['public', 'private']
# valid options for "container_format":
cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf']
# valid options for "disk_format":
df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk',
'raw', 'qcow2', 'vdi', 'iso']
kwargs['copy_from'] = location
if visibility is not None:
if visibility not in v_list:
raise SaltInvocationError('"visibility" needs to be one ' +
'of the following: {0}'.format(', '.join(v_list)))
elif visibility == 'public':
kwargs['is_public'] = True
else:
kwargs['is_public'] = False
else:
kwargs['is_public'] = True
if container_format not in cf_list:
raise SaltInvocationError('"container_format" needs to be ' +
'one of the following: {0}'.format(', '.join(cf_list)))
else:
kwargs['container_format'] = container_format
if disk_format not in df_list:
raise SaltInvocationError('"disk_format" needs to be one ' +
'of the following: {0}'.format(', '.join(df_list)))
else:
kwargs['disk_format'] = disk_format
if protected is not None:
kwargs['protected'] = protected
# Icehouse's glanceclient doesn't have add_location() and
# glanceclient.v2 doesn't implement Client.images.create()
# in a usable fashion. Thus we have to use v1 for now.
g_client = _auth(profile, api_version=1)
image = g_client.images.create(name=name, **kwargs)
return image_show(image.id, profile=profile)
def image_delete(id=None, name=None, profile=None):  # pylint: disable=C0103
    '''
    Delete an image (glance image-delete)

    :param id: ID of the image to delete
    :param name: name of the image to delete; resolved to an ID by
        scanning the image list (if several images share the name,
        the last match wins)
    :param profile: authentication profile to use
    :return: dict with a boolean ``result`` key and a ``comment``

    CLI Examples:
    .. code-block:: bash
        salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_delete name=f16-jeos
    '''
    g_client = _auth(profile)
    image = {'id': False, 'name': None}
    if name:
        for image in g_client.images.list():
            if image.name == name:
                id = image.id  # pylint: disable=C0103
                # deliberately keep scanning: a later image with the
                # same name overwrites the id ("last match wins")
                continue
        if not id:
            return {
                'result': False,
                'comment':
                    'Unable to resolve image id '
                    'for name {0}'.format(name)
            }
    elif not name:
        # no name given; take it from the last image seen (or None)
        name = image['name']
    try:
        g_client.images.delete(id)
    except exc.HTTPNotFound:
        return {
            'result': False,
            'comment': 'No image with ID {0}'.format(id)
        }
    except exc.HTTPForbidden as forbidden:
        log.error(six.text_type(forbidden))
        return {
            'result': False,
            'comment': six.text_type(forbidden)
        }
    return {
        'result': True,
        'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id),
    }
def image_show(id=None, name=None, profile=None):  # pylint: disable=C0103
    '''
    Return details about a specific image (glance image-show)

    :param id: ID of the image to show
    :param name: name of the image; resolved to an ID by scanning the
        image list (last match wins for duplicated names)
    :param profile: authentication profile to use
    :return: dict of image properties restricted to the keys of the
        "image" schema, or a dict with ``result: False`` on failure

    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_show
    '''
    g_client = _auth(profile)
    ret = {}
    if name:
        for image in g_client.images.list():
            if image.name == name:
                id = image.id  # pylint: disable=C0103
                continue
        if not id:
            return {
                'result': False,
                'comment':
                    'Unable to resolve image ID '
                    'for name \'{0}\''.format(name)
            }
    try:
        image = g_client.images.get(id)
    except exc.HTTPNotFound:
        return {
            'result': False,
            'comment': 'No image with ID {0}'.format(id)
        }
    pformat = pprint.PrettyPrinter(indent=4).pformat
    log.debug('Properties of image {0}:\n{1}'.format(
        image.name, pformat(image)))
    # only expose the properties that are part of the "image" schema
    schema = image_schema(profile=profile)
    if len(schema.keys()) == 1:
        schema = schema['image']
    for key in schema:
        if key in image:
            ret[key] = image[key]
    return ret
def image_list(id=None, profile=None, name=None):  # pylint: disable=C0103
    '''
    Return a list of available images (glance image-list)

    :param id: if given, return only the image with this ID
        (as a single-element list)
    :param profile: authentication profile to use
    :param name: if given, return only images with this name; on
        pre-Boron releases a duplicate name yields an error dict
        with ``result: False`` instead of a list

    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_list
    '''
    g_client = _auth(profile)
    ret = []
    for image in g_client.images.list():
        if id is None and name is None:
            _add_image(ret, image)
        else:
            if id is not None and id == image.id:
                _add_image(ret, image)
                # IDs are unique; stop as soon as the match is found
                return ret
            if name == image.name:
                # Bug fix: "ret" holds image dicts, so the old check
                # ``name in ret`` compared a string to dicts and could
                # never be true; compare against the collected names.
                if (any(img.get('name') == name for img in ret) and
                        __salt__['salt_version.less_than']('Boron')):
                    # Not really worth an exception
                    return {
                        'result': False,
                        'comment':
                            'More than one image with '
                            'name "{0}"'.format(name)
                    }
                _add_image(ret, image)
    log.debug('Returning images: {0}'.format(ret))
    return ret
def image_schema(profile=None):
    '''
    Return the names and descriptions of the properties that make up
    the "image" schema on this profile's glance instance.

    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_schema
    '''
    # Thin wrapper around the generic schema fetcher.
    return schema_get('image', profile)
def image_update(id=None, name=None, profile=None, **kwargs):  # pylint: disable=C0103
    '''
    Update properties of given image.
    Known to work for:
    - min_ram (in MB)
    - protected (bool)
    - visibility ('public' or 'private')

    :param id: ID of the image to update
    :param name: name of the image to update (ignored when *id* is set)
    :param profile: authentication profile to use
    :param kwargs: property name/value pairs to set; keys starting
        with an underscore (internal salt arguments) are skipped
    :raises SaltInvocationError: when neither *id* nor *name* is given

    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_update name=f16-jeos
    '''
    if id:
        image = image_show(id=id, profile=profile)
        if 'result' in image and not image['result']:
            # image_show() reported an error; pass it through unchanged
            return image
        elif len(image) == 1:
            # Bug fix: dict.values() is a non-indexable view on
            # Python 3; materialize it before subscripting.
            image = list(image.values())[0]
    elif name:
        img_list = image_list(name=name, profile=profile)
        # Bug fix: "img_list is dict" compared the object to the dict
        # *type* and was always False; use isinstance() instead.
        if isinstance(img_list, dict) and 'result' in img_list:
            return img_list
        elif len(img_list) == 0:
            return {
                'result': False,
                'comment':
                    'No image with name \'{0}\' '
                    'found.'.format(name)
            }
        elif len(img_list) == 1:
            try:
                image = img_list[0]
            except KeyError:
                image = img_list[name]
    else:
        raise SaltInvocationError('Either id or name of the image must be given.')
    log.debug('Found image:\n{0}'.format(image))
    to_update = {}
    for key, value in kwargs.items():
        if key.startswith('_'):
            # internal salt argument, not an image property
            continue
        if key not in image or image[key] != value:
            log.debug('add <{0}={1}> to to_update'.format(key, value))
            to_update[key] = value
    g_client = _auth(profile)
    updated = g_client.images.update(image['id'], **to_update)
    return updated
def schema_get(name, profile=None):
    '''
    Fetch a glance schema by name and return it as
    ``{name: {property_name: description}}``.

    Known valid names of schemas are:
    - image
    - images
    - member
    - members

    CLI Example:
    .. code-block:: bash
        salt '*' glance.schema_get name=f16-jeos
    '''
    g_client = _auth(profile)
    schema_props = dict(
        (prop.name, prop.description)
        for prop in g_client.schemas.get(name).properties
    )
    log.debug('Properties of schema {0}:\n{1}'.format(
        name, pprint.PrettyPrinter(indent=4).pformat(schema_props)))
    return {name: schema_props}
def _item_list(profile=None):
    '''
    Template for writing list functions.
    Return a list of available items (glance items-list), one
    ``__dict__`` snapshot per item.

    CLI Example:
    .. code-block:: bash
        salt '*' glance.item_list
    '''
    g_client = _auth(profile)
    return [item.__dict__ for item in g_client.items.list()]
# The following is a list of functions that need to be incorporated in the
# glance module. This list should be updated as functions are added.
# image-download Download a specific image.
# member-create Share a specific image with a tenant.
# member-delete Remove a shared image from a tenant.
# member-list Describe sharing permissions by image or tenant.
|
# -*- coding: utf-8 -*-
'''
Module for handling openstack glance calls.
:optdepends: - glanceclient Python adapter
:configuration: This module is not usable until the following are specified
either in a pillar or in the minion's config file::
keystone.user: admin
keystone.password: <PASSWORD>
keystone.tenant: admin
keystone.insecure: False #(optional)
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
If configuration for multiple openstack accounts is required, they can be
set up as different configuration profiles:
For example::
openstack1:
keystone.user: admin
keystone.password: <PASSWORD>
keystone.tenant: admin
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
openstack2:
keystone.user: admin
keystone.password: <PASSWORD>
keystone.tenant: admin
keystone.auth_url: 'http://127.0.0.2:5000/v2.0/'
With this configuration in place, any of the glance functions can
make use of a configuration profile by declaring it explicitly.
For example::
salt '*' glance.image_list profile=openstack1
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import re
# Import salt libs
from salt.exceptions import (
SaltInvocationError
)
from salt.ext import six
# pylint: disable=import-error
HAS_GLANCE = False
try:
from glanceclient import client
from glanceclient import exc
HAS_GLANCE = True
except ImportError:
pass
# Workaround, as the Glance API v2 requires you to
# already have a keystone session token
HAS_KEYSTONE = False
try:
from keystoneclient.v2_0 import client as kstone
#import keystoneclient.apiclient.exceptions as kstone_exc
HAS_KEYSTONE = True
except ImportError:
pass
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
import pprint
def __virtual__():
    '''
    Load this execution module only when the glanceclient python
    library could be imported on this minion.
    '''
    if not HAS_GLANCE:
        return (False, 'The glance execution module cannot be loaded: the glanceclient python library is not available.')
    return 'glance'
__opts__ = {}
def _auth(profile=None, api_version=2, **connection_args):
    '''
    Set up glance credentials, returns
    `glanceclient.client.Client`. Optional parameter
    "api_version" defaults to 2.
    Only intended to be used within glance-enabled modules

    :param profile: name of a configuration profile; its ``keystone.*``
        keys take precedence over the bare minion-level keys
    :param api_version: glance API version (the keystone admin token
        can only be used with v1)
    :param connection_args: ``connection_<key>`` overrides, consulted
        before the minion configuration
    :raises SaltInvocationError: on unusable credential combinations
    :raises NotImplementedError: when keystoneclient is unavailable
    '''
    __utils__['versions.warn_until'](
        'Aluminium',
        (
            'The glance module has been deprecated and will be removed in {version}. '
            'Please update to using the glanceng module'
        ),
    )
    if profile:
        prefix = profile + ":keystone."
    else:
        prefix = "keystone."
    def get(key, default=None):
        '''
        Checks connection_args, then salt-minion config,
        falls back to specified default value.
        '''
        return connection_args.get('connection_' + key,
            __salt__['config.get'](prefix + key, default))
    user = get('user', 'admin')
    password = get('password', None)
    tenant = get('tenant', 'admin')
    tenant_id = get('tenant_id')
    auth_url = get('auth_url', 'http://127.0.0.1:35357/v2.0')
    insecure = get('insecure', False)
    admin_token = get('token')
    region = get('region')
    ks_endpoint = get('endpoint', 'http://127.0.0.1:9292/')
    g_endpoint_url = __salt__['keystone.endpoint_get']('glance', profile)
    # The trailing 'v2' causes URLs like this one:
    # http://127.0.0.1:9292/v2/v1/images
    g_endpoint_url = re.sub('/v2', '', g_endpoint_url['internalurl'])
    if admin_token and api_version != 1 and not password:
        # If we had a password we could just
        # ignore the admin-token and move on...
        raise SaltInvocationError('Only can use keystone admin token ' +
            'with Glance API v1')
    elif password:
        # Can't use the admin-token anyway
        kwargs = {'username': user,
                  'password': password,
                  'tenant_id': tenant_id,
                  'auth_url': auth_url,
                  'endpoint_url': g_endpoint_url,
                  'region_name': region,
                  'tenant_name': tenant}
        # 'insecure' keyword not supported by all v2.0 keystone clients
        # this ensures it's only passed in when defined
        if insecure:
            kwargs['insecure'] = True
    elif api_version == 1 and admin_token:
        kwargs = {'token': admin_token,
                  'auth_url': auth_url,
                  'endpoint_url': g_endpoint_url}
    else:
        raise SaltInvocationError('No credentials to authenticate with.')
    if HAS_KEYSTONE:
        # Glance API v2 needs a keystone session token first,
        # so authenticate against keystone before building the client.
        log.debug(
            'Calling keystoneclient.v2_0.client.Client(%s, **%s)',
            ks_endpoint, kwargs
        )
        keystone = kstone.Client(**kwargs)
        kwargs['token'] = keystone.get_token(keystone.session)
        # This doesn't realy prevent the password to show up
        # in the minion log as keystoneclient.session is
        # logging it anyway when in debug-mode
        kwargs.pop('password')
        log.debug(
            'Calling glanceclient.client.Client(%s, %s, **%s)',
            api_version,
            g_endpoint_url,
            kwargs
        )
        # may raise exc.HTTPUnauthorized, exc.HTTPNotFound
        # but we deal with those elsewhere
        return client.Client(api_version, g_endpoint_url, **kwargs)
    else:
        raise NotImplementedError(
            "Can't retrieve a auth_token without keystone")
def _add_image(collection, image):
'''
Add image to given dictionary
'''
image_prep = {
'id': image.id,
'name': image.name,
'created_at': image.created_at,
'file': image.file,
'min_disk': image.min_disk,
'min_ram': image.min_ram,
'owner': image.owner,
'protected': image.protected,
'status': image.status,
'tags': image.tags,
'updated_at': image.updated_at,
'visibility': image.visibility,
}
# Those cause AttributeErrors in Icehouse' glanceclient
for attr in ['container_format', 'disk_format', 'size']:
if attr in image:
image_prep[attr] = image[attr]
if type(collection) is dict:
collection[image.name] = image_prep
elif type(collection) is list:
collection.append(image_prep)
else:
msg = '"collection" is {0}'.format(type(collection)) +\
'instead of dict or list.'
log.error(msg)
raise TypeError(msg)
return collection
def image_create(name,
                 location=None,
                 profile=None,
                 visibility=None,
                 container_format='bare',
                 disk_format='raw',
                 protected=None,):
    '''
    Create an image (glance image-create)

    CLI Example, old format:
    .. code-block:: bash
        salt '*' glance.image_create name=f16-jeos \\
            disk_format=qcow2 container_format=ovf
    CLI Example, new format resembling Glance API v2:
    .. code-block:: bash
        salt '*' glance.image_create name=f16-jeos visibility=public \\
            disk_format=qcow2 container_format=ovf
    The parameter 'visibility' defaults to 'public' if not specified.
    '''
    # valid options for "visibility":
    v_list = ['public', 'private']
    # valid options for "container_format":
    cf_list = ['ami', 'ari', 'aki', 'bare', 'ovf']
    # valid options for "disk_format":
    df_list = ['ami', 'ari', 'aki', 'vhd', 'vmdk',
               'raw', 'qcow2', 'vdi', 'iso']
    kwargs = {'copy_from': location}
    # translate v2-style "visibility" into the v1 "is_public" flag
    if visibility is None:
        kwargs['is_public'] = True
    else:
        if visibility not in v_list:
            raise SaltInvocationError('"visibility" needs to be one ' +
                'of the following: {0}'.format(', '.join(v_list)))
        kwargs['is_public'] = (visibility == 'public')
    if container_format not in cf_list:
        raise SaltInvocationError('"container_format" needs to be ' +
            'one of the following: {0}'.format(', '.join(cf_list)))
    kwargs['container_format'] = container_format
    if disk_format not in df_list:
        raise SaltInvocationError('"disk_format" needs to be one ' +
            'of the following: {0}'.format(', '.join(df_list)))
    kwargs['disk_format'] = disk_format
    if protected is not None:
        kwargs['protected'] = protected
    # Icehouse's glanceclient doesn't have add_location() and
    # glanceclient.v2 doesn't implement Client.images.create()
    # in a usable fashion. Thus we have to use v1 for now.
    g_client = _auth(profile, api_version=1)
    image = g_client.images.create(name=name, **kwargs)
    return image_show(image.id, profile=profile)
def image_delete(id=None, name=None, profile=None):  # pylint: disable=C0103
    '''
    Delete an image (glance image-delete)

    :param id: ID of the image to delete
    :param name: name of the image to delete; resolved to an ID by
        scanning the image list (if several images share the name,
        the last match wins)
    :param profile: authentication profile to use
    :return: dict with a boolean ``result`` key and a ``comment``

    CLI Examples:
    .. code-block:: bash
        salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_delete name=f16-jeos
    '''
    g_client = _auth(profile)
    image = {'id': False, 'name': None}
    if name:
        for image in g_client.images.list():
            if image.name == name:
                id = image.id  # pylint: disable=C0103
                # deliberately keep scanning: a later image with the
                # same name overwrites the id ("last match wins")
                continue
        if not id:
            return {
                'result': False,
                'comment':
                    'Unable to resolve image id '
                    'for name {0}'.format(name)
            }
    elif not name:
        # no name given; take it from the last image seen (or None)
        name = image['name']
    try:
        g_client.images.delete(id)
    except exc.HTTPNotFound:
        return {
            'result': False,
            'comment': 'No image with ID {0}'.format(id)
        }
    except exc.HTTPForbidden as forbidden:
        log.error(six.text_type(forbidden))
        return {
            'result': False,
            'comment': six.text_type(forbidden)
        }
    return {
        'result': True,
        'comment': 'Deleted image \'{0}\' ({1}).'.format(name, id),
    }
def image_show(id=None, name=None, profile=None):  # pylint: disable=C0103
    '''
    Return details about a specific image (glance image-show)

    :param id: ID of the image to show
    :param name: name of the image; resolved to an ID by scanning the
        image list (last match wins for duplicated names)
    :param profile: authentication profile to use
    :return: dict of image properties restricted to the keys of the
        "image" schema, or a dict with ``result: False`` on failure

    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_show
    '''
    g_client = _auth(profile)
    ret = {}
    if name:
        for image in g_client.images.list():
            if image.name == name:
                id = image.id  # pylint: disable=C0103
                continue
        if not id:
            return {
                'result': False,
                'comment':
                    'Unable to resolve image ID '
                    'for name \'{0}\''.format(name)
            }
    try:
        image = g_client.images.get(id)
    except exc.HTTPNotFound:
        return {
            'result': False,
            'comment': 'No image with ID {0}'.format(id)
        }
    pformat = pprint.PrettyPrinter(indent=4).pformat
    log.debug('Properties of image {0}:\n{1}'.format(
        image.name, pformat(image)))
    # only expose the properties that are part of the "image" schema
    schema = image_schema(profile=profile)
    if len(schema.keys()) == 1:
        schema = schema['image']
    for key in schema:
        if key in image:
            ret[key] = image[key]
    return ret
def image_list(id=None, profile=None, name=None):  # pylint: disable=C0103
    '''
    Return a list of available images (glance image-list)

    :param id: if given, return only the image with this ID
        (as a single-element list)
    :param profile: authentication profile to use
    :param name: if given, return only images with this name; on
        pre-Boron releases a duplicate name yields an error dict
        with ``result: False`` instead of a list

    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_list
    '''
    g_client = _auth(profile)
    ret = []
    for image in g_client.images.list():
        if id is None and name is None:
            _add_image(ret, image)
        else:
            if id is not None and id == image.id:
                _add_image(ret, image)
                # IDs are unique; stop as soon as the match is found
                return ret
            if name == image.name:
                # Bug fix: "ret" holds image dicts, so the old check
                # ``name in ret`` compared a string to dicts and could
                # never be true; compare against the collected names.
                if (any(img.get('name') == name for img in ret) and
                        __salt__['salt_version.less_than']('Boron')):
                    # Not really worth an exception
                    return {
                        'result': False,
                        'comment':
                            'More than one image with '
                            'name "{0}"'.format(name)
                    }
                _add_image(ret, image)
    log.debug('Returning images: {0}'.format(ret))
    return ret
def image_schema(profile=None):
    '''
    Return the names and descriptions of the properties that make up
    the "image" schema on this profile's glance instance.

    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_schema
    '''
    # Thin wrapper around the generic schema fetcher.
    return schema_get('image', profile)
def image_update(id=None, name=None, profile=None, **kwargs):  # pylint: disable=C0103
    '''
    Update properties of given image.
    Known to work for:
    - min_ram (in MB)
    - protected (bool)
    - visibility ('public' or 'private')

    :param id: ID of the image to update
    :param name: name of the image to update (ignored when *id* is set)
    :param profile: authentication profile to use
    :param kwargs: property name/value pairs to set; keys starting
        with an underscore (internal salt arguments) are skipped
    :raises SaltInvocationError: when neither *id* nor *name* is given

    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_update name=f16-jeos
    '''
    if id:
        image = image_show(id=id, profile=profile)
        if 'result' in image and not image['result']:
            # image_show() reported an error; pass it through unchanged
            return image
        elif len(image) == 1:
            # Bug fix: dict.values() is a non-indexable view on
            # Python 3; materialize it before subscripting.
            image = list(image.values())[0]
    elif name:
        img_list = image_list(name=name, profile=profile)
        # Bug fix: "img_list is dict" compared the object to the dict
        # *type* and was always False; use isinstance() instead.
        if isinstance(img_list, dict) and 'result' in img_list:
            return img_list
        elif len(img_list) == 0:
            return {
                'result': False,
                'comment':
                    'No image with name \'{0}\' '
                    'found.'.format(name)
            }
        elif len(img_list) == 1:
            try:
                image = img_list[0]
            except KeyError:
                image = img_list[name]
    else:
        raise SaltInvocationError('Either id or name of the image must be given.')
    log.debug('Found image:\n{0}'.format(image))
    to_update = {}
    for key, value in kwargs.items():
        if key.startswith('_'):
            # internal salt argument, not an image property
            continue
        if key not in image or image[key] != value:
            log.debug('add <{0}={1}> to to_update'.format(key, value))
            to_update[key] = value
    g_client = _auth(profile)
    updated = g_client.images.update(image['id'], **to_update)
    return updated
def schema_get(name, profile=None):
    '''
    Fetch a glance schema by name and return it as
    ``{name: {property_name: description}}``.

    Known valid names of schemas are:
    - image
    - images
    - member
    - members

    CLI Example:
    .. code-block:: bash
        salt '*' glance.schema_get name=f16-jeos
    '''
    g_client = _auth(profile)
    schema_props = dict(
        (prop.name, prop.description)
        for prop in g_client.schemas.get(name).properties
    )
    log.debug('Properties of schema {0}:\n{1}'.format(
        name, pprint.PrettyPrinter(indent=4).pformat(schema_props)))
    return {name: schema_props}
def _item_list(profile=None):
    '''
    Template for writing list functions.
    Return a list of available items (glance items-list), one
    ``__dict__`` snapshot per item.

    CLI Example:
    .. code-block:: bash
        salt '*' glance.item_list
    '''
    g_client = _auth(profile)
    return [item.__dict__ for item in g_client.items.list()]
# The following is a list of functions that need to be incorporated in the
# glance module. This list should be updated as functions are added.
# image-download Download a specific image.
# member-create Share a specific image with a tenant.
# member-delete Remove a shared image from a tenant.
# member-list Describe sharing permissions by image or tenant.
|
en
| 0.629207
|
# -*- coding: utf-8 -*- Module for handling openstack glance calls. :optdepends: - glanceclient Python adapter :configuration: This module is not usable until the following are specified either in a pillar or in the minion's config file:: keystone.user: admin keystone.password: <PASSWORD> keystone.tenant: admin keystone.insecure: False #(optional) keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' If configuration for multiple openstack accounts is required, they can be set up as different configuration profiles: For example:: openstack1: keystone.user: admin keystone.password: <PASSWORD> keystone.tenant: admin keystone.auth_url: 'http://127.0.0.1:5000/v2.0/' openstack2: keystone.user: admin keystone.password: <PASSWORD> keystone.tenant: admin keystone.auth_url: 'http://127.0.0.2:5000/v2.0/' With this configuration in place, any of the glance functions can make use of a configuration profile by declaring it explicitly. For example:: salt '*' glance.image_list profile=openstack1 # Import Python libs # Import salt libs # pylint: disable=import-error # Workaround, as the Glance API v2 requires you to # already have a keystone session token #import keystoneclient.apiclient.exceptions as kstone_exc Only load this module if glance is installed on this minion. Set up glance credentials, returns `glanceclient.client.Client`. Optional parameter "api_version" defaults to 2. Only intended to be used within glance-enabled modules Checks connection_args, then salt-minion config, falls back to specified default value. # The trailing 'v2' causes URLs like thise one: # http://127.0.0.1:9292/v2/v1/images # If we had a password we could just # ignore the admin-token and move on... 
# Can't use the admin-token anyway # 'insecure' keyword not supported by all v2.0 keystone clients # this ensures it's only passed in when defined # This doesn't realy prevent the password to show up # in the minion log as keystoneclient.session is # logging it anyway when in debug-mode # may raise exc.HTTPUnauthorized, exc.HTTPNotFound # but we deal with those elsewhere Add image to given dictionary # Those cause AttributeErrors in Icehouse' glanceclient Create an image (glance image-create) CLI Example, old format: .. code-block:: bash salt '*' glance.image_create name=f16-jeos \\ disk_format=qcow2 container_format=ovf CLI Example, new format resembling Glance API v2: .. code-block:: bash salt '*' glance.image_create name=f16-jeos visibility=public \\ disk_format=qcow2 container_format=ovf The parameter 'visibility' defaults to 'public' if not specified. # valid options for "visibility": # valid options for "container_format": # valid options for "disk_format": # Icehouse's glanceclient doesn't have add_location() and # glanceclient.v2 doesn't implement Client.images.create() # in a usable fashion. Thus we have to use v1 for now. # pylint: disable=C0103 Delete an image (glance image-delete) CLI Examples: .. code-block:: bash salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_delete name=f16-jeos # pylint: disable=C0103 # pylint: disable=C0103 Return details about a specific image (glance image-show) CLI Example: .. code-block:: bash salt '*' glance.image_show # pylint: disable=C0103 # pylint: disable=C0103 Return a list of available images (glance image-list) CLI Example: .. code-block:: bash salt '*' glance.image_list # Not really worth an exception Returns names and descriptions of the schema "image"'s properties for this profile's instance of glance CLI Example: .. 
code-block:: bash salt '*' glance.image_schema # pylint: disable=C0103 Update properties of given image. Known to work for: - min_ram (in MB) - protected (bool) - visibility ('public' or 'private') CLI Example: .. code-block:: bash salt '*' glance.image_update id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df salt '*' glance.image_update name=f16-jeos Known valid names of schemas are: - image - images - member - members CLI Example: .. code-block:: bash salt '*' glance.schema_get name=f16-jeos Template for writing list functions Return a list of available items (glance items-list) CLI Example: .. code-block:: bash salt '*' glance.item_list #ret[item.name] = { # 'name': item.name, # } # The following is a list of functions that need to be incorporated in the # glance module. This list should be updated as functions are added. # image-download Download a specific image. # member-create Share a specific image with a tenant. # member-delete Remove a shared image from a tenant. # member-list Describe sharing permissions by image or tenant.
| 1.981174
| 2
|
tests/unit/stationsapi30/test_stations_parser.py
|
ChuckVanHoff/pyowm
| 1
|
6627842
|
<filename>tests/unit/stationsapi30/test_stations_parser.py
import unittest
import json
from pyowm.stationsapi30.station_parser import StationParser
from pyowm.stationsapi30.station import Station
from pyowm.exceptions import parse_response_error
class TestStationsParser(unittest.TestCase):
    """Unit tests for StationParser JSON/dict parsing."""

    test_station_json = '''{"ID": "583436dd9643a9000196b8d6",
        "created_at": "2016-11-22T12:15:25.967Z",
        "updated_at": "2016-11-22T12:15:25.967Z",
        "external_id": "SF_TEST001",
        "name": "San Francisco Test Station",
        "longitude": -122.43,
        "latitude": 37.76,
        "altitude": 150,
        "rank": 0}'''

    test_station = Station("583436dd9643a9000196b8d6",
                           "2016-11-22T12:15:25.967Z",
                           "2016-11-22T12:15:25.967Z",
                           "SF_TEST001",
                           "San Francisco Test Station",
                           -122.43, 37.76, 150, 0)

    def _check_parsed_station(self, result):
        # Shared assertions: result must be a Station equal to the fixture.
        self.assertTrue(isinstance(result, Station))
        expected = self.test_station
        for attr in ('id', 'created_at', 'updated_at', 'name',
                     'lon', 'lat', 'alt', 'rank'):
            self.assertEqual(getattr(expected, attr), getattr(result, attr))

    def test_parse_JSON(self):
        result = StationParser().parse_JSON(self.test_station_json)
        self._check_parsed_station(result)

    def test_parse_JSON_fails_with_none_input(self):
        with self.assertRaises(parse_response_error.ParseResponseError):
            StationParser().parse_JSON(None)

    def test_parse_dict(self):
        payload = json.loads(self.test_station_json)
        self._check_parsed_station(StationParser().parse_dict(payload))

    def test_parse_dict_fails_with_wrong_input(self):
        with self.assertRaises(AssertionError):
            StationParser().parse_dict(1234)
|
<filename>tests/unit/stationsapi30/test_stations_parser.py
import unittest
import json
from pyowm.stationsapi30.station_parser import StationParser
from pyowm.stationsapi30.station import Station
from pyowm.exceptions import parse_response_error
class TestStationsParser(unittest.TestCase):
    """Unit tests for StationParser JSON/dict parsing."""

    # Raw payload as returned by the OWM stations API; note the
    # upper-case "ID" key used by the remote service.
    test_station_json = '''{"ID": "583436dd9643a9000196b8d6",
        "created_at": "2016-11-22T12:15:25.967Z",
        "updated_at": "2016-11-22T12:15:25.967Z",
        "external_id": "SF_TEST001",
        "name": "San Francisco Test Station",
        "longitude": -122.43,
        "latitude": 37.76,
        "altitude": 150,
        "rank": 0}'''

    # Expected parse result, built with the same field values.
    test_station = Station("583436dd9643a9000196b8d6",
                           "2016-11-22T12:15:25.967Z",
                           "2016-11-22T12:15:25.967Z",
                           "SF_TEST001",
                           "San Francisco Test Station",
                           -122.43, 37.76, 150, 0)

    def test_parse_JSON(self):
        """parse_JSON must yield a Station matching every fixture field."""
        instance = StationParser()
        result = instance.parse_JSON(self.test_station_json)
        self.assertTrue(isinstance(result, Station))
        self.assertEqual(self.test_station.id, result.id)
        self.assertEqual(self.test_station.created_at, result.created_at)
        self.assertEqual(self.test_station.updated_at, result.updated_at)
        self.assertEqual(self.test_station.name, result.name)
        self.assertEqual(self.test_station.lon, result.lon)
        self.assertEqual(self.test_station.lat, result.lat)
        self.assertEqual(self.test_station.alt, result.alt)
        self.assertEqual(self.test_station.rank, result.rank)

    def test_parse_JSON_fails_with_none_input(self):
        """None input must raise ParseResponseError."""
        instance = StationParser()
        with self.assertRaises(parse_response_error.ParseResponseError):
            instance.parse_JSON(None)

    def test_parse_dict(self):
        """parse_dict must accept an already-decoded JSON dict."""
        data_dict = json.loads(self.test_station_json)
        instance = StationParser()
        result = instance.parse_dict(data_dict)
        self.assertTrue(isinstance(result, Station))
        self.assertEqual(self.test_station.id, result.id)
        self.assertEqual(self.test_station.created_at, result.created_at)
        self.assertEqual(self.test_station.updated_at, result.updated_at)
        self.assertEqual(self.test_station.name, result.name)
        self.assertEqual(self.test_station.lon, result.lon)
        self.assertEqual(self.test_station.lat, result.lat)
        self.assertEqual(self.test_station.alt, result.alt)
        self.assertEqual(self.test_station.rank, result.rank)

    def test_parse_dict_fails_with_wrong_input(self):
        """Non-dict input must raise AssertionError."""
        instance = StationParser()
        with self.assertRaises(AssertionError):
            instance.parse_dict(1234)
|
en
| 0.234984
|
{"ID": "583436dd9643a9000196b8d6", "created_at": "2016-11-22T12:15:25.967Z", "updated_at": "2016-11-22T12:15:25.967Z", "external_id": "SF_TEST001", "name": "San Francisco Test Station", "longitude": -122.43, "latitude": 37.76, "altitude": 150, "rank": 0}
| 2.845214
| 3
|
main.py
|
yfiua/unikob-comment-classifier
| 1
|
6627843
|
<reponame>yfiua/unikob-comment-classifier
import nltk
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import *
from sklearn.metrics import *
from sklearn.externals import joblib
def get_features(document, word_features):
    """Build the feature vector for *document*: one boolean per word in
    *word_features* (membership test) plus the document length."""
    present = set(document)
    features = [word in present for word in word_features]
    # number of words in document
    features.append(len(document))
    # TODO: add more features
    return features
# main
def main():
    """Train a random-forest comment classifier and print its ROC-AUC.

    Reads native comments and non-comment page areas from CSV, builds
    bag-of-words features over the 2000 most frequent words, trains a
    RandomForest on a random split and evaluates on the held-out part.
    """
    # params
    test_size = 0.2
    n_trees = 128

    # read data
    df_c = pd.read_csv('data/native_comments.csv', header=None, encoding='utf8')
    df_nc = pd.read_csv('data/area_without_comments.csv', encoding='utf8')

    comments = df_c[1].values
    non_comments = df_nc['1'].values

    # tokenizer that removes punctuation
    tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')

    tokenized_c = [tokenizer.tokenize(i) for i in comments]
    tokenized_nc = [tokenizer.tokenize(i) for i in non_comments]

    # domain specific stop words
    stop_words = np.array(['share', 'shares', 'like', 'report', 'sign in', 'register', 'sign up', 'facebook',
                           'twitter', 'tumblr', 'reddit', 'login', 'reply', 'replies', 'flag', 'minutes ago',
                           'hours ago', 'days ago', 'months ago', 'likes', 'sort By', 'newest', 'oldest', 'follow',
                           'view all comments', 'recommendations', 'loading comments'])

    # split into training and test sets
    c_train, c_test = train_test_split(tokenized_c, test_size=test_size)
    nc_train, nc_test = train_test_split(tokenized_nc, test_size=test_size)

    # replicate the generic English stopwords (2**10 copies) so they
    # dominate the frequency ranking and land in word_features
    freq_words = np.array(nltk.corpus.stopwords.words('english'))
    for i in range(10):
        freq_words = np.concatenate([freq_words, freq_words])

    # concatenate all words in the training set
    all_words = np.concatenate([np.concatenate(c_train), freq_words])
    # remove domain specific stop words
    all_words = np.setdiff1d(all_words, stop_words)

    np.set_printoptions(threshold=np.inf)
    word_freq = nltk.FreqDist(w.lower() for w in all_words)
    word_features = list(word_freq)[:2000]

    # get features
    X_train = [get_features(i, word_features) for i in c_train + nc_train]
    X_test = [get_features(i, word_features) for i in c_test + nc_test]

    # ground truth
    y_train = [1] * len(c_train) + [0] * len(nc_train)
    y_test = [1] * len(c_test) + [0] * len(nc_test)

    # classifier
    clf = RandomForestClassifier(n_estimators=n_trees)
    clf.fit(X_train, y_train)

    v_pred = clf.predict_proba(X_test)[:, 1]
    auc = roc_auc_score(y_test, v_pred)
    # Bug fix: "print auc" is Python-2-only syntax (SyntaxError on
    # Python 3); print() works on both interpreters.
    print(auc)


if __name__ == '__main__':
    main()
|
import nltk
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import *
from sklearn.metrics import *
from sklearn.externals import joblib
def get_features(document, word_features):
    """Return the bag-of-words feature vector for *document*.

    One boolean flag per entry of *word_features* (does the document
    contain that word?) followed by the total word count.
    """
    vocabulary = set(document)
    vector = []
    for feature_word in word_features:
        vector.append(feature_word in vocabulary)
    # number of words in document
    vector.append(len(document))
    # TODO: add more features
    return vector
# main
def main():
    """Train and evaluate a comment vs. non-comment text classifier.

    Pipeline:
      1. Read labelled text from two local CSV files.
      2. Tokenize with a punctuation-stripping regexp tokenizer.
      3. Build a vocabulary of the 2000 most frequent lower-cased words
         (minus domain-specific stop words) from the training comments.
      4. Vectorize documents via ``get_features`` and fit a random forest.
      5. Print the ROC-AUC score on the held-out split.

    Side effects: reads ``data/native_comments.csv`` and
    ``data/area_without_comments.csv``; prints the AUC to stdout.
    """
    # params
    test_size = 0.2   # fraction of each class held out for evaluation
    n_trees = 128     # random-forest size
    # read data
    df_c = pd.read_csv('data/native_comments.csv', header=None, encoding='utf8')
    df_nc = pd.read_csv('data/area_without_comments.csv', encoding='utf8')
    comments = df_c[1].values
    non_comments = df_nc['1'].values
    # tokenizer that removes punctuation
    tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
    tokenized_c = [tokenizer.tokenize(i) for i in comments]
    tokenized_nc = [tokenizer.tokenize(i) for i in non_comments]
    # domain specific stop words
    stop_words = np.array(['share', 'shares', 'like', 'report', 'sign in', 'register', 'sign up', 'facebook',
                           'twitter', 'tumblr', 'reddit', 'login', 'reply', 'replies', 'flag', 'minutes ago',
                           'hours ago', 'days ago', 'months ago', 'likes', 'sort By', 'newest', 'oldest', 'follow',
                           'view all comments', 'recommendations', 'loading comments'])
    # split into training and test sets
    c_train, c_test = train_test_split(tokenized_c, test_size=test_size)
    nc_train, nc_test = train_test_split(tokenized_nc, test_size=test_size)
    # Duplicate the English stop-word list 10 times (2**10 copies) so those
    # words dominate the frequency ranking below.
    # NOTE(review): presumably intentional weighting -- confirm with author.
    freq_words = np.array(nltk.corpus.stopwords.words('english'))
    for _ in range(10):
        freq_words = np.concatenate([freq_words, freq_words])
    # concatenate all words in the training set with the weighted stop words
    all_words = np.concatenate([np.concatenate(c_train), freq_words])
    # remove domain specific stop words; multi-word entries such as
    # 'sign in' can never match a single token, so they are effectively inert
    all_words = np.setdiff1d(all_words, stop_words)
    np.set_printoptions(threshold=np.inf)
    # vocabulary = 2000 most frequent lower-cased words
    word_freq = nltk.FreqDist(w.lower() for w in all_words)
    word_features = list(word_freq)[:2000]
    # get features
    X_train = [get_features(i, word_features) for i in c_train + nc_train]
    X_test = [get_features(i, word_features) for i in c_test + nc_test]
    # ground truth: 1 = comment, 0 = non-comment
    y_train = [1] * len(c_train) + [0] * len(nc_train)
    y_test = [1] * len(c_test) + [0] * len(nc_test)
    # classifier
    clf = RandomForestClassifier(n_estimators=n_trees)
    clf.fit(X_train, y_train)
    v_pred = clf.predict_proba(X_test)[:, 1]
    auc = roc_auc_score(y_test, v_pred)
    # Fix: use the print() function -- the original 'print auc' statement is
    # a SyntaxError under Python 3; for a single value the output is
    # identical under Python 2.
    print(auc)
    #joblib.dump(clf, 'comment_clf.pkl')
    #joblib.dump(word_features, 'word_features.pkl')
if __name__ == '__main__':
    # Entry point: train and evaluate the classifier when run as a script.
    main()
|
en
| 0.655994
|
# number of words in document # TODO: add more features # main # params # read data # removes punctuation # tokenize #tokenized_c = [nltk.word_tokenize(i) for i in comments] #tokenized_nc = [nltk.word_tokenize(i) for i in non_comments] # domain specific stop words # split into training and test sets # concatenate all words in the training set #all_words = np.concatenate(map(np.concatenate, [c_train, nc_train])) #remove domain specific stop words #joblib.dump(word_features, 'word_features.pkl') # get features # ground truth #classifier #joblib.dump(clf, 'comment_clf.pkl') #joblib.dump(word_features, 'word_features.pkl') # init
| 2.923518
| 3
|
Source/images_rc.py
|
jonapachanga/mp3TagEditor
| 0
|
6627844
|
<reponame>jonapachanga/mp3TagEditor
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x0c\x3e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x13\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x4f\x70\x74\x69\x63\x61\x6c\x20\
\x44\x72\x69\x76\x65\x3e\x67\xba\x0c\x00\x00\x0b\x9c\x49\x44\x41\
\x54\x68\xde\xed\x59\xe9\x73\x53\xd7\x15\x3f\xef\x69\x97\x2d\x4b\
\x5e\xb0\xbc\x60\x3b\xd8\x18\x0c\x18\x12\xa0\xcd\x4c\xd3\x0c\x24\
\x21\xcd\x52\xda\x4c\x92\x1a\x27\x40\x20\x26\x09\xb4\x99\x34\xfd\
\xd0\x99\x7e\xe8\x4c\xbf\x24\x4c\xa7\xfd\x07\x9a\x99\x92\x2f\xe9\
\x24\x2d\x60\x53\xb2\xe1\x84\x7d\x4c\xa6\x6c\x06\x12\x84\x37\xd9\
\x50\x23\x2f\x04\x4b\x78\x91\xa5\xa7\xed\xad\x3d\xe7\x4a\x4f\x96\
\xb1\x62\x3b\x33\x49\xeb\x74\xf2\xe0\xf0\xc4\x7b\x4f\xf7\x9e\xdf\
\xf9\xfd\xce\xb9\xe7\x3e\xf1\x0d\x0d\x0d\xf0\x5d\x36\x1e\xbe\xe3\
\xc7\xf7\x00\xfe\xaf\x00\x34\xbd\xbc\xe3\xd9\x17\x76\x3e\x7f\x66\
\x47\xd3\xb6\xd0\x8e\xa6\xad\xa1\xed\x3b\x9f\x6b\xa3\x6b\x0b\x1e\
\xc0\xf6\xed\xdb\x5d\xdb\x76\x3e\xf7\xa9\xd5\x62\x7d\x6f\xc9\x92\
\xa5\x1b\xd6\xd4\xdf\xe7\xa8\x5f\xb1\xda\x51\xe2\x2e\xdb\xc8\x71\
\xdc\xbb\x5b\x77\x34\x7e\x4a\xcf\x2c\x48\x00\x9b\x5f\xdb\x6c\x55\
\x40\x3c\x55\x51\x5e\xf5\xd8\x7d\xf7\xae\xb3\xb9\x8b\x8b\xc1\x6c\
\x36\x81\xc5\x6a\x85\xc5\xe5\x8b\xa1\x7e\x45\xbd\x3d\x37\xc7\xf1\
\x13\x49\x4b\x9c\xa2\x67\x17\x1c\x80\x9c\x71\x4b\x73\x79\x69\xd9\
\xea\xda\x9a\x5a\x5e\x51\x64\x38\x7f\xe1\x1c\xbc\xf9\xe6\x9b\xf0\
\xfa\x6f\x5e\x87\xbd\x7b\xf7\xc2\xc5\x4b\x97\xa0\xae\x76\xb9\x21\
\xd7\x66\xaf\xb7\x8f\x59\x9a\x17\x14\x80\xe7\xb6\x35\xfc\xcc\xe9\
\x74\x3e\xba\x6a\xe5\x1a\x93\xa2\xca\x70\xf8\xfd\xf7\xa1\xf9\x40\
\x0b\xd8\xec\x36\xa8\x5d\x56\x0b\x66\xab\x19\x5a\x5a\x5a\xe0\xe3\
\xd6\x23\x50\x57\xb7\xd2\x6c\x31\x5b\x1f\xa5\xef\xcc\x77\xfc\x43\
\x2d\x2d\xdc\xb7\x06\xa0\xb1\xb1\xd1\x2c\x49\xd2\xbe\x55\x75\xab\
\x6d\xf4\xff\xeb\x7d\xd7\xe1\xdc\xd9\xb3\x50\x5b\x57\x0b\x79\xce\
\x3c\x30\x9b\x4c\xe0\xc8\xcd\x85\xaa\xaa\x0a\x38\x7b\xf6\x1c\x0c\
\x0e\x0e\x41\x89\xbb\xc4\x26\x26\xc4\x7d\xf4\xdd\xf9\xcc\xf1\xbb\
\x77\x2e\xb9\xbf\x35\x00\xbc\x55\xfb\xc5\x9a\xfa\x7b\x8b\x90\x01\
\x88\x44\x04\xd8\x7f\x60\x3f\x94\x2f\x2e\x07\x93\xc9\x08\x06\x03\
\x8f\x66\x60\x66\x44\x20\x85\x85\x05\x70\xe8\x50\x33\x9e\x0b\x89\
\x1d\x97\x08\xb1\xa7\xe6\x1a\x3f\x77\xf3\x6b\x56\x49\x35\xae\xfd\
\xd6\x00\x54\x57\x2e\xfd\xfd\xaa\x95\xab\x4d\x58\x65\xe0\x93\x4f\
\x3f\x01\x55\x55\xc0\xe5\x72\xa6\x1d\xe7\x53\x66\xe0\x79\xc8\xc9\
\xb1\x43\x34\x16\x83\x53\xa7\x4e\xc2\xa2\xc2\x62\x9b\x14\x97\x9a\
\xe6\x1a\xff\x9d\x5d\x0f\x25\x38\x9e\x5b\x5b\xfa\xf4\x9f\xf2\xbe\
\x71\x00\xbb\x76\x6d\xad\x1a\x18\xf0\xad\x4c\x88\x09\x08\x85\x42\
\x70\xe5\xca\x15\x16\x7d\xdd\x79\x03\x6f\x48\xb2\x80\xce\xf3\xa9\
\x6b\xf9\x05\xf9\x70\xe9\xf2\x65\x58\xb4\xa8\x08\x2c\x26\xf3\xa6\
\xcd\x9b\x67\xaf\x48\x0d\x0d\x5b\x34\x1e\xb4\xb0\x51\x0a\x3d\xf0\
\x8d\x02\xd8\xbd\x7b\xf7\x12\x49\x03\x4f\x41\x41\x81\x21\x1a\x8d\
\x80\xb7\xd7\x0b\x98\x9d\xcc\x92\x4e\x67\x82\x48\x32\xc0\xa3\x99\
\x8c\x46\x20\xb6\x7a\xaf\x5f\x87\xdc\x3c\x0c\xaa\x51\x79\x62\xae\
\xb9\x34\x0e\xba\x39\xce\xd0\xf0\x8d\x01\x78\xf5\xd5\x97\x6a\x38\
\xa3\xd2\xae\xc8\x4a\x5e\x69\x49\x29\x88\x89\x38\xf4\xdd\xe8\x4b\
\x4b\x87\x4f\xcb\x87\x67\x60\xf8\x4c\x43\x10\x56\x9b\x15\x7a\xba\
\xbb\xc1\xe5\x74\x59\x31\x99\x5f\xcc\x1c\x9b\x6b\x6c\x34\xdc\x3d\
\x9f\x90\x50\xcf\x83\x06\xcf\x14\x37\xbe\x91\x3b\x6f\x00\x2f\xed\
\xd9\xb9\xf1\x95\x3d\x4d\x97\x5f\xde\xf3\xa2\xa6\xdb\x2b\xbf\x6c\
\x52\x77\xff\x6a\xd7\xe9\xb8\x2c\x9f\x2d\x29\x2e\x2b\x0c\x4e\x06\
\xb9\xc2\xc2\x22\x88\x27\x12\x30\x3c\x34\x0c\x79\x18\xd1\xb4\x7c\
\x52\x09\xcc\xeb\x2c\xa0\xf1\xcc\x10\x00\x2e\x6e\x43\xf8\xbc\xcb\
\xe5\xc2\xe8\xaa\x8f\x6f\xd8\xb0\xc1\xa2\xcf\x5b\x2e\xd4\x6c\xbc\
\xbb\x6c\x8e\x9f\xf8\x73\x14\x34\xcd\x63\x89\x49\x2b\xe7\x05\xa0\
\xa9\x69\x6b\x35\xa8\xd0\xba\xb4\x66\xd9\xfa\xf5\x6b\xef\x07\x32\
\x93\x91\xa2\xd6\xcb\xf9\xfd\xfe\x87\x0b\x5c\x85\xee\x9c\x9c\x1c\
\xae\xac\xa4\x1c\xc7\xd5\x20\x10\x18\x01\x51\x12\x59\xdd\xcf\x4c\
\x5e\x5d\x42\x7a\xe4\x33\x65\xa4\x6a\x0a\x4c\x06\x83\x60\xe4\xcc\
\x8a\xc9\xc6\xfd\x28\xed\x08\x6f\x5c\x9e\xb5\x6c\xf2\x70\x8b\xd3\
\xb8\x8a\x79\x01\x10\x15\xe5\x6f\xcb\x6a\xeb\xac\x76\x7b\x0e\x8c\
\x8d\x8d\x82\xc7\x73\x15\xce\x9f\x3f\x0b\x92\x2c\x61\x94\x9d\xb0\
\x62\xf9\x4a\x88\x46\x63\xf8\xd9\x01\x92\x28\xc2\xf0\xad\x5b\xe0\
\x9c\x16\xfd\x0c\xdd\xa7\x9c\x4f\x9f\x53\x66\x31\x9b\xe1\xf6\xed\
\x11\xb0\xe7\xd8\xcc\x88\x65\x71\x5a\x42\x9a\x6a\x56\xc0\xb4\x2e\
\x8b\x8f\x23\x9c\x2a\xd7\xcc\x09\xa0\x7e\x6d\x7d\x7d\x59\x69\xc5\
\x8f\x6b\xaa\x6b\x0d\x16\x8b\x05\xec\x18\xd5\xcf\xbf\xb8\x02\x8a\
\xa2\x40\x41\x81\x0b\xea\x96\xaf\x60\x52\x88\xc5\xa2\xe8\x84\x05\
\x64\x6c\x1b\x62\x08\x86\x64\x71\x37\x00\xbd\x7c\x1a\xf8\xe9\xce\
\x93\x19\x91\x05\x0a\x8e\xdd\x66\x37\xc9\xb2\x52\x9a\x4e\x58\x8d\
\x1b\xc1\x7f\x66\xac\xd2\x1a\x68\x23\x2a\xcf\x97\xcc\x0a\x00\xab\
\x83\x21\x12\x0a\x7f\x64\x36\x19\x39\x72\x98\xe8\x1f\x1b\x9b\xc0\
\x05\x2a\xc2\x1e\x32\x9b\xad\x50\xbc\xc8\x8d\xcf\x01\x84\xc2\x61\
\xb0\x58\xac\x20\xcb\x32\x08\x78\xbf\xab\xb3\x1b\x4e\x9e\x3c\x05\
\x17\x2f\xb4\xc3\xe8\x9d\x3b\xd3\xca\x27\xb3\x0c\xe7\x39\x02\x85\
\x00\xc2\x42\x08\xc7\xb0\x70\x18\x84\x7b\xa6\x18\x80\x5b\x38\xc1\
\x9a\x99\x0e\x72\x23\x78\xcf\x3d\x3b\x03\x06\xe5\x89\x45\xc5\x45\
\x15\x0e\x87\x13\x1d\xbf\x03\xe1\x70\x08\xbc\xde\x6e\x16\x7d\x8a\
\xb0\xbb\xb8\x84\x42\x01\xd4\xac\x4d\x4c\x8c\xd3\xe4\x20\xa3\xf6\
\xbd\x7d\x7d\xac\x7c\xde\xff\xc3\xfb\x61\x69\x6d\x35\xf4\xf4\xf4\
\x62\x4b\x71\x23\x8b\xe3\x1c\x33\x1e\x23\x40\xe0\xe2\x09\x11\x4c\
\x28\x25\x1c\x3f\x03\x00\x17\xc4\x93\x2b\x1b\x03\x68\xb3\x33\xa0\
\xca\xda\x6e\x94\x87\xd1\x88\x13\x87\xc3\x02\x8b\xfc\x88\xdf\x8f\
\x13\xa8\xd8\x0a\x18\x71\xf5\x2c\x84\x04\x96\xcc\x68\x34\xca\xc0\
\x99\x51\x42\x12\x32\x20\x63\x15\xaa\xac\xac\x00\x47\x5e\x2e\xd4\
\xd4\xd4\xc0\x13\x4f\x3e\x86\xfa\xf6\x63\x10\xc6\x66\xca\x87\x9b\
\xfa\x2c\x4b\x12\x98\x29\xa1\x55\x25\x9d\x03\xbc\x06\x59\x01\x70\
\x2a\xdc\x46\xf8\xb3\x03\x00\x0e\xd6\x61\x79\xc0\xbf\x1a\x08\x42\
\x98\x01\x20\x20\xd4\x1e\x90\x9c\xc8\xe1\x04\x3a\x4b\x2b\xaf\x80\
\x7d\x8f\x09\xfb\x1b\x49\x16\xb1\x14\x72\x2c\x49\x69\x81\x62\x09\
\x8a\xcc\xd4\xd5\x2d\x83\xbe\xde\xeb\x33\x9c\x67\x2c\x70\x1c\x93\
\x11\x36\x81\xc0\x19\x79\x1c\x5f\x4d\x4b\x43\xb5\xc0\x44\x36\x00\
\xaa\x6a\x19\xc1\xd3\x9c\x39\x50\xac\x60\x49\x50\x31\xe2\x21\x8c\
\x30\x39\x49\x0d\x1a\x49\x48\x45\x60\x46\xa3\x09\x22\xb8\xea\x12\
\x08\xea\x69\xa8\x61\x4b\x24\x24\x50\xb0\x3a\x61\x22\xb2\xeb\xe1\
\x50\x98\x81\xa6\xbe\x47\xcf\x1d\x92\x4c\xd2\x69\x2e\x0d\x82\xae\
\x31\x69\x28\x1a\x8d\x9d\xaf\x3b\xe2\xfb\x68\x6f\x04\x03\x69\x69\
\xb9\x6b\x2d\x88\x28\xb1\x30\x9e\x1c\x73\x56\x21\x49\x94\x20\x1e\
\x8f\x43\x44\x88\x30\xa3\xe8\x93\x84\x24\x39\xc1\x6a\x7d\x3c\x16\
\x67\xf7\x55\x04\x85\x44\x81\xc6\xee\xa3\xf3\x78\x4d\x10\x04\x08\
\x4e\x4e\x62\x7d\x9f\x44\xa0\x51\xd2\x6d\x3a\x69\xd9\x59\x07\x92\
\x61\x34\x3e\x32\x90\x76\xa4\xbe\xfe\x35\x23\x7e\x4d\xdd\xb2\x65\
\x8b\x96\xe9\xa0\x8b\x37\xb9\xb1\xa5\xf0\xcf\x0a\x00\x17\xa5\x40\
\x0c\x1d\x0c\x85\x26\x31\xc1\xe2\x8c\x01\x9a\x98\x39\x18\x13\xd1\
\xb1\x09\x26\x9f\x68\x04\xa3\x8f\x6c\xd0\x67\xce\xc0\x33\xed\x51\
\x25\x0a\x61\xf4\x83\xb8\x38\x91\x05\x46\x02\x90\x9b\x93\x83\xd5\
\xc6\x30\xc5\x40\x86\x51\x29\xa3\x52\x4b\xce\xab\x8a\x3c\xa1\x3b\
\x32\x56\xe9\x24\x36\x26\xee\x76\x50\x31\xaa\xa5\x58\x85\xbe\x9c\
\x9d\x01\x0d\x3e\x8f\xa1\x0c\x26\xb1\xb3\x64\x2c\x44\x88\xcd\x24\
\x00\x92\xcc\x38\x3a\x46\x0c\x51\xa4\x59\x3e\xe0\x33\x1c\xf1\x8d\
\x3b\x2e\x01\xcb\xea\x24\x46\x3f\x88\xd1\x1f\x1f\x9f\x80\x00\x96\
\xd2\xd5\x6b\x56\xa3\x9f\x59\x22\x4f\x7f\x10\x03\x49\x92\x16\x42\
\x45\xd5\xd2\x91\x35\x19\xf8\x7c\xf4\x63\x7c\x66\x43\xa7\xd1\x5a\
\x71\x7b\x56\x00\xbc\x91\x7b\x5b\x8c\xc7\xe4\xb1\xf1\x31\xd4\xb5\
\xc2\x00\x24\xa3\xa4\xb0\xca\x33\x34\x3c\xc8\x5a\x07\x21\x12\x66\
\xfa\x27\x90\x24\x0f\x47\xae\x93\x45\x7f\x72\x62\x12\x46\x47\x47\
\xb1\xb5\x08\x40\x75\xf5\x12\x28\x2b\x2b\xcd\x70\x1a\x52\x91\x87\
\x34\x08\xda\xf4\xc7\x90\x45\x1c\x7f\x38\xed\x88\xc2\xe5\xe3\xad\
\x19\x00\x40\xe5\x71\xb0\xb9\x18\x50\x0c\x47\xc3\x82\x30\x84\x3d\
\x3e\x9b\x88\x55\x1a\xb3\x91\xe5\x00\x39\x3b\xe0\xf3\x81\x88\x09\
\x4b\x60\xb0\x67\x61\xab\x31\x8f\x09\x79\xdf\xba\x35\x50\x84\x4d\
\x1d\xe5\x08\xad\x17\x0f\x3e\xf8\x00\xac\xff\xc1\xfa\x74\xa4\x21\
\x25\x19\xf6\x11\x92\x20\xc8\xa8\x5a\xc5\x91\x59\x4c\x62\x5f\x5a\
\x2a\x9c\x9c\xcf\x81\x36\x13\x00\x31\xa0\x69\xb3\x33\x80\x99\xaf\
\x46\xc3\xf1\x67\x03\x7e\xbf\x4a\x55\x87\x12\x56\x55\x92\xb9\x44\
\x8b\x57\x24\x12\x85\x9b\xfd\x37\x99\x2c\x92\x4c\x44\x59\x55\xa1\
\x96\xe2\xe1\x4d\x1b\x61\xeb\xb6\xe7\xe1\xe9\xa7\x9f\x82\xaa\x7b\
\x52\xeb\x52\x46\x1d\xe1\x40\x07\xa2\xff\x9f\x03\x1b\xb6\xd5\x98\
\x73\x58\x07\x54\x5f\xc6\x42\xb6\x0c\x53\xff\xd2\x0c\x09\x69\x50\
\xaa\xcd\xc9\x00\x9e\x11\x44\xb7\x10\x8d\x5e\xf0\x0d\xdc\x54\xb0\
\xe3\x64\x65\x93\xf4\x4e\x2c\x44\x51\x52\x1d\x9d\xd7\x92\x8c\x12\
\x20\xcc\x05\x92\x18\xf9\xa5\x21\x60\x4d\x53\x93\x49\x89\x46\x00\
\x35\x2d\xfb\x64\x5c\x0a\x85\x03\x9b\x41\xdc\xc9\x49\xaa\x2c\x8e\
\x4c\xdd\xe4\x0a\xb0\x92\x1f\x9b\xe1\x20\x32\x80\x69\xef\xfb\x4a\
\x00\x87\x0e\x1d\x4a\x11\x0b\xdc\xad\x91\x2f\xf7\xf4\xf7\xf7\x27\
\xa8\xb2\x94\x97\x2d\x86\xea\x25\xd5\x50\x56\x52\xc6\x5a\x03\x5a\
\x17\x3c\xd7\xae\xb2\xea\xe2\xbf\x13\x00\xab\xc5\xc6\x16\x34\x95\
\x9c\x47\x90\xc9\x35\x43\x61\x60\xa6\x40\x50\x41\x65\xa7\x69\x87\
\xc3\xe1\xa0\xa4\x17\x45\x55\xbe\x95\x01\x2f\x14\x51\xe4\xce\x2c\
\xb0\x2b\x70\xe8\xe1\xd9\x18\x48\x03\x38\xf6\xf1\x31\xdf\x68\x20\
\xb0\xc5\xeb\xed\xb9\x7a\xe9\x72\x3b\x90\xdd\x1c\xb8\x49\xda\xd7\
\x04\x21\x72\x61\x78\x78\x78\x74\xe4\x4b\xbf\xda\xeb\xed\x61\x95\
\x84\xb4\x4c\xa5\x52\x49\xad\x09\x49\x10\xea\x74\x10\x74\x26\x04\
\x5a\x12\x0e\x05\xc0\x8c\x7d\x10\xee\x09\x0c\xdd\xd7\xfa\x2e\xa6\
\x02\x08\x9a\xc1\xf0\x19\xdb\xc0\x64\x1c\x15\x3f\xff\x43\x39\x7e\
\xed\x9e\x98\x2c\xf5\xcf\x7b\x47\x76\xe4\xc8\xd1\x7f\x1d\xdc\xdf\
\xf2\xc8\x81\x7f\x34\x2f\x42\x2b\x41\x2b\x47\xab\x6a\x39\xf8\xcf\
\xad\x5d\x1d\x1d\x8d\x5d\x3d\x5d\x93\x28\x1b\x2d\x70\xc7\x0f\x39\
\xf6\x5c\xec\x95\xcc\x69\x06\x48\x6e\xf4\x59\x4d\xc9\x8a\x81\x48\
\x31\xa0\x83\xa0\xdd\x58\x18\x4b\xaf\x24\x2b\x27\x8e\x1f\x3f\x9e\
\x68\x68\x68\x60\x8f\x0c\x7d\xf4\xc6\xe5\x99\x7d\x90\xfa\x38\xa2\
\x7b\x1b\x81\x49\xb3\x01\xd0\xe6\x69\xaa\xc7\xd3\xe5\xf3\x76\xf7\
\x3d\xe3\x0f\xf8\x05\x6f\x6f\x2f\x2e\x58\xb9\x2c\x9a\x3a\x03\xb4\
\x4a\x2b\xaa\x9e\x0f\x5a\x8a\x85\xe9\x0c\x14\x15\x15\xd1\x86\x26\
\x21\x8a\xf1\x77\x67\x8a\x6b\x86\x7b\xab\x70\xc5\x3c\x38\xeb\x13\
\xa9\x08\xa8\x5f\x61\x4a\xca\x64\xfd\xdc\xd1\xd1\xe1\xbb\x76\xb5\
\xeb\xa7\xd7\x3c\x5f\x28\x56\xab\x8d\xd5\x74\x8d\x55\x2b\x35\x0d\
\x82\xc9\x48\xb7\x0c\x10\xb4\xa5\x74\x3a\xf3\x60\x70\x60\x40\xeb\
\xed\xbe\x71\x34\x35\x47\xd6\xa3\xa2\xf1\xb7\x36\xbc\x1d\x19\xb4\
\x7b\xaf\xce\xf9\x56\x02\x41\xdc\xed\xf0\x34\xa7\x33\x8c\xa8\x94\
\x3c\x1e\xcf\x4d\x6c\xdc\xda\xc6\x71\x6f\x40\xdb\x4f\x9b\xcd\x96\
\x94\x90\x9a\x34\xbd\xcf\x99\x02\x92\x04\x51\xec\x76\xe3\x7e\x62\
\x82\xfa\xa5\xd3\x6d\x6d\x6d\x51\x5d\x3e\x59\x8f\x90\xd3\x86\xed\
\xca\x71\xad\xb9\x59\x99\xd7\x6b\x15\x1c\x6c\x5e\xce\xeb\x36\x11\
\x0c\xed\xef\xb8\xd6\x91\xc8\xc7\x16\x86\xde\x81\x52\x59\xd5\xf3\
\x80\xe5\x02\x01\xd0\x92\x2d\x3a\xe5\x83\x1d\x41\x96\x94\xb8\xa1\
\xbf\xdf\x17\x93\x24\xf5\x9d\x54\xd0\xbe\xf2\x70\x0e\xb7\x04\x15\
\x45\xeb\xfc\x5a\xaf\x16\x53\x83\x66\x75\x38\xc3\x44\xb2\x33\xa7\
\xcf\x1c\x39\x77\xfe\x5c\x30\x8c\xfb\x07\x67\x9e\x0b\xf2\xf3\x5d\
\x4c\xf7\x7a\x25\x9a\xca\x05\x64\x00\x8b\x5c\x65\x55\x25\x84\x26\
\x43\xd0\xff\xef\xfe\x60\x68\x22\xf4\xe1\x5c\x8e\x75\x76\x76\xaa\
\xc3\xad\x7f\x0c\x7e\xed\x77\xa3\x44\x2b\xb1\x81\x96\xd5\x71\xdd\
\xb0\xef\x11\x84\x50\xf8\xd7\x1f\xb7\xb6\xc6\xec\x76\x3b\x90\x91\
\xbe\xd5\x8c\x72\xaa\x5b\x69\xa9\x9b\x25\x7b\x7b\xfb\xe5\x98\x22\
\xc9\x7b\x50\x4e\xe2\x7f\xe5\xf7\x01\x62\x04\x4d\x46\xcb\x74\x3e\
\x0d\xe6\xc0\x81\x96\x0f\x87\x07\x87\x4e\x5f\x6c\xbf\x28\xb9\x50\
\x4a\xf4\xa6\xba\xd8\x5d\xcc\x5e\xad\x93\xe6\xe9\x15\x4a\x65\x65\
\x25\x7b\x2b\xdd\xd1\xd1\x25\x06\xfc\x77\x4e\x86\xc3\xd1\x23\xff\
\x93\x1f\x38\x52\xcc\xe8\x80\xc8\x24\xc6\x92\xca\x37\xb4\xb5\x7d\
\xd6\xd1\xe3\xf5\xaa\x05\xf9\x85\x28\xa5\x02\x26\x97\xe5\xcb\x97\
\x41\xcd\xd2\x1a\xf6\xda\xd1\xeb\xed\x53\x3c\x57\x3d\x9d\xd8\x1b\
\x37\x2e\xb8\x9f\x98\x70\x8f\x10\x37\x80\xb0\xa9\xb5\xf5\x93\xe3\
\x1f\x1c\xfe\x20\x16\xc5\x46\x8f\x5e\xbb\x18\x0c\x46\xac\x38\x41\
\x38\x76\xf4\x44\xb4\xfd\xc2\xc5\x13\x06\x2e\xb2\x89\x9e\x5d\x90\
\xbf\x52\x0a\x82\x31\x18\x8f\x24\x9e\xec\xbd\xd1\xfb\xc2\x7b\xef\
\xfe\xfd\xb3\xb7\xfe\xf2\x56\x78\xdf\x5f\xf7\x85\x0f\x1f\xfe\xe0\
\xcc\xc0\xa0\x6f\x47\x3c\x26\x3d\x49\xcf\x2c\xf8\xdf\x89\x15\x11\
\x0e\x27\xe2\xd2\x46\x29\xa1\xe6\x91\x89\x31\xe9\x21\xba\xf6\xfd\
\x2f\xf5\xdf\x03\x58\xc0\xc7\x7f\x00\x01\x9b\xbf\xfb\xe5\xb7\x98\
\x3f\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x78\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x03\x00\x00\x00\x60\xdc\x09\xb5\
\x00\x00\x01\x11\x50\x4c\x54\x45\xff\xff\xff\x00\x00\x00\x24\x24\
\x24\x00\x00\x00\x00\x00\x00\x2e\x2e\x2e\x3b\x3b\x3b\x00\x00\x00\
\x1e\x1e\x1e\x00\x00\x00\x2b\x2b\x2b\x00\x00\x00\x24\x24\x24\x31\
\x31\x31\xe2\xe2\xe2\xc1\xc1\xc1\xff\xff\xff\xd2\xd2\xd2\xbf\xbf\
\xbf\xe1\xe1\xe1\xe2\xe2\xe2\xe0\xe0\xe0\xe1\xe1\xe1\xff\xff\xff\
\xfb\xfb\xfb\xfd\xfd\xfd\xff\xff\xff\xff\xff\xff\xbc\xbf\xb8\xbd\
\xc0\xb8\x9a\x9d\x99\xa5\xa6\xa2\x89\x8b\x86\x8c\x8e\x88\x8e\x90\
\x8b\x90\x92\x8d\x92\x95\x8f\x95\x97\x91\x97\x99\x94\x99\x9c\x96\
\x9c\x9e\x98\x9e\xa0\x9b\xa0\xa3\x9d\xa3\xa5\x9f\xa5\xa7\xa1\xa7\
\xaa\xa4\xaa\xac\xa6\xac\xaf\xa8\xae\xb1\xaa\xb1\xb3\xad\xb3\xb6\
\xaf\xb5\xb8\xb1\xb7\xba\xb4\xba\xbd\xb6\xd4\xd8\xd0\xd4\xd8\xd1\
\xd6\xda\xd2\xd7\xda\xd3\xd8\xdc\xd5\xda\xdd\xd6\xdb\xde\xd7\xdc\
\xdf\xd9\xdd\xe0\xda\xdf\xe1\xdb\xdf\xe2\xdc\xe1\xe3\xde\xe1\xe4\
\xdf\xe4\xe5\xe1\xe4\xe6\xe1\xe6\xe7\xe4\xe6\xe8\xe4\xe8\xea\xe6\
\xe9\xea\xe6\xea\xec\xe9\xeb\xec\xe9\xed\xee\xeb\xee\xee\xec\xef\
\xf0\xed\xf1\xf2\xf0\xf3\xf4\xf2\xf6\xf7\xf5\xf8\xf9\xf7\xfa\xfb\
\xfa\xfb\xfb\xfb\xfc\xfc\xfb\xfc\xfc\xfc\xfc\xfd\xfc\xfd\xfd\xfc\
\xfd\xfd\xfd\xfe\xfe\xfe\xff\xff\xff\x77\x19\x90\xf0\x00\x00\x00\
\x20\x74\x52\x4e\x53\x00\x07\x07\x09\x0a\x0b\x0d\x0f\x11\x12\x12\
\x13\x15\x15\x1a\x29\x2a\x2d\x34\x3c\x46\x4b\x4c\x64\x77\x7b\x7c\
\x7f\xb0\xb1\xc3\xd7\x8b\xc9\x16\x4b\x00\x00\x00\xf6\x49\x44\x41\
\x54\x78\xda\x62\x18\x81\x60\x14\x30\xb2\x73\x02\xe8\x96\xa7\xc3\
\x08\x03\x00\x08\xc2\xf1\x29\xf6\xd9\x36\xfe\xd8\xb6\x75\xd3\x7f\
\x21\x69\x60\x77\x9e\xe7\x93\x85\x26\xcd\x1f\x0b\xcd\x4c\x8b\x66\
\x42\x31\x2d\x42\xa1\x54\x20\x4b\x87\x43\x12\x44\x66\x02\x64\xc1\
\xca\x82\x06\xb3\x01\x4f\x0f\x77\xd7\x97\xe7\xa7\xc7\x87\xfb\xbb\
\x3b\x7b\x07\x47\x27\x67\x17\x57\x37\x04\xdb\xab\x1a\xcc\x8d\x10\
\xff\x2d\xa3\xc4\x86\x01\x43\xc4\x7f\xcf\x30\xb1\x69\xc0\x00\xf1\
\x3f\x32\xb0\xa0\x8f\xf8\x9f\xe9\x5b\xd0\x43\xfc\x2f\xf4\x2c\xe8\
\x22\xfe\x57\xba\x16\x74\x10\xff\x1b\x1d\x0b\xda\x88\xff\x9d\xb6\
\x05\x2d\xc4\xff\x41\xcb\x82\x26\xe2\xff\xa1\x69\x41\x03\xf5\xd3\
\xb0\xa0\x8e\xfa\xa9\x5b\x50\x43\xfc\x63\x6a\x16\x54\x11\xff\x98\
\xaa\x05\x15\xc4\xff\x47\xc5\x82\x32\xe2\xff\xa5\x6c\x41\x09\xf1\
\x7f\x53\xb2\xa0\x88\xf8\xbf\x28\x5a\x50\x40\xfc\x9f\x14\x2c\xc8\
\x23\xcb\x1b\x10\x8a\x66\x72\xb2\x6c\x62\x4d\x82\xc9\xf9\xf5\x44\
\x52\x96\x58\x9c\x90\x4d\x2d\xc5\xe5\xbf\xb5\xfc\x3f\x86\x91\x07\
\x46\x01\x00\x70\x39\xa7\x90\x59\xe1\x0b\xb9\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\xc1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x03\x00\x00\x00\x60\xdc\x09\xb5\
\x00\x00\x02\xd9\x50\x4c\x54\x45\xff\xff\xff\x00\x00\x00\xff\xff\
\xff\x00\x00\x00\xff\xff\xff\x00\x00\x00\x00\x00\x00\xff\xff\xff\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xff\xff\xff\x00\x00\x00\xff\xff\xff\x00\x00\
\x00\x00\x00\x00\x27\x27\x27\x4e\x4e\x4e\x00\x00\x00\x49\x49\x49\
\x00\x00\x00\x44\x44\x44\xee\xee\xee\x00\x00\x00\x10\x10\x10\x30\
\x30\x30\x40\x40\x40\x00\x00\x00\x00\x00\x00\x55\x55\x55\x28\x28\
\x28\x5e\x5e\x5e\xae\xae\xae\xff\xff\xff\x2e\x2e\x2e\x7a\x7a\x7a\
\xff\xff\xff\xff\xff\xff\x8c\x94\x8c\xf7\xf7\xf7\xff\xff\xff\x94\
\x94\x8d\xb9\xb9\xb9\xf9\xf9\xf9\x92\x97\x8d\xac\xac\xa7\xfa\xfa\
\xfa\xfa\xfa\xfa\xfb\xfb\xfb\xae\xae\xaa\xd9\xd9\xd9\xfb\xfb\xfb\
\xe6\xe6\xe6\xfb\xfb\xfb\xa8\xab\xa4\xff\xff\xff\xfc\xfc\xfc\xff\
\xff\xff\x9e\xa1\x9b\xb6\xb9\xb3\xfc\xfc\xfc\xff\xff\xff\xfd\xfd\
\xfd\xba\xbc\xb7\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf8\xf8\xf8\
\xff\xff\xff\xb0\xb4\xae\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xfd\xfd\xfd\xff\xff\xff\xfd\xfd\xfd\xf9\xfb\xf9\xf5\xf5\
\xf5\xf5\xf5\xf5\xba\xbc\xb7\xa2\xa2\x9f\xf0\xf0\xee\xa8\xa9\xa4\
\xe9\xe9\xe7\xe3\xe3\xe1\xd6\xd6\xd4\xdd\xdd\xdc\xbd\xbe\xb9\xce\
\xcf\xcc\xc8\xca\xc7\xb6\xb8\xb5\xb7\xba\xb2\xc5\xc6\xc3\xb2\xb4\
\xaf\xb1\xb3\xb0\xac\xaf\xa8\xaf\xb0\xad\x9c\x9e\x9a\xb1\xb2\xae\
\x9a\x9b\x97\xbd\xc0\xba\xa2\xa5\x9e\xbd\xbf\xb8\x9b\x9e\x99\xb9\
\xbb\xb5\x7b\x7d\x78\x9e\xa0\x9c\x86\x88\x82\xaf\xb1\xab\xa3\xa5\
\x9f\x8a\x8b\x86\xbd\xc0\xba\xa4\xa7\xa1\x90\x92\x8c\xbc\xbf\xb8\
\x77\x79\x74\x97\x99\x93\xb3\xb5\xaf\xac\xad\xa8\x7e\x80\x7a\xa7\
\xa8\xa3\xb9\xbb\xb5\xba\xbb\xb6\xcc\xd0\xca\xbb\xbe\xb8\xc1\xc3\
\xbd\xcc\xcf\xc9\x89\x8a\x85\xcc\xce\xca\xdb\xdc\xd9\x93\x94\x90\
\x96\x98\x93\x98\x99\x95\xa3\xa6\xa0\xa4\xa5\xa0\xaa\xac\xa8\xb6\
\xb8\xb4\xd2\xd3\xd0\x82\x84\x7e\xb8\xba\xb6\xbe\xbf\xbb\xcf\xd0\
\xcd\xc8\xca\xc7\xdf\xe1\xdd\xd7\xd8\xd6\xdd\xdf\xdb\xe1\xe2\xe0\
\xe4\xe5\xe3\xed\xed\xeb\xed\xed\xec\xee\xee\xed\x6b\x6d\x67\x6d\
\x6f\x69\x71\x73\x6d\x74\x76\x70\x78\x7a\x74\x7c\x7e\x78\x7f\x81\
\x7b\x83\x85\x7f\x87\x89\x83\x8a\x8c\x86\x8e\x90\x8a\x91\x94\x8d\
\x95\x98\x91\x99\x9b\x95\x9c\x9f\x98\xa0\xa3\x9c\xa4\xa6\xa0\xa7\
\xaa\xa3\xab\xae\xa7\xaf\xb2\xab\xb2\xb5\xae\xb6\xb9\xb2\xba\xbd\
\xb6\xd4\xd8\xd0\xd4\xd8\xd1\xd6\xda\xd2\xd6\xda\xd3\xd7\xda\xd3\
\xd8\xdc\xd5\xda\xdd\xd6\xdb\xde\xd7\xdc\xdf\xd9\xdd\xe0\xda\xde\
\xe1\xdb\xdf\xe1\xdb\xdf\xe2\xdc\xe1\xe3\xde\xe1\xe4\xdf\xe4\xe5\
\xe1\xe4\xe6\xe1\xe5\xe7\xe2\xe6\xe7\xe4\xe6\xe8\xe4\xe7\xe8\xe4\
\xe7\xe9\xe4\xe8\xea\xe6\xe9\xea\xe6\xea\xeb\xe9\xea\xec\xe9\xeb\
\xeb\xea\xeb\xec\xe8\xeb\xec\xe9\xec\xed\xeb\xec\xee\xeb\xed\xee\
\xeb\xef\xf0\xed\xf0\xf1\xee\xf0\xf1\xef\xf1\xf1\xf0\xf1\xf2\xf0\
\xf1\xf3\xf0\xf2\xf3\xf1\xf3\xf3\xf2\xf3\xf4\xf2\xf4\xf5\xf3\xf5\
\xf5\xf3\xf5\xf5\xf4\xf6\xf7\xf5\xf6\xf7\xf6\xf7\xf7\xf6\xf7\xf8\
\xf6\xf7\xf8\xf7\xf8\xf8\xf7\xf8\xf9\xf7\xf8\xf9\xf8\xfa\xfa\xf9\
\xfa\xfb\xfa\xfb\xfb\xfa\xfb\xfb\xfb\xfb\xfc\xfb\xfc\xfc\xfc\xfc\
\xfd\xfc\xfd\xfd\xfc\xfd\xfd\xfd\xfd\xfe\xfd\xfe\xfe\xfe\xff\xff\
\xff\xfe\x3f\x28\xd3\x00\x00\x00\x9c\x74\x52\x4e\x53\x00\x01\x01\
\x02\x02\x03\x04\x04\x05\x06\x07\x07\x08\x09\x0a\x0a\x0b\x0b\x0c\
\x0d\x0d\x0d\x0e\x0e\x0f\x0f\x0f\x10\x10\x10\x10\x11\x12\x12\x13\
\x13\x13\x14\x16\x17\x1a\x1b\x1f\x21\x21\x26\x28\x29\x31\x31\x31\
\x37\x3a\x3c\x3d\x3f\x46\x47\x49\x4d\x50\x55\x57\x57\x57\x5d\x66\
\x67\x6a\x6c\x6f\x72\x75\x77\x78\x79\x7b\x7d\x7e\x7f\x80\x82\x84\
\x85\x86\x87\x87\x89\x8b\x90\x94\x97\x9a\x9b\xa3\xa8\xa9\xab\xb0\
\xb1\xbb\xbb\xbf\xc4\xca\xca\xcb\xcc\xd4\xd7\xd9\xdd\xe3\xe3\xe5\
\xea\xeb\xec\xf1\xf2\xf4\xf5\xf5\xf6\xf7\xf7\xf7\xf7\xf7\xf8\xf8\
\xf8\xf9\xf9\xf9\xfa\xfa\xfa\xfa\xfa\xfa\xfa\xfa\xfb\xfb\xfb\xfb\
\xfc\xfc\xfd\xfd\xfe\xfe\xfe\xfe\xfe\xf6\x02\x98\x54\x00\x00\x01\
\xfb\x49\x44\x41\x54\x78\xda\xed\x94\xd3\xa3\x1d\x31\x10\x87\xaf\
\x6d\x3b\xa9\x6d\xdb\xc6\xd4\xb6\x6d\xdb\x36\x6f\x6d\xfb\xd4\xb6\
\xed\xf6\xd8\xe6\xfc\x03\xb5\xdb\xec\xa6\x4f\xe5\x3c\x7f\xdf\x6e\
\x92\x99\xf9\x79\xfd\xaf\xdf\xae\x7c\xa3\x48\x6c\xc0\xcf\x08\x6f\
\xf8\x54\x1a\xe3\xcb\x2f\xd0\x98\xbe\xcd\xb2\x11\x1a\xce\x2f\x44\
\x48\x66\x0c\xab\x14\x43\x52\x83\x78\x85\x78\x09\x6a\x26\xf5\x2e\
\x92\x44\x63\x7d\xf9\x84\x44\x09\x2a\x5d\xab\xa6\xb4\xcb\x95\x4a\
\x23\xbc\x39\x85\xfb\xf7\x94\xf6\x65\x23\xab\xc5\x91\xd4\x10\x1e\
\xe1\x10\xde\xba\x7a\xe9\xba\x4e\x35\xb5\x7f\x89\x04\x92\xe0\x2f\
\x2e\x1c\xc4\xab\x97\x2e\x9c\x3b\x7d\xc3\x72\x7b\x72\xa7\xbc\xa9\
\x34\xca\x5b\x4c\x38\x80\x6f\xf9\x93\xc7\x8f\xdd\xb7\xad\x18\x53\
\x27\x99\x90\x30\x11\x61\x3f\xbe\xe3\x8f\x1c\x3e\x7a\x56\x6e\x9c\
\x3e\xb8\x6c\x2c\x49\x0a\x10\x14\xf6\xe1\x07\xfe\xc4\xa9\x33\x37\
\x2d\xcf\x26\x74\x2b\x90\x24\xd4\x7a\x9a\xb8\x17\x3f\xf1\xe7\x2f\
\x5e\x79\xe1\xca\x1c\xdb\x32\xcb\x9b\xd6\xb3\x85\x3d\xf8\x99\xbf\
\x7c\xed\xe6\x43\xa3\x7d\xc1\xf0\x8a\x6f\x5a\x1f\xcc\x12\x76\xe3\
\x97\xfc\x9d\x47\x8f\xe5\x4e\xdd\xc4\x3e\x85\x13\x68\x08\x43\xd8\
\x85\x5f\xf3\xcf\x65\x2a\x8b\x67\xf5\xf8\xe6\x69\xa9\x0c\x61\x27\
\x7e\xcb\x1b\x1c\xb8\x66\x4e\x97\x82\x59\x19\xc2\x0e\xfc\x86\x37\
\x79\x8c\xf3\x87\xb4\x6d\x50\x2b\x3b\x43\xd8\x8e\x5f\xf1\x5a\x87\
\x6b\xc9\xe8\xce\x00\x35\xf2\xb3\x2e\xbd\x0d\xbf\xe4\xcd\x9e\xcc\
\x59\x3d\x9b\x42\xbd\xe2\x7e\xcc\x67\xdd\x8a\x9f\x79\xb5\x47\x3a\
\x77\x60\x6b\x80\xf2\xd1\x02\x8d\xdb\x82\x1f\x79\x99\xcd\xba\x78\
\x44\x07\x80\xaa\x79\x04\x47\x63\x33\xbe\xe7\x9f\x18\x3d\xcb\xa7\
\x75\x6f\x0c\xb5\x8b\xfa\x08\x0f\xdf\x26\x7c\xc7\x2b\xdc\x77\x67\
\xf6\x6b\x05\x0d\xcb\x84\x8a\x8d\xf7\x46\x7c\xc3\x3f\xb5\xea\xe6\
\x0d\x6d\x0f\x50\x25\x87\xf8\x02\x6d\xc0\x9b\x0f\xf4\xce\xa5\xe3\
\xba\x36\x82\x9a\x85\x78\x56\x74\x3d\xca\x5c\x2b\x17\xf6\x6a\x01\
\xf5\x4b\x05\x72\x85\xc0\x3a\x7c\x39\x7b\x50\x1b\x80\x0a\x29\x9c\
\x31\xb3\x76\xd1\xa8\x8e\x00\xd5\xf3\xf1\x06\x59\xc6\x80\x1e\x4d\
\xa0\x6e\x31\x1f\xde\xa8\x8c\xcd\x59\xba\x3e\x94\x8b\xe4\x0f\x63\
\xef\xf4\x92\x95\x73\x7b\xfd\xaf\x3f\xa2\x5e\x03\x5f\x1a\x26\xde\
\x2f\x78\xb2\x0b\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x07\xd2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x0e\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x46\x69\x72\x65\x77\x61\x6c\x6c\
\x12\x81\xae\xae\x00\x00\x00\x17\x74\x45\x58\x74\x41\x75\x74\x68\
\x6f\x72\x00\x4c\x61\x70\x6f\x20\x43\x61\x6c\x61\x6d\x61\x6e\x64\
\x72\x65\x69\xdf\x91\x1a\x2a\x00\x00\x07\x12\x49\x44\x41\x54\x68\
\xde\xed\x99\x4f\x8c\x1c\x47\x15\xc6\x7f\xf5\xaa\x7a\x66\x77\x76\
\xd6\xd8\x5e\x48\xec\x38\x24\x96\x6c\x21\x0c\x82\x0b\xb1\x08\x0e\
\x92\x4d\xe4\x00\x67\x56\x42\xb2\xb9\x70\x41\x42\x04\x8e\x1c\x10\
\x17\x9f\xe0\x8c\x04\x5c\x10\x12\x42\xe2\x80\x84\x48\x10\x48\x31\
\x22\x7f\x7c\x21\xbe\x70\xf0\xc6\x24\x22\x07\xf0\x9a\x38\x9b\x35\
\x24\x78\xbc\xf6\xae\x67\xba\xab\xea\x71\xa8\xee\x9e\x9e\xdd\x99\
\xf5\xec\x65\x1c\x41\x5a\x2a\xf5\x4c\x4d\x4f\xf7\x7b\xf5\x7d\xef\
\xbd\xef\x55\xc3\x07\xc7\x07\xc7\xff\xf7\x61\xc6\x4d\x7e\xf5\xdc\
\xf2\xb9\xe0\xf5\xc7\x22\xe6\xa0\xaa\xf2\xde\xa6\x72\xe7\x91\xe5\
\x99\x1a\xe6\x9c\x45\x5e\xff\x29\x8b\x0b\x73\xa8\x42\xf4\x61\x0d\
\x27\x5f\x7b\xf1\xe2\x8b\x97\x46\xae\xdb\xfe\xc7\x0b\x17\x2e\xc8\
\x1b\x7f\xbb\xfa\xb3\xe3\xc7\x8e\x2f\xec\x5b\xdc\x87\x0f\x9e\xd5\
\xb7\xff\xcd\xd3\xe7\x3e\x3f\xdb\x95\x35\xca\x2f\x7e\xf0\x4b\x0e\
\x3d\xf4\x11\x42\x8c\x6c\xdc\xd9\x78\xe4\xf6\xed\x5b\x3f\x07\x8e\
\xed\xea\xc0\x95\x2b\x57\xf6\xcd\x75\x5a\xae\xd3\xe9\xf0\xfc\xef\
\x9e\x43\x15\x3a\xfb\x1f\xe2\xc0\xb5\x5b\x33\x75\xa0\xe5\x0c\xd7\
\xff\x79\x9d\x37\xae\x5e\xc1\x39\xc7\xa9\xa7\x9e\xa2\xd7\xbb\x75\
\x78\xfb\x75\x32\xe9\x06\xf9\x20\xe7\xb5\x95\xbf\xf2\xd9\x27\x9f\
\xc4\x98\x07\xc3\x6f\x11\xe1\xdd\x77\xdf\xe3\xf4\xe9\x33\x00\xc4\
\x10\x76\x52\x6d\xd2\x9f\xdb\x73\x73\x7c\xfb\x3b\xcf\x92\xe7\x05\
\x31\xef\xf3\xec\xe9\x2e\xfb\xf7\x1f\x98\x89\xe1\xaa\x4a\xef\xd6\
\x7f\x78\xde\x18\xce\x9f\x3f\x8f\x6a\x44\x55\x09\x21\x4e\xe7\x40\
\x08\x81\x18\x02\xaf\x5e\x7e\x15\x55\xa5\xd5\x5e\xc0\x00\x32\x23\
\x28\x74\xe8\x09\x2f\xbf\xf2\x12\xd6\x3a\x4e\x9e\x3c\x49\x8c\x53\
\x3a\x10\x43\x44\x51\xc4\x18\xb4\x91\xa8\x8c\x91\x19\x91\x47\xeb\
\x24\x29\x46\x92\x1d\xaa\xc4\x18\xa6\x47\x00\xc0\x88\x80\x6a\x1d\
\x03\x22\x33\x42\x40\xab\x05\x03\x23\x06\x4a\x07\xc2\xb4\x31\x10\
\x4a\x4f\x13\x02\xc3\x72\x21\x22\x33\x8b\x81\xca\x03\x11\x29\x17\
\x6e\xea\x18\xe8\x11\x43\x07\x55\x4d\x08\x44\x65\xbe\x33\xc7\xe6\
\xe0\x2e\xd7\xde\xba\x3b\xc3\x0a\x6b\x30\xa6\x1a\x82\x2a\xd3\x21\
\xd0\xeb\xc1\x7c\x67\x88\x40\x34\x30\xb8\x37\x60\xa1\xdd\xe5\xf0\
\xe1\x23\x33\x31\x3e\x46\xe5\x9d\x77\x6e\xa4\xc4\xd1\x44\x60\x2f\
\x31\x50\x21\x60\x88\x18\x63\xc0\x90\xce\x33\xaa\xc2\xe9\x3c\x44\
\x41\x55\x89\x7e\xda\x34\xaa\xb1\x46\xa0\x8a\x60\x83\x99\x59\x0c\
\x34\x89\x94\x10\x10\x50\xa6\x47\x20\x86\x90\x56\x5c\x84\x04\x9e\
\x99\x69\x1a\xad\x80\x36\x66\x88\x82\xc2\xf4\x85\x2c\x86\x08\x5a\
\x22\x20\x86\xa0\xcc\x34\x0b\x55\xf4\xa9\xb2\x50\x0a\x62\x9d\x56\
\x4a\xf4\x88\xa1\x8b\x52\xc6\x40\x4c\xf4\xa9\x6f\x3a\x2b\xe3\x1b\
\x4e\x54\x8f\x9d\x8a\x42\xbd\x1e\x74\xba\x61\x04\x81\x56\x7b\x9e\
\xbb\xfd\x3b\xbc\xfe\xe6\xca\xec\xd3\xa8\x48\x72\x64\x2f\x5a\x28\
\x06\x9f\x98\x2f\x06\x13\x0d\x83\x7b\x03\xba\x73\x8b\x1c\x3d\x7a\
\x6c\x46\x69\x34\xb2\xba\xfa\xf7\x84\x7b\x1d\x03\x8a\x4e\xab\x85\
\x8a\xa2\xaa\x03\x92\x04\x77\x7c\x50\xfd\xa2\x41\xc4\xd4\x31\x10\
\xe2\x1e\xd4\xa8\x42\x8d\x80\x79\x50\xf6\x53\x65\xc2\x14\x07\xd3\
\x6b\xa1\x10\x4a\xf9\x5c\x22\x60\xe2\x03\x02\xc0\x20\x7b\x96\x12\
\xf4\xe8\xc6\x85\x1a\x01\xab\x96\x7e\x7f\x93\x9b\xff\xba\x89\xb5\
\x76\x66\xc6\xaf\xbc\x76\xb5\x54\xa3\x65\x10\xa3\x04\xef\xa7\x40\
\xa0\x07\xa1\x13\x30\x9a\xf2\xbe\xaa\xd2\x5d\x98\xe7\xbb\xdf\xfb\
\x3e\x5a\x16\x04\x2d\x35\x6a\x12\x8d\x5a\xcb\xf7\xd1\xf9\xe1\x87\
\xe9\xe7\x9b\xb3\x90\xd9\x61\x01\x55\x35\x7b\x89\x81\x88\x1a\x4d\
\x69\x0c\xc3\x7c\xbb\xcd\x5c\xbb\x85\xaa\xd6\x03\x4d\xb2\x57\xd1\
\x9d\xf3\xf5\x5c\x29\x8d\xab\xdf\x51\x34\x26\x03\x47\xe6\x75\xdc\
\xdc\xf0\xfe\x49\x0b\x45\x54\xf7\xd2\x91\x45\xad\x45\x5c\xba\x7b\
\xca\xcd\x8a\xa2\x66\x68\xec\xa4\xf9\xe1\x5c\xb9\xc2\xe5\x07\x23\
\x0d\x83\xab\x34\xa9\x3b\xe7\x50\xc5\x24\x7f\x1a\x95\x58\xa7\x74\
\x20\x06\xa2\xea\x48\x30\x55\x8a\xf0\xd0\xc3\x0f\x73\xe2\xc4\xc7\
\xc9\xb2\x2c\x3d\x30\xc6\xf2\xdc\x58\x61\x8d\xc3\x15\x9c\x30\x62\
\x8c\x0c\xfa\x03\x5e\xb9\x74\x89\xf5\xb5\xf5\xda\x78\x29\x9f\xe5\
\x9c\xa3\xdb\x5d\xc0\x3a\x57\x3e\x9f\xbd\x75\x64\xe3\xe0\x32\xc6\
\xf0\x89\x4f\x9e\xe0\x89\x93\x4f\x70\xf0\xc0\x12\x22\x16\x6b\x2d\
\x56\x04\x6b\x5d\xf9\x5d\xb0\xe2\xb0\x56\x88\xa5\x33\x31\x46\x62\
\x4c\x46\x87\x18\x28\x8a\x02\xef\x0b\xd6\xd7\xd7\x09\x1a\xb9\xfc\
\xe7\xcb\x0d\xe7\x52\xfe\x1c\x0c\x06\xdc\xdb\xba\x47\x37\xcb\xca\
\x4a\x6c\x86\x9d\xda\x7d\xb3\x50\xe8\x32\xe6\x5a\x5c\xe6\x70\x2e\
\x63\x61\xa1\xcb\x5f\x56\x2e\xd3\x6e\xcd\x71\xe3\xfa\x3a\xa1\x08\
\x0d\x7e\x2b\x4b\x4b\x4b\x9c\xfa\xdc\x29\x0a\x5f\xd4\xc6\x16\x45\
\xd1\xf8\xee\x69\xb7\xda\x1c\x3a\x74\xa4\x94\xec\xc3\xb6\xd5\x94\
\xf1\x33\xd7\x6e\xd3\xbf\xd7\xaf\xeb\x00\x46\xc7\x6f\x41\x8e\xcb\
\x42\xb1\x33\x1e\x01\x14\x82\x0f\x88\x08\x99\x6b\x61\x8d\x65\xd0\
\xef\xf3\xfb\x3f\xfc\xa9\xbe\x24\xcb\x1c\xcf\x9c\x3d\x43\x08\x61\
\xe2\xb0\xd6\x26\x6a\x94\xb9\x5d\x8c\xec\x88\x17\x11\x21\xcb\x5c\
\xb9\x95\x33\x7e\xf5\x77\xa1\x50\x0a\xe2\x49\xf4\xb2\x22\x38\x97\
\xe1\xac\x23\x04\xc5\xb9\x61\x7d\xb0\xce\x12\xa2\x27\x84\x6a\x04\
\x7c\xc3\x78\x00\x67\x1d\x56\x2c\xaa\xe0\x43\x48\x3b\x0f\x55\x90\
\x27\x57\x10\x31\x88\xd8\xd4\x97\x4f\xde\x87\xde\x65\x63\x6b\x82\
\xc7\xc1\x47\x8c\x08\x99\xcb\xb0\xce\x11\x7d\x18\x71\xc0\x89\x10\
\x7c\xd3\xe8\xe4\x88\x2f\x9d\xa9\xfe\xe7\x9c\x43\x4d\x24\x4e\x44\
\xc0\x26\x27\x6a\x09\xbf\x07\x04\xbc\xf7\x63\x29\xa4\xa4\xcd\x25\
\x6b\x2c\x59\x96\x61\xc5\xe1\xa3\x27\x6b\x22\x60\xed\x7d\xa9\xe3\
\xac\xc3\x5a\x87\x51\x43\x88\x15\x02\xc3\x94\x8c\x01\x6b\x25\x69\
\xb1\xfb\x34\x51\xbb\xd6\x81\x71\x31\xe0\xa3\x47\x4a\x0a\x59\xe3\
\x08\x31\xe0\xec\x28\x85\xbc\x0f\x13\xa9\xd3\x1c\x18\x08\xde\x37\
\x10\xd0\x9a\xeb\xc6\xd8\x7a\x57\x2e\xa9\xba\x29\x29\xd4\xeb\xf5\
\xe8\x76\xbb\x13\x29\x14\x7d\x2c\x83\x38\x43\xcc\x4e\x0a\x0d\x11\
\x98\x4c\x9d\x84\x82\x45\x55\x1b\x31\xd0\x40\x00\xb0\xb6\xd1\xd0\
\x30\x31\x09\x8d\x45\xc0\xe4\x45\x6e\x54\x23\x59\x96\x51\x14\xc5\
\x0e\x99\x21\x62\x4b\x07\x04\x1f\x74\xa4\x57\x36\x26\xd1\x62\x37\
\xea\x38\x6b\x11\x6b\xc9\xf3\x1c\xef\xe3\x58\x04\xaa\x5e\xb8\xda\
\xce\x8c\x3b\xdf\x2a\xe9\x38\x07\x04\xd8\x42\x59\xb9\xf8\xc2\x1f\
\x3f\x7d\xf6\x99\xb3\xed\xdb\xb7\x7b\x0d\x27\xd2\xfe\x64\x45\xa1\
\x56\xd6\x66\xb1\xbb\xc0\xe3\x8f\x3f\x36\x52\x65\x97\x0e\x2e\xed\
\x4a\x1d\xeb\x1c\x21\x04\x8a\x22\x27\x46\x3f\x16\x01\x11\xc1\x48\
\xd9\x17\xeb\x48\x10\x67\x40\x25\x4b\xd5\x6d\xeb\x1f\x2c\xd0\xce\
\xf3\xfc\x2b\xab\xd7\xae\xff\xfa\xe2\x0b\x17\x3f\xf3\xa5\x2f\x7f\
\xb1\xbd\xb1\xb1\x41\x51\x14\xe5\xd6\x46\xa8\x29\xd4\x6a\xb5\x58\
\x5e\x5e\x9e\x58\xb0\xc6\x52\xc7\x59\x0c\x06\x1f\x0a\x06\x79\x9e\
\xea\xca\x38\x04\x8c\xa4\x51\x2a\xe2\x72\x6b\xdd\x00\x5d\xe0\x6e\
\xe9\xc4\x0e\x07\x32\xa0\x03\x2c\x16\x45\xf1\xc3\x1b\x6f\xbd\xfd\
\xa3\x97\x5f\xba\xf4\xd1\x2f\x3c\x7d\x26\x13\x31\xa8\x1a\x62\x8c\
\x64\xae\xc5\x63\x8f\x1e\x45\x24\xc9\x85\x10\x3c\x85\xf7\xc9\x89\
\xa2\x20\xf7\x39\x79\x9e\xa3\x31\xd6\x3c\x16\x23\x58\x2b\x88\x38\
\xf2\x7c\x90\x10\x2a\x52\x7c\xcc\x77\xe6\x6b\xd9\xa1\x51\x89\x51\
\x69\xb5\x32\x5a\xed\x56\xd2\x5c\x11\xd6\xd6\xd6\x10\x91\x8d\x18\
\xe3\x12\x50\x00\x61\x7b\x75\xb0\xc0\x3c\x70\x00\xf8\x30\xf0\x31\
\xe0\x53\xce\xb9\xaf\x7b\xef\x8f\xa4\x37\x87\x8e\x6f\x7e\xeb\x1b\
\xf5\xaa\xd4\xc2\xac\x94\xc1\x93\xce\xa3\x42\x8e\x86\xd8\x8b\x6c\
\x6d\xf5\xf9\xed\x6f\x9e\xbb\x6f\x83\x63\xad\xdd\x0a\x21\xfc\x04\
\xf8\x15\xb0\x0a\x6c\x02\xc1\x6c\xe3\xff\x1c\xb0\x0f\xd8\x0f\x3c\
\x0a\x1c\x07\x3e\xb4\xdb\xbb\xb4\x19\x1e\x9b\xc0\x3f\x80\x37\x81\
\x75\x60\x0b\x08\x6e\xdb\x6b\x91\xa2\xbc\x30\x02\x7d\xe0\x66\x49\
\x2b\xf3\x3e\x70\xa0\x28\xb9\xdf\x03\x06\x75\xeb\x30\x26\x0b\xd9\
\xc6\x78\x3f\x18\x3e\x92\xc5\xcb\xe1\xab\x36\xcb\x4c\xda\xcd\xd8\
\x55\x41\x3d\xb8\x43\x1b\xf9\x54\xf9\x5f\x38\xfe\x0b\xdd\x6a\xdf\
\xcf\x7f\x71\xb0\x56\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x08\x17\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x07\xde\x49\x44\x41\x54\x78\xda\xec\xcf\x01\x06\xc0\x30\
\x0c\x46\xe1\xb1\x92\xad\xac\xb4\xf7\xbf\x6b\xf6\x46\x46\x00\x6c\
\x85\xcd\x5f\x3e\x28\x92\xbc\xc5\xdd\x3f\xed\x1f\x01\x0a\x50\x80\
\x02\x14\xa0\x00\x05\xf0\x0c\x0d\x03\x3d\x8c\xf8\xb3\x37\x4b\x78\
\x05\x1b\xca\xec\x39\xf9\xf8\xfb\xd8\x8a\x3d\xd4\x14\x65\x0f\x16\
\xae\x38\xd0\xc3\xd4\x39\x39\xa0\x5d\xce\x76\xcc\x01\x3a\xb2\x26\
\x8f\xe2\x9f\x6d\xcc\xda\xb6\x6d\xdb\xb6\x6d\x7b\x77\x6c\x7b\x26\
\x1c\x24\xbd\xe9\xd8\x1a\x3b\x36\xda\x78\x6d\x77\xd0\x78\x9d\x4e\
\xed\xff\xd6\x74\xf5\xe9\x6c\xf2\xe2\xe5\x97\x3a\xa7\x4e\x6a\x72\
\x5e\xfd\xea\xde\xfa\xdf\xaa\xf7\x26\x8d\xcd\xb5\x05\x4d\xa7\xeb\
\x58\x66\x3f\x7d\xae\xa1\xe8\x0d\x6f\x78\x03\x26\xdc\xbf\x80\x05\
\x6f\x4c\x99\xe7\xf3\x56\x6f\x5d\xfd\xf8\xb2\x4a\xf5\x7b\x52\xeb\
\x3c\xb0\x9c\x1c\xf1\x20\x5c\xdd\x0d\xc1\x93\x93\x93\x2c\xb3\x9d\
\x3a\xdb\xc0\xfa\xfb\xfb\x5f\xf6\xf9\xcf\x7f\xfe\x89\xf3\x5c\xf4\
\x76\xc0\xd1\x1f\xf7\xb8\xc7\xdd\x5d\xdf\x54\xfb\xbd\xe6\x33\x0d\
\x61\xb0\xb3\xb2\xb2\x1e\xf7\xc6\x37\xbe\x71\xd5\xe2\x39\xf5\x69\
\xce\xbb\xde\xf5\xae\xc7\x4d\x31\xf0\x81\x0f\x7c\xe0\x3e\x61\xc0\
\x64\xd6\x33\xb3\xc5\x80\x31\x37\x10\x8b\xc5\x7e\x70\xf6\xec\xd9\
\x57\xcf\xa3\xcc\xf7\xa5\x36\xe3\x9e\xe2\x62\xd5\x1b\x9b\xcf\x36\
\x0c\x76\xf6\xb4\x47\xa2\xd1\x28\x03\x7b\x6c\x6c\xec\x8b\x0d\x0d\
\x0d\xaf\x5c\x1c\xa7\x8d\x38\x91\x34\xe7\xf2\xe5\xcb\xaf\x99\x62\
\xe0\x17\xbf\xf8\xc5\x43\x75\x8d\x55\x5c\xb4\xc5\x6a\xa4\x6e\xe2\
\xe3\x86\xe6\x1a\x96\x48\x24\x7e\xa7\xd3\xe9\xde\x3b\x4b\x99\xef\
\x06\x03\x0b\xaf\xdf\xb9\xfe\x49\x75\x8d\xd5\x27\xcf\x9e\x3f\x15\
\xf1\x07\x7c\x6c\x62\x62\x82\xa1\x11\x1b\x9c\xdf\x2c\x9c\xd3\x1c\
\xf1\x05\xbc\xc4\x49\x4c\xe1\x58\x2c\x96\x8f\x4c\x31\xb0\x7a\xf5\
\xea\x87\xaa\x6a\xcb\xb8\x68\xab\x64\x66\x92\xcd\xc2\xc7\x35\xf5\
\x95\x98\xf0\x7b\xb3\xd9\xfc\x01\x85\x32\x63\xc1\x07\x1e\x7d\xf4\
\xd1\x7b\xca\xab\x8a\x7f\x52\xdb\x50\x39\x62\x34\xeb\x13\x10\x1e\
\x1e\x09\x31\x9b\x5d\x62\x68\x60\x2f\x9c\xa3\x4b\x73\x24\x87\x75\
\x0a\xc7\xe1\x70\x7c\x62\x9a\x01\x9a\x08\xd1\xb4\xa8\x95\xd9\x1d\
\x12\x1f\x57\xd5\x4c\x5b\x58\x5c\x67\xf7\x8b\x32\x9f\x50\xe5\xbd\
\xa5\xaa\xa6\x54\xdb\xda\x7e\x2d\x12\x8b\x45\x19\x4a\xed\x70\xda\
\x98\x8d\x36\xc1\x41\x1c\x34\xb0\x17\xc5\x71\x49\x24\xde\x82\x9f\
\x82\xa3\x6c\xa0\xb8\x4c\xc5\x45\x63\x71\xa7\xd3\xce\xc7\x74\xea\
\xd3\x0b\xd3\x33\x37\x65\x96\x79\xcb\x96\xd5\x4f\x2e\xad\xf8\xbb\
\xba\xbe\xa9\x1a\x71\xc1\x73\xcc\xe3\x75\xa1\x82\x7c\x13\xc0\x71\
\xb9\x1d\x0c\x0d\xec\xb9\x38\x75\x99\x1c\x1f\x71\x6c\x26\x66\x73\
\x12\xc7\x6d\x63\x6e\xaf\x53\x70\x94\x0d\xa8\xd4\x27\xb8\x68\x97\
\xcb\xc1\xdc\x6e\x27\x1f\xab\x4b\x0b\xd3\x0b\x7f\xe6\x33\x9f\x79\
\x3c\xca\xfc\xbc\xe7\x3d\xef\xde\x93\x45\xc7\x7f\x45\xb0\x51\x83\
\x49\x37\x81\x32\x07\x43\x01\x7e\x6e\x24\x12\x6f\x27\xf1\x4e\x12\
\x0f\x86\xd7\xeb\x66\x68\x60\xcf\x8b\x13\x0e\x70\xe1\x92\x9d\x38\
\x4e\x89\x39\xdd\x76\x6e\xc6\xe7\xf7\x08\x8e\xb2\x81\x93\x85\xf9\
\x5c\xb4\xc7\xe3\xa2\x85\x5d\x7c\xac\x2a\x4a\x4f\x78\xdf\xaf\x7e\
\xf5\xab\x47\x8e\xe4\x1c\x78\x07\x41\x0c\x57\xaf\x5d\x8a\xd2\xed\
\xc4\xc6\xc7\xc7\x10\x15\x1c\x7c\x9c\x1b\x1e\x3d\xa7\xcb\xce\xdc\
\x1e\x12\xef\x73\xb3\x00\xed\x28\x1a\xd8\x8a\x9c\x38\x71\x22\xc4\
\x71\x10\x07\xbb\xce\x23\x43\xd5\xf3\x38\x98\x87\x18\xfe\x80\x97\
\x36\xc8\x2f\x38\xca\x06\xf2\x4f\x64\x73\xd1\x3e\x9f\x9b\x3a\x1c\
\x4f\xb2\x13\x85\x79\x2c\x99\x4c\xfe\xce\x68\xd4\x7d\xe6\xa4\x2a\
\xbf\xba\xb2\xba\x74\x1c\xa2\x64\x59\x26\xa1\x0e\xba\x72\x0d\xd8\
\x79\x91\x77\x54\x0f\x1b\x80\xf9\x5c\xbc\xa8\x00\xd8\x33\x72\x12\
\x32\x17\x6a\x96\x0c\xcc\xca\x77\x9d\xaa\x97\x8a\x8c\xd7\x4f\x8c\
\xa0\x8f\x85\xa8\x2a\x23\xa3\x61\xc1\x51\x36\x90\x9d\x77\x98\x1b\
\xf0\xfb\xbd\x7c\x71\x8c\xf3\x8e\x67\xb1\x73\xe7\x4f\xd7\x9d\x28\
\xc8\x8b\x1a\x8c\x7a\x5e\x66\xca\xa9\x78\x57\x4c\xcb\x3b\xce\x00\
\xca\x0d\x06\xc6\xa8\x06\x1a\xd8\x82\x23\xe2\xe2\x27\x71\x10\x6e\
\x91\x8c\xfc\xa0\xda\x5d\x12\x37\xc3\x23\x13\x20\xf1\x21\x3f\xbf\
\x81\x46\xc7\x46\x50\x21\xc1\x51\x36\x70\x24\xeb\x00\x17\x1d\x0c\
\xfa\x51\x7e\x3e\xce\x3d\x96\xc5\xae\x5c\xb9\x98\x88\xc7\xe3\x00\
\x91\x68\x23\x33\x9a\x20\xde\x38\x53\xde\x21\x9c\xba\x87\xaa\xe0\
\x24\xf1\x36\x32\x67\x61\x68\x60\x5f\xb9\x7a\x9d\x33\x36\x3e\xca\
\xa3\x62\xb2\xd2\x06\xa4\x23\x23\x76\x9d\x18\x41\x8a\x0c\xdf\xf5\
\x10\x7f\x36\x12\x1d\x67\x88\x99\xe0\x28\x1a\x38\x70\x78\x0f\x8f\
\x0d\x76\xd0\xed\x71\x70\x03\x59\xb9\x07\xd9\xa5\x2b\x17\xc4\xc2\
\x94\x73\xb3\x62\xde\x91\x55\x44\x07\x73\x1d\x4e\x89\x1f\x44\x33\
\x89\x44\x03\x9b\x73\xe4\x38\x76\x93\xdf\x2e\x52\x3a\x32\x76\x2e\
\xde\x27\x22\x33\x12\xa4\xcd\x0a\xf3\xe7\xa2\xb1\x08\xc3\x1c\xf1\
\x22\x03\x47\xd1\xc0\x9e\xfd\x3b\xb8\x68\x0f\xc1\x10\x09\x8c\xf7\
\x1f\xda\xc3\x1a\x1a\x6b\xcf\x1c\xcd\x3e\x28\xeb\x0d\xba\x24\x35\
\x16\x0e\x87\xb0\xeb\xd3\xf2\x0e\x13\x88\x11\x6e\x0f\xec\xac\xc9\
\xa2\x67\x7a\x93\x86\xa1\x81\x2d\x38\x06\x23\xe7\x20\xd7\x10\x8e\
\xc8\x88\x83\x2a\x22\x93\xda\xf5\x28\x3f\x23\x13\xc9\x09\x20\x04\
\x47\xd9\xc0\xce\xdd\x5b\xb9\x68\xec\x2a\x4a\x8f\xf1\xde\x7d\x3b\
\x31\xef\x6f\x2e\x97\x6b\x67\xa1\xea\xb8\xa6\xa0\xf0\xb8\x1c\x08\
\x06\x18\x32\x1c\x0a\x05\xa7\xe5\xdd\xee\xb0\x52\xa6\x21\x5e\xc7\
\xf4\xc6\x61\xa6\xd1\x0f\x30\x34\xb0\xa7\x70\x54\xc7\x65\x5c\xbd\
\x10\x07\xc1\xe2\xa0\xa2\xca\x78\x81\xc5\xe3\x31\x08\x65\x30\x8a\
\x06\x2d\x82\xa3\x68\x60\xeb\xf6\x8d\x78\x30\x25\xc2\x88\xb1\x58\
\xf8\xaf\x04\xfa\x23\xf5\x3f\x75\x76\xb5\xe7\x1f\x3c\xbc\x2f\x74\
\xfa\x6c\x73\x02\x37\x91\x2c\xc7\x79\xe6\x45\xde\x31\x8f\x3e\x01\
\x98\xce\x38\xc4\x86\x75\xfd\x6c\x40\xd3\xc3\xd0\xc0\x9e\x89\x73\
\xe6\xdc\x29\xd2\x23\xf3\x88\x44\x48\x78\x34\x16\x05\x13\x1b\x24\
\x44\xe3\x27\xba\xe0\x28\x1b\xd8\xb4\x65\x1d\x1e\x4c\x95\x5f\xc7\
\xc7\xdb\x76\x6c\x62\x58\x34\x18\x0c\x7e\xd5\xef\xf7\x7f\x9a\x44\
\xff\x9c\xce\xc3\xdf\x6a\xeb\xaa\x9a\x76\xed\xd9\x16\x1f\xd6\x0c\
\xa6\xe2\x10\xe2\xe2\x0d\x66\x2d\xd3\x1a\x86\xd8\x10\x89\xef\x1f\
\xee\xa6\x7e\xdd\x00\xd8\xb3\x71\x74\x7a\x6d\x12\xeb\x81\x85\x7e\
\xbd\x41\x38\x3a\xff\x9d\xe0\x28\x1b\x58\xb7\xe1\x6f\x7c\x82\xd9\
\xca\xb3\x8b\xb1\x58\xf8\xf7\xf4\xf5\xf7\x61\xb5\x5a\x7d\x73\x5b\
\x5b\xdb\x33\x47\x46\x46\xbe\x86\x2f\x42\xba\x79\xb6\xe6\x1d\xcf\
\x1e\x38\x7c\xf4\x80\xec\xf6\xb8\x00\xe6\xb7\xc9\x90\xb6\x8f\xf5\
\x0d\x75\xf3\xdd\x47\x15\xd0\xc0\x9e\x8d\x93\x7f\x3c\x67\xe0\x68\
\xf6\x21\x39\x14\x0e\x65\xee\xba\x30\x84\x8a\x08\x8e\xb2\x81\xbf\
\xad\xf9\x13\x26\xa5\x76\x71\x10\x63\x4c\x98\xf6\x11\xb6\x77\xef\
\xde\xdb\x07\x06\x06\x5e\x37\x3e\x3e\xfe\x03\x88\xea\xec\x6c\xcf\
\xde\xb1\x6b\xab\xbf\xa2\xb2\x54\x1e\x8f\x8c\xf3\x4c\x1b\x68\x03\
\x86\x29\xff\x3a\x3a\x07\x68\x60\xcf\x87\xb3\x6b\xcf\x76\x7f\x7d\
\x43\xad\x4c\xcf\xa6\x0c\x4c\x40\x3c\x22\x26\x38\xca\x06\xfe\xfc\
\xd7\xdf\x63\x12\xf2\xcb\x77\x11\xe3\xd5\x6b\xff\xac\xf8\x19\x5c\
\x53\x53\xf3\xa0\xd3\xe9\xfc\x10\x7d\x52\xfc\x9c\x9e\xf9\x4b\x65\
\x55\x59\xdd\xfa\x8d\x6b\xe2\xed\x9d\x6d\xc9\x04\x2d\x18\x0c\xfb\
\xf9\x8b\x0a\x0d\xec\xf9\x73\xca\xeb\x36\x6e\x5e\x1b\x1f\x1c\xea\
\x4f\x8a\x08\xc1\x90\xe0\x28\x1a\xf8\xfd\x1f\x7f\x0d\xd1\x28\x3b\
\xb2\x8b\xb1\xe2\xc2\xa2\xe3\xcb\x92\xfe\x77\xf4\xb4\x70\x38\xfc\
\x65\x7a\xee\xd7\x1e\x8f\x67\xf3\x91\xa3\x07\x7a\xb7\xef\xdc\x22\
\xdb\x1c\x52\xba\xf4\x60\x2f\x94\x73\x34\xeb\x70\xef\xce\xdd\xdb\
\x64\xaf\xcf\x8b\x50\x09\x8e\xb2\x81\x5f\xff\xf6\xe7\x10\x4d\xd9\
\xed\xc5\x01\xc4\x78\xce\x85\x45\xa7\x5c\xdf\xd6\xd7\xd7\xf7\x6a\
\x8a\xc3\xf7\x11\x87\xae\xae\x8e\x23\x1b\x37\xad\xf5\xd0\x95\x29\
\x47\x28\x56\x60\x2f\x96\xb3\x69\xf3\x3a\x4f\x91\xba\x50\xa6\x43\
\x0f\xce\x8c\x06\x1e\xfa\xc3\x1f\xfe\xf0\xf0\x2f\x7e\xf5\x13\x2e\
\x3a\xb3\xfd\xe6\x77\xbf\x54\x58\x58\x51\xc0\xfd\x92\x24\xbd\x9f\
\xe2\xf0\x33\x9a\xf7\xe7\xf2\x8a\x92\xaa\x3f\xfe\xf9\x77\x31\xb0\
\x97\xca\xf9\xd3\x5f\x04\x67\xba\x81\xfb\xbe\xfd\xed\x6f\x3f\x6e\
\xdd\xc6\xd5\xcd\x3f\xf9\xd9\x0f\x58\x46\xc7\x8b\xac\x37\xb5\xf0\
\x1b\x38\x7c\x7e\xfd\xc6\xf3\xe7\xcf\x3f\x25\x10\x08\x7c\x11\x71\
\xf0\xf9\x7c\x1b\x2f\x5d\xba\x90\x0f\x8e\xc9\x64\x7a\xdb\x32\x70\
\x60\xe0\xbd\x53\xfe\xb0\xf5\xc9\x4f\x7e\xf2\x09\x74\xc5\xbd\x1b\
\x9f\xbc\x38\xec\x99\x1d\xbf\xb3\x5a\xad\x4f\xe4\xd0\x05\xf4\x23\
\x47\x8e\xdc\x4a\xb7\xcc\xcb\x29\x0e\xdf\x05\x03\x57\xa6\x46\xa3\
\x79\xd2\x72\x70\xec\x76\xfb\x53\x84\x01\xde\xe9\x8f\x57\x77\xb6\
\xb6\xb6\xbe\x08\x4e\xff\xc9\xc0\xb7\x28\x7b\x2f\xc6\x6e\xa4\xa1\
\x0b\x17\x70\x17\x1d\xca\xe7\xd0\xf8\x11\xfc\x7b\x79\x38\x2b\x7f\
\x9d\xfe\xcf\xf7\x15\x03\x2b\x06\x56\x0c\xac\x18\x58\x31\xb0\x62\
\xe0\x1f\xf0\x4c\x83\x8a\xd5\x02\xe4\xbc\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x18\xdb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x2c\x00\x00\x01\x2c\x08\x02\x00\x00\x00\xf6\x1f\x19\x22\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x66\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x30\x2d\x63\x30\x36\x30\x20\x36\x31\
\x2e\x31\x33\x34\x37\x37\x37\x2c\x20\x32\x30\x31\x30\x2f\x30\x32\
\x2f\x31\x32\x2d\x31\x37\x3a\x33\x32\x3a\x30\x30\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x4d\x4d\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\
\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\
\x6d\x6d\x2f\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\
\x79\x70\x65\x2f\x52\x65\x73\x6f\x75\x72\x63\x65\x52\x65\x66\x23\
\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x4f\x72\x69\x67\x69\x6e\x61\x6c\x44\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x30\x35\x38\
\x30\x31\x31\x37\x34\x30\x37\x32\x30\x36\x38\x31\x31\x41\x38\x36\
\x35\x43\x30\x33\x36\x33\x46\x31\x37\x39\x33\x33\x45\x22\x20\x78\
\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\x75\x6d\x65\x6e\x74\x49\x44\x3d\
\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x42\x44\x45\x45\x32\x39\x38\
\x37\x43\x46\x32\x37\x31\x31\x45\x31\x39\x34\x46\x42\x38\x31\x36\
\x33\x43\x33\x35\x38\x46\x43\x37\x46\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x42\x44\x45\x45\x32\x39\x38\x36\x43\x46\x32\
\x37\x31\x31\x45\x31\x39\x34\x46\x42\x38\x31\x36\x33\x43\x33\x35\
\x38\x46\x43\x37\x46\x22\x20\x78\x6d\x70\x3a\x43\x72\x65\x61\x74\
\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\x65\x20\x50\x68\
\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x35\x20\x4d\x61\x63\x69\
\x6e\x74\x6f\x73\x68\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\
\x65\x72\x69\x76\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\
\x3a\x69\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x30\x35\x38\x30\x31\x31\x37\x34\x30\x37\x32\
\x30\x36\x38\x31\x31\x41\x38\x36\x35\x43\x30\x33\x36\x33\x46\x31\
\x37\x39\x33\x33\x45\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x30\x35\x38\x30\x31\x31\x37\x34\x30\x37\x32\x30\x36\x38\x31\
\x31\x41\x38\x36\x35\x43\x30\x33\x36\x33\x46\x31\x37\x39\x33\x33\
\x45\x22\x2f\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\
\x69\x70\x74\x69\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\
\x46\x3e\x20\x3c\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\
\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\
\x22\x3f\x3e\x9b\x80\x85\x2f\x00\x00\x15\x0b\x49\x44\x41\x54\x78\
\xda\xec\x9d\x6b\x57\x22\x39\xd7\x86\xa5\x80\x06\x01\x45\xc0\x13\
\x9e\x6d\x75\xa6\x7b\xfe\xff\xcf\x98\xef\x3a\xb6\x67\x11\x15\x14\
\x45\x10\x10\x39\x3c\xf7\xaa\x5a\xcb\xd7\xb7\xdb\x8a\xa8\x05\x64\
\x27\xf7\xf5\x81\x45\xab\x0d\xa9\x64\x5f\x95\x63\x25\xa1\x7f\xff\
\xfd\x77\x82\x10\x32\x3e\x1c\x66\x01\x21\x94\x90\x10\x4a\x48\x08\
\xa1\x84\x84\x50\x42\x42\x08\x25\x24\x84\x12\x12\x42\x28\x21\x21\
\x94\x90\x10\x42\x09\x09\xa1\x84\x84\x10\x4a\x48\x08\x25\x24\x84\
\x50\x42\x42\x28\x21\x21\x84\x12\x12\x42\x09\x09\x21\x94\x90\x10\
\x4a\x48\x08\xa1\x84\x84\x50\x42\x42\x08\x25\x24\x84\x12\x12\x42\
\x28\x21\x21\x94\x90\x10\x42\x09\x09\xa1\x84\x84\x10\x4a\x48\x08\
\x25\x24\x84\x50\x42\x42\x28\x21\x21\x84\x12\x12\x42\x09\x09\x21\
\x94\x90\x10\x4a\x48\x08\xa1\x84\x84\x50\x42\x42\x08\x25\x24\x84\
\x12\x12\x42\x28\x21\x21\x94\x90\x10\x42\x09\x09\xa1\x84\x84\x10\
\x4a\x48\x08\x25\x24\x84\x50\x42\x42\x28\x21\x21\x84\x12\x12\x42\
\x09\x09\x21\x94\x90\x10\x4a\x48\x08\xf1\x27\xc2\x2c\x90\x42\x3c\
\x1e\x77\x1c\x27\x14\x0a\x4d\x4e\x4e\x2a\xfe\xac\xd9\x6c\xf6\xfb\
\xfd\x5e\xaf\xd7\x6a\xb5\x98\x69\x94\x90\x7c\xaa\x48\x22\x91\x58\
\x2c\x06\xd3\x60\x1d\xde\xc4\x5d\x3e\xfd\x69\x2d\x97\xa7\xa7\x27\
\xbc\xc2\x4f\xbc\xe9\x74\x3a\xcc\x64\x4a\x48\x5e\xf5\x07\x1c\x27\
\xe9\x02\xdf\x12\x89\x04\x5e\xc3\xe1\x70\xb0\xf5\xe7\x6f\x0e\x77\
\xbb\x5d\xa8\xd8\x68\x34\xf0\xfa\xe8\x82\x6a\x93\x05\x41\x09\xed\
\x02\x9a\x4d\x4d\x4d\xa5\x52\x29\xbc\x42\xbc\xd1\x7f\x7b\xc2\xe5\
\xe5\x27\x10\xb2\x56\xab\xd5\xeb\x75\xbc\x42\x51\x16\x10\x25\x34\
\x16\xc4\x7d\x3a\x9d\x9e\x72\xd1\x2d\x61\x60\x61\x61\x01\xef\x6b\
\x2e\xd5\x6a\x15\x66\xb2\xc8\x28\xa1\x39\xee\xcd\xcc\xcc\xe4\x72\
\xb9\x6f\xdf\xbe\xe9\x9f\x5a\xef\x1e\xb1\xb4\xb4\xd4\x6e\xb7\x6f\
\x6f\x6f\xef\xef\xef\x69\x23\x25\x94\x0a\x7a\x77\x10\x2f\x9b\xcd\
\xe2\x8d\xc4\xf4\xe3\x96\x91\x77\x41\xd7\xb1\x52\xa9\x40\x48\xbc\
\x61\xb1\x52\x42\x01\x84\x42\x21\xd4\x7b\xb3\xb3\xb3\xd3\xd3\xd3\
\xc6\xdc\x4d\x3c\x1b\x1f\x1e\x1e\x6e\x6e\x6e\x50\x37\xf6\xfb\x7d\
\x16\x34\x25\xd4\x32\x2b\x23\x91\xf9\xf9\xf9\xb9\xb9\x39\xbc\x31\
\xf2\x02\xa7\x5d\x3a\x9d\x4e\xb9\x5c\x2e\x95\x4a\x9c\xea\xa0\x84\
\x7a\xe9\xb7\xb8\xb8\x08\xfd\x1c\xc7\xb1\xe1\x62\x51\x2b\x2e\x2c\
\x2c\x40\xc5\xab\xab\x2b\xaa\x48\x09\xc7\x4c\x34\x1a\x85\x7e\x68\
\x7c\xda\xa0\xdf\x6b\x70\xbd\xf0\x10\xf7\x1d\x34\x50\xa1\xe2\xf3\
\xf3\x33\x83\x81\x12\x8e\xa1\xa7\x04\xfd\x72\xb9\x1c\x3a\x81\xd6\
\x66\x02\x54\xf4\x5a\xe0\xb7\xb7\xb7\x50\x91\x23\x37\x94\x70\x74\
\xed\xb1\xe5\xe5\x65\xcb\xf5\x7b\x0d\xf2\x01\x6d\x01\x64\x08\x54\
\xbc\xb8\xb8\x60\x03\x95\x12\x0e\x17\xdc\xf8\x97\x96\x96\x82\x5d\
\x59\x66\x92\x8a\x99\x4c\xa6\x58\x2c\xa2\xbb\xc8\x11\x54\x4a\x18\
\x3c\xc9\x64\x72\x6d\x6d\x6d\xf4\xab\xcc\x64\x81\xdb\xd3\xea\xea\
\x2a\x6a\xc5\xb3\xb3\xb3\xc7\xc7\x47\x66\x08\x25\x0c\xb2\xfd\x89\
\xdb\x3c\xb3\x62\x40\x70\xab\xfa\xf1\xe3\xc7\xcd\xcd\x0d\x5b\xa7\
\x94\x30\x00\xe0\x1e\x0c\x34\x75\xea\x6f\xd8\x59\x37\x33\x33\x03\
\x0f\x61\x23\x73\x83\x12\x7e\x86\x58\x2c\xb6\xbe\xbe\xae\xdb\x62\
\x6b\x71\x8d\x08\xe4\x61\x36\x9b\x3d\x3d\x3d\xe5\xd8\xa9\x1f\xdc\
\xde\xe2\x6d\x32\x99\xcc\xcf\x9f\x3f\x69\x60\x20\x20\x1b\x91\x99\
\x50\x91\x59\xc1\x9a\x70\xb0\xdb\x92\xe3\x78\x37\x6f\x66\x45\x80\
\x84\xc3\xe1\xcd\xcd\xcd\x74\x3a\x8d\x2a\x91\xcf\x10\x53\x42\x15\
\xf1\x78\x7c\x7b\x7b\x5b\xe8\x73\x0f\xfa\x83\x5b\x5b\x32\x99\x3c\
\x38\x38\xe0\xfe\x37\x6c\x8e\xaa\x9a\xa0\x34\x70\xd8\x3d\x6d\x64\
\x32\xb2\x9a\x59\x41\x09\xff\x1f\xa1\x50\x68\x65\x65\xe5\xfb\xf7\
\xef\xb6\x2d\x01\x1d\x57\x83\x1f\x59\x8d\x0c\xe7\x92\x23\x36\x47\
\xff\x2f\x26\xd0\x5d\x99\x99\x99\x61\x56\x8c\x92\x85\x85\x05\xd4\
\x8a\xc7\xc7\xc7\xec\x22\xda\x7e\xe3\x8f\x44\x22\x3b\x3b\x3b\x34\
\x70\x2c\x20\xdb\x91\xf9\x9c\x83\xb5\x5a\xc2\x68\x34\xfa\xd7\x5f\
\x7f\xa5\x52\x29\xfa\x30\x2e\x90\xf9\x28\x02\x14\x04\x25\xb4\x91\
\x78\x3c\xfe\xe3\xc7\x0f\xf5\x6e\xd6\x64\x04\xa0\x08\x50\x10\x5f\
\xd9\xe0\x98\x12\x8a\x24\x99\x4c\xfe\xfd\xf7\xdf\x22\xb6\x3f\xb3\
\x01\x14\x04\x8a\x03\x85\x42\x09\x6d\x61\x7a\x7a\x1a\x4d\x20\x76\
\x45\x74\xeb\x9c\xa3\x50\x8c\xd9\x1d\x8b\x12\xbe\x63\xe0\xf6\xf6\
\x36\xa7\x22\x74\x8c\x45\xc7\x41\xd1\x58\xe8\xa1\x5d\xb1\x88\x06\
\xcf\xd6\xd6\x16\xa7\xa7\xb4\x05\x45\x83\x02\xb2\xad\x5d\x6a\x91\
\x84\xb1\x58\x8c\x75\xa0\x94\xfa\xd0\xaa\x75\x4b\xb6\x44\xa4\x37\
\x1b\xc1\x7e\xa0\xa0\xfe\xa1\x3d\xf3\x16\x56\x48\x18\x0e\x87\x77\
\x76\x76\x38\x16\x2a\x08\x14\x16\x8a\xcc\x92\xbd\x7c\xcc\x97\xd0\
\x6b\xde\x70\x3e\x50\x1c\x28\x32\x4b\xba\x0f\x86\x5f\x21\x3a\xfa\
\x9b\x9b\x9b\x5c\x13\x23\x14\x14\x1c\x8a\xcf\xf8\x81\x34\xc3\x25\
\x5c\x5e\x5e\xe6\xba\x50\xd1\xa0\xf8\x50\x88\x94\x50\x2a\x99\x4c\
\xc6\x3b\xf8\x92\x88\x06\x85\x68\xf6\x46\x07\xc6\x4a\x88\x1e\xc5\
\xc6\xc6\x06\x23\xd8\x0c\xd6\xd7\xd7\x0d\xee\xd5\x9b\x29\x61\x38\
\x1c\xde\xda\xda\xe2\x94\xa0\x39\x61\xea\x38\x28\x50\x53\x07\x4b\
\xcd\x0c\xd3\xb5\xb5\x35\xee\x52\x61\x18\x28\x50\x14\x2b\x25\x94\
\x41\xd6\x85\x51\xcb\x92\xa5\x84\xbc\x5f\x12\xb6\x71\xec\x93\x30\
\x14\x0a\xa1\x07\xcf\x23\x93\x0c\x06\x85\x8b\x22\x36\x6c\xe6\xd0\
\x28\x09\x67\x67\x67\xb9\x67\xb6\xf1\xa0\x88\x0d\x3b\x9c\xc7\x1c\
\x09\xa3\xd1\xe8\xca\xca\x0a\x63\xd4\x06\x50\xd0\x26\x2d\xef\x36\
\x47\x42\xb4\x52\x38\x27\x61\x09\xde\x51\x05\x94\x50\x2f\x32\x99\
\x4c\x3a\x9d\x66\x74\xda\x03\x8a\xdb\x98\x6d\xbc\x4d\x90\x10\xf7\
\xc5\xd5\xd5\x55\xc6\xa5\x6d\xa0\xd0\xcd\x68\xfb\x98\x70\x0d\xf3\
\xf3\xf3\x96\x6f\x5c\x69\x27\x28\x74\x14\x3d\x25\xd4\xa2\x24\xf2\
\xf9\x3c\x23\xd2\x4e\x50\xf4\x06\xdc\x7f\xc5\x4b\xb8\xbc\xbc\xcc\
\xf1\x18\x6b\x41\xd1\x1b\xf0\xa0\x93\xec\xf0\x8d\xc7\xe3\x5c\xa1\
\x66\x39\x08\x00\xe9\xbb\x77\xcb\x96\x70\x69\x69\x89\xfb\x17\x5a\
\x0e\x02\x00\x61\x40\x09\xc7\x43\x22\x91\xe0\x59\x93\x64\xc2\x9d\
\xa0\x42\x30\x50\xc2\xf1\x74\xca\x19\x7f\xc4\x80\x60\x90\x2a\x21\
\xba\x01\xdc\x3c\x86\xbc\x80\x60\x90\xdb\x33\x94\x2a\x21\x37\x8f\
\x21\xc6\x84\x84\x48\x09\xa3\xd1\x68\x2e\x97\x63\xd8\x91\xd7\x20\
\x24\x84\xce\x19\x8a\x94\x70\x6e\x6e\x8e\x83\xa2\xe4\x37\x10\x12\
\x08\x0c\x89\x29\x8f\x30\xaf\xc5\xd1\x6a\xb5\x7a\xbd\xde\xd3\xd3\
\x53\xb7\xdb\x7d\xf9\x61\x38\x1c\x8e\xc5\x62\x8e\xe3\xd8\x7c\xe4\
\x2d\x02\xe3\xf2\xf2\xb2\xdf\xef\x53\xc2\xa1\x77\xc1\x6d\x3b\xd7\
\x05\xd6\x3d\x3c\x3c\xd4\xeb\x75\x88\xd7\x68\x34\xde\xfd\xfb\x44\
\x22\x01\x21\x53\xa9\xd4\xf4\xf4\xb4\x55\x4e\x22\x30\x10\x1e\x77\
\x77\x77\x94\x70\xb8\x18\xf6\x54\xb5\x82\x5a\xad\x56\xa9\x54\xaa\
\xd5\xea\xf3\xf3\xf3\x87\xfe\x63\xc3\xc5\x8b\x45\x74\x93\xd2\xe9\
\x74\x36\x9b\xb5\x64\xcf\x01\x84\x07\x25\x1c\x2e\xb8\xc1\x1b\x1f\
\x4c\x68\x64\x96\x4a\xa5\x72\xb9\xfc\x51\xf7\xde\x04\x1f\x72\xe3\
\x02\x1b\xd1\x5a\x9b\x9f\x9f\x37\x7b\x0f\x1e\x54\xfe\x08\x12\x34\
\x19\x28\xe1\xb0\xc8\xe5\x72\x06\x0f\xc9\xa0\x33\x03\xf7\xd0\xab\
\xe9\x74\x3a\x81\x7f\x38\x6c\x2c\x16\x8b\xd0\x3b\x9f\xcf\x9b\x3d\
\xb2\x85\x20\xc1\x95\x52\xc2\x61\x61\xf0\x72\x6d\xb4\x3c\x11\x3a\
\xc3\xbe\x85\x43\xef\xf3\xf3\x73\xa8\xb8\xb4\xb4\x64\x6a\x66\xe2\
\xba\x28\xe1\xb0\xf0\xc6\x1b\x8c\xec\xfb\x15\x0a\x85\x41\x46\x5c\
\x82\x02\xaa\x1f\x1f\x1f\x5f\x5f\x5f\xaf\xac\xac\x98\xd7\xbc\x47\
\x90\x20\x54\x46\x99\x9f\x16\x49\x68\xde\x72\xed\x5e\xaf\x87\x7a\
\x09\x1d\xb6\xb1\x7c\x3b\xc2\x74\x7f\x7f\x7f\x76\x76\xd6\x98\x7d\
\x22\x5e\x87\x8a\x20\x09\x25\x65\xbd\x61\xcd\xa7\x56\xab\xb5\xb7\
\xb7\x37\x2e\x03\x5f\x40\x02\x90\x0c\x24\xc6\xb0\x16\x29\x9b\xa3\
\xc1\x33\x39\x39\x69\xd2\xa1\xf3\x0f\x0f\x0f\x87\x87\x87\xa8\x09\
\x3f\xf4\xbf\x90\x03\xd3\xd3\xd3\x5e\x56\x78\xb9\xe1\xbd\xb6\xdb\
\x6d\xef\x15\x34\x9b\x4d\x7c\xb8\xf7\x93\x01\xc1\x7f\xd9\xdd\xdd\
\xdd\xda\xda\xc2\x87\x9b\x91\xbd\xc8\x16\xe4\x12\xae\x8b\x12\x06\
\x89\x49\x3b\x1a\xde\xdd\xdd\xa1\x4b\x36\xf8\xc2\x0e\xf4\x70\x66\
\x5c\xfc\xce\xe8\xf3\x56\x2f\xbc\x7e\xa6\x0e\xf1\x77\xef\x32\x60\
\xab\x0c\xb7\x83\x83\x83\x83\xcd\xcd\x4d\x63\xda\xfc\x08\x18\x4a\
\x48\x09\x7d\x9b\x7f\xa7\xa7\xa7\x03\x95\x4d\x24\xb2\xb8\xb8\x88\
\x96\xd5\x27\xd6\x25\x4f\xba\xe4\xf3\xf9\xe7\xe7\xe7\x4a\xa5\x72\
\x75\x75\xf5\xee\xb4\x07\x6e\x0a\x47\x47\x47\xeb\xeb\xeb\x66\x2c\
\x87\x40\xc0\xe0\xaa\x29\x61\x70\xa9\x8c\x44\x92\xc9\xa4\x01\x91\
\x01\x1f\xce\xce\xce\x06\xb9\x5e\xf8\x03\x19\xbe\x3e\x5e\x02\x81\
\x17\x16\x16\xe6\xe6\xe6\x20\xff\x20\x33\x90\x48\x1e\xbe\xd4\x80\
\xee\x37\x02\x06\xd9\x38\x8c\x19\x57\x4b\x25\x9c\x9a\x9a\x32\x60\
\x72\xb9\x56\xab\x9d\x9c\x9c\xbc\xdb\x0a\x85\x7b\xcb\xcb\xcb\xc1\
\xae\x8f\x85\x57\xf3\xf3\xf3\x50\xeb\xe2\xe2\x42\x3d\x14\x84\xe4\
\x21\x91\x50\x57\xfa\xd4\x05\x02\x06\x97\x20\x62\x09\x9b\x8c\xd1\
\xd1\x54\x2a\x25\xdd\xc0\x56\xab\x75\x78\x78\xa8\x36\x10\xe2\xed\
\xec\xec\xa0\x41\x38\xa4\x15\xea\xf8\x58\x7c\x38\xbe\x42\xfd\xf9\
\x48\x24\x92\x6a\xc0\x78\xa9\x94\xb0\xa1\x84\xa3\xa0\xd7\xeb\x1d\
\x1f\x1f\xbf\x7e\xf2\xe8\xcd\xda\xfe\x9f\x7f\xfe\x19\xc1\xf8\x24\
\xbe\x02\x5f\xa4\xae\xe8\x90\x54\x24\xf8\xa3\x83\xb7\x0c\x1b\x63\
\x25\x44\x53\xca\x6f\x54\x50\x0a\xe7\xe7\xe7\xea\x51\xca\x4c\x26\
\x83\x0a\x6a\x64\x0f\x86\xe3\x8b\xf0\x75\xea\x81\x50\x24\x18\xc9\
\x16\x9d\xed\x08\x1b\x11\x8b\x10\x04\x24\x11\x3d\x6c\xd1\x1d\xc2\
\x6a\xb5\xaa\xee\x86\xa1\x13\xb8\xb9\xb9\x39\xe2\x6b\xc4\xd7\xe1\
\x4b\xd5\x03\xa1\x48\x36\x12\x2f\xba\x5b\x28\x62\x3c\x4f\x80\x84\
\xa2\xdb\xa2\xe8\x5f\xa9\xeb\x13\x54\x47\x6b\x6b\x6b\x63\xb9\xcb\
\xe0\x4b\xf1\xd5\xea\xfa\x10\x89\x17\xf7\xa0\xba\xb8\xe0\x11\x20\
\xa1\xe8\xb6\xa8\xfa\xc1\x08\x74\xcc\x46\x5f\x07\xfe\x59\x1f\x2a\
\xfa\x87\x48\xbc\xac\x27\x12\x24\x06\x0f\x25\x1c\x22\xcf\xcf\xcf\
\xd7\xd7\xd7\x7e\xbf\x8d\x44\x22\xe3\x35\xf0\xb5\x87\x8a\xf1\x52\
\x5c\x42\x20\x8f\x17\x33\x78\xa4\x4a\x88\x8e\xb5\xdc\xc7\x97\x10\
\xbe\x7e\x6d\x39\x84\xfe\xf7\xef\xdf\x35\xd9\xa2\x0f\xc9\x40\x62\
\xfc\x6e\x07\xb8\x04\xc5\xad\x44\x73\x10\x3c\xfa\x0f\x28\xe8\x2e\
\x61\x3c\x1e\x17\x3a\x2a\xd3\xe9\x74\xca\xe5\xb2\xdf\x6f\x67\x67\
\x67\xb5\x9a\x0d\x47\x62\x14\x83\x34\xb8\x10\x11\x4b\x4f\xde\xbc\
\xd9\xe9\x5f\x19\x0a\x90\x50\xe8\x3d\xb8\x54\x2a\xf9\xcd\xb3\xa1\
\xed\xa7\xe1\x41\x42\x48\x92\x5f\xa3\x14\x17\x82\xcb\x11\x5a\x10\
\xfa\x87\x90\xee\x12\x0a\x6d\x8b\xa2\x09\xa7\x98\x96\xc8\xe7\xf3\
\x1a\xee\xda\xe8\x2d\x58\xf5\xfb\x2d\x2e\x47\xe8\x30\xa9\xfe\x21\
\xa4\xbb\x84\x42\x9f\x21\xac\xd5\x6a\x7e\x83\x19\xe8\x80\x69\xfb\
\x98\x02\x12\xe6\xd7\x4d\xc5\xe5\xe0\xa2\x24\x96\x85\xfe\x21\xc4\
\x9a\x70\x28\x54\x2a\x15\xbf\x5f\x2d\x2c\x2c\x68\xbb\x8c\x03\x09\
\x53\x1c\xab\xa2\xb8\x28\x86\x10\x6b\x42\xed\x78\x78\x78\xf0\x1b\
\x27\xd0\xfc\x28\x1b\x24\xcf\xef\x1e\x51\xad\x56\x25\xb6\x48\x59\
\x13\x7e\x15\x89\xe7\xec\x34\x9b\x4d\xbf\xb6\x68\x3a\x9d\xd6\x7c\
\x0f\x7f\x24\xcf\xef\xf9\xe9\x4e\xa7\x23\x68\xf7\x24\x41\x21\xa4\
\xb5\x84\x8e\x8b\xc4\x0e\xa1\xdf\xaf\x44\x6c\x1e\xa1\x38\x7d\xb5\
\x5e\xaf\x8b\x2b\x0e\xfd\xa3\x48\xeb\xc4\x09\x3d\x6e\x4e\x21\xa1\
\x88\x9d\x94\x14\x89\x94\x28\xa1\xfe\x81\xa4\xb5\x84\x42\x4f\x4d\
\xf0\x7b\x1c\x76\x72\x72\x52\xc4\x79\x52\x48\xa4\xdf\x04\xb7\x94\
\xad\x93\x64\x05\x92\xa3\x79\x34\x88\x2b\xef\x7e\xbf\xef\xb7\x62\
\x5b\xd0\xe3\x20\x7e\x49\xc5\xa5\x49\x7c\xd2\x57\xf3\x40\xd2\xbd\
\x4f\x28\xae\xbc\xdb\xed\xb6\xdf\x10\xe2\x30\x96\x6e\x3c\xb9\x04\
\xfe\xb1\x8a\xa4\x7e\x68\x47\x53\x06\xd2\x40\xf7\x08\xe6\x5d\xe0\
\x12\x7e\x22\xb2\x3f\x81\xb7\x71\x9b\xb7\x65\x06\x9a\x5b\x6b\x6b\
\x6b\x01\x6e\x91\xa6\x48\xea\xf3\xf3\xb3\xb8\xb5\x84\x94\xf0\xf3\
\x48\x5c\xba\xad\xd8\x48\x26\xc0\xe1\x01\x6f\xfb\xe0\xd7\x5f\x8a\
\x7f\x22\xbb\x82\x1a\x7d\x55\x24\x55\xbd\x53\x0e\x03\x89\xcd\xd1\
\xf1\xa3\xe8\x32\x05\x35\x3c\x80\xe6\xee\x9b\x9b\x97\xe2\x87\x41\
\x4d\xa6\x2b\x92\x2a\x51\x42\x4e\x51\xd8\x85\x22\x46\x83\x92\xb0\
\xd9\x6c\xbe\xf9\x60\x11\x7e\x18\xd4\xe8\xa5\x22\xa9\xd2\xb7\x60\
\xa3\x84\x84\x10\x4a\x38\x64\x46\xd0\x90\xf3\x9b\x6f\x54\xcc\xef\
\x05\x58\x9f\x1b\x76\x92\x21\x25\xfc\x7c\xff\x4a\x62\xf7\x23\x28\
\x09\xbd\x5d\xd2\xfe\xfc\x79\x80\xbb\xb6\x8d\xa0\x51\xcd\x40\x92\
\x21\xa1\xc4\x35\xfb\x8a\x18\x0d\x70\xbb\xa4\x4c\x26\xb3\xb9\xb9\
\xf9\xf2\x5d\x78\x13\xec\xa9\x66\x8a\xa4\x4a\x94\x50\xf3\x40\x8a\
\x30\xef\x82\x45\xf1\xe0\x4c\xb0\xb3\xea\x59\x17\x6f\x89\x5c\xe0\
\x13\x77\x8a\x83\x28\x24\x2e\xe8\xd5\xbc\x26\xd4\x5a\x42\x89\xa3\
\xe1\x90\x10\x6d\xc2\x37\x6f\x1f\xc3\x58\x78\x39\xa4\x79\x73\x85\
\x84\x12\x9f\xf0\x64\x73\xf4\xf3\x48\xdc\xe1\x0b\x06\xfa\x3d\xca\
\x2d\xe8\x11\x04\xbf\xa4\xe2\xd2\x24\x0e\xcc\x68\x1e\x48\x5a\x67\
\xa8\xc4\x9a\x50\x51\x3b\xf9\xcd\xef\x69\x18\xb2\x7e\x95\xb6\xd0\
\x8d\x98\x35\x0f\x24\xad\x25\x14\xba\xf1\xb3\x62\x43\x51\xbf\x6d\
\x2f\xb4\x42\x91\x48\xa1\xe7\x82\x68\x1e\x48\xba\x4f\x51\x48\x9c\
\xa5\x50\x48\x28\xe2\xe0\x58\x45\x22\x25\x4a\xa8\x7f\x14\xe9\xde\
\xbe\x97\x58\x19\x2a\x1e\xde\xad\x56\xab\x9a\xb7\x48\x91\x3c\xbf\
\xe3\xd0\x70\x51\x89\x44\x82\xd5\xa0\x75\x12\x4a\x7c\x7a\x6d\xc2\
\xdd\xd0\xe9\xcd\x9f\xf7\xfb\xfd\xdb\xdb\x5b\x9d\x53\x8e\xe4\xf9\
\xcd\x0c\xe1\xa2\x24\x3e\xd7\x42\x09\x2d\x95\x50\x31\x6f\xae\x38\
\x25\x66\xec\xa8\xcf\x7e\x11\xb1\x4b\xd5\x9f\x0c\xe3\xa1\x67\x4a\
\x28\x80\xe9\xe9\x69\xc5\x56\xd6\xea\x83\x7b\xc7\x48\xb9\x5c\x56\
\x6c\x1c\x2e\x62\x97\x2a\x89\x21\xa4\xbb\x84\xfa\xdf\xc6\xde\x04\
\xcd\x36\xc5\x5e\xf7\x97\x97\x97\x1a\x0e\x15\x20\x49\x57\x57\x57\
\x7e\xbf\xc5\xe5\x08\x3d\x1e\x8b\x35\xe1\x57\x11\xba\xbd\x17\x98\
\x9f\x9f\xf7\x9b\xd7\x46\x6d\x73\x71\x71\xa1\x5b\x82\x91\x24\xbf\
\x6a\x10\x17\x82\xcb\x11\x5a\x10\xfa\x87\x90\xee\x12\xb6\x5a\x2d\
\xa1\x87\x01\x45\x22\x11\xf5\x89\x7f\x5a\x9d\xaf\x82\xc4\xa8\x4f\
\x53\x94\xb8\xf3\x9d\xd7\xcb\x55\x2c\xc1\xa3\x84\x83\xb6\x91\x84\
\xb6\x48\xc1\xe2\xe2\xa2\xe2\xf8\xdb\xe3\xe3\x63\x4d\xa6\x2b\x90\
\x0c\x24\x46\x71\xa8\x30\x2e\x44\x68\x11\x88\xd8\xa3\x51\xc0\x3a\
\x40\xb9\x2d\xd2\x68\x34\xaa\x38\xe4\x08\x6d\xbf\xa3\xa3\xa3\xb1\
\xd7\xf3\x48\x00\x92\xa1\x18\xc7\xc7\x25\x08\xdd\x0a\x5d\x4a\xf0\
\x50\xc2\xe1\x92\xcf\xe7\x15\x47\x73\xa1\x11\x78\x72\x72\x32\x46\
\x0f\xbd\x0a\x59\xd1\x30\x46\xe2\x15\x27\x87\x32\x78\x6c\x91\xf0\
\xf1\xf1\x51\x6e\x10\x38\x8e\xb3\xba\xba\xaa\xf8\x03\x6f\xfb\xd0\
\x71\x25\x0f\x5f\xad\x5e\x49\x87\xc4\x8b\xde\xcf\x42\x44\xf0\xc8\
\x90\x50\xe8\xd8\x8c\x47\x3a\x9d\x56\x1f\xcd\x7b\x73\x73\x33\xfa\
\x76\xa9\xd7\x0a\x55\xcf\x58\x22\xd9\x7e\x4b\x7f\x44\x80\x6b\xa4\
\x84\xc1\xd0\xed\x76\xf5\x1f\xe0\x52\x83\xfa\x44\xfd\xf4\x2d\xaa\
\xa3\x5f\xbf\x7e\x8d\x6c\x81\x15\xbe\x08\x5f\xa7\xae\x03\x91\x60\
\x75\x1d\xae\x3f\x08\x1b\x11\x4f\xc3\xc9\x68\x69\x08\x3d\x2d\xfd\
\x75\xa3\x74\x6b\x6b\x4b\xdd\xae\xc3\x35\xee\xee\xee\x8e\xe0\x4a\
\x07\xf9\xa2\x41\x12\xcc\xb0\xb1\x4b\x42\xa1\xc7\xe2\xfd\x56\xb1\
\x6c\x6f\x6f\xab\x17\x9d\xa0\x82\xda\xdf\xdf\x3f\x3d\x3d\x1d\xd2\
\xfd\x1b\x1f\x8b\x0f\xc7\x57\xa8\xab\x5c\x24\x12\x49\x15\x77\xe0\
\x84\xdc\xb0\x91\x31\x03\x8b\x5b\x1a\xda\xf7\x42\x97\x4d\xbd\x30\
\x35\x35\xb5\xb1\xb1\xf1\xfa\x0c\x09\xbf\x2e\xe2\xfd\xfd\x7d\x3e\
\x9f\x47\x97\x2c\xa8\xba\xa8\xd7\xeb\xe1\x63\x2f\x2f\x2f\x07\x99\
\x99\x44\x22\x15\x8f\x44\x0a\xea\x10\x4a\xa9\x09\x65\x48\xe8\x9d\
\x96\x9e\x4c\x26\xa5\x47\x46\x36\x9b\xc5\xb5\x9c\x9f\x9f\xbf\x7b\
\xbd\xf8\x1b\x38\xb3\xb8\xb8\x88\xff\xf2\x95\x69\x3a\x54\x7a\x95\
\x4a\xe5\xea\xea\x6a\xc0\x85\x01\xc1\x9e\xee\x34\x46\x10\x30\x52\
\xf6\x28\x12\xb3\x16\xa9\x5a\xad\x1a\x20\xe1\x84\xbb\xa6\x34\x12\
\x89\x0c\x32\x3d\x88\x18\x2a\xb8\xe0\xc2\xd3\xe9\x74\x26\x93\x19\
\xbc\x89\xd8\x6a\xb5\xee\xee\xee\x90\x69\x83\x0f\x0f\xa2\xa1\x81\
\x3a\xd0\x0c\x03\x65\x8d\x23\x88\x91\x10\x2d\xb4\xa5\xa5\x25\x33\
\xe2\x03\x81\x0e\x0f\x0f\x0f\x0f\x07\x5c\x51\xf5\xe8\x52\x2c\x16\
\x63\xb1\x18\x1a\x8a\x93\x93\x93\xb0\xd1\x5b\xcc\xe9\xed\xbc\xe4\
\x4d\x49\x43\x5a\xb8\x87\xf7\x88\xbf\x8f\xae\xf5\xf3\x46\x62\x84\
\x3e\xac\xf4\x26\x22\x76\x12\x11\x26\x21\x62\xab\xdd\x6e\x4b\xdc\
\xf4\xf2\x4d\x10\xee\x3f\x7f\xfe\x84\x87\x1f\x9a\x7d\x19\xde\xb9\
\xbc\x30\xd0\x80\x91\x98\x17\x10\x2a\x68\x8e\x4a\x49\xad\xa4\x31\
\x68\x54\x86\x13\x06\x81\xa0\x87\x87\xea\x79\xfc\x11\x90\xcb\xe5\
\x90\x0c\x93\x0c\x14\x17\x2a\x92\x9e\x4f\x41\x03\x43\xee\x53\x6d\
\x7e\x8d\xc0\xf5\xf5\x75\x74\xf6\x2e\x2e\x2e\x46\x7f\xe7\x4e\x24\
\x12\xcb\xcb\xcb\x26\x35\x41\x25\xb6\x45\x85\x49\x58\xaf\xd7\x4d\
\x6a\x91\xbe\x6e\x9a\x82\xdb\xdb\x5b\xf4\xfa\x46\xb3\x17\x03\xf2\
\x10\x1d\x6c\xd4\x81\x13\x26\x82\x3c\x94\x35\xb1\x2c\xec\x49\x4d\
\x44\xaa\xe8\x45\xfd\xea\x66\x61\x36\x9b\x2d\x97\xcb\x03\xce\xe6\
\x7d\xb2\xbc\x23\x11\x64\xe0\xdc\xdc\x9c\xf4\x49\x57\x75\x90\xc8\
\x4a\x30\x25\xd4\x08\x88\x81\xf6\x36\x6c\x2c\x95\x4a\x8a\x3d\x97\
\x3e\x47\x34\x1a\x85\x7b\xf8\x7c\x89\x67\x9b\x51\x42\x8d\x78\x7a\
\x7a\xaa\xd5\x6a\x06\xac\xe7\x50\x00\x49\xf2\x2e\xb8\x52\xc4\xd3\
\x17\xf7\x0b\x46\xd5\x97\x4e\xa7\x21\xb6\xd9\x99\xf6\xc2\x27\xa6\
\x67\x28\xe1\x87\x41\x15\x61\x49\x3c\x4d\xb9\x4c\xb8\x33\xef\x50\
\x11\xfd\x1c\x84\xd7\x20\x4f\xa9\x4e\x4e\x4e\xc6\x62\xb1\x54\x2a\
\x05\xfd\x0c\x1b\xf6\x7c\x17\x6d\xb7\x93\x34\x4a\xc2\xfb\xfb\x7b\
\xd4\x0c\x42\xf7\x1d\xfa\x1c\x71\x97\x97\x9d\x32\xe0\x64\xaf\xd7\
\xf3\x5e\x5f\xfe\xc6\x71\x1c\xfc\x8d\xf7\x3a\x61\x2b\x08\x0c\x59\
\xe3\xa2\x52\x25\xec\xf7\xfb\xa8\x0c\x0d\xee\x19\x0e\xe2\xe4\x84\
\x3b\xc1\x30\x41\xfe\x68\x25\x49\x7c\xfe\xdb\x61\x5e\x13\x33\xf0\
\xee\xce\x12\x53\x2e\x52\x42\xef\xc9\x00\x86\x1d\x79\x0d\x42\x42\
\xe8\x81\x96\x52\x1f\x9d\x56\x6c\xd8\x4e\xec\x44\x6e\x48\x48\x95\
\xb0\xd5\x6a\x19\xb6\x94\x94\x7c\x85\x6a\xb5\x2a\x77\x23\x22\xc1\
\x9b\x88\x5c\x5e\x5e\x32\xf8\x88\x47\xb1\x58\x94\x9b\x78\xc1\x12\
\x36\x1a\x0d\x56\x86\x64\xc2\x9d\xb5\x12\xf4\xe0\x92\x51\x12\x7a\
\xf7\x3f\x0e\x93\x5a\x0e\x02\x40\x74\x35\x28\x5e\xc2\x66\xb3\x29\
\x71\x72\x96\x04\x08\x02\x40\xf4\x41\x09\xe2\x25\x04\x85\x42\x41\
\xff\x63\x77\xc8\x90\x40\xd1\x23\x00\xa4\x5f\x85\x78\x09\x9f\x9f\
\x9f\x39\x5d\x61\x2d\xd7\xd7\xd7\x42\xe7\x06\x8d\x92\xd0\x98\x92\
\x20\xd6\xde\x7f\x4d\x90\xd0\x8c\x36\x09\xb1\xb6\x27\xe2\x98\x51\
\x1e\x95\x4a\xa5\x5a\xad\x32\x2e\xed\x01\xc5\x6d\xcc\xd2\x45\xc7\
\x98\x52\x39\x3b\x3b\xe3\x08\x8d\x25\xa0\xa0\xc7\x78\xa8\x23\x25\
\xf4\xa5\xdd\x6e\x5f\x5c\x5c\x30\x40\x6d\x00\x05\x3d\x9a\x1d\xb1\
\x28\xe1\x87\x29\x97\xcb\xa2\x8f\xf5\x25\x83\x50\xab\xd5\x84\x3e\
\xb2\x64\x85\x84\xde\x09\xec\x22\xce\x85\x24\x9f\xc3\x3b\xdd\xcd\
\xb0\x65\x52\x8e\x61\x85\xf4\xf4\xf4\xf4\xee\x99\x47\x44\x2e\x28\
\x5c\x71\xfb\x38\x59\x27\xe1\x84\xbb\xe3\x1d\xd7\xb2\x19\x09\x8a\
\x55\xdc\x76\x86\x96\x4a\x08\xd0\x62\x31\xef\x7e\x69\x39\x28\x50\
\x14\xab\x91\x97\x66\xa6\x84\xe8\x39\x0c\x7e\xf0\x18\xd1\x1f\x14\
\x25\x0a\xd4\xd4\xde\xbe\x63\x6a\xb1\x35\x9b\x4d\x53\x6f\x9c\x16\
\x82\xa2\x94\xfe\xa8\x84\x8d\x12\x4e\xb8\xcb\x68\x4a\xa5\x12\x23\
\x58\x3a\x28\x44\xb3\xf7\xf5\x72\xcc\x2e\xbf\x42\xa1\xc0\xa7\xef\
\x45\x83\xe2\x33\x7e\x61\xb0\xe1\x12\x7a\x33\x87\x9c\xc1\x17\x0a\
\x0a\x0e\xc5\x67\xfc\xe6\x09\x8e\xf1\x05\x89\x3e\xfd\xaf\x5f\xbf\
\x0c\xee\x51\x18\xdc\xab\x47\xc1\xd9\x30\xba\xe6\xd8\x50\x9c\xdd\
\x6e\x17\xc5\xc9\x67\x0e\x05\x81\xc2\x42\x91\x59\xb2\xf8\xc9\xb1\
\xa7\x50\xf7\xf7\xf7\x87\x77\xf8\x26\x09\x10\x14\x13\x0a\xcb\x9e\
\x9b\xa6\x63\x4f\xd1\xb6\x5a\xad\x83\x83\x03\x4e\x1e\xea\xdf\x7d\
\x40\x31\xc9\xdd\xc9\x97\x12\xbe\xdf\xd1\x3f\x3a\x3a\xe2\x2e\x89\
\xda\x82\xa2\x41\x01\xd9\x36\x90\xe6\xd8\x56\xcc\xd5\x6a\x95\x1e\
\xea\x6c\xa0\x85\x3b\x24\x38\x16\x16\xf6\xfd\xfd\xbd\x25\xc3\x6e\
\xb2\x5a\xa1\x28\x14\x3b\x27\x75\x1d\x3b\x8b\xbc\x56\xab\xd9\x33\
\xf8\xa6\x3f\xde\xf0\x35\x0a\xc5\xce\xcb\x77\xac\x2d\xf8\x7a\xbd\
\x6e\xd5\x10\x9c\xb6\x78\x03\xd7\x28\x0e\x6b\x73\xc0\xb1\xb9\xf8\
\x1b\x8d\xc6\xde\xde\x9e\x55\x03\x71\xba\x81\xcc\x47\x11\x88\x3e\
\xce\x85\x12\x7e\x95\x76\xbb\xfd\xdf\x7f\xff\x59\xdb\x10\x1a\x7b\
\xa7\x00\x99\x6f\xd2\x96\x4d\x94\xf0\x93\x74\x3a\x1d\x74\x48\xae\
\xaf\xaf\x99\x15\xa3\x04\x19\x8e\x6c\xe7\xf2\x09\x10\x61\x16\x4c\
\xb8\x83\xe3\x85\x42\x01\x37\xe6\x8d\x8d\x8d\x48\x84\x79\x32\xf4\
\xbb\xde\xe9\xe9\x29\x9f\x6e\x61\x4d\xf8\x06\xd5\x6a\x75\x77\x77\
\x97\x8f\x5c\x0c\x15\x64\x2f\x32\x99\x06\x52\xc2\x77\xba\x88\x3c\
\xe6\x69\x78\x4d\x50\x76\x02\xd9\x1c\x1d\xa8\x69\x7a\x71\x71\x51\
\xaf\xd7\xd9\x34\x0d\xb6\x09\x7a\x72\x72\xc2\xf3\x42\x58\x13\x7e\
\xb8\x69\x6a\xf3\xe4\x55\xe0\x4d\x50\x1a\xc8\x9a\xf0\x93\x4d\xd3\
\xd9\xd9\xd9\xe5\xe5\x65\x56\x89\x9f\xa3\xdb\xed\x16\x0a\x85\x9b\
\x9b\x1b\x66\x05\x25\xfc\x3c\x08\xa0\xfb\xfb\x7b\x78\x08\x1b\x99\
\x1b\x1f\xcd\x3a\x34\xec\x39\x09\x41\x09\x83\xe9\xcf\x9c\x9e\x9e\
\x22\xa4\xd6\xd6\xd6\x12\x89\x04\x33\xe4\x5d\x1a\x8d\xc6\xd9\xd9\
\x19\xc7\x99\x29\x61\xf0\x1d\x9b\xbd\xbd\xbd\xb9\xb9\xb9\xa5\xa5\
\xa5\x70\x38\xcc\x0c\xf1\x6b\x7f\x16\x8b\xc5\x72\xb9\xcc\x87\xc5\
\x28\xe1\x50\x40\x60\x95\x4a\xa5\xbb\xbb\x3b\xb4\x4e\x73\xb9\x1c\
\x33\xe4\x37\x6e\x6f\x6f\xd1\xfe\xe4\x9a\x78\x4a\x38\x74\x10\x64\
\x27\x27\x27\x57\x57\x57\x8b\x8b\x8b\xd9\x6c\x36\x14\x0a\xf1\xde\
\x54\xa9\x54\x90\x21\x5c\x0a\x4f\x09\x47\x0a\x02\x0e\x2a\xe2\xc6\
\x0f\x15\x67\x67\x67\x1d\xc7\xc6\xc9\x9e\x5e\xaf\x87\xae\x32\xf4\
\x63\xed\x47\x09\xc7\x59\x2b\x9e\x9f\x9f\x5f\x5e\x5e\x42\x45\x74\
\x17\xed\x51\x11\xfa\xa1\xe3\x07\xfd\x38\xf8\x49\x09\xb5\x00\x81\
\x58\x28\x14\x10\x91\xf3\xf3\xf3\x50\xd1\xec\x49\x45\x5c\x2c\xf4\
\x43\xdf\x98\xfa\x51\x42\x1d\xa3\xb3\x58\x2c\xa2\x56\x9c\x99\x99\
\x81\x8a\x53\x53\x53\x86\x5d\xe0\xe3\xe3\x23\xf4\xbb\xbb\xbb\xe3\
\xf6\x3c\x94\x50\x6b\xfa\xfd\xfe\x9d\x4b\x2c\x16\xcb\xb9\x7c\xfb\
\xf6\x4d\xf4\x15\xb5\xdb\xed\xdb\xdb\xdb\x4a\xa5\xc2\x71\x17\x4a\
\x28\x8c\xa7\xa7\xa7\xa2\x4b\x32\x99\xcc\x64\x32\xd9\x6c\x36\x1a\
\x8d\xca\xea\xee\x42\x3c\xdc\x4d\x38\xe7\x4e\x09\x4d\x68\xc5\x01\
\x74\x1a\x61\xe3\xb4\x4b\x2a\x95\xd2\x36\xb5\xf5\x7a\xfd\xc1\x85\
\xee\x51\x42\x63\x6d\x44\xa7\x31\x12\x89\xa0\xc7\x98\x72\xd1\x61\
\x1d\x5c\xa3\xd1\xa8\xbb\xd4\x6a\x35\x0e\xb7\x50\x42\x2b\x40\xa0\
\x7b\xfd\x46\xbc\x77\x1c\x07\x35\x24\x6c\x9c\x74\x89\xc7\xe3\x23\
\x48\x00\x7a\x77\x4d\x17\x88\x87\xfb\x02\x07\x5a\x28\xa1\xd5\x40\
\x80\x9a\x8b\xf7\xcf\x50\x28\x04\x15\x51\x3d\xa2\x03\xf9\xed\xdb\
\x37\xbc\x47\xb5\x19\x8b\xc5\xbe\xd2\x35\x85\xf3\xf0\xad\xdd\x6e\
\xa3\x9b\x87\x4a\x0f\xef\xb9\xb0\x93\x12\x12\x5f\xa0\x47\xc3\xe5\
\xcf\x5f\x41\x48\x28\x1a\x0e\x87\xd5\x4e\xc2\xba\x6e\xb7\x8b\xcf\
\xe1\xb9\xa8\x94\x90\x04\xcc\x8b\x54\xdc\x25\xd5\x30\xb8\xbd\x05\
\x21\x94\x90\x10\x4a\x48\x08\xa1\x84\x84\x50\x42\x42\x08\x25\x24\
\x84\x12\x12\x42\x28\x21\x21\x94\x90\x10\x42\x09\x09\xa1\x84\x84\
\x10\x4a\x48\x08\x25\x24\x84\x50\x42\x42\x28\x21\x21\x84\x12\x12\
\x42\x09\x09\x21\x94\x90\x10\x4a\x48\x08\xa1\x84\x84\x50\x42\x42\
\x08\x25\x24\x84\x12\x12\x42\x28\x21\x21\x94\x90\x10\x42\x09\x09\
\xa1\x84\x84\x10\x4a\x48\x08\x25\x24\x84\x50\x42\x42\x28\x21\x21\
\x84\x12\x12\x42\x09\x09\x21\x94\x90\x10\x4a\x48\x08\xa1\x84\x84\
\x50\x42\x42\x08\x25\x24\x84\x12\x12\x42\x28\x21\x21\x94\x90\x10\
\x42\x09\x09\xa1\x84\x84\x10\x4a\x48\x08\x25\x24\x84\x50\x42\x42\
\x28\x21\x21\x84\x12\x12\x42\x09\x09\x21\x94\x90\x10\x4a\x48\x08\
\x51\xf0\x3f\x01\x06\x00\x0c\x5e\x25\xd7\x10\xfd\x4d\x14\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0d\x9e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x0d\x65\x49\x44\x41\x54\x78\xda\xed\x5a\x05\x5c\x5b\x7b\
\x0f\xfd\xdc\xdd\xdd\xdd\xdd\xdd\xdd\x1e\xee\x32\x57\xdc\xa5\xc3\
\x1d\x56\x5c\x5b\x8a\x17\xf7\xe1\xee\xd4\xb0\xb9\x1b\x73\x77\xb6\
\x7c\x27\x7f\x28\xeb\x68\x9f\xf2\x5c\x7e\xbf\x6c\xe3\x4f\xef\xbd\
\x39\xc9\xc9\x49\x72\xfb\x5e\x63\x66\x66\xf6\x92\xb6\x57\x01\x3c\
\xaf\xce\x12\xbd\xf6\x25\x0b\x40\x22\x91\xbc\xce\xa7\x58\xf7\x09\
\x06\xf1\x9c\x01\x68\x6a\xad\xff\x5f\x5d\x43\x55\x5f\x7d\x53\xcd\
\xd5\xfa\xa6\xea\xab\xfc\x6f\x3e\x7b\xb6\xee\xef\x95\xa7\xfb\x86\
\x4f\xbe\xfa\x63\xcf\x3a\x80\xda\xda\xda\xf7\xd4\x34\x54\xb6\x76\
\x76\xb7\xdd\xd4\x4d\x6b\xe9\xf0\x91\x43\x74\xf0\xe0\x3e\x1a\x19\
\x1b\xa2\xe6\x5d\x0d\x37\xaa\xeb\x95\xad\xfc\x99\xd5\x3e\xc7\x5b\
\x36\xfb\x11\x4f\x99\xf6\x7f\xcf\x2a\x00\x59\x8f\xec\x2d\x55\xb5\
\x15\xaa\x09\xd5\xf8\xfd\xf3\x17\xce\xd1\xfc\x99\x53\x74\xf2\xd4\
\x71\x3a\x71\xf2\x18\x1d\x3f\x71\x94\x8e\x1e\x3d\x44\x6d\x9d\xad\
\x0b\xca\x9a\x72\x15\x7f\x76\xb5\xcf\xf3\x90\x6b\x36\x7b\xe5\x8e\
\x7d\xf6\x59\x03\xa0\xac\x2e\x6d\x18\x19\x1d\xb8\x7b\xf9\xf2\x25\
\x3a\x73\xf6\x34\xd5\xd6\x57\xd1\xfa\x0d\x6b\xe9\xbf\x8f\xfd\x9b\
\xd6\xad\x5f\x43\xca\xaa\x0a\x01\xa2\xa5\xb5\xe1\x4e\x45\x55\x69\
\xc3\xaa\x01\x14\x68\xff\xe1\x29\x57\x5b\x3f\x2b\x00\x2a\x6b\xca\
\xff\xd1\xd6\xd9\x7c\xf3\xea\xd5\x2b\x74\xe1\xe2\x39\xca\xcc\xce\
\x20\x6b\x6b\x4b\x72\xf3\xd8\x46\x3e\xfe\x5e\xb4\x65\xfb\x26\x32\
\xb7\x34\xa3\xb4\x0c\x29\x1d\x3a\x7c\x80\xea\x1a\x6a\x6e\xf2\x35\
\xab\x51\x1d\xf7\x3c\xd5\x17\x3c\x64\x1a\xd9\xaa\x01\x28\x95\xca\
\x37\x95\x56\x14\x9d\x3a\x71\xe2\x18\x31\x80\xd1\xd1\x61\xb2\x77\
\xb0\xa5\x90\x1d\x41\xc2\x82\x42\xfc\xc9\x3f\xd0\x87\xbc\x7c\xdc\
\xc9\xda\xd6\x8a\x7a\xfb\x7a\x68\x74\x6c\x98\x4a\xca\x14\xa7\xf8\
\xda\xa7\xc6\xf9\xf1\x8f\x98\x02\x05\x00\x83\x6e\x45\x93\x1f\x5d\
\x15\x80\xaa\x86\x0a\xeb\x99\xd9\xa9\xbb\xd7\xae\x5d\xa1\xd3\xa7\
\x4f\xd2\x76\xb7\xad\xe4\x17\xe0\x43\xe1\x51\x12\xda\x11\x11\x42\
\xa1\x4b\x20\xfc\x00\x62\xcb\xb6\x4d\xb4\x65\xeb\x46\xda\x7f\x60\
\x2f\xd5\xd4\x57\xde\x2c\x56\x16\x9a\x3d\xd9\xfd\x9d\x64\x3d\x6f\
\x71\xcb\x57\xff\xd5\xd4\xef\x00\x20\x07\xc5\x6c\xbf\x2a\x00\x6a\
\xad\x6a\x8a\x88\xe8\xfa\xf5\xab\x94\x5f\x90\x4b\x5b\xe1\x64\x4c\
\x5c\x24\x45\xc5\x86\x53\x44\xf4\x0e\x80\x08\x15\x99\x08\x0c\xf6\
\x23\x5f\xd0\xc9\x65\x8d\x13\x65\x65\xa7\xd3\xc0\x60\x1f\xc9\x14\
\x79\x4d\x4f\x4a\x9f\xd7\xd0\x6b\xdd\x65\x9a\x80\xad\xc5\xfb\xdf\
\x65\xa2\x0e\x2c\x01\xa2\xee\x19\x03\x68\x6c\x54\x7e\xaa\xb6\xbe\
\x7a\xe1\xf6\x9d\xdb\x74\xea\xd4\x09\xda\xb4\x79\x83\x70\x3e\x3e\
\x31\x86\x62\x13\xa2\x28\x3a\x36\x62\x11\x44\x78\x08\x05\x4b\x02\
\x29\x20\xc8\x97\x3c\x41\x25\xd7\xb5\xce\xb4\x67\xef\x2c\x95\x96\
\x29\x6e\xc9\x64\x4f\xae\x48\x9e\x32\xf5\x56\x77\x99\xea\x2f\x46\
\x75\x50\x30\xf5\x39\x77\xb9\xe6\xf8\x33\x02\xd0\xd5\xd5\xfc\xe9\
\xba\xc6\xea\x4b\x7d\x03\x3d\x74\xf1\xd2\x05\xea\xee\xe9\x14\x45\
\x9b\xb4\x33\x9e\x92\x52\xe2\x28\x21\x29\x96\xe2\x12\xa2\x17\x41\
\x44\xed\x20\x49\x58\xf0\x22\x95\x02\xbc\x69\xcd\x3a\x17\xaa\x6f\
\xaa\xa3\x86\xe6\xba\x5b\xb2\xa2\xbc\xff\x3c\xb9\x64\xaa\x7f\xef\
\x21\xd7\xe6\x19\x77\x65\x7a\x1d\x32\x70\xc3\x27\x7f\xcf\x3b\x9f\
\xa6\xf3\x2d\x9f\x6f\xef\x6a\x3d\x53\x5d\xa7\x7c\xb0\x6f\xff\x1e\
\x9a\x9f\x3f\x49\x59\xb9\x19\xc2\xc9\x9d\x69\x49\x94\x92\x9a\x48\
\xc9\x3b\x13\x28\x21\x99\x41\x44\x09\x3a\x85\x45\x32\x95\x44\x16\
\x58\x95\x90\xa9\x28\xea\x1f\xec\xa5\xec\xbc\x8c\x5a\xc3\x7b\x9b\
\x2b\x95\xaf\x5f\xf9\xbc\x75\xd9\x93\x6f\xf3\x90\x69\x2f\x6c\x52\
\xce\xbe\xc3\x98\x46\x1a\xb5\x7b\xa1\xf6\x47\x46\x4e\xb6\xb4\x37\
\xfc\x7a\x57\x7b\xd3\x64\x6b\x7b\x23\xe9\x6d\x57\x47\xd3\x03\xc8\
\x65\x77\x73\x5b\xc3\xfc\xd4\xb4\xf6\x41\x7e\x61\x0e\x64\xf3\x3c\
\x1d\x39\x7a\x08\xd1\x0d\x40\xe4\xe3\x29\x2d\x73\x27\xa5\x66\xa4\
\x90\x14\x40\x92\xa5\x8b\x20\x62\xe3\xa3\x28\x32\x3a\x8c\x24\xa0\
\x12\x67\xc1\xcb\xd7\x83\x36\x6f\xdd\x44\x53\x33\x5a\xca\xca\x4f\
\xbb\x29\x95\x4a\xdf\xac\x7f\xae\x5b\x81\xea\x77\xa6\x64\x13\x8e\
\x76\xb3\xa3\x26\x0a\xb9\x18\xe6\xf4\xe8\x2c\xd3\x54\xfd\xb9\x96\
\x5d\x8d\xd7\x0f\x60\x0c\x38\x7b\xee\x8c\xb0\xce\xee\x0e\x44\x35\
\x91\x14\x25\x05\x34\x3c\x3a\x48\x07\x0f\xed\xa7\xd6\xb6\x66\xe2\
\xae\x3b\x3e\x31\x2c\xf4\x3e\x33\x27\x4d\x58\x46\x76\xaa\x00\x22\
\x4d\x4f\x16\x99\x88\x4f\x8a\x11\x54\x0a\x8f\x94\x50\x08\x6a\x81\
\x65\x75\xe3\x96\xf5\x34\x80\x0c\xe4\x15\xe4\x5c\xcb\xcc\x4d\xfd\
\xcd\x43\xba\xe8\x36\x9a\x92\x4d\x74\xde\x22\xfc\xee\x31\xa3\xfa\
\x40\x81\xc3\xe2\x1e\x39\xac\xa9\x57\x0e\x1c\x38\xb8\x77\xe1\x14\
\x64\x71\xf7\x9e\x59\xea\xe8\x6c\x13\x0f\x8f\x43\x71\xd6\x36\x54\
\x11\x77\xdb\xe9\x99\x29\x1a\x1a\xe9\x17\xd2\x59\x53\x57\x29\x22\
\x9c\x93\x9f\xc9\xc6\xb4\x10\x40\xd2\x01\x82\x29\x95\x88\x9a\xe0\
\xa2\xe6\xcf\x80\x66\xac\x48\xa8\x97\xad\xa4\xac\x2c\x27\xf4\x90\
\x3b\x19\x59\xa9\x76\x06\x0e\x6d\x47\x44\xff\x66\x02\x40\xbc\x67\
\x81\xca\xe7\x71\x94\x48\xf9\xb0\x30\x22\x25\xdf\x18\x9b\x18\xb9\
\x4f\x44\x82\x1e\x47\x8f\x1d\x62\x0a\x88\x07\xe7\xc9\xb2\xa0\x1e\
\x73\x68\x58\x57\x11\xf5\x51\x9a\x54\x8d\x89\x39\xa7\xa4\x54\x41\
\x89\xc9\x71\x94\x2f\xcf\x61\xc3\xe7\xb2\x05\x10\x06\x91\x06\x3a\
\xa5\x48\x1f\x66\x21\x0c\xb2\x1a\x1c\x1a\x20\x1a\x5b\x66\x56\x1a\
\xba\x72\xf5\x03\x69\x5a\x8a\xf7\x23\x0e\x15\xa8\x33\x8c\x14\x47\
\xae\xf6\x84\xe2\x24\x19\x9d\x43\x9d\x3c\x64\xea\x5d\x8b\x05\x64\
\x6e\xfe\x7a\x6f\x5f\xcf\x43\x6a\xcd\x04\x2d\x2c\x2c\x88\x48\xab\
\x35\x2a\x4e\x3b\x03\x00\x7d\xe4\x42\x71\xae\x5d\xbb\xca\x94\x42\
\x16\x74\x98\x38\x0f\x52\x76\x6e\x26\xb9\xb8\x3a\x91\xb5\x9d\x25\
\xad\xdb\xb0\x06\x05\x1a\x01\x20\x00\x81\x4c\x30\x9d\x98\x4a\x9c\
\x05\x0e\x04\x2b\x12\xf7\x05\x5f\xa8\x11\x0a\x1c\x34\x6c\x82\x72\
\x25\xa4\x2f\x67\x20\x5f\xfb\x0b\xee\xb0\x46\x54\x91\x6b\x6c\x71\
\x5e\xb2\xf2\xdc\x4d\xae\xf9\x39\xce\x87\x16\x01\x58\xfd\xef\xef\
\x71\x89\xd1\xf7\xf6\x1f\xd8\x47\x67\xce\x9c\xa6\xe3\xc7\x8f\x42\
\xab\x8b\x38\xe5\x42\xdf\xdb\x3a\x76\x21\x2b\x17\x00\xec\x22\x55\
\x56\x57\x10\x2b\xd0\xfe\xfd\xbb\xc9\xdd\xdb\x9d\x36\x6c\x5a\xc7\
\x35\x02\x9a\x45\x91\xa3\xb3\x3d\xcb\x25\x67\x02\xea\x94\x2e\xea\
\x81\x0b\x1a\xf7\xa6\xc8\x18\x41\x23\xa1\x46\x61\xa0\x65\x67\x4f\
\x3b\xb2\x13\xdb\x6c\x38\xeb\xc3\xa1\x19\x53\x52\xea\x2e\x53\x77\
\x19\x67\x60\xea\x9b\xc8\xc0\x94\xf8\xe1\x31\xb3\xff\xd6\x25\x80\
\x0a\x87\x0f\x1f\xa4\x03\x07\xf6\xd3\x9e\x3d\x73\x50\x96\x04\x1e\
\x0d\x84\x9a\xa8\xd5\x13\x82\xf3\xdc\xb4\x0a\xe4\xb9\x50\x9f\xc3\
\x34\xb7\x7b\x5a\x74\xdf\xa8\x98\x70\xe1\x70\x75\x5d\x05\x95\x29\
\x8b\x00\xc2\x41\xf0\x3e\x17\x54\xe2\x2c\x70\x2d\xf0\x3d\xf4\x34\
\x62\x35\xe2\xcc\xf6\x0f\x74\xf3\xe7\x74\x7a\x87\x7c\x72\x75\x9f\
\x00\x80\x13\x46\x19\x28\x50\x7f\x0d\xe7\xb3\x46\x19\x90\x69\x3e\
\x83\xf3\xc3\x8b\x00\xcc\xff\x7b\x2c\x21\x31\x16\x0a\x73\x80\xb4\
\x5a\x35\xa9\x54\x13\xe8\xa2\x12\xf2\xf1\xf3\x14\x6a\x32\x83\xc2\
\x3d\x82\x25\x85\x69\xc3\xb4\x38\x09\x20\xba\x69\x15\x74\x7d\x33\
\xc5\xc4\x47\x32\xff\x51\xe4\x95\x04\x11\xa0\x18\x38\xca\x1d\x37\
\x7f\x29\x0b\x2c\xad\x2c\xb3\xfc\x39\x16\x84\xe0\x25\x35\x1a\x18\
\xe9\xa5\xe8\xb8\x88\xf9\xe5\x0c\x28\x74\x6f\x87\x43\xd7\x8d\xa7\
\xcf\xd9\xf7\xe1\xfc\xc2\xe3\x9d\x2f\x1e\x58\xfc\xef\x36\xa2\x01\
\x5a\xec\x43\x64\x7a\xa9\xaf\xbf\x07\xce\x7b\x91\xa7\xb7\x1b\x8a\
\x34\x1e\x19\xd9\x4d\xb3\x73\xd3\xa2\x90\x63\x12\xa2\x20\xaf\xf3\
\x34\x3e\x39\x4a\x1b\x37\xaf\xc3\xc8\x10\x26\x22\xad\x28\x2e\xa0\
\x42\x18\xf7\x01\x2b\x1b\x0b\x06\xc0\xb5\xa0\xa7\x11\x77\x67\x1e\
\x2f\x78\xc8\x63\x00\x68\x66\x3d\xa0\x55\xf8\x9d\x15\x8a\x73\x8f\
\xd0\x0b\x56\x34\xb3\x37\xc2\xd1\xbb\x86\x67\x86\x80\x97\x01\x70\
\x91\x4d\x4f\x4f\x51\x6b\x6b\x33\xb5\xb4\x34\xb1\x5a\xf0\x88\x80\
\xc8\x45\xd0\xd4\xb4\x4e\x64\x45\xab\x53\x73\x77\xc5\xe2\x32\x4f\
\x63\xe3\x43\x28\x5c\x57\x56\x16\xe6\x38\x67\x06\xce\x27\x73\x54\
\xc9\xd2\xc6\x9c\x64\x85\xb9\x94\x5b\x90\xc5\xe0\xb8\x43\xf3\x9c\
\xf4\x48\x1d\xf4\xf6\x77\x62\x72\xdd\xb1\x0c\x40\x22\xe9\x79\x83\
\x29\x47\x99\x5a\x3c\xf7\x98\x1a\xb5\xf1\xf9\xf9\x65\x0a\x05\x06\
\xfb\xd3\xd0\xd0\x00\xd5\xd6\x55\x0b\x9d\x66\xfa\x6c\x05\x45\x76\
\x84\x85\xd0\x00\xb2\x32\x3c\x32\x48\xc3\xc3\x43\xc2\x91\x63\x90\
\xd0\x71\xd5\x88\x50\x1e\x77\xaf\xed\x14\x04\x10\x3c\x32\x30\xc7\
\xdd\x01\xda\x75\xad\x13\x37\x3e\xce\x82\x90\x54\xae\x03\x9e\x91\
\x18\x3c\x77\x65\x06\xdd\xdd\xdb\x0e\x9a\x06\x2f\x53\x68\x6b\x81\
\xfa\x83\xc8\xc0\x39\x13\x0d\xeb\x87\x70\x74\xcc\xa8\xb8\x0b\xb5\
\x5f\xc2\xb4\xba\x6f\xb9\x88\x3d\x7c\x3c\xa8\x05\xd1\x2f\x2b\x2f\
\x21\x99\x3c\x9f\x7c\xfd\xbc\x69\xd3\x96\x0d\x18\x85\xbd\xa9\xbe\
\xb1\x8e\xfa\xb0\x90\xb4\xb7\xb7\xd1\xce\xd4\x64\x31\xd7\x4f\xa8\
\x46\x69\xab\xdb\x26\x5a\xbb\xde\x05\x7f\x6f\x26\x0f\xd0\x8d\x33\
\x66\x6b\x6f\x2d\x22\x5e\x54\x2a\x47\xc1\xe7\x70\x1d\x70\x76\xb8\
\x5f\x70\x21\xf3\xae\x00\xc7\x43\xa9\xad\xbd\x89\x65\x55\xb7\xd2\
\x21\x13\x2a\xf4\x2f\x1e\x9d\x8d\x6a\x20\x5f\xfd\x03\xcc\x49\x93\
\xcb\x32\xba\x65\xdb\xc6\x7b\x45\x25\x0a\x2a\x2e\x56\x50\x5a\xba\
\x94\x02\x02\xfd\x20\x91\x6b\x39\x0b\xe0\xb1\x94\x7a\x7b\xbb\xa9\
\xa6\xb6\x8a\xd0\x3d\x45\x1f\x98\xd4\x8c\x83\x0a\xfe\x64\x69\x6d\
\x4e\x0e\x8e\x76\xe4\xe0\x6c\x47\x36\x76\x56\x22\xba\xd8\x93\xa9\
\xb8\xac\x50\xd0\x28\x7b\xa9\x90\xb9\x1f\xb0\x24\x73\x96\x18\x48\
\x5d\x53\x35\x32\xe7\xff\x50\x46\x0b\x74\x3f\x86\xa3\xe3\x26\x3a\
\xee\x7a\x64\xc6\xa8\xc1\xe1\xf3\xbf\xe3\x39\x69\xb9\x91\x39\xba\
\xd8\x1f\x8a\x88\x0a\xa3\x62\x80\xe0\x89\x11\xc5\x86\xe8\xba\x0a\
\xf3\xc6\x10\xd6\xde\xd9\x26\x7e\x97\x95\x93\x89\x6e\x3c\x42\x6a\
\xdd\x04\x29\x6b\xca\x68\xf3\x96\x8d\xa2\x68\xd7\x6d\x5c\x03\x70\
\x52\xc2\xa8\x8d\x5e\x51\x46\x25\xe5\x85\xbc\xb8\x88\xf1\x22\xcd\
\x40\x89\x98\x6a\xfc\xef\xf2\xca\x62\x0a\x08\xf6\x4b\x7f\xb4\xb3\
\x6a\xda\x4c\x0c\x6d\x12\xe8\x7d\xb0\x09\x6a\xfd\x87\x33\x23\x7e\
\xc0\x7f\xaf\x05\x88\xef\x20\xda\x0b\x8a\xa2\x42\x92\xec\x00\x4f\
\x43\x02\xd9\x79\x6c\x52\x8e\x82\x4a\xd9\x39\x59\x18\x1d\x8a\x79\
\x71\x07\x7f\xbb\x48\x3b\xa5\x22\xbc\xc0\xa2\xaa\xda\x72\xf1\x77\
\x4b\x5b\x03\xbf\x03\x5a\x04\x50\xb3\x0c\x40\xaf\x44\xe8\xba\xcb\
\x52\x8a\x7b\xa4\xf2\xef\x1e\xa0\xcf\x18\x8c\x12\xea\x6d\x18\x1b\
\x42\x8c\x97\x17\x4d\x36\xce\x5d\x4d\x00\x73\x72\x97\x69\x15\x7a\
\x00\xaf\x87\xbd\xc9\x65\xad\xf3\x50\x58\x84\x64\x21\x3e\x21\x96\
\x3c\xbc\xdc\xc4\xe8\xcb\xdd\xd5\x09\xc6\x7b\x2d\x03\x48\xcf\x90\
\x52\x63\x53\x3d\x4d\xcf\x6a\xb1\x98\xd4\x70\xb4\xe1\x74\x15\x35\
\xb5\xd6\x51\x63\x4b\x1d\x0f\x7d\xa0\x90\x69\x00\x3c\x52\x00\x00\
\xe8\x25\xe7\xc2\xbe\xe3\xe3\xe3\x66\x6f\x18\x69\xf7\x3c\xe3\xb1\
\x19\x5b\x59\x03\xa6\xd1\xdf\x1b\x77\x62\x4d\x24\xae\x09\x15\xd1\
\x87\xbd\x01\xf6\xe6\x3f\xff\xf3\xcf\x5f\x77\x72\x71\xb8\x11\x8a\
\x0c\x24\x26\x25\x08\xf3\xf7\xf7\x15\xfb\xac\xb3\xab\x03\xf7\x06\
\x50\x28\x1d\x9d\x37\x57\x14\x32\xef\x0a\x15\x55\x25\x98\x4a\x95\
\xc8\x42\x35\x1b\x37\x33\x9c\x2d\xd5\x80\x22\x77\x89\x42\x8f\x02\
\xe0\xcf\x61\x67\xbe\xe6\xe6\xb5\xe5\x77\x0f\x01\x68\x3d\x78\x81\
\x31\x41\x15\xcd\xf6\x7c\xdd\x97\x4d\x01\xc3\x9e\xf0\x5f\x06\xf0\
\x3a\xd8\x1b\x61\x6f\x81\xbd\xfd\x1f\xff\xf8\xcb\xdf\x50\x98\x1a\
\xe6\xb5\x81\x3d\x30\xb7\x7c\x6c\x04\x45\x7a\x4e\x22\x09\xb9\xcf\
\x35\xc1\xaf\x0f\xbb\xfb\xda\x91\x81\x52\xa6\x8c\x70\x9c\x8d\x29\
\xc5\xfc\x16\x2a\xc4\x45\xbc\xa2\x1b\x43\x4a\x19\x38\x32\xbc\xfd\
\xe6\x9f\xfe\xf4\xa7\xb7\x73\x00\x45\x44\x15\xd3\x3f\x58\xe9\x24\
\x1c\xfc\x38\xa2\x7c\x89\x9b\x99\x09\x0a\x1d\xe6\x77\x44\x46\x00\
\x60\xef\x82\xbd\x17\xf6\x01\xd8\x87\x61\x1f\x83\x7d\x12\xf6\x99\
\x6f\x7f\xfb\xeb\xbf\x35\xb3\x78\xec\xbc\x97\xb7\xfb\x03\x8c\xde\
\x34\x36\x31\x2c\x28\x53\x51\x59\xc2\x20\x60\xe5\xa4\x44\xf4\x31\
\xeb\x2f\xf6\x01\x96\x51\x06\x90\xbe\xac\x42\xdc\xd8\x58\xa5\x20\
\xbd\x5b\xea\x39\xf3\x4f\xb8\x13\xcb\xd4\x2e\xbc\xb4\xac\x3c\xe7\
\x5d\x18\xca\x74\x8d\x77\xe3\x47\x28\x04\x7b\x1b\xec\x9d\xb0\xf7\
\xc0\xde\x0f\xfb\x10\xec\xa3\xb0\x4f\xc0\x3e\x05\xfb\xec\x57\xbf\
\xf9\xd5\x5f\xd9\x39\xd8\x5c\x29\x2c\x96\x63\xbc\x98\xe2\xc2\x85\
\xc3\x0a\x11\x75\x06\x52\xa6\x2c\x16\xf4\x91\x17\xe5\x53\xde\x52\
\x23\xd3\xf7\x01\xee\xd2\xa8\x0f\xe6\xff\xed\xf5\x9b\xd6\x98\x71\
\xed\x3d\x31\x00\x6d\xa2\xa7\x7c\xea\xfb\xc6\xe7\xaa\x9f\xe8\x9b\
\x1b\x1f\x3c\x51\x16\xf4\x20\x3e\x02\xfb\xf8\x52\x26\x3e\xfd\x9d\
\xef\x7c\xfd\xe7\x9e\xde\xdb\x17\x78\xbd\xec\xe8\x6e\xa5\x12\x00\
\x28\x29\x57\x50\x29\xac\x04\xce\x2b\x4a\x64\xdc\x03\x8c\x26\xd2\
\x04\x2c\x37\xd8\xad\x09\xd7\xde\xfa\xcd\x6f\x7e\xf3\x0e\x3d\x7d\
\x4c\x99\xbb\x72\xf8\xad\xc8\x40\x98\xa9\x65\x1f\x8a\xb5\x11\x19\
\xc8\xd5\x03\x30\x54\x22\x43\x10\xfa\x4c\xbc\xcf\x80\x4e\x0c\xe4\
\x63\x0c\x06\x73\x50\xe7\x04\x36\xb3\xa1\xd1\x7e\xc2\xab\x75\x76\
\x1a\xbc\x97\x31\x75\x10\xfd\x3c\x1e\x23\x98\xff\x62\xbd\x4c\x91\
\x26\x8a\xcd\x8c\x3f\xc3\x99\xda\xb0\x79\x6d\x33\x07\xed\x89\xa2\
\xcf\xd3\xa6\xa7\x42\xfb\x0b\xfe\xb7\xc9\x02\xce\x57\xfd\xd7\x10\
\xc0\xca\x4c\xe8\xe9\xf4\x0e\xce\x86\x01\x10\xce\xc8\x07\x39\x2b\
\xe6\x56\xe6\xae\x05\xb2\xbc\xdb\xbc\x17\x70\x16\x38\x03\xec\x38\
\x4b\x27\x73\x3f\x77\x69\xb5\xe4\x02\x4e\x86\x02\x31\x90\xf6\xae\
\x16\x14\x71\xc4\x4d\xd7\x75\xae\xe6\x4f\xe5\x1b\x19\xcc\xfc\xef\
\x31\x99\x19\xb9\x76\x2f\x4f\xa3\x46\x00\xf4\x4d\x8d\xb3\x61\x00\
\xe4\xad\x06\x19\x61\x30\xef\x66\x40\x1f\xfa\xd0\x87\x3e\x8c\x17\
\x55\xf3\x5c\xc8\xe3\x93\x23\xdc\xc8\x84\xea\x30\xef\x79\x0a\xcd\
\xe2\xe8\x67\x49\x99\x3e\xbc\x3a\xa2\x47\xd4\x52\x15\x8a\x1c\xd7\
\x9c\x42\xd3\x14\x2f\x77\x9f\x89\xe1\xd5\xfa\xaf\x30\x5e\xec\x7c\
\x82\xb7\xd3\x46\x19\xe1\x02\x7f\xd3\x12\x98\xb7\x2c\x01\x7a\x1b\
\x83\xb2\xb2\x32\x37\x0b\x92\x04\xde\x9c\x99\xd3\x51\xef\x40\xa7\
\xd8\xca\xb8\x71\xb1\xf3\xcc\x7d\x11\x7d\x69\x02\x6f\x6a\xcc\x7d\
\xee\x23\x37\x9d\x9d\x1d\xfe\xf1\x0c\x9d\xd7\xf7\x85\x4d\xee\x72\
\xed\x9f\x9e\x1c\x80\xe9\xcc\xe8\x01\xb1\xbd\x91\x0d\x8d\xaf\x09\
\x94\xb9\xcb\xa3\x45\x67\xcf\x2e\xee\xca\x3c\x85\x72\xf4\x19\x0c\
\x40\x29\x05\x75\xb0\xb2\xde\x71\x72\x76\x6c\x58\x8d\xf3\xdc\x0f\
\xd0\x95\x23\x7c\xb3\x27\xdf\xfd\xac\x7d\x43\xe3\xe4\xe4\xf4\x16\
\x7b\x27\x3b\x55\x99\xb2\xe4\xbe\x5a\x3b\x41\x7d\x83\xdd\x02\x08\
\x5b\x57\x6f\x1b\x1b\x64\x34\x65\xc1\xde\xd1\x56\xc5\x9f\x5d\xcd\
\xb3\xdc\x65\x93\xdf\x04\x80\x7f\x3d\xdb\x5f\xf2\x01\xc4\x7f\xde\
\x63\xe7\x68\xdb\x1a\x12\x12\x78\x93\x5f\x97\x0c\x0c\xf7\x52\x0f\
\x36\x2e\x7c\x77\xc6\xb4\xb9\x61\x67\x6f\xdd\xca\x9f\x59\xcd\x33\
\x78\xd5\xe4\xd7\x8f\x7e\x25\x53\xef\x7d\xd6\x01\xe8\xcd\xd2\xc6\
\xec\x7f\xb6\x76\xd6\x7d\xd6\xb6\x16\x57\xd9\x6c\xf0\x6f\x3e\x7b\
\x36\xee\xcd\x73\x92\x1f\xde\x44\xbc\xa4\xbf\xa9\xe7\xbd\xf9\xd5\
\xff\x57\xe2\x55\x00\xcf\xb2\xfd\x1f\xbf\xa3\x54\x4b\x85\x0b\x06\
\xa1\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\x5b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x07\x22\x49\x44\x41\x54\x78\xda\xed\x58\x05\x70\x1b\x41\
\x12\x7c\x0c\x33\x27\x05\xcf\xcc\xcc\xcc\xcc\xcc\xcc\x10\x66\x66\
\x46\x33\xa3\xc0\x2c\x73\x38\x79\x7e\x53\x18\x0c\x8a\x05\x66\x5b\
\x64\x92\x4e\x92\xe7\xa7\xaf\xee\xae\xce\x17\x9d\xcb\xf4\xac\xae\
\x9a\x12\xed\xf6\x74\xcf\xcc\xae\x65\x3d\x83\x88\xfe\xab\x23\x6a\
\x20\x6a\x20\x6a\x20\x6a\x20\x6a\xe0\x7f\xc3\x40\xd4\x00\xe3\x39\
\x1c\xb3\xf0\x38\x29\xb2\xe9\xe7\x99\xc1\xb1\x80\x63\x09\xc7\x62\
\x29\x96\x48\xef\xcd\x18\x65\x80\x31\x53\xb3\x70\x3e\xc7\xb3\x27\
\x9e\x74\xda\x78\x66\xa8\xc4\xce\xe1\x98\x2d\xc5\x1c\x95\xa9\x19\
\x6a\x03\x8b\x90\xac\xb0\xc4\xfc\x91\x6d\x07\xb7\xad\xe6\xe7\x0b\
\xa5\x45\xd8\xf0\xcc\x71\x24\x9d\x6e\x9e\x05\x88\xea\xab\x15\x39\
\x17\xaf\x54\x90\x3a\xae\xde\xb8\x64\x7a\xc7\x3b\xde\x21\x9a\x53\
\x1b\x58\x8c\x24\x58\x70\xf9\x5a\x95\xef\xd2\x95\xea\xdf\xbc\xed\
\x6d\x6f\x03\xc9\x22\xe9\xb3\x99\xe3\x4c\x3c\x9d\x3c\x73\xc1\xa3\
\x01\x78\xe9\xde\xbd\x7b\xaf\xfb\xfa\xd7\xbf\xbe\x66\x94\x81\x4f\
\x7c\xe2\x13\x0b\xb0\x61\x78\x78\x88\x1a\xee\xd4\xfa\x2f\x5d\xab\
\x6a\x2a\x2d\x2d\x7a\xff\x92\x25\x4b\x16\x48\x84\x0b\xc6\x1a\x07\
\x2d\xcf\x10\x78\x6e\xd7\x4c\x99\x07\xf0\x07\x86\x49\x08\x0a\x8a\
\x01\xbf\xdf\xff\x8b\x3f\xfe\xf1\x8f\x6f\x1f\x65\x60\xed\xda\xb5\
\x4b\x2a\xaa\x2d\x04\x04\x43\x41\xea\xed\xeb\xa6\x6b\x37\x2e\xf9\
\x2b\x2f\x96\xe5\xc5\xa5\xc5\x3d\x5f\x35\x0e\x73\xf5\xc6\xe1\x29\
\x1e\x4e\xda\xdd\xdb\x35\x69\x1e\xde\x43\x00\xc4\x87\xc2\x21\x02\
\xaa\x2f\x57\x30\x6f\x70\xb3\xcd\x66\xfb\xcc\x28\x03\x7b\xf6\xec\
\x59\x62\x29\x2f\x24\xa0\xc9\xfa\x90\xfa\x5c\x3d\xa2\x91\x66\x6b\
\x63\xa8\xa2\xca\x32\x50\x5e\x59\xb2\xf1\x4d\x6f\x7a\xd3\x42\x9d\
\x71\x88\xc8\xf3\xa0\xf1\x8e\xc8\x13\x62\x1e\x6b\x6b\x73\x88\x8d\
\x4d\x88\xa7\xbc\xaa\x84\x00\x59\x3c\x50\x79\xb1\x14\x06\xb6\xb4\
\xb7\xb7\x7f\xe1\x29\x03\x45\x96\x3c\x02\x1e\x36\xde\xa5\xfb\x8f\
\x6e\x53\x63\xcb\x43\x1a\x18\xec\x17\xc7\xaa\xa6\xee\xaf\x81\xd2\
\xf2\x22\x6b\x41\xb1\xe9\xc3\xcb\x96\x2d\x9b\x2f\x25\x5f\x28\x5f\
\x97\x7a\x3c\x8f\x9b\xef\x93\xcd\xd1\xc2\x23\x35\xc8\xad\x1f\xa6\
\xba\xfa\xbf\x07\x4a\x2b\x8b\xc7\xc5\x53\x5a\x51\x44\x5a\x94\x57\
\x59\xf4\x0d\xe4\x15\x1a\x08\x80\xf8\xbb\x0f\x1b\xe8\xfe\xe3\xdb\
\xf4\xb0\xe9\x2e\xb5\x77\x3a\x30\x0e\x5c\xcd\x5e\xaa\xba\x54\xe6\
\x2f\x29\x2b\x2c\x8e\x8d\x3d\xfd\x42\x29\xf1\x62\x79\x1c\xb4\x3c\
\x8f\x5b\x1e\x70\x37\x1f\x51\x4b\x6b\x23\x3d\xb1\x37\x53\x4f\x5f\
\x97\xd8\x0d\xb7\xc7\x45\xd5\x97\xca\xfd\x96\xf2\xb1\x79\x4a\x4a\
\x0b\x48\xc6\xc8\xc8\x08\x01\xa5\xe5\xc5\xfa\x06\x0c\xe6\x2c\x62\
\x48\xe2\xef\x40\x3c\x2a\x28\x8a\xb0\xda\x1a\xc9\xed\x75\x51\x98\
\xdb\xf9\xc4\xd6\x12\x2e\x28\x32\x0d\xe6\x17\x1a\xb7\x7f\xe5\x2b\
\x5f\x51\xc6\x41\xcb\xd3\xfc\xe4\x31\xef\x6b\xa2\x56\xee\x80\xdd\
\xf9\x84\x9c\xed\x36\xea\xe8\x6a\x43\x57\x99\x27\x4c\x76\x47\x6b\
\x98\x3b\xa1\xcb\xc3\xd7\xb1\x22\x1e\x01\xc0\x94\xae\x81\xec\xdc\
\x74\x62\x48\xe2\xef\xc9\xe2\x51\x41\x49\x88\x95\xda\xbb\x9c\x34\
\xec\x1f\x22\x41\x08\x50\x4d\xed\x5f\x03\xe6\x7c\x83\x2d\x2b\x37\
\xed\xe3\xd2\x38\x2c\x54\xf3\xa0\xea\x36\xde\x63\x6f\x6b\xa5\xb6\
\x0e\xbb\xb8\xb7\xab\xa7\x03\x07\x1b\x5d\xc0\xe1\x84\x18\xaa\xab\
\xfb\x7b\x44\x9e\xfc\x22\xa3\x62\x00\x86\x01\x8c\xa7\xae\x81\xf4\
\xac\x64\x02\x20\x5e\xdb\xfe\x56\x49\x88\x13\x42\x3a\x9d\x10\xc0\
\xe3\x10\x22\xaf\xcf\x4b\x65\x15\xc5\xc3\xc6\xbc\xec\x8a\x98\x98\
\x53\x2f\x36\x9b\xcd\x0b\x65\x1e\x9b\xd3\x4a\x0e\xae\x7a\x1b\x8f\
\x20\x2a\x0f\xf1\x18\xa3\x3e\x77\x2f\xba\xc9\x7b\x3d\x38\x5f\x10\
\x28\x76\xa5\xac\xd2\x32\x6c\xce\xcf\x51\x78\xcc\xf9\xb9\x04\x40\
\x3c\x3a\x0f\x70\xe7\xf5\x0d\x24\xa7\xc5\x47\x9c\x5d\x9b\x46\x7c\
\x67\x77\x3b\x57\xb1\x93\x7a\x5d\x3d\x38\x9c\xa2\x00\x87\xd3\x1e\
\xce\x36\xa4\x0f\x19\xf3\xb3\x0f\xc8\x3c\x18\x19\xf5\x7a\x5c\xcd\
\x2e\x16\xef\x61\xf1\xbe\x7e\x2f\x44\x2b\x87\x1b\xc5\x00\x3a\x3a\
\xda\x99\x27\x43\xe4\x31\x98\xb3\x25\x03\x21\xe5\x73\x73\x81\x41\
\xdf\x40\x42\x52\x0c\x01\xe3\x11\x0f\x31\x72\x25\xfb\x07\x7c\xe2\
\x95\x8b\x24\xb5\x75\x7f\x17\x64\x1e\xf5\xc8\xc0\xac\xcb\xd3\x47\
\x1e\x9f\x9b\xd7\xcb\xe2\x87\xc8\x1f\xf0\x2b\xf7\x3c\x0a\x21\x57\
\xbc\xae\xbe\x46\xc8\x31\x64\x10\x00\x5e\x1c\x7e\xc0\xc8\xa6\x74\
\x0d\xc4\xc4\x9f\x25\x40\x2d\xde\xa1\x2f\x5e\x19\x83\xc1\xc1\x01\
\x31\x09\xe2\xef\xb5\x7f\x0b\xca\x3c\x91\x46\x06\x66\x07\x87\x06\
\xc4\x73\x14\x10\x02\x10\x23\x0a\x96\x21\x1b\xa8\xa9\xfb\x5b\x30\
\x2b\x27\x8d\x00\x88\xc7\x3a\x20\xd7\x98\xa9\x6f\xe0\xec\x85\x93\
\xca\xe1\x13\xe7\x57\x3e\x7c\xda\x31\x50\x09\x0a\x70\x05\x51\x39\
\xbb\xc3\x16\x4e\x4a\x89\x13\xaa\xaa\xcb\xaf\x4a\x3c\x9a\x91\xf1\
\xa8\x47\x06\x97\x00\x84\xc9\x55\x57\x1e\x9d\x6d\x0e\xe6\x89\x17\
\x79\xd2\xb3\x52\x08\x80\x78\x5c\xe3\x40\x56\x6e\xba\xbe\x81\x53\
\x67\x8e\x11\x30\x4a\x7c\x97\x7a\xe6\x15\x41\x5c\xc5\x41\x0a\x8f\
\x84\xc9\xe7\xf3\x72\x55\xb2\x85\x5c\x63\xd6\xe3\xce\xce\xce\x53\
\x44\xb4\x5b\xe6\x19\xef\xc8\x00\x03\x03\x03\x64\x30\x65\x0b\x06\
\x15\x4f\x6a\x7a\x92\x64\x40\x60\xc3\x02\x01\x19\x59\xa9\xfa\x06\
\x8e\x9d\x38\x44\xc0\x58\xe2\x51\xc9\xa0\xd4\xd2\xeb\xd7\xaf\x04\
\xe3\xe2\xcf\x7b\xea\x1b\x6a\x33\x90\x90\x63\x17\xc7\x36\x99\x67\
\x3c\x23\x83\xb1\xbb\x71\xf3\x5a\x44\x9e\xe4\xd4\x78\x02\x20\x1e\
\x1d\x03\xd2\x32\x92\xf4\x0d\x1c\x3e\xba\x9f\x00\x88\xef\xd0\x88\
\x77\x73\x35\xd1\x7a\x54\xad\xb1\xe9\x71\xf8\xf4\xd9\x13\x81\xf2\
\x0a\xcb\x45\x26\xdb\x87\xa4\x2c\x6a\x03\x27\xfa\xfd\xd0\xd0\xd0\
\xd7\x64\x1e\xbd\x91\x91\xd1\xd2\xd2\x1c\x3e\x33\x06\x4f\x42\x72\
\xac\x64\x20\xa0\x18\x48\x4e\x4b\xd0\x37\xb0\xff\xe0\x6e\x02\x3a\
\x46\xdd\x1e\xdd\xca\x5f\x4e\x97\xab\x8f\x12\x92\xe2\x84\xf4\xcc\
\x94\xfb\x5d\x5d\x1d\x27\xa4\x6a\xed\x64\xc2\x8f\xf1\xe3\xcc\xda\
\xda\xda\xb7\x5d\xbb\x76\xed\xf9\x32\x8f\xde\xc8\x78\xbc\x1e\x4a\
\x4c\x06\x4f\xea\x98\x3c\x71\x09\x17\x14\x03\x38\x6b\x0c\xec\xd3\
\x37\xb0\x7b\xef\x76\x02\x64\xf1\x38\xa8\x98\xbf\x40\x20\x40\xe5\
\x15\x25\xc2\xa9\x33\xc7\xfb\xea\xeb\xeb\x52\xe4\x36\x73\xfb\xbf\
\xc7\x8f\xcb\xc1\x81\xf8\xe8\x47\x3f\xba\xe2\xd0\xa1\x43\x4b\x65\
\x1e\xed\xc8\xe0\x79\x65\x55\xb9\xc0\xdd\x1b\x17\x4f\x4c\xdc\x59\
\x02\x20\x1e\x01\xc4\x27\x5e\xd0\x37\xb0\x63\xd7\x16\x02\x50\x75\
\x54\x0f\x89\xef\xde\xbb\x1d\x3e\x78\x78\x5f\xa0\xc4\x52\x54\xc1\
\x1b\xf7\x4a\x49\xd7\xb2\xa9\x57\xe9\x7d\x0d\x96\x79\xe4\xbb\x1b\
\x95\x7f\xf0\xf0\x7e\xf8\xd0\x91\x89\xf1\x9c\x8b\x39\xad\x18\xc0\
\x18\x02\x31\xf1\xe7\xf5\x0d\x6c\xd9\xb6\x41\xb9\x87\x3b\xbb\x3a\
\xe8\xe4\xe9\x63\x42\x62\x52\xdc\x9d\x9e\x9e\x9e\x63\x52\xc2\x1d\
\xbc\xf9\x43\xfc\x38\x43\x93\x34\x22\x0f\xd0\xdb\xdb\x4b\xdc\x39\
\xe6\x89\x9f\x30\xcf\x99\x73\x27\x09\x80\x78\x04\x70\x3e\xe6\xac\
\xbe\x81\x0d\x9b\x7e\x2f\x7e\x37\x31\x98\x72\x84\x43\x47\xf6\x77\
\x37\x34\xd4\x25\xaa\x6e\x85\x6f\xf9\x7c\xbe\xa5\x58\xaf\x17\x6a\
\x1e\xdc\x1c\x26\xb3\x41\x38\x7c\xe4\xc0\xa4\x79\xd8\xb8\x62\x00\
\xba\x80\x73\xe7\x4f\x47\x36\xb0\x75\xeb\xd6\xa5\x6b\xd7\xff\x86\
\xb6\xed\xd8\x3c\x5c\x54\x9c\x6f\xe1\x2e\xec\x91\x92\xfe\xd6\xef\
\xf7\xbf\x4c\x26\xd7\x0d\x0d\xcf\xf6\x9d\x53\xe2\x59\x02\x9e\x13\
\xa7\x8e\x90\x16\x67\xce\x9d\x88\x68\x60\xd1\xcf\x7f\xfe\xf3\x15\
\x26\x53\xee\x7a\x6e\x33\x92\x21\xb6\xf3\xc2\xf7\xf1\xe3\xb8\x7f\
\xa0\x9a\x46\x9e\x05\x3f\xfe\xf1\x8f\x57\x26\xa5\x26\x5e\x3a\x7a\
\xfc\x10\xa9\xc3\x68\xca\xb9\x23\x19\x78\xaf\xda\xc0\xcc\xcf\x7e\
\xf6\xb3\x6b\xac\x56\xeb\x47\xf8\xc3\x8d\x7c\x2b\x7c\xcd\xed\x76\
\x2f\x1a\x6f\x42\xc4\x34\xf3\xcc\xf8\xe2\x17\xbf\xb8\xda\x66\xb3\
\x7d\x98\x3b\xb8\x19\x85\x50\x07\xde\xb3\xdb\xed\x6b\x14\x03\x08\
\xfe\x47\xfb\xb9\x7c\xf7\xae\xe2\x3f\xe9\x2b\xf5\xc9\xf5\x63\xba\
\x79\xde\xff\xfe\xf7\xcf\xe2\x9f\x4e\x5e\xc5\x85\xd8\xa0\x31\xf0\
\x03\x8c\x62\xf4\xd7\xe9\xa8\x81\xa8\x81\xa8\x81\xa8\x81\xa8\x81\
\xa8\x81\xff\x84\xf8\x07\xbc\x36\x24\x3d\x4e\x42\xb6\x0a\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0c\x9b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x13\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x4f\x70\x74\x69\x63\x61\x6c\x20\
\x44\x72\x69\x76\x65\x3e\x67\xba\x0c\x00\x00\x0b\xf9\x49\x44\x41\
\x54\x68\xde\xed\x5a\x69\x6c\x5c\xd5\x15\x3e\xef\xcd\xe2\x99\xb1\
\x3d\x33\x5e\xc6\xf6\xd8\x78\xdf\x9d\xc5\x0e\x10\x42\x48\x48\x94\
\xa6\x94\x90\xa4\x48\xa5\x09\x15\x45\xa5\x81\x2a\xa8\xa8\x52\xf9\
\xd7\xe5\x67\x7f\xf4\x67\xff\x20\x21\x21\xaa\x4a\xb4\x08\xa5\x0a\
\xa1\x12\x34\xcd\xc2\x9a\xcd\x49\x70\x48\x88\xb7\xf1\x16\xdb\x33\
\xb6\x43\x3c\x63\x8f\x97\x99\x37\xcb\xdb\x7b\xce\x9d\x37\xcf\x93\
\x00\x12\x6d\x1c\x12\x24\x9e\x75\xf4\x66\xde\xbc\x7b\xef\xf9\xce\
\xf9\xce\x72\xdf\x33\xb7\x6f\xdf\x3e\xf8\x2e\x1f\x3c\x7c\xc7\x8f\
\xef\x01\xdc\xed\xc3\xba\x9a\x93\x1d\xf8\xd5\x2f\x9e\x52\x64\xf9\
\x65\x8e\xe7\x37\x00\xe8\xa0\x69\xda\x15\x9b\xcd\xfe\xca\x1b\x7f\
\x7b\xf3\x5f\x77\x0a\x00\xb7\x1a\x41\xfc\xec\xb3\xcf\x7a\x75\x8b\
\x72\xc8\x5d\xe0\xde\xee\xf3\x95\x3b\xdd\x85\x6e\xd0\x54\x05\x22\
\xf3\x73\xb0\xb0\x18\x4d\xa6\xc5\xf4\x19\x5e\xb3\x3d\xf3\xd6\x5b\
\x6f\x2d\xad\x36\x00\x4b\x47\x47\xc7\x6d\x4d\xb0\xe7\x37\x7b\x1c\
\xb6\x34\x7f\xba\xa6\xba\x6e\x4b\x7b\x5b\x87\xdd\xe5\x74\x02\xcf\
\x73\x60\xb1\x5a\x81\x80\xf8\x4a\x7c\xb6\x58\x3c\x5e\x9f\x14\x13\
\x8f\xd7\x6d\xab\xff\xc7\xd8\xa5\x31\xe5\x9e\xa2\x50\xfe\x42\xde\
\xe1\xaa\xaa\xca\x75\xcd\x8d\xcd\xbc\x28\xa5\xe1\xc2\xc5\xf3\x70\
\xe2\xf8\x49\x88\xcc\x45\xc0\x57\xea\x83\x1f\x3e\xf6\x18\x6c\xda\
\xb8\xd1\x32\x18\x18\x58\xab\x47\xb5\xc3\x38\xe4\xc9\x7b\xc6\x03\
\x3f\xfb\xf9\xbe\xbd\x1e\x8f\xfb\xf7\xf7\x77\x6d\xcc\x53\x54\x19\
\x8e\xbc\xf3\x0e\x1c\x3b\x7a\x0c\xbc\xc5\x5e\x28\x2b\x2f\x03\x4d\
\xd7\xa0\xa7\xe7\x12\x88\x92\x08\x9b\x37\x3f\x62\x89\x46\xa3\x35\
\xed\x6b\x5a\x7b\x07\xfb\x03\xa3\x77\x3d\x0b\x3d\xfd\xf4\xd3\x76\
\x59\x96\x5f\x5f\xd3\xb6\xce\x49\xdf\xc7\x46\xc7\xe0\x7c\x77\x37\
\x34\xb7\x35\x83\xdb\xe3\x06\xbb\xcd\x06\x85\x05\x05\x50\x5b\x5b\
\x0d\xdd\xdd\xe7\x61\x6a\x6a\x1a\x2a\xca\x2b\x9c\x92\x28\xbd\x4e\
\x63\xef\x3a\x00\xde\xa1\xff\x74\xfd\xda\xce\x52\x8f\xc7\x03\x89\
\x84\x00\x87\xfe\x79\x08\xaa\xee\xab\x02\x9b\xcd\x0a\x16\x0b\x8f\
\x62\x61\x62\x45\x20\x25\x25\xc5\x70\xe4\xc8\x61\x3c\x97\x80\xd3\
\xe5\xf4\x4a\x90\x7a\xf2\xae\x03\x68\xa8\x69\xfa\xe3\x9a\x8e\x75\
\x36\x8e\xe3\xe0\xd8\xf1\x63\x98\x32\x55\xf0\x7a\x3d\xa6\xe2\xbc\
\x21\x16\x9e\x87\xfc\x7c\x17\x24\x53\x29\xf8\xe8\xa3\x0f\x31\xa8\
\xcb\x9c\x72\x5a\x3e\x70\x57\x01\x3c\xff\xfc\x33\xb5\xa1\x50\xb0\
\x83\xb8\x1d\x8b\xc5\xe0\xf2\xe5\xcb\xcc\xfa\x59\xe5\x2d\xbc\x25\
\xe3\x05\x54\x9e\x37\xae\x15\x15\x17\xc1\xa5\xcf\x3e\x03\x9f\xaf\
\x14\xf2\x6c\xf6\x9d\x7b\xf6\xec\x71\xdc\x15\x00\x07\x0f\x1e\xac\
\x97\x75\xe8\x2d\x2e\x2e\xb6\x24\x93\x09\x18\x1e\x19\x06\x7b\x9e\
\x9d\x49\x46\xe9\x5c\x10\x19\x0f\xf0\x28\x36\x4c\xab\xe4\xad\x91\
\xb1\x31\x28\x70\xbb\x31\xff\xa9\xbb\xbe\x75\x00\x2f\xbd\xf4\x42\
\x23\x67\x55\x7b\x54\x45\x75\xfb\x2b\xfc\x20\x89\x69\x18\xbd\x36\
\x6a\x52\x87\x37\xe9\xc3\x33\x30\x7c\xae\x20\x08\x87\xd3\x01\x43\
\x81\x00\x78\x3d\x5e\x07\x06\xf3\x2f\xef\x48\x1d\x78\xe1\xc5\xe7\
\xb6\xe3\x72\x7f\xd1\x41\x7f\xc0\x2c\xd7\x1c\xa7\xa3\x9c\x4a\x2b\
\x4a\x47\x75\x65\x4d\xc9\xf4\x4c\x0f\x57\x52\x52\x0a\xf1\x78\x0c\
\x66\xa6\x67\xa0\x18\xe9\x41\x8a\xeb\x3a\x8e\xe2\x34\x1c\x60\xe4\
\x68\x1d\x1b\x0a\x1e\x5b\x0a\x14\x9e\xd7\xc0\xe1\x70\xc0\x34\xde\
\xef\xf5\x7a\xe9\xbe\xc7\xb7\x6d\xdb\x96\x77\xe6\xcc\x19\x71\xd5\
\x00\x1c\x38\xf0\x4c\x03\x68\xf0\x9f\xa6\xe6\x96\x7c\xb7\xdb\xc3\
\xae\xf5\xf5\xf7\x41\x7f\x7f\x1f\x57\x52\x5a\xb4\xa3\xb9\xa9\x15\
\x03\x32\x1f\x2a\x2b\xaa\x98\xb2\x91\xc8\x2c\x48\xb2\x44\x99\x85\
\xdd\x4b\xd7\x34\xa4\x09\x90\x90\xf2\x88\x80\xd7\x32\xb1\xa0\x19\
\x34\x4a\xe8\x2a\x2c\x2f\x2d\x81\x95\xb3\xab\xba\x53\xdc\x8c\xc3\
\x4e\xad\x1a\x85\x24\x55\xfd\x7b\x4b\x73\x9b\xc3\xe5\xca\x87\x68\
\x74\x1e\x7a\x7b\xaf\xc2\x85\x0b\xdd\x20\x2b\x32\x10\xa0\xf6\xd6\
\x0e\x48\x26\x53\xf8\xb9\x10\x64\x49\x82\x99\xeb\xd7\xc1\x83\x7c\
\x36\x83\xd7\x92\xc3\x7b\x83\x36\xe6\xd9\x90\x3c\xbb\x1d\x6e\xdc\
\x98\x05\x57\xbe\xd3\x8e\x58\xee\x5b\x35\x0a\xad\xdd\xb0\x76\x6d\
\xa5\xbf\x7a\x4b\x63\x43\x33\x87\x0d\x18\xb8\xd0\xaa\x57\x3e\xbf\
\x0c\xaa\xaa\x22\x45\xbc\xd0\xd6\xda\x8e\x0a\x58\x20\x95\x4a\xa2\
\x12\x79\xa0\x60\xb3\x96\x42\x30\x44\x0b\x52\x3a\xeb\x01\x0a\xd4\
\x0c\x7d\x90\x4e\x86\xe5\x73\x01\x58\xd1\x0b\x64\x1c\x7f\xa5\xdf\
\x36\x3f\x17\xf5\x67\xd7\xef\xdc\xd8\xd9\x8e\x61\xfe\x3a\x0e\xdb\
\x82\x73\x73\xaa\xaa\x61\x43\xa8\xb2\xf5\xe9\xb3\xaa\x19\x67\x55\
\xd5\xb1\xcb\x3d\xab\xc9\xda\xaf\x23\x91\xc8\x90\xd5\xe0\xb8\x25\
\x11\x8b\xbf\x67\xb7\x59\x39\x1a\x40\x19\x24\x1a\x5d\xc4\x02\x95\
\x60\x8b\xda\xed\x0e\x28\xf3\x95\x33\x66\x60\x63\x06\xe5\x65\x65\
\xa0\x28\x0a\x08\xf8\xfb\xe0\x40\x00\x52\x62\x0a\xab\x6e\x21\x34\
\x34\xd4\x81\xaf\xcc\x47\x48\x90\x3e\x18\xcc\x3a\x51\x48\x33\x95\
\xc7\x36\x9b\x35\x79\x71\x21\x06\x75\x79\x75\x1c\x2a\x5a\x97\x05\
\xa0\x4a\xca\x6b\xe5\x7e\xff\xd6\x4a\x7f\xa5\x69\x5d\x46\x49\x1c\
\xaf\xd1\x7c\xec\x4c\x00\x34\x2e\x14\x0a\x6d\x9b\x08\x86\x5e\xc3\
\x5b\xb6\x67\x3c\x60\x51\x77\xf9\xca\x4a\xab\x0b\x0b\x3d\xa8\xf8\
\x1c\x48\x48\x8f\xe1\xe1\x00\x43\xef\x72\xb9\x50\xe1\x0a\xc6\x69\
\x15\xad\xbe\xb8\xb8\x00\x35\xd5\xd5\xa0\x20\xf7\x87\x47\x47\x59\
\xfa\x5c\xb7\x7e\x1d\xd2\x4c\x44\x30\x43\xb0\xb4\xb4\x0c\x4d\xcd\
\x4d\x19\xe5\x4d\xc5\x39\x26\x3c\x5a\x80\xe8\x95\x16\x25\xb0\x21\
\x95\x70\x7e\x13\x00\xb6\x25\x8f\x92\xf2\x03\x83\xfd\xcc\x48\xf9\
\xb8\x2e\x4b\x0a\x3a\x18\x67\x8d\x19\x8c\xda\x93\xc6\x86\x26\xb8\
\x36\x3e\xf1\xa8\x19\x03\x9a\xa2\x1f\x44\x7a\x58\xad\x68\xb5\x78\
\x5c\x60\x96\x9f\x0d\x87\x99\xcb\xac\xd8\x1a\xf8\xb0\x05\x10\x31\
\x65\x26\x93\x49\x96\x79\xec\x48\x21\x19\x3d\xa0\x88\x22\xd4\xd4\
\x54\x43\xa1\x1b\x27\x6d\x6c\x84\x5d\x4f\xfc\x08\xf9\x1d\x46\x23\
\x44\xcd\xfc\x6f\x0a\xb7\xf2\x19\x37\x3d\x60\x47\x4f\x60\xf5\x36\
\x63\x40\x96\x64\xc6\xbd\xe5\x98\x00\x47\x8f\x7f\x0c\x57\xfb\x02\
\xc6\xfd\x1c\xa3\x65\xdf\xc0\x30\x9c\xfc\xf0\x2c\xae\x9f\x60\x5e\
\x51\x35\x8d\x5b\x09\x62\x0e\xee\x47\xff\x30\x57\x09\x42\x9c\x01\
\x20\x20\xd4\x1e\x10\x9d\x48\x61\x11\x95\xa5\xca\x2b\x60\xdf\x63\
\xc3\xfe\x46\x56\x24\x4c\x85\x1c\x0b\x52\x5a\x80\x05\x68\x5e\x1e\
\xb4\xb5\xb5\xc0\xe8\xc8\xd8\x97\x94\xe7\x0c\x45\x88\x46\x68\x6d\
\xe0\xac\x3c\x29\x52\x6e\x26\x10\xbc\x46\xb4\x2c\x29\x29\x82\x6d\
\x5b\x1f\xc6\x82\x37\x01\x57\xae\x0e\x30\xeb\xf7\xf6\x0d\xc2\xd8\
\x78\x90\x5d\x2f\x2e\xf2\x1a\x71\xa1\xae\x04\x31\x4e\x5c\xa6\x62\
\x4a\xd0\xd0\xe2\x31\xb4\x30\x0d\xa2\x06\x8d\x6e\xd2\x10\x98\xd5\
\x6a\x83\x04\x56\x5d\x0a\x56\xea\x69\xa8\x61\x13\x45\x19\x54\x85\
\x16\x55\x19\xb8\x78\x2c\x4e\x2c\x63\x7d\x0f\x19\x80\x59\x87\xcb\
\x2a\xcd\x99\x20\x78\x23\xc8\x75\x95\xf8\xad\x17\xdd\x0a\x20\x89\
\x63\x1f\x7a\xb0\x8b\x5d\x3b\x73\xee\x22\xee\x2b\xe6\x61\x69\x39\
\xce\x94\x7f\xf0\x81\xf5\x30\x31\x3e\xc9\x12\x08\xad\x7b\x53\x16\
\x42\x17\x42\x3a\x9d\x86\x84\x90\x30\x82\x27\x13\xf5\xc4\x6d\xca\
\xf5\x94\x36\x2d\x56\x0b\xcb\x0c\x8c\x97\x5a\xc6\x0a\x22\x8e\x11\
\x04\x81\x2d\x4e\x08\x12\x48\x33\x64\x2c\xb3\xbc\x66\xc4\x01\x97\
\x05\x92\x23\x34\x3f\x51\x21\x77\x7d\xf2\x0c\x5d\x9b\x98\x98\x44\
\x65\x3b\x61\x7c\x22\x08\xd7\xbf\x98\x85\xaa\xca\x0a\x78\xe0\xfe\
\xf5\x30\x7e\x6d\x82\xe9\x46\xf7\x11\x08\x13\x00\x5e\x8c\xa4\x52\
\xe9\xea\x58\x6c\x19\x03\x2c\xcd\x6e\xa0\x85\x55\x55\x02\x31\x25\
\x61\xe1\x59\x64\xd9\x03\x44\x0e\x8b\x91\x8d\x51\x89\xb3\xf0\x8c\
\x7b\x14\x58\x64\x79\x02\x4f\x1f\x16\x17\x16\xa1\x00\x8b\x1d\x03\
\x8b\xca\xdc\xaa\x38\xa5\x32\xf2\x24\xcb\x2e\xaa\xb2\x98\xeb\x01\
\xaa\x37\x59\x6a\xbc\xfb\xde\x71\x53\x79\x3a\xd3\xf7\x35\xed\xcd\
\x0c\x80\x22\x2b\x4c\x56\x3c\xa0\xc3\x95\x94\x28\x56\x2f\x63\x67\
\x49\x8a\x90\xa0\xb3\xd9\x64\x44\x99\x05\xac\x9c\x5e\x2c\x64\x22\
\x66\x0f\x16\x0f\xec\x77\x0e\xf2\x1c\x76\x10\x30\x63\x90\xf5\x29\
\x2e\x88\x82\x73\x73\x73\xb0\xf1\xa1\x07\x51\xcf\xaf\xb0\x3c\xfd\
\x21\x06\xa2\x24\x79\x54\xd5\xf4\xb0\x09\xc0\xf0\x00\xad\x19\x18\
\xba\x06\x13\xa1\x69\x46\x9b\x1d\x3b\xb6\xc2\x27\x9f\x9c\x63\x74\
\x22\xe5\xdb\x5b\x1b\xf1\x3e\x89\x25\x11\x1b\xd3\x92\xb8\x6a\xe5\
\xfe\x2a\xa5\x53\x4a\x74\x21\x8a\xbc\x56\x19\x87\x33\x56\x52\x59\
\xe6\x99\x9e\x99\x62\x83\x85\x44\x9c\xf1\x9f\x01\x44\x0f\x15\x16\
\x78\xb0\x9d\x8e\xc3\xf2\xe2\x32\xcc\xcf\xcf\x63\x6b\x11\xc1\x5a\
\x50\x0f\x95\x95\xfe\x1c\xa5\xc1\xb0\x3c\x98\x20\xec\x76\x1b\xa4\
\xd0\x8b\x38\xff\x4c\x4e\x1a\x65\xa0\xe2\x48\xe1\xac\xf2\x5d\x9d\
\x6b\xa0\xe7\xe2\x25\x76\xa6\xef\x93\xa1\x19\xf6\x3b\xa3\xd0\x4d\
\x1e\x50\x2d\x27\xe2\x82\x30\x8d\x3d\x7e\x7d\x5d\x6d\x5d\x26\xd3\
\xd8\xad\x46\xe5\x4b\x43\x28\x18\x84\x96\xe6\x56\x06\x86\xe7\xad\
\xac\x1a\x5b\xf1\xf7\x2e\xe4\x25\xf5\x35\x04\x9c\xb6\x91\x5d\x9b\
\xd6\x43\x65\x55\x15\x53\x84\xcb\x68\xce\x84\x7d\x84\x0c\x08\x12\
\xca\x56\x69\xf4\x2c\x06\x71\x30\xc7\x03\x3a\x7a\x98\x2b\xc1\xc6\
\xf0\x0f\xbf\xfb\x2d\xc6\xa2\x00\x23\xc3\x23\x2c\xde\x86\x87\x46\
\xa0\x73\x7d\x07\x6c\xd9\xb2\x09\x86\xb1\x9b\xa5\x3a\x82\x1e\xd0\
\x9d\xe8\x75\x06\xe0\xed\xb7\xdf\xd6\xf6\xef\xdf\xff\x54\x24\x1c\
\xbe\x5c\x53\x5d\xcb\xa7\x53\x69\xa4\x83\x9e\xc1\x86\xc1\x92\x48\
\x24\x61\x12\x03\xab\x00\x8b\x48\xc6\x13\x49\x28\xce\xf3\xb2\x96\
\x62\xc7\xce\xed\xe8\x15\x3b\xf3\x0c\x2d\x46\xd6\xc9\x76\xa3\x99\
\x0c\x9d\x05\x92\xfd\xce\x81\x13\xdb\x6a\x8c\x39\xcc\x03\x9a\x09\
\x40\xd5\x94\xee\xb1\xf1\x89\xad\x35\xb8\x31\x9a\x0b\x47\x80\x74\
\x28\x2e\x2e\x31\xab\xf1\xec\x17\x61\x2c\x9a\x0b\x98\x18\x38\x98\
\x0a\x4e\x91\xf7\xba\x73\xb3\x10\x8f\x20\x02\x2f\x1c\x7c\xfe\x62\
\x30\x34\xb9\x09\x3b\x4e\xcb\x8d\xd9\x1b\x8c\xef\x82\x20\xb1\xd4\
\xd6\x3f\xd0\x07\x9b\x1f\x7e\x84\x3d\xb0\x22\xeb\xf8\x7c\x25\x99\
\xa6\x53\xcb\x54\x49\x5a\x64\xa5\x6a\x7e\xcd\x53\x34\x03\x45\x21\
\x36\x83\xd1\xe8\x82\xac\x29\xd2\x2c\x7d\x3f\x72\xe4\x08\x5f\xd7\
\xd8\xf8\xaa\x20\x24\x8b\x03\x23\x63\xed\x18\x93\x06\x5c\xa3\x12\
\xb3\xd6\x36\x3b\xb7\xae\xab\x8a\x32\x38\x35\x11\x7c\x75\x7a\x3a\
\xc8\x5b\x71\xb0\xe1\x58\xe0\xae\xcf\x7e\xf1\x22\xa6\xc9\x9e\xda\
\xda\x3a\x57\x55\x65\xa6\x48\xce\x63\x50\x86\xe7\xc2\xac\x2e\xf4\
\xf6\x5d\x65\x1c\x0f\xcf\x45\xb0\x60\xb5\xb1\xc0\x4d\x11\x15\x88\
\x6a\x9c\x6a\xf4\x2f\x5a\x0e\x08\xb6\x34\xc0\x2d\x80\x0a\x0b\x0b\
\x61\x7c\x7c\x52\x92\x34\xe5\x7a\xb6\xed\x09\x8e\x8f\x7f\x84\x72\
\xe5\x7f\x68\x44\x29\x83\xe9\x56\x30\x99\x09\xdc\xc9\x7f\x9f\x0c\
\xee\xdd\xbb\x6b\x3f\x2a\xf5\x67\x0c\xb8\xae\x9c\x9b\x75\xcc\x0e\
\x9f\x8a\xe2\x4c\x93\xcb\xe1\x2a\x4e\xa4\x04\x7e\xe7\x8e\x9d\x8c\
\xcb\x54\x98\xa8\x53\x84\x8c\xfe\x86\x27\x72\x40\xd0\x39\x6b\x41\
\xfc\xa3\xf4\x6a\xc7\x3e\x08\x63\xc7\x12\xe8\x1b\xfd\xf4\xfd\xf7\
\xdf\xa7\xc7\x9b\x74\xe7\x9c\x21\xb7\xb7\x23\x3b\x7a\xf4\xc4\x39\
\x3c\xfd\x80\x5a\xbc\x5b\xa5\xb3\x73\x4d\x3d\x72\xfc\x9d\xda\x9a\
\x1a\x6f\x64\x2e\xcc\xe5\xbb\x0a\x60\xd9\x16\xc3\xad\xa5\x68\x92\
\x84\x75\x8d\x06\xad\x18\x08\xc3\x03\x59\x10\xb4\x1b\x8b\x63\xea\
\x95\x15\xf5\x03\x54\x5e\x34\x94\xbf\xad\x0d\x8d\xfe\x0d\x45\xeb\
\xed\x1d\x0c\x0e\x07\x46\x7f\x12\x8e\x84\x85\xe1\x91\x11\x2c\x58\
\x05\xcc\x9a\xaa\x51\x95\x59\xff\xae\x65\xe2\x21\x03\xc2\x90\x1c\
\x0f\x94\x96\x96\xd2\x86\x46\x94\xa4\xf4\x9b\x5f\x26\xd7\xff\x01\
\xc0\xb0\x80\xf6\x35\xa2\x1a\xa2\x64\xcf\xfd\xfd\xfd\xc1\xbe\xab\
\x83\xbb\xfb\x7a\x3f\x57\x1d\x0e\x27\xcb\xe9\x3a\xcb\x56\x9a\x09\
\x82\xd1\x28\x2b\x39\x20\x68\x4b\xe9\xc1\x74\x3b\x15\x0a\xe9\x23\
\x81\x6b\x27\x8c\x35\x6e\xff\xa9\x04\x82\xb8\x55\xe1\x9b\x94\xce\
\x11\x99\xa4\xb7\xb7\x77\x12\xbb\xd5\x53\x0b\xb8\x37\xa0\xed\xa7\
\xd3\xe9\xcc\x74\x88\x5a\x46\xb2\x7d\xce\x0a\x90\x0c\x88\xb2\xf2\
\x72\xdc\x4f\x2c\x52\xbf\xf4\xf1\xa9\x53\xa7\x92\xb7\x4b\x9f\x9b\
\xf6\xc4\x38\xd9\x37\x52\x3e\x2b\x8b\x4b\xb1\x43\xfd\x7d\xfd\x62\
\x91\xa7\x88\x6d\x32\xa8\x72\x9b\xdb\x3f\x55\x33\x77\x52\x9a\xb1\
\x19\xa1\xc7\xee\x15\x15\xe5\xd8\xa8\x05\x53\xb2\xac\xbd\x61\x18\
\x6d\x75\x9f\x0b\x19\x93\x7e\xa5\xc2\x39\x22\x91\x9c\xfe\xf8\xf4\
\xd1\xf3\x17\xce\x2f\xc5\x71\xff\xe0\x71\x7b\xa1\x08\xfb\x74\xe2\
\xbd\x6a\x50\x68\x25\x16\x34\x96\xd6\x6b\x6a\x6b\x20\xb6\x1c\xc3\
\x76\x78\x62\x29\xb6\x18\x7b\xf7\x8e\x3d\x5e\xa7\xef\x28\x3a\x8a\
\x16\x08\x04\x6e\x0a\xe2\x5c\xc1\x7e\x49\x6e\x69\x6e\x1a\x9b\x5f\
\x5c\xf8\xf1\x86\xae\x2e\x1b\x75\x92\xa4\x2c\x15\x3d\x2d\x47\x79\
\x02\xe4\xf7\x97\xb3\x2a\x7e\xee\xec\xf9\x94\x10\x8b\x3f\x87\x7b\
\x89\xa1\x6f\xe5\xfd\x40\x16\x08\x8a\x8a\x60\xb2\xca\xeb\x06\xbd\
\xb4\x81\x81\xc0\x08\x16\xb6\x4d\x98\xdb\xeb\x1b\xea\x1b\x2c\xc4\
\x7d\x1b\x06\xb5\x94\xe9\x55\xd8\x23\x76\xbf\xdf\xcf\x52\x67\x7f\
\xff\xa0\x34\x39\x11\xfc\x40\x88\x27\xff\x74\x4f\xbd\x23\x43\xcb\
\x3a\x54\x50\xba\x77\xef\x7e\xa2\xab\xb5\xa5\x85\x27\x4a\x51\xbb\
\x0b\xd9\x2e\x14\x8f\xc1\xc1\x21\xf5\x52\xcf\xa5\x5e\x0b\x67\xdb\
\x82\x9b\x9f\xf4\x3d\xf5\x8e\x4c\x92\x24\xc5\x61\x4f\x1f\x1e\x19\
\x0d\x6d\x08\xdf\x98\xad\xaa\xa8\xa8\xb0\xd1\x93\x0c\x3a\x16\x70\
\x73\x73\xe6\xf4\xd9\xe4\xf0\xd0\xd0\x27\x56\x3e\xf9\xa4\x20\x28\
\xc2\x3d\xf9\x96\xd2\xb4\x86\x1d\x9e\xb2\xf2\xb6\x97\x35\x5d\xdd\
\xc0\x26\xe7\x2d\x57\x54\x55\x7e\x05\x37\x76\x77\xec\x35\xeb\xaa\
\xbe\x27\x26\x45\x55\x90\x73\x94\xd5\xe0\x4e\x1f\xdf\xff\xab\xc1\
\xf7\x00\x6e\xf3\xf8\x2f\x17\x50\x4f\xbf\x20\xd6\x75\x19\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\x32\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x13\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x4f\x70\x74\x69\x63\x61\x6c\x20\
\x44\x72\x69\x76\x65\x3e\x67\xba\x0c\x00\x00\x0a\x90\x49\x44\x41\
\x54\x68\xde\xed\x59\x59\x73\x14\xd7\x15\x3e\xdd\x3d\xbb\x34\x9a\
\xd1\x3a\x5a\x40\x32\x12\x12\x42\x48\x32\x86\x84\xaa\xb8\x5c\x50\
\x0e\xb6\x03\xc1\xe5\x72\x11\x21\x1b\x30\x20\x6f\x24\x94\xe3\xbc\
\xe5\x21\x8f\x81\x97\xfc\x81\xb8\x2a\xe4\xc5\x29\x9c\x80\x11\xc6\
\x18\x10\x3b\x94\xa0\x22\x40\x42\xd8\x08\x6d\x23\x41\x84\x36\x6c\
\xed\x33\x9a\x7d\x7a\xcd\x39\x77\xa6\x47\x23\x83\xab\xcc\x40\x8c\
\x9c\xf2\x48\x47\xdd\xea\xe9\xb9\x7d\xbe\x73\xbe\xf3\xdd\x73\xef\
\xf0\x75\x75\x75\xf0\x63\x36\x1e\x7e\xe4\xaf\x9f\x00\xfc\x5f\x01\
\x68\x78\x77\xc7\xe6\xb7\x76\xbe\x79\x79\x47\xc3\x36\xdf\x8e\x86\
\xad\x3e\x3a\xa7\x6b\x0b\x1e\xc0\xf6\xed\xdb\x9d\xdb\x76\xbe\x71\
\xda\x62\xb6\x7c\xb2\x64\xc9\xd2\xb5\xb5\xd5\x2b\xed\xd5\xcb\x6b\
\xec\x2e\x57\xc1\x5a\x8e\xe3\x0e\x6c\xdd\x51\x7f\x9a\xee\x59\x90\
\x00\x36\x7d\xb0\xc9\xa2\x80\x78\x71\x71\x51\xc9\x2b\x2b\x9f\x5d\
\x65\x75\xe5\xe5\x81\xc9\x64\x04\xb3\xc5\x02\x8b\x8a\x16\x41\xf5\
\xf2\x6a\x5b\x7a\x9a\xfd\x65\x49\x8b\x5e\xa4\x7b\x17\x1c\x80\xb4\
\x19\xf3\xe1\xa2\x82\xc2\x9a\xf2\xb2\x72\x5e\x51\x64\xb8\x76\xfd\
\x2a\xec\xdd\xbb\x17\x3e\xfc\xc3\x87\xb0\x6f\xdf\x3e\x68\xbd\x71\
\x03\x2a\xcb\x97\x09\xe9\x56\x5b\xb5\x6d\xda\x7c\x78\x41\x01\x78\
\x63\x5b\xdd\xab\x0e\x87\xe3\xa5\x15\x55\xb5\x46\x45\x95\xe1\xe8\
\xe7\x9f\xc3\xe1\x43\x8d\x60\xb5\x59\xa1\xbc\xa2\x1c\x4c\x16\x13\
\x34\x36\x36\xc2\x89\xa6\x93\x50\x59\x59\x65\x32\x9b\x2c\x2f\xd1\
\x67\x16\x04\x80\xfa\xfa\x7a\x93\x24\x49\xfb\x57\x54\xd6\x58\xe9\
\xff\x3b\xfd\x77\xe0\x6a\x4b\x0b\x94\x57\x96\x43\x86\x23\x03\x4c\
\x46\x23\xd8\xd3\xd3\xa1\xa4\x64\x31\xb4\xb4\x5c\x85\xe1\xe1\x11\
\xc8\x77\xe5\x5b\xc5\xa8\xb8\x9f\x3e\xfb\xd4\x01\xf0\x16\xed\x37\
\xb5\xd5\xcf\xe6\x60\x06\x20\x18\x0c\xc0\xc1\x43\x07\xa1\x68\x51\
\x11\x18\x8d\x06\x10\x04\x1e\x4d\x60\x66\x40\x20\xd9\xd9\x59\x70\
\xe4\xc8\x61\x3c\x66\x53\x76\x9c\x22\x84\x5f\x7b\xea\x00\x4a\x8b\
\x97\xfe\x69\x45\x55\x8d\x11\x55\x06\x4e\x9d\x3e\x05\xaa\xaa\x80\
\xd3\xe9\x48\x38\xce\xc7\x4d\xe0\x79\x48\x4b\xb3\x41\x28\x1c\x86\
\x8b\x17\x2f\x40\x6e\x76\x9e\x55\x8a\x48\x0d\x4f\x15\xc0\xae\x5d\
\xf5\xc5\x43\x43\x83\x55\x51\x31\x0a\x3e\x9f\x0f\x6e\xde\xbc\xc9\
\xa2\xaf\x3b\x2f\xf0\x42\x2c\x0b\xe8\x3c\x1f\xbf\x96\x99\x95\x09\
\x37\xda\xdb\x21\x37\x37\x07\xcc\x46\xd3\xfa\x4d\x9b\x9e\x8c\x22\
\x3d\x32\x80\x3d\x7b\xde\x2e\x51\x40\xe8\xc8\xca\xca\x12\x42\xa1\
\x20\xb8\xfb\xdc\x80\xd5\xc9\x2c\xe6\x74\x32\x88\x58\x06\x78\x34\
\xa3\xc1\x00\x94\xad\xbe\x3b\x77\x20\x3d\x23\x03\xc0\xa0\x6c\xf8\
\xc1\x01\xec\xd9\xf3\x4e\x99\x02\x7c\x9b\xa2\x28\x8e\x82\xfc\x02\
\x10\xa3\x11\xe8\xbf\xdb\x9f\xa0\x0e\x9f\xa0\x0f\xcf\xc0\xf0\xc9\
\x86\x20\x2c\x56\x0b\xf4\xf6\xf4\x80\xd3\xe1\xb4\x60\x31\xef\x4a\
\x1e\x9b\xab\xaf\x17\x9e\x08\x80\x77\x76\xef\x5c\xf7\xde\xee\x86\
\xf6\x77\x77\xef\xd2\x74\x7b\xef\xb7\x0d\xea\xfb\xbf\x7b\xfb\x52\
\x44\x96\x5b\xf2\xf3\x0a\x72\xbd\xb3\x5e\x2e\x3b\x3b\x07\x22\xd1\
\x28\x8c\x8e\x8c\x42\x06\x46\x34\x41\x9f\x78\x01\xf3\x7a\x16\xd0\
\x78\x66\x08\x00\x27\xb7\x11\xbc\xdf\xe9\x74\x82\xc6\xa9\xbf\x5a\
\xbb\x76\xad\x59\x7f\x6e\x51\xa0\x6c\xdd\x91\xc6\x46\xee\xb1\x00\
\x34\x34\x6c\x2d\x05\x95\x6b\x5a\x5a\x56\xb1\x7a\xf5\x73\x6b\x80\
\xcc\x68\xa0\xa8\xf5\x71\xe3\xe3\xe3\x2f\x66\x3a\xb2\x5c\x69\x69\
\x69\x5c\x61\x7e\x11\x68\x9a\x06\x13\x13\x63\x20\x4a\x22\xd3\xfd\
\xe4\xe2\xd5\x29\xa4\x47\x3e\x99\x46\xaa\xa6\xc0\xac\xd7\x0b\x06\
\xce\xa4\x18\xad\xdc\x2f\x12\x8e\xf0\x86\x65\x7f\xfc\xf8\x86\xeb\
\xb1\x00\x88\x8a\xf2\x8f\x8a\xf2\x0a\x8b\xcd\x96\x06\xd3\xd3\x53\
\xd0\xd1\x71\x0b\xae\x5d\x6b\x01\x49\x96\xc0\x6e\xcf\x80\xaa\xca\
\x15\x10\x0a\x85\x31\xe2\x76\x90\x44\x11\x46\xef\xdf\x07\xc7\xbc\
\xe8\x27\xf1\x3e\xee\x7c\xe2\x18\x37\xb3\xc9\x04\xdf\x7c\x33\x06\
\xb6\x34\xab\x09\xb1\x2c\x4a\x50\x48\x53\x4d\x0a\x18\x57\xa5\x0c\
\xa0\xfa\xb9\xea\xea\xa2\xc2\xc5\xcf\x97\x95\x56\x08\x66\xb3\x19\
\x6c\x18\xd5\x2f\xbf\xba\x09\xc8\x77\xc8\xca\x72\xc2\xf2\x65\x55\
\x8c\x0a\xe1\x70\x08\x9d\x30\x83\x8c\x6d\x43\x18\xc1\x10\x2d\xbe\
\x0d\x40\x97\x4f\x81\x9f\xef\x3c\x99\x01\xb3\x40\xc1\xb1\x59\x6d\
\x46\x59\x56\x0a\xf4\xe7\x6b\x1a\x37\x86\x7f\x5e\x4d\x09\x00\xaa\
\x83\x10\xf4\xf9\x8f\x63\x8a\x79\x72\x98\xd2\x3f\x3d\xed\xc1\x09\
\x2a\xc8\x6e\x32\x99\x2c\x90\x97\xe7\xc2\xfb\x00\x7c\x7e\x3f\x98\
\xcd\x16\x90\x65\x19\x02\xf8\x7e\x77\x57\x0f\x5c\xb8\x70\x11\x5a\
\xaf\xb7\xc1\xd4\xe4\xe4\x3c\xf9\x64\x96\xe4\x3c\x47\xa0\x10\x80\
\x3f\xe0\xc3\x31\xcc\x1c\x06\xe1\x99\xb9\x0c\xc0\x7d\x7c\x40\x6d\
\x6a\x19\x10\x94\x0d\xb9\x79\x39\x8b\xed\x76\x07\x3a\x3e\x09\x7e\
\xbf\x0f\xdc\xee\x1e\x16\x7d\x8a\xb0\x2b\x2f\x1f\x83\x03\x40\xcd\
\x9a\xc7\x33\x43\x0f\x07\x19\xb9\xef\xee\xef\x67\xf2\xb9\xe6\xe7\
\x6b\x60\x69\x79\x29\xf4\xf6\xf6\x61\x4b\x71\xf7\x21\x8e\x73\xcc\
\x78\x8c\x00\x81\x8b\x44\x45\x30\x22\x95\x70\xfc\x24\x00\x9c\x17\
\x0f\xce\x94\x00\xa8\xb2\xf6\x3e\xd2\xc3\x60\xc0\x07\xfb\xfd\x01\
\x16\xf9\xb1\xf1\x71\x7c\x80\x8a\xad\x80\x01\x67\xcf\x6c\x26\x99\
\xa1\x50\x88\x81\x33\x21\x85\x24\xcc\x80\x8c\x2a\x54\x5c\xbc\x18\
\xec\x19\xe9\x50\x56\x56\x06\x1b\x36\xbe\x82\xfc\x1e\xc7\x20\x4c\
\x3f\x48\x1f\x6e\xee\x5c\x96\x24\x30\x51\x41\xab\x4a\xa2\x06\x78\
\x0d\x52\x07\x00\x1c\xac\x42\x79\xc0\x5f\x0d\x02\x01\x3f\x03\x40\
\x40\xa8\x3d\x20\x3a\x91\xc3\x51\x74\x96\x66\xde\x00\xf6\x3d\x46\
\xec\x6f\x24\x59\x44\x29\xe4\x58\x91\xd2\x04\xc5\x0a\x14\x33\xb3\
\xbc\xb2\x02\xfa\xfb\xee\x3c\xe0\x3c\xcb\x02\xc7\x31\x1a\x61\x13\
\x08\x9c\x81\xc7\xf1\xd5\x84\xea\xa8\x66\xf0\xa4\x0c\x00\x07\xce\
\x53\x50\x12\x54\x8c\xb8\x0f\x23\x4c\x4e\x52\x83\x46\x14\x52\x11\
\x98\xc1\x60\x84\x20\xce\xba\x04\x82\x7a\x1a\x6a\xd8\xa2\x51\x09\
\x14\x54\x27\x2c\x44\x76\xdd\xef\xf3\x33\xd0\x36\xec\x7b\xf4\xda\
\x21\xca\xc4\x9c\xe6\x12\x20\xe8\x1a\x2b\x5a\x45\xa3\xb1\x33\x75\
\x47\x06\x8f\xef\x0b\x62\x20\xcd\x8d\x8f\x38\x17\x24\x54\x48\x12\
\x25\x88\x44\x22\x10\x0c\x04\x99\x51\xf4\x89\x42\x92\x1c\x65\x5a\
\x1f\x09\x47\xd8\xfb\x2a\x82\xa2\x7a\xd0\xd8\xfb\xe8\x3c\x5e\x0b\
\x04\x02\xe0\x9d\x9d\x45\x7d\x9f\x45\xa0\x21\xd0\xf0\x47\x2f\x5a\
\x76\xd4\x81\x24\x19\x8d\x8f\x19\x48\x38\x52\x5d\xfd\x81\x81\x3e\
\xb8\x65\xcb\x16\xed\x91\x01\xe0\xa4\x34\x11\x46\x07\x7d\xbe\x59\
\x2c\xb0\x08\xcb\x00\x3d\x98\x39\x18\x16\xd1\x31\x0f\xa3\x4f\x28\
\x88\xd1\xc7\x6c\xd0\x39\x27\xf0\x8c\x7b\xa4\x44\x3e\x8c\xbe\x17\
\x27\x27\xb2\x89\xb1\x09\x48\x4f\x4b\x43\xb5\x11\xe6\x32\x90\x64\
\x24\x65\x24\xb5\xe4\xbc\xaa\xc8\x1e\xdd\x91\xe9\x62\x47\x26\x0e\
\x37\x9b\x5a\x0d\x68\xf0\x65\x18\x69\x30\x8b\x9d\x25\xcb\x42\x90\
\xb2\x19\x03\x40\x94\x99\x41\xc7\x28\x43\x14\x69\x56\x0f\x78\x0f\
\x47\xf9\xc6\x15\x57\x00\x65\x75\x16\xa3\xef\xc5\xe8\xcf\xcc\x78\
\x60\x02\xa5\xb4\xa6\xb6\x06\xfd\x7c\x48\xe4\xe9\x07\x31\x10\x25\
\x69\x22\x54\x54\x6d\x5c\x77\xc4\x28\xf0\x99\x98\x59\x4f\x4a\x00\
\x78\x03\xf7\x77\x31\x12\x96\xa7\x67\xa6\x91\xd7\x0a\x03\x10\x8b\
\x92\xc2\x94\x67\x64\x74\x98\xb5\x0e\x81\xa0\x9f\xf1\x9f\x40\x12\
\x3d\xec\xe9\x0e\x16\xfd\x59\xcf\x2c\x4c\x4d\x4d\x61\x6b\x31\x01\
\xa5\xa5\x4b\xa0\xb0\xb0\x20\xc9\x69\x88\x47\x1e\x12\x20\x68\xd1\
\x1f\xc6\x2c\xe2\xf8\xa3\x09\x47\x14\x2e\x13\x6f\xf1\xa6\x96\x01\
\x45\x38\xe3\x0f\x04\x46\xb0\xc7\x67\x0f\x62\x4a\x63\x32\xb0\x1a\
\x20\x67\x87\x06\x07\x41\xc4\x82\x25\x30\xd8\xb3\xb0\xd9\x98\xc7\
\x82\x5c\xb9\xaa\x16\x72\xb0\xa9\xa3\x1a\xa1\xf9\xe2\x85\x17\x9e\
\x87\xd5\x3f\x5b\x9d\x88\x34\xc4\x29\xc3\x4e\x21\x06\x82\x8c\xd4\
\x2a\x82\x99\xc5\x22\x1e\xd4\x1d\x51\x38\x99\x0a\x3a\xb5\x0c\x60\
\xe5\xab\x21\x7f\x64\x33\x36\x6c\x0a\xa9\x0e\x15\xac\xaa\xc4\x6a\
\x89\x26\xaf\x60\x30\x04\xf7\x06\xee\x31\x5a\xc4\x32\x11\x62\xaa\
\x42\x2d\xc5\x8b\xeb\xd7\xc1\xd6\x6d\x6f\xc2\xeb\xaf\xbf\x06\x25\
\xcf\xc4\xe7\xa5\x24\x1d\xe1\x40\x07\xa2\xff\xcf\x81\x15\xdb\x6a\
\xac\x39\xd4\x01\x75\x30\x69\x22\xab\xd0\x38\xad\x25\xd5\x5e\x88\
\x47\x10\x3d\xa8\x20\xad\x83\x43\xf7\x14\xec\x38\x99\x6c\x12\xdf\
\x29\x0b\x21\xa4\x54\x67\xd7\xed\x98\x5e\x13\x20\xac\x05\xa2\x18\
\xf9\xa5\x21\x60\x4d\x53\x63\x45\x89\x46\x00\xb5\xef\xd0\x11\x2e\
\x8e\xc2\x8e\xcd\x20\xae\xe4\x24\x55\x16\xc7\xe6\xde\xe4\xb2\x40\
\xe6\xcf\x3c\x32\x80\x23\x47\x8e\xc4\x13\x0b\xdc\xfd\xb1\xaf\x77\
\x0f\x0c\x0c\x44\x49\x59\x8a\x0a\x17\x41\xe9\x92\x52\x28\xcc\x2f\
\x64\xad\x01\xcd\x0b\x1d\xb7\x6f\x31\x75\x19\x9f\x9c\x00\x8b\xd9\
\xca\x26\x34\x95\x9c\x47\x90\xb1\x39\x43\x61\x60\xe6\x40\x90\xa0\
\xb2\xc3\xbc\x97\xdd\x6e\xa7\xa2\x17\x45\x55\xbe\x9f\x04\xcf\x17\
\x54\xe4\xae\x54\x32\x90\x00\x70\xf6\xc4\xd9\xc1\xa9\x89\x89\x2d\
\x6e\x77\xef\xad\x1b\xed\x6d\x40\x76\x6f\xe8\x1e\x71\x5f\x0b\x04\
\x82\xd7\x47\x47\x47\xa7\xc6\xbe\x1e\x57\xfb\xdc\xbd\x4c\x49\x88\
\xcb\x24\x95\x4a\x7c\x4e\x88\x81\x50\xe7\x83\xa0\x23\x21\xd0\x62\
\x70\x28\x00\x26\xec\x83\x70\x4d\x20\xf4\xdc\xee\x6f\x8d\x07\x10\
\x34\x41\xb8\x32\x73\xfe\x2f\xa1\xc7\x5e\x91\x9d\x3c\x79\xe6\xdf\
\x9f\x1e\x6c\xfc\xe5\xa1\x7f\x1d\xce\x45\xcb\x47\x2b\x42\x2b\x69\
\xfc\xf4\xb3\xad\xdd\x9d\x9d\xf5\x5d\x3d\xdd\x1e\xa4\x8d\x36\x31\
\x39\x0e\x69\xb6\x74\xec\x95\x4c\x89\x0c\x10\xdd\xe8\x5c\x8d\xd3\
\x8a\x81\x88\x67\x40\x07\x41\xab\x31\x3f\x4a\xaf\x24\x2b\xe7\xcf\
\x9d\x3b\x17\xad\xab\xab\x63\xb7\x8c\x1c\xff\x73\x7b\xaa\x4b\x4a\
\xed\x7b\x9a\xda\xd1\xd1\x3d\xd8\xdd\xdb\xb3\x19\x1b\x3d\xbf\xbb\
\xaf\x0f\x27\xac\x74\x16\x4d\x3d\x03\x34\x4b\x2b\xaa\x5e\x0f\x5a\
\x3c\x0b\xf3\x33\x90\x93\x93\x43\x0b\x9a\xa8\x28\x46\x0e\x3c\x48\
\xae\x14\x00\xc4\x23\xa0\x7e\x87\x29\x71\x93\xf5\x63\x6f\x67\xef\
\x50\xd7\xed\x8e\x5f\xdf\xee\xf8\x4a\xb1\x58\xac\x4c\xd3\x35\xa6\
\x56\x6a\x02\x04\xa3\x91\x6e\x49\x20\x68\x49\xe9\x70\x64\xc0\xf0\
\xd0\x90\xd6\xd7\x73\xf7\x4c\xfc\x19\x8f\xbf\x2b\x81\x20\xbe\xed\
\xf0\x3c\xa7\x93\x4c\x22\xbb\x75\xab\x7b\x10\x1b\xb7\xe6\x19\x5c\
\x1b\xd0\xf2\xd3\x6a\xb5\xc6\x28\xa4\xc6\x4c\xef\x73\xe6\x80\xc4\
\x40\xe4\xb9\x5c\xb8\x9e\xf0\x50\xbf\x74\xa9\xb9\xb9\x39\xa4\xd3\
\xe7\x89\x6c\xab\xe0\x60\xdf\xcb\x79\xdd\x3c\x5e\xdf\xc1\xce\xdb\
\x9d\xd1\x4c\x6c\x61\x68\x0f\x94\x64\x55\xaf\x03\x56\x0b\x04\x40\
\x8b\xb5\xe8\x54\x0f\x36\x04\x99\x9f\xef\x82\x81\x81\xc1\xb0\x24\
\xa9\x1f\xc7\x83\xf6\x64\xf7\x85\xe2\x83\x3e\xd4\xe1\x24\x13\xc9\
\x2e\x5f\xba\x7c\xf2\xea\xb5\xab\x5e\x3f\xae\x1f\x1c\x19\x4e\xc8\
\xcc\x74\x32\xde\xeb\x4a\x34\x57\x0b\x98\x01\x14\xb9\xe2\x92\x62\
\xf0\xcd\xfa\x60\xe0\x3f\x03\x5e\x9f\xc7\xf7\xc5\xff\x6c\x6b\x91\
\xd2\x4a\xd9\x40\x7b\xa8\xe3\xba\x61\xdf\x13\x08\xf8\xfc\xbf\x3f\
\xd1\xd4\x14\xb6\xd9\x6c\x40\x46\xfc\x56\x93\xe4\x54\xb7\x82\x02\
\x17\x2b\xf6\xb6\xb6\xf6\xb0\x22\xc9\xbb\x91\x4e\xe2\x0f\xb2\x37\
\x4a\x19\x41\x93\xd1\x92\x9d\x4f\x80\x39\x74\xa8\xf1\x8b\xd1\xe1\
\x91\x4b\xad\x6d\xad\x92\x13\xa9\x44\x3b\xd5\x79\xae\x3c\xb6\xb5\
\x4e\x9c\xa7\x2d\x94\xe2\xe2\x62\xb6\x2b\xdd\xd9\xd9\x2d\x4e\x8c\
\x4f\x5e\xf0\xfb\x43\x27\x9f\xca\xf7\x03\xf1\xcc\xe8\x80\xc8\x24\
\x96\x25\x95\xaf\x6b\x6e\xbe\xd2\xd9\xeb\x76\xab\x59\x99\xd9\x48\
\xa5\x2c\x46\x97\x65\xcb\x2a\xa0\x6c\x69\x19\xdb\x76\x74\xbb\xfb\
\x95\x8e\x5b\x1d\x5d\xb8\x06\xad\x5f\x70\x5f\x31\xe1\x1a\x21\x22\
\x40\x60\x7d\x53\xd3\xa9\x73\xc7\x8e\x1e\x0b\x87\xb0\xd1\xa3\x6d\
\x17\x41\x30\xa0\xe2\x78\xe1\xec\x99\xf3\xa1\xb6\xeb\xad\xe7\x05\
\x2e\xb8\x9e\xee\x5d\x90\xdf\x52\x06\x02\x06\x6f\x24\x18\xdd\xd8\
\x77\xb7\xef\xad\x4f\x0e\xfc\xf3\xca\x47\x7f\xfd\xc8\xbf\xff\x6f\
\xfb\xfd\x47\x3f\x3b\x76\x65\x68\x78\x70\x47\x24\x2c\x6d\xa4\x7b\
\x16\xfc\xf7\xc4\x8a\x08\x47\xa3\x11\x69\x9d\x14\x55\x33\xc8\x44\
\x3c\xa7\x6b\x3f\x7d\x53\xff\x13\x80\x05\xfc\xfa\x2f\x25\x47\x49\
\xfb\x85\x84\xe8\xf5\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x07\x82\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x07\x49\x49\x44\x41\x54\x78\xda\xed\x58\x03\x98\xa3\x5b\
\x12\x7d\x76\x73\x6d\xdb\xb6\x6d\xdb\xb6\xbd\x63\xf7\xb6\x6d\xdb\
\xd8\x68\xc2\x9e\x49\xdb\x18\x65\xe2\x34\x63\x1b\x7f\x6d\xd5\xff\
\x25\xb3\x79\x7e\xaf\xa7\xbf\xcc\x22\xfd\x7d\xa7\x3b\xba\xf7\x9e\
\x53\x55\xb7\xea\x74\x6e\x02\x80\xff\x6a\xa4\x05\xa4\x05\xa4\x05\
\xa4\x05\x3c\x5e\xa4\x05\xa4\x05\xa4\x05\xe0\xcf\x2d\x88\x7b\x11\
\x39\x88\x0c\xc4\xad\xff\x6d\x02\x92\xc9\x67\x23\x72\x11\x77\xfd\
\x37\x09\x20\xc2\xf7\x89\x65\x82\x19\xa1\x84\xfb\xf3\xdc\xdc\xdc\
\xcc\x24\x41\x37\xef\x8b\x80\xa4\x34\xe7\x3e\x02\x12\x87\xde\x83\
\xb8\xe3\x11\xf6\xb9\x8b\x08\x23\xb2\x12\xeb\x9e\xfe\xf4\xa7\xdf\
\x23\x19\x17\xc2\xea\xda\x52\x70\xfc\xbc\x50\x56\x57\x57\xf7\x6c\
\xca\x46\x7c\xcf\xdb\xf7\x43\x40\x22\xcd\x74\xf0\xdd\x0f\x03\xfa\
\x4c\x66\x52\x19\x64\x25\x0e\xa7\x48\xd2\x67\x92\x84\x66\x25\x89\
\xbd\xbb\xb8\xb8\xf8\x6e\xd9\x79\x31\x30\x0c\x03\x3b\xbb\x5b\x31\
\xe9\x39\x91\x53\x28\xe1\x7d\x32\x9e\x8d\x5c\xda\xfb\x7a\x05\xb0\
\x69\xfe\x47\xc1\x69\x28\xaf\x2c\x79\x48\x14\x95\x14\xb8\x8b\x4b\
\x0a\xe4\x79\xf9\x27\x8f\x1d\x38\x7a\xe0\x9d\x49\x11\xce\x48\x44\
\xf3\x73\x9f\xfb\xdc\x93\x39\xfc\xb1\x1f\x9d\x15\xf3\xda\x45\x12\
\xbe\x46\x24\x15\xc4\x24\x32\x21\x10\xd6\xd6\x57\x22\x81\x60\x00\
\xdc\x1e\x17\xf8\x03\x7e\x98\x9c\x91\x87\x24\xe3\xa2\xda\x1f\xfc\
\xe0\x07\xc9\xa2\x6f\xde\xb3\x00\x4a\x33\x11\xf5\xe0\x01\x36\xbb\
\xe5\x01\xb0\x82\xc3\x69\x07\x8b\xc5\x04\x57\x95\x97\x63\x1d\x5d\
\xad\xfe\xc2\x92\xfc\x96\x6f\x7d\xeb\x5b\x4f\x8e\x93\xcf\x1c\x18\
\xee\xfd\xe4\x59\x11\xc7\xa8\x50\x5e\x09\xd8\x1d\x56\x08\x22\xd9\
\x6b\x08\x05\x81\xc8\x9b\xad\x46\x84\x09\x9c\x2e\x07\x44\x22\x61\
\x50\xa9\xaf\x46\x84\x62\x9e\xb6\xb7\xb7\xe3\xf5\x89\x0a\xd8\xb3\
\x80\x6f\x7c\xe3\x1b\x99\x28\x00\xc9\x9a\xe1\xb2\x72\xfd\x21\xa1\
\x50\x5f\x04\xad\x5e\x85\x62\x6c\xb0\xb0\x34\x1f\x2d\x2d\x2f\xda\
\xad\xaf\xaf\x7a\x13\x57\x30\x3a\x3c\x39\x7d\x3e\xe0\xf1\x7a\xc0\
\xe6\xb0\xc0\xe6\xb6\x1e\x74\x1b\x6a\x30\x6c\xe9\x60\x6b\xc7\x00\
\x3b\xc6\x2d\x30\x9a\x77\x58\xf2\x14\x10\x5a\x4f\x99\xa0\x92\x52\
\xaa\x15\x0c\x66\x4b\x79\xea\xd4\xa9\xa7\x65\x65\x65\xe5\xec\x59\
\xc0\x8f\x7f\xfc\xe3\x2c\x12\xe0\x45\x12\x18\xc1\x38\x6c\xd7\x60\
\xb5\x99\x90\xc8\x26\xa8\xb5\x0a\xb8\xa2\xbc\x08\xbb\xa6\x6d\xd0\
\x68\x55\xd0\xd6\xde\xe4\x5d\xbf\xb8\x1a\xf2\xf9\xbd\x60\xd8\xd4\
\x80\x46\xaf\x64\xc9\xba\x3c\x4e\x36\xfa\x21\x8c\x7e\x28\x1c\xa2\
\x2c\xd0\x3e\x2c\x79\x3a\x23\x12\x89\x80\x52\xa5\x88\x88\xa5\x02\
\x8b\xc9\x62\xca\x77\xb9\x5c\x5f\x2b\x28\x28\x78\xf2\x9e\x05\x7c\
\xe9\x4b\x5f\xca\x7d\xa4\x3b\xd0\xd6\xd1\x12\x95\x9d\x93\x86\xad\
\x36\x2b\x4b\xfe\x92\x62\x0d\xe4\x13\xe3\x20\x3b\x27\x06\x9f\xdf\
\x07\x6a\x9d\x02\x36\x30\xe2\xfe\x80\x0f\x4c\x16\x23\xb3\xbc\xba\
\x10\x16\x88\x38\x0c\x87\x3f\x02\x08\xcc\xd8\x5c\x34\x1c\x0e\xb3\
\xa5\x44\xc0\x4b\x1d\x9e\x9d\x9f\x9a\x41\x21\xc7\x01\xe0\x08\xbe\
\xf7\x9d\xc5\xc5\xc5\xdb\xf7\xdc\x85\x3e\xf0\x81\x0f\x3c\x65\x67\
\x67\xe7\xc3\xb1\x58\xec\xef\xb4\xe1\x03\x81\x51\x2a\x9c\x9c\x9c\
\xe8\xab\xaa\x29\x73\xab\x35\xaa\xd8\xda\xfa\x2a\xfc\x93\x3b\x0c\
\x3e\x9f\x17\xf4\x18\x79\xc3\xa6\x96\x25\x36\x35\x2b\x0f\xe3\xe5\
\xd5\xe9\xf4\xba\xa6\x50\x28\x74\x32\xb1\x7e\x8c\x3b\x04\xf4\xb3\
\xbd\xb3\x15\xe3\xf0\x47\x7d\x5a\xad\xb2\x95\x5e\xc7\xf3\x8e\xa0\
\x88\x0f\x00\xc0\x2d\xd7\x35\x07\xde\xf1\x8e\x77\x64\x5c\xba\x74\
\xe9\x5d\xb8\xe1\x1f\x49\xc4\x43\x01\x00\x0e\x78\x3c\x9e\x53\x55\
\x35\xe5\xbe\xc9\x29\x39\xac\xac\x2d\xb2\x77\x46\x6b\x50\x41\x80\
\x3a\xcb\xf4\xf9\xf0\xc2\xe2\xac\x30\x49\xf8\x21\x5c\x77\x00\xff\
\xfe\x7d\x68\xa4\x0f\x66\xe7\xa6\xc2\x62\x99\x50\xe5\xf5\x7a\xf3\
\xe2\xe4\x7f\x17\x08\x04\x9e\x93\x92\x49\xac\x50\x28\x32\x30\x43\
\xef\xf2\xf9\x7c\x3f\x13\x49\x84\xc3\xbd\x7d\x5d\x31\xec\xe9\xb0\
\xbd\xbb\x81\x35\xbf\x0d\x9b\x5b\x06\x10\x49\xcf\x5e\x8c\x13\xff\
\x1b\x46\xf5\x6d\xf8\xf7\x76\xa3\xd1\xf8\x49\xb3\xd9\xfc\x31\x0e\
\x6f\x54\xbd\xb8\xbc\x30\x9a\x24\xee\x8b\x3a\x9d\xee\xae\x94\x5a\
\x89\x8f\x7d\xec\x63\x77\x9e\x39\x73\xe6\xf9\x33\x33\x53\xbf\xee\
\xec\x6e\x63\xbc\xf1\xf2\x71\xba\x1d\x18\x7d\x39\xa3\x54\x5f\xa5\
\xb2\x38\xec\xf7\xfb\xdf\x9a\x58\xf3\x89\x4f\x7c\x22\xe7\xa7\x3f\
\xfd\xe9\x33\x6c\x36\xdb\x47\xe2\x59\xfc\x3b\xd6\xfb\x6b\x6f\x88\
\x17\xa2\x41\x93\x91\x91\xf1\x84\x41\xce\xe0\x4b\xfb\x06\xba\xd9\
\x81\x44\xad\x92\xba\x4d\xdf\x40\x17\x20\xc9\x7c\x24\xf9\x27\x3e\
\x9f\x7f\x67\xd2\x9a\xdb\x5e\xf9\xca\x57\x3e\xe5\xca\x95\x2b\xef\
\xc1\xac\xfc\xc1\xe9\x74\x3e\xff\x86\xb9\x51\xf2\x40\x88\x9c\xa6\
\xb6\xa6\xb7\x89\xa5\x42\xc6\xe3\x75\xb3\x7d\x9e\x5a\x64\x7b\x57\
\x0b\x33\x3f\x3f\xdb\xe2\x76\xbb\xbf\x4b\x2d\x39\x69\x4d\x46\x66\
\x66\x66\x6e\x51\x51\xde\xdb\xf1\x8e\xbd\xa5\xb1\xb1\xf1\x85\x6f\
\x7b\xdb\xdb\xee\x4e\xb9\x00\xf2\xf1\x09\x2f\xd4\xdc\xda\xf0\x33\
\xb4\x06\x31\xa7\xcb\x4e\x2d\x93\x9d\xd2\x7d\xfd\xdd\x4c\x6b\x7b\
\xf3\x45\xab\xd5\xfa\xf9\x4f\x7f\xfa\xd3\x4f\x89\x7b\xa3\x9b\x13\
\x5e\x07\x5b\x34\x4d\xef\x5f\xbd\xe1\x0d\x6f\x48\x58\x90\x94\xbb\
\x51\x42\x36\xd9\x87\xf6\xce\x66\xab\xdf\x4f\xfd\x7e\x97\xb5\x05\
\x1a\x8d\x0a\x04\x67\x79\xd8\x56\x47\x23\x03\x43\x7d\x3d\x7f\xfa\
\xd3\x9f\x9e\x4a\x99\x4a\x76\xa3\x25\x65\x45\xc0\xe3\x8f\x05\x71\
\x7a\xcb\x8e\x1e\xfd\xcb\xb3\xe9\xf5\x94\xba\xd1\x03\x07\x0e\x3c\
\xa7\xb2\xa6\xec\xcb\x18\xfd\xed\x8d\x4d\x43\x84\xac\x00\x09\x08\
\xe3\x94\xa5\xfb\x30\x35\x2d\x67\x27\xec\x18\x67\x24\x82\x83\x6f\
\xa1\xa0\xe0\xcc\x07\x71\xb6\x3c\x81\xd6\x1e\x3d\x7a\xf4\x9e\x8a\
\xaa\x52\xc0\xfb\x81\xfe\x47\x19\x2b\x2b\x2f\x76\x16\x14\xe7\x7d\
\x6a\xdf\xdd\x68\x5d\x43\x95\xb1\xb9\xa5\x01\x1e\x02\x48\xb2\x27\
\x34\x39\x3d\xe1\x23\x92\x54\xfb\x64\x17\x68\x02\x2f\xaf\x2e\x01\
\x97\xc7\x41\x8b\xa1\x64\x5b\x2a\x7b\x2f\xb6\x37\x19\xa1\x58\x10\
\xac\xad\xaf\x8e\x11\x71\xc2\xe8\xd8\x10\x1a\x45\x37\x6b\xe8\x5c\
\x2e\x27\xb4\x77\xb6\x86\x50\x68\x2d\x39\x80\x7d\x73\xa3\x8d\xcd\
\xb5\x10\x8d\x45\x81\x7c\x0d\x91\x23\x5b\x40\x03\x2a\x10\xf4\xb3\
\xaf\xb9\xdc\x4e\xd6\x90\x25\xc8\xdb\xed\x36\x68\x68\xac\x09\x08\
\x84\x82\x28\x91\xdb\x35\x6e\xa2\x9d\xd0\xd3\xfb\xe4\x79\x12\xfb\
\xb0\x20\x61\x34\xad\xc9\x0c\x92\xe5\xa0\xf7\xe6\x16\x66\x23\xc5\
\xa5\x05\xda\x53\xf9\xa7\xf6\xc7\x8d\xd6\x35\x56\xb3\x1b\x2b\xd4\
\x97\x40\xa5\xbd\x42\xc6\x2c\xd9\x55\xb2\xc4\xec\x48\x8c\xcc\xd9\
\x85\xcb\xeb\xd1\xa6\x96\x3a\x17\x5a\x86\x6a\x8c\xe6\xe5\xfe\xc1\
\xde\x08\x45\x96\x88\xef\xee\x6e\xb1\x24\x37\x36\x75\x2c\x69\xfd\
\x86\x86\x85\xce\xa0\x66\x41\x22\x68\x7a\x53\xf9\x2d\x2c\xce\x31\
\x85\x45\x79\xae\xbc\xbc\xbc\xd7\x5c\xb7\x1b\xad\xa9\xab\x64\xeb\
\x34\x39\xf2\x09\x3f\x8f\xbe\x86\x2d\x01\xbd\x41\xc7\x60\xc7\x09\
\x0f\x8f\x0c\x2e\xa1\x0d\x38\x99\xb0\x0c\xf3\x0b\xf3\x1d\x58\x2a\
\x9e\xf9\xc5\xb9\x88\xd9\x6c\x44\x8f\x44\x19\x44\xf8\x59\x50\x06\
\x48\x14\x09\x20\xbb\xcd\x66\x65\x7e\x61\x36\x5a\x5d\x5b\x6e\xd1\
\xea\xb5\xfb\xe3\x46\x2b\xaa\xca\xec\xe8\x73\xe0\x61\x10\xec\xe8\
\x6a\xd3\x73\xf9\x9c\x71\x85\xe2\x72\x43\x9c\xf8\xe1\x68\x34\xfa\
\x79\xf4\x47\x5f\xa6\x01\x86\xa4\x8f\x4b\x65\xe2\xb1\xee\x9e\x8e\
\xd5\xea\x9a\x0a\x3b\x0a\x62\xfe\x7d\x07\x86\xc1\xeb\x73\xb3\xb6\
\x9c\x1a\x00\xee\x15\x19\x1a\xee\x4f\x99\x1b\x7d\x20\x88\xf8\x8f\
\xf0\xef\x33\x13\x13\x5a\x2e\x97\x3f\x0d\x87\xd8\x77\x90\xd0\xdf\
\xe8\xfd\x07\xae\x21\x11\x0c\xc3\xd0\xff\x10\xb1\x8a\xca\x32\xff\
\xf2\xca\x62\x6a\xdd\x28\x00\xfc\x06\x49\x7f\x0d\x0f\x7b\x5f\x30\
\x18\x7c\x11\x00\x3c\xa8\x63\xf4\xf7\xf7\xdf\xaa\x56\xab\x9f\x85\
\xa5\xf5\x3e\xfc\xec\x77\x28\x2b\x09\xff\x43\x73\x80\x2f\xe0\x46\
\x5a\xdb\x9b\x54\x4e\xa7\x7d\xcf\x6e\x34\xe5\x50\x2a\x95\x1f\x44\
\x37\xfa\xd1\xfc\xc2\x33\x21\xb1\x44\x78\xcd\x8d\xa2\xc0\x2f\xed\
\xc5\x8d\xa6\x1c\xef\x7d\xef\x7b\xb3\x7f\xf8\xc3\x1f\x3e\x73\x72\
\x52\xfe\xc3\x24\x37\xfa\xba\xff\xa6\x6f\xe6\x6e\x7e\xed\x6b\x5f\
\x9b\x8d\x99\x78\x37\x46\xfd\x0b\x00\xf0\x84\xff\xcb\x6f\xa7\xd3\
\x02\xd2\x02\xd2\x02\xd2\x02\xd2\x02\xd2\x02\xfe\x05\x1f\xeb\x8f\
\x04\xe7\x41\x85\x61\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x09\xce\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x09\x95\x49\x44\x41\x54\x78\xda\xed\x98\x03\x90\x23\xdd\
\xd7\x87\xd7\xf6\x7e\x6b\xdb\xb6\x6d\xdb\xb6\xed\xad\xaf\x16\x63\
\xae\x6d\xe3\x5d\x5b\x19\xcf\xee\x30\xc9\xd8\xf6\x04\x63\x05\xe7\
\x7f\x4e\x57\xa6\x77\x7b\x3a\xc9\xe4\x35\xa7\xea\xa9\x9b\x9b\x49\
\x3a\xcf\xef\xf4\xb9\xdd\x37\x29\x31\x6e\xdc\xb8\xbf\x35\xff\x05\
\xf8\x23\xf8\x2f\xc0\xa3\x47\x8f\x4a\xff\x31\xf3\x9f\x8f\x01\xf2\
\x0f\x76\x3e\x7a\xfc\x50\x7e\xff\xd1\xfd\x79\xbf\xf7\xfc\x37\x0f\
\x00\x00\xa5\xc4\x3e\xe2\xec\xf0\xc8\x30\xf8\xe6\xe6\x1a\xfe\x7b\
\xcf\x7f\xb3\x00\xc6\x96\xc6\x9d\x6d\x4e\x5a\xb9\x58\x59\x9b\x2b\
\x7c\x7c\xc5\x90\x97\x97\x0b\x6f\xde\xbe\x82\xdf\x7b\x6e\x61\x65\
\x9a\x6f\x73\xd2\xf2\x83\x95\x95\x51\x33\x6d\x5e\x89\x3d\x5b\x6c\
\x96\xf4\xe8\xfa\x59\xde\xa3\x87\x87\xb4\x43\x87\x55\xd0\xab\x57\
\x59\xad\x01\x6c\x4f\x58\x8a\x93\x53\x92\xf1\xc0\x79\x70\xe5\xea\
\x25\x08\x8b\x0c\xa1\xf1\x77\x9f\xd3\x5f\x44\x64\x38\xd8\x9c\xb0\
\x7c\x5a\xd4\x29\x76\x60\xd3\x23\xa9\x3d\xba\x40\xd4\x86\x0d\x20\
\x39\x78\x10\xd2\x86\x0f\xcf\x95\x75\xeb\x76\x9e\x27\x6f\x62\x62\
\x52\xfb\xfc\x85\x33\x05\x6a\xb5\x1a\x64\x32\x09\x1c\x39\xf6\xff\
\xf0\xfa\xfd\x0b\xb0\xb1\xb5\x82\xec\xec\xac\x5f\x39\xcf\xd6\x3b\
\xcf\xcd\xcd\x01\xfa\x5c\x4b\x2b\xb3\x3c\xce\x3a\xdc\xb9\xea\xc3\
\xcb\x75\xab\xe1\xcd\xd6\xcd\xe0\x67\x62\x02\xb1\x67\xce\x40\x92\
\x8d\x0d\xc8\x3a\x77\xce\xe5\x05\x30\x37\x3f\xde\xee\xee\xfd\xdb\
\xd9\x4a\xa5\x12\xd2\xd2\x64\xf0\xec\xf9\x33\x58\xb9\x7a\x39\x38\
\x38\xda\xd3\x07\xe8\x9d\x53\x2b\xfc\x8a\x39\x1b\xc0\xf6\xa4\x95\
\xea\xc8\x91\x23\xe5\xc8\xe7\xe2\xb1\x1d\x3f\x9d\x3f\x65\xcd\xfc\
\xff\xc6\x85\x33\x70\xfd\xe8\x61\x10\x19\x19\x81\xbf\xa9\x29\x06\
\xe8\xa4\xe4\xc8\xe3\x5f\xc9\x63\x26\xc7\x06\xbd\x7e\xf3\x32\x4b\
\xa1\x28\xa0\x33\xc0\x54\x31\x3f\x3f\x9f\x41\x2e\x97\x72\xe6\x14\
\x30\x27\x27\x9b\x79\x5c\x50\xc0\x9f\xa7\xa7\xcb\x51\xea\xe7\xcd\
\x55\x2a\x15\x5c\xbe\x7a\x29\x17\x03\x34\xbc\x60\xb4\xf5\x9a\x8d\
\x8d\x19\x44\x47\x47\x03\x15\xd4\xcf\xcf\x0f\x4e\x5b\x9b\xc3\xbd\
\xfd\x7b\xc0\x6f\xde\x5c\x48\xea\xde\x3e\xaf\x68\x80\xd2\x87\x0f\
\x1f\x98\x6d\xef\x60\x97\x43\x07\xc4\x00\x3c\x28\x04\x17\x19\x89\
\xeb\x40\xce\x81\x04\x8b\xc0\x7b\x0d\x89\xde\x7f\x78\x27\xfb\xe2\
\xae\x69\xc6\x97\x8c\x37\x2b\x2f\x9d\x31\x86\xf3\xe7\x6c\xc1\xc9\
\xc9\x11\x6e\xde\xbc\x09\x07\x0f\x1e\x00\x2b\xb3\xe3\x90\xdc\xad\
\xbd\x3a\x70\x62\xb5\x85\x3f\xca\x97\x42\x2a\x6e\xdb\xb1\x65\x97\
\xbb\xc7\xb7\x02\x5a\x60\xdf\xc5\x0d\x91\x26\x21\x7d\xa4\xe9\x41\
\xce\xa2\x50\x28\xe0\xdd\x35\xa3\x7c\xaf\xe5\x65\x73\x33\x9f\xce\
\x03\x95\xdd\x6a\x08\xbe\x33\x1b\xac\xad\x8c\x18\xf9\x8b\x17\x2f\
\x60\x80\x63\x10\x3a\xa1\xda\x22\xf2\xe6\x54\x1f\xa9\xb5\x66\xed\
\xaa\xd3\x7e\xfe\xbe\x6a\xea\x47\x92\x77\xfd\xea\x0c\x02\xbb\xcf\
\x1c\xbc\xbc\x3d\x98\x10\x5f\xbf\xb9\x80\xb7\xd0\x83\x02\x68\x1e\
\x7b\xe2\xe8\x0a\x76\xf6\x5f\x58\xe8\xb9\x8c\x0c\x46\x92\xae\xf5\
\x34\x67\x1f\x6b\x7b\x5d\x5a\x80\x1d\x08\xd7\x54\x83\x42\x79\xc5\
\xb3\x61\x90\x71\xa5\x07\xbc\x38\xbd\x05\x6c\x6d\x2d\xc1\xda\xe2\
\x38\x3c\xdb\x3c\xd5\x9c\xf5\x2e\xec\x7d\xa4\x2c\xd2\x68\xf5\xda\
\x55\x4f\x22\x22\x23\xa8\x97\x75\x54\x5b\x7b\x65\xe9\xc3\xbf\x93\
\xce\x23\x33\x53\x27\xec\x6b\x92\x85\xef\xc1\x67\x7d\xcd\xef\xf2\
\x4f\x87\x32\xf2\x92\x1b\x93\xc1\xce\x7a\x31\x58\x98\x1b\xc1\xed\
\xfd\x33\xf6\x91\xb3\xb6\x00\xe5\x91\x96\x5b\xb6\x6e\x72\x4a\x4c\
\x4c\xa0\xc5\xca\x9c\x01\x27\x67\x7b\xaa\x38\x23\xee\xe6\xfe\x95\
\xad\x98\x50\xe8\xc5\xc8\x62\xbb\xe1\x63\x6f\x46\x80\x1e\xdb\x3b\
\x08\x18\x44\x22\x6f\x94\xcb\x30\x98\x14\xd1\x07\x10\xaf\xab\x81\
\xf2\xf3\x41\x25\x58\xc5\x91\x8f\xb1\x1d\x09\x9f\x36\xb5\xcf\xe3\
\xca\xf3\x03\x54\x44\xda\xed\xdc\xb5\xdd\x4f\x2a\xa5\xab\x4d\x26\
\xaf\xe2\x9c\x4a\xf3\x2b\xcb\x11\xca\xca\xfa\x91\x4c\xbd\xa4\x8a\
\x3f\xea\x90\x9f\xc4\xc8\x0b\x97\x95\xce\xf6\x5a\x52\x62\x24\xb9\
\xea\x0b\x50\x19\xe9\xbc\x6f\xff\x9e\x68\xcd\x81\x0d\x14\x27\x61\
\xfd\xb2\x54\x0c\x5d\x70\xe5\x57\x72\xe5\x4f\x8c\x62\xe4\xdd\x17\
\x95\x18\xa5\x67\x2f\xc4\x5e\x81\xaa\x20\x3d\xf6\x1f\xdc\x27\xa1\
\x2b\x10\x49\x25\x25\xc5\xff\xb0\xd0\x04\x04\xb5\x07\x8b\x50\xe4\
\xc5\x4a\x7b\x78\xba\xe3\x73\x76\x78\xc3\xb1\x03\xb1\x58\x08\x9e\
\x5e\xee\x38\x8a\xa8\x15\xe9\x31\x3d\xcf\x9b\x3b\x3f\x3a\x45\x0b\
\x56\xa7\xbc\xd7\xd2\xd2\xb9\x1f\x66\x97\x98\x40\x05\x36\x24\x40\
\x35\xa4\xef\xbe\x83\x7b\xe9\x2e\x8c\x55\xce\x60\xab\xae\xab\xe2\
\xdc\x0a\x67\xf1\xa0\x0b\x81\x2e\x24\x3e\x9f\x80\x59\xb0\xcf\x16\
\x70\xe5\xaf\x4f\x64\xe4\x3d\x97\x95\x57\x9b\x0c\x29\xb1\x86\x0a\
\x4b\x7e\x86\x04\xa8\x81\x0c\xdc\xbd\x77\xb7\x12\x40\xcd\x88\xc7\
\xc4\x44\x62\xd5\xd9\x6a\x93\x38\x56\xda\x8d\x2a\x4c\xe2\x85\x95\
\x64\xab\x4b\x73\x1f\x1f\x51\xa1\x24\xce\x3d\x68\x4e\x5b\x04\x0e\
\x52\xdf\xcf\x7a\xe5\xc5\x6b\xaa\x82\xc5\x96\x19\xe9\xe8\xd3\x1b\
\xa9\x6a\x48\x80\xd2\x48\xcd\x0a\x15\x2a\x8c\xda\x7f\x68\xbf\x1a\
\xff\x8a\xf4\x3a\xbf\xbf\xf5\x54\x9a\x27\x6c\xb8\xfc\x68\x46\x3e\
\xcd\xf7\x13\x6c\xdb\xb5\x3d\x0b\x9d\x06\x20\xd5\x0d\x0d\x50\xbb\
\x69\xd3\xa6\x73\x8c\x4d\x8c\x94\x6a\xb5\x4a\xa7\x3c\xb5\x8b\x61\
\xe2\xb9\xb4\x59\xe3\x20\xf3\xfb\xf2\x5d\xfe\xcb\x0a\x8d\x7c\x77\
\x94\x9f\xc0\xca\x4b\x84\x6f\x81\x0a\xb8\x65\xfb\xd6\x02\x74\x1a\
\x42\x9d\x61\x68\x80\xff\xeb\xd4\xa5\xcb\xda\x93\xa7\x4e\x29\x54\
\x2a\x25\x23\x1f\x1e\x1e\x0a\xcc\x35\x5d\xec\xcd\xb6\x8c\xa6\x55\
\x68\x77\x48\xb0\x2d\x42\xf7\x0a\x5f\xcd\x97\x13\x2e\x79\xb8\x59\
\xcb\x43\x79\x01\xf8\x6e\xa8\xa5\x57\x5e\x2a\x7a\xc7\xbc\x07\x00\
\x30\xc0\x16\x28\x53\xa6\xcc\x28\xda\x1d\x90\x9f\x21\x01\xea\xf6\
\xe9\xd7\x67\xff\x8d\x9b\x37\x14\x4a\xa5\x82\x5f\x79\xb6\xea\x04\
\xbf\xe2\xda\xa4\x09\xbe\xfc\x72\xad\xf2\x32\xf1\x7b\xf6\xf5\x74\
\x06\x0e\x1c\x3a\xa8\xaa\x53\xa7\xce\x6c\x2a\xac\x21\x01\xca\x20\
\x0d\x06\x0f\x1d\x6c\xf6\xec\xc5\x33\xa5\x42\xc1\x04\xd0\xdb\xef\
\x5c\x79\xbe\x38\xbb\xe5\xf6\x79\x03\x7e\x9b\xeb\x6a\x91\x1f\x0f\
\x31\x27\xc7\x80\x78\x6d\x35\x94\xff\x40\xdb\x69\xfa\x82\x43\x9f\
\x8b\x8f\x0b\xc0\xd4\xdc\x4c\xd1\xba\x6d\xeb\xd5\xe8\x55\xc7\x90\
\x00\x65\x91\xc6\xc3\x47\x8d\xb8\x20\xb0\x17\xa8\xe8\x00\x54\x7d\
\x1d\x3d\xaf\xad\xcf\x49\x9a\x23\x4f\x42\x44\xc2\xd3\xd5\x90\xe9\
\x68\x4c\x0b\x56\xab\x7c\x82\xdb\x73\x48\x48\x48\x80\xd8\xd8\x58\
\x1c\xe3\x41\x2a\x95\x30\x85\x38\x77\xe1\x9c\xa2\x6b\xd7\xae\x7b\
\xd0\xab\x1e\x15\x58\x67\x00\xcd\x5d\xb8\x1c\xd2\x7c\xcc\xd8\x31\
\xf7\xbd\x85\xde\x6a\xfa\x60\x6e\xf5\xf5\x57\x5e\x9b\x38\x15\x21\
\x3b\x5e\x04\x49\xaf\x36\x02\xfd\xa9\x33\x63\x20\xf3\xfe\x78\x56\
\x5e\x84\x6d\xe3\xf5\xfc\x02\xae\x21\x1f\x08\x0c\x0c\x84\x88\x88\
\x70\x88\x8f\x8f\x87\xd4\xd4\x54\xa6\x30\xf7\x1e\xdc\x57\xf4\xea\
\xd3\xcb\x04\xbd\x1a\x1a\x12\xa0\x02\xd2\x7a\xc2\xe4\x49\xef\x43\
\x42\x43\x48\x86\x39\x95\xb4\x39\x13\x89\x85\x9a\xbb\xa7\x07\xb3\
\x68\x1d\x9d\xec\x39\x8b\xd5\x5b\xe8\x85\x1b\x3e\x07\xc4\x91\xc1\
\xdf\xdf\x8f\xe4\x99\x85\x1f\xf6\x72\x1f\xc8\xdd\x2e\x81\x22\x33\
\x11\x72\x62\x9c\x40\x6e\x7f\x8c\x91\xf7\x5a\x59\x09\x9e\x9d\x3a\
\x08\xae\xae\xae\xf0\xf0\xd1\x03\xb8\x77\xff\x1e\xfc\xf4\xe4\x31\
\x73\xcf\x49\x4a\x4a\xc4\xc2\x65\xc1\xcb\x57\xaf\x94\xbd\xfb\xf7\
\x3f\x8f\x5e\x2d\x34\x7e\xa5\xc9\x55\x57\x80\x4a\x48\x87\x99\x73\
\x66\xbb\xc5\xc6\xc5\x92\x58\x31\xd5\xe7\xf5\x3c\xa7\xf2\xf4\x75\
\x54\x9a\x92\x00\x81\x8f\xd7\x42\x76\x94\x1d\xa4\x8b\x6f\x40\xca\
\xfb\x9d\x10\xf7\x64\x05\xf8\xef\x6f\x05\x5f\x6e\x98\x81\x40\x20\
\x00\x67\x67\x27\x70\x73\x73\x03\xa1\x50\x08\x21\x21\xc1\x40\xbb\
\x60\x89\x44\xc2\x1c\xff\x8b\x40\xa0\xee\x3f\x78\xe0\x7d\xda\x60\
\x22\x55\x35\x5d\x52\xa6\x68\x90\xc2\xbb\x70\x65\xa4\xeb\xbc\x45\
\x0b\x83\xe8\xe7\x14\x92\xd4\x17\x80\xdf\x3a\x5c\x79\x1a\xad\x8e\
\x6c\x85\xca\x15\xcb\x82\xef\xcd\xa5\x90\xf0\x7a\x23\x84\x3e\x58\
\x07\x1e\xb7\x76\xc0\xe7\xc7\x17\xe1\xfd\xfb\x77\xe0\xe2\xe2\x8c\
\x67\xcb\x1f\x62\x62\x62\xb0\x6d\x52\x80\x76\xc0\x32\x99\x8c\xc0\
\xcf\xca\xc1\xb3\xef\x0e\x83\x86\x0e\x7e\x8b\x5e\x5d\x34\x37\xb3\
\xf2\x6c\x00\x74\x2e\x1a\x80\x12\xf6\x5a\xb8\x74\x71\xbc\x5c\x2e\
\x67\x64\x69\x17\xea\xec\xe2\xa8\xd9\x2a\xd8\x83\xd8\x47\xc8\x5e\
\xef\x1d\x9d\x1c\x08\x6a\x1d\x02\x45\x7c\x29\x00\x05\xa1\x6f\x56\
\xf8\x3e\x27\x18\x3f\x7a\x28\xac\x98\x33\x0a\x5e\xdc\x3b\x07\xee\
\x6e\xdf\x98\x2f\xe4\x51\x51\x51\xd8\x22\x49\x78\xec\x74\xba\xe2\
\x50\xab\x60\xab\x66\x6a\xa0\xbd\x17\x91\xce\x04\xf0\xc5\xd7\x8f\
\x1e\x37\xd6\x15\xbd\xba\x69\x02\x94\xe5\xca\x73\x03\x54\x43\xfa\
\x2d\x5a\xba\x44\x4e\x07\xc3\x83\x23\x86\x9f\x01\x6e\xfb\x10\x0a\
\x50\x2a\x09\x25\xfd\xca\x40\x23\x41\xcf\xb3\x23\xbd\x16\x03\xd3\
\xfb\xf1\x58\x44\x2e\x1e\x9b\xc8\x61\x9e\x0f\x0b\x0f\x87\xc9\xd3\
\xa7\xf9\xd2\x0e\x59\xdf\x7e\xe8\xc7\x8d\xdc\xe0\xf9\x8b\x16\xe5\
\xd2\x01\x48\xfa\xb7\x0b\x40\xa8\x40\xad\x26\xd4\x3c\xf0\x7f\x1a\
\x98\x90\xec\xfb\xe2\xf0\x8a\x34\x63\xee\x9c\xc8\xe2\x36\x74\xec\
\x97\x79\x64\xc4\xec\xf9\xf3\x94\x24\x41\xfd\xff\x2b\xd6\x00\x27\
\x00\xc1\x4a\x72\x82\xa8\x38\xf2\x45\xcf\x12\x5d\x4e\x67\xcd\x9b\
\x9b\x82\x5e\xfd\xf5\x6d\xe8\xd8\x8d\x5c\xf9\xf2\xe5\x27\xcd\x5f\
\xbc\x48\x4d\x07\x20\x79\x83\xaf\x42\x9c\x00\xfa\x42\x70\x83\xf0\
\xe5\x09\x05\xf3\x3e\x7a\x7f\x5a\x5a\x1a\xcc\x98\x33\x27\x93\xb3\
\xa1\xd3\x13\xa0\x4e\xf5\xea\xd5\xe7\xae\x5e\xbf\x4e\x29\x95\x4b\
\xc0\x37\x50\xc8\xc3\x2f\x48\x84\x88\xc1\x3f\x58\x0c\x01\x44\x08\
\x8d\x3e\x38\xfa\x42\x20\x11\xea\x07\x41\x44\x98\x3f\x04\x13\xe1\
\x01\x10\x42\x44\x04\x42\x68\x44\x10\x84\x46\x06\x41\x58\x64\x30\
\x84\x47\x11\x21\x10\x11\x1d\xca\x10\x19\x1d\x06\x91\x31\x61\x10\
\x15\x13\x0e\x51\xb1\xe1\x10\x8d\xc4\xc4\x45\x81\x0c\x3d\xa6\xce\
\x9a\x95\x8b\x6e\x23\xf5\x6d\xe8\x0a\xf7\x41\x75\xab\x55\xab\xb6\
\x60\xe5\x9a\x35\xca\x54\x69\x8a\xce\x00\xfe\x6c\x00\x1a\x0d\x09\
\x10\x68\x78\x00\x22\x96\x46\x0c\x11\x17\x09\x52\x79\x2a\x2e\xe2\
\xe9\xf9\xe8\x36\x86\x3a\xa4\xd8\x00\x78\x06\xe6\x2c\x5d\xb5\x52\
\x29\xc1\x00\xa9\xd2\x64\x60\x90\x24\x43\x0a\x42\x23\xcd\xb5\x93\
\x02\x12\x84\x19\x09\x19\x91\xca\x41\x4a\xc8\x09\x09\xc8\x70\x94\
\xd1\x98\x26\xa5\x11\x91\xd2\x63\x16\xb9\x66\xa4\xf7\x8d\x9f\x32\
\xb5\x80\xd6\x66\x71\x67\xa0\x34\xf5\x58\xad\x3a\x75\xc6\xf7\x1b\
\x38\x10\xfe\x4a\xf4\x1f\x34\x48\x8d\x6e\xdd\x91\x6a\x7a\xd6\x00\
\xfb\xa3\x56\x03\xa4\x17\x42\xcf\x4e\x43\x66\xfc\x49\x4c\x47\xa6\
\x22\xa3\x35\x37\xb1\xba\x48\x39\xf2\x2c\xee\x4b\x3d\xbd\xa8\x0a\
\x52\x93\x7a\xee\x2f\x40\x0d\xcd\x16\xa7\x6c\x71\x3f\xab\xfc\x8d\
\xf9\x2f\xc0\x7f\x01\xfe\x0b\xf0\x3f\xe9\x65\x26\x7d\x57\x89\xd5\
\x05\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\x14\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x03\x00\x00\x00\x60\xdc\x09\xb5\
\x00\x00\x01\x29\x50\x4c\x54\x45\xff\xff\xff\x00\x00\x00\x24\x24\
\x24\x00\x00\x00\x00\x00\x00\x2e\x2e\x2e\x3b\x3b\x3b\x00\x00\x00\
\x1e\x1e\x1e\x00\x00\x00\x2b\x2b\x2b\x00\x00\x00\x24\x24\x24\x2e\
\x2e\x2e\xd8\xd8\xd8\xd9\xd9\xd9\xbf\xbf\xbf\xf9\xf9\xf9\xd9\xd9\
\xd9\xbc\xbc\xbc\xbe\xbe\xbe\xe0\xe0\xe0\xde\xde\xde\xe6\xe6\xe6\
\xdf\xdf\xdf\xe0\xe0\xe0\xe0\xe0\xe0\xe1\xe1\xe1\xff\xff\xff\xfd\
\xfd\xfd\xff\xff\xff\xff\xff\xff\xff\xff\xff\xbb\xbe\xb7\xbc\xbf\
\xb8\xbc\xbf\xb9\xbe\xc0\xb9\x98\x9a\x96\x9a\x9b\x97\xa3\xa4\xa0\
\x89\x8b\x86\x8c\x8e\x88\x8e\x90\x8b\x90\x92\x8d\x92\x95\x8f\x95\
\x97\x91\x97\x99\x94\x99\x9c\x96\x9c\x9e\x98\x9e\xa0\x9b\xa0\xa3\
\x9d\xa3\xa5\x9f\xa5\xa7\xa1\xa7\xaa\xa4\xaa\xac\xa6\xac\xaf\xa8\
\xae\xb1\xaa\xb1\xb3\xad\xb3\xb6\xaf\xb5\xb8\xb1\xb7\xba\xb4\xba\
\xbd\xb6\xd4\xd8\xd0\xd4\xd8\xd1\xd6\xda\xd2\xd7\xda\xd3\xd8\xdc\
\xd5\xda\xdd\xd6\xdb\xde\xd7\xdc\xdf\xd9\xdd\xe0\xda\xdf\xe1\xdb\
\xdf\xe2\xdc\xe1\xe3\xde\xe1\xe4\xdf\xe4\xe5\xe1\xe4\xe6\xe1\xe6\
\xe7\xe4\xe6\xe8\xe4\xe8\xea\xe6\xe9\xea\xe6\xea\xec\xe9\xeb\xec\
\xe9\xed\xee\xeb\xee\xee\xec\xef\xf0\xed\xf1\xf2\xf0\xf3\xf4\xf2\
\xf6\xf7\xf5\xf8\xf9\xf7\xfa\xfb\xfa\xfb\xfb\xfb\xfc\xfc\xfb\xfc\
\xfc\xfc\xfc\xfd\xfc\xfd\xfd\xfc\xfd\xfd\xfd\xfe\xfe\xfe\xff\xff\
\xff\x93\x20\x9e\x01\x00\x00\x00\x28\x74\x52\x4e\x53\x00\x07\x07\
\x09\x0a\x0b\x0d\x0f\x11\x12\x12\x13\x15\x16\x1a\x1b\x2c\x2c\x2f\
\x35\x37\x3a\x3d\x46\x48\x49\x4b\x4c\x65\x77\x7b\x7c\x7f\xb7\xb7\
\xb7\xb7\xc9\xc9\xda\x01\x80\x91\xd9\x00\x00\x01\x72\x49\x44\x41\
\x54\x78\xda\xed\x92\x05\x8e\x1b\x51\x14\x04\x97\x4d\x61\xe6\x09\
\x27\xcb\x64\xb6\x27\x6c\x76\x98\x39\xee\xfb\x1f\x22\xaf\xf4\x27\
\xb8\xff\x9d\xc0\x6e\x61\x49\x55\xc2\x9e\x9b\xc2\xcd\x36\x9f\x2b\
\x84\xe5\xe6\x23\x18\xf1\x4b\xb9\xa5\x45\xb6\x94\x2b\xcd\x1f\xc0\
\xc8\x72\xb9\xeb\x69\xd8\x8d\x7c\x0e\xbc\x96\xe1\x4d\x30\xb2\xc2\
\x52\xaa\xb0\xf4\xc4\xe1\x03\x18\x0b\x96\x53\xbd\x79\xf5\xe2\xe9\
\x48\xe9\xe5\x93\x01\xbb\xfd\xe1\xf8\x99\xe1\xa9\x78\xb0\xd2\x16\
\xfe\x40\xed\xe4\x5c\x40\xfc\xe7\x86\x17\x9c\xa0\x25\xfc\x9e\x5a\
\xc9\xf9\x80\xf8\x2f\x0d\x2f\x3a\x41\x53\xf8\x1d\x35\x09\x40\xfc\
\xd7\x86\x5e\xd0\x10\xfe\x63\x35\x08\x40\xfc\xb7\x86\x5e\x50\x17\
\xfe\x43\xd5\x09\x40\xfc\x77\x86\x5e\x50\x13\xfe\x7d\xd5\x08\x40\
\xfc\xf7\x86\x5e\x50\x15\xfe\x3d\x55\x09\x40\xfc\x0f\x86\x5e\x50\
\x11\xfe\x03\x55\x08\x40\xfc\x8f\x86\x5e\x50\x16\xfe\x23\x95\x09\
\x40\xfc\x4f\x86\x5e\xb0\x2f\xfc\x27\xda\x27\x00\xf1\xbf\x19\x7a\
\xc1\x9e\xf0\xbb\xda\x23\x00\xf1\x65\xe8\x05\xbb\xc2\xef\x6b\x97\
\x00\xc4\x97\xa1\x17\xec\x08\x7f\xa8\x1d\x02\x10\x7f\x62\xe8\x05\
\xdb\xc2\x1f\x6b\x9b\x00\xc4\x9f\x18\x7a\xc1\x56\xf6\xe7\x2d\x02\
\x10\xff\x87\xa1\x17\x6c\x66\x7f\xde\x24\x00\xf1\xbf\x1b\x7a\xc1\
\x46\xf6\xe7\x0d\x02\x10\xff\xab\xa1\x17\xac\x67\x7f\x5e\x27\x00\
\xf1\xbf\x18\x7a\xc1\x5a\xf6\xe7\x35\x02\x10\xff\xb3\xa1\x17\xac\
\x2a\x6c\x95\xe0\x2f\x74\x82\x5c\xf1\xd6\xdd\xb0\x3b\xc9\x69\xf0\
\xf6\x6f\x3c\x13\x0d\xe6\x0f\x9d\x4d\xae\x86\x25\x47\xfe\xc5\xa3\
\x73\xd1\x2d\x1c\xbb\x12\x84\x4b\xc7\x23\x38\x65\x9b\xed\x27\x8c\
\x1a\x92\xe4\xcf\x13\xa0\x88\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x06\
\x06\xfa\x65\x63\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x6f\x00\x73\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x17\
\x0c\x49\x77\x27\
\x00\x61\
\x00\x75\x00\x64\x00\x69\x00\x6f\x00\x2d\x00\x76\x00\x6f\x00\x6c\x00\x75\x00\x6d\x00\x65\x00\x2d\x00\x6d\x00\x65\x00\x64\x00\x69\
\x00\x75\x00\x6d\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x09\x10\x6a\x47\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x62\x00\x61\x00\x63\x00\x6b\x00\x2d\x00\x73\x00\x74\
\x00\x6f\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x0f\xa4\x86\x47\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x62\x00\x61\x00\x63\x00\x6b\x00\x2d\x00\x73\x00\x74\
\x00\x61\x00\x72\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x09\xe3\x1f\x27\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x66\x00\x6c\x00\x6f\x00\x70\x00\x70\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x0f\x68\xf4\xa7\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x73\x00\x6b\x00\x69\x00\x70\x00\x2d\x00\x66\x00\x6f\x00\x72\x00\x77\x00\x61\x00\x72\
\x00\x64\x00\x2d\x00\x72\x00\x74\x00\x6c\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x05\x1b\x11\xa7\
\x00\x64\
\x00\x65\x00\x66\x00\x61\x00\x75\x00\x6c\x00\x74\x00\x5f\x00\x63\x00\x6f\x00\x76\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x15\
\x04\x57\xa1\xc7\
\x00\x61\
\x00\x75\x00\x64\x00\x69\x00\x6f\x00\x2d\x00\x76\x00\x6f\x00\x6c\x00\x75\x00\x6d\x00\x65\x00\x2d\x00\x68\x00\x69\x00\x67\x00\x68\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x03\xd1\xe1\x87\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x73\x00\x6b\x00\x69\x00\x70\x00\x2d\x00\x66\x00\x6f\x00\x72\x00\x77\x00\x61\x00\x72\
\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x02\x78\xcb\xa7\
\x00\x61\
\x00\x75\x00\x64\x00\x69\x00\x6f\x00\x2d\x00\x76\x00\x6f\x00\x6c\x00\x75\x00\x6d\x00\x65\x00\x2d\x00\x6d\x00\x75\x00\x74\x00\x65\
\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x09\xd2\x98\xc7\
\x00\x61\
\x00\x75\x00\x64\x00\x69\x00\x6f\x00\x2d\x00\x76\x00\x6f\x00\x6c\x00\x75\x00\x6d\x00\x65\x00\x2d\x00\x6c\x00\x6f\x00\x77\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x05\x01\x32\x67\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x6c\x00\x69\x00\x73\x00\x74\x00\x2d\x00\x73\x00\x68\
\x00\x75\x00\x66\x00\x66\x00\x6c\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1b\
\x04\xc8\x47\x87\
\x00\x61\
\x00\x63\x00\x63\x00\x65\x00\x73\x00\x73\x00\x6f\x00\x72\x00\x69\x00\x65\x00\x73\x00\x2d\x00\x74\x00\x65\x00\x78\x00\x74\x00\x2d\
\x00\x65\x00\x64\x00\x69\x00\x74\x00\x6f\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x0b\xa7\x9e\x07\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x62\x00\x61\x00\x63\x00\x6b\x00\x2d\x00\x70\x00\x61\
\x00\x75\x00\x73\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x0d\x00\x00\x00\x03\
\x00\x00\x01\xac\x00\x00\x00\x00\x00\x01\x00\x00\x52\x54\
\x00\x00\x01\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x4a\xf5\
\x00\x00\x01\x4a\x00\x00\x00\x00\x00\x01\x00\x00\x3d\x53\
\x00\x00\x02\x46\x00\x00\x00\x00\x00\x01\x00\x00\x71\xaf\
\x00\x00\x02\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x6a\x29\
\x00\x00\x01\x22\x00\x00\x00\x00\x00\x01\x00\x00\x24\x74\
\x00\x00\x00\x58\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x42\
\x00\x00\x01\xde\x00\x00\x00\x00\x00\x01\x00\x00\x5e\xf3\
\x00\x00\x00\xc2\x00\x00\x00\x00\x00\x01\x00\x00\x14\x83\
\x00\x00\x02\x82\x00\x00\x00\x00\x00\x01\x00\x00\x7b\x81\
\x00\x00\x00\x24\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x1c\x59\
\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x0e\xbe\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x0d\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\xac\x00\x00\x00\x00\x00\x01\x00\x00\x52\x54\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x01\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x4a\xf5\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x01\x4a\x00\x00\x00\x00\x00\x01\x00\x00\x3d\x53\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x02\x46\x00\x00\x00\x00\x00\x01\x00\x00\x71\xaf\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x02\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x6a\x29\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x01\x22\x00\x00\x00\x00\x00\x01\x00\x00\x24\x74\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\x58\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x42\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x01\xde\x00\x00\x00\x00\x00\x01\x00\x00\x5e\xf3\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\xc2\x00\x00\x00\x00\x00\x01\x00\x00\x14\x83\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x02\x82\x00\x00\x00\x00\x00\x01\x00\x00\x7b\x81\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\x24\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x1c\x59\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x0e\xbe\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x0c\x3e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x13\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x4f\x70\x74\x69\x63\x61\x6c\x20\
\x44\x72\x69\x76\x65\x3e\x67\xba\x0c\x00\x00\x0b\x9c\x49\x44\x41\
\x54\x68\xde\xed\x59\xe9\x73\x53\xd7\x15\x3f\xef\x69\x97\x2d\x4b\
\x5e\xb0\xbc\x60\x3b\xd8\x18\x0c\x18\x12\xa0\xcd\x4c\xd3\x0c\x24\
\x21\xcd\x52\xda\x4c\x92\x1a\x27\x40\x20\x26\x09\xb4\x99\x34\xfd\
\xd0\x99\x7e\xe8\x4c\xbf\x24\x4c\xa7\xfd\x07\x9a\x99\x92\x2f\xe9\
\x24\x2d\x60\x53\xb2\xe1\x84\x7d\x4c\xa6\x6c\x06\x12\x84\x37\xd9\
\x50\x23\x2f\x04\x4b\x78\x91\xa5\xa7\xed\xad\x3d\xe7\x4a\x4f\x96\
\xb1\x62\x3b\x33\x49\xeb\x74\xf2\xe0\xf0\xc4\x7b\x4f\xf7\x9e\xdf\
\xf9\xfd\xce\xb9\xe7\x3e\xf1\x0d\x0d\x0d\xf0\x5d\x36\x1e\xbe\xe3\
\xc7\xf7\x00\xfe\xaf\x00\x34\xbd\xbc\xe3\xd9\x17\x76\x3e\x7f\x66\
\x47\xd3\xb6\xd0\x8e\xa6\xad\xa1\xed\x3b\x9f\x6b\xa3\x6b\x0b\x1e\
\xc0\xf6\xed\xdb\x5d\xdb\x76\x3e\xf7\xa9\xd5\x62\x7d\x6f\xc9\x92\
\xa5\x1b\xd6\xd4\xdf\xe7\xa8\x5f\xb1\xda\x51\xe2\x2e\xdb\xc8\x71\
\xdc\xbb\x5b\x77\x34\x7e\x4a\xcf\x2c\x48\x00\x9b\x5f\xdb\x6c\x55\
\x40\x3c\x55\x51\x5e\xf5\xd8\x7d\xf7\xae\xb3\xb9\x8b\x8b\xc1\x6c\
\x36\x81\xc5\x6a\x85\xc5\xe5\x8b\xa1\x7e\x45\xbd\x3d\x37\xc7\xf1\
\x13\x49\x4b\x9c\xa2\x67\x17\x1c\x80\x9c\x71\x4b\x73\x79\x69\xd9\
\xea\xda\x9a\x5a\x5e\x51\x64\x38\x7f\xe1\x1c\xbc\xf9\xe6\x9b\xf0\
\xfa\x6f\x5e\x87\xbd\x7b\xf7\xc2\xc5\x4b\x97\xa0\xae\x76\xb9\x21\
\xd7\x66\xaf\xb7\x8f\x59\x9a\x17\x14\x80\xe7\xb6\x35\xfc\xcc\xe9\
\x74\x3e\xba\x6a\xe5\x1a\x93\xa2\xca\x70\xf8\xfd\xf7\xa1\xf9\x40\
\x0b\xd8\xec\x36\xa8\x5d\x56\x0b\x66\xab\x19\x5a\x5a\x5a\xe0\xe3\
\xd6\x23\x50\x57\xb7\xd2\x6c\x31\x5b\x1f\xa5\xef\xcc\x77\xfc\x43\
\x2d\x2d\xdc\xb7\x06\xa0\xb1\xb1\xd1\x2c\x49\xd2\xbe\x55\x75\xab\
\x6d\xf4\xff\xeb\x7d\xd7\xe1\xdc\xd9\xb3\x50\x5b\x57\x0b\x79\xce\
\x3c\x30\x9b\x4c\xe0\xc8\xcd\x85\xaa\xaa\x0a\x38\x7b\xf6\x1c\x0c\
\x0e\x0e\x41\x89\xbb\xc4\x26\x26\xc4\x7d\xf4\xdd\xf9\xcc\xf1\xbb\
\x77\x2e\xb9\xbf\x35\x00\xbc\x55\xfb\xc5\x9a\xfa\x7b\x8b\x90\x01\
\x88\x44\x04\xd8\x7f\x60\x3f\x94\x2f\x2e\x07\x93\xc9\x08\x06\x03\
\x8f\x66\x60\x66\x44\x20\x85\x85\x05\x70\xe8\x50\x33\x9e\x0b\x89\
\x1d\x97\x08\xb1\xa7\xe6\x1a\x3f\x77\xf3\x6b\x56\x49\x35\xae\xfd\
\xd6\x00\x54\x57\x2e\xfd\xfd\xaa\x95\xab\x4d\x58\x65\xe0\x93\x4f\
\x3f\x01\x55\x55\xc0\xe5\x72\xa6\x1d\xe7\x53\x66\xe0\x79\xc8\xc9\
\xb1\x43\x34\x16\x83\x53\xa7\x4e\xc2\xa2\xc2\x62\x9b\x14\x97\x9a\
\xe6\x1a\xff\x9d\x5d\x0f\x25\x38\x9e\x5b\x5b\xfa\xf4\x9f\xf2\xbe\
\x71\x00\xbb\x76\x6d\xad\x1a\x18\xf0\xad\x4c\x88\x09\x08\x85\x42\
\x70\xe5\xca\x15\x16\x7d\xdd\x79\x03\x6f\x48\xb2\x80\xce\xf3\xa9\
\x6b\xf9\x05\xf9\x70\xe9\xf2\x65\x58\xb4\xa8\x08\x2c\x26\xf3\xa6\
\xcd\x9b\x67\xaf\x48\x0d\x0d\x5b\x34\x1e\xb4\xb0\x51\x0a\x3d\xf0\
\x8d\x02\xd8\xbd\x7b\xf7\x12\x49\x03\x4f\x41\x41\x81\x21\x1a\x8d\
\x80\xb7\xd7\x0b\x98\x9d\xcc\x92\x4e\x67\x82\x48\x32\xc0\xa3\x99\
\x8c\x46\x20\xb6\x7a\xaf\x5f\x87\xdc\x3c\x0c\xaa\x51\x79\x62\xae\
\xb9\x34\x0e\xba\x39\xce\xd0\xf0\x8d\x01\x78\xf5\xd5\x97\x6a\x38\
\xa3\xd2\xae\xc8\x4a\x5e\x69\x49\x29\x88\x89\x38\xf4\xdd\xe8\x4b\
\x4b\x87\x4f\xcb\x87\x67\x60\xf8\x4c\x43\x10\x56\x9b\x15\x7a\xba\
\xbb\xc1\xe5\x74\x59\x31\x99\x5f\xcc\x1c\x9b\x6b\x6c\x34\xdc\x3d\
\x9f\x90\x50\xcf\x83\x06\xcf\x14\x37\xbe\x91\x3b\x6f\x00\x2f\xed\
\xd9\xb9\xf1\x95\x3d\x4d\x97\x5f\xde\xf3\xa2\xa6\xdb\x2b\xbf\x6c\
\x52\x77\xff\x6a\xd7\xe9\xb8\x2c\x9f\x2d\x29\x2e\x2b\x0c\x4e\x06\
\xb9\xc2\xc2\x22\x88\x27\x12\x30\x3c\x34\x0c\x79\x18\xd1\xb4\x7c\
\x52\x09\xcc\xeb\x2c\xa0\xf1\xcc\x10\x00\x2e\x6e\x43\xf8\xbc\xcb\
\xe5\xc2\xe8\xaa\x8f\x6f\xd8\xb0\xc1\xa2\xcf\x5b\x2e\xd4\x6c\xbc\
\xbb\x6c\x8e\x9f\xf8\x73\x14\x34\xcd\x63\x89\x49\x2b\xe7\x05\xa0\
\xa9\x69\x6b\x35\xa8\xd0\xba\xb4\x66\xd9\xfa\xf5\x6b\xef\x07\x32\
\x93\x91\xa2\xd6\xcb\xf9\xfd\xfe\x87\x0b\x5c\x85\xee\x9c\x9c\x1c\
\xae\xac\xa4\x1c\xc7\xd5\x20\x10\x18\x01\x51\x12\x59\xdd\xcf\x4c\
\x5e\x5d\x42\x7a\xe4\x33\x65\xa4\x6a\x0a\x4c\x06\x83\x60\xe4\xcc\
\x8a\xc9\xc6\xfd\x28\xed\x08\x6f\x5c\x9e\xb5\x6c\xf2\x70\x8b\xd3\
\xb8\x8a\x79\x01\x10\x15\xe5\x6f\xcb\x6a\xeb\xac\x76\x7b\x0e\x8c\
\x8d\x8d\x82\xc7\x73\x15\xce\x9f\x3f\x0b\x92\x2c\x61\x94\x9d\xb0\
\x62\xf9\x4a\x88\x46\x63\xf8\xd9\x01\x92\x28\xc2\xf0\xad\x5b\xe0\
\x9c\x16\xfd\x0c\xdd\xa7\x9c\x4f\x9f\x53\x66\x31\x9b\xe1\xf6\xed\
\x11\xb0\xe7\xd8\xcc\x88\x65\x71\x5a\x42\x9a\x6a\x56\xc0\xb4\x2e\
\x8b\x8f\x23\x9c\x2a\xd7\xcc\x09\xa0\x7e\x6d\x7d\x7d\x59\x69\xc5\
\x8f\x6b\xaa\x6b\x0d\x16\x8b\x05\xec\x18\xd5\xcf\xbf\xb8\x02\x8a\
\xa2\x40\x41\x81\x0b\xea\x96\xaf\x60\x52\x88\xc5\xa2\xe8\x84\x05\
\x64\x6c\x1b\x62\x08\x86\x64\x71\x37\x00\xbd\x7c\x1a\xf8\xe9\xce\
\x93\x19\x91\x05\x0a\x8e\xdd\x66\x37\xc9\xb2\x52\x9a\x4e\x58\x8d\
\x1b\xc1\x7f\x66\xac\xd2\x1a\x68\x23\x2a\xcf\x97\xcc\x0a\x00\xab\
\x83\x21\x12\x0a\x7f\x64\x36\x19\x39\x72\x98\xe8\x1f\x1b\x9b\xc0\
\x05\x2a\xc2\x1e\x32\x9b\xad\x50\xbc\xc8\x8d\xcf\x01\x84\xc2\x61\
\xb0\x58\xac\x20\xcb\x32\x08\x78\xbf\xab\xb3\x1b\x4e\x9e\x3c\x05\
\x17\x2f\xb4\xc3\xe8\x9d\x3b\xd3\xca\x27\xb3\x0c\xe7\x39\x02\x85\
\x00\xc2\x42\x08\xc7\xb0\x70\x18\x84\x7b\xa6\x18\x80\x5b\x38\xc1\
\x9a\x99\x0e\x72\x23\x78\xcf\x3d\x3b\x03\x06\xe5\x89\x45\xc5\x45\
\x15\x0e\x87\x13\x1d\xbf\x03\xe1\x70\x08\xbc\xde\x6e\x16\x7d\x8a\
\xb0\xbb\xb8\x84\x42\x01\xd4\xac\x4d\x4c\x8c\xd3\xe4\x20\xa3\xf6\
\xbd\x7d\x7d\xac\x7c\xde\xff\xc3\xfb\x61\x69\x6d\x35\xf4\xf4\xf4\
\x62\x4b\x71\x23\x8b\xe3\x1c\x33\x1e\x23\x40\xe0\xe2\x09\x11\x4c\
\x28\x25\x1c\x3f\x03\x00\x17\xc4\x93\x2b\x1b\x03\x68\xb3\x33\xa0\
\xca\xda\x6e\x94\x87\xd1\x88\x13\x87\xc3\x02\x8b\xfc\x88\xdf\x8f\
\x13\xa8\xd8\x0a\x18\x71\xf5\x2c\x84\x04\x96\xcc\x68\x34\xca\xc0\
\x99\x51\x42\x12\x32\x20\x63\x15\xaa\xac\xac\x00\x47\x5e\x2e\xd4\
\xd4\xd4\xc0\x13\x4f\x3e\x86\xfa\xf6\x63\x10\xc6\x66\xca\x87\x9b\
\xfa\x2c\x4b\x12\x98\x29\xa1\x55\x25\x9d\x03\xbc\x06\x59\x01\x70\
\x2a\xdc\x46\xf8\xb3\x03\x00\x0e\xd6\x61\x79\xc0\xbf\x1a\x08\x42\
\x98\x01\x20\x20\xd4\x1e\x90\x9c\xc8\xe1\x04\x3a\x4b\x2b\xaf\x80\
\x7d\x8f\x09\xfb\x1b\x49\x16\xb1\x14\x72\x2c\x49\x69\x81\x62\x09\
\x8a\xcc\xd4\xd5\x2d\x83\xbe\xde\xeb\x33\x9c\x67\x2c\x70\x1c\x93\
\x11\x36\x81\xc0\x19\x79\x1c\x5f\x4d\x4b\x43\xb5\xc0\x44\x36\x00\
\xaa\x6a\x19\xc1\xd3\x9c\x39\x50\xac\x60\x49\x50\x31\xe2\x21\x8c\
\x30\x39\x49\x0d\x1a\x49\x48\x45\x60\x46\xa3\x09\x22\xb8\xea\x12\
\x08\xea\x69\xa8\x61\x4b\x24\x24\x50\xb0\x3a\x61\x22\xb2\xeb\xe1\
\x50\x98\x81\xa6\xbe\x47\xcf\x1d\x92\x4c\xd2\x69\x2e\x0d\x82\xae\
\x31\x69\x28\x1a\x8d\x9d\xaf\x3b\xe2\xfb\x68\x6f\x04\x03\x69\x69\
\xb9\x6b\x2d\x88\x28\xb1\x30\x9e\x1c\x73\x56\x21\x49\x94\x20\x1e\
\x8f\x43\x44\x88\x30\xa3\xe8\x93\x84\x24\x39\xc1\x6a\x7d\x3c\x16\
\x67\xf7\x55\x04\x85\x44\x81\xc6\xee\xa3\xf3\x78\x4d\x10\x04\x08\
\x4e\x4e\x62\x7d\x9f\x44\xa0\x51\xd2\x6d\x3a\x69\xd9\x59\x07\x92\
\x61\x34\x3e\x32\x90\x76\xa4\xbe\xfe\x35\x23\x7e\x4d\xdd\xb2\x65\
\x8b\x96\xe9\xa0\x8b\x37\xb9\xb1\xa5\xf0\xcf\x0a\x00\x17\xa5\x40\
\x0c\x1d\x0c\x85\x26\x31\xc1\xe2\x8c\x01\x9a\x98\x39\x18\x13\xd1\
\xb1\x09\x26\x9f\x68\x04\xa3\x8f\x6c\xd0\x67\xce\xc0\x33\xed\x51\
\x25\x0a\x61\xf4\x83\xb8\x38\x91\x05\x46\x02\x90\x9b\x93\x83\xd5\
\xc6\x30\xc5\x40\x86\x51\x29\xa3\x52\x4b\xce\xab\x8a\x3c\xa1\x3b\
\x32\x56\xe9\x24\x36\x26\xee\x76\x50\x31\xaa\xa5\x58\x85\xbe\x9c\
\x9d\x01\x0d\x3e\x8f\xa1\x0c\x26\xb1\xb3\x64\x2c\x44\x88\xcd\x24\
\x00\x92\xcc\x38\x3a\x46\x0c\x51\xa4\x59\x3e\xe0\x33\x1c\xf1\x8d\
\x3b\x2e\x01\xcb\xea\x24\x46\x3f\x88\xd1\x1f\x1f\x9f\x80\x00\x96\
\xd2\xd5\x6b\x56\xa3\x9f\x59\x22\x4f\x7f\x10\x03\x49\x92\x16\x42\
\x45\xd5\xd2\x91\x35\x19\xf8\x7c\xf4\x63\x7c\x66\x43\xa7\xd1\x5a\
\x71\x7b\x56\x00\xbc\x91\x7b\x5b\x8c\xc7\xe4\xb1\xf1\x31\xd4\xb5\
\xc2\x00\x24\xa3\xa4\xb0\xca\x33\x34\x3c\xc8\x5a\x07\x21\x12\x66\
\xfa\x27\x90\x24\x0f\x47\xae\x93\x45\x7f\x72\x62\x12\x46\x47\x47\
\xb1\xb5\x08\x40\x75\xf5\x12\x28\x2b\x2b\xcd\x70\x1a\x52\x91\x87\
\x34\x08\xda\xf4\xc7\x90\x45\x1c\x7f\x38\xed\x88\xc2\xe5\xe3\xad\
\x19\x00\x40\xe5\x71\xb0\xb9\x18\x50\x0c\x47\xc3\x82\x30\x84\x3d\
\x3e\x9b\x88\x55\x1a\xb3\x91\xe5\x00\x39\x3b\xe0\xf3\x81\x88\x09\
\x4b\x60\xb0\x67\x61\xab\x31\x8f\x09\x79\xdf\xba\x35\x50\x84\x4d\
\x1d\xe5\x08\xad\x17\x0f\x3e\xf8\x00\xac\xff\xc1\xfa\x74\xa4\x21\
\x25\x19\xf6\x11\x92\x20\xc8\xa8\x5a\xc5\x91\x59\x4c\x62\x5f\x5a\
\x2a\x9c\x9c\xcf\x81\x36\x13\x00\x31\xa0\x69\xb3\x33\x80\x99\xaf\
\x46\xc3\xf1\x67\x03\x7e\xbf\x4a\x55\x87\x12\x56\x55\x92\xb9\x44\
\x8b\x57\x24\x12\x85\x9b\xfd\x37\x99\x2c\x92\x4c\x44\x59\x55\xa1\
\x96\xe2\xe1\x4d\x1b\x61\xeb\xb6\xe7\xe1\xe9\xa7\x9f\x82\xaa\x7b\
\x52\xeb\x52\x46\x1d\xe1\x40\x07\xa2\xff\x9f\x03\x1b\xb6\xd5\x98\
\x73\x58\x07\x54\x5f\xc6\x42\xb6\x0c\x53\xff\xd2\x0c\x09\x69\x50\
\xaa\xcd\xc9\x00\x9e\x11\x44\xb7\x10\x8d\x5e\xf0\x0d\xdc\x54\xb0\
\xe3\x64\x65\x93\xf4\x4e\x2c\x44\x51\x52\x1d\x9d\xd7\x92\x8c\x12\
\x20\xcc\x05\x92\x18\xf9\xa5\x21\x60\x4d\x53\x93\x49\x89\x46\x00\
\x35\x2d\xfb\x64\x5c\x0a\x85\x03\x9b\x41\xdc\xc9\x49\xaa\x2c\x8e\
\x4c\xdd\xe4\x0a\xb0\x92\x1f\x9b\xe1\x20\x32\x80\x69\xef\xfb\x4a\
\x00\x87\x0e\x1d\x4a\x11\x0b\xdc\xad\x91\x2f\xf7\xf4\xf7\xf7\x27\
\xa8\xb2\x94\x97\x2d\x86\xea\x25\xd5\x50\x56\x52\xc6\x5a\x03\x5a\
\x17\x3c\xd7\xae\xb2\xea\xe2\xbf\x13\x00\xab\xc5\xc6\x16\x34\x95\
\x9c\x47\x90\xc9\x35\x43\x61\x60\xa6\x40\x50\x41\x65\xa7\x69\x87\
\xc3\xe1\xa0\xa4\x17\x45\x55\xbe\x95\x01\x2f\x14\x51\xe4\xce\x2c\
\xb0\x2b\x70\xe8\xe1\xd9\x18\x48\x03\x38\xf6\xf1\x31\xdf\x68\x20\
\xb0\xc5\xeb\xed\xb9\x7a\xe9\x72\x3b\x90\xdd\x1c\xb8\x49\xda\xd7\
\x04\x21\x72\x61\x78\x78\x78\x74\xe4\x4b\xbf\xda\xeb\xed\x61\x95\
\x84\xb4\x4c\xa5\x52\x49\xad\x09\x49\x10\xea\x74\x10\x74\x26\x04\
\x5a\x12\x0e\x05\xc0\x8c\x7d\x10\xee\x09\x0c\xdd\xd7\xfa\x2e\xa6\
\x02\x08\x9a\xc1\xf0\x19\xdb\xc0\x64\x1c\x15\x3f\xff\x43\x39\x7e\
\xed\x9e\x98\x2c\xf5\xcf\x7b\x47\x76\xe4\xc8\xd1\x7f\x1d\xdc\xdf\
\xf2\xc8\x81\x7f\x34\x2f\x42\x2b\x41\x2b\x47\xab\x6a\x39\xf8\xcf\
\xad\x5d\x1d\x1d\x8d\x5d\x3d\x5d\x93\x28\x1b\x2d\x70\xc7\x0f\x39\
\xf6\x5c\xec\x95\xcc\x69\x06\x48\x6e\xf4\x59\x4d\xc9\x8a\x81\x48\
\x31\xa0\x83\xa0\xdd\x58\x18\x4b\xaf\x24\x2b\x27\x8e\x1f\x3f\x9e\
\x68\x68\x68\x60\x8f\x0c\x7d\xf4\xc6\xe5\x99\x7d\x90\xfa\x38\xa2\
\x7b\x1b\x81\x49\xb3\x01\xd0\xe6\x69\xaa\xc7\xd3\xe5\xf3\x76\xf7\
\x3d\xe3\x0f\xf8\x05\x6f\x6f\x2f\x2e\x58\xb9\x2c\x9a\x3a\x03\xb4\
\x4a\x2b\xaa\x9e\x0f\x5a\x8a\x85\xe9\x0c\x14\x15\x15\xd1\x86\x26\
\x21\x8a\xf1\x77\x67\x8a\x6b\x86\x7b\xab\x70\xc5\x3c\x38\xeb\x13\
\xa9\x08\xa8\x5f\x61\x4a\xca\x64\xfd\xdc\xd1\xd1\xe1\xbb\x76\xb5\
\xeb\xa7\xd7\x3c\x5f\x28\x56\xab\x8d\xd5\x74\x8d\x55\x2b\x35\x0d\
\x82\xc9\x48\xb7\x0c\x10\xb4\xa5\x74\x3a\xf3\x60\x70\x60\x40\xeb\
\xed\xbe\x71\x34\x35\x47\xd6\xa3\xa2\xf1\xb7\x36\xbc\x1d\x19\xb4\
\x7b\xaf\xce\xf9\x56\x02\x41\xdc\xed\xf0\x34\xa7\x33\x8c\xa8\x94\
\x3c\x1e\xcf\x4d\x6c\xdc\xda\xc6\x71\x6f\x40\xdb\x4f\x9b\xcd\x96\
\x94\x90\x9a\x34\xbd\xcf\x99\x02\x92\x04\x51\xec\x76\xe3\x7e\x62\
\x82\xfa\xa5\xd3\x6d\x6d\x6d\x51\x5d\x3e\x59\x8f\x90\xd3\x86\xed\
\xca\x71\xad\xb9\x59\x99\xd7\x6b\x15\x1c\x6c\x5e\xce\xeb\x36\x11\
\x0c\xed\xef\xb8\xd6\x91\xc8\xc7\x16\x86\xde\x81\x52\x59\xd5\xf3\
\x80\xe5\x02\x01\xd0\x92\x2d\x3a\xe5\x83\x1d\x41\x96\x94\xb8\xa1\
\xbf\xdf\x17\x93\x24\xf5\x9d\x54\xd0\xbe\xf2\x70\x0e\xb7\x04\x15\
\x45\xeb\xfc\x5a\xaf\x16\x53\x83\x66\x75\x38\xc3\x44\xb2\x33\xa7\
\xcf\x1c\x39\x77\xfe\x5c\x30\x8c\xfb\x07\x67\x9e\x0b\xf2\xf3\x5d\
\x4c\xf7\x7a\x25\x9a\xca\x05\x64\x00\x8b\x5c\x65\x55\x25\x84\x26\
\x43\xd0\xff\xef\xfe\x60\x68\x22\xf4\xe1\x5c\x8e\x75\x76\x76\xaa\
\xc3\xad\x7f\x0c\x7e\xed\x77\xa3\x44\x2b\xb1\x81\x96\xd5\x71\xdd\
\xb0\xef\x11\x84\x50\xf8\xd7\x1f\xb7\xb6\xc6\xec\x76\x3b\x90\x91\
\xbe\xd5\x8c\x72\xaa\x5b\x69\xa9\x9b\x25\x7b\x7b\xfb\xe5\x98\x22\
\xc9\x7b\x50\x4e\xe2\x7f\xe5\xf7\x01\x62\x04\x4d\x46\xcb\x74\x3e\
\x0d\xe6\xc0\x81\x96\x0f\x87\x07\x87\x4e\x5f\x6c\xbf\x28\xb9\x50\
\x4a\xf4\xa6\xba\xd8\x5d\xcc\x5e\xad\x93\xe6\xe9\x15\x4a\x65\x65\
\x25\x7b\x2b\xdd\xd1\xd1\x25\x06\xfc\x77\x4e\x86\xc3\xd1\x23\xff\
\x93\x1f\x38\x52\xcc\xe8\x80\xc8\x24\xc6\x92\xca\x37\xb4\xb5\x7d\
\xd6\xd1\xe3\xf5\xaa\x05\xf9\x85\x28\xa5\x02\x26\x97\xe5\xcb\x97\
\x41\xcd\xd2\x1a\xf6\xda\xd1\xeb\xed\x53\x3c\x57\x3d\x9d\xd8\x1b\
\x37\x2e\xb8\x9f\x98\x70\x8f\x10\x37\x80\xb0\xa9\xb5\xf5\x93\xe3\
\x1f\x1c\xfe\x20\x16\xc5\x46\x8f\x5e\xbb\x18\x0c\x46\xac\x38\x41\
\x38\x76\xf4\x44\xb4\xfd\xc2\xc5\x13\x06\x2e\xb2\x89\x9e\x5d\x90\
\xbf\x52\x0a\x82\x31\x18\x8f\x24\x9e\xec\xbd\xd1\xfb\xc2\x7b\xef\
\xfe\xfd\xb3\xb7\xfe\xf2\x56\x78\xdf\x5f\xf7\x85\x0f\x1f\xfe\xe0\
\xcc\xc0\xa0\x6f\x47\x3c\x26\x3d\x49\xcf\x2c\xf8\xdf\x89\x15\x11\
\x0e\x27\xe2\xd2\x46\x29\xa1\xe6\x91\x89\x31\xe9\x21\xba\xf6\xfd\
\x2f\xf5\xdf\x03\x58\xc0\xc7\x7f\x00\x01\x9b\xbf\xfb\xe5\xb7\x98\
\x3f\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x78\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x03\x00\x00\x00\x60\xdc\x09\xb5\
\x00\x00\x01\x11\x50\x4c\x54\x45\xff\xff\xff\x00\x00\x00\x24\x24\
\x24\x00\x00\x00\x00\x00\x00\x2e\x2e\x2e\x3b\x3b\x3b\x00\x00\x00\
\x1e\x1e\x1e\x00\x00\x00\x2b\x2b\x2b\x00\x00\x00\x24\x24\x24\x31\
\x31\x31\xe2\xe2\xe2\xc1\xc1\xc1\xff\xff\xff\xd2\xd2\xd2\xbf\xbf\
\xbf\xe1\xe1\xe1\xe2\xe2\xe2\xe0\xe0\xe0\xe1\xe1\xe1\xff\xff\xff\
\xfb\xfb\xfb\xfd\xfd\xfd\xff\xff\xff\xff\xff\xff\xbc\xbf\xb8\xbd\
\xc0\xb8\x9a\x9d\x99\xa5\xa6\xa2\x89\x8b\x86\x8c\x8e\x88\x8e\x90\
\x8b\x90\x92\x8d\x92\x95\x8f\x95\x97\x91\x97\x99\x94\x99\x9c\x96\
\x9c\x9e\x98\x9e\xa0\x9b\xa0\xa3\x9d\xa3\xa5\x9f\xa5\xa7\xa1\xa7\
\xaa\xa4\xaa\xac\xa6\xac\xaf\xa8\xae\xb1\xaa\xb1\xb3\xad\xb3\xb6\
\xaf\xb5\xb8\xb1\xb7\xba\xb4\xba\xbd\xb6\xd4\xd8\xd0\xd4\xd8\xd1\
\xd6\xda\xd2\xd7\xda\xd3\xd8\xdc\xd5\xda\xdd\xd6\xdb\xde\xd7\xdc\
\xdf\xd9\xdd\xe0\xda\xdf\xe1\xdb\xdf\xe2\xdc\xe1\xe3\xde\xe1\xe4\
\xdf\xe4\xe5\xe1\xe4\xe6\xe1\xe6\xe7\xe4\xe6\xe8\xe4\xe8\xea\xe6\
\xe9\xea\xe6\xea\xec\xe9\xeb\xec\xe9\xed\xee\xeb\xee\xee\xec\xef\
\xf0\xed\xf1\xf2\xf0\xf3\xf4\xf2\xf6\xf7\xf5\xf8\xf9\xf7\xfa\xfb\
\xfa\xfb\xfb\xfb\xfc\xfc\xfb\xfc\xfc\xfc\xfc\xfd\xfc\xfd\xfd\xfc\
\xfd\xfd\xfd\xfe\xfe\xfe\xff\xff\xff\x77\x19\x90\xf0\x00\x00\x00\
\x20\x74\x52\x4e\x53\x00\x07\x07\x09\x0a\x0b\x0d\x0f\x11\x12\x12\
\x13\x15\x15\x1a\x29\x2a\x2d\x34\x3c\x46\x4b\x4c\x64\x77\x7b\x7c\
\x7f\xb0\xb1\xc3\xd7\x8b\xc9\x16\x4b\x00\x00\x00\xf6\x49\x44\x41\
\x54\x78\xda\x62\x18\x81\x60\x14\x30\xb2\x73\x02\xe8\x96\xa7\xc3\
\x08\x03\x00\x08\xc2\xf1\x29\xf6\xd9\x36\xfe\xd8\xb6\x75\xd3\x7f\
\x21\x69\x60\x77\x9e\xe7\x93\x85\x26\xcd\x1f\x0b\xcd\x4c\x8b\x66\
\x42\x31\x2d\x42\xa1\x54\x20\x4b\x87\x43\x12\x44\x66\x02\x64\xc1\
\xca\x82\x06\xb3\x01\x4f\x0f\x77\xd7\x97\xe7\xa7\xc7\x87\xfb\xbb\
\x3b\x7b\x07\x47\x27\x67\x17\x57\x37\x04\xdb\xab\x1a\xcc\x8d\x10\
\xff\x2d\xa3\xc4\x86\x01\x43\xc4\x7f\xcf\x30\xb1\x69\xc0\x00\xf1\
\x3f\x32\xb0\xa0\x8f\xf8\x9f\xe9\x5b\xd0\x43\xfc\x2f\xf4\x2c\xe8\
\x22\xfe\x57\xba\x16\x74\x10\xff\x1b\x1d\x0b\xda\x88\xff\x9d\xb6\
\x05\x2d\xc4\xff\x41\xcb\x82\x26\xe2\xff\xa1\x69\x41\x03\xf5\xd3\
\xb0\xa0\x8e\xfa\xa9\x5b\x50\x43\xfc\x63\x6a\x16\x54\x11\xff\x98\
\xaa\x05\x15\xc4\xff\x47\xc5\x82\x32\xe2\xff\xa5\x6c\x41\x09\xf1\
\x7f\x53\xb2\xa0\x88\xf8\xbf\x28\x5a\x50\x40\xfc\x9f\x14\x2c\xc8\
\x23\xcb\x1b\x10\x8a\x66\x72\xb2\x6c\x62\x4d\x82\xc9\xf9\xf5\x44\
\x52\x96\x58\x9c\x90\x4d\x2d\xc5\xe5\xbf\xb5\xfc\x3f\x86\x91\x07\
\x46\x01\x00\x70\x39\xa7\x90\x59\xe1\x0b\xb9\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\xc1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x03\x00\x00\x00\x60\xdc\x09\xb5\
\x00\x00\x02\xd9\x50\x4c\x54\x45\xff\xff\xff\x00\x00\x00\xff\xff\
\xff\x00\x00\x00\xff\xff\xff\x00\x00\x00\x00\x00\x00\xff\xff\xff\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xff\xff\xff\x00\x00\x00\xff\xff\xff\x00\x00\
\x00\x00\x00\x00\x27\x27\x27\x4e\x4e\x4e\x00\x00\x00\x49\x49\x49\
\x00\x00\x00\x44\x44\x44\xee\xee\xee\x00\x00\x00\x10\x10\x10\x30\
\x30\x30\x40\x40\x40\x00\x00\x00\x00\x00\x00\x55\x55\x55\x28\x28\
\x28\x5e\x5e\x5e\xae\xae\xae\xff\xff\xff\x2e\x2e\x2e\x7a\x7a\x7a\
\xff\xff\xff\xff\xff\xff\x8c\x94\x8c\xf7\xf7\xf7\xff\xff\xff\x94\
\x94\x8d\xb9\xb9\xb9\xf9\xf9\xf9\x92\x97\x8d\xac\xac\xa7\xfa\xfa\
\xfa\xfa\xfa\xfa\xfb\xfb\xfb\xae\xae\xaa\xd9\xd9\xd9\xfb\xfb\xfb\
\xe6\xe6\xe6\xfb\xfb\xfb\xa8\xab\xa4\xff\xff\xff\xfc\xfc\xfc\xff\
\xff\xff\x9e\xa1\x9b\xb6\xb9\xb3\xfc\xfc\xfc\xff\xff\xff\xfd\xfd\
\xfd\xba\xbc\xb7\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf8\xf8\xf8\
\xff\xff\xff\xb0\xb4\xae\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xfd\xfd\xfd\xff\xff\xff\xfd\xfd\xfd\xf9\xfb\xf9\xf5\xf5\
\xf5\xf5\xf5\xf5\xba\xbc\xb7\xa2\xa2\x9f\xf0\xf0\xee\xa8\xa9\xa4\
\xe9\xe9\xe7\xe3\xe3\xe1\xd6\xd6\xd4\xdd\xdd\xdc\xbd\xbe\xb9\xce\
\xcf\xcc\xc8\xca\xc7\xb6\xb8\xb5\xb7\xba\xb2\xc5\xc6\xc3\xb2\xb4\
\xaf\xb1\xb3\xb0\xac\xaf\xa8\xaf\xb0\xad\x9c\x9e\x9a\xb1\xb2\xae\
\x9a\x9b\x97\xbd\xc0\xba\xa2\xa5\x9e\xbd\xbf\xb8\x9b\x9e\x99\xb9\
\xbb\xb5\x7b\x7d\x78\x9e\xa0\x9c\x86\x88\x82\xaf\xb1\xab\xa3\xa5\
\x9f\x8a\x8b\x86\xbd\xc0\xba\xa4\xa7\xa1\x90\x92\x8c\xbc\xbf\xb8\
\x77\x79\x74\x97\x99\x93\xb3\xb5\xaf\xac\xad\xa8\x7e\x80\x7a\xa7\
\xa8\xa3\xb9\xbb\xb5\xba\xbb\xb6\xcc\xd0\xca\xbb\xbe\xb8\xc1\xc3\
\xbd\xcc\xcf\xc9\x89\x8a\x85\xcc\xce\xca\xdb\xdc\xd9\x93\x94\x90\
\x96\x98\x93\x98\x99\x95\xa3\xa6\xa0\xa4\xa5\xa0\xaa\xac\xa8\xb6\
\xb8\xb4\xd2\xd3\xd0\x82\x84\x7e\xb8\xba\xb6\xbe\xbf\xbb\xcf\xd0\
\xcd\xc8\xca\xc7\xdf\xe1\xdd\xd7\xd8\xd6\xdd\xdf\xdb\xe1\xe2\xe0\
\xe4\xe5\xe3\xed\xed\xeb\xed\xed\xec\xee\xee\xed\x6b\x6d\x67\x6d\
\x6f\x69\x71\x73\x6d\x74\x76\x70\x78\x7a\x74\x7c\x7e\x78\x7f\x81\
\x7b\x83\x85\x7f\x87\x89\x83\x8a\x8c\x86\x8e\x90\x8a\x91\x94\x8d\
\x95\x98\x91\x99\x9b\x95\x9c\x9f\x98\xa0\xa3\x9c\xa4\xa6\xa0\xa7\
\xaa\xa3\xab\xae\xa7\xaf\xb2\xab\xb2\xb5\xae\xb6\xb9\xb2\xba\xbd\
\xb6\xd4\xd8\xd0\xd4\xd8\xd1\xd6\xda\xd2\xd6\xda\xd3\xd7\xda\xd3\
\xd8\xdc\xd5\xda\xdd\xd6\xdb\xde\xd7\xdc\xdf\xd9\xdd\xe0\xda\xde\
\xe1\xdb\xdf\xe1\xdb\xdf\xe2\xdc\xe1\xe3\xde\xe1\xe4\xdf\xe4\xe5\
\xe1\xe4\xe6\xe1\xe5\xe7\xe2\xe6\xe7\xe4\xe6\xe8\xe4\xe7\xe8\xe4\
\xe7\xe9\xe4\xe8\xea\xe6\xe9\xea\xe6\xea\xeb\xe9\xea\xec\xe9\xeb\
\xeb\xea\xeb\xec\xe8\xeb\xec\xe9\xec\xed\xeb\xec\xee\xeb\xed\xee\
\xeb\xef\xf0\xed\xf0\xf1\xee\xf0\xf1\xef\xf1\xf1\xf0\xf1\xf2\xf0\
\xf1\xf3\xf0\xf2\xf3\xf1\xf3\xf3\xf2\xf3\xf4\xf2\xf4\xf5\xf3\xf5\
\xf5\xf3\xf5\xf5\xf4\xf6\xf7\xf5\xf6\xf7\xf6\xf7\xf7\xf6\xf7\xf8\
\xf6\xf7\xf8\xf7\xf8\xf8\xf7\xf8\xf9\xf7\xf8\xf9\xf8\xfa\xfa\xf9\
\xfa\xfb\xfa\xfb\xfb\xfa\xfb\xfb\xfb\xfb\xfc\xfb\xfc\xfc\xfc\xfc\
\xfd\xfc\xfd\xfd\xfc\xfd\xfd\xfd\xfd\xfe\xfd\xfe\xfe\xfe\xff\xff\
\xff\xfe\x3f\x28\xd3\x00\x00\x00\x9c\x74\x52\x4e\x53\x00\x01\x01\
\x02\x02\x03\x04\x04\x05\x06\x07\x07\x08\x09\x0a\x0a\x0b\x0b\x0c\
\x0d\x0d\x0d\x0e\x0e\x0f\x0f\x0f\x10\x10\x10\x10\x11\x12\x12\x13\
\x13\x13\x14\x16\x17\x1a\x1b\x1f\x21\x21\x26\x28\x29\x31\x31\x31\
\x37\x3a\x3c\x3d\x3f\x46\x47\x49\x4d\x50\x55\x57\x57\x57\x5d\x66\
\x67\x6a\x6c\x6f\x72\x75\x77\x78\x79\x7b\x7d\x7e\x7f\x80\x82\x84\
\x85\x86\x87\x87\x89\x8b\x90\x94\x97\x9a\x9b\xa3\xa8\xa9\xab\xb0\
\xb1\xbb\xbb\xbf\xc4\xca\xca\xcb\xcc\xd4\xd7\xd9\xdd\xe3\xe3\xe5\
\xea\xeb\xec\xf1\xf2\xf4\xf5\xf5\xf6\xf7\xf7\xf7\xf7\xf7\xf8\xf8\
\xf8\xf9\xf9\xf9\xfa\xfa\xfa\xfa\xfa\xfa\xfa\xfa\xfb\xfb\xfb\xfb\
\xfc\xfc\xfd\xfd\xfe\xfe\xfe\xfe\xfe\xf6\x02\x98\x54\x00\x00\x01\
\xfb\x49\x44\x41\x54\x78\xda\xed\x94\xd3\xa3\x1d\x31\x10\x87\xaf\
\x6d\x3b\xa9\x6d\xdb\xc6\xd4\xb6\x6d\xdb\x36\x6f\x6d\xfb\xd4\xb6\
\xed\xf6\xd8\xe6\xfc\x03\xb5\xdb\xec\xa6\x4f\xe5\x3c\x7f\xdf\x6e\
\x92\x99\xf9\x79\xfd\xaf\xdf\xae\x7c\xa3\x48\x6c\xc0\xcf\x08\x6f\
\xf8\x54\x1a\xe3\xcb\x2f\xd0\x98\xbe\xcd\xb2\x11\x1a\xce\x2f\x44\
\x48\x66\x0c\xab\x14\x43\x52\x83\x78\x85\x78\x09\x6a\x26\xf5\x2e\
\x92\x44\x63\x7d\xf9\x84\x44\x09\x2a\x5d\xab\xa6\xb4\xcb\x95\x4a\
\x23\xbc\x39\x85\xfb\xf7\x94\xf6\x65\x23\xab\xc5\x91\xd4\x10\x1e\
\xe1\x10\xde\xba\x7a\xe9\xba\x4e\x35\xb5\x7f\x89\x04\x92\xe0\x2f\
\x2e\x1c\xc4\xab\x97\x2e\x9c\x3b\x7d\xc3\x72\x7b\x72\xa7\xbc\xa9\
\x34\xca\x5b\x4c\x38\x80\x6f\xf9\x93\xc7\x8f\xdd\xb7\xad\x18\x53\
\x27\x99\x90\x30\x11\x61\x3f\xbe\xe3\x8f\x1c\x3e\x7a\x56\x6e\x9c\
\x3e\xb8\x6c\x2c\x49\x0a\x10\x14\xf6\xe1\x07\xfe\xc4\xa9\x33\x37\
\x2d\xcf\x26\x74\x2b\x90\x24\xd4\x7a\x9a\xb8\x17\x3f\xf1\xe7\x2f\
\x5e\x79\xe1\xca\x1c\xdb\x32\xcb\x9b\xd6\xb3\x85\x3d\xf8\x99\xbf\
\x7c\xed\xe6\x43\xa3\x7d\xc1\xf0\x8a\x6f\x5a\x1f\xcc\x12\x76\xe3\
\x97\xfc\x9d\x47\x8f\xe5\x4e\xdd\xc4\x3e\x85\x13\x68\x08\x43\xd8\
\x85\x5f\xf3\xcf\x65\x2a\x8b\x67\xf5\xf8\xe6\x69\xa9\x0c\x61\x27\
\x7e\xcb\x1b\x1c\xb8\x66\x4e\x97\x82\x59\x19\xc2\x0e\xfc\x86\x37\
\x79\x8c\xf3\x87\xb4\x6d\x50\x2b\x3b\x43\xd8\x8e\x5f\xf1\x5a\x87\
\x6b\xc9\xe8\xce\x00\x35\xf2\xb3\x2e\xbd\x0d\xbf\xe4\xcd\x9e\xcc\
\x59\x3d\x9b\x42\xbd\xe2\x7e\xcc\x67\xdd\x8a\x9f\x79\xb5\x47\x3a\
\x77\x60\x6b\x80\xf2\xd1\x02\x8d\xdb\x82\x1f\x79\x99\xcd\xba\x78\
\x44\x07\x80\xaa\x79\x04\x47\x63\x33\xbe\xe7\x9f\x18\x3d\xcb\xa7\
\x75\x6f\x0c\xb5\x8b\xfa\x08\x0f\xdf\x26\x7c\xc7\x2b\xdc\x77\x67\
\xf6\x6b\x05\x0d\xcb\x84\x8a\x8d\xf7\x46\x7c\xc3\x3f\xb5\xea\xe6\
\x0d\x6d\x0f\x50\x25\x87\xf8\x02\x6d\xc0\x9b\x0f\xf4\xce\xa5\xe3\
\xba\x36\x82\x9a\x85\x78\x56\x74\x3d\xca\x5c\x2b\x17\xf6\x6a\x01\
\xf5\x4b\x05\x72\x85\xc0\x3a\x7c\x39\x7b\x50\x1b\x80\x0a\x29\x9c\
\x31\xb3\x76\xd1\xa8\x8e\x00\xd5\xf3\xf1\x06\x59\xc6\x80\x1e\x4d\
\xa0\x6e\x31\x1f\xde\xa8\x8c\xcd\x59\xba\x3e\x94\x8b\xe4\x0f\x63\
\xef\xf4\x92\x95\x73\x7b\xfd\xaf\x3f\xa2\x5e\x03\x5f\x1a\x26\xde\
\x2f\x78\xb2\x0b\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x07\xd2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x0e\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x46\x69\x72\x65\x77\x61\x6c\x6c\
\x12\x81\xae\xae\x00\x00\x00\x17\x74\x45\x58\x74\x41\x75\x74\x68\
\x6f\x72\x00\x4c\x61\x70\x6f\x20\x43\x61\x6c\x61\x6d\x61\x6e\x64\
\x72\x65\x69\xdf\x91\x1a\x2a\x00\x00\x07\x12\x49\x44\x41\x54\x68\
\xde\xed\x99\x4f\x8c\x1c\x47\x15\xc6\x7f\xf5\xaa\x7a\x66\x77\x76\
\xd6\xd8\x5e\x48\xec\x38\x24\x96\x6c\x21\x0c\x82\x0b\xb1\x08\x0e\
\x92\x4d\xe4\x00\x67\x56\x42\xb2\xb9\x70\x41\x42\x04\x8e\x1c\x10\
\x17\x9f\xe0\x8c\x04\x5c\x10\x12\x42\xe2\x80\x84\x48\x10\x48\x31\
\x22\x7f\x7c\x21\xbe\x70\xf0\xc6\x24\x22\x07\xf0\x9a\x38\x9b\x35\
\x24\x78\xbc\xf6\xae\x67\xba\xab\xea\x71\xa8\xee\x9e\x9e\xdd\x99\
\xf5\xec\x65\x1c\x41\x5a\x2a\xf5\x4c\x4d\x4f\xf7\x7b\xf5\x7d\xef\
\xbd\xef\x55\xc3\x07\xc7\x07\xc7\xff\xf7\x61\xc6\x4d\x7e\xf5\xdc\
\xf2\xb9\xe0\xf5\xc7\x22\xe6\xa0\xaa\xf2\xde\xa6\x72\xe7\x91\xe5\
\x99\x1a\xe6\x9c\x45\x5e\xff\x29\x8b\x0b\x73\xa8\x42\xf4\x61\x0d\
\x27\x5f\x7b\xf1\xe2\x8b\x97\x46\xae\xdb\xfe\xc7\x0b\x17\x2e\xc8\
\x1b\x7f\xbb\xfa\xb3\xe3\xc7\x8e\x2f\xec\x5b\xdc\x87\x0f\x9e\xd5\
\xb7\xff\xcd\xd3\xe7\x3e\x3f\xdb\x95\x35\xca\x2f\x7e\xf0\x4b\x0e\
\x3d\xf4\x11\x42\x8c\x6c\xdc\xd9\x78\xe4\xf6\xed\x5b\x3f\x07\x8e\
\xed\xea\xc0\x95\x2b\x57\xf6\xcd\x75\x5a\xae\xd3\xe9\xf0\xfc\xef\
\x9e\x43\x15\x3a\xfb\x1f\xe2\xc0\xb5\x5b\x33\x75\xa0\xe5\x0c\xd7\
\xff\x79\x9d\x37\xae\x5e\xc1\x39\xc7\xa9\xa7\x9e\xa2\xd7\xbb\x75\
\x78\xfb\x75\x32\xe9\x06\xf9\x20\xe7\xb5\x95\xbf\xf2\xd9\x27\x9f\
\xc4\x98\x07\xc3\x6f\x11\xe1\xdd\x77\xdf\xe3\xf4\xe9\x33\x00\xc4\
\x10\x76\x52\x6d\xd2\x9f\xdb\x73\x73\x7c\xfb\x3b\xcf\x92\xe7\x05\
\x31\xef\xf3\xec\xe9\x2e\xfb\xf7\x1f\x98\x89\xe1\xaa\x4a\xef\xd6\
\x7f\x78\xde\x18\xce\x9f\x3f\x8f\x6a\x44\x55\x09\x21\x4e\xe7\x40\
\x08\x81\x18\x02\xaf\x5e\x7e\x15\x55\xa5\xd5\x5e\xc0\x00\x32\x23\
\x28\x74\xe8\x09\x2f\xbf\xf2\x12\xd6\x3a\x4e\x9e\x3c\x49\x8c\x53\
\x3a\x10\x43\x44\x51\xc4\x18\xb4\x91\xa8\x8c\x91\x19\x91\x47\xeb\
\x24\x29\x46\x92\x1d\xaa\xc4\x18\xa6\x47\x00\xc0\x88\x80\x6a\x1d\
\x03\x22\x33\x42\x40\xab\x05\x03\x23\x06\x4a\x07\xc2\xb4\x31\x10\
\x4a\x4f\x13\x02\xc3\x72\x21\x22\x33\x8b\x81\xca\x03\x11\x29\x17\
\x6e\xea\x18\xe8\x11\x43\x07\x55\x4d\x08\x44\x65\xbe\x33\xc7\xe6\
\xe0\x2e\xd7\xde\xba\x3b\xc3\x0a\x6b\x30\xa6\x1a\x82\x2a\xd3\x21\
\xd0\xeb\xc1\x7c\x67\x88\x40\x34\x30\xb8\x37\x60\xa1\xdd\xe5\xf0\
\xe1\x23\x33\x31\x3e\x46\xe5\x9d\x77\x6e\xa4\xc4\xd1\x44\x60\x2f\
\x31\x50\x21\x60\x88\x18\x63\xc0\x90\xce\x33\xaa\xc2\xe9\x3c\x44\
\x41\x55\x89\x7e\xda\x34\xaa\xb1\x46\xa0\x8a\x60\x83\x99\x59\x0c\
\x34\x89\x94\x10\x10\x50\xa6\x47\x20\x86\x90\x56\x5c\x84\x04\x9e\
\x99\x69\x1a\xad\x80\x36\x66\x88\x82\xc2\xf4\x85\x2c\x86\x08\x5a\
\x22\x20\x86\xa0\xcc\x34\x0b\x55\xf4\xa9\xb2\x50\x0a\x62\x9d\x56\
\x4a\xf4\x88\xa1\x8b\x52\xc6\x40\x4c\xf4\xa9\x6f\x3a\x2b\xe3\x1b\
\x4e\x54\x8f\x9d\x8a\x42\xbd\x1e\x74\xba\x61\x04\x81\x56\x7b\x9e\
\xbb\xfd\x3b\xbc\xfe\xe6\xca\xec\xd3\xa8\x48\x72\x64\x2f\x5a\x28\
\x06\x9f\x98\x2f\x06\x13\x0d\x83\x7b\x03\xba\x73\x8b\x1c\x3d\x7a\
\x6c\x46\x69\x34\xb2\xba\xfa\xf7\x84\x7b\x1d\x03\x8a\x4e\xab\x85\
\x8a\xa2\xaa\x03\x92\x04\x77\x7c\x50\xfd\xa2\x41\xc4\xd4\x31\x10\
\xe2\x1e\xd4\xa8\x42\x8d\x80\x79\x50\xf6\x53\x65\xc2\x14\x07\xd3\
\x6b\xa1\x10\x4a\xf9\x5c\x22\x60\xe2\x03\x02\xc0\x20\x7b\x96\x12\
\xf4\xe8\xc6\x85\x1a\x01\xab\x96\x7e\x7f\x93\x9b\xff\xba\x89\xb5\
\x76\x66\xc6\xaf\xbc\x76\xb5\x54\xa3\x65\x10\xa3\x04\xef\xa7\x40\
\xa0\x07\xa1\x13\x30\x9a\xf2\xbe\xaa\xd2\x5d\x98\xe7\xbb\xdf\xfb\
\x3e\x5a\x16\x04\x2d\x35\x6a\x12\x8d\x5a\xcb\xf7\xd1\xf9\xe1\x87\
\xe9\xe7\x9b\xb3\x90\xd9\x61\x01\x55\x35\x7b\x89\x81\x88\x1a\x4d\
\x69\x0c\xc3\x7c\xbb\xcd\x5c\xbb\x85\xaa\xd6\x03\x4d\xb2\x57\xd1\
\x9d\xf3\xf5\x5c\x29\x8d\xab\xdf\x51\x34\x26\x03\x47\xe6\x75\xdc\
\xdc\xf0\xfe\x49\x0b\x45\x54\xf7\xd2\x91\x45\xad\x45\x5c\xba\x7b\
\xca\xcd\x8a\xa2\x66\x68\xec\xa4\xf9\xe1\x5c\xb9\xc2\xe5\x07\x23\
\x0d\x83\xab\x34\xa9\x3b\xe7\x50\xc5\x24\x7f\x1a\x95\x58\xa7\x74\
\x20\x06\xa2\xea\x48\x30\x55\x8a\xf0\xd0\xc3\x0f\x73\xe2\xc4\xc7\
\xc9\xb2\x2c\x3d\x30\xc6\xf2\xdc\x58\x61\x8d\xc3\x15\x9c\x30\x62\
\x8c\x0c\xfa\x03\x5e\xb9\x74\x89\xf5\xb5\xf5\xda\x78\x29\x9f\xe5\
\x9c\xa3\xdb\x5d\xc0\x3a\x57\x3e\x9f\xbd\x75\x64\xe3\xe0\x32\xc6\
\xf0\x89\x4f\x9e\xe0\x89\x93\x4f\x70\xf0\xc0\x12\x22\x16\x6b\x2d\
\x56\x04\x6b\x5d\xf9\x5d\xb0\xe2\xb0\x56\x88\xa5\x33\x31\x46\x62\
\x4c\x46\x87\x18\x28\x8a\x02\xef\x0b\xd6\xd7\xd7\x09\x1a\xb9\xfc\
\xe7\xcb\x0d\xe7\x52\xfe\x1c\x0c\x06\xdc\xdb\xba\x47\x37\xcb\xca\
\x4a\x6c\x86\x9d\xda\x7d\xb3\x50\xe8\x32\xe6\x5a\x5c\xe6\x70\x2e\
\x63\x61\xa1\xcb\x5f\x56\x2e\xd3\x6e\xcd\x71\xe3\xfa\x3a\xa1\x08\
\x0d\x7e\x2b\x4b\x4b\x4b\x9c\xfa\xdc\x29\x0a\x5f\xd4\xc6\x16\x45\
\xd1\xf8\xee\x69\xb7\xda\x1c\x3a\x74\xa4\x94\xec\xc3\xb6\xd5\x94\
\xf1\x33\xd7\x6e\xd3\xbf\xd7\xaf\xeb\x00\x46\xc7\x6f\x41\x8e\xcb\
\x42\xb1\x33\x1e\x01\x14\x82\x0f\x88\x08\x99\x6b\x61\x8d\x65\xd0\
\xef\xf3\xfb\x3f\xfc\xa9\xbe\x24\xcb\x1c\xcf\x9c\x3d\x43\x08\x61\
\xe2\xb0\xd6\x26\x6a\x94\xb9\x5d\x8c\xec\x88\x17\x11\x21\xcb\x5c\
\xb9\x95\x33\x7e\xf5\x77\xa1\x50\x0a\xe2\x49\xf4\xb2\x22\x38\x97\
\xe1\xac\x23\x04\xc5\xb9\x61\x7d\xb0\xce\x12\xa2\x27\x84\x6a\x04\
\x7c\xc3\x78\x00\x67\x1d\x56\x2c\xaa\xe0\x43\x48\x3b\x0f\x55\x90\
\x27\x57\x10\x31\x88\xd8\xd4\x97\x4f\xde\x87\xde\x65\x63\x6b\x82\
\xc7\xc1\x47\x8c\x08\x99\xcb\xb0\xce\x11\x7d\x18\x71\xc0\x89\x10\
\x7c\xd3\xe8\xe4\x88\x2f\x9d\xa9\xfe\xe7\x9c\x43\x4d\x24\x4e\x44\
\xc0\x26\x27\x6a\x09\xbf\x07\x04\xbc\xf7\x63\x29\xa4\xa4\xcd\x25\
\x6b\x2c\x59\x96\x61\xc5\xe1\xa3\x27\x6b\x22\x60\xed\x7d\xa9\xe3\
\xac\xc3\x5a\x87\x51\x43\x88\x15\x02\xc3\x94\x8c\x01\x6b\x25\x69\
\xb1\xfb\x34\x51\xbb\xd6\x81\x71\x31\xe0\xa3\x47\x4a\x0a\x59\xe3\
\x08\x31\xe0\xec\x28\x85\xbc\x0f\x13\xa9\xd3\x1c\x18\x08\xde\x37\
\x10\xd0\x9a\xeb\xc6\xd8\x7a\x57\x2e\xa9\xba\x29\x29\xd4\xeb\xf5\
\xe8\x76\xbb\x13\x29\x14\x7d\x2c\x83\x38\x43\xcc\x4e\x0a\x0d\x11\
\x98\x4c\x9d\x84\x82\x45\x55\x1b\x31\xd0\x40\x00\xb0\xb6\xd1\xd0\
\x30\x31\x09\x8d\x45\xc0\xe4\x45\x6e\x54\x23\x59\x96\x51\x14\xc5\
\x0e\x99\x21\x62\x4b\x07\x04\x1f\x74\xa4\x57\x36\x26\xd1\x62\x37\
\xea\x38\x6b\x11\x6b\xc9\xf3\x1c\xef\xe3\x58\x04\xaa\x5e\xb8\xda\
\xce\x8c\x3b\xdf\x2a\xe9\x38\x07\x04\xd8\x42\x59\xb9\xf8\xc2\x1f\
\x3f\x7d\xf6\x99\xb3\xed\xdb\xb7\x7b\x0d\x27\xd2\xfe\x64\x45\xa1\
\x56\xd6\x66\xb1\xbb\xc0\xe3\x8f\x3f\x36\x52\x65\x97\x0e\x2e\xed\
\x4a\x1d\xeb\x1c\x21\x04\x8a\x22\x27\x46\x3f\x16\x01\x11\xc1\x48\
\xd9\x17\xeb\x48\x10\x67\x40\x25\x4b\xd5\x6d\xeb\x1f\x2c\xd0\xce\
\xf3\xfc\x2b\xab\xd7\xae\xff\xfa\xe2\x0b\x17\x3f\xf3\xa5\x2f\x7f\
\xb1\xbd\xb1\xb1\x41\x51\x14\xe5\xd6\x46\xa8\x29\xd4\x6a\xb5\x58\
\x5e\x5e\x9e\x58\xb0\xc6\x52\xc7\x59\x0c\x06\x1f\x0a\x06\x79\x9e\
\xea\xca\x38\x04\x8c\xa4\x51\x2a\xe2\x72\x6b\xdd\x00\x5d\xe0\x6e\
\xe9\xc4\x0e\x07\x32\xa0\x03\x2c\x16\x45\xf1\xc3\x1b\x6f\xbd\xfd\
\xa3\x97\x5f\xba\xf4\xd1\x2f\x3c\x7d\x26\x13\x31\xa8\x1a\x62\x8c\
\x64\xae\xc5\x63\x8f\x1e\x45\x24\xc9\x85\x10\x3c\x85\xf7\xc9\x89\
\xa2\x20\xf7\x39\x79\x9e\xa3\x31\xd6\x3c\x16\x23\x58\x2b\x88\x38\
\xf2\x7c\x90\x10\x2a\x52\x7c\xcc\x77\xe6\x6b\xd9\xa1\x51\x89\x51\
\x69\xb5\x32\x5a\xed\x56\xd2\x5c\x11\xd6\xd6\xd6\x10\x91\x8d\x18\
\xe3\x12\x50\x00\x61\x7b\x75\xb0\xc0\x3c\x70\x00\xf8\x30\xf0\x31\
\xe0\x53\xce\xb9\xaf\x7b\xef\x8f\xa4\x37\x87\x8e\x6f\x7e\xeb\x1b\
\xf5\xaa\xd4\xc2\xac\x94\xc1\x93\xce\xa3\x42\x8e\x86\xd8\x8b\x6c\
\x6d\xf5\xf9\xed\x6f\x9e\xbb\x6f\x83\x63\xad\xdd\x0a\x21\xfc\x04\
\xf8\x15\xb0\x0a\x6c\x02\xc1\x6c\xe3\xff\x1c\xb0\x0f\xd8\x0f\x3c\
\x0a\x1c\x07\x3e\xb4\xdb\xbb\xb4\x19\x1e\x9b\xc0\x3f\x80\x37\x81\
\x75\x60\x0b\x08\x6e\xdb\x6b\x91\xa2\xbc\x30\x02\x7d\xe0\x66\x49\
\x2b\xf3\x3e\x70\xa0\x28\xb9\xdf\x03\x06\x75\xeb\x30\x26\x0b\xd9\
\xc6\x78\x3f\x18\x3e\x92\xc5\xcb\xe1\xab\x36\xcb\x4c\xda\xcd\xd8\
\x55\x41\x3d\xb8\x43\x1b\xf9\x54\xf9\x5f\x38\xfe\x0b\xdd\x6a\xdf\
\xcf\x7f\x71\xb0\x56\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x08\x17\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x07\xde\x49\x44\x41\x54\x78\xda\xec\xcf\x01\x06\xc0\x30\
\x0c\x46\xe1\xb1\x92\xad\xac\xb4\xf7\xbf\x6b\xf6\x46\x46\x00\x6c\
\x85\xcd\x5f\x3e\x28\x92\xbc\xc5\xdd\x3f\xed\x1f\x01\x0a\x50\x80\
\x02\x14\xa0\x00\x05\xf0\x0c\x0d\x03\x3d\x8c\xf8\xb3\x37\x4b\x78\
\x05\x1b\xca\xec\x39\xf9\xf8\xfb\xd8\x8a\x3d\xd4\x14\x65\x0f\x16\
\xae\x38\xd0\xc3\xd4\x39\x39\xa0\x5d\xce\x76\xcc\x01\x3a\xb2\x26\
\x8f\xe2\x9f\x6d\xcc\xda\xb6\x6d\xdb\xb6\x6d\x7b\x77\x6c\x7b\x26\
\x1c\x24\xbd\xe9\xd8\x1a\x3b\x36\xda\x78\x6d\x77\xd0\x78\x9d\x4e\
\xed\xff\xd6\x74\xf5\xe9\x6c\xf2\xe2\xe5\x97\x3a\xa7\x4e\x6a\x72\
\x5e\xfd\xea\xde\xfa\xdf\xaa\xf7\x26\x8d\xcd\xb5\x05\x4d\xa7\xeb\
\x58\x66\x3f\x7d\xae\xa1\xe8\x0d\x6f\x78\x03\x26\xdc\xbf\x80\x05\
\x6f\x4c\x99\xe7\xf3\x56\x6f\x5d\xfd\xf8\xb2\x4a\xf5\x7b\x52\xeb\
\x3c\xb0\x9c\x1c\xf1\x20\x5c\xdd\x0d\xc1\x93\x93\x93\x2c\xb3\x9d\
\x3a\xdb\xc0\xfa\xfb\xfb\x5f\xf6\xf9\xcf\x7f\xfe\x89\xf3\x5c\xf4\
\x76\xc0\xd1\x1f\xf7\xb8\xc7\xdd\x5d\xdf\x54\xfb\xbd\xe6\x33\x0d\
\x61\xb0\xb3\xb2\xb2\x1e\xf7\xc6\x37\xbe\x71\xd5\xe2\x39\xf5\x69\
\xce\xbb\xde\xf5\xae\xc7\x4d\x31\xf0\x81\x0f\x7c\xe0\x3e\x61\xc0\
\x64\xd6\x33\xb3\xc5\x80\x31\x37\x10\x8b\xc5\x7e\x70\xf6\xec\xd9\
\x57\xcf\xa3\xcc\xf7\xa5\x36\xe3\x9e\xe2\x62\xd5\x1b\x9b\xcf\x36\
\x0c\x76\xf6\xb4\x47\xa2\xd1\x28\x03\x7b\x6c\x6c\xec\x8b\x0d\x0d\
\x0d\xaf\x5c\x1c\xa7\x8d\x38\x91\x34\xe7\xf2\xe5\xcb\xaf\x99\x62\
\xe0\x17\xbf\xf8\xc5\x43\x75\x8d\x55\x5c\xb4\xc5\x6a\xa4\x6e\xe2\
\xe3\x86\xe6\x1a\x96\x48\x24\x7e\xa7\xd3\xe9\xde\x3b\x4b\x99\xef\
\x06\x03\x0b\xaf\xdf\xb9\xfe\x49\x75\x8d\xd5\x27\xcf\x9e\x3f\x15\
\xf1\x07\x7c\x6c\x62\x62\x82\xa1\x11\x1b\x9c\xdf\x2c\x9c\xd3\x1c\
\xf1\x05\xbc\xc4\x49\x4c\xe1\x58\x2c\x96\x8f\x4c\x31\xb0\x7a\xf5\
\xea\x87\xaa\x6a\xcb\xb8\x68\xab\x64\x66\x92\xcd\xc2\xc7\x35\xf5\
\x95\x98\xf0\x7b\xb3\xd9\xfc\x01\x85\x32\x63\xc1\x07\x1e\x7d\xf4\
\xd1\x7b\xca\xab\x8a\x7f\x52\xdb\x50\x39\x62\x34\xeb\x13\x10\x1e\
\x1e\x09\x31\x9b\x5d\x62\x68\x60\x2f\x9c\xa3\x4b\x73\x24\x87\x75\
\x0a\xc7\xe1\x70\x7c\x62\x9a\x01\x9a\x08\xd1\xb4\xa8\x95\xd9\x1d\
\x12\x1f\x57\xd5\x4c\x5b\x58\x5c\x67\xf7\x8b\x32\x9f\x50\xe5\xbd\
\xa5\xaa\xa6\x54\xdb\xda\x7e\x2d\x12\x8b\x45\x19\x4a\xed\x70\xda\
\x98\x8d\x36\xc1\x41\x1c\x34\xb0\x17\xc5\x71\x49\x24\xde\x82\x9f\
\x82\xa3\x6c\xa0\xb8\x4c\xc5\x45\x63\x71\xa7\xd3\xce\xc7\x74\xea\
\xd3\x0b\xd3\x33\x37\x65\x96\x79\xcb\x96\xd5\x4f\x2e\xad\xf8\xbb\
\xba\xbe\xa9\x1a\x71\xc1\x73\xcc\xe3\x75\xa1\x82\x7c\x13\xc0\x71\
\xb9\x1d\x0c\x0d\xec\xb9\x38\x75\x99\x1c\x1f\x71\x6c\x26\x66\x73\
\x12\xc7\x6d\x63\x6e\xaf\x53\x70\x94\x0d\xa8\xd4\x27\xb8\x68\x97\
\xcb\xc1\xdc\x6e\x27\x1f\xab\x4b\x0b\xd3\x0b\x7f\xe6\x33\x9f\x79\
\x3c\xca\xfc\xbc\xe7\x3d\xef\xde\x93\x45\xc7\x7f\x45\xb0\x51\x83\
\x49\x37\x81\x32\x07\x43\x01\x7e\x6e\x24\x12\x6f\x27\xf1\x4e\x12\
\x0f\x86\xd7\xeb\x66\x68\x60\xcf\x8b\x13\x0e\x70\xe1\x92\x9d\x38\
\x4e\x89\x39\xdd\x76\x6e\xc6\xe7\xf7\x08\x8e\xb2\x81\x93\x85\xf9\
\x5c\xb4\xc7\xe3\xa2\x85\x5d\x7c\xac\x2a\x4a\x4f\x78\xdf\xaf\x7e\
\xf5\xab\x47\x8e\xe4\x1c\x78\x07\x41\x0c\x57\xaf\x5d\x8a\xd2\xed\
\xc4\xc6\xc7\xc7\x10\x15\x1c\x7c\x9c\x1b\x1e\x3d\xa7\xcb\xce\xdc\
\x1e\x12\xef\x73\xb3\x00\xed\x28\x1a\xd8\x8a\x9c\x38\x71\x22\xc4\
\x71\x10\x07\xbb\xce\x23\x43\xd5\xf3\x38\x98\x87\x18\xfe\x80\x97\
\x36\xc8\x2f\x38\xca\x06\xf2\x4f\x64\x73\xd1\x3e\x9f\x9b\x3a\x1c\
\x4f\xb2\x13\x85\x79\x2c\x99\x4c\xfe\xce\x68\xd4\x7d\xe6\xa4\x2a\
\xbf\xba\xb2\xba\x74\x1c\xa2\x64\x59\x26\xa1\x0e\xba\x72\x0d\xd8\
\x79\x91\x77\x54\x0f\x1b\x80\xf9\x5c\xbc\xa8\x00\xd8\x33\x72\x12\
\x32\x17\x6a\x96\x0c\xcc\xca\x77\x9d\xaa\x97\x8a\x8c\xd7\x4f\x8c\
\xa0\x8f\x85\xa8\x2a\x23\xa3\x61\xc1\x51\x36\x90\x9d\x77\x98\x1b\
\xf0\xfb\xbd\x7c\x71\x8c\xf3\x8e\x67\xb1\x73\xe7\x4f\xd7\x9d\x28\
\xc8\x8b\x1a\x8c\x7a\x5e\x66\xca\xa9\x78\x57\x4c\xcb\x3b\xce\x00\
\xca\x0d\x06\xc6\xa8\x06\x1a\xd8\x82\x23\xe2\xe2\x27\x71\x10\x6e\
\x91\x8c\xfc\xa0\xda\x5d\x12\x37\xc3\x23\x13\x20\xf1\x21\x3f\xbf\
\x81\x46\xc7\x46\x50\x21\xc1\x51\x36\x70\x24\xeb\x00\x17\x1d\x0c\
\xfa\x51\x7e\x3e\xce\x3d\x96\xc5\xae\x5c\xb9\x98\x88\xc7\xe3\x00\
\x91\x68\x23\x33\x9a\x20\xde\x38\x53\xde\x21\x9c\xba\x87\xaa\xe0\
\x24\xf1\x36\x32\x67\x61\x68\x60\x5f\xb9\x7a\x9d\x33\x36\x3e\xca\
\xa3\x62\xb2\xd2\x06\xa4\x23\x23\x76\x9d\x18\x41\x8a\x0c\xdf\xf5\
\x10\x7f\x36\x12\x1d\x67\x88\x99\xe0\x28\x1a\x38\x70\x78\x0f\x8f\
\x0d\x76\xd0\xed\x71\x70\x03\x59\xb9\x07\xd9\xa5\x2b\x17\xc4\xc2\
\x94\x73\xb3\x62\xde\x91\x55\x44\x07\x73\x1d\x4e\x89\x1f\x44\x33\
\x89\x44\x03\x9b\x73\xe4\x38\x76\x93\xdf\x2e\x52\x3a\x32\x76\x2e\
\xde\x27\x22\x33\x12\xa4\xcd\x0a\xf3\xe7\xa2\xb1\x08\xc3\x1c\xf1\
\x22\x03\x47\xd1\xc0\x9e\xfd\x3b\xb8\x68\x0f\xc1\x10\x09\x8c\xf7\
\x1f\xda\xc3\x1a\x1a\x6b\xcf\x1c\xcd\x3e\x28\xeb\x0d\xba\x24\x35\
\x16\x0e\x87\xb0\xeb\xd3\xf2\x0e\x13\x88\x11\x6e\x0f\xec\xac\xc9\
\xa2\x67\x7a\x93\x86\xa1\x81\x2d\x38\x06\x23\xe7\x20\xd7\x10\x8e\
\xc8\x88\x83\x2a\x22\x93\xda\xf5\x28\x3f\x23\x13\xc9\x09\x20\x04\
\x47\xd9\xc0\xce\xdd\x5b\xb9\x68\xec\x2a\x4a\x8f\xf1\xde\x7d\x3b\
\x31\xef\x6f\x2e\x97\x6b\x67\xa1\xea\xb8\xa6\xa0\xf0\xb8\x1c\x08\
\x06\x18\x32\x1c\x0a\x05\xa7\xe5\xdd\xee\xb0\x52\xa6\x21\x5e\xc7\
\xf4\xc6\x61\xa6\xd1\x0f\x30\x34\xb0\xa7\x70\x54\xc7\x65\x5c\xbd\
\x10\x07\xc1\xe2\xa0\xa2\xca\x78\x81\xc5\xe3\x31\x08\x65\x30\x8a\
\x06\x2d\x82\xa3\x68\x60\xeb\xf6\x8d\x78\x30\x25\xc2\x88\xb1\x58\
\xf8\xaf\x04\xfa\x23\xf5\x3f\x75\x76\xb5\xe7\x1f\x3c\xbc\x2f\x74\
\xfa\x6c\x73\x02\x37\x91\x2c\xc7\x79\xe6\x45\xde\x31\x8f\x3e\x01\
\x98\xce\x38\xc4\x86\x75\xfd\x6c\x40\xd3\xc3\xd0\xc0\x9e\x89\x73\
\xe6\xdc\x29\xd2\x23\xf3\x88\x44\x48\x78\x34\x16\x05\x13\x1b\x24\
\x44\xe3\x27\xba\xe0\x28\x1b\xd8\xb4\x65\x1d\x1e\x4c\x95\x5f\xc7\
\xc7\xdb\x76\x6c\x62\x58\x34\x18\x0c\x7e\xd5\xef\xf7\x7f\x9a\x44\
\xff\x9c\xce\xc3\xdf\x6a\xeb\xaa\x9a\x76\xed\xd9\x16\x1f\xd6\x0c\
\xa6\xe2\x10\xe2\xe2\x0d\x66\x2d\xd3\x1a\x86\xd8\x10\x89\xef\x1f\
\xee\xa6\x7e\xdd\x00\xd8\xb3\x71\x74\x7a\x6d\x12\xeb\x81\x85\x7e\
\xbd\x41\x38\x3a\xff\x9d\xe0\x28\x1b\x58\xb7\xe1\x6f\x7c\x82\xd9\
\xca\xb3\x8b\xb1\x58\xf8\xf7\xf4\xf5\xf7\x61\xb5\x5a\x7d\x73\x5b\
\x5b\xdb\x33\x47\x46\x46\xbe\x86\x2f\x42\xba\x79\xb6\xe6\x1d\xcf\
\x1e\x38\x7c\xf4\x80\xec\xf6\xb8\x00\xe6\xb7\xc9\x90\xb6\x8f\xf5\
\x0d\x75\xf3\xdd\x47\x15\xd0\xc0\x9e\x8d\x93\x7f\x3c\x67\xe0\x68\
\xf6\x21\x39\x14\x0e\x65\xee\xba\x30\x84\x8a\x08\x8e\xb2\x81\xbf\
\xad\xf9\x13\x26\xa5\x76\x71\x10\x63\x4c\x98\xf6\x11\xb6\x77\xef\
\xde\xdb\x07\x06\x06\x5e\x37\x3e\x3e\xfe\x03\x88\xea\xec\x6c\xcf\
\xde\xb1\x6b\xab\xbf\xa2\xb2\x54\x1e\x8f\x8c\xf3\x4c\x1b\x68\x03\
\x86\x29\xff\x3a\x3a\x07\x68\x60\xcf\x87\xb3\x6b\xcf\x76\x7f\x7d\
\x43\xad\x4c\xcf\xa6\x0c\x4c\x40\x3c\x22\x26\x38\xca\x06\xfe\xfc\
\xd7\xdf\x63\x12\xf2\xcb\x77\x11\xe3\xd5\x6b\xff\xac\xf8\x19\x5c\
\x53\x53\xf3\xa0\xd3\xe9\xfc\x10\x7d\x52\xfc\x9c\x9e\xf9\x4b\x65\
\x55\x59\xdd\xfa\x8d\x6b\xe2\xed\x9d\x6d\xc9\x04\x2d\x18\x0c\xfb\
\xf9\x8b\x0a\x0d\xec\xf9\x73\xca\xeb\x36\x6e\x5e\x1b\x1f\x1c\xea\
\x4f\x8a\x08\xc1\x90\xe0\x28\x1a\xf8\xfd\x1f\x7f\x0d\xd1\x28\x3b\
\xb2\x8b\xb1\xe2\xc2\xa2\xe3\xcb\x92\xfe\x77\xf4\xb4\x70\x38\xfc\
\x65\x7a\xee\xd7\x1e\x8f\x67\xf3\x91\xa3\x07\x7a\xb7\xef\xdc\x22\
\xdb\x1c\x52\xba\xf4\x60\x2f\x94\x73\x34\xeb\x70\xef\xce\xdd\xdb\
\x64\xaf\xcf\x8b\x50\x09\x8e\xb2\x81\x5f\xff\xf6\xe7\x10\x4d\xd9\
\xed\xc5\x01\xc4\x78\xce\x85\x45\xa7\x5c\xdf\xd6\xd7\xd7\xf7\x6a\
\x8a\xc3\xf7\x11\x87\xae\xae\x8e\x23\x1b\x37\xad\xf5\xd0\x95\x29\
\x47\x28\x56\x60\x2f\x96\xb3\x69\xf3\x3a\x4f\x91\xba\x50\xa6\x43\
\x0f\xce\x8c\x06\x1e\xfa\xc3\x1f\xfe\xf0\xf0\x2f\x7e\xf5\x13\x2e\
\x3a\xb3\xfd\xe6\x77\xbf\x54\x58\x58\x51\xc0\xfd\x92\x24\xbd\x9f\
\xe2\xf0\x33\x9a\xf7\xe7\xf2\x8a\x92\xaa\x3f\xfe\xf9\x77\x31\xb0\
\x97\xca\xf9\xd3\x5f\x04\x67\xba\x81\xfb\xbe\xfd\xed\x6f\x3f\x6e\
\xdd\xc6\xd5\xcd\x3f\xf9\xd9\x0f\x58\x46\xc7\x8b\xac\x37\xb5\xf0\
\x1b\x38\x7c\x7e\xfd\xc6\xf3\xe7\xcf\x3f\x25\x10\x08\x7c\x11\x71\
\xf0\xf9\x7c\x1b\x2f\x5d\xba\x90\x0f\x8e\xc9\x64\x7a\xdb\x32\x70\
\x60\xe0\xbd\x53\xfe\xb0\xf5\xc9\x4f\x7e\xf2\x09\x74\xc5\xbd\x1b\
\x9f\xbc\x38\xec\x99\x1d\xbf\xb3\x5a\xad\x4f\xe4\xd0\x05\xf4\x23\
\x47\x8e\xdc\x4a\xb7\xcc\xcb\x29\x0e\xdf\x05\x03\x57\xa6\x46\xa3\
\x79\xd2\x72\x70\xec\x76\xfb\x53\x84\x01\xde\xe9\x8f\x57\x77\xb6\
\xb6\xb6\xbe\x08\x4e\xff\xc9\xc0\xb7\x28\x7b\x2f\xc6\x6e\xa4\xa1\
\x0b\x17\x70\x17\x1d\xca\xe7\xd0\xf8\x11\xfc\x7b\x79\x38\x2b\x7f\
\x9d\xfe\xcf\xf7\x15\x03\x2b\x06\x56\x0c\xac\x18\x58\x31\xb0\x62\
\xe0\x1f\xf0\x4c\x83\x8a\xd5\x02\xe4\xbc\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x18\xdb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x2c\x00\x00\x01\x2c\x08\x02\x00\x00\x00\xf6\x1f\x19\x22\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x66\x69\x54\x58\x74\x58\x4d\x4c\
\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\x6d\x70\x00\x00\
\x00\x00\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\
\x69\x6e\x3d\x22\xef\xbb\xbf\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\
\x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\
\x7a\x6b\x63\x39\x64\x22\x3f\x3e\x20\x3c\x78\x3a\x78\x6d\x70\x6d\
\x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\
\x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\
\x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\
\x43\x6f\x72\x65\x20\x35\x2e\x30\x2d\x63\x30\x36\x30\x20\x36\x31\
\x2e\x31\x33\x34\x37\x37\x37\x2c\x20\x32\x30\x31\x30\x2f\x30\x32\
\x2f\x31\x32\x2d\x31\x37\x3a\x33\x32\x3a\x30\x30\x20\x20\x20\x20\
\x20\x20\x20\x20\x22\x3e\x20\x3c\x72\x64\x66\x3a\x52\x44\x46\x20\
\x78\x6d\x6c\x6e\x73\x3a\x72\x64\x66\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\
\x39\x2f\x30\x32\x2f\x32\x32\x2d\x72\x64\x66\x2d\x73\x79\x6e\x74\
\x61\x78\x2d\x6e\x73\x23\x22\x3e\x20\x3c\x72\x64\x66\x3a\x44\x65\
\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\
\x6f\x75\x74\x3d\x22\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\
\x4d\x4d\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\
\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\
\x6d\x6d\x2f\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x74\x52\x65\x66\
\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\
\x65\x2e\x63\x6f\x6d\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x73\x54\
\x79\x70\x65\x2f\x52\x65\x73\x6f\x75\x72\x63\x65\x52\x65\x66\x23\
\x22\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x6d\x70\x3d\x22\x68\x74\x74\
\x70\x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\
\x2f\x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x4f\x72\x69\x67\x69\x6e\x61\x6c\x44\x6f\x63\x75\x6d\x65\x6e\
\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x30\x35\x38\
\x30\x31\x31\x37\x34\x30\x37\x32\x30\x36\x38\x31\x31\x41\x38\x36\
\x35\x43\x30\x33\x36\x33\x46\x31\x37\x39\x33\x33\x45\x22\x20\x78\
\x6d\x70\x4d\x4d\x3a\x44\x6f\x63\x75\x6d\x65\x6e\x74\x49\x44\x3d\
\x22\x78\x6d\x70\x2e\x64\x69\x64\x3a\x42\x44\x45\x45\x32\x39\x38\
\x37\x43\x46\x32\x37\x31\x31\x45\x31\x39\x34\x46\x42\x38\x31\x36\
\x33\x43\x33\x35\x38\x46\x43\x37\x46\x22\x20\x78\x6d\x70\x4d\x4d\
\x3a\x49\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x42\x44\x45\x45\x32\x39\x38\x36\x43\x46\x32\
\x37\x31\x31\x45\x31\x39\x34\x46\x42\x38\x31\x36\x33\x43\x33\x35\
\x38\x46\x43\x37\x46\x22\x20\x78\x6d\x70\x3a\x43\x72\x65\x61\x74\
\x6f\x72\x54\x6f\x6f\x6c\x3d\x22\x41\x64\x6f\x62\x65\x20\x50\x68\
\x6f\x74\x6f\x73\x68\x6f\x70\x20\x43\x53\x35\x20\x4d\x61\x63\x69\
\x6e\x74\x6f\x73\x68\x22\x3e\x20\x3c\x78\x6d\x70\x4d\x4d\x3a\x44\
\x65\x72\x69\x76\x65\x64\x46\x72\x6f\x6d\x20\x73\x74\x52\x65\x66\
\x3a\x69\x6e\x73\x74\x61\x6e\x63\x65\x49\x44\x3d\x22\x78\x6d\x70\
\x2e\x69\x69\x64\x3a\x30\x35\x38\x30\x31\x31\x37\x34\x30\x37\x32\
\x30\x36\x38\x31\x31\x41\x38\x36\x35\x43\x30\x33\x36\x33\x46\x31\
\x37\x39\x33\x33\x45\x22\x20\x73\x74\x52\x65\x66\x3a\x64\x6f\x63\
\x75\x6d\x65\x6e\x74\x49\x44\x3d\x22\x78\x6d\x70\x2e\x64\x69\x64\
\x3a\x30\x35\x38\x30\x31\x31\x37\x34\x30\x37\x32\x30\x36\x38\x31\
\x31\x41\x38\x36\x35\x43\x30\x33\x36\x33\x46\x31\x37\x39\x33\x33\
\x45\x22\x2f\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\
\x69\x70\x74\x69\x6f\x6e\x3e\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\
\x46\x3e\x20\x3c\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x20\
\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x65\x6e\x64\x3d\x22\x72\
\x22\x3f\x3e\x9b\x80\x85\x2f\x00\x00\x15\x0b\x49\x44\x41\x54\x78\
\xda\xec\x9d\x6b\x57\x22\x39\xd7\x86\xa5\x80\x06\x01\x45\xc0\x13\
\x9e\x6d\x75\xa6\x7b\xfe\xff\xcf\x98\xef\x3a\xb6\x67\x11\x15\x14\
\x45\x10\x10\x39\x3c\xf7\xaa\x5a\xcb\xd7\xb7\xdb\x8a\xa8\x05\x64\
\x27\xf7\xf5\x81\x45\xab\x0d\xa9\x64\x5f\x95\x63\x25\xa1\x7f\xff\
\xfd\x77\x82\x10\x32\x3e\x1c\x66\x01\x21\x94\x90\x10\x4a\x48\x08\
\xa1\x84\x84\x50\x42\x42\x08\x25\x24\x84\x12\x12\x42\x28\x21\x21\
\x94\x90\x10\x42\x09\x09\xa1\x84\x84\x10\x4a\x48\x08\x25\x24\x84\
\x50\x42\x42\x28\x21\x21\x84\x12\x12\x42\x09\x09\x21\x94\x90\x10\
\x4a\x48\x08\xa1\x84\x84\x50\x42\x42\x08\x25\x24\x84\x12\x12\x42\
\x28\x21\x21\x94\x90\x10\x42\x09\x09\xa1\x84\x84\x10\x4a\x48\x08\
\x25\x24\x84\x50\x42\x42\x28\x21\x21\x84\x12\x12\x42\x09\x09\x21\
\x94\x90\x10\x4a\x48\x08\xa1\x84\x84\x50\x42\x42\x08\x25\x24\x84\
\x12\x12\x42\x28\x21\x21\x94\x90\x10\x42\x09\x09\xa1\x84\x84\x10\
\x4a\x48\x08\x25\x24\x84\x50\x42\x42\x28\x21\x21\x84\x12\x12\x42\
\x09\x09\x21\x94\x90\x10\x4a\x48\x08\xf1\x27\xc2\x2c\x90\x42\x3c\
\x1e\x77\x1c\x27\x14\x0a\x4d\x4e\x4e\x2a\xfe\xac\xd9\x6c\xf6\xfb\
\xfd\x5e\xaf\xd7\x6a\xb5\x98\x69\x94\x90\x7c\xaa\x48\x22\x91\x58\
\x2c\x06\xd3\x60\x1d\xde\xc4\x5d\x3e\xfd\x69\x2d\x97\xa7\xa7\x27\
\xbc\xc2\x4f\xbc\xe9\x74\x3a\xcc\x64\x4a\x48\x5e\xf5\x07\x1c\x27\
\xe9\x02\xdf\x12\x89\x04\x5e\xc3\xe1\x70\xb0\xf5\xe7\x6f\x0e\x77\
\xbb\x5d\xa8\xd8\x68\x34\xf0\xfa\xe8\x82\x6a\x93\x05\x41\x09\xed\
\x02\x9a\x4d\x4d\x4d\xa5\x52\x29\xbc\x42\xbc\xd1\x7f\x7b\xc2\xe5\
\xe5\x27\x10\xb2\x56\xab\xd5\xeb\x75\xbc\x42\x51\x16\x10\x25\x34\
\x16\xc4\x7d\x3a\x9d\x9e\x72\xd1\x2d\x61\x60\x61\x61\x01\xef\x6b\
\x2e\xd5\x6a\x15\x66\xb2\xc8\x28\xa1\x39\xee\xcd\xcc\xcc\xe4\x72\
\xb9\x6f\xdf\xbe\xe9\x9f\x5a\xef\x1e\xb1\xb4\xb4\xd4\x6e\xb7\x6f\
\x6f\x6f\xef\xef\xef\x69\x23\x25\x94\x0a\x7a\x77\x10\x2f\x9b\xcd\
\xe2\x8d\xc4\xf4\xe3\x96\x91\x77\x41\xd7\xb1\x52\xa9\x40\x48\xbc\
\x61\xb1\x52\x42\x01\x84\x42\x21\xd4\x7b\xb3\xb3\xb3\xd3\xd3\xd3\
\xc6\xdc\x4d\x3c\x1b\x1f\x1e\x1e\x6e\x6e\x6e\x50\x37\xf6\xfb\x7d\
\x16\x34\x25\xd4\x32\x2b\x23\x91\xf9\xf9\xf9\xb9\xb9\x39\xbc\x31\
\xf2\x02\xa7\x5d\x3a\x9d\x4e\xb9\x5c\x2e\x95\x4a\x9c\xea\xa0\x84\
\x7a\xe9\xb7\xb8\xb8\x08\xfd\x1c\xc7\xb1\xe1\x62\x51\x2b\x2e\x2c\
\x2c\x40\xc5\xab\xab\x2b\xaa\x48\x09\xc7\x4c\x34\x1a\x85\x7e\x68\
\x7c\xda\xa0\xdf\x6b\x70\xbd\xf0\x10\xf7\x1d\x34\x50\xa1\xe2\xf3\
\xf3\x33\x83\x81\x12\x8e\xa1\xa7\x04\xfd\x72\xb9\x1c\x3a\x81\xd6\
\x66\x02\x54\xf4\x5a\xe0\xb7\xb7\xb7\x50\x91\x23\x37\x94\x70\x74\
\xed\xb1\xe5\xe5\x65\xcb\xf5\x7b\x0d\xf2\x01\x6d\x01\x64\x08\x54\
\xbc\xb8\xb8\x60\x03\x95\x12\x0e\x17\xdc\xf8\x97\x96\x96\x82\x5d\
\x59\x66\x92\x8a\x99\x4c\xa6\x58\x2c\xa2\xbb\xc8\x11\x54\x4a\x18\
\x3c\xc9\x64\x72\x6d\x6d\x6d\xf4\xab\xcc\x64\x81\xdb\xd3\xea\xea\
\x2a\x6a\xc5\xb3\xb3\xb3\xc7\xc7\x47\x66\x08\x25\x0c\xb2\xfd\x89\
\xdb\x3c\xb3\x62\x40\x70\xab\xfa\xf1\xe3\xc7\xcd\xcd\x0d\x5b\xa7\
\x94\x30\x00\xe0\x1e\x0c\x34\x75\xea\x6f\xd8\x59\x37\x33\x33\x03\
\x0f\x61\x23\x73\x83\x12\x7e\x86\x58\x2c\xb6\xbe\xbe\xae\xdb\x62\
\x6b\x71\x8d\x08\xe4\x61\x36\x9b\x3d\x3d\x3d\xe5\xd8\xa9\x1f\xdc\
\xde\xe2\x6d\x32\x99\xcc\xcf\x9f\x3f\x69\x60\x20\x20\x1b\x91\x99\
\x50\x91\x59\xc1\x9a\x70\xb0\xdb\x92\xe3\x78\x37\x6f\x66\x45\x80\
\x84\xc3\xe1\xcd\xcd\xcd\x74\x3a\x8d\x2a\x91\xcf\x10\x53\x42\x15\
\xf1\x78\x7c\x7b\x7b\x5b\xe8\x73\x0f\xfa\x83\x5b\x5b\x32\x99\x3c\
\x38\x38\xe0\xfe\x37\x6c\x8e\xaa\x9a\xa0\x34\x70\xd8\x3d\x6d\x64\
\x32\xb2\x9a\x59\x41\x09\xff\x1f\xa1\x50\x68\x65\x65\xe5\xfb\xf7\
\xef\xb6\x2d\x01\x1d\x57\x83\x1f\x59\x8d\x0c\xe7\x92\x23\x36\x47\
\xff\x2f\x26\xd0\x5d\x99\x99\x99\x61\x56\x8c\x92\x85\x85\x05\xd4\
\x8a\xc7\xc7\xc7\xec\x22\xda\x7e\xe3\x8f\x44\x22\x3b\x3b\x3b\x34\
\x70\x2c\x20\xdb\x91\xf9\x9c\x83\xb5\x5a\xc2\x68\x34\xfa\xd7\x5f\
\x7f\xa5\x52\x29\xfa\x30\x2e\x90\xf9\x28\x02\x14\x04\x25\xb4\x91\
\x78\x3c\xfe\xe3\xc7\x0f\xf5\x6e\xd6\x64\x04\xa0\x08\x50\x10\x5f\
\xd9\xe0\x98\x12\x8a\x24\x99\x4c\xfe\xfd\xf7\xdf\x22\xb6\x3f\xb3\
\x01\x14\x04\x8a\x03\x85\x42\x09\x6d\x61\x7a\x7a\x1a\x4d\x20\x76\
\x45\x74\xeb\x9c\xa3\x50\x8c\xd9\x1d\x8b\x12\xbe\x63\xe0\xf6\xf6\
\x36\xa7\x22\x74\x8c\x45\xc7\x41\xd1\x58\xe8\xa1\x5d\xb1\x88\x06\
\xcf\xd6\xd6\x16\xa7\xa7\xb4\x05\x45\x83\x02\xb2\xad\x5d\x6a\x91\
\x84\xb1\x58\x8c\x75\xa0\x94\xfa\xd0\xaa\x75\x4b\xb6\x44\xa4\x37\
\x1b\xc1\x7e\xa0\xa0\xfe\xa1\x3d\xf3\x16\x56\x48\x18\x0e\x87\x77\
\x76\x76\x38\x16\x2a\x08\x14\x16\x8a\xcc\x92\xbd\x7c\xcc\x97\xd0\
\x6b\xde\x70\x3e\x50\x1c\x28\x32\x4b\xba\x0f\x86\x5f\x21\x3a\xfa\
\x9b\x9b\x9b\x5c\x13\x23\x14\x14\x1c\x8a\xcf\xf8\x81\x34\xc3\x25\
\x5c\x5e\x5e\xe6\xba\x50\xd1\xa0\xf8\x50\x88\x94\x50\x2a\x99\x4c\
\xc6\x3b\xf8\x92\x88\x06\x85\x68\xf6\x46\x07\xc6\x4a\x88\x1e\xc5\
\xc6\xc6\x06\x23\xd8\x0c\xd6\xd7\xd7\x0d\xee\xd5\x9b\x29\x61\x38\
\x1c\xde\xda\xda\xe2\x94\xa0\x39\x61\xea\x38\x28\x50\x53\x07\x4b\
\xcd\x0c\xd3\xb5\xb5\x35\xee\x52\x61\x18\x28\x50\x14\x2b\x25\x94\
\x41\xd6\x85\x51\xcb\x92\xa5\x84\xbc\x5f\x12\xb6\x71\xec\x93\x30\
\x14\x0a\xa1\x07\xcf\x23\x93\x0c\x06\x85\x8b\x22\x36\x6c\xe6\xd0\
\x28\x09\x67\x67\x67\xb9\x67\xb6\xf1\xa0\x88\x0d\x3b\x9c\xc7\x1c\
\x09\xa3\xd1\xe8\xca\xca\x0a\x63\xd4\x06\x50\xd0\x26\x2d\xef\x36\
\x47\x42\xb4\x52\x38\x27\x61\x09\xde\x51\x05\x94\x50\x2f\x32\x99\
\x4c\x3a\x9d\x66\x74\xda\x03\x8a\xdb\x98\x6d\xbc\x4d\x90\x10\xf7\
\xc5\xd5\xd5\x55\xc6\xa5\x6d\xa0\xd0\xcd\x68\xfb\x98\x70\x0d\xf3\
\xf3\xf3\x96\x6f\x5c\x69\x27\x28\x74\x14\x3d\x25\xd4\xa2\x24\xf2\
\xf9\x3c\x23\xd2\x4e\x50\xf4\x06\xdc\x7f\xc5\x4b\xb8\xbc\xbc\xcc\
\xf1\x18\x6b\x41\xd1\x1b\xf0\xa0\x93\xec\xf0\x8d\xc7\xe3\x5c\xa1\
\x66\x39\x08\x00\xe9\xbb\x77\xcb\x96\x70\x69\x69\x89\xfb\x17\x5a\
\x0e\x02\x00\x61\x40\x09\xc7\x43\x22\x91\xe0\x59\x93\x64\xc2\x9d\
\xa0\x42\x30\x50\xc2\xf1\x74\xca\x19\x7f\xc4\x80\x60\x90\x2a\x21\
\xba\x01\xdc\x3c\x86\xbc\x80\x60\x90\xdb\x33\x94\x2a\x21\x37\x8f\
\x21\xc6\x84\x84\x48\x09\xa3\xd1\x68\x2e\x97\x63\xd8\x91\xd7\x20\
\x24\x84\xce\x19\x8a\x94\x70\x6e\x6e\x8e\x83\xa2\xe4\x37\x10\x12\
\x08\x0c\x89\x29\x8f\x30\xaf\xc5\xd1\x6a\xb5\x7a\xbd\xde\xd3\xd3\
\x53\xb7\xdb\x7d\xf9\x61\x38\x1c\x8e\xc5\x62\x8e\xe3\xd8\x7c\xe4\
\x2d\x02\xe3\xf2\xf2\xb2\xdf\xef\x53\xc2\xa1\x77\xc1\x6d\x3b\xd7\
\x05\xd6\x3d\x3c\x3c\xd4\xeb\x75\x88\xd7\x68\x34\xde\xfd\xfb\x44\
\x22\x01\x21\x53\xa9\xd4\xf4\xf4\xb4\x55\x4e\x22\x30\x10\x1e\x77\
\x77\x77\x94\x70\xb8\x18\xf6\x54\xb5\x82\x5a\xad\x56\xa9\x54\xaa\
\xd5\xea\xf3\xf3\xf3\x87\xfe\x63\xc3\xc5\x8b\x45\x74\x93\xd2\xe9\
\x74\x36\x9b\xb5\x64\xcf\x01\x84\x07\x25\x1c\x2e\xb8\xc1\x1b\x1f\
\x4c\x68\x64\x96\x4a\xa5\x72\xb9\xfc\x51\xf7\xde\x04\x1f\x72\xe3\
\x02\x1b\xd1\x5a\x9b\x9f\x9f\x37\x7b\x0f\x1e\x54\xfe\x08\x12\x34\
\x19\x28\xe1\xb0\xc8\xe5\x72\x06\x0f\xc9\xa0\x33\x03\xf7\xd0\xab\
\xe9\x74\x3a\x81\x7f\x38\x6c\x2c\x16\x8b\xd0\x3b\x9f\xcf\x9b\x3d\
\xb2\x85\x20\xc1\x95\x52\xc2\x61\x61\xf0\x72\x6d\xb4\x3c\x11\x3a\
\xc3\xbe\x85\x43\xef\xf3\xf3\x73\xa8\xb8\xb4\xb4\x64\x6a\x66\xe2\
\xba\x28\xe1\xb0\xf0\xc6\x1b\x8c\xec\xfb\x15\x0a\x85\x41\x46\x5c\
\x82\x02\xaa\x1f\x1f\x1f\x5f\x5f\x5f\xaf\xac\xac\x98\xd7\xbc\x47\
\x90\x20\x54\x46\x99\x9f\x16\x49\x68\xde\x72\xed\x5e\xaf\x87\x7a\
\x09\x1d\xb6\xb1\x7c\x3b\xc2\x74\x7f\x7f\x7f\x76\x76\xd6\x98\x7d\
\x22\x5e\x87\x8a\x20\x09\x25\x65\xbd\x61\xcd\xa7\x56\xab\xb5\xb7\
\xb7\x37\x2e\x03\x5f\x40\x02\x90\x0c\x24\xc6\xb0\x16\x29\x9b\xa3\
\xc1\x33\x39\x39\x69\xd2\xa1\xf3\x0f\x0f\x0f\x87\x87\x87\xa8\x09\
\x3f\xf4\xbf\x90\x03\xd3\xd3\xd3\x5e\x56\x78\xb9\xe1\xbd\xb6\xdb\
\x6d\xef\x15\x34\x9b\x4d\x7c\xb8\xf7\x93\x01\xc1\x7f\xd9\xdd\xdd\
\xdd\xda\xda\xc2\x87\x9b\x91\xbd\xc8\x16\xe4\x12\xae\x8b\x12\x06\
\x89\x49\x3b\x1a\xde\xdd\xdd\xa1\x4b\x36\xf8\xc2\x0e\xf4\x70\x66\
\x5c\xfc\xce\xe8\xf3\x56\x2f\xbc\x7e\xa6\x0e\xf1\x77\xef\x32\x60\
\xab\x0c\xb7\x83\x83\x83\x83\xcd\xcd\x4d\x63\xda\xfc\x08\x18\x4a\
\x48\x09\x7d\x9b\x7f\xa7\xa7\xa7\x03\x95\x4d\x24\xb2\xb8\xb8\x88\
\x96\xd5\x27\xd6\x25\x4f\xba\xe4\xf3\xf9\xe7\xe7\xe7\x4a\xa5\x72\
\x75\x75\xf5\xee\xb4\x07\x6e\x0a\x47\x47\x47\xeb\xeb\xeb\x66\x2c\
\x87\x40\xc0\xe0\xaa\x29\x61\x70\xa9\x8c\x44\x92\xc9\xa4\x01\x91\
\x01\x1f\xce\xce\xce\x06\xb9\x5e\xf8\x03\x19\xbe\x3e\x5e\x02\x81\
\x17\x16\x16\xe6\xe6\xe6\x20\xff\x20\x33\x90\x48\x1e\xbe\xd4\x80\
\xee\x37\x02\x06\xd9\x38\x8c\x19\x57\x4b\x25\x9c\x9a\x9a\x32\x60\
\x72\xb9\x56\xab\x9d\x9c\x9c\xbc\xdb\x0a\x85\x7b\xcb\xcb\xcb\xc1\
\xae\x8f\x85\x57\xf3\xf3\xf3\x50\xeb\xe2\xe2\x42\x3d\x14\x84\xe4\
\x21\x91\x50\x57\xfa\xd4\x05\x02\x06\x97\x20\x62\x09\x9b\x8c\xd1\
\xd1\x54\x2a\x25\xdd\xc0\x56\xab\x75\x78\x78\xa8\x36\x10\xe2\xed\
\xec\xec\xa0\x41\x38\xa4\x15\xea\xf8\x58\x7c\x38\xbe\x42\xfd\xf9\
\x48\x24\x92\x6a\xc0\x78\xa9\x94\xb0\xa1\x84\xa3\xa0\xd7\xeb\x1d\
\x1f\x1f\xbf\x7e\xf2\xe8\xcd\xda\xfe\x9f\x7f\xfe\x19\xc1\xf8\x24\
\xbe\x02\x5f\xa4\xae\xe8\x90\x54\x24\xf8\xa3\x83\xb7\x0c\x1b\x63\
\x25\x44\x53\xca\x6f\x54\x50\x0a\xe7\xe7\xe7\xea\x51\xca\x4c\x26\
\x83\x0a\x6a\x64\x0f\x86\xe3\x8b\xf0\x75\xea\x81\x50\x24\x18\xc9\
\x16\x9d\xed\x08\x1b\x11\x8b\x10\x04\x24\x11\x3d\x6c\xd1\x1d\xc2\
\x6a\xb5\xaa\xee\x86\xa1\x13\xb8\xb9\xb9\x39\xe2\x6b\xc4\xd7\xe1\
\x4b\xd5\x03\xa1\x48\x36\x12\x2f\xba\x5b\x28\x62\x3c\x4f\x80\x84\
\xa2\xdb\xa2\xe8\x5f\xa9\xeb\x13\x54\x47\x6b\x6b\x6b\x63\xb9\xcb\
\xe0\x4b\xf1\xd5\xea\xfa\x10\x89\x17\xf7\xa0\xba\xb8\xe0\x11\x20\
\xa1\xe8\xb6\xa8\xfa\xc1\x08\x74\xcc\x46\x5f\x07\xfe\x59\x1f\x2a\
\xfa\x87\x48\xbc\xac\x27\x12\x24\x06\x0f\x25\x1c\x22\xcf\xcf\xcf\
\xd7\xd7\xd7\x7e\xbf\x8d\x44\x22\xe3\x35\xf0\xb5\x87\x8a\xf1\x52\
\x5c\x42\x20\x8f\x17\x33\x78\xa4\x4a\x88\x8e\xb5\xdc\xc7\x97\x10\
\xbe\x7e\x6d\x39\x84\xfe\xf7\xef\xdf\x35\xd9\xa2\x0f\xc9\x40\x62\
\xfc\x6e\x07\xb8\x04\xc5\xad\x44\x73\x10\x3c\xfa\x0f\x28\xe8\x2e\
\x61\x3c\x1e\x17\x3a\x2a\xd3\xe9\x74\xca\xe5\xb2\xdf\x6f\x67\x67\
\x67\xb5\x9a\x0d\x47\x62\x14\x83\x34\xb8\x10\x11\x4b\x4f\xde\xbc\
\xd9\xe9\x5f\x19\x0a\x90\x50\xe8\x3d\xb8\x54\x2a\xf9\xcd\xb3\xa1\
\xed\xa7\xe1\x41\x42\x48\x92\x5f\xa3\x14\x17\x82\xcb\x11\x5a\x10\
\xfa\x87\x90\xee\x12\x0a\x6d\x8b\xa2\x09\xa7\x98\x96\xc8\xe7\xf3\
\x1a\xee\xda\xe8\x2d\x58\xf5\xfb\x2d\x2e\x47\xe8\x30\xa9\xfe\x21\
\xa4\xbb\x84\x42\x9f\x21\xac\xd5\x6a\x7e\x83\x19\xe8\x80\x69\xfb\
\x98\x02\x12\xe6\xd7\x4d\xc5\xe5\xe0\xa2\x24\x96\x85\xfe\x21\xc4\
\x9a\x70\x28\x54\x2a\x15\xbf\x5f\x2d\x2c\x2c\x68\xbb\x8c\x03\x09\
\x53\x1c\xab\xa2\xb8\x28\x86\x10\x6b\x42\xed\x78\x78\x78\xf0\x1b\
\x27\xd0\xfc\x28\x1b\x24\xcf\xef\x1e\x51\xad\x56\x25\xb6\x48\x59\
\x13\x7e\x15\x89\xe7\xec\x34\x9b\x4d\xbf\xb6\x68\x3a\x9d\xd6\x7c\
\x0f\x7f\x24\xcf\xef\xf9\xe9\x4e\xa7\x23\x68\xf7\x24\x41\x21\xa4\
\xb5\x84\x8e\x8b\xc4\x0e\xa1\xdf\xaf\x44\x6c\x1e\xa1\x38\x7d\xb5\
\x5e\xaf\x8b\x2b\x0e\xfd\xa3\x48\xeb\xc4\x09\x3d\x6e\x4e\x21\xa1\
\x88\x9d\x94\x14\x89\x94\x28\xa1\xfe\x81\xa4\xb5\x84\x42\x4f\x4d\
\xf0\x7b\x1c\x76\x72\x72\x52\xc4\x79\x52\x48\xa4\xdf\x04\xb7\x94\
\xad\x93\x64\x05\x92\xa3\x79\x34\x88\x2b\xef\x7e\xbf\xef\xb7\x62\
\x5b\xd0\xe3\x20\x7e\x49\xc5\xa5\x49\x7c\xd2\x57\xf3\x40\xd2\xbd\
\x4f\x28\xae\xbc\xdb\xed\xb6\xdf\x10\xe2\x30\x96\x6e\x3c\xb9\x04\
\xfe\xb1\x8a\xa4\x7e\x68\x47\x53\x06\xd2\x40\xf7\x08\xe6\x5d\xe0\
\x12\x7e\x22\xb2\x3f\x81\xb7\x71\x9b\xb7\x65\x06\x9a\x5b\x6b\x6b\
\x6b\x01\x6e\x91\xa6\x48\xea\xf3\xf3\xb3\xb8\xb5\x84\x94\xf0\xf3\
\x48\x5c\xba\xad\xd8\x48\x26\xc0\xe1\x01\x6f\xfb\xe0\xd7\x5f\x8a\
\x7f\x22\xbb\x82\x1a\x7d\x55\x24\x55\xbd\x53\x0e\x03\x89\xcd\xd1\
\xf1\xa3\xe8\x32\x05\x35\x3c\x80\xe6\xee\x9b\x9b\x97\xe2\x87\x41\
\x4d\xa6\x2b\x92\x2a\x51\x42\x4e\x51\xd8\x85\x22\x46\x83\x92\xb0\
\xd9\x6c\xbe\xf9\x60\x11\x7e\x18\xd4\xe8\xa5\x22\xa9\xd2\xb7\x60\
\xa3\x84\x84\x10\x4a\x38\x64\x46\xd0\x90\xf3\x9b\x6f\x54\xcc\xef\
\x05\x58\x9f\x1b\x76\x92\x21\x25\xfc\x7c\xff\x4a\x62\xf7\x23\x28\
\x09\xbd\x5d\xd2\xfe\xfc\x79\x80\xbb\xb6\x8d\xa0\x51\xcd\x40\x92\
\x21\xa1\xc4\x35\xfb\x8a\x18\x0d\x70\xbb\xa4\x4c\x26\xb3\xb9\xb9\
\xf9\xf2\x5d\x78\x13\xec\xa9\x66\x8a\xa4\x4a\x94\x50\xf3\x40\x8a\
\x30\xef\x82\x45\xf1\xe0\x4c\xb0\xb3\xea\x59\x17\x6f\x89\x5c\xe0\
\x13\x77\x8a\x83\x28\x24\x2e\xe8\xd5\xbc\x26\xd4\x5a\x42\x89\xa3\
\xe1\x90\x10\x6d\xc2\x37\x6f\x1f\xc3\x58\x78\x39\xa4\x79\x73\x85\
\x84\x12\x9f\xf0\x64\x73\xf4\xf3\x48\xdc\xe1\x0b\x06\xfa\x3d\xca\
\x2d\xe8\x11\x04\xbf\xa4\xe2\xd2\x24\x0e\xcc\x68\x1e\x48\x5a\x67\
\xa8\xc4\x9a\x50\x51\x3b\xf9\xcd\xef\x69\x18\xb2\x7e\x95\xb6\xd0\
\x8d\x98\x35\x0f\x24\xad\x25\x14\xba\xf1\xb3\x62\x43\x51\xbf\x6d\
\x2f\xb4\x42\x91\x48\xa1\xe7\x82\x68\x1e\x48\xba\x4f\x51\x48\x9c\
\xa5\x50\x48\x28\xe2\xe0\x58\x45\x22\x25\x4a\xa8\x7f\x14\xe9\xde\
\xbe\x97\x58\x19\x2a\x1e\xde\xad\x56\xab\x9a\xb7\x48\x91\x3c\xbf\
\xe3\xd0\x70\x51\x89\x44\x82\xd5\xa0\x75\x12\x4a\x7c\x7a\x6d\xc2\
\xdd\xd0\xe9\xcd\x9f\xf7\xfb\xfd\xdb\xdb\x5b\x9d\x53\x8e\xe4\xf9\
\xcd\x0c\xe1\xa2\x24\x3e\xd7\x42\x09\x2d\x95\x50\x31\x6f\xae\x38\
\x25\x66\xec\xa8\xcf\x7e\x11\xb1\x4b\xd5\x9f\x0c\xe3\xa1\x67\x4a\
\x28\x80\xe9\xe9\x69\xc5\x56\xd6\xea\x83\x7b\xc7\x48\xb9\x5c\x56\
\x6c\x1c\x2e\x62\x97\x2a\x89\x21\xa4\xbb\x84\xfa\xdf\xc6\xde\x04\
\xcd\x36\xc5\x5e\xf7\x97\x97\x97\x1a\x0e\x15\x20\x49\x57\x57\x57\
\x7e\xbf\xc5\xe5\x08\x3d\x1e\x8b\x35\xe1\x57\x11\xba\xbd\x17\x98\
\x9f\x9f\xf7\x9b\xd7\x46\x6d\x73\x71\x71\xa1\x5b\x82\x91\x24\xbf\
\x6a\x10\x17\x82\xcb\x11\x5a\x10\xfa\x87\x90\xee\x12\xb6\x5a\x2d\
\xa1\x87\x01\x45\x22\x11\xf5\x89\x7f\x5a\x9d\xaf\x82\xc4\xa8\x4f\
\x53\x94\xb8\xf3\x9d\xd7\xcb\x55\x2c\xc1\xa3\x84\x83\xb6\x91\x84\
\xb6\x48\xc1\xe2\xe2\xa2\xe2\xf8\xdb\xe3\xe3\x63\x4d\xa6\x2b\x90\
\x0c\x24\x46\x71\xa8\x30\x2e\x44\x68\x11\x88\xd8\xa3\x51\xc0\x3a\
\x40\xb9\x2d\xd2\x68\x34\xaa\x38\xe4\x08\x6d\xbf\xa3\xa3\xa3\xb1\
\xd7\xf3\x48\x00\x92\xa1\x18\xc7\xc7\x25\x08\xdd\x0a\x5d\x4a\xf0\
\x50\xc2\xe1\x92\xcf\xe7\x15\x47\x73\xa1\x11\x78\x72\x72\x32\x46\
\x0f\xbd\x0a\x59\xd1\x30\x46\xe2\x15\x27\x87\x32\x78\x6c\x91\xf0\
\xf1\xf1\x51\x6e\x10\x38\x8e\xb3\xba\xba\xaa\xf8\x03\x6f\xfb\xd0\
\x71\x25\x0f\x5f\xad\x5e\x49\x87\xc4\x8b\xde\xcf\x42\x44\xf0\xc8\
\x90\x50\xe8\xd8\x8c\x47\x3a\x9d\x56\x1f\xcd\x7b\x73\x73\x33\xfa\
\x76\xa9\xd7\x0a\x55\xcf\x58\x22\xd9\x7e\x4b\x7f\x44\x80\x6b\xa4\
\x84\xc1\xd0\xed\x76\xf5\x1f\xe0\x52\x83\xfa\x44\xfd\xf4\x2d\xaa\
\xa3\x5f\xbf\x7e\x8d\x6c\x81\x15\xbe\x08\x5f\xa7\xae\x03\x91\x60\
\x75\x1d\xae\x3f\x08\x1b\x11\x4f\xc3\xc9\x68\x69\x08\x3d\x2d\xfd\
\x75\xa3\x74\x6b\x6b\x4b\xdd\xae\xc3\x35\xee\xee\xee\x8e\xe0\x4a\
\x07\xf9\xa2\x41\x12\xcc\xb0\xb1\x4b\x42\xa1\xc7\xe2\xfd\x56\xb1\
\x6c\x6f\x6f\xab\x17\x9d\xa0\x82\xda\xdf\xdf\x3f\x3d\x3d\x1d\xd2\
\xfd\x1b\x1f\x8b\x0f\xc7\x57\xa8\xab\x5c\x24\x12\x49\x15\x77\xe0\
\x84\xdc\xb0\x91\x31\x03\x8b\x5b\x1a\xda\xf7\x42\x97\x4d\xbd\x30\
\x35\x35\xb5\xb1\xb1\xf1\xfa\x0c\x09\xbf\x2e\xe2\xfd\xfd\x7d\x3e\
\x9f\x47\x97\x2c\xa8\xba\xa8\xd7\xeb\xe1\x63\x2f\x2f\x2f\x07\x99\
\x99\x44\x22\x15\x8f\x44\x0a\xea\x10\x4a\xa9\x09\x65\x48\xe8\x9d\
\x96\x9e\x4c\x26\xa5\x47\x46\x36\x9b\xc5\xb5\x9c\x9f\x9f\xbf\x7b\
\xbd\xf8\x1b\x38\xb3\xb8\xb8\x88\xff\xf2\x95\x69\x3a\x54\x7a\x95\
\x4a\xe5\xea\xea\x6a\xc0\x85\x01\xc1\x9e\xee\x34\x46\x10\x30\x52\
\xf6\x28\x12\xb3\x16\xa9\x5a\xad\x1a\x20\xe1\x84\xbb\xa6\x34\x12\
\x89\x0c\x32\x3d\x88\x18\x2a\xb8\xe0\xc2\xd3\xe9\x74\x26\x93\x19\
\xbc\x89\xd8\x6a\xb5\xee\xee\xee\x90\x69\x83\x0f\x0f\xa2\xa1\x81\
\x3a\xd0\x0c\x03\x65\x8d\x23\x88\x91\x10\x2d\xb4\xa5\xa5\x25\x33\
\xe2\x03\x81\x0e\x0f\x0f\x0f\x0f\x07\x5c\x51\xf5\xe8\x52\x2c\x16\
\x63\xb1\x18\x1a\x8a\x93\x93\x93\xb0\xd1\x5b\xcc\xe9\xed\xbc\xe4\
\x4d\x49\x43\x5a\xb8\x87\xf7\x88\xbf\x8f\xae\xf5\xf3\x46\x62\x84\
\x3e\xac\xf4\x26\x22\x76\x12\x11\x26\x21\x62\xab\xdd\x6e\x4b\xdc\
\xf4\xf2\x4d\x10\xee\x3f\x7f\xfe\x84\x87\x1f\x9a\x7d\x19\xde\xb9\
\xbc\x30\xd0\x80\x91\x98\x17\x10\x2a\x68\x8e\x4a\x49\xad\xa4\x31\
\x68\x54\x86\x13\x06\x81\xa0\x87\x87\xea\x79\xfc\x11\x90\xcb\xe5\
\x90\x0c\x93\x0c\x14\x17\x2a\x92\x9e\x4f\x41\x03\x43\xee\x53\x6d\
\x7e\x8d\xc0\xf5\xf5\x75\x74\xf6\x2e\x2e\x2e\x46\x7f\xe7\x4e\x24\
\x12\xcb\xcb\xcb\x26\x35\x41\x25\xb6\x45\x85\x49\x58\xaf\xd7\x4d\
\x6a\x91\xbe\x6e\x9a\x82\xdb\xdb\x5b\xf4\xfa\x46\xb3\x17\x03\xf2\
\x10\x1d\x6c\xd4\x81\x13\x26\x82\x3c\x94\x35\xb1\x2c\xec\x49\x4d\
\x44\xaa\xe8\x45\xfd\xea\x66\x61\x36\x9b\x2d\x97\xcb\x03\xce\xe6\
\x7d\xb2\xbc\x23\x11\x64\xe0\xdc\xdc\x9c\xf4\x49\x57\x75\x90\xc8\
\x4a\x30\x25\xd4\x08\x88\x81\xf6\x36\x6c\x2c\x95\x4a\x8a\x3d\x97\
\x3e\x47\x34\x1a\x85\x7b\xf8\x7c\x89\x67\x9b\x51\x42\x8d\x78\x7a\
\x7a\xaa\xd5\x6a\x06\xac\xe7\x50\x00\x49\xf2\x2e\xb8\x52\xc4\xd3\
\x17\xf7\x0b\x46\xd5\x97\x4e\xa7\x21\xb6\xd9\x99\xf6\xc2\x27\xa6\
\x67\x28\xe1\x87\x41\x15\x61\x49\x3c\x4d\xb9\x4c\xb8\x33\xef\x50\
\x11\xfd\x1c\x84\xd7\x20\x4f\xa9\x4e\x4e\x4e\xc6\x62\xb1\x54\x2a\
\x05\xfd\x0c\x1b\xf6\x7c\x17\x6d\xb7\x93\x34\x4a\xc2\xfb\xfb\x7b\
\xd4\x0c\x42\xf7\x1d\xfa\x1c\x71\x97\x97\x9d\x32\xe0\x64\xaf\xd7\
\xf3\x5e\x5f\xfe\xc6\x71\x1c\xfc\x8d\xf7\x3a\x61\x2b\x08\x0c\x59\
\xe3\xa2\x52\x25\xec\xf7\xfb\xa8\x0c\x0d\xee\x19\x0e\xe2\xe4\x84\
\x3b\xc1\x30\x41\xfe\x68\x25\x49\x7c\xfe\xdb\x61\x5e\x13\x33\xf0\
\xee\xce\x12\x53\x2e\x52\x42\xef\xc9\x00\x86\x1d\x79\x0d\x42\x42\
\xe8\x81\x96\x52\x1f\x9d\x56\x6c\xd8\x4e\xec\x44\x6e\x48\x48\x95\
\xb0\xd5\x6a\x19\xb6\x94\x94\x7c\x85\x6a\xb5\x2a\x77\x23\x22\xc1\
\x9b\x88\x5c\x5e\x5e\x32\xf8\x88\x47\xb1\x58\x94\x9b\x78\xc1\x12\
\x36\x1a\x0d\x56\x86\x64\xc2\x9d\xb5\x12\xf4\xe0\x92\x51\x12\x7a\
\xf7\x3f\x0e\x93\x5a\x0e\x02\x40\x74\x35\x28\x5e\xc2\x66\xb3\x29\
\x71\x72\x96\x04\x08\x02\x40\xf4\x41\x09\xe2\x25\x04\x85\x42\x41\
\xff\x63\x77\xc8\x90\x40\xd1\x23\x00\xa4\x5f\x85\x78\x09\x9f\x9f\
\x9f\x39\x5d\x61\x2d\xd7\xd7\xd7\x42\xe7\x06\x8d\x92\xd0\x98\x92\
\x20\xd6\xde\x7f\x4d\x90\xd0\x8c\x36\x09\xb1\xb6\x27\xe2\x98\x51\
\x1e\x95\x4a\xa5\x5a\xad\x32\x2e\xed\x01\xc5\x6d\xcc\xd2\x45\xc7\
\x98\x52\x39\x3b\x3b\xe3\x08\x8d\x25\xa0\xa0\xc7\x78\xa8\x23\x25\
\xf4\xa5\xdd\x6e\x5f\x5c\x5c\x30\x40\x6d\x00\x05\x3d\x9a\x1d\xb1\
\x28\xe1\x87\x29\x97\xcb\xa2\x8f\xf5\x25\x83\x50\xab\xd5\x84\x3e\
\xb2\x64\x85\x84\xde\x09\xec\x22\xce\x85\x24\x9f\xc3\x3b\xdd\xcd\
\xb0\x65\x52\x8e\x61\x85\xf4\xf4\xf4\xf4\xee\x99\x47\x44\x2e\x28\
\x5c\x71\xfb\x38\x59\x27\xe1\x84\xbb\xe3\x1d\xd7\xb2\x19\x09\x8a\
\x55\xdc\x76\x86\x96\x4a\x08\xd0\x62\x31\xef\x7e\x69\x39\x28\x50\
\x14\xab\x91\x97\x66\xa6\x84\xe8\x39\x0c\x7e\xf0\x18\xd1\x1f\x14\
\x25\x0a\xd4\xd4\xde\xbe\x63\x6a\xb1\x35\x9b\x4d\x53\x6f\x9c\x16\
\x82\xa2\x94\xfe\xa8\x84\x8d\x12\x4e\xb8\xcb\x68\x4a\xa5\x12\x23\
\x58\x3a\x28\x44\xb3\xf7\xf5\x72\xcc\x2e\xbf\x42\xa1\xc0\xa7\xef\
\x45\x83\xe2\x33\x7e\x61\xb0\xe1\x12\x7a\x33\x87\x9c\xc1\x17\x0a\
\x0a\x0e\xc5\x67\xfc\xe6\x09\x8e\xf1\x05\x89\x3e\xfd\xaf\x5f\xbf\
\x0c\xee\x51\x18\xdc\xab\x47\xc1\xd9\x30\xba\xe6\xd8\x50\x9c\xdd\
\x6e\x17\xc5\xc9\x67\x0e\x05\x81\xc2\x42\x91\x59\xb2\xf8\xc9\xb1\
\xa7\x50\xf7\xf7\xf7\x87\x77\xf8\x26\x09\x10\x14\x13\x0a\xcb\x9e\
\x9b\xa6\x63\x4f\xd1\xb6\x5a\xad\x83\x83\x03\x4e\x1e\xea\xdf\x7d\
\x40\x31\xc9\xdd\xc9\x97\x12\xbe\xdf\xd1\x3f\x3a\x3a\xe2\x2e\x89\
\xda\x82\xa2\x41\x01\xd9\x36\x90\xe6\xd8\x56\xcc\xd5\x6a\x95\x1e\
\xea\x6c\xa0\x85\x3b\x24\x38\x16\x16\xf6\xfd\xfd\xbd\x25\xc3\x6e\
\xb2\x5a\xa1\x28\x14\x3b\x27\x75\x1d\x3b\x8b\xbc\x56\xab\xd9\x33\
\xf8\xa6\x3f\xde\xf0\x35\x0a\xc5\xce\xcb\x77\xac\x2d\xf8\x7a\xbd\
\x6e\xd5\x10\x9c\xb6\x78\x03\xd7\x28\x0e\x6b\x73\xc0\xb1\xb9\xf8\
\x1b\x8d\xc6\xde\xde\x9e\x55\x03\x71\xba\x81\xcc\x47\x11\x88\x3e\
\xce\x85\x12\x7e\x95\x76\xbb\xfd\xdf\x7f\xff\x59\xdb\x10\x1a\x7b\
\xa7\x00\x99\x6f\xd2\x96\x4d\x94\xf0\x93\x74\x3a\x1d\x74\x48\xae\
\xaf\xaf\x99\x15\xa3\x04\x19\x8e\x6c\xe7\xf2\x09\x10\x61\x16\x4c\
\xb8\x83\xe3\x85\x42\x01\x37\xe6\x8d\x8d\x8d\x48\x84\x79\x32\xf4\
\xbb\xde\xe9\xe9\x29\x9f\x6e\x61\x4d\xf8\x06\xd5\x6a\x75\x77\x77\
\x97\x8f\x5c\x0c\x15\x64\x2f\x32\x99\x06\x52\xc2\x77\xba\x88\x3c\
\xe6\x69\x78\x4d\x50\x76\x02\xd9\x1c\x1d\xa8\x69\x7a\x71\x71\x51\
\xaf\xd7\xd9\x34\x0d\xb6\x09\x7a\x72\x72\xc2\xf3\x42\x58\x13\x7e\
\xb8\x69\x6a\xf3\xe4\x55\xe0\x4d\x50\x1a\xc8\x9a\xf0\x93\x4d\xd3\
\xd9\xd9\xd9\xe5\xe5\x65\x56\x89\x9f\xa3\xdb\xed\x16\x0a\x85\x9b\
\x9b\x1b\x66\x05\x25\xfc\x3c\x08\xa0\xfb\xfb\x7b\x78\x08\x1b\x99\
\x1b\x1f\xcd\x3a\x34\xec\x39\x09\x41\x09\x83\xe9\xcf\x9c\x9e\x9e\
\x22\xa4\xd6\xd6\xd6\x12\x89\x04\x33\xe4\x5d\x1a\x8d\xc6\xd9\xd9\
\x19\xc7\x99\x29\x61\xf0\x1d\x9b\xbd\xbd\xbd\xb9\xb9\xb9\xa5\xa5\
\xa5\x70\x38\xcc\x0c\xf1\x6b\x7f\x16\x8b\xc5\x72\xb9\xcc\x87\xc5\
\x28\xe1\x50\x40\x60\x95\x4a\xa5\xbb\xbb\x3b\xb4\x4e\x73\xb9\x1c\
\x33\xe4\x37\x6e\x6f\x6f\xd1\xfe\xe4\x9a\x78\x4a\x38\x74\x10\x64\
\x27\x27\x27\x57\x57\x57\x8b\x8b\x8b\xd9\x6c\x36\x14\x0a\xf1\xde\
\x54\xa9\x54\x90\x21\x5c\x0a\x4f\x09\x47\x0a\x02\x0e\x2a\xe2\xc6\
\x0f\x15\x67\x67\x67\x1d\xc7\xc6\xc9\x9e\x5e\xaf\x87\xae\x32\xf4\
\x63\xed\x47\x09\xc7\x59\x2b\x9e\x9f\x9f\x5f\x5e\x5e\x42\x45\x74\
\x17\xed\x51\x11\xfa\xa1\xe3\x07\xfd\x38\xf8\x49\x09\xb5\x00\x81\
\x58\x28\x14\x10\x91\xf3\xf3\xf3\x50\xd1\xec\x49\x45\x5c\x2c\xf4\
\x43\xdf\x98\xfa\x51\x42\x1d\xa3\xb3\x58\x2c\xa2\x56\x9c\x99\x99\
\x81\x8a\x53\x53\x53\x86\x5d\xe0\xe3\xe3\x23\xf4\xbb\xbb\xbb\xe3\
\xf6\x3c\x94\x50\x6b\xfa\xfd\xfe\x9d\x4b\x2c\x16\xcb\xb9\x7c\xfb\
\xf6\x4d\xf4\x15\xb5\xdb\xed\xdb\xdb\xdb\x4a\xa5\xc2\x71\x17\x4a\
\x28\x8c\xa7\xa7\xa7\xa2\x4b\x32\x99\xcc\x64\x32\xd9\x6c\x36\x1a\
\x8d\xca\xea\xee\x42\x3c\xdc\x4d\x38\xe7\x4e\x09\x4d\x68\xc5\x01\
\x74\x1a\x61\xe3\xb4\x4b\x2a\x95\xd2\x36\xb5\xf5\x7a\xfd\xc1\x85\
\xee\x51\x42\x63\x6d\x44\xa7\x31\x12\x89\xa0\xc7\x98\x72\xd1\x61\
\x1d\x5c\xa3\xd1\xa8\xbb\xd4\x6a\x35\x0e\xb7\x50\x42\x2b\x40\xa0\
\x7b\xfd\x46\xbc\x77\x1c\x07\x35\x24\x6c\x9c\x74\x89\xc7\xe3\x23\
\x48\x00\x7a\x77\x4d\x17\x88\x87\xfb\x02\x07\x5a\x28\xa1\xd5\x40\
\x80\x9a\x8b\xf7\xcf\x50\x28\x04\x15\x51\x3d\xa2\x03\xf9\xed\xdb\
\x37\xbc\x47\xb5\x19\x8b\xc5\xbe\xd2\x35\x85\xf3\xf0\xad\xdd\x6e\
\xa3\x9b\x87\x4a\x0f\xef\xb9\xb0\x93\x12\x12\x5f\xa0\x47\xc3\xe5\
\xcf\x5f\x41\x48\x28\x1a\x0e\x87\xd5\x4e\xc2\xba\x6e\xb7\x8b\xcf\
\xe1\xb9\xa8\x94\x90\x04\xcc\x8b\x54\xdc\x25\xd5\x30\xb8\xbd\x05\
\x21\x94\x90\x10\x4a\x48\x08\xa1\x84\x84\x50\x42\x42\x08\x25\x24\
\x84\x12\x12\x42\x28\x21\x21\x94\x90\x10\x42\x09\x09\xa1\x84\x84\
\x10\x4a\x48\x08\x25\x24\x84\x50\x42\x42\x28\x21\x21\x84\x12\x12\
\x42\x09\x09\x21\x94\x90\x10\x4a\x48\x08\xa1\x84\x84\x50\x42\x42\
\x08\x25\x24\x84\x12\x12\x42\x28\x21\x21\x94\x90\x10\x42\x09\x09\
\xa1\x84\x84\x10\x4a\x48\x08\x25\x24\x84\x50\x42\x42\x28\x21\x21\
\x84\x12\x12\x42\x09\x09\x21\x94\x90\x10\x4a\x48\x08\xa1\x84\x84\
\x50\x42\x42\x08\x25\x24\x84\x12\x12\x42\x28\x21\x21\x94\x90\x10\
\x42\x09\x09\xa1\x84\x84\x10\x4a\x48\x08\x25\x24\x84\x50\x42\x42\
\x28\x21\x21\x84\x12\x12\x42\x09\x09\x21\x94\x90\x10\x4a\x48\x08\
\x51\xf0\x3f\x01\x06\x00\x0c\x5e\x25\xd7\x10\xfd\x4d\x14\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0d\x9e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x0d\x65\x49\x44\x41\x54\x78\xda\xed\x5a\x05\x5c\x5b\x7b\
\x0f\xfd\xdc\xdd\xdd\xdd\xdd\xdd\xdd\x1e\xee\x32\x57\xdc\xa5\xc3\
\x1d\x56\x5c\x5b\x8a\x17\xf7\xe1\xee\xd4\xb0\xb9\x1b\x73\x77\xb6\
\x7c\x27\x7f\x28\xeb\x68\x9f\xf2\x5c\x7e\xbf\x6c\xe3\x4f\xef\xbd\
\x39\xc9\xc9\x49\x72\xfb\x5e\x63\x66\x66\xf6\x92\xb6\x57\x01\x3c\
\xaf\xce\x12\xbd\xf6\x25\x0b\x40\x22\x91\xbc\xce\xa7\x58\xf7\x09\
\x06\xf1\x9c\x01\x68\x6a\xad\xff\x5f\x5d\x43\x55\x5f\x7d\x53\xcd\
\xd5\xfa\xa6\xea\xab\xfc\x6f\x3e\x7b\xb6\xee\xef\x95\xa7\xfb\x86\
\x4f\xbe\xfa\x63\xcf\x3a\x80\xda\xda\xda\xf7\xd4\x34\x54\xb6\x76\
\x76\xb7\xdd\xd4\x4d\x6b\xe9\xf0\x91\x43\x74\xf0\xe0\x3e\x1a\x19\
\x1b\xa2\xe6\x5d\x0d\x37\xaa\xeb\x95\xad\xfc\x99\xd5\x3e\xc7\x5b\
\x36\xfb\x11\x4f\x99\xf6\x7f\xcf\x2a\x00\x59\x8f\xec\x2d\x55\xb5\
\x15\xaa\x09\xd5\xf8\xfd\xf3\x17\xce\xd1\xfc\x99\x53\x74\xf2\xd4\
\x71\x3a\x71\xf2\x18\x1d\x3f\x71\x94\x8e\x1e\x3d\x44\x6d\x9d\xad\
\x0b\xca\x9a\x72\x15\x7f\x76\xb5\xcf\xf3\x90\x6b\x36\x7b\xe5\x8e\
\x7d\xf6\x59\x03\xa0\xac\x2e\x6d\x18\x19\x1d\xb8\x7b\xf9\xf2\x25\
\x3a\x73\xf6\x34\xd5\xd6\x57\xd1\xfa\x0d\x6b\xe9\xbf\x8f\xfd\x9b\
\xd6\xad\x5f\x43\xca\xaa\x0a\x01\xa2\xa5\xb5\xe1\x4e\x45\x55\x69\
\xc3\xaa\x01\x14\x68\xff\xe1\x29\x57\x5b\x3f\x2b\x00\x2a\x6b\xca\
\xff\xd1\xd6\xd9\x7c\xf3\xea\xd5\x2b\x74\xe1\xe2\x39\xca\xcc\xce\
\x20\x6b\x6b\x4b\x72\xf3\xd8\x46\x3e\xfe\x5e\xb4\x65\xfb\x26\x32\
\xb7\x34\xa3\xb4\x0c\x29\x1d\x3a\x7c\x80\xea\x1a\x6a\x6e\xf2\x35\
\xab\x51\x1d\xf7\x3c\xd5\x17\x3c\x64\x1a\xd9\xaa\x01\x28\x95\xca\
\x37\x95\x56\x14\x9d\x3a\x71\xe2\x18\x31\x80\xd1\xd1\x61\xb2\x77\
\xb0\xa5\x90\x1d\x41\xc2\x82\x42\xfc\xc9\x3f\xd0\x87\xbc\x7c\xdc\
\xc9\xda\xd6\x8a\x7a\xfb\x7a\x68\x74\x6c\x98\x4a\xca\x14\xa7\xf8\
\xda\xa7\xc6\xf9\xf1\x8f\x98\x02\x05\x00\x83\x6e\x45\x93\x1f\x5d\
\x15\x80\xaa\x86\x0a\xeb\x99\xd9\xa9\xbb\xd7\xae\x5d\xa1\xd3\xa7\
\x4f\xd2\x76\xb7\xad\xe4\x17\xe0\x43\xe1\x51\x12\xda\x11\x11\x42\
\xa1\x4b\x20\xfc\x00\x62\xcb\xb6\x4d\xb4\x65\xeb\x46\xda\x7f\x60\
\x2f\xd5\xd4\x57\xde\x2c\x56\x16\x9a\x3d\xd9\xfd\x9d\x64\x3d\x6f\
\x71\xcb\x57\xff\xd5\xd4\xef\x00\x20\x07\xc5\x6c\xbf\x2a\x00\x6a\
\xad\x6a\x8a\x88\xe8\xfa\xf5\xab\x94\x5f\x90\x4b\x5b\xe1\x64\x4c\
\x5c\x24\x45\xc5\x86\x53\x44\xf4\x0e\x80\x08\x15\x99\x08\x0c\xf6\
\x23\x5f\xd0\xc9\x65\x8d\x13\x65\x65\xa7\xd3\xc0\x60\x1f\xc9\x14\
\x79\x4d\x4f\x4a\x9f\xd7\xd0\x6b\xdd\x65\x9a\x80\xad\xc5\xfb\xdf\
\x65\xa2\x0e\x2c\x01\xa2\xee\x19\x03\x68\x6c\x54\x7e\xaa\xb6\xbe\
\x7a\xe1\xf6\x9d\xdb\x74\xea\xd4\x09\xda\xb4\x79\x83\x70\x3e\x3e\
\x31\x86\x62\x13\xa2\x28\x3a\x36\x62\x11\x44\x78\x08\x05\x4b\x02\
\x29\x20\xc8\x97\x3c\x41\x25\xd7\xb5\xce\xb4\x67\xef\x2c\x95\x96\
\x29\x6e\xc9\x64\x4f\xae\x48\x9e\x32\xf5\x56\x77\x99\xea\x2f\x46\
\x75\x50\x30\xf5\x39\x77\xb9\xe6\xf8\x33\x02\xd0\xd5\xd5\xfc\xe9\
\xba\xc6\xea\x4b\x7d\x03\x3d\x74\xf1\xd2\x05\xea\xee\xe9\x14\x45\
\x9b\xb4\x33\x9e\x92\x52\xe2\x28\x21\x29\x96\xe2\x12\xa2\x17\x41\
\x44\xed\x20\x49\x58\xf0\x22\x95\x02\xbc\x69\xcd\x3a\x17\xaa\x6f\
\xaa\xa3\x86\xe6\xba\x5b\xb2\xa2\xbc\xff\x3c\xb9\x64\xaa\x7f\xef\
\x21\xd7\xe6\x19\x77\x65\x7a\x1d\x32\x70\xc3\x27\x7f\xcf\x3b\x9f\
\xa6\xf3\x2d\x9f\x6f\xef\x6a\x3d\x53\x5d\xa7\x7c\xb0\x6f\xff\x1e\
\x9a\x9f\x3f\x49\x59\xb9\x19\xc2\xc9\x9d\x69\x49\x94\x92\x9a\x48\
\xc9\x3b\x13\x28\x21\x99\x41\x44\x09\x3a\x85\x45\x32\x95\x44\x16\
\x58\x95\x90\xa9\x28\xea\x1f\xec\xa5\xec\xbc\x8c\x5a\xc3\x7b\x9b\
\x2b\x95\xaf\x5f\xf9\xbc\x75\xd9\x93\x6f\xf3\x90\x69\x2f\x6c\x52\
\xce\xbe\xc3\x98\x46\x1a\xb5\x7b\xa1\xf6\x47\x46\x4e\xb6\xb4\x37\
\xfc\x7a\x57\x7b\xd3\x64\x6b\x7b\x23\xe9\x6d\x57\x47\xd3\x03\xc8\
\x65\x77\x73\x5b\xc3\xfc\xd4\xb4\xf6\x41\x7e\x61\x0e\x64\xf3\x3c\
\x1d\x39\x7a\x08\xd1\x0d\x40\xe4\xe3\x29\x2d\x73\x27\xa5\x66\xa4\
\x90\x14\x40\x92\xa5\x8b\x20\x62\xe3\xa3\x28\x32\x3a\x8c\x24\xa0\
\x12\x67\xc1\xcb\xd7\x83\x36\x6f\xdd\x44\x53\x33\x5a\xca\xca\x4f\
\xbb\x29\x95\x4a\xdf\xac\x7f\xae\x5b\x81\xea\x77\xa6\x64\x13\x8e\
\x76\xb3\xa3\x26\x0a\xb9\x18\xe6\xf4\xe8\x2c\xd3\x54\xfd\xb9\x96\
\x5d\x8d\xd7\x0f\x60\x0c\x38\x7b\xee\x8c\xb0\xce\xee\x0e\x44\x35\
\x91\x14\x25\x05\x34\x3c\x3a\x48\x07\x0f\xed\xa7\xd6\xb6\x66\xe2\
\xae\x3b\x3e\x31\x2c\xf4\x3e\x33\x27\x4d\x58\x46\x76\xaa\x00\x22\
\x4d\x4f\x16\x99\x88\x4f\x8a\x11\x54\x0a\x8f\x94\x50\x08\x6a\x81\
\x65\x75\xe3\x96\xf5\x34\x80\x0c\xe4\x15\xe4\x5c\xcb\xcc\x4d\xfd\
\xcd\x43\xba\xe8\x36\x9a\x92\x4d\x74\xde\x22\xfc\xee\x31\xa3\xfa\
\x40\x81\xc3\xe2\x1e\x39\xac\xa9\x57\x0e\x1c\x38\xb8\x77\xe1\x14\
\x64\x71\xf7\x9e\x59\xea\xe8\x6c\x13\x0f\x8f\x43\x71\xd6\x36\x54\
\x11\x77\xdb\xe9\x99\x29\x1a\x1a\xe9\x17\xd2\x59\x53\x57\x29\x22\
\x9c\x93\x9f\xc9\xc6\xb4\x10\x40\xd2\x01\x82\x29\x95\x88\x9a\xe0\
\xa2\xe6\xcf\x80\x66\xac\x48\xa8\x97\xad\xa4\xac\x2c\x27\xf4\x90\
\x3b\x19\x59\xa9\x76\x06\x0e\x6d\x47\x44\xff\x66\x02\x40\xbc\x67\
\x81\xca\xe7\x71\x94\x48\xf9\xb0\x30\x22\x25\xdf\x18\x9b\x18\xb9\
\x4f\x44\x82\x1e\x47\x8f\x1d\x62\x0a\x88\x07\xe7\xc9\xb2\xa0\x1e\
\x73\x68\x58\x57\x11\xf5\x51\x9a\x54\x8d\x89\x39\xa7\xa4\x54\x41\
\x89\xc9\x71\x94\x2f\xcf\x61\xc3\xe7\xb2\x05\x10\x06\x91\x06\x3a\
\xa5\x48\x1f\x66\x21\x0c\xb2\x1a\x1c\x1a\x20\x1a\x5b\x66\x56\x1a\
\xba\x72\xf5\x03\x69\x5a\x8a\xf7\x23\x0e\x15\xa8\x33\x8c\x14\x47\
\xae\xf6\x84\xe2\x24\x19\x9d\x43\x9d\x3c\x64\xea\x5d\x8b\x05\x64\
\x6e\xfe\x7a\x6f\x5f\xcf\x43\x6a\xcd\x04\x2d\x2c\x2c\x88\x48\xab\
\x35\x2a\x4e\x3b\x03\x00\x7d\xe4\x42\x71\xae\x5d\xbb\xca\x94\x42\
\x16\x74\x98\x38\x0f\x52\x76\x6e\x26\xb9\xb8\x3a\x91\xb5\x9d\x25\
\xad\xdb\xb0\x06\x05\x1a\x01\x20\x00\x81\x4c\x30\x9d\x98\x4a\x9c\
\x05\x0e\x04\x2b\x12\xf7\x05\x5f\xa8\x11\x0a\x1c\x34\x6c\x82\x72\
\x25\xa4\x2f\x67\x20\x5f\xfb\x0b\xee\xb0\x46\x54\x91\x6b\x6c\x71\
\x5e\xb2\xf2\xdc\x4d\xae\xf9\x39\xce\x87\x16\x01\x58\xfd\xef\xef\
\x71\x89\xd1\xf7\xf6\x1f\xd8\x47\x67\xce\x9c\xa6\xe3\xc7\x8f\x42\
\xab\x8b\x38\xe5\x42\xdf\xdb\x3a\x76\x21\x2b\x17\x00\xec\x22\x55\
\x56\x57\x10\x2b\xd0\xfe\xfd\xbb\xc9\xdd\xdb\x9d\x36\x6c\x5a\xc7\
\x35\x02\x9a\x45\x91\xa3\xb3\x3d\xcb\x25\x67\x02\xea\x94\x2e\xea\
\x81\x0b\x1a\xf7\xa6\xc8\x18\x41\x23\xa1\x46\x61\xa0\x65\x67\x4f\
\x3b\xb2\x13\xdb\x6c\x38\xeb\xc3\xa1\x19\x53\x52\xea\x2e\x53\x77\
\x19\x67\x60\xea\x9b\xc8\xc0\x94\xf8\xe1\x31\xb3\xff\xd6\x25\x80\
\x0a\x87\x0f\x1f\xa4\x03\x07\xf6\xd3\x9e\x3d\x73\x50\x96\x04\x1e\
\x0d\x84\x9a\xa8\xd5\x13\x82\xf3\xdc\xb4\x0a\xe4\xb9\x50\x9f\xc3\
\x34\xb7\x7b\x5a\x74\xdf\xa8\x98\x70\xe1\x70\x75\x5d\x05\x95\x29\
\x8b\x00\xc2\x41\xf0\x3e\x17\x54\xe2\x2c\x70\x2d\xf0\x3d\xf4\x34\
\x62\x35\xe2\xcc\xf6\x0f\x74\xf3\xe7\x74\x7a\x87\x7c\x72\x75\x9f\
\x00\x80\x13\x46\x19\x28\x50\x7f\x0d\xe7\xb3\x46\x19\x90\x69\x3e\
\x83\xf3\xc3\x8b\x00\xcc\xff\x7b\x2c\x21\x31\x16\x0a\x73\x80\xb4\
\x5a\x35\xa9\x54\x13\xe8\xa2\x12\xf2\xf1\xf3\x14\x6a\x32\x83\xc2\
\x3d\x82\x25\x85\x69\xc3\xb4\x38\x09\x20\xba\x69\x15\x74\x7d\x33\
\xc5\xc4\x47\x32\xff\x51\xe4\x95\x04\x11\xa0\x18\x38\xca\x1d\x37\
\x7f\x29\x0b\x2c\xad\x2c\xb3\xfc\x39\x16\x84\xe0\x25\x35\x1a\x18\
\xe9\xa5\xe8\xb8\x88\xf9\xe5\x0c\x28\x74\x6f\x87\x43\xd7\x8d\xa7\
\xcf\xd9\xf7\xe1\xfc\xc2\xe3\x9d\x2f\x1e\x58\xfc\xef\x36\xa2\x01\
\x5a\xec\x43\x64\x7a\xa9\xaf\xbf\x07\xce\x7b\x91\xa7\xb7\x1b\x8a\
\x34\x1e\x19\xd9\x4d\xb3\x73\xd3\xa2\x90\x63\x12\xa2\x20\xaf\xf3\
\x34\x3e\x39\x4a\x1b\x37\xaf\xc3\xc8\x10\x26\x22\xad\x28\x2e\xa0\
\x42\x18\xf7\x01\x2b\x1b\x0b\x06\xc0\xb5\xa0\xa7\x11\x77\x67\x1e\
\x2f\x78\xc8\x63\x00\x68\x66\x3d\xa0\x55\xf8\x9d\x15\x8a\x73\x8f\
\xd0\x0b\x56\x34\xb3\x37\xc2\xd1\xbb\x86\x67\x86\x80\x97\x01\x70\
\x91\x4d\x4f\x4f\x51\x6b\x6b\x33\xb5\xb4\x34\xb1\x5a\xf0\x88\x80\
\xc8\x45\xd0\xd4\xb4\x4e\x64\x45\xab\x53\x73\x77\xc5\xe2\x32\x4f\
\x63\xe3\x43\x28\x5c\x57\x56\x16\xe6\x38\x67\x06\xce\x27\x73\x54\
\xc9\xd2\xc6\x9c\x64\x85\xb9\x94\x5b\x90\xc5\xe0\xb8\x43\xf3\x9c\
\xf4\x48\x1d\xf4\xf6\x77\x62\x72\xdd\xb1\x0c\x40\x22\xe9\x79\x83\
\x29\x47\x99\x5a\x3c\xf7\x98\x1a\xb5\xf1\xf9\xf9\x65\x0a\x05\x06\
\xfb\xd3\xd0\xd0\x00\xd5\xd6\x55\x0b\x9d\x66\xfa\x6c\x05\x45\x76\
\x84\x85\xd0\x00\xb2\x32\x3c\x32\x48\xc3\xc3\x43\xc2\x91\x63\x90\
\xd0\x71\xd5\x88\x50\x1e\x77\xaf\xed\x14\x04\x10\x3c\x32\x30\xc7\
\xdd\x01\xda\x75\xad\x13\x37\x3e\xce\x82\x90\x54\xae\x03\x9e\x91\
\x18\x3c\x77\x65\x06\xdd\xdd\xdb\x0e\x9a\x06\x2f\x53\x68\x6b\x81\
\xfa\x83\xc8\xc0\x39\x13\x0d\xeb\x87\x70\x74\xcc\xa8\xb8\x0b\xb5\
\x5f\xc2\xb4\xba\x6f\xb9\x88\x3d\x7c\x3c\xa8\x05\xd1\x2f\x2b\x2f\
\x21\x99\x3c\x9f\x7c\xfd\xbc\x69\xd3\x96\x0d\x18\x85\xbd\xa9\xbe\
\xb1\x8e\xfa\xb0\x90\xb4\xb7\xb7\xd1\xce\xd4\x64\x31\xd7\x4f\xa8\
\x46\x69\xab\xdb\x26\x5a\xbb\xde\x05\x7f\x6f\x26\x0f\xd0\x8d\x33\
\x66\x6b\x6f\x2d\x22\x5e\x54\x2a\x47\xc1\xe7\x70\x1d\x70\x76\xb8\
\x5f\x70\x21\xf3\xae\x00\xc7\x43\xa9\xad\xbd\x89\x65\x55\xb7\xd2\
\x21\x13\x2a\xf4\x2f\x1e\x9d\x8d\x6a\x20\x5f\xfd\x03\xcc\x49\x93\
\xcb\x32\xba\x65\xdb\xc6\x7b\x45\x25\x0a\x2a\x2e\x56\x50\x5a\xba\
\x94\x02\x02\xfd\x20\x91\x6b\x39\x0b\xe0\xb1\x94\x7a\x7b\xbb\xa9\
\xa6\xb6\x8a\xd0\x3d\x45\x1f\x98\xd4\x8c\x83\x0a\xfe\x64\x69\x6d\
\x4e\x0e\x8e\x76\xe4\xe0\x6c\x47\x36\x76\x56\x22\xba\xd8\x93\xa9\
\xb8\xac\x50\xd0\x28\x7b\xa9\x90\xb9\x1f\xb0\x24\x73\x96\x18\x48\
\x5d\x53\x35\x32\xe7\xff\x50\x46\x0b\x74\x3f\x86\xa3\xe3\x26\x3a\
\xee\x7a\x64\xc6\xa8\xc1\xe1\xf3\xbf\xe3\x39\x69\xb9\x91\x39\xba\
\xd8\x1f\x8a\x88\x0a\xa3\x62\x80\xe0\x89\x11\xc5\x86\xe8\xba\x0a\
\xf3\xc6\x10\xd6\xde\xd9\x26\x7e\x97\x95\x93\x89\x6e\x3c\x42\x6a\
\xdd\x04\x29\x6b\xca\x68\xf3\x96\x8d\xa2\x68\xd7\x6d\x5c\x03\x70\
\x52\xc2\xa8\x8d\x5e\x51\x46\x25\xe5\x85\xbc\xb8\x88\xf1\x22\xcd\
\x40\x89\x98\x6a\xfc\xef\xf2\xca\x62\x0a\x08\xf6\x4b\x7f\xb4\xb3\
\x6a\xda\x4c\x0c\x6d\x12\xe8\x7d\xb0\x09\x6a\xfd\x87\x33\x23\x7e\
\xc0\x7f\xaf\x05\x88\xef\x20\xda\x0b\x8a\xa2\x42\x92\xec\x00\x4f\
\x43\x02\xd9\x79\x6c\x52\x8e\x82\x4a\xd9\x39\x59\x18\x1d\x8a\x79\
\x71\x07\x7f\xbb\x48\x3b\xa5\x22\xbc\xc0\xa2\xaa\xda\x72\xf1\x77\
\x4b\x5b\x03\xbf\x03\x5a\x04\x50\xb3\x0c\x40\xaf\x44\xe8\xba\xcb\
\x52\x8a\x7b\xa4\xf2\xef\x1e\xa0\xcf\x18\x8c\x12\xea\x6d\x18\x1b\
\x42\x8c\x97\x17\x4d\x36\xce\x5d\x4d\x00\x73\x72\x97\x69\x15\x7a\
\x00\xaf\x87\xbd\xc9\x65\xad\xf3\x50\x58\x84\x64\x21\x3e\x21\x96\
\x3c\xbc\xdc\xc4\xe8\xcb\xdd\xd5\x09\xc6\x7b\x2d\x03\x48\xcf\x90\
\x52\x63\x53\x3d\x4d\xcf\x6a\xb1\x98\xd4\x70\xb4\xe1\x74\x15\x35\
\xb5\xd6\x51\x63\x4b\x1d\x0f\x7d\xa0\x90\x69\x00\x3c\x52\x00\x00\
\xe8\x25\xe7\xc2\xbe\xe3\xe3\xe3\x66\x6f\x18\x69\xf7\x3c\xe3\xb1\
\x19\x5b\x59\x03\xa6\xd1\xdf\x1b\x77\x62\x4d\x24\xae\x09\x15\xd1\
\x87\xbd\x01\xf6\xe6\x3f\xff\xf3\xcf\x5f\x77\x72\x71\xb8\x11\x8a\
\x0c\x24\x26\x25\x08\xf3\xf7\xf7\x15\xfb\xac\xb3\xab\x03\xf7\x06\
\x50\x28\x1d\x9d\x37\x57\x14\x32\xef\x0a\x15\x55\x25\x98\x4a\x95\
\xc8\x42\x35\x1b\x37\x33\x9c\x2d\xd5\x80\x22\x77\x89\x42\x8f\x02\
\xe0\xcf\x61\x67\xbe\xe6\xe6\xb5\xe5\x77\x0f\x01\x68\x3d\x78\x81\
\x31\x41\x15\xcd\xf6\x7c\xdd\x97\x4d\x01\xc3\x9e\xf0\x5f\x06\xf0\
\x3a\xd8\x1b\x61\x6f\x81\xbd\xfd\x1f\xff\xf8\xcb\xdf\x50\x98\x1a\
\xe6\xb5\x81\x3d\x30\xb7\x7c\x6c\x04\x45\x7a\x4e\x22\x09\xb9\xcf\
\x35\xc1\xaf\x0f\xbb\xfb\xda\x91\x81\x52\xa6\x8c\x70\x9c\x8d\x29\
\xc5\xfc\x16\x2a\xc4\x45\xbc\xa2\x1b\x43\x4a\x19\x38\x32\xbc\xfd\
\xe6\x9f\xfe\xf4\xa7\xb7\x73\x00\x45\x44\x15\xd3\x3f\x58\xe9\x24\
\x1c\xfc\x38\xa2\x7c\x89\x9b\x99\x09\x0a\x1d\xe6\x77\x44\x46\x00\
\x60\xef\x82\xbd\x17\xf6\x01\xd8\x87\x61\x1f\x83\x7d\x12\xf6\x99\
\x6f\x7f\xfb\xeb\xbf\x35\xb3\x78\xec\xbc\x97\xb7\xfb\x03\x8c\xde\
\x34\x36\x31\x2c\x28\x53\x51\x59\xc2\x20\x60\xe5\xa4\x44\xf4\x31\
\xeb\x2f\xf6\x01\x96\x51\x06\x90\xbe\xac\x42\xdc\xd8\x58\xa5\x20\
\xbd\x5b\xea\x39\xf3\x4f\xb8\x13\xcb\xd4\x2e\xbc\xb4\xac\x3c\xe7\
\x5d\x18\xca\x74\x8d\x77\xe3\x47\x28\x04\x7b\x1b\xec\x9d\xb0\xf7\
\xc0\xde\x0f\xfb\x10\xec\xa3\xb0\x4f\xc0\x3e\x05\xfb\xec\x57\xbf\
\xf9\xd5\x5f\xd9\x39\xd8\x5c\x29\x2c\x96\x63\xbc\x98\xe2\xc2\x85\
\xc3\x0a\x11\x75\x06\x52\xa6\x2c\x16\xf4\x91\x17\xe5\x53\xde\x52\
\x23\xd3\xf7\x01\xee\xd2\xa8\x0f\xe6\xff\xed\xf5\x9b\xd6\x98\x71\
\xed\x3d\x31\x00\x6d\xa2\xa7\x7c\xea\xfb\xc6\xe7\xaa\x9f\xe8\x9b\
\x1b\x1f\x3c\x51\x16\xf4\x20\x3e\x02\xfb\xf8\x52\x26\x3e\xfd\x9d\
\xef\x7c\xfd\xe7\x9e\xde\xdb\x17\x78\xbd\xec\xe8\x6e\xa5\x12\x00\
\x28\x29\x57\x50\x29\xac\x04\xce\x2b\x4a\x64\xdc\x03\x8c\x26\xd2\
\x04\x2c\x37\xd8\xad\x09\xd7\xde\xfa\xcd\x6f\x7e\xf3\x0e\x3d\x7d\
\x4c\x99\xbb\x72\xf8\xad\xc8\x40\x98\xa9\x65\x1f\x8a\xb5\x11\x19\
\xc8\xd5\x03\x30\x54\x22\x43\x10\xfa\x4c\xbc\xcf\x80\x4e\x0c\xe4\
\x63\x0c\x06\x73\x50\xe7\x04\x36\xb3\xa1\xd1\x7e\xc2\xab\x75\x76\
\x1a\xbc\x97\x31\x75\x10\xfd\x3c\x1e\x23\x98\xff\x62\xbd\x4c\x91\
\x26\x8a\xcd\x8c\x3f\xc3\x99\xda\xb0\x79\x6d\x33\x07\xed\x89\xa2\
\xcf\xd3\xa6\xa7\x42\xfb\x0b\xfe\xb7\xc9\x02\xce\x57\xfd\xd7\x10\
\xc0\xca\x4c\xe8\xe9\xf4\x0e\xce\x86\x01\x10\xce\xc8\x07\x39\x2b\
\xe6\x56\xe6\xae\x05\xb2\xbc\xdb\xbc\x17\x70\x16\x38\x03\xec\x38\
\x4b\x27\x73\x3f\x77\x69\xb5\xe4\x02\x4e\x86\x02\x31\x90\xf6\xae\
\x16\x14\x71\xc4\x4d\xd7\x75\xae\xe6\x4f\xe5\x1b\x19\xcc\xfc\xef\
\x31\x99\x19\xb9\x76\x2f\x4f\xa3\x46\x00\xf4\x4d\x8d\xb3\x61\x00\
\xe4\xad\x06\x19\x61\x30\xef\x66\x40\x1f\xfa\xd0\x87\x3e\x8c\x17\
\x55\xf3\x5c\xc8\xe3\x93\x23\xdc\xc8\x84\xea\x30\xef\x79\x0a\xcd\
\xe2\xe8\x67\x49\x99\x3e\xbc\x3a\xa2\x47\xd4\x52\x15\x8a\x1c\xd7\
\x9c\x42\xd3\x14\x2f\x77\x9f\x89\xe1\xd5\xfa\xaf\x30\x5e\xec\x7c\
\x82\xb7\xd3\x46\x19\xe1\x02\x7f\xd3\x12\x98\xb7\x2c\x01\x7a\x1b\
\x83\xb2\xb2\x32\x37\x0b\x92\x04\xde\x9c\x99\xd3\x51\xef\x40\xa7\
\xd8\xca\xb8\x71\xb1\xf3\xcc\x7d\x11\x7d\x69\x02\x6f\x6a\xcc\x7d\
\xee\x23\x37\x9d\x9d\x1d\xfe\xf1\x0c\x9d\xd7\xf7\x85\x4d\xee\x72\
\xed\x9f\x9e\x1c\x80\xe9\xcc\xe8\x01\xb1\xbd\x91\x0d\x8d\xaf\x09\
\x94\xb9\xcb\xa3\x45\x67\xcf\x2e\xee\xca\x3c\x85\x72\xf4\x19\x0c\
\x40\x29\x05\x75\xb0\xb2\xde\x71\x72\x76\x6c\x58\x8d\xf3\xdc\x0f\
\xd0\x95\x23\x7c\xb3\x27\xdf\xfd\xac\x7d\x43\xe3\xe4\xe4\xf4\x16\
\x7b\x27\x3b\x55\x99\xb2\xe4\xbe\x5a\x3b\x41\x7d\x83\xdd\x02\x08\
\x5b\x57\x6f\x1b\x1b\x64\x34\x65\xc1\xde\xd1\x56\xc5\x9f\x5d\xcd\
\xb3\xdc\x65\x93\xdf\x04\x80\x7f\x3d\xdb\x5f\xf2\x01\xc4\x7f\xde\
\x63\xe7\x68\xdb\x1a\x12\x12\x78\x93\x5f\x97\x0c\x0c\xf7\x52\x0f\
\x36\x2e\x7c\x77\xc6\xb4\xb9\x61\x67\x6f\xdd\xca\x9f\x59\xcd\x33\
\x78\xd5\xe4\xd7\x8f\x7e\x25\x53\xef\x7d\xd6\x01\xe8\xcd\xd2\xc6\
\xec\x7f\xb6\x76\xd6\x7d\xd6\xb6\x16\x57\xd9\x6c\xf0\x6f\x3e\x7b\
\x36\xee\xcd\x73\x92\x1f\xde\x44\xbc\xa4\xbf\xa9\xe7\xbd\xf9\xd5\
\xff\x57\xe2\x55\x00\xcf\xb2\xfd\x1f\xbf\xa3\x54\x4b\x85\x0b\x06\
\xa1\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\x5b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x07\x22\x49\x44\x41\x54\x78\xda\xed\x58\x05\x70\x1b\x41\
\x12\x7c\x0c\x33\x27\x05\xcf\xcc\xcc\xcc\xcc\xcc\xcc\x10\x66\x66\
\x46\x33\xa3\xc0\x2c\x73\x38\x79\x7e\x53\x18\x0c\x8a\x05\x66\x5b\
\x64\x92\x4e\x92\xe7\xa7\xaf\xee\xae\xce\x17\x9d\xcb\xf4\xac\xae\
\x9a\x12\xed\xf6\x74\xcf\xcc\xae\x65\x3d\x83\x88\xfe\xab\x23\x6a\
\x20\x6a\x20\x6a\x20\x6a\x20\x6a\xe0\x7f\xc3\x40\xd4\x00\xe3\x39\
\x1c\xb3\xf0\x38\x29\xb2\xe9\xe7\x99\xc1\xb1\x80\x63\x09\xc7\x62\
\x29\x96\x48\xef\xcd\x18\x65\x80\x31\x53\xb3\x70\x3e\xc7\xb3\x27\
\x9e\x74\xda\x78\x66\xa8\xc4\xce\xe1\x98\x2d\xc5\x1c\x95\xa9\x19\
\x6a\x03\x8b\x90\xac\xb0\xc4\xfc\x91\x6d\x07\xb7\xad\xe6\xe7\x0b\
\xa5\x45\xd8\xf0\xcc\x71\x24\x9d\x6e\x9e\x05\x88\xea\xab\x15\x39\
\x17\xaf\x54\x90\x3a\xae\xde\xb8\x64\x7a\xc7\x3b\xde\x21\x9a\x53\
\x1b\x58\x8c\x24\x58\x70\xf9\x5a\x95\xef\xd2\x95\xea\xdf\xbc\xed\
\x6d\x6f\x03\xc9\x22\xe9\xb3\x99\xe3\x4c\x3c\x9d\x3c\x73\xc1\xa3\
\x01\x78\xe9\xde\xbd\x7b\xaf\xfb\xfa\xd7\xbf\xbe\x66\x94\x81\x4f\
\x7c\xe2\x13\x0b\xb0\x61\x78\x78\x88\x1a\xee\xd4\xfa\x2f\x5d\xab\
\x6a\x2a\x2d\x2d\x7a\xff\x92\x25\x4b\x16\x48\x84\x0b\xc6\x1a\x07\
\x2d\xcf\x10\x78\x6e\xd7\x4c\x99\x07\xf0\x07\x86\x49\x08\x0a\x8a\
\x01\xbf\xdf\xff\x8b\x3f\xfe\xf1\x8f\x6f\x1f\x65\x60\xed\xda\xb5\
\x4b\x2a\xaa\x2d\x04\x04\x43\x41\xea\xed\xeb\xa6\x6b\x37\x2e\xf9\
\x2b\x2f\x96\xe5\xc5\xa5\xc5\x3d\x5f\x35\x0e\x73\xf5\xc6\xe1\x29\
\x1e\x4e\xda\xdd\xdb\x35\x69\x1e\xde\x43\x00\xc4\x87\xc2\x21\x02\
\xaa\x2f\x57\x30\x6f\x70\xb3\xcd\x66\xfb\xcc\x28\x03\x7b\xf6\xec\
\x59\x62\x29\x2f\x24\xa0\xc9\xfa\x90\xfa\x5c\x3d\xa2\x91\x66\x6b\
\x63\xa8\xa2\xca\x32\x50\x5e\x59\xb2\xf1\x4d\x6f\x7a\xd3\x42\x9d\
\x71\x88\xc8\xf3\xa0\xf1\x8e\xc8\x13\x62\x1e\x6b\x6b\x73\x88\x8d\
\x4d\x88\xa7\xbc\xaa\x84\x00\x59\x3c\x50\x79\xb1\x14\x06\xb6\xb4\
\xb7\xb7\x7f\xe1\x29\x03\x45\x96\x3c\x02\x1e\x36\xde\xa5\xfb\x8f\
\x6e\x53\x63\xcb\x43\x1a\x18\xec\x17\xc7\xaa\xa6\xee\xaf\x81\xd2\
\xf2\x22\x6b\x41\xb1\xe9\xc3\xcb\x96\x2d\x9b\x2f\x25\x5f\x28\x5f\
\x97\x7a\x3c\x8f\x9b\xef\x93\xcd\xd1\xc2\x23\x35\xc8\xad\x1f\xa6\
\xba\xfa\xbf\x07\x4a\x2b\x8b\xc7\xc5\x53\x5a\x51\x44\x5a\x94\x57\
\x59\xf4\x0d\xe4\x15\x1a\x08\x80\xf8\xbb\x0f\x1b\xe8\xfe\xe3\xdb\
\xf4\xb0\xe9\x2e\xb5\x77\x3a\x30\x0e\x5c\xcd\x5e\xaa\xba\x54\xe6\
\x2f\x29\x2b\x2c\x8e\x8d\x3d\xfd\x42\x29\xf1\x62\x79\x1c\xb4\x3c\
\x8f\x5b\x1e\x70\x37\x1f\x51\x4b\x6b\x23\x3d\xb1\x37\x53\x4f\x5f\
\x97\xd8\x0d\xb7\xc7\x45\xd5\x97\xca\xfd\x96\xf2\xb1\x79\x4a\x4a\
\x0b\x48\xc6\xc8\xc8\x08\x01\xa5\xe5\xc5\xfa\x06\x0c\xe6\x2c\x62\
\x48\xe2\xef\x40\x3c\x2a\x28\x8a\xb0\xda\x1a\xc9\xed\x75\x51\x98\
\xdb\xf9\xc4\xd6\x12\x2e\x28\x32\x0d\xe6\x17\x1a\xb7\x7f\xe5\x2b\
\x5f\x51\xc6\x41\xcb\xd3\xfc\xe4\x31\xef\x6b\xa2\x56\xee\x80\xdd\
\xf9\x84\x9c\xed\x36\xea\xe8\x6a\x43\x57\x99\x27\x4c\x76\x47\x6b\
\x98\x3b\xa1\xcb\xc3\xd7\xb1\x22\x1e\x01\xc0\x94\xae\x81\xec\xdc\
\x74\x62\x48\xe2\xef\xc9\xe2\x51\x41\x49\x88\x95\xda\xbb\x9c\x34\
\xec\x1f\x22\x41\x08\x50\x4d\xed\x5f\x03\xe6\x7c\x83\x2d\x2b\x37\
\xed\xe3\xd2\x38\x2c\x54\xf3\xa0\xea\x36\xde\x63\x6f\x6b\xa5\xb6\
\x0e\xbb\xb8\xb7\xab\xa7\x03\x07\x1b\x5d\xc0\xe1\x84\x18\xaa\xab\
\xfb\x7b\x44\x9e\xfc\x22\xa3\x62\x00\x86\x01\x8c\xa7\xae\x81\xf4\
\xac\x64\x02\x20\x5e\xdb\xfe\x56\x49\x88\x13\x42\x3a\x9d\x10\xc0\
\xe3\x10\x22\xaf\xcf\x4b\x65\x15\xc5\xc3\xc6\xbc\xec\x8a\x98\x98\
\x53\x2f\x36\x9b\xcd\x0b\x65\x1e\x9b\xd3\x4a\x0e\xae\x7a\x1b\x8f\
\x20\x2a\x0f\xf1\x18\xa3\x3e\x77\x2f\xba\xc9\x7b\x3d\x38\x5f\x10\
\x28\x76\xa5\xac\xd2\x32\x6c\xce\xcf\x51\x78\xcc\xf9\xb9\x04\x40\
\x3c\x3a\x0f\x70\xe7\xf5\x0d\x24\xa7\xc5\x47\x9c\x5d\x9b\x46\x7c\
\x67\x77\x3b\x57\xb1\x93\x7a\x5d\x3d\x38\x9c\xa2\x00\x87\xd3\x1e\
\xce\x36\xa4\x0f\x19\xf3\xb3\x0f\xc8\x3c\x18\x19\xf5\x7a\x5c\xcd\
\x2e\x16\xef\x61\xf1\xbe\x7e\x2f\x44\x2b\x87\x1b\xc5\x00\x3a\x3a\
\xda\x99\x27\x43\xe4\x31\x98\xb3\x25\x03\x21\xe5\x73\x73\x81\x41\
\xdf\x40\x42\x52\x0c\x01\xe3\x11\x0f\x31\x72\x25\xfb\x07\x7c\xe2\
\x95\x8b\x24\xb5\x75\x7f\x17\x64\x1e\xf5\xc8\xc0\xac\xcb\xd3\x47\
\x1e\x9f\x9b\xd7\xcb\xe2\x87\xc8\x1f\xf0\x2b\xf7\x3c\x0a\x21\x57\
\xbc\xae\xbe\x46\xc8\x31\x64\x10\x00\x5e\x1c\x7e\xc0\xc8\xa6\x74\
\x0d\xc4\xc4\x9f\x25\x40\x2d\xde\xa1\x2f\x5e\x19\x83\xc1\xc1\x01\
\x31\x09\xe2\xef\xb5\x7f\x0b\xca\x3c\x91\x46\x06\x66\x07\x87\x06\
\xc4\x73\x14\x10\x02\x10\x23\x0a\x96\x21\x1b\xa8\xa9\xfb\x5b\x30\
\x2b\x27\x8d\x00\x88\xc7\x3a\x20\xd7\x98\xa9\x6f\xe0\xec\x85\x93\
\xca\xe1\x13\xe7\x57\x3e\x7c\xda\x31\x50\x09\x0a\x70\x05\x51\x39\
\xbb\xc3\x16\x4e\x4a\x89\x13\xaa\xaa\xcb\xaf\x4a\x3c\x9a\x91\xf1\
\xa8\x47\x06\x97\x00\x84\xc9\x55\x57\x1e\x9d\x6d\x0e\xe6\x89\x17\
\x79\xd2\xb3\x52\x08\x80\x78\x5c\xe3\x40\x56\x6e\xba\xbe\x81\x53\
\x67\x8e\x11\x30\x4a\x7c\x97\x7a\xe6\x15\x41\x5c\xc5\x41\x0a\x8f\
\x84\xc9\xe7\xf3\x72\x55\xb2\x85\x5c\x63\xd6\xe3\xce\xce\xce\x53\
\x44\xb4\x5b\xe6\x19\xef\xc8\x00\x03\x03\x03\x64\x30\x65\x0b\x06\
\x15\x4f\x6a\x7a\x92\x64\x40\x60\xc3\x02\x01\x19\x59\xa9\xfa\x06\
\x8e\x9d\x38\x44\xc0\x58\xe2\x51\xc9\xa0\xd4\xd2\xeb\xd7\xaf\x04\
\xe3\xe2\xcf\x7b\xea\x1b\x6a\x33\x90\x90\x63\x17\xc7\x36\x99\x67\
\x3c\x23\x83\xb1\xbb\x71\xf3\x5a\x44\x9e\xe4\xd4\x78\x02\x20\x1e\
\x1d\x03\xd2\x32\x92\xf4\x0d\x1c\x3e\xba\x9f\x00\x88\xef\xd0\x88\
\x77\x73\x35\xd1\x7a\x54\xad\xb1\xe9\x71\xf8\xf4\xd9\x13\x81\xf2\
\x0a\xcb\x45\x26\xdb\x87\xa4\x2c\x6a\x03\x27\xfa\xfd\xd0\xd0\xd0\
\xd7\x64\x1e\xbd\x91\x91\xd1\xd2\xd2\x1c\x3e\x33\x06\x4f\x42\x72\
\xac\x64\x20\xa0\x18\x48\x4e\x4b\xd0\x37\xb0\xff\xe0\x6e\x02\x3a\
\x46\xdd\x1e\xdd\xca\x5f\x4e\x97\xab\x8f\x12\x92\xe2\x84\xf4\xcc\
\x94\xfb\x5d\x5d\x1d\x27\xa4\x6a\xed\x64\xc2\x8f\xf1\xe3\xcc\xda\
\xda\xda\xb7\x5d\xbb\x76\xed\xf9\x32\x8f\xde\xc8\x78\xbc\x1e\x4a\
\x4c\x06\x4f\xea\x98\x3c\x71\x09\x17\x14\x03\x38\x6b\x0c\xec\xd3\
\x37\xb0\x7b\xef\x76\x02\x64\xf1\x38\xa8\x98\xbf\x40\x20\x40\xe5\
\x15\x25\xc2\xa9\x33\xc7\xfb\xea\xeb\xeb\x52\xe4\x36\x73\xfb\xbf\
\xc7\x8f\xcb\xc1\x81\xf8\xe8\x47\x3f\xba\xe2\xd0\xa1\x43\x4b\x65\
\x1e\xed\xc8\xe0\x79\x65\x55\xb9\xc0\xdd\x1b\x17\x4f\x4c\xdc\x59\
\x02\x20\x1e\x01\xc4\x27\x5e\xd0\x37\xb0\x63\xd7\x16\x02\x50\x75\
\x54\x0f\x89\xef\xde\xbb\x1d\x3e\x78\x78\x5f\xa0\xc4\x52\x54\xc1\
\x1b\xf7\x4a\x49\xd7\xb2\xa9\x57\xe9\x7d\x0d\x96\x79\xe4\xbb\x1b\
\x95\x7f\xf0\xf0\x7e\xf8\xd0\x91\x89\xf1\x9c\x8b\x39\xad\x18\xc0\
\x18\x02\x31\xf1\xe7\xf5\x0d\x6c\xd9\xb6\x41\xb9\x87\x3b\xbb\x3a\
\xe8\xe4\xe9\x63\x42\x62\x52\xdc\x9d\x9e\x9e\x9e\x63\x52\xc2\x1d\
\xbc\xf9\x43\xfc\x38\x43\x93\x34\x22\x0f\xd0\xdb\xdb\x4b\xdc\x39\
\xe6\x89\x9f\x30\xcf\x99\x73\x27\x09\x80\x78\x04\x70\x3e\xe6\xac\
\xbe\x81\x0d\x9b\x7e\x2f\x7e\x37\x31\x98\x72\x84\x43\x47\xf6\x77\
\x37\x34\xd4\x25\xaa\x6e\x85\x6f\xf9\x7c\xbe\xa5\x58\xaf\x17\x6a\
\x1e\xdc\x1c\x26\xb3\x41\x38\x7c\xe4\xc0\xa4\x79\xd8\xb8\x62\x00\
\xba\x80\x73\xe7\x4f\x47\x36\xb0\x75\xeb\xd6\xa5\x6b\xd7\xff\x86\
\xb6\xed\xd8\x3c\x5c\x54\x9c\x6f\xe1\x2e\xec\x91\x92\xfe\xd6\xef\
\xf7\xbf\x4c\x26\xd7\x0d\x0d\xcf\xf6\x9d\x53\xe2\x59\x02\x9e\x13\
\xa7\x8e\x90\x16\x67\xce\x9d\x88\x68\x60\xd1\xcf\x7f\xfe\xf3\x15\
\x26\x53\xee\x7a\x6e\x33\x92\x21\xb6\xf3\xc2\xf7\xf1\xe3\xb8\x7f\
\xa0\x9a\x46\x9e\x05\x3f\xfe\xf1\x8f\x57\x26\xa5\x26\x5e\x3a\x7a\
\xfc\x10\xa9\xc3\x68\xca\xb9\x23\x19\x78\xaf\xda\xc0\xcc\xcf\x7e\
\xf6\xb3\x6b\xac\x56\xeb\x47\xf8\xc3\x8d\x7c\x2b\x7c\xcd\xed\x76\
\x2f\x1a\x6f\x42\xc4\x34\xf3\xcc\xf8\xe2\x17\xbf\xb8\xda\x66\xb3\
\x7d\x98\x3b\xb8\x19\x85\x50\x07\xde\xb3\xdb\xed\x6b\x14\x03\x08\
\xfe\x47\xfb\xb9\x7c\xf7\xae\xe2\x3f\xe9\x2b\xf5\xc9\xf5\x63\xba\
\x79\xde\xff\xfe\xf7\xcf\xe2\x9f\x4e\x5e\xc5\x85\xd8\xa0\x31\xf0\
\x03\x8c\x62\xf4\xd7\xe9\xa8\x81\xa8\x81\xa8\x81\xa8\x81\xa8\x81\
\xa8\x81\xff\x84\xf8\x07\xbc\x36\x24\x3d\x4e\x42\xb6\x0a\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0c\x9b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x13\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x4f\x70\x74\x69\x63\x61\x6c\x20\
\x44\x72\x69\x76\x65\x3e\x67\xba\x0c\x00\x00\x0b\xf9\x49\x44\x41\
\x54\x68\xde\xed\x5a\x69\x6c\x5c\xd5\x15\x3e\xef\xcd\xe2\x99\xb1\
\x3d\x33\x5e\xc6\xf6\xd8\x78\xdf\x9d\xc5\x0e\x10\x42\x48\x48\x94\
\xa6\x94\x90\xa4\x48\xa5\x09\x15\x45\xa5\x81\x2a\xa8\xa8\x52\xf9\
\xd7\xe5\x67\x7f\xf4\x67\xff\x20\x21\x21\xaa\x4a\xb4\x08\xa5\x0a\
\xa1\x12\x34\xcd\xc2\x9a\xcd\x49\x70\x48\x88\xb7\xf1\x16\xdb\x33\
\xb6\x43\x3c\x63\x8f\x97\x99\x37\xcb\xdb\x7b\xce\x9d\x37\xcf\x93\
\x00\x12\x6d\x1c\x12\x24\x9e\x75\xf4\x66\xde\xbc\x7b\xef\xf9\xce\
\xf9\xce\x72\xdf\x33\xb7\x6f\xdf\x3e\xf8\x2e\x1f\x3c\x7c\xc7\x8f\
\xef\x01\xdc\xed\xc3\xba\x9a\x93\x1d\xf8\xd5\x2f\x9e\x52\x64\xf9\
\x65\x8e\xe7\x37\x00\xe8\xa0\x69\xda\x15\x9b\xcd\xfe\xca\x1b\x7f\
\x7b\xf3\x5f\x77\x0a\x00\xb7\x1a\x41\xfc\xec\xb3\xcf\x7a\x75\x8b\
\x72\xc8\x5d\xe0\xde\xee\xf3\x95\x3b\xdd\x85\x6e\xd0\x54\x05\x22\
\xf3\x73\xb0\xb0\x18\x4d\xa6\xc5\xf4\x19\x5e\xb3\x3d\xf3\xd6\x5b\
\x6f\x2d\xad\x36\x00\x4b\x47\x47\xc7\x6d\x4d\xb0\xe7\x37\x7b\x1c\
\xb6\x34\x7f\xba\xa6\xba\x6e\x4b\x7b\x5b\x87\xdd\xe5\x74\x02\xcf\
\x73\x60\xb1\x5a\x81\x80\xf8\x4a\x7c\xb6\x58\x3c\x5e\x9f\x14\x13\
\x8f\xd7\x6d\xab\xff\xc7\xd8\xa5\x31\xe5\x9e\xa2\x50\xfe\x42\xde\
\xe1\xaa\xaa\xca\x75\xcd\x8d\xcd\xbc\x28\xa5\xe1\xc2\xc5\xf3\x70\
\xe2\xf8\x49\x88\xcc\x45\xc0\x57\xea\x83\x1f\x3e\xf6\x18\x6c\xda\
\xb8\xd1\x32\x18\x18\x58\xab\x47\xb5\xc3\x38\xe4\xc9\x7b\xc6\x03\
\x3f\xfb\xf9\xbe\xbd\x1e\x8f\xfb\xf7\xf7\x77\x6d\xcc\x53\x54\x19\
\x8e\xbc\xf3\x0e\x1c\x3b\x7a\x0c\xbc\xc5\x5e\x28\x2b\x2f\x03\x4d\
\xd7\xa0\xa7\xe7\x12\x88\x92\x08\x9b\x37\x3f\x62\x89\x46\xa3\x35\
\xed\x6b\x5a\x7b\x07\xfb\x03\xa3\x77\x3d\x0b\x3d\xfd\xf4\xd3\x76\
\x59\x96\x5f\x5f\xd3\xb6\xce\x49\xdf\xc7\x46\xc7\xe0\x7c\x77\x37\
\x34\xb7\x35\x83\xdb\xe3\x06\xbb\xcd\x06\x85\x05\x05\x50\x5b\x5b\
\x0d\xdd\xdd\xe7\x61\x6a\x6a\x1a\x2a\xca\x2b\x9c\x92\x28\xbd\x4e\
\x63\xef\x3a\x00\xde\xa1\xff\x74\xfd\xda\xce\x52\x8f\xc7\x03\x89\
\x84\x00\x87\xfe\x79\x08\xaa\xee\xab\x02\x9b\xcd\x0a\x16\x0b\x8f\
\x62\x61\x62\x45\x20\x25\x25\xc5\x70\xe4\xc8\x61\x3c\x97\x80\xd3\
\xe5\xf4\x4a\x90\x7a\xf2\xae\x03\x68\xa8\x69\xfa\xe3\x9a\x8e\x75\
\x36\x8e\xe3\xe0\xd8\xf1\x63\x98\x32\x55\xf0\x7a\x3d\xa6\xe2\xbc\
\x21\x16\x9e\x87\xfc\x7c\x17\x24\x53\x29\xf8\xe8\xa3\x0f\x31\xa8\
\xcb\x9c\x72\x5a\x3e\x70\x57\x01\x3c\xff\xfc\x33\xb5\xa1\x50\xb0\
\x83\xb8\x1d\x8b\xc5\xe0\xf2\xe5\xcb\xcc\xfa\x59\xe5\x2d\xbc\x25\
\xe3\x05\x54\x9e\x37\xae\x15\x15\x17\xc1\xa5\xcf\x3e\x03\x9f\xaf\
\x14\xf2\x6c\xf6\x9d\x7b\xf6\xec\x71\xdc\x15\x00\x07\x0f\x1e\xac\
\x97\x75\xe8\x2d\x2e\x2e\xb6\x24\x93\x09\x18\x1e\x19\x06\x7b\x9e\
\x9d\x49\x46\xe9\x5c\x10\x19\x0f\xf0\x28\x36\x4c\xab\xe4\xad\x91\
\xb1\x31\x28\x70\xbb\x31\xff\xa9\xbb\xbe\x75\x00\x2f\xbd\xf4\x42\
\x23\x67\x55\x7b\x54\x45\x75\xfb\x2b\xfc\x20\x89\x69\x18\xbd\x36\
\x6a\x52\x87\x37\xe9\xc3\x33\x30\x7c\xae\x20\x08\x87\xd3\x01\x43\
\x81\x00\x78\x3d\x5e\x07\x06\xf3\x2f\xef\x48\x1d\x78\xe1\xc5\xe7\
\xb6\xe3\x72\x7f\xd1\x41\x7f\xc0\x2c\xd7\x1c\xa7\xa3\x9c\x4a\x2b\
\x4a\x47\x75\x65\x4d\xc9\xf4\x4c\x0f\x57\x52\x52\x0a\xf1\x78\x0c\
\x66\xa6\x67\xa0\x18\xe9\x41\x8a\xeb\x3a\x8e\xe2\x34\x1c\x60\xe4\
\x68\x1d\x1b\x0a\x1e\x5b\x0a\x14\x9e\xd7\xc0\xe1\x70\xc0\x34\xde\
\xef\xf5\x7a\xe9\xbe\xc7\xb7\x6d\xdb\x96\x77\xe6\xcc\x19\x71\xd5\
\x00\x1c\x38\xf0\x4c\x03\x68\xf0\x9f\xa6\xe6\x96\x7c\xb7\xdb\xc3\
\xae\xf5\xf5\xf7\x41\x7f\x7f\x1f\x57\x52\x5a\xb4\xa3\xb9\xa9\x15\
\x03\x32\x1f\x2a\x2b\xaa\x98\xb2\x91\xc8\x2c\x48\xb2\x44\x99\x85\
\xdd\x4b\xd7\x34\xa4\x09\x90\x90\xf2\x88\x80\xd7\x32\xb1\xa0\x19\
\x34\x4a\xe8\x2a\x2c\x2f\x2d\x81\x95\xb3\xab\xba\x53\xdc\x8c\xc3\
\x4e\xad\x1a\x85\x24\x55\xfd\x7b\x4b\x73\x9b\xc3\xe5\xca\x87\x68\
\x74\x1e\x7a\x7b\xaf\xc2\x85\x0b\xdd\x20\x2b\x32\x10\xa0\xf6\xd6\
\x0e\x48\x26\x53\xf8\xb9\x10\x64\x49\x82\x99\xeb\xd7\xc1\x83\x7c\
\x36\x83\xd7\x92\xc3\x7b\x83\x36\xe6\xd9\x90\x3c\xbb\x1d\x6e\xdc\
\x98\x05\x57\xbe\xd3\x8e\x58\xee\x5b\x35\x0a\xad\xdd\xb0\x76\x6d\
\xa5\xbf\x7a\x4b\x63\x43\x33\x87\x0d\x18\xb8\xd0\xaa\x57\x3e\xbf\
\x0c\xaa\xaa\x22\x45\xbc\xd0\xd6\xda\x8e\x0a\x58\x20\x95\x4a\xa2\
\x12\x79\xa0\x60\xb3\x96\x42\x30\x44\x0b\x52\x3a\xeb\x01\x0a\xd4\
\x0c\x7d\x90\x4e\x86\xe5\x73\x01\x58\xd1\x0b\x64\x1c\x7f\xa5\xdf\
\x36\x3f\x17\xf5\x67\xd7\xef\xdc\xd8\xd9\x8e\x61\xfe\x3a\x0e\xdb\
\x82\x73\x73\xaa\xaa\x61\x43\xa8\xb2\xf5\xe9\xb3\xaa\x19\x67\x55\
\xd5\xb1\xcb\x3d\xab\xc9\xda\xaf\x23\x91\xc8\x90\xd5\xe0\xb8\x25\
\x11\x8b\xbf\x67\xb7\x59\x39\x1a\x40\x19\x24\x1a\x5d\xc4\x02\x95\
\x60\x8b\xda\xed\x0e\x28\xf3\x95\x33\x66\x60\x63\x06\xe5\x65\x65\
\xa0\x28\x0a\x08\xf8\xfb\xe0\x40\x00\x52\x62\x0a\xab\x6e\x21\x34\
\x34\xd4\x81\xaf\xcc\x47\x48\x90\x3e\x18\xcc\x3a\x51\x48\x33\x95\
\xc7\x36\x9b\x35\x79\x71\x21\x06\x75\x79\x75\x1c\x2a\x5a\x97\x05\
\xa0\x4a\xca\x6b\xe5\x7e\xff\xd6\x4a\x7f\xa5\x69\x5d\x46\x49\x1c\
\xaf\xd1\x7c\xec\x4c\x00\x34\x2e\x14\x0a\x6d\x9b\x08\x86\x5e\xc3\
\x5b\xb6\x67\x3c\x60\x51\x77\xf9\xca\x4a\xab\x0b\x0b\x3d\xa8\xf8\
\x1c\x48\x48\x8f\xe1\xe1\x00\x43\xef\x72\xb9\x50\xe1\x0a\xc6\x69\
\x15\xad\xbe\xb8\xb8\x00\x35\xd5\xd5\xa0\x20\xf7\x87\x47\x47\x59\
\xfa\x5c\xb7\x7e\x1d\xd2\x4c\x44\x30\x43\xb0\xb4\xb4\x0c\x4d\xcd\
\x4d\x19\xe5\x4d\xc5\x39\x26\x3c\x5a\x80\xe8\x95\x16\x25\xb0\x21\
\x95\x70\x7e\x13\x00\xb6\x25\x8f\x92\xf2\x03\x83\xfd\xcc\x48\xf9\
\xb8\x2e\x4b\x0a\x3a\x18\x67\x8d\x19\x8c\xda\x93\xc6\x86\x26\xb8\
\x36\x3e\xf1\xa8\x19\x03\x9a\xa2\x1f\x44\x7a\x58\xad\x68\xb5\x78\
\x5c\x60\x96\x9f\x0d\x87\x99\xcb\xac\xd8\x1a\xf8\xb0\x05\x10\x31\
\x65\x26\x93\x49\x96\x79\xec\x48\x21\x19\x3d\xa0\x88\x22\xd4\xd4\
\x54\x43\xa1\x1b\x27\x6d\x6c\x84\x5d\x4f\xfc\x08\xf9\x1d\x46\x23\
\x44\xcd\xfc\x6f\x0a\xb7\xf2\x19\x37\x3d\x60\x47\x4f\x60\xf5\x36\
\x63\x40\x96\x64\xc6\xbd\xe5\x98\x00\x47\x8f\x7f\x0c\x57\xfb\x02\
\xc6\xfd\x1c\xa3\x65\xdf\xc0\x30\x9c\xfc\xf0\x2c\xae\x9f\x60\x5e\
\x51\x35\x8d\x5b\x09\x62\x0e\xee\x47\xff\x30\x57\x09\x42\x9c\x01\
\x20\x20\xd4\x1e\x10\x9d\x48\x61\x11\x95\xa5\xca\x2b\x60\xdf\x63\
\xc3\xfe\x46\x56\x24\x4c\x85\x1c\x0b\x52\x5a\x80\x05\x68\x5e\x1e\
\xb4\xb5\xb5\xc0\xe8\xc8\xd8\x97\x94\xe7\x0c\x45\x88\x46\x68\x6d\
\xe0\xac\x3c\x29\x52\x6e\x26\x10\xbc\x46\xb4\x2c\x29\x29\x82\x6d\
\x5b\x1f\xc6\x82\x37\x01\x57\xae\x0e\x30\xeb\xf7\xf6\x0d\xc2\xd8\
\x78\x90\x5d\x2f\x2e\xf2\x1a\x71\xa1\xae\x04\x31\x4e\x5c\xa6\x62\
\x4a\xd0\xd0\xe2\x31\xb4\x30\x0d\xa2\x06\x8d\x6e\xd2\x10\x98\xd5\
\x6a\x83\x04\x56\x5d\x0a\x56\xea\x69\xa8\x61\x13\x45\x19\x54\x85\
\x16\x55\x19\xb8\x78\x2c\x4e\x2c\x63\x7d\x0f\x19\x80\x59\x87\xcb\
\x2a\xcd\x99\x20\x78\x23\xc8\x75\x95\xf8\xad\x17\xdd\x0a\x20\x89\
\x63\x1f\x7a\xb0\x8b\x5d\x3b\x73\xee\x22\xee\x2b\xe6\x61\x69\x39\
\xce\x94\x7f\xf0\x81\xf5\x30\x31\x3e\xc9\x12\x08\xad\x7b\x53\x16\
\x42\x17\x42\x3a\x9d\x86\x84\x90\x30\x82\x27\x13\xf5\xc4\x6d\xca\
\xf5\x94\x36\x2d\x56\x0b\xcb\x0c\x8c\x97\x5a\xc6\x0a\x22\x8e\x11\
\x04\x81\x2d\x4e\x08\x12\x48\x33\x64\x2c\xb3\xbc\x66\xc4\x01\x97\
\x05\x92\x23\x34\x3f\x51\x21\x77\x7d\xf2\x0c\x5d\x9b\x98\x98\x44\
\x65\x3b\x61\x7c\x22\x08\xd7\xbf\x98\x85\xaa\xca\x0a\x78\xe0\xfe\
\xf5\x30\x7e\x6d\x82\xe9\x46\xf7\x11\x08\x13\x00\x5e\x8c\xa4\x52\
\xe9\xea\x58\x6c\x19\x03\x2c\xcd\x6e\xa0\x85\x55\x55\x02\x31\x25\
\x61\xe1\x59\x64\xd9\x03\x44\x0e\x8b\x91\x8d\x51\x89\xb3\xf0\x8c\
\x7b\x14\x58\x64\x79\x02\x4f\x1f\x16\x17\x16\xa1\x00\x8b\x1d\x03\
\x8b\xca\xdc\xaa\x38\xa5\x32\xf2\x24\xcb\x2e\xaa\xb2\x98\xeb\x01\
\xaa\x37\x59\x6a\xbc\xfb\xde\x71\x53\x79\x3a\xd3\xf7\x35\xed\xcd\
\x0c\x80\x22\x2b\x4c\x56\x3c\xa0\xc3\x95\x94\x28\x56\x2f\x63\x67\
\x49\x8a\x90\xa0\xb3\xd9\x64\x44\x99\x05\xac\x9c\x5e\x2c\x64\x22\
\x66\x0f\x16\x0f\xec\x77\x0e\xf2\x1c\x76\x10\x30\x63\x90\xf5\x29\
\x2e\x88\x82\x73\x73\x73\xb0\xf1\xa1\x07\x51\xcf\xaf\xb0\x3c\xfd\
\x21\x06\xa2\x24\x79\x54\xd5\xf4\xb0\x09\xc0\xf0\x00\xad\x19\x18\
\xba\x06\x13\xa1\x69\x46\x9b\x1d\x3b\xb6\xc2\x27\x9f\x9c\x63\x74\
\x22\xe5\xdb\x5b\x1b\xf1\x3e\x89\x25\x11\x1b\xd3\x92\xb8\x6a\xe5\
\xfe\x2a\xa5\x53\x4a\x74\x21\x8a\xbc\x56\x19\x87\x33\x56\x52\x59\
\xe6\x99\x9e\x99\x62\x83\x85\x44\x9c\xf1\x9f\x01\x44\x0f\x15\x16\
\x78\xb0\x9d\x8e\xc3\xf2\xe2\x32\xcc\xcf\xcf\x63\x6b\x11\xc1\x5a\
\x50\x0f\x95\x95\xfe\x1c\xa5\xc1\xb0\x3c\x98\x20\xec\x76\x1b\xa4\
\xd0\x8b\x38\xff\x4c\x4e\x1a\x65\xa0\xe2\x48\xe1\xac\xf2\x5d\x9d\
\x6b\xa0\xe7\xe2\x25\x76\xa6\xef\x93\xa1\x19\xf6\x3b\xa3\xd0\x4d\
\x1e\x50\x2d\x27\xe2\x82\x30\x8d\x3d\x7e\x7d\x5d\x6d\x5d\x26\xd3\
\xd8\xad\x46\xe5\x4b\x43\x28\x18\x84\x96\xe6\x56\x06\x86\xe7\xad\
\xac\x1a\x5b\xf1\xf7\x2e\xe4\x25\xf5\x35\x04\x9c\xb6\x91\x5d\x9b\
\xd6\x43\x65\x55\x15\x53\x84\xcb\x68\xce\x84\x7d\x84\x0c\x08\x12\
\xca\x56\x69\xf4\x2c\x06\x71\x30\xc7\x03\x3a\x7a\x98\x2b\xc1\xc6\
\xf0\x0f\xbf\xfb\x2d\xc6\xa2\x00\x23\xc3\x23\x2c\xde\x86\x87\x46\
\xa0\x73\x7d\x07\x6c\xd9\xb2\x09\x86\xb1\x9b\xa5\x3a\x82\x1e\xd0\
\x9d\xe8\x75\x06\xe0\xed\xb7\xdf\xd6\xf6\xef\xdf\xff\x54\x24\x1c\
\xbe\x5c\x53\x5d\xcb\xa7\x53\x69\xa4\x83\x9e\xc1\x86\xc1\x92\x48\
\x24\x61\x12\x03\xab\x00\x8b\x48\xc6\x13\x49\x28\xce\xf3\xb2\x96\
\x62\xc7\xce\xed\xe8\x15\x3b\xf3\x0c\x2d\x46\xd6\xc9\x76\xa3\x99\
\x0c\x9d\x05\x92\xfd\xce\x81\x13\xdb\x6a\x8c\x39\xcc\x03\x9a\x09\
\x40\xd5\x94\xee\xb1\xf1\x89\xad\x35\xb8\x31\x9a\x0b\x47\x80\x74\
\x28\x2e\x2e\x31\xab\xf1\xec\x17\x61\x2c\x9a\x0b\x98\x18\x38\x98\
\x0a\x4e\x91\xf7\xba\x73\xb3\x10\x8f\x20\x02\x2f\x1c\x7c\xfe\x62\
\x30\x34\xb9\x09\x3b\x4e\xcb\x8d\xd9\x1b\x8c\xef\x82\x20\xb1\xd4\
\xd6\x3f\xd0\x07\x9b\x1f\x7e\x84\x3d\xb0\x22\xeb\xf8\x7c\x25\x99\
\xa6\x53\xcb\x54\x49\x5a\x64\xa5\x6a\x7e\xcd\x53\x34\x03\x45\x21\
\x36\x83\xd1\xe8\x82\xac\x29\xd2\x2c\x7d\x3f\x72\xe4\x08\x5f\xd7\
\xd8\xf8\xaa\x20\x24\x8b\x03\x23\x63\xed\x18\x93\x06\x5c\xa3\x12\
\xb3\xd6\x36\x3b\xb7\xae\xab\x8a\x32\x38\x35\x11\x7c\x75\x7a\x3a\
\xc8\x5b\x71\xb0\xe1\x58\xe0\xae\xcf\x7e\xf1\x22\xa6\xc9\x9e\xda\
\xda\x3a\x57\x55\x65\xa6\x48\xce\x63\x50\x86\xe7\xc2\xac\x2e\xf4\
\xf6\x5d\x65\x1c\x0f\xcf\x45\xb0\x60\xb5\xb1\xc0\x4d\x11\x15\x88\
\x6a\x9c\x6a\xf4\x2f\x5a\x0e\x08\xb6\x34\xc0\x2d\x80\x0a\x0b\x0b\
\x61\x7c\x7c\x52\x92\x34\xe5\x7a\xb6\xed\x09\x8e\x8f\x7f\x84\x72\
\xe5\x7f\x68\x44\x29\x83\xe9\x56\x30\x99\x09\xdc\xc9\x7f\x9f\x0c\
\xee\xdd\xbb\x6b\x3f\x2a\xf5\x67\x0c\xb8\xae\x9c\x9b\x75\xcc\x0e\
\x9f\x8a\xe2\x4c\x93\xcb\xe1\x2a\x4e\xa4\x04\x7e\xe7\x8e\x9d\x8c\
\xcb\x54\x98\xa8\x53\x84\x8c\xfe\x86\x27\x72\x40\xd0\x39\x6b\x41\
\xfc\xa3\xf4\x6a\xc7\x3e\x08\x63\xc7\x12\xe8\x1b\xfd\xf4\xfd\xf7\
\xdf\xa7\xc7\x9b\x74\xe7\x9c\x21\xb7\xb7\x23\x3b\x7a\xf4\xc4\x39\
\x3c\xfd\x80\x5a\xbc\x5b\xa5\xb3\x73\x4d\x3d\x72\xfc\x9d\xda\x9a\
\x1a\x6f\x64\x2e\xcc\xe5\xbb\x0a\x60\xd9\x16\xc3\xad\xa5\x68\x92\
\x84\x75\x8d\x06\xad\x18\x08\xc3\x03\x59\x10\xb4\x1b\x8b\x63\xea\
\x95\x15\xf5\x03\x54\x5e\x34\x94\xbf\xad\x0d\x8d\xfe\x0d\x45\xeb\
\xed\x1d\x0c\x0e\x07\x46\x7f\x12\x8e\x84\x85\xe1\x91\x11\x2c\x58\
\x05\xcc\x9a\xaa\x51\x95\x59\xff\xae\x65\xe2\x21\x03\xc2\x90\x1c\
\x0f\x94\x96\x96\xd2\x86\x46\x94\xa4\xf4\x9b\x5f\x26\xd7\xff\x01\
\xc0\xb0\x80\xf6\x35\xa2\x1a\xa2\x64\xcf\xfd\xfd\xfd\xc1\xbe\xab\
\x83\xbb\xfb\x7a\x3f\x57\x1d\x0e\x27\xcb\xe9\x3a\xcb\x56\x9a\x09\
\x82\xd1\x28\x2b\x39\x20\x68\x4b\xe9\xc1\x74\x3b\x15\x0a\xe9\x23\
\x81\x6b\x27\x8c\x35\x6e\xff\xa9\x04\x82\xb8\x55\xe1\x9b\x94\xce\
\x11\x99\xa4\xb7\xb7\x77\x12\xbb\xd5\x53\x0b\xb8\x37\xa0\xed\xa7\
\xd3\xe9\xcc\x74\x88\x5a\x46\xb2\x7d\xce\x0a\x90\x0c\x88\xb2\xf2\
\x72\xdc\x4f\x2c\x52\xbf\xf4\xf1\xa9\x53\xa7\x92\xb7\x4b\x9f\x9b\
\xf6\xc4\x38\xd9\x37\x52\x3e\x2b\x8b\x4b\xb1\x43\xfd\x7d\xfd\x62\
\x91\xa7\x88\x6d\x32\xa8\x72\x9b\xdb\x3f\x55\x33\x77\x52\x9a\xb1\
\x19\xa1\xc7\xee\x15\x15\xe5\xd8\xa8\x05\x53\xb2\xac\xbd\x61\x18\
\x6d\x75\x9f\x0b\x19\x93\x7e\xa5\xc2\x39\x22\x91\x9c\xfe\xf8\xf4\
\xd1\xf3\x17\xce\x2f\xc5\x71\xff\xe0\x71\x7b\xa1\x08\xfb\x74\xe2\
\xbd\x6a\x50\x68\x25\x16\x34\x96\xd6\x6b\x6a\x6b\x20\xb6\x1c\xc3\
\x76\x78\x62\x29\xb6\x18\x7b\xf7\x8e\x3d\x5e\xa7\xef\x28\x3a\x8a\
\x16\x08\x04\x6e\x0a\xe2\x5c\xc1\x7e\x49\x6e\x69\x6e\x1a\x9b\x5f\
\x5c\xf8\xf1\x86\xae\x2e\x1b\x75\x92\xa4\x2c\x15\x3d\x2d\x47\x79\
\x02\xe4\xf7\x97\xb3\x2a\x7e\xee\xec\xf9\x94\x10\x8b\x3f\x87\x7b\
\x89\xa1\x6f\xe5\xfd\x40\x16\x08\x8a\x8a\x60\xb2\xca\xeb\x06\xbd\
\xb4\x81\x81\xc0\x08\x16\xb6\x4d\x98\xdb\xeb\x1b\xea\x1b\x2c\xc4\
\x7d\x1b\x06\xb5\x94\xe9\x55\xd8\x23\x76\xbf\xdf\xcf\x52\x67\x7f\
\xff\xa0\x34\x39\x11\xfc\x40\x88\x27\xff\x74\x4f\xbd\x23\x43\xcb\
\x3a\x54\x50\xba\x77\xef\x7e\xa2\xab\xb5\xa5\x85\x27\x4a\x51\xbb\
\x0b\xd9\x2e\x14\x8f\xc1\xc1\x21\xf5\x52\xcf\xa5\x5e\x0b\x67\xdb\
\x82\x9b\x9f\xf4\x3d\xf5\x8e\x4c\x92\x24\xc5\x61\x4f\x1f\x1e\x19\
\x0d\x6d\x08\xdf\x98\xad\xaa\xa8\xa8\xb0\xd1\x93\x0c\x3a\x16\x70\
\x73\x73\xe6\xf4\xd9\xe4\xf0\xd0\xd0\x27\x56\x3e\xf9\xa4\x20\x28\
\xc2\x3d\xf9\x96\xd2\xb4\x86\x1d\x9e\xb2\xf2\xb6\x97\x35\x5d\xdd\
\xc0\x26\xe7\x2d\x57\x54\x55\x7e\x05\x37\x76\x77\xec\x35\xeb\xaa\
\xbe\x27\x26\x45\x55\x90\x73\x94\xd5\xe0\x4e\x1f\xdf\xff\xab\xc1\
\xf7\x00\x6e\xf3\xf8\x2f\x17\x50\x4f\xbf\x20\xd6\x75\x19\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x0b\x32\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x13\x74\x45\
\x58\x74\x54\x69\x74\x6c\x65\x00\x4f\x70\x74\x69\x63\x61\x6c\x20\
\x44\x72\x69\x76\x65\x3e\x67\xba\x0c\x00\x00\x0a\x90\x49\x44\x41\
\x54\x68\xde\xed\x59\x59\x73\x14\xd7\x15\x3e\xdd\x3d\xbb\x34\x9a\
\xd1\x3a\x5a\x40\x32\x12\x12\x42\x48\x32\x86\x84\xaa\xb8\x5c\x50\
\x0e\xb6\x03\xc1\xe5\x72\x11\x21\x1b\x30\x20\x6f\x24\x94\xe3\xbc\
\xe5\x21\x8f\x81\x97\xfc\x81\xb8\x2a\xe4\xc5\x29\x9c\x80\x11\xc6\
\x18\x10\x3b\x94\xa0\x22\x40\x42\xd8\x08\x6d\x23\x41\x84\x36\x6c\
\xed\x33\x9a\x7d\x7a\xcd\x39\x77\xa6\x47\x23\x83\xab\xcc\x40\x8c\
\x9c\xf2\x48\x47\xdd\xea\xe9\xb9\x7d\xbe\x73\xbe\xf3\xdd\x73\xef\
\xf0\x75\x75\x75\xf0\x63\x36\x1e\x7e\xe4\xaf\x9f\x00\xfc\x5f\x01\
\x68\x78\x77\xc7\xe6\xb7\x76\xbe\x79\x79\x47\xc3\x36\xdf\x8e\x86\
\xad\x3e\x3a\xa7\x6b\x0b\x1e\xc0\xf6\xed\xdb\x9d\xdb\x76\xbe\x71\
\xda\x62\xb6\x7c\xb2\x64\xc9\xd2\xb5\xb5\xd5\x2b\xed\xd5\xcb\x6b\
\xec\x2e\x57\xc1\x5a\x8e\xe3\x0e\x6c\xdd\x51\x7f\x9a\xee\x59\x90\
\x00\x36\x7d\xb0\xc9\xa2\x80\x78\x71\x71\x51\xc9\x2b\x2b\x9f\x5d\
\x65\x75\xe5\xe5\x81\xc9\x64\x04\xb3\xc5\x02\x8b\x8a\x16\x41\xf5\
\xf2\x6a\x5b\x7a\x9a\xfd\x65\x49\x8b\x5e\xa4\x7b\x17\x1c\x80\xb4\
\x19\xf3\xe1\xa2\x82\xc2\x9a\xf2\xb2\x72\x5e\x51\x64\xb8\x76\xfd\
\x2a\xec\xdd\xbb\x17\x3e\xfc\xc3\x87\xb0\x6f\xdf\x3e\x68\xbd\x71\
\x03\x2a\xcb\x97\x09\xe9\x56\x5b\xb5\x6d\xda\x7c\x78\x41\x01\x78\
\x63\x5b\xdd\xab\x0e\x87\xe3\xa5\x15\x55\xb5\x46\x45\x95\xe1\xe8\
\xe7\x9f\xc3\xe1\x43\x8d\x60\xb5\x59\xa1\xbc\xa2\x1c\x4c\x16\x13\
\x34\x36\x36\xc2\x89\xa6\x93\x50\x59\x59\x65\x32\x9b\x2c\x2f\xd1\
\x67\x16\x04\x80\xfa\xfa\x7a\x93\x24\x49\xfb\x57\x54\xd6\x58\xe9\
\xff\x3b\xfd\x77\xe0\x6a\x4b\x0b\x94\x57\x96\x43\x86\x23\x03\x4c\
\x46\x23\xd8\xd3\xd3\xa1\xa4\x64\x31\xb4\xb4\x5c\x85\xe1\xe1\x11\
\xc8\x77\xe5\x5b\xc5\xa8\xb8\x9f\x3e\xfb\xd4\x01\xf0\x16\xed\x37\
\xb5\xd5\xcf\xe6\x60\x06\x20\x18\x0c\xc0\xc1\x43\x07\xa1\x68\x51\
\x11\x18\x8d\x06\x10\x04\x1e\x4d\x60\x66\x40\x20\xd9\xd9\x59\x70\
\xe4\xc8\x61\x3c\x66\x53\x76\x9c\x22\x84\x5f\x7b\xea\x00\x4a\x8b\
\x97\xfe\x69\x45\x55\x8d\x11\x55\x06\x4e\x9d\x3e\x05\xaa\xaa\x80\
\xd3\xe9\x48\x38\xce\xc7\x4d\xe0\x79\x48\x4b\xb3\x41\x28\x1c\x86\
\x8b\x17\x2f\x40\x6e\x76\x9e\x55\x8a\x48\x0d\x4f\x15\xc0\xae\x5d\
\xf5\xc5\x43\x43\x83\x55\x51\x31\x0a\x3e\x9f\x0f\x6e\xde\xbc\xc9\
\xa2\xaf\x3b\x2f\xf0\x42\x2c\x0b\xe8\x3c\x1f\xbf\x96\x99\x95\x09\
\x37\xda\xdb\x21\x37\x37\x07\xcc\x46\xd3\xfa\x4d\x9b\x9e\x8c\x22\
\x3d\x32\x80\x3d\x7b\xde\x2e\x51\x40\xe8\xc8\xca\xca\x12\x42\xa1\
\x20\xb8\xfb\xdc\x80\xd5\xc9\x2c\xe6\x74\x32\x88\x58\x06\x78\x34\
\xa3\xc1\x00\x94\xad\xbe\x3b\x77\x20\x3d\x23\x03\xc0\xa0\x6c\xf8\
\xc1\x01\xec\xd9\xf3\x4e\x99\x02\x7c\x9b\xa2\x28\x8e\x82\xfc\x02\
\x10\xa3\x11\xe8\xbf\xdb\x9f\xa0\x0e\x9f\xa0\x0f\xcf\xc0\xf0\xc9\
\x86\x20\x2c\x56\x0b\xf4\xf6\xf4\x80\xd3\xe1\xb4\x60\x31\xef\x4a\
\x1e\x9b\xab\xaf\x17\x9e\x08\x80\x77\x76\xef\x5c\xf7\xde\xee\x86\
\xf6\x77\x77\xef\xd2\x74\x7b\xef\xb7\x0d\xea\xfb\xbf\x7b\xfb\x52\
\x44\x96\x5b\xf2\xf3\x0a\x72\xbd\xb3\x5e\x2e\x3b\x3b\x07\x22\xd1\
\x28\x8c\x8e\x8c\x42\x06\x46\x34\x41\x9f\x78\x01\xf3\x7a\x16\xd0\
\x78\x66\x08\x00\x27\xb7\x11\xbc\xdf\xe9\x74\x82\xc6\xa9\xbf\x5a\
\xbb\x76\xad\x59\x7f\x6e\x51\xa0\x6c\xdd\x91\xc6\x46\xee\xb1\x00\
\x34\x34\x6c\x2d\x05\x95\x6b\x5a\x5a\x56\xb1\x7a\xf5\x73\x6b\x80\
\xcc\x68\xa0\xa8\xf5\x71\xe3\xe3\xe3\x2f\x66\x3a\xb2\x5c\x69\x69\
\x69\x5c\x61\x7e\x11\x68\x9a\x06\x13\x13\x63\x20\x4a\x22\xd3\xfd\
\xe4\xe2\xd5\x29\xa4\x47\x3e\x99\x46\xaa\xa6\xc0\xac\xd7\x0b\x06\
\xce\xa4\x18\xad\xdc\x2f\x12\x8e\xf0\x86\x65\x7f\xfc\xf8\x86\xeb\
\xb1\x00\x88\x8a\xf2\x8f\x8a\xf2\x0a\x8b\xcd\x96\x06\xd3\xd3\x53\
\xd0\xd1\x71\x0b\xae\x5d\x6b\x01\x49\x96\xc0\x6e\xcf\x80\xaa\xca\
\x15\x10\x0a\x85\x31\xe2\x76\x90\x44\x11\x46\xef\xdf\x07\xc7\xbc\
\xe8\x27\xf1\x3e\xee\x7c\xe2\x18\x37\xb3\xc9\x04\xdf\x7c\x33\x06\
\xb6\x34\xab\x09\xb1\x2c\x4a\x50\x48\x53\x4d\x0a\x18\x57\xa5\x0c\
\xa0\xfa\xb9\xea\xea\xa2\xc2\xc5\xcf\x97\x95\x56\x08\x66\xb3\x19\
\x6c\x18\xd5\x2f\xbf\xba\x09\xc8\x77\xc8\xca\x72\xc2\xf2\x65\x55\
\x8c\x0a\xe1\x70\x08\x9d\x30\x83\x8c\x6d\x43\x18\xc1\x10\x2d\xbe\
\x0d\x40\x97\x4f\x81\x9f\xef\x3c\x99\x01\xb3\x40\xc1\xb1\x59\x6d\
\x46\x59\x56\x0a\xf4\xe7\x6b\x1a\x37\x86\x7f\x5e\x4d\x09\x00\xaa\
\x83\x10\xf4\xf9\x8f\x63\x8a\x79\x72\x98\xd2\x3f\x3d\xed\xc1\x09\
\x2a\xc8\x6e\x32\x99\x2c\x90\x97\xe7\xc2\xfb\x00\x7c\x7e\x3f\x98\
\xcd\x16\x90\x65\x19\x02\xf8\x7e\x77\x57\x0f\x5c\xb8\x70\x11\x5a\
\xaf\xb7\xc1\xd4\xe4\xe4\x3c\xf9\x64\x96\xe4\x3c\x47\xa0\x10\x80\
\x3f\xe0\xc3\x31\xcc\x1c\x06\xe1\x99\xb9\x0c\xc0\x7d\x7c\x40\x6d\
\x6a\x19\x10\x94\x0d\xb9\x79\x39\x8b\xed\x76\x07\x3a\x3e\x09\x7e\
\xbf\x0f\xdc\xee\x1e\x16\x7d\x8a\xb0\x2b\x2f\x1f\x83\x03\x40\xcd\
\x9a\xc7\x33\x43\x0f\x07\x19\xb9\xef\xee\xef\x67\xf2\xb9\xe6\xe7\
\x6b\x60\x69\x79\x29\xf4\xf6\xf6\x61\x4b\x71\xf7\x21\x8e\x73\xcc\
\x78\x8c\x00\x81\x8b\x44\x45\x30\x22\x95\x70\xfc\x24\x00\x9c\x17\
\x0f\xce\x94\x00\xa8\xb2\xf6\x3e\xd2\xc3\x60\xc0\x07\xfb\xfd\x01\
\x16\xf9\xb1\xf1\x71\x7c\x80\x8a\xad\x80\x01\x67\xcf\x6c\x26\x99\
\xa1\x50\x88\x81\x33\x21\x85\x24\xcc\x80\x8c\x2a\x54\x5c\xbc\x18\
\xec\x19\xe9\x50\x56\x56\x06\x1b\x36\xbe\x82\xfc\x1e\xc7\x20\x4c\
\x3f\x48\x1f\x6e\xee\x5c\x96\x24\x30\x51\x41\xab\x4a\xa2\x06\x78\
\x0d\x52\x07\x00\x1c\xac\x42\x79\xc0\x5f\x0d\x02\x01\x3f\x03\x40\
\x40\xa8\x3d\x20\x3a\x91\xc3\x51\x74\x96\x66\xde\x00\xf6\x3d\x46\
\xec\x6f\x24\x59\x44\x29\xe4\x58\x91\xd2\x04\xc5\x0a\x14\x33\xb3\
\xbc\xb2\x02\xfa\xfb\xee\x3c\xe0\x3c\xcb\x02\xc7\x31\x1a\x61\x13\
\x08\x9c\x81\xc7\xf1\xd5\x84\xea\xa8\x66\xf0\xa4\x0c\x00\x07\xce\
\x53\x50\x12\x54\x8c\xb8\x0f\x23\x4c\x4e\x52\x83\x46\x14\x52\x11\
\x98\xc1\x60\x84\x20\xce\xba\x04\x82\x7a\x1a\x6a\xd8\xa2\x51\x09\
\x14\x54\x27\x2c\x44\x76\xdd\xef\xf3\x33\xd0\x36\xec\x7b\xf4\xda\
\x21\xca\xc4\x9c\xe6\x12\x20\xe8\x1a\x2b\x5a\x45\xa3\xb1\x33\x75\
\x47\x06\x8f\xef\x0b\x62\x20\xcd\x8d\x8f\x38\x17\x24\x54\x48\x12\
\x25\x88\x44\x22\x10\x0c\x04\x99\x51\xf4\x89\x42\x92\x1c\x65\x5a\
\x1f\x09\x47\xd8\xfb\x2a\x82\xa2\x7a\xd0\xd8\xfb\xe8\x3c\x5e\x0b\
\x04\x02\xe0\x9d\x9d\x45\x7d\x9f\x45\xa0\x21\xd0\xf0\x47\x2f\x5a\
\x76\xd4\x81\x24\x19\x8d\x8f\x19\x48\x38\x52\x5d\xfd\x81\x81\x3e\
\xb8\x65\xcb\x16\xed\x91\x01\xe0\xa4\x34\x11\x46\x07\x7d\xbe\x59\
\x2c\xb0\x08\xcb\x00\x3d\x98\x39\x18\x16\xd1\x31\x0f\xa3\x4f\x28\
\x88\xd1\xc7\x6c\xd0\x39\x27\xf0\x8c\x7b\xa4\x44\x3e\x8c\xbe\x17\
\x27\x27\xb2\x89\xb1\x09\x48\x4f\x4b\x43\xb5\x11\xe6\x32\x90\x64\
\x24\x65\x24\xb5\xe4\xbc\xaa\xc8\x1e\xdd\x91\xe9\x62\x47\x26\x0e\
\x37\x9b\x5a\x0d\x68\xf0\x65\x18\x69\x30\x8b\x9d\x25\xcb\x42\x90\
\xb2\x19\x03\x40\x94\x99\x41\xc7\x28\x43\x14\x69\x56\x0f\x78\x0f\
\x47\xf9\xc6\x15\x57\x00\x65\x75\x16\xa3\xef\xc5\xe8\xcf\xcc\x78\
\x60\x02\xa5\xb4\xa6\xb6\x06\xfd\x7c\x48\xe4\xe9\x07\x31\x10\x25\
\x69\x22\x54\x54\x6d\x5c\x77\xc4\x28\xf0\x99\x98\x59\x4f\x4a\x00\
\x78\x03\xf7\x77\x31\x12\x96\xa7\x67\xa6\x91\xd7\x0a\x03\x10\x8b\
\x92\xc2\x94\x67\x64\x74\x98\xb5\x0e\x81\xa0\x9f\xf1\x9f\x40\x12\
\x3d\xec\xe9\x0e\x16\xfd\x59\xcf\x2c\x4c\x4d\x4d\x61\x6b\x31\x01\
\xa5\xa5\x4b\xa0\xb0\xb0\x20\xc9\x69\x88\x47\x1e\x12\x20\x68\xd1\
\x1f\xc6\x2c\xe2\xf8\xa3\x09\x47\x14\x2e\x13\x6f\xf1\xa6\x96\x01\
\x45\x38\xe3\x0f\x04\x46\xb0\xc7\x67\x0f\x62\x4a\x63\x32\xb0\x1a\
\x20\x67\x87\x06\x07\x41\xc4\x82\x25\x30\xd8\xb3\xb0\xd9\x98\xc7\
\x82\x5c\xb9\xaa\x16\x72\xb0\xa9\xa3\x1a\xa1\xf9\xe2\x85\x17\x9e\
\x87\xd5\x3f\x5b\x9d\x88\x34\xc4\x29\xc3\x4e\x21\x06\x82\x8c\xd4\
\x2a\x82\x99\xc5\x22\x1e\xd4\x1d\x51\x38\x99\x0a\x3a\xb5\x0c\x60\
\xe5\xab\x21\x7f\x64\x33\x36\x6c\x0a\xa9\x0e\x15\xac\xaa\xc4\x6a\
\x89\x26\xaf\x60\x30\x04\xf7\x06\xee\x31\x5a\xc4\x32\x11\x62\xaa\
\x42\x2d\xc5\x8b\xeb\xd7\xc1\xd6\x6d\x6f\xc2\xeb\xaf\xbf\x06\x25\
\xcf\xc4\xe7\xa5\x24\x1d\xe1\x40\x07\xa2\xff\xcf\x81\x15\xdb\x6a\
\xac\x39\xd4\x01\x75\x30\x69\x22\xab\xd0\x38\xad\x25\xd5\x5e\x88\
\x47\x10\x3d\xa8\x20\xad\x83\x43\xf7\x14\xec\x38\x99\x6c\x12\xdf\
\x29\x0b\x21\xa4\x54\x67\xd7\xed\x98\x5e\x13\x20\xac\x05\xa2\x18\
\xf9\xa5\x21\x60\x4d\x53\x63\x45\x89\x46\x00\xb5\xef\xd0\x11\x2e\
\x8e\xc2\x8e\xcd\x20\xae\xe4\x24\x55\x16\xc7\xe6\xde\xe4\xb2\x40\
\xe6\xcf\x3c\x32\x80\x23\x47\x8e\xc4\x13\x0b\xdc\xfd\xb1\xaf\x77\
\x0f\x0c\x0c\x44\x49\x59\x8a\x0a\x17\x41\xe9\x92\x52\x28\xcc\x2f\
\x64\xad\x01\xcd\x0b\x1d\xb7\x6f\x31\x75\x19\x9f\x9c\x00\x8b\xd9\
\xca\x26\x34\x95\x9c\x47\x90\xb1\x39\x43\x61\x60\xe6\x40\x90\xa0\
\xb2\xc3\xbc\x97\xdd\x6e\xa7\xa2\x17\x45\x55\xbe\x9f\x04\xcf\x17\
\x54\xe4\xae\x54\x32\x90\x00\x70\xf6\xc4\xd9\xc1\xa9\x89\x89\x2d\
\x6e\x77\xef\xad\x1b\xed\x6d\x40\x76\x6f\xe8\x1e\x71\x5f\x0b\x04\
\x82\xd7\x47\x47\x47\xa7\xc6\xbe\x1e\x57\xfb\xdc\xbd\x4c\x49\x88\
\xcb\x24\x95\x4a\x7c\x4e\x88\x81\x50\xe7\x83\xa0\x23\x21\xd0\x62\
\x70\x28\x00\x26\xec\x83\x70\x4d\x20\xf4\xdc\xee\x6f\x8d\x07\x10\
\x34\x41\xb8\x32\x73\xfe\x2f\xa1\xc7\x5e\x91\x9d\x3c\x79\xe6\xdf\
\x9f\x1e\x6c\xfc\xe5\xa1\x7f\x1d\xce\x45\xcb\x47\x2b\x42\x2b\x69\
\xfc\xf4\xb3\xad\xdd\x9d\x9d\xf5\x5d\x3d\xdd\x1e\xa4\x8d\x36\x31\
\x39\x0e\x69\xb6\x74\xec\x95\x4c\x89\x0c\x10\xdd\xe8\x5c\x8d\xd3\
\x8a\x81\x88\x67\x40\x07\x41\xab\x31\x3f\x4a\xaf\x24\x2b\xe7\xcf\
\x9d\x3b\x17\xad\xab\xab\x63\xb7\x8c\x1c\xff\x73\x7b\xaa\x4b\x4a\
\xed\x7b\x9a\xda\xd1\xd1\x3d\xd8\xdd\xdb\xb3\x19\x1b\x3d\xbf\xbb\
\xaf\x0f\x27\xac\x74\x16\x4d\x3d\x03\x34\x4b\x2b\xaa\x5e\x0f\x5a\
\x3c\x0b\xf3\x33\x90\x93\x93\x43\x0b\x9a\xa8\x28\x46\x0e\x3c\x48\
\xae\x14\x00\xc4\x23\xa0\x7e\x87\x29\x71\x93\xf5\x63\x6f\x67\xef\
\x50\xd7\xed\x8e\x5f\xdf\xee\xf8\x4a\xb1\x58\xac\x4c\xd3\x35\xa6\
\x56\x6a\x02\x04\xa3\x91\x6e\x49\x20\x68\x49\xe9\x70\x64\xc0\xf0\
\xd0\x90\xd6\xd7\x73\xf7\x4c\xfc\x19\x8f\xbf\x2b\x81\x20\xbe\xed\
\xf0\x3c\xa7\x93\x4c\x22\xbb\x75\xab\x7b\x10\x1b\xb7\xe6\x19\x5c\
\x1b\xd0\xf2\xd3\x6a\xb5\xc6\x28\xa4\xc6\x4c\xef\x73\xe6\x80\xc4\
\x40\xe4\xb9\x5c\xb8\x9e\xf0\x50\xbf\x74\xa9\xb9\xb9\x39\xa4\xd3\
\xe7\x89\x6c\xab\xe0\x60\xdf\xcb\x79\xdd\x3c\x5e\xdf\xc1\xce\xdb\
\x9d\xd1\x4c\x6c\x61\x68\x0f\x94\x64\x55\xaf\x03\x56\x0b\x04\x40\
\x8b\xb5\xe8\x54\x0f\x36\x04\x99\x9f\xef\x82\x81\x81\xc1\xb0\x24\
\xa9\x1f\xc7\x83\xf6\x64\xf7\x85\xe2\x83\x3e\xd4\xe1\x24\x13\xc9\
\x2e\x5f\xba\x7c\xf2\xea\xb5\xab\x5e\x3f\xae\x1f\x1c\x19\x4e\xc8\
\xcc\x74\x32\xde\xeb\x4a\x34\x57\x0b\x98\x01\x14\xb9\xe2\x92\x62\
\xf0\xcd\xfa\x60\xe0\x3f\x03\x5e\x9f\xc7\xf7\xc5\xff\x6c\x6b\x91\
\xd2\x4a\xd9\x40\x7b\xa8\xe3\xba\x61\xdf\x13\x08\xf8\xfc\xbf\x3f\
\xd1\xd4\x14\xb6\xd9\x6c\x40\x46\xfc\x56\x93\xe4\x54\xb7\x82\x02\
\x17\x2b\xf6\xb6\xb6\xf6\xb0\x22\xc9\xbb\x91\x4e\xe2\x0f\xb2\x37\
\x4a\x19\x41\x93\xd1\x92\x9d\x4f\x80\x39\x74\xa8\xf1\x8b\xd1\xe1\
\x91\x4b\xad\x6d\xad\x92\x13\xa9\x44\x3b\xd5\x79\xae\x3c\xb6\xb5\
\x4e\x9c\xa7\x2d\x94\xe2\xe2\x62\xb6\x2b\xdd\xd9\xd9\x2d\x4e\x8c\
\x4f\x5e\xf0\xfb\x43\x27\x9f\xca\xf7\x03\xf1\xcc\xe8\x80\xc8\x24\
\x96\x25\x95\xaf\x6b\x6e\xbe\xd2\xd9\xeb\x76\xab\x59\x99\xd9\x48\
\xa5\x2c\x46\x97\x65\xcb\x2a\xa0\x6c\x69\x19\xdb\x76\x74\xbb\xfb\
\x95\x8e\x5b\x1d\x5d\xb8\x06\xad\x5f\x70\x5f\x31\xe1\x1a\x21\x22\
\x40\x60\x7d\x53\xd3\xa9\x73\xc7\x8e\x1e\x0b\x87\xb0\xd1\xa3\x6d\
\x17\x41\x30\xa0\xe2\x78\xe1\xec\x99\xf3\xa1\xb6\xeb\xad\xe7\x05\
\x2e\xb8\x9e\xee\x5d\x90\xdf\x52\x06\x02\x06\x6f\x24\x18\xdd\xd8\
\x77\xb7\xef\xad\x4f\x0e\xfc\xf3\xca\x47\x7f\xfd\xc8\xbf\xff\x6f\
\xfb\xfd\x47\x3f\x3b\x76\x65\x68\x78\x70\x47\x24\x2c\x6d\xa4\x7b\
\x16\xfc\xf7\xc4\x8a\x08\x47\xa3\x11\x69\x9d\x14\x55\x33\xc8\x44\
\x3c\xa7\x6b\x3f\x7d\x53\xff\x13\x80\x05\xfc\xfa\x2f\x25\x47\x49\
\xfb\x85\x84\xe8\xf5\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x07\x82\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x07\x49\x49\x44\x41\x54\x78\xda\xed\x58\x03\x98\xa3\x5b\
\x12\x7d\x76\x73\x6d\xdb\xb6\x6d\xdb\xb6\xbd\x63\xf7\xb6\x6d\xdb\
\xd8\x68\xc2\x9e\x49\xdb\x18\x65\xe2\x34\x63\x1b\x7f\x6d\xd5\xff\
\x25\xb3\x79\x7e\xaf\xa7\xbf\xcc\x22\xfd\x7d\xa7\x3b\xba\xf7\x9e\
\x53\x55\xb7\xea\x74\x6e\x02\x80\xff\x6a\xa4\x05\xa4\x05\xa4\x05\
\xa4\x05\x3c\x5e\xa4\x05\xa4\x05\xa4\x05\xe0\xcf\x2d\x88\x7b\x11\
\x39\x88\x0c\xc4\xad\xff\x6d\x02\x92\xc9\x67\x23\x72\x11\x77\xfd\
\x37\x09\x20\xc2\xf7\x89\x65\x82\x19\xa1\x84\xfb\xf3\xdc\xdc\xdc\
\xcc\x24\x41\x37\xef\x8b\x80\xa4\x34\xe7\x3e\x02\x12\x87\xde\x83\
\xb8\xe3\x11\xf6\xb9\x8b\x08\x23\xb2\x12\xeb\x9e\xfe\xf4\xa7\xdf\
\x23\x19\x17\xc2\xea\xda\x52\x70\xfc\xbc\x50\x56\x57\x57\xf7\x6c\
\xca\x46\x7c\xcf\xdb\xf7\x43\x40\x22\xcd\x74\xf0\xdd\x0f\x03\xfa\
\x4c\x66\x52\x19\x64\x25\x0e\xa7\x48\xd2\x67\x92\x84\x66\x25\x89\
\xbd\xbb\xb8\xb8\xf8\x6e\xd9\x79\x31\x30\x0c\x03\x3b\xbb\x5b\x31\
\xe9\x39\x91\x53\x28\xe1\x7d\x32\x9e\x8d\x5c\xda\xfb\x7a\x05\xb0\
\x69\xfe\x47\xc1\x69\x28\xaf\x2c\x79\x48\x14\x95\x14\xb8\x8b\x4b\
\x0a\xe4\x79\xf9\x27\x8f\x1d\x38\x7a\xe0\x9d\x49\x11\xce\x48\x44\
\xf3\x73\x9f\xfb\xdc\x93\x39\xfc\xb1\x1f\x9d\x15\xf3\xda\x45\x12\
\xbe\x46\x24\x15\xc4\x24\x32\x21\x10\xd6\xd6\x57\x22\x81\x60\x00\
\xdc\x1e\x17\xf8\x03\x7e\x98\x9c\x91\x87\x24\xe3\xa2\xda\x1f\xfc\
\xe0\x07\xc9\xa2\x6f\xde\xb3\x00\x4a\x33\x11\xf5\xe0\x01\x36\xbb\
\xe5\x01\xb0\x82\xc3\x69\x07\x8b\xc5\x04\x57\x95\x97\x63\x1d\x5d\
\xad\xfe\xc2\x92\xfc\x96\x6f\x7d\xeb\x5b\x4f\x8e\x93\xcf\x1c\x18\
\xee\xfd\xe4\x59\x11\xc7\xa8\x50\x5e\x09\xd8\x1d\x56\x08\x22\xd9\
\x6b\x08\x05\x81\xc8\x9b\xad\x46\x84\x09\x9c\x2e\x07\x44\x22\x61\
\x50\xa9\xaf\x46\x84\x62\x9e\xb6\xb7\xb7\xe3\xf5\x89\x0a\xd8\xb3\
\x80\x6f\x7c\xe3\x1b\x99\x28\x00\xc9\x9a\xe1\xb2\x72\xfd\x21\xa1\
\x50\x5f\x04\xad\x5e\x85\x62\x6c\xb0\xb0\x34\x1f\x2d\x2d\x2f\xda\
\xad\xaf\xaf\x7a\x13\x57\x30\x3a\x3c\x39\x7d\x3e\xe0\xf1\x7a\xc0\
\xe6\xb0\xc0\xe6\xb6\x1e\x74\x1b\x6a\x30\x6c\xe9\x60\x6b\xc7\x00\
\x3b\xc6\x2d\x30\x9a\x77\x58\xf2\x14\x10\x5a\x4f\x99\xa0\x92\x52\
\xaa\x15\x0c\x66\x4b\x79\xea\xd4\xa9\xa7\x65\x65\x65\xe5\xec\x59\
\xc0\x8f\x7f\xfc\xe3\x2c\x12\xe0\x45\x12\x18\xc1\x38\x6c\xd7\x60\
\xb5\x99\x90\xc8\x26\xa8\xb5\x0a\xb8\xa2\xbc\x08\xbb\xa6\x6d\xd0\
\x68\x55\xd0\xd6\xde\xe4\x5d\xbf\xb8\x1a\xf2\xf9\xbd\x60\xd8\xd4\
\x80\x46\xaf\x64\xc9\xba\x3c\x4e\x36\xfa\x21\x8c\x7e\x28\x1c\xa2\
\x2c\xd0\x3e\x2c\x79\x3a\x23\x12\x89\x80\x52\xa5\x88\x88\xa5\x02\
\x8b\xc9\x62\xca\x77\xb9\x5c\x5f\x2b\x28\x28\x78\xf2\x9e\x05\x7c\
\xe9\x4b\x5f\xca\x7d\xa4\x3b\xd0\xd6\xd1\x12\x95\x9d\x93\x86\xad\
\x36\x2b\x4b\xfe\x92\x62\x0d\xe4\x13\xe3\x20\x3b\x27\x06\x9f\xdf\
\x07\x6a\x9d\x02\x36\x30\xe2\xfe\x80\x0f\x4c\x16\x23\xb3\xbc\xba\
\x10\x16\x88\x38\x0c\x87\x3f\x02\x08\xcc\xd8\x5c\x34\x1c\x0e\xb3\
\xa5\x44\xc0\x4b\x1d\x9e\x9d\x9f\x9a\x41\x21\xc7\x01\xe0\x08\xbe\
\xf7\x9d\xc5\xc5\xc5\xdb\xf7\xdc\x85\x3e\xf0\x81\x0f\x3c\x65\x67\
\x67\xe7\xc3\xb1\x58\xec\xef\xb4\xe1\x03\x81\x51\x2a\x9c\x9c\x9c\
\xe8\xab\xaa\x29\x73\xab\x35\xaa\xd8\xda\xfa\x2a\xfc\x93\x3b\x0c\
\x3e\x9f\x17\xf4\x18\x79\xc3\xa6\x96\x25\x36\x35\x2b\x0f\xe3\xe5\
\xd5\xe9\xf4\xba\xa6\x50\x28\x74\x32\xb1\x7e\x8c\x3b\x04\xf4\xb3\
\xbd\xb3\x15\xe3\xf0\x47\x7d\x5a\xad\xb2\x95\x5e\xc7\xf3\x8e\xa0\
\x88\x0f\x00\xc0\x2d\xd7\x35\x07\xde\xf1\x8e\x77\x64\x5c\xba\x74\
\xe9\x5d\xb8\xe1\x1f\x49\xc4\x43\x01\x00\x0e\x78\x3c\x9e\x53\x55\
\x35\xe5\xbe\xc9\x29\x39\xac\xac\x2d\xb2\x77\x46\x6b\x50\x41\x80\
\x3a\xcb\xf4\xf9\xf0\xc2\xe2\xac\x30\x49\xf8\x21\x5c\x77\x00\xff\
\xfe\x7d\x68\xa4\x0f\x66\xe7\xa6\xc2\x62\x99\x50\xe5\xf5\x7a\xf3\
\xe2\xe4\x7f\x17\x08\x04\x9e\x93\x92\x49\xac\x50\x28\x32\x30\x43\
\xef\xf2\xf9\x7c\x3f\x13\x49\x84\xc3\xbd\x7d\x5d\x31\xec\xe9\xb0\
\xbd\xbb\x81\x35\xbf\x0d\x9b\x5b\x06\x10\x49\xcf\x5e\x8c\x13\xff\
\x1b\x46\xf5\x6d\xf8\xf7\x76\xa3\xd1\xf8\x49\xb3\xd9\xfc\x31\x0e\
\x6f\x54\xbd\xb8\xbc\x30\x9a\x24\xee\x8b\x3a\x9d\xee\xae\x94\x5a\
\x89\x8f\x7d\xec\x63\x77\x9e\x39\x73\xe6\xf9\x33\x33\x53\xbf\xee\
\xec\x6e\x63\xbc\xf1\xf2\x71\xba\x1d\x18\x7d\x39\xa3\x54\x5f\xa5\
\xb2\x38\xec\xf7\xfb\xdf\x9a\x58\xf3\x89\x4f\x7c\x22\xe7\xa7\x3f\
\xfd\xe9\x33\x6c\x36\xdb\x47\xe2\x59\xfc\x3b\xd6\xfb\x6b\x6f\x88\
\x17\xa2\x41\x93\x91\x91\xf1\x84\x41\xce\xe0\x4b\xfb\x06\xba\xd9\
\x81\x44\xad\x92\xba\x4d\xdf\x40\x17\x20\xc9\x7c\x24\xf9\x27\x3e\
\x9f\x7f\x67\xd2\x9a\xdb\x5e\xf9\xca\x57\x3e\xe5\xca\x95\x2b\xef\
\xc1\xac\xfc\xc1\xe9\x74\x3e\xff\x86\xb9\x51\xf2\x40\x88\x9c\xa6\
\xb6\xa6\xb7\x89\xa5\x42\xc6\xe3\x75\xb3\x7d\x9e\x5a\x64\x7b\x57\
\x0b\x33\x3f\x3f\xdb\xe2\x76\xbb\xbf\x4b\x2d\x39\x69\x4d\x46\x66\
\x66\x66\x6e\x51\x51\xde\xdb\xf1\x8e\xbd\xa5\xb1\xb1\xf1\x85\x6f\
\x7b\xdb\xdb\xee\x4e\xb9\x00\xf2\xf1\x09\x2f\xd4\xdc\xda\xf0\x33\
\xb4\x06\x31\xa7\xcb\x4e\x2d\x93\x9d\xd2\x7d\xfd\xdd\x4c\x6b\x7b\
\xf3\x45\xab\xd5\xfa\xf9\x4f\x7f\xfa\xd3\x4f\x89\x7b\xa3\x9b\x13\
\x5e\x07\x5b\x34\x4d\xef\x5f\xbd\xe1\x0d\x6f\x48\x58\x90\x94\xbb\
\x51\x42\x36\xd9\x87\xf6\xce\x66\xab\xdf\x4f\xfd\x7e\x97\xb5\x05\
\x1a\x8d\x0a\x04\x67\x79\xd8\x56\x47\x23\x03\x43\x7d\x3d\x7f\xfa\
\xd3\x9f\x9e\x4a\x99\x4a\x76\xa3\x25\x65\x45\xc0\xe3\x8f\x05\x71\
\x7a\xcb\x8e\x1e\xfd\xcb\xb3\xe9\xf5\x94\xba\xd1\x03\x07\x0e\x3c\
\xa7\xb2\xa6\xec\xcb\x18\xfd\xed\x8d\x4d\x43\x84\xac\x00\x09\x08\
\xe3\x94\xa5\xfb\x30\x35\x2d\x67\x27\xec\x18\x67\x24\x82\x83\x6f\
\xa1\xa0\xe0\xcc\x07\x71\xb6\x3c\x81\xd6\x1e\x3d\x7a\xf4\x9e\x8a\
\xaa\x52\xc0\xfb\x81\xfe\x47\x19\x2b\x2b\x2f\x76\x16\x14\xe7\x7d\
\x6a\xdf\xdd\x68\x5d\x43\x95\xb1\xb9\xa5\x01\x1e\x02\x48\xb2\x27\
\x34\x39\x3d\xe1\x23\x92\x54\xfb\x64\x17\x68\x02\x2f\xaf\x2e\x01\
\x97\xc7\x41\x8b\xa1\x64\x5b\x2a\x7b\x2f\xb6\x37\x19\xa1\x58\x10\
\xac\xad\xaf\x8e\x11\x71\xc2\xe8\xd8\x10\x1a\x45\x37\x6b\xe8\x5c\
\x2e\x27\xb4\x77\xb6\x86\x50\x68\x2d\x39\x80\x7d\x73\xa3\x8d\xcd\
\xb5\x10\x8d\x45\x81\x7c\x0d\x91\x23\x5b\x40\x03\x2a\x10\xf4\xb3\
\xaf\xb9\xdc\x4e\xd6\x90\x25\xc8\xdb\xed\x36\x68\x68\xac\x09\x08\
\x84\x82\x28\x91\xdb\x35\x6e\xa2\x9d\xd0\xd3\xfb\xe4\x79\x12\xfb\
\xb0\x20\x61\x34\xad\xc9\x0c\x92\xe5\xa0\xf7\xe6\x16\x66\x23\xc5\
\xa5\x05\xda\x53\xf9\xa7\xf6\xc7\x8d\xd6\x35\x56\xb3\x1b\x2b\xd4\
\x97\x40\xa5\xbd\x42\xc6\x2c\xd9\x55\xb2\xc4\xec\x48\x8c\xcc\xd9\
\x85\xcb\xeb\xd1\xa6\x96\x3a\x17\x5a\x86\x6a\x8c\xe6\xe5\xfe\xc1\
\xde\x08\x45\x96\x88\xef\xee\x6e\xb1\x24\x37\x36\x75\x2c\x69\xfd\
\x86\x86\x85\xce\xa0\x66\x41\x22\x68\x7a\x53\xf9\x2d\x2c\xce\x31\
\x85\x45\x79\xae\xbc\xbc\xbc\xd7\x5c\xb7\x1b\xad\xa9\xab\x64\xeb\
\x34\x39\xf2\x09\x3f\x8f\xbe\x86\x2d\x01\xbd\x41\xc7\x60\xc7\x09\
\x0f\x8f\x0c\x2e\xa1\x0d\x38\x99\xb0\x0c\xf3\x0b\xf3\x1d\x58\x2a\
\x9e\xf9\xc5\xb9\x88\xd9\x6c\x44\x8f\x44\x19\x44\xf8\x59\x50\x06\
\x48\x14\x09\x20\xbb\xcd\x66\x65\x7e\x61\x36\x5a\x5d\x5b\x6e\xd1\
\xea\xb5\xfb\xe3\x46\x2b\xaa\xca\xec\xe8\x73\xe0\x61\x10\xec\xe8\
\x6a\xd3\x73\xf9\x9c\x71\x85\xe2\x72\x43\x9c\xf8\xe1\x68\x34\xfa\
\x79\xf4\x47\x5f\xa6\x01\x86\xa4\x8f\x4b\x65\xe2\xb1\xee\x9e\x8e\
\xd5\xea\x9a\x0a\x3b\x0a\x62\xfe\x7d\x07\x86\xc1\xeb\x73\xb3\xb6\
\x9c\x1a\x00\xee\x15\x19\x1a\xee\x4f\x99\x1b\x7d\x20\x88\xf8\x8f\
\xf0\xef\x33\x13\x13\x5a\x2e\x97\x3f\x0d\x87\xd8\x77\x90\xd0\xdf\
\xe8\xfd\x07\xae\x21\x11\x0c\xc3\xd0\xff\x10\xb1\x8a\xca\x32\xff\
\xf2\xca\x62\x6a\xdd\x28\x00\xfc\x06\x49\x7f\x0d\x0f\x7b\x5f\x30\
\x18\x7c\x11\x00\x3c\xa8\x63\xf4\xf7\xf7\xdf\xaa\x56\xab\x9f\x85\
\xa5\xf5\x3e\xfc\xec\x77\x28\x2b\x09\xff\x43\x73\x80\x2f\xe0\x46\
\x5a\xdb\x9b\x54\x4e\xa7\x7d\xcf\x6e\x34\xe5\x50\x2a\x95\x1f\x44\
\x37\xfa\xd1\xfc\xc2\x33\x21\xb1\x44\x78\xcd\x8d\xa2\xc0\x2f\xed\
\xc5\x8d\xa6\x1c\xef\x7d\xef\x7b\xb3\x7f\xf8\xc3\x1f\x3e\x73\x72\
\x52\xfe\xc3\x24\x37\xfa\xba\xff\xa6\x6f\xe6\x6e\x7e\xed\x6b\x5f\
\x9b\x8d\x99\x78\x37\x46\xfd\x0b\x00\xf0\x84\xff\xcb\x6f\xa7\xd3\
\x02\xd2\x02\xd2\x02\xd2\x02\xd2\x02\xd2\x02\xfe\x05\x1f\xeb\x8f\
\x04\xe7\x41\x85\x61\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x09\xce\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x06\x00\x00\x00\x57\x02\xf9\x87\
\x00\x00\x09\x95\x49\x44\x41\x54\x78\xda\xed\x98\x03\x90\x23\xdd\
\xd7\x87\xd7\xf6\x7e\x6b\xdb\xb6\x6d\xdb\xb6\xed\xad\xaf\x16\x63\
\xae\x6d\xe3\x5d\x5b\x19\xcf\xee\x30\xc9\xd8\xf6\x04\x63\x05\xe7\
\x7f\x4e\x57\xa6\x77\x7b\x3a\xc9\xe4\x35\xa7\xea\xa9\x9b\x9b\x49\
\x3a\xcf\xef\xf4\xb9\xdd\x37\x29\x31\x6e\xdc\xb8\xbf\x35\xff\x05\
\xf8\x23\xf8\x2f\xc0\xa3\x47\x8f\x4a\xff\x31\xf3\x9f\x8f\x01\xf2\
\x0f\x76\x3e\x7a\xfc\x50\x7e\xff\xd1\xfd\x79\xbf\xf7\xfc\x37\x0f\
\x00\x00\xa5\xc4\x3e\xe2\xec\xf0\xc8\x30\xf8\xe6\xe6\x1a\xfe\x7b\
\xcf\x7f\xb3\x00\xc6\x96\xc6\x9d\x6d\x4e\x5a\xb9\x58\x59\x9b\x2b\
\x7c\x7c\xc5\x90\x97\x97\x0b\x6f\xde\xbe\x82\xdf\x7b\x6e\x61\x65\
\x9a\x6f\x73\xd2\xf2\x83\x95\x95\x51\x33\x6d\x5e\x89\x3d\x5b\x6c\
\x96\xf4\xe8\xfa\x59\xde\xa3\x87\x87\xb4\x43\x87\x55\xd0\xab\x57\
\x59\xad\x01\x6c\x4f\x58\x8a\x93\x53\x92\xf1\xc0\x79\x70\xe5\xea\
\x25\x08\x8b\x0c\xa1\xf1\x77\x9f\xd3\x5f\x44\x64\x38\xd8\x9c\xb0\
\x7c\x5a\xd4\x29\x76\x60\xd3\x23\xa9\x3d\xba\x40\xd4\x86\x0d\x20\
\x39\x78\x10\xd2\x86\x0f\xcf\x95\x75\xeb\x76\x9e\x27\x6f\x62\x62\
\x52\xfb\xfc\x85\x33\x05\x6a\xb5\x1a\x64\x32\x09\x1c\x39\xf6\xff\
\xf0\xfa\xfd\x0b\xb0\xb1\xb5\x82\xec\xec\xac\x5f\x39\xcf\xd6\x3b\
\xcf\xcd\xcd\x01\xfa\x5c\x4b\x2b\xb3\x3c\xce\x3a\xdc\xb9\xea\xc3\
\xcb\x75\xab\xe1\xcd\xd6\xcd\xe0\x67\x62\x02\xb1\x67\xce\x40\x92\
\x8d\x0d\xc8\x3a\x77\xce\xe5\x05\x30\x37\x3f\xde\xee\xee\xfd\xdb\
\xd9\x4a\xa5\x12\xd2\xd2\x64\xf0\xec\xf9\x33\x58\xb9\x7a\x39\x38\
\x38\xda\xd3\x07\xe8\x9d\x53\x2b\xfc\x8a\x39\x1b\xc0\xf6\xa4\x95\
\xea\xc8\x91\x23\xe5\xc8\xe7\xe2\xb1\x1d\x3f\x9d\x3f\x65\xcd\xfc\
\xff\xc6\x85\x33\x70\xfd\xe8\x61\x10\x19\x19\x81\xbf\xa9\x29\x06\
\xe8\xa4\xe4\xc8\xe3\x5f\xc9\x63\x26\xc7\x06\xbd\x7e\xf3\x32\x4b\
\xa1\x28\xa0\x33\xc0\x54\x31\x3f\x3f\x9f\x41\x2e\x97\x72\xe6\x14\
\x30\x27\x27\x9b\x79\x5c\x50\xc0\x9f\xa7\xa7\xcb\x51\xea\xe7\xcd\
\x55\x2a\x15\x5c\xbe\x7a\x29\x17\x03\x34\xbc\x60\xb4\xf5\x9a\x8d\
\x8d\x19\x44\x47\x47\x03\x15\xd4\xcf\xcf\x0f\x4e\x5b\x9b\xc3\xbd\
\xfd\x7b\xc0\x6f\xde\x5c\x48\xea\xde\x3e\xaf\x68\x80\xd2\x87\x0f\
\x1f\x98\x6d\xef\x60\x97\x43\x07\xc4\x00\x3c\x28\x04\x17\x19\x89\
\xeb\x40\xce\x81\x04\x8b\xc0\x7b\x0d\x89\xde\x7f\x78\x27\xfb\xe2\
\xae\x69\xc6\x97\x8c\x37\x2b\x2f\x9d\x31\x86\xf3\xe7\x6c\xc1\xc9\
\xc9\x11\x6e\xde\xbc\x09\x07\x0f\x1e\x00\x2b\xb3\xe3\x90\xdc\xad\
\xbd\x3a\x70\x62\xb5\x85\x3f\xca\x97\x42\x2a\x6e\xdb\xb1\x65\x97\
\xbb\xc7\xb7\x02\x5a\x60\xdf\xc5\x0d\x91\x26\x21\x7d\xa4\xe9\x41\
\xce\xa2\x50\x28\xe0\xdd\x35\xa3\x7c\xaf\xe5\x65\x73\x33\x9f\xce\
\x03\x95\xdd\x6a\x08\xbe\x33\x1b\xac\xad\x8c\x18\xf9\x8b\x17\x2f\
\x60\x80\x63\x10\x3a\xa1\xda\x22\xf2\xe6\x54\x1f\xa9\xb5\x66\xed\
\xaa\xd3\x7e\xfe\xbe\x6a\xea\x47\x92\x77\xfd\xea\x0c\x02\xbb\xcf\
\x1c\xbc\xbc\x3d\x98\x10\x5f\xbf\xb9\x80\xb7\xd0\x83\x02\x68\x1e\
\x7b\xe2\xe8\x0a\x76\xf6\x5f\x58\xe8\xb9\x8c\x0c\x46\x92\xae\xf5\
\x34\x67\x1f\x6b\x7b\x5d\x5a\x80\x1d\x08\xd7\x54\x83\x42\x79\xc5\
\xb3\x61\x90\x71\xa5\x07\xbc\x38\xbd\x05\x6c\x6d\x2d\xc1\xda\xe2\
\x38\x3c\xdb\x3c\xd5\x9c\xf5\x2e\xec\x7d\xa4\x2c\xd2\x68\xf5\xda\
\x55\x4f\x22\x22\x23\xa8\x97\x75\x54\x5b\x7b\x65\xe9\xc3\xbf\x93\
\xce\x23\x33\x53\x27\xec\x6b\x92\x85\xef\xc1\x67\x7d\xcd\xef\xf2\
\x4f\x87\x32\xf2\x92\x1b\x93\xc1\xce\x7a\x31\x58\x98\x1b\xc1\xed\
\xfd\x33\xf6\x91\xb3\xb6\x00\xe5\x91\x96\x5b\xb6\x6e\x72\x4a\x4c\
\x4c\xa0\xc5\xca\x9c\x01\x27\x67\x7b\xaa\x38\x23\xee\xe6\xfe\x95\
\xad\x98\x50\xe8\xc5\xc8\x62\xbb\xe1\x63\x6f\x46\x80\x1e\xdb\x3b\
\x08\x18\x44\x22\x6f\x94\xcb\x30\x98\x14\xd1\x07\x10\xaf\xab\x81\
\xf2\xf3\x41\x25\x58\xc5\x91\x8f\xb1\x1d\x09\x9f\x36\xb5\xcf\xe3\
\xca\xf3\x03\x54\x44\xda\xed\xdc\xb5\xdd\x4f\x2a\xa5\xab\x4d\x26\
\xaf\xe2\x9c\x4a\xf3\x2b\xcb\x11\xca\xca\xfa\x91\x4c\xbd\xa4\x8a\
\x3f\xea\x90\x9f\xc4\xc8\x0b\x97\x95\xce\xf6\x5a\x52\x62\x24\xb9\
\xea\x0b\x50\x19\xe9\xbc\x6f\xff\x9e\x68\xcd\x81\x0d\x14\x27\x61\
\xfd\xb2\x54\x0c\x5d\x70\xe5\x57\x72\xe5\x4f\x8c\x62\xe4\xdd\x17\
\x95\x18\xa5\x67\x2f\xc4\x5e\x81\xaa\x20\x3d\xf6\x1f\xdc\x27\xa1\
\x2b\x10\x49\x25\x25\xc5\xff\xb0\xd0\x04\x04\xb5\x07\x8b\x50\xe4\
\xc5\x4a\x7b\x78\xba\xe3\x73\x76\x78\xc3\xb1\x03\xb1\x58\x08\x9e\
\x5e\xee\x38\x8a\xa8\x15\xe9\x31\x3d\xcf\x9b\x3b\x3f\x3a\x45\x0b\
\x56\xa7\xbc\xd7\xd2\xd2\xb9\x1f\x66\x97\x98\x40\x05\x36\x24\x40\
\x35\xa4\xef\xbe\x83\x7b\xe9\x2e\x8c\x55\xce\x60\xab\xae\xab\xe2\
\xdc\x0a\x67\xf1\xa0\x0b\x81\x2e\x24\x3e\x9f\x80\x59\xb0\xcf\x16\
\x70\xe5\xaf\x4f\x64\xe4\x3d\x97\x95\x57\x9b\x0c\x29\xb1\x86\x0a\
\x4b\x7e\x86\x04\xa8\x81\x0c\xdc\xbd\x77\xb7\x12\x40\xcd\x88\xc7\
\xc4\x44\x62\xd5\xd9\x6a\x93\x38\x56\xda\x8d\x2a\x4c\xe2\x85\x95\
\x64\xab\x4b\x73\x1f\x1f\x51\xa1\x24\xce\x3d\x68\x4e\x5b\x04\x0e\
\x52\xdf\xcf\x7a\xe5\xc5\x6b\xaa\x82\xc5\x96\x19\xe9\xe8\xd3\x1b\
\xa9\x6a\x48\x80\xd2\x48\xcd\x0a\x15\x2a\x8c\xda\x7f\x68\xbf\x1a\
\xff\x8a\xf4\x3a\xbf\xbf\xf5\x54\x9a\x27\x6c\xb8\xfc\x68\x46\x3e\
\xcd\xf7\x13\x6c\xdb\xb5\x3d\x0b\x9d\x06\x20\xd5\x0d\x0d\x50\xbb\
\x69\xd3\xa6\x73\x8c\x4d\x8c\x94\x6a\xb5\x4a\xa7\x3c\xb5\x8b\x61\
\xe2\xb9\xb4\x59\xe3\x20\xf3\xfb\xf2\x5d\xfe\xcb\x0a\x8d\x7c\x77\
\x94\x9f\xc0\xca\x4b\x84\x6f\x81\x0a\xb8\x65\xfb\xd6\x02\x74\x1a\
\x42\x9d\x61\x68\x80\xff\xeb\xd4\xa5\xcb\xda\x93\xa7\x4e\x29\x54\
\x2a\x25\x23\x1f\x1e\x1e\x0a\xcc\x35\x5d\xec\xcd\xb6\x8c\xa6\x55\
\x68\x77\x48\xb0\x2d\x42\xf7\x0a\x5f\xcd\x97\x13\x2e\x79\xb8\x59\
\xcb\x43\x79\x01\xf8\x6e\xa8\xa5\x57\x5e\x2a\x7a\xc7\xbc\x07\x00\
\x30\xc0\x16\x28\x53\xa6\xcc\x28\xda\x1d\x90\x9f\x21\x01\xea\xf6\
\xe9\xd7\x67\xff\x8d\x9b\x37\x14\x4a\xa5\x82\x5f\x79\xb6\xea\x04\
\xbf\xe2\xda\xa4\x09\xbe\xfc\x72\xad\xf2\x32\xf1\x7b\xf6\xf5\x74\
\x06\x0e\x1c\x3a\xa8\xaa\x53\xa7\xce\x6c\x2a\xac\x21\x01\xca\x20\
\x0d\x06\x0f\x1d\x6c\xf6\xec\xc5\x33\xa5\x42\xc1\x04\xd0\xdb\xef\
\x5c\x79\xbe\x38\xbb\xe5\xf6\x79\x03\x7e\x9b\xeb\x6a\x91\x1f\x0f\
\x31\x27\xc7\x80\x78\x6d\x35\x94\xff\x40\xdb\x69\xfa\x82\x43\x9f\
\x8b\x8f\x0b\xc0\xd4\xdc\x4c\xd1\xba\x6d\xeb\xd5\xe8\x55\xc7\x90\
\x00\x65\x91\xc6\xc3\x47\x8d\xb8\x20\xb0\x17\xa8\xe8\x00\x54\x7d\
\x1d\x3d\xaf\xad\xcf\x49\x9a\x23\x4f\x42\x44\xc2\xd3\xd5\x90\xe9\
\x68\x4c\x0b\x56\xab\x7c\x82\xdb\x73\x48\x48\x48\x80\xd8\xd8\x58\
\x1c\xe3\x41\x2a\x95\x30\x85\x38\x77\xe1\x9c\xa2\x6b\xd7\xae\x7b\
\xd0\xab\x1e\x15\x58\x67\x00\xcd\x5d\xb8\x1c\xd2\x7c\xcc\xd8\x31\
\xf7\xbd\x85\xde\x6a\xfa\x60\x6e\xf5\xf5\x57\x5e\x9b\x38\x15\x21\
\x3b\x5e\x04\x49\xaf\x36\x02\xfd\xa9\x33\x63\x20\xf3\xfe\x78\x56\
\x5e\x84\x6d\xe3\xf5\xfc\x02\xae\x21\x1f\x08\x0c\x0c\x84\x88\x88\
\x70\x88\x8f\x8f\x87\xd4\xd4\x54\xa6\x30\xf7\x1e\xdc\x57\xf4\xea\
\xd3\xcb\x04\xbd\x1a\x1a\x12\xa0\x02\xd2\x7a\xc2\xe4\x49\xef\x43\
\x42\x43\x48\x86\x39\x95\xb4\x39\x13\x89\x85\x9a\xbb\xa7\x07\xb3\
\x68\x1d\x9d\xec\x39\x8b\xd5\x5b\xe8\x85\x1b\x3e\x07\xc4\x91\xc1\
\xdf\xdf\x8f\xe4\x99\x85\x1f\xf6\x72\x1f\xc8\xdd\x2e\x81\x22\x33\
\x11\x72\x62\x9c\x40\x6e\x7f\x8c\x91\xf7\x5a\x59\x09\x9e\x9d\x3a\
\x08\xae\xae\xae\xf0\xf0\xd1\x03\xb8\x77\xff\x1e\xfc\xf4\xe4\x31\
\x73\xcf\x49\x4a\x4a\xc4\xc2\x65\xc1\xcb\x57\xaf\x94\xbd\xfb\xf7\
\x3f\x8f\x5e\x2d\x34\x7e\xa5\xc9\x55\x57\x80\x4a\x48\x87\x99\x73\
\x66\xbb\xc5\xc6\xc5\x92\x58\x31\xd5\xe7\xf5\x3c\xa7\xf2\xf4\x75\
\x54\x9a\x92\x00\x81\x8f\xd7\x42\x76\x94\x1d\xa4\x8b\x6f\x40\xca\
\xfb\x9d\x10\xf7\x64\x05\xf8\xef\x6f\x05\x5f\x6e\x98\x81\x40\x20\
\x00\x67\x67\x27\x70\x73\x73\x03\xa1\x50\x08\x21\x21\xc1\x40\xbb\
\x60\x89\x44\xc2\x1c\xff\x8b\x40\xa0\xee\x3f\x78\xe0\x7d\xda\x60\
\x22\x55\x35\x5d\x52\xa6\x68\x90\xc2\xbb\x70\x65\xa4\xeb\xbc\x45\
\x0b\x83\xe8\xe7\x14\x92\xd4\x17\x80\xdf\x3a\x5c\x79\x1a\xad\x8e\
\x6c\x85\xca\x15\xcb\x82\xef\xcd\xa5\x90\xf0\x7a\x23\x84\x3e\x58\
\x07\x1e\xb7\x76\xc0\xe7\xc7\x17\xe1\xfd\xfb\x77\xe0\xe2\xe2\x8c\
\x67\xcb\x1f\x62\x62\x62\xb0\x6d\x52\x80\x76\xc0\x32\x99\x8c\xc0\
\xcf\xca\xc1\xb3\xef\x0e\x83\x86\x0e\x7e\x8b\x5e\x5d\x34\x37\xb3\
\xf2\x6c\x00\x74\x2e\x1a\x80\x12\xf6\x5a\xb8\x74\x71\xbc\x5c\x2e\
\x67\x64\x69\x17\xea\xec\xe2\xa8\xd9\x2a\xd8\x83\xd8\x47\xc8\x5e\
\xef\x1d\x9d\x1c\x08\x6a\x1d\x02\x45\x7c\x29\x00\x05\xa1\x6f\x56\
\xf8\x3e\x27\x18\x3f\x7a\x28\xac\x98\x33\x0a\x5e\xdc\x3b\x07\xee\
\x6e\xdf\x98\x2f\xe4\x51\x51\x51\xd8\x22\x49\x78\xec\x74\xba\xe2\
\x50\xab\x60\xab\x66\x6a\xa0\xbd\x17\x91\xce\x04\xf0\xc5\xd7\x8f\
\x1e\x37\xd6\x15\xbd\xba\x69\x02\x94\xe5\xca\x73\x03\x54\x43\xfa\
\x2d\x5a\xba\x44\x4e\x07\xc3\x83\x23\x86\x9f\x01\x6e\xfb\x10\x0a\
\x50\x2a\x09\x25\xfd\xca\x40\x23\x41\xcf\xb3\x23\xbd\x16\x03\xd3\
\xfb\xf1\x58\x44\x2e\x1e\x9b\xc8\x61\x9e\x0f\x0b\x0f\x87\xc9\xd3\
\xa7\xf9\xd2\x0e\x59\xdf\x7e\xe8\xc7\x8d\xdc\xe0\xf9\x8b\x16\xe5\
\xd2\x01\x48\xfa\xb7\x0b\x40\xa8\x40\xad\x26\xd4\x3c\xf0\x7f\x1a\
\x98\x90\xec\xfb\xe2\xf0\x8a\x34\x63\xee\x9c\xc8\xe2\x36\x74\xec\
\x97\x79\x64\xc4\xec\xf9\xf3\x94\x24\x41\xfd\xff\x2b\xd6\x00\x27\
\x00\xc1\x4a\x72\x82\xa8\x38\xf2\x45\xcf\x12\x5d\x4e\x67\xcd\x9b\
\x9b\x82\x5e\xfd\xf5\x6d\xe8\xd8\x8d\x5c\xf9\xf2\xe5\x27\xcd\x5f\
\xbc\x48\x4d\x07\x20\x79\x83\xaf\x42\x9c\x00\xfa\x42\x70\x83\xf0\
\xe5\x09\x05\xf3\x3e\x7a\x7f\x5a\x5a\x1a\xcc\x98\x33\x27\x93\xb3\
\xa1\xd3\x13\xa0\x4e\xf5\xea\xd5\xe7\xae\x5e\xbf\x4e\x29\x95\x4b\
\xc0\x37\x50\xc8\xc3\x2f\x48\x84\x88\xc1\x3f\x58\x0c\x01\x44\x08\
\x8d\x3e\x38\xfa\x42\x20\x11\xea\x07\x41\x44\x98\x3f\x04\x13\xe1\
\x01\x10\x42\x44\x04\x42\x68\x44\x10\x84\x46\x06\x41\x58\x64\x30\
\x84\x47\x11\x21\x10\x11\x1d\xca\x10\x19\x1d\x06\x91\x31\x61\x10\
\x15\x13\x0e\x51\xb1\xe1\x10\x8d\xc4\xc4\x45\x81\x0c\x3d\xa6\xce\
\x9a\x95\x8b\x6e\x23\xf5\x6d\xe8\x0a\xf7\x41\x75\xab\x55\xab\xb6\
\x60\xe5\x9a\x35\xca\x54\x69\x8a\xce\x00\xfe\x6c\x00\x1a\x0d\x09\
\x10\x68\x78\x00\x22\x96\x46\x0c\x11\x17\x09\x52\x79\x2a\x2e\xe2\
\xe9\xf9\xe8\x36\x86\x3a\xa4\xd8\x00\x78\x06\xe6\x2c\x5d\xb5\x52\
\x29\xc1\x00\xa9\xd2\x64\x60\x90\x24\x43\x0a\x42\x23\xcd\xb5\x93\
\x02\x12\x84\x19\x09\x19\x91\xca\x41\x4a\xc8\x09\x09\xc8\x70\x94\
\xd1\x98\x26\xa5\x11\x91\xd2\x63\x16\xb9\x66\xa4\xf7\x8d\x9f\x32\
\xb5\x80\xd6\x66\x71\x67\xa0\x34\xf5\x58\xad\x3a\x75\xc6\xf7\x1b\
\x38\x10\xfe\x4a\xf4\x1f\x34\x48\x8d\x6e\xdd\x91\x6a\x7a\xd6\x00\
\xfb\xa3\x56\x03\xa4\x17\x42\xcf\x4e\x43\x66\xfc\x49\x4c\x47\xa6\
\x22\xa3\x35\x37\xb1\xba\x48\x39\xf2\x2c\xee\x4b\x3d\xbd\xa8\x0a\
\x52\x93\x7a\xee\x2f\x40\x0d\xcd\x16\xa7\x6c\x71\x3f\xab\xfc\x8d\
\xf9\x2f\xc0\x7f\x01\xfe\x0b\xf0\x3f\xe9\x65\x26\x7d\x57\x89\xd5\
\x05\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\x14\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x30\x00\x00\x00\x30\x08\x03\x00\x00\x00\x60\xdc\x09\xb5\
\x00\x00\x01\x29\x50\x4c\x54\x45\xff\xff\xff\x00\x00\x00\x24\x24\
\x24\x00\x00\x00\x00\x00\x00\x2e\x2e\x2e\x3b\x3b\x3b\x00\x00\x00\
\x1e\x1e\x1e\x00\x00\x00\x2b\x2b\x2b\x00\x00\x00\x24\x24\x24\x2e\
\x2e\x2e\xd8\xd8\xd8\xd9\xd9\xd9\xbf\xbf\xbf\xf9\xf9\xf9\xd9\xd9\
\xd9\xbc\xbc\xbc\xbe\xbe\xbe\xe0\xe0\xe0\xde\xde\xde\xe6\xe6\xe6\
\xdf\xdf\xdf\xe0\xe0\xe0\xe0\xe0\xe0\xe1\xe1\xe1\xff\xff\xff\xfd\
\xfd\xfd\xff\xff\xff\xff\xff\xff\xff\xff\xff\xbb\xbe\xb7\xbc\xbf\
\xb8\xbc\xbf\xb9\xbe\xc0\xb9\x98\x9a\x96\x9a\x9b\x97\xa3\xa4\xa0\
\x89\x8b\x86\x8c\x8e\x88\x8e\x90\x8b\x90\x92\x8d\x92\x95\x8f\x95\
\x97\x91\x97\x99\x94\x99\x9c\x96\x9c\x9e\x98\x9e\xa0\x9b\xa0\xa3\
\x9d\xa3\xa5\x9f\xa5\xa7\xa1\xa7\xaa\xa4\xaa\xac\xa6\xac\xaf\xa8\
\xae\xb1\xaa\xb1\xb3\xad\xb3\xb6\xaf\xb5\xb8\xb1\xb7\xba\xb4\xba\
\xbd\xb6\xd4\xd8\xd0\xd4\xd8\xd1\xd6\xda\xd2\xd7\xda\xd3\xd8\xdc\
\xd5\xda\xdd\xd6\xdb\xde\xd7\xdc\xdf\xd9\xdd\xe0\xda\xdf\xe1\xdb\
\xdf\xe2\xdc\xe1\xe3\xde\xe1\xe4\xdf\xe4\xe5\xe1\xe4\xe6\xe1\xe6\
\xe7\xe4\xe6\xe8\xe4\xe8\xea\xe6\xe9\xea\xe6\xea\xec\xe9\xeb\xec\
\xe9\xed\xee\xeb\xee\xee\xec\xef\xf0\xed\xf1\xf2\xf0\xf3\xf4\xf2\
\xf6\xf7\xf5\xf8\xf9\xf7\xfa\xfb\xfa\xfb\xfb\xfb\xfc\xfc\xfb\xfc\
\xfc\xfc\xfc\xfd\xfc\xfd\xfd\xfc\xfd\xfd\xfd\xfe\xfe\xfe\xff\xff\
\xff\x93\x20\x9e\x01\x00\x00\x00\x28\x74\x52\x4e\x53\x00\x07\x07\
\x09\x0a\x0b\x0d\x0f\x11\x12\x12\x13\x15\x16\x1a\x1b\x2c\x2c\x2f\
\x35\x37\x3a\x3d\x46\x48\x49\x4b\x4c\x65\x77\x7b\x7c\x7f\xb7\xb7\
\xb7\xb7\xc9\xc9\xda\x01\x80\x91\xd9\x00\x00\x01\x72\x49\x44\x41\
\x54\x78\xda\xed\x92\x05\x8e\x1b\x51\x14\x04\x97\x4d\x61\xe6\x09\
\x27\xcb\x64\xb6\x27\x6c\x76\x98\x39\xee\xfb\x1f\x22\xaf\xf4\x27\
\xb8\xff\x9d\xc0\x6e\x61\x49\x55\xc2\x9e\x9b\xc2\xcd\x36\x9f\x2b\
\x84\xe5\xe6\x23\x18\xf1\x4b\xb9\xa5\x45\xb6\x94\x2b\xcd\x1f\xc0\
\xc8\x72\xb9\xeb\x69\xd8\x8d\x7c\x0e\xbc\x96\xe1\x4d\x30\xb2\xc2\
\x52\xaa\xb0\xf4\xc4\xe1\x03\x18\x0b\x96\x53\xbd\x79\xf5\xe2\xe9\
\x48\xe9\xe5\x93\x01\xbb\xfd\xe1\xf8\x99\xe1\xa9\x78\xb0\xd2\x16\
\xfe\x40\xed\xe4\x5c\x40\xfc\xe7\x86\x17\x9c\xa0\x25\xfc\x9e\x5a\
\xc9\xf9\x80\xf8\x2f\x0d\x2f\x3a\x41\x53\xf8\x1d\x35\x09\x40\xfc\
\xd7\x86\x5e\xd0\x10\xfe\x63\x35\x08\x40\xfc\xb7\x86\x5e\x50\x17\
\xfe\x43\xd5\x09\x40\xfc\x77\x86\x5e\x50\x13\xfe\x7d\xd5\x08\x40\
\xfc\xf7\x86\x5e\x50\x15\xfe\x3d\x55\x09\x40\xfc\x0f\x86\x5e\x50\
\x11\xfe\x03\x55\x08\x40\xfc\x8f\x86\x5e\x50\x16\xfe\x23\x95\x09\
\x40\xfc\x4f\x86\x5e\xb0\x2f\xfc\x27\xda\x27\x00\xf1\xbf\x19\x7a\
\xc1\x9e\xf0\xbb\xda\x23\x00\xf1\x65\xe8\x05\xbb\xc2\xef\x6b\x97\
\x00\xc4\x97\xa1\x17\xec\x08\x7f\xa8\x1d\x02\x10\x7f\x62\xe8\x05\
\xdb\xc2\x1f\x6b\x9b\x00\xc4\x9f\x18\x7a\xc1\x56\xf6\xe7\x2d\x02\
\x10\xff\x87\xa1\x17\x6c\x66\x7f\xde\x24\x00\xf1\xbf\x1b\x7a\xc1\
\x46\xf6\xe7\x0d\x02\x10\xff\xab\xa1\x17\xac\x67\x7f\x5e\x27\x00\
\xf1\xbf\x18\x7a\xc1\x5a\xf6\xe7\x35\x02\x10\xff\xb3\xa1\x17\xac\
\x2a\x6c\x95\xe0\x2f\x74\x82\x5c\xf1\xd6\xdd\xb0\x3b\xc9\x69\xf0\
\xf6\x6f\x3c\x13\x0d\xe6\x0f\x9d\x4d\xae\x86\x25\x47\xfe\xc5\xa3\
\x73\xd1\x2d\x1c\xbb\x12\x84\x4b\xc7\x23\x38\x65\x9b\xed\x27\x8c\
\x1a\x92\xe4\xcf\x13\xa0\x88\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x06\
\x06\xfa\x65\x63\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x6f\x00\x73\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x17\
\x0c\x49\x77\x27\
\x00\x61\
\x00\x75\x00\x64\x00\x69\x00\x6f\x00\x2d\x00\x76\x00\x6f\x00\x6c\x00\x75\x00\x6d\x00\x65\x00\x2d\x00\x6d\x00\x65\x00\x64\x00\x69\
\x00\x75\x00\x6d\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x09\x10\x6a\x47\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x62\x00\x61\x00\x63\x00\x6b\x00\x2d\x00\x73\x00\x74\
\x00\x6f\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x0f\xa4\x86\x47\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x62\x00\x61\x00\x63\x00\x6b\x00\x2d\x00\x73\x00\x74\
\x00\x61\x00\x72\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x09\xe3\x1f\x27\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x66\x00\x6c\x00\x6f\x00\x70\x00\x70\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x0f\x68\xf4\xa7\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x73\x00\x6b\x00\x69\x00\x70\x00\x2d\x00\x66\x00\x6f\x00\x72\x00\x77\x00\x61\x00\x72\
\x00\x64\x00\x2d\x00\x72\x00\x74\x00\x6c\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x05\x1b\x11\xa7\
\x00\x64\
\x00\x65\x00\x66\x00\x61\x00\x75\x00\x6c\x00\x74\x00\x5f\x00\x63\x00\x6f\x00\x76\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x15\
\x04\x57\xa1\xc7\
\x00\x61\
\x00\x75\x00\x64\x00\x69\x00\x6f\x00\x2d\x00\x76\x00\x6f\x00\x6c\x00\x75\x00\x6d\x00\x65\x00\x2d\x00\x68\x00\x69\x00\x67\x00\x68\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x03\xd1\xe1\x87\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x73\x00\x6b\x00\x69\x00\x70\x00\x2d\x00\x66\x00\x6f\x00\x72\x00\x77\x00\x61\x00\x72\
\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x02\x78\xcb\xa7\
\x00\x61\
\x00\x75\x00\x64\x00\x69\x00\x6f\x00\x2d\x00\x76\x00\x6f\x00\x6c\x00\x75\x00\x6d\x00\x65\x00\x2d\x00\x6d\x00\x75\x00\x74\x00\x65\
\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x09\xd2\x98\xc7\
\x00\x61\
\x00\x75\x00\x64\x00\x69\x00\x6f\x00\x2d\x00\x76\x00\x6f\x00\x6c\x00\x75\x00\x6d\x00\x65\x00\x2d\x00\x6c\x00\x6f\x00\x77\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x05\x01\x32\x67\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x6c\x00\x69\x00\x73\x00\x74\x00\x2d\x00\x73\x00\x68\
\x00\x75\x00\x66\x00\x66\x00\x6c\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1b\
\x04\xc8\x47\x87\
\x00\x61\
\x00\x63\x00\x63\x00\x65\x00\x73\x00\x73\x00\x6f\x00\x72\x00\x69\x00\x65\x00\x73\x00\x2d\x00\x74\x00\x65\x00\x78\x00\x74\x00\x2d\
\x00\x65\x00\x64\x00\x69\x00\x74\x00\x6f\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x0b\xa7\x9e\x07\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x2d\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x62\x00\x61\x00\x63\x00\x6b\x00\x2d\x00\x70\x00\x61\
\x00\x75\x00\x73\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x0d\x00\x00\x00\x03\
\x00\x00\x01\xac\x00\x00\x00\x00\x00\x01\x00\x00\x52\x54\
\x00\x00\x01\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x4a\xf5\
\x00\x00\x01\x4a\x00\x00\x00\x00\x00\x01\x00\x00\x3d\x53\
\x00\x00\x02\x46\x00\x00\x00\x00\x00\x01\x00\x00\x71\xaf\
\x00\x00\x02\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x6a\x29\
\x00\x00\x01\x22\x00\x00\x00\x00\x00\x01\x00\x00\x24\x74\
\x00\x00\x00\x58\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x42\
\x00\x00\x01\xde\x00\x00\x00\x00\x00\x01\x00\x00\x5e\xf3\
\x00\x00\x00\xc2\x00\x00\x00\x00\x00\x01\x00\x00\x14\x83\
\x00\x00\x02\x82\x00\x00\x00\x00\x00\x01\x00\x00\x7b\x81\
\x00\x00\x00\x24\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x1c\x59\
\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x0e\xbe\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x0d\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\xac\x00\x00\x00\x00\x00\x01\x00\x00\x52\x54\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x01\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x4a\xf5\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x01\x4a\x00\x00\x00\x00\x00\x01\x00\x00\x3d\x53\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x02\x46\x00\x00\x00\x00\x00\x01\x00\x00\x71\xaf\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x02\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x6a\x29\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x01\x22\x00\x00\x00\x00\x00\x01\x00\x00\x24\x74\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\x58\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x42\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x01\xde\x00\x00\x00\x00\x00\x01\x00\x00\x5e\xf3\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\xc2\x00\x00\x00\x00\x00\x01\x00\x00\x14\x83\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x02\x82\x00\x00\x00\x00\x00\x01\x00\x00\x7b\x81\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\x24\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\xe8\x00\x00\x00\x00\x00\x01\x00\x00\x1c\x59\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
\x00\x00\x00\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x0e\xbe\
\x00\x00\x01\x6b\x38\xbd\x58\x02\
"
# Pick the resource-struct format matching the linked Qt runtime.  Qt 5.8
# introduced the rcc version 2 format (the v2 struct carries an extra 8-byte
# field per entry — presumably a timestamp; the v1 struct does not).
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    """Register the embedded resource payload with Qt's resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    """Unregister the embedded resource payload from Qt's resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
en
| 0.73548
|
# -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt5 (Qt v5.12.2) # # WARNING! All changes made in this file will be lost!
| 1.105353
| 1
|
esv_reference_server/handlers.py
|
electrumsv/electrumsv-reference-server
| 0
|
6627845
|
<gh_stars>0
"""
Copyright(c) 2021 Bitcoin Association.
Distributed under the Open BSV software license, see the accompanying file LICENSE
"""
from __future__ import annotations
import os
from datetime import datetime, timedelta
import logging
import time
from typing import Any, Dict, Optional, TYPE_CHECKING
import aiohttp
from aiohttp import web
from bitcoinx import P2MultiSig_Output, Signature
from .errors import APIErrors
from .keys import generate_payment_public_key, \
VerifiableKeyData, verify_key_data
from .constants import AccountFlag, ChannelState, EXTERNAL_SERVER_HOST, EXTERNAL_SERVER_PORT
from . import networks
from .networks import mapi_broadcast_transaction
from .payment_channels import BrokenChannelError, InvalidTransactionError, \
process_contract_update_async, process_contract_close_async, process_funding_script, \
process_funding_transaction_async, process_refund_contract_transaction
from .sqlite_db import DatabaseStateModifiedError, create_account, create_account_payment_channel, \
deactivate_account, \
delete_account_payment_channel, \
get_account_id_for_api_key, \
get_account_id_for_public_key_bytes, get_account_metadata_for_account_id, \
get_active_channel_for_account_id, set_account_registered, \
set_payment_channel_closed, set_payment_channel_funding_transaction, \
set_payment_channel_initial_contract_transaction, \
update_payment_channel_contract
if TYPE_CHECKING:
from .keys import ServerKeys
from .application_state import ApplicationState
logger = logging.getLogger('handlers')
async def ping(request: web.Request) -> web.Response:
    """Liveness endpoint: reply with the server's banner string."""
    banner = "ElectrumSV Reference Server"
    return web.Response(text=banner)
async def get_account(request: web.Request) -> web.Response:
    """
    Return the calling client's account metadata (public key and API key).

    Two alternative forms of authentication are accepted: a Bearer API key in
    the ``Authorization`` header, or verifiable key data in the request body.
    """
    app_state: ApplicationState = request.app['app_state']
    account_id: Optional[int] = None
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None:
        # No header: fall back to verifiable key data supplied in the body.
        if not request.body_exists:
            raise web.HTTPBadRequest(reason="Body required")
        key_data: VerifiableKeyData = await request.json()
        if not verify_key_data(key_data):
            # We do not reveal if the account exists or the key data was invalid.
            raise web.HTTPUnauthorized()
        public_key_bytes = bytes.fromhex(key_data["public_key_hex"])
        account_id, account_flags = get_account_id_for_public_key_bytes(
            app_state.database_context, public_key_bytes)
    else:
        if not auth_string.startswith("Bearer "):
            raise web.HTTPBadRequest(reason="Invalid API key")
        api_key = auth_string[7:]
        account_id, account_flags = get_account_id_for_api_key(
            app_state.database_context, api_key)
    # We do not reveal if the account does not exist/is disabled or the key
    # data was invalid.
    if account_id is None or account_flags & AccountFlag.DISABLED_MASK:
        raise web.HTTPUnauthorized()
    metadata = get_account_metadata_for_account_id(app_state.database_context, account_id)
    # This should never happen but we error if it does.
    assert metadata.public_key_bytes != b""
    return web.json_response({
        "public_key_hex": metadata.public_key_bytes.hex(),
        "api_key": metadata.api_key,
    })
async def post_account_key(request: web.Request) -> web.Response:
    """
    Start the payment channel funding process by generating a payment key for the given client.

    If the client does not have an account this is part of the process of creating their account.
    If the client does have an account they must not have an active payment channel.

    There is no asynchronicity within this handler so it should be safe from any race conditions
    by any client submitting multiple requests to it.

    Error responses:
        400 / bad request    Invalid API key type or no body with client key data.
        401 / unauthorized   An API key was provided and it was invalid or the client key data
                             was not valid.
        409 / conflict       There is an existing active payment channel.
    """
    app_state: ApplicationState = request.app['app_state']
    server_keys: ServerKeys = app_state.server_keys
    account_id: Optional[int] = None
    account_public_key_bytes: Optional[bytes] = None
    # 0 is a sentinel meaning "not yet assigned"; every path below must set a
    # positive index (asserted before key generation).
    payment_key_index: int = 0
    payment_key_bytes: Optional[bytes] = None
    auth_string = request.headers.get('Authorization', None)
    api_key: str
    if auth_string is not None:
        # Authentication via Bearer API key: the account must already exist.
        if not auth_string.startswith("Bearer "):
            raise web.HTTPBadRequest
        api_key = auth_string[7:]
        account_id, _account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
        if account_id is None:
            # We do not reveal if the account exists or the key data was invalid.
            raise web.HTTPUnauthorized
        metadata = await app_state.database_context.run_in_thread_async(
            get_account_metadata_for_account_id, account_id)
        if metadata.active_channel_id is not None:
            raise web.HTTPConflict
        # NOTE(review): unlike the key-data path below this reuses the last
        # payment key index rather than incrementing it — confirm intended.
        payment_key_index = metadata.last_payment_key_index
        if account_public_key_bytes is None:
            assert len(metadata.public_key_bytes)
            account_public_key_bytes = metadata.public_key_bytes
    else:
        # Authentication via verifiable key data in the body; this path also
        # creates the account if it does not already exist.
        if not request.body_exists:
            raise web.HTTPBadRequest
        key_data: VerifiableKeyData = await request.json()
        if not verify_key_data(key_data):
            # We do not reveal if the account exists or the key data was invalid.
            raise web.HTTPUnauthorized
        account_public_key_bytes = bytes.fromhex(key_data["public_key_hex"])
        account_id, account_flags = get_account_id_for_public_key_bytes(app_state.database_context,
            account_public_key_bytes)
        if account_flags & AccountFlag.DISABLED_MASK:
            raise web.HTTPUnauthorized
        if account_id is None:
            account_id, api_key = await app_state.database_context.run_in_thread_async(
                create_account, account_public_key_bytes)
            payment_key_index = 1
        else:
            metadata = get_account_metadata_for_account_id(app_state.database_context, account_id)
            if metadata.flags & AccountFlag.MID_CREATION:
                # This is a user with an account in the process of being created, and the required
                # action is that they fund it. If they request a fresh payment key they are
                # resetting the funding process.
                assert metadata.active_channel_id is not None
                await app_state.database_context.run_in_thread_async(
                    delete_account_payment_channel, metadata.active_channel_id)
            else:
                # This should be an active user who is opening a new payment channel and does not
                # have an active one.
                if metadata.active_channel_id is not None:
                    raise web.HTTPConflict
            payment_key_index = metadata.last_payment_key_index + 1
            api_key = metadata.api_key
    # Ensure all paths that reach here have set an index to use.
    assert payment_key_index > 0
    # Derive the channel payment key from the server identity key, the client
    # key and the per-channel index.
    payment_key_bytes = generate_payment_public_key(server_keys.identity_public_key,
        account_public_key_bytes, payment_key_index).to_bytes()
    assert account_id is not None
    assert payment_key_bytes is not None
    await app_state.database_context.run_in_thread_async(
        create_account_payment_channel, account_id, payment_key_index, payment_key_bytes)
    # Respond with a multipart body: the payment key and the account API key.
    mpwriter = aiohttp.MultipartWriter()
    part = mpwriter.append(payment_key_bytes)
    part.set_content_disposition('inline', name="key")
    part = mpwriter.append(api_key)
    part.set_content_disposition('inline', name="api-key")
    response = web.Response()
    response.body = mpwriter
    return response
async def post_account_channel(request: web.Request) -> web.Response:
    """
    Accept the initial version of the contract from the client. The initial version of the contract
    acts as insurance for the client in the form of being a complete refund.

    Expects 'Bearer' API key authentication, a 'funding_value' query parameter and a multipart
    body with a 'script' part (the funding output script) and a 'transaction' part (the refund
    contract transaction). Returns the server's refund signature as the binary response body.
    """
    app_state: ApplicationState = request.app['app_state']
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None or not auth_string.startswith("Bearer "):
        raise web.HTTPBadRequest(reason="No 'Bearer' authentication.")
    api_key = auth_string[7:]
    account_id, _account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    if account_id is None:
        # We do not reveal if the account exists or the api key was invalid.
        raise web.HTTPUnauthorized
    channel_row = get_active_channel_for_account_id(app_state.database_context, account_id)
    # The contract is only acceptable in the state directly after a payment key was dispensed.
    if channel_row is None or channel_row.channel_state != ChannelState.PAYMENT_KEY_DISPENSED:
        raise web.HTTPBadRequest(reason=f"{APIErrors.PAYMENT_CHANNEL_INVALID}: "
            "Channel invalid.")
    # Request processing.
    funding_value_text = request.query.get("funding_value")
    if funding_value_text is None:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_QUERY_PARAM}: "
            "Missing 'funding_value' parameter")
    funding_value = int(funding_value_text)
    funding_p2ms: Optional[P2MultiSig_Output] = None
    funding_script_bytes = b""
    contract_transaction_bytes = b""
    async for part_reader in await request.multipart():
        if part_reader.name == "script":
            funding_script_bytes = await part_reader.read(decode=True)
            funding_p2ms = process_funding_script(funding_script_bytes,
                channel_row.payment_key_bytes)
            if funding_p2ms is None:
                code = APIErrors.INVALID_MULTIPART_PAYLOAD
                raise web.HTTPBadRequest(reason=f"{code}: Invalid 'script' multipart")
        elif part_reader.name == "transaction":
            contract_transaction_bytes = await part_reader.read(decode=True)
        else:
            # Any part other than 'script' and 'transaction' is rejected outright.
            part_name = part_reader.name or "?"
            code = APIErrors.INVALID_MULTIPART_PAYLOAD
            raise web.HTTPBadRequest(reason=f"{code}: Invalid '{part_name}' multipart")
    if not funding_script_bytes:
        code = APIErrors.MISSING_MULTIPART_PAYLOAD
        raise web.HTTPBadRequest(reason=f"{code}: Missing the 'script' multipart payload")
    if not contract_transaction_bytes:
        code = APIErrors.MISSING_MULTIPART_PAYLOAD
        raise web.HTTPBadRequest(reason=f"{code}: Missing the 'transaction' multipart payload")
    assert funding_p2ms is not None
    delivery_time = int(time.time())
    account_metadata = await app_state.database_context.run_in_thread_async(
        get_account_metadata_for_account_id, account_id)
    if account_metadata is None:
        raise web.HTTPUnauthorized
    try:
        client_payment_key_bytes, funding_transaction_hash, refund_signature_bytes = \
            process_refund_contract_transaction(
                contract_transaction_bytes, delivery_time, funding_value, funding_p2ms,
                app_state.server_keys, account_metadata, channel_row)
    except InvalidTransactionError as exc:
        raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_TRANSACTION}: {exc.args[0]}")
    # The initial contract is a complete refund, so `funding_value` is stored both as the
    # funding amount and as the current refund amount.
    await app_state.database_context.run_in_thread_async(
        set_payment_channel_initial_contract_transaction, channel_row.channel_id,
        funding_value, funding_transaction_hash, funding_value, refund_signature_bytes,
        contract_transaction_bytes, client_payment_key_bytes)
    return web.Response(body=refund_signature_bytes, content_type="application/octet-stream")
async def put_account_channel_update(request: web.Request) -> web.Response:
    """
    Accept a contract amendment from the client. This is a decreased refund to themselves and
    an increased payment to us.

    Expects 'Bearer' API key authentication, a 'refund_value' query parameter and a multipart
    body with a 'signature' part holding the client's updated refund signature.
    """
    app_state: ApplicationState = request.app['app_state']
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None or not auth_string.startswith("Bearer "):
        raise web.HTTPBadRequest(reason="No 'Bearer' authentication")
    api_key = auth_string[7:]
    account_id, account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    if account_id is None:
        # We do not reveal if the account exists or the api key was invalid.
        raise web.HTTPUnauthorized
    channel_row = get_active_channel_for_account_id(app_state.database_context, account_id)
    # Amendments are only valid against a channel whose contract is fully open.
    if channel_row is None or channel_row.channel_state != ChannelState.CONTRACT_OPEN:
        raise web.HTTPBadRequest(reason=f"{APIErrors.PAYMENT_CHANNEL_INVALID}: "
            "Channel invalid.")
    # Request processing.
    refund_value_text = request.query.get("refund_value")
    if refund_value_text is None:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_QUERY_PARAM}: "
            "Missing 'refund_value' query parameter.")
    refund_value = int(refund_value_text)
    refund_signature_bytes = b""
    async for part_reader in await request.multipart():
        if part_reader.name == "signature":
            refund_signature_bytes = await part_reader.read(decode=True)
            # An analysis result of zero is treated as an invalidly encoded signature.
            if Signature.analyze_encoding(refund_signature_bytes) == 0:
                raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                    "Invalid signature")
        else:
            part_name = part_reader.name or "?"
            raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                f"Invalid '{part_name}' multipart")
    if not refund_signature_bytes:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_MULTIPART_PAYLOAD}: "
            "Missing the 'signature' multipart payload")
    try:
        new_refund_sequence = await process_contract_update_async(refund_signature_bytes,
            refund_value, channel_row)
    except BrokenChannelError as exc:
        # These errors are ones that can only be made by someone who is intentionally
        # messing with the server. They have to have done the signature correctly already
        # in establishing the initial full refund contract.
        await app_state.database_context.run_in_thread_async(deactivate_account, account_id,
            AccountFlag.DISABLED_FLAGGED)
        raise web.HTTPNotAcceptable(reason=f"{APIErrors.BROKEN_PAYMENT_CHANNEL}: {exc.args[0]}")
    try:
        await app_state.database_context.run_in_thread_async(update_payment_channel_contract,
            channel_row.channel_id, refund_value, refund_signature_bytes, new_refund_sequence)
    except DatabaseStateModifiedError:
        raise web.HTTPBadRequest(reason=f"{APIErrors.CHANNEL_STATE_INCONSISTENCY}: "
            "Channel state inconsistency")
    # If this is the first time the client has given us a payment through the payment channel
    # then we change their account from one that is mid creation to one that is registered.
    if account_flags & AccountFlag.MID_CREATION:
        try:
            await app_state.database_context.run_in_thread_async(set_account_registered, account_id)
        except DatabaseStateModifiedError:
            raise web.HTTPBadRequest(reason=f"{APIErrors.ACCOUNT_STATE_INCONSISTENCY}: "
                "Account inconsistency")
    return web.Response()
async def post_account_funding(request: web.Request) -> web.Response:
    """
    Receive the funding transaction from the client. It is expected that the client will have
    broadcast the transaction before they give it to us, although this is not a requirement.

    Expects 'Bearer' API key authentication and a multipart body with a 'transaction' part
    holding the raw funding transaction. The transaction is validated against the channel and
    rebroadcast via MAPI; on failure the channel is closed with a state recording the reason.
    """
    app_state: ApplicationState = request.app['app_state']
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None or not auth_string.startswith("Bearer "):
        raise web.HTTPBadRequest(reason="No 'Bearer' authentication")
    api_key = auth_string[7:]
    account_id, _account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    if account_id is None:
        # We do not reveal if the account exists or the api key was invalid.
        raise web.HTTPUnauthorized
    channel_row = get_active_channel_for_account_id(app_state.database_context, account_id)
    # Funding is only acceptable once the refund contract has been established.
    if channel_row is None or channel_row.channel_state != ChannelState.REFUND_ESTABLISHED:
        raise web.HTTPBadRequest(reason=f"{APIErrors.PAYMENT_CHANNEL_INVALID}: "
            "Channel invalid.")
    funding_transaction_bytes = b""
    async for part_reader in await request.multipart():
        if part_reader.name == "transaction":
            funding_transaction_bytes = await part_reader.read(decode=True)
        else:
            part_name = part_reader.name or "?"
            raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                f"Invalid '{part_name}' multipart")
    if not funding_transaction_bytes:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_MULTIPART_PAYLOAD}: "
            "Missing the 'transaction' multipart payload")
    try:
        funding_output_script_bytes = await process_funding_transaction_async(
            funding_transaction_bytes, channel_row)
    except BrokenChannelError as exc:
        # An invalid funding transaction permanently closes the channel.
        await app_state.database_context.run_in_thread_async(set_payment_channel_closed,
            channel_row.channel_id, ChannelState.CLOSED_INVALID_FUNDING_TRANSACTION)
        raise web.HTTPNotAcceptable(reason=f"{APIErrors.BROKEN_PAYMENT_CHANNEL}: {exc.args[0]}")
    try:
        await mapi_broadcast_transaction(app_state.network, funding_transaction_bytes)
    except (aiohttp.ClientError, networks.NetworkError) as exc:
        await app_state.database_context.run_in_thread_async(set_payment_channel_closed,
            channel_row.channel_id, ChannelState.CLOSED_BROADCASTING_FUNDING_TRANSACTION)
        raise web.HTTPNotAcceptable(reason=f"{APIErrors.MAPI_BROADCAST_FAILURE}: {exc.args[0]}")
    # TODO(utxo-spends) We should register for the spend of the funding output and react to it.
    try:
        await app_state.database_context.run_in_thread_async(
            set_payment_channel_funding_transaction, channel_row.channel_id,
            funding_transaction_bytes, funding_output_script_bytes)
    except DatabaseStateModifiedError:
        # Fixed: the trailing literal was a pointless f-string with no placeholders.
        raise web.HTTPBadRequest(reason=f"{APIErrors.CHANNEL_STATE_INCONSISTENCY}: "
            "Channel state inconsistency")
    return web.Response()
async def delete_account_channel(request: web.Request) -> web.Response:
    """
    Close the payment channel for the client.

    Expects 'Bearer' API key authentication, a 'refund_value' query parameter and a multipart
    body with a 'signature' part. The closing contract transaction is produced and broadcast
    via MAPI; success is an empty 200 response.
    """
    app_state: ApplicationState = request.app['app_state']
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None or not auth_string.startswith("Bearer "):
        raise web.HTTPBadRequest(reason="No 'Bearer' authentication")
    api_key = auth_string[7:]
    account_id, _account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    if account_id is None:
        # We do not reveal if the account exists or the api key was invalid.
        raise web.HTTPUnauthorized
    channel_row = get_active_channel_for_account_id(app_state.database_context, account_id)
    if channel_row is None or channel_row.channel_state != ChannelState.REFUND_ESTABLISHED:
        # Fixed: this and the 'signature' message below were pointless f-strings with no
        # placeholders; plain literals are equivalent.
        raise web.HTTPBadRequest(reason=f"{APIErrors.PAYMENT_CHANNEL_INVALID}: "
            "Channel invalid.")
    refund_value_str = request.query.get("refund_value")
    if refund_value_str is None:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_QUERY_PARAM}: "
            "Missing 'refund_value' parameter")
    refund_value = int(refund_value_str)
    refund_signature_bytes = b""
    async for part_reader in await request.multipart():
        if part_reader.name == "signature":
            refund_signature_bytes = await part_reader.read(decode=True)
            # An analysis result of zero is treated as an invalidly encoded signature.
            if Signature.analyze_encoding(refund_signature_bytes) == 0:
                raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                    "Invalid signature")
        else:
            part_name = part_reader.name or "?"
            raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                f"Invalid '{part_name}' multipart")
    if not refund_signature_bytes:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_MULTIPART_PAYLOAD}: "
            "Missing the 'signature' multipart payload")
    account_metadata = await app_state.database_context.run_in_thread_async(
        get_account_metadata_for_account_id, account_id)
    if account_metadata is None:
        raise web.HTTPUnauthorized
    contract_transaction_bytes = await process_contract_close_async(refund_signature_bytes,
        refund_value, app_state.server_keys, account_metadata, channel_row)
    try:
        await mapi_broadcast_transaction(app_state.network, contract_transaction_bytes)
    except (aiohttp.ClientError, networks.NetworkError) as exc:
        # TODO(critical-to-fix): What do we do when claiming the contact/broadcasting errors?
        # - It could be because the fee was not high enough.
        # - It could be because the transaction structure is invalid and we checked it wrong.
        # - It could be because ???
        raise web.HTTPNotAcceptable(reason=f"{APIErrors.MAPI_BROADCAST_FAILURE}: {exc.args[0]}")
    return web.Response()
async def get_endpoints_data(request: web.Request) -> web.Response:
    """
    Describe the APIs this server exposes in the "bsvapi.endpoint" discovery document format.

    Optional endpoint groups are appended when the EXPOSE_HEADER_SV_APIS and
    EXPOSE_INDEXER_APIS environment variables are set. The document expires one day after
    it is generated.
    """
    utc_now_datetime = datetime.utcnow()
    utc_expiry_datetime = utc_now_datetime + timedelta(days=1)
    data: Dict[str, Any] = {
        "apiType": "bsvapi.endpoint",
        "apiVersion": 1,
        "baseUrl": f"http://{EXTERNAL_SERVER_HOST}:{EXTERNAL_SERVER_PORT}",
        "timestamp": utc_now_datetime.isoformat() + "Z",
        "expiryTime": utc_expiry_datetime.isoformat() + "Z",
        "endpoints": [
            {
                "apiType": "bsvapi.account",
                "apiVersion": 1,
                "baseUrl": "/api/v1/account",
            },
            {
                "apiType": "bsvapi.channel",
                "apiVersion": 1,
                "baseUrl": "/api/v1/channel"
            },
            {
                "apiType": "bsvapi.websocket",
                "apiVersion": 1,
                "baseUrl": "/api/v1/web-socket"
            }
        ]
    }
    if os.environ.get('EXPOSE_HEADER_SV_APIS'):
        data['endpoints'].extend([
            {
                "apiType": "bsvapi.headers",
                "apiVersion": 1,
                "baseUrl": "/api/v1/headers",
            },
            {
                "apiType": "bsvapi.network",
                "apiVersion": 1,
                "baseUrl": "/api/v1/network",
            },
        ])
    if os.environ.get('EXPOSE_INDEXER_APIS'):
        # BUG FIX: these entries previously used the key "baseURL", inconsistent with the
        # "baseUrl" key used by every other endpoint entry in this document.
        data['endpoints'].extend([
            {
                "apiType": "bsvapi.transaction",
                "apiVersion": 1,
                "baseUrl": "/api/v1/transaction",
            },
            {
                "apiType": "bsvapi.merkle-proof",
                "apiVersion": 1,
                "baseUrl": "/api/v1/merkle-proof",
            },
            {
                "apiType": "bsvapi.output-spend",
                "apiVersion": 1,
                "baseUrl": "/api/v1/output-spend",
            },
            {
                "apiType": "bsvapi.restoration",
                "apiVersion": 1,
                "baseUrl": "/api/v1/restoration",
                # Pricing: number of satoshis charged per `bytes` of restoration data served.
                "pricing": {
                    "data": {
                        "satoshis": 4524,
                        "bytes": 10000000,
                    }
                }
            }
        ])
    return web.json_response(data=data)
|
"""
Copyright(c) 2021 Bitcoin Association.
Distributed under the Open BSV software license, see the accompanying file LICENSE
"""
from __future__ import annotations
import os
from datetime import datetime, timedelta
import logging
import time
from typing import Any, Dict, Optional, TYPE_CHECKING
import aiohttp
from aiohttp import web
from bitcoinx import P2MultiSig_Output, Signature
from .errors import APIErrors
from .keys import generate_payment_public_key, \
VerifiableKeyData, verify_key_data
from .constants import AccountFlag, ChannelState, EXTERNAL_SERVER_HOST, EXTERNAL_SERVER_PORT
from . import networks
from .networks import mapi_broadcast_transaction
from .payment_channels import BrokenChannelError, InvalidTransactionError, \
process_contract_update_async, process_contract_close_async, process_funding_script, \
process_funding_transaction_async, process_refund_contract_transaction
from .sqlite_db import DatabaseStateModifiedError, create_account, create_account_payment_channel, \
deactivate_account, \
delete_account_payment_channel, \
get_account_id_for_api_key, \
get_account_id_for_public_key_bytes, get_account_metadata_for_account_id, \
get_active_channel_for_account_id, set_account_registered, \
set_payment_channel_closed, set_payment_channel_funding_transaction, \
set_payment_channel_initial_contract_transaction, \
update_payment_channel_contract
if TYPE_CHECKING:
from .keys import ServerKeys
from .application_state import ApplicationState
logger = logging.getLogger('handlers')
async def ping(request: web.Request) -> web.Response:
    """Liveness probe: always responds with the server's identifying banner text."""
    banner_text = "ElectrumSV Reference Server"
    return web.Response(text=banner_text)
async def get_account(request: web.Request) -> web.Response:
    """
    Return the account's public key (hex) and API key as JSON.

    Two alternative forms of authentication: either a 'Bearer <api key>' Authorization
    header, or verifiable key data supplied as the JSON request body.
    """
    app_state: ApplicationState = request.app['app_state']
    account_id: Optional[int] = None
    auth_string = request.headers.get('Authorization', None)
    if auth_string is not None:
        # Form 1: 'Bearer <api key>' header authentication.
        if not auth_string.startswith("Bearer "):
            raise web.HTTPBadRequest(reason="Invalid API key")
        api_key = auth_string[7:]
        account_id, account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    else:
        # Form 2: verifiable key data in the JSON request body.
        if not request.body_exists:
            raise web.HTTPBadRequest(reason="Body required")
        key_data: VerifiableKeyData = await request.json()
        if not verify_key_data(key_data):
            # We do not reveal if the account exists or the key data was invalid.
            raise web.HTTPUnauthorized()
        public_key_bytes = bytes.fromhex(key_data["public_key_hex"])
        account_id, account_flags = get_account_id_for_public_key_bytes(app_state.database_context,
            public_key_bytes)
    # We do not reveal if the account does not exist/is disabled or the key data was invalid.
    if account_id is None or account_flags & AccountFlag.DISABLED_MASK:
        raise web.HTTPUnauthorized
    metadata = get_account_metadata_for_account_id(app_state.database_context, account_id)
    # This should never happen but we error if it does.
    assert metadata.public_key_bytes != b""
    data = {
        "public_key_hex": metadata.public_key_bytes.hex(),
        "api_key": metadata.api_key,
    }
    return web.json_response(data)
async def post_account_key(request: web.Request) -> web.Response:
    """
    Start the payment channel funding process by generating a payment key for the given client.
    If the client does not have an account this is part of the process of creating their account.
    If the client does have an account they must not have an active payment channel.

    There is no asynchronicity within this handler so it should be safe from any race conditions
    by any client submitting multiple requests to it.

    Error responses:
        400 / bad request     Invalid API key type or no body with client key data.
        401 / unauthorized    An API key was provided and it was invalid or the client key data
                              was not valid.
        409 / conflict        There is an existing active payment channel.
    """
    app_state: ApplicationState = request.app['app_state']
    server_keys: ServerKeys = app_state.server_keys
    account_id: Optional[int] = None
    account_public_key_bytes: Optional[bytes] = None
    payment_key_index: int = 0
    payment_key_bytes: Optional[bytes] = None
    auth_string = request.headers.get('Authorization', None)
    api_key: str
    if auth_string is not None:
        # Existing account path: 'Bearer <api key>' header authentication.
        if not auth_string.startswith("Bearer "):
            raise web.HTTPBadRequest
        api_key = auth_string[7:]
        account_id, _account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
        if account_id is None:
            # We do not reveal if the account exists or the key data was invalid.
            raise web.HTTPUnauthorized
        metadata = await app_state.database_context.run_in_thread_async(
            get_account_metadata_for_account_id, account_id)
        if metadata.active_channel_id is not None:
            raise web.HTTPConflict
        # NOTE(review): this path reuses `last_payment_key_index` while the key-data path
        # below uses `last_payment_key_index + 1` — confirm the asymmetry is intentional.
        payment_key_index = metadata.last_payment_key_index
        if account_public_key_bytes is None:
            assert len(metadata.public_key_bytes)
            account_public_key_bytes = metadata.public_key_bytes
    else:
        # Account creation / key-data path: verifiable key data in the JSON request body.
        if not request.body_exists:
            raise web.HTTPBadRequest
        key_data: VerifiableKeyData = await request.json()
        if not verify_key_data(key_data):
            # We do not reveal if the account exists or the key data was invalid.
            raise web.HTTPUnauthorized
        account_public_key_bytes = bytes.fromhex(key_data["public_key_hex"])
        account_id, account_flags = get_account_id_for_public_key_bytes(app_state.database_context,
            account_public_key_bytes)
        if account_flags & AccountFlag.DISABLED_MASK:
            raise web.HTTPUnauthorized
        if account_id is None:
            # No account yet: create one and start from the first payment key index.
            account_id, api_key = await app_state.database_context.run_in_thread_async(
                create_account, account_public_key_bytes)
            payment_key_index = 1
        else:
            metadata = get_account_metadata_for_account_id(app_state.database_context, account_id)
            if metadata.flags & AccountFlag.MID_CREATION:
                # This is a user with an account in the process of being created, and the required
                # action is that they fund it. If they request a fresh payment key they are
                # resetting the funding process.
                assert metadata.active_channel_id is not None
                await app_state.database_context.run_in_thread_async(
                    delete_account_payment_channel, metadata.active_channel_id)
            else:
                # This should be an active user who is opening a new payment channel and does not
                # have an active one.
                if metadata.active_channel_id is not None:
                    raise web.HTTPConflict
            payment_key_index = metadata.last_payment_key_index + 1
            api_key = metadata.api_key
    # Ensure all paths that reach here have set an index to use.
    assert payment_key_index > 0
    payment_key_bytes = generate_payment_public_key(server_keys.identity_public_key,
        account_public_key_bytes, payment_key_index).to_bytes()
    assert account_id is not None
    assert payment_key_bytes is not None
    await app_state.database_context.run_in_thread_async(
        create_account_payment_channel, account_id, payment_key_index, payment_key_bytes)
    # The response is a multipart body: the payment key and the (possibly new) API key.
    mpwriter = aiohttp.MultipartWriter()
    part = mpwriter.append(payment_key_bytes)
    part.set_content_disposition('inline', name="key")
    part = mpwriter.append(api_key)
    part.set_content_disposition('inline', name="api-key")
    response = web.Response()
    response.body = mpwriter
    return response
async def post_account_channel(request: web.Request) -> web.Response:
    """
    Accept the initial version of the contract from the client. The initial version of the contract
    acts as insurance for the client in the form of being a complete refund.

    Expects 'Bearer' API key authentication, a 'funding_value' query parameter and a multipart
    body with a 'script' part (the funding output script) and a 'transaction' part (the refund
    contract transaction). Returns the server's refund signature as the binary response body.
    """
    app_state: ApplicationState = request.app['app_state']
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None or not auth_string.startswith("Bearer "):
        raise web.HTTPBadRequest(reason="No 'Bearer' authentication.")
    api_key = auth_string[7:]
    account_id, _account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    if account_id is None:
        # We do not reveal if the account exists or the api key was invalid.
        raise web.HTTPUnauthorized
    channel_row = get_active_channel_for_account_id(app_state.database_context, account_id)
    # The contract is only acceptable in the state directly after a payment key was dispensed.
    if channel_row is None or channel_row.channel_state != ChannelState.PAYMENT_KEY_DISPENSED:
        raise web.HTTPBadRequest(reason=f"{APIErrors.PAYMENT_CHANNEL_INVALID}: "
            "Channel invalid.")
    # Request processing.
    funding_value_text = request.query.get("funding_value")
    if funding_value_text is None:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_QUERY_PARAM}: "
            "Missing 'funding_value' parameter")
    funding_value = int(funding_value_text)
    funding_p2ms: Optional[P2MultiSig_Output] = None
    funding_script_bytes = b""
    contract_transaction_bytes = b""
    async for part_reader in await request.multipart():
        if part_reader.name == "script":
            funding_script_bytes = await part_reader.read(decode=True)
            funding_p2ms = process_funding_script(funding_script_bytes,
                channel_row.payment_key_bytes)
            if funding_p2ms is None:
                code = APIErrors.INVALID_MULTIPART_PAYLOAD
                raise web.HTTPBadRequest(reason=f"{code}: Invalid 'script' multipart")
        elif part_reader.name == "transaction":
            contract_transaction_bytes = await part_reader.read(decode=True)
        else:
            # Any part other than 'script' and 'transaction' is rejected outright.
            part_name = part_reader.name or "?"
            code = APIErrors.INVALID_MULTIPART_PAYLOAD
            raise web.HTTPBadRequest(reason=f"{code}: Invalid '{part_name}' multipart")
    if not funding_script_bytes:
        code = APIErrors.MISSING_MULTIPART_PAYLOAD
        raise web.HTTPBadRequest(reason=f"{code}: Missing the 'script' multipart payload")
    if not contract_transaction_bytes:
        code = APIErrors.MISSING_MULTIPART_PAYLOAD
        raise web.HTTPBadRequest(reason=f"{code}: Missing the 'transaction' multipart payload")
    assert funding_p2ms is not None
    delivery_time = int(time.time())
    account_metadata = await app_state.database_context.run_in_thread_async(
        get_account_metadata_for_account_id, account_id)
    if account_metadata is None:
        raise web.HTTPUnauthorized
    try:
        client_payment_key_bytes, funding_transaction_hash, refund_signature_bytes = \
            process_refund_contract_transaction(
                contract_transaction_bytes, delivery_time, funding_value, funding_p2ms,
                app_state.server_keys, account_metadata, channel_row)
    except InvalidTransactionError as exc:
        raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_TRANSACTION}: {exc.args[0]}")
    # The initial contract is a complete refund, so `funding_value` is stored both as the
    # funding amount and as the current refund amount.
    await app_state.database_context.run_in_thread_async(
        set_payment_channel_initial_contract_transaction, channel_row.channel_id,
        funding_value, funding_transaction_hash, funding_value, refund_signature_bytes,
        contract_transaction_bytes, client_payment_key_bytes)
    return web.Response(body=refund_signature_bytes, content_type="application/octet-stream")
async def put_account_channel_update(request: web.Request) -> web.Response:
    """
    Accept a contract amendment from the client. This is a decreased refund to themselves and
    an increased payment to us.

    Expects 'Bearer' API key authentication, a 'refund_value' query parameter and a multipart
    body with a 'signature' part holding the client's updated refund signature.
    """
    app_state: ApplicationState = request.app['app_state']
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None or not auth_string.startswith("Bearer "):
        raise web.HTTPBadRequest(reason="No 'Bearer' authentication")
    api_key = auth_string[7:]
    account_id, account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    if account_id is None:
        # We do not reveal if the account exists or the api key was invalid.
        raise web.HTTPUnauthorized
    channel_row = get_active_channel_for_account_id(app_state.database_context, account_id)
    # Amendments are only valid against a channel whose contract is fully open.
    if channel_row is None or channel_row.channel_state != ChannelState.CONTRACT_OPEN:
        raise web.HTTPBadRequest(reason=f"{APIErrors.PAYMENT_CHANNEL_INVALID}: "
            "Channel invalid.")
    # Request processing.
    refund_value_text = request.query.get("refund_value")
    if refund_value_text is None:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_QUERY_PARAM}: "
            "Missing 'refund_value' query parameter.")
    refund_value = int(refund_value_text)
    refund_signature_bytes = b""
    async for part_reader in await request.multipart():
        if part_reader.name == "signature":
            refund_signature_bytes = await part_reader.read(decode=True)
            # An analysis result of zero is treated as an invalidly encoded signature.
            if Signature.analyze_encoding(refund_signature_bytes) == 0:
                raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                    "Invalid signature")
        else:
            part_name = part_reader.name or "?"
            raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                f"Invalid '{part_name}' multipart")
    if not refund_signature_bytes:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_MULTIPART_PAYLOAD}: "
            "Missing the 'signature' multipart payload")
    try:
        new_refund_sequence = await process_contract_update_async(refund_signature_bytes,
            refund_value, channel_row)
    except BrokenChannelError as exc:
        # These errors are ones that can only be made by someone who is intentionally
        # messing with the server. They have to have done the signature correctly already
        # in establishing the initial full refund contract.
        await app_state.database_context.run_in_thread_async(deactivate_account, account_id,
            AccountFlag.DISABLED_FLAGGED)
        raise web.HTTPNotAcceptable(reason=f"{APIErrors.BROKEN_PAYMENT_CHANNEL}: {exc.args[0]}")
    try:
        await app_state.database_context.run_in_thread_async(update_payment_channel_contract,
            channel_row.channel_id, refund_value, refund_signature_bytes, new_refund_sequence)
    except DatabaseStateModifiedError:
        raise web.HTTPBadRequest(reason=f"{APIErrors.CHANNEL_STATE_INCONSISTENCY}: "
            "Channel state inconsistency")
    # If this is the first time the client has given us a payment through the payment channel
    # then we change their account from one that is mid creation to one that is registered.
    if account_flags & AccountFlag.MID_CREATION:
        try:
            await app_state.database_context.run_in_thread_async(set_account_registered, account_id)
        except DatabaseStateModifiedError:
            raise web.HTTPBadRequest(reason=f"{APIErrors.ACCOUNT_STATE_INCONSISTENCY}: "
                "Account inconsistency")
    return web.Response()
async def post_account_funding(request: web.Request) -> web.Response:
    """
    Receive the funding transaction from the client. It is expected that the client will have
    broadcast the transaction before they give it to us, although this is not a requirement.

    Expects 'Bearer' API key authentication and a multipart body with a 'transaction' part
    holding the raw funding transaction. The transaction is validated against the channel and
    rebroadcast via MAPI; on failure the channel is closed with a state recording the reason.
    """
    app_state: ApplicationState = request.app['app_state']
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None or not auth_string.startswith("Bearer "):
        raise web.HTTPBadRequest(reason="No 'Bearer' authentication")
    api_key = auth_string[7:]
    account_id, _account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    if account_id is None:
        # We do not reveal if the account exists or the api key was invalid.
        raise web.HTTPUnauthorized
    channel_row = get_active_channel_for_account_id(app_state.database_context, account_id)
    # Funding is only acceptable once the refund contract has been established.
    if channel_row is None or channel_row.channel_state != ChannelState.REFUND_ESTABLISHED:
        raise web.HTTPBadRequest(reason=f"{APIErrors.PAYMENT_CHANNEL_INVALID}: "
            "Channel invalid.")
    funding_transaction_bytes = b""
    async for part_reader in await request.multipart():
        if part_reader.name == "transaction":
            funding_transaction_bytes = await part_reader.read(decode=True)
        else:
            part_name = part_reader.name or "?"
            raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                f"Invalid '{part_name}' multipart")
    if not funding_transaction_bytes:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_MULTIPART_PAYLOAD}: "
            "Missing the 'transaction' multipart payload")
    try:
        funding_output_script_bytes = await process_funding_transaction_async(
            funding_transaction_bytes, channel_row)
    except BrokenChannelError as exc:
        # An invalid funding transaction permanently closes the channel.
        await app_state.database_context.run_in_thread_async(set_payment_channel_closed,
            channel_row.channel_id, ChannelState.CLOSED_INVALID_FUNDING_TRANSACTION)
        raise web.HTTPNotAcceptable(reason=f"{APIErrors.BROKEN_PAYMENT_CHANNEL}: {exc.args[0]}")
    try:
        await mapi_broadcast_transaction(app_state.network, funding_transaction_bytes)
    except (aiohttp.ClientError, networks.NetworkError) as exc:
        await app_state.database_context.run_in_thread_async(set_payment_channel_closed,
            channel_row.channel_id, ChannelState.CLOSED_BROADCASTING_FUNDING_TRANSACTION)
        raise web.HTTPNotAcceptable(reason=f"{APIErrors.MAPI_BROADCAST_FAILURE}: {exc.args[0]}")
    # TODO(utxo-spends) We should register for the spend of the funding output and react to it.
    try:
        await app_state.database_context.run_in_thread_async(
            set_payment_channel_funding_transaction, channel_row.channel_id,
            funding_transaction_bytes, funding_output_script_bytes)
    except DatabaseStateModifiedError:
        # Fixed: the trailing literal was a pointless f-string with no placeholders.
        raise web.HTTPBadRequest(reason=f"{APIErrors.CHANNEL_STATE_INCONSISTENCY}: "
            "Channel state inconsistency")
    return web.Response()
async def delete_account_channel(request: web.Request) -> web.Response:
    """
    Close the payment channel for the client.

    Expects 'Bearer' API key authentication, a 'refund_value' query parameter and a multipart
    body with a 'signature' part. The closing contract transaction is produced and broadcast
    via MAPI; success is an empty 200 response.
    """
    app_state: ApplicationState = request.app['app_state']
    auth_string = request.headers.get('Authorization', None)
    if auth_string is None or not auth_string.startswith("Bearer "):
        raise web.HTTPBadRequest(reason="No 'Bearer' authentication")
    api_key = auth_string[7:]
    account_id, _account_flags = get_account_id_for_api_key(app_state.database_context, api_key)
    if account_id is None:
        # We do not reveal if the account exists or the api key was invalid.
        raise web.HTTPUnauthorized
    channel_row = get_active_channel_for_account_id(app_state.database_context, account_id)
    if channel_row is None or channel_row.channel_state != ChannelState.REFUND_ESTABLISHED:
        # Fixed: this and the 'signature' message below were pointless f-strings with no
        # placeholders; plain literals are equivalent.
        raise web.HTTPBadRequest(reason=f"{APIErrors.PAYMENT_CHANNEL_INVALID}: "
            "Channel invalid.")
    refund_value_str = request.query.get("refund_value")
    if refund_value_str is None:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_QUERY_PARAM}: "
            "Missing 'refund_value' parameter")
    refund_value = int(refund_value_str)
    refund_signature_bytes = b""
    async for part_reader in await request.multipart():
        if part_reader.name == "signature":
            refund_signature_bytes = await part_reader.read(decode=True)
            # An analysis result of zero is treated as an invalidly encoded signature.
            if Signature.analyze_encoding(refund_signature_bytes) == 0:
                raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                    "Invalid signature")
        else:
            part_name = part_reader.name or "?"
            raise web.HTTPBadRequest(reason=f"{APIErrors.INVALID_MULTIPART_PAYLOAD}: "
                f"Invalid '{part_name}' multipart")
    if not refund_signature_bytes:
        raise web.HTTPBadRequest(reason=f"{APIErrors.MISSING_MULTIPART_PAYLOAD}: "
            "Missing the 'signature' multipart payload")
    account_metadata = await app_state.database_context.run_in_thread_async(
        get_account_metadata_for_account_id, account_id)
    if account_metadata is None:
        raise web.HTTPUnauthorized
    contract_transaction_bytes = await process_contract_close_async(refund_signature_bytes,
        refund_value, app_state.server_keys, account_metadata, channel_row)
    try:
        await mapi_broadcast_transaction(app_state.network, contract_transaction_bytes)
    except (aiohttp.ClientError, networks.NetworkError) as exc:
        # TODO(critical-to-fix): What do we do when claiming the contact/broadcasting errors?
        # - It could be because the fee was not high enough.
        # - It could be because the transaction structure is invalid and we checked it wrong.
        # - It could be because ???
        raise web.HTTPNotAcceptable(reason=f"{APIErrors.MAPI_BROADCAST_FAILURE}: {exc.args[0]}")
    return web.Response()
async def get_endpoints_data(request: web.Request) -> web.Response:
    """Return the machine-readable endpoint directory for this server.

    The document lists the always-available account/channel/web-socket APIs
    and, depending on the ``EXPOSE_HEADER_SV_APIS`` / ``EXPOSE_INDEXER_APIS``
    environment variables, the Header SV and indexer APIs as well.

    Parameters
    ----------
    request
        The incoming HTTP request (not used).

    Returns
    -------
    web.Response
        JSON response with the ``bsvapi.endpoint`` directory document.
    """
    utc_now_datetime = datetime.utcnow()
    # The directory is advertised as valid for one day from the request time.
    utc_expiry_datetime = utc_now_datetime + timedelta(days=1)
    data: Dict[str, Any] = {
        "apiType": "bsvapi.endpoint",
        "apiVersion": 1,
        "baseUrl": f"http://{EXTERNAL_SERVER_HOST}:{EXTERNAL_SERVER_PORT}",
        "timestamp": utc_now_datetime.isoformat() +"Z",
        "expiryTime": utc_expiry_datetime.isoformat() +"Z",
        "endpoints": [
            {
                "apiType": "bsvapi.account",
                "apiVersion": 1,
                "baseUrl": "/api/v1/account",
            },
            {
                "apiType": "bsvapi.channel",
                "apiVersion": 1,
                "baseUrl": "/api/v1/channel"
            },
            {
                "apiType": "bsvapi.websocket",
                "apiVersion": 1,
                "baseUrl": "/api/v1/web-socket"
            }
        ]
    }
    if os.environ.get('EXPOSE_HEADER_SV_APIS'):
        data['endpoints'].extend([
            {
                "apiType": "bsvapi.headers",
                "apiVersion": 1,
                "baseUrl": "/api/v1/headers",
            },
            {
                "apiType": "bsvapi.network",
                "apiVersion": 1,
                "baseUrl": "/api/v1/network",
            },
        ])
    if os.environ.get('EXPOSE_INDEXER_APIS'):
        # Bug fix: these entries previously used the key "baseURL" while every
        # other entry (and the root document) uses "baseUrl"; normalised so
        # that clients can rely on a single key name.
        data['endpoints'].extend([
            {
                "apiType": "bsvapi.transaction",
                "apiVersion": 1,
                "baseUrl": "/api/v1/transaction",
            },
            {
                "apiType": "bsvapi.merkle-proof",
                "apiVersion": 1,
                "baseUrl": "/api/v1/merkle-proof",
            },
            {
                "apiType": "bsvapi.output-spend",
                "apiVersion": 1,
                "baseUrl": "/api/v1/output-spend",
            },
            {
                "apiType": "bsvapi.restoration",
                "apiVersion": 1,
                "baseUrl": "/api/v1/restoration",
                # Pricing metadata: satoshis charged per volume of data bytes.
                "pricing": {
                    "data": {
                        "satoshis": 4524,
                        "bytes": 10000000,
                    }
                }
            }
        ])
    return web.json_response(data=data)
|
en
| 0.951167
|
Copyright(c) 2021 Bitcoin Association. Distributed under the Open BSV software license, see the accompanying file LICENSE Two alternative forms of authentication. Either Bearer Token auth required # We do not reveal if the account exists or the key data was invalid. # We do not reveal if the account does not exist/is disabled or the key data was invalid. # This should never happen but we error if it does. Start the payment channel funding process by generating a payment key for the given client. If the client does not have an account this is part of the process of creating their account. If the client does have an account they must not have an active payment channel. There is no asynchronicity within this handler so it should be safe from any race conditions by any client submitting multiple requests to it. Error responses: 400 / bad request Invalid API key type or no body with client key data. 401 / unauthorized An API key was provided and it was invalid or the client key data was not valid. 409 / conflict There is an existing active payment channel. # We do not reveal if the account exists or the key data was invalid. # We do not reveal if the account exists or the key data was invalid. # This is a user with an account in the process of being created, and the required # action is that they fund it. If they request a fresh payment key they are # resetting the funding process. # This should be an active user who is opening a new payment channel and does not # have an active one. # Ensure all paths that reach here have set an index to use. Accept the initial version of the contract from the client. The initial version of the contract acts as insurance for the client in the form of being a complete refund. # We do not reveal if the account exists or the api key was invalid. # Request processing. Accept a contract amendment from the client. This is a decreased refund to themselves and an increased payment to us. 
# We do not reveal if the account exists or the api key was invalid. # Request processing. # These errors are ones that can only be made by someone who is intentionally # messing with the server. They have to have done the signature correctly already # in establishing the initial full refund contract. # If this is the first time the client has given us a payment through the payment channel # then we change their account from one that is mid creation to one that is registered. Receive the funding transaction from the client. It is expected that the client will have broadcast the transaction before they give it to us, although this is not a requirement. # We do not reveal if the account exists or the api key was invalid. # TODO(utxo-spends) We should register for the spend of the funding output and react to it. Close the payment channel for the client. # We do not reveal if the account exists or the api key was invalid. # TODO(critical-to-fix): What do we do when claiming the contact/broadcasting errors? # - It could be because the fee was not high enough. # - It could be because the transaction structure is invalid and we checked it wrong. # - It could be because ???
| 1.79228
| 2
|
species/analysis/photometry.py
|
tomasstolker/SPECIES
| 0
|
6627846
|
"""
Module with functionalities for calculating synthetic photometry.
"""
import os
import math
import warnings
import configparser
from typing import Optional, Union, Tuple, List
import h5py
import numpy as np
from typeguard import typechecked
from species.data import database
from species.read import read_filter, read_calibration
from species.util import phot_util
class SyntheticPhotometry:
"""
Class for calculating synthetic photometry from a spectrum and also for conversion between
magnitudes and fluxes. Note that depending on the detector type (energy- or photon-counting)
the integral for the filter-weighted flux contains an additional wavelength factor.
"""
@typechecked
def __init__(self, filter_name: str) -> None:
"""
Parameters
----------
filter_name : str
Filter name as listed in the database. Filters from the SVO Filter Profile Service are
automatically downloaded and added to the database.
Returns
-------
NoneType
None
"""
self.filter_name = filter_name
self.filter_interp = None
self.wavel_range = None
self.vega_mag = 0.03 # (mag)
config_file = os.path.join(os.getcwd(), "species_config.ini")
config = configparser.ConfigParser()
config.read(config_file)
self.database = config["species"]["database"]
read_filt = read_filter.ReadFilter(self.filter_name)
self.det_type = read_filt.detector_type()
@typechecked
def zero_point(self) -> np.float64:
"""
Internal function for calculating the zero point of the provided ``filter_name``.
Returns
-------
float
Zero-point flux (W m-2 um-1).
"""
if self.wavel_range is None:
transmission = read_filter.ReadFilter(self.filter_name)
self.wavel_range = transmission.wavelength_range()
h5_file = h5py.File(self.database, "r")
try:
h5_file["spectra/calibration/vega"]
except KeyError:
h5_file.close()
species_db = database.Database()
species_db.add_spectra("vega")
h5_file = h5py.File(self.database, "r")
readcalib = read_calibration.ReadCalibration("vega", None)
calibbox = readcalib.get_spectrum()
wavelength = calibbox.wavelength
flux = calibbox.flux
wavelength_crop = wavelength[
(wavelength > self.wavel_range[0]) & (wavelength < self.wavel_range[1])
]
flux_crop = flux[
(wavelength > self.wavel_range[0]) & (wavelength < self.wavel_range[1])
]
h5_file.close()
return self.spectrum_to_flux(wavelength_crop, flux_crop)[0]
@typechecked
def spectrum_to_flux(
self,
wavelength: np.ndarray,
flux: np.ndarray,
error: Optional[np.ndarray] = None,
threshold: Optional[float] = 0.05,
) -> Tuple[
Union[np.float32, np.float64], Union[Optional[np.float32], Optional[np.float64]]
]:
"""
Function for calculating the average flux from a spectrum and a filter profile. The error
is propagated by sampling 200 random values from the error distributions.
Parameters
----------
wavelength : np.ndarray
Wavelength points (um).
flux : np.ndarray
Flux (W m-2 um-1).
error : np.ndarray, None
Uncertainty (W m-2 um-1). Not used if set to ``None``.
threshold : float, None
Transmission threshold (value between 0 and 1). If the minimum transmission value is
larger than the threshold, a NaN is returned. This will happen if the input spectrum
does not cover the full wavelength range of the filter profile. Not used if set to
``None``.
Returns
-------
float
Average flux (W m-2 um-1).
float, None
Uncertainty (W m-2 um-1).
"""
if error is not None:
# The error calculation requires the original spectrum because spectrum_to_flux is used
wavel_error = wavelength.copy()
flux_error = flux.copy()
if self.filter_interp is None:
transmission = read_filter.ReadFilter(self.filter_name)
self.filter_interp = transmission.interpolate_filter()
if self.wavel_range is None:
self.wavel_range = transmission.wavelength_range()
if wavelength.size == 0:
raise ValueError(
f"Calculation of the mean flux for {self.filter_name} is not "
f"possible because the wavelength array is empty."
)
indices = np.where(
(self.wavel_range[0] <= wavelength) & (wavelength <= self.wavel_range[1])
)[0]
if indices.size < 2:
syn_flux = np.nan
warnings.warn(
"Calculating a synthetic flux requires more than one wavelength "
"point. Photometry is set to NaN."
)
else:
if threshold is None and (
wavelength[0] > self.wavel_range[0]
or wavelength[-1] < self.wavel_range[1]
):
warnings.warn(
f"The filter profile of {self.filter_name} "
f"({self.wavel_range[0]:.4f}-{self.wavel_range[1]:.4f}) extends "
f"beyond the wavelength range of the spectrum ({wavelength[0]:.4f} "
f"-{wavelength[-1]:.4f}). The flux is set to NaN. Setting the "
f"'threshold' parameter will loosen the wavelength constraints."
)
syn_flux = np.nan
else:
wavelength = wavelength[indices]
flux = flux[indices]
transmission = self.filter_interp(wavelength)
if (
threshold is not None
and (transmission[0] > threshold or transmission[-1] > threshold)
and (
wavelength[0] < self.wavel_range[0]
or wavelength[-1] > self.wavel_range[-1]
)
):
warnings.warn(
f"The filter profile of {self.filter_name} "
f"({self.wavel_range[0]:.4f}-{self.wavel_range[1]:.4f}) "
f"extends beyond the wavelength range of the spectrum "
f"({wavelength[0]:.4f}-{wavelength[-1]:.4f}). The flux "
f"is set to NaN. Increasing the 'threshold' parameter "
f"({threshold}) will loosen the wavelength constraint."
)
syn_flux = np.nan
else:
indices = np.isnan(transmission)
indices = np.logical_not(indices)
if self.det_type == "energy":
# Energy counting detector
integrand1 = transmission[indices] * flux[indices]
integrand2 = transmission[indices]
elif self.det_type == "photon":
# Photon counting detector
integrand1 = (
wavelength[indices] * transmission[indices] * flux[indices]
)
integrand2 = wavelength[indices] * transmission[indices]
integral1 = np.trapz(integrand1, wavelength[indices])
integral2 = np.trapz(integrand2, wavelength[indices])
syn_flux = integral1 / integral2
if error is not None and not np.any(np.isnan(error)):
phot_random = np.zeros(200)
for i in range(200):
# Use the original spectrum size (i.e. wavel_error and flux_error)
spec_random = (
flux_error
+ np.random.normal(loc=0.0, scale=1.0, size=wavel_error.shape[0])
* error
)
phot_random[i] = self.spectrum_to_flux(
wavel_error, spec_random, error=None, threshold=threshold
)[0]
error_flux = np.std(phot_random)
elif error is not None and np.any(np.isnan(error)):
warnings.warn("Spectum contains NaN so can not calculate the error.")
error_flux = None
else:
error_flux = None
return syn_flux, error_flux
@typechecked
def spectrum_to_magnitude(
self,
wavelength: np.ndarray,
flux: np.ndarray,
error: Optional[Union[np.ndarray, List[np.ndarray]]] = None,
distance: Optional[Tuple[float, Optional[float]]] = None,
threshold: Optional[float] = 0.05,
) -> Tuple[
Tuple[float, Optional[float]], Optional[Tuple[Optional[float], Optional[float]]]
]:
"""
Function for calculating the apparent and absolute magnitude from a spectrum and a
filter profile. The error is propagated by sampling 200 random values from the error
distributions.
Parameters
----------
wavelength : np.ndarray
Wavelength points (um).
flux : np.ndarray
Flux (W m-2 um-1).
error : np.ndarray, list(np.ndarray), None
Uncertainty (W m-2 um-1).
distance : tuple(float, float), None
Distance and uncertainty (pc). No absolute magnitude is calculated if set to ``None``.
No error on the absolute magnitude is calculated if the uncertainty is set to ``None``.
threshold : float, None
Transmission threshold (value between 0 and 1). If the minimum transmission value is
larger than the threshold, a NaN is returned. This will happen if the input spectrum
does not cover the full wavelength range of the filter profile. Not used if set to
``None``.
Returns
-------
tuple(float, float)
Apparent magnitude and uncertainty.
tuple(float, float)
Absolute magnitude and uncertainty.
"""
zp_flux = self.zero_point()
syn_flux = self.spectrum_to_flux(
wavelength, flux, error=error, threshold=threshold
)
app_mag = self.vega_mag - 2.5 * math.log10(syn_flux[0] / zp_flux)
if error is not None and not np.any(np.isnan(error)):
mag_random = np.zeros(200)
for i in range(200):
spec_random = (
flux
+ np.random.normal(loc=0.0, scale=1.0, size=wavelength.shape[0])
* error
)
flux_random = self.spectrum_to_flux(
wavelength, spec_random, error=None, threshold=threshold
)
mag_random[i] = self.vega_mag - 2.5 * np.log10(flux_random[0] / zp_flux)
error_app_mag = np.std(mag_random)
elif error is not None and np.any(np.isnan(error)):
warnings.warn("Spectum contains NaN so can not calculate the error.")
error_app_mag = None
else:
error_app_mag = None
if distance is None:
abs_mag = None
error_abs_mag = None
else:
abs_mag = app_mag - 5.0 * np.log10(distance[0]) + 5.0
if error_app_mag is not None and distance[1] is not None:
error_dist = distance[1] * (5.0 / (distance[0] * math.log(10.0)))
error_abs_mag = math.sqrt(error_app_mag ** 2 + error_dist ** 2)
else:
error_abs_mag = None
return (app_mag, error_app_mag), (abs_mag, error_abs_mag)
@typechecked
def magnitude_to_flux(
self,
magnitude: float,
error: Optional[float] = None,
zp_flux: Optional[float] = None,
) -> Tuple[np.float64, np.float64]:
"""
Function for converting a magnitude to a flux.
Parameters
----------
magnitude : float
Magnitude.
error : float, None
Error on the magnitude. Not used if set to ``None``.
zp_flux : float, None
Zero-point flux (W m-2 um-1). The value is calculated if set to ``None``.
Returns
-------
float
Flux (W m-2 um-1).
float
Error (W m-2 um-1).
"""
if zp_flux is None:
zp_flux = self.zero_point()
flux = 10.0 ** (-0.4 * (magnitude - self.vega_mag)) * zp_flux
if error is None:
error_flux = None
else:
error_upper = flux * (10.0 ** (0.4 * error) - 1.0)
error_lower = flux * (1.0 - 10.0 ** (-0.4 * error))
error_flux = (error_lower + error_upper) / 2.0
return flux, error_flux
@typechecked
def flux_to_magnitude(
self,
flux: float,
error: Optional[Union[float, np.ndarray]] = None,
distance: Optional[
Union[
Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]
]
] = None,
) -> Tuple[
Union[Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]],
Union[Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]],
]:
"""
Function for converting a flux into a magnitude.
Parameters
----------
flux : float, np.ndarray
Flux (W m-2 um-1).
error : float, np.ndarray, None
Uncertainty (W m-2 um-1). Not used if set to None.
distance : tuple(float, float), tuple(np.ndarray, np.ndarray)
Distance and uncertainty (pc). The returned absolute magnitude is set to None in case
``distance`` is set to None. The error is not propagated into the error on the absolute
magnitude in case the distance uncertainty is set to None, for example
``distance=(20., None)``
Returns
-------
tuple(float, float), tuple(np.ndarray, np.ndarray)
Apparent magnitude and uncertainty.
tuple(float, float), tuple(np.ndarray, np.ndarray)
Absolute magnitude and uncertainty.
"""
zp_flux = self.zero_point()
app_mag = self.vega_mag - 2.5 * np.log10(flux / zp_flux)
if error is None:
error_app_mag = None
error_abs_mag = None
else:
error_app_lower = app_mag - (
self.vega_mag - 2.5 * np.log10((flux + error) / zp_flux)
)
error_app_upper = (
self.vega_mag - 2.5 * np.log10((flux - error) / zp_flux)
) - app_mag
error_app_mag = (error_app_lower + error_app_upper) / 2.0
if distance is None:
abs_mag = None
error_abs_mag = None
else:
abs_mag, error_abs_mag = phot_util.apparent_to_absolute(
(app_mag, error_app_mag), distance
)
return (app_mag, error_app_mag), (abs_mag, error_abs_mag)
|
"""
Module with functionalities for calculating synthetic photometry.
"""
import os
import math
import warnings
import configparser
from typing import Optional, Union, Tuple, List
import h5py
import numpy as np
from typeguard import typechecked
from species.data import database
from species.read import read_filter, read_calibration
from species.util import phot_util
class SyntheticPhotometry:
"""
Class for calculating synthetic photometry from a spectrum and also for conversion between
magnitudes and fluxes. Note that depending on the detector type (energy- or photon-counting)
the integral for the filter-weighted flux contains an additional wavelength factor.
"""
@typechecked
def __init__(self, filter_name: str) -> None:
"""
Parameters
----------
filter_name : str
Filter name as listed in the database. Filters from the SVO Filter Profile Service are
automatically downloaded and added to the database.
Returns
-------
NoneType
None
"""
self.filter_name = filter_name
self.filter_interp = None
self.wavel_range = None
self.vega_mag = 0.03 # (mag)
config_file = os.path.join(os.getcwd(), "species_config.ini")
config = configparser.ConfigParser()
config.read(config_file)
self.database = config["species"]["database"]
read_filt = read_filter.ReadFilter(self.filter_name)
self.det_type = read_filt.detector_type()
@typechecked
def zero_point(self) -> np.float64:
"""
Internal function for calculating the zero point of the provided ``filter_name``.
Returns
-------
float
Zero-point flux (W m-2 um-1).
"""
if self.wavel_range is None:
transmission = read_filter.ReadFilter(self.filter_name)
self.wavel_range = transmission.wavelength_range()
h5_file = h5py.File(self.database, "r")
try:
h5_file["spectra/calibration/vega"]
except KeyError:
h5_file.close()
species_db = database.Database()
species_db.add_spectra("vega")
h5_file = h5py.File(self.database, "r")
readcalib = read_calibration.ReadCalibration("vega", None)
calibbox = readcalib.get_spectrum()
wavelength = calibbox.wavelength
flux = calibbox.flux
wavelength_crop = wavelength[
(wavelength > self.wavel_range[0]) & (wavelength < self.wavel_range[1])
]
flux_crop = flux[
(wavelength > self.wavel_range[0]) & (wavelength < self.wavel_range[1])
]
h5_file.close()
return self.spectrum_to_flux(wavelength_crop, flux_crop)[0]
@typechecked
def spectrum_to_flux(
self,
wavelength: np.ndarray,
flux: np.ndarray,
error: Optional[np.ndarray] = None,
threshold: Optional[float] = 0.05,
) -> Tuple[
Union[np.float32, np.float64], Union[Optional[np.float32], Optional[np.float64]]
]:
"""
Function for calculating the average flux from a spectrum and a filter profile. The error
is propagated by sampling 200 random values from the error distributions.
Parameters
----------
wavelength : np.ndarray
Wavelength points (um).
flux : np.ndarray
Flux (W m-2 um-1).
error : np.ndarray, None
Uncertainty (W m-2 um-1). Not used if set to ``None``.
threshold : float, None
Transmission threshold (value between 0 and 1). If the minimum transmission value is
larger than the threshold, a NaN is returned. This will happen if the input spectrum
does not cover the full wavelength range of the filter profile. Not used if set to
``None``.
Returns
-------
float
Average flux (W m-2 um-1).
float, None
Uncertainty (W m-2 um-1).
"""
if error is not None:
# The error calculation requires the original spectrum because spectrum_to_flux is used
wavel_error = wavelength.copy()
flux_error = flux.copy()
if self.filter_interp is None:
transmission = read_filter.ReadFilter(self.filter_name)
self.filter_interp = transmission.interpolate_filter()
if self.wavel_range is None:
self.wavel_range = transmission.wavelength_range()
if wavelength.size == 0:
raise ValueError(
f"Calculation of the mean flux for {self.filter_name} is not "
f"possible because the wavelength array is empty."
)
indices = np.where(
(self.wavel_range[0] <= wavelength) & (wavelength <= self.wavel_range[1])
)[0]
if indices.size < 2:
syn_flux = np.nan
warnings.warn(
"Calculating a synthetic flux requires more than one wavelength "
"point. Photometry is set to NaN."
)
else:
if threshold is None and (
wavelength[0] > self.wavel_range[0]
or wavelength[-1] < self.wavel_range[1]
):
warnings.warn(
f"The filter profile of {self.filter_name} "
f"({self.wavel_range[0]:.4f}-{self.wavel_range[1]:.4f}) extends "
f"beyond the wavelength range of the spectrum ({wavelength[0]:.4f} "
f"-{wavelength[-1]:.4f}). The flux is set to NaN. Setting the "
f"'threshold' parameter will loosen the wavelength constraints."
)
syn_flux = np.nan
else:
wavelength = wavelength[indices]
flux = flux[indices]
transmission = self.filter_interp(wavelength)
if (
threshold is not None
and (transmission[0] > threshold or transmission[-1] > threshold)
and (
wavelength[0] < self.wavel_range[0]
or wavelength[-1] > self.wavel_range[-1]
)
):
warnings.warn(
f"The filter profile of {self.filter_name} "
f"({self.wavel_range[0]:.4f}-{self.wavel_range[1]:.4f}) "
f"extends beyond the wavelength range of the spectrum "
f"({wavelength[0]:.4f}-{wavelength[-1]:.4f}). The flux "
f"is set to NaN. Increasing the 'threshold' parameter "
f"({threshold}) will loosen the wavelength constraint."
)
syn_flux = np.nan
else:
indices = np.isnan(transmission)
indices = np.logical_not(indices)
if self.det_type == "energy":
# Energy counting detector
integrand1 = transmission[indices] * flux[indices]
integrand2 = transmission[indices]
elif self.det_type == "photon":
# Photon counting detector
integrand1 = (
wavelength[indices] * transmission[indices] * flux[indices]
)
integrand2 = wavelength[indices] * transmission[indices]
integral1 = np.trapz(integrand1, wavelength[indices])
integral2 = np.trapz(integrand2, wavelength[indices])
syn_flux = integral1 / integral2
if error is not None and not np.any(np.isnan(error)):
phot_random = np.zeros(200)
for i in range(200):
# Use the original spectrum size (i.e. wavel_error and flux_error)
spec_random = (
flux_error
+ np.random.normal(loc=0.0, scale=1.0, size=wavel_error.shape[0])
* error
)
phot_random[i] = self.spectrum_to_flux(
wavel_error, spec_random, error=None, threshold=threshold
)[0]
error_flux = np.std(phot_random)
elif error is not None and np.any(np.isnan(error)):
warnings.warn("Spectum contains NaN so can not calculate the error.")
error_flux = None
else:
error_flux = None
return syn_flux, error_flux
@typechecked
def spectrum_to_magnitude(
self,
wavelength: np.ndarray,
flux: np.ndarray,
error: Optional[Union[np.ndarray, List[np.ndarray]]] = None,
distance: Optional[Tuple[float, Optional[float]]] = None,
threshold: Optional[float] = 0.05,
) -> Tuple[
Tuple[float, Optional[float]], Optional[Tuple[Optional[float], Optional[float]]]
]:
"""
Function for calculating the apparent and absolute magnitude from a spectrum and a
filter profile. The error is propagated by sampling 200 random values from the error
distributions.
Parameters
----------
wavelength : np.ndarray
Wavelength points (um).
flux : np.ndarray
Flux (W m-2 um-1).
error : np.ndarray, list(np.ndarray), None
Uncertainty (W m-2 um-1).
distance : tuple(float, float), None
Distance and uncertainty (pc). No absolute magnitude is calculated if set to ``None``.
No error on the absolute magnitude is calculated if the uncertainty is set to ``None``.
threshold : float, None
Transmission threshold (value between 0 and 1). If the minimum transmission value is
larger than the threshold, a NaN is returned. This will happen if the input spectrum
does not cover the full wavelength range of the filter profile. Not used if set to
``None``.
Returns
-------
tuple(float, float)
Apparent magnitude and uncertainty.
tuple(float, float)
Absolute magnitude and uncertainty.
"""
zp_flux = self.zero_point()
syn_flux = self.spectrum_to_flux(
wavelength, flux, error=error, threshold=threshold
)
app_mag = self.vega_mag - 2.5 * math.log10(syn_flux[0] / zp_flux)
if error is not None and not np.any(np.isnan(error)):
mag_random = np.zeros(200)
for i in range(200):
spec_random = (
flux
+ np.random.normal(loc=0.0, scale=1.0, size=wavelength.shape[0])
* error
)
flux_random = self.spectrum_to_flux(
wavelength, spec_random, error=None, threshold=threshold
)
mag_random[i] = self.vega_mag - 2.5 * np.log10(flux_random[0] / zp_flux)
error_app_mag = np.std(mag_random)
elif error is not None and np.any(np.isnan(error)):
warnings.warn("Spectum contains NaN so can not calculate the error.")
error_app_mag = None
else:
error_app_mag = None
if distance is None:
abs_mag = None
error_abs_mag = None
else:
abs_mag = app_mag - 5.0 * np.log10(distance[0]) + 5.0
if error_app_mag is not None and distance[1] is not None:
error_dist = distance[1] * (5.0 / (distance[0] * math.log(10.0)))
error_abs_mag = math.sqrt(error_app_mag ** 2 + error_dist ** 2)
else:
error_abs_mag = None
return (app_mag, error_app_mag), (abs_mag, error_abs_mag)
@typechecked
def magnitude_to_flux(
self,
magnitude: float,
error: Optional[float] = None,
zp_flux: Optional[float] = None,
) -> Tuple[np.float64, np.float64]:
"""
Function for converting a magnitude to a flux.
Parameters
----------
magnitude : float
Magnitude.
error : float, None
Error on the magnitude. Not used if set to ``None``.
zp_flux : float, None
Zero-point flux (W m-2 um-1). The value is calculated if set to ``None``.
Returns
-------
float
Flux (W m-2 um-1).
float
Error (W m-2 um-1).
"""
if zp_flux is None:
zp_flux = self.zero_point()
flux = 10.0 ** (-0.4 * (magnitude - self.vega_mag)) * zp_flux
if error is None:
error_flux = None
else:
error_upper = flux * (10.0 ** (0.4 * error) - 1.0)
error_lower = flux * (1.0 - 10.0 ** (-0.4 * error))
error_flux = (error_lower + error_upper) / 2.0
return flux, error_flux
@typechecked
def flux_to_magnitude(
self,
flux: float,
error: Optional[Union[float, np.ndarray]] = None,
distance: Optional[
Union[
Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]
]
] = None,
) -> Tuple[
Union[Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]],
Union[Tuple[float, Optional[float]], Tuple[np.ndarray, Optional[np.ndarray]]],
]:
"""
Function for converting a flux into a magnitude.
Parameters
----------
flux : float, np.ndarray
Flux (W m-2 um-1).
error : float, np.ndarray, None
Uncertainty (W m-2 um-1). Not used if set to None.
distance : tuple(float, float), tuple(np.ndarray, np.ndarray)
Distance and uncertainty (pc). The returned absolute magnitude is set to None in case
``distance`` is set to None. The error is not propagated into the error on the absolute
magnitude in case the distance uncertainty is set to None, for example
``distance=(20., None)``
Returns
-------
tuple(float, float), tuple(np.ndarray, np.ndarray)
Apparent magnitude and uncertainty.
tuple(float, float), tuple(np.ndarray, np.ndarray)
Absolute magnitude and uncertainty.
"""
zp_flux = self.zero_point()
app_mag = self.vega_mag - 2.5 * np.log10(flux / zp_flux)
if error is None:
error_app_mag = None
error_abs_mag = None
else:
error_app_lower = app_mag - (
self.vega_mag - 2.5 * np.log10((flux + error) / zp_flux)
)
error_app_upper = (
self.vega_mag - 2.5 * np.log10((flux - error) / zp_flux)
) - app_mag
error_app_mag = (error_app_lower + error_app_upper) / 2.0
if distance is None:
abs_mag = None
error_abs_mag = None
else:
abs_mag, error_abs_mag = phot_util.apparent_to_absolute(
(app_mag, error_app_mag), distance
)
return (app_mag, error_app_mag), (abs_mag, error_abs_mag)
|
en
| 0.701992
|
Module with functionalities for calculating synthetic photometry. Class for calculating synthetic photometry from a spectrum and also for conversion between magnitudes and fluxes. Note that depending on the detector type (energy- or photon-counting) the integral for the filter-weighted flux contains an additional wavelength factor. Parameters ---------- filter_name : str Filter name as listed in the database. Filters from the SVO Filter Profile Service are automatically downloaded and added to the database. Returns ------- NoneType None # (mag) Internal function for calculating the zero point of the provided ``filter_name``. Returns ------- float Zero-point flux (W m-2 um-1). Function for calculating the average flux from a spectrum and a filter profile. The error is propagated by sampling 200 random values from the error distributions. Parameters ---------- wavelength : np.ndarray Wavelength points (um). flux : np.ndarray Flux (W m-2 um-1). error : np.ndarray, None Uncertainty (W m-2 um-1). Not used if set to ``None``. threshold : float, None Transmission threshold (value between 0 and 1). If the minimum transmission value is larger than the threshold, a NaN is returned. This will happen if the input spectrum does not cover the full wavelength range of the filter profile. Not used if set to ``None``. Returns ------- float Average flux (W m-2 um-1). float, None Uncertainty (W m-2 um-1). # The error calculation requires the original spectrum because spectrum_to_flux is used # Energy counting detector # Photon counting detector # Use the original spectrum size (i.e. wavel_error and flux_error) Function for calculating the apparent and absolute magnitude from a spectrum and a filter profile. The error is propagated by sampling 200 random values from the error distributions. Parameters ---------- wavelength : np.ndarray Wavelength points (um). flux : np.ndarray Flux (W m-2 um-1). error : np.ndarray, list(np.ndarray), None Uncertainty (W m-2 um-1). 
distance : tuple(float, float), None Distance and uncertainty (pc). No absolute magnitude is calculated if set to ``None``. No error on the absolute magnitude is calculated if the uncertainty is set to ``None``. threshold : float, None Transmission threshold (value between 0 and 1). If the minimum transmission value is larger than the threshold, a NaN is returned. This will happen if the input spectrum does not cover the full wavelength range of the filter profile. Not used if set to ``None``. Returns ------- tuple(float, float) Apparent magnitude and uncertainty. tuple(float, float) Absolute magnitude and uncertainty. Function for converting a magnitude to a flux. Parameters ---------- magnitude : float Magnitude. error : float, None Error on the magnitude. Not used if set to ``None``. zp_flux : float, None Zero-point flux (W m-2 um-1). The value is calculated if set to ``None``. Returns ------- float Flux (W m-2 um-1). float Error (W m-2 um-1). Function for converting a flux into a magnitude. Parameters ---------- flux : float, np.ndarray Flux (W m-2 um-1). error : float, np.ndarray, None Uncertainty (W m-2 um-1). Not used if set to None. distance : tuple(float, float), tuple(np.ndarray, np.ndarray) Distance and uncertainty (pc). The returned absolute magnitude is set to None in case ``distance`` is set to None. The error is not propagated into the error on the absolute magnitude in case the distance uncertainty is set to None, for example ``distance=(20., None)`` Returns ------- tuple(float, float), tuple(np.ndarray, np.ndarray) Apparent magnitude and uncertainty. tuple(float, float), tuple(np.ndarray, np.ndarray) Absolute magnitude and uncertainty.
| 2.543254
| 3
|
create_thumbnail.py
|
Marmita-de-Redon/automation
| 1
|
6627847
|
<reponame>Marmita-de-Redon/automation
import sys
import textwrap
from PIL import Image, ImageDraw, ImageFont
# Pixel coordinates on which main() centres the rendered title text.
center_x = 1500
center_y = 1700
# RGB fill colour for the title text.
color = (92,198,255) #5cc6ff
def multiline_title(title):
    """Break *title* into lines of at most 12 characters.

    Returns the wrapped lines joined with newline characters; an empty
    title yields an empty string.
    """
    return "\n".join(textwrap.wrap(title, width=12))
def main():
    """Render a wrapped title onto a source image and save a 1024px thumbnail
    plus a 480px small variant.

    Command line: <source_image> <dest_image> <dest_image_small> <text>
    """
    if len(sys.argv) < 5:
        # Bug fix: the original printed the literal "%s" because the program
        # name was never substituted into the format string.
        print("usage: %s <source_image> <dest_image> <dest_image_small> <text>" % sys.argv[0])
        sys.exit(2)

    image_source = sys.argv[1]
    image_dest = sys.argv[2]
    small_image_dest = sys.argv[3]
    text = multiline_title(sys.argv[4])

    img = Image.open(image_source)
    img_rgb = img.convert('RGB')

    draw = ImageDraw.Draw(img_rgb)
    # font = ImageFont.truetype(<font-file>, <font-size>)
    font = ImageFont.truetype("font.ttf", 300)
    # NOTE(review): ImageDraw.textsize was removed in Pillow 10; if the
    # installed Pillow is >= 10 this needs draw.textbbox() instead — confirm
    # the pinned Pillow version.
    w, h = draw.textsize(text, font=font)
    position = (center_x - w / 2, center_y - h / 2)
    draw.text(position, text, align="center", fill=color, font=font)

    img_rgb = img_rgb.resize((1024, 1024), resample=Image.BICUBIC)
    img_rgb.save(image_dest)

    small_img = img_rgb.resize((480, 480), resample=Image.BICUBIC)
    small_img.save(small_image_dest)
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
import sys
import textwrap
from PIL import Image, ImageDraw, ImageFont
center_x = 1500
center_y = 1700
color = (92,198,255) #5cc6ff
def multiline_title(title):
    """Wrap *title* into lines of at most 12 characters, joined by newlines."""
    return "\n".join(textwrap.wrap(title, width=12))
def main():
    """Render a centered, wrapped title onto a source image and save a
    1024x1024 thumbnail plus a 480x480 small variant.

    Command line: <source_image> <dest_image> <dest_image_small> <text>
    Exits with status 2 when arguments are missing.
    """
    if len(sys.argv) < 5:
        # Bug fix: the usage string contained a bare %s placeholder that was
        # never interpolated; fill in the program name.
        print("usage: %s <source_image> <dest_image> <dest_image_small> <text>" % sys.argv[0])
        exit(2)
    image_source = sys.argv[1]
    image_dest = sys.argv[2]
    small_image_dest = sys.argv[3]
    text = multiline_title(sys.argv[4])
    img = Image.open(image_source)
    img_rgb = img.convert('RGB')
    draw = ImageDraw.Draw(img_rgb)
    # font = ImageFont.truetype(<font-file>, <font-size>)
    font = ImageFont.truetype("font.ttf", 300)
    # NOTE(review): Draw.textsize was removed in Pillow 10; an upgrade would
    # need draw.textbbox instead - confirm the pinned Pillow version.
    w, h = draw.textsize(text, font=font)
    # Center the text block on the fixed anchor point.
    position = (center_x - w / 2, center_y - h / 2)
    draw.text(position, text, align="center", fill=color, font=font)
    img_rgb = img_rgb.resize((1024, 1024), resample=Image.BICUBIC)
    img_rgb.save(image_dest)
    small_img = img_rgb.resize((480, 480), resample=Image.BICUBIC)
    small_img.save(small_image_dest)


if __name__ == "__main__":
    main()
|
en
| 0.080308
|
#5cc6ff # font = ImageFont.truetype(<font-file>, <font-size>)
| 3.025158
| 3
|
report_building/parse_medication.py
|
Team-Asesor/Asesor
| 0
|
6627848
|
<reponame>Team-Asesor/Asesor<gh_stars>0
"""Parse free-text medication notes into medications, purposes, dosages and
timings, using spaCy NER / noun chunks plus regex heuristics, and write the
result to parsed_medication.csv."""
import pandas as pd
from collections import defaultdict
from collections import OrderedDict
import matplotlib.pyplot as plt
import random
import re
import ast
from nltk.corpus import wordnet as wn
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm

nlp = spacy.load("en")
print(nlp)
df_medications = pd.read_excel("sample_medication.xlsx")
medication_list = df_medications["text"].tolist()
medication_list_med = df_medications["medication"].tolist()
dosage_list = []
timing_list = []
purpose_list = []

for i in range(len(medication_list)):
    u = medication_list[i].strip()
    doc = nlp(u)
    dosages = []
    timings = []
    medications = []
    purposes = []
    # Words that mark a DATE entity as a dosage ("twice a day") rather than
    # a timing ("since last week").
    dosage_indicators = ["every", "each", "per", "once", "twice", "thrice", "times"]
    for ent in doc.ents:
        if ent.label_ == 'DATE':
            tex = ent.text.lower()
            if any(sub in tex for sub in dosage_indicators):
                dosages.append(tex)
            else:
                timings.append(tex)
    # Noun chunks whose root is a singular common noun are candidate
    # medications/purposes (unless they mention the field names themselves).
    for chunk in doc.noun_chunks:
        if chunk.root.text == 'DATE':
            tex = chunk.text.lower()
            if any(sub in tex for sub in dosage_indicators):
                dosages.append(tex)
            else:
                timings.append(tex)
        else:
            word_a = chunk.root.text
            for token in doc:
                word_b = token.text.lower()
                if word_a != word_b:
                    continue
                if token.pos_ == "NOUN" and token.tag_ == "NN":
                    s_lower = chunk.text.strip().lower()
                    if ('purpose' not in s_lower and 'medication' not in s_lower
                            and 'dosage' not in s_lower and 'timing' not in s_lower):
                        medications.append(chunk.text)
                        purposes.append(chunk.text)
    # Build dosage prefixes: "every"/"once"/... plus "<n> times" pairs seen
    # in the text.
    parts = u.split()
    possible = ["every", "once", "twice", "thrice"]
    for index, part in enumerate(parts):
        if (part == "times" or part == "time") and index > 0:
            new_dosage = parts[index - 1] + " " + parts[index]
            possible.append(new_dosage)
    u = u.lower()
    possible_prefix = "(" + "|".join(possible) + ")"
    # Note: re.findall always returns a list, so the original `if m != None`
    # guards were redundant and have been dropped throughout.
    for m_elem in re.findall(possible_prefix + '(.*?)(\\.|day|week|month|year)', u):
        dosages.append(m_elem[0] + m_elem[1] + m_elem[2])
    dosages = list(set(dosages))
    medications = list(set(medications))
    # "take X for Y." patterns: short X is a medication, short Y a purpose.
    for m_elem in re.findall('(take|taking|took|taken|takes|taken|use|using|used|uses)(.+?)(for)(.+?)(\\.)', u):
        t = m_elem[3]
        s = m_elem[1]
        t_parts = t.split()
        s_parts = s.split()
        if len(t_parts) <= 2:
            purposes.append(t.strip().lower())
        if len(s_parts) <= 2:
            medications.append(s.strip().lower())
    purposes = list(set(purposes))
    medications = list(set(medications))
    # Bug fix: the blocks below originally gated on the stale `s_parts`
    # left over from the previous regex loop (wrong filter, and a NameError
    # when that loop had no matches); each now splits its *current* capture.
    for m_elem in re.findall('(medication |medication:|medications |medications:)(.+?)(,|\\.|and)', u):
        s = m_elem[1]
        if len(s.split()) <= 2:
            medications.append(s.strip().lower())
    medications = list(set(medications))
    for m_elem in re.findall('(purpose |problem |purpose:|purposes |purposes:)(.+?)(,|\\.|and)', u):
        s = m_elem[1]
        if len(s.split()) <= 2:
            purposes.append(s.strip().lower())
    purposes = list(set(purposes))
    # NOTE(review): a trailing lazy (.+?) matches exactly one character -
    # presumably a greedy match was intended; kept as-is to preserve the
    # original capture behaviour.
    for m_elem in re.findall('(timing |timing:|timings |timings:)(.+?)', u):
        s = m_elem[1]
        if len(s.split()) <= 2:
            s_lower = s.strip().lower()
            if s_lower != "":
                timings.append(s_lower)
    timings = list(set(timings))
    for m_elem in re.findall('(dosage |dosage:|dosages |dosages:)(.+?)', u):
        s = m_elem[1]
        if len(s.split()) <= 2:
            s_lower = s.strip().lower()
            if s_lower != "":
                dosages.append(s_lower)
    dosages = list(set(dosages))
    print("response: ")
    print(doc)
    print("medications: ")
    print(medications)
    print("purpose: ")
    print(purposes)
    print("timing: ")
    print(timings)
    print("dosages: ")
    print(dosages)
    dosage_list.append(dosages)
    timing_list.append(timings)
    purpose_list.append(purposes)

dic_res = {
    "text": medication_list,
    "medications": medication_list_med,
    "purposes": purpose_list,
    "dosages": dosage_list,
    "timings": timing_list
}
df_res = pd.DataFrame(dic_res)
df_res.to_csv('parsed_medication.csv')
|
"""Parse free-text medication notes into medications, purposes, dosages and
timings, using spaCy NER / noun chunks plus regex heuristics, and write the
result to parsed_medication.csv."""
import pandas as pd
from collections import defaultdict
from collections import OrderedDict
import matplotlib.pyplot as plt
import random
import re
import ast
from nltk.corpus import wordnet as wn
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm

nlp = spacy.load("en")
print(nlp)
df_medications = pd.read_excel("sample_medication.xlsx")
medication_list = df_medications["text"].tolist()
medication_list_med = df_medications["medication"].tolist()
dosage_list = []
timing_list = []
purpose_list = []

for i in range(len(medication_list)):
    u = medication_list[i].strip()
    doc = nlp(u)
    dosages = []
    timings = []
    medications = []
    purposes = []
    # Words that mark a DATE entity as a dosage ("twice a day") rather than
    # a timing ("since last week").
    dosage_indicators = ["every", "each", "per", "once", "twice", "thrice", "times"]
    for ent in doc.ents:
        if ent.label_ == 'DATE':
            tex = ent.text.lower()
            if any(sub in tex for sub in dosage_indicators):
                dosages.append(tex)
            else:
                timings.append(tex)
    # Noun chunks whose root is a singular common noun are candidate
    # medications/purposes (unless they mention the field names themselves).
    for chunk in doc.noun_chunks:
        if chunk.root.text == 'DATE':
            tex = chunk.text.lower()
            if any(sub in tex for sub in dosage_indicators):
                dosages.append(tex)
            else:
                timings.append(tex)
        else:
            word_a = chunk.root.text
            for token in doc:
                word_b = token.text.lower()
                if word_a != word_b:
                    continue
                if token.pos_ == "NOUN" and token.tag_ == "NN":
                    s_lower = chunk.text.strip().lower()
                    if ('purpose' not in s_lower and 'medication' not in s_lower
                            and 'dosage' not in s_lower and 'timing' not in s_lower):
                        medications.append(chunk.text)
                        purposes.append(chunk.text)
    # Build dosage prefixes: "every"/"once"/... plus "<n> times" pairs seen
    # in the text.
    parts = u.split()
    possible = ["every", "once", "twice", "thrice"]
    for index, part in enumerate(parts):
        if (part == "times" or part == "time") and index > 0:
            new_dosage = parts[index - 1] + " " + parts[index]
            possible.append(new_dosage)
    u = u.lower()
    possible_prefix = "(" + "|".join(possible) + ")"
    # Note: re.findall always returns a list, so the original `if m != None`
    # guards were redundant and have been dropped throughout.
    for m_elem in re.findall(possible_prefix + '(.*?)(\\.|day|week|month|year)', u):
        dosages.append(m_elem[0] + m_elem[1] + m_elem[2])
    dosages = list(set(dosages))
    medications = list(set(medications))
    # "take X for Y." patterns: short X is a medication, short Y a purpose.
    for m_elem in re.findall('(take|taking|took|taken|takes|taken|use|using|used|uses)(.+?)(for)(.+?)(\\.)', u):
        t = m_elem[3]
        s = m_elem[1]
        t_parts = t.split()
        s_parts = s.split()
        if len(t_parts) <= 2:
            purposes.append(t.strip().lower())
        if len(s_parts) <= 2:
            medications.append(s.strip().lower())
    purposes = list(set(purposes))
    medications = list(set(medications))
    # Bug fix: the blocks below originally gated on the stale `s_parts`
    # left over from the previous regex loop (wrong filter, and a NameError
    # when that loop had no matches); each now splits its *current* capture.
    for m_elem in re.findall('(medication |medication:|medications |medications:)(.+?)(,|\\.|and)', u):
        s = m_elem[1]
        if len(s.split()) <= 2:
            medications.append(s.strip().lower())
    medications = list(set(medications))
    for m_elem in re.findall('(purpose |problem |purpose:|purposes |purposes:)(.+?)(,|\\.|and)', u):
        s = m_elem[1]
        if len(s.split()) <= 2:
            purposes.append(s.strip().lower())
    purposes = list(set(purposes))
    # NOTE(review): a trailing lazy (.+?) matches exactly one character -
    # presumably a greedy match was intended; kept as-is to preserve the
    # original capture behaviour.
    for m_elem in re.findall('(timing |timing:|timings |timings:)(.+?)', u):
        s = m_elem[1]
        if len(s.split()) <= 2:
            s_lower = s.strip().lower()
            if s_lower != "":
                timings.append(s_lower)
    timings = list(set(timings))
    for m_elem in re.findall('(dosage |dosage:|dosages |dosages:)(.+?)', u):
        s = m_elem[1]
        if len(s.split()) <= 2:
            s_lower = s.strip().lower()
            if s_lower != "":
                dosages.append(s_lower)
    dosages = list(set(dosages))
    print("response: ")
    print(doc)
    print("medications: ")
    print(medications)
    print("purpose: ")
    print(purposes)
    print("timing: ")
    print(timings)
    print("dosages: ")
    print(dosages)
    dosage_list.append(dosages)
    timing_list.append(timings)
    purpose_list.append(purposes)

dic_res = {
    "text": medication_list,
    "medications": medication_list_med,
    "purposes": purpose_list,
    "dosages": dosage_list,
    "timings": timing_list
}
df_res = pd.DataFrame(dic_res)
df_res.to_csv('parsed_medication.csv')
|
en
| 0.252194
|
#print(ent.text, ent.label_) #print("text: " + chunk.text) #print("label: " + chunk.label_) #print("root: " + chunk.root.text) #print(lem, token.pos_) m = re.findall('(take|taking|took|taken|takes|taken)(.*?)(\\.|for|since)', u) if m!=None: for m_elem in m: t=m_elem[1] medications.append(t) #print(m_elem) #df_medications=pd.read_excel("sample_medication.xlsx") #medication_list=df_medications["text"].tolist() #medication_list_med=df_medications["medication"].tolist()
| 2.627264
| 3
|
library/ptpulse/ledmatrix.py
|
Helenous/Pi-top-Pulse
| 0
|
6627849
|
# ledmatrix.py (pi-topPULSE)
# Copyright (C) 2017 CEED ltd.
#
from ptcommon.logger import PTLogger
from copy import deepcopy
from math import ceil
from math import radians
from math import sin
from math import cos
from math import sin
from os import path
from serial import serialutil
from serial import Serial
import signal
from sys import exit
from time import sleep
from threading import Timer
# local
from ptpulse import configuration
_initialised = False
_w = 7
_h = 7
_rotation = 0
_brightness = 1.0
_max_freq = 50 # Maximum update speed is 50 times per second
_update_rate = 0.1
_running = False
_show_enabled = True
_gamma_correction_arr = [
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 2, 2, 2, 2, 2, 2, 2,
2, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 5, 5, 5,
5, 6, 6, 6, 6, 7, 7, 7,
7, 8, 8, 8, 9, 9, 9, 10,
10, 10, 11, 11, 11, 12, 12, 13,
13, 13, 14, 14, 15, 15, 16, 16,
17, 17, 18, 18, 19, 19, 20, 20,
21, 21, 22, 22, 23, 24, 24, 25,
25, 26, 27, 27, 28, 29, 29, 30,
31, 32, 32, 33, 34, 35, 35, 36,
37, 38, 39, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 50,
51, 52, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 66, 67, 68,
69, 70, 72, 73, 74, 75, 77, 78,
79, 81, 82, 83, 85, 86, 87, 89,
90, 92, 93, 95, 96, 98, 99, 101,
102, 104, 105, 107, 109, 110, 112, 114,
115, 117, 119, 120, 122, 124, 126, 127,
129, 131, 133, 135, 137, 138, 140, 142,
144, 146, 148, 150, 152, 154, 156, 158,
160, 162, 164, 167, 169, 171, 173, 175,
177, 180, 182, 184, 186, 189, 191, 193,
196, 198, 200, 203, 205, 208, 210, 213,
215, 218, 220, 223, 225, 228, 231, 233,
236, 239, 241, 244, 247, 249, 252, 255
]
_sync = bytearray(
[
7,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127
]
)
_empty = [0, 0, 0]
_empty_map = [
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty]
]
_pixel_map = deepcopy(_empty_map)
#######################
# INTERNAL OPERATIONS #
#######################
def _initialise():
    """INTERNAL. Open the serial connection to the pi-topPULSE, once.

    Raises SerialException when /dev/serial0 is missing; exits the process
    when the MCU is not enabled or the port fails to open.
    """
    global _initialised
    global _serial_device
    global _pixel_map
    if not _initialised:
        if configuration.mcu_enabled():
            if not path.exists('/dev/serial0'):
                err_str = "Could not find serial port - are you sure it's enabled?"
                raise serialutil.SerialException(err_str)
            PTLogger.debug("Opening serial port...")
            # 250000 baud, 2 s timeout - presumably matches the PULSE MCU
            # firmware configuration; confirm against the hardware docs.
            _serial_device = Serial("/dev/serial0", baudrate=250000, timeout=2)
            if _serial_device.isOpen():
                PTLogger.debug("OK.")
            else:
                PTLogger.info("Error: Failed to open serial port!")
                exit()
            _initialised = True
        else:
            PTLogger.info("Error: pi-topPULSE is not initialised. Call ptpulse.configuration.initialise() ptpulse.configuration.enable_device()")
            exit()
def _signal_handler(signal, frame):
    """INTERNAL. SIGINT handler: stop the auto-refresh timer, blank the
    LEDs and exit cleanly."""
    PTLogger.info("\nQuitting...")
    stop()
    off()
    exit(0)
def _get_avg_colour():
    """INTERNAL. Return the average [r, g, b] over the whole pixel map.

    Used by show() as the ambient-lighting colour appended to each column.
    """
    # Removed an unused `counter` local from the original.
    total_rgb = [0, 0, 0]
    for x in range(_w):
        for y in range(_h):
            for c in range(3):
                total_rgb[c] = total_rgb[c] + _pixel_map[x][y][c]
    # Round each channel total to the nearest int over all _w * _h pixels.
    return [int(round(val / (_w * _h))) for val in total_rgb]
def _write(data):
    """INTERNAL. Send one frame (sync or column data) to the serial device.

    The short sleep gives the MCU time to consume the frame before the
    next one arrives.
    """
    PTLogger.debug('{s0:<4}{s1:<4}{s2:<4}{s3:<4}{s4:<4}{s5:<4}{s6:<4}{s7:<4}{s8:<4}{s9:<4}{s10:<4}'.format(s0=data[0], s1=data[1], s2=data[2], s3=data[3], s4=data[4], s5=data[5], s6=data[6], s7=data[7], s8=data[8], s9=data[9], s10=data[10]))
    _serial_device.write(data)
    sleep(0.002)
def _get_gamma_corrected_value(original_value):
    """INTERNAL. Converts a brightness value from 0-255
    to the value that produces an approximately linear
    scaling to the human eye."""
    # Plain table lookup into the precomputed 256-entry gamma curve.
    return _gamma_correction_arr[original_value]
def _scale_pixel_to_brightness(original_value):
    """INTERNAL. Scale a 0-255 channel value by the global brightness
    factor, returning the result as a rounded int."""
    return int(round(original_value * _brightness))
def _get_rotated_pixel_map():
    """INTERNAL. Get a rotated copy of the current in-memory pixel map."""
    rotated_pixel_map = deepcopy(_pixel_map)
    # Some fancy maths to rotate pixel map so that
    # 0,0 (x,y) - with rotation 0 - is the bottom left LED
    scaled_rotation = int(_rotation / 90)  # quarter turns requested (0..3)
    adjusted_scaled_rotation = (scaled_rotation + 1)
    modulo_adjusted_scaled_rotation = (adjusted_scaled_rotation % 4)
    count = (6 - modulo_adjusted_scaled_rotation) % 4
    for x in range(count):
        # zip(*m[::-1]) rotates a square matrix by 90 degrees.
        rotated_pixel_map = list(zip(*rotated_pixel_map[::-1]))
    return rotated_pixel_map
def _brightness_correct(original_value):
    """INTERNAL. Apply brightness scaling then gamma correction to one
    0-255 channel value."""
    return _get_gamma_corrected_value(_scale_pixel_to_brightness(original_value))
def _adjust_r_g_b_for_brightness_correction(r, g, b):
    """INTERNAL. Brightness/gamma-correct each channel of an RGB triple."""
    return (_brightness_correct(r),
            _brightness_correct(g),
            _brightness_correct(b))
def _sync_with_device():
    """INTERNAL. Send the sync frame to tell the device that LED
    data is expected."""
    # Lazily opens the serial port on first use.
    _initialise()
    PTLogger.debug("Sync data:")
    _write(_sync)
def _rgb_to_bytes_to_send(rgb):
"""INTERNAL. Format the LED data in the device-specific layout."""
# Create three 5-bit colour vals, splitting the green bits
# into two parts (hardware spec):
# |XX|G0|G1|R0|R1|R2|R3|R4|
# |G2|G3|G4|B0|B1|B2|B3|B4|
r = rgb[0]
g = rgb[1]
b = rgb[2]
byte0 = (r >> 3) & 0x1F
byte1 = (b >> 3) & 0x1F
grnb0 = (g >> 1) & 0x60
grnb1 = (g << 2) & 0xE0
byte0 = (byte0 | grnb0) & 0xFF
byte1 = (byte1 | grnb1) & 0xFF
return byte0, byte1
def _timer_method():
    """INTERNAL. Body of the auto-refresh thread: repeatedly push the
    buffer to the device until stop() clears _running."""
    global _running
    global _update_rate
    while _running:
        show()
        sleep(_update_rate)
def _flip(direction):
    """INTERNAL. Flip the pixel map horizontally ("h") or vertically ("v").

    :raises ValueError: for any other direction string.
    """
    global _pixel_map
    flipped_pixel_map = deepcopy(_pixel_map)
    for x in range(_w):
        for y in range(_h):
            # Bug fix: the original compared strings with `is`, which tests
            # object identity and is not guaranteed for string literals
            # (SyntaxWarning on CPython 3.8+); use equality instead.
            if direction == "h":
                flipped_pixel_map[x][y] = _pixel_map[(_w - 1) - x][y]
            elif direction == "v":
                flipped_pixel_map[x][y] = _pixel_map[x][(_h - 1) - y]
            else:
                err = 'Flip direction must be [h]orizontal or [v]ertical only'
                raise ValueError(err)
    _pixel_map = flipped_pixel_map
def _set_show_state(enabled):
    """INTERNAL. Enable/disable LED writes; when disabling, arm a one-shot
    timer that re-enables them after one refresh period."""
    global _show_enabled
    global _temp_disable_t
    _show_enabled = enabled
    if not _show_enabled:
        # Bug fix: a threading.Timer can only be start()ed once, so reusing
        # the module-level timer raised RuntimeError on the second show();
        # create a fresh one-shot timer each time.  The delay is one period
        # of the maximum refresh rate (the original timer was constructed
        # with _max_freq, i.e. 50 *seconds*).
        _temp_disable_t = Timer(1.0 / _max_freq, _enable_show_state)
        _temp_disable_t.start()


def _enable_show_state():
    """INTERNAL. Allow show() to write to the LEDs again."""
    _set_show_state(True)


def _disable_show_state():
    """INTERNAL. Block show() until the re-enable timer fires."""
    # Bug fix: the original called _set_show_state(True) here, so the
    # rate limiter never engaged.
    _set_show_state(False)
#######################
# EXTERNAL OPERATIONS #
#######################
def set_debug_print_state(debug_enable):
    """Enable/disable debug prints"""
    global _debug
    # NOTE(review): _debug is written but never read in this module -
    # presumably PTLogger or legacy code consumed it; confirm.
    _debug = debug_enable
def brightness(new_brightness):
    """Set the display brightness between 0.0 and 1.0.

    :param new_brightness: Brightness from 0.0 to 1.0 (default 1.0)
    :raises ValueError: if the value is outside [0, 1]"""
    global _brightness
    if new_brightness > 1 or new_brightness < 0:
        raise ValueError('Brightness level must be between 0 and 1')
    # Applied on write by set_pixel/set_all; already-buffered pixels are
    # not rescaled.
    _brightness = new_brightness
def get_brightness():
    """Get the display brightness value. Returns a float between 0.0 and 1.0."""
    return _brightness
def rotation(new_rotation=0):
    """Set the display rotation.

    :param new_rotation: Specify the rotation in degrees: 0, 90, 180 or 270
    :raises ValueError: for any other value"""
    global _rotation
    if new_rotation not in (0, 90, 180, 270):
        raise ValueError('Rotation: 0, 90, 180 or 270 degrees only')
    _rotation = new_rotation
    return True
def flip_h():
    """Flips the grid horizontally (mirror across the vertical axis)."""
    _flip("h")
def flip_v():
    """Flips the grid vertically (mirror across the horizontal axis)."""
    _flip("v")
def get_shape():
    """Returns the shape (width, height) of the display."""
    # Fixed 7x7 LED grid.
    return (_w, _h)
def get_pixel(x, y):
    """Get the RGB value of a single pixel.

    :param x: Horizontal position from 0 to 7
    :param y: Vertical position from 0 to 7"""
    global _pixel_map
    # Returns the stored (brightness/gamma-corrected) value; indexed
    # [y][x] to mirror set_pixel.
    return _pixel_map[y][x]
def set_pixel(x, y, r, g, b):
    """Set a single pixel to RGB colour.

    :param x: Horizontal position from 0 to 7
    :param y: Vertical position from 0 to 7
    :param r: Amount of red from 0 to 255
    :param g: Amount of green from 0 to 255
    :param b: Amount of blue from 0 to 255"""
    global _pixel_map
    # Brightness scaling and gamma correction happen on write, so the
    # buffer holds already-corrected values.
    new_r, new_g, new_b = _adjust_r_g_b_for_brightness_correction(r, g, b)
    # NOTE(review): indexed [y][x] here (and in get_pixel) but [x][y] in
    # set_all/_get_avg_colour/show - harmless on a square grid, but the
    # axis convention is inconsistent; confirm intended orientation.
    _pixel_map[y][x] = [new_r, new_g, new_b]
def set_all(r, g, b):
    """Set every pixel in the buffer to the given RGB colour.

    :param r: Amount of red from 0 to 255
    :param g: Amount of green from 0 to 255
    :param b: Amount of blue from 0 to 255"""
    global _pixel_map
    # The brightness/gamma correction does not depend on x/y, so compute
    # it once instead of once per pixel (the original recomputed it for
    # all 49 pixels).
    new_r, new_g, new_b = _adjust_r_g_b_for_brightness_correction(r, g, b)
    for x in range(_w):
        for y in range(_h):
            _pixel_map[x][y][0] = new_r
            _pixel_map[x][y][1] = new_g
            _pixel_map[x][y][2] = new_b
def show():
    """Update pi-topPULSE with the contents of the display buffer."""
    global _pixel_map
    global _rotation
    global _show_enabled
    wait_counter = 0
    # Rate limiting: a previous show() disables writes briefly; wait (up to
    # roughly one refresh period) for the re-enable timer before sending.
    attempt_to_show_early = not _show_enabled
    if attempt_to_show_early:
        PTLogger.info("Can't update pi-topPULSE LEDs more than 50/s. Waiting...")
        pause_length = 0.001
        # Scale wait time to _max_freq
        wait_counter_length = ceil(float(1 / float(_max_freq * pause_length)))
        while not _show_enabled:
            if wait_counter >= wait_counter_length:
                # Timer hasn't reset for some reason - force override
                _enable_show_state()
                break
            else:
                sleep(pause_length)
                wait_counter = wait_counter + 1
    if attempt_to_show_early:
        PTLogger.debug("pi-topPULSE LEDs re-enabled.")
    _sync_with_device()
    rotated_pixel_map = _get_rotated_pixel_map()
    avg_rgb = _get_avg_colour()
    _initialise()
    PTLogger.debug("LED data:")
    # For each col
    for x in range(_w):
        # Write col to LED matrix
        # Start with col no., so LED matrix knows which one it belongs to
        pixel_map_buffer = chr(x)
        # Get col's frame buffer, iterating over each pixel
        for y in range(_h + 1):
            if y == _h:
                # Ambient lighting bytes
                byte0, byte1 = _rgb_to_bytes_to_send(avg_rgb)
            else:
                byte0, byte1 = _rgb_to_bytes_to_send(rotated_pixel_map[x][y])
            pixel_map_buffer += chr(byte0)
            pixel_map_buffer += chr(byte1)
        # Write col to LED matrix
        # Latin-1 maps code points 0-255 one-to-one onto bytes, so the
        # chr()-built string encodes to the raw byte values.
        arr = bytearray(pixel_map_buffer, 'Latin_1')
        _write(arr)
    # Prevent another write if it's too fast
    _disable_show_state()
def clear():
    """Clear the buffer."""
    global _pixel_map
    # NOTE(review): the rows of _empty_map all reference the same _empty
    # list, and deepcopy preserves that sharing, so every cell of the fresh
    # map aliases a single [0, 0, 0] list.  set_pixel rebinds cells so this
    # is currently harmless, but in-place mutation of one cell would affect
    # all of them - confirm.
    _pixel_map = deepcopy(_empty_map)
def off():
    """Clear the buffer and immediately update pi-topPULSE."""
    clear()
    show()
def run_tests():
    """Runs a series of tests to check the LED board is working as expected."""
    off()
    # ------------------------------
    # Pixels
    # ------------------------------
    # Sweep a colour wheel across every pixel at each of the 4 rotations.
    counter = 0
    for r in range(4):
        rotation(90 * r)
        for x in range(_w):
            for y in range(_h):
                rad = radians((float(counter) / (4 * _w * _h)) * 360)
                # NOTE: reuses (shadows) the rotation index r as the red
                # channel; harmless since the for statement rebinds it.
                r = int((sin(rad) * 127) + 127)
                g = int((cos(rad) * 127) + 127)
                b = 255 - int((sin(rad) * 127) + 127)
                set_pixel(x, y, r, g, b)
                show()
                sleep(0.05)
                counter = counter + 1
    off()
    sleep(0.2)
    # ------------------------------
    # Rows and rotation
    # ------------------------------
    # Fill the whole grid red, then green, then blue, at each rotation.
    for r in range(4):
        rotation(90 * r)
        for c in range(3):
            for x in range(_w):
                for y in range(_h):
                    set_pixel(x, y, 255 if c == 0 else 0, 255 if c == 1 else 0, 255 if c == 2 else 0)
            show()
            sleep(0.05)
    off()
    sleep(0.2)
    # ------------------------------
    # Brightness
    # ------------------------------
    # Fade white up, then back down.
    for b in range(100):
        brightness(float(b) / 100)
        set_all(255, 255, 255)
        show()
        sleep(0.01)
    for b in range(100):
        brightness(1 - (float(b) / 100))
        set_all(255, 255, 255)
        show()
        sleep(0.01)
    off()
    brightness(1.0)
    sleep(0.2)
    # ------------------------------
    # Flipping
    # ------------------------------
    # Light one quadrant with a green marker pixel, then alternate
    # horizontal/vertical flips.
    for x in range(int(_w / 2)):
        for y in range(int(_h / 2)):
            set_pixel(x, y, 255, 255, 255)
    set_pixel(int(_w / 4), int(_h / 4), 0, 255, 0)
    show()
    sleep(0.5)
    for f in range(4):
        for x in range(2):
            if x == 0:
                flip_h()
            else:
                flip_v()
            show()
            sleep(0.5)
    off()
    sleep(0.2)
    # ------------------------------
    # Conway - auto refresh
    # ------------------------------
    # Run Conway's Game of Life (glider seed) on a wrapping grid while the
    # auto-refresh timer pushes frames in the background.
    start(0.1)
    life_map = [[0, 0, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    for r in range(40):
        temp_map = deepcopy(life_map)
        for x in range(_w):
            for y in range(_h):
                current_cell = temp_map[x][y]
                # Count the 8 toroidal (wrap-around) neighbours.
                neighbours = 0
                neighbours = neighbours + temp_map[(x - 1) % _w][(y - 1) % _h]
                neighbours = neighbours + temp_map[(x - 1) % _w][(y - 0) % _h]
                neighbours = neighbours + temp_map[(x - 1) % _w][(y + 1) % _h]
                neighbours = neighbours + temp_map[(x - 0) % _w][(y - 1) % _h]
                neighbours = neighbours + temp_map[(x - 0) % _w][(y + 1) % _h]
                neighbours = neighbours + temp_map[(x + 1) % _w][(y - 1) % _h]
                neighbours = neighbours + temp_map[(x + 1) % _w][(y - 0) % _h]
                neighbours = neighbours + temp_map[(x + 1) % _w][(y + 1) % _h]
                if current_cell == 1 and (neighbours < 2 or neighbours > 3):
                    life_map[x][y] = 0
                if (current_cell == 0 and neighbours == 3):
                    life_map[x][y] = 1
        for x in range(_w):
            for y in range(_h):
                if (life_map[x][y] == 1):
                    set_pixel(x, y, 255, 255, 0)
                else:
                    set_pixel(x, y, 0, 128, 0)
        sleep(0.1)
    stop()
    off()
def start(new_update_rate=0.1):
    """Start a background timer thread that repeatedly calls show().

    :param new_update_rate: Seconds between refreshes; clamped so the LEDs
        are never updated faster than _max_freq times per second."""
    global _update_rate
    global _running
    global _auto_refresh_timer
    if new_update_rate < (1 / _max_freq):
        _update_rate = (1 / _max_freq)
    else:
        _update_rate = new_update_rate
    _running = True
    # Bug fix: a threading.Timer can only be started once, so a second
    # start() (or start() after stop()) on the shared module-level timer
    # raised RuntimeError; create a fresh timer per start.
    _auto_refresh_timer = Timer(_update_rate, _timer_method)
    _auto_refresh_timer.start()
def stop():
    """Stops the timer that automatically refreshes the LEDs."""
    global _running
    global _auto_refresh_timer
    # Clearing _running makes the refresh loop exit; cancel() also covers
    # the case where the timer has not fired yet.
    _running = False
    _auto_refresh_timer.cancel()
##################
# INITIALISATION #
##################
# Install a Ctrl-C handler that stops the refresh thread and blanks the LEDs.
_signal = signal.signal(signal.SIGINT, _signal_handler)
# NOTE(review): Timer(_max_freq, ...) waits _max_freq *seconds* (50 s), and a
# threading.Timer cannot be re-started once used - presumably both timers
# were meant to be created fresh at the point they are started; confirm.
_auto_refresh_timer = Timer(_update_rate, _timer_method)
_temp_disable_t = Timer(_max_freq, _enable_show_state)
clear()
|
# ledmatrix.py (pi-topPULSE)
# Copyright (C) 2017 CEED ltd.
#
from ptcommon.logger import PTLogger
from copy import deepcopy
from math import ceil
from math import radians
from math import sin
from math import cos
from math import sin
from os import path
from serial import serialutil
from serial import Serial
import signal
from sys import exit
from time import sleep
from threading import Timer
# local
from ptpulse import configuration
_initialised = False
_w = 7
_h = 7
_rotation = 0
_brightness = 1.0
_max_freq = 50 # Maximum update speed is 50 times per second
_update_rate = 0.1
_running = False
_show_enabled = True
_gamma_correction_arr = [
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 2, 2, 2, 2, 2, 2, 2,
2, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 5, 5, 5,
5, 6, 6, 6, 6, 7, 7, 7,
7, 8, 8, 8, 9, 9, 9, 10,
10, 10, 11, 11, 11, 12, 12, 13,
13, 13, 14, 14, 15, 15, 16, 16,
17, 17, 18, 18, 19, 19, 20, 20,
21, 21, 22, 22, 23, 24, 24, 25,
25, 26, 27, 27, 28, 29, 29, 30,
31, 32, 32, 33, 34, 35, 35, 36,
37, 38, 39, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 50,
51, 52, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 66, 67, 68,
69, 70, 72, 73, 74, 75, 77, 78,
79, 81, 82, 83, 85, 86, 87, 89,
90, 92, 93, 95, 96, 98, 99, 101,
102, 104, 105, 107, 109, 110, 112, 114,
115, 117, 119, 120, 122, 124, 126, 127,
129, 131, 133, 135, 137, 138, 140, 142,
144, 146, 148, 150, 152, 154, 156, 158,
160, 162, 164, 167, 169, 171, 173, 175,
177, 180, 182, 184, 186, 189, 191, 193,
196, 198, 200, 203, 205, 208, 210, 213,
215, 218, 220, 223, 225, 228, 231, 233,
236, 239, 241, 244, 247, 249, 252, 255
]
_sync = bytearray(
[
7,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127,
127
]
)
_empty = [0, 0, 0]
_empty_map = [
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty],
[_empty, _empty, _empty, _empty, _empty, _empty, _empty]
]
_pixel_map = deepcopy(_empty_map)
#######################
# INTERNAL OPERATIONS #
#######################
def _initialise():
"""INTERNAL. Initialise the matrix."""
global _initialised
global _serial_device
global _pixel_map
if not _initialised:
if configuration.mcu_enabled():
if not path.exists('/dev/serial0'):
err_str = "Could not find serial port - are you sure it's enabled?"
raise serialutil.SerialException(err_str)
PTLogger.debug("Opening serial port...")
_serial_device = Serial("/dev/serial0", baudrate=250000, timeout=2)
if _serial_device.isOpen():
PTLogger.debug("OK.")
else:
PTLogger.info("Error: Failed to open serial port!")
exit()
_initialised = True
else:
PTLogger.info("Error: pi-topPULSE is not initialised. Call ptpulse.configuration.initialise() ptpulse.configuration.enable_device()")
exit()
def _signal_handler(signal, frame):
"""INTERNAL. Handles signals from the OS to exit."""
PTLogger.info("\nQuitting...")
stop()
off()
exit(0)
def _get_avg_colour():
    """INTERNAL. Return the average [r, g, b] over the whole pixel map.

    Used by show() as the ambient-lighting colour appended to each column.
    """
    # Removed an unused `counter` local from the original.
    total_rgb = [0, 0, 0]
    for x in range(_w):
        for y in range(_h):
            for c in range(3):
                total_rgb[c] = total_rgb[c] + _pixel_map[x][y][c]
    # Round each channel total to the nearest int over all _w * _h pixels.
    return [int(round(val / (_w * _h))) for val in total_rgb]
def _write(data):
"""INTERNAL. Write data to the matrix."""
PTLogger.debug('{s0:<4}{s1:<4}{s2:<4}{s3:<4}{s4:<4}{s5:<4}{s6:<4}{s7:<4}{s8:<4}{s9:<4}{s10:<4}'.format(s0=data[0], s1=data[1], s2=data[2], s3=data[3], s4=data[4], s5=data[5], s6=data[6], s7=data[7], s8=data[8], s9=data[9], s10=data[10]))
_serial_device.write(data)
sleep(0.002)
def _get_gamma_corrected_value(original_value):
"""INTERNAL. Converts a brightness value from 0-255
to the value that produces an approximately linear
scaling to the human eye."""
return _gamma_correction_arr[original_value]
def _scale_pixel_to_brightness(original_value):
"""INTERNAL. Multiplies intended brightness of
a pixel by brightness scaling factor to generate
an adjusted value."""
unrounded_new_brightness = original_value * _brightness
rounded_new_brightness = round(unrounded_new_brightness)
int_new_brightness = int(rounded_new_brightness)
return int_new_brightness
def _get_rotated_pixel_map():
"""INTERNAL. Get a rotated copy of the current in-memory pixel map."""
rotated_pixel_map = deepcopy(_pixel_map)
# Some fancy maths to rotate pixel map so that
# 0,0 (x,y) - with rotation 0 - is the bottom left LED
scaled_rotation = int(_rotation / 90)
adjusted_scaled_rotation = (scaled_rotation + 1)
modulo_adjusted_scaled_rotation = (adjusted_scaled_rotation % 4)
count = (6 - modulo_adjusted_scaled_rotation) % 4
for x in range(count):
rotated_pixel_map = list(zip(*rotated_pixel_map[::-1]))
return rotated_pixel_map
def _brightness_correct(original_value):
"""INTERNAL. Correct a single color for brightness."""
brightness_scaled = _scale_pixel_to_brightness(original_value)
new_value = _get_gamma_corrected_value(brightness_scaled)
return new_value
def _adjust_r_g_b_for_brightness_correction(r, g, b):
"""INTERNAL. Correct LED for brightness."""
r = _brightness_correct(r)
g = _brightness_correct(g)
b = _brightness_correct(b)
return r, g, b
def _sync_with_device():
"""INTERNAL. Send the sync frame to tell the device that LED
data is expected."""
_initialise()
PTLogger.debug("Sync data:")
_write(_sync)
def _rgb_to_bytes_to_send(rgb):
"""INTERNAL. Format the LED data in the device-specific layout."""
# Create three 5-bit colour vals, splitting the green bits
# into two parts (hardware spec):
# |XX|G0|G1|R0|R1|R2|R3|R4|
# |G2|G3|G4|B0|B1|B2|B3|B4|
r = rgb[0]
g = rgb[1]
b = rgb[2]
byte0 = (r >> 3) & 0x1F
byte1 = (b >> 3) & 0x1F
grnb0 = (g >> 1) & 0x60
grnb1 = (g << 2) & 0xE0
byte0 = (byte0 | grnb0) & 0xFF
byte1 = (byte1 | grnb1) & 0xFF
return byte0, byte1
def _timer_method():
"""INTERNAL. Run by the timer on each tick."""
global _running
global _update_rate
while _running:
show()
sleep(_update_rate)
def _flip(direction):
    """INTERNAL. Flip the pixel map horizontally ("h") or vertically ("v").

    :raises ValueError: for any other direction string.
    """
    global _pixel_map
    flipped_pixel_map = deepcopy(_pixel_map)
    for x in range(_w):
        for y in range(_h):
            # Bug fix: the original compared strings with `is`, which tests
            # object identity and is not guaranteed for string literals
            # (SyntaxWarning on CPython 3.8+); use equality instead.
            if direction == "h":
                flipped_pixel_map[x][y] = _pixel_map[(_w - 1) - x][y]
            elif direction == "v":
                flipped_pixel_map[x][y] = _pixel_map[x][(_h - 1) - y]
            else:
                err = 'Flip direction must be [h]orizontal or [v]ertical only'
                raise ValueError(err)
    _pixel_map = flipped_pixel_map
def _set_show_state(enabled):
    """INTERNAL. Enable/disable LED writes; when disabling, arm a one-shot
    timer that re-enables them after one refresh period."""
    global _show_enabled
    global _temp_disable_t
    _show_enabled = enabled
    if not _show_enabled:
        # Bug fix: a threading.Timer can only be start()ed once, so reusing
        # the module-level timer raised RuntimeError on the second show();
        # create a fresh one-shot timer each time.  The delay is one period
        # of the maximum refresh rate (the original timer was constructed
        # with _max_freq, i.e. 50 *seconds*).
        _temp_disable_t = Timer(1.0 / _max_freq, _enable_show_state)
        _temp_disable_t.start()


def _enable_show_state():
    """INTERNAL. Allow show() to write to the LEDs again."""
    _set_show_state(True)


def _disable_show_state():
    """INTERNAL. Block show() until the re-enable timer fires."""
    # Bug fix: the original called _set_show_state(True) here, so the
    # rate limiter never engaged.
    _set_show_state(False)
#######################
# EXTERNAL OPERATIONS #
#######################
def set_debug_print_state(debug_enable):
"""Enable/disable debug prints"""
global _debug
_debug = debug_enable
def brightness(new_brightness):
"""Set the display brightness between 0.0 and 1.0.
:param new_brightness: Brightness from 0.0 to 1.0 (default 1.0)"""
global _brightness
if new_brightness > 1 or new_brightness < 0:
raise ValueError('Brightness level must be between 0 and 1')
_brightness = new_brightness
def get_brightness():
"""Get the display brightness value. Returns a float between 0.0 and 1.0."""
return _brightness
def rotation(new_rotation=0):
"""Set the display rotation.
:param new_rotation: Specify the rotation in degrees: 0, 90, 180 or 270"""
global _rotation
if new_rotation in [0, 90, 180, 270]:
_rotation = new_rotation
return True
else:
raise ValueError('Rotation: 0, 90, 180 or 270 degrees only')
def flip_h():
    """Flips the grid horizontally (mirror across the vertical axis)."""
    _flip("h")
def flip_v():
    """Flips the grid vertically (mirror across the horizontal axis)."""
    _flip("v")
def get_shape():
    """Returns the shape (width, height) of the display."""
    return (_w, _h)
def get_pixel(x, y):
    """Get the RGB value of a single pixel.

    :param x: Horizontal position from 0 to 7
    :param y: Vertical position from 0 to 7
    :return: The [r, g, b] list for that pixel.
    """
    # The buffer is stored row-major, so it is indexed [y][x].
    return _pixel_map[y][x]
def set_pixel(x, y, r, g, b):
    """Set a single pixel to an RGB colour.

    :param x: Horizontal position from 0 to 7
    :param y: Vertical position from 0 to 7
    :param r: Amount of red from 0 to 255
    :param g: Amount of green from 0 to 255
    :param b: Amount of blue from 0 to 255
    """
    global _pixel_map
    corrected = _adjust_r_g_b_for_brightness_correction(r, g, b)
    # Buffer is row-major ([y][x]); store a fresh 3-element list.
    _pixel_map[y][x] = list(corrected)
def set_all(r, g, b):
    """Set all pixels to a specific colour.

    :param r: Amount of red from 0 to 255
    :param g: Amount of green from 0 to 255
    :param b: Amount of blue from 0 to 255
    """
    global _pixel_map
    # Hoisted out of the loop: the corrected colour depends only on (r, g, b),
    # not on the pixel position, so the original recomputed it _w*_h times
    # for the same answer.
    new_r, new_g, new_b = _adjust_r_g_b_for_brightness_correction(r, g, b)
    for x in range(_w):
        for y in range(_h):
            # Mutate the inner lists in place (rather than rebinding them) so
            # any references handed out by get_pixel() keep observing updates,
            # exactly as before.
            _pixel_map[x][y][0] = new_r
            _pixel_map[x][y][1] = new_g
            _pixel_map[x][y][2] = new_b
def show():
    """Update pi-topPULSE with the contents of the display buffer."""
    global _pixel_map
    global _rotation
    global _show_enabled
    wait_counter = 0
    attempt_to_show_early = not _show_enabled
    if attempt_to_show_early:
        PTLogger.info("Can't update pi-topPULSE LEDs more than 50/s. Waiting...")
        pause_length = 0.001
        # Scale wait time to _max_freq
        wait_counter_length = ceil(float(1 / float(_max_freq * pause_length)))
        # Poll in 1 ms sleeps until the cooldown timer re-enables showing,
        # or the computed timeout expires.
        while not _show_enabled:
            if wait_counter >= wait_counter_length:
                # Timer hasn't reset for some reason - force override
                _enable_show_state()
                break
            else:
                sleep(pause_length)
                wait_counter = wait_counter + 1
    if attempt_to_show_early:
        PTLogger.debug("pi-topPULSE LEDs re-enabled.")
    # Tell the device LED data is coming, snapshot the rotated buffer and
    # the average colour (used for the ambient-lighting bytes below).
    _sync_with_device()
    rotated_pixel_map = _get_rotated_pixel_map()
    avg_rgb = _get_avg_colour()
    _initialise()
    PTLogger.debug("LED data:")
    # For each col
    for x in range(_w):
        # Write col to LED matrix
        # Start with col no., so LED matrix knows which one it belongs to
        pixel_map_buffer = chr(x)
        # Get col's frame buffer, iterating over each pixel
        for y in range(_h + 1):
            if y == _h:
                # Ambient lighting bytes
                byte0, byte1 = _rgb_to_bytes_to_send(avg_rgb)
            else:
                byte0, byte1 = _rgb_to_bytes_to_send(rotated_pixel_map[x][y])
            pixel_map_buffer += chr(byte0)
            pixel_map_buffer += chr(byte1)
        # Write col to LED matrix
        arr = bytearray(pixel_map_buffer, 'Latin_1')
        _write(arr)
    # Prevent another write if it's too fast
    _disable_show_state()
def clear():
    """Clear the buffer (does not update the device until show() is called)."""
    global _pixel_map
    # Deep copy so later pixel writes never mutate the pristine empty map.
    _pixel_map = deepcopy(_empty_map)
def off():
    """Clear the buffer and immediately update pi-topPULSE."""
    clear()
    show()
def run_tests():
    """Runs a series of tests to check the LED board is working as expected.

    Sequence: per-pixel colour sweep at each rotation, solid R/G/B fills at
    each rotation, brightness fade up and down, flip tests, and a Conway's
    Game of Life animation driven by the auto-refresh timer.
    """
    off()
    # ------------------------------
    # Pixels
    # ------------------------------
    counter = 0
    for r in range(4):
        rotation(90 * r)
        for x in range(_w):
            for y in range(_h):
                # Sweep colours around a sine/cosine "colour wheel".
                # NOTE(review): the loop variable r is shadowed by the red
                # channel below — harmless here, but worth renaming.
                rad = radians((float(counter) / (4 * _w * _h)) * 360)
                r = int((sin(rad) * 127) + 127)
                g = int((cos(rad) * 127) + 127)
                b = 255 - int((sin(rad) * 127) + 127)
                set_pixel(x, y, r, g, b)
                show()
                sleep(0.05)
                counter = counter + 1
    off()
    sleep(0.2)
    # ------------------------------
    # Rows and rotation
    # ------------------------------
    for r in range(4):
        rotation(90 * r)
        for c in range(3):
            # Fill the whole grid with pure red, green or blue (c selects the channel).
            for x in range(_w):
                for y in range(_h):
                    set_pixel(x, y, 255 if c == 0 else 0, 255 if c == 1 else 0, 255 if c == 2 else 0)
            show()
            sleep(0.05)
    off()
    sleep(0.2)
    # ------------------------------
    # Brightness
    # ------------------------------
    for b in range(100):
        brightness(float(b) / 100)
        set_all(255, 255, 255)
        show()
        sleep(0.01)
    for b in range(100):
        brightness(1 - (float(b) / 100))
        set_all(255, 255, 255)
        show()
        sleep(0.01)
    off()
    brightness(1.0)
    sleep(0.2)
    # ------------------------------
    # Flipping
    # ------------------------------
    # Light one quadrant white with a single green marker pixel, then
    # alternate horizontal and vertical flips.
    for x in range(int(_w / 2)):
        for y in range(int(_h / 2)):
            set_pixel(x, y, 255, 255, 255)
    set_pixel(int(_w / 4), int(_h / 4), 0, 255, 0)
    show()
    sleep(0.5)
    for f in range(4):
        for x in range(2):
            if x == 0:
                flip_h()
            else:
                flip_v()
            show()
            sleep(0.5)
    off()
    sleep(0.2)
    # ------------------------------
    # Conway - auto refresh
    # ------------------------------
    start(0.1)
    # Glider seed pattern; the grid wraps toroidally below.
    life_map = [[0, 0, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0]]
    for r in range(40):
        temp_map = deepcopy(life_map)
        for x in range(_w):
            for y in range(_h):
                current_cell = temp_map[x][y]
                # Count the 8 neighbours with wrap-around indexing.
                neighbours = 0
                neighbours = neighbours + temp_map[(x - 1) % _w][(y - 1) % _h]
                neighbours = neighbours + temp_map[(x - 1) % _w][(y - 0) % _h]
                neighbours = neighbours + temp_map[(x - 1) % _w][(y + 1) % _h]
                neighbours = neighbours + temp_map[(x - 0) % _w][(y - 1) % _h]
                neighbours = neighbours + temp_map[(x - 0) % _w][(y + 1) % _h]
                neighbours = neighbours + temp_map[(x + 1) % _w][(y - 1) % _h]
                neighbours = neighbours + temp_map[(x + 1) % _w][(y - 0) % _h]
                neighbours = neighbours + temp_map[(x + 1) % _w][(y + 1) % _h]
                # Standard Life rules: under/over-population kills,
                # exactly three neighbours births a cell.
                if current_cell == 1 and (neighbours < 2 or neighbours > 3):
                    life_map[x][y] = 0
                if (current_cell == 0 and neighbours == 3):
                    life_map[x][y] = 1
        for x in range(_w):
            for y in range(_h):
                if (life_map[x][y] == 1):
                    set_pixel(x, y, 255, 255, 0)
                else:
                    set_pixel(x, y, 0, 128, 0)
        sleep(0.1)
    stop()
    off()
def start(new_update_rate=0.1):
    """Starts a timer to automatically refresh the LEDs.

    :param new_update_rate: Seconds between refreshes; clamped so the device
        is never updated faster than _max_freq times per second.
    """
    global _update_rate
    global _running
    global _auto_refresh_timer
    # Clamp the refresh interval to the device's minimum period.
    _update_rate = max(new_update_rate, 1 / _max_freq)
    _running = True
    _auto_refresh_timer.start()
def stop():
    """Stops the timer that automatically refreshes the LEDs."""
    global _running
    global _auto_refresh_timer
    _running = False
    # NOTE(review): threading.Timer objects cannot be restarted after
    # cancel(); confirm start()/stop() is only cycled once per Timer.
    _auto_refresh_timer.cancel()
##################
# INITIALISATION #
##################
# Install a SIGINT handler so Ctrl+C exits cleanly.
_signal = signal.signal(signal.SIGINT, _signal_handler)
# Timer driving the optional auto-refresh loop (armed by start()).
_auto_refresh_timer = Timer(_update_rate, _timer_method)
# One-shot timer that re-enables show() after a cooldown.
# NOTE(review): the delay argument is _max_freq (seconds); presumably
# 1 / _max_freq was intended for a _max_freq-per-second rate limit — confirm.
_temp_disable_t = Timer(_max_freq, _enable_show_state)
# Start from an empty display buffer.
clear()
|
en
| 0.70615
|
# ledmatrix.py (pi-topPULSE) # Copyright (C) 2017 CEED ltd. # # local # Maximum update speed is 50 times per second ####################### # INTERNAL OPERATIONS # ####################### INTERNAL. Initialise the matrix. INTERNAL. Handles signals from the OS to exit. INTERNAL. Get the average color of the matrix. INTERNAL. Write data to the matrix. INTERNAL. Converts a brightness value from 0-255 to the value that produces an approximately linear scaling to the human eye. INTERNAL. Multiplies intended brightness of a pixel by brightness scaling factor to generate an adjusted value. INTERNAL. Get a rotated copy of the current in-memory pixel map. # Some fancy maths to rotate pixel map so that # 0,0 (x,y) - with rotation 0 - is the bottom left LED INTERNAL. Correct a single color for brightness. INTERNAL. Correct LED for brightness. INTERNAL. Send the sync frame to tell the device that LED data is expected. INTERNAL. Format the LED data in the device-specific layout. # Create three 5-bit colour vals, splitting the green bits # into two parts (hardware spec): # |XX|G0|G1|R0|R1|R2|R3|R4| # |G2|G3|G4|B0|B1|B2|B3|B4| INTERNAL. Run by the timer on each tick. INTERNAL. Flip the pixel map. INTERNAL. INTERNAL. INTERNAL. ####################### # EXTERNAL OPERATIONS # ####################### Enable/disable debug prints Set the display brightness between 0.0 and 1.0. :param new_brightness: Brightness from 0.0 to 1.0 (default 1.0) Get the display brightness value. Returns a float between 0.0 and 1.0. Set the display rotation. :param new_rotation: Specify the rotation in degrees: 0, 90, 180 or 270 Flips the grid horizontally. Flips the grid vertically. Returns the shape (width, height) of the display. Get the RGB value of a single pixel. :param x: Horizontal position from 0 to 7 :param y: Veritcal position from 0 to 7 Set a single pixel to RGB colour. 
:param x: Horizontal position from 0 to 7 :param y: Veritcal position from 0 to 7 :param r: Amount of red from 0 to 255 :param g: Amount of green from 0 to 255 :param b: Amount of blue from 0 to 255 Set all pixels to a specific colour. Update pi-topPULSE with the contents of the display buffer. # Scale wait time to _max_freq # Timer hasn't reset for some reason - force override # For each col # Write col to LED matrix # Start with col no., so LED matrix knows which one it belongs to # Get col's frame buffer, iterating over each pixel # Ambient lighting bytes # Write col to LED matrix # Prevent another write if it's too fast Clear the buffer. Clear the buffer and immediately update pi-topPULSE. Runs a series of tests to check the LED board is working as expected. # ------------------------------ # Pixels # ------------------------------ # ------------------------------ # Rows and rotation # ------------------------------ # ------------------------------ # Brightness # ------------------------------ # ------------------------------ # Flipping # ------------------------------ # ------------------------------ # Conway - auto refresh # ------------------------------ Starts a timer to automatically refresh the LEDs. Stops the timer that automatically refreshes the LEDs. ################## # INITIALISATION # ##################
| 2.008651
| 2
|
spacetimeformer/spacetimeformer_model/spacetimeformer_model.py
|
Piki1989/spacetimeformer
| 1
|
6627850
|
from typing import Tuple
import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import torchmetrics
import spacetimeformer as stf
class Spacetimeformer_Forecaster(stf.Forecaster):
    """Forecaster wrapping the Spacetimeformer sequence model.

    Builds an ``stf.spacetimeformer_model.nn.Spacetimeformer`` network and
    implements the step/loss/optimizer plumbing expected by the
    ``stf.Forecaster`` base class.
    """

    def __init__(
        self,
        d_y: int = 1,
        d_x: int = 4,
        start_token_len: int = 64,
        attn_factor: int = 5,
        d_model: int = 512,
        n_heads: int = 8,
        e_layers: int = 2,
        d_layers: int = 2,
        d_ff: int = 2048,
        dropout_emb: float = 0.05,
        dropout_token: float = 0.05,
        dropout_qkv: float = 0.05,
        dropout_ff: float = 0.05,
        dropout_attn_out: float = 0.05,
        global_self_attn: str = "performer",
        local_self_attn: str = "none",
        global_cross_attn: str = "performer",
        local_cross_attn: str = "none",
        performer_kernel: str = "relu",
        embed_method: str = "spatio-temporal",
        performer_relu: bool = True,
        performer_redraw_interval: int = 1000,
        activation: str = "gelu",
        post_norm: bool = False,
        norm: str = "layer",
        init_lr: float = 1e-10,
        base_lr: float = 3e-4,
        warmup_steps: float = 0,
        decay_factor: float = 0.25,
        initial_downsample_convs: int = 0,
        intermediate_downsample_convs: int = 0,
        l2_coeff: float = 0,
        loss: str = "nll",
        linear_window: int = 0,
        class_loss_imp: float = 0.1,
        time_emb_dim: int = 6,
        verbose=True,
    ):
        super().__init__(l2_coeff=l2_coeff, loss=loss, linear_window=linear_window)
        self.spacetimeformer = stf.spacetimeformer_model.nn.Spacetimeformer(
            d_y=d_y,
            d_x=d_x,
            start_token_len=start_token_len,
            attn_factor=attn_factor,
            d_model=d_model,
            n_heads=n_heads,
            e_layers=e_layers,
            d_layers=d_layers,
            d_ff=d_ff,
            initial_downsample_convs=initial_downsample_convs,
            intermediate_downsample_convs=intermediate_downsample_convs,
            dropout_emb=dropout_emb,
            dropout_attn_out=dropout_attn_out,
            dropout_qkv=dropout_qkv,
            dropout_ff=dropout_ff,
            dropout_token=dropout_token,
            global_self_attn=global_self_attn,
            local_self_attn=local_self_attn,
            global_cross_attn=global_cross_attn,
            local_cross_attn=local_cross_attn,
            activation=activation,
            post_norm=post_norm,
            device=self.device,
            norm=norm,
            embed_method=embed_method,
            performer_attn_kernel=performer_kernel,
            performer_redraw_interval=performer_redraw_interval,
            time_emb_dim=time_emb_dim,
            verbose=True,
        )
        self.start_token_len = start_token_len
        self.init_lr = init_lr
        self.base_lr = base_lr
        self.warmup_steps = warmup_steps
        self.decay_factor = decay_factor
        self.embed_method = embed_method
        self.class_loss_imp = class_loss_imp

        # Plain nested function instead of assigning a lambda (PEP 8 E731);
        # behavior is unchanged.
        def qprint(_msg_):
            if verbose:
                print(_msg_)

        qprint(f" *** Spacetimeformer Summary: *** ")
        qprint(f"\tModel Dim: {d_model}")
        qprint(f"\tFF Dim: {d_ff}")
        qprint(f"\tEnc Layers: {e_layers}")
        qprint(f"\tDec Layers: {d_layers}")
        qprint(f"\tEmbed Dropout: {dropout_emb}")
        qprint(f"\tToken Dropout: {dropout_token}")
        qprint(f"\tFF Dropout: {dropout_ff}")
        qprint(f"\tAttn Out Dropout: {dropout_attn_out}")
        qprint(f"\tQKV Dropout: {dropout_qkv}")
        qprint(f"\tL2 Coeff: {l2_coeff}")
        qprint(f"\tWarmup Steps: {warmup_steps}")
        qprint(f"\tNormalization Scheme: {norm}")
        qprint(f" *** *** ")

    @property
    def train_step_forward_kwargs(self):
        """Extra kwargs passed to the forward pass during training."""
        return {"output_attn": False}

    @property
    def eval_step_forward_kwargs(self):
        """Extra kwargs passed to the forward pass during validation/test."""
        return {"output_attn": False}

    def step(self, batch: Tuple[torch.Tensor], train: bool):
        """Run one train/eval step on a batch and return a stats dict."""
        kwargs = (
            self.train_step_forward_kwargs if train else self.eval_step_forward_kwargs
        )
        # Time masking is only applied during training.
        time_mask = self.time_masked_idx if train else None
        forecast_loss, class_loss, acc, output, mask = self.compute_loss(
            batch=batch,
            time_mask=time_mask,
            forward_kwargs=kwargs,
        )
        *_, y_t = batch
        stats = self._compute_stats(mask * output, mask * y_t)
        stats["forecast_loss"] = forecast_loss
        stats["class_loss"] = class_loss
        # Total loss mixes forecasting loss with the weighted classification loss.
        stats["loss"] = forecast_loss + self.class_loss_imp * class_loss
        stats["acc"] = acc
        return stats

    def classification_loss(
        self, logits: torch.Tensor, labels: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Cross-entropy loss and accuracy of the variable-ID classifier.

        Returns ``(class_loss, acc)``.  (Return annotation corrected: the
        method returns a 2-tuple, not ``Tuple[torch.Tensor]``.)
        """
        labels = labels.view(-1).to(logits.device)
        # Number of classes is inferred from the labels present in the batch.
        d_y = labels.max() + 1
        logits = logits.view(
            -1, d_y
        )  # = torch.cat(logits.chunk(bs, dim=0), dim=1).squeeze(0)
        class_loss = F.cross_entropy(logits, labels)
        acc = torchmetrics.functional.accuracy(
            torch.softmax(logits, dim=1),
            labels,
        )
        return class_loss, acc

    def compute_loss(self, batch, time_mask=None, forward_kwargs=None):
        """Compute forecasting (and optional classification) losses for a batch.

        :param batch: ``(x_c, y_c, x_t, y_t)`` context/target tensors.
        :param time_mask: Optional index mask forwarded to the forecasting loss.
        :param forward_kwargs: Extra kwargs for the forward pass.  Defaults to
            an empty dict; a ``None`` sentinel replaces the original mutable
            default argument (``forward_kwargs={}``).
        """
        if forward_kwargs is None:
            forward_kwargs = {}
        x_c, y_c, x_t, y_t = batch
        outputs, (logits, labels) = self(x_c, y_c, x_t, y_t, **forward_kwargs)
        forecast_loss, mask = self.forecasting_loss(
            outputs=outputs, y_t=y_t, time_mask=time_mask
        )
        if self.embed_method == "spatio-temporal" and self.class_loss_imp > 0:
            class_loss, acc = self.classification_loss(logits=logits, labels=labels)
        else:
            # Classification disabled: zero loss and a sentinel accuracy.
            class_loss, acc = 0.0, -1.0
        return forecast_loss, class_loss, acc, outputs.mean, mask

    def forward_model_pass(self, x_c, y_c, x_t, y_t, output_attn=False):
        """Prepare encoder/decoder inputs and run the Spacetimeformer network."""
        if len(y_c.shape) == 2:
            # Promote (batch, len) targets to (batch, len, 1).
            y_c = y_c.unsqueeze(-1)
            y_t = y_t.unsqueeze(-1)
        batch_x = y_c
        batch_x_mark = x_c
        if self.start_token_len > 0:
            # Prepend the final `start_token_len` context points to the target.
            batch_y = torch.cat((y_c[:, -self.start_token_len :, :], y_t), dim=1)
            batch_y_mark = torch.cat((x_c[:, -self.start_token_len :, :], x_t), dim=1)
        else:
            batch_y = y_t
            batch_y_mark = x_t
        # Decoder input: start tokens followed by zero placeholders for the
        # points to be predicted.
        dec_inp = torch.cat(
            [
                batch_y[:, : self.start_token_len, :],
                torch.zeros((batch_y.shape[0], y_t.shape[1], batch_y.shape[-1])).to(
                    self.device
                ),
            ],
            dim=1,
        ).float()
        output, (logits, labels), attn = self.spacetimeformer(
            x_enc=batch_x,
            x_mark_enc=batch_x_mark,
            x_dec=dec_inp,
            x_mark_dec=batch_y_mark,
            output_attention=output_attn,
        )
        if output_attn:
            return output, (logits, labels), attn
        return output, (logits, labels)

    def configure_optimizers(self):
        """AdamW with warmup followed by reduce-on-plateau LR decay."""
        optimizer = torch.optim.AdamW(
            self.parameters(), lr=self.base_lr, weight_decay=self.l2_coeff
        )
        scheduler = stf.lr_scheduler.WarmupReduceLROnPlateau(
            optimizer,
            init_lr=self.init_lr,
            peak_lr=self.base_lr,
            warmup_steps=self.warmup_steps,
            patience=2,
            factor=self.decay_factor,
        )
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "epoch",
                "frequency": 1,
                "monitor": "val/forecast_loss",
                "reduce_on_plateau": True,
            },
        }

    @classmethod
    def add_cli(cls, parser):
        """Register Spacetimeformer command-line arguments on ``parser``.

        Fix: the first parameter was named ``self`` despite the
        ``@classmethod`` decorator; renamed to ``cls`` (the binding — the
        class object — is identical, so callers are unaffected).
        """
        super().add_cli(parser)
        parser.add_argument(
            "--start_token_len",
            type=int,
            required=True,
            help="Length of decoder start token. Adds this many of the final context points to the start of the target sequence.",
        )
        parser.add_argument(
            "--d_model", type=int, default=256, help="Transformer embedding dimension."
        )
        parser.add_argument(
            "--n_heads", type=int, default=8, help="Number of self-attention heads."
        )
        parser.add_argument(
            "--enc_layers", type=int, default=4, help="Transformer encoder layers."
        )
        parser.add_argument(
            "--dec_layers", type=int, default=3, help="Transformer decoder layers."
        )
        parser.add_argument(
            "--d_ff",
            type=int,
            default=1024,
            help="Dimension of Transformer up-scaling MLP layer. (often 4 * d_model)",
        )
        parser.add_argument(
            "--attn_factor",
            type=int,
            default=5,
            help="ProbSparse attention factor. N/A to other attn mechanisms.",
        )
        parser.add_argument(
            "--dropout_emb",
            type=float,
            default=0.2,
            help="Embedding dropout rate. Drop out elements of the embedding vectors during training.",
        )
        parser.add_argument(
            "--dropout_token",
            type=float,
            default=0.0,
            help="Token dropout rate. Drop out entire input tokens during training.",
        )
        parser.add_argument(
            "--dropout_attn_out",
            type=float,
            default=0.0,
            help="Attention dropout rate. Dropout elements of the attention matrix. Only applicable to attn mechanisms that explicitly compute the attn matrix (e.g. Full).",
        )
        parser.add_argument(
            "--dropout_qkv",
            type=float,
            default=0.0,
            help="Query, Key and Value dropout rate. Dropout elements of these attention vectors during training.",
        )
        parser.add_argument(
            "--dropout_ff",
            type=float,
            default=0.3,
            help="Standard dropout applied to activations of FF networks in the Transformer.",
        )
        parser.add_argument(
            "--global_self_attn",
            type=str,
            default="performer",
            choices=[
                "full",
                "prob",
                "performer",
                "nystromformer",
                "benchmark",
                "none",
            ],
            help="Attention mechanism type.",
        )
        parser.add_argument(
            "--global_cross_attn",
            type=str,
            default="performer",
            choices=[
                "full",
                "performer",
                "benchmark",
                "none",
            ],
            help="Attention mechanism type.",
        )
        parser.add_argument(
            "--local_self_attn",
            type=str,
            default="performer",
            choices=[
                "full",
                "prob",
                "performer",
                "benchmark",
                "none",
            ],
            help="Attention mechanism type.",
        )
        parser.add_argument(
            "--local_cross_attn",
            type=str,
            default="performer",
            choices=[
                "full",
                "performer",
                "benchmark",
                "none",
            ],
            help="Attention mechanism type.",
        )
        parser.add_argument(
            "--activation",
            type=str,
            default="gelu",
            choices=["relu", "gelu"],
            help="Activation function for Transformer encoder and decoder layers.",
        )
        parser.add_argument(
            "--post_norm",
            action="store_true",
            help="Enable post-norm architecture for Transformers. See https://arxiv.org/abs/2002.04745.",
        )
        parser.add_argument(
            "--norm",
            type=str,
            choices=["layer", "batch", "scale", "power", "none"],
            default="batch",
        )
        parser.add_argument(
            "--init_lr", type=float, default=1e-10, help="Initial learning rate."
        )
        parser.add_argument(
            "--base_lr",
            type=float,
            default=5e-4,
            help="Base/peak LR. The LR is annealed to this value from --init_lr over --warmup_steps training steps.",
        )
        parser.add_argument(
            "--warmup_steps", type=int, default=0, help="LR anneal steps."
        )
        parser.add_argument(
            "--decay_factor",
            type=float,
            default=0.25,
            help="Factor to reduce LR on plateau (after warmup period is over).",
        )
        parser.add_argument(
            "--initial_downsample_convs",
            type=int,
            default=0,
            help="Add downsampling Conv1Ds to the encoder embedding layer to reduce context sequence length.",
        )
        parser.add_argument(
            "--class_loss_imp",
            type=float,
            default=0.1,
            help="Coefficient for node classification loss function. Set to 0 to disable this feature. Does not significantly impact forecasting results due to detached gradient.",
        )
        parser.add_argument(
            "--intermediate_downsample_convs",
            type=int,
            default=0,
            help="Add downsampling Conv1Ds between encoder layers.",
        )
        parser.add_argument(
            "--time_emb_dim",
            type=int,
            default=12,
            help="Time embedding dimension. Embed *each dimension of x* with this many learned periodic values.",
        )
        parser.add_argument(
            "--performer_kernel",
            type=str,
            default="relu",
            choices=["softmax", "relu"],
            help="Performer attention kernel. See Performer paper for details.",
        )
        parser.add_argument(
            "--performer_redraw_interval",
            type=int,
            default=125,
            help="Training steps between resampling orthogonal random features for FAVOR+ attention",
        )
        parser.add_argument(
            "--embed_method",
            type=str,
            choices=["spatio-temporal", "temporal"],
            default="spatio-temporal",
            help="Embedding method. spatio-temporal enables long-sequence spatio-temporal transformer mode while temporal recovers default architecture.",
        )
|
from typing import Tuple
import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import torchmetrics
import spacetimeformer as stf
class Spacetimeformer_Forecaster(stf.Forecaster):
    """Forecaster wrapping the Spacetimeformer sequence model.

    Builds an ``stf.spacetimeformer_model.nn.Spacetimeformer`` network and
    implements the step/loss/optimizer plumbing expected by the
    ``stf.Forecaster`` base class.
    """

    def __init__(
        self,
        d_y: int = 1,
        d_x: int = 4,
        start_token_len: int = 64,
        attn_factor: int = 5,
        d_model: int = 512,
        n_heads: int = 8,
        e_layers: int = 2,
        d_layers: int = 2,
        d_ff: int = 2048,
        dropout_emb: float = 0.05,
        dropout_token: float = 0.05,
        dropout_qkv: float = 0.05,
        dropout_ff: float = 0.05,
        dropout_attn_out: float = 0.05,
        global_self_attn: str = "performer",
        local_self_attn: str = "none",
        global_cross_attn: str = "performer",
        local_cross_attn: str = "none",
        performer_kernel: str = "relu",
        embed_method: str = "spatio-temporal",
        performer_relu: bool = True,
        performer_redraw_interval: int = 1000,
        activation: str = "gelu",
        post_norm: bool = False,
        norm: str = "layer",
        init_lr: float = 1e-10,
        base_lr: float = 3e-4,
        warmup_steps: float = 0,
        decay_factor: float = 0.25,
        initial_downsample_convs: int = 0,
        intermediate_downsample_convs: int = 0,
        l2_coeff: float = 0,
        loss: str = "nll",
        linear_window: int = 0,
        class_loss_imp: float = 0.1,
        time_emb_dim: int = 6,
        verbose=True,
    ):
        super().__init__(l2_coeff=l2_coeff, loss=loss, linear_window=linear_window)
        self.spacetimeformer = stf.spacetimeformer_model.nn.Spacetimeformer(
            d_y=d_y,
            d_x=d_x,
            start_token_len=start_token_len,
            attn_factor=attn_factor,
            d_model=d_model,
            n_heads=n_heads,
            e_layers=e_layers,
            d_layers=d_layers,
            d_ff=d_ff,
            initial_downsample_convs=initial_downsample_convs,
            intermediate_downsample_convs=intermediate_downsample_convs,
            dropout_emb=dropout_emb,
            dropout_attn_out=dropout_attn_out,
            dropout_qkv=dropout_qkv,
            dropout_ff=dropout_ff,
            dropout_token=dropout_token,
            global_self_attn=global_self_attn,
            local_self_attn=local_self_attn,
            global_cross_attn=global_cross_attn,
            local_cross_attn=local_cross_attn,
            activation=activation,
            post_norm=post_norm,
            device=self.device,
            norm=norm,
            embed_method=embed_method,
            performer_attn_kernel=performer_kernel,
            performer_redraw_interval=performer_redraw_interval,
            time_emb_dim=time_emb_dim,
            verbose=True,
        )
        self.start_token_len = start_token_len
        self.init_lr = init_lr
        self.base_lr = base_lr
        self.warmup_steps = warmup_steps
        self.decay_factor = decay_factor
        self.embed_method = embed_method
        self.class_loss_imp = class_loss_imp

        # Plain nested function instead of assigning a lambda (PEP 8 E731);
        # behavior is unchanged.
        def qprint(_msg_):
            if verbose:
                print(_msg_)

        qprint(f" *** Spacetimeformer Summary: *** ")
        qprint(f"\tModel Dim: {d_model}")
        qprint(f"\tFF Dim: {d_ff}")
        qprint(f"\tEnc Layers: {e_layers}")
        qprint(f"\tDec Layers: {d_layers}")
        qprint(f"\tEmbed Dropout: {dropout_emb}")
        qprint(f"\tToken Dropout: {dropout_token}")
        qprint(f"\tFF Dropout: {dropout_ff}")
        qprint(f"\tAttn Out Dropout: {dropout_attn_out}")
        qprint(f"\tQKV Dropout: {dropout_qkv}")
        qprint(f"\tL2 Coeff: {l2_coeff}")
        qprint(f"\tWarmup Steps: {warmup_steps}")
        qprint(f"\tNormalization Scheme: {norm}")
        qprint(f" *** *** ")

    @property
    def train_step_forward_kwargs(self):
        """Extra kwargs passed to the forward pass during training."""
        return {"output_attn": False}

    @property
    def eval_step_forward_kwargs(self):
        """Extra kwargs passed to the forward pass during validation/test."""
        return {"output_attn": False}

    def step(self, batch: Tuple[torch.Tensor], train: bool):
        """Run one train/eval step on a batch and return a stats dict."""
        kwargs = (
            self.train_step_forward_kwargs if train else self.eval_step_forward_kwargs
        )
        # Time masking is only applied during training.
        time_mask = self.time_masked_idx if train else None
        forecast_loss, class_loss, acc, output, mask = self.compute_loss(
            batch=batch,
            time_mask=time_mask,
            forward_kwargs=kwargs,
        )
        *_, y_t = batch
        stats = self._compute_stats(mask * output, mask * y_t)
        stats["forecast_loss"] = forecast_loss
        stats["class_loss"] = class_loss
        # Total loss mixes forecasting loss with the weighted classification loss.
        stats["loss"] = forecast_loss + self.class_loss_imp * class_loss
        stats["acc"] = acc
        return stats

    def classification_loss(
        self, logits: torch.Tensor, labels: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Cross-entropy loss and accuracy of the variable-ID classifier.

        Returns ``(class_loss, acc)``.  (Return annotation corrected: the
        method returns a 2-tuple, not ``Tuple[torch.Tensor]``.)
        """
        labels = labels.view(-1).to(logits.device)
        # Number of classes is inferred from the labels present in the batch.
        d_y = labels.max() + 1
        logits = logits.view(
            -1, d_y
        )  # = torch.cat(logits.chunk(bs, dim=0), dim=1).squeeze(0)
        class_loss = F.cross_entropy(logits, labels)
        acc = torchmetrics.functional.accuracy(
            torch.softmax(logits, dim=1),
            labels,
        )
        return class_loss, acc

    def compute_loss(self, batch, time_mask=None, forward_kwargs=None):
        """Compute forecasting (and optional classification) losses for a batch.

        :param batch: ``(x_c, y_c, x_t, y_t)`` context/target tensors.
        :param time_mask: Optional index mask forwarded to the forecasting loss.
        :param forward_kwargs: Extra kwargs for the forward pass.  Defaults to
            an empty dict; a ``None`` sentinel replaces the original mutable
            default argument (``forward_kwargs={}``).
        """
        if forward_kwargs is None:
            forward_kwargs = {}
        x_c, y_c, x_t, y_t = batch
        outputs, (logits, labels) = self(x_c, y_c, x_t, y_t, **forward_kwargs)
        forecast_loss, mask = self.forecasting_loss(
            outputs=outputs, y_t=y_t, time_mask=time_mask
        )
        if self.embed_method == "spatio-temporal" and self.class_loss_imp > 0:
            class_loss, acc = self.classification_loss(logits=logits, labels=labels)
        else:
            # Classification disabled: zero loss and a sentinel accuracy.
            class_loss, acc = 0.0, -1.0
        return forecast_loss, class_loss, acc, outputs.mean, mask

    def forward_model_pass(self, x_c, y_c, x_t, y_t, output_attn=False):
        """Prepare encoder/decoder inputs and run the Spacetimeformer network."""
        if len(y_c.shape) == 2:
            # Promote (batch, len) targets to (batch, len, 1).
            y_c = y_c.unsqueeze(-1)
            y_t = y_t.unsqueeze(-1)
        batch_x = y_c
        batch_x_mark = x_c
        if self.start_token_len > 0:
            # Prepend the final `start_token_len` context points to the target.
            batch_y = torch.cat((y_c[:, -self.start_token_len :, :], y_t), dim=1)
            batch_y_mark = torch.cat((x_c[:, -self.start_token_len :, :], x_t), dim=1)
        else:
            batch_y = y_t
            batch_y_mark = x_t
        # Decoder input: start tokens followed by zero placeholders for the
        # points to be predicted.
        dec_inp = torch.cat(
            [
                batch_y[:, : self.start_token_len, :],
                torch.zeros((batch_y.shape[0], y_t.shape[1], batch_y.shape[-1])).to(
                    self.device
                ),
            ],
            dim=1,
        ).float()
        output, (logits, labels), attn = self.spacetimeformer(
            x_enc=batch_x,
            x_mark_enc=batch_x_mark,
            x_dec=dec_inp,
            x_mark_dec=batch_y_mark,
            output_attention=output_attn,
        )
        if output_attn:
            return output, (logits, labels), attn
        return output, (logits, labels)

    def configure_optimizers(self):
        """AdamW with warmup followed by reduce-on-plateau LR decay."""
        optimizer = torch.optim.AdamW(
            self.parameters(), lr=self.base_lr, weight_decay=self.l2_coeff
        )
        scheduler = stf.lr_scheduler.WarmupReduceLROnPlateau(
            optimizer,
            init_lr=self.init_lr,
            peak_lr=self.base_lr,
            warmup_steps=self.warmup_steps,
            patience=2,
            factor=self.decay_factor,
        )
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "epoch",
                "frequency": 1,
                "monitor": "val/forecast_loss",
                "reduce_on_plateau": True,
            },
        }

    @classmethod
    def add_cli(cls, parser):
        """Register Spacetimeformer command-line arguments on ``parser``.

        Fix: the first parameter was named ``self`` despite the
        ``@classmethod`` decorator; renamed to ``cls`` (the binding — the
        class object — is identical, so callers are unaffected).
        """
        super().add_cli(parser)
        parser.add_argument(
            "--start_token_len",
            type=int,
            required=True,
            help="Length of decoder start token. Adds this many of the final context points to the start of the target sequence.",
        )
        parser.add_argument(
            "--d_model", type=int, default=256, help="Transformer embedding dimension."
        )
        parser.add_argument(
            "--n_heads", type=int, default=8, help="Number of self-attention heads."
        )
        parser.add_argument(
            "--enc_layers", type=int, default=4, help="Transformer encoder layers."
        )
        parser.add_argument(
            "--dec_layers", type=int, default=3, help="Transformer decoder layers."
        )
        parser.add_argument(
            "--d_ff",
            type=int,
            default=1024,
            help="Dimension of Transformer up-scaling MLP layer. (often 4 * d_model)",
        )
        parser.add_argument(
            "--attn_factor",
            type=int,
            default=5,
            help="ProbSparse attention factor. N/A to other attn mechanisms.",
        )
        parser.add_argument(
            "--dropout_emb",
            type=float,
            default=0.2,
            help="Embedding dropout rate. Drop out elements of the embedding vectors during training.",
        )
        parser.add_argument(
            "--dropout_token",
            type=float,
            default=0.0,
            help="Token dropout rate. Drop out entire input tokens during training.",
        )
        parser.add_argument(
            "--dropout_attn_out",
            type=float,
            default=0.0,
            help="Attention dropout rate. Dropout elements of the attention matrix. Only applicable to attn mechanisms that explicitly compute the attn matrix (e.g. Full).",
        )
        parser.add_argument(
            "--dropout_qkv",
            type=float,
            default=0.0,
            help="Query, Key and Value dropout rate. Dropout elements of these attention vectors during training.",
        )
        parser.add_argument(
            "--dropout_ff",
            type=float,
            default=0.3,
            help="Standard dropout applied to activations of FF networks in the Transformer.",
        )
        parser.add_argument(
            "--global_self_attn",
            type=str,
            default="performer",
            choices=[
                "full",
                "prob",
                "performer",
                "nystromformer",
                "benchmark",
                "none",
            ],
            help="Attention mechanism type.",
        )
        parser.add_argument(
            "--global_cross_attn",
            type=str,
            default="performer",
            choices=[
                "full",
                "performer",
                "benchmark",
                "none",
            ],
            help="Attention mechanism type.",
        )
        parser.add_argument(
            "--local_self_attn",
            type=str,
            default="performer",
            choices=[
                "full",
                "prob",
                "performer",
                "benchmark",
                "none",
            ],
            help="Attention mechanism type.",
        )
        parser.add_argument(
            "--local_cross_attn",
            type=str,
            default="performer",
            choices=[
                "full",
                "performer",
                "benchmark",
                "none",
            ],
            help="Attention mechanism type.",
        )
        parser.add_argument(
            "--activation",
            type=str,
            default="gelu",
            choices=["relu", "gelu"],
            help="Activation function for Transformer encoder and decoder layers.",
        )
        parser.add_argument(
            "--post_norm",
            action="store_true",
            help="Enable post-norm architecture for Transformers. See https://arxiv.org/abs/2002.04745.",
        )
        parser.add_argument(
            "--norm",
            type=str,
            choices=["layer", "batch", "scale", "power", "none"],
            default="batch",
        )
        parser.add_argument(
            "--init_lr", type=float, default=1e-10, help="Initial learning rate."
        )
        parser.add_argument(
            "--base_lr",
            type=float,
            default=5e-4,
            help="Base/peak LR. The LR is annealed to this value from --init_lr over --warmup_steps training steps.",
        )
        parser.add_argument(
            "--warmup_steps", type=int, default=0, help="LR anneal steps."
        )
        parser.add_argument(
            "--decay_factor",
            type=float,
            default=0.25,
            help="Factor to reduce LR on plateau (after warmup period is over).",
        )
        parser.add_argument(
            "--initial_downsample_convs",
            type=int,
            default=0,
            help="Add downsampling Conv1Ds to the encoder embedding layer to reduce context sequence length.",
        )
        parser.add_argument(
            "--class_loss_imp",
            type=float,
            default=0.1,
            help="Coefficient for node classification loss function. Set to 0 to disable this feature. Does not significantly impact forecasting results due to detached gradient.",
        )
        parser.add_argument(
            "--intermediate_downsample_convs",
            type=int,
            default=0,
            help="Add downsampling Conv1Ds between encoder layers.",
        )
        parser.add_argument(
            "--time_emb_dim",
            type=int,
            default=12,
            help="Time embedding dimension. Embed *each dimension of x* with this many learned periodic values.",
        )
        parser.add_argument(
            "--performer_kernel",
            type=str,
            default="relu",
            choices=["softmax", "relu"],
            help="Performer attention kernel. See Performer paper for details.",
        )
        parser.add_argument(
            "--performer_redraw_interval",
            type=int,
            default=125,
            help="Training steps between resampling orthogonal random features for FAVOR+ attention",
        )
        parser.add_argument(
            "--embed_method",
            type=str,
            choices=["spatio-temporal", "temporal"],
            default="spatio-temporal",
            help="Embedding method. spatio-temporal enables long-sequence spatio-temporal transformer mode while temporal recovers default architecture.",
        )
|
en
| 0.153551
|
# = torch.cat(logits.chunk(bs, dim=0), dim=1).squeeze(0)
| 2.348978
| 2
|