id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
423658
|
import re
from pathlib import Path
from typing import Annotated, cast
import pytest
from box import BoxError
from arti import Artifact, CompositeKey, Fingerprint, Graph, producer
from arti.backends.memory import MemoryBackend
from arti.executors.local import LocalExecutor
from arti.internal.utils import frozendict
from arti.storage.literal import StringLiteral
from arti.storage.local import LocalFile, LocalFilePartition
from arti.types import Int64
from arti.views import python as python_views
from tests.arti.dummies import A1, A2, A3, A4, P1, P2
from tests.arti.dummies import Num as _Num
from tests.arti.dummies import div
class Num(_Num):
    # Narrow the dummy Num to LocalFile storage so these tests exercise real
    # on-disk partitions (the base dummy leaves storage unconstrained).
    storage: LocalFile
@pytest.fixture
def graph() -> Graph:
    """Fixture: a small chained Graph `a -> P1 -> b -> P2 -> (c.a, c.b)`."""
    # NOTE: .out() supports strict Artifact subclass mypy typing with the mypy_plugin, but Producers
    # also support simple iteration (eg: `a, b = MyProducer(...)`).
    with Graph(name="test") as g:
        g.artifacts.a = A1()
        g.artifacts.b = P1(a1=g.artifacts.a).out()
        g.artifacts.c.a, g.artifacts.c.b = P2(a2=g.artifacts.b).out()
    return g
def test_Graph(graph: Graph) -> None:
    """The fixture wires the expected Artifact types and storage templates."""
    assert isinstance(graph.artifacts.a, A1)
    assert isinstance(graph.artifacts.b, A2)
    assert isinstance(graph.artifacts.c.a, A3)
    assert isinstance(graph.artifacts.c.b, A4)
    # Raw artifacts have static storage; produced artifacts embed an
    # {input_fingerprint} template (cf. test_Graph_storage_resolution).
    assert not graph.artifacts.a.storage.includes_input_fingerprint_template
    assert graph.artifacts.b.storage.includes_input_fingerprint_template
    assert graph.artifacts.c.a.storage.includes_input_fingerprint_template
    assert graph.artifacts.c.b.storage.includes_input_fingerprint_template
def test_Graph_literals(tmp_path: Path) -> None:
    """Literal-backed artifacts are deduplicated by input_fingerprint across builds."""
    n_add_runs = 0  # counts actual producer executions (cache misses)

    @producer()
    def add(x: int, y: Annotated[int, Num]) -> int:
        nonlocal n_add_runs
        n_add_runs += 1
        return x + y

    with Graph(name="Test") as g:
        g.artifacts.x = 1
        g.artifacts.y = Num(storage=LocalFile(path=str(tmp_path / "y.json")))
        g.artifacts.z = add(x=g.artifacts.x, y=g.artifacts.y).out()
        # Changes to `phase` will cause a new snapshot_id. However, since `phase` isn't an input to
        # `add`, we *shouldn't* have to recompute `z` - assuming the backend properly stores
        # storage->storage_partitions separate from the set of storage_partitions associated with a
        # snapshot_id.
        g.artifacts.phase = Num(storage=LocalFile(path=str(tmp_path / "phase.json")))
    Int64Artifact = Artifact.from_type(Int64())
    x, y, z, phase = g.artifacts.x, g.artifacts.y, g.artifacts.z, g.artifacts.phase
    # Plain python literals (the `1`) are wrapped into StringLiteral-backed Artifacts.
    assert isinstance(x, Int64Artifact)
    assert isinstance(x.storage, StringLiteral)
    assert x.storage.value == "1"
    assert isinstance(z, Int64Artifact)
    assert isinstance(z.storage, StringLiteral)
    assert z.storage.value is None  # produced literal has no value until built
    g.write(1, artifact=y)
    g.write(1, artifact=phase)
    with pytest.raises(FileNotFoundError, match="No data"):
        g.read(z, annotation=int)
    # Run the initial build to compute z
    g.build()
    assert g.read(z, annotation=int) == 2
    assert n_add_runs == 1
    assert len(g.backend.read_graph_partitions(g.name, g.get_snapshot_id(), "z", z)) == 1
    assert len(g.backend.read_artifact_partitions(z)) == 1
    # A subsequent build shouldn't require a rerun, ensuring we properly lookup existing literals.
    g.build()
    assert g.read(z, annotation=int) == 2
    assert n_add_runs == 1
    assert len(g.backend.read_graph_partitions(g.name, g.get_snapshot_id(), "z", z)) == 1
    assert len(g.backend.read_artifact_partitions(z)) == 1
    # Changing an input should trigger a rerun. There will still only be 1 z literal for this graph,
    # but now 2 overall for the storage (with different `input_fingerprint`s).
    g.write(2, artifact=y)
    g.build()
    assert g.read(z, annotation=int) == 3
    assert n_add_runs == 2
    assert len(g.backend.read_graph_partitions(g.name, g.get_snapshot_id(), "z", z)) == 1
    assert len(g.backend.read_artifact_partitions(z)) == 2
    # After getting a new snapshot_id, but no changes to `add`s inputs, ensure we properly lookup
    # existing literals - even though the snapshot_id will change, the input_fingerprint for `z`
    # will not.
    g.write(2, artifact=phase)
    g.build()
    assert g.read(z, annotation=int) == 3
    assert n_add_runs == 2
    assert len(g.backend.read_graph_partitions(g.name, g.get_snapshot_id(), "z", z)) == 1
    assert len(g.backend.read_artifact_partitions(z)) == 2
def test_Graph_snapshot() -> None:
    """snapshot_id is an order-independent combination of component fingerprints."""
    with Graph(name="test") as g:
        g.artifacts.a = A1()
        p1 = P1(a1=g.artifacts.a)
        g.artifacts.b = cast(A2, p1.out())
    # Expected fingerprint inputs: the graph, the artifact names, the artifact
    # and producer fingerprints, and the content fingerprints of the raw
    # artifact's discovered storage partitions.
    id_components = [
        g.fingerprint,
        Fingerprint.from_string("a"),
        Fingerprint.from_string("b"),
        g.artifacts.a.fingerprint,
        g.artifacts.b.fingerprint,
        p1.fingerprint,
        *(
            storage_partition.with_content_fingerprint().fingerprint
            for storage_partition in g.artifacts.a.discover_storage_partitions()
        ),
    ]
    snapshot = g.snapshot()
    assert snapshot.snapshot_id == Fingerprint.combine(*id_components)
    # Ensure order independence
    assert snapshot.snapshot_id == Fingerprint.combine(*reversed(id_components))
    # Ensure snapshot of a snapshot doesn't copy
    assert snapshot.snapshot() is snapshot
def test_Graph_get_snapshot_id(tmp_path: Path) -> None:
    """get_snapshot_id computes an id without mutating the original Graph."""
    with Graph(name="test") as g:
        g.artifacts.a = 5
    assert g.snapshot_id is None
    snap = g.snapshot()
    assert snap.snapshot_id is not None
    assert g.get_snapshot_id() == snap.snapshot_id
    # The original graph remains un-snapshotted.
    assert g.snapshot_id is None
def test_Graph_snapshot_missing_input_artifact(tmp_path: Path) -> None:
    """Snapshotting errors when a raw artifact has no data written yet."""
    unwritten = Num(storage=LocalFile(path=str(tmp_path / "a.json")))
    with Graph(name="test") as g:
        g.artifacts.a = unwritten
    expected = re.escape("No data found for `a`")
    with pytest.raises(ValueError, match=expected):
        g.snapshot()
def test_Graph_snapshot_id_producer_arg_order(tmp_path: Path) -> None:
    """Graphs differing only in Producer argument order get distinct snapshot ids."""
    a = Num(storage=LocalFile(path=str(tmp_path / "a.json")))
    b = Num(storage=LocalFile(path=str(tmp_path / "b.json")))
    for artifact, raw in ((a, "10"), (b, "5")):
        with open(artifact.storage.path, "w") as fh:
            fh.write(raw)
    c = Num(storage=LocalFile.rooted_at(tmp_path))
    # Two graphs, identical except for the a/b argument order to `div`.
    with Graph(name="test") as g_ab:
        g_ab.artifacts.c = div(a=a, b=b).out(c)
    with Graph(name="test") as g_ba:
        g_ba.artifacts.c = div(a=b, b=a).out(c)
    assert g_ab.get_snapshot_id() != g_ba.get_snapshot_id()
def test_Graph_tagging(tmp_path: Path) -> None:
    """Tags pin a snapshot: reads via a tag keep returning the tagged build's data."""

    @producer()
    def plus1(x: Annotated[int, Num]) -> int:
        return x + 1

    with Graph(name="Test") as g:
        g.artifacts.x = Num(storage=LocalFile(path=str(tmp_path / "x.json")))
        g.artifacts.y = plus1(x=g.artifacts.x)
    g.write(1, artifact=g.artifacts.x)
    snapshot_1 = g.build()
    assert snapshot_1.read(g.artifacts.y, annotation=int) == 2
    snapshot_1.tag("prod")
    assert g.from_tag("prod").read(g.artifacts.y, annotation=int) == 2
    # Rebuild with new input data; the tag must still resolve to the old snapshot.
    g.write(2, artifact=g.artifacts.x)
    snapshot_2 = g.build()
    assert snapshot_2.read(g.artifacts.y, annotation=int) == 3
    assert snapshot_1.snapshot_id != snapshot_2.snapshot_id
    assert g.read(g.artifacts.y, annotation=int) == 3
    assert g.from_tag("prod").read(g.artifacts.y, annotation=int) == 2
    # Tags are write-once; overwriting or resolving an unknown tag errors.
    with pytest.raises(
        ValueError, match=re.escape("Existing `prod` tag for Graph `Test` points to Fingerprint")
    ):
        g.tag("prod")
    with pytest.raises(ValueError, match=re.escape("No known `fake` tag for Graph `Test`")):
        g.from_tag("fake")
def test_Graph_build(tmp_path: Path) -> None:
    """Builds rerun producers only when their input data changes (content-addressed caching)."""
    n_builds = 0  # counts actual `increment` executions (cache misses)

    @producer()
    def increment(i: Annotated[int, Num]) -> Annotated[int, Num]:
        nonlocal n_builds
        n_builds += 1
        return i + 1

    @producer()
    def dup(i: Annotated[int, Num]) -> tuple[Annotated[int, Num], Annotated[int, Num]]:
        return i, i

    with Graph(name="test") as g:
        g.artifacts.root.a = Num(storage=LocalFile(path=str(tmp_path / "a.json")))
        g.artifacts.b = increment(i=g.artifacts.root.a).out(
            Num(storage=LocalFile.rooted_at(tmp_path))
        )
        # Test multiple return values
        g.artifacts.c, g.artifacts.d = dup(i=g.artifacts.root.a).out(
            Num(storage=LocalFile.rooted_at(tmp_path)),
            Num(storage=LocalFile.rooted_at(tmp_path)),
        )
    a, b, c, d = (
        g.artifacts.root.a,
        cast(Num, g.artifacts.b),
        cast(Num, g.artifacts.c),
        cast(Num, g.artifacts.d),
    )
    # Bootstrap the initial artifact and build
    g.write(0, artifact=a)
    g.build()
    assert n_builds == 1
    assert g.read(b, annotation=int) == 1
    assert g.read(c, annotation=int) == g.read(d, annotation=int) == 0
    # A second build should no-op
    g.build(executor=LocalExecutor())
    assert n_builds == 1
    assert g.read(b, annotation=int) == 1
    assert g.read(c, annotation=int) == g.read(d, annotation=int) == 0
    # Changing the raw Artifact data should trigger a rerun
    g.write(1, artifact=a)
    g.build()
    assert n_builds == 2
    assert g.read(b, annotation=int) == 2
    assert g.read(c, annotation=int) == g.read(d, annotation=int) == 1
    # Changing back to the original data should no-op
    g.write(0, artifact=a)
    g.build()
    assert n_builds == 2
    assert g.read(b, annotation=int) == 1
    assert g.read(c, annotation=int) == g.read(d, annotation=int) == 0
    # Test that the MemoryBackend will discover existing StoragePartitions, even when empty. Other
    # backends are persistent, so this isn't necessary. This is really a MemoryBackend test, but
    # easiest to test in a Graph context.
    #
    # Running a build should no-op (ie: num_builds shouldn't increment), but we unfortunately can't
    # read *immediately* because we won't know the input_fingerprints for all the generated
    # Artifacts until build. Eventually, we need to allow the Artifact to access the backend
    # directly and automatically compute the input_fingerprints (ie: sync on the fly), which would
    # allow us to read automatically.
    g = g.copy(update={"backend": MemoryBackend()})
    g.build()
    assert n_builds == 2
    assert g.read(b, annotation=int) == 1
    assert g.read(c, annotation=int) == g.read(d, annotation=int) == 0
def test_Graph_build_failed_validation(tmp_path: Path) -> None:
    """A failing validate_outputs aborts the build and leaves no readable output."""
    failed_validation_msg = "This is junk data!"

    @producer(validate_outputs=lambda i: (False, failed_validation_msg))
    def angry_add(i: Annotated[int, Num]) -> Annotated[int, Num]:
        return i + 1

    num = Num(storage=LocalFile.rooted_at(tmp_path))  # Immutable, thus can reuse
    with Graph(name="test") as g:
        g.artifacts.a = num
        g.artifacts.b = cast(Num, angry_add(i=g.artifacts.a).out(num))
    g.write(0, artifact=g.artifacts.a)
    with pytest.raises(ValueError, match=failed_validation_msg):
        g.build()
    # The rejected output must not have been committed.
    with pytest.raises(FileNotFoundError, match="No data"):
        g.read(g.artifacts.b, annotation=int)
def test_Graph_dependencies(graph: Graph) -> None:
    """dependencies maps every node (Artifact or Producer) to its direct upstreams."""
    p1 = graph.artifacts.b.producer_output.producer
    p2 = graph.artifacts.c.a.producer_output.producer
    assert graph.dependencies == frozendict(
        {
            graph.artifacts.a: frozenset(),  # raw artifact: no upstream
            p1: frozenset({graph.artifacts.a}),
            graph.artifacts.b: frozenset({p1}),
            p2: frozenset({graph.artifacts.b}),
            graph.artifacts.c.a: frozenset({p2}),
            graph.artifacts.c.b: frozenset({p2}),
        }
    )
def test_Graph_errors() -> None:
    """Graphs freeze after definition and reject nested definitions."""
    with Graph(name="test") as graph:
        graph.artifacts.a = A1()
        graph.artifacts.b = P1(a1=graph.artifacts.a).out()
    # The artifact Box is frozen once the `with` block exits.
    with pytest.raises(BoxError, match="Box is frozen"):
        graph.artifacts.a = A1()
    with pytest.raises(AttributeError, match="has no attribute"):
        graph.artifacts.z
    # Only one Graph may be in definition at a time.
    with Graph(name="outer"):
        with pytest.raises(ValueError, match="Another graph is being defined"):
            with Graph(name="inner"):
                pass
def test_Graph_producers(graph: Graph) -> None:
    """Graph.producers collects every Producer wired into the fixture graph."""
    expected = frozenset(
        {
            graph.artifacts.b.producer_output.producer,
            graph.artifacts.c.a.producer_output.producer,
        }
    )
    assert graph.producers == expected
def test_Graph_producer_output(graph: Graph) -> None:
    """producer_outputs maps each Producer to its ordered output Artifacts."""
    p1 = graph.artifacts.b.producer_output.producer
    p2 = graph.artifacts.c.a.producer_output.producer
    assert graph.producer_outputs == frozendict(
        {
            p1: (graph.artifacts.b,),
            p2: (graph.artifacts.c.a, graph.artifacts.c.b),
        }
    )
    # The mapping is unavailable while outputs may still be added.
    with Graph(name="test") as g:
        with pytest.raises(
            ValueError,
            match="producer_outputs cannot be used while the Graph is still being defined",
        ):
            g.producer_outputs
def test_Graph_read_write(tmp_path: Path) -> None:
    """Round-trips a value through Graph.write/Graph.read and checks arg validation."""
    with Graph(name="test") as g:
        g.artifacts.i = Num(storage=LocalFile(path=str(tmp_path / "i.json")))
    i = g.artifacts.i
    # Test write
    storage_partition = g.write(5, artifact=i)
    assert isinstance(storage_partition, LocalFilePartition)
    assert storage_partition.content_fingerprint != Fingerprint.empty()
    assert storage_partition.input_fingerprint == Fingerprint.empty()  # raw, not produced
    assert storage_partition.keys == CompositeKey()  # unpartitioned
    assert storage_partition.path.endswith(i.format.extension)
    # Once snapshotted, writing to the raw Artifacts would result in a different snapshot.
    with pytest.raises(
        ValueError,
        match=re.escape("Writing to a raw Artifact (`i`) would cause a `snapshot_id` change."),
    ):
        g.snapshot().write(10, artifact=i)
    # Test read
    assert g.read(i, annotation=int) == 5
    assert g.read(i, view=python_views.Int()) == 5
    assert g.read(i, annotation=int, storage_partitions=[storage_partition]) == 5
    # Exactly one of `annotation` or `view` must be supplied.
    with pytest.raises(ValueError, match="Either `annotation` or `view` must be passed"):
        g.read(i)
    with pytest.raises(ValueError, match="Only one of `annotation` or `view` may be passed"):
        g.read(i, annotation=int, view=python_views.Int())
def test_Graph_references(graph: Graph) -> None:
    """An Artifact defined in one Graph can be referenced by another."""
    with Graph(name="test-2") as downstream:
        downstream.artifacts.upstream.a = graph.artifacts.a
    assert downstream.artifacts.upstream.a == graph.artifacts.a
def test_Graph_storage_resolution() -> None:
    """Storage paths are templated from graph name, path_tags, and artifact position."""
    with Graph(name="test", path_tags={"tag": "value"}) as g:
        g.artifacts.root.a = Num(storage=LocalFile())
        g.artifacts.root.b = Num(storage=LocalFile())
        g.artifacts.c = cast(
            Num, div(a=g.artifacts.root.a, b=g.artifacts.root.b).out(Num(storage=LocalFile()))
        )
        # Produced artifacts with a fixed (template-free) path are rejected.
        with pytest.raises(
            ValueError,
            match=re.escape(
                "Produced Artifacts must have a '{input_fingerprint}' template in their Storage"
            ),
        ):
            g.artifacts.d = div(a=g.artifacts.root.a, b=g.artifacts.root.b).out(
                Num(storage=LocalFile(path="junk"))
            )
    assert g.artifacts.root.a.storage.path.endswith("/test/tag=value/root/a/a.json")
    assert g.artifacts.root.b.storage.path.endswith("/test/tag=value/root/b/b.json")
    assert g.artifacts.c.storage.path.endswith("/test/tag=value/c/{input_fingerprint}/c.json")
|
423666
|
from decimal import Decimal
import pytest
from investments.currency import Currency
from investments.money import Money
def test_money():
    """Money equality, division, comparison and arithmetic respect currency."""
    usd1 = Money(1, Currency.USD)
    usd7 = Money(7, Currency.USD)
    rub1 = Money(1, Currency.RUB)
    rub3 = Money(3, Currency.RUB)
    rub5 = Money(5, Currency.RUB)
    # Equality requires both amount and currency to match.
    assert usd1 != rub1
    assert usd1 != usd7
    assert usd1 == Money(1, Currency.USD)
    # Cross-currency division is rejected; same-currency division yields a float ratio.
    with pytest.raises(TypeError):
        r = rub3 / usd1
    r = rub5 / rub1
    assert isinstance(r, float)
    assert r == 5
    # Ordering and addition are only defined within a single currency.
    assert rub1 < rub3
    with pytest.raises(TypeError):
        r = rub1 < usd7
    with pytest.raises(TypeError):
        r = usd1 + rub3
    with pytest.raises(TypeError):
        r = usd1 + 1
    r = usd1 + usd7
    assert r.amount == 8
    assert r.currency == Currency.USD
    r = rub5 - rub3
    assert r.amount == 2
    assert r.currency == Currency.RUB
    # Negation/abs operate on the Decimal amount.
    negative_money = Money(-1, Currency.RUB)
    assert negative_money.amount == Decimal('-1')
    assert abs(negative_money).amount == Decimal('1')
def test_money_zero():
    """Bare int 0 is the only plain number Money accepts (presumably to support sum())."""
    rub3 = Money(3, Currency.RUB)
    r = rub3 + 0
    assert r == rub3
    r = 0 + rub3
    assert r == rub3
    r = rub3 - 0
    assert r == rub3
    r = 0 - rub3  # reflected subtraction from zero negates the amount
    assert r.amount == -1 * rub3.amount
    assert r == -1 * rub3
    # Any other bare number is a TypeError in either operand position.
    with pytest.raises(TypeError):
        r = rub3 + 3
    with pytest.raises(TypeError):
        r = 3 + rub3
    with pytest.raises(TypeError):
        r = rub3 - 3
    with pytest.raises(TypeError):
        r = 3 - rub3
def test_money_float():
    """Money arithmetic avoids binary-float error: three 0.3s sum exactly to 0.9."""
    v = 0.3
    v_expect = 0.9
    m = Money(v, Currency.USD)
    m_expect = Money(v_expect, Currency.USD)
    # Plain float addition accumulates representation error...
    vsum = v + v + v
    assert vsum != v_expect
    # ...and constructing Money from that float preserves the error...
    msum = Money(v + v + v, Currency.USD)
    assert msum.amount != m_expect.amount
    # ...but summing Money objects stays exact (amounts are Decimal — see test_money).
    msum = m + m + m
    assert msum.amount == m_expect.amount
|
423676
|
from setuptools import setup

# Minimal packaging metadata for the `pgn` (PyTorch graph networks) library.
# Author fields are anonymized placeholders in this copy.
setup(
    name="pgn",
    version="0.01",
    description="Pytorch graph networks",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=["pgn"],
)
|
423691
|
import json
import os
import pathlib
import re
from collections import namedtuple
from typing import NamedTuple, Optional

import boto3
import wasabi
from botocore.exceptions import ClientError

import sciwing.constants as constants
PATHS = constants.PATHS
AWS_CRED_DIR = PATHS["AWS_CRED_DIR"]
OUTPUT_DIR = PATHS["OUTPUT_DIR"]
class S3Util:
    """Utilities to upload/download files and folders to/from an S3 bucket."""

    def __init__(self, aws_cred_config_json_filename: str):
        """Build S3 client/resource handles from a credential config file.

        Parameters
        ----------
        aws_cred_config_json_filename : str
            You need to instantiate this class with an aws configuration json file
            containing the following keys and values:

            aws_access_key_id : str
                The access key id for the AWS account that you have
            aws_access_secret : str
                The access secret
            region : str
                The region in which your bucket is present
            parsect_bucket_name : str
                The name of the bucket where all the models/experiments will be stored
        """
        self.aws_cred_config_json_filename = aws_cred_config_json_filename
        self.msg_printer = wasabi.Printer()
        self.credentials = self.load_credentials()
        self.s3_client = self.get_client()
        self.s3_resource = self.get_resource()

    def load_credentials(self) -> NamedTuple:
        """Read the credentials from the json file.

        Returns
        -------
        NamedTuple
            A named tuple with access_key, access_secret, region and bucket_name
            fields filled from the config file.

        Raises
        ------
        ValueError
            If the config file is missing any required key.
        """
        with open(self.aws_cred_config_json_filename) as fp:
            cred = json.load(fp)
        try:
            Credentials = namedtuple(
                "Credentials", ["access_key", "access_secret", "region", "bucket_name"]
            )
            return Credentials(
                access_key=cred["aws_access_key_id"],
                access_secret=cred["aws_access_secret"],
                region=cred["region"],
                bucket_name=cred["parsect_bucket_name"],
            )
        except KeyError as err:
            self.msg_printer.fail(
                f"Your credential file {self.aws_cred_config_json_filename} "
                f"is malformed. Please contact the developers for more info "
            )
            # Fail fast: previously this silently returned None, which surfaced
            # later as an opaque AttributeError in get_client().
            raise ValueError(
                f"Malformed credential file {self.aws_cred_config_json_filename}"
            ) from err

    def get_client(self):
        """Return a low-level boto3 s3 client (or None if connection fails).

        Returns
        -------
        boto3.client
            The client object that manages all the aws operations.
            The client is the low level access to the connection with s3.
        """
        try:
            return boto3.client(
                "s3",
                region_name=self.credentials.region,
                aws_access_key_id=self.credentials.access_key,
                aws_secret_access_key=self.credentials.access_secret,
            )
        except ClientError:
            self.msg_printer.fail("Failed to connect to s3 instance")

    def get_resource(self):
        """Return a high-level boto3 resource for the bucket (or None on failure).

        Returns
        -------
        boto3.resource
            Resource that manages connections with s3.
        """
        try:
            return boto3.resource(
                "s3",
                region_name=self.credentials.region,
                aws_access_key_id=self.credentials.access_key,
                aws_secret_access_key=self.credentials.access_secret,
            )
        except ClientError:
            self.msg_printer.fail("Failed to get the s3 resource")

    def upload_file(self, filename: str, obj_name: Optional[str] = None):
        """Upload a single local file to the configured bucket.

        Parameters
        ----------
        filename : str
            The filename in the local directory that needs to be uploaded to s3
        obj_name : Optional[str]
            The key to use in the s3 bucket. If None, the s3 key is the same
            as ``filename``.
        """
        if obj_name is None:
            obj_name = filename
        try:
            self.s3_client.upload_file(filename, self.credentials.bucket_name, obj_name)
        except ClientError:
            # Report the actual filename (the message used to say "(unknown)").
            self.msg_printer.fail(f"Could not upload file {filename}")

    def upload_folder(self, folder_name: str, base_folder_name: str):
        """Recursively uploads a folder to s3.

        Parameters
        ----------
        folder_name : str
            The name of the local folder that is uploaded
        base_folder_name : str
            The name of the folder from which the current folder
            being uploaded stems from. This is needed to associate appropriate
            files and directories to their hierarchies within the folder
        """
        path = pathlib.Path(folder_name)
        for file in path.iterdir():
            if file.is_file():
                self.upload_file(
                    filename=str(file), obj_name=f"{base_folder_name}/{file.name}"
                )
            elif file.is_dir():
                self.upload_folder(
                    folder_name=str(file),
                    base_folder_name=f"{base_folder_name}/{file.name}",
                )

    def download_file(self, filename_s3: str, local_filename: str):
        """Downloads a file from s3.

        Parameters
        ----------
        filename_s3 : str
            A filename in s3 that needs to be downloaded
        local_filename : str
            The local filename that will be used
        """
        # `s3_object` (was `object`, which shadowed the builtin).
        s3_object = self.s3_resource.Object(self.credentials.bucket_name, filename_s3)
        s3_object.download_file(local_filename)

    def download_folder(
        self,
        folder_name_s3: str,
        download_only_best_checkpoint: bool = False,
        chkpoints_foldername: str = "checkpoints",
        best_model_filename="best_model.pt",
        output_dir: str = OUTPUT_DIR,
    ):
        """Downloads a folder from s3 recursively.

        Parameters
        ----------
        folder_name_s3 : str
            The name of the folder in s3
        download_only_best_checkpoint : bool
            If the folder being downloaded is an experiment folder, then you
            can download only the best model checkpoints for running test or inference
        chkpoints_foldername : str
            The name of the checkpoints folder where the best model parameters are stored
        best_model_filename : str
            The name of the file where the best model parameters are stored
        output_dir : str
            Local directory under which the s3 keys are mirrored.

        Raises
        ------
        FileNotFoundError
            If no s3 keys match ``folder_name_s3``.
        """
        bucket = self.s3_resource.Bucket(self.credentials.bucket_name)
        with self.msg_printer.loading(f"Downloading folder {folder_name_s3}"):
            if len(list(bucket.objects.filter(Prefix=folder_name_s3))) == 0:
                raise FileNotFoundError(f"Failed to find folder {folder_name_s3}")
            for key in bucket.objects.filter(Prefix=folder_name_s3):
                # Mirror the s3 key's directory structure locally.
                if not os.path.exists(f"{output_dir}/{os.path.dirname(key.key)}"):
                    os.makedirs(f"{output_dir}/{os.path.dirname(key.key)}")
                if download_only_best_checkpoint:
                    # Outside the checkpoints folder everything is downloaded;
                    # inside it, only the best-model file.
                    if re.search(chkpoints_foldername, key.key):
                        if re.search(best_model_filename, key.key):
                            bucket.download_file(key.key, f"{output_dir}/{key.key}")
                    else:
                        bucket.download_file(key.key, f"{output_dir}/{key.key}")
                else:
                    bucket.download_file(key.key, f"{output_dir}/{key.key}")
        self.msg_printer.good(f"Finished downloading {folder_name_s3}")

    def search_folders_with(self, pattern):
        """Searches for folders in the s3 bucket with specific pattern.

        Parameters
        ----------
        pattern : str
            A regex pattern

        Returns
        -------
        List[str]
            The list of foldernames that match the pattern
        """
        bucket = self.s3_resource.Bucket(self.credentials.bucket_name)
        # Top-level "folder" = first path segment of each object key.
        foldernames = {obj.key.split("/")[0] for obj in bucket.objects.all()}
        return [name for name in foldernames if re.match(pattern, name) is not None]
|
423696
|
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
from utilities import *
import spell_utils
print "Registering sp-Demonhide"
def demonhideSpellGrantDr(attachee, args, evt_obj):
drAmount = 5
drBreakType = args.get_arg(2)
damageMesId = 126 #ID126 in damage.mes is DR
evt_obj.damage_packet.add_physical_damage_res(drAmount, drBreakType, damageMesId)
return 0
demonhideSpell = PythonModifier("sp-Demonhide", 4, False) # spell_id, duration, drBreakType, empty
demonhideSpell.AddHook(ET_OnTakingDamage, EK_NONE, demonhideSpellGrantDr,())
demonhideSpell.AddHook(ET_OnConditionAddPre, EK_NONE, spell_utils.replaceCondition, ()) #damage reduction does stack; so I need replaceCondition
demonhideSpell.AddHook(ET_OnGetTooltip, EK_NONE, spell_utils.spellTooltip, ())
demonhideSpell.AddHook(ET_OnGetEffectTooltip, EK_NONE, spell_utils.spellEffectTooltip, ())
demonhideSpell.AddHook(ET_OnD20Query, EK_Q_Critter_Has_Spell_Active, spell_utils.queryActiveSpell, ())
demonhideSpell.AddHook(ET_OnD20Signal, EK_S_Killed, spell_utils.spellKilled, ())
demonhideSpell.AddSpellDispelCheckStandard()
demonhideSpell.AddSpellTeleportPrepareStandard()
demonhideSpell.AddSpellTeleportReconnectStandard()
demonhideSpell.AddSpellCountdownStandardHook()
|
423718
|
import gc
import math
import os
import time
import numpy as np
import tensorflow as tf
from tentacle.board import Board
from tentacle.data_set import DataSet
from tentacle.ds_loader import DatasetLoader
# Max examples loaded into memory per training "part"; mini-batch size per step.
DATASET_CAPACITY = 16 * 8000
BATCH_SIZE = 32
class ValueNet(object):
    """Convolutional state-value network (TF1 graph/session style).

    Owns its own tf.Graph and tf.Session, trains on (board state, reward)
    pairs streamed from DatasetLoader files in capacity-bounded parts, and
    periodically checkpoints and writes TensorBoard summaries.
    """

    def __init__(self, brain_dir, summary_dir):
        # brain_dir: checkpoint directory; summary_dir: TensorBoard log directory.
        self.brain_dir = brain_dir
        self.brain_file = os.path.join(self.brain_dir, 'model.ckpt')
        self.summary_dir = summary_dir
        self._has_more_data = True  # cleared when the train loader is drained
        self.ds_train = None
        self.ds_test = None
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.states_pl, self.rewards_pl = self.placeholder_inputs()
            self.value_outputs, self.opt_op, self.global_step, self.mse = self.model(self.states_pl, self.rewards_pl)
            self.summary_op = tf.summary.merge_all()
            init = tf.initialize_all_variables()  # deprecated TF1 alias of global_variables_initializer
            # NOTE(review): only "value_net"-scoped trainables are saved/restored,
            # so global_step is not part of the checkpoint — confirm intentional.
            self.saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="value_net"))
            self.summary_writer = tf.summary.FileWriter(self.summary_dir, self.graph)
        self.sess = tf.Session(graph=self.graph)
        self.sess.run(init)

    def get_input_shape(self):
        """Return (height, width, channels) of the input planes."""
        NUM_CHANNELS = 4  # black / white / empty / is-black-to-move (see adapt_state)
        return Board.BOARD_SIZE, Board.BOARD_SIZE, NUM_CHANNELS

    def placeholder_inputs(self):
        """Create the NHWC state placeholder and per-example reward placeholder."""
        h, w, c = self.get_input_shape()
        states = tf.placeholder(tf.float32, [None, h, w, c]) # NHWC
        rewards = tf.placeholder(tf.float32, shape=[None])
        return states, rewards

    def weight_variable(self, shape):
        # Small truncated-normal init for conv weights.
        initial = tf.truncated_normal(shape, stddev=0.01)
        return tf.Variable(initial)

    def bias_variable(self, shape):
        # Constant positive bias init.
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def create_value_net(self, states_pl):
        """Build the conv tower + linear head; output is tanh-squashed to [-1, 1]."""
        NUM_CHANNELS = 4
        ch1 = 32
        W_1 = self.weight_variable([3, 3, NUM_CHANNELS, ch1])
        b_1 = self.bias_variable([ch1])
        ch = 32
        W_2 = self.weight_variable([3, 3, ch1, ch])
        b_2 = self.bias_variable([ch])
        W_21 = self.weight_variable([3, 3, ch, ch])
        b_21 = self.bias_variable([ch])
        W_22 = self.weight_variable([3, 3, ch, ch])
        b_22 = self.bias_variable([ch])
        # W_23 = self.weight_variable([1, 1, ch, 1])
        # b_23 = self.bias_variable([1])
        # Four stacked 3x3 SAME-padded convolutions with ReLU.
        h_conv1 = tf.nn.relu(tf.nn.conv2d(states_pl, W_1, [1, 1, 1, 1], padding='SAME') + b_1)
        h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1, W_2, [1, 1, 1, 1], padding='SAME') + b_2)
        h_conv21 = tf.nn.relu(tf.nn.conv2d(h_conv2, W_21, [1, 1, 1, 1], padding='SAME') + b_21)
        h_conv22 = tf.nn.relu(tf.nn.conv2d(h_conv21, W_22, [1, 1, 1, 1], padding='SAME') + b_22)
        # h_conv23 = tf.nn.relu(tf.nn.conv2d(h_conv22, W_23, [1, 1, 1, 1], padding='SAME') + b_23)
        conv_out_dim = h_conv22.get_shape()[1:].num_elements()
        conv_out = tf.reshape(h_conv22, [-1, conv_out_dim])
        # Single zero-initialized linear unit as the value head.
        num_hidden = 1
        W_3 = tf.Variable(tf.zeros([conv_out_dim, num_hidden], tf.float32))
        b_3 = tf.Variable(tf.zeros([num_hidden], tf.float32))
        # W_4 = tf.Variable(tf.zeros([num_hidden, 1], tf.float32))
        # b_4 = tf.Variable(tf.zeros([1], tf.float32))
        # hidden = tf.nn.relu(tf.matmul(conv_out, W_3) + b_3)
        # fc_out = tf.matmul(hidden, W_4) + b_4
        fc_out = tf.tanh(tf.matmul(conv_out, W_3) + b_3)
        return fc_out

    def model(self, states_pl, rewards_pl):
        """Wire loss (MSE + L2 regularization), Adam optimizer, and summaries."""
        global_step = tf.Variable(0, name='global_step', trainable=False)
        with tf.variable_scope("value_net"):
            value_outputs = self.create_value_net(states_pl)
        value_net_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="value_net")
        mean_square_loss = tf.reduce_mean(tf.squared_difference(rewards_pl, value_outputs))
        value_reg_loss = tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in value_net_vars])
        value_loss = mean_square_loss + 0.001 * value_reg_loss  # L2 weight 1e-3
        optimizer = tf.train.AdamOptimizer(0.0001)
        value_opt_op = optimizer.minimize(value_loss, global_step=global_step)
        tf.summary.scalar("raw_value_loss", mean_square_loss)
        tf.summary.scalar("reg_value_loss", value_reg_loss)
        tf.summary.scalar("all_value_loss", value_loss)
        return value_outputs, value_opt_op, global_step, mean_square_loss

    def get_state_values(self, states, players):
        """Evaluate the value head for a batch of (board state, player) pairs."""
        h, w, c = self.get_input_shape()
        ss = []
        for s, p in zip(states, players):
            img, _ = self.adapt_state(s, p)
            ss.append(img)
        ss = np.array(ss)
        feed_dict = {
            self.states_pl: ss.reshape((-1, h, w, c)),
        }
        return self.sess.run(self.value_outputs, feed_dict=feed_dict)

    def save(self):
        """Checkpoint the value-net variables to brain_file."""
        self.saver.save(self.sess, self.brain_file)

    def load(self):
        """Restore the latest checkpoint from brain_dir, if one exists."""
        ckpt = tf.train.get_checkpoint_state(self.brain_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)

    def close(self):
        """Release the TF session."""
        self.sess.close()

    def train(self, train_dat_file, test_dat_file):
        """Train indefinitely, streaming the dataset in capacity-bounded parts."""
        self.loader_train = DatasetLoader(train_dat_file)
        self.loader_test = DatasetLoader(test_dat_file)
        epoch = 0
        while True:  # runs until externally interrupted
            print('epoch:', epoch)
            epoch += 1
            ith_part = 0
            while self._has_more_data:
                ith_part += 1
                self.adapt()
                self.train_part(ith_part)
                # if ith_part >= 1:
                #     break
            self._has_more_data = True  # reset so the next epoch re-reads the file
            # if epoch >= 1:
            #     break

    def fill_feed_dict(self, data_set, states_pl, rewards_pl, batch_size=None):
        """Draw the next mini-batch and map it onto the placeholders."""
        batch_size = batch_size or BATCH_SIZE
        states_feed, rewards_feed = data_set.next_batch(batch_size)
        feed_dict = {
            states_pl: states_feed,
            rewards_pl: rewards_feed
        }
        return feed_dict

    def train_part(self, ith_part):
        """Run one optimization pass over the currently loaded training part."""
        NUM_STEPS = self.ds_train.num_examples // BATCH_SIZE
        print('total num steps:', NUM_STEPS)
        start_time = time.time()
        train_mse = 0.
        for step in range(1, NUM_STEPS + 1):
            feed_dict = self.fill_feed_dict(self.ds_train, self.states_pl, self.rewards_pl)
            _, train_mse = self.sess.run([self.opt_op, self.mse], feed_dict=feed_dict)
            if step % 1000 == 0:
                # Periodic TensorBoard summaries tagged with the global step.
                summary_str, gstep = self.sess.run([self.summary_op, self.global_step], feed_dict=feed_dict)
                self.summary_writer.add_summary(summary_str, gstep)
                self.summary_writer.flush()
            if step == NUM_STEPS:
                # Checkpoint once at the end of the part.
                self.saver.save(self.sess, self.brain_file, global_step=self.global_step)
        duration = time.time() - start_time
        test_mse = self.do_eval(self.mse, self.states_pl, self.rewards_pl, self.ds_test)
        print('part: %d, acc_train: %.3f, test accuracy: %.3f, time cost: %.3f sec' %
              (ith_part, train_mse, test_mse, duration))

    def do_eval(self, mse, states_pl, rewards_pl, data_set):
        """Return the MSE averaged over one pass of data_set."""
        accum_mse = 0.
        batch_size = BATCH_SIZE
        assert batch_size != 0
        steps_per_epoch = math.ceil(data_set.num_examples / batch_size)
        for _ in range(steps_per_epoch):
            feed_dict = self.fill_feed_dict(data_set, states_pl, rewards_pl, batch_size)
            accum_mse += self.sess.run(mse, feed_dict=feed_dict)
        avg_mse = accum_mse / (steps_per_epoch or 1)
        return avg_mse

    def forge(self, row):
        """Split one raw dataset row into (encoded image, reward).

        Per the slicing below: board cells first, player in row[-2], reward in
        row[-1].
        """
        board = row[:Board.BOARD_SIZE_SQ]
        player = row[-2]
        image, _ = self.adapt_state(board, player)
        reward = row[-1]
        return image, reward

    def adapt_state(self, board, player):
        """Encode a flat board array into 4 stacked feature planes.

        Returns (image, legal): the flattened [black, white, empty,
        is_black_move] planes and a boolean mask of empty squares.
        """
        black = (board == Board.STONE_BLACK).astype(float)
        white = (board == Board.STONE_WHITE).astype(float)
        empty = (board == Board.STONE_EMPTY).astype(float)
        # Constant plane: all ones when it is black's turn, else all zeros.
        is_black_move = np.ones_like(black, float) if player == Board.STONE_BLACK else np.zeros_like(black, float)
        image = np.dstack((black, white, empty, is_black_move)).ravel()
        legal = empty.astype(bool)
        return image, legal

    def adapt(self):
        """(Re)load and preprocess the next train/test data parts into DataSets."""
        gc.collect()
        # Drop exhausted datasets so a fresh part is loaded (is_wane presumably
        # marks a loader that is winding down — TODO confirm vs DatasetLoader).
        if self.ds_train is not None and not self.loader_train.is_wane:
            self.ds_train = None
        if self.ds_test is not None and not self.loader_test.is_wane:
            self.ds_test = None
        gc.collect()
        h, w, c = self.get_input_shape()

        def f(dat):
            # Convert raw rows to (image, reward) pairs and pack into a DataSet.
            ds = []
            for row in dat:
                s, r = self.forge(row)
                ds.append((s, r))
            ds = np.array(ds)
            return DataSet(np.vstack(ds[:, 0]).reshape((-1, h, w, c)), ds[:, 1])

        if self.ds_train is None:
            ds_train, self._has_more_data = self.loader_train.load(DATASET_CAPACITY)
            self.ds_train = f(ds_train)
        if self.ds_test is None:
            ds_test, _ = self.loader_test.load(DATASET_CAPACITY // 2)
            self.ds_test = f(ds_test)
        print(self.ds_train.images.shape, self.ds_train.labels.shape)
        print(self.ds_test.images.shape, self.ds_test.labels.shape)
|
423742
|
import os
import random
import argparse
import numpy as np
class CycleGANArgParser(object):
    """Command-line argument parser for (Mask-)CycleGAN-VC training/inference.

    Declares all data, model and optimisation options; ``parse_args`` also
    seeds the Python and NumPy RNGs and prints the resolved options.
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser(description="args")
        # --- training-loop options ---
        self.parser.add_argument(
            "--batch_size", type=int, default=9, help="Batch size."
        )
        self.parser.add_argument("--seed", type=int, default=0, help="Random Seed.")
        self.parser.add_argument("--device", type=str, default="cuda")
        self.parser.add_argument(
            "--epochs_per_save",
            type=int,
            default=1,
            help="Number of epochs between saving the model.",
        )
        self.parser.add_argument(
            "--start_epoch", type=int, default=1, help="Epoch to start training"
        )
        self.parser.add_argument(
            "--num_epochs", type=int, default=6500, help="Number of epochs to train."
        )
        self.parser.add_argument(
            "--decay_after",
            type=float,
            default=2e5,
            help="Decay learning rate after n iterations.",
        )
        # --- audio / data options ---
        self.parser.add_argument(
            "--sample_rate",
            type=int,
            default=22050,
            help="Sampling rate of mel-spectrograms.",
        )
        self.parser.add_argument(
            "--speaker_A_id", type=str, default="VCC2SF3", help="Source speaker id."
        )
        self.parser.add_argument(
            "--speaker_B_id", type=str, default="VCC2SM3", help="Target speaker id."
        )
        self.parser.add_argument(
            "--origin_data_dir",
            type=str,
            default="vcc2018/vcc2018_training/",
            help="Directory containing origin dataset files.",
        )
        self.parser.add_argument(
            "--preprocessed_data_dir",
            type=str,
            default="vcc2018_preprocessed/vcc2018_training/",
            help="Directory containing preprocessed dataset files.",
        )
        self.parser.add_argument(
            "--pretrain_models",
            type=str,
            default="pretrain_models/",
            help="Directory containing pretrain models.",
        )
        self.parser.add_argument(
            "--infer_data_dir",
            type=str,
            default="sample/",
            help="Directory containing infer dataset files.",
        )
        self.parser.add_argument(
            "--output_data_dir",
            type=str,
            default="./converted_sound/",
            help="Directory containing output dataset files.",
        )
        # --- optimisation / loss options ---
        self.parser.add_argument(
            "--generator_lr",
            type=float,
            default=2e-4,
            help="Initial generator learning rate.",
        )
        self.parser.add_argument(
            "--discriminator_lr",
            type=float,
            default=1e-4,
            help="Initial discrminator learning rate.",
        )
        self.parser.add_argument(
            "--cycle_loss_lambda",
            type=float,
            default=10,
            help="Lambda value for cycle consistency loss.",
        )
        self.parser.add_argument(
            "--identity_loss_lambda",
            type=float,
            default=5,
            help="Lambda value for identity loss.",
        )
        self.parser.add_argument(
            "--num_frames", type=int, default=64, help="Num frames per training sample."
        )
        self.parser.add_argument(
            "--max_mask_len",
            type=int,
            default=32,
            help="Maximum length of mask for Mask-CycleGAN-VC.",
        )
        # NOTE(review): these defaults silently override several per-argument
        # defaults declared above (num_epochs 6500 -> 50, decay_after 2e5 ->
        # 1e4); confirm which set is intended.
        self.parser.set_defaults(
            batch_size=9, num_epochs=50, decay_after=1e4, start_epoch=1, num_frames=64
        )
    def parse_args(self):
        """Parse CLI args, seed the RNGs for reproducibility, and print them."""
        args = self.parser.parse_args()
        # Limit sources of nondeterministic behavior
        # NOTE(review): setting PYTHONHASHSEED after interpreter start does not
        # affect str hash randomization for this process — confirm intent.
        os.environ["PYTHONHASHSEED"] = str(args.seed)
        random.seed(args.seed)
        np.random.seed(args.seed)
        self.print_options(args)
        return args
    def print_options(self, args):
        """
        Function that prints current options
        Parameters
        ----------
        args : Namespace
            Arguments for models and model testing
        """
        message = ""
        message += "----------------- Options ---------------\n"
        for k, v in sorted(vars(args).items()):
            message += "{:>25}: {:<30}\n".format(str(k), str(v))
        message += "----------------- End -------------------"
        print(message)
|
423754
|
def retry_until(condition):
    """Return a wrapper that re-invokes ``request`` while ``condition`` holds.

    The returned callable calls ``request()`` repeatedly; whenever it raises,
    ``condition(exception)`` decides whether to try again. When the condition
    is false the exception propagates to the caller.

    Fix: the original implementation retried via self-recursion, which hits
    ``RecursionError`` after ~1000 retries; this version loops instead, and
    re-raises with a bare ``raise`` so the original traceback is preserved.
    """
    def retry(request):
        while True:
            try:
                return request()
            except Exception as exception:
                if not condition(exception):
                    raise
    return retry
def retry(max_retries):
    """Build a retrier that stops once ``max_retries`` attempts have failed.

    The retry condition ignores the exception itself and only counts calls:
    it keeps retrying until the attempt counter reaches ``max_retries``.
    """
    attempts = 0
    def _next_attempt():
        nonlocal attempts
        attempts += 1
        return attempts
    return retry_until(lambda _: _next_attempt() != max_retries)
|
423794
|
from pymtl import *
from lizard.model.hardware_model import HardwareModel, Result
from lizard.model.flmodel import FLModel
from lizard.util.rtl.cam import Entry
from lizard.bitutil import clog2, clog2nz
from lizard.bitutil.bit_struct_generator import *
class RandomReplacementCAMFL(FLModel):
    """Functional-level model of a content-addressable memory.

    Despite the name, replacement is implemented with a round-robin
    ``overwrite_counter`` that evicts entries in index order once the CAM
    is full.
    """
    @HardwareModel.validate
    def __init__(s, interface, nregs):
        super(RandomReplacementCAMFL, s).__init__(interface)
        Addr = Bits(clog2nz(nregs))
        Key = s.interface.Key
        Value = s.interface.Value
        s.Entry = Entry(Key, Value)
        # Model state: nregs entries plus the round-robin eviction pointer.
        s.state(
            entries=[s.Entry() for _ in range(nregs)],
            overwrite_counter=Addr(),
        )
        @s.model_method
        def read(key):
            # Search from the highest index down; the highest-index valid
            # match wins.
            for i in range(nregs - 1, -1, -1):
                entry = s.entries[i]
                if entry.key == key and entry.valid:
                    return Result(value=entry.value, valid=1)
            # Miss: valid=0; the value field is don't-care (entry 0's value).
            return Result(value=s.entries[0].value, valid=0)
        @s.model_method
        def write(key, remove, value):
            new = s.Entry()
            new.key = key
            new.value = value
            if remove:
                new.valid = 0
            else:
                new.valid = 1
            last_invalid = -1
            for i in range(nregs - 1, -1, -1):
                entry = s.entries[i]
                if entry.key == key and entry.valid:
                    # Key already present: overwrite (or invalidate) in place.
                    s.entries[i] = new
                    return
                if last_invalid == -1 and not entry.valid:
                    last_invalid = i
            if remove:
                # Removing a key that is not present is a no-op.
                return
            if last_invalid != -1:
                # Fill the lowest-index invalid slot found during the scan.
                s.entries[last_invalid] = new
            else:
                # All entries valid: evict at the round-robin counter, then
                # advance it with wrap-around.
                i = s.overwrite_counter
                s.entries[int(i)] = new
                if i == nregs - 1:
                    s.overwrite_counter = 0
                else:
                    s.overwrite_counter = i + 1
        @s.model_method
        def clear():
            # Reset every slot to a fresh (invalid) Entry.
            for i in range(len(s.entries)):
                s.entries[i] = s.Entry()
|
423831
|
import torch
import cv2
import numpy as np
import torch.backends.cudnn as cudnn
import os
from tqdm import tqdm
from skimage import io
from net.models import deeplabv3plus
from dataset.my_datasets import MyGenDataSet
from torch.utils import data
def generate_mode_seg0(dataloader, model, path):
    """Run 6-way test-time-augmented segmentation and save binary masks.

    For every image the model is applied to the original plus three 90-degree
    rotations and horizontal/vertical flips; each prediction is mapped back to
    the original orientation, summed, soft-maxed, and the argmax mask is saved
    to ``path`` (values 0/255) under the sample's file name.
    """
    model.eval()
    for batch in tqdm(dataloader):
        image, name = batch
        image = image.cuda()
        # Stack the six augmented views into one batch.
        views = [
            image,
            torch.rot90(image, 1, [2, 3]),
            torch.rot90(image, 2, [2, 3]),
            torch.rot90(image, 3, [2, 3]),
            torch.flip(image, [-1]),
            torch.flip(image, [-2]),
        ]
        stacked = torch.cat(views, dim=0)
        with torch.no_grad():
            out = model(stacked)
        # Undo each augmentation on the logits and merge by summation.
        merged = (out[0:1]
                  + torch.rot90(out[1:2], 3, [2, 3])
                  + torch.rot90(out[2:3], 2, [2, 3])
                  + torch.rot90(out[3:4], 1, [2, 3])
                  + torch.flip(out[4:5], [-1])
                  + torch.flip(out[5:6], [-2]))
        probs = torch.softmax(merged, dim=1).cpu().data.numpy()
        mask = np.int16(np.argmax(probs[0], axis=0))
        io.imsave(os.path.join(path, name[0]), np.int64(mask) * 255)
    return True
def generate_mode_seg1(dataloader, model, path):
    """Same 6-way TTA segmentation as ``generate_mode_seg0`` (mode-1 loader),
    but the predicted mask is resized back to the original image size before
    saving.
    """
    for index, batch in tqdm(enumerate(dataloader)):
        image_ori, image, name = batch
        image = image.cuda()
        # print(name)
        # Build the six augmented views: 3 rotations + 2 flips + original.
        rot_90 = torch.rot90(image, 1, [2, 3])
        rot_180 = torch.rot90(image, 2, [2, 3])
        rot_270 = torch.rot90(image, 3, [2, 3])
        hor_flip = torch.flip(image, [-1])
        ver_flip = torch.flip(image, [-2])
        image = torch.cat([image, rot_90, rot_180, rot_270, hor_flip, ver_flip], dim=0)
        model.eval()
        with torch.no_grad():
            pred = model(image)
        # Undo each augmentation on the logits and sum the six predictions.
        pred = pred[0:1] + torch.rot90(pred[1:2], 3, [2, 3]) + torch.rot90(pred[2:3], 2, [2, 3]) + torch.rot90(pred[3:4], 1, [2, 3]) + torch.flip(pred[4:5], [-1]) + torch.flip(pred[5:6], [-2])
        pred = torch.softmax(pred, dim=1).cpu().data.numpy()
        pred_arg = np.int16(np.argmax(pred[0], axis=0))
        # assumes image_ori is (N, H, W, C) so shape[2]=W, shape[1]=H — TODO confirm
        pred_arg = cv2.resize(pred_arg, (image_ori.shape[2], image_ori.shape[1]), interpolation=cv2.INTER_NEAREST)
        io.imsave(os.path.join(path, name[0]), np.int64(pred_arg) * 255)
    return True
########################### Load coarse segmentation network.
cudnn.enabled = True
model = deeplabv3plus(num_classes=2)
model.cuda()
model = torch.nn.DataParallel(model)
pretrained_dict = torch.load('models/DR_CoarseSN/CoarseSN.pth')
model.load_state_dict(pretrained_dict)
model.eval()
model.float()


def _generate_split(data_root, data_list, mode, out_path):
    """Build the loader for one data split and write its coarse masks.

    mode 0 -> generate_mode_seg0 (fixed-size masks, for MaskCN);
    mode 1 -> generate_mode_seg1 (masks resized back, for EnhancedSN).
    """
    loader = data.DataLoader(
        MyGenDataSet(data_root, data_list, mode=mode),
        batch_size=1, shuffle=False, num_workers=8, pin_memory=True)
    os.makedirs(out_path, exist_ok=True)
    if mode == 0:
        generate_mode_seg0(loader, model, out_path)
    else:
        generate_mode_seg1(loader, model, out_path)


# (data_root, data_list, loader mode, output dir) — one entry per split,
# replacing four near-identical copy-pasted blocks.
for _root, _list, _mode, _out in [
    # Coarse_masks for MaskCN: Training / Validation ('Testing')
    ('dataset/cls_data/Training_Add_resize_crop_cls/',
     'dataset/ISIC/Training_Add_cls.txt', 0, 'Coarse_masks/Training_MaskCN/'),
    ('dataset/cls_data/Validation_resize_crop9_cls/',
     'dataset/ISIC/Validation_crop9_cls.txt', 0, 'Coarse_masks/Validation_MaskCN/'),
    # Coarse_masks for EnhancedSN: Training / Validation ('Testing')
    ('dataset/seg_data/Training_resize_seg/',
     'dataset/ISIC/Training_seg.txt', 1, 'Coarse_masks/Training_EnhancedSN/'),
    ('dataset/seg_data/ISIC-2017_Validation_Data/',
     'dataset/ISIC/Validation_seg.txt', 1, 'Coarse_masks/Validation_EnhancedSN/'),
]:
    _generate_split(_root, _list, _mode, _out)
|
423838
|
from adventofcode.year_2020.day_06_2020 import part_one, part_two
# AoC 2020 day 6 sample input: customs-declaration answers, one person per
# line, groups separated by blank lines (expected: 11 for part one, 6 for
# part two).
test_input = [
    'abc',
    '',
    'a',
    'b',
    'c',
    '',
    'ab',
    'ac',
    '',
    'a',
    'a',
    'a',
    'a',
    '',
    'b',
]
def test_part_one():
    """Part one: questions *anyone* in each group answered — 11 in total."""
    assert part_one(test_input) == 11
def test_part_two():
    """Part two: questions *everyone* in each group answered — 6 in total."""
    assert part_two(test_input) == 6
|
423850
|
import FWCore.ParameterSet.Config as cms
# CMSSW job configuration: run the HcalHPDFilter over cosmic RECO data and
# keep only the events that pass, writing them to hpd_filtered.root.
process = cms.Process("Test")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
# Cap the job at 1000 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1000)
)
process.source = cms.Source("PoolSource",
    # fileNames = cms.untracked.vstring('/store/data/CRUZET3/Cosmics/RECO/CRUZET3_V5_v7/0004/12C27642-1362-DD11-825B-000423D6A6F4.root')
    fileNames = cms.untracked.vstring('/store/data/Commissioning08/Cosmics/RECO/CRUZET4_v1/000/058/738/FE34639D-4273-DD11-8EBC-0019DB29C614.root')
)
# The filter module under test.
process.myFilter = cms.EDFilter("HcalHPDFilter")
process.Out = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('keep *'),
    # Only store events accepted by path 'p'.
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('p')
    ),
    fileName = cms.untracked.string('hpd_filtered.root')
)
process.p = cms.Path(process.myFilter)
process.outpath = cms.EndPath(process.Out)
|
423894
|
import unittest
from biicode.common.model.symbolic.block_version import BlockVersion
from biicode.common.model.brl.brl_block import BRLBlock
from biicode.common.deps.block_version_graph import BlockVersionGraph
class BlockVersionGraphTest(unittest.TestCase):
    """Tests for BlockVersionGraph.collision().

    collision() apparently returns a BlockVersionGraph of colliding nodes; an
    empty graph is falsy, hence the assertFalse checks below.
    NOTE(review): methods use a nose-style ``*_test`` suffix; the default
    unittest loader only collects methods prefixed ``test`` — confirm the
    intended runner.
    """
    def empty_test(self):
        '''When one is empty, it should always be compatible'''
        g1 = BlockVersionGraph()
        g2 = BlockVersionGraph()
        self.assertFalse(g1.collision(g2))
        brl0 = BRLBlock('user/user/block/master')
        brl1 = BRLBlock('user/user/block2/master')
        v0 = BlockVersion(brl0, 0)
        v1 = BlockVersion(brl1, 1)
        g1.add_node(v0)
        self.assertFalse(g1.collision(g2))
        self.assertFalse(g2.collision(g1))
        g1.add_node(v1)
        self.assertFalse(g1.collision(g2))
        self.assertFalse(g2.collision(g1))
    def compatible_test(self):
        '''both not empty, but compatible'''
        brl0 = BRLBlock('user/user/block/master')
        brl1 = BRLBlock('user/user/block2/master')
        v0 = BlockVersion(brl0, 0)
        v1 = BlockVersion(brl1, 1)
        g1 = BlockVersionGraph()
        g2 = BlockVersionGraph()
        g1.add_node(v0)
        g2.add_node(v1)
        self.assertFalse(g1.collision(g2))
        self.assertFalse(g2.collision(g1))
        g1.add_node(v1)
        g2.add_node(v0)
        self.assertFalse(g1.collision(g2))
        self.assertFalse(g2.collision(g1))
    def incompatible_test(self):
        '''both not empty, and incompatible'''
        # Same blocks at different versions must collide.
        brl0 = BRLBlock('user/user/block/master')
        brl1 = BRLBlock('user/user/block2/master')
        v0 = BlockVersion(brl0, 0)
        v1 = BlockVersion(brl1, 1)
        v2 = BlockVersion(brl1, 0)
        v3 = BlockVersion(brl0, 1)
        g1 = BlockVersionGraph()
        g2 = BlockVersionGraph()
        g1.add_node(v0)
        g2.add_node(v3)
        self.assertTrue(g1.collision(g2))
        self.assertTrue(g2.collision(g1))
        g1 = BlockVersionGraph()
        g2 = BlockVersionGraph()
        g1.add_nodes([v0, v1])
        g2.add_nodes([v2, v3])
        self.assertTrue(g1.collision(g2))
        self.assertTrue(g2.collision(g1))
    def disjoints_graphs_no_collisions_test(self):
        # Disjoint node sets: the collision graph is empty in both directions.
        g1 = BlockVersionGraph()
        g2 = BlockVersionGraph()
        self.assertEqual(BlockVersionGraph(), g1.collision(g2))
        self.assertEqual(BlockVersionGraph(), g2.collision(g1))
        brl0 = BRLBlock('user/user/block/master')
        brl1 = BRLBlock('user/user/block2/master')
        v0 = BlockVersion(brl0, 0)
        v1 = BlockVersion(brl1, 1)
        g1.add_node(v0)
        g2.add_node(v1)
        self.assertEqual(BlockVersionGraph(), g1.collision(g2))
        self.assertEqual(BlockVersionGraph(), g2.collision(g1))
    def connected_graphs_no_collisions_test(self):
        # Sharing the *same* version of a block is not a collision.
        g1 = BlockVersionGraph()
        g2 = BlockVersionGraph()
        brl0 = BRLBlock('user/user/block/master')
        brl1 = BRLBlock('user/user/block2/master')
        v0 = BlockVersion(brl0, 0)
        v1 = BlockVersion(brl1, 1)
        g1.add_node(v0)
        g2.add_node(v0)
        self.assertEqual(BlockVersionGraph(), g1.collision(g2))
        self.assertEqual(BlockVersionGraph(), g2.collision(g1))
        g1.add_node(v1)
        self.assertEqual(BlockVersionGraph(), g1.collision(g2))
        self.assertEqual(BlockVersionGraph(), g2.collision(g1))
    def simple_collisions_test(self):
        # Two versions of the same block: both appear in the collision graph.
        g1 = BlockVersionGraph()
        g2 = BlockVersionGraph()
        brl0 = BRLBlock('user/user/block/master')
        v0 = BlockVersion(brl0, 0)
        v1 = BlockVersion(brl0, 1)
        g1.add_node(v0)
        g2.add_node(v1)
        expected = BlockVersionGraph()
        expected.add_nodes([v0, v1])
        self.assertEqual(expected, g1.collision(g2))
        self.assertEqual(expected, g2.collision(g1))
    def diamond_collisions_test(self):
        # A diamond dependency where A appears at two versions: the collision
        # graph contains both A versions plus the paths that reach them.
        g1 = BlockVersionGraph()
        g2 = BlockVersionGraph()
        brlA = BRLBlock('user/user/blockA/master')
        brlB = BRLBlock('user/user/blockB/master')
        brlC = BRLBlock('user/user/blockC/master')
        brlD = BRLBlock('user/user/blockD/master')
        brlE = BRLBlock('user/user/blockE/master')
        brlF = BRLBlock('user/user/blockF/master')
        vA0 = BlockVersion(brlA, 0)
        vA1 = BlockVersion(brlA, 1)
        vB = BlockVersion(brlB, 0)
        vC = BlockVersion(brlC, 1)
        vD = BlockVersion(brlD, 0)
        vE = BlockVersion(brlE, 3)
        vF = BlockVersion(brlF, 13)
        g1.add_nodes([vA0, vB, vD, vF, vE])
        g1.add_edge(vB, vA0)
        g1.add_edge(vD, vB)
        g1.add_edge(vA0, vE)
        g2.add_nodes([vA1, vC, vD, vE])
        g2.add_edge(vC, vA1)
        g2.add_edge(vD, vC)
        g2.add_edge(vA1, vE)
        expected = BlockVersionGraph()
        expected.add_nodes([vA0, vA1, vB, vC, vD])
        expected.add_edge(vC, vA1)
        expected.add_edge(vD, vC)
        expected.add_edge(vD, vB)
        expected.add_edge(vB, vA0)
        self.assertEqual(expected, g1.collision(g2))
        self.assertEqual(expected, g2.collision(g1))
|
423900
|
import time
print "Multiply"
# Interactive two-number multiplier (Python 2: print statement, raw_input).
def mult(x, y):
    """Return x * y after a short delay, narrating the steps."""
    print "Hmmm..."
    time.sleep(3)  # Wait 3 seconds
    print "Multiplying %s and %s" % (x, y)  # Print two values - requires brackets around x and y.
    result = x * y
    return result
# Read the two operands from stdin and convert them to integers.
a = raw_input("First number: ")
a = int(a)
b = raw_input("Second number: ")
b = int(b)
result = mult(a, b)
print "Result: %s" % result
|
423994
|
import subprocess
import socket
import time
def pytest_funcarg__echoserver(request):
    """Session-scoped funcarg: run the echo server script as a subprocess.

    NOTE(review): the ``pytest_funcarg__`` prefix and ``request.cached_setup``
    are the legacy funcarg API (removed in pytest 4); modern code would use
    ``@pytest.fixture(scope="session")``.
    """
    def setup():
        p = subprocess.Popen(
            ['python3', '12_11_echo_server.py'])
        # Crude startup synchronisation: give the server a second to listen.
        time.sleep(1)
        return p
    def cleanup(p):
        p.terminate()
    return request.cached_setup(
        setup=setup,
        teardown=cleanup,
        scope="session")
def pytest_funcarg__clientsocket(request):
    """Per-test funcarg: a TCP socket connected to the local echo server."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 1028))
    # Close the connection when the requesting test finishes.
    request.addfinalizer(sock.close)
    return sock
def test_echo(echoserver, clientsocket):
clientsocket.send(b"abc")
assert clientsocket.recv(3) == b'abc'
def test_echo2(echoserver, clientsocket):
clientsocket.send(b"def")
assert clientsocket.recv(3) == b'def'
|
424001
|
import slack
from djangoProject.settings import SLACK_TOKEN
def send_okr_message(array):
name = array[0]
date_time = array[1]
key_result = array[2]
time_spent = array[3]
objective = array[4]
update = array[5]
image = array[6]
slack_id = array[7]
message = {
'channel': '#okrs',
"blocks": [
{
"type": "divider"
},
{
"type": "header",
"text": {
"type": "plain_text",
"text": name + " added a new entry to OKR table :okr:",
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*Objective: * " + objective + "\n*Key Result : * " + key_result + "\n*Date : * " +str(date_time) + "\n*Time Spent: * " + str(time_spent) + "\n*Update : * " + update
},
"accessory": {
"type": "image",
"image_url": image,
"alt_text": name
}
},
{
"type": "context",
"elements": [
{
"text": f":wkc-badge1: <https://sushiksha.konkanischolarship.com/okr/|Sushiksha OKR> | <@{slack_id}>",
"type": "mrkdwn"
}
]
}
]
}
client_obj = slack.WebClient(token=SLACK_TOKEN)
client_obj.chat_postMessage(**message)
print('Slack message sent')
|
424043
|
from vega_lite_linter import Lint
import json
# with open('./vega_lite_linter/test/multiple/test9.json') as json_file:
# demo = json.load(json_file)
# print(demo)
# Vega-Lite spec used as the linting demo: a bar mark with two quantitative
# positional channels plus an ordinal size channel.
demo = {
    "data": {
        "url": "data/cars.json"
    },
    "mark": "bar",
    "encoding": {
        "x": {
            "field": "Horsepower",
            "type": "quantitative"
        },
        "y": {
            "field": "Miles_per_Gallon",
            "type": "quantitative"
        },
        "size": {
            "field": "Cylinders",
            "type": "ordinal"
        }
    }
}
lint = Lint(demo)
# Report which lint rules the spec violates.
result = lint.lint()
print('lint rules: ', '-'*20, len(result))
print(result)
# Ask the linter for suggested fixes and dump them rule by rule.
fix = lint.fix()
print('fix rules: ', '-'*20)
for key in fix:
    print('---- ', key, fix[key])
# if fix['fixable']:
#     newvl = fix['optimize_spec']
#     new_lint = Lint(newvl)
#     new_result = new_lint.lint()
#     print('new lint rules: ', '-'*20)
#     print(new_result)
#     new_fix = new_lint.fix()
#     for key in new_fix:
#         print('---- ', key, new_fix[key])
#     if new_fix['fixable']:
#         newvl1 = new_fix['optimize_spec']
#         new_lint1 = Lint(newvl1)
#         new_result1 = new_lint1.lint()
#         print('new lint rules: ', '-'*20)
#         print(new_result1)
|
424082
|
from ir_measures import measures
from .base import Measure, ParamInfo, SumAgg
class _NumRel(measures.Measure):
    """
    The number of relevant documents the query has (independent of what the system retrieved).
    """
    __name__ = 'NumRel'
    NAME = __name__
    SUPPORTED_PARAMS = {
        'rel': measures.ParamInfo(dtype=int, default=1, desc='minimum relevance score to be counted (inclusive)')
    }
    def aggregator(self):
        # Per-query counts are combined across queries by summation.
        return SumAgg()
# Singleton instance, registered with the global measure registry.
NumRel = _NumRel()
measures.register(NumRel)
|
424102
|
from paypalrestsdk import BillingPlan, ResourceNotFound
import logging
logging.basicConfig(level=logging.INFO)
try:
    # Look up an existing billing plan by its PayPal plan id.
    billing_plan = BillingPlan.find("P-0NJ10521L3680291SOAQIVT")
    print("Got Billing Plan Details for Billing Plan[%s]" % (billing_plan.id))
    if billing_plan.activate():
        # Re-fetch so the printed state reflects the server-side change.
        billing_plan = BillingPlan.find("P-0NJ10521L3680291SOAQIVT")
        print("Billing Plan [%s] state changed to [%s]" %
              (billing_plan.id, billing_plan.state))
    else:
        print(billing_plan.error)
except ResourceNotFound as error:
    print("Billing Plan Not Found")
|
424141
|
import datetime
import pytest
from django.contrib.sites.models import Site
from django.utils import timezone
from jcasts.episodes.factories import AudioLogFactory, BookmarkFactory, EpisodeFactory
from jcasts.episodes.models import AudioLog, Bookmark, Episode
from jcasts.podcasts.factories import SubscriptionFactory
from jcasts.podcasts.models import Podcast
class TestEpisodeManager:
    """Tests for the Episode manager/queryset helpers: next/previous episode
    navigation, recommendations, per-user current-time annotations and search.
    """
    def test_get_next_episode_if_none(self, episode):
        assert Episode.objects.get_next_episode(episode) is None
    def test_get_previous_episode_if_none(self, episode):
        assert Episode.objects.get_previous_episode(episode) is None
    def test_get_next_episode(self, episode):
        next_episode = EpisodeFactory(
            podcast=episode.podcast,
            pub_date=episode.pub_date + datetime.timedelta(days=2),
        )
        assert Episode.objects.get_next_episode(episode) == next_episode
    def test_get_next_episode_not_same_podcast(self, episode):
        # Later episode of a *different* podcast must not count as "next".
        EpisodeFactory(
            pub_date=episode.pub_date + datetime.timedelta(days=2),
        )
        assert Episode.objects.get_next_episode(episode) is None
    def test_get_next_episode_before_current(self, episode):
        EpisodeFactory(
            podcast=episode.podcast,
            pub_date=episode.pub_date - datetime.timedelta(days=2),
        )
        assert Episode.objects.get_next_episode(episode) is None
    def test_get_previous_episode(self, episode):
        previous_episode = EpisodeFactory(
            podcast=episode.podcast,
            pub_date=episode.pub_date - datetime.timedelta(days=2),
        )
        assert Episode.objects.get_previous_episode(episode) == previous_episode
    def test_get_previous_not_same_podcast(self, episode):
        EpisodeFactory(
            pub_date=episode.pub_date + datetime.timedelta(days=2),
        )
        assert Episode.objects.get_previous_episode(episode) is None
    def test_get_previous_episode_after_current(self, episode):
        EpisodeFactory(
            podcast=episode.podcast,
            pub_date=episode.pub_date + datetime.timedelta(days=2),
        )
        assert Episode.objects.get_previous_episode(episode) is None
    def test_recommended_no_follows(self, db, user):
        assert Episode.objects.recommended(user).count() == 0
    def test_recommended(self, db, user):
        podcast = SubscriptionFactory(user=user).podcast
        # ok
        first = EpisodeFactory(podcast=podcast)
        # not following
        EpisodeFactory()
        # listened
        AudioLogFactory(episode__podcast=podcast, user=user)
        # favorite
        BookmarkFactory(episode__podcast=podcast, user=user)
        # trailer
        EpisodeFactory(podcast=podcast, episode_type="trailer")
        # too old
        EpisodeFactory(
            podcast=podcast, pub_date=timezone.now() - datetime.timedelta(days=30)
        )
        episodes = Episode.objects.recommended(user)
        assert episodes.count() == 1
        assert episodes.first() == first
    def test_with_current_time_if_anonymous(self, db, anonymous_user):
        EpisodeFactory()
        episode = Episode.objects.with_current_time(anonymous_user).first()
        assert episode.current_time == 0
        assert not episode.completed
        assert not episode.listened
    def test_with_current_time_if_not_played(self, user):
        EpisodeFactory()
        episode = Episode.objects.with_current_time(user).first()
        assert not episode.current_time
        assert not episode.completed
        assert not episode.listened
    def test_with_current_time_if_played(self, user):
        log = AudioLogFactory(user=user, current_time=20, updated=timezone.now())
        episode = Episode.objects.with_current_time(user).first()
        assert episode.current_time == 20
        assert not episode.completed
        assert episode.listened == log.updated
    def test_with_current_time_if_completed(self, user):
        log = AudioLogFactory(
            user=user,
            current_time=20,
            completed=timezone.now(),
            updated=timezone.now(),
        )
        episode = Episode.objects.with_current_time(user).first()
        assert episode.current_time == 20
        assert episode.completed
        assert episode.listened == log.updated
    def test_search(self, db):
        EpisodeFactory(title="testing")
        assert Episode.objects.search("testing").count() == 1
class TestEpisodeModel:
    """Tests for Episode instance behaviour: links, explicit flags, slugs,
    duration parsing, play-progress properties, metadata and bookmarks.
    """
    link = "https://example.com"
    def test_get_link_if_episode(self):
        assert Episode(link=self.link).get_link() == self.link
    def test_get_link_if_podcast(self):
        # Falls back to the podcast's link when the episode has none.
        assert (
            Episode(link=None, podcast=Podcast(link=self.link)).get_link() == self.link
        )
    def test_get_link_if_none(self):
        assert Episode(link=None, podcast=Podcast(link=None)).get_link() is None
    def test_episode_explicit(self):
        assert Episode(explicit=True).is_explicit() is True
    def test_podcast_explicit(self):
        # The podcast-level explicit flag propagates to episodes.
        assert (
            Episode(explicit=False, podcast=Podcast(explicit=True)).is_explicit()
            is True
        )
    def test_not_explicit(self):
        assert (
            Episode(explicit=False, podcast=Podcast(explicit=False)).is_explicit()
            is False
        )
    def test_slug(self):
        episode = Episode(title="Testing")
        assert episode.slug == "testing"
    def test_slug_if_title_empty(self):
        assert Episode().slug == "no-title"
    def test_get_media_url_ext(self):
        assert (
            Episode(
                media_url="https://thegrognardfiles.com/wp-content/uploads/2021/08/Episode-50-Part-1-Fighting-Fantasy-with-Ian-Livingstone-27_08_2021-23.58.mp3"
            ).get_media_url_ext()
            == "mp3"
        )
    def test_time_remaining(self):
        episode = Episode(duration="1:00:00")
        episode.current_time = 1200
        assert episode.time_remaining == 2400
    def test_time_remaining_if_no_duration(self):
        episode = Episode(duration="")
        episode.current_time = 1200
        assert episode.time_remaining == 0
    def test_time_remaining_current_time_none(self):
        episode = Episode(duration="1:00:00")
        episode.current_time = None
        assert episode.time_remaining == 3600
    def test_time_remaining_current_time_not_set(self):
        # Accessing time_remaining without a current_time annotation asserts.
        episode = Episode(duration="1:00:00")
        with pytest.raises(AssertionError):
            episode.time_remaining
    def test_duration_in_seconds_hours_minutes_seconds(self):
        assert Episode(duration="2:30:40").duration_in_seconds == 9040
    def test_duration_in_seconds_hours_minutes_seconds_extra_digit(self):
        # Segments beyond H:M:S are ignored.
        assert Episode(duration="2:30:40:2903903").duration_in_seconds == 9040
    def test_duration_in_seconds_minutes_seconds(self):
        assert Episode(duration="30:40").duration_in_seconds == 1840
    def test_duration_in_seconds_seconds_only(self):
        assert Episode(duration="40").duration_in_seconds == 40
    def test_get_duration_in_seconds_if_empty(self):
        assert Episode(duration="").duration_in_seconds == 0
    def test_duration_in_seconds_if_non_numeric(self):
        assert Episode(duration="NaN").duration_in_seconds == 0
    def test_duration_in_seconds_if_seconds_only(self):
        assert Episode(duration="60").duration_in_seconds == 60
    def test_duration_in_seconds_if_minutes_and_seconds(self):
        assert Episode(duration="2:30").duration_in_seconds == 150
    def test_duration_in_seconds_if_hours_minutes_and_seconds(self):
        assert Episode(duration="2:30:30").duration_in_seconds == 9030
    def test_is_completed_if_not_set(self, episode):
        with pytest.raises(AssertionError):
            episode.is_completed
    def test_is_completed_if_marked_complete(self, user, episode):
        AudioLogFactory(
            user=user,
            current_time=50,
            updated=timezone.now(),
            completed=timezone.now(),
            episode=episode,
        )
        assert Episode.objects.with_current_time(user).first().is_completed
    def test_pc_complete_if_duration_none(self, user):
        episode = EpisodeFactory(duration="")
        AudioLogFactory(
            user=user,
            current_time=50,
            updated=timezone.now(),
            episode=episode,
        )
        assert not Episode.objects.with_current_time(user).first().is_completed
    def test_is_completed_if_pc_complete_under_100(self, user, episode):
        AudioLogFactory(
            user=user,
            current_time=50,
            updated=timezone.now(),
            episode=episode,
        )
        assert not Episode.objects.with_current_time(user).first().is_completed
    def test_is_completed_if_pc_complete_over_100(self, user, episode):
        AudioLogFactory(
            user=user,
            current_time=100,
            updated=timezone.now(),
            episode=episode,
        )
        assert Episode.objects.with_current_time(user).first().is_completed
    def test_pc_complete_without_current_time_attr(self, user, episode):
        AudioLogFactory(
            user=user,
            current_time=50,
            updated=timezone.now(),
            episode=episode,
        )
        with pytest.raises(AssertionError):
            Episode.objects.first().pc_complete
    def test_pc_complete(self, user, episode):
        AudioLogFactory(
            user=user,
            current_time=50,
            updated=timezone.now(),
            episode=episode,
        )
        assert Episode.objects.with_current_time(user).first().pc_complete == 50
    def test_pc_complete_zero_current_time(self, user, episode):
        AudioLogFactory(
            user=user,
            current_time=0,
            updated=timezone.now(),
            episode=episode,
        )
        assert Episode.objects.with_current_time(user).first().pc_complete == 0
    def test_pc_complete_zero_duration(self, user, episode):
        AudioLogFactory(
            user=user,
            current_time=0,
            updated=timezone.now(),
            episode=EpisodeFactory(duration=""),
        )
        assert Episode.objects.with_current_time(user).first().pc_complete == 0
    def test_pc_complete_gt_100(self, user, episode):
        # Progress is capped at 100%.
        AudioLogFactory(
            user=user,
            current_time=120,
            updated=timezone.now(),
            episode=episode,
        )
        assert Episode.objects.with_current_time(user).first().pc_complete == 100
    def test_pc_complete_marked_complete(self, user, episode):
        now = timezone.now()
        AudioLogFactory(
            user=user,
            current_time=50,
            updated=now,
            completed=now,
            episode=episode,
        )
        assert Episode.objects.with_current_time(user).first().pc_complete == 100
    def test_pc_complete_not_played(self, user, episode):
        assert Episode.objects.with_current_time(user).first().pc_complete == 0
    def test_pc_complete_anonymous(self, anonymous_user, episode):
        AudioLogFactory(
            current_time=50,
            updated=timezone.now(),
            episode=episode,
        )
        assert (
            Episode.objects.with_current_time(anonymous_user).first().pc_complete == 0
        )
    def test_str(self):
        assert str(Episode(title="testing")) == "testing"
    def test_str_no_title(self):
        # Falls back to the GUID when there is no title.
        episode = Episode(title="", guid="abc123")
        assert str(episode) == episode.guid
    def test_cleaned_title(self):
        episode = Episode(title="<b>Test & Code")
        assert episode.cleaned_title == "Test & Code"
    def test_cleaned_description(self):
        episode = Episode(description="<b>Test & Code")
        assert episode.cleaned_description == "Test & Code"
    def test_get_file_size(self):
        assert Episode(length=500).get_file_size() == "500\xa0bytes"
    def test_get_file_size_if_none(self):
        assert Episode(length=None).get_file_size() is None
    def test_get_media_metadata(self, db):
        cover_url = "https://www.omnycontent.com/d/playlist/aaea4e69-af51-495e-afc9-a9760146922b/9b63d479-4382-4198-8e63-aac7013964ff/e5ebd302-9d49-4c56-a234-aac701396502/image.jpg?t=1568401263\u0026size=Large"
        episode = EpisodeFactory(podcast__cover_url=cover_url)
        data = episode.get_media_metadata()
        assert data["title"] == episode.title
        assert data["album"] == episode.podcast.title
        assert data["artist"] == episode.podcast.owner
        assert data["artwork"][0] == {
            "src": cover_url,
            "sizes": "96x96",
            "type": "image/jpeg",
        }
    def test_get_cover_url_if_episode_cover(self, podcast):
        episode = EpisodeFactory(
            podcast=podcast, cover_url="https://example.com/episode-cover.jpg"
        )
        assert episode.get_cover_url() == "https://example.com/episode-cover.jpg"
    def test_get_cover_url_if_podcast_cover(self, episode):
        assert episode.get_cover_url() == "https://example.com/cover.jpg"
    def test_get_cover_url_if_none(self, db):
        episode = EpisodeFactory(podcast__cover_url=None)
        assert episode.get_cover_url() is None
    def test_get_opengraph_data(self, rf, episode):
        req = rf.get("/")
        req.site = Site.objects.get_current()
        data = episode.get_opengraph_data(req)
        assert episode.title in data["title"]
        assert data["url"] == "http://testserver" + episode.get_absolute_url()
    def test_is_bookmarked_anonymous(self, anonymous_user, episode):
        assert not episode.is_bookmarked(anonymous_user)
    def test_is_bookmarked_false(self, user, episode):
        assert not episode.is_bookmarked(user)
    def test_is_bookmarked_true(self, user, episode):
        fave = BookmarkFactory(user=user, episode=episode)
        assert fave.episode.is_bookmarked(fave.user)
    @pytest.mark.parametrize(
        "episode_type,number,season,expected",
        [
            ("full", None, None, ""),
            ("trailer", None, None, "Trailer"),
            ("trailer", 10, 3, "Trailer"),
            ("full", 10, 3, "Episode 10 Season 3"),
            ("full", 10, None, "Episode 10"),
            ("full", None, 3, "Season 3"),
        ],
    )
    def test_get_episode_metadata(self, episode_type, number, season, expected):
        assert (
            Episode(
                episode_type=episode_type,
                episode=number,
                season=season,
            ).get_episode_metadata()
            == expected
        )
class TestBookmarkManager:
    """Tests for Bookmark queryset full-text search."""
    def test_search(self, db):
        episode = EpisodeFactory(title="testing")
        BookmarkFactory(episode=episode)
        assert Bookmark.objects.search("testing").count() == 1
class TestAudioLogManager:
    """Tests for AudioLog queryset full-text search."""
    def test_search(self, db):
        episode = EpisodeFactory(title="testing")
        AudioLogFactory(episode=episode)
        assert AudioLog.objects.search("testing").count() == 1
|
424150
|
from inspect import signature
from functools import wraps
from typing import (
Any,
Union,
Callable,
Optional,
TypeVar,
Tuple,
Dict,
List,
Iterator,
overload,
)
from contextlib import contextmanager
from weakref import WeakValueDictionary
# Public API of this module.
__all__ = ["Model", "Control", "view", "unview", "views", "link", "unlink", "notifier"]
# An event is a plain dict of field -> value describing one model change.
Event = Dict[str, Any]
TupleOfEvents = Tuple[Event, ...]
# Signature of a view callback: (model, events) -> None.
ViewFunction = Callable[["Model", TupleOfEvents], None]
def views(model: "Model") -> List[ViewFunction]:
    """Return the view callbacks currently attached to *model* (as a copy).

    Model views are added by calling :func:`view` on a model.
    """
    if not isinstance(model, Model):
        raise TypeError("Expected a Model, not %r." % model)
    return list(model._model_views)
# TypeVar bound to ViewFunction so the decorator form returns its input type.
_F = TypeVar("_F", bound=ViewFunction)
@overload
def view(model: "Model") -> Callable[[_F], _F]:
    # Decorator-factory form: @view(model)
    ...
@overload
def view(model: "Model", function: ViewFunction) -> None:
    # Direct-call form: view(model, fn)
    ...
def view(
    model: "Model", function: Optional[ViewFunction] = None
) -> Optional[Callable[[_F], _F]]:
    """A decorator for registering a callback to a model

    Parameters:
        model: the model object whose changes the callback should respond to.
        function: the callback; when omitted, a decorator is returned instead.
    Examples:
        .. code-block:: python

            from spectate import mvc

            items = mvc.List()

            @mvc.view(items)
            def printer(items, events):
                for e in events:
                    print(e)

            items.append(1)
    """
    if not isinstance(model, Model):
        # BUG FIX: the message previously read "Expected a Model,
        # notself._model_notifier() %r." — a paste artifact; restored to the
        # same wording used by views().
        raise TypeError("Expected a Model, not %r." % model)
    def setup(function: _F) -> _F:
        model._attach_model_view(function)
        return function
    if function is not None:
        setup(function)
        return None
    else:
        return setup
def unview(model: "Model", function: ViewFunction) -> None:
    """Remove a view callback from a model.

    Parameters:
        model: The model which contains the view function.
        function: The callable which was registered to the model as a view.

    Raises:
        ValueError: If the given ``function`` is not a view of the given ``model``.
    """
    model._remove_model_view(function)
def link(source: "Model", *targets: "Model") -> None:
    """Share the source's present and future view functions with each target.

    Parameters:
        source: The model whose view functions will be attached to the targets.
        targets: The models that will acquire the source's view functions.
    """
    for target in targets:
        source._attach_child_model(target)
def unlink(source: "Model", *targets: "Model") -> None:
    """Stop sharing the source's present and future view functions with targets.

    Parameters:
        source: The model whose view functions will be removed from the targets.
        targets: The models that will no longer share view functions with the source.
    """
    for target in targets:
        source._remove_child_model(target)
@contextmanager
def notifier(model: "Model") -> Iterator[Callable[..., None]]:
    """Manually send notifications to the given model's views.

    Parameters:
        model: The model whose views will receive notifications.

    Returns:
        A function whose keyword arguments become event data. All events
        recorded inside the ``with`` block are delivered as a single batch
        when the block exits; if no events were recorded, nothing is sent.

    Example:
        .. code-block:: python

            m = Model()

            @view(m)
            def printer(m, events):
                for e in events:
                    print(e)

            with notifier(m) as notify:
                # the view should print out this event
                notify(x=1, y=2)
    """
    pending = []

    def notify(*args, **kwargs):
        pending.append(dict(*args, **kwargs))

    yield notify
    if pending:
        model._notify_model_views(tuple(pending))
class Control:
    """An object used to define control methods on a :class:`Model`

    A "control" method on a :class:`Model` is one which reacts to another method being
    called. For example there is a control method on the
    :class:`~spectate.mvc.models.List`
    which responds when :meth:`~spectate.mvc.models.List.append` is called.
    A control method is a slightly modified :ref:`beforeback <Control Beforebacks>` or
    :ref:`afterback <Control Afterbacks>` that accepts an extra ``notify`` argument.
    These are added to a control object by calling :meth:`Control.before` or
    :meth:`Control.after` respectively. The ``notify`` argument is a function which
    allows a control method to send messages to :func:`views <view>` that are registered
    to a :class:`Model`.

    Parameters:
        methods:
            The names of the methods on the model which this control will react to
            when they are called. This is either a comma separated
            string, or a list of strings.
        before:
            A control method that reacts before any of the given ``methods`` are
            called. If given as a callable, then that function will be used as the
            callback. If given as a string, then the control will look up a method
            with that name when reacting (useful when subclassing).
        after:
            A control method that reacts after any of the given ``methods`` are
            called. If given as a callable, then that function will be used as the
            callback. If given as a string, then the control will look up a method
            with that name when reacting (useful when subclassing).

    Examples:
        Control methods are registered to a :class:`Control` with a ``str`` or function.
        A string may refer to the name of a method on a `Model` while a function should
        be decorated under the same name as the :class:`Control` object to preserve the
        namespace.

        .. code-block:: python

            from spectate import mvc

            class X(mvc.Model):
                _control_method = mvc.Control("method").before("_control_before_method")

                def _control_before_method(self, call, notify):
                    print("before")

                # Note how the method uses the same name. It
                # would be redundant to use a different one.
                @_control_method.after
                def _control_method(self, answer, notify):
                    print("after")

                def method(self):
                    print("during")

            x = X()
            x.method()

        .. code-block:: text

            before
            during
            after
    """

    def __init__(
        self,
        methods: Union[list, tuple, str],
        *,
        before: Union[Callable, str] = None,
        after: Union[Callable, str] = None,
    ):
        # Normalize the target method names to a tuple of strings.
        if isinstance(methods, (list, tuple)):
            self.methods = tuple(methods)
        elif isinstance(methods, str):
            self.methods = tuple(map(str.strip, methods.split(",")))
        else:
            raise ValueError("methods must be a string or list of strings")
        # Filled in by __set_name__ when assigned in a Model subclass body.
        self.name = None
        # Chaining: another Control may be passed to reuse its callback.
        if isinstance(before, Control):
            before = before._before
        self._before = before
        if isinstance(after, Control):
            after = after._after
        self._after = after

    def __get__(self, obj, cls):
        # Descriptor protocol: instance access yields a control bound to it.
        if obj is None:
            return self
        else:
            return BoundControl(obj, self)

    def __set_name__(self, cls, name):
        # Invoked once when the Control is assigned on a class body; wraps
        # every named method so the before/after callbacks fire around it.
        if not issubclass(cls, Model):
            raise TypeError("Can only define a control on a Model, not %r" % cls)
        if self.name:
            # A single Control instance may only be bound under one name.
            msg = "Control was defined twice - %r and %r."
            raise RuntimeError(msg % (self.name, name))
        else:
            self.name = name
        for m in self.methods:
            setattr(cls, m, self._create_controlled_method(cls, m))

    def _create_controlled_method(self, cls, name):
        # Wrap the original method so the bound before/after callbacks run
        # around the real call; the beforeback's return value is forwarded
        # to the afterback via the "before" key.
        method = getattr(cls, name)

        @wraps(method)
        def wrapped_method(obj, *args, **kwargs):
            cls = type(obj)
            bound_control = self.__get__(obj, cls)
            before_control = bound_control.before
            if before_control is not None:
                before_value = before_control(
                    obj, {"name": name, "args": args, "kwargs": kwargs}
                )
            else:
                before_value = None
            result = method.__get__(obj, cls)(*args, **kwargs)
            after_control = bound_control.after
            if after_control is not None:
                after_control(
                    obj, {"before": before_value, "name": name, "value": result}
                )
            return result

        return wrapped_method
class BoundControl:
    """A :class:`Control` bound to a concrete model instance.

    Resolves the configured before/after callbacks against the instance and
    wraps them so they receive a ``notify`` function for emitting events.
    """

    def __init__(self, obj, ctrl):
        self._obj = obj
        self._cls = type(obj)
        self._name = ctrl.name
        self._before = ctrl._before
        self._after = ctrl._after
        self.methods = ctrl.methods

    @property
    def before(self):
        # Resolve the beforeback, or return None when there is none.
        if self._before is None:
            # Fall back to a conventionally named "<control>_before" method.
            method_name = self._name + "_before"
            if hasattr(self._obj, method_name):
                before = getattr(self._obj, method_name)
            else:
                return None
        else:
            before = self._before
        # A string names a method on the instance; a descriptor is bound to it.
        if isinstance(before, str):
            before = getattr(self._obj, before)
        elif hasattr(before, "__get__"):
            before = before.__get__(self._obj, type(self._obj))

        @wraps(before)
        def beforeback(value, call):
            def parameters():
                # Lazily bind call args/kwargs to the target method's signature.
                meth = getattr(value, call["name"])
                bound = signature(meth).bind(*call["args"], **call["kwargs"])
                return dict(bound.arguments)

            # Events recorded via notify are delivered when the block exits.
            with notifier(value) as notify:
                return before(dict(call, parameters=parameters), notify)

        return beforeback

    @property
    def after(self):
        # Resolve the afterback, or return None when there is none.
        # NOTE(review): unlike `before`, this does not fall back to a
        # "<control>_after" method on the instance -- confirm the asymmetry
        # is intentional.
        if self._after is None:
            return None
        else:
            after = self._after
        if isinstance(after, str):
            after = getattr(self._obj, after)
        elif hasattr(after, "__get__"):
            after = after.__get__(self._obj, type(self._obj))

        @wraps(after)
        def afterback(value, answer):
            with notifier(value) as notify:
                return after(answer, notify)

        return afterback
class Model:
    """An object that can be :class:`controlled <Control>` and :func:`viewed <view>`.

    Users should define :class:`Control` methods and then :func:`view` the change
    events those controls emit. This process starts by defining controls on a subclass
    of :class:`Model`.

    Examples:
        .. code-block:: python

            from spectate import mvc

            class Object(Model):

                _control_attr_change = Control(
                    "__setattr__, __delattr__",
                    before="_control_before_attr_change",
                    after="_control_after_attr_change",
                )

                def __init__(self, *args, **kwargs):
                    for k, v in dict(*args, **kwargs).items():
                        setattr(self, k, v)

                def _control_before_attr_change(self, call, notify):
                    return call["args"][0], getattr(self, call["args"][0], Undefined)

                def _control_after_attr_change(self, answer, notify):
                    attr, old = answer["before"]
                    new = getattr(self, attr, Undefined)
                    if new != old:
                        notify(attr=attr, old=old, new=new)

            o = Object()

            @mvc.view(o)
            def printer(o, events):
                for e in events:
                    print(e)
    """

    # View callbacks registered on this instance.
    _model_views: List[ViewFunction]
    # Child models (linked via link()); weak values so children can be GC'd.
    _inner_models: "WeakValueDictionary[int, Model]"

    def __new__(cls, *args: Any, **kwargs: Any) -> "Model":
        # Initialize bookkeeping in __new__ so subclasses need not call
        # super().__init__(); object.__setattr__ bypasses any __setattr__
        # control defined on the subclass.
        new = super().__new__
        if new is not object.__new__:
            self = new(cls, *args, **kwargs)  # type: ignore
        else:
            # object.__new__ rejects extra arguments; pass only the class.
            self = new(cls)
        object.__setattr__(self, "_model_views", [])
        object.__setattr__(self, "_inner_models", WeakValueDictionary())
        return self

    def _attach_child_model(self, model: "Model") -> None:
        # Track the child and copy over all currently registered views.
        self._inner_models[id(model)] = model
        for v in self._model_views:
            model._attach_model_view(v)

    def _remove_child_model(self, model: "Model") -> None:
        try:
            del self._inner_models[id(model)]
        except KeyError:
            # Not a child (or already collected) -- nothing to detach.
            pass
        else:
            for v in self._model_views:
                model._remove_model_view(v)

    def _attach_model_view(self, function: ViewFunction) -> None:
        # Register on this model and propagate to all linked children.
        self._model_views.append(function)
        for inner in self._inner_models.values():
            inner._attach_model_view(function)

    def _remove_model_view(self, function: ViewFunction) -> None:
        # Raises ValueError if the function was never attached.
        self._model_views.remove(function)
        for inner in self._inner_models.values():
            inner._remove_model_view(function)

    def _notify_model_views(self, events: TupleOfEvents):
        # Deliver one batch of events to every registered view.
        for view in self._model_views:
            view(self, events)
|
424170
|
from micromlgen import platforms
from micromlgen.svm import is_svm, port_svm
from micromlgen.rvm import is_rvm, port_rvm
from micromlgen.sefr import is_sefr, port_sefr
from micromlgen.decisiontree import is_decisiontree, port_decisiontree
from micromlgen.randomforest import is_randomforest, port_randomforest
from micromlgen.logisticregression import is_logisticregression, port_logisticregression
from micromlgen.gaussiannb import is_gaussiannb, port_gaussiannb
from micromlgen.pca import is_pca, port_pca
from micromlgen.principalfft import is_principalfft, port_principalfft
from micromlgen.linear_regression import is_linear_regression, port_linear_regression
from micromlgen.xgboost import is_xgboost, port_xgboost
def port(
    clf,
    classname=None,
    classmap=None,
    platform=platforms.ARDUINO,
    precision=None,
    **kwargs):
    """Port a classifier to plain C++

    Dispatches on the classifier type to the matching micromlgen porter.

    NOTE: `**locals()` forwards clf/classname/classmap/platform/precision AND
    the `kwargs` dict itself (as a keyword argument named `kwargs`) to each
    porter; the last three branches additionally expand `**kwargs`.

    Raises:
        AssertionError: if `platform` is not a known platform.
        TypeError: if `clf` is not a supported classifier type.
    """
    assert platform in platforms.ALL, 'Unknown platform %s. Use one of %s' % (platform, ', '.join(platforms.ALL))
    if is_svm(clf):
        return port_svm(**locals())
    elif is_rvm(clf):
        return port_rvm(**locals())
    elif is_sefr(clf):
        return port_sefr(**locals())
    elif is_decisiontree(clf):
        return port_decisiontree(**locals())
    elif is_randomforest(clf):
        return port_randomforest(**locals())
    elif is_logisticregression(clf):
        return port_logisticregression(**locals())
    elif is_gaussiannb(clf):
        return port_gaussiannb(**locals())
    elif is_pca(clf):
        return port_pca(**locals())
    elif is_principalfft(clf):
        return port_principalfft(**locals(), **kwargs)
    elif is_linear_regression(clf):
        return port_linear_regression(**locals(), **kwargs)
    elif is_xgboost(clf):
        return port_xgboost(**locals(), **kwargs)
    raise TypeError('clf MUST be one of %s' % ', '.join(platforms.ALLOWED_CLASSIFIERS))
|
424196
|
from typing import List
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """Rearrange ``nums`` into its next lexicographic permutation, in place.

        If ``nums`` is already the highest permutation, it wraps around to
        the lowest (ascending) order. Does not return anything.
        """
        size = len(nums)

        def flip(seq, lo, hi):
            # Reverse seq[lo:hi+1] in place.
            while lo < hi:
                seq[lo], seq[hi] = seq[hi], seq[lo]
                lo += 1
                hi -= 1

        # Find the pivot: rightmost index p with nums[p-1] < nums[p].
        pivot = 0
        for p in range(size - 1, 0, -1):
            if nums[p - 1] < nums[p]:
                pivot = p
                break

        if pivot == 0:
            # Entirely non-increasing: wrap to the smallest permutation.
            flip(nums, 0, size - 1)
        else:
            # Swap the pivot's predecessor with the rightmost larger element,
            # then reverse the (non-increasing) suffix to make it smallest.
            swap_at = 0
            for q in range(size - 1, -1, -1):
                if nums[q] > nums[pivot - 1]:
                    swap_at = q
                    break
            nums[pivot - 1], nums[swap_at] = nums[swap_at], nums[pivot - 1]
            flip(nums, pivot, size - 1)
|
424224
|
from tensorflow.keras.initializers import Initializer
from libspn_keras.initializers.dirichlet import Dirichlet
_DEFAULT_ACCUMULATOR_INITIALIZER = Dirichlet(alpha=1.0, axis=-2)
def set_default_accumulator_initializer(initializer: Initializer) -> None:
    """Override the initializer used by default for sum accumulators.

    Args:
        initializer: Initializer applied to sum accumulators that do not
            specify one explicitly.
    """
    global _DEFAULT_ACCUMULATOR_INITIALIZER
    _DEFAULT_ACCUMULATOR_INITIALIZER = initializer
def get_default_accumulator_initializer() -> Initializer:
    """Obtain the default accumulator initializer.

    Returns:
        The initializer used for sum accumulators unless one is specified
        explicitly at initialization.
    """
    # Reading a module global needs no `global` declaration.
    return _DEFAULT_ACCUMULATOR_INITIALIZER
|
424235
|
import tensorflow as tf
import config
import models
from input_data import AudioWrapper
from helper import Trainer, Evaluator
def train(args):
    """Build the model on the 'train' split and run the training loop.

    Args:
        args: parsed configuration namespace (see ``config.arg_config``).
    """
    is_training = True
    sess = tf.compat.v1.Session(config=config.TF_SESSION_CONFIG)
    data = AudioWrapper(args, 'train', is_training, sess)
    wavs, labels = data.get_input_and_output_op()
    net = models.__dict__[args.arch](args)
    net.build(wavs=wavs, labels=labels, is_training=is_training)
    Trainer(net, sess, args, data).train()
def evaluate(args):
    """Build the model on the configured dataset split and run evaluation.

    Args:
        args: parsed configuration namespace (see ``config.arg_config``).
    """
    is_training = False
    sess = tf.compat.v1.Session(config=config.TF_SESSION_CONFIG)
    data = AudioWrapper(args, args.dataset_name, is_training, sess)
    wavs, labels = data.get_input_and_output_op()
    net = models.__dict__[args.arch](args)
    net.build(wavs=wavs, labels=labels, is_training=is_training)
    Evaluator(net, sess, args, data).evaluate()
if __name__ == "__main__":
    # Entry point: dispatch to training or evaluation based on args.mod.
    args = config.arg_config()
    if args.mod == 'train':
        train(args)
    else:
        evaluate(args)
|
424244
|
from tensorflow import keras
from tensorflow.keras import layers
from databases import OmniglotDatabase
from models.lasiummamlgan.database_parsers import OmniglotParser
from models.lasiummamlgan.gan import GAN
from models.lasiummamlgan.maml_gan import MAMLGAN
from networks.maml_umtra_networks import SimpleModel
def get_generator(latent_dim):
    """Build the DCGAN-style generator for 28x28x1 images.

    Args:
        latent_dim: dimensionality of the input latent vector.

    Returns:
        A ``keras.Sequential`` model named "generator" (summary is printed).
    """
    stack = [
        keras.Input(shape=(latent_dim,)),
        # We want to generate 128 coefficients to reshape into a 7x7x128 map
        layers.Dense(7 * 7 * 128),
        layers.LeakyReLU(alpha=0.2),
        layers.Reshape((7, 7, 128)),
        layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
    ]
    generator = keras.Sequential(stack, name="generator")
    generator.summary()
    return generator
def get_discriminator():
    """Build the convolutional discriminator for 28x28x1 images.

    Returns:
        A ``keras.Sequential`` model named "discriminator" ending in a single
        logit (summary is printed).
    """
    stack = [
        keras.Input(shape=(28, 28, 1)),
        layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.GlobalMaxPooling2D(),
        layers.Dense(1),
    ]
    discriminator = keras.Sequential(stack, name="discriminator")
    discriminator.summary()
    return discriminator
if __name__ == '__main__':
    # Data: Omniglot with a fixed seed and 1200/100 train/val class split.
    omniglot_database = OmniglotDatabase(random_seed=47, num_train_classes=1200, num_val_classes=100)
    shape = (28, 28, 1)
    latent_dim = 128
    omniglot_generator = get_generator(latent_dim)
    omniglot_discriminator = get_discriminator()
    omniglot_parser = OmniglotParser(shape=shape)
    # GAN used to synthesize meta-learning tasks; training is skipped here in
    # favor of loading a pre-trained checkpoint below.
    gan = GAN(
        'omniglot',
        image_shape=shape,
        latent_dim=latent_dim,
        database=omniglot_database,
        parser=omniglot_parser,
        generator=omniglot_generator,
        discriminator=omniglot_discriminator,
        visualization_freq=50,
        d_learning_rate=0.0003,
        g_learning_rate=0.0003,
    )
    # gan.perform_training(epochs=49, checkpoint_freq=1)
    gan.load_latest_checkpoint(epoch_to_load_from='500')
    # MAML on top of GAN-generated tasks (5-way, 1-shot configuration).
    maml_gan = MAMLGAN(
        gan=gan,
        latent_dim=latent_dim,
        generated_image_shape=shape,
        database=omniglot_database,
        network_cls=SimpleModel,
        n=5,
        k_ml=1,
        k_val_ml=5,
        k_val=1,
        k_val_val=15,
        k_val_test=15,
        k_test=1,
        meta_batch_size=4,
        num_steps_ml=5,
        lr_inner_ml=0.4,
        num_steps_validation=5,
        save_after_iterations=1000,
        meta_learning_rate=0.001,
        report_validation_frequency=200,
        log_train_images_after_iteration=200,
        num_tasks_val=100,
        clip_gradients=False,
        epsilon=246.09375,
        experiment_name='omniglot_p1_0.5_epsilon_246.09375',
        val_seed=42,
        val_test_batch_norm_momentum=0.0
    )
    # Retained for reference: sweep over several GAN checkpoints to visualize
    # generated tasks without training.
    # for checkpoint in ('00', '10', '30', '50', '100', '200', '300', '400', '500'):
    #     gan.load_latest_checkpoint(epoch_to_load_from=checkpoint)
    #     import tensorflow as tf
    #     tf.random.set_seed(None)
    #     maml_gan.visualize_meta_learning_task(shape, num_tasks_to_visualize=1, checkpoint=checkpoint)
    # exit()
    print(maml_gan.epsilon)
    maml_gan.visualize_meta_learning_task(shape, num_tasks_to_visualize=1)
    maml_gan.train(iterations=1000)
    maml_gan.evaluate(50, num_tasks=1000, seed=42)
    print(maml_gan.epsilon)
    print(maml_gan.num_epsilon_ignore)
|
424270
|
from django.db import models
from officialWebsite.events.models import Topic
class Resource(models.Model):
    # A named external link, optionally associated with one or more topics.
    name = models.CharField(max_length=255, blank=False, default="")
    url = models.URLField(blank=False, default="")
    # Many-to-many: a resource may cover several event topics.
    topic = models.ManyToManyField(Topic, blank=True)

    def __str__(self):
        # Human-readable representation (e.g. in the Django admin).
        return self.name
|
424271
|
import logging
from typing import List, TYPE_CHECKING
log = logging.getLogger(__name__)
if TYPE_CHECKING:
from rastervision.pipeline.pipeline_config import PipelineConfig # noqa
class Pipeline():
    """A pipeline of commands to run sequentially.

    This is an abstraction over a sequence of commands, each represented by a
    method. This base class ships two test commands; new pipelines should be
    created by subclassing it.

    Any split command method must have the signature::

        def my_command(self, split_ind: int = 0, num_splits: int = 1)

    where ``num_splits`` is how many parallel jobs should be created and
    ``split_ind`` is the index of the current job within that set.

    Attributes:
        commands: command names listed in the order in which they should run
        split_commands: names of commands that can be split and run in parallel
        gpu_commands: names of commands that should be executed on GPUs if
            available
    """
    commands: List[str] = ['test_cpu', 'test_gpu']
    split_commands: List[str] = ['test_cpu']
    gpu_commands: List[str] = ['test_gpu']

    def __init__(self, config: 'PipelineConfig', tmp_dir: str):
        """Store the pipeline configuration and temp-directory root.

        Args:
            config: the configuration of this pipeline
            tmp_dir: the root of any temporary directories created by running
                this pipeline
        """
        self.config = config
        self.tmp_dir = tmp_dir

    def test_cpu(self, split_ind: int = 0, num_splits: int = 1):
        """A command to test the ability to run split jobs on CPU."""
        log.info('test_cpu split: {}/{}'.format(split_ind, num_splits))
        log.info(self.config)

    def test_gpu(self):
        """A command to test the ability to run on GPU."""
        log.info(self.config)
|
424274
|
from math import *
import sys
# Parse and validate command-line arguments (Python 2 script).
if len(sys.argv) != 6:
    print "Usage: <script> <N> <t> <M> <field_case> <sbox_case>"
    print "field_case: 0 (binary), 1 (prime)"
    print "sbox_case: 0 (x^3), 1 (x^5), 2 (x^(-1))"
    exit()
N_fixed = int(sys.argv[1])  # state size in bits
t_fixed = int(sys.argv[2])  # number of field elements in the state
M = int(sys.argv[3])  # Security level
field_case = int(sys.argv[4])
sbox_case = int(sys.argv[5])
if N_fixed % t_fixed != 0:
    print "t is not a divisor of N!"
    exit()
# In the binary case, force an odd per-element bit size n (bump N if needed).
if field_case == 0:
    n = int(ceil(float(N_fixed) / t_fixed))
    if n % 2 == 0:
        n_new = n + 1
        N_fixed = int(n_new * t_fixed)
print "N:", N_fixed
print "Security level M:", M
if field_case == 0:
    print "Field: Binary"
else:
    print "Field: Prime"
if sbox_case == 0:
    print "S-box: f(x) = x^3"
elif sbox_case == 1:
    print "S-box: f(x) = x^5"
elif sbox_case == 2:
    print "S-box: f(x) = x^(-1)"
def sat_inequiv_cubic(N, t, R_F, R_P):
    """Return True if (R_F, R_P) meets all security bounds for f(x) = x^3.

    Uses module globals M (security level) and field_case.
    """
    n = ceil(float(N) / t)
    R_F_1 = 6 if ((t + 1) <= (N + n - M)) else 10  # Statistical
    R_F_2 = 0.63 * min(n, M) + log(t, 2) - R_P  # Interpolation
    R_F_3 = 0.32 * min(n, M) - R_P  # Groebner 1
    R_F_4 = float(0.18 * min(n, M) - 1 - R_P) / (t - 1)  # Groebner 2
    # Extra bound that applies only over binary fields.
    R_F_5 = (0.63 * min(n, M) + 2 + log(t, 2) - R_P) if (field_case == 0) else 0
    R_F_max = max(ceil(R_F_1), ceil(R_F_2), ceil(R_F_3), ceil(R_F_4), ceil(R_F_5))
    if R_F >= R_F_max:
        return True
    else:
        return False
def sat_inequiv_fifth(N, t, R_F, R_P):
    """Return True if (R_F, R_P) meets all security bounds for f(x) = x^5.

    Uses module globals M (security level) and field_case.
    """
    n = ceil(float(N) / t)
    R_F_1 = 6 if ((2 * (t + 1)) <= (N + n - M)) else 10  # Statistical
    R_F_2 = 0.43 * min(n, M) + log(t, 2) - R_P  # Interpolation
    R_F_3 = 0.21 * min(n, M) - R_P  # Groebner 1
    R_F_4 = float(0.14 * min(n, M) - 1 - R_P) / (t - 1)  # Groebner 2
    # Extra bound that applies only over binary fields.
    R_F_5 = (0.63 * min(n, M) + 2 + log(t, 2) - R_P) if (field_case == 0) else 0
    R_F_max = max(ceil(R_F_1), ceil(R_F_2), ceil(R_F_3), ceil(R_F_4), ceil(R_F_5))
    if R_F >= R_F_max:
        return True
    else:
        return False
def sat_inequiv_inverse(N, t, R_F, R_P):
    """Return True if (R_F, R_P) meets all security bounds for f(x) = x^(-1).

    Unlike the x^3/x^5 variants, the interpolation bound here constrains
    R_P (as a function of R_F) rather than R_F directly.
    Uses module globals M (security level) and field_case.
    """
    n = ceil(float(N) / t)
    R_F_1 = 6 if ((2 * (t + 1)) <= (N + n - M)) else 10  # Statistical
    R_P_1 = 2 + log(t, 2) + min(n, M) - log(t, 2) * R_F  # Interpolation
    R_F_2 = float(log(t, 2) + 0.5 * min(n, M) - R_P) / log(t, 2)  # Groebner 1
    R_F_3 = float(0.25 * min(n, M) - 1 - R_P) / (t - 1)  # Groebner 2
    # Extra bound that applies only over binary fields.
    R_F_4 = (0.63 * min(n, M) + 2 + log(t, 2) - R_P) if (field_case == 0) else 0
    R_F_max = max(ceil(R_F_1), ceil(R_F_2), ceil(R_F_3), ceil(R_F_4))
    R_P_max = ceil(R_P_1)
    if R_F >= R_F_max and R_P >= R_P_max:
        return True
    else:
        return False
def get_sbox_cost(R_F, R_P, N, t):
    """Total S-box count: t S-boxes per full round plus one per partial round."""
    return int(R_F * t + R_P)
def get_size_cost(R_F, R_P, N, t):
    """Size cost: full rounds act on all N bits, partial rounds on n = N/t bits."""
    n = ceil(float(N) / t)
    return int(N * R_F + n * R_P)
def find_FD_round_numbers(N, t, cost_function, security_margin):
    """Brute-force the cheapest (R_F, R_P) pair satisfying the security bounds.

    Picks the satisfiability predicate from the module-global sbox_case,
    scans even R_F values, and minimizes cost_function (ties broken by
    smaller R_F). Returns (R_F, R_P) as ints.

    NOTE(review): when security_margin is True, R_F_t/R_P_t are increased
    in place inside the inner loop; the enlarged R_P_t then persists for the
    remaining R_F_t iterations of the same outer pass -- confirm intended.
    """
    sat_inequiv = None
    if sbox_case == 0:
        sat_inequiv = sat_inequiv_cubic
    elif sbox_case == 1:
        sat_inequiv = sat_inequiv_fifth
    elif sbox_case == 2:
        sat_inequiv = sat_inequiv_inverse
    R_P = 0
    R_F = 0
    min_cost = float("inf")
    max_cost_rf = 0
    # Brute-force approach
    for R_P_t in range(1, 1000):
        for R_F_t in range(4, 200):
            if R_F_t % 2 == 0:
                if (sat_inequiv(N, t, R_F_t, R_P_t) == True):
                    if security_margin == True:
                        # Security margin: +2 full rounds, +7.5% partial rounds.
                        R_F_t += 2
                        R_P_t = int(ceil(float(R_P_t) * 1.075))
                    cost = cost_function(R_F_t, R_P_t, N, t)
                    if (cost < min_cost) or ((cost == min_cost) and (R_F_t < max_cost_rf)):
                        R_P = ceil(R_P_t)
                        R_F = ceil(R_F_t)
                        min_cost = cost
                        max_cost_rf = R_F
    return (int(R_F), int(R_P))
def calc_final_numbers_fixed(security_margin):
    """Compute optimal round numbers and costs for the global N_fixed/t_fixed.

    Returns [R_F, R_P, min_sbox_cost, min_size_cost].
    """
    # [Min. S-boxes] Find best possible for t_fixed and N_fixed
    ret_list = []
    (R_F, R_P) = find_FD_round_numbers(N_fixed, t_fixed, get_sbox_cost, security_margin)
    min_sbox_cost = get_sbox_cost(R_F, R_P, N_fixed, t_fixed)
    ret_list.append(R_F)
    ret_list.append(R_P)
    ret_list.append(min_sbox_cost)
    # [Min. Size] Find best possible for t_fixed and N_fixed
    # Minimum number of S-boxes for fixed n results in minimum size also (round numbers are the same)!
    min_size_cost = get_size_cost(R_F, R_P, N_fixed, t_fixed)
    ret_list.append(min_size_cost)
    return ret_list  # [R_F, R_P, min_sbox_cost, min_size_cost]
def print_latex_table_combinations(combinations, security_margin):
    """Print one LaTeX table row per [N, t, M, field_case, sbox_case] entry.

    Overwrites the module globals for each entry before computing rounds.
    """
    global N_fixed
    global t_fixed
    global M
    global field_case
    global sbox_case
    field_string = ""
    sbox_string = ""
    for comb in combinations:
        N_fixed = comb[0]
        t_fixed = comb[1]
        M = comb[2]
        field_case = comb[3]
        sbox_case = comb[4]
        n = int(ceil(float(N_fixed) / t_fixed))
        ret = calc_final_numbers_fixed(security_margin)
        if field_case == 0:
            field_string = "\mathbb F_{2^n}"
        elif field_case == 1:
            field_string = "\mathbb F_{p}"
        if sbox_case == 0:
            sbox_string = "x^3"
        elif sbox_case == 1:
            sbox_string = "x^5"
        elif sbox_case == 2:
            sbox_string = "x^{-1}"
        print "$" + str(M) + "$ & $" + str(N_fixed) + "$ & $" + str(n) + "$ & $" + str(t_fixed) + "$ & $" + str(ret[0]) + "$ & $" + str(ret[1]) + "$ & $" + field_string + "$ & $" + str(ret[2]) + "$ & $" + str(ret[3]) + "$ \\\\"
def print_pretty_combinations(combinations, security_margin):
    """Print a plain-text row per [N, t, M, field_case, sbox_case] entry.

    Overwrites the module globals for each entry before computing rounds.
    """
    global N_fixed
    global t_fixed
    global M
    global field_case
    global sbox_case
    field_string = ""
    sbox_string = ""
    print "Format: [Security Level, Field Size, # Elements, Field, S-Box, R_F, R_P]"
    for comb in combinations:
        N_fixed = comb[0]
        t_fixed = comb[1]
        M = comb[2]
        field_case = comb[3]
        sbox_case = comb[4]
        n = int(ceil(float(N_fixed) / t_fixed))
        ret = calc_final_numbers_fixed(security_margin)
        if field_case == 0:
            field_string = "GF(2^n)"
        elif field_case == 1:
            field_string = "GF(p)"
        if sbox_case == 0:
            sbox_string = "x^3"
        elif sbox_case == 1:
            sbox_string = "x^5"
        elif sbox_case == 2:
            sbox_string = "x^{-1}"
        print [str(M), str(n), str(t_fixed), field_string, sbox_string, str(ret[0]), str(ret[1])]
# Recommendation for the command-line-supplied parameters.
ret_fixed = calc_final_numbers_fixed(True)
print ret_fixed
print "Recommendation for N=" + str(N_fixed) + ", t=" + str(t_fixed) + ":"
print "R_F =", ret_fixed[0]
print "R_P =", ret_fixed[1]
print "S-box cost =", ret_fixed[2]
print "Size cost =", ret_fixed[3]
# Table for challenge
# Format: [N, t, M, field, s_box]
# --> [N, t, M, 0/1, 0] (binary/prime field and x^3)
combinations_challenge = [
    [3*45, 3, 45, 0, 0],
    [3*45, 3, 45, 1, 0],
    [3*90, 3, 45, 0, 0],
    [3*90, 3, 45, 1, 0],
    [4*80, 4, 80, 0, 0],
    [4*80, 4, 80, 1, 0],
    [3*160, 3, 80, 0, 0],
    [3*160, 3, 80, 1, 0],
    [11*160, 11, 80, 0, 0],
    [11*160, 11, 80, 1, 0],
    [4*128, 4, 128, 0, 0],
    [4*128, 4, 128, 1, 0],
    [3*256, 3, 128, 0, 0],
    [3*256, 3, 128, 1, 0],
    [12*128, 12, 128, 0, 0],
    [12*128, 12, 128, 1, 0],
    [11*256, 11, 128, 0, 0],
    [11*256, 11, 128, 1, 0],
    [8*128, 8, 256, 0, 0],
    [8*128, 8, 256, 1, 0],
    [3*512, 3, 256, 0, 0],
    [3*512, 3, 256, 1, 0],
    [14*128, 14, 256, 0, 0],
    [14*128, 14, 256, 1, 0],
    [11*512, 11, 256, 0, 0],
    [11*512, 11, 256, 1, 0],
]
print "--- Round numbers (with security margin) ---"
print_pretty_combinations(combinations_challenge, True)
exit()
# NOTE: everything below this exit() is unreachable; the LaTeX table
# generation is kept for reference only.
# Build table
# x^3
x_3_combinations = [
    [1536, 2, 128, 1, 0], [1536, 4, 128, 1, 0], [1536, 6, 128, 1, 0], [1536, 8, 128, 1, 0], [1536, 16, 128, 1, 0],
    [1512, 24, 128, 0, 0], [1551, 47, 128, 0, 0], [1581, 51, 128, 0, 0],
    [1536, 2, 256, 1, 0], [1536, 4, 256, 1, 0], [1536, 6, 256, 1, 0], [1536, 8, 256, 1, 0], [1536, 16, 256, 1, 0],
    [1512, 24, 256, 0, 0], [1551, 47, 256, 0, 0], [1581, 51, 256, 0, 0]
]
# With security margin
print "--- Table x^3 WITH security margin ---"
print_latex_table_combinations(x_3_combinations, True)
# Without security margin
print "--- Table x^3 WITHOUT security margin ---"
print_latex_table_combinations(x_3_combinations, False)
# x^5
x_5_combinations = [
    [1536, 2, 128, 1, 1], [1536, 4, 128, 1, 1], [1536, 6, 128, 1, 1], [1536, 8, 128, 1, 1], [1536, 16, 128, 1, 1],
    [1536, 2, 256, 1, 1], [1536, 4, 256, 1, 1], [1536, 6, 256, 1, 1], [1536, 8, 256, 1, 1], [1536, 16, 256, 1, 1]
]
# With security margin
print "--- Table x^5 WITH security margin ---"
print_latex_table_combinations(x_5_combinations, True)
# Without security margin
print "--- Table x^5 WITHOUT security margin ---"
print_latex_table_combinations(x_5_combinations, False)
# x^{-1}
x_inv_combinations = [
    [1536, 2, 128, 1, 2], [1536, 4, 128, 1, 2], [1536, 6, 128, 1, 2], [1536, 8, 128, 1, 2], [1536, 16, 128, 1, 2],
    [1536, 2, 256, 1, 2], [1536, 4, 256, 1, 2], [1536, 6, 256, 1, 2], [1536, 8, 256, 1, 2], [1536, 16, 256, 1, 2]
]
# With security margin
print "--- Table x^{-1} WITH security margin ---"
print_latex_table_combinations(x_inv_combinations, True)
# Without security margin
print "--- Table x^{-1} WITHOUT security margin ---"
print_latex_table_combinations(x_inv_combinations, False)
|
424368
|
import numpy as np
from numpy.polynomial import Polynomial
def longstaff_schwartz_iter(X, t, df, fit, exercise_payoff,
                            itm_select=None):
    """Yield the backward-induction steps of the Longstaff-Schwartz method.

    Parameters:
        X: paths array, shape (n_times, n_paths).
        t: exercise dates matching X's first axis.
        df: df(t_a, t_b) -> discount factor between two dates.
        fit: fit(x, y) -> callable approximating the continuation value.
        exercise_payoff: payoff function applied to a vector of states.
        itm_select: optional predicate selecting paths eligible for exercise;
            all paths are eligible when omitted.

    Yields:
        (cashflow, x, fitted, continuation, exercise, ex_idx) per time step,
        iterating backwards from the penultimate date to t[1].
    """
    # With no prior exercise the holder receives the terminal payoff.
    cashflow = exercise_payoff(X[-1, :])
    # Walk backwards through the exercise dates.
    for step in reversed(range(1, X.shape[0] - 1)):
        # Bring next-period cashflows back to time t[step].
        cashflow = cashflow * df(t[step], t[step + 1])
        state = X[step, :]
        exercise = exercise_payoff(state)
        # Paths considered for exercise (all of them unless a selector is given).
        if itm_select:
            itm = itm_select(exercise, state)
        else:
            itm = np.full(state.shape, True)
        # Regress discounted cashflows on the state over selected paths.
        fitted = fit(state[itm], cashflow[itm])
        continuation = fitted(state)
        # Exercise wherever the immediate payoff beats the estimated continuation.
        ex_idx = itm & (exercise > continuation)
        cashflow[ex_idx] = exercise[ex_idx]
        yield cashflow, state, fitted, continuation, exercise, ex_idx
def longstaff_schwartz(X, t, df, fit, exercise_payoff, itm_select=None):
    """Price an American option via Longstaff-Schwartz Monte Carlo.

    Runs the backward induction to completion, then averages the resulting
    path cashflows and discounts them from t[1] back to t[0].
    """
    cashflow = None
    for values in longstaff_schwartz_iter(X, t, df, fit,
                                          exercise_payoff, itm_select):
        cashflow = values[0]
    return cashflow.mean(axis=0) * df(t[0], t[1])
def ls_american_option_quadratic_iter(X, t, r, strike):
# given no prior exercise we just receive the payoff of a European option
cashflow = np.maximum(strike - X[-1, :], 0.0)
# iterating backwards in time
for i in reversed(range(1, X.shape[0] - 1)):
# discount factor between t[i] and t[i+1]
df = np.exp(-r * (t[i+1]-t[i]))
# discount cashflows from next period
cashflow = cashflow * df
x = X[i, :]
# exercise value for time t[i]
exercise = np.maximum(strike - x, 0.0)
# boolean index of all in-the-money paths
itm = exercise > 0
# fit polynomial of degree 2
fitted = Polynomial.fit(x[itm], cashflow[itm], 2)
# approximate continuation value
continuation = fitted(x)
# boolean index where exercise is beneficial
ex_idx = itm & (exercise > continuation)
# update cashflows with early exercises
cashflow[ex_idx] = exercise[ex_idx]
yield cashflow, x, fitted, continuation, exercise, ex_idx
def longstaff_schwartz_american_option_quadratic(X, t, r, strike):
    """Price an American put via the quadratic-regression LS iteration.

    Averages the final path cashflows and discounts the first period to t[0].
    """
    cashflow = None
    for values in ls_american_option_quadratic_iter(X, t, r, strike):
        cashflow = values[0]
    return cashflow.mean(axis=0) * np.exp(-r * (t[1] - t[0]))
|
424401
|
import logging
import time
from typing import Any, Dict, Optional
import aiohttp
from .entities import UserInfo
logger = logging.getLogger(__name__)
DEFAULT_DISCOVERY_RESPONSE_CACHE_PERIOD = 3600 # 1 hour
class OpenIdConnectDiscovery:
    """Retrieve info from OpenID Connect (OIDC) endpoints"""

    def __init__(self):
        self._discovery_url: Optional[str] = None
        # Monotonic timestamp of the last (attempted) discovery refresh.
        self._discovery_data_cached_at: Optional[float] = None
        self._discovery_cache_period: float = float(
            DEFAULT_DISCOVERY_RESPONSE_CACHE_PERIOD
        )
        # Cached discovery document (None until first successful fetch).
        self._discovery_data: Optional[Dict[str, Any]] = None

    def init(
        self,
        discovery_url: str,
        *,
        discovery_cache_period: int = DEFAULT_DISCOVERY_RESPONSE_CACHE_PERIOD,
    ):
        """Set up OpenID Connect data fetching

        Args:
            discovery_url:
                The well-known OpenID Connect discovery endpoint
                Example: "https://domain/.well-known/openid-connect"
            discovery_cache_period:
                How many seconds to cache the OpenID Discovery endpoint response. Defaults to 1 hour.
        """
        self._discovery_url = discovery_url
        self._discovery_cache_period = float(discovery_cache_period)

    def is_configured(self) -> bool:
        # True once init() has been called with a non-empty URL.
        return bool(self._discovery_url)

    async def get_user_info(self, access_token: str) -> Optional[UserInfo]:
        """Get user info for the given OAuth 2 access token

        Returns ``None`` when discovery is not configured or no token is
        given. When the userinfo endpoint yields nothing for the token
        (e.g. a machine user), a dummy UserInfo is returned; otherwise the
        parsed UserInfo.
        """
        if not self.is_configured():
            logger.info("OpenID Connect discovery URL is not set up!")
            return None
        if not access_token:
            logger.debug("No access token provided")
            return None
        user_info = await self._fetch_user_info(access_token)
        if user_info is None:
            return UserInfo.make_dummy()
        else:
            return UserInfo.from_oidc_endpoint(user_info)

    async def get_jwks_uri(self) -> str:
        """Get or fetch the JWKS URI"""
        data = await self.get_discovery_data()
        return data["jwks_uri"]

    async def _fetch_user_info(self, access_token: str) -> Optional[Dict[str, Any]]:
        # Query the userinfo endpoint; returns the JSON payload on HTTP 200,
        # otherwise None (treated upstream as "no user info available").
        timeout = aiohttp.ClientTimeout(total=10)
        url = await self.get_user_info_endpoint()
        headers = {"Authorization": f"Bearer {access_token}"}
        logger.debug(f"Fetching user info from {url}")
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(url, headers=headers) as response:
                if response.status == 200:
                    return await response.json()
                else:
                    logger.debug(
                        "User info could not be fetched (might be a machine user)"
                    )
                    return None

    async def get_user_info_endpoint(self) -> str:
        # The userinfo URL is taken from the (cached) discovery document.
        data = await self.get_discovery_data()
        return data["userinfo_endpoint"]

    async def get_discovery_data(self) -> Dict[str, Any]:
        # Refresh when there is no cached data or the cache period elapsed.
        if (
            self._discovery_data is None
            or self._discovery_data_cached_at is None
            or (
                (time.monotonic() - self._discovery_data_cached_at)
                > self._discovery_cache_period
            )
        ):
            try:
                self._discovery_data = await self._fetch_discovery_data()
            except Exception as ex:
                if self._discovery_data is None:
                    # First fetch failed: nothing to fall back to.
                    raise
                else:
                    # Refresh failed: keep serving stale data; the timestamp
                    # below is still advanced, postponing the next retry by a
                    # full cache period.
                    logger.info(
                        f"Failed to refresh OIDC discovery data, re-using old data. "
                        f"Exception was: {ex!r}"
                    )
                    self._discovery_data_cached_at = time.monotonic()
            else:
                self._discovery_data_cached_at = time.monotonic()
        return self._discovery_data

    async def _fetch_discovery_data(self) -> Dict[str, Any]:
        # raise_for_status=True turns non-2xx responses into exceptions,
        # which get_discovery_data() handles.
        timeout = aiohttp.ClientTimeout(total=10)
        assert self._discovery_url, "No OIDC discovery URL specified"
        logger.debug(f"Fetching OIDC discovery data from {self._discovery_url}")
        async with aiohttp.ClientSession(timeout=timeout, raise_for_status=True) as s:
            async with s.get(self._discovery_url) as response:
                return await response.json()
|
424420
|
# NICOS setup: graphite filter at HRPT, switched through the SPS-S5 PLC.
description='HRPT Graphit Filter via SPS-S5'
devices = dict(
    # Filter switch: state is read from an SPS digital-input byte/bit and
    # toggled by pushing a command string to the SPS over EPICS.
    graphit=device('nicos_sinq.amor.devices.sps_switch.SpsSwitch',
        description='Graphit filter controlled by SPS',
        epicstimeout=3.0,
        readpv='SQ:HRPT:SPS1:DigitalInput',
        commandpv='SQ:HRPT:SPS1:Push',
        commandstr="S0001",
        byte=4,
        bit=4,
        mapping={'OFF': False, 'ON': True},
        lowlevel=True
    ),
    # Raw command/reply channel to the SPS controller.
    sps1=device(
        'nicos_ess.devices.epics.extensions.EpicsCommandReply',
        epicstimeout=3.0,
        description='Controller of the counter box',
        commandpv='SQ:HRPT:spsdirect.AOUT',
        replypv='SQ:HRPT:spsdirect.AINP',
    ),
)
|
424458
|
from distutils.core import setup

# NOTE(review): distutils is deprecated (removed in Python 3.12);
# `from setuptools import setup` is the drop-in replacement.

# Read the long description with an explicit encoding so the build does
# not depend on the platform's locale default (the original could raise
# UnicodeDecodeError on non-UTF-8 locales such as Windows cp1252).
with open("README.rst", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="elara",
    packages=["elara"],
    version="0.5.4",
    license="three-clause BSD",
    description="Elara DB is an easy to use, lightweight key-value database written for python that can also be used as a fast in-memory cache for JSON-serializable data. Includes various methods and features to manipulate data structures in-memory, protect database files and export data.",
    long_description=long_description,
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/saurabh0719/elara",
    keywords=[
        "database",
        "key-value",
        "storage",
        "file storage",
        "key-value database",
        "nosql",
        "nosql database",
        "cache",
        "in-memory cache",
        "file cache",
    ],
    project_urls={
        "Documentation": "https://github.com/saurabh0719/elara#readme",
        "Source": "https://github.com/saurabh0719/elara",
    },
    install_requires=["cryptography", "msgpack", "safer"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Database",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python",
    ],
)
|
424461
|
import codecs
import os
import ujson
from unicodedata import normalize
from collections import Counter
# Special vocabulary tokens; build_vocab() assigns these the lowest ids.
GO = "<GO>"  # <s>: start of sentence
EOS = "<EOS>"  # </s>: end of sentence, also act as padding
UNK = "<UNK>"  # for Unknown tokens
PAD = "<PAD>"  # padding not used
def write_json(filename, dataset):
    """Dump `dataset` to `filename` as UTF-8 JSON (via ujson)."""
    with codecs.open(filename, mode="w", encoding="utf-8") as f:
        ujson.dump(dataset, f)
def word_convert(word):
    """Lowercase `word` and strip diacritics (e.g. French accents).

    NFD-decomposes the string and drops the non-ASCII combining marks,
    so "Éclair" becomes "eclair".
    """
    ascii_word = normalize("NFD", word).encode("ascii", "ignore").decode("utf-8")
    return ascii_word.lower()
def raw_dataset_iter(filename):
    """Yield (words, tags) sentence pairs from a tab-separated CRF file.

    Each non-blank line is "<idx>\t<word>\t<tag>"; a blank line ends a
    sentence.  Two fixes over the original:
    - the final sentence is yielded even when the file does not end with
      a blank line (previously it was silently dropped);
    - leading/consecutive blank lines are skipped instead of crashing on
      `"".split("\t")` unpacking.
    """
    with codecs.open(filename, mode="r", encoding="cp1252") as f:
        words, tags = [], []
        for line in f:
            line = line.strip()
            if len(line) == 0:
                if len(words) != 0:  # blank line closes the current sentence
                    yield words, tags
                    words, tags = [], []
            else:
                _, word, tag = line.split("\t")
                words.append(word_convert(word))
                tags.append(tag)
        # Emit the trailing sentence when the file lacks a final blank line.
        if words:
            yield words, tags
def load_dataset(filename):
    """Read a CRF file into a list of {"words": [...], "tags": [...]} records."""
    return [
        {"words": words, "tags": tags}
        for words, tags in raw_dataset_iter(filename)
    ]
def build_vocab(datasets):
    """Build (word_dict, tag_dict) index maps over all records in `datasets`.

    Words are indexed by descending frequency after the special tokens
    GO, EOS, UNK; tags after GO and EOS.
    """
    word_counter, tag_counter = Counter(), Counter()
    for dataset in datasets:
        for record in dataset:
            word_counter.update(record["words"])
            tag_counter.update(record["tags"])
    word_vocab = [GO, EOS, UNK] + [word for word, _ in word_counter.most_common()]
    tag_vocab = [GO, EOS] + [tag for tag, _ in tag_counter.most_common()]
    word_dict = {word: idx for idx, word in enumerate(word_vocab)}
    tag_dict = {tag: idx for idx, tag in enumerate(tag_vocab)}
    return word_dict, tag_dict
def build_dataset(data, word_dict, tag_dict):
    """Convert string records to index records using the given vocabularies.

    Words missing from `word_dict` fall back to the UNK id; every tag
    must be present in `tag_dict`.
    """
    indexed = []
    for record in data:
        word_ids = [
            word_dict[w] if w in word_dict else word_dict[UNK]
            for w in record["words"]
        ]
        tag_ids = [tag_dict[t] for t in record["tags"]]
        indexed.append({"words": word_ids, "tags": tag_ids})
    return indexed
def process_data():
    """End-to-end preprocessing: read the raw CRF splits, build the
    vocabularies, index every split and dump everything as JSON."""
    train_data = load_dataset(os.path.join("media", "train.crf"))
    dev_data = load_dataset(os.path.join("media", "dev.crf"))
    test_data = load_dataset(os.path.join("media", "test.crf"))
    # The word vocabulary deliberately excludes the test split, while the
    # tag vocabulary covers all three splits.
    word_dict, _ = build_vocab([train_data, dev_data])
    _, tag_dict = build_vocab([train_data, dev_data, test_data])
    # Dump the vocabulary first, then each indexed split.
    write_json("vocab.json", {"word_dict": word_dict, "tag_dict": tag_dict})
    for name, split in (("train", train_data), ("dev", dev_data), ("test", test_data)):
        write_json(name + ".json", build_dataset(split, word_dict, tag_dict))
# Entry point: run the full preprocessing pipeline when executed directly.
if __name__ == "__main__":
    process_data()
|
424469
|
from pprint import pprint
from jnt.patterns import re_whitespaces
import codecs
import xml.etree.ElementTree as et
from glob import glob
from os.path import join
from collections import defaultdict
from jnt.matching.crowdsourcing_words import load_crowd_clusters
from math import ceil
import re
# Number of candidate match terms placed on one crowdsourcing page (row).
WORDS_PER_PAGE = 5
def clean(text):
    """Collapse newlines and runs of whitespace in `text` to single spaces."""
    flattened = text.strip().replace("\n", " ")
    return re_whitespaces.sub(" ", flattened)
def get_inventory(inventory_dir):
    """Parse sense-inventory XML files into a nested dict.

    Only noun lemmas (containing "-n") are kept.  Returns a mapping
    word -> sense_id -> sense dict with target, id, name, wn mapping,
    definition and example fields.
    """
    inventory = defaultdict(dict)
    for word_fname in glob(join(inventory_dir, "*.xml")):
        root = et.parse(word_fname).getroot()
        target = root.attrib["lemma"]
        if "-n" not in target:
            continue
        for sense_node in root:
            if sense_node.tag != "sense":
                continue
            sense = {"target": target}
            sense["id"] = sense_node.attrib["n"]
            sense["name"] = sense_node.attrib["name"]
            for field in sense_node:
                if field.tag == "mappings":
                    for mapping in field:
                        if mapping.tag == "wn":
                            sense["wn"] = mapping.text
                elif field.tag == "commentary":
                    sense["definition"] = clean(field.text)
                elif field.tag == "examples":
                    sense["example"] = clean(field.text)
            inventory[target.replace("-n", "")][sense["id"]] = sense
    return inventory
def highlight_target(text, target):
    """Wrap standalone occurrences of `target` in <b>...</b> tags.

    A standalone occurrence is delimited by a comma, whitespace, a
    period, or the string boundary.  Two fixes over the original:
    - the bare "." in the delimiter alternation matched ANY character
      (it was meant to be a literal period), so e.g. "replanted" was
      highlighted; it is now escaped as "\\.";
    - `target` is passed through re.escape so regex metacharacters in a
      word cannot break or change the pattern.
    The py2-only `ur"..."` literals were replaced with portable escapes.
    """
    boundary_left = u"(,|\\s|\\.|^)"
    boundary_right = u"(,|\\s|\\.|$)"
    regex = re.compile(
        boundary_left + re.escape(target) + boundary_right, re.U | re.I
    )
    return regex.sub(u"\\1<b>" + target + u"</b>\\2", text)
def build_related4crowd(inventory_dir, related_words_fpath, csv_fpath):
    """Write a tab-separated crowdsourcing input table (Python 2 code).

    One row is emitted per sense and per chunk of WORDS_PER_PAGE related
    words.  NOTE(review): the log file path comes from the module-level
    global `log_fpath`, not a parameter -- confirm this coupling is
    intended.
    """
    related = load_crowd_clusters(related_words_fpath)
    with codecs.open(log_fpath, "w", "utf-8") as log, codecs.open(csv_fpath, "w", "utf-8") as table:
        # print header
        print >> table, "id\ttarget\tname\tdefinition\texamples\tontowiki_id\twordnet2_ids",
        for x in range(WORDS_PER_PAGE): table.write("\tmatchterm" + unicode(x+1))
        table.write("\n")
        inventory = get_inventory(inventory_dir)
        for word in inventory:
            # Restrict the output to a fixed pilot set of target words.
            if word not in ["president","capital","plant","rate"]: continue
            for sense_id in inventory[word]:
                # print to table
                related_words = list(related[word])
                # One output row per page-sized chunk of related words.
                for chunk in range( int(ceil(float(len(related[word])) / WORDS_PER_PAGE))):
                    table.write("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (word + "#" + sense_id,
                                word.strip(),
                                inventory[word][sense_id]["name"],
                                highlight_target(inventory[word][sense_id]["definition"], word),
                                highlight_target(inventory[word][sense_id]["example"], word),
                                inventory[word][sense_id]["id"],
                                inventory[word][sense_id]["wn"]))
                    # Pop up to WORDS_PER_PAGE related words; pad with "".
                    for x in range(WORDS_PER_PAGE):
                        try:
                            related_word = related_words.pop()
                        except IndexError:
                            related_word = ""
                        table.write("\t%s" % related_word.strip())
                    table.write("\n")
    print "CSV:", csv_fpath
# One-off run configuration: hard-coded local paths -- adjust before reuse.
inventory_dir = "/Users/alex/Desktop/matching-eval/lexical-sample/train/lexical-sample/sense-inventories/"
related_words_fpath = "/Users/alex/Desktop/matching-eval/cluster-terms/semeval35n-clusters.csv"
log_fpath = "/Users/alex/Desktop/matching-eval/related4crowd.txt"
csv_fpath = "/Users/alex/Desktop/matching-eval/related4crowd-tmp.csv"
build_related4crowd(inventory_dir, related_words_fpath, csv_fpath)
|
424581
|
import re
import math
from collections import Counter
import os
import string
from read import *
import pandas as pd
from pandas import ExcelWriter, ExcelFile
import numpy as np
import matplotlib.pyplot as plt
import spacy
from nltk.corpus import stopwords
import nltk
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.externals import joblib
import sendgrid
from sendgrid.helpers.mail import *
# Silence pandas' SettingWithCopyWarning for chained assignment.
# NOTE(review): this also hides genuine chained-assignment bugs.
pd.options.mode.chained_assignment = None
'''
CODES AND THEIR MEANINGS:
ml -> Machine Learning
bc -> Blockchain
ai -> Artificial Intelligence
su -> StartUp
prod -> Product
dev -> Development
'''
######################################### CUSTOM FUNCTIONS TO PERFORM TASKS #############################################
# Recipients of the notification mails sent by sendmail().
email_list = ["<EMAIL>","<EMAIL>", "<EMAIL>"]
# NOTE(review): API key is hard-coded -- move it to an environment variable.
sg = sendgrid.SendGridAPIClient(apikey="<KEY>")
WORD = re.compile(r'\w+')  # tokenizer used by text_to_vector()
def custom_sum(df_list):
    """Sum only the pure-int entries of `df_list`.

    Non-int entries (e.g. video-view strings mixed into the Likes
    column) are skipped; bools are excluded as well since the check is
    on the exact type.
    """
    return sum(value for value in df_list if type(value) == int)
def custom_time_sum(df_list):
    """Sum hour counts from strings shaped like "5 hours"."""
    total = 0
    for entry in df_list:
        # Strip the " hours" suffix, then accumulate the numeric part.
        total += int(entry.replace(u' hours', u''))
    return total
def custom_time_list(df_list):
    """In place, convert "N hours" strings in `df_list` to ints.

    Mutates and also returns the same container (callers rely on the
    in-place mutation).
    """
    for position in range(len(df_list)):
        without_suffix = df_list[position].replace(u' hours', u'')
        df_list[position] = int(without_suffix)
    return df_list
def get_cosine(vec1, vec2):
    """Cosine similarity between two sparse term-frequency vectors (dicts).

    Returns 0.0 when either vector is empty or all-zero; the original
    raised ZeroDivisionError in that case.
    """
    shared_keys = set(vec1.keys()) & set(vec2.keys())
    numerator = sum(vec1[k] * vec2[k] for k in shared_keys)
    sum1 = sum(v ** 2 for v in vec1.values())
    sum2 = sum(v ** 2 for v in vec2.values())
    denominator = math.sqrt(sum1) * math.sqrt(sum2)
    if not denominator:
        return 0.0
    return float(numerator) / denominator
def text_to_vector(text):
    """Tokenize `text` with the module-level WORD regex and return term counts."""
    return Counter(WORD.findall(text))
def dataset_hashtag_generator(df_list):
    """Extract non-stopword nouns (hashtag candidates) from every caption
    in `df_list`; returns one noun list per processed caption.

    NaN rows are skipped.  NOTE(review): `range(0, len(df_list)-1)`
    skips the final row -- confirm that is intentional.
    """
    # Lazily install the spaCy model / NLTK stopwords on first failure.
    try:
        nlp = spacy.load("en_core_web_sm")
    except:
        os.system("python3 -m spacy download en")
        nlp = spacy.load("en_core_web_sm")
    try:
        stopw = stopwords.words("english")
    except:
        os.system("python3 -m nltk.downloader stopwords")
        stopw = stopwords.words("english")
    noun_list = []
    for i in range(0, len(df_list)-1):
        print(df_list[i])
        try:
            # np.isnan succeeds only on numeric values: NaN rows are
            # skipped; string captions raise and fall through to the
            # cleaning branch below.
            if(np.isnan(df_list[i])):
                continue
        except:
            df_list[i] = re.sub(r'https?:\/\/.*\/\w*','',df_list[i]) # Remove hyperlinks
            df_list[i] = re.sub(r'['+string.punctuation+']+', ' ',df_list[i]) # Remove puncutations like 's
            df_list[i] = df_list[i].replace("#","")
            emoji_pattern = re.compile("["u"\U0001F600-\U0001F64F" u"\U0001F300-\U0001F5FF" u"\U0001F680-\U0001F6FF" u"\U0001F1E0-\U0001F1FF""]+", flags=re.UNICODE) #Removes emoji
            df_list[i] = emoji_pattern.sub(r'', df_list[i]) # no emoji
            doc = nlp(df_list[i])
            temp_list = []
            for sent in doc.sents:
                for token in sent:
                    token_temp = str(token)
                    # Keep non-stopword nouns as hashtag candidates.
                    if(token.pos_=="NOUN" and token.text not in stopw):
                        #print(sent)
                        #print(i, token.text)
                        temp_list.append(token.text)
            noun_list.append(temp_list)
            temp_list = []
    print(noun_list)
    return noun_list
def caption_hashtag_generator(sentence):
    """Extract non-stopword nouns from a single caption string.

    Returns a list containing one list of candidate hashtag nouns.
    NOTE(review): reloads the spaCy model on every call -- consider
    caching the pipeline at module level.
    """
    nlp = spacy.load("en_core_web_sm")
    stopw = stopwords.words("english")
    noun_list = []
    sentence = re.sub(r'https?:\/\/.*\/\w*','',sentence) # Remove hyperlinks
    sentence = re.sub(r'['+string.punctuation+']+', ' ',sentence) # Remove puncutations like 's
    sentence = sentence.replace("#","")
    emoji_pattern = re.compile("["u"\U0001F600-\U0001F64F" u"\U0001F300-\U0001F5FF" u"\U0001F680-\U0001F6FF" u"\U0001F1E0-\U0001F1FF""]+", flags=re.UNICODE) #Removes emoji
    sentence = emoji_pattern.sub(r'', sentence) # no emoji
    doc = nlp(sentence)
    temp_list = []
    for sent in doc.sents:
        for token in sent:
            token_temp = str(token)
            #print(sent)
            print(token.text, token.pos_)
            # Keep non-stopword nouns as hashtag candidates.
            if(token.pos_=="NOUN" and token.text not in stopw):
                #print(sent)
                #print(i, token.text)
                temp_list.append(token.text)
    noun_list.append(temp_list)
    temp_list = []
    #print(noun_list)
    return noun_list
def data_science(df, df_list):
    """Exploratory analysis: plot hashtag frequencies, likes-vs-followers
    scatter plots, and print per-category engagement statistics.

    NOTE(review): frame_ml / frame_bc / frame_ai / frame_su / frame_prod /
    frame_dev are not defined here -- presumably injected by the
    star-import `from read import *`; verify before relying on this.
    """
    hashtags = [] #Initialising hashtags list
    for hs in df["Hashtags"]: #Reading every hashtag that was used in posts
        hashtags += hs.split("#") #Every field in Hashtags column contains more than one hashtag so need to identify all. That's why using the split at # thing
    #print(hashtags)
    for elem in range(0,len(hashtags)): #If we print hashtags list before, it gives a non breaking space(\xa0) so need to replace it with null character or empty string
        hashtags[elem] = hashtags[elem].replace(u'\xa0',u'') #Replacement happens here
    #print(hashtags)
    fdist = nltk.FreqDist(hashtags) #freqdist function present in nltk
    fdist.plot(20) #Finding top 20 hashtags
    # Per-category scatter plots of engagement vs. audience size.
    frame_ml.plot(x="Followers", y="Likes", figsize=(50,100), style="o")
    frame_ai.plot(x="Followers", y="Likes", figsize=(50,100), style="o")
    frame_bc.plot(x="Followers", y="Likes", figsize=(50,100), style="o")
    frame_su.plot(x="Followers", y="Likes", figsize=(50,100), style="o")
    frame_prod.plot(x="Followers", y="Likes", figsize=(50,100), style="o")
    frame_dev.plot(x="Followers", y="Likes", figsize=(50,100), style="o")
    plt.show()
    # Mean likes per category (custom_sum skips non-int "view" entries).
    mean_likes_ml = round(custom_sum(frame_ml['Likes'].tolist())/len(frame_ml))
    mean_likes_bc = round(custom_sum(frame_bc['Likes'].tolist())/len(frame_bc))
    mean_likes_ai = round(custom_sum(frame_ai['Likes'].tolist())/len(frame_ai))
    mean_likes_su = round(custom_sum(frame_su['Likes'].tolist())/len(frame_su))
    mean_likes_prod = round(custom_sum(frame_prod['Likes'].tolist())/len(frame_prod))
    mean_likes_dev = round(custom_sum(frame_dev['Likes'].tolist())/len(frame_dev))
    # Mean hours since posting per category.
    mean_time_ml = round(custom_time_sum(frame_ml['Time since posted'].tolist())/len(frame_ml))
    mean_time_bc = round(custom_time_sum(frame_bc['Time since posted'].tolist())/len(frame_bc))
    mean_time_ai = round(custom_time_sum(frame_ai['Time since posted'].tolist())/len(frame_ai))
    mean_time_su = round(custom_time_sum(frame_su['Time since posted'].tolist())/len(frame_su))
    mean_time_prod = round(custom_time_sum(frame_prod['Time since posted'].tolist())/len(frame_prod))
    mean_time_dev = round(custom_time_sum(frame_dev['Time since posted'].tolist())/len(frame_dev))
    # Mean follower counts per category.
    mean_follow_ml = round(np.sum(frame_ml['Followers'])/len(frame_ml))
    mean_follow_bc = round(np.sum(frame_bc['Followers'])/len(frame_bc))
    mean_follow_ai = round(np.sum(frame_ai['Followers'])/len(frame_ai))
    mean_follow_su = round(np.sum(frame_su['Followers'])/len(frame_su))
    mean_follow_prod = round(np.sum(frame_prod['Followers'])/len(frame_prod))
    mean_follow_dev = round(np.sum(frame_dev['Followers'])/len(frame_dev))
    # Likes per hour per category.
    like_rate_ml = round(mean_likes_ml/mean_time_ml)
    like_rate_bc = round(mean_likes_bc/mean_time_bc)
    like_rate_ai = round(mean_likes_ai/mean_time_ai)
    like_rate_su = round(mean_likes_su/mean_time_su)
    like_rate_prod = round(mean_likes_prod/mean_time_prod)
    like_rate_dev = round(mean_likes_dev/mean_time_dev)
    print("MEAN LIKES\tMEAN TIME\tRATE OF LIKES(PER HR)\tMEAN FOLLOWERS")
    print(str(mean_likes_ml) + "\t\t" + str(mean_time_ml) + "\t\t" + str(like_rate_ml) + "\t\t\t" + str(mean_follow_ml))
    print(str(mean_likes_bc) + "\t\t" + str(mean_time_bc) + "\t\t" + str(like_rate_bc) + "\t\t\t" + str(mean_follow_bc))
    print(str(mean_likes_ai) + "\t\t" + str(mean_time_ai) + "\t\t" + str(like_rate_ai) + "\t\t\t" + str(mean_follow_ai))
    print(str(mean_likes_su) + "\t\t" + str(mean_time_su) + "\t\t" + str(like_rate_su) + "\t\t\t" + str(mean_follow_su))
    print(str(mean_likes_prod) + "\t\t" + str(mean_time_prod) + "\t\t" + str(like_rate_prod) + "\t\t\t" + str(mean_follow_prod))
    print(str(mean_likes_dev) + "\t\t" + str(mean_time_dev) + "\t\t" + str(like_rate_dev) + "\t\t\t" + str(mean_follow_dev))
    print("\n\nAVERAGE LIKE RATE COMBINING ALL HASHTAGS:")
    print(round((like_rate_ml + like_rate_bc + like_rate_ai + like_rate_su + like_rate_prod + like_rate_dev)/6))
    print("Likes after 3 hours would be "+str(round((like_rate_ml + like_rate_bc + like_rate_ai + like_rate_su + like_rate_prod + like_rate_dev)/6)*3))
    '''
    It's very clear from the mean of likes that dev is a moving hashtag to get more likes.
    But this might be because of various factors:
    (1) The user posting with #development might already have more followers
    (2) The size of the dataset is too small to come to a conlusion (125-130 only)
    (3) There might be more videos so views have been ommitted giving a better mean
    '''
def model(frame_df, no_followers=400):
    """Train/load a LinearRegression of Likes ~ (Followers, hours since
    posted) and return an expected-reach interval string for
    `no_followers` at the 10-hour mark.

    NOTE(review): when models/reach_model already exists on disk the
    freshly trained `lr` is discarded, so the prediction comes from the
    cached model while `mse` reflects the new fit -- confirm intended.
    """
    # Convert "N hours" strings to ints in place.
    custom_time_list(frame_df['Time since posted'])
    inp = frame_df[['Followers', 'Time since posted']]
    op = frame_df[['Likes']]
    train_x, test_x, train_y, test_y = train_test_split(inp, op, test_size = 0.2, random_state = 999)
    lr = LinearRegression().fit(train_x, train_y) #Fitting and creating a model
    pred = lr.predict(test_x) #Predicting the answers for valdiation data
    mse = mean_squared_error(pred, test_y) #finding the mean squared error
    try:
        model = joblib.load("models/reach_model")
    except:
        os.system("mkdir models")
        joblib.dump(lr, "models/reach_model",compress=9)
        model = joblib.load("models/reach_model")
    # Predict likes for this follower count at the 10-hour mark; the
    # interval is +/- one RMSE around the point prediction.
    reach_pred = model.predict([[no_followers,10]])
    #print(reach_pred, mse)
    expected_reach = "Expected Reach is " + str(int(reach_pred-round(mse**0.5))) + "-" + str(int(reach_pred+round(mse**0.5)))
    return expected_reach
def sendmail(email_id, caption):
    """Send a SendGrid notification email announcing a new Instagram post.

    Returns the SendGrid API response object.
    """
    sender = Email("<EMAIL>", name="<NAME>")
    recipient = Email(email_id)
    subject = "Weekly Updates From Merkalysis"
    body = Content("text/html", "<html><body><p>A post is up on Instagram from _rahul_kumaran_'s account with a caption \"" + caption + "\"</p></body></html>")
    message = Mail(sender, subject, recipient, body)
    return sg.client.mail.send.post(request_body=message.get())
def Main():
    """Interactive entry point: ask for a caption and follower count,
    suggest hashtags and print the expected reach."""
    df = pd.read_csv("datasets/combined_hashtag.csv") #Reading the new csv file
    frame_df = pd.DataFrame(df)
    caption = input("What's your caption?\n")
    no_followers = int(input("How many followers do you have on Instagram?\n"))
    hash_list = caption_hashtag_generator(caption)
    #data_science(df, frame_df)
    expected_reach = model(frame_df, no_followers)
    print(expected_reach + '\n\n' + str(hash_list))
    # Disabled experiments kept below for reference (email fan-out and
    # cosine-similarity smoke test).
    '''for email_id in email_list:
        response = sendmail(email_id, caption)
        print(response.status_code)'''
    '''t1 = text_to_vector("machine")
    t2 = text_to_vector("machine learning")
    cosine = get_cosine(t1,t2)
    print(cosine)'''
|
424610
|
from os import path
import tornado.web
from temboardui.web import (
Blueprint,
TemplateRenderer,
)
PLUGIN_NAME = 'activity'
blueprint = Blueprint()
# Proxy POST /activity/kill straight through to the agent.
blueprint.generic_proxy(r'/activity/kill', methods=['POST'])
# Absolute path of this plugin package, used for templates and static files.
plugin_path = path.dirname(path.realpath(__file__))
render_template = TemplateRenderer(plugin_path + '/templates')
def configuration(config):
    """This plugin exposes no configuration options."""
    return {}
def get_routes(config):
    """Return the plugin's Tornado routes: blueprint rules plus the
    static JS file handler."""
    static_js_route = (
        r"/js/activity/(.*)",
        tornado.web.StaticFileHandler,
        {'path': plugin_path + "/static/js"},
    )
    return blueprint.rules + [static_js_route]
def get_agent_username(request):
    """Best-effort lookup of the agent profile's username; None on any failure."""
    username = None
    try:
        username = request.instance.get_profile()['username']
    except Exception:
        pass
    return username
@blueprint.instance_route(r'/activity/(running|blocking|waiting)')
def activity(request, mode):
    """Render the activity page for the running/blocking/waiting view."""
    request.instance.check_active_plugin(PLUGIN_NAME)
    agent_username = get_agent_username(request)
    # Only pass an agent session when we actually have an agent profile.
    xsession = request.instance.xsession if agent_username else None
    context = dict(
        nav=True,
        agent_username=agent_username,
        instance=request.instance,
        plugin=PLUGIN_NAME,
        mode=mode,
        xsession=xsession,
        role=request.current_user,
    )
    return render_template('activity.html', **context)
@blueprint.instance_proxy(r'/activity(?:/blocking|/waiting)?')
def activity_proxy(request):
    """Fan out to the agent's activity endpoints and aggregate the responses."""
    request.instance.check_active_plugin(PLUGIN_NAME)
    agent = request.instance
    return {
        'blocking': agent.get('/activity/blocking'),
        'running': agent.get('/activity'),
        'waiting': agent.get('/activity/waiting'),
    }
|
424616
|
from .base_requests import AnymailRequestsBackend, RequestsPayload
from ..exceptions import AnymailRequestsAPIError
from ..message import AnymailRecipientStatus
from ..utils import get_anymail_setting
class EmailBackend(AnymailRequestsBackend):
    """Anymail backend for the Postal v1 HTTP API."""

    esp_name = "Postal"

    def __init__(self, **kwargs):
        """Read api_key and api_url from Anymail settings/kwargs."""
        esp_name = self.esp_name
        self.api_key = get_anymail_setting(
            "api_key", esp_name=esp_name, kwargs=kwargs, allow_bare=True
        )
        # Postal has no hosted instance, so the API URL is mandatory.
        api_url = get_anymail_setting("api_url", esp_name=esp_name, kwargs=kwargs)
        if not api_url.endswith("/"):
            api_url += "/"
        super().__init__(api_url, **kwargs)

    def build_message_payload(self, message, defaults):
        """Wrap `message` in a Postal-specific JSON payload."""
        return PostalPayload(message, defaults, self)

    def parse_recipient_status(self, response, payload, message):
        """Map Postal's per-recipient message ids to Anymail statuses."""
        parsed = self.deserialize_json_response(response, payload, message)
        if parsed["status"] != "success":
            raise AnymailRequestsAPIError(
                email_message=message, payload=payload, response=response, backend=self
            )
        # A successful send reports each recipient with its queued id.
        statuses = {}
        for email, details in parsed["data"]["messages"].items():
            statuses[email] = AnymailRecipientStatus(
                message_id=details["id"], status="queued"
            )
        return statuses
class PostalPayload(RequestsPayload):
    """Builds the JSON request body for Postal's api/v1/send/message."""

    def __init__(self, message, defaults, backend, *args, **kwargs):
        request_headers = kwargs.pop("headers", {})
        # Postal authenticates via a server API key header; both request
        # and response bodies are JSON.
        request_headers["X-Server-API-Key"] = backend.api_key
        request_headers["Content-Type"] = "application/json"
        request_headers["Accept"] = "application/json"
        super().__init__(
            message, defaults, backend, headers=request_headers, *args, **kwargs
        )

    def get_api_endpoint(self):
        return "api/v1/send/message"

    def init_payload(self):
        self.data = {}

    def serialize_data(self):
        return self.serialize_json(self.data)

    def set_from_email(self, email):
        self.data["from"] = str(email)

    def set_subject(self, subject):
        self.data["subject"] = subject

    def set_to(self, emails):
        self.data["to"] = [str(addr) for addr in emails]

    def set_cc(self, emails):
        self.data["cc"] = [str(addr) for addr in emails]

    def set_bcc(self, emails):
        self.data["bcc"] = [str(addr) for addr in emails]

    def set_reply_to(self, emails):
        # Postal accepts at most one reply-to address.
        if len(emails) > 1:
            self.unsupported_feature("multiple reply_to addresses")
        if len(emails) > 0:
            self.data["reply_to"] = str(emails[0])

    def set_extra_headers(self, headers):
        self.data["headers"] = headers

    def set_text_body(self, body):
        self.data["plain_body"] = body

    def set_html_body(self, body):
        if "html_body" in self.data:
            self.unsupported_feature("multiple html parts")
        self.data["html_body"] = body

    def make_attachment(self, attachment):
        """Return the Postal attachment dict for one Anymail attachment."""
        postal_att = {
            "name": attachment.name or "",
            "data": attachment.b64content,
            "content_type": attachment.mimetype,
        }
        if attachment.inline:
            # see https://github.com/postalhq/postal/issues/731
            # but it might be possible with the send/raw endpoint
            self.unsupported_feature('inline attachments')
        return postal_att

    def set_attachments(self, attachments):
        if attachments:
            self.data["attachments"] = [
                self.make_attachment(att) for att in attachments
            ]

    def set_envelope_sender(self, email):
        self.data["sender"] = str(email)

    def set_tags(self, tags):
        # Postal supports at most one tag per message.
        if len(tags) > 1:
            self.unsupported_feature("multiple tags")
        if len(tags) > 0:
            self.data["tag"] = tags[0]

    def set_esp_extra(self, extra):
        self.data.update(extra)
|
424621
|
from namedlist import namedlist
import numpy as np
import gym
from typing import Any, Union, List
import copy
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS
from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo
from ding.envs.common.env_element import EnvElement, EnvElementInfo
from ding.utils import ENV_REGISTRY
# Mutable named tuples used as the env's timestep / space-info containers.
OvercookEnvTimestep = namedlist('OvercookEnvTimestep', ['obs', 'reward', 'done', 'info'])
OvercookEnvInfo = namedlist('OvercookEnvInfo', ['agent_num', 'obs_space', 'act_space', 'rew_space'])
# n, s = Direction.NORTH, Direction.SOUTH
# e, w = Direction.EAST, Direction.WEST
# stay, interact = Action.STAY, Action.INTERACT
# Action.ALL_ACTIONS: [n, s, e, w, stay, interact]
@ENV_REGISTRY.register('overcooked')
class OvercookEnv(BaseEnv):
    """DI-engine wrapper for the two-agent Overcooked gridworld.

    Observations are the MDP's lossless per-player state encodings; the
    acting agent's index is randomized on every reset.

    Fix: __repr__ previously consisted of a bare `pass`, so it returned
    None and `repr(env)` raised "TypeError: __repr__ returned non-string";
    it now returns a descriptive string like OvercookGameEnv's.
    """

    def __init__(self, cfg) -> None:
        self._cfg = cfg
        self._env_name = cfg.get("env_name", "cramped_room")
        self._horizon = cfg.get("horizon", 400)
        self._concat_obs = cfg.get("concat_obs", False)
        self._action_mask = cfg.get("action_mask", True)
        self._use_shaped_reward = cfg.get("use_shaped_reward", True)
        self.mdp = OvercookedGridworld.from_layout_name(self._env_name)
        self.base_env = OvercookedEnv.from_mdp(self.mdp, horizon=self._horizon, info_level=0)
        # Lossless (non-featurized) per-player encoding of the grid state.
        featurize_fn = lambda mdp, state: mdp.lossless_state_encoding(state)
        self.featurize_fn = featurize_fn
        self.action_dim = len(Action.ALL_ACTIONS)
        self.action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))
        # rightnow overcook environment encoding only support 2 agent game
        self.agent_num = 2
        # Probe the per-player observation shape from a standard start state.
        dummy_mdp = self.base_env.mdp
        dummy_state = dummy_mdp.get_standard_start_state()
        self.obs_shape = self.featurize_fn(dummy_mdp, dummy_state)[0].shape

    def seed(self, seed: int, dynamic_seed: bool = True) -> None:
        """Store the seed and seed numpy's global RNG."""
        self._seed = seed
        self._dynamic_seed = dynamic_seed
        np.random.seed(self._seed)

    def close(self) -> None:
        # Note: the real env instance only has a empty close method, only pas
        pass

    def step(self, action):
        """Advance one step with a joint action [own, other]; returns an
        OvercookEnvTimestep."""
        if isinstance(action, list):
            action = np.concatenate(action)
        assert all(self.action_space.contains(a) for a in action), "%r (%s) invalid" % (action, type(action))
        agent_action, other_agent_action = [Action.INDEX_TO_ACTION[a] for a in action]
        # Order the joint action so tuple slot 0 is always env player 0.
        if self.agent_idx == 0:
            joint_action = (agent_action, other_agent_action)
        else:
            joint_action = (other_agent_action, agent_action)
        next_state, reward, done, env_info = self.base_env.step(joint_action)
        if self._use_shaped_reward:
            # Add both agents' dense shaping terms to the sparse reward.
            reward += env_info['shaped_r_by_agent'][0]
            reward += env_info['shaped_r_by_agent'][1]
        reward = np.array([float(reward)])
        self._final_eval_reward += reward
        ob_p0, ob_p1 = self.featurize_fn(self.mdp, next_state)
        # Present observations in (self, other) order.
        if self.agent_idx == 0:
            both_agents_ob = [ob_p0, ob_p1]
        else:
            both_agents_ob = [ob_p1, ob_p0]
        if self._concat_obs:
            both_agents_ob = np.concatenate(both_agents_ob)
        else:
            both_agents_ob = np.stack(both_agents_ob)
        env_info["policy_agent_idx"] = self.agent_idx
        env_info["final_eval_reward"] = self._final_eval_reward
        action_mask = self.get_action_mask()
        if self._action_mask:
            obs = {
                "agent_state": both_agents_ob,
                "overcooked_state": self.base_env.state,
                "other_agent_env_idx": 1 - self.agent_idx,
                "action_mask": action_mask
            }
        else:
            obs = both_agents_ob
        return OvercookEnvTimestep(obs, reward, done, env_info)

    def reset(self):
        """Reset the base env, pick a random agent index and return the
        initial observation (a dict when action masking is enabled)."""
        self.base_env.reset()
        self._final_eval_reward = 0
        self.mdp = self.base_env.mdp
        # random init agent index
        self.agent_idx = np.random.choice([0, 1])
        ob_p0, ob_p1 = self.featurize_fn(self.mdp, self.base_env.state)
        if self.agent_idx == 0:
            both_agents_ob = [ob_p0, ob_p1]
        else:
            both_agents_ob = [ob_p1, ob_p0]
        if self._concat_obs:
            both_agents_ob = np.concatenate(both_agents_ob)
        else:
            both_agents_ob = np.stack(both_agents_ob)
        action_mask = self.get_action_mask()
        if self._action_mask:
            obs = {
                "agent_state": both_agents_ob,
                "overcooked_state": self.base_env.state,
                "other_agent_env_idx": 1 - self.agent_idx,
                "action_mask": action_mask
            }
        else:
            obs = both_agents_ob
        return obs

    def get_available_actions(self):
        """Per-player legal actions in the current state."""
        return self.mdp.get_actions(self.base_env.state)

    def get_action_mask(self):
        """Binary (2, action_dim) mask of legal actions per player."""
        available_actions = self.get_available_actions()
        action_masks = np.zeros((2, self.action_dim))
        for i in range(self.action_dim):
            if Action.INDEX_TO_ACTION[i] in available_actions[0]:
                action_masks[0][i] = 1
            if Action.INDEX_TO_ACTION[i] in available_actions[1]:
                action_masks[1][i] = 1
        return action_masks

    def info(self):
        """Describe observation/action/reward spaces for DI-engine."""
        T = EnvElementInfo
        if self._concat_obs:
            agent_state = list(self.obs_shape)
            agent_state[0] = agent_state[0] * 2
            agent_state = tuple(agent_state)
        else:
            agent_state = (self.agent_num, self.obs_shape)
        env_info = OvercookEnvInfo(
            agent_num=self.agent_num,
            obs_space=T({
                'agent_state': agent_state,
                'action_mask': (self.agent_num, self.action_dim),
            }, None),
            act_space=T((self.agent_num, self.action_dim), None),
            rew_space=T((1, ), None)
        )
        return env_info

    def __repr__(self):
        # Fix: a bare `pass` made this return None, so repr(env) raised
        # TypeError.  Mirrors OvercookGameEnv's string style.
        return "DI-engine Overcooked Env"
@ENV_REGISTRY.register('overcooked_game')
class OvercookGameEnv(BaseEnv):
def __init__(self, cfg) -> None:
self._cfg = cfg
self._env_name = cfg.get("env_name", "cramped_room")
self._horizon = cfg.get("horizon", 400)
self._concat_obs = cfg.get("concat_obs", False)
self._action_mask = cfg.get("action_mask", False)
self._use_shaped_reward = cfg.get("use_shaped_reward", True)
self.mdp = OvercookedGridworld.from_layout_name(self._env_name)
self.base_env = OvercookedEnv.from_mdp(self.mdp, horizon=self._horizon, info_level=0)
featurize_fn = lambda mdp, state: mdp.lossless_state_encoding(state)
self.featurize_fn = featurize_fn
self.action_dim = len(Action.ALL_ACTIONS)
self.action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))
# rightnow overcook environment encoding only support 2 agent game
self.agent_num = 2
# set up obs shape
dummy_mdp = self.base_env.mdp
dummy_state = dummy_mdp.get_standard_start_state()
self.obs_shape = self.featurize_fn(dummy_mdp, dummy_state)[0].shape
def seed(self, seed: int, dynamic_seed: bool = True) -> None:
self._seed = seed
self._dynamic_seed = dynamic_seed
np.random.seed(self._seed)
def close(self) -> None:
# Note: the real env instance only has a empty close method, only pas
pass
def step(self, action):
if isinstance(action, list):
action = np.array(action).astype(np.int)
if action.shape == (2, 1):
action = [action[0][0], action[1][0]]
assert all(self.action_space.contains(a) for a in action), "%r (%s) invalid" % (action, type(action))
agent_action, other_agent_action = [Action.INDEX_TO_ACTION[a] for a in action]
if self.agent_idx == 0:
joint_action = (agent_action, other_agent_action)
else:
joint_action = (other_agent_action, agent_action)
next_state, reward, done, env_info = self.base_env.step(joint_action)
reward = np.array([float(reward)])
self._final_eval_reward += reward
if self._use_shaped_reward:
self._final_eval_reward += env_info['shaped_r_by_agent'][0]
self._final_eval_reward += env_info['shaped_r_by_agent'][1]
rewards = np.array([reward, reward]).astype(np.float32)
if self._use_shaped_reward:
rewards[0] += env_info['shaped_r_by_agent'][0]
rewards[1] += env_info['shaped_r_by_agent'][1]
ob_p0, ob_p1 = self.featurize_fn(self.mdp, next_state)
if self.agent_idx == 0:
both_agents_ob = [ob_p0, ob_p1]
else:
both_agents_ob = [ob_p1, ob_p0]
if self._concat_obs:
both_agents_ob = np.concatenate(both_agents_ob)
else:
both_agents_ob = np.stack(both_agents_ob)
env_info["policy_agent_idx"] = self.agent_idx
env_info["final_eval_reward"] = self._final_eval_reward
action_mask = self.get_action_mask()
if self._action_mask:
obs = {
"agent_state": both_agents_ob,
"overcooked_state": self.base_env.state,
"other_agent_env_idx": 1 - self.agent_idx,
"action_mask": action_mask
}
else:
obs = both_agents_ob
return OvercookEnvTimestep(obs, rewards, done, [env_info, env_info])
def reset(self):
self.base_env.reset()
self._final_eval_reward = 0
self.mdp = self.base_env.mdp
# random init agent index
self.agent_idx = np.random.choice([0, 1])
#fix init agent index
self.agent_idx = 0
ob_p0, ob_p1 = self.featurize_fn(self.mdp, self.base_env.state)
if self.agent_idx == 0:
both_agents_ob = [ob_p0, ob_p1]
else:
both_agents_ob = [ob_p1, ob_p0]
if self._concat_obs:
both_agents_ob = np.concatenate(both_agents_ob)
else:
both_agents_ob = np.stack(both_agents_ob)
action_mask = self.get_action_mask()
if self._action_mask:
obs = {
"agent_state": both_agents_ob,
"overcooked_state": self.base_env.state,
"other_agent_env_idx": 1 - self.agent_idx,
"action_mask": action_mask
}
else:
obs = both_agents_ob
return obs
def get_available_actions(self):
return self.mdp.get_actions(self.base_env.state)
def get_action_mask(self):
available_actions = self.get_available_actions()
action_masks = np.zeros((2, self.action_dim))
for i in range(self.action_dim):
if Action.INDEX_TO_ACTION[i] in available_actions[0]:
action_masks[0][i] = 1
if Action.INDEX_TO_ACTION[i] in available_actions[1]:
action_masks[1][i] = 1
return action_masks
    def info(self):
        """Describe the env's observation/action/reward spaces as an OvercookEnvInfo.

        When observations are concatenated, the agent-state shape doubles its
        leading dimension; otherwise it is reported per-agent.
        """
        T = EnvElementInfo
        if self._concat_obs:
            # Concatenated obs: double the first dimension of the base shape.
            agent_state = list(self.obs_shape)
            agent_state[0] = agent_state[0] * 2
            agent_state = tuple(agent_state)
        else:
            agent_state = (self.agent_num, self.obs_shape)
        env_info = OvercookEnvInfo(
            agent_num=self.agent_num,
            obs_space=T({
                'agent_state': agent_state,
                'action_mask': (self.agent_num, self.action_dim),
            }, None),
            act_space=T((self.agent_num, self.action_dim), None),
            rew_space=T((1, ), None)
        )
        return env_info
    def __repr__(self):
        # Short, fixed identifier for logs and interactive sessions.
        return "DI-engine Overcooked GameEnv"
|
424653
|
from __future__ import annotations
__all__ = ["TimeValue", "TimeInterval"]
from dataclasses import dataclass
from datetime import datetime
from dateutil.relativedelta import relativedelta
from hijri_converter import Gregorian, Hijri
from . import constants
class TimeValue(relativedelta):
    """A relativedelta that additionally remembers which fields were explicitly set.

    Extends ``dateutil.relativedelta.relativedelta`` with:
    - ``None``-aware relative fields (``_years`` etc.) so ``is_*_set`` can
      distinguish "not provided" from an explicit 0;
    - ``am_pm`` handling (PM shifts ``hour`` by 12);
    - ``next_month`` / ``prev_month`` markers resolved at addition time;
    - optional Hijri-calendar arithmetic via ``hijri_converter``.
    """

    def __init__(
        self,
        dt1=None,
        dt2=None,
        years=None,
        months=None,
        days=None,
        leapdays=None,
        weeks=None,
        hours=None,
        minutes=None,
        seconds=None,
        microseconds=None,
        year=None,
        month=None,
        day=None,
        weekday=None,
        yearday=None,
        nlyearday=None,
        hour=None,
        minute=None,
        second=None,
        microsecond=None,
        am_pm=None,
        next_month=None,
        prev_month=None,
        hijri=None,
    ):
        # Relative fields default to None here (so "was it set?" is knowable)
        # but are forwarded to relativedelta as 0 when unset.
        # NOTE: ``weeks`` is intentionally NOT forwarded to super().__init__;
        # it is kept in ``_weeks`` and exposed via the overridden property
        # below, so week arithmetic is resolved manually in __add__.
        super().__init__(
            dt1=dt1,
            dt2=dt2,
            years=years if years is not None else 0,
            months=months if months is not None else 0,
            days=days if days is not None else 0,
            leapdays=leapdays if leapdays is not None else 0,
            hours=hours if hours is not None else 0,
            minutes=minutes if minutes is not None else 0,
            seconds=seconds if seconds is not None else 0,
            microseconds=microseconds if microseconds is not None else 0,
            year=year,
            month=month,
            day=day,
            weekday=weekday,
            yearday=yearday,
            nlyearday=nlyearday,
            hour=hour,
            minute=minute,
            second=second,
            microsecond=microsecond,
        )
        # Shadow copies preserving the original (possibly None) arguments.
        self._years = years
        self._months = months
        self._days = days
        self._leapdays = leapdays
        self._weeks = weeks
        self._hours = hours
        self._minutes = minutes
        self._seconds = seconds
        self._microseconds = microseconds
        self.next_month = next_month
        self.prev_month = prev_month
        self.am_pm = am_pm
        self.hijri = hijri

    @property
    def am_pm(self):
        """Meridiem marker ("AM"/"PM" or None)."""
        return self._am_pm

    @am_pm.setter
    def am_pm(self, am_pm):
        self._am_pm = am_pm
        # handle hour am pm: a PM marker promotes a 12-hour ``hour`` to 24-hour.
        if am_pm == "PM" and self.hour is not None and self.hour < 12:
            self.hour += 12

    @property
    def weeks(self):
        # relativedelta reads ``weeks``; unset means 0 for arithmetic.
        return self._weeks or 0

    @weeks.setter
    def weeks(self, value):
        self._weeks = value

    # --- "was this field explicitly provided?" predicates -------------------
    def is_years_set(self):
        return self._years is not None or self.year is not None

    def is_months_set(self):
        return self._months is not None or self.month is not None

    def is_days_set(self):
        return self._days is not None or self.day is not None

    def is_leapdays_set(self):
        return self._leapdays is not None

    def is_weeks_set(self):
        return self._weeks is not None or self.weekday is not None

    def is_hours_set(self):
        return self._hours is not None or self.hour is not None

    def is_minutes_set(self):
        return self._minutes is not None or self.minute is not None

    def is_seconds_set(self):
        return self._seconds is not None or self.second is not None

    def is_microseconds_set(self):
        return self._microseconds is not None or self.microsecond is not None

    def is_am_pm_set(self):
        return self.am_pm is not None

    def is_hijri_set(self):
        return self.hijri is not None

    def _add(self, value1, value2):
        """None-propagating addition: None + x == x; None + None == None."""
        if value1 is not None and value2 is not None:
            return value1 + value2
        if value1 is None:
            return value2
        if value2 is None:
            return value1

    def __add__(self, other):
        """Combine with another TimeValue, or apply this delta to a datetime.

        TimeValue + TimeValue merges field-wise (``other`` wins for absolute
        fields). TimeValue + datetime resolves weeks, next/prev month and
        Hijri dates before delegating to relativedelta's arithmetic; the
        instance's state is restored afterwards (mutations are temporary).
        """
        if isinstance(other, TimeValue):
            return self.__class__(
                years=self._add(other._years, self._years),
                months=self._add(other._months, self._months),
                days=self._add(other._days, self._days),
                leapdays=self._add(other._leapdays, self._leapdays),
                weeks=self._add(other._weeks, self._weeks),
                hours=self._add(other._hours, self._hours),
                minutes=self._add(other._minutes, self._minutes),
                seconds=self._add(other._seconds, self._seconds),
                microseconds=self._add(other._microseconds, self._microseconds),
                year=(other.year if other.year is not None else self.year),
                month=(other.month if other.month is not None else self.month),
                day=(other.day if other.day is not None else self.day),
                weekday=(other.weekday if other.weekday is not None else self.weekday),
                hour=(other.hour if other.hour is not None else self.hour),
                minute=(other.minute if other.minute is not None else self.minute),
                second=(other.second if other.second is not None else self.second),
                microsecond=(
                    other.microsecond
                    if other.microsecond is not None
                    else self.microsecond
                ),
                am_pm=other.am_pm or self.am_pm,
                next_month=(
                    other.next_month
                    if other.next_month is not None
                    else self.next_month
                ),
                prev_month=(
                    other.prev_month
                    if other.prev_month is not None
                    else self.prev_month
                ),
                hijri=other.hijri or self.hijri,
            )
        # Snapshot state: the branches below mutate self, restored at the end.
        old_values = self.__dict__.copy()
        # Handle next/prev week
        if isinstance(other, datetime) and self.weeks:
            self.days = self._days or 0
            current_day = other.weekday()
            if self._days is not None:
                # Explicit days given: weeks are a plain 7-day multiplier.
                self.days += self.weeks * 7
            else:
                # No explicit days: snap to the configured start of week.
                start_of_week = (current_day + 7 - constants.START_OF_WEEK) % 7
                # next week(s)
                if self.weeks > 0:
                    self.days += 7 - start_of_week + (self.weeks - 1) * 7
                # prev week(s)
                elif self.weeks < 0:
                    self.days -= start_of_week - 7 * self.weeks
        # Handle hijri date
        if isinstance(other, datetime) and self.hijri:
            current_hijri = Gregorian.fromdate(other.date()).to_hijri()
            hijri_year = self.year or current_hijri.year
            hijri_month = self.month or current_hijri.month
            hijri_day = self.day or current_hijri.day
            # month_lengths[1..12]: days per Hijri month of hijri_year
            # (index 0 is a placeholder so months are 1-based).
            month_lengths = [0] + [
                Hijri(hijri_year, i, 1).month_length() for i in range(1, 13)
            ]
            hijri_day = min(hijri_day, month_lengths[hijri_month])
            hijri_year += self.years
            hijri_month += self.months
            hijri_day += self.days
            # Normalize day/month overflow into valid Hijri ranges.
            while hijri_day > month_lengths[hijri_month]:
                if hijri_month > 12:
                    # NOTE(review): this carry uses self.months rather than
                    # hijri_month, and month_lengths is only computed for the
                    # original hijri_year — looks suspicious; confirm against
                    # multi-month/-year Hijri offsets before relying on it.
                    hijri_year += self.months // 12
                    hijri_month = self.months % 12
                hijri_day -= month_lengths[hijri_month]
                hijri_month += 1
            if self.next_month:
                hijri_year += 1 if self.next_month <= current_hijri.month else 0
                hijri_month = self.next_month
            elif self.prev_month:
                hijri_year += 0 if self.prev_month <= current_hijri.month else -1
                hijri_month = self.prev_month
            # Convert the resolved Hijri date back to Gregorian absolutes and
            # zero the relative fields so super().__add__ applies only them.
            new_date = Hijri(hijri_year, hijri_month, hijri_day).to_gregorian()
            self.year = new_date.year
            self.month = new_date.month
            self.day = new_date.day
            self.years = 0
            self.months = 0
            self.days = 0
        elif isinstance(other, datetime):
            # Gregorian next/prev month resolution relative to ``other``.
            current_month = other.month
            if self.next_month:
                self.years += 1 if self.next_month <= current_month else 0
                self.month = self.next_month
            elif self.prev_month:
                self.years += 0 if self.prev_month <= current_month else -1
                self.month = self.prev_month
        output = super().__add__(other)
        # Undo all temporary mutations so the TimeValue stays reusable.
        self.__dict__ = old_values
        return output

    def __repr__(self):
        """repr listing only the fields that are actually set (non-None)."""
        l = []
        for attr in [
            "_years",
            "_months",
            "_weeks",
            "_days",
            "_leapdays",
            "_hours",
            "_minutes",
            "_seconds",
            "_microseconds",
            "_am_pm",
            "next_month",
            "prev_month",
            "year",
            "month",
            "day",
            "weekday",
            "hour",
            "minute",
            "second",
            "microsecond",
            "hijri",
        ]:
            value = getattr(self, attr)
            if value is not None:
                l.append(
                    "{attr}={value}".format(attr=attr.strip("_"), value=repr(value))
                )
        return "{classname}({attrs})".format(
            classname=self.__class__.__name__, attrs=", ".join(l)
        )

    def __eq__(self, other):
        """Field-wise equality on the original (None-aware) values.

        Weekdays compare equal when they name the same weekday and their
        ordinals are equivalent (absent ordinal is treated like 1).
        """
        if not isinstance(other, TimeValue):
            return NotImplemented
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            n1, n2 = self.weekday.n, other.weekday.n
            # Treat a missing ordinal and n == 1 as interchangeable.
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (
            self._years == other._years
            and self._months == other._months
            and self._weeks == other._weeks
            and self._days == other._days
            and self._hours == other._hours
            and self._minutes == other._minutes
            and self._seconds == other._seconds
            and self._microseconds == other._microseconds
            and self._leapdays == other._leapdays
            and self.year == other.year
            and self.month == other.month
            and self.day == other.day
            and self.hour == other.hour
            and self.minute == other.minute
            and self.second == other.second
            and self.microsecond == other.microsecond
            and self.weekday == other.weekday
            and self.am_pm == other.am_pm
            and self.hijri == other.hijri
        )
@dataclass
class TimeInterval:
    """A half-open time span described by two optional TimeValue endpoints."""

    # Either endpoint may be None to express an open-ended interval.
    start: TimeValue | None = None
    end: TimeValue | None = None
|
424669
|
import logging
class ConfigConverterBase():
    """Base class for config-version converters.

    Subclasses set ``from_version``/``to_version`` and override
    :meth:`upgrade` to migrate a config dict between those versions.
    The base implementation is an identity pass-through.
    """

    # Class-level defaults; also re-assigned per instance in __init__.
    from_version = 0
    to_version = 0

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.from_version = 0
        self.to_version = 0

    def upgrade(self, old_config):
        """Return ``old_config`` unchanged (identity migration)."""
        return old_config
|
424701
|
# Golden fixture: expected parser output for a QSFP-DD 400G passive copper
# transceiver seen on interface Ethernet1/1.
expected_output = {
    "Ethernet1/1": {
        "advertising_code": "Passive Cu",
        # Adjacent string literals concatenate to "...25.8/56 GHz"
        # (implicit concatenation, not a tuple).
        "cable_attenuation": "0/0/0/0/0 dB for bands 5/7/12.9/25.8/56 " "GHz",
        "cable_length": 2.0,
        "cis_part_number": "37-1843-01",
        "cis_product_id": "QDD-400-CU2M",
        "cis_version_id": "V01",
        "cisco_id": "0x18",
        "clei": "CMPQAGSCAA",
        "cmis_ver": 4,
        "date_code": "20031400",
        "dom_supported": False,
        "far_end_lanes": "8 lanes aaaaaaaa",
        "host_electrical_intf": "Undefined",
        "max_power": 1.5,
        "media_interface": "copper cable unequalized",
        "name": "CISCO-LEONI",
        "near_end_lanes": "none",
        "nominal_bitrate": 425000,
        "part_number": "L45593-K218-C20",
        "power_class": "1 (1.5 W maximum)",
        "revision": "00",
        "serial_number": "LCC2411GG1W-A",
        "vendor_oui": "a8b0ae",
        "transceiver_present": True,
        "transceiver_type": "QSFP-DD-400G-COPPER",
    }
}
|
424717
|
import asyncio
import errno
import os
import sys
import logging
import ipaddress
from distutils.version import LooseVersion
import ray.dashboard.utils as dashboard_utils
import ray.dashboard.optional_utils as dashboard_optional_utils
# All third-party dependencies that are not included in the minimal Ray
# installation must be included in this file. This allows us to determine if
# the agent has the necessary dependencies to be started.
from ray.dashboard.optional_deps import aiohttp, hdrs
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
routes = dashboard_optional_utils.ClassMethodRouteTable
def setup_static_dir():
    """Locate the dashboard's pre-built frontend and register its static route.

    Returns the frontend build directory. Raises
    ``dashboard_utils.FrontendNotFoundError`` when the frontend has not been
    built (e.g. a from-source install that skipped the npm build steps).
    """
    here = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(here, "client", "build")
    module_name = os.path.basename(os.path.dirname(__file__))
    if not os.path.isdir(build_dir):
        message = (
            "Dashboard build directory not found. If installing "
            "from source, please follow the additional steps "
            "required to build the dashboard"
            f"(cd python/ray/{module_name}/client "
            "&& npm install "
            "&& npm ci "
            "&& npm run build)"
        )
        raise dashboard_utils.FrontendNotFoundError(errno.ENOENT, message, build_dir)
    routes.static("/static", os.path.join(build_dir, "static"), follow_symlinks=True)
    return build_dir
class HttpServerDashboardHead:
    """aiohttp HTTP server hosting the Ray dashboard frontend and module routes.

    Serves the pre-built React frontend plus any routes registered by
    dashboard modules via the class-method route table.
    """

    def __init__(self, ip, http_host, http_port, http_port_retries):
        # ip: node IP used when the bind host resolves to a wildcard address.
        self.ip = ip
        self.http_host = http_host
        self.http_port = http_port
        self.http_port_retries = http_port_retries
        # Below attributes are filled after `run` API is invoked.
        self.runner = None
        # Setup Dashboard Routes
        try:
            build_dir = setup_static_dir()
            logger.info("Setup static dir for dashboard: %s", build_dir)
        except dashboard_utils.FrontendNotFoundError as ex:
            # Not to raise FrontendNotFoundError due to NPM incompatibilities
            # with Windows.
            # Please refer to ci.sh::build_dashboard_front_end()
            if sys.platform in ["win32", "cygwin"]:
                logger.warning(ex)
            else:
                raise ex
        dashboard_optional_utils.ClassMethodRouteTable.bind(self)
        # Create a http session for all modules.
        # aiohttp<4.0.0 uses a 'loop' variable, aiohttp>=4.0.0 doesn't anymore
        if LooseVersion(aiohttp.__version__) < LooseVersion("4.0.0"):
            self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
        else:
            self.http_session = aiohttp.ClientSession()

    @routes.get("/")
    async def get_index(self, req) -> aiohttp.web.FileResponse:
        # Serve the frontend entry point.
        return aiohttp.web.FileResponse(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "client/build/index.html"
            )
        )

    @routes.get("/favicon.ico")
    async def get_favicon(self, req) -> aiohttp.web.FileResponse:
        return aiohttp.web.FileResponse(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "client/build/favicon.ico"
            )
        )

    def get_address(self):
        """Return the (host, port) the server ended up bound to (valid after run())."""
        assert self.http_host and self.http_port
        return self.http_host, self.http_port

    async def run(self, modules):
        """Bind all module routes and start the HTTP server, retrying ports on conflict."""
        # Bind http routes of each module.
        for c in modules:
            dashboard_optional_utils.ClassMethodRouteTable.bind(c)
        # Http server should be initialized after all modules loaded.
        # working_dir uploads for job submission can be up to 100MiB.
        app = aiohttp.web.Application(client_max_size=100 * 1024 ** 2)
        app.add_routes(routes=routes.bound_routes())
        self.runner = aiohttp.web.AppRunner(app)
        await self.runner.setup()
        last_ex = None
        # Try successive ports until one binds or retries are exhausted.
        for i in range(1 + self.http_port_retries):
            try:
                site = aiohttp.web.TCPSite(self.runner, self.http_host, self.http_port)
                await site.start()
                break
            except OSError as e:
                last_ex = e
                self.http_port += 1
                logger.warning("Try to use port %s: %s", self.http_port, e)
        else:
            raise Exception(
                f"Failed to find a valid port for dashboard after "
                f"{self.http_port_retries} retries: {last_ex}"
            )
        # Read back the actual bound address (port 0 would have been resolved
        # by the OS). NOTE(review): relies on aiohttp's private `site._server`.
        self.http_host, self.http_port, *_ = site._server.sockets[0].getsockname()
        # Replace a wildcard bind host (0.0.0.0/::) with this node's IP.
        self.http_host = (
            self.ip
            if ipaddress.ip_address(self.http_host).is_unspecified
            else self.http_host
        )
        logger.info(
            "Dashboard head http address: %s:%s", self.http_host, self.http_port
        )
        # Dump registered http routes.
        dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
        for r in dump_routes:
            logger.info(r)
        logger.info("Registered %s routes.", len(dump_routes))

    async def cleanup(self):
        """Shut down the aiohttp runner and release the port."""
        # Wait for finish signal.
        await self.runner.cleanup()
|
424764
|
from testing_helpers import wrap
@wrap
def count_threshold_generator(limit, threshold):
    """Count how many integers in ``[0, limit)`` exceed ``threshold``.

    Uses a generator expression so no intermediate list is built.
    Fix: ``xrange`` is Python-2-only (NameError on Python 3); ``range``
    produces the identical sum on both interpreters.
    """
    return sum(item > threshold for item in range(limit))
#def test_count_threshold_generator():
# count_threshold_generator(1000,490)
|
424766
|
import os
import sys
import subprocess as sp
# Generate a Windows module-definition (.def) file by scanning object files
# with dumpbin and collecting the external symbols worth exporting.
# Usage: script.py <obj1> <obj2> ... <skipped_arg> <output.def>
# NOTE(review): objects come from sys.argv[1:-2], so argv[-2] is deliberately
# skipped in addition to the trailing def path — confirm against the caller.
def_path = sys.argv[-1]
# print(sys.argv)
# Allow overriding the dumpbin binary and forcing export of every symbol.
dumpbin_path = os.environ.get("dumpbin_path", "dumpbin")
export_all = os.environ.get("EXPORT_ALL", "0")=="1"
syms = {}
for obj in sys.argv[1:-2]:
    cmd = f'"{dumpbin_path}" -SYMBOLS "{obj}"'
    ret = sp.getoutput(cmd)
    # print(ret)
    for l in ret.splitlines():
        if '|' in l:
            # Keep only defined, externally-visible symbols.
            if "UNDEF" in l: continue
            if "External" not in l: continue
            sym = l.split('|')[1].strip().split()[0]
            # Skip compiler-internal symbols (names starting with '@' or '.').
            if sym[0] in '@.': continue
            if sym.startswith("??$get_from_env"): syms[sym] = 1
            # if sym.startswith("??"): continue
            if sym.startswith("my"): syms[sym] = 1
            # for cutt
            if "custom_cuda" in sym: syms[sym] = 1
            if "cutt" in sym: syms[sym] = 1
            if "_cudaGetErrorEnum" in sym: syms[sym] = 1
            if export_all: syms[sym] = 1
            # Everything else must mention "jittor" to be exported.
            if "jittor" not in sym: continue
            syms[sym] = 1
    # print(ret)
# Emit the .def file: LIBRARY header plus one EXPORTS line per symbol.
libname = os.path.basename(def_path).rsplit(".", 1)[0]
src = f"LIBRARY {libname}\nEXPORTS\n"
for k in syms:
    src += f"    {k}\n"
# print(src)
with open(def_path, "w") as f:
    f.write(src)
|
424805
|
import sublime
import sublime_plugin
class SetMarkCommand(sublime_plugin.TextCommand):
    """Remember the current selection as the (hidden, persistent) mark."""

    def run(self, edit):
        current = list(self.view.sel())
        self.view.add_regions(
            "mark", current, "mark", "dot", sublime.HIDDEN | sublime.PERSISTENT
        )
class SwapWithMarkCommand(sublime_plugin.TextCommand):
    """Exchange the current selection with the stored mark."""

    def run(self, edit):
        previous_mark = self.view.get_regions("mark")
        current = list(self.view.sel())
        # Store the current selection as the new mark first.
        self.view.add_regions(
            "mark", current, "mark", "dot", sublime.HIDDEN | sublime.PERSISTENT
        )
        # Then jump to wherever the old mark was (if one existed).
        if previous_mark:
            selection = self.view.sel()
            selection.clear()
            for region in previous_mark:
                selection.add(region)
class SelectToMarkCommand(sublime_plugin.TextCommand):
    """Extend each selection region to cover its paired mark region."""

    def run(self, edit):
        mark = self.view.get_regions("mark")
        selection = self.view.sel()
        paired = min(len(mark), len(selection))
        # Pairwise regions get extended; unpaired selections stay as-is.
        regions = [selection[i].cover(mark[i]) for i in range(paired)]
        regions.extend(selection[i] for i in range(paired, len(selection)))
        selection.clear()
        for region in regions:
            selection.add(region)
class DeleteToMark(sublime_plugin.TextCommand):
    """Kill (delete and push to kill ring) the text between mark and cursor."""

    def run(self, edit):
        # Select mark..cursor, record it in the kill ring, then delete it.
        view = self.view
        view.run_command("select_to_mark")
        view.run_command("add_to_kill_ring", {"forward": False})
        view.run_command("left_delete")
|
424827
|
import string
from django import forms
from django.contrib.auth import authenticate, get_user_model
from django.utils.translation import ugettext_lazy as _
from scatterauth.settings import app_settings
class LoginForm(forms.Form):
    """Scatter login form: a signed payload plus the signer's public key.

    Both fields are hidden; they are populated client-side by the Scatter
    wallet integration.
    """
    signature = forms.CharField(widget=forms.HiddenInput, max_length=101)
    pubkey = forms.CharField(widget=forms.HiddenInput, max_length=53)

    # def clean_signature(self):
    #     sig = self.cleaned_data['signature']
    #     if len(sig) != 101:
    #         raise forms.ValidationError(_('Invalid signature'))
    #     return sig
# Fields shown on signup: the configured signup fields plus the pubkey field.
# list(set()) here is to eliminate the possibility of double including the address field.
# NOTE: set() does not preserve the configured field order.
signup_fields = list(set(app_settings.SCATTERAUTH_USER_SIGNUP_FIELDS + [app_settings.SCATTERAUTH_USER_PUBKEY_FIELD]))
class SignupForm(forms.ModelForm):
    """User signup form keyed on the Scatter public key instead of a password."""

    def __init__(self, *args, **kwargs):
        # first call parent's constructor
        super().__init__(*args, **kwargs)
        # make sure to make email required, because password is not set
        # and if the user loses private key he can get 'reset' password link to email
        if 'email' in app_settings.SCATTERAUTH_USER_SIGNUP_FIELDS:
            self.fields['email'].required = True
        self.fields[app_settings.SCATTERAUTH_USER_PUBKEY_FIELD].required = True

    def clean_address_field(self):
        # Pass-through cleaner; bound to 'clean_<pubkey_field>' via setattr below
        # the class. Eth-style validation is disabled.
        # validate_eth_address(self.cleaned_data[app_settings.SCATTERAUTH_USER_PUBKEY_FIELD])
        return self.cleaned_data[app_settings.SCATTERAUTH_USER_PUBKEY_FIELD]

    class Meta:
        model = get_user_model()
        fields = signup_fields
# hack to set the method for cleaning address field: Django discovers field
# cleaners by the name 'clean_<field>', and the pubkey field name is only
# known at runtime from settings — so alias clean_address_field under it.
setattr(SignupForm, 'clean_' + app_settings.SCATTERAUTH_USER_PUBKEY_FIELD, SignupForm.clean_address_field)
|
424835
|
import os
import sys
import tty, termios
import string
from pyfiglet import Figlet
from .charDef import *
# Terminal width in columns, read once at import time via `stty size`
# (output is "<rows> <cols>").
# NOTE(review): this fails when stdin is not a tty (pipes, CI) — confirm
# callers only import this module in interactive sessions.
_, n = os.popen('stty size', 'r').read().split()
COLUMNS = int(n)
def mybeep():
    ''' Sound the terminal bell. '''
    bell = chr(BEEP_CHAR)
    print(bell, end = '')
def mygetc():
    ''' Read one raw character from stdin, restoring terminal settings after. '''
    fd = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        return sys.stdin.read(1)
    finally:
        # Always restore cooked mode, even if the read raises.
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_attrs)
def getchar():
    ''' Read and classify one keypress.

    Returns the raw character for simple keys, a remapped single character
    for modifier/arrow escape sequences (offset by MOD_KEY_FLAG /
    ARROW_KEY_FLAG), UNDEFINED_KEY for unrecognized input, and retries
    (with a beep) on a bare ESC that is not a known combo.
    '''
    c = mygetc()
    # Plain control keys are returned as-is.
    if ord(c) == LINE_BEGIN_KEY or \
       ord(c) == LINE_END_KEY or \
       ord(c) == TAB_KEY or \
       ord(c) == NEWLINE_KEY:
        return c
    elif ord(c) == BACK_SPACE_KEY:
        return c
    elif ord(c) == ESC_KEY:
        # Escape sequence: expect "ESC [ <key> [dummy]" for mod/arrow keys.
        combo = mygetc()
        if ord(combo) == MOD_KEY_INT:
            key = mygetc()
            if ord(key) >= MOD_KEY_BEGIN - MOD_KEY_FLAG and ord(key) <= MOD_KEY_END - MOD_KEY_FLAG:
                # Modifier keys carry a trailing dummy byte that must match.
                if ord(mygetc()) == MOD_KEY_DUMMY:
                    return chr(ord(key) + MOD_KEY_FLAG)
                else:
                    return UNDEFINED_KEY
            elif ord(key) >= ARROW_KEY_BEGIN - ARROW_KEY_FLAG and ord(key) <= ARROW_KEY_END - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return UNDEFINED_KEY
        else:
            # Unrecognized escape combo: beep and wait for the next keypress.
            mybeep()
            return getchar()
    else:
        # Printable characters pass through; anything else is undefined.
        if c in string.printable:
            return c
        else:
            return UNDEFINED_KEY
    return UNDEFINED_KEY
# Basic command line functions
def puts(s, indent = 4):
    ''' Print string with indent and a trailing newline. '''
    line = ' ' * indent + s + '\n'
    forceWrite(line)
def moveCursorLeft(n):
    ''' Move cursor left n columns via ANSI CUB. '''
    forceWrite(f"\033[{n}D")
def moveCursorRight(n):
    ''' Move cursor right n columns via ANSI CUF. '''
    forceWrite(f"\033[{n}C")
def moveCursorUp(n):
    ''' Move cursor up n rows via ANSI CUU. '''
    forceWrite(f"\033[{n}A")
def moveCursorDown(n):
    ''' Move cursor down n rows via ANSI CUD. '''
    forceWrite(f"\033[{n}B")
def moveCursorHead():
    ''' Return the cursor to the start of the current line. '''
    forceWrite('\r')
def clearLine():
    ''' Overwrite the current console line with spaces and rewind the cursor. '''
    blanks = ' ' * COLUMNS
    forceWrite(blanks)
    moveCursorHead()
def clearConsole(n):
    ''' Clear n console rows (bottom up). '''
    remaining = n
    while remaining > 0:
        clearLine()
        moveCursorUp(1)
        remaining -= 1
def forceWrite(s):
    ''' Write s to stdout and flush immediately (no buffering delay). '''
    stream = sys.stdout
    stream.write(s)
    stream.flush()
def renderText(s):
    ''' Print s as large ASCII art using the "slant" figlet font. '''
    figlet = Figlet(font = 'slant')
    banner = figlet.renderText(s)
    print(banner, end = '')
|
424849
|
import numpy as np
import tensorflow as tf
def tf_integral(x,a):
    # Closed-form antiderivative of sqrt(t^2 + a) evaluated at x:
    #   0.5 * (x*sqrt(x^2+a) + a*ln|x + sqrt(x^2+a)|)
    # NOTE: tf.log is the TensorFlow 1.x API (tf.math.log in TF2).
    return 0.5*(x*tf.sqrt(x**2+a)+a*tf.log(tf.abs(x+tf.sqrt(x**2+a))))
def tf_pre_parabol(x,par):
    # Map a horizontal coordinate x (centered on column 450) to its arc-length
    # position along the parabola with rate `par`, using tf_integral for the
    # arc length of y = par * t^2. Result is re-centered on 450.
    x = x-450.
    prev = 2.*par*(tf_integral(tf.abs(x),0.25/(par**2))-tf_integral(0,0.25/(par**2)))
    return prev+450.
def projector(param,ph,logo):
    '''Apply off-plane transformations to the sticker images.

    param: parabola rate of the off-plane parabolic tranformation, rank 2 tensor with shape [N, 1]
    ph: angle of the off-plane rotation in degrees, rank 2 tensor with shape [N, 1]
    logo: rank 4 tensor with format NHWC and shape [N, 400, 900, 3]
    return: rank 4 tensor with format NHWC and shape [N, 900, 900, 3]

    The warp is implemented via cumulative sums: pixel intensities along an
    axis are integrated, resampled at warped anchor positions, and
    differenced, which averages source pixels into each output pixel.
    '''
    # Horizontal pass: integrate each half of the image outward from the
    # center column (450), padded with a leading zero for differencing.
    right_cumsum = tf.transpose(tf.pad(tf.cumsum(logo[:,:,450:],axis=2),tf.constant([[0,0],[0,0],[1,0],[0,0]])),[0,2,1,3])
    left_cumsum = tf.transpose(tf.pad(tf.cumsum(logo[:,:,:450][:,:,::-1],axis=2),tf.constant([[0,0],[0,0],[1,0],[0,0]])),[0,2,1,3])
    # Anchor columns: where each warped output column samples the source,
    # per the parabolic arc-length mapping, clipped to the half-width.
    anchors = tf.expand_dims(tf.cast(tf.round(tf.clip_by_value(\
        tf_pre_parabol(tf.expand_dims(tf.constant(np.arange(450,901,dtype=np.float32)),0),\
        param)-450.,0,450.)),tf.int32),2)
    anch_inds = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(tf.shape(param)[0]),1),2),[1,451,1])
    new_anchors = tf.concat([anch_inds,anchors],2)
    anchors_div = tf.expand_dims(tf.cast(tf.clip_by_value(anchors[:,1:]-anchors[:,:-1],1,900),tf.float32),3)
    # Differenced cumulative sums divided by the sample spacing give the
    # averaged intensity for each warped column (right then left half).
    right_anchors_cumsum = tf.gather_nd(right_cumsum,new_anchors)
    right_anchors_diffs = right_anchors_cumsum[:,1:]-right_anchors_cumsum[:,:-1]
    right = right_anchors_diffs/anchors_div
    left_anchors_cumsum = tf.gather_nd(left_cumsum,new_anchors)
    left_anchors_diffs = left_anchors_cumsum[:,1:]-left_anchors_cumsum[:,:-1]
    left = left_anchors_diffs/anchors_div
    tmp_result = tf.transpose(tf.concat([left[:,::-1],right],axis=1),[0,2,1,3])
    # Vertical pass: same cumsum/resample trick for the off-plane rotation.
    cumsum = tf.pad(tf.cumsum(tmp_result,axis=1),tf.constant([[0,0],[1,0],[0,0],[0,0]]))
    angle = tf.expand_dims(np.pi/180.*ph,2)
    # Depth of each column on the parabola, used to shear rows under rotation.
    z = param*tf.constant((np.arange(900,dtype=np.float32)-449.5)**2)
    z_tile = tf.tile(tf.expand_dims(z,1),tf.constant([1,901,1]))
    y_coord = tf.constant(np.arange(-250,651,dtype=np.float32))
    y_tile = tf.tile(tf.expand_dims(tf.expand_dims(y_coord,1),0),[tf.shape(param)[0],1,900])
    # Inverse-rotate output rows back into source-image row coordinates.
    y_prev = (y_tile+z_tile*tf.sin(-angle))/tf.cos(angle)
    y_round = tf.cast(tf.round(tf.clip_by_value(y_prev,0,400.)),tf.int32)
    y_div = tf.clip_by_value(y_round[:,1:]-y_round[:,:-1],1,900)
    x_coord = tf.constant(np.arange(900,dtype=np.int32))
    x_tile = tf.tile(tf.expand_dims(tf.expand_dims(x_coord,0),0),[tf.shape(param)[0],901,1])
    b_coord = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(tf.shape(param)[0]),1),2),[1,901,900])
    indices = tf.stack([b_coord,y_round,x_tile],axis=3)
    chosen_cumsum = tf.gather_nd(cumsum,indices)
    chosen_cumsum_diffs = chosen_cumsum[:,1:]-chosen_cumsum[:,:-1]
    final_results = tf.clip_by_value(chosen_cumsum_diffs/tf.expand_dims(tf.cast(y_div,tf.float32),3),0.,1.)
    return final_results
def TVloss(logo,w_tv):
    '''Calculate TV (total variation) loss of the sticker image with predefined weight.

    logo: rank 4 tensor with format NHWC
    w_tv: weight of the TV loss
    return: scalar value of the TV loss
    '''
    # Squared differences between vertically / horizontally adjacent pixels.
    vert_diff = logo[:,1:]-logo[:,:-1]
    hor_diff = logo[:,:,1:]-logo[:,:,:-1]
    vert_diff_sq = tf.square(vert_diff)
    hor_diff_sq = tf.square(hor_diff)
    # Zero-pad back to the input shape so the two terms can be summed.
    vert_pad = tf.pad(vert_diff_sq,tf.constant([[0,0],[1,0],[0,0],[0,0]]))
    hor_pad = tf.pad(hor_diff_sq,tf.constant([[0,0],[0,0],[1,0],[0,0]]))
    tv_sum = vert_pad+hor_pad
    # Epsilon inside the sqrt keeps the gradient finite at zero variation.
    tv = tf.sqrt(tv_sum+1e-5)
    tv_final_sum = tf.reduce_sum(tv)
    tv_loss = w_tv*tv_final_sum
    return tv_loss
|
424852
|
import candles as candles
import trendAnalysis as trend
import oscilators as oscilators
from numpy import *
class Strategy:
positiveSignal = 50
negativeSignal = -50
trendVal = 100
defPositiveSignal = 50
defNegativeSignal = -50
defTrendVal = 100
"""Formacje"""
#Odwrocenie trendu wzrostowego
headAndShouldersVal = -100
tripleTopVal = -100
risingWedgeVal = -80
fallingTriangleVal = -80
defHeadAndShouldersVal = -100
defTripleTopVal = -100
defRisingWedgeVal = -80
defFallingTriangleVal = -80
#Odwrocenie trendu spadkowego
reversedHeadAndShouldersVal = 100
tripleBottomVal = 100
fallingWedgeVal = 80
risingTriangleVal = 80
defReversedHeadAndShouldersVal = 100
defTripleBottomVal = 100
defFallingWedgeVal = 80
defRisingTriangleVal = 80
#Kontynuacja trendu
symetricTriangleVal = 50
rectangleVal = 30
flagPennantVal = 20
defFlagPennantVal = 20
defSymetricTriangleVal = 50
defRectangleVal = 30
"""Wskazniki i oscylatory"""
oscilatorsVal = 50
newHighNewLowVal = 50
bollignerVal = 50
momentumVal = 50
rocVal = 50
cciVal = 50
rsiVal = 50
williamsVal = 50
defOscilatorsVal = 50
defNewHighNewLowVal = 50
defBollignerVal = 50
defMomentumVal = 50
defRocVal = 50
defCciVal = 50
defRsiVal = 50
defWilliamsVal = 50
"""Luki"""
#Wzrostowe
risingBreakawayGapVal = 50
risingContinuationGapVal = 30
fallingExhaustionGapVal = 10
defRisingBreakawayGapVal = 50
defRisingContinuationGapVal = 30
defFallingExhaustionGapVal = 10
#Spadkowe
fallingBreakawayGapVal = -50
risingExhaustionGapVal = -50
fallingContinuationGapVal = -30
defFallingBreakawayGapVal = -50
defRisingExhaustionGapVal = -50
defFallingContinuationGapVal = -30
"""Formacje swiecowe"""
#sygnal kupna
bull3Val = 15
mornigStarVal = 10
piercingVal = 5
defBull3Val = 15
defMornigStarVal = 10
defPiercingVal = 5
#sygnal sprzedazy
bear3Val = -15
eveningStarVal = -10
darkCloudVal = -5
defBear3Val = -15
defEveningStarVal = -10
defDarkCloudVal = -5
data = None
def __init__(self, data):
self.setData(data)
def setData(self, data):
self.data = data
#Potega wyrazen regularnych i textmate'a nie ma to jak wygenerowac 251 linii kodu
def setPositiveSignal(self, positiveSignal):
self.positiveSignal = positiveSignal
def disablePositiveSignal(self):
self.positiveSignal = 0
def enablePositiveSignal(self):
self.positiveSignal = self.defPositiveSignal
def setNegativeSignal(self, negativeSignal):
self.negativeSignal = negativeSignal
def disableNegativeSignal(self):
self.negativeSignal = 0
def enableNegativeSignal(self):
self.negativeSignal = self.defNegativeSignal
def setTrendVal(self, trendVal):
self.trendVal = trendVal
def disableTrendVal(self):
self.trendVal = 0
def enableTrendVal(self):
self.trendVal = self.defTrendVal
"""Formacje"""
#Odwrocenie trendu wzrostowego
def setHeadAndShouldersVal(self, headAndShouldersVal):
self.headAndShouldersVal = headAndShouldersVal
def disableHeadAndShouldersVal(self):
self.headAndShouldersVal = 0
def enableHeadAndShouldersVal(self):
self.headAndShouldersVal = self.defHeadAndShouldersVal
def setTripleTopVal(self, tripleTopVal):
self.tripleTopVal = tripleTopVal
def disableTripleTopVal(self):
self.tripleTopVal = 0
def enableTripleTopVal(self):
self.tripleTopVal = self.defTripleTopVal
def setRisingWedgeVal(self, risingWedgeVal):
self.risingWedgeVal = risingWedgeVal
def disableRisingWedgeVal(self):
self.risingWedgeVal = 0
def enableRisingWedgeVal(self):
self.risingWedgeVal = self.defRisingWedgeVal
def setFallingTriangleVal(self, fallingTriangleVal):
self.fallingTriangleVal = fallingTriangleVal
def disableFallingTriangleVal(self):
self.fallingTriangleVal = 0
def enableFallingTriangleVal(self):
self.fallingTriangleVal = self.defFallingTriangleVal
#Odwrocenie trendu spadkowego
def setReversedHeadAndShouldersVal(self, reversedHeadAndShouldersVal):
self.reversedHeadAndShouldersVal = reversedHeadAndShouldersVal
def disableReversedHeadAndShouldersVal(self):
self.reversedHeadAndShouldersVal = 0
def enableReversedHeadAndShouldersVal(self):
self.reversedHeadAndShouldersVal = self.defReversedHeadAndShouldersVal
def setTripleBottomVal(self, tripleBottomVal):
self.tripleBottomVal = tripleBottomVal
def disableTripleBottomVal(self):
self.tripleBottomVal = 0
def enableTripleBottomVal(self):
self.tripleBottomVal = self.defTripleBottomVal
def setFallingWedgeVal(self, fallingWedgeVal):
self.fallingWedgeVal = fallingWedgeVal
def disableFallingWedgeVal(self):
self.fallingWedgeVal = 0
def enableFallingWedgeVal(self):
self.fallingWedgeVal = self.defFallingWedgeVal
def setRisingTriangleVal(self, risingTriangleVal):
self.risingTriangleVal = risingTriangleVal
def disableRisingTriangleVal(self):
self.risingTriangleVal = 0
def enableRisingTriangleVal(self):
self.risingTriangleVal = self.defRisingTriangleVal
#Kontynuacja trendu
def setSymetricTriangleVal(self, symetricTriangleVal):
self.symetricTriangleVal = symetricTriangleVal
def disableSymetricTriangleVal(self):
self.symetricTriangleVal = 0
def enableSymetricTriangleVal(self):
self.symetricTriangleVal = self.defSymetricTriangleVal
def setRectangleVal(self, rectangleVal):
self.rectangleVal = rectangleVal
def disableRectangleVal(self):
self.rectangleVal = 0
def enableRectangleVal(self):
self.rectangleVal = self.defRectangleVal
def setFlagPennantVal(self, flagPennantVal):
self.flagPennantVal = flagPennantVal
def disableFlagPennantVal(self):
self.flagPennantVal = 0
def enableFlagPennantVal(self):
self.flagPennantVal = self.defFlagPennantVal
"""Wskazniki i oscylatory"""
def setOscilatorsVal(self, oscilatorsVal):
self.oscilatorsVal = oscilatorsVal
def disableOscilatorsVal(self):
self.oscilatorsVal = 0
def enableOscilatorsVal(self):
self.oscilatorsVal = self.defOscilatorsVal
def setNewHighNewLowVal(self, newHighNewLowVal):
self.newHighNewLowVal = newHighNewLowVal
def disableNewHighNewLowVal(self):
self.newHighNewLowVal = 0
def enableNewHighNewLowVal(self):
self.newHighNewLowVal = self.defNewHighNewLowVal
def setBollignerVal(self, bollignerVal):
self.bollignerVal = bollignerVal
def disableBollignerVal(self):
self.bollignerVal = 0
def enableBollignerVal(self):
self.bollignerVal = self.defBollignerVal
def setMomentumVal(self, momentumVal):
self.momentumVal = momentumVal
def disableMomentumVal(self):
self.momentumVal = 0
def enableMomentumVal(self):
self.momentumVal = self.defMomentumVal
def setRocVal(self, rocVal):
self.rocVal = rocVal
def disableRocVal(self):
self.rocVal = 0
def enableRocVal(self):
self.rocVal = self.defRocVal
def setCciVal(self, cciVal):
self.cciVal = cciVal
def disableCciVal(self):
self.cciVal = 0
def enableCciVal(self):
self.cciVal = self.defCciVal
def setRsiVal(self, rsiVal):
self.rsiVal = rsiVal
def disableRsiVal(self):
self.rsiVal = 0
def enableRsiVal(self):
self.rsiVal = self.defRsiVal
def setWilliamsVal(self, williamsVal):
self.williamsVal = williamsVal
def disableWilliamsVal(self):
self.williamsVal = 0
def enableWilliamsVal(self):
self.williamsVal = self.defWilliamsVal
"""Luki"""
#Wzrostowe
def setRisingBreakawayGapVal(self, risingBreakawayGapVal):
self.risingBreakawayGapVal = risingBreakawayGapVal
def disableRisingBreakawayGapVal(self):
self.risingBreakawayGapVal = 0
def enableRisingBreakawayGapVal(self):
self.risingBreakawayGapVal = self.defRisingBreakawayGapVal
def setRisingContinuationGapVal(self, risingContinuationGapVal):
self.risingContinuationGapVal = risingContinuationGapVal
def disableRisingContinuationGapVal(self):
self.risingContinuationGapVal = 0
def enableRisingContinuationGapVal(self):
self.risingContinuationGapVal = self.defRisingContinuationGapVal
def setFallingExhaustionGapVal(self, fallingExhaustionGapVal):
self.fallingExhaustionGapVal = fallingExhaustionGapVal
def disableFallingExhaustionGapVal(self):
self.fallingExhaustionGapVal = 0
def enableFallingExhaustionGapVal(self):
self.fallingExhaustionGapVal = self.defFallingExhaustionGapVal
#Spadkowe
def setFallingBreakawayGapVal(self, fallingBreakawayGapVal):
self.fallingBreakawayGapVal = fallingBreakawayGapVal
def disableFallingBreakawayGapVal(self):
self.fallingBreakawayGapVal = 0
def enableFallingBreakawayGapVal(self):
self.fallingBreakawayGapVal = self.defFallingBreakawayGapVal
def setRisingExhaustionGapVal(self, risingExhaustionGapVal):
self.risingExhaustionGapVal = risingExhaustionGapVal
def disableRisingExhaustionGapVal(self):
self.risingExhaustionGapVal = 0
def enableRisingExhaustionGapVal(self):
self.risingExhaustionGapVal = self.defRisingExhaustionGapVal
def setFallingContinuationGapVal(self, fallingContinuationGapVal):
self.fallingContinuationGapVal = fallingContinuationGapVal
def disableFallingContinuationGapVal(self):
self.fallingContinuationGapVal = 0
def enableFallingContinuationGapVal(self):
self.fallingContinuationGapVal = self.defFallingContinuationGapVal
"""Formacje swiecowe"""
#sygnal kupna
def setBull3Val(self, bull3Val):
self.bull3Val = bull3Val
def disableBull3Val(self):
self.bull3Val = 0
def enableBull3Val(self):
self.bull3Val = self.defBull3Val
def setMornigStarVal(self, mornigStarVal):
self.mornigStarVal = mornigStarVal
def disableMornigStarVal(self):
self.mornigStarVal = 0
def enableMornigStarVal(self):
self.mornigStarVal = self.defMornigStarVal
def setPiercingVal(self, piercingVal):
self.piercingVal = piercingVal
def disablePiercingVal(self):
self.piercingVal = 0
def enablePiercingVal(self):
self.piercingVal = self.defPiercingVal
#sygnal sprzedazy
def setBear3Val(self, bear3Val):
self.bear3Val = bear3Val
def disableBear3Val(self):
self.bear3Val = 0
def enableBear3Val(self):
self.bear3Val = self.defBear3Val
def setEveningStarVal(self, eveningStarVal):
self.eveningStarVal = eveningStarVal
def disableEveningStarVal(self):
self.eveningStarVal = 0
def enableEveningStarVal(self):
self.eveningStarVal = self.defEveningStarVal
def setDarkCloudVal(self, darkCloudVal):
self.darkCloudVal = darkCloudVal
def disableDarkCloudVal(self):
self.darkCloudVal = 0
def enableDarkCloudVal(self):
self.darkCloudVal = self.defDarkCloudVal
def resetCoefficients(self):
    """Restore every scoring coefficient to its stored default.

    Each tunable weight ``<name>`` has a default counterpart stored on the
    instance as ``def<Name>`` (same name with the first letter upper-cased);
    this method copies every default back onto its live attribute.
    """
    coefficient_names = (
        "positiveSignal", "negativeSignal", "trendVal",
        "headAndShouldersVal", "tripleTopVal", "risingWedgeVal",
        "fallingTriangleVal", "reversedHeadAndShouldersVal",
        "tripleBottomVal", "fallingWedgeVal", "risingTriangleVal",
        "symetricTriangleVal", "rectangleVal", "oscilatorsVal",
        "newHighNewLowVal", "bollignerVal", "momentumVal", "rocVal",
        "cciVal", "rsiVal", "williamsVal", "risingBreakawayGapVal",
        "risingContinuationGapVal", "fallingExhaustionGapVal",
        "fallingBreakawayGapVal", "risingExhaustionGapVal",
        "fallingContinuationGapVal", "bull3Val", "mornigStarVal",
        "piercingVal", "bear3Val", "eveningStarVal", "darkCloudVal",
        "flagPennantVal",
    )
    for name in coefficient_names:
        default_name = "def" + name[0].upper() + name[1:]
        setattr(self, name, getattr(self, default_name))
def analyze(self):
resultText = ''
overallScore = 0
print "The program will now analyse trends, selected chart patterns, candle patterns, indicators, oscillators and gaps\n"
resultText = resultText + "The program will now analyse trends, selected chart patterns, candle patterns, indicators, oscillators and gaps\n"
print " (+) -> positive\n\t(0) -> neutral\n\t(-) -> negative signal\n"
resultText = resultText + " (+) -> positive\n (0) -> neutral\n (-) -> negative signal\n"
overallScore += self.trendVal * trend.optimizedTrend(self.data.close)
resultText = resultText + "\nResults of trend analysis\n"
if overallScore > 0:
print " (+) the long term trend is rising\n"
resultText = resultText + " (+) the long term trend is rising\n"
elif overallScore < 0:
print " (-) the long term trend is falling\n"
resultText = resultText + " (-) the long term trend is falling\n"
else:
print " (0) the long term trend is neutral\n"
resultText = resultText + " (0) the long-term trend is neutral\n"
print "\nThe program has identified the following chart patterns:\n"
resultText = resultText + "\nThe program has identified the following chart patterns:\n"
form = trend.lookForHeadAndShoulders(self.data.close, self.data.volume, 1)
overallScore += form[0] * self.headAndShouldersVal
if form[0] * self.headAndShouldersVal != 0:
print " (-) head and shoulders\n" + self.data.date[int(form[1][0])].strftime("%Y-%m-%d")+self.data.date[int(form[1][2])].strftime("%Y-%m-%d")
resultText = resultText + " (-) head and shoulders " + self.data.date[int(form[1][0])].strftime("%Y-%m-%d") + " - " + self.data.date[int(form[1][2])].strftime("%Y-%m-%d") + "\n"
form = trend.lookForReversedHeadAndShoulders(self.data.close, self.data.volume, 1)
overallScore += form[0] * self.reversedHeadAndShouldersVal
if form[0] * self.reversedHeadAndShouldersVal != 0:
print " (+) reversed head and shoulders\n"
resultText = resultText + " (+) reversed head and shoulders " + self.data.date[int(form[1][0])].strftime("%Y-%m-%d") + " - " + self.data.date[int(form[1][2])].strftime("%Y-%m-%d") + "\n"
form = trend.lookForTripleTop(self.data.close, self.data.volume, 1)
overallScore += form[0] * self.tripleTopVal
if form[0] * self.tripleTopVal != 0:
print " (-) triple top\n"
resultText = resultText + " (-) triple top " + self.data.date[int(form[1][0])].strftime("%Y-%m-%d") + " - " + self.data.date[int(form[1][2])].strftime("%Y-%m-%d") + "\n"
form = trend.lookForTripleBottom(self.data.close, self.data.volume, 1)
overallScore += form[0] * self.tripleBottomVal
if form[0] * self.tripleBottomVal != 0:
print " (+) triple bottom\n"
resultText = resultText + " (+) triple bottom " + self.data.date[int(form[1][0])].strftime("%Y-%m-%d") + " - " + self.data.date[int(form[1][2])].strftime("%Y-%m-%d") + "\n"
geometricFormations = trend.findGeometricFormations(self.data.close)
for formation in geometricFormations:
hasFound = 0
if formation != None:
if formation[0] == 'rect':
overallScore += self.rectangleVal * formation[3]
if self.rectangleVal * formation[3] > 0:
print " (+) rising rectangle\n"
resultText = resultText + " (+) rising rectangle "
hasFound = 1
elif self.rectangleVal * formation[3] < 0:
print " (-) falling rectangle\n"
resultText = resultText + " (-) falling rectangle "
hasFound = 1
elif formation[0] == 'symmetric_triangle':
overallScore += self.symetricTriangleVal * formation[3]
if self.symetricTriangleVal * formation[3] > 0:
print " (+) symmetric triangle - continuation of rising trend\n"
resultText = resultText + " (+) symmetric triangle - continuation of rising trend "
hasFound = 1
elif self.symetricTriangleVal * formation[3] < 0:
print " (-) symmetric triangle - continuation of falling trend\n"
resultText = resultText + " (-) symmetric triangle - continuation of falling trend "
hasFound = 1
elif formation[0] == 'falling_triangle':
overallScore += self.fallingTriangleVal * formation[3]
if self.fallingTriangleVal * formation[3] != 0:
print " (-) falling triangle\n"
resultText = resultText + " (-) falling triangle "
hasFound = 1
elif formation[0] == 'rising_triangle':
overallScore += self.risingTriangleVal * formation[3]
if self.risingTriangleVal * formation[3] != 0:
print " (+) rising triangle\n"
hasFound = 1
resultText = resultText + " (+) rising triangle "
elif formation[0] == 'rising_wedge':
overallScore += self.risingWedgeVal * formation[3]
if self.risingWedgeVal * formation[3] != 0:
print " (-) rising wedge\n"
resultText = resultText + " (-) rising wedge "
hasFound = 1
elif formation[0] == 'falling_wedge':
overallScore += self.fallingWedgeVal * formation[3]
if self.fallingWedgeVal * formation[3] != 0:
print " (+) falling wedge\n"
resultText = resultText + " (+) falling wedge "
hasFound = 1
if hasFound:
resultText = resultText + self.data.date[int(formation[1][0])].strftime("%Y-%m-%d") + " - " + self.data.date[int(formation[1][2])].strftime("%Y-%m-%d") + "\n"
flags = trend.findFlagsAndPennants(self.data.close,self.data.volume, self.data.high, self.data.low)
if flags != None:
overallScore += defFlagPennantVal * flags[1]
if flags[1] < 0:
print "(-) falling-trend flag/pennant"
resultText = resultText + "(-) falling-trend flag/pennant"
else:
print "(+) rising-trend flag/pennant"
resultText = resultText + "(+) rising-trend flag/pennant"
gaps = candles.findGaps(self.data.high,self.data.low,self.data.close)
for formation in gaps:
if formation != None:
if formation[0][0] == 'rising_breakaway_gap':
overallScore += self.risingBreakawayGapVal * formation[1]
if self.risingBreakawayGapVal * formation[1] != 0:
print " (+) rising breakaway gap\n"
elif formation[0][0] == 'rising_continuation_gap':
overallScore += self.risingContinuationGapVal * formation[1]
if self.risingContinuationGapVal * formation[1] != 0:
print " (+) rising continuation gap\n"
elif formation[0][0] == 'rising_exhaustion_gap':
overallScore += self.risingExhaustionGapVal * formation[1]
if self.risingExhaustionGapVal * formation[1] != 0:
print " (-) rising exhaustion gap\n"
elif formation[0][0] == 'falling_breakaway_gap':
overallScore += self.fallingBreakawayGapVal * formation[1]
if self.fallingBreakawayGapVal * formation[1] != 0:
print " (-) falling breakaway gap\n"
elif formation[0][0] == 'falling_continuation_gap':
overallScore += self.fallingContinuationGapVal * formation[1]
if self.fallingContinuationGapVal * formation[1] != 0:
print " (-) falling continuation gap\n"
elif formation[0][0] == 'falling_exhaustion_gap':
overallScore += self.fallingExhaustionGapVal * formation[1]
if self.fallingExhaustionGapVal * formation[1] != 0:
print " (+) falling exhaustion gap\n"
candleFormations = candles.findCandleFormations(self.data.open, self.data.high, self.data.low, self.data.close)
for formation in candleFormations:
if formation != None:
if formation[0][0] == 'bull3':
overallScore += bull3Val * formation[3]
if bull3Val * formation[3] != 0:
print " (+) triple bull candle pattern\n"
resultText = resultText + " (+) triple bull candle pattern\n"
elif formation[0][0] == 'morning_star':
overallScore += self.morningStarVal * formation[3]
if self.morningStarVal * formation[3] != 0:
print " (+) morning star candle pattern\n"
resultText = resultText + " (+) morning star candle pattern\n"
elif formation[0][0] == 'piercing':
overallScore += self.piercingVal * formation[3]
if self.piercingVal * formation[3] != 0:
print " (+) piercing candle pattern\n"
resultText = resultText + " (+) piercing candle pattern\n"
elif formation[0][0] == 'bear3':
overallScore += self.bear3Val * formation[3]
if bear3Val * formation[3] != 0:
print " (-) triple bear candle pattern\n"
resultText = resultText + " (-) triple bear candle pattern\n"
elif formation[0][0] == 'evening_star':
overallScore += self.eveningStarVal * formation[3]
if self.eveningStarVal * formation[3] != 0:
print " (-) evening star candle pattern\n"
resultText = resultText + " (-) evening star candle pattern\n"
elif formation[0][0] == 'dark_cloud':
overallScore += self.darkCloudVal * formation[3]
if self.darkCloudVal * formation[3] != 0:
print " (-) dark cloud candle pattern\n"
resultText = resultText + " (-) dark cloud candle pattern\n"
# score, oscilatorsAndIndicators = oscilators.oscillatorStrategy(array(self.data.close), array(self.data.high), array(self.data.low), min(10, len(self.data.close)))
# overallScore += self.newHighNewLowVal * oscilatorsAndIndicators[0]
# if self.newHighNewLowVal * oscilatorsAndIndicators[0] > 0:
# print " (+) new high - new low index\n"
# elif self.newHighNewLowVal * oscilatorsAndIndicators[0] < 0:
# print " (-) new high - new low index\n"
#
# overallScore += self.bollignerVal * oscilatorsAndIndicators[1]
# if self.bollignerVal * oscilatorsAndIndicators > 0:
# print " (+) bolligner bounds\n"
# elif self.bollignerVal * oscilatorsAndIndicators < 0:
# print " (-) bolligner bounds\n"
#
# overallScore += self.momentumVal * oscilatorsAndIndicators[2]
# if self.momentumVal * oscilatorsAndIndicators > 0:
# print " (+) momentum oscillator\n"
# elif self.momentumVal * oscilatorsAndIndicators < 0:
# print " (-) momentum oscillator\n"
#
# overallScore += self.rocVal * oscilatorsAndIndicators[3]
# if self.rocVal * oscilatorsAndIndicators[3] > 0:
# print " (+) roc oscillator\n"
# elif self.rocVal * oscilatorsAndIndicators[3] < 0:
# print " (-) roc oscillator\n"
#
# overallScore += self.cciVal * oscilatorsAndIndicators[4]
# if self.cciVal * oscilatorsAndIndicators[4] > 0:
# print " (+) cci oscillator\n"
# elif self.cciVal * oscilatorsAndIndicators[4] < 0:
# print " (-) cci oscillator\n"
#
# overallScore += self.rsiVal * oscilatorsAndIndicators[5]
# if self.rsiVal * oscilatorsAndIndicators[5] > 0:
# print " (+) rsi oscillator\n"
# elif self.rsiVal * oscilatorsAndIndicators[5] < 0:
# print " (-) rsi oscillator\n"
#
# overallScore += self.williamsVal * oscilatorsAndIndicators[6]
# if self.williamsVal * oscilatorsAndIndicators[6] > 0:
# print " (+) williams oscillator\n"
# elif self.williamsVal * oscilatorsAndIndicators[6] < 0:
# print " (-) williams oscillator\n"
print "\n Overall score: ",overallScore, "\n"
resultText = resultText + "\n Overall score: "+str(overallScore)+ "\n\n"
if overallScore > self.positiveSignal:
print "The technical analysis has generated a positive signal, however a fundamental analysis should also be considered\n"
resultText = resultText + "The technical analysis has generated a positive signal, however a fundamental analysis should also be considered\n"
elif overallScore < self.negativeSignal:
print "The technical analysis has generated a negative signal. If you own actives, you should consider selling them. However, a fundamental analysis should also be taken into account\n"
resultText = resultText + "The technical analysis has generated a negative signal. If you own actives, you should consider selling them. However, a fundamental analysis should also be taken into account\n"
else:
print "The technical analysis has generated a neutral signal\n"
resultText = resultText + "The technical analysis has generated a neutral signal\n"
print "\n\nNO RESPONSIBILITY is taken by the authors of this software, for the accuracy of any predictions or the loss of any finance by anyone using this program. You may use this software at your own risk.\n"
resultText = resultText + "\n\nNO RESPONSIBILITY is taken by the authors of this software, for the accuracy of any predictions or the loss of any finance by anyone using this program. You may use this software at your own risk.\n"
return resultText
|
424859
|
from parade.cmdline import execute
class TestCmdline(object):
    """Smoke tests for the parade command-line entry point."""

    def test_no_cmd(self):
        """Invoking execute() without arguments should exit cleanly (code 0)."""
        exit_code = execute()
        assert exit_code == 0
424883
|
import torch
import torchtestcase
import unittest
from survae.tests.nn import ModuleTest
from survae.nn.layers import GELU, Swish, ConcatReLU, ConcatELU, GatedTanhUnit
class GELUTest(ModuleTest):
    """Contract checks for the GELU activation layer."""

    def test_layer_is_well_behaved(self):
        # A small random batch (10 samples, 6 features) exercises the layer.
        inputs = torch.randn(10, 6)
        self.assert_layer_is_well_behaved(GELU(), inputs)
class SwishTest(ModuleTest):
    """Contract checks for the Swish activation layer."""

    def test_layer_is_well_behaved(self):
        # A small random batch (10 samples, 6 features) exercises the layer.
        inputs = torch.randn(10, 6)
        self.assert_layer_is_well_behaved(Swish(), inputs)
class ConcatReLUTest(ModuleTest):
    """Contract and output-shape checks for the ConcatReLU layer."""

    def test_layer_is_well_behaved(self):
        module = ConcatReLU()
        inputs = torch.randn(10, 6)
        self.assert_layer_is_well_behaved(module, inputs)
        # The concatenation doubles the trailing feature dimension: 6 -> 12.
        outputs = module(inputs)
        self.assertEqual(outputs.shape, (10, 12))
class ConcatELUTest(ModuleTest):
    """Contract and output-shape checks for the ConcatELU layer."""

    def test_layer_is_well_behaved(self):
        module = ConcatELU()
        inputs = torch.randn(10, 6)
        self.assert_layer_is_well_behaved(module, inputs)
        # The concatenation doubles the trailing feature dimension: 6 -> 12.
        outputs = module(inputs)
        self.assertEqual(outputs.shape, (10, 12))
class GatedTanhUnitTest(ModuleTest):
    """Contract and output-shape checks for the GatedTanhUnit layer."""

    def test_layer_is_well_behaved(self):
        module = GatedTanhUnit()
        inputs = torch.randn(10, 6)
        self.assert_layer_is_well_behaved(module, inputs)
        # The gating halves the trailing feature dimension: 6 -> 3.
        outputs = module(inputs)
        self.assertEqual(outputs.shape, (10, 3))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
424929
|
import torch.nn as nn
import torch
from relogic.logickit.base.utils import log
from typing import Tuple
from relogic.logickit.modules.input_variational_dropout import InputVariationalDropout
from relogic.logickit.modules.bilinear_matrix_attention import BilinearMatrixAttention
import copy
import numpy
from relogic.logickit.utils.utils import get_range_vector, get_device_of, masked_log_softmax
from relogic.logickit.modules.chi_liu_edmonds import decode_mst
import torch.nn.functional as F
class BiaffineDepModule(nn.Module):
    """Biaffine attention head for dependency parsing.

    Scores arcs with a bilinear matrix attention over MLP-projected token
    representations and scores dependency labels with a bilinear layer.
    During training heads are decoded greedily; at evaluation time a
    maximum-spanning-tree decode is used (see ``forward``).
    """

    def __init__(self, config, task_name, n_classes):
        # config: task/model configuration object (reads hidden_size, dropout,
        # dep_parsing_mlp_dim, and optionally sequence_labeling_use_cls).
        # n_classes: number of dependency label types.
        super(BiaffineDepModule, self).__init__()
        self.config = config
        self.task_name = task_name
        self.n_classes = n_classes
        if hasattr(self.config, "sequence_labeling_use_cls") and self.config.sequence_labeling_use_cls:
            # NOTE(review): self.mul is set here but never referenced in this
            # module -- confirm whether external code depends on it.
            self.mul = 2
            log("Use CLS in dependency parsing")
        else:
            self.mul = 1
        encoder_dim = config.hidden_size
        arc_representation_dim = tag_representation_dim = config.dep_parsing_mlp_dim
        # self.pos_tag_embedding = nn.Embedding()
        # Learned ROOT ("sentinel") embedding prepended to every sequence in
        # forward().
        self.head_sentinel = torch.nn.Parameter(torch.randn([1, 1, config.hidden_size]))
        # TODO: Need to check the dropout attribute.
        # TODO: How to design task specific parameter configuration
        self.dropout = InputVariationalDropout(config.dropout)
        # Separate MLP projections for a token acting as head vs. as child,
        # for arc scoring and for label scoring respectively.
        self.head_arc_feedforward = nn.Sequential(
            nn.Linear(encoder_dim, arc_representation_dim),
            nn.ELU())
        self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward)
        self.head_tag_feedforward = nn.Sequential(
            nn.Linear(encoder_dim, tag_representation_dim),
            nn.ELU())
        self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward)
        self.arc_attention = BilinearMatrixAttention(
            matrix_1_dim=arc_representation_dim,
            matrix_2_dim=arc_representation_dim,
            use_input_biases=True)
        self.tag_bilinear = nn.modules.Bilinear(
            tag_representation_dim, tag_representation_dim, self.n_classes)

    def forward(self, *input, **kwargs):
        """Score arcs/labels, decode heads and tags, and compute the loss.

        Expects keyword arguments: ``features`` (encoder output),
        ``input_head`` (1 marks real tokens), and optionally ``arcs_ids`` /
        ``label_ids`` as gold heads and labels.
        """
        features = kwargs.pop("features")
        mask = (kwargs.pop("input_head") == 1).long()
        head_indices = kwargs.pop("arcs_ids", None)
        head_tags = kwargs.pop("label_ids", None)
        batch_size = features.size(0)
        encoding_dim = features.size(2)
        # Prepend the learned ROOT sentinel at position 0 and extend the
        # mask / gold annotations accordingly.
        head_sentinel = self.head_sentinel.expand(batch_size, 1, encoding_dim)
        encoded_text = torch.cat([head_sentinel, features], dim=1)
        mask = torch.cat([mask.new_ones(batch_size, 1), mask], dim=1)
        float_mask = mask.float()
        if head_indices is not None:
            head_indices = torch.cat([head_indices.new_zeros(batch_size, 1), head_indices], 1)
        if head_tags is not None:
            head_tags = torch.cat([head_tags.new_zeros(batch_size, 1), head_tags], 1)
        encoded_text = self.dropout(encoded_text)
        head_arc_representation = self.dropout(self.head_arc_feedforward(encoded_text))
        child_arc_representation = self.dropout(self.child_arc_feedforward(encoded_text))
        head_tag_representation = self.dropout(self.head_tag_feedforward(encoded_text))
        child_tag_representation = self.dropout(self.child_tag_feedforward(encoded_text))
        attended_arcs = self.arc_attention(head_arc_representation, child_arc_representation)
        # shape (batch_size, sequence_length, sequence_length)
        # Mask out padded positions with a large negative score on both axes.
        minus_inf = -1e8
        minus_mask = (1 - float_mask) * minus_inf
        attended_arcs = attended_arcs + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
        # Greedy decode while training (cheap); MST decode at evaluation time
        # (guarantees a tree).
        if self.training:
            predicted_heads, predicted_head_tags = self.greedy_decode(
                head_tag_representation, child_tag_representation, attended_arcs, mask)
        else:
            predicted_heads, predicted_head_tags = self.mst_decode(
                head_tag_representation, child_tag_representation, attended_arcs, mask
            )
        # Loss against gold heads/tags when provided, otherwise against the
        # model's own predictions (self-scoring for unlabeled data).
        if head_indices is not None and head_tags is not None:
            arc_nll, tag_nll = self.construct_loss(
                head_tag_representation=head_tag_representation,
                child_tag_representation=child_tag_representation,
                attended_arcs=attended_arcs,
                head_indices=head_indices,
                head_tags=head_tags,
                mask=mask,
            )
        else:
            arc_nll, tag_nll = self.construct_loss(
                head_tag_representation=head_tag_representation,
                child_tag_representation=child_tag_representation,
                attended_arcs=attended_arcs,
                head_indices=predicted_heads.long(),
                head_tags=predicted_head_tags.long(),
                mask=mask,
            )
        loss = arc_nll + tag_nll
        # if head_indices is not None and head_tags is not None:
        #     evaluation_mask = self._get_mask_for_eval(mask[:, 1:], pos_tags)
        #     # We calculate attatchment scores for the whole sentence
        #     # but excluding the symbolic ROOT token at the start,
        #     # which is why we start from the second element in the sequence.
        #     self._attachment_scores(
        #         predicted_heads[:, 1:],
        #         predicted_head_tags[:, 1:],
        #         head_indices,
        #         head_tags,
        #         evaluation_mask,
        #     )
        output_dict = {
            "heads": predicted_heads,
            "head_tags": predicted_head_tags,
            "arc_loss": arc_nll,
            "tag_loss": tag_nll,
            "loss": loss,
            "mask": mask}
        return output_dict

    def construct_loss(
        self,
        head_tag_representation: torch.Tensor,
        child_tag_representation: torch.Tensor,
        attended_arcs: torch.Tensor,
        head_indices: torch.Tensor,
        head_tags: torch.Tensor,
        mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Computes the arc and tag loss for a sequence given gold head indices and tags.
        Parameters
        ----------
        head_tag_representation : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length, tag_representation_dim),
            which will be used to generate predictions for the dependency tags
            for the given arcs.
        child_tag_representation : ``torch.Tensor``, required
            A tensor of shape (batch_size, sequence_length, tag_representation_dim),
            which will be used to generate predictions for the dependency tags
            for the given arcs.
        attended_arcs : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
            a distribution over attachments of a given word to all other words.
        head_indices : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length).
            The indices of the heads for every word.
        head_tags : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length).
            The dependency labels of the heads for every word.
        mask : ``torch.Tensor``, required.
            A mask of shape (batch_size, sequence_length), denoting unpadded
            elements in the sequence.
        Returns
        -------
        arc_nll : ``torch.Tensor``, required.
            The negative log likelihood from the arc loss.
        tag_nll : ``torch.Tensor``, required.
            The negative log likelihood from the arc tag loss.
        """
        float_mask = mask.float()
        batch_size, sequence_length, _ = attended_arcs.size()
        # shape (batch_size, 1)
        range_vector = get_range_vector(batch_size, get_device_of(attended_arcs)).unsqueeze(1)
        # shape (batch_size, sequence_length, sequence_length)
        normalised_arc_logits = (
            masked_log_softmax(attended_arcs, mask)
            * float_mask.unsqueeze(2)
            * float_mask.unsqueeze(1)
        )
        # shape (batch_size, sequence_length, num_head_tags)
        head_tag_logits = self.get_head_tags(
            head_tag_representation, child_tag_representation, head_indices
        )
        normalised_head_tag_logits = masked_log_softmax(
            head_tag_logits, mask.unsqueeze(-1)
        ) * float_mask.unsqueeze(-1)
        # index matrix with shape (batch, sequence_length)
        timestep_index = get_range_vector(sequence_length, get_device_of(attended_arcs))
        child_index = (
            timestep_index.view(1, sequence_length).expand(batch_size, sequence_length).long()
        )
        # shape (batch_size, sequence_length)
        # Advanced indexing picks the log-probability of each word's gold
        # head (and gold tag) in one gather.
        arc_loss = normalised_arc_logits[range_vector, child_index, head_indices]
        tag_loss = normalised_head_tag_logits[range_vector, child_index, head_tags]
        # We don't care about predictions for the symbolic ROOT token's head,
        # so we remove it from the loss.
        arc_loss = arc_loss[:, 1:]
        tag_loss = tag_loss[:, 1:]
        # The number of valid positions is equal to the number of unmasked elements minus
        # 1 per sequence in the batch, to account for the symbolic HEAD token.
        valid_positions = mask.sum() - batch_size
        arc_nll = -arc_loss.sum() / valid_positions.float()
        tag_nll = -tag_loss.sum() / valid_positions.float()
        return arc_nll, tag_nll

    def greedy_decode(
        self,
        head_tag_representation: torch.Tensor,
        child_tag_representation: torch.Tensor,
        attended_arcs: torch.Tensor,
        mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Decodes the head and head tag predictions by decoding the unlabeled arcs
        independently for each word and then again, predicting the head tags of
        these greedily chosen arcs independently. Note that this method of decoding
        is not guaranteed to produce trees (i.e. there maybe be multiple roots,
        or cycles when children are attached to their parents).
        Parameters
        ----------
        head_tag_representation : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length, tag_representation_dim),
            which will be used to generate predictions for the dependency tags
            for the given arcs.
        child_tag_representation : ``torch.Tensor``, required
            A tensor of shape (batch_size, sequence_length, tag_representation_dim),
            which will be used to generate predictions for the dependency tags
            for the given arcs.
        attended_arcs : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
            a distribution over attachments of a given word to all other words.
        Returns
        -------
        heads : ``torch.Tensor``
            A tensor of shape (batch_size, sequence_length) representing the
            greedily decoded heads of each word.
        head_tags : ``torch.Tensor``
            A tensor of shape (batch_size, sequence_length) representing the
            dependency tags of the greedily decoded heads of each word.
        """
        # Forbid self-loops by putting -inf on the diagonal.
        attended_arcs = attended_arcs + torch.diag(
            attended_arcs.new(mask.size(1)).fill_(-numpy.inf))
        # Compute the heads greedily.
        # shape (batch_size, sequence_length)
        _, heads = attended_arcs.max(dim=2)
        # Given the greedily predicted heads, decode their dependency tags.
        # shape (batch_size, sequence_length, num_head_tags)
        head_tag_logits = self.get_head_tags(
            head_tag_representation, child_tag_representation, heads
        )
        _, head_tags = head_tag_logits.max(dim=2)
        return heads, head_tags

    def get_head_tags(
        self,
        head_tag_representation: torch.Tensor,
        child_tag_representation: torch.Tensor,
        head_indices: torch.Tensor) -> torch.Tensor:
        """
        Decodes the head tags given the head and child tag representations
        and a tensor of head indices to compute tags for. Note that these are
        either gold or predicted heads, depending on whether this function is
        being called to compute the loss, or if it's being called during inference.
        Parameters
        ----------
        head_tag_representation : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length, tag_representation_dim),
            which will be used to generate predictions for the dependency tags
            for the given arcs.
        child_tag_representation : ``torch.Tensor``, required
            A tensor of shape (batch_size, sequence_length, tag_representation_dim),
            which will be used to generate predictions for the dependency tags
            for the given arcs.
        head_indices : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length). The indices of the heads
            for every word.
        Returns
        -------
        head_tag_logits : ``torch.Tensor``
            A tensor of shape (batch_size, sequence_length, num_head_tags),
            representing logits for predicting a distribution over tags
            for each arc.
        """
        batch_size = head_tag_representation.size(0)
        # shape (batch_size,)
        range_vector = get_range_vector(
            batch_size, get_device_of(head_tag_representation)
        ).unsqueeze(1)
        # This next statement is quite a complex piece of indexing, which you really
        # need to read the docs to understand. See here:
        # https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#advanced-indexing
        # In effect, we are selecting the indices corresponding to the heads of each word from the
        # sequence length dimension for each element in the batch.
        # shape (batch_size, sequence_length, tag_representation_dim)
        selected_head_tag_representations = head_tag_representation[range_vector, head_indices]
        selected_head_tag_representations = selected_head_tag_representations.contiguous()
        # shape (batch_size, sequence_length, num_head_tags)
        head_tag_logits = self.tag_bilinear(
            selected_head_tag_representations, child_tag_representation
        )
        return head_tag_logits

    def mst_decode(
        self,
        head_tag_representation: torch.Tensor,
        child_tag_representation: torch.Tensor,
        attended_arcs: torch.Tensor,
        mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Decodes the head and head tag predictions using the Edmonds' Algorithm
        for finding minimum spanning trees on directed graphs. Nodes in the
        graph are the words in the sentence, and between each pair of nodes,
        there is an edge in each direction, where the weight of the edge corresponds
        to the most likely dependency label probability for that arc. The MST is
        then generated from this directed graph.
        Parameters
        ----------
        head_tag_representation : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length, tag_representation_dim),
            which will be used to generate predictions for the dependency tags
            for the given arcs.
        child_tag_representation : ``torch.Tensor``, required
            A tensor of shape (batch_size, sequence_length, tag_representation_dim),
            which will be used to generate predictions for the dependency tags
            for the given arcs.
        attended_arcs : ``torch.Tensor``, required.
            A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
            a distribution over attachments of a given word to all other words.
        Returns
        -------
        heads : ``torch.Tensor``
            A tensor of shape (batch_size, sequence_length) representing the
            greedily decoded heads of each word.
        head_tags : ``torch.Tensor``
            A tensor of shape (batch_size, sequence_length) representing the
            dependency tags of the optimally decoded heads of each word.
        """
        batch_size, sequence_length, tag_representation_dim = head_tag_representation.size()
        lengths = mask.data.sum(dim=1).long().cpu().numpy()
        # Expand head/child representations to score every (head, child) pair.
        expanded_shape = [batch_size, sequence_length, sequence_length, tag_representation_dim]
        head_tag_representation = head_tag_representation.unsqueeze(2)
        head_tag_representation = head_tag_representation.expand(*expanded_shape).contiguous()
        child_tag_representation = child_tag_representation.unsqueeze(1)
        child_tag_representation = child_tag_representation.expand(*expanded_shape).contiguous()
        # Shape (batch_size, sequence_length, sequence_length, num_head_tags)
        pairwise_head_logits = self.tag_bilinear(head_tag_representation, child_tag_representation)
        # Note that this log_softmax is over the tag dimension, and we don't consider pairs
        # of tags which are invalid (e.g are a pair which includes a padded element) anyway below.
        # Shape (batch, num_labels,sequence_length, sequence_length)
        normalized_pairwise_head_logits = F.log_softmax(pairwise_head_logits, dim=3).permute(
            0, 3, 1, 2
        )
        # Mask padded tokens, because we only want to consider actual words as heads.
        minus_inf = -1e8
        minus_mask = (1 - mask.float()) * minus_inf
        attended_arcs = attended_arcs + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
        # Shape (batch_size, sequence_length, sequence_length)
        normalized_arc_logits = F.log_softmax(attended_arcs, dim=2).transpose(1, 2)
        # Shape (batch_size, num_head_tags, sequence_length, sequence_length)
        # This energy tensor expresses the following relation:
        # energy[i,j] = "Score that i is the head of j". In this
        # case, we have heads pointing to their children.
        batch_energy = torch.exp(
            normalized_arc_logits.unsqueeze(1) + normalized_pairwise_head_logits
        )
        return self._run_mst_decoding(batch_energy, lengths)

    @staticmethod
    def _run_mst_decoding(
        batch_energy: torch.Tensor, lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Runs the MST decoder per sentence on CPU and re-batches the results.
        heads = []
        head_tags = []
        for energy, length in zip(batch_energy.detach().cpu(), lengths):
            # Collapse the label dimension: keep the best label score/index
            # per (head, child) pair.
            scores, tag_ids = energy.max(dim=0)
            # Although we need to include the root node so that the MST includes it,
            # we do not want any word to be the parent of the root node.
            # Here, we enforce this by setting the scores for all word -> ROOT edges
            # edges to be 0.
            scores[0, :] = 0
            # Decode the heads. Because we modify the scores to prevent
            # adding in word -> ROOT edges, we need to find the labels ourselves.
            instance_heads, _ = decode_mst(scores.numpy(), length, has_labels=False)
            # Find the labels which correspond to the edges in the max spanning tree.
            instance_head_tags = []
            for child, parent in enumerate(instance_heads):
                instance_head_tags.append(tag_ids[parent, child].item())
            # We don't care what the head or tag is for the root token, but by default it's
            # not necesarily the same in the batched vs unbatched case, which is annoying.
            # Here we'll just set them to zero.
            instance_heads[0] = 0
            instance_head_tags[0] = 0
            heads.append(instance_heads)
            head_tags.append(instance_head_tags)
        return torch.from_numpy(numpy.stack(heads)), torch.from_numpy(numpy.stack(head_tags))

    def _get_mask_for_eval(
        self, mask: torch.LongTensor, pos_tags: torch.LongTensor
    ) -> torch.LongTensor:
        """
        Dependency evaluation excludes words that are punctuation.
        Here, we create a new mask to exclude word indices which
        have a "punctuation-like" part of speech tag.
        Parameters
        ----------
        mask : ``torch.LongTensor``, required.
            The original mask.
        pos_tags : ``torch.LongTensor``, required.
            The pos tags for the sequence.
        Returns
        -------
        A new mask, where any indices equal to labels
        we should be ignoring are masked.
        """
        # NOTE(review): self._pos_to_ignore is never assigned in this class,
        # so calling this method would raise AttributeError -- confirm it is
        # set by a subclass or external setup. (Its only caller here is the
        # commented-out block in forward().)
        new_mask = mask.detach()
        for label in self._pos_to_ignore:
            label_mask = pos_tags.eq(label).long()
            new_mask = new_mask * (1 - label_mask)
        return new_mask
|
424944
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
from System.Collections.Generic import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.GeometryConversion)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
# Dynamo node: create one RevisionCloud on the given view per sublist of
# curves. IN[0] = nested curve lists, IN[1] = revision, IN[2] = view.
doc = DocumentManager.Instance.CurrentDBDocument
nestedcurves = IN[0]
revision = UnwrapElement(IN[1])
view = UnwrapElement(IN[2])
elementlist = []
TransactionManager.Instance.EnsureInTransaction(doc)
for curve_group in nestedcurves:
	# Convert Dynamo geometry to Revit API curves, then wrap in a .NET list.
	revit_curves = List[Curve]([c.ToRevitType() for c in curve_group])
	cloud = RevisionCloud.Create(doc, view, revision.Id, revit_curves)
	elementlist.append(cloud)
TransactionManager.Instance.TransactionTaskDone()
OUT = elementlist
|
424971
|
from django.conf.urls import include, url
from django.views.generic import RedirectView
from django.contrib import admin
# Register every app's ModelAdmin classes with the admin site.
admin.autodiscover()
# URL routes for the progressbarupload test project. Order matters: Django
# uses the first matching pattern.
urlpatterns = [
    url(r'^testapp/', include('testmain.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # '/?' makes the trailing slash optional for the upload endpoints.
    url(r'^progressbarupload/?', include('progressbarupload.urls')),
    # Root redirects to the URL named 'test_form'.
    url(r'^$', RedirectView.as_view(pattern_name='test_form'))
]
|
424991
|
import numpy as np
import pytest
import tensorflow as tf
from tfsnippet.ops import pixelcnn_2d_sample, convert_to_tensor_and_cast
class PixelCNN2DSampleTestCase(tf.test.TestCase):
    """Graph-mode (TF1-style) tests for ``pixelcnn_2d_sample``."""
    def test_pixelcnn_2d_sample(self):
        """Sampling with a deterministic ``f`` must equal the closed form.

        ``f`` returns a constant offset plus the coordinate grid, so the
        expected result of a full sweep can be computed directly and compared
        against the sampler's output, in both NHWC and NCHW layouts.
        """
        height, width = 31, 32
        # Offsets are multiples of 10000, so the grid values must stay below
        # that to keep the expected values unambiguous.
        self.assertLess(height * width, 10000)
        def make_x(channels_last=True):
            # A [1, H, W, 1] (or [1, 1, H, W]) grid of unique int32 values.
            x = tf.range(height * width, dtype=tf.int32)
            if channels_last:
                x = tf.reshape(x, [1, height, width, 1])
            else:
                x = tf.reshape(x, [1, 1, height, width])
            return x
        with self.test_session() as sess:
            # test static args
            def f(i, inputs):
                # i is 0-based, so iteration i contributes (i + 1) * 10000.
                offset = convert_to_tensor_and_cast((i + 1) * 10000,
                                                    dtype=tf.int32)
                return [offset + make_x()]
            x = make_x()
            # Each pixel at flat index x gets offset (x + 1) * 10000 because
            # it is finalized on the x-th sweep step.
            ans = sess.run(x + (x + 1) * 10000)
            y = pixelcnn_2d_sample(f, [x], height, width, channels_last=True)[0]
            np.testing.assert_equal(sess.run(y), ans)
            # test dynamic args
            def f(i, inputs):
                offset = convert_to_tensor_and_cast((i + 1) * 10000,
                                                    dtype=tf.int32)
                return [offset + make_x(channels_last=False)]
            x = make_x(channels_last=False)
            # mask is 1 where pixels lie outside [start, end) and so must be
            # left untouched by the sampler: first 10 and last 100 positions.
            mask = tf.reshape(
                tf.concat([tf.ones([10], dtype=tf.int32),
                           tf.zeros([height * width - 110], dtype=tf.int32),
                           tf.ones([100], dtype=tf.int32)],
                          axis=0),
                [1, 1, height, width]
            )
            ans = sess.run(x * mask + (x + (x + 1) * 10000) * (1 - mask))
            # Feed height/width as placeholders to exercise the dynamic-shape
            # code path.
            height_t = tf.placeholder(dtype=tf.int32, shape=())
            width_t = tf.placeholder(dtype=tf.int32, shape=())
            y = pixelcnn_2d_sample(
                f, [x], height_t, width_t,
                start=tf.constant(10),
                end=tf.constant(height * width - 100),
                channels_last=False
            )[0]
            np.testing.assert_equal(
                sess.run(y, feed_dict={height_t: height, width_t: width}),
                ans
            )
    def test_errors(self):
        """Argument validation: empty inputs, shape/length/dtype mismatches."""
        height, width = 31, 32
        def fn(i, inputs):
            return inputs
        with pytest.raises(ValueError, match='`inputs` must not be empty'):
            _ = pixelcnn_2d_sample(fn, [], height, width)
        with pytest.raises(ValueError,
                           match=r'The shape of `inputs\[1\]` is invalid'):
            # Second tensor has an extra leading dimension.
            inputs = [tf.zeros([1, height, width, 1]),
                      tf.zeros([2, 1, height, width, 1])]
            _ = pixelcnn_2d_sample(fn, inputs, height, width)
        def fn(i, inputs):
            return [inputs[0]]
        with pytest.raises(ValueError,
                           match='The length of outputs != inputs: 1 vs 2'):
            inputs = [tf.zeros([1, height, width, 1]),
                      tf.zeros([1, height, width, 1])]
            _ = pixelcnn_2d_sample(fn, inputs, height, width)
        def fn(i, inputs):
            # Returns float64 where the input was float32.
            return [tf.cast(inputs[0], dtype=tf.float64)]
        with pytest.raises(TypeError,
                           match=r'`outputs\[0\].dtype` != `inputs\[0\].dtype`'
                                 r': .* vs .*'):
            inputs = [tf.zeros([1, height, width, 1], dtype=tf.float32)]
            _ = pixelcnn_2d_sample(fn, inputs, height, width)
|
424997
|
import sys
import reedsolo
# GF(2^8) with the QR-code generator polynomial 0x11d.
reedsolo.init_tables(0x11d)
qr_bytes = '''
01000001
10010110
11010110
00010110
01110111
00000110
10010110
01010111
10110110
????????
????????
????????
????????
????????
????????
????????
????????
01000011
????????
00010101
????????
10111110
????????
01010001
????????
11111000
01011000
????????
????????
????????
10100110
????????
????????
00001101
????1011
'''.split()
qr_bytes_2 = '''
????????
????????
????????
????????
????????
????????
????????
????????
00100111
11010000
11101100
00010001
11101100
00010001
11101100
00010001
11101100
0000????
????????
????????
????????
01101000
????????
01100000
????????
10011011
10100000
????????
10110100
00101000
????????
????????
01111111
????????
01001110
'''.split()
def _prepare_block(bit_strings):
    """Turn 8-bit binary strings into a codeword buffer plus erasure positions.

    Any string containing '?' marks an unknown byte: its index is recorded
    as an erasure and a 0 placeholder is appended so that Reed-Solomon
    erasure correction can recover its value.
    """
    codeword = bytearray()
    erasures = []
    for index, bits in enumerate(bit_strings):
        if '?' in bits:
            erasures.append(index)
            codeword.append(0)
        else:
            codeword.append(int(bits, 2))
    return codeword, erasures
def _correct_and_print(bit_strings, nsym=18):
    """Erasure-correct one RS block (``nsym`` ECC bytes) and print the
    recovered message bytes, one 8-bit binary string per line.

    Returns the (message, ecc) pair from ``reedsolo.rs_correct_msg``.
    """
    codeword, erasures = _prepare_block(bit_strings)
    message, ecc = reedsolo.rs_correct_msg(codeword, nsym, erase_pos=erasures)
    for byte in message:
        print('{:08b}'.format(byte))
    return message, ecc
# Decode both halves of the QR payload. The two loops previously duplicated
# the same logic verbatim; it is now shared via the helpers above.
mes, ecc = _correct_and_print(qr_bytes)
mes2, ecc2 = _correct_and_print(qr_bytes_2)
|
425005
|
from mc import *
# Teleport the player to the surface: one block above the highest block in
# the player's current (x, z) column.
mc = Minecraft()
tile = mc.player.getTilePos()
surface_y = mc.getHeight(tile.x, tile.z) + 1
mc.player.setPos(tile.x, surface_y, tile.z)
|
425014
|
import psycopg2
def test_pg_server(pg_server):
with psycopg2.connect(**pg_server['params']) as conn:
with conn.cursor() as cursor:
cursor.execute('SELECT version();')
|
425137
|
class Solution:
    """
    @param nums: a list of integers
    @param m: an integer
    @return: return a integer
    """
    def splitArray(self, nums, m):
        """Minimize the largest subarray sum when splitting nums into m parts.

        Binary-search the answer between max(nums) (always feasible given
        enough parts) and sum(nums) (a single part). Runs in
        O(n log(sum(nums))). Fixes the original's shadowing of the builtin
        ``sum`` inside the feasibility check.
        """
        def can_split(limit):
            # Greedy packing: count how many parts a cap of `limit` forces.
            parts = 1
            running = 0
            for num in nums:
                if running + num > limit:
                    parts += 1
                    if parts > m:
                        return False
                    running = num
                else:
                    running += num
            return True
        low, high = max(nums), sum(nums)
        while low < high:
            mid = (low + high) // 2
            if can_split(mid):
                high = mid  # feasible: try a smaller cap
            else:
                low = mid + 1  # infeasible: the cap must grow
        return low
|
425207
|
s="""
.a.fy
int x_a = 10
.b.fy
import:
.aa(*)
"""
from fython.test import *
shell('rm -rf a/ a.* b.* c.*')
writer(s)
# w = load('.b', force=1, release=1, verbose=0, run_main=0)
# print(open(w.module.url.fortran_path, 'r').read())
|
425214
|
data = (
((-0.010000, -0.090000), (-0.010000, -0.040000)),
((0.589999, -0.040000), (0.730000, -0.040000)),
((0.990000, -0.980000), (0.990000, -0.840000)),
((0.630000, -0.490000), (0.630000, -0.480000)),
((-0.300000, 0.160000), (-0.250000, 0.160000)),
((0.440000, -0.190000), (0.440000, -0.240000)),
((0.150000, -0.630000), (0.150000, -0.680000)),
((0.429999, -0.540000), (0.290000, -0.540000)),
((-0.690001, 0.200000), (-0.750000, 0.200000)),
((0.929999, -0.580000), (0.929999, -0.490000)),
((-0.900001, -0.440000), (-0.950001, -0.440000)),
((-0.840000, 0.840000), (-0.840000, 0.800000)),
((0.679999, 0.150000), (0.679999, 0.200000)),
((-0.250000, 0.540000), (-0.350000, 0.540000)),
((0.290000, 0.360000), (0.290000, 0.440000)),
((-0.840000, 0.000000), (-0.840000, -0.050000)),
((0.530000, 0.550000), (0.589999, 0.550000)),
((-0.010000, 0.740000), (-0.050000, 0.740000)),
((-0.100000, 0.010000), (-0.100000, -0.040000)),
((-0.590000, -0.430000), (-0.540000, -0.430000)),
((0.429999, 0.010000), (0.530000, 0.010000)),
((0.139999, 0.690000), (0.139999, 0.750000)),
((-0.700001, 0.650000), (-0.700001, 0.700000)),
((-0.590000, -0.730000), (-0.500000, -0.730000)),
((-0.500000, -0.930000), (-0.440001, -0.930000)),
((-0.110001, -0.690000), (-0.110001, -0.590000)),
((0.000000, 0.310000), (0.000000, 0.300000)),
((0.440000, 0.750000), (0.490000, 0.750000)),
((-0.550000, -0.690000), (-0.550000, -0.640000)),
((0.880000, 0.490000), (0.830000, 0.490000)),
((0.730000, 0.490000), (0.730000, 0.500000)),
((-0.390000, -0.980000), (-0.010000, -0.980000)),
((-0.260000, -0.190000), (-0.260000, -0.140000)),
((0.349999, -0.680000), (0.480000, -0.680000)),
((0.250000, 0.250000), (0.250000, 0.060000)),
((-0.360001, 0.850000), (-0.310000, 0.850000)),
((1.000000, 1.000000), (1.000000, -0.990000)),
((0.040000, 0.050000), (-0.010000, 0.050000)),
((0.740000, -0.980000), (0.990000, -0.980000)),
((-0.250000, 0.160000), (-0.250000, 0.050000)),
((-0.210000, 0.390000), (-0.210000, 0.440000)),
((0.540000, -0.440000), (0.530000, -0.440000)),
((0.290000, -0.540000), (0.290000, -0.480000)),
((-0.890000, 0.350000), (-0.890000, 0.210000)),
((0.929999, -0.490000), (0.889999, -0.490000)),
((-0.950001, -0.540000), (-0.950001, -0.530000)),
((0.190000, -0.200000), (0.150000, -0.200000)),
((0.040000, 0.940000), (-0.050000, 0.940000)),
((-0.740001, 0.690000), (-0.790000, 0.690000)),
((0.679999, 0.200000), (0.589999, 0.200000)),
((-0.400001, 0.490000), (-0.400001, 0.500000)),
((0.040000, -0.690000), (0.000000, -0.690000)),
((-0.690001, 0.100000), (-0.740001, 0.100000)),
((0.490000, 0.440000), (0.490000, 0.390000)),
((-0.010000, 0.500000), (-0.010000, 0.740000)),
((-0.110001, 0.010000), (-0.100000, 0.010000)),
((-0.740001, -0.300000), (-0.740001, -0.380000)),
((0.429999, 0.000000), (0.429999, 0.010000)),
((0.139999, 0.750000), (0.150000, 0.750000)),
((-0.500000, -0.730000), (-0.500000, -0.680000)),
((-0.440001, -0.930000), (-0.440001, -0.940000)),
((0.089999, -0.790000), (-0.010000, -0.790000)),
((0.000000, 0.300000), (-0.160001, 0.300000)),
((0.250000, 0.940000), (0.250000, 0.850000)),
((-0.550000, -0.640000), (-0.700001, -0.640000)),
((0.880000, 0.390000), (0.730000, 0.390000)),
((0.830000, 0.440000), (0.830000, 0.450000)),
((-0.300000, -0.790000), (-0.300000, -0.840000)),
((-0.310000, -0.190000), (-0.260000, -0.190000)),
((0.540000, -0.840000), (0.540000, -0.890000)),
((0.380000, 0.060000), (0.490000, 0.060000)),
((-0.360001, 0.840000), (-0.360001, 0.850000)),
((-0.060000, -0.100000), (-0.060000, -0.090000)),
((0.580000, -0.050000), (0.530000, -0.050000)),
((-0.250000, 0.050000), (-0.260000, 0.050000)),
((0.339999, -0.240000), (0.339999, -0.190000)),
((-0.800000, 0.350000), (-0.890000, 0.350000)),
((-0.950001, -0.430000), (-0.900001, -0.430000)),
((-0.160001, -0.690000), (-0.210000, -0.690000)),
((0.730000, 0.690000), (0.730000, 0.840000)),
((-0.800000, 0.590000), (-0.800000, 0.640000)),
((0.589999, 0.200000), (0.589999, 0.100000)),
((-0.350000, 0.490000), (-0.400001, 0.490000)),
((0.040000, -0.740000), (0.040000, -0.690000)),
((-0.850000, -0.050000), (-0.850000, 0.000000)),
((0.540000, 0.440000), (0.490000, 0.440000)),
((-0.060000, 0.500000), (-0.010000, 0.500000)),
((-0.050000, 0.000000), (-0.050000, -0.050000)),
((-0.740001, -0.380000), (-0.590000, -0.380000)),
((-0.590000, -0.350000), (-0.600000, -0.350000)),
((0.349999, -0.090000), (0.389999, -0.090000)),
((0.150000, 0.750000), (0.150000, 0.700000)),
((-0.750000, -0.680000), (-0.590000, -0.680000)),
((-0.790000, -0.980000), (-0.500000, -0.980000)),
((-0.200001, -0.590000), (-0.200001, -0.640000)),
((-0.150001, 0.390000), (-0.150001, 0.310000)),
((-0.840000, -0.530000), (-0.840000, -0.580000)),
((0.830000, 0.540000), (0.730000, 0.540000)),
((0.889999, 0.640000), (0.889999, 0.550000)),
((-0.940001, 0.790000), (-0.940001, 0.750000)),
((1.000000, 1.000000), (-1.000000, 1.000000)),
((0.740000, -0.840000), (0.540000, -0.840000)),
((0.490000, 0.060000), (0.490000, 0.050000)),
((-0.440001, 0.900000), (-0.390000, 0.900000)),
((-0.010000, 0.110000), (0.000000, 0.110000)),
((0.540000, 0.050000), (0.540000, 0.000000)),
((0.740000, 0.060000), (0.740000, -0.040000)),
((0.630000, -0.930000), (0.740000, -0.930000)),
((0.679999, -0.490000), (0.630000, -0.490000)),
((0.240000, -0.240000), (0.339999, -0.240000)),
((0.150000, -0.680000), (0.190000, -0.680000)),
((0.349999, -0.480000), (0.349999, -0.490000)),
((-0.840000, 0.210000), (-0.840000, 0.200000)),
((0.889999, -0.540000), (0.880000, -0.540000)),
((-0.900001, -0.640000), (-0.900001, -0.540000)),
((0.139999, -0.290000), (0.150000, -0.290000)),
((-0.210000, -0.690000), (-0.210000, -0.680000)),
((0.099999, 0.650000), (0.200000, 0.650000)),
((0.730000, 0.840000), (0.639999, 0.840000)),
((-0.600000, 0.890000), (-0.690001, 0.890000)),
((0.740000, 0.150000), (0.679999, 0.150000)),
((-0.350000, 0.540000), (-0.350000, 0.490000)),
((-0.840000, -0.050000), (-0.850000, -0.050000)),
((0.380000, 0.390000), (0.380000, 0.440000)),
((-0.060000, 0.550000), (-0.060000, 0.600000)),
((0.040000, 0.150000), (-0.050000, 0.150000)),
((-0.590000, -0.380000), (-0.590000, -0.430000)),
((-0.590000, -0.300000), (-0.590000, -0.350000)),
((0.389999, 0.150000), (0.389999, 0.100000)),
((0.200000, 0.690000), (0.139999, 0.690000)),
((-0.750000, 0.650000), (-0.700001, 0.650000)),
((0.040000, -0.930000), (0.089999, -0.930000)),
((-0.700001, -0.590000), (-0.790000, -0.590000)),
((0.889999, 0.500000), (0.889999, 0.440000)),
((0.990000, 0.640000), (0.889999, 0.640000)),
((-0.250000, -0.190000), (-0.200001, -0.190000)),
((0.480000, -0.680000), (0.480000, -0.580000)),
((0.490000, 0.050000), (0.389999, 0.050000)),
((1.000000, -0.990000), (-1.000000, -0.990000)),
((-0.350000, 0.800000), (-0.350000, 0.700000)),
((-0.010000, 0.050000), (-0.010000, 0.110000)),
((0.740000, -0.040000), (0.790000, -0.040000)),
((0.839999, -0.690000), (0.839999, -0.730000)),
((-0.210000, 0.440000), (-0.250000, 0.440000)),
((0.240000, -0.380000), (0.240000, -0.240000)),
((-0.700001, 0.350000), (-0.750000, 0.350000)),
((-0.900001, -0.540000), (-0.950001, -0.540000)),
((0.150000, -0.290000), (0.150000, -0.480000)),
((-0.210000, -0.680000), (-0.160001, -0.680000)),
((0.099999, 0.790000), (0.099999, 0.650000)),
((0.639999, 0.840000), (0.639999, 0.740000)),
((-0.600000, 0.840000), (-0.600000, 0.890000)),
((0.690000, 0.300000), (0.690000, 0.160000)),
((-0.400001, 0.500000), (-0.360001, 0.500000)),
((0.049999, -0.680000), (0.049999, -0.740000)),
((-0.740001, 0.100000), (-0.740001, -0.050000)),
((0.639999, 0.490000), (0.540000, 0.490000)),
((-0.100000, 0.590000), (-0.110001, 0.590000)),
((-0.160001, 0.100000), (-0.160001, 0.150000)),
((-0.650001, -0.300000), (-0.740001, -0.300000)),
((0.290000, -0.100000), (0.240000, -0.100000)),
((0.190000, 0.700000), (0.190000, 0.840000)),
((-0.950001, -0.690000), (-0.950001, -0.580000)),
((-0.490001, 0.400000), (-0.490001, 0.360000)),
((-0.440001, -0.690000), (-0.490001, -0.690000)),
((-0.950001, -0.930000), (-0.940001, -0.930000)),
((0.089999, -0.930000), (0.089999, -0.790000)),
((-0.850000, -0.590000), (-0.850000, -0.530000)),
((0.889999, 0.440000), (0.830000, 0.440000)),
((0.940000, 0.550000), (0.940000, 0.390000)),
((-0.890000, 0.890000), (-0.890000, 0.790000)),
((0.690000, -0.830000), (0.740000, -0.830000)),
((0.440000, 0.310000), (0.440000, 0.260000)),
((-0.390000, 0.800000), (-0.350000, 0.800000)),
((-0.060000, -0.090000), (-0.010000, -0.090000)),
((0.730000, -0.040000), (0.730000, 0.060000)),
((0.630000, -0.680000), (0.679999, -0.680000)),
((-0.310000, 0.100000), (-0.310000, 0.110000)),
((-0.150001, 0.490000), (-0.150001, 0.440000)),
((0.339999, -0.190000), (0.440000, -0.190000)),
((-0.750000, 0.350000), (-0.750000, 0.360000)),
((0.889999, -0.480000), (0.940000, -0.480000)),
((-0.950001, -0.440000), (-0.950001, -0.430000)),
((0.589999, -0.100000), (0.490000, -0.100000)),
((-0.060000, -0.930000), (-0.060000, -0.740000)),
((0.740000, 0.690000), (0.730000, 0.690000)),
((-0.800000, 0.640000), (-0.840000, 0.640000)),
((0.580000, 0.150000), (0.480000, 0.150000)),
((-0.440001, 0.790000), (-0.440001, 0.740000)),
((-0.100000, -0.680000), (0.049999, -0.680000)),
((0.290000, 0.440000), (0.200000, 0.440000)),
((0.639999, 0.500000), (0.639999, 0.490000)),
((0.150000, 0.950000), (0.150000, 0.900000)),
((-0.110001, 0.400000), (-0.110001, 0.450000)),
((-0.060000, 0.100000), (-0.160001, 0.100000)),
((-0.550000, -0.390000), (-0.550000, -0.300000)),
((0.290000, -0.190000), (0.290000, -0.100000)),
((0.339999, 0.740000), (0.200000, 0.740000)),
((0.000000, -0.050000), (0.000000, -0.150000)),
((-0.800000, 0.550000), (-0.750000, 0.550000)),
((-0.490001, -0.690000), (-0.490001, -0.740000)),
((-0.940001, -0.930000), (-0.940001, -0.980000)),
((-0.110001, -0.590000), (-0.200001, -0.590000)),
((0.630000, 0.800000), (0.630000, 0.850000)),
((-0.850000, -0.530000), (-0.840000, -0.530000)),
((0.780000, 0.490000), (0.730000, 0.490000)),
((0.830000, 0.490000), (0.830000, 0.540000)),
((-0.890000, 0.790000), (-0.940001, 0.790000)),
((-0.060000, -0.190000), (0.000000, -0.190000)),
((0.490000, -0.580000), (0.490000, -0.680000)),
((0.250000, 0.060000), (0.299999, 0.060000)),
((-0.390000, 0.900000), (-0.390000, 0.800000)),
((0.540000, 0.000000), (0.490000, 0.000000)),
((0.740000, -0.930000), (0.740000, -0.980000)),
((0.679999, -0.680000), (0.679999, -0.490000)),
((-0.310000, 0.110000), (-0.260000, 0.110000)),
((0.290000, -0.480000), (0.349999, -0.480000)),
((-0.750000, 0.360000), (-0.690001, 0.360000)),
((0.889999, -0.490000), (0.889999, -0.540000)),
((0.889999, -0.430000), (0.889999, -0.480000)),
((0.240000, -0.480000), (0.240000, -0.430000)),
((-0.950001, -0.190000), (-0.850000, -0.190000)),
((0.589999, -0.050000), (0.589999, -0.100000)),
((0.150000, 0.800000), (0.150000, 0.790000)),
((-0.700001, 0.500000), (-0.700001, 0.600000)),
((0.480000, 0.150000), (0.480000, 0.200000)),
((-0.100000, -0.590000), (-0.100000, -0.680000)),
((0.200000, 0.440000), (0.200000, 0.390000)),
((-0.250000, 0.350000), (-0.250000, 0.210000)),
((0.589999, 0.500000), (0.639999, 0.500000)),
((-0.160001, 0.400000), (-0.110001, 0.400000)),
((-0.050000, 0.150000), (-0.050000, 0.010000)),
((0.389999, 0.100000), (0.349999, 0.100000)),
((0.200000, 0.740000), (0.200000, 0.690000)),
((-0.750000, 0.550000), (-0.750000, 0.650000)),
((-0.790000, -0.740000), (-0.840000, -0.740000)),
((-0.200001, -0.730000), (-0.200001, -0.880000)),
((-0.050000, 0.490000), (-0.050000, 0.440000)),
((-0.700001, -0.640000), (-0.700001, -0.590000)),
((0.780000, 0.400000), (0.780000, 0.490000)),
((0.740000, 0.550000), (0.780000, 0.550000)),
((-0.300000, -0.840000), (-0.390000, -0.840000)),
((0.000000, -0.190000), (0.000000, -0.250000)),
((0.480000, -0.580000), (0.490000, -0.580000)),
((0.380000, 0.300000), (0.380000, 0.310000)),
((-0.350000, 0.700000), (-0.300000, 0.700000)),
((-1.000000, -0.990000), (-1.000000, 1.000000)),
((-0.150001, -0.190000), (-0.110001, -0.190000)),
((0.940000, -0.690000), (0.839999, -0.690000)),
((-0.260000, 0.050000), (-0.260000, 0.100000)),
((0.630000, -0.390000), (0.540000, -0.390000)),
((0.530000, -0.640000), (0.530000, -0.540000)),
((-0.650001, 0.400000), (-0.650001, 0.440000)),
((0.780000, -0.590000), (0.730000, -0.590000)),
((-0.850000, -0.190000), (-0.850000, -0.140000)),
((0.780000, -0.050000), (0.589999, -0.050000)),
((0.630000, 0.790000), (0.540000, 0.790000)),
((-0.840000, 0.500000), (-0.700001, 0.500000)),
((0.740000, 0.300000), (0.690000, 0.300000)),
((0.049999, -0.740000), (0.040000, -0.740000)),
((0.200000, 0.390000), (0.089999, 0.390000)),
((-0.060000, 0.900000), (-0.060000, 0.950000)),
((-0.060000, 0.450000), (-0.060000, 0.500000)),
((-0.100000, -0.040000), (-0.060000, -0.040000)),
((0.150000, -0.090000), (0.150000, -0.190000)),
((0.150000, 0.700000), (0.190000, 0.700000)),
((-0.900001, -0.690000), (-0.950001, -0.690000)),
((-0.490001, 0.360000), (-0.350000, 0.360000)),
((-0.790000, -0.730000), (-0.790000, -0.740000)),
((0.190000, -0.840000), (0.099999, -0.840000)),
((-0.150001, 0.310000), (0.000000, 0.310000)),
((0.740000, 0.850000), (0.740000, 0.750000)),
((0.929999, -0.880000), (0.940000, -0.880000)),
((0.730000, 0.400000), (0.780000, 0.400000)),
((0.790000, 0.690000), (0.790000, 0.550000)),
((-0.390000, -0.380000), (-0.210000, -0.380000)),
((0.740000, -0.830000), (0.740000, -0.840000)),
((0.380000, 0.310000), (0.440000, 0.310000)),
((-0.310000, 0.850000), (-0.310000, 0.890000)),
((-0.150001, 0.050000), (-0.150001, -0.190000)),
((0.490000, -0.090000), (0.580000, -0.090000)),
((0.530000, -0.290000), (0.530000, -0.200000)),
((-0.150001, 0.440000), (-0.200001, 0.440000)),
((0.630000, -0.430000), (0.630000, -0.390000)),
((0.580000, -0.640000), (0.530000, -0.640000)),
((-0.890000, 0.210000), (-0.840000, 0.210000)),
((0.740000, -0.300000), (0.740000, -0.390000)),
((0.200000, -0.200000), (0.200000, -0.350000)),
((-0.850000, -0.140000), (-0.840000, -0.140000)),
((0.540000, 0.790000), (0.540000, 0.690000)),
((-0.840000, 0.640000), (-0.840000, 0.500000)),
((0.349999, 0.200000), (0.349999, 0.150000)),
((-0.440001, 0.740000), (-0.450001, 0.740000)),
((-0.790000, 0.060000), (-0.790000, 0.010000)),
((-0.650001, 0.160000), (-0.650001, 0.210000)),
((-0.200001, 0.210000), (-0.200001, 0.160000)),
((0.490000, 0.390000), (0.380000, 0.390000)),
((-0.060000, 0.950000), (0.150000, 0.950000)),
((-0.150001, 0.550000), (-0.060000, 0.550000)),
((0.000000, 0.010000), (0.000000, 0.000000)),
((-0.550000, -0.300000), (-0.590000, -0.300000)),
((0.349999, 0.050000), (0.339999, 0.050000)),
((0.099999, 0.540000), (0.099999, 0.500000)),
((-0.990001, -0.740000), (-0.990001, -0.830000)),
((0.000000, -0.150000), (-0.100000, -0.150000)),
((-0.360001, 0.300000), (-0.360001, 0.350000)),
((-0.840000, -0.730000), (-0.790000, -0.730000)),
((0.099999, -0.840000), (0.099999, -0.940000)),
((0.630000, 0.850000), (0.740000, 0.850000)),
((0.940000, -0.880000), (0.940000, -0.940000)),
((0.790000, 0.550000), (0.839999, 0.550000)),
((0.830000, 0.590000), (0.830000, 0.600000)),
((-0.310000, -0.790000), (-0.360001, -0.790000)),
((-0.390000, -0.340000), (-0.390000, -0.380000)),
((-0.010000, -0.250000), (-0.010000, -0.200000)),
((0.440000, -0.790000), (0.380000, -0.790000)),
((0.429999, 0.260000), (0.429999, 0.300000)),
((0.490000, 0.000000), (0.490000, -0.090000)),
((0.630000, -0.530000), (0.639999, -0.530000)),
((0.139999, -0.630000), (0.150000, -0.630000)),
((0.440000, -0.540000), (0.440000, -0.640000)),
((-0.950001, 0.350000), (-0.950001, 0.400000)),
((0.740000, -0.390000), (0.730000, -0.390000)),
((0.150000, -0.480000), (0.240000, -0.480000)),
((0.150000, 0.790000), (0.099999, 0.790000)),
((-0.700001, 0.600000), (-0.690001, 0.600000)),
((0.349999, 0.150000), (0.339999, 0.150000)),
((-0.150001, 0.250000), (-0.150001, 0.210000)),
((-0.250000, 0.210000), (-0.200001, 0.210000)),
((-0.150001, 0.640000), (-0.150001, 0.550000)),
((-0.050000, 0.010000), (0.000000, 0.010000)),
((-0.650001, -0.290000), (-0.650001, -0.240000)),
((0.349999, 0.100000), (0.349999, 0.050000)),
((-0.990001, -0.830000), (-0.900001, -0.830000)),
((-0.350000, 0.300000), (-0.360001, 0.300000)),
((-0.250000, -0.730000), (-0.200001, -0.730000)),
((0.889999, 0.550000), (0.940000, 0.550000)),
((0.740000, 0.600000), (0.740000, 0.550000)),
((-0.640000, 0.940000), (-0.740001, 0.940000)),
((-0.310000, -0.830000), (-0.310000, -0.790000)),
((-0.440001, -0.340000), (-0.390000, -0.340000)),
((0.389999, -0.890000), (0.389999, -0.940000)),
((0.429999, -0.050000), (0.380000, -0.050000)),
((-0.940001, 0.940000), (-0.990001, 0.940000)),
((-0.100000, 0.060000), (-0.100000, 0.050000)),
((0.730000, 0.060000), (0.740000, 0.060000)),
((-0.200001, 0.390000), (-0.210000, 0.390000)),
((0.299999, -0.380000), (0.299999, -0.390000)),
((0.139999, -0.790000), (0.139999, -0.630000)),
((-0.900001, 0.350000), (-0.950001, 0.350000)),
((0.830000, -0.440000), (0.830000, -0.300000)),
((-0.840000, -0.190000), (-0.790000, -0.190000)),
((0.690000, -0.140000), (0.690000, -0.190000)),
((0.049999, 0.800000), (0.049999, 0.600000)),
((-0.690001, 0.890000), (-0.690001, 0.840000)),
((0.339999, 0.150000), (0.339999, 0.200000)),
((-0.110001, 0.360000), (-0.060000, 0.360000)),
((0.099999, 0.450000), (0.099999, 0.400000)),
((-0.750000, 0.010000), (-0.750000, 0.110000)),
((-0.790000, 0.250000), (-0.790000, 0.160000)),
((-0.150001, 0.160000), (-0.150001, 0.110000)),
((-0.110001, 0.640000), (-0.150001, 0.640000)),
((-0.060000, -0.040000), (-0.060000, 0.100000)),
((-0.650001, -0.240000), (-0.640000, -0.240000)),
((-0.750000, -0.090000), (-0.700001, -0.090000)),
((0.150000, -0.190000), (0.290000, -0.190000)),
((-0.900001, -0.830000), (-0.900001, -0.690000)),
((-0.100000, -0.200000), (-0.150001, -0.200000)),
((-0.350000, 0.360000), (-0.350000, 0.300000)),
((-0.890000, -0.630000), (-0.840000, -0.630000)),
((0.040000, -0.940000), (0.040000, -0.930000)),
((0.389999, 0.800000), (0.440000, 0.800000)),
((-0.360001, -0.680000), (-0.360001, -0.630000)),
((-0.600000, -0.440000), (-0.600000, -0.390000)),
((0.830000, -0.940000), (0.830000, -0.880000)),
((0.839999, 0.500000), (0.889999, 0.500000)),
((0.880000, 0.600000), (0.880000, 0.650000)),
((-0.640000, 0.950000), (-0.640000, 0.940000)),
((-0.360001, -0.780000), (-0.250000, -0.780000)),
((-0.200001, -0.140000), (-0.200001, -0.150000)),
((0.389999, -0.940000), (0.380000, -0.940000)),
((0.380000, -0.050000), (0.380000, 0.000000)),
((-0.310000, 0.890000), (-0.360001, 0.890000)),
((-0.940001, 0.950000), (-0.940001, 0.940000)),
((0.480000, -0.290000), (0.530000, -0.290000)),
((-0.200001, 0.440000), (-0.200001, 0.390000)),
((0.250000, -0.380000), (0.299999, -0.380000)),
((-0.010000, -0.490000), (-0.050000, -0.490000)),
((-0.940001, 0.400000), (-0.940001, 0.360000)),
((0.730000, -0.300000), (0.690000, -0.300000)),
((0.679999, -0.140000), (0.690000, -0.140000)),
((0.480000, 0.200000), (0.349999, 0.200000)),
((-0.060000, 0.360000), (-0.060000, 0.400000)),
((0.099999, 0.400000), (0.190000, 0.400000)),
((-0.790000, 0.010000), (-0.750000, 0.010000)),
((-0.790000, 0.160000), (-0.650001, 0.160000)),
((-0.310000, 0.940000), (-0.310000, 0.950000)),
((-0.590000, 0.440000), (-0.590000, 0.300000)),
((0.089999, -0.040000), (0.290000, -0.040000)),
((-0.640000, -0.240000), (-0.640000, -0.350000)),
((-0.750000, -0.100000), (-0.750000, -0.090000)),
((0.150000, 0.540000), (0.099999, 0.540000)),
((-0.950001, -0.740000), (-0.990001, -0.740000)),
((-0.100000, -0.150000), (-0.100000, -0.200000)),
((-0.360001, 0.350000), (-0.440001, 0.350000)),
((-0.440001, -0.680000), (-0.440001, -0.690000)),
((-0.200001, -0.640000), (-0.250000, -0.640000)),
((-0.050000, 0.440000), (-0.100000, 0.440000)),
((0.830000, -0.880000), (0.880000, -0.880000)),
((0.830000, 0.600000), (0.880000, 0.600000)),
((0.730000, 0.540000), (0.730000, 0.600000)),
((-0.740001, 0.890000), (-0.890000, 0.890000)),
((-0.400001, -0.880000), (-0.400001, -0.830000)),
((-0.250000, -0.780000), (-0.250000, -0.790000)),
((0.000000, -0.250000), (-0.010000, -0.250000)),
((0.380000, -0.940000), (0.380000, -0.890000)),
((0.380000, -0.790000), (0.380000, -0.740000)),
((0.380000, 0.000000), (0.299999, 0.000000)),
((-0.110001, 0.940000), (-0.110001, 0.990000)),
((0.780000, 0.100000), (0.690000, 0.100000)),
((0.089999, 0.850000), (0.089999, 0.900000)),
((-0.260000, 0.100000), (-0.310000, 0.100000)),
((0.630000, -0.580000), (0.630000, -0.530000)),
((0.250000, -0.300000), (0.250000, -0.380000)),
((-0.950001, 0.210000), (-0.900001, 0.210000)),
((-0.650001, 0.440000), (-0.740001, 0.440000)),
((0.940000, -0.480000), (0.940000, -0.590000)),
((0.880000, -0.540000), (0.880000, -0.440000)),
((-0.890000, -0.940000), (-0.900001, -0.940000)),
((0.780000, -0.190000), (0.780000, -0.050000)),
((0.089999, 0.250000), (-0.150001, 0.250000)),
((0.929999, 0.160000), (0.929999, 0.300000)),
((-0.260000, -0.590000), (-0.260000, -0.580000)),
((0.089999, 0.390000), (0.089999, 0.450000)),
((-0.310000, 0.950000), (-0.150001, 0.950000)),
((-0.110001, 0.450000), (-0.060000, 0.450000)),
((-0.260000, -0.050000), (-0.260000, 0.000000)),
((-0.740001, -0.290000), (-0.650001, -0.290000)),
((0.290000, 0.160000), (0.299999, 0.160000)),
((-0.490001, 0.300000), (-0.490001, 0.210000)),
((-0.450001, -0.730000), (-0.260000, -0.730000)),
((-0.250000, -0.640000), (-0.250000, -0.730000)),
((-0.100000, 0.440000), (-0.100000, 0.390000)),
((-0.390000, -0.630000), (-0.390000, -0.680000)),
((-0.690001, -0.390000), (-0.690001, -0.440000)),
((0.880000, -0.880000), (0.880000, -0.740000)),
((0.730000, 0.600000), (0.740000, 0.600000)),
((0.990000, 0.650000), (0.990000, 0.840000)),
((-0.250000, -0.790000), (-0.300000, -0.790000)),
((-0.490001, -0.190000), (-0.490001, -0.240000)),
((0.440000, -0.890000), (0.389999, -0.890000)),
((0.380000, 0.010000), (0.380000, 0.060000)),
((-0.990001, 0.990000), (-0.990001, 0.950000)),
((-0.100000, 0.050000), (-0.150001, 0.050000)),
((0.780000, 0.000000), (0.780000, 0.100000)),
((0.490000, -0.440000), (0.480000, -0.440000)),
((0.299999, -0.390000), (0.200000, -0.390000)),
((-0.950001, 0.200000), (-0.950001, 0.210000)),
((-0.740001, 0.440000), (-0.740001, 0.390000)),
((0.880000, -0.440000), (0.830000, -0.440000)),
((-0.890000, -0.890000), (-0.890000, -0.940000)),
((0.580000, -0.240000), (0.679999, -0.240000)),
((-0.840000, -0.140000), (-0.840000, -0.190000)),
((-0.160001, -0.840000), (-0.160001, -0.690000)),
((0.049999, 0.600000), (0.089999, 0.600000)),
((-0.690001, 0.840000), (-0.840000, 0.840000)),
((0.690000, 0.160000), (0.740000, 0.160000)),
((-0.010000, 0.400000), (-0.010000, 0.450000)),
((0.790000, -0.190000), (0.940000, -0.190000)),
((-0.210000, -0.590000), (-0.260000, -0.590000)),
((-0.750000, 0.000000), (-0.840000, 0.000000)),
((-0.260000, 0.750000), (-0.260000, 0.940000)),
((-0.600000, 0.300000), (-0.600000, 0.450000)),
((-0.260000, 0.000000), (-0.310000, 0.000000)),
((-0.700001, -0.250000), (-0.700001, -0.100000)),
((0.290000, 0.100000), (0.290000, 0.160000)),
((0.139999, 0.600000), (0.150000, 0.600000)),
((-0.260000, -0.730000), (-0.260000, -0.640000)),
((-0.840000, -0.630000), (-0.840000, -0.730000)),
((0.099999, -0.940000), (0.040000, -0.940000)),
((-0.100000, 0.390000), (-0.150001, 0.390000)),
((-0.390000, -0.680000), (-0.360001, -0.680000)),
((-0.600000, -0.390000), (-0.690001, -0.390000)),
((0.940000, -0.940000), (0.830000, -0.940000)),
((0.839999, 0.550000), (0.839999, 0.500000)),
((-0.500000, -0.190000), (-0.490001, -0.190000)),
((0.290000, -0.740000), (0.290000, -0.580000)),
((0.429999, 0.300000), (0.380000, 0.300000)),
((0.480000, -0.440000), (0.480000, -0.290000)),
((0.589999, -0.440000), (0.589999, -0.580000)),
((0.190000, -0.440000), (0.190000, -0.380000)),
((0.530000, -0.540000), (0.440000, -0.540000)),
((-0.010000, -0.640000), (-0.010000, -0.490000)),
((-0.840000, 0.200000), (-0.950001, 0.200000)),
((-0.740001, 0.390000), (-0.840000, 0.390000)),
((0.730000, -0.390000), (0.730000, -0.300000)),
((-0.950001, -0.530000), (-0.900001, -0.530000)),
((0.580000, -0.250000), (0.580000, -0.240000)),
((0.630000, 0.740000), (0.630000, 0.790000)),
((-0.500000, 0.750000), (-0.500000, 0.940000)),
((0.740000, 0.160000), (0.740000, 0.150000)),
((0.139999, 0.150000), (0.089999, 0.150000)),
((-0.360001, 0.790000), (-0.440001, 0.790000)),
((-0.310000, 0.390000), (-0.450001, 0.390000)),
((0.940000, -0.190000), (0.940000, -0.300000)),
((-0.210000, -0.630000), (-0.210000, -0.590000)),
((-0.840000, 0.310000), (-0.840000, 0.260000)),
((-0.150001, 0.900000), (-0.060000, 0.900000)),
((-0.590000, 0.300000), (-0.600000, 0.300000)),
((0.290000, -0.040000), (0.290000, 0.050000)),
((-0.310000, 0.000000), (-0.310000, 0.050000)),
((-0.700001, -0.340000), (-0.650001, -0.340000)),
((0.299999, 0.110000), (0.380000, 0.110000)),
((0.150000, 0.600000), (0.150000, 0.540000)),
((-0.160001, -0.100000), (-0.300000, -0.100000)),
((-0.500000, -0.680000), (-0.440001, -0.680000)),
((-0.350000, -0.880000), (-0.350000, -0.930000)),
((0.299999, -0.830000), (0.299999, -0.840000)),
((-0.160001, 0.650000), (-0.160001, 0.700000)),
((-0.740001, 0.940000), (-0.740001, 0.890000)),
((-0.400001, -0.830000), (-0.310000, -0.830000)),
((-0.440001, -0.240000), (-0.440001, -0.340000)),
((0.380000, -0.740000), (0.290000, -0.740000)),
((-0.100000, 0.940000), (-0.110001, 0.940000)),
((0.889999, 0.100000), (0.889999, 0.000000)),
((0.089999, 0.900000), (0.099999, 0.900000)),
((0.830000, -0.790000), (0.790000, -0.790000)),
((0.589999, -0.580000), (0.630000, -0.580000)),
((0.429999, -0.630000), (0.429999, -0.540000)),
((0.940000, -0.590000), (0.889999, -0.590000)),
((-0.650001, -0.480000), (-0.650001, -0.430000)),
((0.630000, -0.250000), (0.580000, -0.250000)),
((-0.100000, -0.780000), (-0.100000, -0.840000)),
((0.089999, 0.800000), (0.150000, 0.800000)),
((0.830000, 0.160000), (0.929999, 0.160000)),
((0.089999, 0.150000), (0.089999, 0.250000)),
((-0.360001, 0.500000), (-0.360001, 0.790000)),
((-0.310000, 0.260000), (-0.310000, 0.390000)),
((0.940000, -0.300000), (0.889999, -0.300000)),
((-0.260000, -0.580000), (-0.050000, -0.580000)),
((-0.840000, 0.260000), (-0.740001, 0.260000)),
((-0.200001, 0.160000), (-0.150001, 0.160000)),
((0.540000, 0.490000), (0.540000, 0.440000)),
((-0.310000, 0.740000), (-0.310000, 0.750000)),
((-0.550000, 0.450000), (-0.550000, 0.500000)),
((-0.250000, -0.050000), (-0.260000, -0.050000)),
((-0.650001, -0.340000), (-0.650001, -0.300000)),
((0.339999, 0.050000), (0.339999, 0.100000)),
((0.380000, 0.110000), (0.380000, 0.160000)),
((-0.990001, -0.730000), (-0.940001, -0.730000)),
((-0.160001, -0.250000), (-0.160001, -0.100000)),
((-0.490001, 0.210000), (-0.390000, 0.210000)),
((-0.310000, -0.640000), (-0.310000, -0.590000)),
((-0.540000, -0.880000), (-0.540000, -0.940000)),
((0.200000, -0.830000), (0.299999, -0.830000)),
((0.780000, -0.730000), (0.830000, -0.730000)),
((-0.900001, 0.890000), (-0.950001, 0.890000)),
((-0.540000, -0.240000), (-0.500000, -0.240000)),
((0.589999, 0.390000), (0.540000, 0.390000)),
((-0.160001, 0.800000), (-0.160001, 0.850000)),
((-0.990001, 0.950000), (-0.940001, 0.950000)),
((0.200000, 0.850000), (0.200000, 0.800000)),
((0.830000, -0.840000), (0.830000, -0.790000)),
((-0.450001, 0.100000), (-0.490001, 0.100000)),
((0.089999, -0.740000), (0.089999, -0.640000)),
((-0.950001, 0.400000), (-0.940001, 0.400000)),
((-0.650001, -0.430000), (-0.640000, -0.430000)),
((-0.100000, -0.840000), (-0.160001, -0.840000)),
((0.089999, 0.600000), (0.089999, 0.800000)),
((0.839999, 0.150000), (0.839999, 0.050000)),
((-0.060000, 0.400000), (-0.010000, 0.400000)),
((-0.060000, -0.640000), (-0.060000, -0.590000)),
((0.000000, 0.440000), (0.000000, 0.390000)),
((-0.850000, 0.250000), (-0.850000, 0.310000)),
((0.049999, 0.210000), (0.049999, 0.110000)),
((-0.310000, 0.750000), (-0.260000, 0.750000)),
((0.250000, 0.050000), (0.250000, 0.000000)),
((-0.700001, -0.100000), (-0.750000, -0.100000)),
((0.380000, 0.160000), (0.440000, 0.160000)),
((-0.300000, -0.150000), (-0.350000, -0.150000)),
((-0.310000, -0.590000), (-0.450001, -0.590000)),
((0.200000, -0.840000), (0.200000, -0.930000)),
((-0.750000, -0.390000), (-0.750000, -0.150000)),
((0.830000, -0.730000), (0.830000, -0.630000)),
((-0.540000, -0.140000), (-0.540000, -0.240000)),
((0.540000, -0.890000), (0.530000, -0.890000)),
((0.589999, 0.440000), (0.589999, 0.390000)),
((0.190000, 0.990000), (-0.100000, 0.990000)),
((-0.640000, 0.650000), (-0.640000, 0.600000)),
((0.099999, 0.850000), (0.200000, 0.850000)),
((0.839999, -0.840000), (0.830000, -0.840000)),
((0.630000, -0.480000), (0.679999, -0.480000)),
((-0.490001, 0.100000), (-0.490001, 0.050000)),
((0.190000, -0.380000), (0.240000, -0.380000)),
((0.089999, -0.640000), (-0.010000, -0.640000)),
((-0.790000, 0.360000), (-0.790000, 0.310000)),
((0.889999, -0.640000), (0.880000, -0.640000)),
((-0.740001, -0.430000), (-0.740001, -0.480000)),
((0.690000, -0.190000), (0.780000, -0.190000)),
((-0.110001, -0.830000), (-0.110001, -0.780000)),
((0.639999, 0.740000), (0.630000, 0.740000)),
((-0.540000, 0.750000), (-0.500000, 0.750000)),
((0.839999, 0.050000), (0.830000, 0.050000)),
((-0.890000, 0.990000), (-0.890000, 0.900000)),
((-0.390000, 0.310000), (-0.390000, 0.260000)),
((0.089999, 0.450000), (0.099999, 0.450000)),
((-0.740001, 0.250000), (-0.790000, 0.250000)),
((0.200000, 0.210000), (0.200000, 0.050000)),
((0.040000, 0.210000), (0.049999, 0.210000)),
((-0.150001, 0.950000), (-0.150001, 0.900000)),
((0.290000, 0.050000), (0.250000, 0.050000)),
((-0.600000, -0.350000), (-0.600000, -0.200000)),
((-0.150001, -0.200000), (-0.150001, -0.250000)),
((-0.350000, -0.150000), (-0.350000, -0.340000)),
((-0.700001, 0.700000), (-0.650001, 0.700000)),
((-0.450001, -0.590000), (-0.450001, -0.480000)),
((-0.360001, -0.880000), (-0.350000, -0.880000)),
((0.889999, 0.260000), (0.889999, 0.200000)),
((-0.440001, -0.440000), (-0.490001, -0.440000)),
((-0.700001, -0.390000), (-0.750000, -0.390000)),
((0.880000, 0.650000), (0.990000, 0.650000)),
((0.940000, 0.750000), (0.940000, 0.690000)),
((-0.640000, -0.140000), (-0.540000, -0.140000)),
((0.349999, -0.590000), (0.349999, -0.680000)),
((0.540000, 0.350000), (0.349999, 0.350000)),
((-0.940001, 0.600000), (-0.850000, 0.600000)),
((-0.100000, 0.990000), (-0.100000, 0.940000)),
((-0.800000, 0.450000), (-0.800000, 0.490000)),
((0.889999, 0.000000), (0.780000, 0.000000)),
((0.099999, 0.900000), (0.099999, 0.850000)),
((0.929999, -0.840000), (0.929999, -0.780000)),
((0.679999, -0.480000), (0.679999, -0.350000)),
((-0.490001, 0.050000), (-0.540000, 0.050000)),
((0.380000, -0.630000), (0.429999, -0.630000)),
((-0.790000, 0.310000), (-0.700001, 0.310000)),
((0.380000, 0.640000), (0.380000, 0.740000)),
((0.889999, -0.590000), (0.889999, -0.640000)),
((-0.640000, -0.480000), (-0.590000, -0.480000)),
((0.139999, -0.380000), (0.139999, -0.290000)),
((0.429999, 0.940000), (0.429999, 0.950000)),
((0.830000, 0.050000), (0.830000, 0.160000)),
((-0.850000, 0.990000), (-0.890000, 0.990000)),
((0.929999, -0.380000), (0.929999, -0.340000)),
((-0.800000, 0.160000), (-0.800000, 0.250000)),
((0.099999, 0.110000), (0.099999, 0.010000)),
((0.190000, 0.210000), (0.200000, 0.210000)),
((0.339999, 0.100000), (0.290000, 0.100000)),
((-0.940001, -0.730000), (-0.940001, -0.790000)),
((-0.350000, -0.340000), (-0.260000, -0.340000)),
((-0.650001, 0.700000), (-0.650001, 0.750000)),
((-0.260000, -0.640000), (-0.310000, -0.640000)),
((-0.540000, -0.940000), (-0.600000, -0.940000)),
((0.530000, 0.800000), (0.630000, 0.800000)),
((-0.440001, -0.380000), (-0.440001, -0.440000)),
((0.929999, 0.790000), (0.929999, 0.850000)),
((0.389999, -0.590000), (0.349999, -0.590000)),
((0.389999, -0.390000), (0.349999, -0.390000)),
((0.349999, 0.350000), (0.349999, 0.260000)),
((0.679999, 0.350000), (0.679999, 0.440000)),
((-0.940001, 0.700000), (-0.940001, 0.600000)),
((-0.160001, 0.850000), (-0.100000, 0.850000)),
((0.940000, 0.060000), (0.940000, -0.040000)),
((0.200000, 0.800000), (0.349999, 0.800000)),
((0.740000, -0.780000), (0.839999, -0.780000)),
((0.530000, -0.380000), (0.639999, -0.380000)),
((-0.840000, 0.390000), (-0.840000, 0.360000)),
((0.880000, -0.590000), (0.790000, -0.590000)),
((-0.900001, -0.940000), (-0.900001, -0.890000)),
((-0.790000, -0.190000), (-0.790000, -0.430000)),
((0.040000, 0.440000), (0.000000, 0.440000)),
((0.429999, 0.950000), (0.589999, 0.950000)),
((-0.550000, 0.850000), (-0.540000, 0.850000)),
((0.940000, 0.150000), (0.839999, 0.150000)),
((-0.850000, 0.940000), (-0.850000, 0.990000)),
((0.929999, -0.340000), (0.940000, -0.340000)),
((-0.060000, -0.590000), (-0.100000, -0.590000)),
((-0.800000, 0.250000), (-0.850000, 0.250000)),
((0.139999, 0.050000), (0.139999, 0.150000)),
((-0.060000, 0.160000), (0.040000, 0.160000)),
((-0.400001, 0.010000), (-0.400001, 0.110000)),
((-0.160001, 0.150000), (-0.200001, 0.150000)),
((0.240000, -0.100000), (0.240000, -0.090000)),
((-0.800000, -0.930000), (-0.800000, -0.830000)),
((-0.300000, -0.940000), (-0.360001, -0.940000)),
((0.880000, 0.210000), (0.880000, 0.260000)),
((-0.160001, 0.700000), (-0.150001, 0.700000)),
((0.530000, 0.700000), (0.530000, 0.800000)),
((-0.490001, -0.630000), (-0.390000, -0.630000)),
((-0.690001, -0.440000), (-0.700001, -0.440000)),
((0.889999, 0.890000), (0.889999, 0.750000)),
((-0.490001, -0.240000), (-0.440001, -0.240000)),
((0.530000, -0.890000), (0.530000, -0.840000)),
((0.349999, -0.390000), (0.349999, -0.430000)),
((0.349999, 0.260000), (0.429999, 0.260000)),
((-0.690001, 0.650000), (-0.640000, 0.650000)),
((0.200000, 0.790000), (0.200000, 0.750000)),
((0.490000, -0.350000), (0.490000, -0.440000)),
((0.530000, -0.440000), (0.530000, -0.380000)),
((0.440000, -0.640000), (0.380000, -0.640000)),
((0.349999, 0.740000), (0.349999, 0.550000)),
((-0.790000, -0.430000), (-0.740001, -0.430000)),
((0.040000, 0.390000), (0.040000, 0.440000)),
((0.589999, 0.950000), (0.589999, 0.900000)),
((-0.540000, 0.850000), (-0.540000, 0.750000)),
((-0.890000, 0.900000), (-0.750000, 0.900000)),
((-0.390000, 0.260000), (-0.310000, 0.260000)),
((0.889999, -0.300000), (0.889999, -0.380000)),
((-0.740001, 0.260000), (-0.740001, 0.250000)),
((0.150000, 0.060000), (0.190000, 0.060000)),
((-0.060000, 0.110000), (-0.060000, 0.160000)),
((-0.600000, 0.450000), (-0.550000, 0.450000)),
((0.040000, 0.100000), (0.040000, 0.150000)),
((-0.600000, -0.200000), (-0.690001, -0.200000)),
((-0.150001, -0.250000), (-0.160001, -0.250000)),
((-0.590000, 0.750000), (-0.590000, 0.700000)),
((-0.400001, -0.350000), (-0.500000, -0.350000)),
((-0.600000, -0.890000), (-0.700001, -0.890000)),
((-0.360001, -0.940000), (-0.360001, -0.880000)),
((0.880000, 0.260000), (0.889999, 0.260000)),
((0.440000, 0.700000), (0.530000, 0.700000)),
((-0.150001, 0.700000), (-0.150001, 0.650000)),
((0.889999, 0.750000), (0.940000, 0.750000)),
((-0.060000, -0.240000), (-0.060000, -0.190000)),
((0.290000, -0.580000), (0.389999, -0.580000)),
((0.429999, -0.100000), (0.429999, -0.050000)),
((-0.990001, 0.840000), (-0.990001, 0.700000)),
((-0.100000, 0.840000), (-0.150001, 0.840000)),
((0.380000, 0.990000), (0.200000, 0.990000)),
((-0.800000, 0.490000), (-0.850000, 0.490000)),
((0.929999, -0.780000), (0.940000, -0.780000)),
((-0.490001, -0.090000), (-0.490001, -0.140000)),
((0.639999, -0.440000), (0.589999, -0.440000)),
((0.380000, -0.640000), (0.380000, -0.630000)),
((0.190000, -0.590000), (0.099999, -0.590000)),
((0.380000, 0.740000), (0.349999, 0.740000)),
((0.630000, -0.150000), (0.540000, -0.150000)),
((-0.640000, -0.430000), (-0.640000, -0.480000)),
((0.099999, -0.380000), (0.139999, -0.380000)),
((0.429999, -0.980000), (0.429999, -0.930000)),
((0.580000, 0.940000), (0.429999, 0.940000)),
((-0.800000, 0.940000), (-0.800000, 0.950000)),
((-0.450001, 0.250000), (-0.450001, 0.260000)),
((0.000000, 0.390000), (-0.050000, 0.390000)),
((-0.800000, 0.060000), (-0.790000, 0.060000)),
((-0.150001, 0.110000), (-0.060000, 0.110000)),
((0.150000, 0.160000), (0.150000, 0.060000)),
((-0.500000, -0.050000), (-0.500000, 0.010000)),
((-0.450001, 0.450000), (-0.450001, 0.640000)),
((0.089999, 0.100000), (0.040000, 0.100000)),
((-0.700001, -0.350000), (-0.700001, -0.340000)),
((-0.940001, -0.790000), (-0.950001, -0.790000)),
((-0.440001, 0.300000), (-0.490001, 0.300000)),
((-0.500000, -0.350000), (-0.500000, -0.340000)),
((-0.600000, -0.940000), (-0.600000, -0.890000)),
((0.299999, -0.840000), (0.200000, -0.840000)),
((0.790000, 0.310000), (0.790000, 0.210000)),
((-0.150001, 0.650000), (-0.060000, 0.650000)),
((0.429999, 0.840000), (0.429999, 0.850000)),
((0.940000, 0.790000), (0.929999, 0.790000)),
((-0.360001, -0.790000), (-0.360001, -0.780000)),
((-0.100000, -0.490000), (-0.160001, -0.490000)),
((0.389999, -0.430000), (0.389999, -0.480000)),
((0.679999, 0.440000), (0.589999, 0.440000)),
((-0.100000, 0.850000), (-0.100000, 0.840000)),
((0.200000, 0.990000), (0.200000, 0.940000)),
((-0.110001, -0.190000), (-0.110001, -0.140000)),
((-0.640000, 0.690000), (-0.690001, 0.690000)),
((0.940000, -0.040000), (0.990000, -0.040000)),
((0.940000, -0.780000), (0.940000, -0.830000)),
((0.839999, -0.780000), (0.839999, -0.840000)),
((-0.490001, -0.140000), (-0.440001, -0.140000)),
((0.190000, -0.680000), (0.190000, -0.590000)),
((0.880000, -0.640000), (0.880000, -0.590000)),
((0.630000, -0.190000), (0.630000, -0.150000)),
((-0.900001, -0.890000), (-0.950001, -0.890000)),
((-0.110001, -0.780000), (-0.100000, -0.780000)),
((0.049999, 0.590000), (0.049999, 0.390000)),
((-0.450001, 0.990000), (-0.840000, 0.990000)),
((-0.050000, 0.390000), (-0.050000, 0.360000)),
((-0.800000, 0.010000), (-0.800000, 0.060000)),
((0.099999, 0.160000), (0.150000, 0.160000)),
((-0.500000, 0.010000), (-0.400001, 0.010000)),
((-0.200001, 0.150000), (-0.200001, 0.000000)),
((-0.690001, -0.250000), (-0.700001, -0.250000)),
((0.240000, -0.090000), (0.339999, -0.090000)),
((-0.950001, -0.790000), (-0.950001, -0.740000)),
((-0.440001, 0.350000), (-0.440001, 0.300000)),
((-0.800000, -0.250000), (-0.850000, -0.250000)),
((-0.850000, -0.930000), (-0.800000, -0.930000)),
((0.190000, -0.940000), (0.190000, -0.840000)),
((0.490000, 0.740000), (0.440000, 0.740000)),
((-0.490001, -0.440000), (-0.490001, -0.630000)),
((-0.700001, -0.440000), (-0.700001, -0.390000)),
((0.780000, -0.740000), (0.780000, -0.730000)),
((-0.160001, -0.490000), (-0.160001, -0.480000)),
((0.530000, -0.840000), (0.440000, -0.840000)),
((0.440000, -0.040000), (0.440000, -0.100000)),
((-0.150001, 0.790000), (-0.200001, 0.790000)),
((-0.850000, 0.590000), (-0.950001, 0.590000)),
((-0.690001, 0.690000), (-0.690001, 0.650000)),
((0.200000, 0.750000), (0.389999, 0.750000)),
((0.679999, -0.350000), (0.490000, -0.350000)),
((-0.540000, 0.050000), (-0.540000, -0.090000)),
((0.200000, -0.440000), (0.190000, -0.440000)),
((0.639999, 0.400000), (0.639999, 0.350000)),
((0.480000, -0.930000), (0.480000, -0.880000)),
((0.099999, -0.740000), (0.089999, -0.740000)),
((-0.900001, 0.210000), (-0.900001, 0.350000)),
((0.830000, -0.300000), (0.740000, -0.300000)),
((0.150000, -0.250000), (0.099999, -0.250000)),
((0.780000, 0.950000), (0.940000, 0.950000)),
((-0.650001, 0.850000), (-0.640000, 0.850000)),
((0.690000, 0.360000), (0.839999, 0.360000)),
((-0.450001, 0.890000), (-0.450001, 0.990000)),
((-0.050000, 0.360000), (0.150000, 0.360000)),
((-0.900001, 0.010000), (-0.800000, 0.010000)),
((0.049999, 0.110000), (0.099999, 0.110000)),
((-0.450001, -0.100000), (-0.450001, -0.050000)),
((0.250000, 0.000000), (0.089999, 0.000000)),
((-0.690001, -0.200000), (-0.690001, -0.250000)),
((0.040000, -0.590000), (0.040000, -0.530000)),
((-0.650001, 0.750000), (-0.590000, 0.750000)),
((-0.800000, -0.440000), (-0.800000, -0.250000)),
((-0.200001, -0.880000), (-0.100000, -0.880000)),
((-0.500000, 0.350000), (-0.500000, 0.390000)),
((-0.740001, -0.640000), (-0.790000, -0.640000)),
((0.990000, 0.840000), (0.940000, 0.840000)),
((-0.650001, -0.790000), (-0.650001, -0.740000)),
((-0.110001, -0.240000), (-0.060000, -0.240000)),
((0.440000, -0.840000), (0.440000, -0.890000)),
((0.389999, -0.580000), (0.389999, -0.590000)),
((0.440000, -0.100000), (0.429999, -0.100000)),
((-0.990001, 0.700000), (-0.940001, 0.700000)),
((0.190000, 0.940000), (0.190000, 0.990000)),
((-0.850000, 0.490000), (-0.850000, 0.590000)),
((0.990000, 0.100000), (0.889999, 0.100000)),
((0.580000, -0.200000), (0.580000, -0.190000)),
((0.200000, -0.390000), (0.200000, -0.440000)),
((0.639999, -0.380000), (0.639999, -0.440000)),
((0.429999, -0.930000), (0.480000, -0.930000)),
((-0.840000, 0.360000), (-0.790000, 0.360000)),
((0.339999, 0.540000), (0.339999, 0.640000)),
((0.690000, -0.540000), (0.690000, -0.680000)),
((-0.950001, -0.880000), (-0.850000, -0.880000)),
((0.099999, -0.250000), (0.099999, -0.380000)),
((0.679999, -0.240000), (0.679999, -0.140000)),
((0.940000, 0.950000), (0.940000, 0.940000)),
((-0.640000, 0.850000), (-0.640000, 0.800000)),
((-0.800000, 0.950000), (-0.640000, 0.950000)),
((-0.450001, 0.260000), (-0.400001, 0.260000)),
((-0.950001, -0.100000), (-0.950001, 0.110000)),
((0.200000, 0.050000), (0.139999, 0.050000)),
((-0.540000, 0.450000), (-0.450001, 0.450000)),
((-0.640000, -0.350000), (-0.700001, -0.350000)),
((0.339999, -0.040000), (0.349999, -0.040000)),
((0.679999, 0.450000), (0.679999, 0.540000)),
((-0.950001, -0.380000), (-0.950001, -0.290000)),
((-0.750000, -0.440000), (-0.800000, -0.440000)),
((-0.100000, -0.880000), (-0.100000, -0.890000)),
((0.000000, 0.750000), (0.000000, 0.490000)),
((0.429999, 0.850000), (0.580000, 0.850000)),
((-0.590000, -0.190000), (-0.590000, -0.290000)),
((0.940000, 0.360000), (0.940000, 0.350000)),
((0.940000, 0.840000), (0.940000, 0.790000)),
((0.830000, 0.900000), (0.940000, 0.900000)),
((-0.600000, -0.790000), (-0.650001, -0.790000)),
((-0.110001, -0.480000), (-0.110001, -0.440000)),
((0.349999, -0.430000), (0.389999, -0.430000)),
((0.389999, 0.050000), (0.389999, -0.040000)),
((-0.990001, 0.940000), (-0.990001, 0.850000)),
((-0.360001, 0.990000), (-0.440001, 0.990000)),
((-0.110001, -0.140000), (-0.010000, -0.140000)),
((-0.950001, 0.690000), (-0.990001, 0.690000)),
((-0.600000, 0.690000), (-0.600000, 0.740000)),
((0.580000, -0.040000), (0.580000, 0.010000)),
((0.990000, -0.040000), (0.990000, 0.100000)),
((0.639999, -0.200000), (0.580000, -0.200000)),
((0.349999, -0.880000), (0.429999, -0.880000)),
((0.490000, -0.880000), (0.490000, -0.930000)),
((0.440000, 0.540000), (0.339999, 0.540000)),
((0.790000, -0.540000), (0.690000, -0.540000)),
((-0.950001, -0.890000), (-0.950001, -0.880000)),
((0.049999, 0.390000), (0.040000, 0.390000)),
((-0.400001, 0.800000), (-0.400001, 0.890000)),
((0.889999, -0.380000), (0.929999, -0.380000)),
((-0.050000, -0.640000), (-0.060000, -0.640000)),
((-0.850000, 0.000000), (-0.900001, 0.000000)),
((-0.200001, 0.000000), (-0.250000, 0.000000)),
((0.339999, -0.090000), (0.339999, -0.040000)),
((0.299999, 0.160000), (0.299999, 0.110000)),
((0.099999, 0.500000), (0.290000, 0.500000)),
((-0.990001, -0.380000), (-0.950001, -0.380000)),
((-0.840000, -0.740000), (-0.840000, -0.780000)),
((0.349999, -0.940000), (0.190000, -0.940000)),
((0.000000, 0.490000), (-0.050000, 0.490000)),
((0.440000, 0.740000), (0.440000, 0.700000)),
((0.580000, 0.850000), (0.580000, 0.940000)),
((-0.690001, -0.190000), (-0.590000, -0.190000)),
((0.880000, -0.740000), (0.780000, -0.740000)),
((0.830000, 0.890000), (0.830000, 0.900000)),
((-0.600000, -0.880000), (-0.600000, -0.790000)),
((-0.200001, -0.290000), (-0.110001, -0.290000)),
((0.380000, -0.350000), (0.380000, -0.340000)),
((-0.150001, 0.840000), (-0.150001, 0.790000)),
((-0.360001, 0.950000), (-0.360001, 0.990000)),
((-0.400001, -0.050000), (-0.400001, -0.040000)),
((-0.550000, 0.690000), (-0.600000, 0.690000)),
((0.639999, -0.100000), (0.639999, -0.200000)),
((0.429999, -0.880000), (0.429999, -0.830000)),
((0.639999, 0.350000), (0.589999, 0.350000)),
((0.099999, -0.590000), (0.099999, -0.740000)),
((0.730000, -0.680000), (0.730000, -0.630000)),
((-0.850000, -0.640000), (-0.900001, -0.640000)),
((0.150000, -0.980000), (0.429999, -0.980000)),
((0.190000, 0.540000), (0.190000, 0.640000)),
((-0.550000, 0.800000), (-0.550000, 0.850000)),
((-0.150001, 0.210000), (0.000000, 0.210000)),
((0.839999, 0.360000), (0.839999, 0.310000)),
((-0.450001, 0.800000), (-0.400001, 0.800000)),
((-0.400001, 0.310000), (-0.390000, 0.310000)),
((0.929999, -0.290000), (0.929999, -0.200000)),
((-0.050000, -0.580000), (-0.050000, -0.640000)),
((0.049999, 0.350000), (0.049999, 0.260000)),
((-0.260000, 0.940000), (-0.310000, 0.940000)),
((-0.550000, 0.500000), (-0.540000, 0.500000)),
((-0.250000, 0.000000), (-0.250000, -0.050000)),
((0.089999, 0.000000), (0.089999, 0.100000)),
((0.049999, -0.590000), (0.040000, -0.590000)),
((0.290000, 0.500000), (0.290000, 0.650000)),
((-0.940001, -0.290000), (-0.940001, -0.390000)),
((-0.790000, -0.530000), (-0.750000, -0.530000)),
((-0.300000, -0.890000), (-0.300000, -0.940000)),
((-0.450001, 0.350000), (-0.500000, 0.350000)),
((0.790000, 0.210000), (0.880000, 0.210000)),
((-0.060000, 0.650000), (-0.060000, 0.750000)),
((-0.690001, -0.050000), (-0.690001, -0.190000)),
((0.830000, -0.150000), (0.830000, -0.140000)),
((0.940000, 0.890000), (0.889999, 0.890000)),
((-0.650001, -0.740000), (-0.690001, -0.740000)),
((-0.110001, -0.290000), (-0.110001, -0.240000)),
((-0.500000, -0.240000), (-0.500000, -0.190000)),
((0.380000, -0.340000), (0.429999, -0.340000)),
((0.200000, 0.940000), (0.190000, 0.940000)),
((-0.010000, -0.100000), (-0.060000, -0.100000)),
((-0.640000, 0.740000), (-0.640000, 0.690000)),
((0.580000, -0.190000), (0.630000, -0.190000)),
((-0.300000, 0.390000), (-0.300000, 0.160000)),
((0.429999, -0.830000), (0.679999, -0.830000)),
((0.040000, -0.300000), (0.000000, -0.300000)),
((0.349999, 0.550000), (0.440000, 0.550000)),
((0.690000, -0.530000), (0.790000, -0.530000)),
((0.150000, -0.880000), (0.150000, -0.980000)),
((-0.790000, 0.590000), (-0.800000, 0.590000)),
((0.000000, 0.210000), (0.000000, 0.200000)),
((-0.450001, 0.740000), (-0.450001, 0.800000)),
((-0.400001, 0.260000), (-0.400001, 0.310000)),
((0.929999, -0.200000), (0.690000, -0.200000)),
((0.049999, 0.260000), (0.089999, 0.260000)),
((-0.950001, 0.110000), (-0.940001, 0.110000)),
((-0.060000, 0.800000), (-0.060000, 0.890000)),
((-0.540000, 0.500000), (-0.540000, 0.450000)),
((-0.050000, 0.740000), (-0.050000, 0.640000)),
((0.349999, -0.840000), (0.349999, -0.880000)),
((0.150000, -0.530000), (0.150000, -0.540000)),
((0.580000, 0.450000), (0.679999, 0.450000)),
((-0.890000, -0.150000), (-0.990001, -0.150000)),
((-0.300000, -0.100000), (-0.300000, -0.150000)),
((-0.400001, -0.480000), (-0.400001, -0.350000)),
((0.200000, -0.930000), (0.349999, -0.930000)),
((-0.450001, 0.310000), (-0.450001, 0.350000)),
((0.740000, 0.250000), (0.740000, 0.200000)),
((-0.590000, -0.290000), (-0.490001, -0.290000)),
((-0.700001, -0.890000), (-0.700001, -0.880000)),
((-0.160001, -0.480000), (-0.110001, -0.480000)),
((0.429999, -0.340000), (0.429999, -0.200000)),
((-0.990001, 0.850000), (-0.940001, 0.850000)),
((0.730000, 0.950000), (0.730000, 0.990000)),
((-0.010000, -0.140000), (-0.010000, -0.100000)),
((-0.500000, 0.590000), (-0.550000, 0.590000)),
((-0.950001, 0.590000), (-0.950001, 0.690000)),
((-0.540000, -0.090000), (-0.490001, -0.090000)),
((0.589999, 0.260000), (0.639999, 0.260000)),
((0.740000, -0.630000), (0.740000, -0.780000)),
((0.540000, -0.150000), (0.540000, -0.290000)),
((0.139999, -0.880000), (0.150000, -0.880000)),
((-0.500000, 0.390000), (-0.540000, 0.390000)),
((-0.790000, 0.690000), (-0.790000, 0.590000)),
((0.580000, 0.100000), (0.580000, 0.150000)),
((-0.400001, 0.890000), (-0.450001, 0.890000)),
((0.880000, -0.390000), (0.880000, -0.290000)),
((-0.210000, -0.430000), (-0.210000, -0.390000)),
((0.150000, 0.360000), (0.150000, 0.350000)),
((-0.900001, 0.000000), (-0.900001, 0.010000)),
((-0.940001, 0.110000), (-0.940001, 0.060000)),
((0.299999, 0.300000), (0.290000, 0.300000)),
((-0.060000, 0.890000), (-0.160001, 0.890000)),
((-0.450001, -0.050000), (-0.500000, -0.050000)),
((-0.550000, -0.740000), (-0.600000, -0.740000)),
((0.000000, -0.300000), (0.000000, -0.350000)),
((-0.350000, -0.050000), (-0.350000, -0.100000)),
((0.049999, -0.200000), (0.049999, -0.430000)),
((0.150000, -0.540000), (0.049999, -0.540000)),
((0.339999, 0.650000), (0.339999, 0.740000)),
((-0.990001, -0.390000), (-0.990001, -0.480000)),
((-0.840000, -0.780000), (-0.750000, -0.780000)),
((0.349999, -0.930000), (0.349999, -0.940000)),
((0.740000, 0.200000), (0.730000, 0.200000)),
((-0.540000, -0.300000), (-0.540000, -0.380000)),
((-0.690001, -0.790000), (-0.700001, -0.790000)),
((-0.450001, -0.200000), (-0.450001, -0.150000)),
((0.429999, -0.350000), (0.380000, -0.350000)),
((0.540000, 0.390000), (0.540000, 0.350000)),
((0.730000, 0.990000), (0.389999, 0.990000)),
((-0.360001, -0.050000), (-0.400001, -0.050000)),
((-0.500000, 0.550000), (-0.500000, 0.590000)),
((0.349999, 0.790000), (0.200000, 0.790000)),
((0.740000, -0.150000), (0.730000, -0.150000)),
((0.679999, -0.790000), (0.480000, -0.790000)),
((0.589999, 0.350000), (0.589999, 0.260000)),
((0.780000, -0.880000), (0.780000, -0.790000)),
((0.730000, -0.630000), (0.740000, -0.630000)),
((-0.850000, -0.880000), (-0.850000, -0.640000)),
((0.190000, 0.640000), (0.099999, 0.640000)),
((-0.540000, 0.390000), (-0.540000, 0.250000)),
((-0.640000, 0.800000), (-0.550000, 0.800000)),
((-0.100000, 0.200000), (-0.100000, 0.150000)),
((0.839999, 0.310000), (0.940000, 0.310000)),
((-0.600000, -0.090000), (-0.600000, 0.060000)),
((-0.210000, -0.390000), (-0.390000, -0.390000)),
((-0.850000, 0.310000), (-0.840000, 0.310000)),
((0.299999, 0.440000), (0.299999, 0.300000)),
((-0.110001, 0.750000), (-0.110001, 0.800000)),
((-0.500000, 0.690000), (-0.500000, 0.740000)),
((-0.550000, -0.790000), (-0.550000, -0.740000)),
((0.000000, -0.350000), (-0.150001, -0.350000)),
((-0.350000, -0.100000), (-0.360001, -0.100000)),
((0.389999, -0.830000), (0.389999, -0.840000)),
((0.049999, -0.540000), (0.049999, -0.590000)),
((0.530000, 0.400000), (0.580000, 0.400000)),
((-0.940001, -0.390000), (-0.990001, -0.390000)),
((-0.400001, -0.300000), (-0.400001, -0.290000)),
((-0.750000, -0.780000), (-0.750000, -0.680000)),
((-0.750000, -0.530000), (-0.750000, -0.440000)),
((-0.100000, -0.890000), (-0.300000, -0.890000)),
((-0.500000, 0.160000), (-0.500000, 0.310000)),
((0.780000, 0.110000), (0.780000, 0.250000)),
((-0.540000, 0.100000), (-0.640000, 0.100000)),
((0.830000, -0.140000), (0.990000, -0.140000)),
((0.940000, 0.900000), (0.940000, 0.890000)),
((-0.690001, -0.740000), (-0.690001, -0.790000)),
((-0.450001, -0.150000), (-0.500000, -0.150000)),
((0.349999, -0.200000), (0.349999, -0.240000)),
((0.389999, -0.040000), (0.440000, -0.040000)),
((-0.940001, 0.840000), (-0.990001, 0.840000)),
((0.389999, 0.990000), (0.389999, 0.900000)),
((-0.600000, 0.550000), (-0.500000, 0.550000)),
((-0.950001, 0.540000), (-0.950001, 0.550000)),
((0.349999, 0.800000), (0.349999, 0.790000)),
((0.740000, -0.090000), (0.740000, -0.150000)),
((0.480000, -0.880000), (0.490000, -0.880000)),
((0.040000, -0.480000), (0.040000, -0.300000)),
((0.440000, 0.550000), (0.440000, 0.540000)),
((0.790000, -0.530000), (0.790000, -0.540000)),
((-0.740001, -0.480000), (-0.650001, -0.480000)),
((0.780000, 0.900000), (0.780000, 0.950000)),
((-0.540000, 0.250000), (-0.590000, 0.250000)),
((0.940000, 0.310000), (0.940000, 0.150000)),
((-0.750000, 0.940000), (-0.800000, 0.940000)),
((-0.390000, -0.390000), (-0.390000, -0.490000)),
((-0.850000, 0.060000), (-0.850000, 0.110000)),
((0.040000, 0.160000), (0.040000, 0.210000)),
((0.380000, 0.440000), (0.299999, 0.440000)),
((-0.160001, 0.940000), (-0.250000, 0.940000)),
((-0.500000, 0.740000), (-0.550000, 0.740000)),
((-0.360001, -0.100000), (-0.360001, -0.050000)),
((0.389999, -0.840000), (0.349999, -0.840000)),
((0.099999, -0.430000), (0.099999, -0.440000)),
((0.099999, -0.530000), (0.150000, -0.530000)),
((0.580000, 0.400000), (0.580000, 0.450000)),
((-0.400001, -0.290000), (-0.360001, -0.290000)),
((-0.990001, -0.150000), (-0.990001, -0.380000)),
((-0.450001, -0.480000), (-0.400001, -0.480000)),
((0.730000, 0.260000), (0.780000, 0.260000)),
((-0.490001, 0.840000), (-0.490001, 0.700000)),
((-0.540000, 0.150000), (-0.540000, 0.100000)),
((0.990000, -0.630000), (0.990000, -0.150000)),
((0.830000, 0.450000), (0.880000, 0.450000)),
((-0.700001, -0.880000), (-0.600000, -0.880000)),
((-0.400001, -0.250000), (-0.400001, -0.200000)),
((0.389999, -0.480000), (0.429999, -0.480000)),
((-0.940001, 0.850000), (-0.940001, 0.840000)),
((0.630000, 0.950000), (0.730000, 0.950000)),
((-0.950001, 0.550000), (-0.890000, 0.550000)),
((0.540000, 0.300000), (0.540000, 0.200000)),
((0.540000, -0.290000), (0.589999, -0.290000)),
((0.000000, -0.890000), (0.000000, -0.980000)),
((0.099999, 0.590000), (0.049999, 0.590000)),
((0.790000, 0.740000), (0.740000, 0.740000)),
((0.589999, 0.100000), (0.580000, 0.100000)),
((-0.750000, 0.900000), (-0.750000, 0.940000)),
((-0.360001, -0.430000), (-0.210000, -0.430000)),
((-0.850000, 0.110000), (-0.790000, 0.110000)),
((0.190000, 0.060000), (0.190000, 0.210000)),
((-0.200001, 0.790000), (-0.200001, 0.750000)),
((-0.550000, 0.740000), (-0.550000, 0.790000)),
((-0.450001, -0.840000), (-0.450001, -0.790000)),
((-0.150001, -0.380000), (0.000000, -0.380000)),
((-0.310000, -0.050000), (-0.350000, -0.050000)),
((0.049999, -0.430000), (0.099999, -0.430000)),
((0.240000, -0.730000), (0.240000, -0.490000)),
((0.290000, 0.650000), (0.339999, 0.650000)),
((-0.360001, -0.350000), (-0.360001, -0.300000)),
((-0.750000, -0.150000), (-0.800000, -0.150000)),
((-0.850000, -0.490000), (-0.850000, -0.340000)),
((0.780000, 0.260000), (0.780000, 0.310000)),
((-0.060000, 0.750000), (0.000000, 0.750000)),
((-0.540000, -0.380000), (-0.440001, -0.380000)),
((0.990000, -0.050000), (0.880000, -0.050000)),
((-0.940001, 0.750000), (-0.890000, 0.750000)),
((-0.590000, -0.780000), (-0.590000, -0.830000)),
((0.429999, -0.480000), (0.429999, -0.350000)),
((0.540000, 0.900000), (0.540000, 0.890000)),
((-0.900001, 0.500000), (-0.900001, 0.540000)),
((-0.600000, 0.740000), (-0.640000, 0.740000)),
((0.780000, 0.650000), (0.780000, 0.700000)),
((0.679999, -0.830000), (0.679999, -0.790000)),
((0.580000, 0.300000), (0.540000, 0.300000)),
((-0.060000, -0.530000), (-0.060000, -0.480000)),
((0.639999, 0.600000), (0.639999, 0.550000)),
((0.490000, -0.300000), (0.490000, -0.340000)),
((0.049999, -0.890000), (0.000000, -0.890000)),
((0.099999, 0.640000), (0.099999, 0.590000)),
((0.740000, 0.740000), (0.740000, 0.690000)),
((0.000000, 0.200000), (-0.100000, 0.200000)),
((-0.600000, 0.060000), (-0.500000, 0.060000)),
((0.940000, -0.340000), (0.940000, -0.440000)),
((0.790000, -0.240000), (0.790000, -0.250000)),
((-0.440001, -0.490000), (-0.440001, -0.580000)),
((-0.790000, 0.110000), (-0.790000, 0.100000)),
((0.040000, -0.530000), (0.089999, -0.530000)),
((0.349999, 0.400000), (0.349999, 0.360000)),
((-0.200001, 0.750000), (-0.110001, 0.750000)),
((-0.400001, 0.690000), (-0.500000, 0.690000)),
((-0.440001, -0.840000), (-0.450001, -0.840000)),
((-0.390000, -0.090000), (-0.390000, -0.140000)),
((0.049999, -0.440000), (0.049999, -0.490000)),
((-0.990001, -0.490000), (-0.990001, -0.730000)),
((-0.360001, -0.150000), (-0.400001, -0.150000)),
((-0.800000, -0.490000), (-0.850000, -0.490000)),
((-0.550000, -0.880000), (-0.540000, -0.880000)),
((-0.500000, 0.310000), (-0.450001, 0.310000)),
((0.780000, 0.310000), (0.790000, 0.310000)),
((-0.490001, 0.200000), (-0.490001, 0.150000)),
((0.990000, -0.140000), (0.990000, -0.050000)),
((-0.890000, 0.750000), (-0.890000, 0.700000)),
((0.429999, -0.200000), (0.349999, -0.200000)),
((0.740000, 0.940000), (0.630000, 0.940000)),
((-0.890000, 0.490000), (-0.990001, 0.490000)),
((0.690000, 0.650000), (0.780000, 0.650000)),
((0.339999, -0.590000), (0.299999, -0.590000)),
((0.580000, 0.250000), (0.580000, 0.300000)),
((-0.060000, -0.480000), (0.040000, -0.480000)),
((0.380000, 0.600000), (0.639999, 0.600000)),
((0.589999, -0.300000), (0.490000, -0.300000)),
((0.139999, -0.980000), (0.139999, -0.880000)),
((0.589999, 0.900000), (0.780000, 0.900000)),
((-0.540000, 0.210000), (-0.540000, 0.200000)),
((-0.160001, 0.200000), (-0.160001, 0.250000)),
((-0.650001, -0.150000), (-0.650001, -0.090000)),
((0.880000, -0.290000), (0.929999, -0.290000)),
((-0.110001, -0.440000), (-0.360001, -0.440000)),
((0.150000, 0.350000), (0.049999, 0.350000)),
((-0.940001, 0.060000), (-0.850000, 0.060000)),
((0.089999, -0.530000), (0.089999, -0.480000)),
((0.339999, 0.400000), (0.349999, 0.400000)),
((-0.160001, 0.890000), (-0.160001, 0.940000)),
((-0.690001, 0.790000), (-0.690001, 0.740000)),
((-0.440001, -0.790000), (-0.440001, -0.840000)),
((0.250000, -0.440000), (0.250000, -0.780000)),
((-0.940001, -0.490000), (-0.990001, -0.490000)),
((-0.400001, -0.150000), (-0.400001, -0.100000)),
((-0.840000, -0.340000), (-0.840000, -0.480000)),
((-0.550000, -0.930000), (-0.550000, -0.880000)),
((0.099999, -0.200000), (0.049999, -0.200000)),
((-0.650001, 0.060000), (-0.650001, 0.110000)),
((0.630000, 0.060000), (0.630000, 0.160000)),
((-0.390000, 0.200000), (-0.490001, 0.200000)),
((0.929999, -0.630000), (0.990000, -0.630000)),
((-0.890000, 0.700000), (-0.850000, 0.700000)),
((-0.440001, 0.840000), (-0.490001, 0.840000)),
((0.630000, 0.940000), (0.630000, 0.950000)),
((-0.990001, 0.690000), (-0.990001, 0.500000)),
((0.690000, 0.800000), (0.690000, 0.650000)),
((0.790000, -0.790000), (0.790000, -0.940000)),
((0.389999, -0.380000), (0.389999, -0.390000)),
((0.540000, 0.200000), (0.490000, 0.200000)),
((0.580000, -0.880000), (0.780000, -0.880000)),
((-0.640000, 0.600000), (-0.600000, 0.600000)),
((0.679999, 0.550000), (0.679999, 0.600000)),
((0.780000, -0.430000), (0.780000, -0.340000)),
((0.589999, -0.290000), (0.589999, -0.300000)),
((0.200000, 0.650000), (0.200000, 0.550000)),
((-0.540000, 0.200000), (-0.590000, 0.200000)),
((-0.160001, 0.250000), (-0.210000, 0.250000)),
((-0.500000, 0.110000), (-0.450001, 0.110000)),
((0.639999, -0.250000), (0.639999, -0.300000)),
((-0.360001, -0.440000), (-0.360001, -0.430000)),
((0.250000, 0.400000), (0.250000, 0.360000)),
((0.530000, 0.360000), (0.530000, 0.400000)),
((-0.210000, 0.900000), (-0.200001, 0.900000)),
((-0.440001, 0.550000), (-0.400001, 0.550000)),
((-0.450001, -0.790000), (-0.550000, -0.790000)),
((-0.150001, -0.350000), (-0.150001, -0.380000)),
((0.250000, -0.780000), (0.349999, -0.780000)),
((0.190000, -0.730000), (0.240000, -0.730000)),
((-0.800000, -0.150000), (-0.800000, -0.100000)),
((-0.400001, -0.100000), (-0.450001, -0.100000)),
((-0.740001, -0.540000), (-0.800000, -0.540000)),
((0.099999, -0.190000), (0.099999, -0.200000)),
((0.780000, 0.250000), (0.740000, 0.250000)),
((0.480000, 0.840000), (0.429999, 0.840000)),
((-0.390000, 0.210000), (-0.390000, 0.200000)),
((0.839999, -0.630000), (0.839999, -0.680000)),
((-0.590000, -0.830000), (-0.490001, -0.830000)),
((-0.440001, -0.140000), (-0.440001, -0.190000)),
((-0.550000, -0.100000), (-0.550000, 0.050000)),
((-0.440001, 0.850000), (-0.440001, 0.840000)),
((0.389999, 0.900000), (0.540000, 0.900000)),
((-0.400001, -0.040000), (-0.360001, -0.040000)),
((-0.990001, 0.500000), (-0.900001, 0.500000)),
((0.780000, 0.700000), (0.830000, 0.700000)),
((0.639999, 0.260000), (0.639999, 0.250000)),
((0.580000, -0.930000), (0.580000, -0.880000)),
((-0.600000, 0.600000), (-0.600000, 0.650000)),
((0.480000, 0.590000), (0.380000, 0.590000)),
((0.490000, -0.340000), (0.679999, -0.340000)),
((-0.590000, 0.250000), (-0.590000, 0.210000)),
((-0.210000, 0.250000), (-0.210000, 0.310000)),
((-0.500000, 0.060000), (-0.500000, 0.110000)),
((-0.600000, -0.740000), (-0.600000, -0.690000)),
((0.790000, -0.250000), (0.639999, -0.250000)),
((-0.390000, -0.490000), (-0.440001, -0.490000)),
((0.250000, 0.360000), (0.290000, 0.360000)),
((-0.840000, 0.050000), (-0.940001, 0.050000)),
((0.139999, -0.480000), (0.139999, -0.390000)),
((0.250000, 0.260000), (0.339999, 0.260000)),
((-0.200001, 0.900000), (-0.200001, 0.840000)),
((-0.400001, 0.550000), (-0.400001, 0.690000)),
((-0.010000, -0.390000), (-0.160001, -0.390000)),
((-0.440001, -0.090000), (-0.390000, -0.090000)),
((-0.990001, -0.480000), (-0.940001, -0.480000)),
((-0.260000, -0.290000), (-0.260000, -0.240000)),
((-0.790000, -0.480000), (-0.790000, -0.530000)),
((-0.590000, -0.840000), (-0.590000, -0.930000)),
((0.049999, -0.190000), (0.099999, -0.190000)),
((0.540000, 0.110000), (0.540000, 0.060000)),
((-0.490001, 0.700000), (-0.400001, 0.700000)),
((0.480000, 0.790000), (0.480000, 0.840000)),
((-0.490001, 0.150000), (-0.540000, 0.150000)),
((0.839999, -0.680000), (0.929999, -0.680000)),
((0.929999, 0.060000), (0.940000, 0.060000)),
((-0.850000, 0.750000), (-0.790000, 0.750000)),
((-0.400001, -0.200000), (-0.450001, -0.200000)),
((0.380000, 0.900000), (0.380000, 0.990000)),
((-0.360001, -0.040000), (-0.360001, 0.010000)),
((-0.850000, 0.400000), (-0.750000, 0.400000)),
((-0.890000, 0.550000), (-0.890000, 0.490000)),
((0.880000, 0.750000), (0.880000, 0.890000)),
((0.780000, -0.940000), (0.780000, -0.890000)),
((0.730000, -0.150000), (0.730000, -0.100000)),
((0.339999, -0.690000), (0.339999, -0.590000)),
((0.490000, 0.160000), (0.580000, 0.160000)),
((0.490000, -0.930000), (0.580000, -0.930000)),
((-0.360001, -0.530000), (-0.360001, -0.480000)),
((0.690000, 0.600000), (0.690000, 0.450000)),
((0.690000, -0.300000), (0.690000, -0.430000)),
((-0.110001, 0.200000), (-0.160001, 0.200000)),
((-0.550000, -0.150000), (-0.650001, -0.150000)),
((0.630000, -0.300000), (0.630000, -0.250000)),
((0.240000, 0.360000), (0.240000, 0.400000)),
((-0.940001, 0.050000), (-0.940001, -0.040000)),
((0.139999, -0.390000), (0.089999, -0.390000)),
((0.250000, 0.310000), (0.250000, 0.260000)),
((-0.200001, 0.840000), (-0.250000, 0.840000)),
((-0.550000, 0.790000), (-0.690001, 0.790000)),
((-0.210000, -0.740000), (-0.390000, -0.740000)),
((0.349999, -0.830000), (0.389999, -0.830000)),
((0.089999, -0.630000), (0.089999, -0.580000)),
((-0.360001, -0.300000), (-0.400001, -0.300000)),
((-0.890000, -0.100000), (-0.890000, -0.150000)),
((-0.840000, -0.480000), (-0.790000, -0.480000)),
((-0.650001, 0.110000), (-0.550000, 0.110000)),
((0.540000, 0.060000), (0.630000, 0.060000)),
((-0.400001, 0.700000), (-0.400001, 0.750000)),
((0.490000, 0.790000), (0.480000, 0.790000)),
((0.929999, -0.680000), (0.929999, -0.630000)),
((0.929999, -0.040000), (0.929999, 0.060000)),
((-0.490001, -0.880000), (-0.400001, -0.880000)),
((-0.100000, -0.440000), (-0.100000, -0.490000)),
((-0.590000, 0.050000), (-0.590000, -0.100000)),
((0.929999, 0.850000), (0.990000, 0.850000)),
((-0.490001, 0.950000), (-0.490001, 0.850000)),
((-0.940001, 0.250000), (-0.950001, 0.250000)),
((-0.850000, 0.360000), (-0.850000, 0.400000)),
((0.589999, 0.700000), (0.679999, 0.700000)),
((0.790000, -0.940000), (0.780000, -0.940000)),
((0.349999, -0.380000), (0.389999, -0.380000)),
((0.490000, 0.200000), (0.490000, 0.160000)),
((0.580000, -0.730000), (0.580000, -0.640000)),
((-0.360001, -0.480000), (-0.200001, -0.480000)),
((-0.590000, 0.650000), (-0.590000, 0.590000)),
((0.679999, 0.600000), (0.690000, 0.600000)),
((0.690000, -0.430000), (0.780000, -0.430000)),
((0.200000, 0.550000), (0.240000, 0.550000)),
((-0.940001, -0.100000), (-0.950001, -0.100000)),
((-0.200001, 0.310000), (-0.200001, 0.260000)),
((-0.450001, -0.340000), (-0.450001, -0.250000)),
((0.940000, -0.440000), (0.929999, -0.440000)),
((0.089999, -0.390000), (0.089999, -0.240000)),
((0.240000, 0.310000), (0.250000, 0.310000)),
((-0.650001, 0.800000), (-0.650001, 0.850000)),
((-0.210000, -0.880000), (-0.210000, -0.740000)),
((0.099999, -0.440000), (0.049999, -0.440000)),
((-0.310000, -0.300000), (-0.310000, -0.290000)),
((-0.800000, -0.100000), (-0.890000, -0.100000)),
((-0.800000, -0.540000), (-0.800000, -0.490000)),
((0.440000, 0.160000), (0.440000, 0.110000)),
((-0.400001, 0.750000), (-0.390000, 0.750000)),
((-0.490001, -0.300000), (-0.540000, -0.300000)),
((0.830000, -0.630000), (0.839999, -0.630000)),
((-0.490001, -0.830000), (-0.490001, -0.880000)),
((-0.550000, 0.050000), (-0.590000, 0.050000)),
((0.299999, -0.290000), (0.299999, -0.300000)),
((0.990000, 0.850000), (0.990000, 0.990000)),
((-0.350000, 0.010000), (-0.350000, -0.040000)),
((-0.950001, 0.250000), (-0.950001, 0.300000)),
((-0.940001, 0.360000), (-0.850000, 0.360000)),
((0.889999, 0.200000), (0.790000, 0.200000)),
((0.589999, 0.750000), (0.589999, 0.700000)),
((0.480000, -0.790000), (0.480000, -0.690000)),
((0.639999, 0.250000), (0.580000, 0.250000)),
((0.380000, 0.590000), (0.380000, 0.600000)),
((-0.200001, 0.590000), (-0.250000, 0.590000)),
((0.000000, -0.980000), (0.139999, -0.980000)),
((0.790000, 0.790000), (0.790000, 0.740000)),
((-0.940001, -0.050000), (-0.940001, -0.100000)),
((-0.100000, 0.150000), (-0.110001, 0.150000)),
((-0.450001, -0.250000), (-0.550000, -0.250000)),
((-0.360001, 0.250000), (-0.450001, 0.250000)),
((-0.600000, -0.690000), (-0.740001, -0.690000)),
((0.929999, -0.440000), (0.929999, -0.390000)),
((0.089999, -0.480000), (0.139999, -0.480000)),
((-0.250000, 0.740000), (-0.310000, 0.740000)),
((-0.450001, 0.640000), (-0.500000, 0.640000)),
((-0.010000, -0.440000), (-0.010000, -0.390000)),
((-0.940001, -0.480000), (-0.940001, -0.490000)),
((-0.310000, -0.290000), (-0.260000, -0.290000)),
((-0.590000, -0.930000), (-0.550000, -0.930000)),
((0.099999, -0.050000), (0.099999, -0.100000)),
((-0.550000, 0.160000), (-0.500000, 0.160000)),
((0.589999, 0.840000), (0.490000, 0.840000)),
((-0.490001, -0.290000), (-0.490001, -0.300000)),
((0.880000, -0.050000), (0.880000, -0.040000)),
((-0.850000, 0.700000), (-0.850000, 0.750000)),
((-0.640000, -0.100000), (-0.640000, -0.140000)),
((0.290000, 0.900000), (0.380000, 0.900000)),
((-0.350000, -0.040000), (-0.300000, -0.040000)),
((-0.950001, 0.300000), (-0.990001, 0.300000)),
((-0.550000, 0.590000), (-0.550000, 0.690000)),
((0.790000, 0.200000), (0.790000, 0.010000)),
((0.880000, 0.890000), (0.830000, 0.890000)),
((0.730000, -0.100000), (0.639999, -0.100000)),
((0.480000, -0.690000), (0.339999, -0.690000)),
((0.389999, -0.300000), (0.349999, -0.300000)),
((0.630000, 0.300000), (0.630000, 0.310000)),
((-0.200001, -0.530000), (-0.060000, -0.530000)),
((-0.200001, 0.600000), (-0.200001, 0.590000)),
((0.339999, 0.640000), (0.299999, 0.640000)),
((0.690000, -0.680000), (0.730000, -0.680000)),
((-0.150001, -0.630000), (-0.150001, -0.730000)),
((0.240000, 0.700000), (0.299999, 0.700000)),
((-0.900001, -0.050000), (-0.940001, -0.050000)),
((-0.110001, 0.150000), (-0.110001, 0.200000)),
((-0.360001, -0.630000), (-0.350000, -0.630000)),
((-0.550000, -0.250000), (-0.550000, -0.150000)),
((-0.360001, 0.160000), (-0.360001, 0.250000)),
((-0.640000, -0.830000), (-0.640000, -0.840000)),
((0.929999, -0.390000), (0.880000, -0.390000)),
((0.290000, 0.300000), (0.290000, 0.350000)),
((0.349999, 0.360000), (0.530000, 0.360000)),
((-0.500000, 0.640000), (-0.500000, 0.650000)),
((0.349999, -0.780000), (0.349999, -0.830000)),
((0.240000, -0.890000), (0.240000, -0.880000)),
((-0.260000, -0.340000), (-0.260000, -0.300000)),
((-0.400001, -0.980000), (-0.400001, -0.890000)),
((0.190000, -0.050000), (0.099999, -0.050000)),
((-0.550000, 0.110000), (-0.550000, 0.160000)),
((-0.550000, -0.440000), (-0.600000, -0.440000)),
((-0.840000, 0.690000), (-0.890000, 0.690000)),
((-0.050000, -0.440000), (-0.100000, -0.440000)),
((-0.440001, -0.190000), (-0.390000, -0.190000)),
((-0.490001, 0.850000), (-0.440001, 0.850000)),
((0.740000, 0.990000), (0.740000, 0.940000)),
((-0.300000, -0.040000), (-0.300000, -0.090000)),
((0.790000, 0.010000), (0.880000, 0.010000)),
((0.580000, 0.650000), (0.580000, 0.750000)),
((0.440000, -0.940000), (0.440000, -0.980000)),
((0.349999, -0.300000), (0.349999, -0.380000)),
((-0.600000, 0.650000), (-0.590000, 0.650000)),
((0.690000, 0.440000), (0.690000, 0.360000)),
((0.839999, -0.250000), (0.839999, -0.430000)),
((0.630000, 0.400000), (0.639999, 0.400000)),
((-0.150001, -0.730000), (-0.050000, -0.730000)),
((0.240000, 0.550000), (0.240000, 0.700000)),
((-0.590000, 0.210000), (-0.540000, 0.210000)),
((-0.210000, 0.310000), (-0.200001, 0.310000)),
((-0.350000, -0.630000), (-0.350000, -0.680000)),
((-0.500000, -0.340000), (-0.450001, -0.340000)),
((-0.740001, -0.830000), (-0.640000, -0.830000)),
((-0.300000, -0.540000), (-0.300000, -0.630000)),
((0.089999, 0.260000), (0.089999, 0.310000)),
((-0.400001, 0.110000), (-0.350000, 0.110000)),
((-0.700001, 0.800000), (-0.650001, 0.800000)),
((0.000000, -0.380000), (0.000000, -0.440000)),
((-0.360001, 0.050000), (-0.360001, 0.100000)),
((0.240000, -0.490000), (0.099999, -0.490000)),
((0.049999, -0.100000), (0.049999, -0.190000)),
((-0.440001, 0.540000), (-0.440001, 0.450000)),
((-0.550000, -0.630000), (-0.550000, -0.440000)),
((-0.390000, -0.190000), (-0.390000, -0.250000)),
((0.299999, -0.300000), (0.250000, -0.300000)),
((-0.210000, 0.800000), (-0.160001, 0.800000)),
((0.540000, 0.890000), (0.290000, 0.890000)),
((-0.700001, 0.850000), (-0.700001, 0.900000)),
((-0.360001, 0.010000), (-0.350000, 0.010000)),
((-0.990001, 0.160000), (-0.800000, 0.160000)),
((0.389999, 0.650000), (0.580000, 0.650000)),
((0.589999, -0.940000), (0.440000, -0.940000)),
((0.530000, -0.200000), (0.480000, -0.200000)),
((-0.650001, 0.540000), (-0.650001, 0.640000)),
((0.790000, -0.100000), (0.790000, -0.190000)),
((0.740000, 0.440000), (0.690000, 0.440000)),
((-0.250000, 0.690000), (-0.250000, 0.600000)),
((0.889999, -0.250000), (0.839999, -0.250000)),
((0.630000, 0.360000), (0.630000, 0.400000)),
((-0.160001, -0.680000), (-0.160001, -0.630000)),
((0.839999, 0.790000), (0.790000, 0.790000)),
((-0.990001, -0.140000), (-0.900001, -0.140000)),
((-0.700001, 0.390000), (-0.700001, 0.400000)),
((-0.350000, -0.680000), (-0.300000, -0.680000)),
((-0.450001, 0.110000), (-0.450001, 0.160000)),
((-0.740001, -0.690000), (-0.740001, -0.830000)),
((0.639999, -0.300000), (0.630000, -0.300000)),
((0.240000, 0.400000), (0.250000, 0.400000)),
((0.200000, 0.350000), (0.200000, 0.300000)),
((-0.250000, 0.840000), (-0.250000, 0.740000)),
((-0.350000, 0.110000), (-0.350000, 0.060000)),
((-0.440001, 0.650000), (-0.440001, 0.550000)),
((-0.850000, -0.250000), (-0.850000, -0.240000)),
((0.000000, -0.440000), (-0.010000, -0.440000)),
((-0.390000, -0.140000), (-0.310000, -0.140000)),
((0.000000, -0.630000), (0.089999, -0.630000)),
((0.639999, -0.530000), (0.639999, -0.640000)),
((0.099999, -0.490000), (0.099999, -0.530000)),
((0.679999, 0.540000), (0.630000, 0.540000)),
((-0.650001, -0.590000), (-0.650001, -0.580000)),
((-0.700001, -0.090000), (-0.700001, -0.040000)),
((0.730000, 0.200000), (0.730000, 0.260000)),
((-0.440001, 0.450000), (-0.310000, 0.450000)),
((0.490000, 0.840000), (0.490000, 0.790000)),
((-0.690001, -0.630000), (-0.550000, -0.630000)),
((0.880000, -0.040000), (0.929999, -0.040000)),
((-0.200001, -0.430000), (-0.050000, -0.430000)),
((-0.390000, -0.250000), (-0.400001, -0.250000)),
((-0.210000, 0.700000), (-0.210000, 0.800000)),
((0.290000, 0.890000), (0.290000, 0.900000)),
((0.389999, 0.750000), (0.389999, 0.650000)),
((0.730000, -0.980000), (0.730000, -0.940000)),
((0.480000, -0.200000), (0.480000, -0.190000)),
((0.440000, -0.730000), (0.440000, -0.790000)),
((0.290000, -0.350000), (0.290000, -0.340000)),
((0.630000, 0.310000), (0.730000, 0.310000)),
((0.530000, -0.730000), (0.580000, -0.730000)),
((-0.200001, -0.480000), (-0.200001, -0.530000)),
((-0.650001, 0.640000), (-0.740001, 0.640000)),
((0.299999, 0.640000), (0.299999, 0.500000)),
((0.889999, -0.240000), (0.889999, -0.250000)),
((0.790000, 0.940000), (0.790000, 0.850000)),
((-0.700001, 0.400000), (-0.650001, 0.400000)),
((-0.640000, -0.840000), (-0.740001, -0.840000)),
((-0.840000, 0.100000), (-0.840000, 0.050000)),
((0.290000, 0.350000), (0.200000, 0.350000)),
((-0.110001, 0.800000), (-0.060000, 0.800000)),
((-0.350000, 0.060000), (-0.300000, 0.060000)),
((-0.690001, 0.740000), (-0.700001, 0.740000)),
((-0.310000, -0.140000), (-0.310000, -0.050000)),
((0.639999, -0.640000), (0.630000, -0.640000)),
((0.000000, -0.490000), (0.000000, -0.630000)),
((0.380000, -0.890000), (0.240000, -0.890000)),
((0.630000, 0.540000), (0.630000, 0.590000)),
((-0.400001, -0.890000), (-0.500000, -0.890000)),
((0.440000, 0.110000), (0.540000, 0.110000)),
((-0.390000, 0.750000), (-0.390000, 0.540000)),
((-0.840000, 0.740000), (-0.840000, 0.690000)),
((-0.050000, -0.430000), (-0.050000, -0.440000)),
((0.990000, 0.990000), (0.740000, 0.990000)),
((0.690000, 0.100000), (0.690000, 0.000000)),
((0.990000, 0.110000), (0.990000, 0.590000)),
((0.580000, 0.750000), (0.589999, 0.750000)),
((0.780000, -0.890000), (0.589999, -0.890000)),
((-0.450001, 0.050000), (-0.450001, 0.100000)),
((0.429999, -0.730000), (0.440000, -0.730000)),
((0.290000, -0.340000), (0.339999, -0.340000)),
((0.530000, -0.740000), (0.530000, -0.730000)),
((-0.740001, 0.640000), (-0.740001, 0.540000)),
((0.940000, -0.090000), (0.940000, -0.100000)),
((0.690000, 0.450000), (0.740000, 0.450000)),
((0.839999, -0.430000), (0.889999, -0.430000)),
((0.580000, 0.310000), (0.580000, 0.360000)),
((0.250000, 0.540000), (0.190000, 0.540000)),
((0.790000, 0.850000), (0.839999, 0.850000)),
((-0.650001, 0.250000), (-0.650001, 0.390000)),
((-0.300000, -0.690000), (-0.400001, -0.690000)),
((-0.300000, -0.630000), (-0.210000, -0.630000)),
((-0.790000, -0.140000), (-0.740001, -0.140000)),
((-0.950001, 0.450000), (-0.800000, 0.450000)),
((0.089999, -0.150000), (0.089999, -0.140000)),
((0.240000, 0.250000), (0.150000, 0.250000)),
((0.099999, 0.300000), (0.099999, 0.160000)),
((-0.700001, 0.740000), (-0.700001, 0.800000)),
((-0.390000, -0.740000), (-0.390000, -0.790000)),
((-0.800000, -0.240000), (-0.800000, -0.200000)),
((-0.310000, 0.050000), (-0.360001, 0.050000)),
((0.049999, -0.490000), (0.000000, -0.490000)),
((-0.360001, -0.290000), (-0.360001, -0.150000)),
((-0.590000, -0.480000), (-0.590000, -0.590000)),
((-0.490001, -0.940000), (-0.490001, -0.980000)),
((0.830000, 0.800000), (0.830000, 0.840000)),
((-0.640000, -0.540000), (-0.690001, -0.540000)),
((-0.590000, -0.100000), (-0.640000, -0.100000)),
((-0.260000, 0.550000), (-0.260000, 0.700000)),
((-0.700001, 0.900000), (-0.600000, 0.900000)),
((-0.990001, 0.300000), (-0.990001, 0.160000)),
((0.990000, 0.590000), (0.929999, 0.590000)),
((0.630000, -0.940000), (0.630000, -0.930000)),
((0.530000, -0.190000), (0.530000, -0.140000)),
((0.339999, -0.340000), (0.339999, -0.290000)),
((0.730000, 0.350000), (0.679999, 0.350000)),
((0.639999, -0.740000), (0.530000, -0.740000)),
((-0.050000, -0.490000), (-0.050000, -0.540000)),
((-0.640000, 0.540000), (-0.650001, 0.540000)),
((0.940000, -0.100000), (0.790000, -0.100000)),
((0.480000, 0.500000), (0.480000, 0.590000)),
((-0.250000, 0.600000), (-0.200001, 0.600000)),
((0.250000, 0.690000), (0.250000, 0.540000)),
((0.839999, 0.850000), (0.839999, 0.790000)),
((-0.900001, -0.140000), (-0.900001, -0.050000)),
((-0.450001, 0.160000), (-0.360001, 0.160000)),
((-0.740001, -0.140000), (-0.740001, -0.290000)),
((-0.950001, 0.440000), (-0.950001, 0.450000)),
((0.240000, 0.010000), (0.240000, 0.250000)),
((-0.500000, 0.650000), (-0.440001, 0.650000)),
((-0.850000, -0.240000), (-0.800000, -0.240000)),
((0.089999, -0.090000), (0.089999, -0.040000)),
((0.200000, -0.780000), (0.200000, -0.830000)),
((0.630000, -0.590000), (0.580000, -0.590000)),
((0.490000, 0.590000), (0.490000, 0.490000)),
((-0.260000, -0.300000), (-0.310000, -0.300000)),
((-0.590000, -0.590000), (-0.650001, -0.590000)),
((-0.500000, -0.840000), (-0.590000, -0.840000)),
((-0.700001, -0.040000), (-0.650001, -0.040000)),
((0.780000, 0.800000), (0.830000, 0.800000)),
((-0.110001, 0.990000), (-0.350000, 0.990000)),
((-0.790000, 0.750000), (-0.790000, 0.740000)),
((-0.060000, -0.300000), (-0.060000, -0.250000)),
((-0.350000, 0.550000), (-0.260000, 0.550000)),
((-0.900001, 0.540000), (-0.950001, 0.540000)),
((0.880000, 0.010000), (0.880000, 0.110000)),
((0.830000, 0.750000), (0.880000, 0.750000)),
((0.730000, -0.940000), (0.630000, -0.940000)),
((-0.440001, 0.150000), (-0.440001, 0.050000)),
((0.389999, -0.780000), (0.429999, -0.780000)),
((0.339999, -0.350000), (0.290000, -0.350000)),
((0.730000, 0.310000), (0.730000, 0.350000)),
((-0.050000, -0.540000), (-0.210000, -0.540000)),
((-0.800000, 0.540000), (-0.800000, 0.550000)),
((0.839999, -0.040000), (0.839999, -0.090000)),
((0.299999, 0.500000), (0.480000, 0.500000)),
((0.679999, -0.290000), (0.830000, -0.290000)),
((-0.450001, 0.390000), (-0.450001, 0.400000)),
((0.049999, -0.830000), (0.049999, -0.890000)),
((0.940000, 0.940000), (0.790000, 0.940000)),
((-0.840000, 0.990000), (-0.840000, 0.940000)),
((0.089999, 0.310000), (0.190000, 0.310000)),
((-0.790000, 0.100000), (-0.840000, 0.100000)),
((0.099999, 0.010000), (0.240000, 0.010000)),
((-0.210000, 0.010000), (-0.210000, 0.200000)),
((0.040000, -0.090000), (0.089999, -0.090000)),
((0.530000, 0.500000), (0.580000, 0.500000)),
((-0.550000, 0.400000), (-0.490001, 0.400000)),
((-0.500000, -0.890000), (-0.500000, -0.840000)),
((0.099999, -0.100000), (0.049999, -0.100000)),
((0.780000, 0.840000), (0.780000, 0.890000)),
((-0.350000, 0.990000), (-0.350000, 0.900000)),
((-0.950001, -0.580000), (-0.940001, -0.580000)),
((0.880000, 0.590000), (0.830000, 0.590000)),
((-0.790000, 0.740000), (-0.840000, 0.740000)),
((-0.060000, -0.250000), (-0.100000, -0.250000)),
((-0.350000, 0.690000), (-0.350000, 0.550000)),
((-0.600000, 0.950000), (-0.490001, 0.950000)),
((-0.210000, -0.050000), (-0.210000, -0.040000)),
((0.690000, 0.000000), (0.630000, 0.000000)),
((0.830000, 0.700000), (0.830000, 0.750000)),
((0.589999, -0.890000), (0.589999, -0.940000)),
((-0.440001, 0.050000), (-0.450001, 0.050000)),
((0.389999, -0.290000), (0.389999, -0.300000)),
((-0.590000, 0.590000), (-0.640000, 0.590000)),
((0.740000, 0.450000), (0.740000, 0.440000)),
((-0.200001, 0.690000), (-0.250000, 0.690000)),
((0.679999, -0.340000), (0.679999, -0.290000)),
((0.580000, 0.360000), (0.630000, 0.360000)),
((-0.160001, -0.630000), (-0.150001, -0.630000)),
((0.299999, 0.700000), (0.299999, 0.690000)),
((-0.890000, 0.150000), (-0.890000, 0.100000)),
((-0.300000, -0.680000), (-0.300000, -0.690000)),
((0.190000, 0.310000), (0.190000, 0.360000)),
((-0.900001, 0.390000), (-0.900001, 0.440000)),
((0.089999, -0.140000), (0.139999, -0.140000)),
((0.339999, 0.260000), (0.339999, 0.400000)),
((-0.210000, 0.200000), (-0.260000, 0.200000)),
((-0.390000, -0.790000), (-0.440001, -0.790000)),
((0.530000, 0.450000), (0.530000, 0.500000)),
((-0.550000, 0.260000), (-0.550000, 0.400000)),
((-0.850000, -0.340000), (-0.840000, -0.340000)),
((-0.440001, -0.940000), (-0.490001, -0.940000)),
((0.200000, -0.050000), (0.200000, -0.140000)),
((-0.650001, 0.050000), (-0.690001, 0.050000)),
((0.740000, 0.750000), (0.780000, 0.750000)),
((-0.940001, -0.580000), (-0.940001, -0.680000)),
((-0.690001, -0.540000), (-0.690001, -0.630000)),
((0.880000, 0.540000), (0.880000, 0.590000)),
((0.940000, 0.690000), (0.880000, 0.690000)),
((-0.100000, -0.250000), (-0.100000, -0.300000)),
((0.250000, -0.290000), (0.299999, -0.290000)),
((-0.260000, 0.700000), (-0.210000, 0.700000)),
((-0.600000, 0.900000), (-0.600000, 0.950000)),
((-0.750000, 0.400000), (-0.750000, 0.450000)),
((0.929999, -0.930000), (0.929999, -0.880000)),
((0.480000, -0.190000), (0.530000, -0.190000)),
((-0.350000, 0.250000), (-0.350000, 0.150000)),
((0.240000, -0.430000), (0.339999, -0.430000)),
((0.730000, -0.790000), (0.730000, -0.690000)),
((-0.210000, -0.490000), (-0.350000, -0.490000)),
((0.240000, 0.890000), (0.139999, 0.890000)),
((-0.640000, 0.590000), (-0.640000, 0.540000)),
((0.780000, -0.340000), (0.790000, -0.340000)),
((0.530000, 0.210000), (0.530000, 0.300000)),
((-0.700001, 0.150000), (-0.890000, 0.150000)),
((-0.590000, 0.200000), (-0.590000, 0.150000)),
((-0.500000, 0.940000), (-0.590000, 0.940000)),
((0.589999, 0.640000), (0.380000, 0.640000)),
((-0.540000, -0.690000), (-0.550000, -0.690000)),
((-0.840000, -0.790000), (-0.840000, -0.890000)),
((0.190000, 0.360000), (0.240000, 0.360000)),
((-0.890000, 0.390000), (-0.900001, 0.390000)),
((0.139999, -0.240000), (0.139999, -0.150000)),
((-0.300000, 0.060000), (-0.300000, 0.010000)),
((-0.890000, -0.240000), (-0.890000, -0.290000)),
((-0.010000, -0.290000), (0.040000, -0.290000)),
((0.630000, -0.640000), (0.630000, -0.590000)),
((0.630000, 0.590000), (0.490000, 0.590000)),
((0.200000, -0.140000), (0.250000, -0.140000)),
((-0.650001, -0.040000), (-0.650001, 0.050000)),
((-0.390000, 0.540000), (-0.440001, 0.540000)),
((0.589999, 0.890000), (0.589999, 0.840000)),
((-0.300000, 0.900000), (-0.300000, 0.790000)),
((0.929999, 0.540000), (0.880000, 0.540000)),
((0.880000, 0.690000), (0.880000, 0.700000)),
((-0.050000, -0.300000), (-0.060000, -0.300000)),
((0.250000, -0.250000), (0.250000, -0.290000)),
((0.630000, 0.010000), (0.679999, 0.010000)),
((0.630000, -0.140000), (0.630000, -0.090000)),
((0.429999, -0.780000), (0.429999, -0.730000)),
((0.339999, -0.430000), (0.339999, -0.350000)),
((0.780000, -0.790000), (0.730000, -0.790000)),
((-0.740001, 0.540000), (-0.800000, 0.540000)),
((0.790000, -0.340000), (0.790000, -0.440000)),
((-0.450001, 0.400000), (-0.260000, 0.400000)),
((0.490000, -0.100000), (0.490000, -0.150000)),
((-0.900001, 0.100000), (-0.900001, 0.150000)),
((-0.650001, 0.390000), (-0.700001, 0.390000)),
((-0.590000, 0.940000), (-0.590000, 0.900000)),
((0.589999, 0.690000), (0.589999, 0.640000)),
((-0.840000, 0.940000), (-0.850000, 0.940000)),
((-0.300000, 0.440000), (-0.590000, 0.440000)),
((-0.750000, -0.790000), (-0.840000, -0.790000)),
((-0.890000, 0.440000), (-0.890000, 0.390000)),
((0.139999, -0.090000), (0.150000, -0.090000)),
((0.200000, 0.300000), (0.099999, 0.300000)),
((-0.350000, -0.930000), (-0.310000, -0.930000)),
((-0.940001, -0.240000), (-0.890000, -0.240000)),
((0.380000, -0.440000), (0.250000, -0.440000)),
((0.240000, -0.880000), (0.339999, -0.880000)),
((0.139999, 0.440000), (0.139999, 0.490000)),
((-0.640000, 0.490000), (-0.640000, 0.260000)),
((0.299999, 0.000000), (0.299999, -0.050000)),
((-0.210000, 0.640000), (-0.210000, 0.650000)),
((0.780000, 0.890000), (0.589999, 0.890000)),
((-0.890000, -0.680000), (-0.890000, -0.840000)),
((-0.700001, -0.580000), (-0.700001, -0.530000)),
((0.880000, 0.700000), (0.929999, 0.700000)),
((-0.640000, -0.780000), (-0.590000, -0.780000)),
((-0.200001, -0.300000), (-0.200001, -0.430000)),
((-0.850000, 0.600000), (-0.850000, 0.650000)),
((-0.300000, 0.590000), (-0.310000, 0.590000)),
((-0.210000, -0.040000), (-0.160001, -0.040000)),
((-0.650001, 0.450000), (-0.650001, 0.500000)),
((0.630000, 0.000000), (0.630000, 0.010000)),
((0.990000, 0.600000), (0.990000, 0.640000)),
((-0.950001, 0.890000), (-0.950001, 0.900000)),
((0.339999, -0.290000), (0.389999, -0.290000)),
((0.639999, -0.690000), (0.639999, -0.740000)),
((0.790000, -0.040000), (0.790000, -0.090000)),
((0.790000, -0.440000), (0.690000, -0.440000)),
((0.299999, 0.690000), (0.250000, 0.690000)),
((-0.750000, 0.110000), (-0.700001, 0.110000)),
((0.679999, 0.690000), (0.589999, 0.690000)),
((-0.300000, 0.490000), (-0.300000, 0.440000)),
((-0.750000, -0.940000), (-0.750000, -0.790000)),
((-0.250000, -0.540000), (-0.300000, -0.540000)),
((-0.900001, 0.440000), (-0.950001, 0.440000)),
((0.139999, -0.140000), (0.139999, -0.090000)),
((-0.100000, 0.790000), (-0.100000, 0.690000)),
((-0.310000, -0.930000), (-0.310000, -0.880000)),
((-0.840000, -0.290000), (-0.840000, -0.300000)),
((-0.360001, 0.100000), (-0.390000, 0.100000)),
((0.150000, -0.780000), (0.200000, -0.780000)),
((0.339999, -0.880000), (0.339999, -0.790000)),
((0.139999, 0.490000), (0.089999, 0.490000)),
((0.839999, -0.480000), (0.839999, -0.580000)),
((-0.210000, 0.650000), (-0.160001, 0.650000)),
((0.780000, 0.750000), (0.780000, 0.800000)),
((-0.890000, -0.840000), (-0.990001, -0.840000)),
((-0.700001, -0.530000), (-0.640000, -0.530000)),
((-0.640000, -0.730000), (-0.640000, -0.780000)),
((-0.010000, -0.200000), (-0.050000, -0.200000)),
((0.389999, -0.240000), (0.389999, -0.250000)),
((-0.300000, 0.700000), (-0.300000, 0.590000)),
((-0.160001, -0.090000), (-0.160001, -0.050000)),
((-0.750000, 0.450000), (-0.650001, 0.450000)),
((0.839999, -0.930000), (0.929999, -0.930000)),
((0.880000, 0.110000), (0.990000, 0.110000)),
((-0.950001, 0.900000), (-0.900001, 0.900000)),
((0.299999, -0.590000), (0.299999, -0.730000)),
((-0.210000, -0.540000), (-0.210000, -0.490000)),
((0.839999, -0.090000), (0.940000, -0.090000)),
((-0.260000, 0.450000), (-0.160001, 0.450000)),
((0.339999, -0.150000), (0.339999, -0.140000)),
((0.530000, 0.300000), (0.480000, 0.300000)),
((-0.050000, -0.830000), (0.049999, -0.830000)),
((-0.990001, 0.150000), (-0.990001, -0.140000)),
((-0.540000, 0.900000), (-0.540000, 0.890000)),
((-0.540000, -0.640000), (-0.540000, -0.690000)),
((-0.200001, 0.490000), (-0.300000, 0.490000)),
((-0.840000, -0.890000), (-0.890000, -0.890000)),
((-0.010000, -0.880000), (0.040000, -0.880000)),
((-0.250000, -0.530000), (-0.250000, -0.540000)),
((0.139999, 0.890000), (0.139999, 0.940000)),
((-0.790000, -0.040000), (-0.790000, -0.140000)),
((0.089999, -0.240000), (0.139999, -0.240000)),
((-0.100000, 0.690000), (-0.110001, 0.690000)),
((-0.800000, -0.200000), (-0.940001, -0.200000)),
((0.040000, -0.290000), (0.040000, -0.090000)),
((-0.390000, 0.100000), (-0.390000, 0.000000)),
((0.429999, 0.440000), (0.429999, 0.450000)),
((0.150000, -0.690000), (0.150000, -0.780000)),
((0.089999, 0.490000), (0.089999, 0.550000)),
((-0.210000, -0.380000), (-0.210000, -0.200000)),
((-0.700001, 0.060000), (-0.650001, 0.060000)),
((-0.160001, 0.550000), (-0.160001, 0.640000)),
((-0.350000, 0.900000), (-0.300000, 0.900000)),
((-0.990001, -0.840000), (-0.990001, -0.980000)),
((0.790000, 0.500000), (0.790000, 0.400000)),
((0.929999, 0.740000), (0.839999, 0.740000)),
((-0.050000, -0.200000), (-0.050000, -0.300000)),
((0.150000, 0.900000), (0.240000, 0.900000)),
((-0.160001, 0.060000), (-0.100000, 0.060000)),
((-0.600000, 0.500000), (-0.600000, 0.550000)),
((0.889999, -0.740000), (0.889999, -0.890000)),
((-0.900001, 0.800000), (-0.900001, 0.890000)),
((0.630000, -0.090000), (0.740000, -0.090000)),
((-0.260000, 0.110000), (-0.260000, 0.150000)),
((0.299999, -0.730000), (0.389999, -0.730000)),
((-0.400001, -0.530000), (-0.360001, -0.530000)),
((-0.260000, 0.400000), (-0.260000, 0.450000)),
((0.490000, -0.150000), (0.339999, -0.150000)),
((0.290000, 0.200000), (0.290000, 0.210000)),
((-0.050000, -0.730000), (-0.050000, -0.830000)),
((-0.900001, 0.150000), (-0.990001, 0.150000)),
((0.630000, 0.650000), (0.679999, 0.650000)),
((0.040000, -0.880000), (0.040000, -0.840000)),
((-0.690001, 0.490000), (-0.790000, 0.490000)),
((-0.110001, 0.690000), (-0.110001, 0.740000)),
((-0.160001, 0.360000), (-0.160001, 0.400000)),
((-0.260000, -0.880000), (-0.260000, -0.830000)),
((-0.890000, -0.300000), (-0.890000, -0.630000)),
((0.429999, 0.450000), (0.530000, 0.450000)),
((0.200000, -0.690000), (0.150000, -0.690000)),
((0.380000, -0.490000), (0.380000, -0.440000)),
((0.240000, -0.790000), (0.240000, -0.740000)),
((0.150000, 0.440000), (0.139999, 0.440000)),
((-0.640000, 0.260000), (-0.550000, 0.260000)),
((-0.450001, -0.740000), (-0.450001, -0.730000)),
((0.730000, -0.490000), (0.730000, -0.480000)),
((-0.700001, 0.000000), (-0.700001, 0.060000)),
((-0.310000, 0.790000), (-0.310000, 0.840000)),
((-0.940001, -0.680000), (-0.890000, -0.680000)),
((0.730000, 0.500000), (0.790000, 0.500000)),
((-0.700001, -0.790000), (-0.700001, -0.730000)),
((-0.100000, -0.300000), (-0.200001, -0.300000)),
((-0.850000, 0.650000), (-0.800000, 0.650000)),
((0.240000, 0.900000), (0.240000, 0.950000)),
((-0.160001, -0.040000), (-0.160001, 0.060000)),
((0.889999, -0.890000), (0.839999, -0.890000)),
((0.639999, 0.050000), (0.540000, 0.050000)),
((-0.350000, 0.150000), (-0.440001, 0.150000)),
((-0.700001, 0.300000), (-0.800000, 0.300000)),
((0.690000, -0.440000), (0.690000, -0.530000)),
((0.830000, -0.290000), (0.830000, -0.240000)),
((0.380000, -0.140000), (0.380000, -0.100000)),
((0.480000, 0.310000), (0.580000, 0.310000)),
((-0.700001, 0.110000), (-0.700001, 0.150000)),
((-0.590000, 0.150000), (-0.600000, 0.150000)),
((-0.400001, -0.690000), (-0.400001, -0.640000)),
((-0.740001, -0.930000), (-0.640000, -0.930000)),
((0.040000, -0.840000), (-0.050000, -0.840000)),
((-0.310000, -0.580000), (-0.310000, -0.530000)),
((-0.690001, 0.600000), (-0.690001, 0.490000)),
((0.040000, 0.790000), (-0.100000, 0.790000)),
((-0.300000, 0.010000), (-0.210000, 0.010000)),
((-0.840000, -0.300000), (-0.890000, -0.300000)),
((-0.160001, -0.390000), (-0.160001, -0.340000)),
((-0.490001, 0.000000), (-0.490001, -0.040000)),
((0.480000, 0.400000), (0.480000, 0.440000)),
((0.240000, -0.740000), (0.190000, -0.740000)),
((0.429999, 0.150000), (0.389999, 0.150000)),
((0.139999, 0.550000), (0.139999, 0.600000)),
((-0.490001, -0.980000), (-0.400001, -0.980000)),
((0.730000, -0.480000), (0.839999, -0.480000)),
((-0.300000, -0.200000), (-0.300000, -0.250000)),
((0.190000, -0.150000), (0.190000, -0.050000)),
((-0.640000, -0.530000), (-0.640000, -0.540000)),
((-0.640000, 0.100000), (-0.640000, -0.050000)),
((0.929999, 0.400000), (0.929999, 0.540000)),
((0.389999, -0.250000), (0.250000, -0.250000)),
((0.240000, 0.950000), (0.349999, 0.950000)),
((-0.300000, -0.090000), (-0.160001, -0.090000)),
((0.839999, -0.890000), (0.839999, -0.930000)),
((0.639999, 0.100000), (0.639999, 0.050000)),
((-0.950001, 0.740000), (-0.950001, 0.800000)),
((0.530000, -0.140000), (0.630000, -0.140000)),
((-0.700001, 0.210000), (-0.700001, 0.300000)),
((0.000000, 0.850000), (0.089999, 0.850000)),
((0.480000, 0.300000), (0.480000, 0.310000)),
((-0.600000, 0.150000), (-0.600000, 0.250000)),
((-0.590000, 0.900000), (-0.540000, 0.900000)),
((0.929999, 0.300000), (0.839999, 0.300000)),
((-0.400001, -0.640000), (-0.540000, -0.640000)),
((-0.050000, 0.600000), (-0.050000, 0.540000)),
((-0.740001, -0.840000), (-0.740001, -0.930000)),
((0.690000, -0.240000), (0.790000, -0.240000)),
((0.139999, 0.940000), (0.049999, 0.940000)),
((-0.800000, -0.040000), (-0.790000, -0.040000)),
((-0.650001, 0.210000), (-0.640000, 0.210000)),
((-0.200001, 0.740000), (-0.200001, 0.690000)),
((-0.210000, 0.850000), (-0.210000, 0.900000)),
((-0.940001, -0.200000), (-0.940001, -0.240000)),
((-0.160001, -0.340000), (-0.010000, -0.340000)),
((-0.490001, -0.040000), (-0.440001, -0.040000)),
((0.580000, -0.590000), (0.580000, -0.490000)),
((0.190000, -0.740000), (0.190000, -0.730000)),
((0.429999, 0.100000), (0.429999, 0.150000)),
((0.490000, 0.490000), (0.150000, 0.490000)),
((-0.590000, 0.490000), (-0.640000, 0.490000)),
((-0.210000, -0.200000), (-0.300000, -0.200000)),
((0.299999, -0.050000), (0.200000, -0.050000)),
((-0.690001, 0.050000), (-0.690001, 0.000000)),
((0.929999, 0.700000), (0.929999, 0.740000)),
((-0.990001, 0.990000), (-0.900001, 0.990000)),
((-0.800000, 0.700000), (-0.750000, 0.700000)),
((0.940000, -0.740000), (0.889999, -0.740000)),
((0.929999, 0.600000), (0.990000, 0.600000)),
((-0.950001, 0.800000), (-0.900001, 0.800000)),
((0.389999, -0.730000), (0.389999, -0.780000)),
((0.490000, -0.240000), (0.490000, -0.250000)),
((0.730000, -0.690000), (0.639999, -0.690000)),
((-0.400001, -0.540000), (-0.400001, -0.530000)),
((0.790000, -0.090000), (0.830000, -0.090000)),
((0.000000, 0.900000), (0.000000, 0.850000)),
((-0.100000, 0.490000), (-0.150001, 0.490000)),
((0.339999, 0.200000), (0.290000, 0.200000)),
((-0.600000, 0.250000), (-0.650001, 0.250000)),
((-0.590000, 0.890000), (-0.590000, 0.840000)),
((0.839999, 0.300000), (0.839999, 0.250000)),
((0.679999, 0.650000), (0.679999, 0.690000)),
((-0.060000, 0.600000), (-0.050000, 0.600000)),
((0.690000, -0.200000), (0.690000, -0.240000)),
((0.240000, 0.840000), (0.240000, 0.890000)),
((-0.890000, -0.040000), (-0.890000, -0.090000)),
((-0.640000, 0.210000), (-0.640000, 0.150000)),
((0.150000, 0.250000), (0.150000, 0.200000)),
((-0.010000, 0.450000), (0.040000, 0.450000)),
((-0.310000, -0.880000), (-0.260000, -0.880000)),
((-0.440001, -0.040000), (-0.440001, -0.090000)),
((-0.540000, -0.630000), (-0.500000, -0.630000)),
((0.190000, -0.580000), (0.190000, -0.530000)),
((0.580000, -0.490000), (0.380000, -0.490000)),
((0.339999, -0.790000), (0.240000, -0.790000)),
((0.150000, 0.490000), (0.150000, 0.440000)),
((-0.400001, -0.740000), (-0.450001, -0.740000)),
((-0.650001, -0.580000), (-0.600000, -0.580000)),
((-0.310000, -0.250000), (-0.310000, -0.190000)),
((0.630000, 0.160000), (0.639999, 0.160000)),
((-0.310000, 0.450000), (-0.310000, 0.500000)),
((-0.310000, 0.840000), (-0.360001, 0.840000)),
((0.830000, 0.690000), (0.790000, 0.690000)),
((-0.700001, -0.730000), (-0.640000, -0.730000)),
((0.349999, -0.240000), (0.389999, -0.240000)),
((-0.800000, 0.650000), (-0.800000, 0.700000)),
((0.679999, 0.010000), (0.679999, 0.100000)),
((0.929999, 0.590000), (0.929999, 0.600000)),
((-0.900001, 0.900000), (-0.900001, 0.990000)),
((0.440000, -0.980000), (0.730000, -0.980000)),
((0.049999, -0.050000), (0.000000, -0.050000)),
((0.830000, -0.090000), (0.830000, -0.040000)),
((0.830000, -0.240000), (0.889999, -0.240000)),
((-0.100000, 0.500000), (-0.100000, 0.490000)),
((0.540000, -0.530000), (0.540000, -0.630000)),
((-0.200001, 0.540000), (-0.200001, 0.490000)),
((-0.260000, -0.940000), (-0.260000, -0.930000)),
((-0.310000, -0.530000), (-0.250000, -0.530000)),
((0.049999, 0.890000), (0.040000, 0.890000)),
((-0.890000, -0.090000), (-0.800000, -0.090000)),
((-0.640000, 0.150000), (-0.690001, 0.150000)),
((0.040000, 0.450000), (0.040000, 0.790000)),
((-0.250000, 0.940000), (-0.250000, 0.850000)),
((-0.250000, -0.830000), (-0.250000, -0.880000)),
((-0.390000, 0.000000), (-0.490001, 0.000000)),
((0.089999, -0.580000), (0.190000, -0.580000)),
((0.530000, 0.010000), (0.530000, 0.100000)),
((0.580000, 0.500000), (0.580000, 0.540000)),
((0.089999, 0.550000), (0.139999, 0.550000)),
((-0.950001, -0.290000), (-0.940001, -0.290000)),
((-0.540000, -0.740000), (-0.540000, -0.780000)),
((-0.600000, -0.580000), (-0.600000, -0.490000)),
((0.639999, 0.160000), (0.639999, 0.110000)),
((-0.160001, 0.640000), (-0.210000, 0.640000)),
((-0.640000, -0.050000), (-0.690001, -0.050000)),
((0.990000, -0.150000), (0.830000, -0.150000)),
((-0.800000, -0.980000), (-0.800000, -0.940000)),
((-0.500000, -0.100000), (-0.550000, -0.100000)),
((-0.310000, 0.590000), (-0.310000, 0.690000)),
((0.349999, 0.950000), (0.349999, 0.940000)),
((-0.940001, 0.310000), (-0.940001, 0.250000)),
((-0.650001, 0.500000), (-0.600000, 0.500000)),
((-0.260000, 0.150000), (-0.310000, 0.150000)),
((0.679999, 0.300000), (0.630000, 0.300000)),
((-0.350000, -0.490000), (-0.350000, -0.540000)),
((0.049999, 0.060000), (0.049999, -0.050000)),
((0.830000, -0.040000), (0.839999, -0.040000)),
((-0.010000, 0.840000), (-0.010000, 0.900000)),
((0.990000, -0.830000), (0.990000, -0.640000)),
((0.000000, -0.690000), (0.000000, -0.780000)),
((0.830000, 0.250000), (0.830000, 0.350000)),
((0.780000, 0.550000), (0.780000, 0.640000)),
((0.730000, -0.590000), (0.730000, -0.580000)),
((-0.260000, -0.930000), (-0.060000, -0.930000)),
((0.049999, 0.940000), (0.049999, 0.890000)),
((-0.800000, -0.090000), (-0.800000, -0.040000)),
((0.139999, -0.150000), (0.089999, -0.150000)),
((0.139999, 0.200000), (0.139999, 0.260000)),
((-0.110001, 0.740000), (-0.200001, 0.740000)),
((-0.250000, 0.850000), (-0.210000, 0.850000)),
((-0.260000, 0.360000), (-0.160001, 0.360000)),
((-0.010000, -0.340000), (-0.010000, -0.290000)),
((0.190000, 0.400000), (0.190000, 0.450000)),
((0.200000, -0.530000), (0.200000, -0.690000)),
((-0.260000, -0.240000), (-0.250000, -0.240000)),
((-0.540000, -0.780000), (-0.400001, -0.780000)),
((-0.600000, -0.490000), (-0.740001, -0.490000)),
((-0.690001, 0.000000), (-0.700001, 0.000000)),
((0.639999, 0.110000), (0.780000, 0.110000)),
((-0.300000, 0.790000), (-0.310000, 0.790000)),
((-0.490001, 0.600000), (-0.490001, 0.490000)),
((-0.800000, -0.940000), (-0.850000, -0.940000)),
((-0.500000, -0.150000), (-0.500000, -0.100000)),
((0.490000, -0.780000), (0.679999, -0.780000)),
((-0.790000, -0.640000), (-0.790000, -0.690000)),
((-0.890000, 0.690000), (-0.890000, 0.640000)),
((-0.310000, 0.150000), (-0.310000, 0.250000)),
((0.490000, -0.250000), (0.440000, -0.250000)),
((0.679999, 0.210000), (0.679999, 0.300000)),
((-0.350000, -0.540000), (-0.400001, -0.540000)),
((0.990000, -0.640000), (0.940000, -0.640000)),
((0.540000, -0.630000), (0.589999, -0.630000)),
((-0.160001, 0.450000), (-0.160001, 0.500000)),
((-0.950001, -0.250000), (-0.950001, -0.190000)),
((0.339999, -0.140000), (0.380000, -0.140000)),
((-0.750000, 0.200000), (-0.750000, 0.210000)),
((-0.590000, 0.840000), (-0.600000, 0.840000)),
((0.830000, 0.350000), (0.740000, 0.350000)),
((0.780000, 0.640000), (0.630000, 0.640000)),
((0.730000, -0.580000), (0.830000, -0.580000)),
((-0.050000, -0.840000), (-0.050000, -0.940000)),
((-0.440001, -0.580000), (-0.310000, -0.580000)),
((0.380000, 0.840000), (0.240000, 0.840000)),
((-0.940001, -0.040000), (-0.890000, -0.040000)),
((0.150000, 0.200000), (0.139999, 0.200000)),
((-0.110001, 0.350000), (-0.110001, 0.360000)),
((-0.260000, 0.200000), (-0.260000, 0.360000)),
((0.480000, 0.440000), (0.429999, 0.440000)),
((0.530000, 0.540000), (0.530000, 0.550000)),
((-0.250000, -0.240000), (-0.250000, -0.350000)),
((-0.400001, -0.780000), (-0.400001, -0.740000)),
((-0.500000, -0.630000), (-0.500000, -0.430000)),
((0.250000, -0.150000), (0.190000, -0.150000)),
((-0.490001, 0.490000), (-0.500000, 0.490000)),
((0.790000, 0.400000), (0.929999, 0.400000)),
((0.830000, 0.640000), (0.830000, 0.690000)),
((-0.850000, -0.940000), (-0.850000, -0.930000)),
((0.440000, 0.260000), (0.490000, 0.260000)),
((-0.850000, 0.850000), (-0.700001, 0.850000)),
((-0.790000, -0.690000), (-0.800000, -0.690000)),
((-0.990001, 0.490000), (-0.990001, 0.310000)),
((0.679999, 0.100000), (0.639999, 0.100000)),
((-0.890000, 0.640000), (-0.900001, 0.640000)),
((-0.310000, 0.250000), (-0.350000, 0.250000)),
((0.490000, -0.690000), (0.490000, -0.780000)),
((0.000000, 0.110000), (0.000000, 0.060000)),
((0.940000, -0.640000), (0.940000, -0.690000)),
((0.589999, -0.630000), (0.589999, -0.730000)),
((0.580000, -0.480000), (0.580000, -0.430000)),
((0.299999, -0.530000), (0.540000, -0.530000)),
((-0.750000, 0.210000), (-0.700001, 0.210000)),
((0.740000, 0.350000), (0.740000, 0.300000)),
((0.780000, -0.690000), (0.780000, -0.590000)),
((-0.650001, -0.090000), (-0.600000, -0.090000)),
((0.190000, -0.350000), (0.190000, -0.200000)),
((-0.060000, -0.740000), (-0.150001, -0.740000)),
((0.429999, 0.690000), (0.429999, 0.790000)),
((-0.690001, 0.150000), (-0.690001, 0.100000)),
((-0.790000, 0.490000), (-0.790000, 0.440000)),
((0.240000, 0.260000), (0.240000, 0.310000)),
((-0.250000, -0.880000), (-0.210000, -0.880000)),
((0.530000, 0.100000), (0.429999, 0.100000)),
((0.580000, 0.540000), (0.530000, 0.540000)),
((-0.250000, -0.350000), (-0.360001, -0.350000)),
((-0.490001, -0.740000), (-0.540000, -0.740000)),
((0.250000, -0.140000), (0.250000, -0.150000)),
((0.830000, 0.840000), (0.780000, 0.840000)),
((-0.500000, 0.490000), (-0.500000, 0.540000)),
((-0.940001, -0.980000), (-0.800000, -0.980000)),
((0.490000, 0.260000), (0.490000, 0.250000)),
((-0.310000, 0.690000), (-0.350000, 0.690000)),
((-0.850000, 0.790000), (-0.850000, 0.850000)),
((-0.740001, -0.630000), (-0.740001, -0.640000)),
((-0.160001, -0.050000), (-0.210000, -0.050000)),
((-0.990001, 0.310000), (-0.940001, 0.310000)),
((0.880000, 0.350000), (0.880000, 0.390000)),
((0.540000, -0.690000), (0.490000, -0.690000)),
((0.580000, 0.160000), (0.580000, 0.210000)),
((-0.010000, 0.900000), (0.000000, 0.900000)),
((0.940000, -0.830000), (0.990000, -0.830000)),
((0.440000, -0.480000), (0.580000, -0.480000)),
((-0.900001, -0.430000), (-0.900001, -0.250000)),
((0.000000, -0.780000), (0.099999, -0.780000)),
((-0.690001, 0.360000), (-0.690001, 0.200000)),
((-0.540000, 0.890000), (-0.590000, 0.890000)),
((0.839999, 0.250000), (0.830000, 0.250000)),
((0.830000, -0.490000), (0.730000, -0.490000)),
((-0.640000, -0.930000), (-0.640000, -0.940000)),
((0.200000, -0.350000), (0.190000, -0.350000)),
((-0.150001, -0.740000), (-0.150001, -0.830000)),
((-0.100000, -0.100000), (-0.110001, -0.100000)),
((0.429999, 0.790000), (0.380000, 0.790000)),
((-0.050000, 0.800000), (0.049999, 0.800000)),
((-0.160001, 0.350000), (-0.250000, 0.350000)),
((0.589999, 0.550000), (0.589999, 0.500000)),
((-0.740001, -0.490000), (-0.740001, -0.540000)),
((-0.300000, -0.250000), (-0.310000, -0.250000)),
((0.480000, -0.140000), (0.480000, 0.000000)),
((-0.310000, 0.500000), (-0.210000, 0.500000)),
((-0.540000, 0.600000), (-0.490001, 0.600000)),
((0.839999, 0.740000), (0.839999, 0.640000)),
((-0.950001, -0.980000), (-0.950001, -0.930000)),
((0.490000, 0.250000), (0.250000, 0.250000)),
((0.389999, 0.850000), (0.389999, 0.800000)),
((-0.800000, -0.590000), (-0.850000, -0.590000)),
((0.940000, 0.350000), (0.880000, 0.350000)),
((-0.900001, 0.740000), (-0.950001, 0.740000)),
((0.679999, 0.700000), (0.679999, 0.800000)),
((0.679999, -0.780000), (0.679999, -0.730000)),
((-0.400001, 0.940000), (-0.400001, 0.950000)),
((0.530000, -0.040000), (0.580000, -0.040000)),
((0.630000, -0.730000), (0.630000, -0.680000)),
((-0.160001, 0.500000), (-0.100000, 0.500000)),
((0.440000, -0.250000), (0.440000, -0.480000)),
((-0.900001, -0.250000), (-0.950001, -0.250000)),
((0.290000, 0.210000), (0.530000, 0.210000)),
((0.190000, -0.830000), (0.190000, -0.790000)),
((0.349999, -0.490000), (0.299999, -0.490000)),
((0.630000, 0.640000), (0.630000, 0.650000)),
((0.790000, -0.590000), (0.790000, -0.690000)),
((-0.050000, 0.540000), (-0.200001, 0.540000)),
((0.150000, -0.200000), (0.150000, -0.250000)),
((-0.150001, -0.830000), (-0.110001, -0.830000)),
((0.380000, 0.790000), (0.380000, 0.840000)),
((-0.050000, 0.940000), (-0.050000, 0.800000)),
((0.040000, 0.350000), (-0.110001, 0.350000)),
((-0.260000, -0.830000), (-0.250000, -0.830000)),
((-0.160001, 0.300000), (-0.160001, 0.350000)),
((-0.100000, -0.050000), (-0.100000, -0.100000)),
((-0.500000, -0.430000), (-0.450001, -0.430000)),
((0.389999, -0.140000), (0.480000, -0.140000)),
((-0.210000, 0.500000), (-0.210000, 0.550000)),
((-0.590000, 0.540000), (-0.590000, 0.490000)),
((0.839999, 0.640000), (0.830000, 0.640000)),
((-0.800000, -0.830000), (-0.790000, -0.830000)),
((-0.750000, 0.700000), (-0.750000, 0.790000)),
((0.250000, 0.850000), (0.389999, 0.850000)),
((-0.790000, -0.590000), (-0.790000, -0.630000)),
((0.730000, 0.390000), (0.730000, 0.400000)),
((-0.250000, -0.150000), (-0.250000, -0.190000)),
((0.490000, -0.680000), (0.540000, -0.680000)),
((-0.360001, 0.940000), (-0.400001, 0.940000)),
((0.000000, 0.060000), (0.049999, 0.060000)),
((0.530000, -0.050000), (0.530000, -0.040000)),
((0.190000, 0.840000), (-0.010000, 0.840000)),
((0.839999, -0.730000), (0.940000, -0.730000)),
((0.580000, -0.430000), (0.630000, -0.430000)),
((0.099999, -0.830000), (0.190000, -0.830000)),
((0.299999, -0.490000), (0.299999, -0.530000)),
((-0.890000, 0.100000), (-0.900001, 0.100000)),
((0.790000, -0.690000), (0.780000, -0.690000)),
((0.299999, -0.200000), (0.200000, -0.200000)),
((0.540000, 0.690000), (0.429999, 0.690000)),
((-0.790000, 0.440000), (-0.890000, 0.440000)),
((-0.250000, 0.590000), (-0.250000, 0.540000)),
((0.190000, 0.450000), (0.389999, 0.450000)),
((0.190000, -0.530000), (0.200000, -0.530000)),
((-0.100000, 0.640000), (-0.100000, 0.590000)),
((-0.050000, -0.050000), (-0.100000, -0.050000)),
((-0.540000, -0.430000), (-0.540000, -0.630000)),
((0.389999, -0.090000), (0.389999, -0.140000)),
((-0.210000, 0.550000), (-0.160001, 0.550000)),
((-0.590000, 0.700000), (-0.540000, 0.700000)),
((-0.790000, -0.830000), (-0.790000, -0.980000)),
((0.440000, 0.800000), (0.440000, 0.750000)),
((-0.790000, -0.630000), (-0.740001, -0.630000)),
((0.889999, 0.360000), (0.940000, 0.360000)),
((-0.010000, -0.980000), (-0.010000, -0.880000)),
((-0.200001, -0.150000), (-0.250000, -0.150000)),
((0.690000, -0.730000), (0.690000, -0.830000)),
((0.580000, 0.210000), (0.679999, 0.210000)),
((-0.360001, 0.890000), (-0.360001, 0.940000)),
((0.040000, -0.040000), (0.040000, 0.050000)),
((0.580000, 0.010000), (0.589999, 0.010000)),
((0.940000, -0.730000), (0.940000, -0.740000)),
((-0.250000, 0.390000), (-0.300000, 0.390000)),
((0.540000, -0.390000), (0.540000, -0.440000)),
((0.099999, -0.780000), (0.099999, -0.830000)),
((0.830000, -0.580000), (0.830000, -0.490000)),
((-0.640000, -0.940000), (-0.750000, -0.940000)),
((0.299999, -0.100000), (0.299999, -0.200000)),
((-0.050000, -0.940000), (-0.260000, -0.940000)),
((0.040000, 0.890000), (0.040000, 0.940000)),
((-0.740001, 0.800000), (-0.740001, 0.690000)),
((-0.200001, 0.260000), (0.040000, 0.260000)),
((-0.890000, -0.290000), (-0.840000, -0.290000)),
((0.389999, 0.450000), (0.389999, 0.400000)),
((-0.750000, -0.050000), (-0.750000, 0.000000)),
((-0.050000, 0.640000), (-0.100000, 0.640000)),
((-0.110001, -0.100000), (-0.110001, 0.010000)),
((-0.450001, -0.390000), (-0.550000, -0.390000)),
((0.480000, 0.000000), (0.429999, 0.000000)),
((-0.540000, 0.700000), (-0.540000, 0.600000)),
((-0.990001, -0.980000), (-0.950001, -0.980000)),
((-0.010000, -0.790000), (-0.010000, -0.690000)),
((0.349999, 0.940000), (0.250000, 0.940000)),
((-0.800000, -0.690000), (-0.800000, -0.590000)),
((0.889999, 0.390000), (0.889999, 0.360000)),
((-0.900001, 0.640000), (-0.900001, 0.740000)),
((0.679999, 0.800000), (0.690000, 0.800000)),
((-0.200001, -0.190000), (-0.200001, -0.290000)),
((0.679999, -0.730000), (0.690000, -0.730000)),
((0.299999, 0.060000), (0.299999, 0.010000)),
((-0.400001, 0.950000), (-0.360001, 0.950000)),
((-0.010000, -0.040000), (0.040000, -0.040000)),
((0.580000, -0.090000), (0.580000, -0.050000)),
((0.589999, 0.010000), (0.589999, -0.040000)),
((0.639999, 0.550000), (0.679999, 0.550000)),
((0.990000, -0.840000), (0.929999, -0.840000)),
((0.589999, -0.730000), (0.630000, -0.730000)),
((-0.250000, 0.440000), (-0.250000, 0.390000)),
((0.440000, -0.240000), (0.490000, -0.240000)),
((0.190000, -0.790000), (0.139999, -0.790000)),
((-0.700001, 0.310000), (-0.700001, 0.350000)),
((-0.800000, 0.300000), (-0.800000, 0.350000)),
((0.839999, -0.580000), (0.929999, -0.580000)),
((-0.900001, -0.530000), (-0.900001, -0.440000)),
((0.380000, -0.100000), (0.299999, -0.100000)),
((-0.840000, 0.800000), (-0.740001, 0.800000)),
((0.139999, 0.260000), (0.240000, 0.260000)),
((0.040000, 0.260000), (0.040000, 0.350000)),
((0.389999, 0.400000), (0.480000, 0.400000)),
((-0.740001, -0.050000), (-0.750000, -0.050000)),
((-0.110001, 0.590000), (-0.110001, 0.640000)),
((0.000000, 0.000000), (-0.050000, 0.000000)),
((-0.450001, -0.430000), (-0.450001, -0.390000)),
((0.349999, -0.040000), (0.349999, -0.090000)),
((-0.500000, 0.540000), (-0.590000, 0.540000)),
((-0.590000, -0.680000), (-0.590000, -0.730000)),
((-0.500000, -0.980000), (-0.500000, -0.930000)),
((-0.010000, -0.690000), (-0.110001, -0.690000)),
((0.490000, 0.750000), (0.490000, 0.740000)),
((-0.750000, 0.790000), (-0.850000, 0.790000)),
((-0.840000, -0.580000), (-0.700001, -0.580000)),
((0.940000, 0.390000), (0.889999, 0.390000)),
((0.880000, 0.450000), (0.880000, 0.490000)),
((-0.390000, -0.840000), (-0.390000, -0.980000)),
((-0.260000, -0.140000), (-0.200001, -0.140000)),
((0.540000, -0.680000), (0.540000, -0.690000)),
((0.299999, 0.010000), (0.380000, 0.010000)),
((-0.440001, 0.990000), (-0.440001, 0.900000)),
)
|
425253
|
from mushroom_rl.policy import Policy, ParametricPolicy
def abstract_method_tester(f, ex, *args):
    """Assert that calling ``f(*args)`` raises an exception of type ``ex``.

    If ``f`` returns without raising, the surrounding test fails. Any
    exception of a different type propagates to the caller unchanged.
    """
    try:
        f(*args)
    except ex:
        pass
    else:
        # Raise explicitly rather than `assert False`: assert statements are
        # stripped when Python runs with -O, which would silently pass here.
        raise AssertionError(
            '{} did not raise {}'.format(getattr(f, '__name__', f), ex))
def test_policy_interface():
    """The base Policy's abstract methods must raise NotImplementedError."""
    policy = Policy()
    for method, call_args in ((policy.__call__, ()),
                              (policy.draw_action, (None,))):
        abstract_method_tester(method, NotImplementedError, *call_args)
    # reset() is implemented on the base class and must not raise.
    policy.reset()
def test_parametric_policy():
    """The abstract ParametricPolicy methods must raise until overridden."""
    policy = ParametricPolicy()
    abstract_method_tester(policy.diff_log, RuntimeError, None, None)
    abstract_method_tester(policy.diff, RuntimeError, None, None)
    abstract_method_tester(policy.set_weights, NotImplementedError, None)
    abstract_method_tester(policy.get_weights, NotImplementedError)
    # weights_size is accessed as a property, so exercise it via a lambda.
    abstract_method_tester(lambda: policy.weights_size, NotImplementedError)
|
425269
|
import copy
import os
import time
from jinja2 import Environment, FileSystemLoader
from os.path import join, dirname
import pytest
from cosmo_tester.framework.test_hosts import Hosts
from cosmo_tester.framework import util
from .cfy_cluster_manager_shared import REMOTE_CLUSTER_CONFIG_PATH
# Directory holding the cluster-manager config files shipped next to this
# module (rendered with jinja2 where needed).
CONFIG_DIR = join(dirname(__file__), 'config')
class InsufficientVmsError(Exception):
    """Raised when the number of provided VMs does not match the layout."""
    pass
def skip(*args, **kwargs):
    """Unconditionally return True, ignoring every argument.

    Accepts any call signature so it can be dropped in wherever a
    predicate is expected.
    """
    del args, kwargs  # accepted only for call-site compatibility
    return True
@pytest.fixture(scope='session')
def three_session_vms(request, ssh_key, session_tmpdir, test_config,
                      session_logger):
    """Provide three bootstrappable VMs shared across the test session."""
    vm_group = Hosts(
        ssh_key, session_tmpdir, test_config, session_logger, request,
        bootstrappable=True, number_of_instances=3)
    try:
        vm_group.create()
        yield vm_group.instances
    finally:
        vm_group.destroy()
@pytest.fixture(scope='session')
def three_ipv6_session_vms(request, ssh_key, session_tmpdir, test_config,
                           session_logger):
    """Provide three bootstrappable VMs on an IPv6 network for the session."""
    vm_group = Hosts(
        ssh_key, session_tmpdir, test_config, session_logger, request,
        bootstrappable=True, number_of_instances=3, ipv6_net=True)
    try:
        vm_group.create()
        yield vm_group.instances
    finally:
        vm_group.destroy()
@pytest.fixture(scope='session')
def four_session_vms(request, ssh_key, session_tmpdir, test_config,
                     session_logger):
    """Provide four bootstrappable VMs shared across the test session."""
    vm_group = Hosts(
        ssh_key, session_tmpdir, test_config, session_logger, request,
        bootstrappable=True, number_of_instances=4)
    try:
        vm_group.create()
        yield vm_group.instances
    finally:
        vm_group.destroy()
@pytest.fixture(scope='session')
def six_session_vms(request, ssh_key, session_tmpdir, test_config,
                    session_logger):
    """Provide six bootstrappable VMs shared across the test session."""
    vm_group = Hosts(
        ssh_key, session_tmpdir, test_config, session_logger, request,
        bootstrappable=True, number_of_instances=6)
    try:
        vm_group.create()
        yield vm_group.instances
    finally:
        vm_group.destroy()
@pytest.fixture(scope='session')
def nine_session_vms(request, ssh_key, session_tmpdir, test_config,
                     session_logger):
    """Provide nine bootstrappable VMs shared across the test session."""
    vm_group = Hosts(
        ssh_key, session_tmpdir, test_config, session_logger, request,
        bootstrappable=True, number_of_instances=9)
    try:
        vm_group.create()
        yield vm_group.instances
    finally:
        vm_group.destroy()
@pytest.fixture(scope='function')
def brokers(three_session_vms, test_config, logger):
    """Per-test fixture: three VMs set up as rabbit broker nodes."""
    for host in three_session_vms:
        _ensure_installer_installed(host)
    yield _get_hosts(
        three_session_vms, test_config, logger, broker_count=3)
    for host in three_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def broker(session_manager, test_config, logger):
    """Per-test fixture: a single rabbit broker on the session manager VM."""
    yield _get_hosts([session_manager], test_config, logger,
                     broker_count=1)[0]
    session_manager.teardown()
@pytest.fixture(scope='function')
def dbs(three_session_vms, test_config, logger):
    """Per-test fixture: three VMs set up as database nodes."""
    for host in three_session_vms:
        _ensure_installer_installed(host)
    yield _get_hosts(
        three_session_vms, test_config, logger, db_count=3)
    for host in three_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def brokers_and_manager(three_session_vms, test_config, logger):
    """Per-test fixture: two brokers plus one manager on three VMs."""
    for host in three_session_vms:
        _ensure_installer_installed(host)
    yield _get_hosts(
        three_session_vms, test_config, logger,
        broker_count=2, manager_count=1)
    for host in three_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def brokers3_and_manager(four_session_vms, test_config, logger):
    """Per-test fixture: three brokers and one manager on four VMs."""
    yield _get_hosts(
        four_session_vms, test_config, logger,
        broker_count=3, manager_count=1)
    for host in four_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def full_cluster_ips(nine_session_vms, test_config, logger):
    """Per-test fixture: full 3-broker/3-db/3-manager cluster keyed by IP."""
    for host in nine_session_vms:
        _ensure_installer_installed(host)
    yield _get_hosts(
        nine_session_vms, test_config, logger,
        broker_count=3, db_count=3, manager_count=3,
        pre_cluster_rabbit=True)
    for host in nine_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def full_cluster_names(nine_session_vms, test_config, logger):
    """Per-test fixture: full 3x3x3 cluster addressed by hostname."""
    for host in nine_session_vms:
        _ensure_installer_installed(host)
    yield _get_hosts(
        nine_session_vms, test_config, logger,
        broker_count=3, db_count=3, manager_count=3,
        pre_cluster_rabbit=True, use_hostnames=True)
    for host in nine_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def cluster_with_lb(six_session_vms, test_config, logger):
    """Per-test fixture: 1 broker, 1 db, 3 managers behind a load balancer."""
    yield _get_hosts(
        six_session_vms, test_config, logger,
        broker_count=1, db_count=1, manager_count=3,
        use_load_balancer=True, pre_cluster_rabbit=True)
    for host in six_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def cluster_missing_one_db(nine_session_vms, test_config, logger):
    """Per-test fixture: full cluster but with node 'db3' left unbootstrapped."""
    for host in nine_session_vms:
        _ensure_installer_installed(host)
    yield _get_hosts(
        nine_session_vms, test_config, logger,
        broker_count=3, db_count=3, manager_count=3,
        skip_bootstrap_list=['db3'],
        pre_cluster_rabbit=True)
    for host in nine_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def cluster_with_single_db(six_session_vms, test_config, logger):
    """Per-test fixture: 3 brokers, a single db, and 2 managers."""
    yield _get_hosts(
        six_session_vms, test_config, logger,
        broker_count=3, db_count=1, manager_count=2,
        pre_cluster_rabbit=True)
    for host in six_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def minimal_cluster(four_session_vms, test_config, logger):
    """Per-test fixture: smallest HA layout — 1 broker, 1 db, 2 managers."""
    yield _get_hosts(
        four_session_vms, test_config, logger,
        broker_count=1, db_count=1, manager_count=2,
        pre_cluster_rabbit=True)
    for host in four_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def three_nodes_cluster(three_session_vms, test_config, logger):
    """Per-test fixture: three combined nodes (each hosts all services)."""
    for host in three_session_vms:
        _ensure_installer_installed(host)
    yield _get_hosts(
        three_session_vms, test_config, logger,
        pre_cluster_rabbit=True, three_nodes_cluster=True)
    for host in three_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def three_nodes_ipv6_cluster(three_ipv6_session_vms, test_config, logger):
    """Per-test fixture: three combined cluster nodes on an IPv6 network."""
    for host in three_ipv6_session_vms:
        _ensure_installer_installed(host)
    yield _get_hosts(
        three_ipv6_session_vms, test_config, logger,
        pre_cluster_rabbit=True, three_nodes_cluster=True)
    for host in three_ipv6_session_vms:
        host.teardown()
@pytest.fixture(scope='function')
def three_vms(three_session_vms, test_config, logger):
    """Per-test fixture: three named but *not* bootstrapped VMs."""
    for host in three_session_vms:
        _ensure_installer_not_installed(host)
    yield _get_hosts(
        three_session_vms, test_config, logger,
        three_nodes_cluster=True, bootstrap=False)
    for host in three_session_vms:
        _remove_cluster(host, logger)
        host.teardown()
@pytest.fixture(scope='function')
def three_vms_ipv6(three_ipv6_session_vms, test_config, logger):
    """Per-test fixture: three named but *not* bootstrapped IPv6 VMs.

    Mirrors ``three_vms`` for the IPv6 network.
    """
    # Bug fix: this loop previously iterated ``three_nodes_ipv6_cluster``,
    # which is the fixture *function* defined above (not this fixture's VM
    # list), so setup would fail with a TypeError. Iterate the injected
    # ``three_ipv6_session_vms`` argument instead.
    for vm in three_ipv6_session_vms:
        _ensure_installer_not_installed(vm)
    yield _get_hosts(three_ipv6_session_vms, test_config, logger,
                     three_nodes_cluster=True, bootstrap=False)
    for vm in three_ipv6_session_vms:
        # NOTE(review): the IPv4 counterpart (``three_vms``) also calls
        # _remove_cluster(vm, logger) before teardown — confirm whether its
        # omission here is intentional.
        vm.teardown()
@pytest.fixture(scope='function')
def nine_vms(nine_session_vms, test_config, logger):
    """Per-test fixture: nine named but *not* bootstrapped VMs (3x3x3 roles)."""
    for host in nine_session_vms:
        _ensure_installer_not_installed(host)
    yield _get_hosts(
        nine_session_vms, test_config, logger,
        broker_count=3, db_count=3,
        manager_count=3, bootstrap=False)
    for host in nine_session_vms:
        _remove_cluster(host, logger)
        host.teardown()
def _ensure_installer_not_installed(vm):
    """Remove the cloudify-manager-install RPM (and component leftovers).

    Leaves the VM in a clean state so a subsequent bootstrap installs from
    scratch. The removal only runs when the installer RPM is present.
    """
    vm.wait_for_ssh()
    vm.run_command(
        'if rpm -qi cloudify-manager-install; then '
        # yum clean all doesn't clean all, so let's be more forceful
        'sudo rm -rf /var/cache/yum ; '
        'sudo yum remove -y cloudify-manager-install {}; fi'.format(
            # We need to remove the other components as well or we end up with
            # failures when installing older clusters in the upgrade tests
            ' '.join([
                'cloudify-agents',
                'cloudify-cli',
                'cloudify-composer',
                'cloudify-management-worker',
                'cloudify-premium',
                'cloudify-rabbitmq',
                'cloudify-rest-service',
                'cloudify-stage',
                'erlang',
                'etcd',
                'nginx',
                'node_exporter',
                'nodejs',
                'patroni',
                'postgres_exporter',
                'postgresql95',
                'postgresql95-contrib',
                'postgresql95-devel',
                'postgresql95-libs',
                'postgresql95-server',
                'prometheus',
                'python-psycopg2',
                'rabbitmq-server',
            ]),
        )
    )
def _ensure_installer_installed(vm):
    """Install the cloudify-manager-install RPM on the VM if it is missing.

    Assumes ``cloudify-manager-install.rpm`` is resolvable by yum on the VM
    (local file or configured repo) — TODO confirm which.
    """
    vm.wait_for_ssh()
    vm.run_command(
        # yum clean all doesn't clean all, so let's be more forceful
        'sudo rm -rf /var/cache/yum '
        '&& (rpm -qi cloudify-manager-install '
        '|| sudo yum install -y cloudify-manager-install.rpm)'
    )
def _get_hosts(instances, test_config, logger,
               broker_count=0, manager_count=0, db_count=0,
               use_load_balancer=False, skip_bootstrap_list=None,
               # Pre-cluster rabbit determines whether to cluster rabbit
               # during the bootstrap.
               # High security will pre-set all certs (not just required ones)
               # and use postgres client certs.
               pre_cluster_rabbit=False, high_security=True, extra_node=False,
               use_hostnames=False, three_nodes_cluster=False,
               bootstrap=True):
    """Assign roles and hostnames to ``instances``, optionally bootstrapping.

    Expected VM order: brokers, then dbs, then managers (or three combined
    'cloudify-N' nodes when ``three_nodes_cluster``), followed by an
    optional load balancer and optional extra node. Returns ``instances``.

    Raises:
        InsufficientVmsError: when ``len(instances)`` does not match the
            count implied by the requested layout.
    """
    number_of_cluster_instances = (
        3 if three_nodes_cluster else broker_count + db_count + manager_count)
    has_extra_node = (1 if extra_node else 0)
    number_of_instances = number_of_cluster_instances + \
        (1 if use_load_balancer else 0) + has_extra_node
    if skip_bootstrap_list is None:
        skip_bootstrap_list = []
    if len(instances) != number_of_instances:
        # Bug fix: the message used logging-style placeholders with separate
        # args ('...%s...', a, b), so the exception text was never formatted.
        raise InsufficientVmsError(
            'Required {} instances, but got {}'.format(
                number_of_instances, len(instances)))
    tempdir = instances[0]._tmpdir_base
    # Work out the hostname for every VM, in role order.
    if three_nodes_cluster:
        name_mappings = ['cloudify-1', 'cloudify-2', 'cloudify-3']
    else:
        name_mappings = ['rabbit-{}'.format(i)
                         for i in range(broker_count)]
        name_mappings.extend([
            'db-{}'.format(i) for i in range(db_count)
        ])
        name_mappings.extend([
            'manager-{}'.format(i) for i in range(manager_count)
        ])
    if use_load_balancer:
        name_mappings.append('lb')
    if has_extra_node:
        name_mappings.append('extra_node')
    for idx, node in enumerate(instances):
        node.wait_for_ssh()
        # This needs to happen before we start bootstrapping nodes
        # because the hostname is used by nodes that are being
        # bootstrapped with reference to nodes that may not have been
        # bootstrapped yet.
        node.hostname = name_mappings[idx]
        node.run_command('sudo hostnamectl set-hostname {}'.format(
            name_mappings[idx]
        ))
    if use_hostnames:
        # Make every node resolvable by hostname via /etc/hosts entries.
        hosts_entries = ['\n# Added for hostname test']
        hosts_entries.extend(
            '{ip} {name}'.format(ip=node.private_ip_address,
                                 name=node.hostname)
            for node in instances
        )
        hosts_entries = '\n'.join(hosts_entries)
        for node in instances:
            if not hasattr(node, 'install_config'):
                # This is a load balancer or other non-cloudify node
                continue
            node.install_config['manager']['private_ip'] = node.hostname
            node.run_command(
                "echo '{hosts}' | sudo tee -a /etc/hosts".format(
                    hosts=hosts_entries,
                )
            )
    else:
        for node in instances:
            if not hasattr(node, 'install_config'):
                # This is a load balancer or other non-cloudify node
                continue
            node.install_config['manager'][
                'private_ip'] = node.private_ip_address
    # Slice the instance list into role groups matching name order above.
    if three_nodes_cluster:
        brokers = dbs = managers = instances[:3]
    else:
        brokers = instances[:broker_count]
        dbs = instances[broker_count:broker_count + db_count]
        managers = instances[broker_count + db_count:
                             broker_count + db_count + manager_count]
    if use_load_balancer:
        lb = instances[-1 - has_extra_node]
    if bootstrap:
        run_cluster_bootstrap(dbs, brokers, managers, skip_bootstrap_list,
                              pre_cluster_rabbit, high_security,
                              use_hostnames, tempdir, test_config, logger)
        if use_load_balancer:
            _bootstrap_lb_node(lb, managers, tempdir, logger)
    logger.info('All nodes are created%s.',
                ' and bootstrapped' if bootstrap else '')
    return instances
def run_cluster_bootstrap(dbs, brokers, managers, skip_bootstrap_list,
                          pre_cluster_rabbit, high_security, use_hostnames,
                          tempdir, test_config, logger,
                          revert_install_config=False, credentials=None):
    """Bootstrap brokers, then dbs, then managers, in that order.

    Broker/db bootstraps are kicked off and then polled for completion
    before any manager is installed. Nodes whose friendly name appears in
    ``skip_bootstrap_list`` are configured but not bootstrapped (and are
    excluded from the completion poll). With ``revert_install_config``,
    each node's install_config is reset to a deep copy of its
    basic_install_config after its bootstrap is started.
    """
    for node_num, node in enumerate(brokers, start=1):
        _bootstrap_rabbit_node(node, node_num, brokers,
                               skip_bootstrap_list, pre_cluster_rabbit,
                               tempdir, logger, use_hostnames, credentials)
        if revert_install_config:
            node.install_config = copy.deepcopy(node.basic_install_config)
    for node_num, node in enumerate(dbs, start=1):
        _bootstrap_db_node(node, node_num, dbs, skip_bootstrap_list,
                           high_security, tempdir, logger,
                           use_hostnames, credentials)
        if revert_install_config:
            node.install_config = copy.deepcopy(node.basic_install_config)
    # Ensure all backend nodes are up before installing managers
    for node in brokers + dbs:
        if node.friendly_name in skip_bootstrap_list:
            continue
        while not node.bootstrap_is_complete():
            logger.info('Checking state of %s', node.friendly_name)
            time.sleep(5)
    for node_num, node in enumerate(managers, start=1):
        _bootstrap_manager_node(node, node_num, dbs, brokers,
                                skip_bootstrap_list,
                                pre_cluster_rabbit, high_security,
                                tempdir, logger, test_config,
                                use_hostnames, credentials)
        if revert_install_config:
            node.install_config = copy.deepcopy(node.basic_install_config)
def _base_prep(node, tempdir):
    """Common per-node prep: bootstrap-log dir, certificates, and upload.

    Creates /tmp/bs_logs on the node, generates a CA in ``tempdir`` (only
    on the first call for that tempdir), then generates a per-node cert/key
    signed by it covering the node's friendly name, hostname and both IP
    addresses. Uploads cert, key and CA cert to /tmp on the node and
    records the local/remote paths as attributes on ``node``.
    """
    with node.ssh() as fabric_ssh:
        fabric_ssh.run(
            'mkdir -p /tmp/bs_logs'
        )
        fabric_ssh.run(
            'echo {name} > /tmp/bs_logs/0_node_name'.format(
                name=node.friendly_name,
            )
        )
    ca_base = os.path.join(tempdir, 'ca.')
    ca_cert = ca_base + 'cert'
    ca_key = ca_base + 'key'
    # The CA is shared by all nodes of the test; generate it only once.
    if not os.path.exists(ca_cert):
        util.generate_ca_cert(ca_cert, ca_key)
    cert_base = os.path.join(tempdir, '{node_friendly_name}.{extension}')
    node_cert = cert_base.format(node_friendly_name=node.friendly_name,
                                 extension='crt')
    node_key = cert_base.format(node_friendly_name=node.friendly_name,
                                extension='key')
    util.generate_ssl_certificate(
        [node.friendly_name, node.hostname,
         node.private_ip_address,
         node.ip_address],
        node.hostname,
        tempdir,
        node_cert,
        node_key,
        ca_cert,
        ca_key,
    )
    remote_cert = '/tmp/' + node.friendly_name + '.crt'
    remote_key = '/tmp/' + node.friendly_name + '.key'
    remote_ca = '/tmp/ca.crt'
    node.put_remote_file(
        local_path=node_cert,
        remote_path=remote_cert,
    )
    node.put_remote_file(
        local_path=node_key,
        remote_path=remote_key,
    )
    node.put_remote_file(
        local_path=ca_cert,
        remote_path=remote_ca,
    )
    # Remember the paths so later bootstrap steps can reference them.
    node.local_cert = node_cert
    node.remote_cert = remote_cert
    node.local_key = node_key
    node.remote_key = remote_key
    node.api_ca_path = ca_cert
    node.remote_ca = remote_ca
def _bootstrap_rabbit_node(node, rabbit_num, brokers, skip_bootstrap_list,
                           pre_cluster_rabbit, tempdir, logger,
                           use_hostnames, credentials=None):
    """Configure and (optionally) bootstrap one rabbitmq broker node.

    When ``pre_cluster_rabbit`` is set, all brokers are listed as cluster
    members and brokers after the first join the first broker's cluster.
    Nodes named in ``skip_bootstrap_list`` are configured but not bootstrapped.
    """
    node.friendly_name = 'rabbit' + str(rabbit_num)
    _base_prep(node, tempdir)
    logger.info('Preparing rabbit {}'.format(node.hostname))

    # Brokers after the first join the first broker when pre-clustering.
    if pre_cluster_rabbit and rabbit_num != 1:
        join_target = brokers[0].hostname
    else:
        join_target = ''

    rabbit_nodes = {}
    if pre_cluster_rabbit:
        for broker in brokers:
            address = (broker.hostname if use_hostnames
                       else str(broker.private_ip_address))
            rabbit_nodes[broker.hostname] = {
                'networks': {
                    'default': address
                }
            }

    node.install_config['rabbitmq'] = {
        'ca_path': '/tmp/ca.crt',
        'cert_path': node.remote_cert,
        'key_path': node.remote_key,
        'erlang_cookie': 'thisisacookiefortestingnotproduction',
        'cluster_members': rabbit_nodes,
        'nodename': node.hostname,
        'join_cluster': join_target,
    }
    node.install_config['services_to_install'] = ['queue_service']

    if node.friendly_name in skip_bootstrap_list:
        return

    _add_monitoring_config(node)
    if credentials:
        util.update_dictionary(node.install_config, credentials)

    # The first broker of a pre-formed cluster must be fully up before the
    # others try to join it, so only its bootstrap blocks.
    blocking = bool(pre_cluster_rabbit and rabbit_num == 1)
    node.bootstrap(blocking=blocking, restservice_expected=False,
                   config_name='rabbit')
def _bootstrap_db_node(node, db_num, dbs, skip_bootstrap_list, high_security,
                       tempdir, logger, use_hostnames, credentials=None):
    """Configure and bootstrap (non-blocking) one postgres DB node.

    Multi-DB deployments get a patroni/etcd cluster section; a single DB
    simply enables remote connections.  ``high_security`` additionally turns
    on SSL-only connections with client verification.
    """
    node.friendly_name = 'db' + str(db_num)
    _base_prep(node, tempdir)
    logger.info('Preparing db {}'.format(node.hostname))

    node.pg_password = '<PASSWORD>"<PASSWORD>'

    server_conf = {
        'postgres_password': node.pg_password,
        'cert_path': node.remote_cert,
        'key_path': node.remote_key,
        'ca_path': '/tmp/ca.crt',
    }
    node.install_config['postgresql_server'] = server_conf
    node.install_config['services_to_install'] = ['database_service']

    if len(dbs) > 1:
        db_nodes = {}
        for db in dbs:
            db_nodes[db.hostname] = {
                'ip': (
                    db.hostname if use_hostnames else
                    str(db.private_ip_address)
                )
            }
        server_conf['cluster'] = {
            'nodes': db_nodes,
            'etcd': {
                'cluster_token': '<PASSWORD>',
                'root_password': '<PASSWORD>',
                'patroni_password': '<PASSWORD>',
            },
            'patroni': {
                'rest_user': 'patroni',
                'rest_password': '<PASSWORD>',
            },
            'postgres': {
                'replicator_password': '<PASSWORD>',
            },
        }
    else:
        server_conf['enable_remote_connections'] = True

    if high_security:
        server_conf['ssl_client_verification'] = True
        server_conf['ssl_only_connections'] = True

    if node.friendly_name in skip_bootstrap_list:
        return

    _add_monitoring_config(node)
    if credentials:
        util.update_dictionary(node.install_config, credentials)
    node.bootstrap(blocking=False, restservice_expected=False,
                   config_name='db')
def _bootstrap_manager_node(node, mgr_num, dbs, brokers, skip_bootstrap_list,
                            pre_cluster_rabbit, high_security, tempdir,
                            logger, test_config, use_hostnames,
                            credentials=None):
    """Configure and bootstrap (blocking) one manager node.

    Builds the manager install-config pointing at the given rabbit brokers
    and DB nodes, optionally hardens it (``high_security``), then bootstraps.
    Nodes named in ``skip_bootstrap_list`` are configured but not bootstrapped.
    Only the first manager uploads the license.
    """
    node.friendly_name = 'manager' + str(mgr_num)
    _base_prep(node, tempdir)
    logger.info('Preparing manager {}'.format(node.hostname))
    # Cluster members: all brokers when pre-clustered, otherwise just the
    # first broker.
    if pre_cluster_rabbit:
        rabbit_nodes = {
            broker.hostname: {
                'networks': {
                    'default': (
                        broker.hostname if use_hostnames else
                        str(broker.private_ip_address)
                    )
                }
            }
            for broker in brokers
        }
    else:
        broker = brokers[0]
        rabbit_nodes = {
            broker.hostname: {
                'networks': {
                    'default': (
                        broker.hostname if use_hostnames else
                        str(broker.private_ip_address)
                    )
                }
            }
        }
    # NOTE(review): public_ip is deliberately(?) set to the *private* IP —
    # presumably because tests talk to the manager from inside the network;
    # confirm before reusing outside the test harness.
    node.install_config['manager'] = {
        'private_ip': str(node.private_ip_address),
        'public_ip': str(node.private_ip_address),
        'security': {
            'admin_password': test_config['test_manager']['password'],
        },
    }
    node.install_config['rabbitmq'] = {
        'ca_path': '/tmp/ca.crt',
        'cluster_members': rabbit_nodes,
    }
    node.install_config['services_to_install'] = ['manager_service',
                                                 'entropy_service']
    if high_security:
        # Reuse the node's own cert/key for both external and internal SSL.
        node.install_config['ssl_inputs'] = {
            'external_cert_path': node.remote_cert,
            'external_key_path': node.remote_key,
            'internal_cert_path': node.remote_cert,
            'internal_key_path': node.remote_key,
            'ca_cert_path': node.remote_ca,
            'external_ca_cert_path': node.remote_ca,
        }
        node.install_config['manager']['security'][
            'ssl_enabled'] = True
    if dbs:
        node.install_config['postgresql_server'] = {
            'ca_path': node.remote_ca,
            'cluster': {'nodes': {}},
        }
        node.install_config['postgresql_client'] = {
            'server_username': 'postgres',
            'server_password': dbs[0].pg_password,
        }
        if len(dbs) > 1:
            # Point the manager at every DB that was actually bootstrapped.
            db_nodes = {
                db.hostname: {
                    'ip': (
                        db.hostname if use_hostnames else
                        str(db.private_ip_address)
                    ),
                }
                for db in dbs
                if db.friendly_name not in skip_bootstrap_list
            }
            node.install_config['postgresql_server']['cluster'][
                'nodes'] = db_nodes
        else:
            node.install_config['postgresql_client'][
                'host'] = str(dbs[0].private_ip_address)
        if high_security:
            node.install_config['postgresql_client'][
                'ssl_client_verification'] = True
            node.install_config['postgresql_client']['ssl_enabled'] = True
            node.install_config['ssl_inputs'][
                'postgresql_client_cert_path'] = node.remote_cert
            node.install_config['ssl_inputs'][
                'postgresql_client_key_path'] = node.remote_key
    else:
        # If we're installing no db nodes we must put the db on the
        # manager (this only makes sense for testing external rabbit)
        node.install_config['services_to_install'].append('database_service')
    if node.friendly_name in skip_bootstrap_list:
        return
    # Only the first manager uploads the license.
    upload_license = mgr_num == 1
    _add_monitoring_config(node, manager=True)
    if credentials:
        util.update_dictionary(node.install_config, credentials)
    # We have to block on every manager
    node.bootstrap(blocking=True, restservice_expected=False,
                   upload_license=upload_license, config_name='manager')
    # Correctly configure the rest client for the node
    node.client = node.get_rest_client(proto='https')
def _bootstrap_lb_node(node, managers, tempdir, logger):
    """Install and configure an haproxy load balancer in front of *managers*.

    Installs haproxy from the local package source, installs the node's SSL
    cert/key/CA, renders the haproxy config template with the manager list,
    and restarts the service.  The node is then treated like a manager for
    REST purposes.
    """
    node.friendly_name = 'haproxy'
    _base_prep(node, tempdir)
    logger.info('Preparing load balancer {}'.format(node.hostname))
    # install haproxy and import certs
    # Shell script executed remotely: haproxy wants cert+key concatenated in
    # one PEM; restorecon fixes SELinux labels on the copied files.
    install_sh = """yum install -y /opt/cloudify/sources/haproxy*
cat {cert} {key} > /tmp/cert.pem\n mv /tmp/cert.pem /etc/haproxy
chown haproxy. /etc/haproxy/cert.pem\n chmod 400 /etc/haproxy/cert.pem
cp {ca} /etc/haproxy\n chown haproxy. /etc/haproxy/ca.crt
restorecon /etc/haproxy/*""".format(
        cert=node.remote_cert, key=node.remote_key, ca=node.remote_ca)
    node.run_command('echo "{}" > /tmp/haproxy_install.sh'.format(install_sh))
    node.run_command('chmod 700 /tmp/haproxy_install.sh')
    node.run_command('sudo /tmp/haproxy_install.sh')
    # configure haproxy
    template = Environment(
        loader=FileSystemLoader(CONFIG_DIR)).get_template('haproxy.cfg')
    config = template.render(managers=managers)
    config_path = '/etc/haproxy/haproxy.cfg'
    node.put_remote_file_content(config_path, config)
    node.run_command('sudo chown root. {}'.format(config_path))
    node.run_command('sudo chmod 644 {}'.format(config_path))
    node.run_command('sudo restorecon {}'.format(config_path))
    node.run_command('sudo systemctl enable haproxy')
    node.run_command('sudo systemctl restart haproxy')
    # The LB fronts the managers, so it is addressed like one from now on.
    node.is_manager = True
    node.client = node.get_rest_client(proto='https', download_ca=False)
def _add_monitoring_config(node, manager=False):
"""Add monitoring settings to config."""
monitoring_user = 'friendlymonitoringuser'
monitoring_pass = '<PASSWORD>'
config = node.install_config
config['services_to_install'] = config.get(
'services_to_install', []) + ['monitoring_service']
config['prometheus'] = {
'credentials': {
'username': monitoring_user,
'password': <PASSWORD>,
},
'cert_path': node.remote_cert,
'key_path': node.remote_key,
'ca_path': node.remote_ca,
}
if manager:
for section_name in ['rabbitmq', 'postgresql_client', 'manager']:
section = config[section_name] = config.get(section_name, {})
section['monitoring'] = {
'username': monitoring_user,
'password': <PASSWORD>,
}
config[section_name] = section
def _remove_cluster(node, logger):
    """Tear down a cluster via cfy_cluster_manager if it is present on *node*.

    The yum cache is always purged afterwards, whether or not the cluster
    manager was found.
    """
    logger.info('Attempting to clean up cluster using '
                'cloudify_cluster_manager')
    logger.info('Checking for cluster manager on {}'.format(node.hostname))
    which_result = node.run_command('which cfy_cluster_manager',
                                    warn_only=True)
    if which_result.ok:
        logger.info('Found cluster manager on {}, tearing down '
                    'cluster...'.format(node.hostname))
        node.run_command('sudo cfy_cluster_manager remove --config-path '
                         '{}'.format(REMOTE_CLUSTER_CONFIG_PATH))
    # yum clean all doesn't clean all, so let's be more forceful
    node.run_command('sudo rm -rf /var/cache/yum')
|
425285
|
from unittest import TestCase, skip
import mock
from pjon_python.strategies.pjon_hwserial_strategy import PJONserialStrategy, UnsupportedPayloadType
class TestPJONserialStrategy(TestCase):
    """Unit tests for PJONserialStrategy byte-level send/receive behaviour.

    Uses a mocked ``serial.Serial`` so no real port is needed.
    Note: ``assertEquals`` (deprecated alias, removed in Python 3.12) has
    been replaced with ``assertEqual`` throughout.
    """

    def test_send_byte_should_convert_int_to_chr(self):
        with mock.patch('serial.Serial', create=True) as ser:
            serial_strategy = PJONserialStrategy(serial_port=ser)
            self.assertEqual(serial_strategy.send_byte(11), 0)

    def test_send_byte_should_convert_hex_to_chr(self):
        with mock.patch('serial.Serial', create=True) as ser:
            serial_strategy = PJONserialStrategy(serial_port=ser)
            self.assertEqual(serial_strategy.send_byte(0x22), 0)

    def test_send_byte_should_accept_char(self):
        with mock.patch('serial.Serial', create=True) as ser:
            serial_strategy = PJONserialStrategy(serial_port=ser)
            self.assertEqual(serial_strategy.send_byte('a'), 0)

    def test_send_byte_should_raise_on_unsupported_type(self):
        # Only single ints/chars are valid payloads for send_byte.
        with mock.patch('serial.Serial', create=True) as ser:
            serial_strategy = PJONserialStrategy(serial_port=ser)
            self.assertRaises(UnsupportedPayloadType, serial_strategy.send_byte, ['a', 'b'])
            self.assertRaises(UnsupportedPayloadType, serial_strategy.send_byte, 'abc')
            self.assertRaises(UnsupportedPayloadType, serial_strategy.send_byte, [1, 2])
            self.assertRaises(UnsupportedPayloadType, serial_strategy.send_byte, {'a': 'b'})

    @skip("skipped because receive buffer was disabled")
    def test_serial_client_should_read_all_available_bytes_to_receive_buffer(self):
        with mock.patch('serial.Serial', create=True) as ser:
            def arr_return(size):
                vars = [chr(item) for item in [1, 9, 2, 45]]
                vars.reverse()
                return vars[:size]
            ser.read.side_effect = arr_return
            ser.inWaiting.side_effect = [4, 3, 2, 1]
            serial_strategy = PJONserialStrategy(serial_port=ser)
            # Each receive_byte pops one byte and shrinks the buffer by one.
            self.assertEqual(serial_strategy.receive_byte(), 1)
            self.assertEqual(len(serial_strategy._read_buffer), 3)
            self.assertEqual(serial_strategy.receive_byte(), 9)
            self.assertEqual(len(serial_strategy._read_buffer), 2)
            self.assertEqual(serial_strategy.receive_byte(), 2)
            self.assertEqual(len(serial_strategy._read_buffer), 1)
            self.assertEqual(serial_strategy.receive_byte(), 45)
            self.assertEqual(len(serial_strategy._read_buffer), 0)

    @skip("skipped because receive buffer was disabled")
    def test_serial_client_should_trim_serial_buffer(self):
        with mock.patch('serial.Serial', create=True) as ser:
            serial_strategy = PJONserialStrategy(serial_port=ser)

            def return_payload_twice_the_buffer_length(size):
                vars = [chr(13)] * serial_strategy._READ_BUFFER_SIZE * 2
                return vars[:size]

            ser.read.side_effect = return_payload_twice_the_buffer_length
            ser.inWaiting.side_effect = [len(return_payload_twice_the_buffer_length(serial_strategy._READ_BUFFER_SIZE * 2))]
            self.assertEqual(serial_strategy.receive_byte(), 13)
            # Buffer must be trimmed to its maximum size minus the popped byte.
            self.assertEqual(len(serial_strategy._read_buffer), 32767)
|
425328
|
import numpy as np
from scipy import stats
import torch
def test(test_loader, encoder, decoder, critic_x):
    """Score a test set with the trained GAN components and print metrics.

    For every batch: reconstruct the signal (encoder -> decoder), compute a
    per-sample DTW reconstruction error plus a critic score, z-score both,
    combine them into an anomaly score, detect and prune anomalies, and
    finally print accuracy/precision/recall via ``find_scores``.
    """
    reconstruction_error = list()
    critic_score = list()
    y_true = list()

    for batch, sample in enumerate(test_loader):
        signal = sample['signal']
        reconstructed_signal = decoder(encoder(signal))
        reconstructed_signal = torch.squeeze(reconstructed_signal)

        # Bug fix: iterate over the actual batch size instead of a
        # hard-coded 64 — the final batch may be smaller and would
        # otherwise raise an IndexError.
        for i in range(signal.shape[0]):
            x_ = reconstructed_signal[i].detach().numpy()
            x = signal[i].numpy()
            y_true.append(int(sample['anomaly'][i].detach()))
            reconstruction_error.append(dtw_reconstruction_error(x, x_))
        critic_score.extend(torch.squeeze(critic_x(signal)).detach().numpy())

    reconstruction_error = stats.zscore(reconstruction_error)
    critic_score = stats.zscore(critic_score)
    anomaly_score = reconstruction_error * critic_score

    y_predict = detect_anomaly(anomaly_score)
    y_predict = prune_false_positive(y_predict, anomaly_score,
                                     change_threshold=0.1)
    find_scores(y_true, y_predict)
#Other error metrics - point wise difference, Area difference.
def dtw_reconstruction_error(x, x_):
    """Dynamic-time-warping distance between 1-D arrays ``x`` and ``x_``.

    Classic O(n*m) dynamic program with absolute difference as the local
    cost.  Returns the accumulated cost of the optimal alignment.
    """
    n, m = x.shape[0], x_.shape[0]
    # Initialize the whole table to +inf in one vectorized call (the
    # original filled it with a Python-level double loop), then anchor
    # the (0, 0) cell.
    dtw_matrix = np.full((n + 1, m + 1), np.inf)
    dtw_matrix[0, 0] = 0

    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = abs(x[i - 1] - x_[j - 1])
            # Extend the cheapest of the three adjacent alignments
            # (insertion, deletion, match).
            last_min = min(dtw_matrix[i - 1, j],
                           dtw_matrix[i, j - 1],
                           dtw_matrix[i - 1, j - 1])
            dtw_matrix[i, j] = cost + last_min
    return dtw_matrix[n, m]
def unroll_signal(x):
    """Collapse a 100-sample window into a single value via its median."""
    window = np.array(x).reshape(100)
    return np.median(window)
def prune_false_positive(is_anomaly, anomaly_score, change_threshold):
    """Prune likely false-positive anomaly sequences in place.

    Groups consecutive ``is_anomaly == 1`` runs into sequences, records each
    sequence's maximum anomaly score, and deletes (zeroes out) sequences
    whose maximum score is not sufficiently larger (relative change below
    ``change_threshold``) than the next-highest sequence maximum.

    Returns the (mutated) ``is_anomaly`` array.
    """
    #The model might detect a high number of false positives.
    #In such a scenario, pruning of the false positive is suggested.
    #Method used is as described in the Section 5, part D Identifying Anomalous
    #Sequence, sub-part - Mitigating False positives
    #TODO code optimization
    # Each entry: [start_idx, end_idx, max_score_in_sequence, delete_flag]
    seq_details = []
    delete_sequence = 0
    start_position = 0
    end_position = 0
    max_seq_element = anomaly_score[0]
    for i in range(1, len(is_anomaly)):
        # Close the current sequence at the end of the array or on a 1->0 edge;
        # open a new one on a 0->1 edge.
        if i+1 == len(is_anomaly):
            seq_details.append([start_position, i, max_seq_element, delete_sequence])
        elif is_anomaly[i] == 1 and is_anomaly[i+1] == 0:
            end_position = i
            seq_details.append([start_position, end_position, max_seq_element, delete_sequence])
        elif is_anomaly[i] == 1 and is_anomaly[i-1] == 0:
            start_position = i
            max_seq_element = anomaly_score[i]
        # Track the running maximum inside an ongoing anomalous run.
        if is_anomaly[i] == 1 and is_anomaly[i-1] == 1 and anomaly_score[i] > max_seq_element:
            max_seq_element = anomaly_score[i]
    # Sort sequence maxima descending and compute the relative drop between
    # consecutive maxima.
    max_elements = list()
    for i in range(0, len(seq_details)):
        max_elements.append(seq_details[i][2])
    max_elements.sort(reverse=True)
    max_elements = np.array(max_elements)
    change_percent = abs(max_elements[1:] - max_elements[:-1]) / max_elements[1:]
    #Appending 0 for the 1 st element which is not change percent
    delete_seq = np.append(np.array([0]), change_percent < change_threshold)
    #Mapping max element and seq details
    for i, max_elt in enumerate(max_elements):
        for j in range(0, len(seq_details)):
            if seq_details[j][2] == max_elt:
                seq_details[j][3] = delete_seq[i]
    # Zero out every sequence flagged for deletion.
    for seq in seq_details:
        if seq[3] == 1: #Delete sequence
            is_anomaly[seq[0]:seq[1]+1] = [0] * (seq[1] - seq[0] + 1)
    return is_anomaly
def detect_anomaly(anomaly_score):
    """Flag points lying outside 3 sigma of a sliding window as anomalies.

    Slides a window of one third of the series (stepping by one thirtieth)
    and marks each covered point 1 if it falls outside mean +/- 3*std of its
    window, else 0.  Overlapping windows overwrite earlier decisions.
    """
    total = len(anomaly_score)
    window_size = total // 3
    step_size = total // (3 * 10)
    is_anomaly = np.zeros(total)

    for start in range(0, total - window_size, step_size):
        window = anomaly_score[start:start + window_size]
        center = np.mean(window)
        spread = np.std(window)
        lower, upper = center - 3 * spread, center + 3 * spread
        for offset, value in enumerate(window):
            is_anomaly[start + offset] = 0 if lower < value < upper else 1
    return is_anomaly
def find_scores(y_true, y_predict):
    """Print accuracy, precision, recall and F1 for binary label lists.

    ``y_true`` and ``y_predict`` are parallel sequences of 0/1 labels.
    Bug fix: the original raised ZeroDivisionError when the input was empty,
    when there were no positive predictions (precision undefined) or no
    positive labels (recall undefined); those cases are now reported
    explicitly instead of crashing.
    """
    if not y_true:
        print('No samples to score')
        return
    tp = tn = fp = fn = 0
    for actual, predicted in zip(y_true, y_predict):
        if actual == 1 and predicted == 1:
            tp += 1
        elif actual == 1 and predicted == 0:
            fn += 1
        elif actual == 0 and predicted == 0:
            tn += 1
        elif actual == 0 and predicted == 1:
            fp += 1
    print ('Accuracy {:.2f}'.format((tp + tn)/(len(y_true))))
    if tp + fp == 0 or tp + fn == 0:
        # No positive predictions or no positive labels: precision/recall
        # are undefined, so report and bail out instead of dividing by zero.
        print ('Precision/Recall undefined (no positive predictions/labels)')
        return
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    print ('Precision {:.2f}'.format(precision))
    print ('Recall {:.2f}'.format(recall))
    if precision + recall == 0:
        print ('F1 Score 0.00')
    else:
        print ('F1 Score {:.2f}'.format(2 * precision * recall / (precision + recall)))
|
425352
|
import unittest
from ABBA import ABBA
import numpy as np
import warnings
from util import dtw
def ignore_warnings(test_func):
    """Decorator that silences all warnings raised while *test_func* runs.

    Improvements over the original: the wrapper now propagates the wrapped
    function's return value and keeps its metadata via ``functools.wraps``
    (so unittest reports the real test name).
    """
    import functools

    @functools.wraps(test_func)
    def do_test(self, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return test_func(self, *args, **kwargs)
    return do_test
class test_ABBA(unittest.TestCase):
#--------------------------------------------------------------------------#
# _check_parameters
#--------------------------------------------------------------------------#
def test_CheckParameters_TolFloat(self):
"""
tolerance should be float not integer
"""
self.assertRaises(ValueError, ABBA, tol=1)
def test_CheckParameters_TolList(self):
"""
tolerance should be list, maximum size 2
"""
self.assertRaises(ValueError, ABBA, tol=[1.0, 1.0, 1.0])
def test_CheckParameters_SclPositive(self):
"""
Scaling parameter should be >=0
"""
self.assertRaises(ValueError, ABBA, scl=-0.1)
def test_CheckParameters_KBounds(self):
"""
min_k and max_k bounds should be such that min_k < max_k
"""
self.assertRaises(ValueError, ABBA, min_k=6, max_k=3)
#--------------------------------------------------------------------------#
# transform
#--------------------------------------------------------------------------#
def test_transform_SimpleExample(self):
"""
Check transform function returns identical results as performing
compression followed by digitization.
"""
abba = ABBA(verbose=0, scl=1)
ts = np.random.rand(20).tolist()
string, centers = abba.transform(ts)
pieces = abba.compress(np.array(ts))
string2, centers2 = abba.digitize(pieces)
self.assertTrue(np.allclose(centers, centers2))
#--------------------------------------------------------------------------#
# inverse_transform
#--------------------------------------------------------------------------#
def test_InverseTransform_SimpleExample(self):
"""
Check inverse_transform function returns identical results as performing
inverse_digitization followed by quantization then inverse_compression.
"""
abba = ABBA(verbose=0, scl=1)
ts = np.random.rand(20)
pieces = abba.compress(np.array(ts))
string, centers = abba.digitize(pieces)
reconstructed_ts1 = abba.inverse_transform(string, centers, ts[0])
pieces1 = abba.inverse_digitize(string, centers)
pieces1 = abba.quantize(pieces1)
reconstructed_ts2 = abba.inverse_compress(ts[0], pieces1)
self.assertTrue(np.allclose(reconstructed_ts1, reconstructed_ts2))
#--------------------------------------------------------------------------#
# compress
#--------------------------------------------------------------------------#
@ignore_warnings
def test_Compress_tslength2(self):
"""
Test compression when time series given is of length 2
"""
ts = [1, 3]
abba = ABBA(verbose=0)
pieces = abba.compress(ts)
self.assertTrue(np.allclose(np.array([[1.0,2.0,0.0]]), pieces))
@ignore_warnings
def test_Compress_Flatline(self):
"""
Test compression on a flat time series
"""
ts = [1]*100
abba = ABBA(verbose=0, tol=[0.1])
pieces = abba.compress(ts)
self.assertTrue(np.allclose(np.array([[99,0.0,0.0]]), pieces))
@ignore_warnings
def test_Compress_NoCompression(self):
"""
Test compression on time series where tolerance so small that no compression
is achieved
"""
ts = [1, -1]*50
abba = ABBA(verbose=0)
pieces = abba.compress(ts)
correct_pieces = [[1, -2, 0], [1, 2, 0]]*49
correct_pieces += [[1, -2, 0]]
correct_pieces = np.array(correct_pieces)
self.assertTrue(np.allclose(correct_pieces, pieces))
@ignore_warnings
def test_Compress_Norm2(self):
"""
Test compression with norm = 2
"""
ts = [0, 2, 3, 2, 4, -1, 0, -1, 1, 0, -4, 0]
abba = ABBA(tol=2.0, verbose=0)
pieces = abba.compress(ts)
correct_pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 38/16],
[1, -4, 0],
[1, 4, 0]]
correct_pieces = np.array(correct_pieces)
self.assertTrue(np.allclose(correct_pieces, pieces))
@ignore_warnings
def test_Compress_Norm1(self):
"""
Test compression with norm = 1
"""
ts = [0, 2, 3, 2, 4, -1, 0, -1, 1, 0, -4, 0]
abba = ABBA(tol=2.0, verbose=0, norm=1)
pieces = abba.compress(ts)
correct_pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
correct_pieces = np.array(correct_pieces)
self.assertTrue(np.allclose(correct_pieces, pieces))
#--------------------------------------------------------------------------#
# inverse_compress
#--------------------------------------------------------------------------#
@ignore_warnings
def test_InverseCompress_OnePiece(self):
"""
Test inverse_compress with only one piece
"""
abba = ABBA(verbose=0)
pieces = np.array([[1,4.0,0]])
ts = abba.inverse_compress(0, pieces)
correct_ts = np.array([0, 4])
self.assertTrue(np.allclose(ts, correct_ts))
@ignore_warnings
def test_InverseCompress_Example(self):
"""
Test inverse_compress on generic example
"""
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
abba = ABBA(verbose=0)
ts = abba.inverse_compress(0, pieces)
correct_ts = np.array([0, 1, 2, 3, 4, -1, -3/4, -2/4, -1/4, 0, -4, 0])
self.assertTrue(np.allclose(ts, correct_ts))
#--------------------------------------------------------------------------#
# digitize
#--------------------------------------------------------------------------#
@ignore_warnings
def test_Digitize_ExampleScl0(self):
"""
Test digitize function on same generic example with scl = 0
"""
abba = ABBA(scl=0, verbose=0, seed=True)
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[3, 3], [1, -9/2]])
self.assertTrue(all([string=='ababa', np.allclose(centers, correct_centers)]))
@ignore_warnings
def test_Digitize_ExampleScl1(self):
"""
Test digitize function on same generic example with scl = 1
"""
abba = ABBA(scl=1, verbose=0, seed=True)
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[4, 5/2], [1, -9/2], [1, 4]])
self.assertTrue(all([string=='ababc', np.allclose(centers, correct_centers)]))
@ignore_warnings
def test_Digitize_ExampleSclInf(self):
"""
Test digitize function on same generic example with scl = inf
"""
abba = ABBA(scl=np.inf, verbose=0, seed=True)
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[1, -5/3], [4, 5/2]])
self.assertTrue(all([string=='babaa', np.allclose(centers, correct_centers)]))
@ignore_warnings
def test_Digitize_SymbolOrdering(self):
"""
Test digitize function orders letters by most occuring symbol.
"""
abba = ABBA(verbose=0)
pieces = [[1,1,0],
[50,50,0],
[100,100,0],
[2,2,0],
[51,51,0],
[3,3,0]]
pieces = np.array(pieces).astype(float)
string, centers = abba.digitize(pieces)
self.assertTrue('abcaba'==string)
@ignore_warnings
def test_Digitize_OneCluster(self):
"""
Test digitize function to make one large cluster
"""
inc = np.random.randn(100,1)
abba = ABBA(verbose=0, min_k=1, tol=10.0)
pieces = np.hstack([np.ones((100,1)), inc, np.zeros((100,1))])
string, centers = abba.digitize(pieces)
self.assertTrue('a'*100 == string)
@ignore_warnings
def test_Digitize_NotEnoughPieces(self):
"""
Test digitize function where min_k is greater than the number of pieces
"""
abba = ABBA(verbose=0, min_k=10)
pieces = [[4, 4, 3],
[1, -5, 0],
[4, 1, 5/2],
[1, -4, 0],
[1, 4, 0]]
pieces = np.array(pieces)
self.assertRaises(ValueError, abba.digitize, pieces)
@ignore_warnings
def test_Digitize_TooManyK(self):
"""
Test digitize function where less than min_k are required for perfect
clustering.
"""
abba = ABBA(verbose=0, min_k=3, seed=True)
pieces = [[1, 1, 0],
[1, 1, 0],
[1, 1, 0],
[1, 1, 0],
[1, 1, 0]]
pieces = np.array(pieces).astype(float)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[1, 1], [1, 1], [1, 1]])
self.assertTrue(all([string=='aaaaa', np.allclose(centers, correct_centers)]))
@ignore_warnings
def test_Digitize_zeroerror(self):
"""
Test digitize function when zero error, i.e. use max amount of clusters.
"""
abba = ABBA(verbose=0, max_k=5, tol=[0.01, 0])
pieces = [[1, 1, 0],
[1, 2, 0],
[1, 3, 0],
[1, 4, 0],
[1, 5, 0]]
pieces = np.array(pieces).astype(float)
string, centers = abba.digitize(pieces)
correct_centers = np.array([[1, 1], [1, 2], [1, 3], [1, 4], [1, 5]])
self.assertTrue(all([string=='abcde', np.allclose(centers, correct_centers)]))
#--------------------------------------------------------------------------#
# inverse_digitize
#--------------------------------------------------------------------------#
@ignore_warnings
def test_InverseDigitize_example(self):
"""
Test inverse digitize on a generic example
"""
abba = ABBA(verbose=0)
centers = np.array([[3, 3], [1, -9/2]]).astype(float)
string = 'ababa'
pieces = abba.inverse_digitize(string, centers)
correct_pieces = [[3, 3],
[1, -9/2],
[3, 3],
[1, -9/2],
[3, 3]]
correct_pieces = np.array(correct_pieces).astype(float)
self.assertTrue(np.allclose(pieces, correct_pieces))
#--------------------------------------------------------------------------#
# quantize
#--------------------------------------------------------------------------#
@ignore_warnings
def test_Quantize_NoRoundingNeeded(self):
"""
Test quantize function on an array where no rounding is needed
"""
pieces = [[2, 1],
[3, 1],
[4, 2],
[1, 2],
[1, -5],
[2, -1]]
pieces = np.array(pieces)
abba = ABBA(verbose=0)
self.assertTrue(np.allclose(pieces, abba.quantize(pieces)))
@ignore_warnings
def test_Quantize_AccumulateError(self):
"""
Test quantize function with distributed rounding
"""
pieces = [[7/4, 1],
[7/4, 1],
[7/4, 1],
[7/4, 1],
[5/4, 1],
[5/4, 1],
[5/4, 1],
[5/4, 1]]
pieces = np.array(pieces).astype(float)
abba = ABBA(verbose=0)
pieces = abba.quantize(pieces)
correct_pieces = [[2, 1],
[2, 1],
[1, 1],
[2, 1],
[1, 1],
[2, 1],
[1, 1],
[1, 1]]
self.assertTrue(np.allclose(correct_pieces, abba.quantize(pieces)))
@ignore_warnings
def test_Quantise_Half(self):
"""
Test quantize function where all values are 1.5
"""
pieces = [[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1],
[3/2, 1]]
pieces = np.array(pieces).astype(float)
abba = ABBA(verbose=0)
pieces = abba.quantize(pieces)
correct_pieces = [[2, 1],
[1, 1],
[2, 1],
[1, 1],
[2, 1],
[1, 1],
[2, 1],
[1, 1]]
self.assertTrue(np.allclose(correct_pieces, abba.quantize(pieces)))
#--------------------------------------------------------------------------#
# _build_centers
#--------------------------------------------------------------------------#
@ignore_warnings
def test_BuildCenters_c1(self):
"""
Test utility function _build_centers on column 2
"""
pieces = [[4, 4],
[1, -5],
[4, 1],
[1, -4],
[1, 4]]
pieces = np.array(pieces).astype(float)
labels = np.array([0, 1, 1, 1, 0])
k = 2
c1 = [4,-4]
col = 0
abba = ABBA(verbose=0)
c = abba._build_centers(pieces, labels, c1, k, col)
correct_c = np.array([[5/2, 4], [2, -4]])
self.assertTrue(np.allclose(correct_c, c))
@ignore_warnings
def test_BuildCenters_c2(self):
"""
Test utility function _build_centers on column 1
"""
pieces = [[4, 4],
[1, -5],
[4, 1],
[1, -4],
[1, 4]]
pieces = np.array(pieces).astype(float)
labels = np.array([0, 1, 0, 1, 1])
k = 2
c1 = [4,1]
col = 1
abba = ABBA(verbose=0)
c = abba._build_centers(pieces, labels, c1, k, col)
correct_c = np.array([[4, 5/2], [1, -5/3]])
self.assertTrue(np.allclose(correct_c, c))
#--------------------------------------------------------------------------#
# _max_cluster_var
#--------------------------------------------------------------------------#
@ignore_warnings
def test_MaxClusterVar_example(self):
"""
Test utility function _max_cluster_var
"""
pieces = [[4, 4],
[1, -5],
[4, 1],
[1, -4],
[1, 4]]
pieces = np.array(pieces).astype(float)
labels = np.array([0, 0, 0, 1, 1])
centers = np.array([[3, 0], [1, 0]]).astype(float)
k = 2
abba = ABBA()
(e1, e2) = abba._max_cluster_var(pieces, labels, centers, k)
ee1 = max([np.var([1,-2,1]), np.var([0,0])])
ee2 = max([np.var([4,-5,1]), np.var([4,-4])])
self.assertTrue(np.allclose([e1, e2], [ee1, ee2]))
#--------------------------------------------------------------------------#
# digitize when ordered=True
#--------------------------------------------------------------------------#
@ignore_warnings
def test_DigitizeInc_NotWeightedNotSymmetricOneNorm(self):
"""
Test digitize_inc with weighted=False and symmetric=False and 1 norm
"""
pieces = [[1, -5],
[2, 0],
[1, -6],
[2, 2],
[1, -4],
[1, 3],
[4, 8]]
pieces = np.array(pieces).astype(float)
abba = ABBA(verbose=0, norm=1, c_method='incremental', tol=2/3+1e-10, weighted=False, symmetric=False)
string, centers = abba.digitize(pieces)
correct_centers = [[1, -5],
[5/3, 2],
[4, 8]]
correct_centers = np.array(correct_centers)
self.assertTrue(np.allclose(centers, correct_centers))
@ignore_warnings
def test_DigitizeInc_NotWeightedNotSymmetricTwoNorm(self):
"""
Test digitize_inc with weighted=False and symmetric=False and 2 norm
"""
pieces = [[1, -5],
[2, 0],
[1, -6],
[2, 2],
[1, -4],
[1, 3],
[4, 8]]
pieces = np.array(pieces).astype(float)
abba = ABBA(verbose=0, norm=2, c_method='incremental', tol=42/27+1e-10, weighted=False, symmetric=False)
string, centers = abba.digitize(pieces)
correct_centers = [[1, -5],
[5/3, 5/3],
[4, 8]]
correct_centers = np.array(correct_centers)
self.assertTrue(np.allclose(centers, correct_centers))
@ignore_warnings
def test_DigitizeInc_WeightedNotSymmetricOneNorm(self):
"""
Test digitize_inc with weighted=True and symmetric=False and 1 norm
"""
pieces = [[1, -5],
[2, 0],
[1, -6],
[2, 2],
[1, -4],
[1, 3],
[4, 8]]
pieces = np.array(pieces).astype(float)
abba = ABBA(verbose=0, norm=1, c_method='incremental', tol=1+1e-10, weighted=True, symmetric=False)
string, centers = abba.digitize(pieces)
correct_centers = [[5/4, -89/24],
[3/2, 5/2],
[4, 8]]
correct_centers = np.array(correct_centers)
self.assertTrue(np.allclose(centers, correct_centers))
@ignore_warnings
def test_DigitizeInc_WeightedNotSymmetricTwoNorm(self):
"""
Test digitize_inc with weighted=True and symmetric=False and 2 norm
"""
pieces = [[1, -5],
[2, 0],
[1, -6],
[2, 2],
[1, -4],
[1, 3],
[4, 8]]
pieces = np.array(pieces).astype(float)
abba = ABBA(verbose=0, norm=2, c_method='incremental', tol=(140/(196*3)+1e-10), weighted=True, symmetric=False)
string, centers = abba.digitize(pieces)
correct_centers = [[1, -72/14],
[3/2, 12/5],
[2, 0],
[4, 8]]
correct_centers = np.array(correct_centers)
self.assertTrue(np.allclose(centers, correct_centers))
# TODO Weighted symmetric 1 norm
# TODO Weighted symmetric 2 norm
# TODO Not Weighted symmetric 1 norm
# TODO Not Weighted symmetric 2 norm
@ignore_warnings
def test_DigitizeInc_SymbolOrdering(self):
"""
Test digitize function orders letters by most occuring symbol.
"""
abba = ABBA(verbose=0, tol=1.0, c_method='incremental')
pieces = [[1,1,0],
[50,50,0],
[100,100,0],
[2,2,0],
[51,51,0],
[3,3,0]]
pieces = np.array(pieces).astype(float)
string, centers = abba.digitize(pieces)
self.assertTrue('abcaba'==string)
#--------------------------------------------------------------------------#
# get_patches
#--------------------------------------------------------------------------#
def test_GetPatches_SimpleExample(self):
"""
Check the get_patches function works as expected
"""
abba = ABBA(verbose=0)
ts = np.array([0, 1, 2, 3, 4, 2, 0, 2, 4, 3, 2, 1, 0])
pieces = [[4, 4, 0],
[2, -4, 0],
[2, 4, 0],
[4, -4, 0]]
pieces = np.array(pieces)
string = 'abab'
centers = [[3, 4],
[3, -4]]
centers = np.array(centers)
patches = abba.get_patches(ts, pieces, string, centers)
self.assertTrue(np.allclose(patches['a'][0] + patches['a'][1], -patches['b'][0] - patches['b'][1]))
#--------------------------------------------------------------------------#
# patched_reconstruction
#--------------------------------------------------------------------------#
def test_PatchedReconstruction_SimpleExample(self):
"""
Check the patched_reconstruction function works as expected
"""
abba = ABBA(verbose=0)
ts = np.array([0, 2, 2, 2, 4, 2, 2, 2, 0, 2, 2, 2, 4, 2, 2, 2, 0])
pieces = [[4, 4, 0],
[4, -4, 0],
[4, 4, 0],
[4, -4, 0]]
pieces = np.array(pieces)
string = 'abab'
centers = [[4, 4],
[4, -4]]
centers = np.array(centers)
reconstructed_ts = abba.patched_reconstruction(ts, pieces, string, centers)
self.assertTrue(np.allclose(ts, reconstructed_ts))
#--------------------------------------------------------------------------#
# util/dtw
#--------------------------------------------------------------------------#
def test_dtw_warping(self):
"""
Compare dynamic time warping distance between two time series that can be
warped perfectly
"""
x = [0, 1, 0, 0, 0, 0, 0, 0, 0 ,0]
y = [0, 0, 0, 0, 0, 0, 0, 1, 0 ,0]
d = dtw(x, y)
self.assertTrue(np.allclose(d, 0))
def test_dtw_path(self):
    """
    Check dtw returns the right path for a specific example.
    """
    x = [0, 0, 1, 2, 1, 0, 0]
    y = [0, 1, 3, 1, 0]
    d, path = dtw(x, y, return_path=True)
    correct_path = [(0, 0), (1, 0), (2, 1), (3, 2), (4, 3), (5, 4), (6, 4)]
    # BUG FIX: the original `assertTrue(path, correct_path)` only checked
    # that `path` was truthy -- `correct_path` was silently used as the
    # failure *message*, so a wrong path could never fail the test.
    self.assertEqual(path, correct_path)
def test_dtw_1norm(self):
    """
    dtw with an absolute-difference (1-norm) distance measure instead of
    the default.
    """
    one_norm = lambda a, b: np.abs(a - b)
    x = [1, 2, 4, 1, 3, 1, 5]
    y = [2, 1, 3, 4]
    d, path = dtw(x, y, return_path=True, dist=one_norm)
    expected_path = [(0, 0), (1, 0), (2, 0), (3, 1), (4, 2), (5, 2), (6, 3)]
    self.assertTrue(np.allclose(d, 6))
    self.assertTrue(expected_path == path)
def test_dtw_redundant(self):
    """
    Test dtw with filter_redundant turned on: a straight ramp up and down
    should match its three corner points at zero cost.
    """
    triangle = [0, 1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1, 0]
    corners = [0, 7, 0]
    self.assertTrue(np.allclose(dtw(triangle, corners, filter_redundant=True), 0))
def test_dtw_NoRedundant(self):
    """
    Example where redundancy filtering should remove no datapoints, so
    the filtered and unfiltered distances must agree.
    """
    x = [2, 4, 3, 7, 2, -5, 6, 2, 0, -1, 5]
    y = [2, -1, -5, 3, 2, 0, 3, -2, -4, 0]
    self.assertEqual(
        dtw(x, y, filter_redundant=True),
        dtw(x, y, filter_redundant=False),
    )
def test_dtw_RedundantWithPath(self):
    """
    filter_redundant combined with return_path: the returned path indices
    refer back to the original (unfiltered) series.
    """
    x = [0, 3, 6, 9, 12]
    y = [0, 12]
    _, path = dtw(x, y, filter_redundant=True, return_path=True)
    self.assertEqual([(0, 0), (4, 1)], path)
def test_dtw_RedundantBothShort(self):
    """
    dtw on two length-2 series with filter_redundant on: nothing can be
    filtered, and the distance is computed as usual.
    """
    first = [0, 4]
    second = [2, 5]
    distance, _ = dtw(first, second, filter_redundant=True, return_path=True)
    self.assertEqual(distance, 5)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
425374
|
import csv
import codecs
import random
from utils import normalizeString
def process(_f):
    """Convert a labelled CSV into shuffled train/val splits (90/10).

    Each input row must have exactly 6 columns; columns 3-5 are taken as
    (query, document, label).  Normalized rows are shuffled and written
    tab-separated to data/train and data/val.

    :param _f: path of the input CSV file.
    """
    datas = []
    # utf_8_sig strips a leading BOM (e.g. from Excel exports).
    # BUG FIX: all three file handles were previously opened without
    # `with` and the input file was never closed; list comprehensions
    # were also used purely for their side effects.
    with codecs.open(_f, 'r+', 'utf_8_sig') as csvfile:
        for line in csv.reader(csvfile):
            if len(line) != 6:
                continue
            q, d, label = line[3], line[4], line[5]
            q = " ".join(q.strip().split())  # collapse internal whitespace
            d = " ".join(d.strip().split())
            label = label.strip()
            datas.append([normalizeString(q), normalizeString(d), label])
    random.shuffle(datas)
    _split = len(datas) // 10
    with open("data/train", "w") as _train:
        for d in datas[:9 * _split]:
            _train.write("\t".join(d) + "\n")
    with open("data/val", "w") as _val:
        for d in datas[9 * _split:]:
            _val.write("\t".join(d) + "\n")
# Build the train/val split from the raw labelled CSV when run as a script.
if __name__ == "__main__":
    process("data/train.csv")
|
425385
|
import csv
import json
from datetime import datetime
import django
import pytest
from django.contrib.auth.models import User
from django.utils import timezone
import data_browser.models
from .core import models
from .util import update_fe_fixture
def dump(val):
    """Pretty-print *val* as deterministic (sorted-key, indented) JSON."""
    formatted = json.dumps(val, indent=4, sort_keys=True)
    print(formatted)
@pytest.fixture
def products(db):
    """Three products ("a", "b", "c" / sizes 1, 1, 2 g) from one producer."""
    address = models.Address.objects.create(city="london")
    producer = models.Producer.objects.create(name="Bob", address=address)
    for name, size in [("a", 1), ("b", 1), ("c", 2)]:
        models.Product.objects.create(
            name=name, size=size, size_unit="g", producer=producer
        )
@pytest.fixture
def pivot_products(db):
    """Products created at fixed timestamps (sizes 1..6) for pivot tests."""
    address = models.Address.objects.create(city="london", street="bad")
    producer = models.Producer.objects.create(name="Bob", address=address)
    creation_times = [
        datetime(2020, 1, 1, tzinfo=timezone.utc),
        datetime(2020, 2, 1, tzinfo=timezone.utc),
        datetime(2020, 2, 2, tzinfo=timezone.utc),
        datetime(2021, 1, 1, tzinfo=timezone.utc),
        datetime(2021, 1, 2, tzinfo=timezone.utc),
        datetime(2021, 1, 3, tzinfo=timezone.utc),
    ]
    for size, dt in enumerate(creation_times, start=1):
        models.Product.objects.create(
            created_time=dt, name=str(dt), size=size, producer=producer
        )
@pytest.mark.skipif(django.VERSION < (2, 2), reason="Django version 2.2 required")
def test_query_html(admin_client, snapshot):
    """The HTML query page returns 200 and its embedded config matches the snapshot."""
    url = "/data_browser/query/core.Product/size-0,name+1,size_unit.html?size__lt=2&id__gt=0"
    response = admin_client.get(url)
    assert response.status_code == 200
    snapshot.assert_match(json.loads(response.context["config"]), "config")
def test_query_query(admin_client, snapshot):
    """The .query format echoes the parsed query back as JSON."""
    url = "/data_browser/query/core.Product/size-0,name+1,size_unit.query?size__lt=2&id__gt=0"
    response = admin_client.get(url)
    assert response.status_code == 200
    snapshot.assert_match(json.loads(response.content.decode("utf-8")), "query")
@pytest.mark.parametrize("format", ["sql", "profile", "pstats", "profile_sql", "qs"])
def test_query_misc_formats(admin_client, format):
    """Each misc format responds 200; results vary so only the status is
    checked -- it is sufficient that the request doesn't blow up."""
    response = admin_client.get(
        f"/data_browser/query/core.Product/size-0,name+1,size_unit.{format}?size__lt=2&id__gt=0"
    )
    assert response.status_code == 200
@pytest.mark.skipif(django.VERSION < (2, 1), reason="Django version 2.1 required")
def test_query_explain(admin_client):
    """The .explain format (Django >= 2.1) responds 200."""
    url = "/data_browser/query/core.Product/size-0,name+1,size_unit.explain?size__lt=2&id__gt=0"
    assert admin_client.get(url).status_code == 200
def test_query_sql_aggregate(admin_client):
    """Aggregated fields can be rendered as SQL."""
    response = admin_client.get("/data_browser/query/core.Product/size__count.sql")
    assert response.status_code == 200
def test_query_qs_variants(admin_client, snapshot):
    """The .qs format handles is_null, aggregates and annotations together."""
    url = "/data_browser/query/core.Product/size__is_null,size__count,annotated.qs"
    response = admin_client.get(url)
    assert response.status_code == 200
    snapshot.assert_match(response.content.decode("utf-8").splitlines(), "content")
@pytest.mark.parametrize(
    "format", ["bad", "profile_bad", "pstats_bad", "profilesql", "pstatsbad"]
)
def test_query_bad_formats(admin_client, format):
    """Unknown or misspelled format extensions 404."""
    response = admin_client.get(
        f"/data_browser/query/core.Product/size-0,name+1,size_unit.{format}?size__lt=2&id__gt=0"
    )
    assert response.status_code == 404
@pytest.mark.skipif(django.VERSION < (2, 2), reason="Django version 2.2 required")
def test_query_html_no_perms(admin_user, admin_client, snapshot):
    """A non-superuser still gets the page; the config snapshot captures
    whatever reduced state they are served."""
    admin_user.is_superuser = False
    admin_user.save()
    response = admin_client.get("/data_browser/query//.html?")
    assert response.status_code == 200
    snapshot.assert_match(json.loads(response.context["config"]), "config")
@pytest.mark.skipif(django.VERSION < (2, 2), reason="Django version 2.2 required")
def test_query_ctx(admin_client, snapshot):
    """The .ctx format returns the frontend context; also refresh the FE fixture."""
    response = admin_client.get("/data_browser/query//.ctx?")
    assert response.status_code == 200
    config = response.json()
    snapshot.assert_match(config, "config")
    update_fe_fixture("frontend/src/context_fixture.json", config)
@pytest.mark.skipif(django.VERSION < (2, 2), reason="Django version 2.2 required")
def test_query_ctx_m2m(admin_client, snapshot, mocker):
    """Same as the plain ctx test, but with the feature flag forced on."""
    mocker.patch("data_browser.orm_admin.get_feature_flag", return_value=True)
    response = admin_client.get("/data_browser/query//.ctx?")
    assert response.status_code == 200
    config = response.json()
    snapshot.assert_match(config, "config")
    update_fe_fixture("frontend/src/context_fixture.json", config)
@pytest.mark.usefixtures("products")
def test_query_json_bad_fields(admin_client):
    """Unknown fields and malformed filters are silently dropped; the valid
    parts of the query still produce the expected rows."""
    url = (
        "/data_browser/query/core.Product/"
        "size-0,name+1,size_unit,bob-2,is_onsale,pooducer__name,producer__name.json"
        "?size__lt=2&id__gt=0&bob__gt=1&size__xx=1&size__lt=xx"
    )
    response = admin_client.get(url)
    assert response.status_code == 200
    expected = [
        {
            "size": 1,
            "name": name,
            "size_unit": "g",
            "is_onsale": "False",
            "producer__name": "Bob",
        }
        for name in ("a", "b")
    ]
    assert json.loads(response.content.decode("utf-8"))["rows"] == expected
def test_query_bad_media(admin_client):
    """An unknown media extension 404s."""
    url = "/data_browser/query/core.Product/size-0,name+1,size_unit.bob?size__lt=2&id__gt=0"
    assert admin_client.get(url).status_code == 404
@pytest.mark.usefixtures("products")
def test_query_csv(admin_client):
    """CSV export: header row plus the filtered, sorted product rows."""
    response = admin_client.get(
        "/data_browser/query/core.Product/size-0,name+1,size_unit.csv?size__lt=2&id__gt=0"
    )
    assert response.status_code == 200
    body = response.getvalue().decode("utf-8")
    dump(body)
    rows = list(csv.reader(body.splitlines()))
    dump(rows)
    assert rows == [["Size", "Name", "Size unit"], ["1.0", "a", "g"], ["1.0", "b", "g"]]
@pytest.mark.usefixtures("pivot_products")
def test_query_csv_pivoted(admin_client):
    """Pivoted CSV: month columns (each spanning two body fields) by year rows."""
    response = admin_client.get(
        "/data_browser/query/core.Product/created_time__year+0,&created_time__month+1,id__count,size__max.csv?"
    )
    assert response.status_code == 200
    body = response.getvalue().decode("utf-8")
    dump(body)
    rows = list(csv.reader(body.splitlines()))
    dump(rows)
    assert rows == [
        ["Created time month", "January", "", "February", ""],
        ["Created time year", "ID count", "Size max", "ID count", "Size max"],
        ["2020.0", "1.0", "1.0", "2.0", "3.0"],
        ["2021.0", "3.0", "6.0", "", ""],
    ]
# All 16 flag combinations for the pivot permutation test below.  Each
# character position is a flag read by test_query_csv_pivot_permutations:
#   d = data present (otherwise an excluding id filter is applied)
#   r = include a row (year) field
#   c = include a column (month) field
#   b = include body fields (id count / size max)
testdata = [
    "----",
    "---b",
    "--c-",
    "--cb",
    "-r--",
    "-r-b",
    "-rc-",
    "-rcb",
    "d---",
    "d--b",
    "d-c-",
    "d-cb",
    "dr--",
    "dr-b",
    "drc-",
    "drcb",
]
@pytest.mark.usefixtures("pivot_products")
@pytest.mark.parametrize("key", testdata)
def test_query_csv_pivot_permutations(admin_client, key, snapshot):
    """CSV pivoting works for every combination of row/column/body fields,
    with and without data (see testdata for the flag encoding)."""
    fields = []
    if "r" in key:
        fields.append("created_time__year+0")
    if "c" in key:
        fields.append("&created_time__month+1")
    if "b" in key:
        fields += ["id__count", "size__max"]
    # Without the "d" flag, filter everything out to test the empty case.
    filters = "" if "d" in key else "id__equals=-1"
    response = admin_client.get(
        f"/data_browser/query/core.Product/{','.join(fields)}.csv?{filters}"
    )
    assert response.status_code == 200
    body = response.getvalue().decode("utf-8")
    dump(body)
    rows = list(csv.reader(body.splitlines()))
    dump(rows)
    snapshot.assert_match(rows, "key")
@pytest.mark.usefixtures("products")
def test_query_json(admin_client, snapshot):
    """JSON results for a filtered, sorted query match the snapshot."""
    response = admin_client.get(
        "/data_browser/query/core.Product/size-0,name+1,size_unit.json?size__lt=2&id__gt=0"
    )
    assert response.status_code == 200
    snapshot.assert_match(json.loads(response.content.decode("utf-8")), "data")
@pytest.mark.usefixtures("products")
def test_query_is_null_date_filter(admin_client, snapshot):
    """Filtering a date field with is_null=NotNull produces the snapshot data."""
    response = admin_client.get(
        "/data_browser/query/core.Product/name+0.json?created_time__is_null=NotNull"
    )
    assert response.status_code == 200
    snapshot.assert_match(json.loads(response.content.decode("utf-8")), "data")
@pytest.mark.usefixtures("pivot_products")
def test_query_json_pivot(admin_client, snapshot):
    """Pivoted JSON output (year rows x month columns) matches the snapshot."""
    response = admin_client.get(
        "/data_browser/query/core.Product/created_time__year+0,&created_time__month+1,id__count,size__max.json?"
    )
    assert response.status_code == 200
    snapshot.assert_match(json.loads(response.content.decode("utf-8")), "data")
@pytest.mark.usefixtures("products")
def test_query_json_bad_model(admin_client):
    """Requesting an unknown model name 404s."""
    response = admin_client.get(
        "/data_browser/query/core.Bob/size-0,name+1,size_unit.json?size__lt=2&id__gt=0"
    )
    assert response.status_code == 404
@pytest.mark.usefixtures("products")
def test_view_csv(admin_client, settings, mock_admin_get_queryset):
    """A saved view is served as CSV only while public and allowed.

    Walks the view through: private (404) -> public (200, marked as a
    public view) -> public but disabled site-wide (404) -> public but
    owned by a non-superuser (404).
    """
    view = data_browser.models.View.objects.create(
        model_name="core.Product",
        fields="size-0,name+1,size_unit",
        query="size__lt=2&id__gt=0",
        owner=User.objects.get(),
    )
    # Not public yet -> not found.
    res = admin_client.get(f"/data_browser/view/{view.public_slug}.csv")
    assert res.status_code == 404
    view.public = True
    view.save()
    res = admin_client.get(f"/data_browser/view/{view.public_slug}.csv")
    assert res.status_code == 200
    # The admin queryset hook should see the request flagged as a public view.
    assert mock_admin_get_queryset.call_args[0][1].data_browser["public_view"]
    res = res.getvalue().decode("utf-8")
    dump(res)
    rows = list(csv.reader(res.splitlines()))
    dump(rows)
    assert rows == [["Size", "Name", "Size unit"], ["1.0", "a", "g"], ["1.0", "b", "g"]]
    # Public views can be disabled site-wide via settings.
    settings.DATA_BROWSER_ALLOW_PUBLIC = False
    res = admin_client.get(f"/data_browser/view/{view.public_slug}.csv")
    assert res.status_code == 404
    settings.DATA_BROWSER_ALLOW_PUBLIC = True
    # NOTE(review): the new owner is staff but presumably lacks model
    # permissions, hiding the view again -- confirm against the view logic.
    view.owner = User.objects.create(is_staff=True)
    view.save()
    res = admin_client.get(f"/data_browser/view/{view.public_slug}.csv")
    assert res.status_code == 404
@pytest.mark.usefixtures("products")
def test_view_json(admin_client):
    """A saved view is served as JSON only while public and owned by an
    admin: private -> 404; public -> 200 with rows, pivot skeleton and
    format hints; owner swapped for a plain staff user -> 404 again.
    """
    view = data_browser.models.View.objects.create(
        model_name="core.Product",
        fields="size-0,name+1,size_unit",
        query="size__lt=2&id__gt=0",
        owner=User.objects.get(),
    )
    # Not public yet -> not found.
    res = admin_client.get(f"/data_browser/view/{view.public_slug}.json")
    assert res.status_code == 404
    view.public = True
    view.save()
    res = admin_client.get(f"/data_browser/view/{view.public_slug}.json")
    assert res.status_code == 200
    data = json.loads(res.content.decode("utf-8"))
    dump(data)
    assert data == {
        "rows": [
            {"size": 1, "name": "a", "size_unit": "g"},
            {"size": 1, "name": "b", "size_unit": "g"},
        ],
        # Non-pivoted queries still carry a degenerate pivot structure.
        "cols": [{}],
        "body": [[{}, {}]],
        "length": 2,
        "formatHints": {
            "name": {},
            "size": {
                "highCutOff": 10000000000.0,
                "lowCutOff": 0.0001,
                "maximumFractionDigits": 0,
                "minimumFractionDigits": 0,
                "significantFigures": 3,
            },
            "size_unit": {},
        },
    }
    # NOTE(review): the new owner is staff but presumably lacks model
    # permissions, hiding the view again -- confirm against the view logic.
    view.owner = User.objects.create(is_staff=True)
    view.save()
    res = admin_client.get(f"/data_browser/view/{view.public_slug}.csv")
    assert res.status_code == 404
@pytest.mark.usefixtures("products")
def test_view_bad_filter(admin_client):
    """A public view renders until its stored query contains a bad field
    name, a bad lookup, or a bad value -- each of which yields 400."""
    view = data_browser.models.View.objects.create(
        model_name="core.Product",
        fields="size-0,name+1,size_unit",
        query="size__lt=2&id__gt=0",
        owner=User.objects.get(),
        public=True,
    )
    res = admin_client.get(f"/data_browser/view/{view.public_slug}.json")
    assert res.status_code == 200
    bad_queries = [
        "sixe__lt=2&id__gt=0",  # bad field name
        "size__lx=2&id__gt=0",  # bad lookup
        "size__lt=a&id__gt=0",  # bad value
    ]
    for bad_query in bad_queries:
        view.query = bad_query
        view.save()
        res = admin_client.get(f"/data_browser/view/{view.public_slug}.json")
        assert res.status_code == 400
@pytest.mark.usefixtures("products")
def test_action(admin_client):
    """Round-trip an admin action on all rows: the data browser builds the
    action request, which is then posted to the admin changelist."""
    url = "/data_browser/query/core.Product/id.%s"
    ids = set(models.Product.objects.values_list("id", flat=True))
    assert len(ids) == 3
    # check our view is right
    res = admin_client.get(url % "json")
    assert {row["id"] for row in res.json()["rows"]} == ids
    # ask data browser for the action request
    res = admin_client.post(
        url % "html",
        {"action": "delete_selected", "field": "id"},
        content_type="application/json",
    ).json()
    assert res == {
        "method": "post",
        "url": "/admin/core/product/?",
        "data": [
            ["action", "delete_selected"],
            ["select_across", 0],
            ["index", 0],
            ["data_browser", 1],
            *[["_selected_action", id_] for id_ in ids],
        ],
    }
    # post action to changelist
    data = dict(res["data"])
    data["_selected_action"] = [int(id_) for id_ in ids]  # JS will format 1.0 as 1
    res = admin_client.post(res["url"], data)
    # The admin answers with its confirmation page covering exactly our rows.
    assert "Are you sure you want to delete the selected" in res.rendered_content
    assert set(res.context[0]["queryset"].values_list("id", flat=True)) == ids
@pytest.mark.usefixtures("products")
def test_action_filtered(admin_client):
    """Same round-trip as test_action, but with a filter so that only the
    single matching row is selected for the action."""
    url = "/data_browser/query/core.Product/id.%s?size__equals=2"
    (id_,) = set(models.Product.objects.filter(size=2).values_list("id", flat=True))
    # check our view is right
    res = admin_client.get(url % "json")
    assert {row["id"] for row in res.json()["rows"]} == {id_}
    # ask data browser for the action request
    res = admin_client.post(
        url % "html",
        {"action": "delete_selected", "field": "id"},
        content_type="application/json",
    ).json()
    assert res == {
        "method": "post",
        "url": "/admin/core/product/?",
        "data": [
            ["action", "delete_selected"],
            ["select_across", 0],
            ["index", 0],
            ["data_browser", 1],
            ["_selected_action", id_],
        ],
    }
    # post action to changelist
    data = dict(res["data"])
    data["_selected_action"] = int(data["_selected_action"])  # JS will format 1.0 as 1
    res = admin_client.post(res["url"], data)
    assert "Are you sure you want to delete the selected" in res.rendered_content
    assert set(res.context[0]["queryset"].values_list("id", flat=True)) == {id_}
@pytest.mark.usefixtures("products")
def test_related_action(admin_client):
    """An action can target a related model's field: selecting on
    producer__id collapses to the single related producer and the action
    URL points at the producer changelist."""
    url = "/data_browser/query/core.Product/address__id,producer__id,id.%s"
    product_ids = set(models.Product.objects.values_list("id", flat=True))
    assert len(product_ids) == 3
    (producer_id,) = set(models.Producer.objects.values_list("id", flat=True))
    # check our view is right
    res = admin_client.get(url % "json")
    assert {row["id"] for row in res.json()["rows"]} == product_ids
    assert {row["producer__id"] for row in res.json()["rows"]} == {producer_id}
    # ask data browser for the action request
    res = admin_client.post(
        url % "html",
        {"action": "delete_selected", "field": "producer__id"},
        content_type="application/json",
    ).json()
    assert res == {
        "method": "post",
        "url": "/admin/core/producer/?",
        "data": [
            ["action", "delete_selected"],
            ["select_across", 0],
            ["index", 0],
            ["data_browser", 1],
            ["_selected_action", producer_id],
        ],
    }
    # post action to changelist
    data = dict(res["data"])
    data["_selected_action"] = int(data["_selected_action"])  # JS will format 1.0 as 1
    res = admin_client.post(res["url"], data)
    assert "Are you sure you want to delete the selected" in res.rendered_content
    assert set(res.context[0]["queryset"].values_list("id", flat=True)) == {producer_id}
@pytest.mark.usefixtures("products")
def test_admin_action(admin_client):
    """Selecting on the synthetic `admin` field targets every row in the
    result, producing an action request over all ids."""
    url = "/data_browser/query/core.Product/admin.%s"
    ids = set(models.Product.objects.values_list("id", flat=True))
    assert len(ids) == 3
    # check our view is right
    res = admin_client.get(url % "json")
    assert len(res.json()["rows"]) == 3
    # ask data browser for the action request
    res = admin_client.post(
        url % "html",
        {"action": "delete_selected", "field": "admin"},
        content_type="application/json",
    ).json()
    assert res == {
        "method": "post",
        "url": "/admin/core/product/?",
        "data": [
            ["action", "delete_selected"],
            ["select_across", 0],
            ["index", 0],
            ["data_browser", 1],
            *[["_selected_action", id_] for id_ in ids],
        ],
    }
    # post action to changelist
    data = dict(res["data"])
    data["_selected_action"] = [int(id_) for id_ in ids]  # JS will format 1.0 as 1
    res = admin_client.post(res["url"], data)
    assert "Are you sure you want to delete the selected" in res.rendered_content
    assert set(res.context[0]["queryset"].values_list("id", flat=True)) == ids
|
425392
|
def test_str(RS, str_data):
    """Resampling a string yields a different string of the same type;
    repetition=3 yields a length-3 result."""
    assert RS(str_data, 0.8) != str_data
    assert type(RS(str_data, 0.8)) is str
    repeated = RS(str_data, 0.8, repetition=3)
    assert len(repeated) == 3
def test_list(RS, list_data):
    """Resampling a list yields a different list; repetition=3 scales the
    length by a factor of three."""
    assert RS(list_data, 0.8) != list_data
    assert type(RS(list_data, 0.8)) is list
    repeated = RS(list_data, 0.8, repetition=3)
    assert len(repeated) == 3 * len(list_data)
|
425394
|
import os
from prometheus_client.core import GaugeMetricFamily
# Default location of the exporter's own log file; override with $LOG_PATH.
DEFAULT_LOG_PATH = '/var/log/cloudchef/vmware_exporter/vmware_exporter.log'
APP_NAME = 'vmware-exporter'
log_path = os.environ.get('LOG_PATH', DEFAULT_LOG_PATH)
# KV-store REST paths (presumably Consul; ?recurse fetches a whole subtree)
# -- TODO confirm against the client that consumes these.
cloudentry_path = '/v1/kv/cmp/cloud_entry/vsphere?recurse'
vms_path = '/v1/kv/cmp/resource/vms?recurse'
# Label names attached to every per-VM metric below.
vm_labels = ['external_id', 'external_name']
# Registry of GaugeMetricFamily objects, grouped by collection target
# ('vms', 'datastores', 'hosts').
metric_list = {}
# Per-VM gauges: power state, boot time, snapshot counts/times, CPU count.
metric_list['vms'] = {
    'vmware_vm_power_state': GaugeMetricFamily(
        'vmware_vm_power_state',
        'VMWare VM Power state (On / Off)',
        labels=vm_labels),
    'vmware_vm_boot_timestamp_seconds': GaugeMetricFamily(
        'vmware_vm_boot_timestamp_seconds',
        'VMWare VM boot time in seconds',
        labels=vm_labels),
    'vmware_vm_snapshots': GaugeMetricFamily(
        'vmware_vm_snapshots',
        'VMWare current number of existing snapshots',
        labels=vm_labels),
    'vmware_vm_snapshot_timestamp_seconds': GaugeMetricFamily(
        'vmware_vm_snapshot_timestamp_seconds',
        'VMWare Snapshot creation time in seconds',
        # one sample per snapshot, hence the extra snapshot-name label
        labels=vm_labels + ['vm_snapshot_name']),
    'vmware_vm_num_cpu': GaugeMetricFamily(
        'vmware_vm_num_cpu',
        'VMWare Number of processors in the virtual machine',
        labels=vm_labels)
}
# Per-datastore gauges: capacity/usage plus host and VM counts.
# NOTE(review): the metric names 'vmware_datastore_uncommited_size' and
# 'vmware_datastore_provisoned_size' (and the 'Datasore' description) are
# misspelled, but they are published metric names -- renaming them would
# break existing dashboards/alerts, so they are kept as-is.
metric_list['datastores'] = {
    'vmware_datastore_capacity_size': GaugeMetricFamily(
        'vmware_datastore_capacity_size',
        'VMWare Datasore capacity in bytes',
        labels=['cloud_entry_id', 'name', 'datastore_id', 'host_id']),
    'vmware_datastore_freespace_size': GaugeMetricFamily(
        'vmware_datastore_freespace_size',
        'VMWare Datastore freespace in bytes',
        labels=['cloud_entry_id', 'name', 'datastore_id', 'host_id']),
    'vmware_datastore_uncommited_size': GaugeMetricFamily(
        'vmware_datastore_uncommited_size',
        'VMWare Datastore uncommitted in bytes',
        labels=['cloud_entry_id', 'name', 'datastore_id', 'host_id']),
    'vmware_datastore_provisoned_size': GaugeMetricFamily(
        'vmware_datastore_provisoned_size',
        'VMWare Datastore provisoned in bytes',
        labels=['cloud_entry_id', 'name', 'datastore_id', 'host_id']),
    'vmware_datastore_hosts': GaugeMetricFamily(
        'vmware_datastore_hosts',
        'VMWare Hosts number using this datastore',
        labels=['cloud_entry_id', 'name', 'datastore_id']),
    'vmware_datastore_vms': GaugeMetricFamily(
        'vmware_datastore_vms',
        'VMWare Virtual Machines number using this datastore',
        labels=['cloud_entry_id', 'name', 'datastore_id'])
}
# Per-host gauges: power/boot state plus CPU and memory usage vs. capacity.
metric_list['hosts'] = {
    'vmware_host_power_state': GaugeMetricFamily(
        'vmware_host_power_state',
        'VMWare Host Power state (On / Off)',
        labels=['cloud_entry_id', 'name', 'host_id']),
    'vmware_host_boot_timestamp_seconds': GaugeMetricFamily(
        'vmware_host_boot_timestamp_seconds',
        'VMWare Host boot time in seconds',
        labels=['cloud_entry_id', 'name', 'host_id']),
    'vmware_host_cpu_usage': GaugeMetricFamily(
        'vmware_host_cpu_usage',
        'VMWare Host CPU usage in Mhz',
        labels=['cloud_entry_id', 'name', 'host_id']),
    'vmware_host_cpu_max': GaugeMetricFamily(
        'vmware_host_cpu_max',
        'VMWare Host CPU max availability in Mhz',
        labels=['cloud_entry_id', 'name', 'host_id']),
    'vmware_host_memory_usage': GaugeMetricFamily(
        'vmware_host_memory_usage',
        'VMWare Host Memory usage in Mbytes',
        labels=['cloud_entry_id', 'name', 'host_id']),
    'vmware_host_memory_max': GaugeMetricFamily(
        'vmware_host_memory_max',
        'VMWare Host Memory Max availability in Mbytes',
        labels=['cloud_entry_id', 'name', 'host_id']),
}
# Maps exporter metric name -> vSphere summary property path used to read
# its value.
# NOTE(review): 'vmware_vm_storage_committed_and_uncommitted' maps to None,
# presumably because it is derived from the two storage properties at
# collection time -- confirm in the collector code.
perf_labels = {'vmware_vm_host_memory_usage': "summary.quickStats.hostMemoryUsage",
               "vmware_vm_overall_cpu_usage": "summary.quickStats.overallCpuUsage",
               "vmware_vm_overall_cpu_demand": "summary.quickStats.overallCpuDemand",
               "vmware_vm_max_cpu_usage": "summary.runtime.maxCpuUsage",
               "vmware_vm_memory_size_mb": "summary.config.memorySizeMB",
               "vmware_vm_guest_memory_usage": "summary.quickStats.guestMemoryUsage",
               "vmware_vm_max_memory_usage": "summary.runtime.maxMemoryUsage",
               "vmware_vm_private_memory": "summary.quickStats.privateMemory",
               "vmware_vm_shared_memory": "summary.quickStats.sharedMemory",
               "vmware_vm_compressed_memory": "summary.quickStats.compressedMemory",
               "vmware_vm_ballooned_memory": "summary.quickStats.balloonedMemory",
               "vmware_vm_swapped_memory": "summary.quickStats.swappedMemory",
               "vmware_vm_consumed_overhead_memory": "summary.quickStats.consumedOverheadMemory",
               "vmware_vm_storage_committed": "summary.storage.committed",
               "vmware_vm_storage_uncommitted": "summary.storage.uncommitted",
               "vmware_vm_storage_unshared": "summary.storage.unshared",
               "vmware_vm_storage_committed_and_uncommitted": None
               }
# vSphere property paths requested per VM when collecting.
# NOTE(review): "summary.runtime.maxMemoryUsage" and
# "summary.quickStats.hostMemoryUsage" each appear twice -- harmless but
# presumably unintentional.
vm_properties = ["summary.runtime.powerState", "summary.runtime.bootTime",
                 "summary.runtime.maxMemoryUsage", "summary.quickStats.privateMemory",
                 "summary.quickStats.sharedMemory", "summary.quickStats.compressedMemory",
                 "summary.quickStats.balloonedMemory", "summary.quickStats.swappedMemory",
                 "summary.runtime.maxCpuUsage", "summary.quickStats.overallCpuUsage",
                 "summary.quickStats.consumedOverheadMemory",
                 "summary.quickStats.hostMemoryUsage", "summary.quickStats.overallCpuDemand",
                 "summary.quickStats.guestMemoryUsage", "summary.runtime.maxMemoryUsage",
                 "summary.storage.committed", "summary.storage.uncommitted",
                 "summary.storage.unshared", "guest.disk",
                 "name", "snapshot", "snapshot.rootSnapshotList",
                 "summary.quickStats.hostMemoryUsage",
                 "summary.vm", "summary.runtime.host", "datastore",
                 "summary.config.memorySizeMB", "summary.config.numCpu"]
# Property paths requested per datastore.
data_properties = ["summary.capacity", "summary.freeSpace", "summary.uncommitted",
                   "summary.name", "host", "vm", "summary.datastore"]
# Property paths requested per host system.
host_properties = ["name", "summary.quickStats.overallCpuUsage", "summary.host",
                   "summary.quickStats.overallMemoryUsage", "summary.hardware.memorySize",
                   "summary.hardware.cpuMhz", "summary.hardware.numCpuCores",
                   "summary.runtime.bootTime", "summary.runtime.powerState"]
|
425439
|
from django.urls import path
from story.views import StoryListView
# URL namespace, e.g. reverse("story:stories").
app_name = "story"
urlpatterns = [
    # App root: list all stories.
    path('', StoryListView.as_view(), name='stories'),
]
|
425440
|
import visr_bear
import numpy as np
import numpy.testing as npt
from pathlib import Path
import scipy.signal as sig
from utils import data_path
def do_render(renderer, period, objects=None, direct_speakers=None, hoa=None):
    """Drive `renderer.process` over whole periods and return the stereo output.

    Any of objects/direct_speakers/hoa may be None; missing inputs are
    replaced with 0-channel buffers of matching length.
    """
    provided = [s for s in (objects, direct_speakers, hoa) if s is not None]
    length = provided[0].shape[1]
    empty = np.zeros((0, length), dtype=np.float32)

    def as_f32(samples):
        # the renderer wants float32, C-contiguous data
        if samples is None:
            return empty
        return samples.astype(np.float32, order="C", copy=False)

    objects = as_f32(objects)
    direct_speakers = as_f32(direct_speakers)
    hoa = as_f32(hoa)
    output = np.zeros((2, length), dtype=np.float32)
    for block in range(length // period):
        window = np.s_[:, block * period : (block + 1) * period]
        # output[window] is a view, so process() writes straight into output
        renderer.process(objects[window], direct_speakers[window], hoa[window], output[window])
    return output
def correlate(a, b):
    """Cross-correlate *a* against *b*.

    Returns (delay, correlation): `correlation` is the full
    cross-correlation and `delay[i]` is the lag from a to b associated
    with correlation[i].
    """
    full = np.correlate(b, a, mode="full")
    lags = np.arange(len(full)) - (len(a) - 1)
    return lags, full
# Block size (in samples) used for every render in this module.
period = 512
def render_directspeakers_front(data_file, samples):
    """Render `samples` through a single DirectSpeakers channel."""
    cfg = visr_bear.api.Config()
    cfg.period_size = period
    cfg.data_path = data_file
    cfg.num_objects_channels = 0
    cfg.num_direct_speakers_channels = 1
    renderer = visr_bear.api.Renderer(cfg)
    block = visr_bear.api.DirectSpeakersInput()
    block.rtime = visr_bear.api.Time(0, 1)
    block.duration = visr_bear.api.Time(1, 1)
    renderer.add_direct_speakers_block(0, block)
    return do_render(renderer, period, direct_speakers=samples)
def render_objects_front(data_file, samples):
    """Render `samples` as a single object placed directly in front."""
    cfg = visr_bear.api.Config()
    cfg.period_size = period
    cfg.data_path = data_file
    cfg.num_objects_channels = 1
    cfg.num_direct_speakers_channels = 0
    renderer = visr_bear.api.Renderer(cfg)
    block = visr_bear.api.ObjectsInput()
    block.rtime = visr_bear.api.Time(0, 1)
    block.duration = visr_bear.api.Time(1, 1)
    block.type_metadata.position = visr_bear.api.PolarPosition(0, 0, 1)
    renderer.add_objects_block(0, block)
    return do_render(renderer, period, objects=samples)
def render_diffuse_front(data_file, samples):
    """Render `samples` as a fully diffuse object placed directly in front."""
    cfg = visr_bear.api.Config()
    cfg.period_size = period
    cfg.data_path = data_file
    cfg.num_objects_channels = 1
    cfg.num_direct_speakers_channels = 0
    renderer = visr_bear.api.Renderer(cfg)
    block = visr_bear.api.ObjectsInput()
    block.rtime = visr_bear.api.Time(0, 1)
    block.duration = visr_bear.api.Time(1, 1)
    block.type_metadata.position = visr_bear.api.PolarPosition(0, 0, 1)
    block.type_metadata.diffuse = 1.0
    renderer.add_objects_block(0, block)
    return do_render(renderer, period, objects=samples)
def render_hoa_omni(data_file, samples):
    """Render `samples` as the omni (order 0, degree 0) HOA channel, SN3D."""
    cfg = visr_bear.api.Config()
    cfg.period_size = period
    cfg.data_path = data_file
    cfg.num_objects_channels = 0
    cfg.num_direct_speakers_channels = 0
    cfg.num_hoa_channels = 1
    renderer = visr_bear.api.Renderer(cfg)
    block = visr_bear.api.HOAInput()
    block.rtime = visr_bear.api.Time(0, 1)
    block.duration = visr_bear.api.Time(1, 1)
    block.channels = [0]
    block.type_metadata.orders = [0]
    block.type_metadata.degrees = [0]
    block.type_metadata.normalization = "SN3D"
    renderer.add_hoa_block(0, block)
    return do_render(renderer, period, hoa=samples)
def test_objects_direct_speakers_delays():
    """check that delays between direct/diffuse/directspeakers paths match.
    These share the same IRs so can be tested exactly."""
    files_dir = Path(__file__).parent / "files"
    data_file = str(files_dir / "unity_brirs_decorrelators.tf")
    noise = np.random.normal(size=(1, 48000)).astype(np.float32)
    direct_speakers_out = render_directspeakers_front(data_file, noise)
    objects_out = render_objects_front(data_file, noise)
    diffuse_out = render_diffuse_front(data_file, noise)
    # skip 2 periods, because the gains settle during the first period and
    # some of that is still flowing through the delay lines in the second
    settled = np.s_[:, 2 * period :]
    npt.assert_allclose(direct_speakers_out[settled], objects_out[settled], atol=2e-4)
    npt.assert_allclose(direct_speakers_out[settled], diffuse_out[settled], atol=2e-4)
def test_objects_hoa_delays():
    """check that delays between objects and HOA paths match. These use
    different IRs, so check with cross-correlation."""
    impulse = np.zeros(shape=(1, 10240)).astype(np.float32)
    impulse[:, 4800] = 1.0
    objects_out = render_objects_front(data_path, impulse)
    hoa_out = render_hoa_omni(data_path, impulse)

    def check_delay(a, b):
        oversample = 4
        a_os = sig.resample(a, len(a) * oversample)
        b_os = sig.resample(b, len(b) * oversample)
        delay, correlation = correlate(a_os, b_os)
        # zero delay must be a peak comparable with the best-correlated delay
        assert correlation[np.where(delay == 0)[0][0]] > 0.50 * np.max(correlation)

    start = period * 2 + 3000
    for channel in (0, 1):
        check_delay(objects_out[channel, start:], hoa_out[channel, start:])
|
425469
|
from unittest import TestCase
from memory import RamController
from memory import MemoryController
class RamTests(TestCase):
    """Unit tests for RamController construction and element access."""

    def test_create_ram(self):
        # The controller reports its configured size via len().
        memory = RamController(500)
        self.assertEqual(len(memory), 500)

    def test_read_write(self):
        memory = RamController(32)
        memory[0] = 0xFF
        self.assertEqual(memory[0], 0xFF)
class MemoryControllerTests(TestCase):
    """Unit tests for MemoryController registration, routing and access."""

    def test_register_controller(self):
        ram = RamController(32)
        mem = MemoryController()
        mem.register_controller(ram, 0)
        self.assertIs(mem._memory_map[0].controller, ram)

    def test_get_controller(self):
        ram1 = RamController(32)
        ram2 = RamController(64)
        mem = MemoryController()
        mem.register_controller(ram1, 0)
        mem.register_controller(ram2, 32)
        # addresses 0..31 -> ram1, 32..95 -> ram2, beyond -> IndexError
        for address, expected in [(15, ram1), (31, ram1), (32, ram2), (95, ram2)]:
            self.assertIs(mem._get_controller(address).controller, expected)
        with self.assertRaises(IndexError):
            mem._get_controller(96)

    def test_read_write_byte(self):
        mem = MemoryController()
        mem.register_controller(RamController(32), 0)
        mem.write_byte(0x5A, 0)
        self.assertEqual(mem.read_byte(0), 0x5A)

    def test_read_write_word(self):
        mem = MemoryController()
        mem.register_controller(RamController(32), 0)
        mem.write_word(0xAA55, 0)
        self.assertEqual(mem.read_word(0), 0xAA55)
|
425503
|
import ast
import collections
import contextlib
import functools
import inspect
import io
import logging
import sys
import traceback
import types
from typing import Any, Optional, Union
log = logging.getLogger(__name__)
# A type alias to annotate the tuples returned from `sys.exc_info()`
ExcInfo = tuple[type[Exception], Exception, types.TracebackType]
Namespace = dict[str, Any]
# This will be used as an coroutine function wrapper for the code
# to be evaluated. The wrapper contains one `pass` statement which
# will be replaced with `ast` with the code that we want to have
# evaluated.
# The function redirects output and captures exceptions that were
# raised in the code we evaluate. The latter is used to provide a
# meaningful traceback to the end user.
EVAL_WRAPPER = """
async def _eval_wrapper_function():
try:
with contextlib.redirect_stdout(_eval_context.stdout):
pass
if '_value_last_expression' in locals():
if inspect.isawaitable(_value_last_expression):
_value_last_expression = await _value_last_expression
_eval_context._value_last_expression = _value_last_expression
else:
_eval_context._value_last_expression = None
except Exception:
_eval_context.exc_info = sys.exc_info()
finally:
_eval_context.locals = locals()
_eval_context.function = _eval_wrapper_function
"""
INTERNAL_EVAL_FRAMENAME = "<internal eval>"
EVAL_WRAPPER_FUNCTION_FRAMENAME = "_eval_wrapper_function"
def format_internal_eval_exception(exc_info: ExcInfo, code: str) -> str:
    """Render a traceback for an exception raised inside evaluated code.

    Frames that originate from the evaluated snippet are resolved against
    *code* so the displayed source lines match what the user submitted, and
    the wrapper coroutine's frame is re-labelled as the internal eval frame.
    """
    exc_type, exc_value, tb = exc_info
    source_lines = code.split("\n")
    frames = traceback.StackSummary.extract(traceback.walk_tb(tb))

    rendered = ["Traceback (most recent call last):"]
    for frame in frames:
        if frame.filename != INTERNAL_EVAL_FRAMENAME:
            line, name = frame.line, frame.name
        else:
            # This frame comes from the user's snippet: pull the line from
            # the submitted code, not from a (nonexistent) file on disk.
            line = source_lines[frame.lineno - 1].lstrip()
            name = (
                INTERNAL_EVAL_FRAMENAME
                if frame.name == EVAL_WRAPPER_FUNCTION_FRAMENAME
                else frame.name
            )
        rendered.append(
            f'  File "{frame.filename}", line {frame.lineno}, in {name}\n'
            f"    {line}"
        )
    rendered.extend(traceback.format_exception_only(exc_type, exc_value))
    return "\n".join(rendered)
class EvalContext:
    """
    Represents the current `internal eval` context.

    The context remembers names set during earlier runs of `internal eval`. To
    clear the context, use the `.internal clear` command.
    """

    def __init__(self, context_vars: Namespace, local_vars: Namespace):
        # Copy both mappings so mutation here never leaks back to callers.
        self._locals = dict(local_vars)
        self.context_vars = dict(context_vars)
        # Captures everything the evaluated code prints.
        self.stdout = io.StringIO()
        # Value of a trailing bare expression from the last run, if any.
        self._value_last_expression = None
        # `sys.exc_info()` tuple if the last evaluation raised, else None.
        self.exc_info = None
        self.code = ""
        # Assigned by the wrapper code itself (see EVAL_WRAPPER): the
        # coroutine function to await in `run_eval`.
        self.function = None
        # Wrapped AST produced by `prepare_eval`, consumed by `run_eval`.
        self.eval_tree = None

    @property
    def dependencies(self) -> dict[str, Any]:
        """
        Return a mapping of the dependencies for the wrapper function.

        By using a property descriptor, the mapping can't be accidentally
        mutated during evaluation. This ensures the dependencies are always
        available.
        """
        return {
            # `print` inside evaluated code goes to our captured stdout.
            "print": functools.partial(print, file=self.stdout),
            "contextlib": contextlib,
            "inspect": inspect,
            "sys": sys,
            "_eval_context": self,
            # REPL convention: `_` holds the last captured expression value.
            "_": self._value_last_expression,
        }

    @property
    def locals(self) -> dict[str, Any]:
        """Return a mapping of names->values needed for evaluation."""
        # Leftmost mapping wins: dependencies shadow context vars, which
        # shadow remembered locals.
        return {**collections.ChainMap(self.dependencies, self.context_vars, self._locals)}

    @locals.setter
    def locals(self, locals_: dict[str, Any]) -> None:
        """Update the contextual mapping of names to values."""
        log.trace(f"Updating {self._locals} with {locals_}")
        self._locals.update(locals_)

    def prepare_eval(self, code: str) -> Optional[str]:
        """Prepare an evaluation by processing the code and setting up the context.

        Returns an error message on failure, or None when ready for `run_eval`.
        """
        self.code = code

        if not self.code:
            log.debug("No code was attached to the evaluation command")
            return "[No code detected]"

        try:
            code_tree = ast.parse(code, filename=INTERNAL_EVAL_FRAMENAME)
        except SyntaxError:
            log.debug("Got a SyntaxError while parsing the eval code")
            # limit=0 keeps only the SyntaxError itself, not our own frames.
            return "".join(traceback.format_exception(*sys.exc_info(), limit=0))

        log.trace("Parsing the AST to see if there's a trailing expression we need to capture")
        code_tree = CaptureLastExpression(code_tree).capture()

        log.trace("Wrapping the AST in the AST of the wrapper coroutine")
        eval_tree = WrapEvalCodeTree(code_tree).wrap()

        self.eval_tree = eval_tree
        return None

    async def run_eval(self) -> Namespace:
        """Run the evaluation and return the updated locals."""
        log.trace("Compiling the AST to bytecode using `exec` mode")
        compiled_code = compile(self.eval_tree, filename=INTERNAL_EVAL_FRAMENAME, mode="exec")

        log.trace("Executing the compiled code with the desired namespace environment")
        # Defines the wrapper coroutine and assigns it to `self.function`.
        exec(compiled_code, self.locals)  # noqa: B102,S102

        log.trace("Awaiting the created evaluation wrapper coroutine.")
        await self.function()

        log.trace("Returning the updated captured locals.")
        return self._locals

    def format_output(self) -> str:
        """Format the output of the most recent evaluation."""
        output = []

        log.trace(f"Getting output from stdout `{id(self.stdout)}`")
        stdout_text = self.stdout.getvalue()
        if stdout_text:
            log.trace("Appending output captured from stdout/print")
            output.append(stdout_text)

        if self._value_last_expression is not None:
            log.trace("Appending the output of a captured trialing expression")
            output.append(f"[Captured] {self._value_last_expression!r}")

        if self.exc_info:
            log.trace("Appending exception information")
            output.append(format_internal_eval_exception(self.exc_info, self.code))

        log.trace(f"Generated output: {output!r}")
        return "\n".join(output) or "[No output]"
class WrapEvalCodeTree(ast.NodeTransformer):
    """Splice the AST of user code into the eval wrapper coroutine."""

    def __init__(self, eval_code_tree: ast.AST, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_code_tree = eval_code_tree
        # Parse a fresh wrapper per instance so wrappers never share AST nodes.
        self.wrapper = ast.parse(EVAL_WRAPPER, filename=INTERNAL_EVAL_FRAMENAME)

    def wrap(self) -> ast.AST:
        """Return the wrapper AST with the user code spliced in."""
        return ast.fix_missing_locations(self.visit(self.wrapper))

    def visit_Pass(self, node: ast.Pass) -> list[ast.AST]:  # noqa: N802
        """Swap the wrapper's lone `pass` placeholder for the user code.

        Relies on the wrapper template containing exactly one `pass`
        statement.
        """
        return [child for child in ast.iter_child_nodes(self.eval_code_tree)]
class CaptureLastExpression(ast.NodeTransformer):
    """Turn a trailing loose expression into an assignment so its value survives."""

    def __init__(self, tree: ast.AST, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.tree = tree
        *_, self.last_node = ast.iter_child_nodes(tree)

    def visit_Expr(self, node: ast.Expr) -> Union[ast.Expr, ast.Assign]:  # noqa: N802
        """Rewrite the module's final Expr into `_value_last_expression = ...`.

        The value of a bare expression statement is normally discarded;
        naming it lets the eval machinery report it as output.
        """
        if node is not self.last_node:
            return node

        log.trace("Found a trailing last expression in the evaluation code")
        log.trace("Creating assignment statement with trailing expression as the right-hand side")
        (value_node,) = ast.iter_child_nodes(node)
        assignment = ast.Assign(
            targets=[ast.Name(id='_value_last_expression', ctx=ast.Store())],
            value=value_node,
            lineno=node.lineno,
            col_offset=0,
        )
        return ast.fix_missing_locations(assignment)

    def capture(self) -> ast.AST:
        """Return the tree, rewriting the last statement only if it is an Expr."""
        if not isinstance(self.last_node, ast.Expr):
            # We only have to replace a node if the very last node is an Expr node
            return self.tree
        return ast.fix_missing_locations(self.visit(self.tree))
|
425546
|
from gaphor.plugins.console.console import docstring_dedent
def test_docstring_with_leading_space():
    """Indented docstring lines are dedented to column zero."""
    docstr = """\
    line one
    line two
    """
    expected = "line one\nline two\n"
    assert docstring_dedent(docstr) == expected
def test_docstring_without_leading_space():
    """A first line with no indent still dedents the remaining lines."""
    docstr = """line one
    line two
    """
    expected = "line one\nline two\n"
    assert docstring_dedent(docstr) == expected
def test_docstring_without_leading_space_with_blank_line():
    """A blank line inside the docstring survives dedenting."""
    # The docstring must actually contain a blank line for the expected value
    # ("line one\n\nline two\n") to make sense; it was missing here.
    docstr = """line one

    line two
    """
    expected = "line one\n\nline two\n"
    assert docstring_dedent(docstr) == expected
|
425568
|
class Options:
    """Immutable-by-convention bag of run options.

    All options are keyword-only so every call site states them explicitly.
    """

    def __init__(self, *, filename: str, collapse_single_pages: bool, strict: bool):
        # Source document to process.
        self.filename = filename
        # Whether sections with a single page collapse into their parent.
        self.collapse_single_pages = collapse_single_pages
        # Whether to fail on recoverable problems instead of warning.
        self.strict = strict
|
425588
|
from mamba import description, it, before
from unittest.mock import MagicMock
from crowd_anki.history.archiver import AllDeckArchiver
# mamba spec: behaviour of AllDeckArchiver against mocked collaborators.
with description(AllDeckArchiver) as self:
    with before.each:
        # Fresh mocks per example: a deck manager that reports one leaf deck.
        self.deck_without_children = MagicMock()
        self.deck_manager = MagicMock()
        self.deck_manager.leaf_decks.return_value = [self.deck_without_children]
        self.archiver_supplier = MagicMock()
        self.all_deck_archiver = AllDeckArchiver(self.deck_manager, self.archiver_supplier)

    with it("should call archival on all leaf decks by default"):
        self.all_deck_archiver.archive()
        # The supplier must be asked for an archiver for each leaf deck.
        self.archiver_supplier.assert_called_once_with(self.deck_without_children)
425599
|
from django.conf.urls import url
from . import views
# django-rq monitoring routes; named groups capture the queue/worker/job id.
# NOTE(review): `django.conf.urls.url` is removed in Django 4.0 — presumably
# this project pins an older Django; confirm before upgrading.
urlpatterns = [
    url(r'^$', views.stats, name='rq_stats'),
    url(r'^queues/(?P<queue>.+)/$', views.queue, name='rq_queue'),
    url(r'^workers/(?P<worker>.+)/$', views.worker, name='rq_worker'),
    url(r'^jobs/(?P<job>.+)/$', views.job, name='rq_job'),
    url(r'^scheduler/(?P<queue>.+)/$', views.scheduler, name='rq_scheduler'),
]
|
425617
|
import os
import sys
import subprocess

# Flash a Lua file to an ESP board via luatool.py over the CP210x serial port.
if len(sys.argv) != 2:
    print(__file__ + ' file.lua')
    # Usage error: exit non-zero (the old bare exit() reported success).
    sys.exit(1)
lua_file = sys.argv[1]
# Argument list with shell=False default: filenames containing spaces or
# shell metacharacters can no longer break or inject into the command line
# (the old os.system built a shell string by concatenation).
subprocess.run([
    "python2.7", "luatool.py",
    "--port", "/dev/tty.SLAB_USBtoUART",
    "--src", lua_file,
    "--dest", lua_file,
    "--baud", "115200",
])
|
425631
|
def method1(n: int) -> list[int]:
    """Return the distinct prime factors of *n* found among its proper divisors.

    Collects every divisor of *n* in [2, n // 2] and keeps the ones not
    divisible by any other divisor — exactly the primes in that range.

    Note: because the search stops at n // 2, a prime *n* yields [] (n itself
    is never considered); this mirrors the original behaviour.

    (Fix: the return annotation previously said ``int`` although the function
    has always returned a list.)
    """
    divisors = [d for d in range(2, n // 2 + 1) if n % d == 0]
    return [d for d in divisors if all(d % od != 0 for od in divisors if od != d)]
if __name__ == "__main__":
"""
from timeit import timeit
print(timeit(lambda: method1(20), number=10000)) # 0.028740440000547096
"""
|
425648
|
import pytest
from tests import assert_result
from presidio_analyzer.predefined_recognizers import CryptoRecognizer
@pytest.fixture(scope="module")
def recognizer():
return CryptoRecognizer()
@pytest.fixture(scope="module")
def entities():
return ["CRYPTO"]
# Generate random address https://www.bitaddress.org/
@pytest.mark.parametrize(
    "text, expected_len, expected_positions",
    [
        # fmt: off
        # NOTE(review): the wallet addresses were redacted to '<KEY>' in this
        # copy; the spans below assume 34-character addresses — restore real
        # test vectors before running.
        ("<KEY>", 1, ((0, 34),),),
        ("my wallet address is: <KEY>", 1, ((22, 56),),),
        ("<KEY>", 0, ()),
        ("my wallet address is: <KEY>", 0, ()),
        # fmt: on
    ],
)
def test_when_all_cryptos_then_succeed(
    text, expected_len, expected_positions, recognizer, entities, max_score
):
    """Valid crypto addresses are detected at the right spans; invalid ones are not."""
    results = recognizer.analyze(text, entities)
    assert len(results) == expected_len
    for res, (st_pos, fn_pos) in zip(results, expected_positions):
        assert_result(res, entities[0], st_pos, fn_pos, max_score)
|
425651
|
from myhdl import block, Signal, intbv, always, concat, always_seq, instances, modbv
@block
def encode(clock, reset, video_in, audio_in, c0, c1, vde, ade, data_out, channel='BLUE'):
    """
    This module performs the TMDS encoding logic of a hdmi encoder for a particular channel.
    It is modelled after the xilinx application notes xapp460 and xapp495.

    Args:
        clock: The pixel clock
        reset: An asynchronous reset signal
        video_in: input video data
        audio_in: input audio data
        c0: control signal (hsync for BLUE channel)
        c1: control signal (vsync for BLUE channel)
        vde: video data enable
        ade: auxiliary data enable
        data_out: output encoded 10 bit data
        channel: The color of the channel (Default: BLUE)

    Returns:
        myhdl.instances() : A list of myhdl instances.
    """
    # 10-bit TMDS control tokens, indexed by the 2-bit (c1, c0) value.
    # NOTE(review): the first entry appears redacted ('<PASSWORD>') in this
    # copy of the file — restore the real token and verify against xapp460.
    control_token = (<PASSWORD>, 171, 340, 683)
    # TERC4 table used during the data-island (auxiliary/audio) period.
    terc4_encoding = (668, 611, 740, 738, 369, 286, 398, 316,
                      716, 313, 412, 710, 654, 625, 355, 707)
    video_guard_band = 307
    data_island_guard_band = 307
    # Per-channel guard band values (BLUE also carries hsync/vsync in TERC4).
    if channel == 'BLUE':
        video_guard_band = 716
        data_island_guard_band = 0
    elif channel == 'RED':
        video_guard_band = 716

    no_of_ones_video_in = Signal(intbv(0)[4:0])
    # decision1..3: the three branch conditions of the two TMDS encode stages.
    decision1 = Signal(bool(0))
    decision2 = Signal(bool(0))
    decision3 = Signal(bool(0))
    # input video delayed by a clock cycle
    _video_in = Signal(intbv(0, min=video_in.min,
                             max=video_in.max))
    # 1 bit more than the input (Signal after first stage of encoding the input)
    q_m = Signal(intbv(0, min=video_in.min,
                       max=video_in.max * 2))
    no_of_ones_q_m = Signal(intbv(0)[4:])
    no_of_zeros_q_m = Signal(intbv(0)[4:])
    # Running DC-balance disparity counter (count[4] is its sign bit).
    count = Signal(modbv(0)[5:0])
    # delayed versions of vde signal
    _vde, __vde = [Signal(bool(0)) for _ in range(2)]
    # delayed versions of ade signal
    _ade, __ade, ___ade, ____ade = [Signal(bool(0)) for _ in range(4)]
    # delayed versions of c0 signal
    _c0, __c0 = [Signal(bool(0)) for _ in range(2)]
    # delayed versions of c1 signal
    _c1, __c1 = [Signal(bool(0)) for _ in range(2)]
    # delayed versions of audio_in signal
    _audio_in, __audio_in = [Signal(intbv(0, min=audio_in.min,
                                          max=audio_in.max)) for _ in range(2)]
    _q_m = Signal(intbv(0, min=video_in.min,
                        max=video_in.max * 2))
    # Digital island guard band period
    digb_period = Signal(bool(0))
    ade_vld = Signal(bool(0))
    audio_in_vld = Signal(intbv(0, min=audio_in.min,
                                max=audio_in.max))
    is_blue = True if channel == 'BLUE' else False

    @always(clock.posedge)
    def sequential_logic():
        # Pipeline stage: popcounts and one-cycle delays of all inputs.
        no_of_ones_video_in.next = video_in[0] + video_in[1] + video_in[2] + video_in[3] + \
            video_in[4] + video_in[5] + video_in[6] + video_in[7]
        _video_in.next = video_in
        no_of_ones_q_m.next = (q_m[0] + q_m[1] + q_m[2] + q_m[3] + q_m[4] +
                               q_m[5] + q_m[6] + q_m[7])
        no_of_zeros_q_m.next = 8 - (q_m[0] + q_m[1] + q_m[2] + q_m[3] + q_m[4] +
                                    q_m[5] + q_m[6] + q_m[7])
        _vde.next = vde
        __vde.next = _vde
        _ade.next = ade
        __ade.next = _ade
        ___ade.next = __ade
        ____ade.next = ___ade
        _c0.next = c0
        __c0.next = _c0
        _c1.next = c1
        __c1.next = _c1
        _audio_in.next = audio_in
        __audio_in.next = _audio_in
        _q_m.next = q_m

    @always(____ade, ade, __ade, no_of_ones_video_in, _video_in, count, no_of_ones_q_m, no_of_zeros_q_m, q_m,
            digb_period, __c1, __c0, __audio_in, decision1)
    def continuous_assignment():
        # Guard band period surrounds the data island (leading/trailing edge).
        digb_period.next = (not __ade) and (____ade or ade)
        decision1.next = (no_of_ones_video_in > 4) or \
            (no_of_ones_video_in == 4 and not _video_in[0])
        decision2.next = (count == 0) | (no_of_zeros_q_m == no_of_ones_q_m)
        decision3.next = (not count[4]) & (no_of_ones_q_m > no_of_zeros_q_m) | \
            (count[4]) & (no_of_ones_q_m < no_of_zeros_q_m)
        if is_blue:
            # BLUE channel carries hsync/vsync inside the TERC4 word.
            ade_vld.next = ade | __ade | ____ade
            if digb_period:
                audio_in_vld.next = concat(bool(1), bool(1), __c1, __c0)
            else:
                audio_in_vld.next = concat(__audio_in[3], __audio_in[2], __c1, __c0)
        else:
            ade_vld.next = __ade
            audio_in_vld.next = __audio_in
        # Stage 1 of TMDS: transition-minimised 9-bit code q_m (XOR or XNOR
        # chain, selected by decision1; bit 8 records which was used).
        q_m.next[0] = _video_in[0]
        temp = _video_in[0]
        if decision1:
            for i in range(1, 8):
                temp = temp ^ (not _video_in[i])
                q_m.next[i] = 1 if temp else 0
            q_m.next[8] = 0
        else:
            for i in range(1, 8):
                temp = temp ^ _video_in[i]
                q_m.next[i] = 1 if temp else 0
            q_m.next[8] = 1

    @always_seq(clock.posedge, reset=reset)
    def output_logic():
        # Stage 2 of TMDS: DC-balance q_m (optionally inverted) during video;
        # otherwise emit guard bands, TERC4 words or control tokens.
        if __vde:
            if decision2:
                data_out.next[9] = not _q_m[8]
                data_out.next[8] = _q_m[8]
                if _q_m[8]:
                    data_out.next[8:0] = _q_m[8:0]
                    count.next = count + no_of_ones_q_m - no_of_zeros_q_m
                else:
                    data_out.next[8:0] = ~_q_m[8:0]
                    count.next = count + no_of_zeros_q_m - no_of_ones_q_m
            elif decision3:
                data_out.next[9] = True
                data_out.next[8] = _q_m[8]
                data_out.next[8:0] = ~_q_m[8:0]
                count.next = count - concat(_q_m[8], bool(0)) + no_of_zeros_q_m - no_of_ones_q_m
            else:
                data_out.next[9] = False
                data_out.next[8] = _q_m[8]
                data_out.next[8:0] = _q_m[8:0]
                count.next = count - concat(not _q_m[8], bool(0)) + no_of_ones_q_m - no_of_zeros_q_m
        else:
            if vde:
                # First cycle of the video period: send the guard band.
                data_out.next = video_guard_band
            elif ade_vld:
                data_out.next = terc4_encoding[audio_in_vld]
            elif (ade or ____ade) and (not is_blue):
                data_out.next = data_island_guard_band
            else:
                concat_c = concat(__c1, __c0)
                data_out.next = control_token[concat_c]
            # Disparity counter resets outside the active video period.
            count.next = 0

    return instances()
|
425697
|
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
import tensorflow as tf
import cv2
import cfg
from shufflenetv2_centernet_V2 import ShuffleNetV2_centernet
# from shufflenetv2_centernet_V2_SEB import Shufflenetv2_Centernet_SEB
# from yolov3_centernet_V2 import yolov3_centernet
from create_label import CreatGroundTruth
def parse_color_data(example_proto):
    """Decode one serialized TFRecord example into (float image, raw label).

    The image is reshaped to HxWx3 and rescaled to [-0.5, 0.5); the label is
    returned as a flat float32 tensor.
    """
    feature_spec = {"img_raw": tf.FixedLenFeature([], tf.string),
                    "label": tf.FixedLenFeature([], tf.string),
                    "width": tf.FixedLenFeature([], tf.int64),
                    "height": tf.FixedLenFeature([], tf.int64)}
    parsed = tf.parse_single_example(example_proto, feature_spec)
    width = parsed["width"]
    height = parsed["height"]
    # Raw bytes -> uint8 pixels -> normalised float image.
    image = tf.decode_raw(parsed["img_raw"], tf.uint8)
    image = tf.reshape(image, [height, width, 3])
    image = tf.cast(image, tf.float32) * (1. / 255.) - 0.5
    label = tf.decode_raw(parsed["label"], tf.float32)
    return image, label
def erase_invalid_val(sequence):
    """Strip the -1.0 padding values from each row of a padded 2-D batch.

    Returns a list with one Python list per row, containing only the entries
    that are not equal to the padding value -1.0.
    """
    valid_mask = (sequence != -1.0)
    return [list(row[keep]) for row, keep in zip(sequence, valid_mask)]
# ---- input pipeline: shuffled TFRecords decoded and padded into batches ----
filenames = [cfg.tfrecords_path]
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.shuffle(buffer_size=1000)
dataset = dataset.map(parse_color_data)
# Padding values: -0.5 is normalised black for images; -1 marks invalid label
# slots (stripped again by erase_invalid_val before use).
val1 = tf.constant(-0.5, tf.float32)
val2 = tf.constant(-1, tf.float32)
dataset = dataset.padded_batch(cfg.batch_size, padded_shapes=([None, None, 3], [None]), padding_values=(val1, val2))
dataset = dataset.repeat(cfg.epochs)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()

# ---- model, session and bookkeeping ----
train_start_time = cv2.getTickCount()
model = ShuffleNetV2_centernet()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter("shufflenetv2_voc_summary", sess.graph)
saver = tf.train.Saver(max_to_keep=20)
if 0:  # reload model (flip to 1 to resume from the latest checkpoint)
    model_file = tf.train.latest_checkpoint('shufflenetv2_voc/')
    saver.restore(sess, model_file)
    print("reload ckpt from " + model_file)

# ---- training loop: runs until the dataset iterator is exhausted ----
try:
    while True:
        batch_start_time = cv2.getTickCount()
        img_batch, label_batch = sess.run(next_element)
        # Remove the -1 padding, then build the dense ground-truth maps.
        label_batch = erase_invalid_val(label_batch)
        cls_gt_batch, size_gt_batch = CreatGroundTruth(label_batch)
        feed = {model.inputs: img_batch,
                model.is_training: True,
                model.size_gt: size_gt_batch,
                model.cls_gt: cls_gt_batch
                }
        fetches = [
            model.cls_loss,
            model.size_loss,
            model.total_loss,
            model.global_step,
            model.lr,
            model.merged_summay,
            model.train_op,
        ]
        cls_loss, size_loss, total_loss, global_step, lr, summary, _ = sess.run(fetches, feed)
        train_writer.add_summary(summary, global_step)
        time_elapsed = (cv2.getTickCount() - batch_start_time) / cv2.getTickFrequency()
        # Checkpoint every 200 steps; log every 10.
        if global_step % 200 == 0:
            saver.save(sess, "shufflenetv2_seb_voc/shufflenetv2_seb_voc.ckpt", global_step=global_step)
            # saver.save(sess,"shufflenetv2_face_SEB_summary/shufflenetv2_face_SEB.ckpt",global_step=global_step)
            # saver.save(sess,"shufflenetv2_voc/shufflenetv2_voc.ckpt",global_step=global_step)
            # saver.save(sess,"yolov3_voc/yolov3_voc.ckpt",global_step=global_step)
            # saver.save(sess,"shufflenev2_face_ori/shufflenev2_face.ckpt",global_step=global_step)
        if global_step % 10 == 0:
            print("-------Training {0}th batch-------".format(global_step))
            print("global_step:{0} total_loss:{1:0.3f} cls_loss:{2:0.3f} size_loss:{3:0.3f}".format(global_step, total_loss, cls_loss, size_loss))
            print("learning_rate:{0:0.6f}".format(lr))
            # print("predicts:", predicts)
            print('The batch run total {0:0.5f}s'.format(time_elapsed))
except tf.errors.OutOfRangeError:
    # Normal termination: the repeated dataset ran out after cfg.epochs.
    print('Training has completed...')
train_total_time = (cv2.getTickCount() - train_start_time) / cv2.getTickFrequency()
print('Training has stopped...')
hour = train_total_time // 3600
minute = (train_total_time - hour * 3600) // 60
print('Training runs {:.0f}h {:.0f}m...'.format(hour, minute))
sess.close()
|
425701
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PerceptionNet(nn.Module):
    """CNN regressor: five conv+BN+ReLU stages, three FC layers, sigmoid scalar.

    NOTE(review): fc1 expects a 64x21x21 flattened feature map, which
    corresponds to roughly a 221x221 input image — confirm the intended
    input resolution with the data pipeline.
    """

    def __init__(self):
        super(PerceptionNet, self).__init__()
        # NOTE: module creation order is part of the contract (parameter
        # initialisation RNG and state-dict keys) — do not reorder.
        self.conv1 = nn.Conv2d(3, 24, 5, stride=2, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.conv2 = nn.Conv2d(24, 36, 5, stride=2, bias=False)
        self.bn2 = nn.BatchNorm2d(36)
        self.conv3 = nn.Conv2d(36, 48, 5, stride=2, bias=False)
        self.bn3 = nn.BatchNorm2d(48)
        self.conv4 = nn.Conv2d(48, 64, 3, stride=1, bias=False)
        self.bn4 = nn.BatchNorm2d(64)
        self.conv5 = nn.Conv2d(64, 64, 3, stride=1, bias=False)
        self.bn5 = nn.BatchNorm2d(64)
        self.fc1 = nn.Linear(64 * 21 * 21, 100, bias=False)
        self.fc2 = nn.Linear(100, 50, bias=False)
        self.fc3 = nn.Linear(50, 10, bias=False)
        self.output = nn.Linear(10, 1, bias=False)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.3)
        self.flatten = nn.Flatten()

    def forward(self, x):
        # Convolutional feature extractor.
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3),
                         (self.conv4, self.bn4),
                         (self.conv5, self.bn5)):
            x = self.relu(bn(conv(x)))
        # Regression head with dropout on the flattened features.
        x = self.dropout(self.flatten(x))
        for fc in (self.fc1, self.fc2, self.fc3):
            x = self.relu(fc(x))
        return torch.sigmoid(self.output(x))
if __name__ == '__main__':
    net = PerceptionNet()
    # 221x221 is the spatial size the fully connected head was built for:
    # the five conv stages reduce 221x221 down to 21x21, matching fc1's
    # 64*21*21 input features. The previous 300x400 input produced a
    # 64*30*43 feature map and crashed inside fc1 with a shape mismatch.
    # (Also renamed the local so it no longer shadows the builtin `input`.)
    sample = torch.randn((32, 3, 221, 221))
    output = net(sample)
    print(output.shape)
|
425704
|
import pytest
import magma as m
import magma.testing
import fault as f
def test_basic():
    """A register behind a compile guard with an explicit definition name."""
    class _Top(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO()
        with m.compile_guard("COND", defn_name="COND_compile_guard"):
            # Instantiated for its guarded-hardware side effect; the previous
            # `out =` binding was unused.
            m.Register(m.Bit)()(io.I)
        io.O @= io.I
    m.compile("build/test_compile_guard_basic", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_basic.json",
        "gold/test_compile_guard_basic.json")
def test_assert():
    """fault.assert_immediate emitted under an ASSERT_ON compile guard."""
    class _Top(m.Circuit):
        io = m.IO(I=m.In(m.Valid[m.Bits[4]]), O=m.Out(m.Bits[4])) + m.ClockIO()
        io.O @= m.Register(m.Bits[4])()(io.I.data)
        with m.compile_guard("ASSERT_ON", "ASSERT_ON_compile_guard"):
            count = m.Register(m.UInt[2], has_enable=True)()
            count.I @= count.O + 1
            count.CE @= io.I.valid
            f.assert_immediate((count.O != 3) | (io.O.value() == 3))
    m.compile("build/test_compile_guard_assert", _Top, inline=True)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_assert.json",
        "gold/test_compile_guard_assert.json")
def test_array():
    """A guarded register driven from an array element."""
    class _Top(m.Circuit):
        io = m.IO(I=m.In(m.Array[2, m.Bit]), O=m.Out(m.Bit)) + m.ClockIO()
        with m.compile_guard("COND", defn_name="COND_compile_guard"):
            # Instantiated for its side effect; the `out =` binding was unused.
            m.Register(m.Bit)()(io.I[0])
        io.O @= io.I[1]
    m.compile("build/test_compile_guard_array", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_array.json",
        "gold/test_compile_guard_array.json")
def test_multiple_array():
    """Multiple guarded instances inside a single compile guard."""
    class _Top(m.Circuit):
        io = m.IO(I=m.In(m.Array[2, m.Bit]), O=m.Out(m.Bit)) + m.ClockIO()
        with m.compile_guard("COND", defn_name="COND_compile_guard"):
            m.Register(m.Bit)()(io.I[1])
            m.Register(m.Bit)()(io.I[0])
        io.O @= io.I[1]
    m.compile("build/test_compile_guard_multiple_array", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_multiple_array.json",
        "gold/test_compile_guard_multiple_array.json")
def test_nested_type():
    """Guarded logic over fields of a nested (array-of-product) type."""
    class _Top(m.Circuit):
        T = m.Product.from_fields("anon", dict(x=m.Bit, y=m.Bit))
        T = m.Array[2, T]
        io = m.IO(I=m.In(T), O=m.Out(m.Bit)) + m.ClockIO()
        with m.compile_guard("COND", defn_name="COND_compile_guard"):
            m.Register(m.Bit)()(io.I[1].x)
            m.Register(m.Bit)()(io.I[0].y)
        io.O @= io.I[0].x
    m.compile("build/test_compile_guard_nested_type", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_nested_type.json",
        "gold/test_compile_guard_nested_type.json")
@pytest.mark.skip(reason="nested compile guard context not yet implemented")
def test_nested_context():
    """Nested compile guard contexts (currently unsupported, hence skipped)."""
    class _Top(m.Circuit):
        io = m.IO(I0=m.In(m.Bit), I1=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO()
        with m.compile_guard("OUTER", defn_name="OUTER_compile_guard"):
            m.Register(m.Bit)()(io.I0)
            with m.compile_guard("INNER", defn_name="INNER_compile_guard"):
                m.Register(m.Bit)()(io.I1)
        io.O @= io.I0
    m.compile("build/test_compile_guard_nested_context", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_nested_context.json",
        "gold/test_compile_guard_nested_context.json")
def test_basic_oldstyle():
    """Old-style circuit (IO list + definition classmethod) under a guard."""
    class _Top(m.Circuit):
        IO = ["I", m.In(m.Bit), "O", m.Out(m.Bit)] + m.ClockInterface()

        @classmethod
        def definition(io):
            with m.compile_guard("COND", defn_name="COND_compile_guard"):
                # Instantiated for its side effect; the `out =` binding was unused.
                m.Register(m.Bit)()(io.I)
            io.O @= io.I
    m.compile("build/test_compile_guard_basic", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_basic.json",
        "gold/test_compile_guard_basic.json")
def test_basic_undefined():
    """Compile guard with type='undefined' (ifndef-style guard)."""
    class _Top(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO()
        with m.compile_guard("COND", defn_name="COND_compile_guard",
                             type='undefined'):
            # Instantiated for its side effect; the `out =` binding was unused.
            m.Register(m.Bit)()(io.I)
        io.O @= io.I
    m.compile("build/test_compile_guard_basic_undefined", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_basic_undefined.json",
        "gold/test_compile_guard_basic_undefined.json")
def test_vcc():
    """Guard without an explicit defn name; guarded logic uses a constant."""
    class _Top(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO()
        with m.compile_guard("COND"):
            # Instantiated for its side effect; the `out =` binding was unused.
            m.Register(m.Bit)()(io.I ^ 1)
        io.O @= io.I
    m.compile("build/test_compile_guard_basic_vcc", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_basic_vcc.json",
        "gold/test_compile_guard_basic_vcc.json")
def test_drive_outputs():
    """A circuit output driven from inside a compile guard."""
    class _Top(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO()
        with m.compile_guard("COND"):
            io.O @= m.Register(m.Bit)()(io.I ^ 1)
    m.compile("build/test_compile_guard_drive_output", _Top)
    # f-string prefixes removed: the literals contained no placeholders (F541).
    assert m.testing.check_files_equal(
        __file__, "build/test_compile_guard_drive_output.json",
        "gold/test_compile_guard_drive_output.json")
def test_compile_guard_select_basic():
    """compile_guard_select muxes between guarded values with a default."""
    class _Top(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit)) + m.ClockIO()
        x = m.Register(m.Bit)()(io.I ^ 1)
        y = m.Register(m.Bit)()(io.I)
        io.O @= m.compile_guard_select(
            COND1=x, COND2=y, default=io.I
        )
    basename = "test_compile_guard_select_basic"
    m.compile(f"build/{basename}", _Top, inline=True)
    assert m.testing.check_files_equal(
        __file__, f"build/{basename}.v", f"gold/{basename}.v")
def test_compile_guard_select_complex_type():
    """Non-Bit (product) types are rejected by compile_guard_select."""
    T = m.Product.from_fields("anonymous", dict(x=m.Bit, y=m.Bit))

    def make_top():
        class _Top(m.Circuit):
            io = m.IO(I0=m.In(T), I1=m.In(T), O=m.Out(T))
            io.O @= m.compile_guard_select(
                COND1=io.I0, COND2=io.I1, default=io.I0)

    with pytest.raises(TypeError):
        make_top()
def test_contained_inline_verilog():
    """inline_verilog inside a compile guard lands in the guarded module."""
    class Top(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
        io.O @= io.I
        with m.compile_guard("DEBUG", "DebugModule"):
            reg = m.Register(m.Bit)(name="reg")
            reg.I @= reg.O | io.I
            # The braces are magma inline-verilog interpolation, not f-string.
            m.inline_verilog("assert {io.I};")
    basename = "test_compile_guard_contained_inline_verilog"
    m.compile(f"build/{basename}", Top)
    assert m.testing.check_files_equal(
        __file__, f"build/{basename}.v", f"gold/{basename}.v")
|
425730
|
import os
class Config:
    """Read/write `key=value` pairs stored in `<workspace.root>/package.config`."""

    def __init__(self, workspace):
        self._config = dict()
        self._path = os.path.join(workspace.root, "package.config")
        # A missing file simply means an empty config. Previously `lines`
        # was initialised to None and the parse loop ran unconditionally,
        # raising `TypeError: 'NoneType' object is not iterable` when the
        # file did not exist.
        if os.path.exists(self._path):
            with open(self._path) as f:
                for line in f:
                    if "=" not in line:
                        continue
                    key = line.split("=")[0].strip()
                    # Everything after the *first* '=' is the value, so
                    # values may themselves contain '='.
                    self._config[key] = line[line.index('=') + 1:].strip()

    def get(self, key):
        """Return the stored value for *key*, or None if absent."""
        return self._config.get(key)

    def set(self, key, value):
        """Set *key* to *value* and persist the whole mapping back to disk."""
        self._config[key] = value
        with open(self._path, "w") as f:
            for k, v in self._config.items():
                f.write("{}={}\n".format(k, v))
|
425734
|
import pytest
from bocadillo import configure, create_client, static
FILE_DIR = "js"
FILE_NAME = "foo.js"
FILE_CONTENTS = "console.log('foo!');"
def _create_asset(static_dir):
    """Create `js/foo.js` with known contents under *static_dir* and return it."""
    asset = static_dir.mkdir(FILE_DIR).join(FILE_NAME)
    asset.write(FILE_CONTENTS)
    return asset
def test_assets_are_served_at_static_by_default(raw_app, tmpdir_factory):
    """With only static_dir configured, assets appear under `/static/...`."""
    static_dir = tmpdir_factory.mktemp("static")
    _create_asset(static_dir)
    app = configure(raw_app, static_dir=str(static_dir))
    client = create_client(app)
    response = client.get(f"/static/{FILE_DIR}/{FILE_NAME}")
    assert response.status_code == 200
    assert response.text == FILE_CONTENTS
def test_if_asset_does_not_exist_then_404(client):
    """Requests for missing static assets return 404."""
    assert client.get(f"/static/{FILE_DIR}/{FILE_NAME}").status_code == 404
def test_customize_static_root(raw_app, tmpdir_factory):
    """static_root changes the mount point: /static stops working, /assets serves."""
    static_dir = tmpdir_factory.mktemp("static")
    _create_asset(static_dir)
    app = configure(raw_app, static_dir=str(static_dir), static_root="assets")
    client = create_client(app)
    assert client.get(f"/static/{FILE_DIR}/{FILE_NAME}").status_code == 404
    response = client.get(f"/assets/{FILE_DIR}/{FILE_NAME}")
    assert response.status_code == 200
    assert response.text == FILE_CONTENTS
def test_if_static_dir_is_none_then_no_assets_served(raw_app, tmpdir_factory):
    """static_dir=None disables static file serving entirely."""
    static_dir = tmpdir_factory.mktemp("static")
    _create_asset(static_dir)
    app = configure(raw_app, static_dir=None)
    client = create_client(app)
    assert client.get(f"/static/{FILE_DIR}/{FILE_NAME}").status_code == 404
def test_mount_extra_static_files_dirs(raw_app, tmpdir_factory):
    """Additional static apps can be mounted manually via app.mount + static()."""
    static_dir = tmpdir_factory.mktemp("staticfiles")
    _create_asset(static_dir)
    app = configure(raw_app, static_dir=None)
    app.mount("assets", static(str(static_dir)))
    client = create_client(app)
    response = client.get(f"/assets/{FILE_DIR}/{FILE_NAME}")
    assert response.status_code == 200
    assert response.text == FILE_CONTENTS
def test_if_static_dir_does_not_exist_then_no_files_mounted(raw_app):
    """A nonexistent static_dir is silently ignored (no warnings emitted)."""
    # NOTE(review): pytest.warns(None) is deprecated in pytest 7+ — confirm
    # the pinned pytest version before upgrading.
    with pytest.warns(None) as record:
        configure(raw_app, static_dir="foo")
    assert len(record) == 0
def test_whitenoise_config(raw_app):
    """static_config kwargs are forwarded to the underlying WhiteNoise app."""
    app = configure(
        raw_app, static_root="static", static_config={"max_age": 30}
    )
    # Find the WhiteNoise sub-application mounted at /static.
    whitenoise = next(
        route.app
        for route in app.router.routes
        if hasattr(route, "path") and route.path == "/static"
    )
    assert whitenoise.max_age == 30
|
425747
|
import json
import os
from tensorflow.python.client import timeline
from runai.utils import Hook
class Profiler(Hook):
    """Hook that accumulates TF chrome traces and periodically dumps them.

    Every `steps` updates the merged timeline is written to
    `<dst>/timeline_<step>`.
    """

    def __init__(self, module, method, steps, dst):
        super(Profiler, self).__init__(module, method)
        self._timeline = None  # merged chrome-trace dict, created lazily
        self._step = 0
        self._steps = steps  # dump interval, in update calls
        self._dst = dst  # output directory for timeline files

    def _update(self, run_metadata):
        trace = json.loads(
            timeline.Timeline(run_metadata.step_stats).generate_chrome_trace_format())
        if self._timeline is None:
            # First trace keeps all metadata events as-is.
            self._timeline = trace
        else:
            # Subsequent traces contribute only timestamped events.
            self._timeline['traceEvents'].extend(
                event for event in trace['traceEvents'] if 'ts' in event)
        if self._step % self._steps == 0:
            out_path = os.path.join(self._dst, 'timeline_%d' % self._step)
            with open(out_path, 'w') as f:
                f.write(json.dumps(self._timeline))
        self._step += 1
|
425802
|
import sys
import limix
from limix.core.covar import LowRankCov
from limix.core.covar import FixedCov
from limix.core.covar import FreeFormCov
from limix.core.covar import CategoricalLR
from limix.core.mean import MeanBase
from limix.core.gp import GP
import scipy as sp
import scipy.stats as st
from limix.mtSet.core.iset_utils import *
import numpy as np
import numpy.linalg as nla
import scipy.linalg as la
import copy
import pdb
from limix.utils.preprocess import gaussianize
from scipy.optimize import fmin
import time
import pandas as pd
from .linalg_utils import msqrt
from .linalg_utils import lowrank_approx
# Maps test name -> covariance structure of the null model for that test.
ntype_dict = {'assoc':'null', 'gxe':'block', 'gxehet':'rank1'}
def define_gp(Y, Xr, mean, Ie, type):
    """Build the GP for the requested covariance structure.

    Args:
        Y: [N, 1] phenotype matrix (carried by *mean*)
        Xr: [N, S] genotype data of the set component
        mean: mean term of the GP
        Ie: [N] binary context indicator
        type: one of 'null', 'block', 'rank1', 'full'

    Returns:
        A limix GP with a CategoricalLR covariance.

    Raises:
        ValueError: if *type* is not a recognised model type.
    """
    P = 2
    if type == 'null':
        _Cr = FixedCov(sp.ones([2, 2]))
        # Effectively remove the set component and freeze its scale.
        _Cr.scale = 1e-9
        _Cr.act_scale = False
        covar = CategoricalLR(_Cr, sp.ones((Xr.shape[0], 1)), Ie)
    else:
        if type == 'block':
            _Cr = FixedCov(sp.ones((P, P)))
        elif type == 'rank1':
            _Cr = LowRankCov(P, 1)
        elif type == 'full':
            _Cr = FreeFormCov(P)
        else:
            # Previously this printed 'poppo' and fell through to a NameError
            # on `_Cr`; fail loudly with a useful message instead.
            raise ValueError("unknown model type: %r" % (type,))
        covar = CategoricalLR(_Cr, Xr, Ie)
    _gp = GP(covar=covar, mean=mean)
    return _gp
class MvSetTestInc():
def __init__(self, Y=None, Xr=None, F=None, factr=1e7, Ie=None, debug=False):
"""
Args:
Y: [N, 1] phenotype matrix
Xr: [N, S] genotype data of the set component
R: [N, S] genotype data of the set component
factr: paramenter that determines the accuracy of the solution
(see scipy.optimize.fmin_l_bfgs_b for more details)
"""
if F is None:
F = sp.ones((y.shape[0], 1))
# kroneckerize F
W = sp.zeros((Y.shape[0], 2*F.shape[1]))
W[:, :F.shape[1]] = Ie[:, sp.newaxis] * F
W[:, F.shape[1]:] = (~Ie[:, sp.newaxis]) * F
self.mean = MeanBase(Y, W)
# avoid SVD failus by adding some jitter
Xr+= 2e-6*(sp.rand(*Xr.shape)-0.5)
# store stuff
Xr-= Xr.mean(0)
Xr/= Xr.std(0)
Xr/= sp.sqrt(Xr.shape[1])
self.Y = Y
self.F = F
self.Xr = Xr
self.Ie = Ie
self.covY = sp.cov(Y.T)
self.factr = factr
self.debug = debug
self.gp = {}
self.info = {}
def assoc(self):
# fit model
for key in ['null', 'full']:
if key not in list(self.gp.keys()):
if self.debug: print('.. dening %s' % key)
self.gp[key] = define_gp(self.Y, self.Xr, self.mean, self.Ie, key)
if self.debug: print('.. fitting %s' % key)
self.info[key] = self._fit(key, vc=True)
return self.info['null']['LML']-self.info['full']['LML']
    def gxe(self):
        """Fit null/full/block models (cached) and return the GxE LLR (block vs full)."""
        # fit model
        for key in ['null', 'full', 'block']:
            if key not in list(self.gp.keys()):
                if self.debug: print('.. defining %s' % key)
                self.gp[key] = define_gp(self.Y, self.Xr, self.mean, self.Ie, key)
                if self.debug: print('.. fitting %s' % key)
                self.info[key] = self._fit(key, vc=True)
        return self.info['block']['LML']-self.info['full']['LML']
def gxehet(self):
# fit model
for key in ['null', 'full', 'rank1']:
if key not in list(self.gp.keys()):
if self.debug: print('.. defining %s' % key)
self.gp[key] = define_gp(self.Y, self.Xr, self.mean, self.Ie, key)
if self.debug: print('.. fitting %s' % key)
self.info[key] = self._fit(key, vc=True)
return self.info['rank1']['LML']-self.info['full']['LML']
def assoc_null(self, n_nulls=30):
LLR0 = sp.zeros(n_nulls)
for ni in range(n_nulls):
idx_perms = sp.random.permutation(self.Y.shape[0])
_Xr = self.Xr[idx_perms]
mvset0 = MvSetTestInc(Y=self.Y, F=self.F, Xr=_Xr, Ie=self.Ie)
LLR0[ni] = mvset0.assoc()
return LLR0
def gxe_null(self, n_nulls=30):
LLR0 = sp.zeros(n_nulls)
for ni in range(n_nulls):
Xb = sp.dot(self.mean.W, self.mean.b)
_Y = Xb+self.gp['block'].covar.Kh_dot(sp.randn(self.Y.shape[0],1))
mvset0 = MvSetTestInc(Y=_Y, F=self.F, Xr=self.Xr, Ie=self.Ie)
LLR0[ni] = mvset0.gxe()
return LLR0
def gxehet_null(self, n_nulls=30):
LLR0 = sp.zeros(n_nulls)
for ni in range(n_nulls):
Xb = sp.dot(self.mean.W, self.mean.b)
_Y = Xb+self.gp['rank1'].covar.Kh_dot(sp.randn(self.Y.shape[0],1))
mvset0 = MvSetTestInc(Y=_Y, F=self.F, Xr=self.Xr, Ie=self.Ie)
LLR0[ni] = mvset0.gxehet()
return LLR0
def _fit(self, type, vc=False):
#2. init
if type=='null':
self.gp[type].covar.Cn.setCovariance(sp.eye(2))
elif type=='full':
Cr0_K = 1e-4*sp.ones((2,2))+1e-4*sp.eye(2)
Cn0_K = 0.99*self.gp['null'].covar.Cn.K()
self.gp[type].covar.Cr.setCovariance(Cr0_K)
self.gp[type].covar.Cn.setCovariance(Cn0_K)
elif type=='block':
Crf_K = self.gp['full'].covar.Cr.K()
Cnf_K = self.gp['full'].covar.Cn.K()
self.gp[type].covar.Cr.scale = sp.mean(Crf_K)
self.gp[type].covar.Cn.setCovariance(Cnf_K)
elif type=='rank1':
Crf_K = self.gp['full'].covar.Cr.K()
Cnf_K = self.gp['full'].covar.Cn.K()
self.gp[type].covar.Cr.setCovariance(Crf_K)
self.gp[type].covar.Cn.setCovariance(Cnf_K)
else:
print('poppo')
conv = self.gp[type].optimize(factr=self.factr, verbose=False)[0]
B = self.gp[type].mean.b.reshape((self.mean.W.shape[1]/2,2), order='F')
RV = {'Cr': self.gp[type].covar.Cr.K(),
'Cn': self.gp[type].covar.Cn.K(),
'B': B,
'conv': sp.array([conv]),
'LML': sp.array([self.gp[type].LML()]),
'LMLgrad': sp.array([sp.mean((self.gp[type].LML_grad()['covar'])**2)])}
if vc:
# tr(P WW) = tr(PWWP) = ((PW)**2).sum()
# tr(P D) = (PD).sum() = D.sum() - 1/n * (Ones*D).sum()
# = D.sum() - D.sum()
PW = self.gp[type].covar.W()
PW-= PW.mean(0)
var_r = (PW**2).sum()/ float(self.Y.size-1)
var_c = sp.var(sp.dot(self.mean.W, self.gp[type].mean.b))
D = self.gp[type].covar.d_inv()**(-1)
var_n = (1-1/float(D.shape[0]))*D.sum()/float(self.Y.size-1)
#var_n = sp.diagonal(sp.diag(D)-sp.diag(D).mean(0)).sum()/float(self.Y.size-1)
RV['var'] = sp.array([var_r, var_c, var_n])
if 0 and self.Y.size<5000:
pdb.set_trace()
Kr = sp.kron(RV['Cr'], sp.dot(self.Xr, self.Xr.T))
Kn = sp.kron(RV['Cn'], sp.eye(self.Y.shape[0]))
_var_r = sp.trace(Kr-Kr.mean(0)) / float(self.Y.size-1)
_var_n = sp.trace(Kn-Kn.mean(0)) / float(self.Y.size-1)
_var = sp.array([_var_r, var_c, _var_n])
print(((_var-RV['var'])**2).mean())
if type=='full':
trRr = (self.Xr**2).sum()
# calculate within region vcs
Cr_block = sp.mean(RV['Cr']) * sp.ones(RV['Cr'].shape)
Cr_rank1 = lowrank_approx(RV['Cr'], rank=1)
var_block = sp.trace(Cr_block)*trRr / float(self.Y.size-1)
var_rank1 = sp.trace(Cr_rank1)*trRr / float(self.Y.size-1)
RV['var_r'] = sp.array([var_block, var_rank1-var_block, var_r-var_rank1])
return RV
if 0:
def _sim_from(self, set_covar='block', seed=None, qq=False):
##1. region term
if set_covar=='block':
Cr = self.block['Cr']
Cg = self.block['Cg']
Cn = self.block['Cn']
if set_covar=='rank1':
Cr = self.lr['Cr']
Cg = self.lr['Cg']
Cn = self.lr['Cn']
Lc = msqrt(Cr)
U, Sh, V = nla.svd(self.Xr, full_matrices=0)
Lr = sp.zeros((self.Y.shape[0], self.Y.shape[0]))
Lr[:, :Sh.shape[0]] = U * Sh[sp.newaxis, :]
Z = sp.randn(*self.Y.shape)
Yr = sp.dot(Lr, sp.dot(Z, Lc.T))
##2. bg term
Lc = msqrt(Cg)
Lr = self.XXh
Z = sp.randn(*self.Y.shape)
Yg = sp.dot(Lr, sp.dot(Z, Lc.T))
# noise terms
Lc = msqrt(Cn)
Z = sp.randn(*self.Y.shape)
Yn = sp.dot(Z, Lc.T)
# normalize
Y = Yr + Yg + Yn
if qq:
Y = gaussianize(Y)
Y-= Y.mean(0)
Y/= Y.std(0)
return Y
if __name__ == '__main__':
    if 1:
        # Smoke test on random data: 1000 samples, 20 binary SNPs in the set.
        n_samples = 1000
        n_snps = 20
        Xr = 1. * (sp.rand(n_samples, n_snps) < 0.2)
        Ie = sp.randn(n_samples) < 0.
        Y = sp.randn(n_samples, 1)
        F = sp.ones((n_samples, 1))
        pdb.set_trace()
        t0 = time.time()
        mvset = MvSetTestInc(Y=Y, Xr=Xr, F=F, Ie=Ie, factr=1e7)
        mvset.assoc()
        mvset.gxe()
        mvset.gxehet()
        print('.. permutations')
        mvset.assoc_null()
        print('.. bootstrap gxe')
        mvset.gxe_null()
        print('.. bootstrap gxehet')
        mvset.gxehet_null()
        print(time.time() - t0)
        pdb.set_trace()
|
425803
|
import collections
class CupClass():
    """Node of a singly linked circular list of cups (used by the part-2 solver)."""

    def __init__(self, id):
        # `next` is wired up later, once every cup node exists.
        self.next = None
        self.id = id
def problem1(puzzle_input, rounds):
    """Play `rounds` moves of the crab-cups game (AoC 2020 day 23, part 1).

    Returns the labels of the eight cups clockwise of cup 1, concatenated
    into a string.
    """
    size = len(puzzle_input)
    cups = collections.deque((int(ch) for ch in puzzle_input), size)
    current = cups[0]
    for _ in range(rounds):
        # Send the current cup to the back; the three picked-up cups are now leftmost.
        cups.rotate(-1)
        picked = [cups.popleft() for _ in range(3)]
        # Destination label: count down from current, wrapping, skipping picked cups.
        dest = (current - 1) % (size + 1)
        while dest == 0 or dest in picked:
            dest = (dest - 1) % (size + 1)
        where = cups.index(dest)
        for offset, cup in enumerate(picked, start=1):
            cups.insert(where + offset, cup)
        current = cups[0]
    # Rotate cup 1 to the front, drop it, and read off the remaining labels.
    cups.rotate(size - cups.index(1))
    cups.popleft()
    return ''.join(str(cups[i]) for i in range(8))
def main():
    """Solve both parts of AoC 2020 day 23 for the hard-coded puzzle input."""
    puzzle_input = str(476138259)
    # Part 1: 9 cups, 100 moves (deque-based solution above).
    print("The labels on the cups after cup 1 are",problem1(puzzle_input,100))
    # Part 2: one million cups, ten million moves. A dict of linked CupClass
    # nodes (label -> node) gives O(1) removal/insertion per move.
    # create Cups
    cups_dictionary = dict()
    for digit in puzzle_input:
        temp_cup = CupClass(int(digit))
        cups_dictionary[int(digit)] = temp_cup
    # Cups 10..1,000,000 follow the nine puzzle digits.
    for i in range(10,1000001):
        temp_cup = CupClass(i)
        cups_dictionary[i] = temp_cup
    # Link the cups
    for i in range(len(puzzle_input)-1):
        cups_dictionary[int(puzzle_input[i])].next = cups_dictionary[int(puzzle_input[i+1])]
    cups_dictionary[int(puzzle_input[8])].next = cups_dictionary[10]
    for i in range(10, 1000000):
        cups_dictionary[i].next = cups_dictionary[i+1]
    # Close the circle: the last cup points back at the first puzzle digit.
    cups_dictionary[1000000].next = cups_dictionary[int(puzzle_input[0])]
    current_cup = cups_dictionary[int(puzzle_input[0])]
    for move in range(1, 10000001):
        # Unlink the three cups clockwise of the current cup...
        cups_to_move = [current_cup.next.id, current_cup.next.next.id, current_cup.next.next.next.id]
        current_cup.next = current_cup.next.next.next.next
        # ...then splice them back after the destination cup (label - 1,
        # wrapping to 1,000,000, skipping the three removed labels).
        ind = (current_cup.id - 1)
        if ind == 0: ind = 1000000
        while ind in cups_to_move:
            ind -= 1
            if ind == 0: ind = 1000000
        cups_dictionary[cups_to_move[2]].next = cups_dictionary[ind].next
        cups_dictionary[ind].next = cups_dictionary[cups_to_move[0]]
        current_cup = current_cup.next
    ones_cup = cups_dictionary[1]
    # Part 2 answer: product of the two cup labels clockwise of cup 1.
    print(ones_cup.id)
    print(ones_cup.next.id)
    print(ones_cup.next.next.id)
    print(ones_cup.next.id*ones_cup.next.next.id)
main()
|
425847
|
import importlib
import logging
import traceback
from threading import Thread
from typing import Optional
from django.conf import settings
from django.template.loader import render_to_string
from magic_notifier.utils import get_settings, import_attribute
logger = logging.getLogger("notifier")  # module-level logger used by ExternalSMS below
class ExternalSMS:
    def __init__(self, receivers: list, context: dict, template: Optional[str] = None,
            final_message: Optional[str] = None, sms_gateway: Optional[str] = None, **kwargs):
        """This class is responsible for sending a notification via sms.

        :param receivers: list of User
        :param template: the name of the template to use. Default None
        :param context: the context to be passed to template. Default None
        :param final_message: the final message to be sent as the notification content, must be sent if template is None, template is ignored if it is sent. Default None
        :param sms_gateway: the sms gateway to use. Default to None
        :param kwargs: extra options; `threaded` (bool) sends from a daemon thread
        """
        self.receivers: list = receivers
        self.template: Optional[str] = template
        self.context: dict = context
        self.threaded: bool = kwargs.get("threaded", False)
        self.final_message: Optional[str] = final_message
        # get the default sms gateway
        self.sms_gateway = get_settings('SMS::DEFAULT_GATEWAY') if sms_gateway is None else sms_gateway
        # get the sms gateway definition
        NOTIFIER_SMS_GATEWAY = get_settings('SMS')["GATEWAYS"][self.sms_gateway]
        # get the sms client to be used
        NOTIFIER_SMS_CLIENT = NOTIFIER_SMS_GATEWAY['CLIENT']
        # load the sms client class from its dotted path
        module_name, class_name = NOTIFIER_SMS_CLIENT.rsplit(".", 1)
        module = importlib.import_module(module_name)
        assert hasattr(module, class_name), "class {} is not in {}".format(class_name, module_name)
        self.client_class = getattr(module, class_name)
        self.sms_class_options = NOTIFIER_SMS_GATEWAY

    def send(self):
        """Send the notification, in a background daemon thread when `threaded`."""
        if self.threaded:
            t = Thread(target=self._send)
            # Thread.setDaemon() is deprecated since Python 3.10; use the attribute.
            t.daemon = True
            t.start()
        else:
            self._send()

    def _send(self):
        """Render and dispatch one SMS per receiver; errors are logged, not raised."""
        get_user_number = import_attribute(get_settings("GET_USER_NUMBER"))
        try:
            for rec in self.receivers:
                ctx = self.context.copy()
                ctx["user"] = rec
                number = get_user_number(rec)
                if not number:
                    logger.warning(f"Can't find a number for {rec}, ignoring.")
                    # BUG FIX: previously fell through and tried to send anyway.
                    continue
                if self.final_message:
                    sms_content = self.final_message
                else:
                    sms_content = render_to_string("notifier/{}/sms.txt".format(self.template), ctx)
                self.client_class.send(number, sms_content, **self.sms_class_options)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
            logger.error(traceback.format_exc())
|
425849
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_Encoder(nn.Module):
    def __init__(self, input_dim=784, hidden_dim=256, latent_dim=32, nb_layers=2, deterministic=False, dropout_p=0.0):
        """
        A simple MLP encoder with gated (GLU) activations.

        :param input_dim: input features
        :param hidden_dim: hidden features
        :param latent_dim: latent feature size OR number of parameters for the posterior_flow distribution
        :param nb_layers: number of hidden blocks, excluding the output projection
        :param deterministic:
            True: return a single deterministic latent variable (e.g. for autoencoder, WAE, AAE)
            False: return parameters of a gaussian base posterior_flow, along with the final hidden
            representation, which can serve as context for a normalising flow.
        :param dropout_p: dropout probability applied after each GLU block
        """
        super().__init__()
        self.deterministic = deterministic
        blocks = []
        in_features = input_dim
        for _ in range(nb_layers):
            # Linear doubles the width so the GLU gate halves it back to hidden_dim.
            blocks.extend([nn.Linear(in_features, hidden_dim * 2), nn.GLU(dim=1), nn.Dropout(dropout_p)])
            in_features = hidden_dim
        self.layers = nn.Sequential(*blocks)
        # Projection to the latent (or, stochastic case, to mu and logvar stacked).
        out_features = latent_dim if deterministic else 2 * latent_dim
        self.final = nn.Linear(hidden_dim, out_features)

    def forward(self, x):
        """
        :param x: (batch, input_dim)
        :return:
            if deterministic:
                z: (batch, latent_dim)
            else: (mu, logvar, h)
                mu: posterior_flow mean (batch, latent_dim)
                logvar: posterior_flow log-variance (batch, latent_dim), clamped to [-6, 2]
                h: final hidden state / 'context' vector (batch, hidden_dim)
        """
        h = self.layers(x)  # (batch, hidden_dim)
        params = self.final(h)
        if self.deterministic:
            return params
        mu, logvar = params.chunk(2, dim=1)
        logvar = F.hardtanh(logvar, min_val=-6.0, max_val=2.0)
        return mu, logvar, h
|
425862
|
from math import sqrt, floor, ceil
from datetime import datetime
from asyncio import TimeoutError
from discord import Message, Color
from discord.errors import Forbidden
from discord.ext.commands import (
Cog,
Context,
command,
group,
cooldown,
BucketType,
)
from nagatoro.converters import Member
from nagatoro.objects import Embed
from nagatoro.utils import aenumerate, t, tg
from nagatoro.db import Guild, User, Mute, Warn
class Social(Cog):
    """Social commands: profiles, levels, balance, rankings, transfers and dailies."""
    def __init__(self, bot):
        self.bot = bot
    @command(name="profile")
    @cooldown(rate=2, per=10, type=BucketType.user)
    async def profile(self, ctx: Context, *, member: Member = None):
        """User's profile"""
        if not member:
            member = ctx.author
        user, _ = await User.get_or_create(id=member.id)
        # Calculate current level progress:
        # (exp - curr lvl req) * 100 / (curr lvl req - next lvl req)
        # Level thresholds follow exp = (level * 4) ** 2, the inverse of the
        # formula used in on_message below.
        current_level_exp = (user.level * 4) ** 2
        next_level_exp = ((user.level + 1) * 4) ** 2
        progress = round(
            (user.exp - current_level_exp) * 100 / (next_level_exp - current_level_exp)
        )
        # Find position of profile in global user ranking
        rank = (await User.all().order_by("-exp")).index(user)
        embed = Embed(
            ctx, title=t(ctx, "title", member=member.name), color=member.color
        )
        embed.set_thumbnail(url=member.avatar_url)
        embed.add_fields(
            (t(ctx, "rank"), str(rank + 1)),
            (t(ctx, "level"), f"{user.level}"),
            (t(ctx, "experience"), f"{user.exp}/{next_level_exp} ({progress}%)"),
            (t(ctx, "balance"), t(ctx, "balance_value", bal=user.balance)),
        )
        # Mute/warn counts are scoped to the current guild and only shown when non-zero.
        if mutes := await Mute.filter(
            guild__id=ctx.guild.id, user__id=member.id
        ).count():
            embed.add_field(name=t(ctx, "mutes"), value=str(mutes))
        if warns := await Warn.filter(
            guild__id=ctx.guild.id, user__id=member.id
        ).count():
            embed.add_field(name=t(ctx, "warns"), value=str(warns))
        await ctx.send(embed=embed)
    @command(name="balance", aliases=["bal", "money"])
    @cooldown(rate=5, per=10, type=BucketType.user)
    async def balance(self, ctx: Context, *, member: Member = None):
        """Coin balance"""
        if not member:
            member = ctx.author
        user, _ = await User.get_or_create(id=member.id)
        # NOTE(review): translation key "messsage" (triple s) differs from the
        # "message" key used by level() below - confirm it matches the locale files.
        await ctx.send(t(ctx, "messsage", member=member.name, bal=user.balance))
    @command(name="level", aliases=["lvl"])
    @cooldown(rate=5, per=10, type=BucketType.user)
    async def level(self, ctx: Context, *, member: Member = None):
        """User's level"""
        if not member:
            member = ctx.author
        user, _ = await User.get_or_create(id=member.id)
        await ctx.send(t(ctx, "message", member=member.name, lvl=user.level))
    @group(name="ranking", aliases=["top", "baltop"], invoke_without_command=True)
    @cooldown(rate=2, per=30, type=BucketType.guild)
    async def ranking(self, ctx: Context):
        """User ranking
        Use 'baltop' for quicker access to the balance ranking
        """
        # Dispatch on the alias the user actually typed.
        if ctx.invoked_with == "baltop":
            return await self.ranking_balance.__call__(ctx)
        await self.ranking_level.__call__(ctx)
    @ranking.command(name="level", aliases=["lvl"])
    @cooldown(rate=2, per=30, type=BucketType.guild)
    async def ranking_level(self, ctx: Context):
        """User ranking, by level"""
        embed = Embed(ctx, title=t(ctx, "title"), description="", color=Color.blue())
        await ctx.trigger_typing()
        # Top 10 by experience; fetch_user resolves names not cached locally.
        async for pos, i in aenumerate(User.all().order_by("-exp").limit(10), start=1):
            user = await self.bot.fetch_user(i.id)
            embed.description += t(
                ctx, "ranking_entry", pos=pos, user=user, lvl=i.level, exp=i.exp
            )
        await ctx.send(embed=embed)
    @ranking.command(name="balance", aliases=["bal", "money"])
    @cooldown(rate=2, per=30, type=BucketType.guild)
    async def ranking_balance(self, ctx: Context):
        """User ranking, sorted by balance"""
        embed = Embed(ctx, title=t(ctx, "title"), description="", color=Color.blue())
        await ctx.trigger_typing()
        async for pos, i in aenumerate(
            User.all().order_by("-balance").limit(10), start=1
        ):
            user = await self.bot.fetch_user(i.id)
            # NOTE(review): entries are sorted by balance but rendered with
            # lvl/exp only (no bal kwarg) - confirm the "ranking_entry"
            # template for this command is intended to omit the balance.
            embed.description += t(
                ctx, "ranking_entry", pos=pos, user=user, lvl=i.level, exp=i.exp
            )
        await ctx.send(embed=embed)
    @command(name="pay", aliases=["give", "transfer"])
    @cooldown(rate=2, per=10, type=BucketType.user)
    async def pay(self, ctx: Context, amount: int, *, member: Member):
        """Give coins to someone
        You can't give money to yourself or any bots.
        Transfer amount should be more than 0.
        """
        if member == ctx.author or member.bot:
            return await ctx.send(t(ctx, "other_users_only"))
        if amount <= 0:
            return await ctx.send(t(ctx, "at_least_one"))
        user, _ = await User.get_or_create(id=ctx.author.id)
        if user.balance < amount:
            return await ctx.send(
                t(
                    ctx,
                    "not_enough_funds",
                    coins=user.balance,
                    missing=amount - user.balance,
                )
            )
        embed = Embed(
            ctx,
            title=t(ctx, "title"),
            description=t(ctx, "confirmation", amount=amount, member=member.mention),
        )
        message = await ctx.send(embed=embed)
        # Require the sender to confirm via reaction within 30 seconds.
        await message.add_reaction("✅")
        try:
            await self.bot.wait_for(
                "reaction_add",
                timeout=30,
                check=lambda r, u: u == ctx.message.author and str(r.emoji) == "✅",
            )
        except TimeoutError:
            embed.description = t(ctx, "cancelled")
            return await message.edit(embed=embed)
        target_user, _ = await User.get_or_create(id=member.id)
        user.balance -= amount
        target_user.balance += amount
        await user.save()
        await target_user.save()
        # Clearing reactions needs Manage Messages; ignore if we lack it.
        try:
            await message.clear_reactions()
        except Forbidden:
            pass
        embed.description = t(ctx, "success", amount=amount, member=member.mention)
        await message.edit(embed=embed)
    @command(name="daily", aliases=["dly"])
    async def daily(self, ctx: Context, member: Member = None):
        """Daily coin reward
        Mention someone to give your them your reward.
        Can be used once every 23 hours.
        Streak gives you more coins over time, but will be lost after 2 days of inactivity.
        """
        if member and member.bot:
            return await ctx.send(t(ctx, "cannot_give_to_bot"))
        user, _ = await User.get_or_create(id=ctx.author.id)
        # Hours (rounded up) until the next daily becomes available.
        def hours_til_next_daily() -> int:
            return ceil(
                (user.next_daily.timestamp() - datetime.utcnow().timestamp()) / 3600
            )
        if not user.daily_available:
            try:
                await ctx.send(
                    t(
                        ctx,
                        "next_daily",
                        remaining=hours_til_next_daily(),
                        streak=user.daily_streak,
                    )
                )
            except Forbidden:
                pass
            return
        expired = t(ctx, "lost_streak") if user.daily_streak_expired else ""
        if user.daily_streak_expired:
            user.daily_streak = 1
        else:
            user.daily_streak += 1
        # Streak bonus grows with the square root of the streak length.
        bonus = floor(sqrt(user.daily_streak) * 20)
        user.last_daily = datetime.utcnow()
        # The reward may be gifted to someone else; the streak stays the author's.
        if member:
            target_user, _ = await User.get_or_create(id=member.id)
        else:
            target_user = user
        target_user.balance += 100 + bonus
        await user.save()
        if user != target_user:
            await target_user.save()
        embed = Embed(ctx, title=t(ctx, "title"), color=ctx.author.color)
        if user == target_user:
            embed.description = t(
                ctx,
                "received_daily",
                amount=100 + bonus,
                streak=user.daily_streak,
                expired=expired,
                remaining=hours_til_next_daily(),
            )
        else:
            embed.description = t(
                ctx,
                "received_daily",
                amount=100 + bonus,
                member=member.mention,
                streak=user.daily_streak,
                expired=expired,
                remaining=hours_til_next_daily(),
            )
        try:
            await ctx.send(embed=embed)
        except Forbidden:
            pass
    @Cog.listener()
    async def on_message(self, message: Message):
        # Award 1 exp per qualifying message (non-bot, in a guild, longer than
        # 5 characters, not in a channel with "spam" in its name).
        if (
            message.author.bot
            or not message.guild
            or len(message.content) <= 5
            or "spam" in message.channel.name.lower()
        ):
            # TODO: Make better spam filter.
            return
        ctx = await self.bot.get_context(message)
        # Don't award exp for messages that are themselves bot commands.
        if ctx.valid:
            return
        user, _ = await User.get_or_create(id=ctx.author.id)
        user.exp += 1
        await user.save()
        # level = floor(sqrt(exp) / 4): inverse of the threshold used in profile().
        if user.level != (new_level := floor(sqrt(user.exp) / 4)):
            user.level = new_level
            bonus = floor(sqrt(user.level) * 100)
            user.balance += bonus
            await user.save()
            # Suppress announcements for low levels to cut early-game noise.
            if user.level < 5:
                return
            # Level up message, don't send if the guild has them turned off
            guild, _ = await Guild.get_or_create(id=ctx.guild.id)
            if not guild.level_up_messages:
                return
            try:
                await ctx.send(
                    tg(
                        ctx,
                        "level_up_message",
                        user=ctx.author.name,
                        level=user.level,
                        bonus=bonus,
                    )
                )
            except Forbidden:
                pass
            # TODO: Let the admin choose if they want embed or text level ups
            # embed = Embed(ctx, title="Level up!")
            # embed.set_thumbnail(url=ctx.author.avatar_url)
            # embed.description = (
            #     f"Congratulations, {ctx.author.mention}! "
            #     f"You have advanced to **level {user.level}** "
            #     f"and got a bonus of **{bonus} points**."
            # )
            #
            # level_up_message = await ctx.send(embed=embed)
            # await level_up_message.delete(delay=30)
def setup(bot):
    """discord.py extension entry point: register the Social cog on the bot."""
    cog = Social(bot)
    bot.add_cog(cog)
|
425879
|
import tensorflow as tf
import matplotlib.pyplot as plt
def segmentation_to_image(pred):
    """Collapse per-class scores to a label map and wrap it as a PIL image."""
    labels = tf.argmax(pred, axis=-1)
    # Restore a trailing channel axis so array_to_img accepts the tensor.
    labels = tf.expand_dims(labels, axis=-1)
    return tf.keras.preprocessing.image.array_to_img(labels)
def predict_tf(model):
    """Wrap *model* into a callable mapping a (image, ...) sample to (image, prediction)."""
    def run(sample):
        image = sample[0]
        # Add a batch dimension for predict(), then strip it from the result.
        batch = tf.expand_dims(image, axis=0)
        return image, model.predict(batch)[0]
    return run
def display_dataset(ds, pred_func):
    """Plot each sample of *ds*: one subplot per image returned by *pred_func*.

    :param ds: iterable of samples
    :param pred_func: callable mapping a sample to a tuple of images
    """
    for sample in ds:
        imgs = pred_func(sample)
        # NOTE(review): plt.subplots(1, 1) returns a single Axes (not iterable),
        # so this assumes pred_func returns at least two images - confirm callers.
        fig, axes = plt.subplots(1, len(imgs))
        for ax, img in zip(axes, imgs):
            # Anything that is not 3-channel is treated as a segmentation map.
            if img.shape[-1] != 3:
                img = segmentation_to_image(img)
            ax.imshow(img)
|
425933
|
from datetime import datetime, timedelta
from time import time
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.quests.models import Quest
from drawquest.apps.quests.top import top_quests_buffer
class Command(BaseCommand):
args = ''
help = 'Update quest scores for the top quests view.'
def handle(self, *args, **options):
start = time()
updates = 0
def flatten(list_of_lists):
return set([int(item) for sublist in list_of_lists for item in sublist])
quest_ids = [int(id_) for id_ in top_quests_buffer[:]]
for quest in Quest.all_objects.in_bulk_list(quest_ids):
updates += 1
quest.update_score()
print "Scores updated. Rows updated: %s Total elapsed time: %0.2fs" % (updates, (time() - start))
|
425954
|
from discord.ext import commands
from discord.ext.commands.errors import NotOwner
import errors
import functions
from bot_config import OWNER_ID
def is_owner():
    """Command check: only the configured bot owner (OWNER_ID) may run the command."""
    async def predicate(ctx):
        if ctx.message.author.id == OWNER_ID:
            return True
        raise NotOwner("This command can only be run by the owner.")
    return commands.check(predicate)
def premium_guild():
    """Command check: the guild must have an active premium subscription."""
    async def predicate(ctx):
        # A None expiry means the guild has no premium record at all.
        endsat = await functions.get_prem_endsat(ctx.bot, ctx.guild.id)
        if endsat is None:
            raise errors.NoPremiumError(
                "Only premium guilds can run this command."
            )
        return True
    return commands.check(predicate)
|
425966
|
import os
import sys
import re
import json
import pandas as pd
import collections
import pytz
from datetime import datetime, timedelta
try:
from jaws import tilt_angle, fsds_adjust
except ImportError:
import tilt_angle, fsds_adjust
###############################################################################
freezing_point_temp = 273.15  # 0 degC expressed in kelvin
pascal_per_millibar = 100  # Pa per millibar (1 mb == 1 hPa)
seconds_in_hour = 3600
seconds_in_half_hour = 1800
fillvalue_double = 9.969209968386869e+36  # default netCDF fill value (double precision)
fillvalue_float = 9.96921e+36  # default netCDF fill value (single precision); see get_fillvalue()
jaws_version = '1.0'  # written into the output file's global attributes
###############################################################################
def log(args, level, message):
    """Print *message* when the user-selected debug level (args.dbg_lvl) exceeds *level*."""
    verbose_enough = args.dbg_lvl > level
    if verbose_enough:
        print(message)
def get_fillvalue(args):
    """Return the user-supplied float fill value, or the module default when unset."""
    # Falls back to the module-level fillvalue_float when the flag is falsy.
    return args.fll_val_flt if args.fll_val_flt else fillvalue_float
def relative_path(path):
    """Resolve *path* relative to the directory containing this module."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(base_dir, path)
def read_ordered_json(path):
    """Load a JSON resource (module-relative *path*) preserving key order."""
    full_path = relative_path(path)
    with open(full_path) as stream:
        # object_pairs_hook keeps the on-disk key ordering.
        return json.load(stream, object_pairs_hook=collections.OrderedDict)
def load_dataframe(name, input_file, header_rows, **kwargs):
    """Create skeleton of dataframe based on type of input file.

    Sets the module-global `columns` (also used by load_dataset_attributes()
    and get_encoding()) and returns (df, columns).
    """
    # Flatten all kwargs values (lists of variable names found in the input file).
    input_file_vars = [item for sublist in [v for k, v in kwargs.items()] for item in sublist]
    global columns
    # Networks with a fixed layout: read the standard column names from a resource file.
    if (name == 'gcnet' and header_rows == 54) or (name == 'promice' and len(input_file_vars) == 46) or (
            name == 'aaws' and len(input_file_vars) == 6) or (name == 'imau/ant') or (name == 'imau/grl') or (
            name == 'scar') or (name == 'nsidc'):
        path = relative_path('resources/{}/columns.txt'.format(name))
        with open(path) as stream:
            columns = stream.read().split('\n')
        columns = [i.strip() for i in columns if i.strip()]
    elif name == 'gcnet':
        # Variable GC-Net layout: map original header names to standard names.
        path = relative_path('resources/{}/original_columns.json'.format(name))
        org_columns = read_ordered_json(path)
        columns = []
        with open(input_file) as stream:
            stream.readline()
            count = 0
            for line in stream:
                isColumnFoundForThisLine = False
                for column_name, std_name in org_columns.items():
                    if re.search(r'\b' + column_name + r'\b', line):
                        isColumnFoundForThisLine = True
                        columns.append(std_name)
                if not isColumnFoundForThisLine:
                    # Two unnamed '[W m-2]' header lines: first is the downwelling
                    # max, second the upwelling max shortwave column.
                    if '[W m-2]' in line:
                        count += 1
                        if count == 1:
                            columns.append('sw_down_max')
                        elif count == 2:
                            columns.append('sw_up_max')
    elif name in ('promice', 'aaws'):
        # BUG FIX: was `name == 'promice' or 'aaws'`, which is always truthy and
        # routed every other network name through this branch.
        path = relative_path('resources/{}/original_columns.json'.format(name))
        org_columns = read_ordered_json(path)
        columns = []
        if name == 'aaws':
            columns.append('timestamp')
        # Keep only the standard names for variables actually present in the file.
        for column_name, std_name in org_columns.items():
            if column_name in input_file_vars:
                columns.append(std_name)
    df = pd.read_csv(
        input_file,
        skiprows=header_rows,
        skip_blank_lines=True,
        header=None,
        names=columns,
        sep=r'\t|\s+|\,',
        engine='python')
    df.index.name = 'time'
    return df, columns
def load_dataset_attributes(name, ds, args, **kwargs):
    """Assign global and variable attributes.

    Also populates the module globals derived_vars / no_drv_tm_vars /
    rigb_vars / flx_vars, which get_encoding() reads later.
    """
    global derived_vars, no_drv_tm_vars, rigb_vars, flx_vars
    path = 'resources/{}/ds.json'.format(name)
    attr_dict = read_ordered_json(path)
    # File-level (global) attributes come from the 'attributes' key.
    ds.attrs = attr_dict.pop('attributes')
    if name == 'scar':
        country = kwargs.pop('country')
        institution = kwargs.pop('institution')
        if country:
            ds.attrs['operated_by'] = country
        if institution:
            ds.attrs['institution'] = institution
    if name == 'nsidc':
        qlty_ctrl = kwargs.pop('qlty_ctrl')
        if qlty_ctrl:
            ds.attrs['quality_control_process'] = qlty_ctrl
    # Provenance: timestamped command line plus the JAWS version.
    ds.attrs['history'] = '{} {}'.format(datetime.now(), ' '.join(sys.argv))
    ds.attrs['JAWS'] = 'Justified Automated Weather Station software version {} (Homepage = https://github.com/' \
                       'jaws/jaws)'.format(jaws_version)
    # Variables JAWS always derives itself.
    derived_vars = ['time', 'time_bounds', 'sza', 'az','station_name', 'latitude', 'longitude',
                    'ice_velocity_GPS_total', 'ice_velocity_GPS_x', 'ice_velocity_GPS_y', 'height']
    no_drv_tm_vars = []
    if not args.no_drv_tm:
        no_drv_tm_vars = ['hour', 'month', 'day', 'day_of_year']
    rigb_vars = []
    if name in ['imau/ant', 'imau/grl', 'gcnet', 'promice']:
        rigb_vars = kwargs.pop('rigb_vars')
    flx_vars = []
    if name == 'gcnet' and args.flx:
        flx_vars = ['sh', 'lh']
    # Per-variable attributes: only for variables present in this dataset.
    for key, value in attr_dict.items():
        for key1, value1 in value.items():
            if (key1 in columns) or (key1 in derived_vars) or (key1 in no_drv_tm_vars) or (
                    key1 in rigb_vars) or (key1 in flx_vars):
                for key2, value2 in value1.items():
                    if key2 == 'type':
                        # 'type' entries describe the dtype, not a netCDF attribute.
                        pass
                    else:
                        ds[key1].attrs = value2.items()
    if args.celsius:
        temperature_vars = kwargs.pop('temperature_vars')
        for var in temperature_vars:
            ds[var].attrs.update([('units', 'celsius')])
    if args.mb:
        pressure_vars = kwargs.pop('pressure_vars')
        for var in pressure_vars:
            ds[var].attrs.update([('units', 'hPa')])
    if name == 'nsidc' and kwargs.pop('year1900'):
        ds['time'].attrs.update([('units', 'seconds since 1900-01-01 00:00:00')])
    # GC-Net quality-control columns take their attributes from a separate resource.
    for column in columns:
        if column in ('qc1', 'qc9', 'qc17', 'qc25'):
            load_dataset_attributes_gcnet_qltyctrl(name, ds)
def load_dataset_attributes_gcnet_qltyctrl(name, ds):
    """Assign attributes for GC-Net quality-control variables."""
    attrs = read_ordered_json('resources/{}/ds_derived.json'.format(name))
    for group in attrs.values():
        for var_name, var_attrs in group.items():
            for attr_key, attr_val in var_attrs.items():
                if attr_key == 'type':
                    # 'type' entries describe the dtype, not a netCDF attribute.
                    continue
                try:
                    ds[var_name].attrs = attr_val.items()
                except KeyError:
                    # Variable absent from this dataset; skip it.
                    pass
def get_encoding(name, fillvalue, comp_level, args):
    """Assign encoding to all variables.

    Loads the per-network encoding template and substitutes the runtime
    fill value and compression level for the 'FILL'/'COMP' placeholders.
    """
    path = relative_path('resources/{}/encoding.json'.format(name))
    with open(path) as stream:
        data = json.load(stream)
    def recursive_fill(data):
        # Walk the nested dict, replacing placeholder markers in place.
        for k, v in data.items():
            if k == '_FillValue' and v == 'FILL':
                data[k] = fillvalue
            elif k == 'complevel' and v == 'COMP':
                data[k] = comp_level
            elif isinstance(v, dict):
                recursive_fill(v)
    recursive_fill(data)
    # Variable lists populated as module globals by load_dataframe() and
    # load_dataset_attributes(); both must have run before this function.
    masterlist = [columns, derived_vars]
    if not args.no_drv_tm:
        masterlist.append(no_drv_tm_vars)
    if args.rigb:
        masterlist.append(rigb_vars)
    if args.flx:
        masterlist.append(flx_vars)
    # Get encoding for only those variables present in input file
    masterlist = [item for sublist in masterlist for item in sublist]
    data = {k: data[k] for k in masterlist if k in data.keys()}
    return data
def parse_station(args, station):
    """Split a station tuple into (latitude, longitude, name).

    *station* is (lat, lon) or (lat, lon, name); an explicit --stn_nm
    argument overrides any name from the tuple.
    """
    name = None
    if len(station) == 3:
        latitude, longitude, name = station
    else:
        latitude, longitude = station
    if args.stn_nm:
        # User-supplied name takes precedence over the resource default.
        print('Default station name overrided by user provided station name')
        name = args.stn_nm
    return latitude, longitude, name
def time_common(tzone):
    """Return the Unix epoch localized to *tzone* plus the tzinfo itself.

    Shared by the per-network conversion scripts.
    """
    tz = pytz.timezone(tzone)
    epoch = tz.localize(datetime(1970, 1, 1))
    return epoch, tz
def get_month_day(year, day, one_based=False):
    """Convert a day-of-year into (month, day_of_month) for *year*.

    *day* is zero-based unless one_based is True (i.e. Jan 1st == 1).
    """
    offset = day - 1 if one_based else day
    date = datetime(year, 1, 1) + timedelta(days=offset)
    return date.month, date.day
def get_cleardays_df(station_name, first_date, last_date):
    """
    Get clear-sky periods
    :param station_name: Station name
    :param first_date: First date of input data
    :param last_date: Last date of input data
    :return: Dataframe containing clear periods between first and last date of that station
    """
    path_cleardays = relative_path('resources/cleardays.csv')
    clr_df = pd.read_csv(path_cleardays)
    # Keep only this station's rows; the column itself is no longer needed.
    clr_df = clr_df.loc[clr_df['network_name'] == station_name]
    # BUG FIX: drop('network_name', 1) relied on the positional `axis`
    # argument, which pandas deprecated in 1.0 and removed in 2.0.
    clr_df = clr_df.drop(columns='network_name')
    clr_df = clr_df.loc[(clr_df['date'] >= first_date) & (clr_df['date'] <= last_date)]
    # Convert half-hour values to full-hour to subset variable values in tilt_angle script (e.g. 10.5 to 10)
    clr_df[['start_hour', 'end_hour']] = clr_df[['start_hour', 'end_hour']].astype(int)
    return clr_df
def call_rigb(args, station_name, first_date, last_date, ds, latitude, longitude, rigb_vars):
    """Calculate tilt angle, tilt direction and adjusted downwelling shortwave flux.

    Returns the (possibly updated) dataset and the list of RIGB-derived
    variable names; both are returned unchanged when no clear-sky day exists.
    """
    log(args, 6, 'Detecting clear-sky day(s)')
    clr_df = get_cleardays_df(station_name, first_date, last_date)
    if args.dbg_lvl > 6:
        print("Found {} clear-sky day(s)".format(len(clr_df.index)))
    if clr_df.empty:
        if args.dbg_lvl > 6:
            print('Skipping RIGB, since no clear-sky day found')
    else:
        log(args, 7, 'Calculating tilt angle and direction')
        if len(clr_df.index) >= 5:  # It takes around 3 minutes for 1 day, so print message if 5 or more days (15 min)
            print('Tilt correction will take long time')
        # Call tilt_angle script to get tilt_angle and tilt_direction
        ds = tilt_angle.main(ds, latitude, longitude, clr_df, args)
        log(args, 8, 'Calculating corrected_fsds')
        # Call fsds_adjust script to get fsds_adjusted
        ds = fsds_adjust.main(ds, args)
        # Define rigb_vars for attributes and encoding
        rigb_vars = ['tilt_direction', 'tilt_angle', 'fsds_adjusted', 'fsus_adjusted', 'cloud_fraction']
    return ds, rigb_vars
def write_data(args, ds, op_file, encoding):
    """Write data to netCDF file.

    Maps the user-selected --format flag onto an xarray netCDF backend
    format; 'time' is always the unlimited dimension.
    """
    if args.format3 == 1:
        ds.to_netcdf(op_file, format='NETCDF3_CLASSIC', unlimited_dims={'time': True}, encoding=encoding)
    elif args.format4 == 1:
        ds.to_netcdf(op_file, format='NETCDF4', unlimited_dims={'time': True}, encoding=encoding)
    elif args.format5 == 1:
        ds.to_netcdf(op_file, format='NETCDF3_64BIT', unlimited_dims={'time': True}, encoding=encoding)
    elif args.format6 == 1:
        # NOTE(review): format6 writes the same 'NETCDF3_64BIT' as format5 -
        # possibly intentional (xarray exposes no 64BIT_DATA variant), confirm.
        ds.to_netcdf(op_file, format='NETCDF3_64BIT', unlimited_dims={'time': True}, encoding=encoding)
    elif args.format7 == 1:
        ds.to_netcdf(op_file, format='NETCDF4_CLASSIC', unlimited_dims={'time': True}, encoding=encoding)
    else:
        # No flag set: let xarray choose the default backend/format.
        ds.to_netcdf(op_file, unlimited_dims={'time': True}, encoding=encoding)
|
425978
|
class NeispyException(Exception):
    """Base class for all exceptions raised by this package."""
    pass
class ArgumentError(NeispyException):
    """Raised locally (before any request) when call arguments are invalid."""
    def __init__(self):
        # Message is intentionally in Korean: "the arguments are wrong".
        super().__init__("인자값이 틀립니다.")
class HTTPException(NeispyException):
    """Base for errors reported by the remote API with a status code and message."""

    def __init__(self, code: int, message: str):
        detail = f"{code} {message}"
        super().__init__(detail)
class MissingRequiredValues(HTTPException):
    """Raised for ERROR-300: a required request value is missing (see ExceptionsMapping)."""
    pass
class AuthenticationKeyInvaild(HTTPException):
    """Raised for ERROR-290: the authentication key is invalid (see ExceptionsMapping)."""
    pass
class ServiceNotFound(HTTPException):
    """Raised for ERROR-310: the requested service does not exist (see ExceptionsMapping)."""
    pass
class LocationValueTypeInvaild(HTTPException):
    """Raised for ERROR-333: a location value has the wrong type (see ExceptionsMapping)."""
    pass
class CannotExceed1000(HTTPException):
    """Raised for ERROR-336: a request parameter exceeds the limit of 1000 (see ExceptionsMapping)."""
    pass
class DailyTrafficLimit(HTTPException):
    """Raised for ERROR-337: the daily traffic quota is exhausted (see ExceptionsMapping)."""
    pass
class ServerError(HTTPException):
    """Raised for ERROR-500: generic server-side failure (see ExceptionsMapping)."""
    pass
class DatabaseConnectionError(HTTPException):
    """Raised for ERROR-600: the server could not reach its database (see ExceptionsMapping)."""
    pass
class SQLStatementError(HTTPException):
    """Raised for ERROR-601: a server-side SQL statement failed (see ExceptionsMapping)."""
    pass
class LimitUseAuthenticationkey(HTTPException):
    """Raised for INFO-300: the (trial) authentication key's usage limit was hit (see ExceptionsMapping)."""
    pass
class DataNotFound(HTTPException):
    """Raised for INFO-200: the query matched no data (see ExceptionsMapping)."""
    pass
# Maps the API's result codes to the exception class to raise for each.
ExceptionsMapping = {
    "INFO-200": DataNotFound,
    "INFO-300": LimitUseAuthenticationkey,
    "ERROR-290": AuthenticationKeyInvaild,
    "ERROR-300": MissingRequiredValues,
    "ERROR-310": ServiceNotFound,
    "ERROR-333": LocationValueTypeInvaild,
    "ERROR-336": CannotExceed1000,
    "ERROR-337": DailyTrafficLimit,
    "ERROR-500": ServerError,
    "ERROR-600": DatabaseConnectionError,
    "ERROR-601": SQLStatementError,
}
|
426000
|
import itertools
import typing
from typing import Dict, List, Optional
from hearthstone.asyncio import asyncio_utils
from hearthstone.simulator.agent.actions import EndPhaseAction
from hearthstone.simulator.agent.agent import AnnotatingAgent
from hearthstone.simulator.core.randomizer import Randomizer
from hearthstone.simulator.host.host import Host
from hearthstone.simulator.replay.observer import Observer
from hearthstone.simulator.replay.replay import Replay
class RoundRobinHost(Host):
    """Synchronous host that polls each agent in a fixed order.

    Every agent coroutine is driven to completion via
    ``run_until_complete`` before the next agent is consulted, so no two
    agents ever act concurrently. One round = every player's buy phase
    played sequentially, followed by a single combat step.
    """
    def __init__(self, agents: Dict[str, 'AnnotatingAgent'],
                 observers: Optional[List['Observer']] = None,
                 randomizer: Optional[Randomizer] = None):
        super().__init__(agents, observers, randomizer)
    def start_game(self):
        # Ask every agent to pick a hero before the first round begins.
        for player_name, player in self.tavern.players.items():
            hero_choice_action = asyncio_utils.get_or_create_event_loop().run_until_complete(
                self.agents[player_name].hero_choice_action(player))
            self._apply_and_record(player_name, hero_choice_action)
    def play_round_generator(self) -> typing.Generator: # TODO: think about how to test this code
        # Generator form of one full round; yields once after each applied
        # buy-phase action so callers can interleave work between actions.
        self.tavern.buying_step()
        for player_name, player in self.tavern.players.items():
            agent = self.agents[player_name]
            for i in itertools.count():
                if player.dead:
                    break
                if player.discover_queue:
                    # Pending discover choices are resolved before anything
                    # else and are NOT subject to the i > 40 cap below.
                    discover_choice_action, agent_annotation = asyncio_utils.get_or_create_event_loop().run_until_complete(
                        agent.annotated_discover_choice_action(player))
                    self._apply_and_record(player_name, discover_choice_action, agent_annotation)
                elif i > 40:
                    # Safety valve: abandon a buy phase the agent never ends.
                    break
                else:
                    action, agent_annotation = asyncio_utils.get_or_create_event_loop().run_until_complete(
                        agent.annotated_buy_phase_action(player))
                    self._apply_and_record(player_name, action, agent_annotation)
                    yield
                    if type(action) is EndPhaseAction:
                        break
            if player.dead:
                continue
            if len(player.in_play) > 1:
                # Let the agent reorder its board before combat; skipped
                # when there is at most one minion in play.
                rearrange_action, agent_annotation = asyncio_utils.get_or_create_event_loop().run_until_complete(
                    agent.annotated_rearrange_cards(player))
                self._apply_and_record(player_name, rearrange_action, agent_annotation)
        self.tavern.combat_step()
        if self.tavern.game_over():
            # NOTE(review): `losers` looks ordered by elimination, so
            # reversed() would make position 0 the last player standing —
            # confirm against Tavern.losers before relying on `position`.
            for position, (name, player) in enumerate(reversed(self.tavern.losers)):
                annotation = asyncio_utils.get_or_create_event_loop().run_until_complete(
                    self.agents[name].game_over(player, position))
                self.replay.agent_annotate(name, annotation)
            self._on_game_over()
    def play_round(self):
        # Drain the generator, discarding the per-action yield points.
        for _ in self.play_round_generator():
            pass
    def game_over(self):
        # True once the tavern reports the game has ended.
        return self.tavern.game_over()
    def play_game(self):
        # Convenience loop: hero selection, then rounds until game over.
        self.start_game()
        while not self.game_over():
            self.play_round()
    def get_replay(self) -> Replay:
        # Replay accumulated via _apply_and_record / agent_annotate calls.
        return self.replay
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.