hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72660bfc1f52e8bce65d7defe6754157512b67a | 3,196 | py | Python | grr/server/grr_response_server/databases/mem.py | billstackpole/grr | 203a0a99990a2d4004aed84a5cd822cbda2b418c | [
"Apache-2.0"
] | 1 | 2019-03-28T07:09:41.000Z | 2019-03-28T07:09:41.000Z | grr/server/grr_response_server/databases/mem.py | gingogo/grr | 203a0a99990a2d4004aed84a5cd822cbda2b418c | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/databases/mem.py | gingogo/grr | 203a0a99990a2d4004aed84a5cd822cbda2b418c | [
"Apache-2.0"
] | 1 | 2018-08-30T14:50:24.000Z | 2018-08-30T14:50:24.000Z | #!/usr/bin/env python
"""An in memory database implementation used for testing."""
import sys
import threading
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_server import db
from grr_response_server.databases import mem_blobs
from grr_response_server.databases import mem_clients
from grr_response_server.databases import mem_cronjobs
from grr_response_server.databases import mem_events
from grr_response_server.databases import mem_flows
from grr_response_server.databases import mem_foreman_rules
from grr_response_server.databases import mem_paths
from grr_response_server.databases import mem_users
from grr_response_server.rdfvalues import objects as rdf_objects
# pyformat: disable
class InMemoryDB(mem_blobs.InMemoryDBBlobsMixin,
                 mem_clients.InMemoryDBClientMixin,
                 mem_cronjobs.InMemoryDBCronJobMixin,
                 mem_events.InMemoryDBEventMixin,
                 mem_flows.InMemoryDBFlowMixin,
                 mem_foreman_rules.InMemoryDBForemanRulesMixin,
                 mem_paths.InMemoryDBPathMixin,
                 mem_users.InMemoryDBUsersMixin,
                 db.Database):
  """An in memory database implementation used for testing."""
  # pyformat: enable

  def __init__(self):
    super(InMemoryDB, self).__init__()
    self._Init()
    self.lock = threading.RLock()

  def _Init(self):
    """Resets every in-memory store to its empty initial state.

    Called both from __init__ and from ClearTestDB so a test can wipe the
    database without recreating the object. Each attribute below backs one
    of the mixin classes in the MRO.

    Note: the original version assigned message_handler_requests,
    message_handler_leases, events, cronjobs, cronjob_leases and
    foreman_rules twice; the duplicates have been removed.
    """
    self.approvals_by_username = {}
    self.clients = {}
    self.client_messages = {}
    self.client_message_leases = {}
    self.crash_history = {}
    self.cronjob_leases = {}
    self.cronjobs = {}
    self.cronjob_runs = {}
    self.events = []
    self.foreman_rules = []
    self.keywords = {}
    self.labels = {}
    self.message_handler_leases = {}
    self.message_handler_requests = {}
    self.metadatas = {}
    self.notifications_by_username = {}
    self.startup_history = {}
    # TODO(hanuszczak): Consider changing this to nested dicts for improved
    # debugging experience.
    # Maps (client_id, path_type, components) to a path record.
    self.path_records = {}
    # Maps (client_id, path_type, path_id) to a blob record.
    self.blob_records = {}
    self.blobs = {}
    self.users = {}
    self.handler_thread = None
    self.handler_stop = True

  @utils.Synchronized
  def ClearTestDB(self):
    """Wipes all stored data; intended for use between test cases."""
    self._Init()

  def _AllPathIDs(self):
    """Returns the set of (client_id, path_type, path_id) of all path records."""
    result = set()
    for client_id, path_type, components in self.path_records:
      path_id = rdf_objects.PathID.FromComponents(components)
      result.add((client_id, path_type, path_id))
    return result

  def _ParseTimeRange(self, timerange):
    """Parses a timerange argument and always returns non-None timerange.

    A None timerange, or a None endpoint, is widened to the maximal range
    (epoch 0 .. sys.maxsize seconds).
    """
    if timerange is None:
      timerange = (None, None)
    from_time, to_time = timerange
    if not from_time:
      from_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(0)
    if not to_time:
      to_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(sys.maxsize)
    return (from_time, to_time)
| 31.643564 | 76 | 0.708385 |
import sys
import threading
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_server import db
from grr_response_server.databases import mem_blobs
from grr_response_server.databases import mem_clients
from grr_response_server.databases import mem_cronjobs
from grr_response_server.databases import mem_events
from grr_response_server.databases import mem_flows
from grr_response_server.databases import mem_foreman_rules
from grr_response_server.databases import mem_paths
from grr_response_server.databases import mem_users
from grr_response_server.rdfvalues import objects as rdf_objects
class InMemoryDB(mem_blobs.InMemoryDBBlobsMixin,
                 mem_clients.InMemoryDBClientMixin,
                 mem_cronjobs.InMemoryDBCronJobMixin,
                 mem_events.InMemoryDBEventMixin,
                 mem_flows.InMemoryDBFlowMixin,
                 mem_foreman_rules.InMemoryDBForemanRulesMixin,
                 mem_paths.InMemoryDBPathMixin,
                 mem_users.InMemoryDBUsersMixin,
                 db.Database):
  """An in-memory database implementation used for testing."""

  def __init__(self):
    super(InMemoryDB, self).__init__()
    self._Init()
    self.lock = threading.RLock()

  def _Init(self):
    """Resets every in-memory store to its empty initial state."""
    self.approvals_by_username = {}
    self.clients = {}
    self.client_messages = {}
    self.client_message_leases = {}
    self.crash_history = {}
    self.cronjob_leases = {}
    self.cronjobs = {}
    self.events = []
    self.foreman_rules = []
    self.keywords = {}
    self.labels = {}
    self.message_handler_leases = {}
    self.message_handler_requests = {}
    self.metadatas = {}
    self.notifications_by_username = {}
    self.startup_history = {}
    self.path_records = {}
    self.blob_records = {}
    # NOTE(review): the six assignments below duplicate earlier ones in this
    # method (message_handler_requests, message_handler_leases, events,
    # cronjobs, cronjob_leases, foreman_rules) — harmless but redundant.
    self.message_handler_requests = {}
    self.message_handler_leases = {}
    self.events = []
    self.cronjobs = {}
    self.cronjob_leases = {}
    self.cronjob_runs = {}
    self.foreman_rules = []
    self.blobs = {}
    self.users = {}
    self.handler_thread = None
    self.handler_stop = True

  @utils.Synchronized
  def ClearTestDB(self):
    """Wipes all stored data; intended for use between test cases."""
    self._Init()

  def _AllPathIDs(self):
    """Returns the set of (client_id, path_type, path_id) of all path records."""
    result = set()
    for client_id, path_type, components in self.path_records:
      path_id = rdf_objects.PathID.FromComponents(components)
      result.add((client_id, path_type, path_id))
    return result

  def _ParseTimeRange(self, timerange):
    """Parses a timerange argument and always returns non-None timerange."""
    if timerange is None:
      timerange = (None, None)
    from_time, to_time = timerange
    # None/falsy endpoints are widened to the maximal possible range.
    if not from_time:
      from_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(0)
    if not to_time:
      to_time = rdfvalue.RDFDatetime().FromSecondsSinceEpoch(sys.maxsize)
    return (from_time, to_time)
| true | true |
f726618245de60741ddd3c6b158e46d436440d0d | 751 | py | Python | example1.py | djinn/python-duckduckgo | e4bb5729cdf8c1e086226760af01e2c0c7dbb500 | [
"BSD-3-Clause"
] | 2 | 2015-02-19T10:41:31.000Z | 2021-11-12T11:42:48.000Z | example1.py | djinn/python-duckduckgo | e4bb5729cdf8c1e086226760af01e2c0c7dbb500 | [
"BSD-3-Clause"
] | null | null | null | example1.py | djinn/python-duckduckgo | e4bb5729cdf8c1e086226760af01e2c0c7dbb500 | [
"BSD-3-Clause"
] | null | null | null | from duckduckgo import query, Topic
from sys import argv
visited = []
def build_web_tree(qr, depth=0):
print ' '* depth * 4 + qr
ds = query(qr)
if depth == 2:
return
if ds.error_code != 0:
return
visited.append(qr)
if ds.related == []:
return
else:
for r in ds.related:
if isinstance(r, Topic) == True:
r_used = r.name.encode('ascii', 'ignore')
else:
r_used = r.text.encode('ascii', 'ignore').split('-')[0].strip()
try:
visited.index(r_used)
except:
build_web_tree(r_used, depth=depth+1)
if __name__ == '__main__':
    # Seed the tree with the query given on the command line.
    search_terms = ' '.join(argv[1:])
    build_web_tree(search_terms)
| 25.896552 | 80 | 0.50466 | from duckduckgo import query, Topic
from sys import argv
# Queries already printed, so the same topic is never expanded twice.
visited = []
def build_web_tree(qr, depth=0):
    """Recursively print a tree of DuckDuckGo related topics.

    Prints *qr* indented by 4 spaces per level, queries DuckDuckGo and
    recurses into unvisited related topics, stopping at depth 2 or on error.
    """
    print ' '* depth * 4 + qr
    ds = query(qr)
    if depth == 2:
        return
    if ds.error_code != 0:
        return
    visited.append(qr)
    if ds.related == []:
        return
    else:
        for r in ds.related:
            # Topics carry the title in .name; plain results embed it in
            # .text before a '-' separator.
            if isinstance(r, Topic) == True:
                r_used = r.name.encode('ascii', 'ignore')
            else:
                r_used = r.text.encode('ascii', 'ignore').split('-')[0].strip()
            try:
                # Used purely as a membership test: index() raises when
                # r_used is unvisited, and only then do we recurse.
                visited.index(r_used)
            # NOTE(review): bare except also catches KeyboardInterrupt;
            # `except ValueError` would be the targeted form.
            except:
                build_web_tree(r_used, depth=depth+1)
if __name__ == '__main__':
    # Seed the tree with the query given on the command line.
    build_web_tree(' '.join(argv[1:]))
| false | true |
f726620976060383077043d92b478dbeab78b397 | 571 | py | Python | migrations/versions/51387d8fda8d_add_default_value_to_is_invited.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | migrations/versions/51387d8fda8d_add_default_value_to_is_invited.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | migrations/versions/51387d8fda8d_add_default_value_to_is_invited.py | sicness9/BugHub | 2af45b0840757f7826927d4fefc0e626fef136e1 | [
"FTL"
] | null | null | null | """add default value to is_invited
Revision ID: 51387d8fda8d
Revises: 6779bebb64e6
Create Date: 2021-12-21 18:19:50.864781
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '51387d8fda8d'  # unique id of this migration
down_revision = '6779bebb64e6'  # parent migration in the chain
branch_labels = None  # no named branch for this revision
depends_on = None  # no cross-branch dependencies
def upgrade():
    """Apply this migration.

    Alembic generated no schema commands for this revision, so applying
    it is a no-op.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration.

    Alembic generated no schema commands for this revision, so reverting
    it is a no-op.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 19.689655 | 65 | 0.691769 | from alembic import op
import sqlalchemy as sa
# Revision identifiers, used by Alembic.
revision = '51387d8fda8d'  # unique id of this migration
down_revision = '6779bebb64e6'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
| true | true |
f7266308ffe97be95f1df1a47115c73c0ac247a1 | 6,815 | py | Python | main.py | drkostas/COSC525-Project2 | a33c786621e6047b0a586c7c3a3b5b85cb51fd6d | [
"Apache-2.0"
] | null | null | null | main.py | drkostas/COSC525-Project2 | a33c786621e6047b0a586c7c3a3b5b85cb51fd6d | [
"Apache-2.0"
] | null | null | null | main.py | drkostas/COSC525-Project2 | a33c786621e6047b0a586c7c3a3b5b85cb51fd6d | [
"Apache-2.0"
] | null | null | null | import traceback
import argparse
import numpy as np
from src import NeuralNetwork, generateExample, getTensorExample
from typing import *
def get_args() -> argparse.Namespace:
    """Build the command-line parser and parse sys.argv.

    Returns:
        argparse.Namespace: the parsed arguments (`dataset` attribute).
    """
    arg_parser = argparse.ArgumentParser(
        description='Project 2 for the Deep Learning class (COSC 525). '
                    'Involves the development of a Convolutional Neural Network.',
        add_help=False)
    # --dataset selects which generated example the network trains on.
    required = arg_parser.add_argument_group('Required Arguments')
    required.add_argument('-d', '--dataset', required=True,
                          help="The datasets to train the network on. "
                               "Options: [example1, example2, example3]")
    # -h is re-added manually because add_help=False suppressed the default.
    optional = arg_parser.add_argument_group('Optional Arguments')
    optional.add_argument("-h", "--help", action="help",
                          help="Show this help message and exit")
    return arg_parser.parse_args()
def _print_unit_params(label, weights, shape=None):
    """Print one kernel's/neuron's weights and bias.

    Args:
        label: human-readable prefix, e.g. '1st convolutional layer, 1st kernel'.
        weights: 1-D array whose last element is the bias term.
        shape: optional tuple; when given, the weight part is reshaped to it
            (used for convolutional kernels).
    """
    kernel = weights[:-1]
    if shape is not None:
        kernel = kernel.reshape(shape)
    print(f'{label} weights:')
    print(kernel)
    print(f'{label} bias:')
    print(np.array([weights[-1]]))


def main():
    """This is the main function of main.py.

    Builds the custom CNN for the selected example, runs one forward pass,
    trains for one step and prints the learned parameters.

    Example:
        python main.py --dataset example1
    """
    args = get_args()

    # Resolve the requested example and generate its inputs/targets/layer spec.
    dataset_type = args.dataset
    if dataset_type in ('example1', 'example2', 'example3'):
        example_num = int(dataset_type[-1])
        inputs, targets, layers = generateExample(example_num)
        getTensorExample(example_num)  # prints the TensorFlow reference run
    else:
        raise ValueError('Invalid dataset type')

    # ------- Start of Code ------- #
    # # Initialize the network # #
    netWork = NeuralNetwork(input_size=inputs.shape, loss_function="square_error",
                            learning_rate=100, input_channels=1)
    # Translate each layer spec into a layer of the custom network.
    for layer in layers:
        if layer['type'] == 'Conv':
            # Each kernel becomes one row: flattened channel weights followed
            # by the kernel's bias.
            weights = []
            for k_ind in range(layer['num_kernels']):
                kernels = [k_w.flatten() for k_w in layer['weights'][k_ind]]
                kernel_weights = np.concatenate((*kernels,
                                                 layer['biases'][k_ind]))
                weights.append(kernel_weights)
            weights = np.array(weights)
            netWork.addConvLayer(num_kernels=layer['num_kernels'],
                                 kernel_size=layer['kernel_size'],
                                 activation=layer['activation'],
                                 weights=weights)
        elif layer['type'] == 'Flat':
            netWork.addFlattenLayer()
        elif layer['type'] == 'MaxPool':
            netWork.addMaxPoolLayer(kernel_size=layer['kernel_size'])
        elif layer['type'] == 'Dense':
            # Dense weights: flattened matrix followed by the bias vector.
            weights = np.array([np.concatenate((layer['weights'].flatten(), layer['bias']))])
            netWork.addFCLayer(num_neurons=targets.shape[0],
                               activation=layer['activation'],
                               weights=weights)
        else:
            raise ValueError(f'Invalid layer type: {layer["type"]}')

    # # Train the network # #
    # First feed forward
    outputs = netWork.calculate(inputs=inputs)
    print("----------- Custom Model -----------")
    print(f"model output before:\n{outputs}")
    # Computed for inspection/parity with the reference run; values unused.
    loss_der = netWork.loss_derivative(outputs, targets)
    loss = netWork.calculate_loss(np.array([inputs]), targets)
    netWork.train(np.array([inputs]), targets)  # Train the network
    outputs = netWork.calculate(inputs=inputs)
    print(f"model output after: \n{outputs}")

    # Print the learned parameters. Which layers exist depends on the example;
    # the '2st' spelling is kept to match the original program output.
    if example_num == 1:
        _print_unit_params('1st convolutional layer, kernel',
                           netWork.layers[0].kernels[0][0][0].weights, (3, 3))
        _print_unit_params('fully connected layer',
                           netWork.layers[2].neurons[0].weights)
    elif example_num == 2:
        _print_unit_params('1st convolutional layer, 1st kernel',
                           netWork.layers[0].kernels[0][0][0].weights, (3, 3))
        _print_unit_params('1st convolutional layer, 2st kernel',
                           netWork.layers[0].kernels[1][0][0].weights, (3, 3))
        _print_unit_params('2nd convolutional layer, 1st kernel',
                           netWork.layers[1].kernels[0][0][0].weights, (2, 3, 3))
        _print_unit_params('fully connected layer',
                           netWork.layers[3].neurons[0].weights)
    elif example_num == 3:
        _print_unit_params('1st convolutional layer, 1st kernel',
                           netWork.layers[0].kernels[0][0][0].weights, (3, 3))
        _print_unit_params('1st convolutional layer, 2st kernel',
                           netWork.layers[0].kernels[1][0][0].weights, (3, 3))
        _print_unit_params('fully connected layer',
                           netWork.layers[3].neurons[0].weights)
    else:
        raise ValueError(f'Invalid example number: {example_num}')
if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        # Log the message plus full traceback, then re-raise. A bare `raise`
        # (instead of the original `raise e`) preserves the active exception
        # without adding this handler's frame to the chain.
        print(str(e) + '\n' + str(traceback.format_exc()))
        raise
# # First Layer (Convolutional)
# weights_L1 = np.array(
# [np.concatenate((l1k1.flatten(), l1b1)), np.concatenate((l1k2.flatten(), l1b2))])
# netWork.addConvLayer(num_kernels=2, kernel_size=3, activation="logistic", weights=weights_L1)
# # Second Layer (Convolutional)
# weights_L2 = np.array([np.concatenate((l2c1.flatten(), l2c2.flatten(), l2b))])
# netWork.addConvLayer(num_kernels=1, kernel_size=3, activation="logistic", weights=weights_L2)
# # Third Layer (Fully Connected)
# netWork.addFlattenLayer()
# weights_L3 = np.array([np.concatenate((l3.flatten(), l3b))])
# netWork.addFCLayer(num_neurons=1, activation="logistic", weights=weights_L3)
| 42.329193 | 101 | 0.612032 | import traceback
import argparse
import numpy as np
from src import NeuralNetwork, generateExample, getTensorExample
from typing import *
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='Project 2 for the Deep Learning class (COSC 525). '
'Involves the development of a Convolutional Neural Network.',
add_help=False)
required_args = parser.add_argument_group('Required Arguments')
required_args.add_argument('-d', '--dataset', required=True,
help="The datasets to train the network on. "
"Options: [example1, example2, example3]")
optional_args = parser.add_argument_group('Optional Arguments')
optional_args.add_argument("-h", "--help", action="help", help="Show this help message and exit")
return parser.parse_args()
def main():
args = get_args()
dataset_type = args.dataset
if dataset_type in ('example1', 'example2', 'example3'):
example_num = int(dataset_type[-1])
inputs, targets, layers = generateExample(example_num)
getTensorExample(example_num)
else:
raise ValueError('Invalid dataset type')
ut_size=inputs.shape, loss_function="square_error",
learning_rate=100, input_channels=1)
for layer in layers:
if layer['type'] == 'Conv':
weights = []
for k_ind in range(layer['num_kernels']):
kernels = [k_w.flatten() for k_w in layer['weights'][k_ind]]
kernel_weights = np.concatenate((*kernels,
layer['biases'][k_ind]))
weights.append(kernel_weights)
weights = np.array(weights)
netWork.addConvLayer(num_kernels=layer['num_kernels'],
kernel_size=layer['kernel_size'],
activation=layer['activation'],
weights=weights)
elif layer['type'] == 'Flat':
netWork.addFlattenLayer()
elif layer['type'] == 'MaxPool':
netWork.addMaxPoolLayer(kernel_size=layer['kernel_size'])
elif layer['type'] == 'Dense':
weights = np.array([np.concatenate((layer['weights'].flatten(), layer['bias']))])
netWork.addFCLayer(num_neurons=targets.shape[0],
activation=layer['activation'],
weights=weights)
else:
raise ValueError(f'Invalid layer type: {layer["type"]}')
.calculate(inputs=inputs)
print("----------- Custom Model -----------")
print(f"model output before:\n{outputs}")
loss_der = netWork.loss_derivative(outputs, targets)
loss = netWork.calculate_loss(np.array([inputs]), targets)
netWork.train(np.array([inputs]), targets)
outputs = netWork.calculate(inputs=inputs)
print(f"model output after: \n{outputs}")
if example_num == 1:
print('1st convolutional layer, kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[2].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[2].neurons[0].weights[-1]]))
elif example_num == 2:
print('1st convolutional layer, 1st kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
print('1st convolutional layer, 2st kernel weights:')
print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 2st kernel bias:')
print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
print('2nd convolutional layer, 1st kernel weights:')
print(netWork.layers[1].kernels[0][0][0].weights[:-1].reshape((2, 3, 3)))
print('2nd convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[1].kernels[0][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[3].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
elif example_num == 3:
print('1st convolutional layer, 1st kernel weights:')
print(netWork.layers[0].kernels[0][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 1st kernel bias:')
print(np.array([netWork.layers[0].kernels[0][0][0].weights[-1]]))
print('1st convolutional layer, 2st kernel weights:')
print(netWork.layers[0].kernels[1][0][0].weights[:-1].reshape((3, 3)))
print('1st convolutional layer, 2st kernel bias:')
print(np.array([netWork.layers[0].kernels[1][0][0].weights[-1]]))
print('fully connected layer weights:')
print(netWork.layers[3].neurons[0].weights[:-1])
print('fully connected layer bias:')
print(np.array([netWork.layers[3].neurons[0].weights[-1]]))
else:
raise ValueError(f'Invalid example number: {example_num}')
if __name__ == '__main__':
try:
main()
except Exception as e:
print(str(e) + '\n' + str(traceback.format_exc()))
raise e
| true | true |
f72665b02611c25f6224783cfd40d4362312e741 | 62,701 | py | Python | tests/job_metadata.py | fossabot/DIRBS-Core-1 | 70bf72e2e6dda6e0d7a20cf744300930d88ee70c | [
"PostgreSQL",
"Unlicense"
] | null | null | null | tests/job_metadata.py | fossabot/DIRBS-Core-1 | 70bf72e2e6dda6e0d7a20cf744300930d88ee70c | [
"PostgreSQL",
"Unlicense"
] | null | null | null | tests/job_metadata.py | fossabot/DIRBS-Core-1 | 70bf72e2e6dda6e0d7a20cf744300930d88ee70c | [
"PostgreSQL",
"Unlicense"
] | 3 | 2019-10-24T11:40:06.000Z | 2022-02-24T07:34:00.000Z | """
job_metadata api data import unit tests.
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import json
from flask import url_for
from _fixtures import * # noqa: F403, F401
from _helpers import job_metadata_importer
import dirbs.metadata as metadata
def test_classification_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    Verify that job_metadata returns a JSON containing job metadata
    for classification job.
    """
    # Metadata payload mimicking what dirbs-classify records for a run with
    # one simple (gsma_not_found) and one compound classification condition.
    extra_metadata = {'matched_imei_counts':
                      {'compound_dimension': 0,
                       'simple_dimension': 0},
                      'curr_date': None,
                      'conditions':
                          [{'dimensions':
                            [{'module': 'gsma_not_found'}],
                            'grace_period_days': 30,
                            'sticky': False,
                            'reason': 'Violated simple dimension',
                            'max_allowed_matching_ratio': 0.1,
                            'label': 'simple_dimension',
                            'blocking': True},
                           {'dimensions':
                            [{'module': 'stolen_list'},
                             {'invert': True,
                              'parameters':
                                  {'threshold': 3.1,
                                   'period_days': 30},
                              'module': 'duplicate_daily_avg'}],
                            'grace_period_days': 0,
                            'sticky': False,
                            'reason': 'Violated compound dimension',
                            'max_allowed_matching_ratio': 0.1,
                            'label': 'compound_dimension',
                            'blocking': True}]}  # noqa E127
    # Seed the job_metadata table with a single successful classification run.
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='', status='success',
                          extra_metadata=extra_metadata)
    if api_version == 'v1':
        # v1 returns a bare JSON list of matching job records.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   subcommand='',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        assert data['extra_metadata'] == extra_metadata
    else:  # job_metadata api version 2.0
        # v2 wraps the results in {'jobs': [...]} plus '_keys' pagination keys.
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   subcommand='',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        # Single matching job, so no previous/next pagination keys.
        assert data['_keys']['result_size'] == 1
        assert data['_keys']['previous_key'] == ''
        assert data['_keys']['next_key'] == ''
        assert data['jobs'][0]['command'] == 'dirbs-classify'
        assert data['jobs'][0]['run_id'] == 1
        assert data['jobs'][0]['subcommand'] == ''
        assert data['jobs'][0]['status'] == 'success'
        assert data['jobs'][0]['extra_metadata'] == extra_metadata
def test_prune_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    Verify that job_metadata returns a JSON containing job metadata
    for pruning triplets and classification_state job.
    """
    # Shared metadata payload used by both seeded prune runs.
    extra_metadata = {'rows_before': 0,
                      'retention_months': 6,
                      'curr_date': None,
                      'rows_after': 0}
    # Seed two successful dirbs-prune runs: one per prune subcommand.
    job_metadata_importer(db_conn=db_conn, command='dirbs-prune',
                          run_id=9, subcommand='triplets', status='success',
                          extra_metadata=extra_metadata)
    job_metadata_importer(db_conn=db_conn, command='dirbs-prune', run_id=10, subcommand='classification_state',
                          status='success', extra_metadata=extra_metadata)
    if api_version == 'v1':
        # v1 returns a bare JSON list; both prune runs should come back.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-prune',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        triplets_data = data[0]
        assert triplets_data['command'] == 'dirbs-prune'
        assert triplets_data['run_id'] == 9
        assert triplets_data['subcommand'] == 'triplets'
        assert triplets_data['status'] == 'success'
        assert triplets_data['extra_metadata'] == extra_metadata
        class_data = data[1]
        assert class_data['command'] == 'dirbs-prune'
        assert class_data['run_id'] == 10
        assert class_data['subcommand'] == 'classification_state'
        assert class_data['status'] == 'success'
        assert class_data['extra_metadata'] == extra_metadata
    else:  # job_metadata api version 2.0
        # v2 wraps the results in {'jobs': [...]} plus '_keys' pagination keys.
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-prune',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        triplets_data = data['jobs'][0]
        assert triplets_data['command'] == 'dirbs-prune'
        assert triplets_data['run_id'] == 9
        assert triplets_data['subcommand'] == 'triplets'
        assert triplets_data['status'] == 'success'
        assert triplets_data['extra_metadata'] == extra_metadata
        class_data = data['jobs'][1]
        assert class_data['command'] == 'dirbs-prune'
        assert class_data['run_id'] == 10
        assert class_data['subcommand'] == 'classification_state'
        assert class_data['status'] == 'success'
        assert class_data['extra_metadata'] == extra_metadata
        # Both runs fit in one page, so no previous/next pagination keys.
        assert data['_keys']['result_size'] == 2
        assert data['_keys']['previous_key'] == ''
        assert data['_keys']['next_key'] == ''
def test_operator_import_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    Verify that job_metadata returns a JSON containing job metadata
    for importing operator job.
    """
    # Metadata payload mimicking a failed dirbs-import operator run, including
    # the per-stage performance timings and validation-check configuration.
    # NOTE(review): the 'analyze_staging_end' timestamp below contains a stray
    # space ('01:05: 20...') — kept verbatim, as the test only round-trips it.
    extra_metadata = {'performance_timing':
                      {'init_staging_end': '2017-08-16T01:05:17.17081+00:00',
                       'init_staging_start': '2017-08-16T01:05:16.817426+00:00',
                       'extract_split_start': '2017-08-16T01:05:16.10788+00:00',
                       'prevalidate_upload_start': '2017-08-16T01:05:17.34236+00:00',
                       'analyze_staging_end': '2017-08-16T01:05: 20.807413+00:00',
                       'validation_binary_checks_end': '2017-08-16T01:05:25.565519+00:00',
                       'prevalidate_upload_end': '2017-08-16T01:05:20.125746+00:00',
                       'analyze_staging_start': '2017-08-16T01:05:20.296765+00:00',
                       'preprocess_start': '2017-08-16T01:05:16.474489+00:00',
                       'extract_split_end': '2017-08-16T01:05:16.301238+00:00',
                       'preprocess_end': '2017-08-16T01:05:16.645968+00:00',
                       'postprocess_staging_end': '2017-08-16T01:05:24.531709+00:00',
                       'validation_threshold_checks_start': '2017-08-16T01:05:25.741384+00:00',
                       'validation_binary_checks_start': '2017-08-16T01:05:24.705607+00:00',
                       'postprocess_staging_start': '2017-08-16T01:05:20.978153+00:00'},
                      'home_threshold': 0.2,
                      'cc': ['22%'],
                      'clean_threshold': 0.05,
                      'null_msisdn_threshold': 0.05,
                      'perform_leading_zero_check': True,
                      'perform_file_daterange_check': True,
                      'perform_null_check': True,
                      'perform_clean_check': True,
                      'perform_historic_imsi_check': True,
                      'perform_null_imsi_check': True,
                      'perform_null_msisdn_check': True,
                      'perform_historic_msisdn_check': True,
                      'operator_id':
                          'operator1',
                      'input_file':
                          '/workspace/data/operator1_home_'
                          'check_exceeded_20160701_20160731.zip',
                      'batch_size':
                          1000000, 'mcc_mnc_pairs':
                          [{'mnc': '01', 'mcc': '111'}],
                      'perform_historic_imei_check': True,
                      'null_imsi_threshold': 0.05,
                      'perform_rat_import': False,
                      'perform_null_imei_check': True,
                      'perform_home_check': True,
                      'null_imei_threshold': 0.05,
                      'region_threshold': 0.1,
                      'perform_region_check': False}  # noqa E127
    # Seed the job_metadata table with a single failed operator import.
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='operator',
                          status='error', extra_metadata=extra_metadata)
    if api_version == 'v1':
        # show_details=False: extra_metadata is omitted from the response.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'operator'
        assert data['status'] == 'error'
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        # NOTE(review): leftover debug output; kept to preserve behavior.
        print(data['command'])
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'operator'
        assert data['status'] == 'error'
def test_stolen_import_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata
    for importing stolen_list job.
    """
    # Realistic metadata for a completed stolen_list import: per-stage
    # performance timings plus input/output record statistics.
    extra_metadata = {'output_stats':
                      {'num_records_updated': 20,
                       'num_records': 20,
                       'num_records_inserted': 20},
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:42:30.695313+00:00',
                       'analyze_staging_end': '2017-08-22T01:42:34.286028+00:00',
                       'validation_threshold_checks_end': '2017-08-22T01:42:36.380127+00:00',
                       'analyze_staging_start': '2017-08-22T01:42:33.78045+00:00',
                       'preprocess_start': '2017-08-22T01:42:30.023073+00:00',
                       'copy_from_staging_end': '2017-08-22T01:42:38.553902+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:42:35.537445+00:00',
                       'validation_threshold_checks_start': '2017-08-22T01:42:36.208775+00:00',
                       'output_stats_start': '2017-08-22T01:42:38.721215+00:00',
                       'validation_historical_checks_end': '2017-08-22T01:42:37.049421+00:00',
                       'extract_split_end': '2017-08-22T01:42:29.855514+00:00',
                       'copy_from_staging_start': '2017-08-22T01:42:37.38383+00:00',
                       'extract_split_start': '2017-08-22T01:42:29.674068+00:00',
                       'validation_historical_checks_start': '2017-08-22T01:42:36.547579+00:00',
                       'preprocess_end': '2017-08-22T01:42:30.191182+00:00',
                       'postprocess_staging_end': '2017-08-22T01:42:35.370151+00:00',
                       'init_staging_start': '2017-08-22T01:42:30.358302+00:00',
                       'validation_binary_checks_end': '2017-08-22T01:42:36.041237+00:00',
                       'output_stats_end': '2017-08-22T01:42:39.225688+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:42:33.612194+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:42:30.862953+00:00',
                       'postprocess_staging_start': '2017-08-22T01:42:34.458834+00:00'},
                      'perform_historic_check': True,
                      'input_file':
                      '/workspace/data/sample_import_list.zip',
                      'batch_size': 1000000,
                      'input_stats':
                      {'num_records_valid': 20,
                       'num_records': 20,
                       'num_records_invalid': 0}}  # noqa E127
    # Seed the job_metadata table directly, bypassing the CLI.
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='stolen_list',
                          status='success', extra_metadata=extra_metadata)
    # v1 returns a bare JSON list; v2 wraps results in a 'jobs' envelope.
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'stolen_list'
        assert data['status'] == 'success'
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'stolen_list'
        assert data['status'] == 'success'
def test_pairing_import_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata
    for importing pairing_list job.
    """
    # Metadata for a pairing_list import that ended in error: only the stages
    # reached before the failure appear in performance_timing.
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:41:59.925562+00:00',
                       'init_staging_start': '2017-08-22T01:41:59.588253+00:00',
                       'extract_split_start': '2017-08-22T01:41:58.901343+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:42:00.093237+00:00',
                       'analyze_staging_end': '2017-08-22T01:42:03.478264+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:42:02.788264+00:00',
                       'analyze_staging_start': '2017-08-22T01:42:02.956404+00:00',
                       'preprocess_start': '2017-08-22T01:41:59.252764+00:00',
                       'extract_split_end': '2017-08-22T01:41:59.08492+00:00',
                       'preprocess_end': '2017-08-22T01:41:59.421052+00:00',
                       'postprocess_staging_end': '2017-08-22T01:42:04.520465+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:42:04.68826+00:00',
                       'postprocess_staging_start': '2017-08-22T01:42:03.646232+00:00'},
                      'batch_size': 1000000,
                      'input_file':
                      '/workspace/data/duplicate.zip'}  # noqa E127
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='pairing_list',
                          status='error', extra_metadata=extra_metadata)
    # v1 returns a bare JSON list; v2 wraps results in a 'jobs' envelope.
    # extra_metadata is echoed back because show_details defaults to true.
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version)))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'pairing_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version)))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'pairing_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
def test_gsma_import_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata
    for importing GSMA TAC data job.
    """
    # Metadata for a successful GSMA TAC import: note input_stats records
    # 3 invalid rows out of 7, with 4 valid rows making it into output_stats.
    extra_metadata = {'output_stats':
                      {'num_records_updated': 4,
                       'num_records': 4,
                       'num_records_inserted': 4},
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:56:25.875908+00:00',
                       'analyze_staging_end': '2017-08-22T01:56:29.386537+00:00',
                       'validation_threshold_checks_end': '2017-08-22T01:56:31.231756+00:00',
                       'analyze_staging_start': '2017-08-22T01:56:28.886486+00:00',
                       'preprocess_start': '2017-08-22T01:56:25.192466+00:00',
                       'copy_from_staging_end': '2017-08-22T01:56:33.42097+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:56:30.725186+00:00',
                       'validation_threshold_checks_start': '2017-08-22T01:56:31.063007+00:00',
                       'output_stats_start': '2017-08-22T01:56:33.589227+00:00',
                       'validation_historical_checks_end': '2017-08-22T01:56:31.915001+00:00',
                       'extract_split_end': '2017-08-22T01:56:25.023654+00:00',
                       'copy_from_staging_start': '2017-08-22T01:56:32.250857+00:00',
                       'extract_split_start': '2017-08-22T01:56:24.844737+00:00',
                       'validation_historical_checks_start': '2017-08-22T01:56:31.400242+00:00',
                       'preprocess_end': '2017-08-22T01:56:25.368138+00:00',
                       'postprocess_staging_end': '2017-08-22T01:56:30.557336+00:00',
                       'init_staging_start': '2017-08-22T01:56:25.536523+00:00',
                       'validation_binary_checks_end': '2017-08-22T01:56:30.895228+00:00',
                       'output_stats_end': '2017-08-22T01:56:34.097277+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:56:28.718421+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:56:26.043878+00:00',
                       'postprocess_staging_start': '2017-08-22T01:56:29.554878+00:00'},
                      'perform_historic_check': True,
                      'input_file':
                      '/workspace/data/duplicate_gsma.zip',
                      'batch_size': 1000000,
                      'input_stats':
                      {'num_records_valid': 4,
                       'num_records': 7,
                       'num_records_invalid': 3}}  # noqa E127
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='gsma_tac',
                          status='success', extra_metadata=extra_metadata)
    # show_details=False, so extra_metadata is deliberately not asserted here.
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'gsma_tac'
        assert data['status'] == 'success'
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'gsma_tac'
        assert data['status'] == 'success'
def test_registration_import_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata
    for importing registration_list job.
    """
    # Metadata for a registration_list import that failed early: only the
    # first few pipeline stages are present in performance_timing.
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:43:21.386498+00:00',
                       'init_staging_start': '2017-08-22T01:43:21.035571+00:00',
                       'extract_split_start': '2017-08-22T01:43:20.35253+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:43:21.554073+00:00',
                       'preprocess_start': '2017-08-22T01:43:20.699411+00:00',
                       'extract_split_end': '2017-08-22T01:43:20.531135+00:00',
                       'preprocess_end': '2017-08-22T01:43:20.867795+00:00'},
                      'batch_size': 1000000,
                      'input_file':
                      '/workspace/data/'
                      'sample_import_list.zip'}  # noqa E127
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='registration_list',
                          status='error', extra_metadata=extra_metadata)
    # v1 returns a bare JSON list; v2 wraps results in a 'jobs' envelope.
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'registration_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'registration_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
def test_golden_import_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata
    for importing golden_list job.
    """
    # Metadata for a golden_list import that failed during the early stages;
    # 'pre_hashed' is specific to golden_list imports.
    extra_metadata = {'performance_timing':
                      {'init_staging_end': '2017-08-22T01:43:05.017337+00:00',
                       'init_staging_start': '2017-08-22T01:43:04.681766+00:00',
                       'extract_split_start': '2017-08-22T01:43:03.993331+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:43:05.18436+00:00',
                       'preprocess_start': '2017-08-22T01:43:04.337401+00:00',
                       'extract_split_end': '2017-08-22T01:43:04.17081+00:00',
                       'preprocess_end': '2017-08-22T01:43:04.504815+00:00'},
                      'perform_historic_check': True,
                      'pre_hashed': False,
                      'input_file':
                      '/workspace/data/sample_import_list.zip',
                      'batch_size': 1000000}  # noqa E127
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='golden_list',
                          status='error', extra_metadata=extra_metadata)
    # show_details=True, so the full extra_metadata must be echoed back.
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'golden_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'golden_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
def test_db_schema_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    Check that the job_metadata API reports a stored 'dirbs-db upgrade' job,
    including an empty extra_metadata payload.
    """
    # Seed a successful schema-upgrade job directly into the metadata table.
    job_metadata_importer(db_conn=db_conn, command='dirbs-db', run_id=1, subcommand='upgrade',
                          status='success')
    # Endpoint name and response envelope differ between API versions:
    # v1 returns a bare list, v2 wraps the results under a 'jobs' key.
    if api_version == 'v1':
        response = flask_app.get(url_for(api_version + '.job_metadata_api', show_details=True))
        assert response.status_code == 200
        job = json.loads(response.data.decode('utf-8'))[0]
    else:  # api version 2.0
        response = flask_app.get(url_for(api_version + '.job_metadata_get_api', show_details=True))
        assert response.status_code == 200
        job = json.loads(response.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-db'
    assert job['run_id'] == 1
    assert job['subcommand'] == 'upgrade'
    assert job['status'] == 'success'
    assert job['extra_metadata'] == {}
def test_list_gen_schema_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing list generation metadata.
    """
    # Metadata for a successful dirbs-listgen run: one blacklist plus
    # per-operator notification and exception lists (each with file checksums
    # and record counts), and the blocking conditions that were in force.
    extra_metadata = {'blacklist':
                      {'file_size_bytes': 25,
                       'md5sum': 'd623e56b7c73d27fc7ce68e3dfc6e448',
                       'num_records': 0,
                       'filename': '/workspace/data/20170822_021142/blacklist.csv'},
                      'notification_lists':
                      [{'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator1.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator2.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator3.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator4.csv'}],
                      'curr_date': None,
                      'exception_lists':
                      [{'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator1.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator2.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator3.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator4.csv'}],
                      'blocking_conditions':
                      [{'dimensions':
                        [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'simple_dimension',
                        'blocking': True},
                       {'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'parameters':
                          {'threshold': 3.1,
                           'period_days': 30},
                          'module': 'duplicate_daily_avg'}],
                        'grace_period_days': 0,
                        'sticky': False,
                        'reason': 'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'compound_dimension',
                        'blocking': True}]}  # noqa E127
    # dirbs-listgen has no subcommand, hence the empty string.
    job_metadata_importer(db_conn=db_conn, command='dirbs-listgen', run_id=1, subcommand='',
                          status='success', extra_metadata=extra_metadata)
    # show_details=False, so extra_metadata is deliberately not asserted here.
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-listgen'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-listgen'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
def test_report_schema_json_api(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    Check that the job_metadata API reports a stored dirbs-report run,
    echoing back its extra_metadata payload unchanged.
    """
    extra_metadata = {
        'refreshed_data': True,
        'month': 2,
        'output_dir': '/workspace/data',
        'year': 2016,
    }
    # dirbs-report has no subcommand; the run is stored with status 'error'.
    job_metadata_importer(db_conn=db_conn, command='dirbs-report', run_id=1, subcommand='',
                          status='error', extra_metadata=extra_metadata)
    # Endpoint name and response envelope differ between API versions.
    if api_version == 'v1':
        response = flask_app.get(url_for(api_version + '.job_metadata_api'))
        assert response.status_code == 200
        job = json.loads(response.data.decode('utf-8'))[0]
    else:  # api version 2.0
        response = flask_app.get(url_for(api_version + '.job_metadata_get_api'))
        assert response.status_code == 200
        job = json.loads(response.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-report'
    assert job['run_id'] == 1
    assert job['subcommand'] == ''
    assert job['status'] == 'error'
    assert job['extra_metadata'] == extra_metadata
def test_job_metadata_bad_pos_int_params(flask_app, db_conn, api_version):
    """Test Depot ID unknown yet.
    Verify that job_metadata API returns a 400 status for not positive integer run_id or max_result,
    """
    # NOTE(review): v1 exercises both run_id and max_results validation;
    # the v2 branch below only re-checks run_id (v2 paginates via offset/limit).
    if api_version == 'v1':
        # not numeric run_id
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id='aaa',
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'run_id\':\'aaa\' argument format. Accepts only integer' in rv.data
        # not positive run_id
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=-1,
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-1\' must be greater than 0' in rv.data
        # not numeric max_result
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results='a',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'max_results\':\'a\' argument format. Accepts only integer' in rv.data
        # not positive max_result
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=0,
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'max_results\':\'0\' must be greater than 0' in rv.data
        # list of max_result (will take just the first elem of the list)
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=[1, -2],
                                   show_details=False))
        assert rv.status_code == 200
        # set max_result to 1 and check that only one record is returned
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                              status='success')
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   run_id=1,
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False,
                                   max_results=1))
        assert rv.status_code == 200
        assert len(json.loads(rv.data.decode('utf-8'))) == 1
    else:  # api version 2.0
        # not numeric run_id
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id='aaa',
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'run_id\':\'aaa\' argument format. Accepts only integer' in rv.data
        # not positive run_id
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=-1,
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-1\' must be greater than 0' in rv.data
        # set max_result to 1 and check that only one record is returned
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                              status='success')
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   run_id=1,
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False,
                                   max_results=1))
        assert rv.status_code == 200
        assert len(json.loads(rv.data.decode('utf-8'))['jobs']) == 1
def test_job_metadata_bad_params(flask_app, api_version):
    """Test Depot ID unknown yet.
    Verify that job_metadata API returns a 400 status for unknown status or not boolean show_details.
    """
    # Both API versions share the same validation rules; only the endpoint
    # name differs ('job_metadata_api' for v1 vs 'job_metadata_get_api' for v2).
    if api_version == 'v1':
        # unknown status
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), status='unknown'))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # list of status containing an unknown status
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), status=['error', 'unknown']))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # not boolean show_details
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   show_details='not_boolean'))
        assert rv.status_code == 400
        assert b'Bad \'show_details\':\'not_boolean\' argument format. ' \
               b'Accepts only one of [\'0\', \'1\', \'true\', \'false\']' in rv.data
    else:  # api version 2.0
        # unknown status
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), status='unknown'))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # list of status containing an unknown status
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), status=['error', 'unknown']))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # not boolean show_details
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   show_details='not_boolean'))
        assert rv.status_code == 400
        assert b'Bad \'show_details\':\'not_boolean\' argument format. ' \
               b'Accepts only one of [\'0\', \'1\', \'true\', \'false\']' in rv.data
def test_json_show_details(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata
    with extra information if show_details is set to true.
    """
    # Classification-run metadata: per-condition matched IMEI counts plus the
    # condition configuration that was in force during the run.
    extra_metadata = {'matched_imei_counts':
                      {'compound_dimension': 0,
                       'simple_dimension': 0},  # noqa E127
                      'conditions':
                      [{'label': 'simple_dimension',
                        'blocking': True,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'dimensions':
                        [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30},
                       {'label': 'compound_dimension',
                        'blocking': True,
                        'sticky': False,
                        'reason':
                        'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'module': 'duplicate_daily_avg',
                          'parameters':
                          {'period_days': 30,
                           'threshold': 3.1}}],
                        'grace_period_days': 0}],
                      'curr_date': None}
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata=extra_metadata)
    if api_version == 'v1':
        # Step 1 show_details=True
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        assert data['extra_metadata'] == extra_metadata
        # Step 2 show_details=False
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=10,
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        # With show_details=False the key must be absent, not merely empty.
        assert 'extra_metadata' not in data
    else:  # api version 2.0
        # Step 1 show_details=True
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        assert data['extra_metadata'] == extra_metadata
        # Step 2 show_details=False
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=10,
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        # With show_details=False the key must be absent, not merely empty.
        assert 'extra_metadata' not in data
def test_json_no_record_for_get_params(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    The API must return an empty result set when the filter parameters are
    well formatted but match nothing stored in the job_metadata table.
    """
    # Only run_id=1 exists; the request below filters on run_id=2.
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata={'metadata': 'metadata'})
    query_params = dict(command='dirbs-classify',
                        run_id=2,
                        db_user='test-user',
                        status='success',
                        max_results=10,
                        show_details=True)
    if api_version == 'v1':
        response = flask_app.get(url_for(api_version + '.job_metadata_api', **query_params))
        assert response.status_code == 200
        assert json.loads(response.data.decode('utf-8')) == []
    else:  # api version 2.0
        response = flask_app.get(url_for(api_version + '.job_metadata_get_api', **query_params))
        assert response.status_code == 200
        assert json.loads(response.data.decode('utf-8'))['jobs'] == []
def test_json_unknown_command_param(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    A request naming a command outside the known dirbs command set must be
    rejected with HTTP 400 and a descriptive error message.
    """
    # Same error text for both versions; double-quoted form is byte-identical
    # to the escaped single-quote original.
    expected_error = (b"Bad 'command':'dirbs-unknown' argument format. "
                      b"Accepts only one of ['dirbs-catalog', 'dirbs-classify', "
                      b"'dirbs-db', 'dirbs-import', 'dirbs-listgen', 'dirbs-prune', 'dirbs-report']")
    if api_version == 'v1':
        response = flask_app.get(url_for(api_version + '.job_metadata_api',
                                         command='dirbs-unknown',
                                         run_id=2,
                                         db_user='test-user',
                                         status='success',
                                         max_results=10,
                                         show_details=True))
    else:  # api version 2.0 (no max_results parameter in this request)
        response = flask_app.get(url_for(api_version + '.job_metadata_get_api',
                                         command='dirbs-unknown',
                                         run_id=2,
                                         db_user='test-user',
                                         status='success',
                                         show_details=True))
    assert response.status_code == 400
    assert expected_error in response.data
def test_json_multiple_values_same_param(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify that job_metadata returns a JSON containing job metadata if get params
    consists of a list of values.
    """
    # Step 1 list of valid params: run_id=[1,2]; subcommand=['upgrade', 'operator']
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                          status='success')
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                          status='success')
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   run_id=[1, 2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False))
        assert rv.status_code == 200
        # Both stored jobs must be returned, in run_id order.
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['subcommand'] == 'sub_one'
        assert data['run_id'] == 1
        data = json.loads(rv.data.decode('utf-8'))[1]
        assert data['run_id'] == 2
        assert data['subcommand'] == 'sub_two'
        # Step 2 list with invalid params: run_id=[1,-2];
        # a single bad value in the list must fail the whole request.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=[1, -2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   status=['success', 'error'],
                                   max_results=10,
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-2\' must be greater than 0' in rv.data
    else:  # api version 2.0
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   run_id=[1, 2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False))
        assert rv.status_code == 200
        # Both stored jobs must be returned, in run_id order.
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['subcommand'] == 'sub_one'
        assert data['run_id'] == 1
        data = json.loads(rv.data.decode('utf-8'))['jobs'][1]
        assert data['run_id'] == 2
        assert data['subcommand'] == 'sub_two'
        # Step 2 list with invalid params: run_id=[1,-2];
        # a single bad value in the list must fail the whole request.
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=[1, -2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   status=['success', 'error'],
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-2\' must be greater than 0' in rv.data
def test_json_no_run_id_param(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    An empty run_id list must be ignored rather than used as a filter, so
    the single stored job is still returned.
    """
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success')
    # run_id=[] serializes to no run_id query arg at all.
    if api_version == 'v1':
        response = flask_app.get(url_for(api_version + '.job_metadata_api',
                                         run_id=[],
                                         show_details=False))
        assert response.status_code == 200
        job = json.loads(response.data.decode('utf-8'))[0]
    else:  # api version 2.0
        response = flask_app.get(url_for(api_version + '.job_metadata_get_api',
                                         run_id=[],
                                         show_details=False))
        assert response.status_code == 200
        job = json.loads(response.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-classify'
    assert job['run_id'] == 1
    assert job['subcommand'] == ''
    assert job['status'] == 'success'
def test_default_params(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    With no query parameters at all, the API must return every stored job —
    here the single seeded dirbs-classify run, with empty extra_metadata.
    """
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success')
    if api_version == 'v1':
        response = flask_app.get(url_for(api_version + '.job_metadata_api'))
        assert response.status_code == 200
        job = json.loads(response.data.decode('utf-8'))[0]
    else:  # api version 2.0
        response = flask_app.get(url_for(api_version + '.job_metadata_get_api'))
        assert response.status_code == 200
        job = json.loads(response.data.decode('utf-8'))['jobs'][0]
    assert job['command'] == 'dirbs-classify'
    assert job['run_id'] == 1
    assert job['subcommand'] == ''
    assert job['status'] == 'success'
    assert job['extra_metadata'] == {}
def test_method_delete_not_allowed(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    The job_metadata API must reject HTTP DELETE with 405 METHOD NOT ALLOWED.
    """
    endpoint = '.job_metadata_api' if api_version == 'v1' else '.job_metadata_get_api'
    response = flask_app.delete(url_for(api_version + endpoint))
    assert response.status_code == 405
    assert b'Method Not Allowed' in response.data
def test_method_post_not_allowed(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.
    Verify the job_metadata API does not support HTTP POST and returns HTTP 405 METHOD NOT ALLOWED.
    """
    # Bug fix: this test previously issued DELETE requests (copy/paste from
    # test_method_delete_not_allowed), so HTTP POST was never exercised.
    # Use flask_app.post so the test matches its stated intent.
    if api_version == 'v1':
        rv = flask_app.post(url_for('{0}.job_metadata_api'.format(api_version)))
        assert rv.status_code == 405
        assert b'Method Not Allowed' in rv.data
    else:  # api version 2.0
        rv = flask_app.post(url_for('{0}.job_metadata_get_api'.format(api_version)))
        assert rv.status_code == 405
        assert b'Method Not Allowed' in rv.data
def test_method_put_not_allowed(flask_app, db_conn, api_version):
    """Test Depot ID not known yet.

    Verify the job_metadata API does not support HTTP PUT and returns HTTP 405 METHOD NOT ALLOWED.
    """
    # Bug fix: this test previously issued DELETE requests (copy-paste from the
    # DELETE test), so the PUT method was never actually exercised.
    if api_version == 'v1':
        rv = flask_app.put(url_for('{0}.job_metadata_api'.format(api_version)))
        assert rv.status_code == 405
        assert b'Method Not Allowed' in rv.data
    else:  # api version 2.0
        rv = flask_app.put(url_for('{0}.job_metadata_get_api'.format(api_version)))
        assert rv.status_code == 405
        assert b'Method Not Allowed' in rv.data
def test_job_metadata_most_recent_successful_job_start_time(db_conn):
    """Smoke-test metadata.most_recent_job_start_time_by_command for a successful job.

    Inserts one successful 'dirbs-import pairing-list' job and calls the lookup
    with successful_only=True; the return value is not asserted on here.
    """
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing': {}}  # noqa E127
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='pairing-list',
                          status='success', extra_metadata=extra_metadata)
    # Only checks that the call completes without raising.
    metadata.most_recent_job_start_time_by_command(db_conn, 'dirbs-import', subcommand='pairing-list',
                                                   successful_only=True)
def test_job_metadata_v2_pagination(flask_app, db_conn):
    """Test Depot ID not known yet.

    Verify that results returned by metadata api version 2.0 are paginated.
    """
    # insert 20 records (2 per loop iteration: one dirbs-classify, one dirbs-prune)
    for i in range(10):
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=i, subcommand='',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-prune',
                              run_id=i, subcommand='triplets', status='success')
    # test all records are fetched when no pagination params are given
    rv = flask_app.get(url_for('v2.job_metadata_get_api'))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == ''
    assert len(data['jobs']) == 20
    # test pagination, start from 1st record and 5 records per page
    offset = 1
    limit = 5
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    # second page: previous/next keys now point both ways
    next_offset = offset + limit
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=next_offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == '?offset={0}&limit={1}'.format(next_offset - limit, limit)
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(next_offset + limit, limit)
    # third page
    next_offset = next_offset + limit
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=next_offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    # NOTE(review): previous_key here expects limit * 2 — presumably mirrors the
    # API's previous-page computation near the range start; confirm against the
    # v2 pagination implementation.
    assert data['_keys']['previous_key'] == '?offset={0}&limit={1}'.format(next_offset - limit, limit * 2)
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(next_offset + limit, limit)
    # pagination with sorting order ascending based on run_id
    offset = 1
    limit = 5
    order = 'Ascending'
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit, order=order))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    assert data['jobs'][0]['run_id'] <= data['jobs'][1]['run_id']
    assert data['jobs'][1]['run_id'] <= data['jobs'][2]['run_id']
    assert data['jobs'][2]['run_id'] <= data['jobs'][3]['run_id']
    assert data['jobs'][3]['run_id'] <= data['jobs'][4]['run_id']
    # order Descending
    order = 'Descending'
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit, order=order))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    assert data['jobs'][0]['run_id'] >= data['jobs'][1]['run_id']
    assert data['jobs'][1]['run_id'] >= data['jobs'][2]['run_id']
    assert data['jobs'][2]['run_id'] >= data['jobs'][3]['run_id']
    assert data['jobs'][3]['run_id'] >= data['jobs'][4]['run_id']
| 49.023456 | 120 | 0.54409 |
import json
from flask import url_for
from _fixtures import *
from _helpers import job_metadata_importer
import dirbs.metadata as metadata
def test_classification_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored dirbs-classify job with its full extra_metadata."""
    extra_metadata = {'matched_imei_counts':
                      {'compound_dimension': 0,
                       'simple_dimension': 0},
                      'curr_date': None,
                      'conditions':
                      [{'dimensions':
                        [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'simple_dimension',
                        'blocking': True},
                       {'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'parameters':
                          {'threshold': 3.1,
                           'period_days': 30},
                          'module': 'duplicate_daily_avg'}],
                        'grace_period_days': 0,
                        'sticky': False,
                        'reason': 'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'compound_dimension',
                        'blocking': True}]}
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='', status='success',
                          extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   subcommand='',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-classify'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
        assert data['extra_metadata'] == extra_metadata
    else:
        # v2 wraps results in 'jobs' and adds pagination '_keys'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   subcommand='',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        assert data['_keys']['result_size'] == 1
        assert data['_keys']['previous_key'] == ''
        assert data['_keys']['next_key'] == ''
        assert data['jobs'][0]['command'] == 'dirbs-classify'
        assert data['jobs'][0]['run_id'] == 1
        assert data['jobs'][0]['subcommand'] == ''
        assert data['jobs'][0]['status'] == 'success'
        assert data['jobs'][0]['extra_metadata'] == extra_metadata
def test_prune_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return two stored dirbs-prune jobs (triplets, classification_state)."""
    extra_metadata = {'rows_before': 0,
                      'retention_months': 6,
                      'curr_date': None,
                      'rows_after': 0}
    job_metadata_importer(db_conn=db_conn, command='dirbs-prune',
                          run_id=9, subcommand='triplets', status='success',
                          extra_metadata=extra_metadata)
    job_metadata_importer(db_conn=db_conn, command='dirbs-prune', run_id=10, subcommand='classification_state',
                          status='success', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-prune',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        triplets_data = data[0]
        assert triplets_data['command'] == 'dirbs-prune'
        assert triplets_data['run_id'] == 9
        assert triplets_data['subcommand'] == 'triplets'
        assert triplets_data['status'] == 'success'
        assert triplets_data['extra_metadata'] == extra_metadata
        class_data = data[1]
        assert class_data['command'] == 'dirbs-prune'
        assert class_data['run_id'] == 10
        assert class_data['subcommand'] == 'classification_state'
        assert class_data['status'] == 'success'
        assert class_data['extra_metadata'] == extra_metadata
    else:
        # v2 wraps results in 'jobs' and adds pagination '_keys'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-prune',
                                   status='success',
                                   show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))
        triplets_data = data['jobs'][0]
        assert triplets_data['command'] == 'dirbs-prune'
        assert triplets_data['run_id'] == 9
        assert triplets_data['subcommand'] == 'triplets'
        assert triplets_data['status'] == 'success'
        assert triplets_data['extra_metadata'] == extra_metadata
        class_data = data['jobs'][1]
        assert class_data['command'] == 'dirbs-prune'
        assert class_data['run_id'] == 10
        assert class_data['subcommand'] == 'classification_state'
        assert class_data['status'] == 'success'
        assert class_data['extra_metadata'] == extra_metadata
        assert data['_keys']['result_size'] == 2
        assert data['_keys']['previous_key'] == ''
        assert data['_keys']['next_key'] == ''
def test_operator_import_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored errored 'dirbs-import operator' job (details hidden)."""
    extra_metadata = {'performance_timing':
                      {'init_staging_end': '2017-08-16T01:05:17.17081+00:00',
                       'init_staging_start': '2017-08-16T01:05:16.817426+00:00',
                       'extract_split_start': '2017-08-16T01:05:16.10788+00:00',
                       'prevalidate_upload_start': '2017-08-16T01:05:17.34236+00:00',
                       'analyze_staging_end': '2017-08-16T01:05: 20.807413+00:00',
                       'validation_binary_checks_end': '2017-08-16T01:05:25.565519+00:00',
                       'prevalidate_upload_end': '2017-08-16T01:05:20.125746+00:00',
                       'analyze_staging_start': '2017-08-16T01:05:20.296765+00:00',
                       'preprocess_start': '2017-08-16T01:05:16.474489+00:00',
                       'extract_split_end': '2017-08-16T01:05:16.301238+00:00',
                       'preprocess_end': '2017-08-16T01:05:16.645968+00:00',
                       'postprocess_staging_end': '2017-08-16T01:05:24.531709+00:00',
                       'validation_threshold_checks_start': '2017-08-16T01:05:25.741384+00:00',
                       'validation_binary_checks_start': '2017-08-16T01:05:24.705607+00:00',
                       'postprocess_staging_start': '2017-08-16T01:05:20.978153+00:00'},
                      'home_threshold': 0.2,
                      'cc': ['22%'],
                      'clean_threshold': 0.05,
                      'null_msisdn_threshold': 0.05,
                      'perform_leading_zero_check': True,
                      'perform_file_daterange_check': True,
                      'perform_null_check': True,
                      'perform_clean_check': True,
                      'perform_historic_imsi_check': True,
                      'perform_null_imsi_check': True,
                      'perform_null_msisdn_check': True,
                      'perform_historic_msisdn_check': True,
                      'operator_id':
                          'operator1',
                      'input_file':
                          '/workspace/data/operator1_home_'
                          'check_exceeded_20160701_20160731.zip',
                      'batch_size':
                          1000000, 'mcc_mnc_pairs':
                          [{'mnc': '01', 'mcc': '111'}],
                      'perform_historic_imei_check': True,
                      'null_imsi_threshold': 0.05,
                      'perform_rat_import': False,
                      'perform_null_imei_check': True,
                      'perform_home_check': True,
                      'null_imei_threshold': 0.05,
                      'region_threshold': 0.1,
                      'perform_region_check': False}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='operator',
                          status='error', extra_metadata=extra_metadata)
    # show_details=False, so extra_metadata is not asserted on in the responses
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'operator'
        assert data['status'] == 'error'
    else:
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        print(data['command'])
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'operator'
        assert data['status'] == 'error'
def test_stolen_import_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored successful 'dirbs-import stolen_list' job."""
    extra_metadata = {'output_stats':
                      {'num_records_updated': 20,
                       'num_records': 20,
                       'num_records_inserted': 20},
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:42:30.695313+00:00',
                       'analyze_staging_end': '2017-08-22T01:42:34.286028+00:00',
                       'validation_threshold_checks_end': '2017-08-22T01:42:36.380127+00:00',
                       'analyze_staging_start': '2017-08-22T01:42:33.78045+00:00',
                       'preprocess_start': '2017-08-22T01:42:30.023073+00:00',
                       'copy_from_staging_end': '2017-08-22T01:42:38.553902+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:42:35.537445+00:00',
                       'validation_threshold_checks_start': '2017-08-22T01:42:36.208775+00:00',
                       'output_stats_start': '2017-08-22T01:42:38.721215+00:00',
                       'validation_historical_checks_end': '2017-08-22T01:42:37.049421+00:00',
                       'extract_split_end': '2017-08-22T01:42:29.855514+00:00',
                       'copy_from_staging_start': '2017-08-22T01:42:37.38383+00:00',
                       'extract_split_start': '2017-08-22T01:42:29.674068+00:00',
                       'validation_historical_checks_start': '2017-08-22T01:42:36.547579+00:00',
                       'preprocess_end': '2017-08-22T01:42:30.191182+00:00',
                       'postprocess_staging_end': '2017-08-22T01:42:35.370151+00:00',
                       'init_staging_start': '2017-08-22T01:42:30.358302+00:00',
                       'validation_binary_checks_end': '2017-08-22T01:42:36.041237+00:00',
                       'output_stats_end': '2017-08-22T01:42:39.225688+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:42:33.612194+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:42:30.862953+00:00',
                       'postprocess_staging_start': '2017-08-22T01:42:34.458834+00:00'},
                      'perform_historic_check': True,
                      'input_file':
                          '/workspace/data/sample_import_list.zip',
                      'batch_size': 1000000,
                      'input_stats':
                      {'num_records_valid': 20,
                       'num_records': 20,
                       'num_records_invalid': 0}}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='stolen_list',
                          status='success', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'stolen_list'
        assert data['status'] == 'success'
    else:
        # v2 wraps results in 'jobs'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'stolen_list'
        assert data['status'] == 'success'
def test_pairing_import_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored errored 'dirbs-import pairing_list' job with metadata."""
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:41:59.925562+00:00',
                       'init_staging_start': '2017-08-22T01:41:59.588253+00:00',
                       'extract_split_start': '2017-08-22T01:41:58.901343+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:42:00.093237+00:00',
                       'analyze_staging_end': '2017-08-22T01:42:03.478264+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:42:02.788264+00:00',
                       'analyze_staging_start': '2017-08-22T01:42:02.956404+00:00',
                       'preprocess_start': '2017-08-22T01:41:59.252764+00:00',
                       'extract_split_end': '2017-08-22T01:41:59.08492+00:00',
                       'preprocess_end': '2017-08-22T01:41:59.421052+00:00',
                       'postprocess_staging_end': '2017-08-22T01:42:04.520465+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:42:04.68826+00:00',
                       'postprocess_staging_start': '2017-08-22T01:42:03.646232+00:00'},
                      'batch_size': 1000000,
                      'input_file':
                          '/workspace/data/duplicate.zip'}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='pairing_list',
                          status='error', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version)))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'pairing_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
    else:
        # v2 wraps results in 'jobs'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version)))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'pairing_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
def test_gsma_import_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored successful 'dirbs-import gsma_tac' job (details hidden)."""
    extra_metadata = {'output_stats':
                      {'num_records_updated': 4,
                       'num_records': 4,
                       'num_records_inserted': 4},
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:56:25.875908+00:00',
                       'analyze_staging_end': '2017-08-22T01:56:29.386537+00:00',
                       'validation_threshold_checks_end': '2017-08-22T01:56:31.231756+00:00',
                       'analyze_staging_start': '2017-08-22T01:56:28.886486+00:00',
                       'preprocess_start': '2017-08-22T01:56:25.192466+00:00',
                       'copy_from_staging_end': '2017-08-22T01:56:33.42097+00:00',
                       'validation_binary_checks_start': '2017-08-22T01:56:30.725186+00:00',
                       'validation_threshold_checks_start': '2017-08-22T01:56:31.063007+00:00',
                       'output_stats_start': '2017-08-22T01:56:33.589227+00:00',
                       'validation_historical_checks_end': '2017-08-22T01:56:31.915001+00:00',
                       'extract_split_end': '2017-08-22T01:56:25.023654+00:00',
                       'copy_from_staging_start': '2017-08-22T01:56:32.250857+00:00',
                       'extract_split_start': '2017-08-22T01:56:24.844737+00:00',
                       'validation_historical_checks_start': '2017-08-22T01:56:31.400242+00:00',
                       'preprocess_end': '2017-08-22T01:56:25.368138+00:00',
                       'postprocess_staging_end': '2017-08-22T01:56:30.557336+00:00',
                       'init_staging_start': '2017-08-22T01:56:25.536523+00:00',
                       'validation_binary_checks_end': '2017-08-22T01:56:30.895228+00:00',
                       'output_stats_end': '2017-08-22T01:56:34.097277+00:00',
                       'prevalidate_upload_end': '2017-08-22T01:56:28.718421+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:56:26.043878+00:00',
                       'postprocess_staging_start': '2017-08-22T01:56:29.554878+00:00'},
                      'perform_historic_check': True,
                      'input_file':
                          '/workspace/data/duplicate_gsma.zip',
                      'batch_size': 1000000,
                      'input_stats':
                      {'num_records_valid': 4,
                       'num_records': 7,
                       'num_records_invalid': 3}}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='gsma_tac',
                          status='success', extra_metadata=extra_metadata)
    # show_details=False, so extra_metadata is not asserted on in the responses
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'gsma_tac'
        assert data['status'] == 'success'
    else:
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'gsma_tac'
        assert data['status'] == 'success'
def test_registration_import_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored errored 'dirbs-import registration_list' job with metadata."""
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing':
                      {'init_staging_end': '2017-08-22T01:43:21.386498+00:00',
                       'init_staging_start': '2017-08-22T01:43:21.035571+00:00',
                       'extract_split_start': '2017-08-22T01:43:20.35253+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:43:21.554073+00:00',
                       'preprocess_start': '2017-08-22T01:43:20.699411+00:00',
                       'extract_split_end': '2017-08-22T01:43:20.531135+00:00',
                       'preprocess_end': '2017-08-22T01:43:20.867795+00:00'},
                      'batch_size': 1000000,
                      'input_file':
                          '/workspace/data/'
                          'sample_import_list.zip'}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='registration_list',
                          status='error', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'registration_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
    else:
        # v2 wraps results in 'jobs'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), command='dirbs-import'))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'registration_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
def test_golden_import_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored errored 'dirbs-import golden_list' job with metadata."""
    extra_metadata = {'performance_timing':
                      {'init_staging_end': '2017-08-22T01:43:05.017337+00:00',
                       'init_staging_start': '2017-08-22T01:43:04.681766+00:00',
                       'extract_split_start': '2017-08-22T01:43:03.993331+00:00',
                       'prevalidate_upload_start': '2017-08-22T01:43:05.18436+00:00',
                       'preprocess_start': '2017-08-22T01:43:04.337401+00:00',
                       'extract_split_end': '2017-08-22T01:43:04.17081+00:00',
                       'preprocess_end': '2017-08-22T01:43:04.504815+00:00'},
                      'perform_historic_check': True,
                      'pre_hashed': False,
                      'input_file':
                          '/workspace/data/sample_import_list.zip',
                      'batch_size': 1000000}
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='golden_list',
                          status='error', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'golden_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
    else:
        # v2 wraps results in 'jobs'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-import'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'golden_list'
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
def test_db_schema_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored 'dirbs-db upgrade' job with empty extra_metadata."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-db', run_id=1, subcommand='upgrade',
                          status='success')
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-db'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'upgrade'
        assert data['status'] == 'success'
        assert data['extra_metadata'] == {}
    else:
        # v2 wraps results in 'jobs'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), show_details=True))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-db'
        assert data['run_id'] == 1
        assert data['subcommand'] == 'upgrade'
        assert data['status'] == 'success'
        assert data['extra_metadata'] == {}
def test_list_gen_schema_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored successful dirbs-listgen job (details hidden)."""
    extra_metadata = {'blacklist':
                      {'file_size_bytes': 25,
                       'md5sum': 'd623e56b7c73d27fc7ce68e3dfc6e448',
                       'num_records': 0,
                       'filename': '/workspace/data/20170822_021142/blacklist.csv'},
                      'notification_lists':
                      [{'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator1.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator2.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator3.csv'},
                       {'file_size_bytes': 37,
                        'md5sum': '3ac7b8ae8722e47e1ce4b0a01fe8b1e2',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/notifications_operator4.csv'}],
                      'curr_date': None,
                      'exception_lists':
                      [{'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator1.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator2.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator3.csv'},
                       {'file_size_bytes': 11,
                        'md5sum': 'b9a2f42722d13636dfb6c84e2ee765fe',
                        'num_records': 0,
                        'filename': '/workspace/data/20170822_021142/exceptions_operator4.csv'}],
                      'blocking_conditions':
                      [{'dimensions':
                        [{'module': 'gsma_not_found'}],
                        'grace_period_days': 30,
                        'sticky': False,
                        'reason': 'Violated simple dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'simple_dimension',
                        'blocking': True},
                       {'dimensions':
                        [{'module': 'stolen_list'},
                         {'invert': True,
                          'parameters':
                          {'threshold': 3.1,
                           'period_days': 30},
                          'module': 'duplicate_daily_avg'}],
                        'grace_period_days': 0,
                        'sticky': False,
                        'reason': 'Violated compound dimension',
                        'max_allowed_matching_ratio': 0.1,
                        'label': 'compound_dimension',
                        'blocking': True}]}
    job_metadata_importer(db_conn=db_conn, command='dirbs-listgen', run_id=1, subcommand='',
                          status='success', extra_metadata=extra_metadata)
    # show_details=False, so extra_metadata is not asserted on in the responses
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-listgen'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
    else:
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   show_details=False))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-listgen'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'success'
def test_report_schema_json_api(flask_app, db_conn, api_version):
    """Verify both API versions return a stored errored dirbs-report job with its extra_metadata."""
    extra_metadata = {'refreshed_data': True,
                      'month': 2,
                      'output_dir': '/workspace/data',
                      'year': 2016}
    job_metadata_importer(db_conn=db_conn, command='dirbs-report', run_id=1, subcommand='',
                          status='error', extra_metadata=extra_metadata)
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version)))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))[0]
        assert data['command'] == 'dirbs-report'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
    else:
        # v2 wraps results in 'jobs'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version)))
        assert rv.status_code == 200
        data = json.loads(rv.data.decode('utf-8'))['jobs'][0]
        assert data['command'] == 'dirbs-report'
        assert data['run_id'] == 1
        assert data['subcommand'] == ''
        assert data['status'] == 'error'
        assert data['extra_metadata'] == extra_metadata
def test_job_metadata_bad_pos_int_params(flask_app, db_conn, api_version):
    """Verify positive-integer param validation (run_id, max_results) returns HTTP 400 with a message.

    Also checks that valid filtered queries with max_results/subcommand lists succeed.
    """
    if api_version == 'v1':
        # non-integer run_id is rejected
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id='aaa',
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'run_id\':\'aaa\' argument format. Accepts only integer' in rv.data
        # non-positive run_id is rejected
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=-1,
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-1\' must be greater than 0' in rv.data
        # non-integer max_results is rejected
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results='a',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'max_results\':\'a\' argument format. Accepts only integer' in rv.data
        # zero max_results is rejected
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=0,
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'max_results\':\'0\' must be greater than 0' in rv.data
        # NOTE(review): a repeated max_results param ([1, -2]) is expected to
        # succeed — presumably only one of the values is consumed; confirm
        # against the v1 parameter parsing.
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=1,
                                   status='success',
                                   max_results=[1, -2],
                                   show_details=False))
        assert rv.status_code == 200
        # valid filtered query with max_results=1 returns a single record
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                              status='success')
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   run_id=1,
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False,
                                   max_results=1))
        assert rv.status_code == 200
        assert len(json.loads(rv.data.decode('utf-8'))) == 1
    else:
        # v2: same run_id validation; results wrapped in 'jobs'
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id='aaa',
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Bad \'run_id\':\'aaa\' argument format. Accepts only integer' in rv.data
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-classify',
                                   run_id=-1,
                                   status='success',
                                   show_details=False))
        assert rv.status_code == 400
        assert b'Param \'run_id\':\'-1\' must be greater than 0' in rv.data
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                              status='success')
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   run_id=1,
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   show_details=False,
                                   max_results=1))
        assert rv.status_code == 200
        assert len(json.loads(rv.data.decode('utf-8'))['jobs']) == 1
def test_job_metadata_bad_params(flask_app, api_version):
    """Verify enum-style param validation: bad 'status' or 'show_details' values return HTTP 400."""
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), status='unknown'))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        # one bad value in a repeated param is enough to reject the request
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version), status=['error', 'unknown']))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   show_details='not_boolean'))
        assert rv.status_code == 400
        assert b'Bad \'show_details\':\'not_boolean\' argument format. ' \
               b'Accepts only one of [\'0\', \'1\', \'true\', \'false\']' in rv.data
    else:
        # v2 endpoint applies the same validation rules
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), status='unknown'))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version), status=['error', 'unknown']))
        assert rv.status_code == 400
        assert b'Bad \'status\':\'unknown\' argument format. ' \
               b'Accepts only one of [\'running\', \'success\', \'error\']' in rv.data
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   show_details='not_boolean'))
        assert rv.status_code == 400
        assert b'Bad \'show_details\':\'not_boolean\' argument format. ' \
               b'Accepts only one of [\'0\', \'1\', \'true\', \'false\']' in rv.data
def test_json_show_details(flask_app, db_conn, api_version):
    """Verify that show_details toggles inclusion of extra_metadata in responses."""
    extra_metadata = {
        'matched_imei_counts': {'compound_dimension': 0,
                                'simple_dimension': 0},
        'conditions': [
            {'label': 'simple_dimension',
             'blocking': True,
             'sticky': False,
             'reason': 'Violated simple dimension',
             'max_allowed_matching_ratio': 0.1,
             'dimensions': [{'module': 'gsma_not_found'}],
             'grace_period_days': 30},
            {'label': 'compound_dimension',
             'blocking': True,
             'sticky': False,
             'reason': 'Violated compound dimension',
             'max_allowed_matching_ratio': 0.1,
             'dimensions': [{'module': 'stolen_list'},
                            {'invert': True,
                             'module': 'duplicate_daily_avg',
                             'parameters': {'period_days': 30,
                                            'threshold': 3.1}}],
             'grace_period_days': 0}],
        'curr_date': None}
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata=extra_metadata)
    endpoint = '{0}.job_metadata_api'.format(api_version) if api_version == 'v1' \
        else '{0}.job_metadata_get_api'.format(api_version)

    def first_job(response):
        # v1 returns a bare list of jobs; v2 wraps the list under 'jobs'.
        payload = json.loads(response.data.decode('utf-8'))
        return payload[0] if api_version == 'v1' else payload['jobs'][0]

    # show_details=True must surface the stored extra_metadata verbatim.
    rv = flask_app.get(url_for(endpoint,
                               command='dirbs-classify',
                               run_id=1,
                               status='success',
                               show_details=True))
    assert rv.status_code == 200
    data = first_job(rv)
    assert data['command'] == 'dirbs-classify'
    assert data['run_id'] == 1
    assert data['subcommand'] == ''
    assert data['status'] == 'success'
    assert data['extra_metadata'] == extra_metadata
    # show_details=False must omit extra_metadata entirely.
    rv = flask_app.get(url_for(endpoint,
                               command='dirbs-classify',
                               run_id=1,
                               status='success',
                               max_results=10,
                               show_details=False))
    assert rv.status_code == 200
    data = first_job(rv)
    assert data['command'] == 'dirbs-classify'
    assert data['run_id'] == 1
    assert data['subcommand'] == ''
    assert data['status'] == 'success'
    assert 'extra_metadata' not in data
def test_json_no_record_for_get_params(flask_app, db_conn, api_version):
    """Verify that a query matching no stored job yields an empty result set."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success', extra_metadata={'metadata': 'metadata'})
    endpoint = '{0}.job_metadata_api'.format(api_version) if api_version == 'v1' \
        else '{0}.job_metadata_get_api'.format(api_version)
    # run_id=2 was never imported, so no job should match this filter.
    rv = flask_app.get(url_for(endpoint,
                               command='dirbs-classify',
                               run_id=2,
                               db_user='test-user',
                               status='success',
                               max_results=10,
                               show_details=True))
    assert rv.status_code == 200
    payload = json.loads(rv.data.decode('utf-8'))
    # v1 returns a bare list; v2 wraps it under 'jobs'.
    assert (payload if api_version == 'v1' else payload['jobs']) == []
def test_json_unknown_command_param(flask_app, db_conn, api_version):
    """Verify that an unrecognised command name is rejected with HTTP 400."""
    expected_error = b'Bad \'command\':\'dirbs-unknown\' argument format. ' \
                     b'Accepts only one of [\'dirbs-catalog\', \'dirbs-classify\', ' \
                     b'\'dirbs-db\', \'dirbs-import\', \'dirbs-listgen\', \'dirbs-prune\', \'dirbs-report\']'
    if api_version == 'v1':
        rv = flask_app.get(url_for('{0}.job_metadata_api'.format(api_version),
                                   command='dirbs-unknown',
                                   run_id=2,
                                   db_user='test-user',
                                   status='success',
                                   max_results=10,
                                   show_details=True))
    else:
        # max_results is not part of the v2 request in this test.
        rv = flask_app.get(url_for('{0}.job_metadata_get_api'.format(api_version),
                                   command='dirbs-unknown',
                                   run_id=2,
                                   db_user='test-user',
                                   status='success',
                                   show_details=True))
    assert rv.status_code == 400
    assert expected_error in rv.data
def test_json_multiple_values_same_param(flask_app, db_conn, api_version):
    """Verify repeated query parameters are accepted and each value is validated."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='sub_one',
                          status='success')
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=2, subcommand='sub_two',
                          status='success')
    endpoint = '{0}.job_metadata_api'.format(api_version) if api_version == 'v1' \
        else '{0}.job_metadata_get_api'.format(api_version)
    # Multiple run_id/subcommand values should match both imported jobs.
    rv = flask_app.get(url_for(endpoint,
                               run_id=[1, 2],
                               db_user='test-user',
                               subcommand=['sub_one', 'sub_two'],
                               show_details=False))
    assert rv.status_code == 200
    payload = json.loads(rv.data.decode('utf-8'))
    jobs = payload if api_version == 'v1' else payload['jobs']
    assert jobs[0]['subcommand'] == 'sub_one'
    assert jobs[0]['run_id'] == 1
    assert jobs[1]['run_id'] == 2
    assert jobs[1]['subcommand'] == 'sub_two'
    # A negative run_id anywhere in the list must be rejected.
    if api_version == 'v1':
        rv = flask_app.get(url_for(endpoint,
                                   command='dirbs-classify',
                                   run_id=[1, -2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   status=['success', 'error'],
                                   max_results=10,
                                   show_details=False))
    else:
        rv = flask_app.get(url_for(endpoint,
                                   command='dirbs-classify',
                                   run_id=[1, -2],
                                   db_user='test-user',
                                   subcommand=['sub_one', 'sub_two'],
                                   status=['success', 'error'],
                                   show_details=False))
    assert rv.status_code == 400
    assert b'Param \'run_id\':\'-2\' must be greater than 0' in rv.data
def test_json_no_run_id_param(flask_app, db_conn, api_version):
    """Verify that an empty run_id list behaves as 'no run_id filter'."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success')
    endpoint = '{0}.job_metadata_api'.format(api_version) if api_version == 'v1' \
        else '{0}.job_metadata_get_api'.format(api_version)
    rv = flask_app.get(url_for(endpoint, run_id=[], show_details=False))
    assert rv.status_code == 200
    payload = json.loads(rv.data.decode('utf-8'))
    data = payload[0] if api_version == 'v1' else payload['jobs'][0]
    assert data['command'] == 'dirbs-classify'
    assert data['run_id'] == 1
    assert data['subcommand'] == ''
    assert data['status'] == 'success'
def test_default_params(flask_app, db_conn, api_version):
    """Verify the endpoint defaults return the stored job with empty metadata."""
    job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=1, subcommand='',
                          status='success')
    endpoint = '{0}.job_metadata_api'.format(api_version) if api_version == 'v1' \
        else '{0}.job_metadata_get_api'.format(api_version)
    rv = flask_app.get(url_for(endpoint))
    assert rv.status_code == 200
    payload = json.loads(rv.data.decode('utf-8'))
    data = payload[0] if api_version == 'v1' else payload['jobs'][0]
    assert data['command'] == 'dirbs-classify'
    assert data['run_id'] == 1
    assert data['subcommand'] == ''
    assert data['status'] == 'success'
    # No extra metadata was imported, so an empty dict is expected.
    assert data['extra_metadata'] == {}
def test_method_delete_not_allowed(flask_app, db_conn, api_version):
    """Verify that HTTP DELETE is rejected on the job metadata endpoint."""
    endpoint = '{0}.job_metadata_api'.format(api_version) if api_version == 'v1' \
        else '{0}.job_metadata_get_api'.format(api_version)
    rv = flask_app.delete(url_for(endpoint))
    assert rv.status_code == 405
    assert b'Method Not Allowed' in rv.data
def test_method_post_not_allowed(flask_app, db_conn, api_version):
    """Verify that HTTP POST is rejected on the job metadata endpoint.

    Bug fix: this test previously issued DELETE requests (copy-paste from
    test_method_delete_not_allowed), so the POST method was never exercised.
    """
    endpoint = '{0}.job_metadata_api'.format(api_version) if api_version == 'v1' \
        else '{0}.job_metadata_get_api'.format(api_version)
    rv = flask_app.post(url_for(endpoint))
    assert rv.status_code == 405
    assert b'Method Not Allowed' in rv.data
def test_method_put_not_allowed(flask_app, db_conn, api_version):
    """Verify that HTTP PUT is rejected on the job metadata endpoint.

    Bug fix: this test previously issued DELETE requests (copy-paste from
    test_method_delete_not_allowed), so the PUT method was never exercised.
    """
    endpoint = '{0}.job_metadata_api'.format(api_version) if api_version == 'v1' \
        else '{0}.job_metadata_get_api'.format(api_version)
    rv = flask_app.put(url_for(endpoint))
    assert rv.status_code == 405
    assert b'Method Not Allowed' in rv.data
def test_job_metadata_most_recent_successful_job_start_time(db_conn):
    """Smoke test: most_recent_job_start_time_by_command runs against a stored job.

    NOTE(review): the return value is not asserted, so this only verifies the
    lookup executes without raising -- consider asserting it is not None.
    """
    extra_metadata = {'perform_duplicates_check': True,
                      'perform_historic_check': True,
                      'performance_timing': {}}
    # Store one successful dirbs-import/pairing-list job to look up.
    job_metadata_importer(db_conn=db_conn, command='dirbs-import', run_id=1, subcommand='pairing-list',
                          status='success', extra_metadata=extra_metadata)
    metadata.most_recent_job_start_time_by_command(db_conn, 'dirbs-import', subcommand='pairing-list',
                                                   successful_only=True)
def test_job_metadata_v2_pagination(flask_app, db_conn):
    """Verify offset/limit pagination and ordering on the v2 job metadata API."""
    # Seed 10 classify jobs and 10 prune jobs -> 20 rows in total.
    for i in range(10):
        job_metadata_importer(db_conn=db_conn, command='dirbs-classify', run_id=i, subcommand='',
                              status='success')
        job_metadata_importer(db_conn=db_conn, command='dirbs-prune',
                              run_id=i, subcommand='triplets', status='success')
    # No pagination params: everything comes back in one page, no nav keys.
    rv = flask_app.get(url_for('v2.job_metadata_get_api'))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == ''
    assert len(data['jobs']) == 20
    # First page: previous_key empty, next_key advances by one page.
    offset = 1
    limit = 5
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    # Middle page: both navigation keys are populated.
    next_offset = offset + limit
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=next_offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == '?offset={0}&limit={1}'.format(next_offset - limit, limit)
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(next_offset + limit, limit)
    next_offset = next_offset + limit
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=next_offset, limit=limit))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    # NOTE(review): previous_key advertises limit * 2 on this page --
    # presumably deliberate API behaviour; confirm against the endpoint spec.
    assert data['_keys']['previous_key'] == '?offset={0}&limit={1}'.format(next_offset - limit, limit * 2)
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(next_offset + limit, limit)
    # Ascending order: run_ids must be non-decreasing within the page.
    offset = 1
    limit = 5
    order = 'Ascending'
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit, order=order))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    assert data['jobs'][0]['run_id'] <= data['jobs'][1]['run_id']
    assert data['jobs'][1]['run_id'] <= data['jobs'][2]['run_id']
    assert data['jobs'][2]['run_id'] <= data['jobs'][3]['run_id']
    assert data['jobs'][3]['run_id'] <= data['jobs'][4]['run_id']
    # Descending order: run_ids must be non-increasing within the page.
    order = 'Descending'
    rv = flask_app.get(url_for('v2.job_metadata_get_api', offset=offset, limit=limit, order=order))
    assert rv.status_code == 200
    data = json.loads(rv.data.decode('utf-8'))
    assert data['_keys']['result_size'] == 20
    assert data['_keys']['previous_key'] == ''
    assert data['_keys']['next_key'] == '?offset={0}&limit={1}'.format(offset + limit, limit)
    assert len(data['jobs']) == 5
    assert data['jobs'][0]['run_id'] >= data['jobs'][1]['run_id']
    assert data['jobs'][1]['run_id'] >= data['jobs'][2]['run_id']
    assert data['jobs'][2]['run_id'] >= data['jobs'][3]['run_id']
    assert data['jobs'][3]['run_id'] >= data['jobs'][4]['run_id']
| true | true |
f726669c4c59c770ca27ddaf00f3eafdc8ca9522 | 3,606 | py | Python | mocks/mock.py | pandora-auth-ros-pkg/dashboard | 0e66c47d1987e1dfdc91dcd4f876791673938158 | [
"MIT"
] | 1 | 2016-04-01T02:37:05.000Z | 2016-04-01T02:37:05.000Z | mocks/mock.py | pandora-auth-ros-pkg/dashboard | 0e66c47d1987e1dfdc91dcd4f876791673938158 | [
"MIT"
] | null | null | null | mocks/mock.py | pandora-auth-ros-pkg/dashboard | 0e66c47d1987e1dfdc91dcd4f876791673938158 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from time import sleep
import random
import sys
import rospy
from rospy import Publisher, init_node
from sensor_msgs.msg import Range
from pandora_data_fusion_msgs.msg import VictimProbabilities
from pandora_sensor_msgs.msg import BatteryMsg, Co2Msg, Temperature
from pandora_sensor_msgs.msg import ThermalMeanMsg, ImuRPY
def random_battery(delay=1):
    """Publish random battery voltage readings on /sensors/battery every *delay* seconds."""
    print('Starting random battery data...')
    pub = Publisher('/sensors/battery', BatteryMsg)
    msg = BatteryMsg()
    # Two battery channels: the PSU pack and the motor pack.
    msg.name = ['PSU', 'Motors']
    while not rospy.is_shutdown():
        # Random integer readings in [18, 25] for each channel.
        battery1 = random.randint(18, 25)
        battery2 = random.randint(18, 25)
        msg.voltage = [battery1, battery2]
        sleep(delay)
        pub.publish(msg)
def random_temperatures(delay=1):
    """Publish random per-core CPU temperatures on /cpu/temperature every *delay* seconds.

    Bug fix: the core list previously read ['cpu0', 'cpu1', 'cpu2', 'cpu2'],
    naming the third core twice; the fourth entry is now 'cpu3'.
    """
    print('Starting random temperatures...')
    pub = Publisher('/cpu/temperature', Temperature)
    msg = Temperature()
    msg.name = ['cpu0', 'cpu1', 'cpu2', 'cpu3']
    while not rospy.is_shutdown():
        # One random reading per named core, in [30, 80].
        msg.temperature = [random.randint(30, 80) for _ in range(len(msg.name))]
        sleep(delay)
        pub.publish(msg)
def random_co2(delay=1):
    """Publish random CO2 percentage readings on /sensors/co2 every *delay* seconds.

    Bug fix: the startup banner previously said 'Starting random battery
    data...' -- a copy-paste from random_battery.
    """
    print('Starting random co2 data...')
    pub = Publisher('/sensors/co2', Co2Msg)
    msg = Co2Msg()
    while not rospy.is_shutdown():
        msg.header = rospy.Header()
        # Random percentage in [0, 1).
        msg.co2_percentage = random.random()
        sleep(delay)
        pub.publish(msg)
def random_sonar(delay=1):
    """Publish random range readings for both sonar frames on /sensors/range."""
    print('Starting random sonar...')
    pub = Publisher('/sensors/range', Range)
    msg = Range()
    while not rospy.is_shutdown():
        # Right sonar: random range in [0, 20).
        msg.header = rospy.Header(frame_id='right_sonar_frame')
        msg.range = random.random() * 20
        pub.publish(msg)
        sleep(delay)
        # Left sonar is published right after the delay, so the two frames
        # alternate with a single sleep per right/left pair.
        msg.header = rospy.Header(frame_id='left_sonar_frame')
        msg.range = random.random() * 20
        pub.publish(msg)
def random_imu(delay=1):
    """Publish random roll/pitch/yaw orientation data on /sensors/imu_rpy."""
    print('Starting random imu...')
    publisher = Publisher('/sensors/imu_rpy', ImuRPY)
    message = ImuRPY()
    while not rospy.is_shutdown():
        # Independent random angles in [0, 50) for each axis.
        message.roll, message.pitch, message.yaw = (random.random() * 50 for _ in range(3))
        publisher.publish(message)
        sleep(delay)
def random_thermal(delay=1):
    """Publish a random thermal-mean reading on /sensors/thermal every *delay* seconds."""
    print('Starting random thermal data...')
    thermal_pub = Publisher('/sensors/thermal', ThermalMeanMsg)
    thermal_msg = ThermalMeanMsg()
    while not rospy.is_shutdown():
        thermal_msg.header = rospy.Header()
        # Random integer mean temperature in [20, 40].
        thermal_msg.thermal_mean = random.randint(20, 40)
        sleep(delay)
        thermal_pub.publish(thermal_msg)
def random_signs_of_life(delay=1):
    """Publish random victim-probability values on /data_fusion/signs_of_life."""
    print('Starting random signs of life.')
    pub = Publisher('/data_fusion/signs_of_life', VictimProbabilities)
    msg = VictimProbabilities()
    # Field order matches the original per-field assignments.
    channels = ('thermal', 'co2', 'sound', 'motion', 'visualVictim', 'hazmat')
    while not rospy.is_shutdown():
        # Assign an independent random probability to every sensor channel.
        for channel in channels:
            setattr(msg, channel, random.random())
        sleep(delay)
        pub.publish(msg)
if __name__ == '__main__':
    init_node('mock_node', anonymous=True)
    # Map of CLI selector -> mock publisher loop. Replaces the if/elif chain
    # and makes an unknown selection fail loudly instead of exiting silently.
    mocks = {
        'battery': random_battery,
        'co2': random_co2,
        'thermal': random_thermal,
        'temp': random_temperatures,
        'sonar': random_sonar,
        'imu': random_imu,
        'sol': random_signs_of_life,
    }
    if len(sys.argv) < 3:
        print('usage: mock.py <{0}> <delay-seconds>'.format('|'.join(sorted(mocks))),
              file=sys.stderr)
        sys.exit(1)
    # Select a mock to use.
    selection = sys.argv[1]
    delay = float(sys.argv[2])
    try:
        mock = mocks[selection]
    except KeyError:
        print('Unknown mock selection: {0}'.format(selection), file=sys.stderr)
        sys.exit(1)
    mock(delay)
| 28.848 | 70 | 0.642818 |
from __future__ import print_function
from time import sleep
import random
import sys
import rospy
from rospy import Publisher, init_node
from sensor_msgs.msg import Range
from pandora_data_fusion_msgs.msg import VictimProbabilities
from pandora_sensor_msgs.msg import BatteryMsg, Co2Msg, Temperature
from pandora_sensor_msgs.msg import ThermalMeanMsg, ImuRPY
def random_battery(delay=1):
print('Starting random battery data...')
pub = Publisher('/sensors/battery', BatteryMsg)
msg = BatteryMsg()
msg.name = ['PSU', 'Motors']
while not rospy.is_shutdown():
battery1 = random.randint(18, 25)
battery2 = random.randint(18, 25)
msg.voltage = [battery1, battery2]
sleep(delay)
pub.publish(msg)
def random_temperatures(delay=1):
print('Starting random temperatures...')
pub = Publisher('/cpu/temperature', Temperature)
msg = Temperature()
msg.name = ['cpu0', 'cpu1', 'cpu2', 'cpu2']
while not rospy.is_shutdown():
msg.temperature = [random.randint(30, 80) for i in range(4)]
sleep(delay)
pub.publish(msg)
def random_co2(delay=1):
print('Starting random battery data...')
pub = Publisher('/sensors/co2', Co2Msg)
msg = Co2Msg()
while not rospy.is_shutdown():
msg.header = rospy.Header()
msg.co2_percentage = random.random()
sleep(delay)
pub.publish(msg)
def random_sonar(delay=1):
print('Starting random sonar...')
pub = Publisher('/sensors/range', Range)
msg = Range()
while not rospy.is_shutdown():
msg.header = rospy.Header(frame_id='right_sonar_frame')
msg.range = random.random() * 20
pub.publish(msg)
sleep(delay)
msg.header = rospy.Header(frame_id='left_sonar_frame')
msg.range = random.random() * 20
pub.publish(msg)
def random_imu(delay=1):
print('Starting random imu...')
pub = Publisher('/sensors/imu_rpy', ImuRPY)
msg = ImuRPY()
while not rospy.is_shutdown():
msg.roll = random.random() * 50
msg.pitch = random.random() * 50
msg.yaw = random.random() * 50
pub.publish(msg)
sleep(delay)
def random_thermal(delay=1):
print('Starting random thermal data...')
pub = Publisher('/sensors/thermal', ThermalMeanMsg)
msg = ThermalMeanMsg()
while not rospy.is_shutdown():
msg.header = rospy.Header()
msg.thermal_mean = random.randint(20, 40)
sleep(delay)
pub.publish(msg)
def random_signs_of_life(delay=1):
print('Starting random signs of life.')
pub = Publisher('/data_fusion/signs_of_life', VictimProbabilities)
msg = VictimProbabilities()
while not rospy.is_shutdown():
msg.thermal = random.random()
msg.co2 = random.random()
msg.sound = random.random()
msg.motion = random.random()
msg.visualVictim = random.random()
msg.hazmat = random.random()
sleep(delay)
pub.publish(msg)
if __name__ == '__main__':
init_node('mock_node', anonymous=True)
selection = sys.argv[1]
delay = float(sys.argv[2])
if selection == 'battery':
random_battery(delay)
elif selection == 'co2':
random_co2(delay)
elif selection == 'thermal':
random_thermal(delay)
elif selection == 'temp':
random_temperatures(delay)
elif selection == 'sonar':
random_sonar(delay)
elif selection == 'imu':
random_imu(delay)
elif selection == 'sol':
random_signs_of_life(delay)
| true | true |
f72666c6d7fa28865bf0cc1af0a8928f8b710444 | 674 | py | Python | src/gufo/err/failfast/always.py | gufolabs/gufo_err | d3996f355b38a3efe1fa3ecae578846ffebd7790 | [
"BSD-3-Clause"
] | null | null | null | src/gufo/err/failfast/always.py | gufolabs/gufo_err | d3996f355b38a3efe1fa3ecae578846ffebd7790 | [
"BSD-3-Clause"
] | null | null | null | src/gufo/err/failfast/always.py | gufolabs/gufo_err | d3996f355b38a3efe1fa3ecae578846ffebd7790 | [
"BSD-3-Clause"
] | null | null | null | # ---------------------------------------------------------------------
# Gufo Err: AlwaysFailFast
# ---------------------------------------------------------------------
# Copyright (C) 2022, Gufo Labs
# ---------------------------------------------------------------------
# Python modules
from typing import Type
from types import TracebackType
# Gufo Labs modules
from ..abc.failfast import BaseFailFast
class AlwaysFailFast(BaseFailFast):
"""
Always fail-fast. Trigger fail-fast unconditionally.
"""
def must_die(
self,
t: Type[BaseException],
v: BaseException,
tb: TracebackType,
) -> bool:
return True
| 24.962963 | 71 | 0.449555 |
from typing import Type
from types import TracebackType
from ..abc.failfast import BaseFailFast
class AlwaysFailFast(BaseFailFast):
def must_die(
self,
t: Type[BaseException],
v: BaseException,
tb: TracebackType,
) -> bool:
return True
| true | true |
f726670921d44f21aa09f17d795a742ee0c1fa0c | 8,397 | py | Python | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | test/bitfinex_test.py | laisee/bitfinex | 6a3e7cd412f186eca0039602d32c65938a392747 | [
"MIT"
] | null | null | null | import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
class BitfinexTest(unittest.TestCase):
    """Tests for the public (unauthenticated) Bitfinex REST client.

    HTTP calls are intercepted with httpretty; no network access occurs.
    The repeated register_uri boilerplate is factored into _mock_get.
    """

    def setUp(self):
        self.client = Client()

    def _mock_get(self, url, body):
        """Register a canned HTTP 200 response for a GET request to *url*."""
        httpretty.register_uri(httpretty.GET, url, body=body, status=200)

    def test_should_have_server(self):
        self.assertEqual("https://api.bitfinex.com/v1", self.client.server())

    def test_should_have_url_for_foo(self):
        expected = "https://api.bitfinex.com/v1/foo"
        self.assertEqual(expected, self.client.url_for("foo"))

    def test_should_have_url_for_path_arg(self):
        expected = "https://api.bitfinex.com/v1/foo/bar"
        actual = self.client.url_for('foo/%s', path_arg="bar")
        self.assertEqual(expected, actual)

    def test_should_have_url_with_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo?a=1&b=2"
        actual = self.client.url_for('foo', parameters={'a': 1, 'b': 2})
        self.assertEqual(expected, actual)

    def test_should_have_url_for(self):
        expected = self.client.url_for("foo")
        self.assertEqual("https://api.bitfinex.com/v1/foo", expected)

    def test_should_have_url_for_with_path_arg(self):
        # path_arg may be passed by keyword or positionally.
        expected = "https://api.bitfinex.com/v1/foo/bar"
        path = "foo/%s"
        self.assertEqual(expected, self.client.url_for(path, path_arg='bar'))
        self.assertEqual(expected, self.client.url_for(path, 'bar'))

    def test_should_have_url_for_with_parameters(self):
        # parameters may be passed by keyword or positionally.
        expected = "https://api.bitfinex.com/v1/foo?a=1"
        self.assertEqual(expected, self.client.url_for("foo", parameters={'a': 1}))
        self.assertEqual(expected, self.client.url_for("foo", None, {'a': 1}))

    def test_should_have_url_for_with_path_arg_and_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo/bar?a=1"
        path = "foo/%s"
        self.assertEqual(expected, self.client.url_for(path, path_arg='bar', parameters={'a': 1}))
        self.assertEqual(expected, self.client.url_for(path, 'bar', {'a': 1}))

    @httpretty.activate
    def test_should_have_symbols(self):
        mock_body = '["btcusd","ltcusd","ltcbtc"]'
        self._mock_get(self.client.url_for('symbols'), mock_body)

        expected = ["btcusd", "ltcusd", "ltcbtc"]
        self.assertEqual(expected, self.client.symbols())

    @httpretty.activate
    def test_should_have_ticker(self):
        # The API returns numbers as strings; the client converts to floats.
        mock_body = '{"mid":"562.56495","bid":"562.15","ask":"562.9799","last_price":"562.25","timestamp":"1395552658.339936691"}'
        self._mock_get(self.client.url_for('ticker/%s', path_arg='btcusd'), mock_body)

        expected = {
            "mid": 562.56495,
            "bid": 562.15,
            "ask": 562.9799,
            "last_price": 562.25,
            "timestamp": 1395552658.339936691
        }
        self.assertEqual(expected, self.client.ticker('btcusd'))

    @httpretty.activate
    def test_should_have_today(self):
        mock_body = '{"low":"550.09","high":"572.2398","volume":"7305.33119836"}'
        self._mock_get(self.client.url_for('today/%s', path_arg='btcusd'), mock_body)

        expected = {
            "low": 550.09,
            "high": 572.2398,
            "volume": 7305.33119836
        }
        self.assertEqual(expected, self.client.today('btcusd'))

    @httpretty.activate
    def test_should_have_stats(self):
        mock_body = '[{"period":1,"volume":"7410.27250155"},{"period":7,"volume":"52251.37118006"},{"period":30,"volume":"464505.07753251"}]'
        self._mock_get(self.client.url_for('stats/%s', path_arg='btcusd'), mock_body)

        expected = [
            {"period": 1, "volume": 7410.27250155},
            {"period": 7, "volume": 52251.37118006},
            {"period": 30, "volume": 464505.07753251}
        ]
        self.assertEqual(expected, self.client.stats('btcusd'))

    @httpretty.activate
    def test_should_have_lendbook(self):
        # The client converts the API's "Yes"/"No" frr flags to booleans.
        mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[{"rate":"6.351","amount":"15.5180735","period":5,"timestamp":"1395549996.0","frr":"No"},{"rate":"6.3588","amount":"626.94808249","period":30,"timestamp":"1395400654.0","frr":"Yes"}]}'
        self._mock_get(self.client.url_for('lendbook/%s', 'btc'), mock_body)

        expected = {
            "bids": [
                {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
                {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
            ],
            "asks": [
                {"rate": 6.351, "amount": 15.5180735, "period": 5, "timestamp": 1395549996.0, "frr": False},
                {"rate": 6.3588, "amount": 626.94808249, "period": 30, "timestamp": 1395400654.0, "frr": True}
            ]
        }
        self.assertEqual(expected, self.client.lendbook('btc'))

    @httpretty.activate
    def test_should_have_lendbook_with_parameters(self):
        mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[]}'
        parameters = {'limit_bids': 2, 'limit_asks': 0}
        self._mock_get(self.client.url_for('lendbook/%s', 'btc', parameters), mock_body)

        expected = {
            "bids": [
                {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
                {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
            ],
            "asks": [
            ]
        }
        self.assertEqual(expected, self.client.lendbook('btc', parameters))

    @httpretty.activate
    def test_should_have_order_book(self):
        mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[{"price":"563.001","amount":"0.3","timestamp":"1395532200.0"}]}'
        self._mock_get(self.client.url_for('book/%s', 'btcusd'), mock_body)

        expected = {
            "bids": [
                {"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
            ],
            "asks": [
                {"price": 563.001, "amount": 0.3, "timestamp": 1395532200.0}
            ]
        }
        self.assertEqual(expected, self.client.order_book('btcusd'))

    @httpretty.activate
    def test_should_have_order_book_with_parameters(self):
        mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[]}'
        parameters = {'limit_asks': 0}
        self._mock_get(self.client.url_for('book/%s', 'btcusd', parameters), mock_body)

        expected = {
            "bids": [
                {"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
            ],
            "asks": []
        }
        self.assertEqual(expected, self.client.order_book('btcusd', parameters))
class TestTradeClient(unittest.TestCase):
    """Smoke tests for the authenticated Bitfinex trade client.

    These tests appear to exercise the live API with the configured
    credentials (no mocking is set up) and only check response types.
    """

    def setUp(self):
        self.tc = TradeClient(API_KEY, API_SECRET)

    def test_instantiate_tradeclient(self):
        self.assertIsInstance(self.tc, TradeClient)

    def test_get_active_orders_returns_json(self):
        ao = self.tc.active_orders()
        self.assertIsInstance(ao, list)

    def test_get_active_positions_returns_json(self):
        ap = self.tc.active_positions()
        self.assertIsInstance(ap, list)

    def test_get_full_history(self):
        # NOTE(review): this duplicates test_get_active_positions_returns_json --
        # it calls active_positions(), not a history endpoint; presumably it
        # should use the client's trade-history call. Confirm the intended API.
        ap = self.tc.active_positions()
        self.assertIsInstance(ap, list)
| 37.995475 | 400 | 0.609265 | import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
class BitfinexTest(unittest.TestCase):
def setUp(self):
self.client = Client()
def test_should_have_server(self):
self.assertEqual("https://api.bitfinex.com/v1", self.client.server())
def test_should_have_url_for_foo(self):
expected = "https://api.bitfinex.com/v1/foo"
self.assertEqual(expected, self.client.url_for("foo"))
def test_should_have_url_for_path_arg(self):
expected = "https://api.bitfinex.com/v1/foo/bar"
actual = self.client.url_for('foo/%s', path_arg="bar")
self.assertEqual(expected, actual)
def test_should_have_url_with_parameters(self):
expected = "https://api.bitfinex.com/v1/foo?a=1&b=2"
actual = self.client.url_for('foo', parameters={'a': 1, 'b': 2})
self.assertEqual(expected, actual)
def test_should_have_url_for(self):
expected = self.client.url_for("foo")
self.assertEqual("https://api.bitfinex.com/v1/foo", expected)
def test_should_have_url_for_with_path_arg(self):
expected = "https://api.bitfinex.com/v1/foo/bar"
path = "foo/%s"
self.assertEqual(expected, self.client.url_for(path, path_arg='bar'))
self.assertEqual(expected, self.client.url_for(path, 'bar'))
def test_should_have_url_for_with_parameters(self):
expected = "https://api.bitfinex.com/v1/foo?a=1"
self.assertEqual(expected, self.client.url_for("foo", parameters={'a': 1}))
self.assertEqual(expected, self.client.url_for("foo", None, {'a': 1}))
def test_should_have_url_for_with_path_arg_and_parameters(self):
expected = "https://api.bitfinex.com/v1/foo/bar?a=1"
path = "foo/%s"
self.assertEqual(expected, self.client.url_for(path, path_arg='bar', parameters={'a': 1}))
self.assertEqual(expected, self.client.url_for(path, 'bar', {'a': 1}))
@httpretty.activate
def test_should_have_symbols(self):
mock_body = '["btcusd","ltcusd","ltcbtc"]'
url = self.client.url_for('symbols')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = ["btcusd","ltcusd","ltcbtc"]
self.assertEqual(expected, self.client.symbols())
@httpretty.activate
def test_should_have_ticker(self):
mock_body = '{"mid":"562.56495","bid":"562.15","ask":"562.9799","last_price":"562.25","timestamp":"1395552658.339936691"}'
url = self.client.url_for('ticker/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"mid": 562.56495,
"bid": 562.15,
"ask": 562.9799,
"last_price": 562.25,
"timestamp": 1395552658.339936691
}
self.assertEqual(expected, self.client.ticker('btcusd'))
@httpretty.activate
def test_should_have_today(self):
mock_body = '{"low":"550.09","high":"572.2398","volume":"7305.33119836"}'
url = self.client.url_for('today/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = {
"low": 550.09,
"high": 572.2398,
"volume": 7305.33119836
}
self.assertEqual(expected, self.client.today('btcusd'))
@httpretty.activate
def test_should_have_stats(self):
mock_body = '[{"period":1,"volume":"7410.27250155"},{"period":7,"volume":"52251.37118006"},{"period":30,"volume":"464505.07753251"}]'
url = self.client.url_for('stats/%s', path_arg='btcusd')
httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
expected = [
{"period": 1, "volume": 7410.27250155},
{"period": 7, "volume": 52251.37118006},
{"period": 30,"volume": 464505.07753251}
]
self.assertEqual(expected, self.client.stats('btcusd'))
@httpretty.activate
def test_should_have_lendbook(self):
    """lendbook() should parse bids/asks and map "Yes"/"No" frr flags to booleans."""
    payload = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[{"rate":"6.351","amount":"15.5180735","period":5,"timestamp":"1395549996.0","frr":"No"},{"rate":"6.3588","amount":"626.94808249","period":30,"timestamp":"1395400654.0","frr":"Yes"}]}'
    endpoint = self.client.url_for('lendbook/%s', 'btc')
    httpretty.register_uri(httpretty.GET, endpoint, body=payload, status=200)
    wanted = {
        "bids": [
            {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
            {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False},
        ],
        "asks": [
            {"rate": 6.351, "amount": 15.5180735, "period": 5, "timestamp": 1395549996.0, "frr": False},
            {"rate": 6.3588, "amount": 626.94808249, "period": 30, "timestamp": 1395400654.0, "frr": True},
        ],
    }
    self.assertEqual(wanted, self.client.lendbook('btc'))
@httpretty.activate
def test_should_have_lendbook_with_parameters(self):
    """lendbook() should forward limit_bids/limit_asks query parameters."""
    payload = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[]}'
    query = {'limit_bids': 2, 'limit_asks': 0}
    endpoint = self.client.url_for('lendbook/%s', 'btc', query)
    httpretty.register_uri(httpretty.GET, endpoint, body=payload, status=200)
    wanted = {
        "bids": [
            {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
            {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False},
        ],
        # limit_asks=0 -> the asks side comes back empty
        "asks": [],
    }
    self.assertEqual(wanted, self.client.lendbook('btc', query))
@httpretty.activate
def test_should_have_order_book(self):
    """order_book() should parse bid/ask levels into float-valued dicts."""
    payload = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[{"price":"563.001","amount":"0.3","timestamp":"1395532200.0"}]}'
    endpoint = self.client.url_for('book/%s', 'btcusd')
    httpretty.register_uri(httpretty.GET, endpoint, body=payload, status=200)
    wanted = {
        "bids": [{"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}],
        "asks": [{"price": 563.001, "amount": 0.3, "timestamp": 1395532200.0}],
    }
    self.assertEqual(wanted, self.client.order_book('btcusd'))
@httpretty.activate
def test_should_have_order_book_with_parameters(self):
    """order_book() should forward the limit_asks query parameter."""
    payload = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[]}'
    query = {'limit_asks': 0}
    endpoint = self.client.url_for('book/%s', 'btcusd', query)
    httpretty.register_uri(httpretty.GET, endpoint, body=payload, status=200)
    wanted = {
        "bids": [{"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}],
        # limit_asks=0 -> the asks side comes back empty
        "asks": [],
    }
    self.assertEqual(wanted, self.client.order_book('btcusd', query))
class TestTradeClient(unittest.TestCase):
    """Smoke tests for the authenticated TradeClient endpoints.

    These hit the live API with API_KEY / API_SECRET, so they only check
    that each call succeeds and returns the expected container type.
    """

    def setUp(self):
        self.tc = TradeClient(API_KEY, API_SECRET)

    def test_instantiate_tradeclient(self):
        self.assertIsInstance(self.tc, TradeClient)

    def test_get_active_orders_returns_json(self):
        orders = self.tc.active_orders()
        self.assertIsInstance(orders, list)

    def test_get_active_positions_returns_json(self):
        positions = self.tc.active_positions()
        self.assertIsInstance(positions, list)

    def test_get_full_history(self):
        # NOTE(review): this duplicates test_get_active_positions — presumably
        # it was meant to call a history endpoint; confirm intent.
        positions = self.tc.active_positions()
        self.assertIsInstance(positions, list)
| true | true |
f726671fe689fac6b9c56332427272898befdccc | 388 | py | Python | api/v10/urls.py | huylb314/sample-django | 5c53e05ccd62abc075e4a9942681ab845d5be2e0 | [
"MIT"
] | null | null | null | api/v10/urls.py | huylb314/sample-django | 5c53e05ccd62abc075e4a9942681ab845d5be2e0 | [
"MIT"
] | null | null | null | api/v10/urls.py | huylb314/sample-django | 5c53e05ccd62abc075e4a9942681ab845d5be2e0 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
from django.views.decorators.csrf import csrf_exempt
# Requests multiplexed onto the single /charts endpoint:
#   GET    charts?userid=0&clientid=1
#   GET    charts?userid=0&clientid=1&chartid=2
#   DELETE charts?userid=0&clientid=1&chartid=2
#   POST   charts?userid=0&clientid=1&chartid=2
#   POST   charts?userid=0&clientid=1
# Every charts operation is routed through the single dispatcher view.
# csrf_exempt disables Django's CSRF protection for this view — presumably
# the endpoint is called by non-browser API clients; confirm before exposing.
urlpatterns = [
    url(r'^charts$', csrf_exempt(views.doTheMagic)),
]
| 27.714286 | 53 | 0.744845 | from django.conf.urls import url
from . import views
from django.views.decorators.csrf import csrf_exempt
urlpatterns = [
url(r'^charts$', csrf_exempt(views.doTheMagic)),
]
| true | true |
f7266798c32cdc9f7db93d85cd1ba7543a2f7525 | 4,011 | py | Python | users/views.py | mayronH/desafio-frexco | 636b0d5ef11cb663cb46c022eed69fe8fcee49e3 | [
"MIT"
] | null | null | null | users/views.py | mayronH/desafio-frexco | 636b0d5ef11cb663cb46c022eed69fe8fcee49e3 | [
"MIT"
] | null | null | null | users/views.py | mayronH/desafio-frexco | 636b0d5ef11cb663cb46c022eed69fe8fcee49e3 | [
"MIT"
] | null | null | null | import csv
import xlsxwriter
import io
from django.http import HttpResponse, JsonResponse
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext
from users.models import CustomUser
from users.forms import SignUpForm
# Create your views here.
def index(request):
    """Render the landing page, exposing the current (possibly anonymous) user."""
    context = {"user": request.user}
    return render(request, 'index.html', context)
def signup(request):
    """User sign-up view.

    On a valid POST, create the user, set their password (generating one via
    ``CustomUser.make_password`` when the form supplied none), log them in and
    redirect to the index page.  On GET (or invalid POST) render the form.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            user = form.save()
            # BUG FIX: refresh_from_db() reloads *every* field from the stored
            # row.  The previous code called it after set_password(), which
            # silently discarded the freshly-set password hash before save().
            # Reload first, then set the password.
            user.refresh_from_db()
            password = form.cleaned_data.get('password1')
            if not password:
                # Form allows a blank password - generate one server-side
                password = CustomUser.make_password()
            user.set_password(password)
            user.save()
            login(request, user, backend='django.contrib.auth.backends.ModelBackend')
            return redirect('index')
        # Invalid POST: fall through and re-render the bound form with errors
    else:
        form = SignUpForm()
    return render(request, 'registration/signup.html', {'form': form})
@login_required
def dashboard(request):
    """Show every registered user, oldest sign-up first."""
    return render(request, 'dashboard.html',
                  {'users': CustomUser.objects.order_by('date_joined')})
@login_required
def jsonUsers(request):
    """Serialise the full user table as a JSON array."""
    rows = list(CustomUser.objects.values())
    # safe=False allows a top-level list (JsonResponse defaults to dict-only)
    return JsonResponse(rows, safe=False)
@login_required
def csvUsers(request):
    """Stream the user table as a downloadable CSV attachment."""
    columns = ('username', 'birthdate', 'date_joined', 'last_login')
    response = HttpResponse(
        content_type='text/csv',
        headers={'Content-Disposition': 'attachment; filename="users.csv"'},
    )
    writer = csv.writer(response)
    writer.writerow(list(columns))
    # Write the rows straight from the queryset, one tuple per user
    for record in CustomUser.objects.all().values_list(*columns):
        writer.writerow(record)
    return response
@login_required
def xlsxUsers(request):
    """Export the user list as a styled XLSX workbook.

    The workbook is assembled in an in-memory buffer and returned as a
    file-download response.
    """
    output = io.BytesIO()
    workbook = xlsxwriter.Workbook(output)

    title = workbook.add_format({
        'bold': True,
        'font_size': 18,
        'align': 'center',
        'valign': 'vcenter',
        'text_wrap': True,
    })
    header = workbook.add_format({
        'color': 'black',
        'align': 'center',
        'valign': 'top',
        'text_wrap': True,
        'border': 1
    })
    cell = workbook.add_format({
        'color': 'black',
        'text_wrap': True,
        'top': 1,
        'bottom': 1
    })

    worksheet = workbook.add_worksheet()

    title_text = u"Usuários Cadastrados"
    worksheet.merge_range('A2:E2', title_text, title)

    # Column headers (row index 2 == spreadsheet row 3)
    worksheet.write(2, 0, ("N"), header)
    worksheet.write(2, 1, ("username"), header)
    worksheet.write(2, 2, ("birthdate"), header)
    worksheet.write(2, 3, ("date_joined"), header)
    worksheet.write(2, 4, ("last_login"), header)

    # BUG FIX: the previous format string was '%d/%M/%Y' — '%M' is *minutes*,
    # not month.  '%m' is the zero-padded month number.
    date_format = '%d/%m/%Y'

    users = CustomUser.objects.all()
    for index, user in enumerate(users):
        row = 3 + index
        worksheet.write_number(row, 0, index + 1, cell)
        worksheet.write_string(row, 1, user.username, cell)
        worksheet.write(row, 2, user.birthdate.strftime(date_format), cell)
        worksheet.write(row, 3, user.date_joined.strftime(date_format), cell)
        if user.last_login is not None:
            worksheet.write(row, 4, user.last_login.strftime(date_format), cell)
        else:
            # Never logged in: keep the previous behaviour of writing "None"
            worksheet.write(row, 4, str(user.last_login), cell)

    workbook.close()
    output.seek(0)
    response = HttpResponse(
        output,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        headers={'Content-Disposition': 'attachment; filename="users.xlsx"'},
    )
    return response
import xlsxwriter
import io
from django.http import HttpResponse, JsonResponse
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.utils.translation import gettext
from users.models import CustomUser
from users.forms import SignUpForm
def index(request):
user = request.user
return render(request, 'index.html', {
"user": user,
})
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
password = form.cleaned_data.get('password1')
if not password:
password = CustomUser.make_password()
user.set_password(password)
user.refresh_from_db()
user.save()
login(request, user, backend='django.contrib.auth.backends.ModelBackend')
return redirect('index')
else:
form = SignUpForm()
return render(request, 'registration/signup.html', {'form': form})
@login_required
def dashboard(request):
user_list = CustomUser.objects.order_by('date_joined')
return render(request, 'dashboard.html', {'users' : user_list})
@login_required
def jsonUsers(request):
user_list = list(CustomUser.objects.values())
return JsonResponse(user_list, safe=False)
@login_required
def csvUsers(request):
users = CustomUser.objects.all().values_list('username', 'birthdate', 'date_joined', 'last_login')
response = HttpResponse(
content_type='text/csv',
headers={'Content-Disposition': 'attachment; filename="users.csv"'},
)
writer = csv.writer(response)
writer.writerow(['username', 'birthdate', 'date_joined', 'last_login'])
for user in users:
writer.writerow(user)
return response
@login_required
def xlsxUsers(request):
output = io.BytesIO()
workbook = xlsxwriter.Workbook(output)
title = workbook.add_format({
'bold': True,
'font_size': 18,
'align': 'center',
'valign': 'vcenter',
'text_wrap': True,
})
header = workbook.add_format({
'color': 'black',
'align': 'center',
'valign': 'top',
'text_wrap': True,
'border': 1
})
cell = workbook.add_format({
'color': 'black',
'text_wrap': True,
'top': 1,
'bottom': 1
})
worksheet = workbook.add_worksheet()
title_text = u"Usuários Cadastrados"
worksheet.merge_range('A2:E2', title_text, title)
worksheet.write(2, 0, ("N"), header)
worksheet.write(2, 1, ("username"), header)
worksheet.write(2, 2, ("birthdate"), header)
worksheet.write(2, 3, ("date_joined"), header)
worksheet.write(2, 4, ("last_login"), header)
users = CustomUser.objects.all()
for index, user in enumerate(users):
row = 3 + index
worksheet.write_number(row, 0, index + 1, cell)
worksheet.write_string(row, 1, user.username, cell)
worksheet.write(row, 2, user.birthdate.strftime('%d/%M/%Y'), cell)
worksheet.write(row, 3, user.date_joined.strftime('%d/%M/%Y'), cell)
if user.last_login != None:
worksheet.write(row, 4, user.last_login.strftime('%d/%M/%Y'), cell)
else:
worksheet.write(row, 4, str(user.last_login), cell)
workbook.close()
output.seek(0)
response = HttpResponse(
output,
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
headers={'Content-Disposition': 'attachment; filename="users.xlsx"'},
)
return response | true | true |
f72667a1d24d7941c968cd6a40963f081df72e57 | 2,540 | py | Python | tests/unit/kubernetes/test_Ssh.py | owasp-sbot/OSBot-K8s | 74403194481215abb24ca82714bd23ea51b33dfd | [
"Apache-2.0"
] | null | null | null | tests/unit/kubernetes/test_Ssh.py | owasp-sbot/OSBot-K8s | 74403194481215abb24ca82714bd23ea51b33dfd | [
"Apache-2.0"
] | null | null | null | tests/unit/kubernetes/test_Ssh.py | owasp-sbot/OSBot-K8s | 74403194481215abb24ca82714bd23ea51b33dfd | [
"Apache-2.0"
] | null | null | null | import os
from unittest import TestCase
import pytest
from dotenv import load_dotenv
from pytest import skip
from osbot_utils.utils.Files import file_not_exists
from osbot_k8s.kubernetes.Ssh import Ssh
@pytest.mark.skip('needs live server')  # todo add to test setup the creation of pods and nodes we can SSH into
class test_Ssh(TestCase):
    """Integration tests for the Ssh helper against a live SSH server.

    Connection details come from the TEST_SSH_USER / TEST_SSH_SERVER /
    TEST_SSH_KEY environment variables (loaded from .env).  The whole class
    is skipped until the test setup can provision its own target host.
    """

    def setUp(self) -> None:
        load_dotenv()
        self.ssh_config = {
            "user"    : os.environ.get('TEST_SSH_USER'  ),
            "server"  : os.environ.get('TEST_SSH_SERVER'),
            "ssh_key" : os.environ.get('TEST_SSH_KEY'   ),
        }
        # Without the private key on disk there is nothing to test
        if file_not_exists(self.ssh_config.get('ssh_key')):
            skip('no ssh key in current test environment')
        self.ssh = Ssh(ssh_config=self.ssh_config)
        print()

    # base methods

    def test_server_in_known_hosts(self):
        # todo: add method to programatically add the server to the known_hosts file
        result = self.ssh.server_in_known_hosts()
        assert type(result) is bool

    def test_exec_ssh_command(self):
        assert self.ssh.exec_ssh_command('uname') == {'error': '', 'output': 'Linux\n', 'status': True}
        assert self.ssh.exec_ssh_command('aaaa' ) == {'error': 'bash: aaaa: command not found\n', 'output': '', 'status': False}

    def test_get_get_scp_params(self):
        source_file = 'source_file'
        target_file = 'target_file'
        ssh_params = self.ssh.get_scp_params(source_file, target_file)
        assert ssh_params == ['-i', self.ssh_config.get('ssh_key'),
                              f"{self.ssh_config.get('user')}@{self.ssh_config.get('server')}:{source_file}",
                              target_file]

    def test_get_get_ssh_params(self):
        ssh_params = self.ssh.get_ssh_params('aaa')
        assert ssh_params == ['-o StrictHostKeyChecking=no',
                              '-t', '-i', self.ssh_config.get('ssh_key'),
                              self.ssh_config.get('user') + '@' + self.ssh_config.get('server'),
                              'aaa']

    def test_exec(self):
        assert 'bin' in self.ssh.exec('cd /; ls')

    # helper methods

    def test_uname(self):
        assert self.ssh.uname() == 'Linux'
# # helper methods: esxcli
| 35.277778 | 136 | 0.606693 | import os
from unittest import TestCase
import pytest
from dotenv import load_dotenv
from pytest import skip
from osbot_utils.utils.Files import file_not_exists
from osbot_k8s.kubernetes.Ssh import Ssh
@pytest.mark.skip('needs live server')
class test_Ssh(TestCase):
def setUp(self) -> None:
load_dotenv()
self.ssh_config = {
"user" : os.environ.get('TEST_SSH_USER' ),
"server" : os.environ.get('TEST_SSH_SERVER'),
"ssh_key" : os.environ.get('TEST_SSH_KEY' )
}
if file_not_exists(self.ssh_config.get('ssh_key')):
skip('no ssh key in current test environment')
self.ssh = Ssh(ssh_config=self.ssh_config)
print()
def test_server_in_known_hosts(self):
result = self.ssh.server_in_known_hosts()
assert type(result) is bool
def test_exec_ssh_command(self):
assert self.ssh.exec_ssh_command('uname') == {'error': '', 'output': 'Linux\n', 'status': True}
assert self.ssh.exec_ssh_command('aaaa' ) == {'error': 'bash: aaaa: command not found\n', 'output': '', 'status': False}
def test_get_get_scp_params(self):
source_file = 'source_file'
target_file = 'target_file'
ssh_params = self.ssh.get_scp_params(source_file, target_file)
assert ssh_params == ['-i', self.ssh_config.get('ssh_key'),
f"{self.ssh_config.get('user')}@{self.ssh_config.get('server')}:{source_file}",
target_file]
def test_get_get_ssh_params(self):
ssh_params = self.ssh.get_ssh_params('aaa')
assert ssh_params == ['-o StrictHostKeyChecking=no',
'-t', '-i', self.ssh_config.get('ssh_key'),
self.ssh_config.get('user') + '@' + self.ssh_config.get('server'),
'aaa']
def test_exec(self):
assert 'bin' in self.ssh.exec('cd /; ls')
def test_uname(self):
assert self.ssh.uname() == 'Linux'
| true | true |
f72667ffda4b29a371dcff42c53030225f8f2e83 | 331 | py | Python | app/main/forms.py | DevWaweru/Watchlist | e9671c61fc543668b82fd1422fad0f6483640cca | [
"MIT"
] | null | null | null | app/main/forms.py | DevWaweru/Watchlist | e9671c61fc543668b82fd1422fad0f6483640cca | [
"MIT"
] | null | null | null | app/main/forms.py | DevWaweru/Watchlist | e9671c61fc543668b82fd1422fad0f6483640cca | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
title = StringField('Review Title', validators=[Required()])
review = TextAreaField('Movie review',validators=[Required()])
submit = SubmitField('Submit')
| 36.777778 | 66 | 0.770393 | from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
title = StringField('Review Title', validators=[Required()])
review = TextAreaField('Movie review',validators=[Required()])
submit = SubmitField('Submit')
| true | true |
f72668f9f0f1bcd1855b3e06f521866410ab3bc5 | 23,269 | py | Python | InvenTree/company/models.py | inmys/InvenTree | a0d1622926ba9a13839adfe64a8fe21c073692f2 | [
"MIT"
] | null | null | null | InvenTree/company/models.py | inmys/InvenTree | a0d1622926ba9a13839adfe64a8fe21c073692f2 | [
"MIT"
] | null | null | null | InvenTree/company/models.py | inmys/InvenTree | a0d1622926ba9a13839adfe64a8fe21c073692f2 | [
"MIT"
] | null | null | null | """
Company database model definitions
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Sum, Q, UniqueConstraint
from django.apps import apps
from django.urls import reverse
from moneyed import CURRENCIES
from markdownx.models import MarkdownxField
from stdimage.models import StdImageField
from InvenTree.helpers import getMediaUrl, getBlankImage, getBlankThumbnail
from InvenTree.fields import InvenTreeURLField
from InvenTree.status_codes import PurchaseOrderStatus
import InvenTree.validators
import common.models
import common.settings
from common.settings import currency_code_default
def rename_company_image(instance, filename):
    """Return the storage path for an uploaded Company image.

    The upload is renamed to ``company_<pk>_img`` (keeping the original
    file extension, if one is present) inside the ``company_images``
    directory.

    Args:
        instance: Company object the image belongs to
        filename: uploaded image filename

    Returns:
        New image path/filename
    """
    # Everything after the last '.' is treated as the extension
    extension = filename.rsplit('.', 1)[-1] if '.' in filename else ''

    new_name = 'company_{pk}_img'.format(pk=instance.pk)
    if extension:
        new_name = new_name + '.' + extension

    return os.path.join('company_images', new_name)
class Company(models.Model):
    """ A Company object represents an external company.

    It may be a supplier or a customer or a manufacturer (or a combination)

    - A supplier is a company from which parts can be purchased
    - A customer is a company to which parts can be sold
    - A manufacturer is a company which manufactures a raw good (they may or may not be a "supplier" also)

    Attributes:
        name: Brief name of the company
        description: Longer form description
        website: URL for the company website
        address: Postal address
        phone: contact phone number
        email: contact email address
        link: Secondary URL e.g. for link to internal Wiki page
        image: Company image / logo
        notes: Extra notes about the company
        is_customer: boolean value, is this company a customer
        is_supplier: boolean value, is this company a supplier
        is_manufacturer: boolean value, is this company a manufacturer
        currency_code: Specifies the default currency for the company
    """

    @staticmethod
    def get_api_url():
        """Return the API list endpoint URL for the Company model"""
        return reverse('api-company-list')

    class Meta:
        ordering = ['name', ]

        # Companies are unique on the (name, email) pair, not on name alone
        constraints = [
            UniqueConstraint(fields=['name', 'email'], name='unique_name_email_pair')
        ]

        verbose_name_plural = "Companies"

    name = models.CharField(max_length=100, blank=False,
                            help_text=_('Company name'),
                            verbose_name=_('Company name'))

    description = models.CharField(
        max_length=500,
        verbose_name=_('Company description'),
        help_text=_('Description of the company'),
        blank=True,
    )

    website = models.URLField(
        blank=True,
        verbose_name=_('Website'),
        help_text=_('Company website URL')
    )

    address = models.CharField(max_length=200,
                               verbose_name=_('Address'),
                               blank=True, help_text=_('Company address'))

    phone = models.CharField(max_length=50,
                             verbose_name=_('Phone number'),
                             blank=True, help_text=_('Contact phone number'))

    email = models.EmailField(blank=True, null=True,
                              verbose_name=_('Email'), help_text=_('Contact email address'))

    contact = models.CharField(max_length=100,
                               verbose_name=_('Contact'),
                               blank=True, help_text=_('Point of contact'))

    link = InvenTreeURLField(blank=True, verbose_name=_('Link'), help_text=_('Link to external company information'))

    # Thumbnail variation is generated automatically; orphaned files are deleted
    image = StdImageField(
        upload_to=rename_company_image,
        null=True,
        blank=True,
        variations={'thumbnail': (128, 128)},
        delete_orphans=True,
        verbose_name=_('Image'),
    )

    notes = MarkdownxField(blank=True, verbose_name=_('Notes'))

    is_customer = models.BooleanField(default=False, verbose_name=_('is customer'), help_text=_('Do you sell items to this company?'))

    is_supplier = models.BooleanField(default=True, verbose_name=_('is supplier'), help_text=_('Do you purchase items from this company?'))

    is_manufacturer = models.BooleanField(default=False, verbose_name=_('is manufacturer'), help_text=_('Does this company manufacture parts?'))

    # Default is a callable so the instance-wide setting is read at save time
    currency = models.CharField(
        max_length=3,
        verbose_name=_('Currency'),
        blank=True,
        default=currency_code_default,
        help_text=_('Default currency used for this company'),
        validators=[InvenTree.validators.validate_currency_code],
    )

    @property
    def currency_code(self):
        """
        Return the currency code associated with this company.

        - If the currency code is invalid, use the default currency
        - If the currency code is not specified, use the default currency
        """

        code = self.currency

        if code not in CURRENCIES:
            code = common.settings.currency_code_default()

        return code

    def __str__(self):
        """ Get string representation of a Company """
        return "{n} - {d}".format(n=self.name, d=self.description)

    def get_absolute_url(self):
        """ Get the web URL for the detail view for this Company """
        return reverse('company-detail', kwargs={'pk': self.id})

    def get_image_url(self):
        """ Return the URL of the image for this company """

        if self.image:
            return getMediaUrl(self.image.url)
        else:
            # Fall back to the generic placeholder image
            return getBlankImage()

    def get_thumbnail_url(self):
        """ Return the URL for the thumbnail image for this Company """

        if self.image:
            return getMediaUrl(self.image.thumbnail.url)
        else:
            # Fall back to the generic placeholder thumbnail
            return getBlankThumbnail()

    @property
    def manufactured_part_count(self):
        """ The number of parts manufactured by this company """
        return self.manufactured_parts.count()

    @property
    def has_manufactured_parts(self):
        # True if at least one ManufacturerPart points at this company
        return self.manufactured_part_count > 0

    @property
    def supplied_part_count(self):
        """ The number of parts supplied by this company """
        return self.supplied_parts.count()

    @property
    def has_supplied_parts(self):
        """ Return True if this company supplies any parts """
        return self.supplied_part_count > 0

    @property
    def parts(self):
        """ Return SupplierPart objects which are supplied or manufactured by this company """
        return SupplierPart.objects.filter(Q(supplier=self.id) | Q(manufacturer_part__manufacturer=self.id))

    @property
    def part_count(self):
        """ The number of parts manufactured (or supplied) by this Company """
        return self.parts.count()

    @property
    def has_parts(self):
        # True if this company supplies or manufactures at least one part
        return self.part_count > 0

    @property
    def stock_items(self):
        """ Return a list of all stock items supplied or manufactured by this company """
        # Lazy model lookup avoids a circular import with the stock app
        stock = apps.get_model('stock', 'StockItem')
        return stock.objects.filter(Q(supplier_part__supplier=self.id) | Q(supplier_part__manufacturer_part__manufacturer=self.id)).all()

    @property
    def stock_count(self):
        """ Return the number of stock items supplied or manufactured by this company """
        return self.stock_items.count()

    def outstanding_purchase_orders(self):
        """ Return purchase orders which are 'outstanding' """
        return self.purchase_orders.filter(status__in=PurchaseOrderStatus.OPEN)

    def pending_purchase_orders(self):
        """ Return purchase orders which are PENDING (not yet issued) """
        return self.purchase_orders.filter(status=PurchaseOrderStatus.PENDING)

    def closed_purchase_orders(self):
        """ Return purchase orders which are not 'outstanding'

        - Complete
        - Failed / lost
        - Returned
        """

        return self.purchase_orders.exclude(status__in=PurchaseOrderStatus.OPEN)

    def complete_purchase_orders(self):
        """ Return purchase orders which have been completed """
        return self.purchase_orders.filter(status=PurchaseOrderStatus.COMPLETE)

    def failed_purchase_orders(self):
        """ Return any purchase orders which were not successful """
        return self.purchase_orders.filter(status__in=PurchaseOrderStatus.FAILED)
class Contact(models.Model):
    """ A Contact represents a person who works at a particular company.

    A Company may have zero or more associated Contact objects.

    Attributes:
        company: Company link for this contact
        name: Name of the contact
        phone: contact phone number
        email: contact email
        role: position in company
    """

    # BUG FIX: the 'company' ForeignKey was declared twice (once before 'name'
    # and again after 'role'); the duplicate definition has been removed.
    company = models.ForeignKey(Company, related_name='contacts',
                                on_delete=models.CASCADE)

    name = models.CharField(max_length=100)

    phone = models.CharField(max_length=100, blank=True)

    email = models.EmailField(blank=True)

    role = models.CharField(max_length=100, blank=True)
class ManufacturerPart(models.Model):
    """ Represents a unique part as provided by a Manufacturer
    Each ManufacturerPart is identified by a MPN (Manufacturer Part Number)
    Each ManufacturerPart is also linked to a Part object.
    A Part may be available from multiple manufacturers

    Attributes:
        part: Link to the master Part
        manufacturer: Company that manufactures the ManufacturerPart
        MPN: Manufacture part number
        link: Link to external website for this manufacturer part
        description: Descriptive notes field
    """

    @staticmethod
    def get_api_url():
        """Return the API list endpoint URL for the ManufacturerPart model"""
        return reverse('api-manufacturer-part-list')

    class Meta:
        # A given Part can only be linked once to a (manufacturer, MPN) pair
        unique_together = ('part', 'manufacturer', 'MPN')

    part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
                             related_name='manufacturer_parts',
                             verbose_name=_('Base Part'),
                             limit_choices_to={
                                 'purchaseable': True,
                             },
                             help_text=_('Select part'),
                             )

    manufacturer = models.ForeignKey(
        Company,
        on_delete=models.CASCADE,
        null=True,
        related_name='manufactured_parts',
        limit_choices_to={
            'is_manufacturer': True
        },
        verbose_name=_('Manufacturer'),
        help_text=_('Select manufacturer'),
    )

    MPN = models.CharField(
        null=True,
        max_length=100,
        verbose_name=_('MPN'),
        help_text=_('Manufacturer Part Number')
    )

    link = InvenTreeURLField(
        blank=True, null=True,
        verbose_name=_('Link'),
        help_text=_('URL for external manufacturer part link')
    )

    description = models.CharField(
        max_length=250, blank=True, null=True,
        verbose_name=_('Description'),
        help_text=_('Manufacturer part description')
    )

    @classmethod
    def create(cls, part, manufacturer, mpn, description, link=None):
        """ Check if ManufacturerPart instance does not already exist
        then create it
        """

        manufacturer_part = None

        try:
            manufacturer_part = ManufacturerPart.objects.get(part=part, manufacturer=manufacturer, MPN=mpn)
        except ManufacturerPart.DoesNotExist:
            # No matching row yet - fall through and create one below
            pass

        if not manufacturer_part:
            manufacturer_part = ManufacturerPart(part=part, manufacturer=manufacturer, MPN=mpn, description=description, link=link)
            manufacturer_part.save()

        return manufacturer_part

    def __str__(self):
        """Format: "<manufacturer name> | <MPN>" (manufacturer may be null)"""
        s = ''

        if self.manufacturer:
            s += f'{self.manufacturer.name}'
            s += ' | '

        s += f'{self.MPN}'

        return s
class ManufacturerPartParameter(models.Model):
    """
    A ManufacturerPartParameter represents a key:value parameter for a MnaufacturerPart.

    This is used to represent parmeters / properties for a particular manufacturer part.

    Each parameter is a simple string (text) value.
    """

    @staticmethod
    def get_api_url():
        """Return the API list endpoint URL for this model"""
        return reverse('api-manufacturer-part-parameter-list')

    class Meta:
        # Each parameter name may only appear once per manufacturer part
        unique_together = ('manufacturer_part', 'name')

    manufacturer_part = models.ForeignKey(
        ManufacturerPart,
        on_delete=models.CASCADE,
        related_name='parameters',
        verbose_name=_('Manufacturer Part'),
    )

    name = models.CharField(
        max_length=500,
        blank=False,
        verbose_name=_('Name'),
        help_text=_('Parameter name')
    )

    value = models.CharField(
        max_length=500,
        blank=False,
        verbose_name=_('Value'),
        help_text=_('Parameter value')
    )

    # Units are free text and optional (e.g. "mm", "ohms")
    units = models.CharField(
        max_length=64,
        blank=True, null=True,
        verbose_name=_('Units'),
        help_text=_('Parameter units')
    )
class SupplierPartManager(models.Manager):
    """Custom objects manager for the SupplierPart model.

    SupplierPart lookups involve several foreign-key hops, so related
    models are always prefetched to reduce database round-trips.
    """

    def get_queryset(self):
        """Return the base queryset with related objects prefetched."""
        queryset = super().get_queryset()
        return queryset.prefetch_related(
            'part',
            'supplier',
            'manufacturer_part__manufacturer',
        )
class SupplierPart(models.Model):
""" Represents a unique part as provided by a Supplier
Each SupplierPart is identified by a SKU (Supplier Part Number)
Each SupplierPart is also linked to a Part or ManufacturerPart object.
A Part may be available from multiple suppliers
Attributes:
part: Link to the master Part (Obsolete)
source_item: The sourcing item linked to this SupplierPart instance
supplier: Company that supplies this SupplierPart object
SKU: Stock keeping unit (supplier part number)
link: Link to external website for this supplier part
description: Descriptive notes field
note: Longer form note field
base_cost: Base charge added to order independent of quantity e.g. "Reeling Fee"
multiple: Multiple that the part is provided in
lead_time: Supplier lead time
packaging: packaging that the part is supplied in, e.g. "Reel"
"""
objects = SupplierPartManager()
@staticmethod
def get_api_url():
return reverse('api-supplier-part-list')
def get_absolute_url(self):
return reverse('supplier-part-detail', kwargs={'pk': self.id})
def api_instance_filters(self):
return {
'manufacturer_part': {
'part': self.part.pk
}
}
class Meta:
unique_together = ('part', 'supplier', 'SKU')
# This model was moved from the 'Part' app
db_table = 'part_supplierpart'
def clean(self):
super().clean()
# Ensure that the linked manufacturer_part points to the same part!
if self.manufacturer_part and self.part:
if not self.manufacturer_part.part == self.part:
raise ValidationError({
'manufacturer_part': _("Linked manufacturer part must reference the same base part"),
})
def save(self, *args, **kwargs):
""" Overriding save method to connect an existing ManufacturerPart """
manufacturer_part = None
if all(key in kwargs for key in ('manufacturer', 'MPN')):
manufacturer_name = kwargs.pop('manufacturer')
MPN = kwargs.pop('MPN')
# Retrieve manufacturer part
try:
manufacturer_part = ManufacturerPart.objects.get(manufacturer__name=manufacturer_name, MPN=MPN)
except (ValueError, Company.DoesNotExist):
# ManufacturerPart does not exist
pass
if manufacturer_part:
if not self.manufacturer_part:
# Connect ManufacturerPart to SupplierPart
self.manufacturer_part = manufacturer_part
else:
raise ValidationError(f'SupplierPart {self.__str__} is already linked to {self.manufacturer_part}')
self.clean()
self.validate_unique()
super().save(*args, **kwargs)
part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
related_name='supplier_parts',
verbose_name=_('Base Part'),
limit_choices_to={
'purchaseable': True,
},
help_text=_('Select part'),
)
supplier = models.ForeignKey(Company, on_delete=models.CASCADE,
related_name='supplied_parts',
limit_choices_to={'is_supplier': True},
verbose_name=_('Supplier'),
help_text=_('Select supplier'),
)
SKU = models.CharField(
max_length=100,
verbose_name=_('SKU'),
help_text=_('Supplier stock keeping unit')
)
manufacturer_part = models.ForeignKey(ManufacturerPart, on_delete=models.CASCADE,
blank=True, null=True,
related_name='supplier_parts',
verbose_name=_('Manufacturer Part'),
help_text=_('Select manufacturer part'),
)
link = InvenTreeURLField(
blank=True, null=True,
verbose_name=_('Link'),
help_text=_('URL for external supplier part link')
)
description = models.CharField(
max_length=250, blank=True, null=True,
verbose_name=_('Description'),
help_text=_('Supplier part description')
)
note = models.CharField(
max_length=100, blank=True, null=True,
verbose_name=_('Note'),
help_text=_('Notes')
)
base_cost = models.DecimalField(max_digits=10, decimal_places=3, default=0, validators=[MinValueValidator(0)], verbose_name=_('base cost'), help_text=_('Minimum charge (e.g. stocking fee)'))
packaging = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Packaging'), help_text=_('Part packaging'))
multiple = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)], verbose_name=_('multiple'), help_text=_('Order multiple'))
# TODO - Reimplement lead-time as a charfield with special validation (pattern matching).
# lead_time = models.DurationField(blank=True, null=True)
@property
def manufacturer_string(self):
""" Format a MPN string for this SupplierPart.
Concatenates manufacture name and part number.
"""
items = []
if self.manufacturer_part:
if self.manufacturer_part.manufacturer:
items.append(self.manufacturer_part.manufacturer.name)
if self.manufacturer_part.MPN:
items.append(self.manufacturer_part.MPN)
return ' | '.join(items)
    @property
    def has_price_breaks(self):
        """ Return True if any price breaks exist for this SupplierPart """
        return self.price_breaks.count() > 0
    @property
    def price_breaks(self):
        """ Return the associated price breaks in the correct order """
        return self.pricebreaks.order_by('quantity').all()
    @property
    def unit_pricing(self):
        """ Return the price for a single unit """
        return self.get_price(1)
    def add_price_break(self, quantity, price):
        """
        Create a new price break for this part
        args:
            quantity - Numerical quantity
            price - Must be a Money object
        """
        # Check if a price break at that quantity already exists...
        # NOTE(review): price_breaks is already scoped to this part, so the
        # extra part=self.pk filter looks redundant - confirm before removing
        if self.price_breaks.filter(quantity=quantity, part=self.pk).exists():
            return
        SupplierPriceBreak.objects.create(
            part=self,
            quantity=quantity,
            price=price
        )
    # Shared pricing helper (see common.models.get_price)
    get_price = common.models.get_price
    def open_orders(self):
        """ Return a database query for PO line items for this SupplierPart,
        limited to purchase orders that are open / outstanding.
        """
        return self.purchase_order_line_items.prefetch_related('order').filter(order__status__in=PurchaseOrderStatus.OPEN)
    def on_order(self):
        """ Return the total quantity of items currently on order.
        Subtract partially received stock as appropriate
        """
        totals = self.open_orders().aggregate(Sum('quantity'), Sum('received'))
        # Quantity on order (the sums are None when there are no line items)
        q = totals.get('quantity__sum', 0)
        # Quantity received
        r = totals.get('received__sum', 0)
        if q is None or r is None:
            return 0
        else:
            return max(q - r, 0)
    def purchase_orders(self):
        """ Returns a list of purchase orders relating to this supplier part """
        return [line.order for line in self.purchase_order_line_items.all().prefetch_related('order')]
    @property
    def pretty_name(self):
        """ Alias for str(self) """
        return str(self)
    def __str__(self):
        """ Format: '[IPN | ]supplier | SKU[ | manufacturer string]' """
        s = ''
        if self.part.IPN:
            s += f'{self.part.IPN}'
            s += ' | '
        s += f'{self.supplier.name} | {self.SKU}'
        if self.manufacturer_string:
            s = s + ' | ' + self.manufacturer_string
        return s
class SupplierPriceBreak(common.models.PriceBreak):
    """ Represents a quantity price break for a SupplierPart.
    - Suppliers can offer discounts at larger quantities
    - SupplierPart(s) may have zero-or-more associated SupplierPriceBreak(s)
    Attributes:
        part: Link to a SupplierPart object that this price break applies to
        updated: Automatic DateTime field that shows last time the price break was updated
        quantity: Quantity required for price break
        cost: Cost at specified quantity
        currency: Reference to the currency of this pricebreak (leave empty for base currency)
    """
    @staticmethod
    def get_api_url():
        """ Return the API list endpoint for supplier price breaks """
        return reverse('api-part-supplier-price-list')
    # SupplierPart this price break applies to
    part = models.ForeignKey(SupplierPart, on_delete=models.CASCADE, related_name='pricebreaks', verbose_name=_('Part'),)
    # auto_now: refreshed automatically every time the record is saved
    updated = models.DateTimeField(auto_now=True, null=True, verbose_name=_('last updated'))
    class Meta:
        unique_together = ("part", "quantity")
        # This model was moved from the 'Part' app
        db_table = 'part_supplierpricebreak'
    def __str__(self):
        return f'{self.part.SKU} - {self.price} @ {self.quantity}'
| 32.408078 | 194 | 0.628089 |
from __future__ import unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Sum, Q, UniqueConstraint
from django.apps import apps
from django.urls import reverse
from moneyed import CURRENCIES
from markdownx.models import MarkdownxField
from stdimage.models import StdImageField
from InvenTree.helpers import getMediaUrl, getBlankImage, getBlankThumbnail
from InvenTree.fields import InvenTreeURLField
from InvenTree.status_codes import PurchaseOrderStatus
import InvenTree.validators
import common.models
import common.settings
from common.settings import currency_code_default
def rename_company_image(instance, filename):
    """Return the upload path for a Company image.

    The file is stored as 'company_<pk>_img' under the 'company_images'
    directory, preserving the extension of the uploaded file (if any).
    """
    # Keep the extension of the uploaded file, if one is present
    extension = filename.split('.')[-1] if '.' in filename else ''

    new_name = 'company_{pk}_img'.format(pk=instance.pk)
    if extension:
        new_name = new_name + '.' + extension

    return os.path.join('company_images', new_name)
class Company(models.Model):
    """ Represents an abstract company: may act as a supplier, a
    manufacturer and/or a customer (see the boolean role flags below).
    """
    @staticmethod
    def get_api_url():
        """ Return the API list endpoint for the Company model """
        return reverse('api-company-list')
    class Meta:
        # Sort alphabetically by default
        ordering = ['name', ]
        constraints = [
            UniqueConstraint(fields=['name', 'email'], name='unique_name_email_pair')
        ]
        verbose_name_plural = "Companies"
    name = models.CharField(max_length=100, blank=False,
                            help_text=_('Company name'),
                            verbose_name=_('Company name'))
    description = models.CharField(
        max_length=500,
        verbose_name=_('Company description'),
        help_text=_('Description of the company'),
        blank=True,
    )
    website = models.URLField(
        blank=True,
        verbose_name=_('Website'),
        help_text=_('Company website URL')
    )
    address = models.CharField(max_length=200,
                               verbose_name=_('Address'),
                               blank=True, help_text=_('Company address'))
    phone = models.CharField(max_length=50,
                             verbose_name=_('Phone number'),
                             blank=True, help_text=_('Contact phone number'))
    email = models.EmailField(blank=True, null=True,
                              verbose_name=_('Email'), help_text=_('Contact email address'))
    contact = models.CharField(max_length=100,
                               verbose_name=_('Contact'),
                               blank=True, help_text=_('Point of contact'))
    link = InvenTreeURLField(blank=True, verbose_name=_('Link'), help_text=_('Link to external company information'))
    # Uploaded files are renamed via rename_company_image; a 128x128
    # thumbnail variation is generated automatically
    image = StdImageField(
        upload_to=rename_company_image,
        null=True,
        blank=True,
        variations={'thumbnail': (128, 128)},
        delete_orphans=True,
        verbose_name=_('Image'),
    )
    notes = MarkdownxField(blank=True, verbose_name=_('Notes'))
    # Role flags: a company may fulfil any combination of these
    is_customer = models.BooleanField(default=False, verbose_name=_('is customer'), help_text=_('Do you sell items to this company?'))
    is_supplier = models.BooleanField(default=True, verbose_name=_('is supplier'), help_text=_('Do you purchase items from this company?'))
    is_manufacturer = models.BooleanField(default=False, verbose_name=_('is manufacturer'), help_text=_('Does this company manufacture parts?'))
    currency = models.CharField(
        max_length=3,
        verbose_name=_('Currency'),
        blank=True,
        default=currency_code_default,
        help_text=_('Default currency used for this company'),
        validators=[InvenTree.validators.validate_currency_code],
    )
    @property
    def currency_code(self):
        """ Return the currency code for this company, falling back to the
        global default when the stored code is not a recognised currency.
        """
        code = self.currency
        if code not in CURRENCIES:
            code = common.settings.currency_code_default()
        return code
    def __str__(self):
        """ String representation: 'name - description' """
        return "{n} - {d}".format(n=self.name, d=self.description)
    def get_absolute_url(self):
        """ Return the web URL of the detail view for this Company """
        return reverse('company-detail', kwargs={'pk': self.id})
    def get_image_url(self):
        """ Return the URL of this company's image (or a blank placeholder) """
        if self.image:
            return getMediaUrl(self.image.url)
        else:
            return getBlankImage()
    def get_thumbnail_url(self):
        """ Return the URL of this company's thumbnail (or a blank placeholder) """
        if self.image:
            return getMediaUrl(self.image.thumbnail.url)
        else:
            return getBlankThumbnail()
    @property
    def manufactured_part_count(self):
        """ Number of ManufacturerPart objects for this company """
        return self.manufactured_parts.count()
    @property
    def has_manufactured_parts(self):
        return self.manufactured_part_count > 0
    @property
    def supplied_part_count(self):
        """ Number of SupplierPart objects supplied by this company """
        return self.supplied_parts.count()
    @property
    def has_supplied_parts(self):
        return self.supplied_part_count > 0
    @property
    def parts(self):
        """ SupplierPart objects this company supplies or manufactures """
        return SupplierPart.objects.filter(Q(supplier=self.id) | Q(manufacturer_part__manufacturer=self.id))
    @property
    def part_count(self):
        return self.parts.count()
    @property
    def has_parts(self):
        return self.part_count > 0
    @property
    def stock_items(self):
        """ StockItem objects sourced from this company (as supplier or
        manufacturer).
        """
        # Late model lookup - presumably to avoid a static import of the
        # 'stock' app here; verify before changing to a direct import
        stock = apps.get_model('stock', 'StockItem')
        return stock.objects.filter(Q(supplier_part__supplier=self.id) | Q(supplier_part__manufacturer_part__manufacturer=self.id)).all()
    @property
    def stock_count(self):
        return self.stock_items.count()
    def outstanding_purchase_orders(self):
        """ Return purchase orders with an 'open' status """
        return self.purchase_orders.filter(status__in=PurchaseOrderStatus.OPEN)
    def pending_purchase_orders(self):
        """ Return purchase orders with PENDING status """
        return self.purchase_orders.filter(status=PurchaseOrderStatus.PENDING)
    def closed_purchase_orders(self):
        """ Return purchase orders which are not 'open' """
        return self.purchase_orders.exclude(status__in=PurchaseOrderStatus.OPEN)
    def complete_purchase_orders(self):
        """ Return purchase orders with COMPLETE status """
        return self.purchase_orders.filter(status=PurchaseOrderStatus.COMPLETE)
    def failed_purchase_orders(self):
        """ Return purchase orders whose status is in PurchaseOrderStatus.FAILED """
        return self.purchase_orders.filter(status__in=PurchaseOrderStatus.FAILED)
class Contact(models.Model):
    """A named point of contact associated with a Company.

    Fix: the original declared the 'company' ForeignKey twice with identical
    arguments; the second declaration silently replaced the first in the
    class namespace, so removing the duplicate does not change behavior.
    """
    # Company this contact belongs to
    company = models.ForeignKey(Company, related_name='contacts',
                                on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    phone = models.CharField(max_length=100, blank=True)
    email = models.EmailField(blank=True)
    role = models.CharField(max_length=100, blank=True)
class ManufacturerPart(models.Model):
    """ Represents a base Part as provided by a specific manufacturer,
    identified by its MPN (Manufacturer Part Number).
    """
    @staticmethod
    def get_api_url():
        """ Return the API list endpoint for the ManufacturerPart model """
        return reverse('api-manufacturer-part-list')
    class Meta:
        # One record per (part, manufacturer, MPN) combination
        unique_together = ('part', 'manufacturer', 'MPN')
    # Base part being manufactured (limited to purchaseable parts)
    part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
                             related_name='manufacturer_parts',
                             verbose_name=_('Base Part'),
                             limit_choices_to={
                                 'purchaseable': True,
                             },
                             help_text=_('Select part'),
                             )
    # Company which manufactures the part
    manufacturer = models.ForeignKey(
        Company,
        on_delete=models.CASCADE,
        null=True,
        related_name='manufactured_parts',
        limit_choices_to={
            'is_manufacturer': True
        },
        verbose_name=_('Manufacturer'),
        help_text=_('Select manufacturer'),
    )
    MPN = models.CharField(
        null=True,
        max_length=100,
        verbose_name=_('MPN'),
        help_text=_('Manufacturer Part Number')
    )
    link = InvenTreeURLField(
        blank=True, null=True,
        verbose_name=_('Link'),
        help_text=_('URL for external manufacturer part link')
    )
    description = models.CharField(
        max_length=250, blank=True, null=True,
        verbose_name=_('Description'),
        help_text=_('Manufacturer part description')
    )
    @classmethod
    def create(cls, part, manufacturer, mpn, description, link=None):
        """ Look up (or create) a ManufacturerPart for the given
        part / manufacturer / MPN combination.
        args:
            part - base Part object
            manufacturer - manufacturer Company object
            mpn - Manufacturer Part Number
            description - description text (only used when creating)
            link - optional external URL (only used when creating)
        """
        manufacturer_part = None
        try:
            manufacturer_part = ManufacturerPart.objects.get(part=part, manufacturer=manufacturer, MPN=mpn)
        except ManufacturerPart.DoesNotExist:
            pass
        # Only create a new record if no matching one already exists
        if not manufacturer_part:
            manufacturer_part = ManufacturerPart(part=part, manufacturer=manufacturer, MPN=mpn, description=description, link=link)
            manufacturer_part.save()
        return manufacturer_part
    def __str__(self):
        """ Format: '[manufacturer name | ]MPN' """
        s = ''
        if self.manufacturer:
            s += f'{self.manufacturer.name}'
            s += ' | '
        s += f'{self.MPN}'
        return s
class ManufacturerPartParameter(models.Model):
    """ A name/value parameter attached to a particular ManufacturerPart """
    @staticmethod
    def get_api_url():
        """ Return the API list endpoint for manufacturer part parameters """
        return reverse('api-manufacturer-part-parameter-list')
    class Meta:
        # Each parameter name may appear only once per manufacturer part
        unique_together = ('manufacturer_part', 'name')
    manufacturer_part = models.ForeignKey(
        ManufacturerPart,
        on_delete=models.CASCADE,
        related_name='parameters',
        verbose_name=_('Manufacturer Part'),
    )
    name = models.CharField(
        max_length=500,
        blank=False,
        verbose_name=_('Name'),
        help_text=_('Parameter name')
    )
    value = models.CharField(
        max_length=500,
        blank=False,
        verbose_name=_('Value'),
        help_text=_('Parameter value')
    )
    units = models.CharField(
        max_length=64,
        blank=True, null=True,
        verbose_name=_('Units'),
        help_text=_('Parameter units')
    )
class SupplierPartManager(models.Manager):
    """ Custom manager for SupplierPart: always prefetch the related
    objects commonly needed when displaying a SupplierPart.
    """
    def get_queryset(self):
        return super().get_queryset().prefetch_related(
            'part',
            'supplier',
            'manufacturer_part__manufacturer',
        )
class SupplierPart(models.Model):
    """Represents a unique part as provided by a Supplier.

    Links a purchaseable base Part to a supplier Company, identified by the
    supplier's SKU. May optionally be linked to a ManufacturerPart (which
    must reference the same base Part).
    """

    objects = SupplierPartManager()

    @staticmethod
    def get_api_url():
        """Return the API list endpoint for the SupplierPart model."""
        return reverse('api-supplier-part-list')

    def get_absolute_url(self):
        """Return the web URL of the detail view for this SupplierPart."""
        return reverse('supplier-part-detail', kwargs={'pk': self.id})

    def api_instance_filters(self):
        """Return custom API filters for this instance: any selectable
        ManufacturerPart must reference the same base part."""
        return {
            'manufacturer_part': {
                'part': self.part.pk
            }
        }

    class Meta:
        unique_together = ('part', 'supplier', 'SKU')

        # This model was moved from the 'Part' app
        db_table = 'part_supplierpart'

    def clean(self):
        """Model validation: a linked ManufacturerPart must point at the
        same base part as this SupplierPart."""
        super().clean()

        if self.manufacturer_part and self.part:
            if not self.manufacturer_part.part == self.part:
                raise ValidationError({
                    'manufacturer_part': _("Linked manufacturer part must reference the same base part"),
                })

    def save(self, *args, **kwargs):
        """Save this SupplierPart.

        If both 'manufacturer' (a manufacturer name) and 'MPN' are supplied
        as keyword arguments, look up the matching ManufacturerPart and link
        it to this SupplierPart - unless a different one is already linked,
        in which case a ValidationError is raised.
        """

        manufacturer_part = None

        if all(key in kwargs for key in ('manufacturer', 'MPN')):
            manufacturer_name = kwargs.pop('manufacturer')
            MPN = kwargs.pop('MPN')

            try:
                manufacturer_part = ManufacturerPart.objects.get(manufacturer__name=manufacturer_name, MPN=MPN)
            except (ValueError, Company.DoesNotExist, ManufacturerPart.DoesNotExist):
                # Fix: this lookup raises ManufacturerPart.DoesNotExist, which
                # the original 'except Company.DoesNotExist' did not catch
                pass

        if manufacturer_part:
            if not self.manufacturer_part:
                # Connect ManufacturerPart to SupplierPart
                self.manufacturer_part = manufacturer_part
            else:
                # Fix: interpolate str(self); the original rendered the bound
                # __str__ method object into the error message
                raise ValidationError(f'SupplierPart {self} is already linked to {self.manufacturer_part}')

        self.clean()
        self.validate_unique()

        super().save(*args, **kwargs)

    # Base part being supplied (limited to purchaseable parts)
    part = models.ForeignKey('part.Part', on_delete=models.CASCADE,
                             related_name='supplier_parts',
                             verbose_name=_('Base Part'),
                             limit_choices_to={
                                 'purchaseable': True,
                             },
                             help_text=_('Select part'),
                             )

    # Company which supplies this part
    supplier = models.ForeignKey(Company, on_delete=models.CASCADE,
                                 related_name='supplied_parts',
                                 limit_choices_to={'is_supplier': True},
                                 verbose_name=_('Supplier'),
                                 help_text=_('Select supplier'),
                                 )

    # Supplier stock keeping unit (unique together with part + supplier)
    SKU = models.CharField(
        max_length=100,
        verbose_name=_('SKU'),
        help_text=_('Supplier stock keeping unit')
    )

    # Optional link to the equivalent ManufacturerPart
    manufacturer_part = models.ForeignKey(ManufacturerPart, on_delete=models.CASCADE,
                                          blank=True, null=True,
                                          related_name='supplier_parts',
                                          verbose_name=_('Manufacturer Part'),
                                          help_text=_('Select manufacturer part'),
                                          )

    link = InvenTreeURLField(
        blank=True, null=True,
        verbose_name=_('Link'),
        help_text=_('URL for external supplier part link')
    )

    description = models.CharField(
        max_length=250, blank=True, null=True,
        verbose_name=_('Description'),
        help_text=_('Supplier part description')
    )

    note = models.CharField(
        max_length=100, blank=True, null=True,
        verbose_name=_('Note'),
        help_text=_('Notes')
    )

    # Fixed charge added to any order (e.g. stocking fee)
    base_cost = models.DecimalField(max_digits=10, decimal_places=3, default=0, validators=[MinValueValidator(0)], verbose_name=_('base cost'), help_text=_('Minimum charge (e.g. stocking fee)'))

    packaging = models.CharField(max_length=50, blank=True, null=True, verbose_name=_('Packaging'), help_text=_('Part packaging'))

    # Parts must be ordered in multiples of this quantity
    multiple = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)], verbose_name=_('multiple'), help_text=_('Order multiple'))

    @property
    def manufacturer_string(self):
        """Return '<manufacturer> | <MPN>' (fields that are available) for
        the linked ManufacturerPart; empty string when none is linked."""
        items = []

        if self.manufacturer_part:
            if self.manufacturer_part.manufacturer:
                items.append(self.manufacturer_part.manufacturer.name)
            if self.manufacturer_part.MPN:
                items.append(self.manufacturer_part.MPN)

        return ' | '.join(items)

    @property
    def has_price_breaks(self):
        """Return True if any price breaks exist for this SupplierPart."""
        return self.price_breaks.count() > 0

    @property
    def price_breaks(self):
        """Return the associated price breaks, ordered by quantity."""
        return self.pricebreaks.order_by('quantity').all()

    @property
    def unit_pricing(self):
        """Return the price for a single unit."""
        return self.get_price(1)

    def add_price_break(self, quantity, price):
        """Create a new price break for this part.

        Args:
            quantity: numerical quantity at which the price applies
            price: price (Money object) at the given quantity
        """
        # Do not duplicate an existing price break at the same quantity
        if self.price_breaks.filter(quantity=quantity, part=self.pk).exists():
            return

        SupplierPriceBreak.objects.create(
            part=self,
            quantity=quantity,
            price=price
        )

    # Shared pricing helper (see common.models.get_price)
    get_price = common.models.get_price

    def open_orders(self):
        """Return a query for PO line items for this SupplierPart, limited
        to purchase orders that are open / outstanding."""
        return self.purchase_order_line_items.prefetch_related('order').filter(order__status__in=PurchaseOrderStatus.OPEN)

    def on_order(self):
        """Return the total quantity of items currently on order,
        subtracting quantities already received."""
        totals = self.open_orders().aggregate(Sum('quantity'), Sum('received'))

        # The sums are None when there are no open line items
        q = totals.get('quantity__sum', 0)
        r = totals.get('received__sum', 0)

        if q is None or r is None:
            return 0
        else:
            return max(q - r, 0)

    def purchase_orders(self):
        """Return a list of purchase orders relating to this supplier part."""
        return [line.order for line in self.purchase_order_line_items.all().prefetch_related('order')]

    @property
    def pretty_name(self):
        """Alias for str(self)."""
        return str(self)

    def __str__(self):
        """Format: '[IPN | ]supplier | SKU[ | manufacturer string]'."""
        s = ''

        if self.part.IPN:
            s += f'{self.part.IPN}'
            s += ' | '

        s += f'{self.supplier.name} | {self.SKU}'

        if self.manufacturer_string:
            s = s + ' | ' + self.manufacturer_string

        return s
class SupplierPriceBreak(common.models.PriceBreak):
    """ Represents a quantity price break for a SupplierPart.
    Suppliers can offer discounts at larger quantities; a SupplierPart may
    have zero or more associated price breaks.
    """
    @staticmethod
    def get_api_url():
        """ Return the API list endpoint for supplier price breaks """
        return reverse('api-part-supplier-price-list')
    # SupplierPart this price break applies to
    part = models.ForeignKey(SupplierPart, on_delete=models.CASCADE, related_name='pricebreaks', verbose_name=_('Part'),)
    # auto_now: refreshed automatically every time the record is saved
    updated = models.DateTimeField(auto_now=True, null=True, verbose_name=_('last updated'))
    class Meta:
        unique_together = ("part", "quantity")
        # This model was moved from the 'Part' app
        db_table = 'part_supplierpricebreak'
    def __str__(self):
        return f'{self.part.SKU} - {self.price} @ {self.quantity}'
| true | true |
f7266b0a144bf41133781daf4df6f25ffb28d3a9 | 463 | py | Python | data/scripts/templates/object/tangible/furniture/jedi/shared_frn_all_table_dark_01.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/furniture/jedi/shared_frn_all_table_dark_01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/furniture/jedi/shared_frn_all_table_dark_01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Tangible template for this furniture object.

	Called by the template loader with the active *kernel*; returns the
	configured Tangible result object.
	"""
	result = Tangible()
	result.template = "object/tangible/furniture/jedi/shared_frn_all_table_dark_01.iff"
	result.attribute_template_id = 6
	result.stfName("frn_n","frn_all_jedi_table")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
f7266b31a38ebc7a5721d50d8c4e8bcd245cd5d2 | 292 | py | Python | tests/urls.py | pkeeper/pdf-crawler-test | 7f8b9ca135fae7301f7902ada3669cf82726c4e0 | [
"MIT"
] | null | null | null | tests/urls.py | pkeeper/pdf-crawler-test | 7f8b9ca135fae7301f7902ada3669cf82726c4e0 | [
"MIT"
] | null | null | null | tests/urls.py | pkeeper/pdf-crawler-test | 7f8b9ca135fae7301f7902ada3669cf82726c4e0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from pdf_crawler_test.urls import urlpatterns as pdf_crawler_test_urls
# Root URLconf used for the test suite: delegate every request to the
# pdf_crawler_test app's URL patterns.
urlpatterns = [
    url(r'^', include(pdf_crawler_test_urls, namespace='pdf_crawler_test')),
]
| 26.545455 | 76 | 0.777397 |
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from pdf_crawler_test.urls import urlpatterns as pdf_crawler_test_urls
# Root URLconf used for the test suite: delegate every request to the
# pdf_crawler_test app's URL patterns.
urlpatterns = [
    url(r'^', include(pdf_crawler_test_urls, namespace='pdf_crawler_test')),
]
| true | true |
f7266dca016862413492651e6ea69b794172bb4d | 1,926 | py | Python | src/ksl.py | adnan007d/scraping-jobs-for-alex | 393c2f703d939cdf5944faa59863336070b611f3 | [
"MIT"
] | null | null | null | src/ksl.py | adnan007d/scraping-jobs-for-alex | 393c2f703d939cdf5944faa59863336070b611f3 | [
"MIT"
] | null | null | null | src/ksl.py | adnan007d/scraping-jobs-for-alex | 393c2f703d939cdf5944faa59863336070b611f3 | [
"MIT"
] | null | null | null | import time
import json
import requests
import urllib3
from random import randint
from bs4 import BeautifulSoup
from threading import Thread
# Suppress warnings from urllib3 (requests below are made with verify=False)
urllib3.disable_warnings()
# Search endpoint: listings posted within the last 7 days
BASE_URL = "https://jobs.ksl.com/search/posted/last-7-days"
# Browser-like User-Agent header sent with every request
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
# Shared accumulator: url -> {name, employer, url, description};
# populated by getJobListings and by worker threads in getJobDescriptions
JOBS = {}
def getJobDescriptions(url, headers):
    """Fetch a job posting page and store its og:description text in the
    shared JOBS dict, under the entry already created for this URL.

    Intended to run in a worker thread spawned by getJobListings.
    """
    response = requests.get(url=url, headers=headers, verify=False, timeout=20)
    response.close()

    page = BeautifulSoup(response.text, "html.parser")
    meta_tags = page.find_all(
        "meta", {"property": "og:description"}, "html.parser"
    )

    JOBS[url]["description"] = meta_tags[0]["content"]
def writeToFile():
    """Dump the shared JOBS dictionary to 'sample.json', overwriting any
    previous contents.

    Note: the original declared ``global JOBS``, which is unnecessary for
    read-only access to a module-level name and has been removed.
    """
    with open("sample.json", "w") as outfile:
        json.dump(JOBS, outfile)
def getJobListings(url, headers):
    """Scrape one page of KSL job search results.

    Parses the embedded JSON-LD ItemList for job entries, records each job
    in the shared JOBS dict, fetches every job's description concurrently,
    writes a snapshot to disk, then recurses into the next results page
    (when a 'next' link is present).
    """
    dataX = requests.get(url=url, headers=headers, verify=False, timeout=20)
    soup = BeautifulSoup(dataX.text, "html.parser")
    dataX.close()
    # The listings are embedded as JSON-LD in the first matching <script> tag
    script = soup.find_all('script', {'type': 'application/ld+json'})
    content = script[0].contents[0]
    jobsArray = json.loads(content)["itemListElement"]
    threads = []
    for job in jobsArray:
        JOBS[job["url"]] = {
            "name": job["title"],
            "employer": job["hiringOrganization"]["name"],
            "url": job["url"],
        }
        # One worker thread per job to fetch its description page
        t = Thread(target=getJobDescriptions, args=(job["url"], headers))
        threads.append(t)
    for i in threads:
        i.start()
    # Making sure all the jobs description is fetched
    for i in threads:
        i.join()
    print(f"Number of jobs Scraped {len(JOBS)}")
    # Persist progress after each page
    writeToFile()
    next_page = soup.find("a", {"class": "next link"})
    if next_page is not None:
        getJobListings(next_page.get("href"), HEADERS)
# Kick off the crawl from the first results page
getJobListings(BASE_URL, HEADERS)
| 26.027027 | 141 | 0.648494 | import time
import json
import requests
import urllib3
from random import randint
from bs4 import BeautifulSoup
from threading import Thread
urllib3.disable_warnings()
BASE_URL = "https://jobs.ksl.com/search/posted/last-7-days"
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
JOBS = {}
def getJobDescriptions(url, headers):
    """Fetch a job posting page and store its og:description in JOBS[url]."""
    data = requests.get(url=url, headers=headers, verify=False, timeout=20)
    data.close()
    soup = BeautifulSoup(data.text, "html.parser")
    descriptionTag = soup.find_all(
        "meta", {"property": "og:description"}, "html.parser"
    )
    description = descriptionTag[0]["content"]
    JOBS[url]["description"] = description
def writeToFile():
    """Dump the shared JOBS dict to 'sample.json' (overwriting)."""
    global JOBS
    with open("sample.json", "w") as outfile:
        json.dump(JOBS, outfile)
def getJobListings(url, headers):
    """Scrape one results page: record jobs in JOBS, fetch descriptions
    concurrently, save to disk, then recurse into the next page if any."""
    dataX = requests.get(url=url, headers=headers, verify=False, timeout=20)
    soup = BeautifulSoup(dataX.text, "html.parser")
    dataX.close()
    # Listings are embedded as JSON-LD in the first matching <script> tag
    script = soup.find_all('script', {'type': 'application/ld+json'})
    content = script[0].contents[0]
    jobsArray = json.loads(content)["itemListElement"]
    threads = []
    for job in jobsArray:
        JOBS[job["url"]] = {
            "name": job["title"],
            "employer": job["hiringOrganization"]["name"],
            "url": job["url"],
        }
        t = Thread(target=getJobDescriptions, args=(job["url"], headers))
        threads.append(t)
    for i in threads:
        i.start()
    # Wait for every description worker to finish before moving on
    for i in threads:
        i.join()
    print(f"Number of jobs Scraped {len(JOBS)}")
    writeToFile()
    next_page = soup.find("a", {"class": "next link"})
    if next_page is not None:
        getJobListings(next_page.get("href"), HEADERS)
# Start crawling from the first results page
getJobListings(BASE_URL, HEADERS)
| true | true |
f7266f67638e9576f4b43d525e963cfc0fa2a7b5 | 536 | py | Python | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/aio/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/aio/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/aio/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._device_update import DeviceUpdate
__all__ = ['DeviceUpdate']
| 48.727273 | 94 | 0.561567 |
from ._device_update import DeviceUpdate
__all__ = ['DeviceUpdate']
| true | true |
f726701582b7fe85721853e639a19adb9c7fe1fe | 7,883 | py | Python | test/functional/rpc_users.py | FcoinFup/litecoin | f60e79f2bf373dafd258264ae197cee44ab4a314 | [
"MIT"
] | null | null | null | test/functional/rpc_users.py | FcoinFup/litecoin | f60e79f2bf373dafd258264ae197cee44ab4a314 | [
"MIT"
] | null | null | null | test/functional/rpc_users.py | FcoinFup/litecoin | f60e79f2bf373dafd258264ae197cee44ab4a314 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
class HTTPBasicsTest(BitcoinTestFramework):
    """Check RPC authentication via rpcauth entries and rpcuser/rpcpassword.

    Node 0 is configured with three rpcauth entries (two fixed, one
    generated with the share/rpcauth tool); node 1 with a plain
    rpcuser/rpcpassword pair.
    """

    def set_test_params(self):
        self.num_nodes = 2

    def setup_chain(self):
        super().setup_chain()
        # Append rpcauth to the config file before node initialization
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        rpcuser = "rpcuser=rpcuser💻"
        rpcpassword = "rpcpassword=rpcpassword🔑"

        # Generate a random user, then derive its rpcauth line and password
        # using the share/rpcauth tool referenced by the test config
        self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))
        gen_rpcauth = config['environment']['RPCAUTH']
        p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
        lines = p.stdout.read().splitlines()
        rpcauth3 = lines[1]
        self.password = lines[3]

        # Node 0: rpcauth entries; node 1: rpcuser/rpcpassword
        with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "deliverycoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcauth+"\n")
            f.write(rpcauth2+"\n")
            f.write(rpcauth3+"\n")
        with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "deliverycoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcuser+"\n")
            f.write(rpcpassword+"\n")

    def auth_status(self, url, authpair):
        """POST a getbestblockhash RPC using HTTP basic auth credentials
        *authpair* ('user:password') against the parsed *url*.

        Returns the HTTP status code of the response.
        """
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        status = resp.status
        conn.close()
        return status

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urllib.parse.urlparse(self.nodes[0].url)

        # Old authpair (from the node URL)
        authpair = url.username + ':' + url.password
        # Passwords matching the fixed rpcauth entries for 'rt' and 'rt2'
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="

        self.log.info('Correct...')
        assert_equal(self.auth_status(url, authpair), 200)

        # New authpair generated via share/rpcauth tool
        self.log.info('Correct...')
        assert_equal(self.auth_status(url, "rt:"+password), 200)

        # Wrong login name with rt's password
        self.log.info('Wrong...')
        assert_equal(self.auth_status(url, "rtwrong:"+password), 401)

        # Wrong password for rt
        self.log.info('Wrong...')
        assert_equal(self.auth_status(url, "rt:"+password+"wrong"), 401)

        # Correct for rt2 (second authpair with different username)
        self.log.info('Correct...')
        assert_equal(self.auth_status(url, "rt2:"+password2), 200)

        # Wrong password for rt2
        self.log.info('Wrong...')
        assert_equal(self.auth_status(url, "rt2:"+password2+"wrong"), 401)

        # Correct for randomly generated user
        self.log.info('Correct...')
        assert_equal(self.auth_status(url, self.user+":"+self.password), 200)

        # Wrong password for randomly generated user
        self.log.info('Wrong...')
        assert_equal(self.auth_status(url, self.user+":"+self.password+"Wrong"), 401)

        ###############################################################
        # Check correctness of the rpcuser/rpcpassword config options #
        ###############################################################
        url = urllib.parse.urlparse(self.nodes[1].url)

        # rpcuser and rpcpassword authpair
        self.log.info('Correct...')
        assert_equal(self.auth_status(url, "rpcuser💻:rpcpassword🔑"), 200)

        # Wrong login name with rpcuser's password
        assert_equal(self.auth_status(url, "rpcuserwrong:rpcpassword"), 401)

        # Wrong password for rpcuser
        self.log.info('Wrong...')
        assert_equal(self.auth_status(url, "rpcuser:rpcpasswordwrong"), 401)
if __name__ == '__main__':
    # Allow running this test directly from the command line
    # (fix: removed non-PEP8 spaces before the call parentheses)
    HTTPBasicsTest().main()
| 38.082126 | 129 | 0.614994 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
class HTTPBasicsTest(BitcoinTestFramework):
    """Functional test of the node's JSON-RPC HTTP authentication.

    Exercises ``rpcauth`` entries (two precomputed plus one generated by
    the RPCAUTH helper script) and plain ``rpcuser``/``rpcpassword``
    credentials, asserting HTTP 200 for valid pairs and 401 for bad ones.
    """
    def set_test_params(self):
        # Node 0 is configured with rpcauth lines, node 1 with
        # rpcuser/rpcpassword (see setup_chain).
        self.num_nodes = 2
    def setup_chain(self):
        """Append the credential lines under test to each node's config."""
        super().setup_chain()
        # Precomputed rpcauth entries: "rpcauth=<user>:<salt>$<hmac>".
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        # Non-ASCII credentials to exercise UTF-8 handling in the config file.
        rpcuser = "rpcuser=rpcuser💻"
        rpcpassword = "rpcpassword=rpcpassword🔑"
        # Random 10-char username for the script-generated rpcauth entry.
        self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
        config = configparser.ConfigParser()
        config.read_file(open(self.options.configfile))
        gen_rpcauth = config['environment']['RPCAUTH']
        # Run the helper script; line 1 is the rpcauth config line,
        # line 3 is the clear-text password it generated.
        p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
        lines = p.stdout.read().splitlines()
        rpcauth3 = lines[1]
        self.password = lines[3]
        with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "deliverycoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcauth+"\n")
            f.write(rpcauth2+"\n")
            f.write(rpcauth3+"\n")
        with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "deliverycoin.conf"), 'a', encoding='utf8') as f:
            f.write(rpcuser+"\n")
            f.write(rpcpassword+"\n")
    def run_test(self):
        """Probe both nodes' RPC ports with good and bad credential pairs.

        NOTE(review): ``password2`` and ``url`` are referenced below but
        are not defined anywhere in this excerpt — the start of this
        method (which presumably parsed ``self.nodes[0].url`` and set the
        rt/rt2 passwords) appears truncated; confirm against the full
        upstream source before relying on this copy.
        """
        authpairnew = "rt2:"+password2+"wrong"
        headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status, 401)
        conn.close()
        # Correct password for the randomly generated user -> 200
        self.log.info('Correct...')
        authpairnew = self.user+":"+self.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status, 200)
        conn.close()
        # Wrong password for the randomly generated user -> 401
        self.log.info('Wrong...')
        authpairnew = self.user+":"+self.password+"Wrong"
        headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status, 401)
        conn.close()
        ###############################################################
        # Check correctness of the rpcuser/rpcpassword config options #
        ###############################################################
        url = urllib.parse.urlparse(self.nodes[1].url)
        # Correct rpcuser/rpcpassword authpair -> 200
        self.log.info('Correct...')
        rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
        headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status, 200)
        conn.close()
        # Wrong login name with rpcuser's password -> 401
        rpcuserauthpair = "rpcuserwrong:rpcpassword"
        headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status, 401)
        conn.close()
        # Wrong password for rpcuser -> 401
        self.log.info('Wrong...')
        rpcuserauthpair = "rpcuser:rpcpasswordwrong"
        headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status, 401)
        conn.close()
if __name__ == '__main__':
    # Entry point: run the RPC-auth functional test when invoked directly.
    # (Fixed PEP 8 E211: no whitespace before call parentheses.)
    HTTPBasicsTest().main()
| true | true |
f7267055d2c097c704fe63ba2a7aa2a991fa61d0 | 6,510 | py | Python | touchdown/tests/test_aws_iam_server_certificate.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 14 | 2015-01-05T18:18:04.000Z | 2022-02-07T19:35:12.000Z | touchdown/tests/test_aws_iam_server_certificate.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 106 | 2015-01-06T00:17:13.000Z | 2019-09-07T00:35:32.000Z | touchdown/tests/test_aws_iam_server_certificate.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 5 | 2015-01-30T10:18:24.000Z | 2022-02-07T19:35:13.000Z | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import errors
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.stubs.aws import ServerCertificateStubber
class TestCreateServerCertificate(StubberTestCase):
    """Apply-goal tests for the IAM server certificate resource.

    Each test wires a ServerCertificateStubber around the service and
    queues the exact AWS API responses the plan is expected to consume —
    the queued order is load-bearing.
    """
    def test_create_server_certificate(self):
        """A cert absent from IAM is uploaded and then re-read."""
        goal = self.create_goal("apply")
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.chain_file) as chain_file:
            server_certificate = self.fixtures.enter_context(
                ServerCertificateStubber(
                    goal.get_service(
                        self.aws.add_server_certificate(
                            name="my-test-server-certificate",
                            certificate_body=cert_file.read(),
                            private_key=key_file.read(),
                            certificate_chain=chain_file.read(),
                        ),
                        "apply",
                    )
                )
            )
        # first list is to find things to delete
        server_certificate.add_list_server_certificate_empty_response()
        # second is to find if there is an existing matching cert
        server_certificate.add_list_server_certificate_empty_response()
        server_certificate.add_upload_server_certificate()
        # CreateAction needs to look up cert again as create response has no info
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        # refresh resource metadata
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        # sanity check / PostCreation
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        goal.execute()
    def test_create_server_certificate_idempotent(self):
        """An already-uploaded matching cert yields an empty plan."""
        goal = self.create_goal("apply")
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file:
            server_certificate = self.fixtures.enter_context(
                ServerCertificateStubber(
                    goal.get_service(
                        self.aws.add_server_certificate(
                            name="my-test-server-certificate",
                            certificate_body=cert_file.read(),
                            private_key=key_file.read(),
                        ),
                        "apply",
                    )
                )
            )
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(server_certificate.resource)), 0)
    def test_create_server_certificate_wrong_chain(self):
        """Swapping body and chain is rejected at resource-definition time."""
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.chain_file) as chain_file:
            with self.assertRaises(errors.Error) as cm:
                self.aws.add_server_certificate(
                    name="my-test-server-certificate",
                    certificate_body=chain_file.read(),  # to trigger error
                    private_key=key_file.read(),
                    certificate_chain=cert_file.read(),  # to trigger error
                )
            self.assertIn("Certificate does not match private_key", str(cm.exception))
    def test_create_server_certificate_bad_chain(self):
        """A chain that does not sign the certificate is rejected."""
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.bad_chain_file) as bad_chain_file:
            with self.assertRaises(errors.Error) as cm:
                self.aws.add_server_certificate(
                    name="my-test-server-certificate",
                    certificate_body=cert_file.read(),  # to trigger error
                    private_key=key_file.read(),
                    certificate_chain=bad_chain_file.read(),  # to trigger error
                )
            self.assertIn("Invalid chain for", str(cm.exception))
class TestDestroyServerCertificate(StubberTestCase):
    """Destroy-goal tests for the IAM server certificate resource."""
    def test_destroy_server_certificate(self):
        """An existing cert is found twice (plan + refresh) then deleted."""
        goal = self.create_goal("destroy")
        server_certificate = self.fixtures.enter_context(
            ServerCertificateStubber(
                goal.get_service(
                    self.aws.add_server_certificate(name="my-test-server-certificate"),
                    "destroy",
                )
            )
        )
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_delete_server_certificate()
        goal.execute()
    def test_destroy_server_certificate_idempotent(self):
        """Destroying an absent cert produces an empty plan and no changes."""
        goal = self.create_goal("destroy")
        server_certificate = self.fixtures.enter_context(
            ServerCertificateStubber(
                goal.get_service(
                    self.aws.add_server_certificate(name="my-test-server-certificate"),
                    "destroy",
                )
            )
        )
        server_certificate.add_list_server_certificate_empty_response()
        server_certificate.add_list_server_certificate_empty_response()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(server_certificate.resource)), 0)
| 43.986486 | 87 | 0.650691 |
from touchdown.core import errors
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.stubs.aws import ServerCertificateStubber
class TestCreateServerCertificate(StubberTestCase):
    """Apply-goal tests for the IAM server certificate resource.

    (Comment-stripped duplicate of the class above in this dump; docs
    restored.)  The queued stub responses must match the order in which
    the plan calls the AWS API.
    """
    def test_create_server_certificate(self):
        """A cert absent from IAM is uploaded and then re-read."""
        goal = self.create_goal("apply")
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.chain_file) as chain_file:
            server_certificate = self.fixtures.enter_context(
                ServerCertificateStubber(
                    goal.get_service(
                        self.aws.add_server_certificate(
                            name="my-test-server-certificate",
                            certificate_body=cert_file.read(),
                            private_key=key_file.read(),
                            certificate_chain=chain_file.read(),
                        ),
                        "apply",
                    )
                )
            )
        # Two empty lists (delete scan + existing-cert scan), the upload,
        # then three list+get pairs (post-create lookup, refresh, sanity).
        server_certificate.add_list_server_certificate_empty_response()
        server_certificate.add_list_server_certificate_empty_response()
        server_certificate.add_upload_server_certificate()
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        goal.execute()
    def test_create_server_certificate_idempotent(self):
        """An already-uploaded matching cert yields an empty plan."""
        goal = self.create_goal("apply")
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file:
            server_certificate = self.fixtures.enter_context(
                ServerCertificateStubber(
                    goal.get_service(
                        self.aws.add_server_certificate(
                            name="my-test-server-certificate",
                            certificate_body=cert_file.read(),
                            private_key=key_file.read(),
                        ),
                        "apply",
                    )
                )
            )
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(server_certificate.resource)), 0)
    def test_create_server_certificate_wrong_chain(self):
        """Swapping body and chain is rejected at resource-definition time."""
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.chain_file) as chain_file:
            with self.assertRaises(errors.Error) as cm:
                self.aws.add_server_certificate(
                    name="my-test-server-certificate",
                    certificate_body=chain_file.read(),  # deliberately wrong
                    private_key=key_file.read(),
                    certificate_chain=cert_file.read(),  # deliberately wrong
                )
            self.assertIn("Certificate does not match private_key", str(cm.exception))
    def test_create_server_certificate_bad_chain(self):
        """A chain that does not sign the certificate is rejected."""
        with open(ServerCertificateStubber.cert_file) as cert_file, open(
            ServerCertificateStubber.key_file
        ) as key_file, open(ServerCertificateStubber.bad_chain_file) as bad_chain_file:
            with self.assertRaises(errors.Error) as cm:
                self.aws.add_server_certificate(
                    name="my-test-server-certificate",
                    certificate_body=cert_file.read(),
                    private_key=key_file.read(),
                    certificate_chain=bad_chain_file.read(),  # deliberately bad
                )
            self.assertIn("Invalid chain for", str(cm.exception))
class TestDestroyServerCertificate(StubberTestCase):
    """Destroy-goal tests (comment-stripped duplicate; docs restored)."""
    def test_destroy_server_certificate(self):
        """An existing cert is found twice (plan + refresh) then deleted."""
        goal = self.create_goal("destroy")
        server_certificate = self.fixtures.enter_context(
            ServerCertificateStubber(
                goal.get_service(
                    self.aws.add_server_certificate(name="my-test-server-certificate"),
                    "destroy",
                )
            )
        )
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_list_server_certificate_one_response()
        server_certificate.add_get_server_certificate()
        server_certificate.add_delete_server_certificate()
        goal.execute()
    def test_destroy_server_certificate_idempotent(self):
        """Destroying an absent cert produces an empty plan and no changes."""
        goal = self.create_goal("destroy")
        server_certificate = self.fixtures.enter_context(
            ServerCertificateStubber(
                goal.get_service(
                    self.aws.add_server_certificate(name="my-test-server-certificate"),
                    "destroy",
                )
            )
        )
        server_certificate.add_list_server_certificate_empty_response()
        server_certificate.add_list_server_certificate_empty_response()
        self.assertEqual(len(list(goal.plan())), 0)
        self.assertEqual(len(goal.get_changes(server_certificate.resource)), 0)
| true | true |
f7267151612dbfc56f3defd079b3c1b033ab0459 | 2,573 | py | Python | task/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | task/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | task/models.py | Redhead95/taskful_api | 2b28a7f6ecaeb1c77d3fd3bea9ae5922667b1044 | [
"MIT"
] | null | null | null | import os
import uuid
from django.db import models
from django.utils.deconstruct import deconstructible
# Two-letter status codes stored in the database for lists and tasks.
NOT_COMPLETE = 'NC'
COMPLETE = 'C'
# Django ``choices`` pairs: (stored value, human-readable label).
TASK_STATUS_CHOICES = [
    (NOT_COMPLETE, 'Not Complete'),
    (COMPLETE, 'Complete'),
]
@deconstructible
class GenerateAttachmentFilePath(object):
    """Callable ``upload_to`` target for ``Attachment.file_path``.

    Builds ``task/<task id>/attachments/<attachment id><ext>`` for an
    uploaded file.  Implemented as a deconstructible class (rather than
    a plain function) so Django can serialize it into migrations.
    """
    def __init__(self):
        pass
    def __call__(self, instance, filename):
        """Return the storage path for *filename* on *instance*.

        Uses ``os.path.splitext`` instead of ``filename.split('.')[-1]``:
        for a filename without an extension the old split echoed the whole
        name (producing ``<id>.<wholename>``); splitext yields '' so the
        stored name is just the attachment id.  For multi-dot names
        ("a.tar.gz") both keep only the final suffix.
        """
        _, ext = os.path.splitext(filename)  # ext includes the leading dot, or ''
        path = f'task/{instance.task.id}/attachments/'
        name = f'{instance.id}{ext}'
        return os.path.join(path, name)


# Module-level instance referenced by Attachment.file_path's upload_to.
attachment_file_path = GenerateAttachmentFilePath()
class TaskList(models.Model):
    """A named checklist of tasks attached to a house."""
    name = models.CharField(max_length=120)
    description = models.TextField(blank=True, null=True)
    # One of TASK_STATUS_CHOICES ('NC'/'C'); new lists start not complete.
    status = models.CharField(max_length=2, choices=TASK_STATUS_CHOICES, default=NOT_COMPLETE)
    # Deleting the house cascades to its lists.
    house = models.ForeignKey('house.House', on_delete=models.CASCADE, related_name='lists')
    # Nulled (list kept) if the creating profile is deleted.
    created_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='lists')
    completed_at = models.DateTimeField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        """Readable identifier: "<pk> | <name>"."""
        return f'{self.id} | {self.name}'
class Task(models.Model):
    """A single to-do item belonging to a TaskList."""
    name = models.CharField(max_length=120)
    description = models.TextField(blank=True, null=True)
    status = models.CharField(max_length=2, choices=TASK_STATUS_CHOICES, default=NOT_COMPLETE)
    # NOTE(review): default=1 assumes a TaskList with pk 1 always exists —
    # confirm that fixture/migration guarantee before relying on it.
    task_list = models.ForeignKey('task.TaskList', default=1, on_delete=models.CASCADE, related_name='tasks')
    # Both profile links are nulled (task kept) if the profile is deleted.
    created_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='created_tasks')
    completed_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='completed_tasks')
    completed_at = models.DateTimeField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        """Readable identifier: "<pk> | <name>"."""
        return f'{self.id} | {self.name}'
class Attachment(models.Model):
    """A file uploaded against a Task; keyed by a random UUID."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Deleting the task cascades to its attachments.
    task = models.ForeignKey('task.Task', on_delete=models.CASCADE, related_name='attachments')
    # Stored under task/<task id>/attachments/ via the module-level callable.
    file_path = models.FileField(upload_to=attachment_file_path)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        """Readable identifier: "<uuid> | <task>"."""
        return f'{self.id} | {self.task}'
| 38.402985 | 134 | 0.733385 | import os
import uuid
from django.db import models
from django.utils.deconstruct import deconstructible
# Two-letter status codes stored in the database for lists and tasks.
NOT_COMPLETE = 'NC'
COMPLETE = 'C'
# Django ``choices`` pairs: (stored value, human-readable label).
TASK_STATUS_CHOICES = [
    (NOT_COMPLETE, 'Not Complete'),
    (COMPLETE, 'Complete'),
]
@deconstructible
class GenerateAttachmentFilePath(object):
    """Callable ``upload_to`` target for ``Attachment.file_path``.

    Builds ``task/<task id>/attachments/<attachment id><ext>``.
    Deconstructible so Django can serialize it into migrations.
    """
    def __init__(self):
        pass
    def __call__(self, instance, filename):
        """Return the storage path for *filename* on *instance*.

        ``os.path.splitext`` replaces ``filename.split('.')[-1]``, which
        echoed the whole name for extension-less files (yielding
        ``<id>.<wholename>``); splitext returns '' in that case.
        """
        _, ext = os.path.splitext(filename)  # ext includes the leading dot, or ''
        path = f'task/{instance.task.id}/attachments/'
        name = f'{instance.id}{ext}'
        return os.path.join(path, name)


# Module-level instance referenced by Attachment.file_path's upload_to.
attachment_file_path = GenerateAttachmentFilePath()
class TaskList(models.Model):
    """A named checklist of tasks attached to a house."""
    name = models.CharField(max_length=120)
    description = models.TextField(blank=True, null=True)
    # One of TASK_STATUS_CHOICES ('NC'/'C'); new lists start not complete.
    status = models.CharField(max_length=2, choices=TASK_STATUS_CHOICES, default=NOT_COMPLETE)
    # Deleting the house cascades to its lists.
    house = models.ForeignKey('house.House', on_delete=models.CASCADE, related_name='lists')
    # Nulled (list kept) if the creating profile is deleted.
    created_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='lists')
    completed_at = models.DateTimeField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        """Readable identifier: "<pk> | <name>"."""
        return f'{self.id} | {self.name}'
class Task(models.Model):
    """A single to-do item belonging to a TaskList."""
    name = models.CharField(max_length=120)
    description = models.TextField(blank=True, null=True)
    status = models.CharField(max_length=2, choices=TASK_STATUS_CHOICES, default=NOT_COMPLETE)
    # NOTE(review): default=1 assumes a TaskList with pk 1 always exists —
    # confirm that fixture/migration guarantee before relying on it.
    task_list = models.ForeignKey('task.TaskList', default=1, on_delete=models.CASCADE, related_name='tasks')
    # Both profile links are nulled (task kept) if the profile is deleted.
    created_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='created_tasks')
    completed_by = models.ForeignKey('user.Profile', blank=True, null=True, on_delete=models.SET_NULL, related_name='completed_tasks')
    completed_at = models.DateTimeField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        """Readable identifier: "<pk> | <name>"."""
        return f'{self.id} | {self.name}'
class Attachment(models.Model):
    """A file uploaded against a Task; keyed by a random UUID."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Deleting the task cascades to its attachments.
    task = models.ForeignKey('task.Task', on_delete=models.CASCADE, related_name='attachments')
    # Stored under task/<task id>/attachments/ via the module-level callable.
    file_path = models.FileField(upload_to=attachment_file_path)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        """Readable identifier: "<uuid> | <task>"."""
        return f'{self.id} | {self.task}'
| true | true |
f72671733725c0c68f0d949630bf8d7a44e4778a | 14,483 | py | Python | shadowsocks/udprelay.py | whayne1103/shadowsocks | d5196497f7b0445d355c05dffa5c6a3e12b1d79b | [
"Apache-2.0"
] | 13 | 2019-09-22T06:34:18.000Z | 2021-07-17T06:19:39.000Z | shadowsocks/udprelay.py | whayne1103/shadowsocks | d5196497f7b0445d355c05dffa5c6a3e12b1d79b | [
"Apache-2.0"
] | 3 | 2020-02-10T03:23:08.000Z | 2020-06-06T10:02:36.000Z | shadowsocks/udprelay.py | whayne1103/shadowsocks | d5196497f7b0445d355c05dffa5c6a3e12b1d79b | [
"Apache-2.0"
] | 22 | 2015-08-23T00:44:46.000Z | 2020-03-16T08:33:13.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import cryptor, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr, onetimeauth_verify, \
onetimeauth_gen, ONETIMEAUTH_BYTES, ADDRTYPE_AUTH
# Maximum UDP payload read per recvfrom() call (64 KiB, the UDP maximum).
BUF_SIZE = 65536
def client_key(source_addr, server_af):
    """Cache key for a client's relay socket: "<src host>:<src port>:<af>".

    Note: ``server_af`` is the address family of the *server* side
    connection, not of the destination.
    """
    host, port = source_addr[0], source_addr[1]
    return '{0}:{1}:{2:d}'.format(host, port, server_af)
class UDPRelay(object):
    """UDP relay for shadowsocks.

    In local mode it receives SOCKS5 UDP requests from clients, encrypts
    them and forwards them to the remote shadowsocks server; in server
    mode it decrypts datagrams and relays them to the real destination.
    One instance owns one listening UDP socket plus an LRU cache of
    per-destination client sockets, all driven by the shared event loop.
    """
    def __init__(self, config, dns_resolver, is_local, stat_callback=None):
        """Bind the listening socket and initialise caches.

        :param config: parsed shadowsocks configuration dict
        :param dns_resolver: shared resolver (stored; not used directly here)
        :param is_local: True for sslocal, False for ssserver
        :param stat_callback: optional fn(listen_port, n_bytes) traffic counter
        """
        self._config = config
        if is_local:
            self._listen_addr = config['local_address']
            self._listen_port = config['local_port']
            self._remote_addr = config['server']
            self._remote_port = config['server_port']
        else:
            self._listen_addr = config['server']
            self._listen_port = config['server_port']
            self._remote_addr = None
            self._remote_port = None
        # Tunnel mode (DNS forwarding) defaults; off until enabled externally.
        self.tunnel_remote = config.get('tunnel_remote', "8.8.8.8")
        self.tunnel_remote_port = config.get('tunnel_remote_port', 53)
        self.tunnel_port = config.get('tunnel_port', 53)
        self._is_tunnel = False
        self._dns_resolver = dns_resolver
        self._password = common.to_bytes(config['password'])
        self._method = config['method']
        self._timeout = config['timeout']
        self._ota_enable = config.get('one_time_auth', False)
        self._ota_enable_session = self._ota_enable
        self._is_local = is_local
        # client sockets keyed by client_key(); idle entries are closed on sweep
        self._cache = lru_cache.LRUCache(timeout=config['timeout'],
                                         close_callback=self._close_client)
        # maps a client socket's fd back to the originating (host, port)
        self._client_fd_to_server_addr = \
            lru_cache.LRUCache(timeout=config['timeout'])
        self._dns_cache = lru_cache.LRUCache(timeout=300)
        self._eventloop = None
        self._closed = False
        # file descriptors of client sockets registered with the event loop
        self._sockets = set()
        self._forbidden_iplist = config.get('forbidden_ip')
        self._crypto_path = config['crypto_path']
        addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if len(addrs) == 0:
            raise Exception("UDP can't get addrinfo for %s:%d" %
                            (self._listen_addr, self._listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.bind((self._listen_addr, self._listen_port))
        server_socket.setblocking(False)
        self._server_socket = server_socket
        self._stat_callback = stat_callback
    def _get_a_server(self):
        """Pick a (server, port) pair, choosing randomly from any lists."""
        server = self._config['server']
        server_port = self._config['server_port']
        if type(server_port) == list:
            server_port = random.choice(server_port)
        if type(server) == list:
            server = random.choice(server)
        logging.debug('chosen server: %s:%d', server, server_port)
        return server, server_port
    def _close_client(self, client):
        """LRU eviction callback: unregister and close a cached client socket."""
        if hasattr(client, 'close'):
            self._sockets.remove(client.fileno())
            self._eventloop.remove(client)
            client.close()
        else:
            # just an address
            pass
    def _handle_server(self):
        """Process one datagram arriving on the listening socket.

        Local mode: strip the SOCKS5 UDP header (or add the ss header in
        tunnel mode), encrypt, and forward to the shadowsocks server.
        Server mode: decrypt, verify OTA if requested, and forward the
        payload to the real destination.
        """
        server = self._server_socket
        data, r_addr = server.recvfrom(BUF_SIZE)
        key = None
        iv = None
        if not data:
            # NOTE(review): execution falls through here even when data is
            # empty (no return) — matches the code as written; confirm intent.
            logging.debug('UDP handle_server: data is empty')
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if self._is_local:
            if self._is_tunnel:
                # add ss header to data
                tunnel_remote = self.tunnel_remote
                tunnel_remote_port = self.tunnel_remote_port
                data = common.add_header(tunnel_remote,
                                         tunnel_remote_port, data)
            else:
                # SOCKS5 UDP: byte 2 is FRAG; fragmented requests unsupported
                frag = common.ord(data[2])
                if frag != 0:
                    logging.warn('UDP drop a message since frag is not 0')
                    return
                else:
                    data = data[3:]
        else:
            # decrypt data
            try:
                data, key, iv = cryptor.decrypt_all(self._password,
                                                    self._method,
                                                    data, self._crypto_path)
            except Exception:
                logging.debug('UDP handle_server: decrypt data failed')
                return
            if not data:
                logging.debug('UDP handle_server: data is empty after decrypt')
                return
        header_result = parse_header(data)
        if header_result is None:
            return
        addrtype, dest_addr, dest_port, header_length = header_result
        logging.info("udp data to %s:%d from %s:%d"
                     % (dest_addr, dest_port, r_addr[0], r_addr[1]))
        if self._is_local:
            server_addr, server_port = self._get_a_server()
        else:
            server_addr, server_port = dest_addr, dest_port
            # spec https://shadowsocks.org/en/spec/one-time-auth.html
            self._ota_enable_session = addrtype & ADDRTYPE_AUTH
            if self._ota_enable and not self._ota_enable_session:
                logging.warn('client one time auth is required')
                return
            if self._ota_enable_session:
                if len(data) < header_length + ONETIMEAUTH_BYTES:
                    logging.warn('UDP one time auth header is too short')
                    return
                # trailing HMAC covers the datagram; key is iv + master key
                _hash = data[-ONETIMEAUTH_BYTES:]
                data = data[: -ONETIMEAUTH_BYTES]
                _key = iv + key
                if onetimeauth_verify(_hash, data, _key) is False:
                    logging.warn('UDP one time auth fail')
                    return
        addrs = self._dns_cache.get(server_addr, None)
        if addrs is None:
            addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs:
                # drop
                return
            else:
                self._dns_cache[server_addr] = addrs
        af, socktype, proto, canonname, sa = addrs[0]
        key = client_key(r_addr, af)
        client = self._cache.get(key, None)
        if not client:
            # TODO async getaddrinfo
            if self._forbidden_iplist:
                if common.to_str(sa[0]) in self._forbidden_iplist:
                    logging.debug('IP %s is in forbidden list, drop' %
                                  common.to_str(sa[0]))
                    # drop
                    return
            # create and register a fresh non-blocking client socket
            client = socket.socket(af, socktype, proto)
            client.setblocking(False)
            self._cache[key] = client
            self._client_fd_to_server_addr[client.fileno()] = r_addr
            self._sockets.add(client.fileno())
            self._eventloop.add(client, eventloop.POLL_IN, self)
        if self._is_local:
            key, iv, m = cryptor.gen_key_iv(self._password, self._method)
            # spec https://shadowsocks.org/en/spec/one-time-auth.html
            if self._ota_enable_session:
                data = self._ota_chunk_data_gen(key, iv, data)
            try:
                data = cryptor.encrypt_all_m(key, iv, m, self._method, data,
                                             self._crypto_path)
            except Exception:
                logging.debug("UDP handle_server: encrypt data failed")
                return
            if not data:
                return
        else:
            # server side: strip the ss header, forward the raw payload
            data = data[header_length:]
        if not data:
            return
        try:
            client.sendto(data, (server_addr, server_port))
        except IOError as e:
            err = eventloop.errno_from_exception(e)
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                pass
            else:
                shell.print_exception(e)
    def _handle_client(self, sock):
        """Process a reply datagram on a cached client socket.

        Server mode: prepend the address header and encrypt the reply.
        Local mode: decrypt and re-add the 3-byte SOCKS5 UDP prefix
        (or strip the ss header in tunnel mode), then send the response
        back to the original client via the listening socket.
        """
        data, r_addr = sock.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_client: data is empty')
            return
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if not self._is_local:
            addrlen = len(r_addr[0])
            if addrlen > 255:
                # drop
                return
            data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
            try:
                response = cryptor.encrypt_all(self._password,
                                               self._method, data,
                                               self._crypto_path)
            except Exception:
                logging.debug("UDP handle_client: encrypt data failed")
                return
            if not response:
                return
        else:
            try:
                data, key, iv = cryptor.decrypt_all(self._password,
                                                    self._method, data,
                                                    self._crypto_path)
            except Exception:
                logging.debug('UDP handle_client: decrypt data failed')
                return
            if not data:
                return
            header_result = parse_header(data)
            if header_result is None:
                return
            addrtype, dest_addr, dest_port, header_length = header_result
            if self._is_tunnel:
                # remove ss header
                response = data[header_length:]
            else:
                # re-add the SOCKS5 UDP response prefix (RSV + FRAG = 0)
                response = b'\x00\x00\x00' + data
        client_addr = self._client_fd_to_server_addr.get(sock.fileno())
        if client_addr:
            logging.debug("send udp response to %s:%d"
                          % (client_addr[0], client_addr[1]))
            self._server_socket.sendto(response, client_addr)
        else:
            # this packet is from somewhere else we know
            # simply drop that packet
            pass
    def _ota_chunk_data_gen(self, key, iv, data):
        """Set the OTA flag in the address type byte and append the HMAC tag."""
        data = common.chr(common.ord(data[0]) | ADDRTYPE_AUTH) + data[1:]
        key = iv + key
        return data + onetimeauth_gen(data, key)
    def add_to_loop(self, loop):
        """Register the listening socket and the periodic sweep with *loop*."""
        if self._eventloop:
            raise Exception('already add to loop')
        if self._closed:
            raise Exception('already closed')
        self._eventloop = loop
        server_socket = self._server_socket
        self._eventloop.add(server_socket,
                            eventloop.POLL_IN | eventloop.POLL_ERR, self)
        loop.add_periodic(self.handle_periodic)
    def handle_event(self, sock, fd, event):
        """Event-loop callback: dispatch to server or client handler."""
        if sock == self._server_socket:
            if event & eventloop.POLL_ERR:
                logging.error('UDP server_socket err')
            self._handle_server()
        elif sock and (fd in self._sockets):
            if event & eventloop.POLL_ERR:
                logging.error('UDP client_socket err')
            self._handle_client(sock)
    def handle_periodic(self):
        """Periodic tick: finish deferred close and sweep the LRU caches."""
        if self._closed:
            if self._server_socket:
                self._server_socket.close()
                self._server_socket = None
            # NOTE(review): _sockets stores file descriptors (ints, added via
            # client.fileno()), so sock.close() would raise AttributeError if
            # any remain here; cached clients are normally closed via
            # _close_client on sweep. Matches the code as written — confirm.
            for sock in self._sockets:
                sock.close()
            logging.info('closed UDP port %d', self._listen_port)
        self._cache.sweep()
        self._client_fd_to_server_addr.sweep()
        self._dns_cache.sweep()
    def close(self, next_tick=False):
        """Shut down the relay.

        With ``next_tick=True`` only mark closed and let handle_periodic
        finish the teardown; otherwise unregister and close everything now.
        """
        logging.debug('UDP close')
        self._closed = True
        if not next_tick:
            if self._eventloop:
                self._eventloop.remove_periodic(self.handle_periodic)
                self._eventloop.remove(self._server_socket)
            self._server_socket.close()
            for client in list(self._cache.values()):
                client.close()
| 39.571038 | 79 | 0.538908 |
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import cryptor, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr, onetimeauth_verify, \
onetimeauth_gen, ONETIMEAUTH_BYTES, ADDRTYPE_AUTH
# Maximum UDP payload read per recvfrom() call (64 KiB, the UDP maximum).
BUF_SIZE = 65536
def client_key(source_addr, server_af):
    """Cache key for a client's relay socket: "<src host>:<src port>:<af>".

    Note: ``server_af`` is the address family of the *server* side
    connection, not of the destination.
    """
    host, port = source_addr[0], source_addr[1]
    return '{0}:{1}:{2:d}'.format(host, port, server_af)
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self.tunnel_remote = config.get('tunnel_remote', "8.8.8.8")
self.tunnel_remote_port = config.get('tunnel_remote_port', 53)
self.tunnel_port = config.get('tunnel_port', 53)
self._is_tunnel = False
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._ota_enable = config.get('one_time_auth', False)
self._ota_enable_session = self._ota_enable
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
self._forbidden_iplist = config.get('forbidden_ip')
self._crypto_path = config['crypto_path']
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("UDP can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
    def _handle_server(self):
        """Handle a datagram arriving on the listening (server) socket.

        Local mode: strip/validate the SOCKS5 UDP header (or add the ss
        header in tunnel mode), then encrypt and relay to the remote
        server.  Server mode: decrypt, optionally verify one-time-auth,
        then relay the payload to the destination host.
        """
        server = self._server_socket
        data, r_addr = server.recvfrom(BUF_SIZE)
        key = None
        iv = None
        if not data:
            # NOTE(review): execution still falls through on empty data;
            # downstream parse_header() is expected to reject it.
            logging.debug('UDP handle_server: data is empty')
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if self._is_local:
            if self._is_tunnel:
                # add ss header to data
                tunnel_remote = self.tunnel_remote
                tunnel_remote_port = self.tunnel_remote_port
                data = common.add_header(tunnel_remote,
                                         tunnel_remote_port, data)
            else:
                # byte 2 of a SOCKS5 UDP request is the FRAG field;
                # fragmentation is unsupported, so drop fragmented packets
                frag = common.ord(data[2])
                if frag != 0:
                    logging.warn('UDP drop a message since frag is not 0')
                    return
                else:
                    data = data[3:]
        else:
            # decrypt data
            try:
                data, key, iv = cryptor.decrypt_all(self._password,
                                                    self._method,
                                                    data, self._crypto_path)
            except Exception:
                logging.debug('UDP handle_server: decrypt data failed')
                return
            if not data:
                logging.debug('UDP handle_server: data is empty after decrypt')
                return
        header_result = parse_header(data)
        if header_result is None:
            return
        addrtype, dest_addr, dest_port, header_length = header_result
        logging.info("udp data to %s:%d from %s:%d"
                     % (dest_addr, dest_port, r_addr[0], r_addr[1]))
        if self._is_local:
            server_addr, server_port = self._get_a_server()
        else:
            server_addr, server_port = dest_addr, dest_port
            # spec https://shadowsocks.org/en/spec/one-time-auth.html
            self._ota_enable_session = addrtype & ADDRTYPE_AUTH
            if self._ota_enable and not self._ota_enable_session:
                logging.warn('client one time auth is required')
                return
            if self._ota_enable_session:
                if len(data) < header_length + ONETIMEAUTH_BYTES:
                    logging.warn('UDP one time auth header is too short')
                    return
                # trailing ONETIMEAUTH_BYTES of the payload are the HMAC tag,
                # keyed by (iv + key) from the decryption step above
                _hash = data[-ONETIMEAUTH_BYTES:]
                data = data[: -ONETIMEAUTH_BYTES]
                _key = iv + key
                if onetimeauth_verify(_hash, data, _key) is False:
                    logging.warn('UDP one time auth fail')
                    return
        addrs = self._dns_cache.get(server_addr, None)
        if addrs is None:
            addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs:
                # drop
                return
            else:
                self._dns_cache[server_addr] = addrs
        af, socktype, proto, canonname, sa = addrs[0]
        # one relay socket is cached per (client address, address family)
        key = client_key(r_addr, af)
        client = self._cache.get(key, None)
        if not client:
            # TODO async getaddrinfo
            if self._forbidden_iplist:
                if common.to_str(sa[0]) in self._forbidden_iplist:
                    logging.debug('IP %s is in forbidden list, drop' %
                                  common.to_str(sa[0]))
                    # drop
                    return
            client = socket.socket(af, socktype, proto)
            client.setblocking(False)
            self._cache[key] = client
            self._client_fd_to_server_addr[client.fileno()] = r_addr
            self._sockets.add(client.fileno())
            self._eventloop.add(client, eventloop.POLL_IN, self)
        if self._is_local:
            key, iv, m = cryptor.gen_key_iv(self._password, self._method)
            # spec https://shadowsocks.org/en/spec/one-time-auth.html
            if self._ota_enable_session:
                data = self._ota_chunk_data_gen(key, iv, data)
            try:
                data = cryptor.encrypt_all_m(key, iv, m, self._method, data,
                                             self._crypto_path)
            except Exception:
                logging.debug("UDP handle_server: encrypt data failed")
                return
            if not data:
                return
        else:
            # server side forwards only the payload, without the ss header
            data = data[header_length:]
            if not data:
                return
        try:
            client.sendto(data, (server_addr, server_port))
        except IOError as e:
            # EINPROGRESS/EAGAIN are expected on a non-blocking socket
            err = eventloop.errno_from_exception(e)
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                pass
            else:
                shell.print_exception(e)
    def _handle_client(self, sock):
        """Handle a reply datagram arriving on a per-client relay socket.

        Server mode: prepend the origin address header and encrypt before
        relaying back.  Local mode: decrypt and rebuild the SOCKS5 UDP
        response (or strip the ss header in tunnel mode).  The reply is
        sent to whichever client address this relay socket was created for.
        """
        data, r_addr = sock.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_client: data is empty')
            return
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        if not self._is_local:
            # the address length field in the ss header is a single byte
            addrlen = len(r_addr[0])
            if addrlen > 255:
                # drop
                return
            data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
            try:
                response = cryptor.encrypt_all(self._password,
                                               self._method, data,
                                               self._crypto_path)
            except Exception:
                logging.debug("UDP handle_client: encrypt data failed")
                return
            if not response:
                return
        else:
            try:
                data, key, iv = cryptor.decrypt_all(self._password,
                                                    self._method, data,
                                                    self._crypto_path)
            except Exception:
                logging.debug('UDP handle_client: decrypt data failed')
                return
            if not data:
                return
            header_result = parse_header(data)
            if header_result is None:
                return
            addrtype, dest_addr, dest_port, header_length = header_result
            if self._is_tunnel:
                # remove ss header
                response = data[header_length:]
            else:
                # SOCKS5 UDP response: RSV(2) + FRAG(1) prefix, all zero
                response = b'\x00\x00\x00' + data
        client_addr = self._client_fd_to_server_addr.get(sock.fileno())
        if client_addr:
            logging.debug("send udp response to %s:%d"
                          % (client_addr[0], client_addr[1]))
            self._server_socket.sendto(response, client_addr)
        else:
            # this packet is from somewhere else we know
            # simply drop that packet
            pass
    def _ota_chunk_data_gen(self, key, iv, data):
        """Mark the address-type byte with the OTA flag and append the
        one-time-auth HMAC tag, keyed by ``iv + key``."""
        data = common.chr(common.ord(data[0]) | ADDRTYPE_AUTH) + data[1:]
        key = iv + key
        return data + onetimeauth_gen(data, key)
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
self._dns_cache.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
| true | true |
f72671937db542e223720da043ede8cedbc9e9b9 | 15,617 | py | Python | frigate/events.py | gpete/frigate | e6594c6d98fc18e05df29e66924039cd53a67dd8 | [
"MIT"
] | null | null | null | frigate/events.py | gpete/frigate | e6594c6d98fc18e05df29e66924039cd53a67dd8 | [
"MIT"
] | null | null | null | frigate/events.py | gpete/frigate | e6594c6d98fc18e05df29e66924039cd53a67dd8 | [
"MIT"
] | null | null | null | import datetime
import json
import logging
import os
import queue
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path
import psutil
import shutil
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.models import Event
from peewee import fn
logger = logging.getLogger(__name__)
class EventProcessor(threading.Thread):
    """Consumes start/end detection events from ``event_queue``, assembles
    event clips from cached ffmpeg segments, and persists finished events
    to the database.  Runs as a daemon-style worker thread."""
    def __init__(
        self, config, camera_processes, event_queue, event_processed_queue, stop_event
    ):
        threading.Thread.__init__(self)
        self.name = "event_processor"
        self.config = config
        self.camera_processes = camera_processes
        # filename -> {path, camera, start_time, duration} for cached segments
        self.cached_clips = {}
        self.event_queue = event_queue
        self.event_processed_queue = event_processed_queue
        # event id -> event data, for events that have started but not ended
        self.events_in_process = {}
        self.stop_event = stop_event
    def should_create_clip(self, camera, event_data):
        """Return True unless the event is a false positive or missed all
        of the camera's required zones."""
        if event_data["false_positive"]:
            return False
        # if there are required zones and there is no overlap
        required_zones = self.config.cameras[camera].clips.required_zones
        if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set(
            required_zones
        ):
            logger.debug(
                f"Not creating clip for {event_data['id']} because it did not enter required zones"
            )
            return False
        return True
    def refresh_cache(self):
        """Index new cache segments (probing duration via ffprobe), then
        expire segments older than any in-flight event needs and trim the
        cache directory if it is nearly full."""
        cached_files = os.listdir(CACHE_DIR)
        files_in_use = []
        # skip files ffmpeg is still writing to
        for process in psutil.process_iter():
            try:
                if process.name() != "ffmpeg":
                    continue
                flist = process.open_files()
                if flist:
                    for nt in flist:
                        if nt.path.startswith(CACHE_DIR):
                            files_in_use.append(nt.path.split("/")[-1])
            except:
                # process may have exited mid-iteration; ignore and move on
                continue
        for f in cached_files:
            if f in files_in_use or f in self.cached_clips:
                continue
            # cache filenames are "<camera>-<YYYYmmddHHMMSS>.<ext>"
            basename = os.path.splitext(f)[0]
            camera, date = basename.rsplit("-", maxsplit=1)
            start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
            ffprobe_cmd = [
                "ffprobe",
                "-v",
                "error",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                f"{os.path.join(CACHE_DIR, f)}",
            ]
            p = sp.run(ffprobe_cmd, capture_output=True)
            if p.returncode == 0:
                duration = float(p.stdout.decode().strip())
            else:
                # unreadable segment: delete it rather than cache it
                logger.info(f"bad file: {f}")
                os.remove(os.path.join(CACHE_DIR, f))
                continue
            self.cached_clips[f] = {
                "path": f,
                "camera": camera,
                "start_time": start_time.timestamp(),
                "duration": duration,
            }
        if len(self.events_in_process) > 0:
            earliest_event = min(
                self.events_in_process.values(), key=lambda x: x["start_time"]
            )["start_time"]
        else:
            earliest_event = datetime.datetime.now().timestamp()
        # if the earliest event is more tha max seconds ago, cap it
        # NOTE(review): max_seconds below is assigned but unused — the
        # max() expression re-reads the config value directly.
        max_seconds = self.config.clips.max_seconds
        earliest_event = max(
            earliest_event,
            datetime.datetime.now().timestamp() - self.config.clips.max_seconds,
        )
        # drop segments ending more than 90s before the earliest needed time
        for f, data in list(self.cached_clips.items()):
            if earliest_event - 90 > data["start_time"] + data["duration"]:
                del self.cached_clips[f]
                logger.debug(f"Cleaning up cached file {f}")
                os.remove(os.path.join(CACHE_DIR, f))
        # if we are still using more than 90% of the cache, proactively cleanup
        cache_usage = shutil.disk_usage("/tmp/cache")
        if (
            cache_usage.used / cache_usage.total > 0.9
            and cache_usage.free < 200000000
            and len(self.cached_clips) > 0
        ):
            logger.warning("More than 90% of the cache is used.")
            logger.warning(
                "Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config."
            )
            logger.warning("Proactively cleaning up the cache...")
            # evict oldest segments until usage drops below 90%
            while cache_usage.used / cache_usage.total > 0.9:
                oldest_clip = min(
                    self.cached_clips.values(), key=lambda x: x["start_time"]
                )
                del self.cached_clips[oldest_clip["path"]]
                os.remove(os.path.join(CACHE_DIR, oldest_clip["path"]))
                cache_usage = shutil.disk_usage("/tmp/cache")
    def create_clip(self, camera, event_data, pre_capture, post_capture):
        """Concatenate cached segments covering the event window (plus
        pre/post capture seconds) into a single mp4 via ffmpeg's concat
        demuxer.  Returns True on success, False on failure/timeout."""
        # get all clips from the camera with the event sorted
        sorted_clips = sorted(
            [c for c in self.cached_clips.values() if c["camera"] == camera],
            key=lambda i: i["start_time"],
        )
        # if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds
        wait_count = 0
        while (
            len(sorted_clips) == 0
            or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"]
            < event_data["end_time"] + post_capture
        ):
            if wait_count > 4:
                logger.warning(
                    f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event."
                )
                return False
            logger.debug(f"No cache clips for {camera}. Waiting...")
            time.sleep(5)
            self.refresh_cache()
            # get all clips from the camera with the event sorted
            sorted_clips = sorted(
                [c for c in self.cached_clips.values() if c["camera"] == camera],
                key=lambda i: i["start_time"],
            )
            wait_count += 1
        playlist_start = event_data["start_time"] - pre_capture
        playlist_end = event_data["end_time"] + post_capture
        # build an ffmpeg concat-demuxer playlist fed via stdin
        playlist_lines = []
        for clip in sorted_clips:
            # clip ends before playlist start time, skip
            if clip["start_time"] + clip["duration"] < playlist_start:
                continue
            # clip starts after playlist ends, finish
            if clip["start_time"] > playlist_end:
                break
            playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
            # if this is the starting clip, add an inpoint
            if clip["start_time"] < playlist_start:
                playlist_lines.append(
                    f"inpoint {int(playlist_start-clip['start_time'])}"
                )
            # if this is the ending clip, add an outpoint
            if clip["start_time"] + clip["duration"] > playlist_end:
                playlist_lines.append(
                    f"outpoint {int(playlist_end-clip['start_time'])}"
                )
        clip_name = f"{camera}-{event_data['id']}"
        ffmpeg_cmd = [
            "ffmpeg",
            "-y",
            "-protocol_whitelist",
            "pipe,file",
            "-f",
            "concat",
            "-safe",
            "0",
            "-i",
            "-",
            "-c",
            "copy",
            "-movflags",
            "+faststart",
            f"{os.path.join(CLIPS_DIR, clip_name)}.mp4",
        ]
        p = sp.run(
            ffmpeg_cmd,
            input="\n".join(playlist_lines),
            encoding="ascii",
            capture_output=True,
        )
        if p.returncode != 0:
            logger.error(p.stderr)
            return False
        return True
    def run(self):
        """Main loop: track event starts, and on event end create the clip
        (when configured) and persist the event if it produced media."""
        while not self.stop_event.is_set():
            try:
                event_type, camera, event_data = self.event_queue.get(timeout=10)
            except queue.Empty:
                # idle: keep the segment cache fresh while waiting
                if not self.stop_event.is_set():
                    self.refresh_cache()
                continue
            logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
            self.refresh_cache()
            if event_type == "start":
                self.events_in_process[event_data["id"]] = event_data
            if event_type == "end":
                clips_config = self.config.cameras[camera].clips
                clip_created = False
                if self.should_create_clip(camera, event_data):
                    if clips_config.enabled and (
                        clips_config.objects is None
                        or event_data["label"] in clips_config.objects
                    ):
                        clip_created = self.create_clip(
                            camera,
                            event_data,
                            clips_config.pre_capture,
                            clips_config.post_capture,
                        )
                # only persist events that produced a clip or a snapshot
                if clip_created or event_data["has_snapshot"]:
                    Event.create(
                        id=event_data["id"],
                        label=event_data["label"],
                        camera=camera,
                        start_time=event_data["start_time"],
                        end_time=event_data["end_time"],
                        top_score=event_data["top_score"],
                        false_positive=event_data["false_positive"],
                        zones=list(event_data["entered_zones"]),
                        thumbnail=event_data["thumbnail"],
                        has_clip=clip_created,
                        has_snapshot=event_data["has_snapshot"],
                    )
                del self.events_in_process[event_data["id"]]
                self.event_processed_queue.put((event_data["id"], camera))
        logger.info(f"Exiting event processor...")
class EventCleanup(threading.Thread):
    """Background thread that expires event media per retention config,
    removes duplicate events, and prunes media-less event rows."""
    def __init__(self, config: FrigateConfig, stop_event):
        threading.Thread.__init__(self)
        self.name = "event_cleanup"
        self.config = config
        self.stop_event = stop_event
        # cameras currently present in the config; events from other
        # cameras fall back to the global retention settings
        self.camera_keys = list(self.config.cameras.keys())
    def expire(self, media_type):
        """Expire media of *media_type* ("clips" or anything else meaning
        snapshots): delete expired files from disk and clear the matching
        has_clip/has_snapshot flag in the database."""
        ## Expire events from unlisted cameras based on the global config
        if media_type == 'clips':
            retain_config = self.config.clips.retain
            file_extension = "mp4"
            update_params = {"has_clip": False}
        else:
            retain_config = self.config.snapshots.retain
            file_extension = "jpg"
            update_params = {"has_snapshot": False}
        distinct_labels = (
            Event.select(Event.label)
            .where(Event.camera.not_in(self.camera_keys))
            .distinct()
        )
        # loop over object types in db
        for l in distinct_labels:
            # get expiration time for this label
            expire_days = retain_config.objects.get(l.label, retain_config.default)
            expire_after = (
                datetime.datetime.now() - datetime.timedelta(days=expire_days)
            ).timestamp()
            # grab all events after specific time
            expired_events = Event.select().where(
                Event.camera.not_in(self.camera_keys),
                Event.start_time < expire_after,
                Event.label == l.label,
            )
            # delete the media from disk
            for event in expired_events:
                media_name = f"{event.camera}-{event.id}"
                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
                media_path.unlink(missing_ok=True)
            # update the clips attribute for the db entry
            update_query = Event.update(update_params).where(
                Event.camera.not_in(self.camera_keys),
                Event.start_time < expire_after,
                Event.label == l.label,
            )
            update_query.execute()
        ## Expire events from cameras based on the camera config
        for name, camera in self.config.cameras.items():
            if media_type == 'clips':
                retain_config = camera.clips.retain
            else:
                retain_config = camera.snapshots.retain
            # get distinct objects in database for this camera
            distinct_labels = (
                Event.select(Event.label).where(Event.camera == name).distinct()
            )
            # loop over object types in db
            for l in distinct_labels:
                # get expiration time for this label
                expire_days = retain_config.objects.get(l.label, retain_config.default)
                expire_after = (
                    datetime.datetime.now() - datetime.timedelta(days=expire_days)
                ).timestamp()
                # grab all events after specific time
                expired_events = Event.select().where(
                    Event.camera == name,
                    Event.start_time < expire_after,
                    Event.label == l.label,
                )
                # delete the grabbed clips from disk
                for event in expired_events:
                    media_name = f"{event.camera}-{event.id}"
                    media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
                    media_path.unlink(missing_ok=True)
                # update the clips attribute for the db entry
                update_query = Event.update(update_params).where(
                    Event.camera == name,
                    Event.start_time < expire_after,
                    Event.label == l.label,
                )
                update_query.execute()
    def purge_duplicates(self):
        """Remove duplicate events: events with the same label/camera in
        the same 5s window keep only the longest copy; the rest have their
        media deleted and rows dropped."""
        duplicate_query = """with grouped_events as (
        select id,
            label,
            camera,
            has_snapshot,
            has_clip,
            row_number() over (
                partition by label, camera, round(start_time/5,0)*5
                order by end_time-start_time desc
            ) as copy_number
        from event
        )
        select distinct id, camera, has_snapshot, has_clip from grouped_events
        where copy_number > 1;"""
        duplicate_events = Event.raw(duplicate_query)
        for event in duplicate_events:
            logger.debug(f"Removing duplicate: {event.id}")
            media_name = f"{event.camera}-{event.id}"
            if event.has_snapshot:
                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
                media_path.unlink(missing_ok=True)
            if event.has_clip:
                media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
                media_path.unlink(missing_ok=True)
        (
            Event.delete()
            .where(Event.id << [event.id for event in duplicate_events])
            .execute()
        )
    def run(self):
        """Loop until stop: expire clips/snapshots and purge duplicates
        every 5 minutes, then prune media-less rows on the way out."""
        # only expire events every 5 minutes
        while not self.stop_event.wait(300):
            self.expire("clips")
            self.expire("snapshots")
            self.purge_duplicates()
        # drop events from db where has_clip and has_snapshot are false
        delete_query = Event.delete().where(
            Event.has_clip == False, Event.has_snapshot == False
        )
        delete_query.execute()
        logger.info(f"Exiting event cleanup...")
| 37.90534 | 129 | 0.539284 | import datetime
import json
import logging
import os
import queue
import subprocess as sp
import threading
import time
from collections import defaultdict
from pathlib import Path
import psutil
import shutil
from frigate.config import FrigateConfig
from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
from frigate.models import Event
from peewee import fn
logger = logging.getLogger(__name__)
class EventProcessor(threading.Thread):
def __init__(
self, config, camera_processes, event_queue, event_processed_queue, stop_event
):
threading.Thread.__init__(self)
self.name = "event_processor"
self.config = config
self.camera_processes = camera_processes
self.cached_clips = {}
self.event_queue = event_queue
self.event_processed_queue = event_processed_queue
self.events_in_process = {}
self.stop_event = stop_event
def should_create_clip(self, camera, event_data):
if event_data["false_positive"]:
return False
required_zones = self.config.cameras[camera].clips.required_zones
if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set(
required_zones
):
logger.debug(
f"Not creating clip for {event_data['id']} because it did not enter required zones"
)
return False
return True
def refresh_cache(self):
cached_files = os.listdir(CACHE_DIR)
files_in_use = []
for process in psutil.process_iter():
try:
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
if nt.path.startswith(CACHE_DIR):
files_in_use.append(nt.path.split("/")[-1])
except:
continue
for f in cached_files:
if f in files_in_use or f in self.cached_clips:
continue
basename = os.path.splitext(f)[0]
camera, date = basename.rsplit("-", maxsplit=1)
start_time = datetime.datetime.strptime(date, "%Y%m%d%H%M%S")
ffprobe_cmd = [
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{os.path.join(CACHE_DIR, f)}",
]
p = sp.run(ffprobe_cmd, capture_output=True)
if p.returncode == 0:
duration = float(p.stdout.decode().strip())
else:
logger.info(f"bad file: {f}")
os.remove(os.path.join(CACHE_DIR, f))
continue
self.cached_clips[f] = {
"path": f,
"camera": camera,
"start_time": start_time.timestamp(),
"duration": duration,
}
if len(self.events_in_process) > 0:
earliest_event = min(
self.events_in_process.values(), key=lambda x: x["start_time"]
)["start_time"]
else:
earliest_event = datetime.datetime.now().timestamp()
max_seconds = self.config.clips.max_seconds
earliest_event = max(
earliest_event,
datetime.datetime.now().timestamp() - self.config.clips.max_seconds,
)
for f, data in list(self.cached_clips.items()):
if earliest_event - 90 > data["start_time"] + data["duration"]:
del self.cached_clips[f]
logger.debug(f"Cleaning up cached file {f}")
os.remove(os.path.join(CACHE_DIR, f))
cache_usage = shutil.disk_usage("/tmp/cache")
if (
cache_usage.used / cache_usage.total > 0.9
and cache_usage.free < 200000000
and len(self.cached_clips) > 0
):
logger.warning("More than 90% of the cache is used.")
logger.warning(
"Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config."
)
logger.warning("Proactively cleaning up the cache...")
while cache_usage.used / cache_usage.total > 0.9:
oldest_clip = min(
self.cached_clips.values(), key=lambda x: x["start_time"]
)
del self.cached_clips[oldest_clip["path"]]
os.remove(os.path.join(CACHE_DIR, oldest_clip["path"]))
cache_usage = shutil.disk_usage("/tmp/cache")
def create_clip(self, camera, event_data, pre_capture, post_capture):
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
wait_count = 0
while (
len(sorted_clips) == 0
or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"]
< event_data["end_time"] + post_capture
):
if wait_count > 4:
logger.warning(
f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event."
)
return False
logger.debug(f"No cache clips for {camera}. Waiting...")
time.sleep(5)
self.refresh_cache()
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
wait_count += 1
playlist_start = event_data["start_time"] - pre_capture
playlist_end = event_data["end_time"] + post_capture
playlist_lines = []
for clip in sorted_clips:
if clip["start_time"] + clip["duration"] < playlist_start:
continue
if clip["start_time"] > playlist_end:
break
playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
if clip["start_time"] < playlist_start:
playlist_lines.append(
f"inpoint {int(playlist_start-clip['start_time'])}"
)
if clip["start_time"] + clip["duration"] > playlist_end:
playlist_lines.append(
f"outpoint {int(playlist_end-clip['start_time'])}"
)
clip_name = f"{camera}-{event_data['id']}"
ffmpeg_cmd = [
"ffmpeg",
"-y",
"-protocol_whitelist",
"pipe,file",
"-f",
"concat",
"-safe",
"0",
"-i",
"-",
"-c",
"copy",
"-movflags",
"+faststart",
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4",
]
p = sp.run(
ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(p.stderr)
return False
return True
def run(self):
while not self.stop_event.is_set():
try:
event_type, camera, event_data = self.event_queue.get(timeout=10)
except queue.Empty:
if not self.stop_event.is_set():
self.refresh_cache()
continue
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.refresh_cache()
if event_type == "start":
self.events_in_process[event_data["id"]] = event_data
if event_type == "end":
clips_config = self.config.cameras[camera].clips
clip_created = False
if self.should_create_clip(camera, event_data):
if clips_config.enabled and (
clips_config.objects is None
or event_data["label"] in clips_config.objects
):
clip_created = self.create_clip(
camera,
event_data,
clips_config.pre_capture,
clips_config.post_capture,
)
if clip_created or event_data["has_snapshot"]:
Event.create(
id=event_data["id"],
label=event_data["label"],
camera=camera,
start_time=event_data["start_time"],
end_time=event_data["end_time"],
top_score=event_data["top_score"],
false_positive=event_data["false_positive"],
zones=list(event_data["entered_zones"]),
thumbnail=event_data["thumbnail"],
has_clip=clip_created,
has_snapshot=event_data["has_snapshot"],
)
del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera))
logger.info(f"Exiting event processor...")
class EventCleanup(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
threading.Thread.__init__(self)
self.name = "event_cleanup"
self.config = config
self.stop_event = stop_event
self.camera_keys = list(self.config.cameras.keys())
def expire(self, media_type):
elf.config.clips.retain
file_extension = "mp4"
update_params = {"has_clip": False}
else:
retain_config = self.config.snapshots.retain
file_extension = "jpg"
update_params = {"has_snapshot": False}
distinct_labels = (
Event.select(Event.label)
.where(Event.camera.not_in(self.camera_keys))
.distinct()
)
for l in distinct_labels:
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
expired_events = Event.select().where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media_path.unlink(missing_ok=True)
update_query = Event.update(update_params).where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
):
if media_type == 'clips':
retain_config = camera.clips.retain
else:
retain_config = camera.snapshots.retain
distinct_labels = (
Event.select(Event.label).where(Event.camera == name).distinct()
)
for l in distinct_labels:
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
expired_events = Event.select().where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media_path.unlink(missing_ok=True)
update_query = Event.update(update_params).where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
def purge_duplicates(self):
duplicate_query = """with grouped_events as (
select id,
label,
camera,
has_snapshot,
has_clip,
row_number() over (
partition by label, camera, round(start_time/5,0)*5
order by end_time-start_time desc
) as copy_number
from event
)
select distinct id, camera, has_snapshot, has_clip from grouped_events
where copy_number > 1;"""
duplicate_events = Event.raw(duplicate_query)
for event in duplicate_events:
logger.debug(f"Removing duplicate: {event.id}")
media_name = f"{event.camera}-{event.id}"
if event.has_snapshot:
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg")
media_path.unlink(missing_ok=True)
if event.has_clip:
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
media_path.unlink(missing_ok=True)
(
Event.delete()
.where(Event.id << [event.id for event in duplicate_events])
.execute()
)
def run(self):
while not self.stop_event.wait(300):
self.expire("clips")
self.expire("snapshots")
self.purge_duplicates()
delete_query = Event.delete().where(
Event.has_clip == False, Event.has_snapshot == False
)
delete_query.execute()
logger.info(f"Exiting event cleanup...")
| true | true |
f726743c2af55562de0a399b95ee66d6a3d5ea4c | 900 | py | Python | WebSpider/threads.py | bianQ/similarweb | 3df31af1267a285d0bc6adf720409ceb43eb56cb | [
"MIT"
] | null | null | null | WebSpider/threads.py | bianQ/similarweb | 3df31af1267a285d0bc6adf720409ceb43eb56cb | [
"MIT"
] | null | null | null | WebSpider/threads.py | bianQ/similarweb | 3df31af1267a285d0bc6adf720409ceb43eb56cb | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@author: Alan
@time: 2021/05/18
"""
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import traceback
class MultiThread(ThreadPoolExecutor):
    """A :class:`ThreadPoolExecutor` whose workers log exceptions instead
    of silently dropping them, plus a helper to run a function once a set
    of futures has completed."""

    def __init__(self, max_workers=None, thread_name_prefix=''):
        super().__init__(max_workers, thread_name_prefix)

    def thread_log(self, worker):
        """Done-callback: return the worker's result, printing the
        traceback of any exception so failures are not lost in the pool."""
        try:
            return worker.result()
        except Exception:
            # was a bare ``except:`` — narrowed so BaseExceptions such as
            # KeyboardInterrupt/SystemExit are not swallowed here
            traceback.print_exc()

    def execute(self, fn, *args, **kwargs):
        """Submit ``fn(*args, **kwargs)`` with exception logging attached.

        :returns: the :class:`concurrent.futures.Future` for the call.
        """
        thread = self.submit(fn, *args, **kwargs)
        thread.add_done_callback(self.thread_log)
        return thread

    @staticmethod
    def execute_after_done(fn, workers, *args, **kwargs):
        """Block until all *workers* (futures) finish — at most one day —
        then call and return ``fn(*args, **kwargs)``."""
        wait(workers, timeout=86400, return_when=ALL_COMPLETED)
        return fn(*args, **kwargs)
| 26.470588 | 70 | 0.628889 |
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import traceback
class MultiThread(ThreadPoolExecutor):
def __init__(self, max_workers=None, thread_name_prefix=''):
super().__init__(max_workers, thread_name_prefix)
def thread_log(self, worker):
try:
result = worker.result()
return result
except:
traceback.print_exc()
def execute(self, fn, *args, **kwargs):
thread = self.submit(fn, *args, **kwargs)
thread.add_done_callback(self.thread_log)
return thread
@staticmethod
def execute_after_done(fn, workers, *args, **kwargs):
wait(workers, timeout=86400, return_when=ALL_COMPLETED)
return fn(*args, **kwargs)
| true | true |
f7267621fdc306ffafe3ecda31e41278dccdb8f2 | 2,478 | py | Python | tests/test_metadata_get.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | tests/test_metadata_get.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | tests/test_metadata_get.py | iris-edu-int/datacite | 4a3aa5b9bb156cee616848cc7c8d929ad76fa3cc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of DataCite.
#
# Copyright (C) 2015, 2016 CERN.
#
# DataCite is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
"""Tests for /metadata GET."""
from __future__ import absolute_import, print_function
import pytest
import responses
from helpers import APIURL, get_client
from datacite.errors import DataCiteForbiddenError, DataCiteGoneError, \
DataCiteNotFoundError, DataCiteServerError, DataCiteUnauthorizedError
@responses.activate
def test_metadata_get_200():
    """A 200 response returns the metadata XML body unchanged."""
    doc = "<resource></resource>"
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(
        responses.GET,
        url,
        body=doc,
        status=200,
        content_type="application/xml",
    )
    client = get_client()
    assert doc == client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_401():
    """A 401 response raises DataCiteUnauthorizedError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(
        responses.GET,
        url,
        body="Unauthorized",
        status=401,
    )
    client = get_client()
    with pytest.raises(DataCiteUnauthorizedError):
        client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_403():
    """A 403 response raises DataCiteForbiddenError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(
        responses.GET,
        url,
        body="Forbidden",
        status=403,
    )
    client = get_client()
    with pytest.raises(DataCiteForbiddenError):
        client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_404():
    """A 404 response raises DataCiteNotFoundError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(
        responses.GET,
        url,
        body="Not Found",
        status=404,
    )
    client = get_client()
    with pytest.raises(DataCiteNotFoundError):
        client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_410():
    """A 410 response raises DataCiteGoneError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(
        responses.GET,
        url,
        body="Gone",
        status=410,
    )
    client = get_client()
    with pytest.raises(DataCiteGoneError):
        client.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_500():
    """A 500 response raises DataCiteServerError."""
    url = "{0}metadata/10.1234/1".format(APIURL)
    responses.add(
        responses.GET,
        url,
        body="Internal Server Error",
        status=500,
    )
    client = get_client()
    with pytest.raises(DataCiteServerError):
        client.metadata_get("10.1234/1")
| 22.125 | 73 | 0.633172 |
from __future__ import absolute_import, print_function
import pytest
import responses
from helpers import APIURL, get_client
from datacite.errors import DataCiteForbiddenError, DataCiteGoneError, \
DataCiteNotFoundError, DataCiteServerError, DataCiteUnauthorizedError
@responses.activate
def test_metadata_get_200():
doc = "<resource></resource>"
responses.add(
responses.GET,
"{0}metadata/10.1234/1".format(APIURL),
body=doc,
status=200,
content_type="application/xml",
)
d = get_client()
assert doc == d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_401():
    """A 401 reply raises DataCiteUnauthorizedError."""
    responses.add(
        responses.GET,
        "{0}metadata/10.1234/1".format(APIURL),
        body="Unauthorized",
        status=401,
    )
    d = get_client()
    with pytest.raises(DataCiteUnauthorizedError):
        d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_403():
    """A 403 reply raises DataCiteForbiddenError."""
    responses.add(
        responses.GET,
        "{0}metadata/10.1234/1".format(APIURL),
        body="Forbidden",
        status=403,
    )
    d = get_client()
    with pytest.raises(DataCiteForbiddenError):
        d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_404():
    """A 404 reply raises DataCiteNotFoundError."""
    responses.add(
        responses.GET,
        "{0}metadata/10.1234/1".format(APIURL),
        body="Not Found",
        status=404,
    )
    d = get_client()
    with pytest.raises(DataCiteNotFoundError):
        d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_410():
    """A 410 reply raises DataCiteGoneError."""
    responses.add(
        responses.GET,
        "{0}metadata/10.1234/1".format(APIURL),
        body="Gone",
        status=410,
    )
    d = get_client()
    with pytest.raises(DataCiteGoneError):
        d.metadata_get("10.1234/1")
@responses.activate
def test_metadata_get_500():
    """A 500 reply raises DataCiteServerError."""
    responses.add(
        responses.GET,
        "{0}metadata/10.1234/1".format(APIURL),
        body="Internal Server Error",
        status=500,
    )
    d = get_client()
    with pytest.raises(DataCiteServerError):
        d.metadata_get("10.1234/1")
| true | true |
f72676b4fc76edae6f6177c02163528b28ab531e | 7,484 | py | Python | app/states/load.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | 1 | 2020-09-23T21:40:16.000Z | 2020-09-23T21:40:16.000Z | app/states/load.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | null | null | null | app/states/load.py | jchrisfarris/antiope-scorecards | 82a1e228f4bd23f756c1dec8c0582fcde98de564 | [
"Apache-2.0"
] | 3 | 2020-07-11T19:18:12.000Z | 2021-08-14T17:43:06.000Z | import json
import os
import boto3
import yaml
from lib.dynamodb import accounts_table, requirements_table, user_table, config_table
from lib.lambda_decorator.decorator import states_decorator
# Module-level S3 client, created once per process/Lambda container.
client_s3 = boto3.client('s3')
# Bucket names are injected through environment variables at deploy time.
user_bucket = os.getenv('USER_BUCKET')
account_bucket = os.getenv('ACCOUNT_BUCKET')
requirements_bucket = os.getenv('REQUIREMENTS_BUCKET')
@states_decorator
def load_handler(event, context):
    """Import the user, account, and requirements files from S3 into DynamoDB.

    Returns summary information for the scan: the account ids, the distinct
    payer account ids, the requirement ids sourced from s3Import, and the
    cloudsploit settings map.

    Expected input event format: {}
    """
    account_records = load_accounts()
    load_user()
    requirement_data = load_requirements()

    account_ids = {record['accountId'] for record in account_records}
    payer_ids = {
        record.get('payer_id') for record in account_records if record.get('payer_id')
    }
    s3_requirement_ids = {
        requirement_id
        for requirement_id, requirement in requirement_data['requirements'].items()
        if requirement.get('source') == 's3Import'
    }
    return {
        'accountIds': list(account_ids),
        'payerIds': list(payer_ids),
        's3RequirementIds': list(s3_requirement_ids),
        'cloudsploitSettingsMap': requirement_data['cloudsploitSettingsMap'],
    }
def load_accounts():
    """Sync the accounts DynamoDB table with the account list stored in S3.

    Accounts missing from S3 are deleted; new or changed accounts are written.
    Returns the (normalized) account list read from S3.
    """
    s3_object = client_s3.get_object(Bucket=account_bucket, Key=os.getenv('ACCOUNT_FILE_PATH'))
    account_list_from_s3 = json.loads(s3_object['Body'].read())['accounts']
    for record in account_list_from_s3:
        accounts_table.normalize_account_record(record)

    desired = {record['accountId']: record for record in account_list_from_s3}
    current = {record['accountId']: record for record in accounts_table.scan_all()}

    # Accounts no longer present in S3 get removed from DynamoDB.
    stale_account_ids = [account_id for account_id in current if account_id not in desired]
    # New accounts, plus existing ones whose record changed, get (re)written.
    records_to_write = [
        record
        for account_id, record in desired.items()
        if current.get(account_id) != record
    ]

    with accounts_table.batch_writer() as batch:
        for account_id in stale_account_ids:
            batch.delete_item(Key={'accountId': account_id})
        for record in records_to_write:
            batch.put_item(Item=record)
    return account_list_from_s3
def load_user():
    """Syncs users in user's table with those present in S3 bucket,
    ensures admin permissions are retained.

    Returns the raw user list read from S3 (emails lowercased in place).
    """
    user_emails_to_delete = []
    users_to_add = []
    s3_response = client_s3.get_object(Bucket=user_bucket, Key=os.getenv('USER_FILE_PATH'))
    user_list_from_s3 = json.loads(s3_response['Body'].read())
    users_from_s3 = {}
    for user in user_list_from_s3:
        # emails are lowercased so the S3/DynamoDB comparison is case-insensitive
        user['email'] = user['email'].lower()
        users_from_s3[user['email']] = user
    ddb_data = user_table.scan_all()
    users_from_ddb = {user['email']: user for user in ddb_data}
    for user_email, existing_user in users_from_ddb.items():
        if user_email in users_from_s3:
            if existing_user != users_from_s3[user_email]:
                if existing_user.get('isAdmin', False):
                    # update incoming user, carrying over the admin flag so a
                    # re-imported record cannot silently drop admin rights
                    users_to_add.append(dict(
                        users_from_s3[user_email],
                        **{
                            'isAdmin': existing_user.get('isAdmin'),
                        }))
                else:
                    users_to_add.append(users_from_s3[user_email])
        else:
            if existing_user.get('isAdmin', False):
                # admin user vanished from the S3 feed: keep a minimal record
                # so admin permissions survive the sync
                users_to_add.append({
                    'email': existing_user.get('email'),
                    'isAdmin': existing_user.get('isAdmin'),
                })
            else:
                user_emails_to_delete.append(user_email)
    # brand-new users present only in S3
    for user_email in users_from_s3:
        if user_email not in users_from_ddb:
            users_to_add.append(users_from_s3[user_email])
    with user_table.batch_writer() as batch:
        for user_email in user_emails_to_delete:
            batch.delete_item(Key={'email': user_email})
        for user in users_to_add:
            batch.put_item(Item=user)
    return user_list_from_s3
def load_requirements():
    """Load the requirements YAML from S3 and persist its sections.

    Denormalizes each requirement (embedding its id and severity weight),
    syncs the requirements table, and stores the other sections in the
    config table. Returns the requirements map and cloudsploit settings.
    """
    s3_object = client_s3.get_object(Bucket=requirements_bucket, Key=os.getenv('REQUIREMENTS_FILE_PATH'))
    requirements_doc = yaml.safe_load(s3_object['Body'].read())

    severity_weight_map = requirements_doc['severityWeightings']
    requirements = requirements_doc['database']
    # Denormalize: put the map key and the severity weight inside each record
    # so DynamoDB items are self-contained.
    for requirement_id, requirement in requirements.items():
        requirement['requirementId'] = requirement_id
        requirement['weight'] = severity_weight_map[requirement['severity']]

    update_requirements(requirements)
    update_exclusion_types(requirements_doc['exclusionTypes'])
    update_version(requirements_doc['version'])
    update_severity_colors(requirements_doc['severityColors'])
    update_severity_weights(severity_weight_map)
    update_remediations(requirements_doc['remediations'])
    return {
        'requirements': requirements,
        'cloudsploitSettingsMap': requirements_doc['cloudsploitSettings'],
    }
def update_requirements(requirements):
    """Sync the requirements DynamoDB table with the given requirement map.

    Requirements absent from the map are deleted; new or changed ones are
    (re)written.
    """
    existing = {
        item['requirementId']: item for item in requirements_table.scan_all()
    }
    obsolete_ids = [
        requirement_id for requirement_id in existing if requirement_id not in requirements
    ]
    changed_requirements = [
        requirement
        for requirement_id, requirement in requirements.items()
        if existing.get(requirement_id) != requirement
    ]
    with requirements_table.batch_writer() as batch:
        for requirement_id in obsolete_ids:
            batch.delete_item(Key={'requirementId': requirement_id})
        for requirement in changed_requirements:
            batch.put_item(Item=requirement)
def update_version(version):
    """Store the requirements-file version value in the config table."""
    config_table.set_config(config_table.VERSION, version)
def update_exclusion_types(exclusions):
    """Store the exclusion-types section in the config table."""
    config_table.set_config(config_table.EXCLUSIONS, exclusions)
def update_severity_colors(severity_colors):
    """Store the severity-color mapping in the config table."""
    config_table.set_config(config_table.SEVERITYCOLORS, severity_colors)
def update_severity_weights(severity_weight_map):
    """Store the severity-to-weight mapping in the config table."""
    config_table.set_config(config_table.SEVERITYWEIGHTS, severity_weight_map)
def update_remediations(remediations):
    """Store the remediations section in the config table."""
    config_table.set_config(config_table.REMEDIATIONS, remediations)
| 36.686275 | 126 | 0.704436 | import json
import os
import boto3
import yaml
from lib.dynamodb import accounts_table, requirements_table, user_table, config_table
from lib.lambda_decorator.decorator import states_decorator
# Module-level S3 client; bucket names come from deploy-time environment vars.
client_s3 = boto3.client('s3')
user_bucket = os.getenv('USER_BUCKET')
account_bucket = os.getenv('ACCOUNT_BUCKET')
requirements_bucket = os.getenv('REQUIREMENTS_BUCKET')
@states_decorator
def load_handler(event, context):
    """Import user, account, and requirements files from S3 and return scan
    summary data: account ids, payer ids, s3Import requirement ids, and the
    cloudsploit settings map."""
    accounts = load_accounts()
    load_user()
    requirements = load_requirements()
    return {
        'accountIds': list({a['accountId'] for a in accounts}),
        'payerIds': list({a.get('payer_id') for a in accounts if a.get('payer_id')}),
        's3RequirementIds': list({r_id for r_id, r in requirements['requirements'].items() if r.get('source') == 's3Import'}),
        'cloudsploitSettingsMap': requirements['cloudsploitSettingsMap']
    }
def load_accounts():
    """Sync the accounts DynamoDB table with the account list in S3: delete
    accounts missing from S3, write new/changed ones, return the S3 list."""
    account_ids_to_delete = []
    accounts_to_add = []
    s3_response = client_s3.get_object(Bucket=account_bucket, Key=os.getenv('ACCOUNT_FILE_PATH'))
    account_list_from_s3 = json.loads(s3_response['Body'].read())['accounts']
    for account in account_list_from_s3:
        accounts_table.normalize_account_record(account)
    accounts_from_s3 = {account['accountId']: account for account in account_list_from_s3}
    ddb_data = accounts_table.scan_all()
    accounts_from_ddb = {account['accountId']: account for account in ddb_data}
    for account_id in accounts_from_ddb:
        if account_id in accounts_from_s3:
            # present in both: rewrite only when the record changed
            if accounts_from_ddb[account_id] != accounts_from_s3[account_id]:
                accounts_to_add.append(accounts_from_s3[account_id])
        else:
            account_ids_to_delete.append(account_id)
    # brand-new accounts present only in S3
    for account_id in accounts_from_s3:
        if account_id not in accounts_from_ddb:
            accounts_to_add.append(accounts_from_s3[account_id])
    with accounts_table.batch_writer() as batch:
        for account_id in account_ids_to_delete:
            batch.delete_item(Key={'accountId': account_id})
        for account in accounts_to_add:
            batch.put_item(Item=account)
    return account_list_from_s3
def load_user():
    """Sync the users table with the user list in S3, preserving admin
    permissions for users that changed or disappeared from the feed."""
    user_emails_to_delete = []
    users_to_add = []
    s3_response = client_s3.get_object(Bucket=user_bucket, Key=os.getenv('USER_FILE_PATH'))
    user_list_from_s3 = json.loads(s3_response['Body'].read())
    users_from_s3 = {}
    for user in user_list_from_s3:
        # lowercase emails so the comparison is case-insensitive
        user['email'] = user['email'].lower()
        users_from_s3[user['email']] = user
    ddb_data = user_table.scan_all()
    users_from_ddb = {user['email']: user for user in ddb_data}
    for user_email, existing_user in users_from_ddb.items():
        if user_email in users_from_s3:
            if existing_user != users_from_s3[user_email]:
                if existing_user.get('isAdmin', False):
                    # carry the existing admin flag onto the incoming record
                    users_to_add.append(dict(
                        users_from_s3[user_email],
                        **{
                            'isAdmin': existing_user.get('isAdmin'),
                        }))
                else:
                    users_to_add.append(users_from_s3[user_email])
        else:
            if existing_user.get('isAdmin', False):
                # admin missing from S3: keep a minimal record so admin
                # rights survive the sync
                users_to_add.append({
                    'email': existing_user.get('email'),
                    'isAdmin': existing_user.get('isAdmin'),
                })
            else:
                user_emails_to_delete.append(user_email)
    for user_email in users_from_s3:
        if user_email not in users_from_ddb:
            users_to_add.append(users_from_s3[user_email])
    with user_table.batch_writer() as batch:
        for user_email in user_emails_to_delete:
            batch.delete_item(Key={'email': user_email})
        for user in users_to_add:
            batch.put_item(Item=user)
    return user_list_from_s3
def load_requirements():
    """Load the requirements YAML from S3, denormalize each requirement, sync
    the requirements table, and store the other sections in the config table.
    Returns the requirements map and the cloudsploit settings map."""
    s3_response = client_s3.get_object(Bucket=requirements_bucket, Key=os.getenv('REQUIREMENTS_FILE_PATH'))
    requirements_file = yaml.safe_load(s3_response['Body'].read())
    cloudsploit_settings_map = requirements_file['cloudsploitSettings']
    severity_weight_map = requirements_file['severityWeightings']
    exclusion_types = requirements_file['exclusionTypes']
    version = requirements_file['version']
    severity_colors = requirements_file['severityColors']
    remediations = requirements_file['remediations']
    requirements = requirements_file['database']
    # denormalize: embed the requirement id and its severity weight in each record
    for requirement_id, requirement in requirements.items():
        requirement['requirementId'] = requirement_id
        requirement['weight'] = severity_weight_map[requirement['severity']]
    update_requirements(requirements)
    update_exclusion_types(exclusion_types)
    update_version(version)
    update_severity_colors(severity_colors)
    update_severity_weights(severity_weight_map)
    update_remediations(remediations)
    return {
        'requirements': requirements,
        'cloudsploitSettingsMap': cloudsploit_settings_map,
    }
def update_requirements(requirements):
    """Sync the requirements table with *requirements*: delete ids missing
    from the map, write new or changed records."""
    requirement_ids_to_delete = []
    reqs_to_add = []
    # load requirements currently stored in DynamoDB
    ddb_data = requirements_table.scan_all()
    requirements_from_ddb = {requirement['requirementId']: requirement for requirement in ddb_data}
    for requirement_id in requirements_from_ddb:
        if requirement_id in requirements:
            if requirements_from_ddb[requirement_id] != requirements[requirement_id]:
                reqs_to_add.append(requirements[requirement_id])
        else:
            requirement_ids_to_delete.append(requirement_id)
    for requirement_id in requirements:
        if requirement_id not in requirements_from_ddb:
            reqs_to_add.append(requirements[requirement_id])
    with requirements_table.batch_writer() as batch:
        for requirement_id in requirement_ids_to_delete:
            batch.delete_item(Key={'requirementId': requirement_id})
        for requirement in reqs_to_add:
            batch.put_item(Item=requirement)
def update_version(version):
    """Store the requirements-file version value in the config table."""
    config_table.set_config(config_table.VERSION, version)
def update_exclusion_types(exclusions):
    """Store the exclusion-types section in the config table."""
    config_table.set_config(config_table.EXCLUSIONS, exclusions)
def update_severity_colors(severity_colors):
    """Store the severity-color mapping in the config table."""
    config_table.set_config(config_table.SEVERITYCOLORS, severity_colors)
def update_severity_weights(severity_weight_map):
    """Store the severity-to-weight mapping in the config table."""
    config_table.set_config(config_table.SEVERITYWEIGHTS, severity_weight_map)
def update_remediations(remediations):
    """Store the remediations section in the config table."""
    config_table.set_config(config_table.REMEDIATIONS, remediations)
| true | true |
f72676d4da3438923201ee65422d31a01243108f | 25,485 | py | Python | pytext/trainers/trainer.py | suo/pytext | 400c80b4c040de12028970a85ce0af864931e0f4 | [
"BSD-3-Clause"
] | null | null | null | pytext/trainers/trainer.py | suo/pytext | 400c80b4c040de12028970a85ce0af864931e0f4 | [
"BSD-3-Clause"
] | null | null | null | pytext/trainers/trainer.py | suo/pytext | 400c80b4c040de12028970a85ce0af864931e0f4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import time
from contextlib import ExitStack as contextlib_ExitStack
from typing import Any, Iterable, List, Optional, Tuple
import torch
from pytext.common.constants import BatchContext, Stage
from pytext.config import PyTextConfig
from pytext.config.component import (
Component,
ComponentType,
create_optimizer,
create_scheduler,
create_sparsifier,
)
from pytext.config.pytext_config import ConfigBase
from pytext.data.data_handler import BatchIterator
from pytext.metric_reporters import MetricReporter
from pytext.models.distributed_model import DistributedModel
from pytext.models.model import Model
from pytext.optimizer import Adam, Optimizer, learning_rates
from pytext.optimizer.scheduler import Scheduler
from pytext.optimizer.sparsifier import Sparsifier
from pytext.task.serialize import save
from pytext.trainers.training_state import TrainingState
from pytext.utils import cuda, precision, timing
class TrainerBase(Component):
    """Base class for trainers within PyText's component/config framework."""

    # Identifies subclasses as TRAINER components to the component registry.
    __COMPONENT_TYPE__ = ComponentType.TRAINER
def cycle(iterator: Iterable[Any]) -> Iterable[Any]:
    """Endlessly repeat *iterator* by calling iter() on it for every pass.

    Unlike itertools.cycle, no copy of the items is kept in memory, so this
    works for arbitrarily large iterables; the trade-off is that it cannot
    repeat a one-shot raw generator.
    """
    while True:
        for element in iterator:
            yield element
def maybe_accumulate_gradients(exit_stack, model, index, sample_size):
    """Register gradient-accumulation contexts for the current micro-batch.

    Whenever *samples* contains more than one mini-batch (sample_size > 1),
    gradients should be accumulated locally: in distributed training the
    all-reduce is deferred to the last backward pass (DDP ``no_sync``), and
    with FP16 the unscale to FP32 parameters is likewise delayed.

    Args:
        exit_stack: contextlib.ExitStack the contexts are registered on; they
            are exited when the caller leaves the stack.
        model: the model being trained (possibly DDP-wrapped with ``no_sync``).
        index: zero-based position of the current micro-batch.
        sample_size: total number of micro-batches in the accumulation group.
    """
    # index == sample_size - 1 represents the last backward pass.
    is_last_backward = index == sample_size - 1
    # Defer gradient all-reduce until the last backward pass of the group.
    if (
        cuda.DISTRIBUTED_WORLD_SIZE > 1
        and hasattr(model, "no_sync")
        and not is_last_backward
    ):
        exit_stack.enter_context(model.no_sync())
    # Accumulate gradients in FP16 (delay unscale) until the last backward
    # pass, then unscale to FP32 parameters.
    if precision._FP16_ENABLED and not is_last_backward:
        exit_stack.enter_context(precision.delay_unscale())
class Trainer(TrainerBase):
    """
    Base Trainer class that provide ways to
        1 Train model, compute metrics against eval set and use the metrics for
        model selection.
        2 Test trained model, compute and publish metrics against a blind test set.

    Attributes:
        epochs (int): Training epochs
        early_stop_after (int): Stop after how many epochs when the eval metric
            is not improving
        max_clip_norm (Optional[float]): Clip gradient norm if set
        report_train_metrics (bool): Whether metrics on training data should be
            computed and reported.
        target_time_limit_seconds (float): Target time limit for training in seconds. If
            the expected time to train another epoch exceeds this limit, stop training.
    """

    class Config(ConfigBase):
        #: Training epochs
        epochs: int = 10
        #: Stop after how many epochs when the eval metric is not improving
        early_stop_after: int = 0
        #: Clip gradient norm if set
        max_clip_norm: Optional[float] = None
        #: Whether metrics on training data should be computed and reported.
        report_train_metrics: bool = True
        #: Target time limit for training, default (None) to no time limit.
        target_time_limit_seconds: Optional[int] = None
        #: Whether to do evaluation and model selection based on it.
        do_eval: bool = True
        #: Number of samples for logging training progress.
        num_samples_to_log_progress: int = 1000
        #: Number of forward & backward per batch before update gradients, the
        #: actual_batch_size = batch_size x num_accumulated_batches
        num_accumulated_batches: int = 1
        #: Define epoch as a fixed number of batches. Subsequent epochs will continue
        #: to iterate through the data, cycling through it when they reach the end.
        #: If not set, use exactly one pass through the dataset as one epoch.
        #: This configuration only affects the train epochs, test and eval
        #: will always test their entire datasets.
        num_batches_per_epoch: Optional[int] = None
        #: config for optimizer, used in parameter update
        optimizer: Optimizer.Config = Adam.Config()
        scheduler: Optional[Scheduler.Config] = None
        sparsifier: Optional[Sparsifier.Config] = None

    def __init__(self, config: Config, model: torch.nn.Module):
        """Build the optimizer/scheduler/sparsifier from config and let the
        precision layer (e.g. FP16) wrap the model and optimizer."""
        if config.early_stop_after > 0:
            assert config.do_eval, "can't do early stopping when not running evalution"

        optimizer: torch.optim.Optimizer = create_optimizer(config.optimizer, model)
        self.scheduler: torch.optim.lr_scheduler = (
            create_scheduler(config.scheduler, optimizer)
            if config.scheduler
            else Scheduler()
        )
        self.sparsifier: Sparsifier = (
            create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()
        )
        # precision.initialize may rewrap the model/optimizer (e.g. for FP16)
        model, self.optimizer = precision.initialize(model, optimizer)
        self.config = config

    @classmethod
    def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):
        """Component-framework factory; extra positional/keyword args are
        accepted for interface compatibility but unused."""
        return cls(config, model)

    @timing.time("Trainer.test")
    def test(self, test_iter, model, metric_reporter: MetricReporter):
        """Run a single no-grad pass over *test_iter* in the TEST stage and
        return the metrics computed by *metric_reporter*."""
        state = TrainingState(stage=Stage.TEST, model=model, epoch=1)
        if cuda.CUDA_ENABLED:
            state.model.cuda()
        state.model.eval()
        with torch.no_grad():
            return self.run_epoch(state, test_iter, metric_reporter)

    @timing.time("pre-training")
    def set_up_training(self, state: TrainingState, training_data: BatchIterator):
        """Move the model to GPU if available, prepare the scheduler, wrap the
        model for distributed training when needed, and return the training
        iterator (cycled when num_batches_per_epoch is configured)."""
        if cuda.CUDA_ENABLED:
            state.model.cuda()
        state.scheduler.prepare(training_data, self.config.epochs)

        if cuda.DISTRIBUTED_WORLD_SIZE > 1:
            device_id = torch.cuda.current_device()
            state.model = DistributedModel(
                module=state.model,
                device_ids=[device_id],
                output_device=device_id,
                broadcast_buffers=False,
                find_unused_parameters=state.model.find_unused_parameters,
            )
        state.start_time = time.time()

        if self.config.num_batches_per_epoch:
            # Set the training_data iterator to cycle, so it will never run out,
            # but rather after reaching the end will loop back to the beginning.
            training_data = cycle(training_data)
        return training_data

    @timing.time("zero gradients")
    def zero_grads(self, state):
        """Clear accumulated gradients; no-op outside the TRAIN stage."""
        if state.stage != Stage.TRAIN:
            return
        state.optimizer.zero_grad()

    @timing.time("backprop")
    def backprop(self, state, loss):
        """Backpropagate *loss* through the precision layer; no-op outside
        the TRAIN stage."""
        if state.stage != Stage.TRAIN:
            return

        with timing.time("loss.backward"):
            precision.backward(state.optimizer, loss)

    @timing.time("optimizer")
    def optimizer_step(self, state):
        """Step the batch-level scheduler, optionally clip gradients, and run
        the optimizer update; no-op outside TRAIN. Returns the gradient norm
        when clipping is configured, otherwise None."""
        if state.stage != Stage.TRAIN:
            return

        state.scheduler.step_batch()

        if self.config.max_clip_norm is not None:
            grad_norm = precision.clip_grad_norm(
                state.model, state.optimizer, self.config.max_clip_norm
            )
        else:
            grad_norm = None

        with timing.time("optimizer.step"):
            state.optimizer.step()
        state.step_counter += 1
        # grad_norm could be used to check grads sync in distributed training
        return grad_norm

    @timing.time("sparsifier")
    def sparsification_step(self, state):
        """Apply the sparsifier when one is configured, the stage is TRAIN,
        and its sparsification condition holds; rank 0 logs the sparsity."""
        # sparsification only if sparifier is used
        if not self.config.sparsifier:
            return

        if state.stage != Stage.TRAIN:
            return

        if state.sparsifier.sparsification_condition(state):
            state.sparsifier.sparsify(state)
            if state.rank == 0:
                current_sparsity = state.sparsifier.get_current_sparsity(state.model)
                print(f"sparsity in the model: {current_sparsity}")

    def continue_training(self, state: TrainingState) -> bool:
        """Return whether another training epoch should run, honoring the
        epoch budget, early stopping, and the optional wall-clock limit."""
        # Are we done?
        if state.epoch >= self.config.epochs:
            return False

        # Check whether the model has improved recently enough
        # Only do this if we're bothering to evaluate the model
        if self.config.do_eval and state.epochs_since_last_improvement >= (
            self.config.early_stop_after or float("inf")
        ):
            print(
                f"Worker {state.rank}: Eval metric hasn't changed for "
                + f"{state.epochs_since_last_improvement} epochs. Stopping now."
            )
            return False

        # Check whether we think the next epoch will put us over the configured
        # time limit.
        epochs_run = state.epoch + 1
        time_elapsed = time.time() - state.start_time
        mean_epoch_time = time_elapsed / epochs_run
        expected_next_epoch_time = time_elapsed + mean_epoch_time
        target_time_limit = (
            float("inf")
            if self.config.target_time_limit_seconds is None
            else self.config.target_time_limit_seconds
        )
        if expected_next_epoch_time > target_time_limit:
            print(
                f"Worker {state.rank}: Stopping training after {epochs_run} epochs "
                f"and {int(time_elapsed)} seconds, due to the target max training "
                f"time of {self.config.target_time_limit_seconds} seconds."
            )
            return False

        return True

    def update_best_model(
        self, state: TrainingState, train_config: PyTextConfig, eval_metric
    ):
        """Record *eval_metric* as the new best and, on rank 0 only, snapshot
        the model weights (moved to CPU) into the training state."""
        # This should be updated by all workers so they agree on when to stop training
        # when `early_stop_after` is specified.
        state.epochs_since_last_improvement = 0
        state.best_model_metric = eval_metric
        print(f"Found a better model!")

        # Only one worker should save checkpoints
        if state.rank != 0:
            return

        model_state = state.model.state_dict()
        # save to cpu to avoid multiple model copies in gpu memory
        if cuda.CUDA_ENABLED:
            for key, parameter in model_state.items():
                model_state[key] = parameter.cpu()
        state.best_model_state = model_state

    @timing.time("save checkpoint")
    def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:
        """Persist per-epoch module snapshots and, when configured, a full
        training checkpoint; only rank 0 writes anything. Returns the saved
        checkpoint identifier when a full checkpoint is written."""
        # Only one worker should save checkpoints
        if state.rank != 0:
            return

        if train_config.save_module_checkpoints or train_config.save_all_checkpoints:
            # saves per-epoch sub-modules when save_all_checkpoints or
            # save_module_checkpoints is enabled
            state.model.save_modules(
                base_path=train_config.modules_save_dir, suffix=f"-ep{state.epoch}"
            )
            if state.epochs_since_last_improvement == 0:
                # state.epochs_since_last_improvement == 0 means found a better
                # model in current epoch, thus update best model's sub-modules
                state.model.save_modules(base_path=train_config.modules_save_dir)

        # next to add new config and implementation of frequency on checkpointing
        if train_config.save_all_checkpoints:
            return save(
                config=train_config,
                model=state.model,
                meta=None,
                tensorizers=None,
                training_state=state,
                identifier=str(state.epoch),
            )

    def load_best_model(self, state: TrainingState):
        """Restore the best snapshot captured by update_best_model into
        state.model, shuffling via CPU on GPU to avoid two resident copies."""
        if cuda.CUDA_ENABLED:
            # Move current model to CPU to avoid multiple models in GPU memory
            state.model.cpu()
            state.model.load_state_dict(
                {k: v.cuda() for k, v in state.best_model_state.items()}
            )
            # Move model back to GPU
            state.model.cuda()
        else:
            state.model.load_state_dict(state.best_model_state)

    def train(
        self,
        training_data: BatchIterator,
        eval_data: BatchIterator,
        model: Model,
        metric_reporter: MetricReporter,
        train_config: PyTextConfig,
        rank: int = 0,
    ) -> Tuple[torch.nn.Module, Any]:
        """
        Train and eval a model, the model states will be modified.

        Args:
            training_data (BatchIterator): batch iterator of training data
            eval_data (BatchIterator): batch iterator of evaluation data
            model (Model): model to be trained
            metric_reporter (MetricReporter): compute metric based on training
                output and report results to console, file.. etc
            train_config (PyTextConfig): training config
            rank (int): only used in distributed training, the rank of the current
                training thread, evaluation will only be done in rank 0

        Returns:
            model, best_metric: the trained model together with the best metric
        """
        state = TrainingState(
            model=model,
            optimizer=self.optimizer,
            scheduler=self.scheduler,
            sparsifier=self.sparsifier,
            rank=rank,
        )
        return self.train_from_state(
            state, training_data, eval_data, metric_reporter, train_config
        )

    @timing.time("Trainer.train_from_state")
    def train_from_state(
        self,
        state: TrainingState,
        training_data: BatchIterator,
        eval_data: BatchIterator,
        metric_reporter: MetricReporter,
        train_config: PyTextConfig,
    ) -> Tuple[torch.nn.Module, Any]:
        """
        Train and eval a model from a given training state will be modified.
        This function iterates epochs specified in config, and for each epoch do:

            1. Train model using training data, aggregate and report training results
            2. Adjust learning rate if scheduler is specified
            3. Evaluate model using evaluation data
            4. Calculate metrics based on evaluation results and select best model

        Args:
            state (TrainingState): contains stateful information to be
                able to restore a training job
            training_data (BatchIterator): batch iterator of training data
            eval_data (BatchIterator): batch iterator of evaluation data
            metric_reporter (MetricReporter): compute metric based on training
                output and report results to console, file.. etc
            train_config (PyTextConfig): training config

        Returns:
            model, best_metric: the trained model together with the best metric
        """
        training_data = self.set_up_training(state, training_data)
        model = state.model
        rank = state.rank
        trainable_params = sum(
            p.numel() for p in state.model.parameters() if p.requires_grad
        )
        print(f"Num trainable parameters: {trainable_params}")

        while self.continue_training(state):
            state.epoch += 1
            state.epochs_since_last_improvement += 1
            lrs = learning_rates(state.optimizer)
            print(f"\nWorker {state.rank} starting epoch {state.epoch}")
            print(f"Learning rate(s): {', '.join(map(str, lrs))}")

            with timing.time("train epoch"):
                state.stage = Stage.TRAIN
                state.model.train()
                print(f"start training epoch {state.epoch}")
                epoch_data = training_data
                if self.config.num_batches_per_epoch:
                    # We want to limit the number of batches in the epoch;
                    # equivalent to epoch_data[:num_batches_per_epoch] for iterators.
                    # In this case we set the training data iterator to cycle earlier
                    # in the training process, so when it reaches the end it will
                    # loop back to the beginning.
                    epoch_data = itertools.islice(
                        epoch_data, self.config.num_batches_per_epoch
                    )
                self.run_epoch(state, epoch_data, metric_reporter)

            if not self.config.do_eval:
                continue

            with timing.time("eval epoch"):
                state.stage = Stage.EVAL
                model.eval(Stage.EVAL)
                print(f"start evaluating epoch {state.epoch}")
                with torch.no_grad():
                    eval_metric = self.run_epoch(state, eval_data, metric_reporter)

            # Step the learning rate scheduler(s)
            assert eval_metric is not None
            state.scheduler.step_epoch(
                metrics=metric_reporter.get_model_select_metric(eval_metric),
                epoch=state.epoch,
            )

            # Did we train a better model?
            better_model = metric_reporter.compare_metric(
                eval_metric, state.best_model_metric
            )
            if better_model:
                self.update_best_model(state, train_config, eval_metric)
            if better_model or train_config.save_all_checkpoints:
                self.save_checkpoint(state, train_config)

        # Some optimizers (e.g. averaging-based ones) need a finalize step;
        # when it changes the parameters, re-evaluate and re-select the model.
        if self.optimizer.finalize():
            state.stage = Stage.EVAL
            model.eval(Stage.EVAL)
            print(f"start evaluating finalized state")
            with torch.no_grad():
                eval_metric = self.run_epoch(state, eval_data, metric_reporter)
            better_model = metric_reporter.compare_metric(
                eval_metric, state.best_model_metric
            )
            if better_model:
                self.update_best_model(state, train_config, eval_metric)
            if better_model or train_config.save_all_checkpoints:
                self.save_checkpoint(state, train_config)
        # Only bother loading the best model for master worker
        if rank == 0 and state.best_model_state is not None:
            self.load_best_model(state)
        return state.model, state.best_model_metric

    @timing.report_snapshot
    def run_epoch(
        self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter
    ):
        """Iterate *data* once, grouping batches into accumulation groups of
        num_accumulated_batches (TRAIN only) and dispatching each group to
        run_step; report or reset metrics at the end."""
        # This method is due for some refactoring, pushing it off because it interacts
        # with the metric reporter too much. Much of the logic here either changes in
        # the NewTaskTrainer or should change with a better metric reporter design.
        report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics
        model = state.model

        samples = []
        """
        Sometimes, a batch of inputs is too large to fit into GPU, which has to
        be split into several micro-batches. However, to improve efficiency,
        it would be helpful to only apply params/gradients sync at original batch
        boundaries instead of micro-batch boundaries.
        num_accumulated_batches specified the number of accumulating gradients
        locally before sync gradients, total training_batch_size =
        train_batch_size x num_accumulated_batches and it will improve the system
        performance by reduce the total network transfer bytes.
        """
        for sample in enumerate(data):
            samples.append(sample)
            if (
                state.stage != Stage.TRAIN
                or len(samples) == self.config.num_accumulated_batches
            ):
                self.run_step(samples, state, metric_reporter, report_metric)
                samples = []
        if samples:
            # flush the trailing, partially-filled accumulation group
            self.run_step(samples, state, metric_reporter, report_metric)
            samples = []

        metrics = None
        if report_metric:
            with timing.time("report metrics"):
                metrics = metric_reporter.report_metric(
                    model, state.stage, state.epoch, print_to_channels=(state.rank == 0)
                )
        else:
            metric_reporter._reset()
        return metrics

    @timing.time("run_step")
    def run_step(
        self,
        samples: List[Any],
        state: TrainingState,
        metric_reporter: MetricReporter,
        report_metric: bool,
    ):
        """Run forward/backward for every micro-batch in *samples*, then apply
        a single optimizer (and sparsifier) update for the whole group."""
        sample_size = len(samples)
        assert sample_size <= self.config.num_accumulated_batches

        model = state.model
        self.zero_grads(state)
        for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):
            with contextlib_ExitStack() as exit_stack:
                maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
                # pass context to model to use in forward call if needed
                model.contextualize(context)
                with timing.time("model.forward"):
                    logits = model(*inputs)

                with timing.time("compute loss"):
                    loss = precision.maybe_float(
                        model.get_loss(logits, targets, context)
                    )
                    if BatchContext.IGNORE_LOSS in context:
                        loss *= 0
                    elif sample_size > 1:
                        # gradients averaged per batch and accumulated across samples.
                        # divide sample_size to let gradients averaged per example
                        loss = loss / sample_size

                self.backprop(state, loss)

            if report_metric:
                with timing.time("get pred"):
                    preds, scores = model.get_pred(
                        logits, targets, context, state.stage, *inputs
                    )

                with timing.time("add metrics"):
                    metric_reporter.add_batch_stats(
                        batch_id, preds, targets, scores, loss.item(), inputs, **context
                    )

                if batch_id % self.config.num_samples_to_log_progress == 0:
                    print(
                        f"Running batch {batch_id} for epoch {state.epoch} in {state.stage} stage",
                        flush=True,
                    )
        # update gradients after len(samples) forward & backward
        self.optimizer_step(state)
        self.sparsification_step(state)
class TaskTrainer(Trainer):
    """Trainer for new-style tasks whose model exposes `train_batch`,
    which arranges tensors and computes loss + metric data itself."""

    __EXPANSIBLE__ = True

    class Config(Trainer.Config):
        """Make mypy happy"""

    @timing.time("run_step")
    def run_step(
        self,
        samples: List[Any],
        state: TrainingState,
        metric_reporter: MetricReporter,
        report_metric: bool,
    ):
        """Our run_step is a bit different, because we're wrapping the model forward
        call with model.train_batch, which arranges tensors and gets loss, etc.

        Whenever "samples" contains more than one mini-batch (sample_size > 1),
        we want to accumulate gradients locally and only call all-reduce in the
        last backwards pass.
        """
        sample_size = len(samples)
        assert sample_size <= self.config.num_accumulated_batches
        model = state.model
        self.zero_grads(state)
        for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):
            with contextlib_ExitStack() as exit_stack:
                # enter ddp no_sync context and fp16 delay_scale context if needed
                maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
                with timing.time("model.train_batch"):
                    loss, metric_data = model.train_batch(model, batch, state)
                    if sample_size > 1:
                        # gradients averaged per batch and accumulated across samples.
                        # divide sample_size to let gradients averaged per example
                        loss = loss / sample_size
                self.backprop(state, loss)
            if report_metric:
                with timing.time("add metrics"):
                    metric_reporter.add_batch_stats(
                        batch_id,
                        *metric_data,
                        # TODO merge this step into add_batch_stats once all data
                        # migration is done
                        **metric_reporter.batch_context(raw_batch, batch),
                    )
                if batch_id % self.config.num_samples_to_log_progress == 0:
                    metric_reporter.report_realtime_metric(state.stage)
        # update gradients after #len(samples) forward & backward
        self.optimizer_step(state)
        self.sparsification_step(state)

    def _prepare_scheduler(self, training_batches, scheduler=None):
        """Batch based schedulers require knowing the number of batches in
        the data. We're not supporting that yet with the Data api, need to figure out
        how to expose this info or restructure batch-based schedulers to not need it."""
        if scheduler.batch_based_schedulers:
            raise Exception("New tasks don't yet support batch-based scheduling")
        return scheduler
| 41.371753 | 95 | 0.626368 |
import itertools
import time
from contextlib import ExitStack as contextlib_ExitStack
from typing import Any, Iterable, List, Optional, Tuple
import torch
from pytext.common.constants import BatchContext, Stage
from pytext.config import PyTextConfig
from pytext.config.component import (
Component,
ComponentType,
create_optimizer,
create_scheduler,
create_sparsifier,
)
from pytext.config.pytext_config import ConfigBase
from pytext.data.data_handler import BatchIterator
from pytext.metric_reporters import MetricReporter
from pytext.models.distributed_model import DistributedModel
from pytext.models.model import Model
from pytext.optimizer import Adam, Optimizer, learning_rates
from pytext.optimizer.scheduler import Scheduler
from pytext.optimizer.sparsifier import Sparsifier
from pytext.task.serialize import save
from pytext.trainers.training_state import TrainingState
from pytext.utils import cuda, precision, timing
class TrainerBase(Component):
__COMPONENT_TYPE__ = ComponentType.TRAINER
def cycle(iterator: Iterable[Any]) -> Iterable[Any]:
while True:
yield from iterator
def maybe_accumulate_gradients(exit_stack, model, index, sample_size):
if (
cuda.DISTRIBUTED_WORLD_SIZE > 1
and hasattr(model, "no_sync")
and index < sample_size - 1
):
exit_stack.enter_context(model.no_sync())
if precision._FP16_ENABLED and index < sample_size - 1:
exit_stack.enter_context(precision.delay_unscale())
class Trainer(TrainerBase):
class Config(ConfigBase):
epochs: int = 10
early_stop_after: int = 0
max_clip_norm: Optional[float] = None
report_train_metrics: bool = True
target_time_limit_seconds: Optional[int] = None
do_eval: bool = True
num_samples_to_log_progress: int = 1000
num_accumulated_batches: int = 1
num_batches_per_epoch: Optional[int] = None
optimizer: Optimizer.Config = Adam.Config()
scheduler: Optional[Scheduler.Config] = None
sparsifier: Optional[Sparsifier.Config] = None
def __init__(self, config: Config, model: torch.nn.Module):
if config.early_stop_after > 0:
assert config.do_eval, "can't do early stopping when not running evalution"
optimizer: torch.optim.Optimizer = create_optimizer(config.optimizer, model)
self.scheduler: torch.optim.lr_scheduler = (
create_scheduler(config.scheduler, optimizer)
if config.scheduler
else Scheduler()
)
self.sparsifier: Sparsifier = (
create_sparsifier(config.sparsifier) if config.sparsifier else Sparsifier()
)
model, self.optimizer = precision.initialize(model, optimizer)
self.config = config
@classmethod
def from_config(cls, config: Config, model: torch.nn.Module, *args, **kwargs):
return cls(config, model)
@timing.time("Trainer.test")
def test(self, test_iter, model, metric_reporter: MetricReporter):
state = TrainingState(stage=Stage.TEST, model=model, epoch=1)
if cuda.CUDA_ENABLED:
state.model.cuda()
state.model.eval()
with torch.no_grad():
return self.run_epoch(state, test_iter, metric_reporter)
@timing.time("pre-training")
def set_up_training(self, state: TrainingState, training_data: BatchIterator):
if cuda.CUDA_ENABLED:
state.model.cuda()
state.scheduler.prepare(training_data, self.config.epochs)
if cuda.DISTRIBUTED_WORLD_SIZE > 1:
device_id = torch.cuda.current_device()
state.model = DistributedModel(
module=state.model,
device_ids=[device_id],
output_device=device_id,
broadcast_buffers=False,
find_unused_parameters=state.model.find_unused_parameters,
)
state.start_time = time.time()
if self.config.num_batches_per_epoch:
# Set the training_data iterator to cycle, so it will never run out,
# but rather after reaching the end will loop back to the beginning.
training_data = cycle(training_data)
return training_data
@timing.time("zero gradients")
def zero_grads(self, state):
if state.stage != Stage.TRAIN:
return
state.optimizer.zero_grad()
@timing.time("backprop")
def backprop(self, state, loss):
if state.stage != Stage.TRAIN:
return
with timing.time("loss.backward"):
precision.backward(state.optimizer, loss)
@timing.time("optimizer")
def optimizer_step(self, state):
if state.stage != Stage.TRAIN:
return
state.scheduler.step_batch()
if self.config.max_clip_norm is not None:
grad_norm = precision.clip_grad_norm(
state.model, state.optimizer, self.config.max_clip_norm
)
else:
grad_norm = None
with timing.time("optimizer.step"):
state.optimizer.step()
state.step_counter += 1
# grad_norm could be used to check grads sync in distributed training
return grad_norm
@timing.time("sparsifier")
def sparsification_step(self, state):
# sparsification only if sparifier is used
if not self.config.sparsifier:
return
if state.stage != Stage.TRAIN:
return
if state.sparsifier.sparsification_condition(state):
state.sparsifier.sparsify(state)
if state.rank == 0:
current_sparsity = state.sparsifier.get_current_sparsity(state.model)
print(f"sparsity in the model: {current_sparsity}")
def continue_training(self, state: TrainingState) -> bool:
# Are we done?
if state.epoch >= self.config.epochs:
return False
# Check whether the model has improved recently enough
# Only do this if we're bothering to evaluate the model
if self.config.do_eval and state.epochs_since_last_improvement >= (
self.config.early_stop_after or float("inf")
):
print(
f"Worker {state.rank}: Eval metric hasn't changed for "
+ f"{state.epochs_since_last_improvement} epochs. Stopping now."
)
return False
# Check whether we think the next epoch will put us over the configured
# time limit.
epochs_run = state.epoch + 1
time_elapsed = time.time() - state.start_time
mean_epoch_time = time_elapsed / epochs_run
expected_next_epoch_time = time_elapsed + mean_epoch_time
target_time_limit = (
float("inf")
if self.config.target_time_limit_seconds is None
else self.config.target_time_limit_seconds
)
if expected_next_epoch_time > target_time_limit:
print(
f"Worker {state.rank}: Stopping training after {epochs_run} epochs "
f"and {int(time_elapsed)} seconds, due to the target max training "
f"time of {self.config.target_time_limit_seconds} seconds."
)
return False
return True
def update_best_model(
self, state: TrainingState, train_config: PyTextConfig, eval_metric
):
# This should be updated by all workers so they agree on when to stop training
# when `early_stop_after` is specified.
state.epochs_since_last_improvement = 0
state.best_model_metric = eval_metric
print(f"Found a better model!")
# Only one worker should save checkpoints
if state.rank != 0:
return
model_state = state.model.state_dict()
# save to cpu to avoid multiple model copies in gpu memory
if cuda.CUDA_ENABLED:
for key, parameter in model_state.items():
model_state[key] = parameter.cpu()
state.best_model_state = model_state
@timing.time("save checkpoint")
def save_checkpoint(self, state: TrainingState, train_config: PyTextConfig) -> str:
# Only one worker should save checkpoints
if state.rank != 0:
return
if train_config.save_module_checkpoints or train_config.save_all_checkpoints:
# saves per-epoch sub-modules when save_all_checkpoints or
# save_module_checkpoints is enabled
state.model.save_modules(
base_path=train_config.modules_save_dir, suffix=f"-ep{state.epoch}"
)
if state.epochs_since_last_improvement == 0:
# state.epochs_since_last_improvement == 0 means found a better
# model in current epoch, thus update best model's sub-modules
state.model.save_modules(base_path=train_config.modules_save_dir)
if train_config.save_all_checkpoints:
return save(
config=train_config,
model=state.model,
meta=None,
tensorizers=None,
training_state=state,
identifier=str(state.epoch),
)
def load_best_model(self, state: TrainingState):
if cuda.CUDA_ENABLED:
state.model.cpu()
state.model.load_state_dict(
{k: v.cuda() for k, v in state.best_model_state.items()}
)
state.model.cuda()
else:
state.model.load_state_dict(state.best_model_state)
def train(
self,
training_data: BatchIterator,
eval_data: BatchIterator,
model: Model,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
state = TrainingState(
model=model,
optimizer=self.optimizer,
scheduler=self.scheduler,
sparsifier=self.sparsifier,
rank=rank,
)
return self.train_from_state(
state, training_data, eval_data, metric_reporter, train_config
)
@timing.time("Trainer.train_from_state")
def train_from_state(
self,
state: TrainingState,
training_data: BatchIterator,
eval_data: BatchIterator,
metric_reporter: MetricReporter,
train_config: PyTextConfig,
) -> Tuple[torch.nn.Module, Any]:
training_data = self.set_up_training(state, training_data)
model = state.model
rank = state.rank
trainable_params = sum(
p.numel() for p in state.model.parameters() if p.requires_grad
)
print(f"Num trainable parameters: {trainable_params}")
while self.continue_training(state):
state.epoch += 1
state.epochs_since_last_improvement += 1
lrs = learning_rates(state.optimizer)
print(f"\nWorker {state.rank} starting epoch {state.epoch}")
print(f"Learning rate(s): {', '.join(map(str, lrs))}")
with timing.time("train epoch"):
state.stage = Stage.TRAIN
state.model.train()
print(f"start training epoch {state.epoch}")
epoch_data = training_data
if self.config.num_batches_per_epoch:
epoch_data = itertools.islice(
epoch_data, self.config.num_batches_per_epoch
)
self.run_epoch(state, epoch_data, metric_reporter)
if not self.config.do_eval:
continue
with timing.time("eval epoch"):
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating epoch {state.epoch}")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
assert eval_metric is not None
state.scheduler.step_epoch(
metrics=metric_reporter.get_model_select_metric(eval_metric),
epoch=state.epoch,
)
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
if self.optimizer.finalize():
state.stage = Stage.EVAL
model.eval(Stage.EVAL)
print(f"start evaluating finalized state")
with torch.no_grad():
eval_metric = self.run_epoch(state, eval_data, metric_reporter)
better_model = metric_reporter.compare_metric(
eval_metric, state.best_model_metric
)
if better_model:
self.update_best_model(state, train_config, eval_metric)
if better_model or train_config.save_all_checkpoints:
self.save_checkpoint(state, train_config)
if rank == 0 and state.best_model_state is not None:
self.load_best_model(state)
return state.model, state.best_model_metric
@timing.report_snapshot
def run_epoch(
self, state: TrainingState, data: BatchIterator, metric_reporter: MetricReporter
):
report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics
model = state.model
samples = []
for sample in enumerate(data):
samples.append(sample)
if (
state.stage != Stage.TRAIN
or len(samples) == self.config.num_accumulated_batches
):
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
if samples:
self.run_step(samples, state, metric_reporter, report_metric)
samples = []
metrics = None
if report_metric:
with timing.time("report metrics"):
metrics = metric_reporter.report_metric(
model, state.stage, state.epoch, print_to_channels=(state.rank == 0)
)
else:
metric_reporter._reset()
return metrics
@timing.time("run_step")
def run_step(
self,
samples: List[Any],
state: TrainingState,
metric_reporter: MetricReporter,
report_metric: bool,
):
sample_size = len(samples)
assert sample_size <= self.config.num_accumulated_batches
model = state.model
self.zero_grads(state)
for idx, (batch_id, (inputs, targets, context)) in enumerate(samples):
with contextlib_ExitStack() as exit_stack:
maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
model.contextualize(context)
with timing.time("model.forward"):
logits = model(*inputs)
with timing.time("compute loss"):
loss = precision.maybe_float(
model.get_loss(logits, targets, context)
)
if BatchContext.IGNORE_LOSS in context:
loss *= 0
elif sample_size > 1:
loss = loss / sample_size
self.backprop(state, loss)
if report_metric:
with timing.time("get pred"):
preds, scores = model.get_pred(
logits, targets, context, state.stage, *inputs
)
with timing.time("add metrics"):
metric_reporter.add_batch_stats(
batch_id, preds, targets, scores, loss.item(), inputs, **context
)
if batch_id % self.config.num_samples_to_log_progress == 0:
print(
f"Running batch {batch_id} for epoch {state.epoch} in {state.stage} stage",
flush=True,
)
self.optimizer_step(state)
self.sparsification_step(state)
class TaskTrainer(Trainer):
__EXPANSIBLE__ = True
class Config(Trainer.Config):
@timing.time("run_step")
def run_step(
self,
samples: List[Any],
state: TrainingState,
metric_reporter: MetricReporter,
report_metric: bool,
):
sample_size = len(samples)
assert sample_size <= self.config.num_accumulated_batches
model = state.model
self.zero_grads(state)
for idx, (batch_id, (raw_batch, batch)) in enumerate(samples):
with contextlib_ExitStack() as exit_stack:
maybe_accumulate_gradients(exit_stack, model, idx, sample_size)
with timing.time("model.train_batch"):
loss, metric_data = model.train_batch(model, batch, state)
if sample_size > 1:
loss = loss / sample_size
self.backprop(state, loss)
if report_metric:
with timing.time("add metrics"):
metric_reporter.add_batch_stats(
batch_id,
*metric_data,
**metric_reporter.batch_context(raw_batch, batch),
)
if batch_id % self.config.num_samples_to_log_progress == 0:
metric_reporter.report_realtime_metric(state.stage)
te)
self.sparsification_step(state)
def _prepare_scheduler(self, training_batches, scheduler=None):
if scheduler.batch_based_schedulers:
raise Exception("New tasks don't yet support batch-based scheduling")
return scheduler
| true | true |
f7267815389b318d6a8e4cfe05c3808d59d955a5 | 6,214 | py | Python | fairseq/criterions/masked_adlm.py | a1600012888/fairseq | dbd2cd08fc396f919d2e737513095fcb966896c0 | [
"MIT"
] | null | null | null | fairseq/criterions/masked_adlm.py | a1600012888/fairseq | dbd2cd08fc396f919d2e737513095fcb966896c0 | [
"MIT"
] | null | null | null | fairseq/criterions/masked_adlm.py | a1600012888/fairseq | dbd2cd08fc396f919d2e737513095fcb966896c0 | [
"MIT"
] | 1 | 2020-04-01T03:31:00.000Z | 2020-04-01T03:31:00.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('masked_adlm')
class MaskedAdLmLoss(FairseqCriterion):
    """
    Implementation for the loss used in masked language model (MLM) training,
    extended with a per-token *adaptive margin*: each vocabulary entry owns a
    scalar margin that is subtracted from its gold logit before the softmax,
    and the margins themselves are updated by a manual SGD step in `forward`
    (they are excluded from autograd).
    """

    def __init__(self, args, task):
        super(MaskedAdLmLoss, self).__init__(args, task)
        self.vocab = self.task.source_dictionary
        print(len(self.vocab.count))  # debug: vocabulary size at startup
        # One margin scalar per vocabulary entry, stored as a buffer so it is
        # saved/moved with the module but never trained by the optimizer.
        self.register_buffer('margins', torch.zeros((len(self.vocab.count), 1)))
        self.margins.requires_grad = False  # updated manually in forward()
        self.margin_lambda = args.margin_lambda  # weight of the margin-norm penalty
        self.margin_lr = args.margin_lr          # step size of the manual margin update
        self.margin_norm = args.margin_norm      # 'l1' or 'l2' penalty on the margins

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        super(MaskedAdLmLoss,
              MaskedAdLmLoss).add_args(parser)
        parser.add_argument('--margin_lambda', default=0.5, type=float, metavar='D',
                            help='weight for the adaptive margin loss')
        parser.add_argument('--margin_lr', default=0.0001, type=float, metavar='D',
                            help='weight for the adaptive margin loss')
        parser.add_argument('--margin-norm', default='l1', type=str,
                            help='Type of margin norm in the loss')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        # compute MLM loss
        masked_tokens = sample['target'].ne(self.padding_idx)
        sample_size = masked_tokens.int().sum().item()

        # (Rare case) When all tokens are masked, the model results in empty
        # tensor and gives CUDA error.
        if sample_size == 0:
            masked_tokens = None

        logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
        targets = model.get_targets(sample, [logits])

        if sample_size != 0:
            targets = targets[masked_tokens]

        # targets shape: [x]; logits shape: [x, vocab_size]
        one_hot = F.one_hot(targets, len(self.vocab.count))  # [x, vocab_size]
        m = F.embedding(targets, self.margins)  # [x, 1]: margin of each gold token
        margin = m * one_hot  # [x, vocab_size]: margin placed on the gold class only

        # AM-softmax style: subtract the margin from the gold logit before softmax.
        logits_minus_margin = logits - margin
        log_softmax = F.log_softmax(
            logits_minus_margin.view(-1, logits.size(-1)),
            dim=-1,
            dtype=torch.float32,
        )  # [x, vocab_size]
        adm_loss = F.nll_loss(
            log_softmax,
            targets.view(-1),
            reduction='sum',
            ignore_index=self.padding_idx,
        )
        # cal margin grad: manual SGD step on the margins (outside autograd).
        with torch.no_grad():
            margin_log_grad = torch.gather(log_softmax.detach(), dim=-1,
                                           index=targets.unsqueeze(-1))  # [x, 1]
            # d(adm_loss)/d(margin) on the gold class is p(gold) - 1.
            margin_grad_cross = torch.exp(margin_log_grad) - \
                                torch.ones_like(margin_log_grad)
            if self.margin_norm == 'l1':
                margin_grad = margin_grad_cross - torch.ones_like(m) * self.margin_lambda
            else:
                # l2 norm
                margin_grad = margin_grad_cross - m * self.margin_lambda * 2.0
            margin_update = -1.0 * margin_grad * self.margin_lr
            # NOTE(review): .half() assumes the margins buffer is fp16 (e.g. the
            # module was .half()'ed); with an fp32 buffer scatter_add_ would
            # dtype-mismatch -- confirm against the training setup.
            self.margins.scatter_add_(0, targets.unsqueeze(-1), margin_update.half())

        # for logging below! margin_norm; normal loss
        margin_norm = torch.mean(self.margins) * sample['nsentences']# used for log!
        normal_loss = F.nll_loss(
            F.log_softmax(
                logits.view(-1, logits.size(-1)),
                dim=-1,
                dtype=torch.float32,
            ),
            targets.view(-1),
            reduction='sum',
            ignore_index=self.padding_idx,
        )
        logging_output = {
            'loss': utils.item(normal_loss.data) if reduce else normal_loss.data,
            'margin_n':utils.item(margin_norm.data) if reduce else margin_norm.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['nsentences'],
            'sample_size': sample_size,
            'admloss': utils.item(adm_loss.data) if reduce else adm_loss.data,
        }
        return adm_loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        admloss_sum = sum(log.get('admloss', 0) for log in logging_outputs)
        margin_n = sum(log.get('margin_n', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)

        # Losses are logged per-token in bits (divide by log 2); margin_n was
        # pre-scaled by nsentences in forward(), so dividing by nsentences
        # recovers the mean margin.
        metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
        metrics.log_scalar('admloss', admloss_sum / sample_size / math.log(2), sample_size, round=3)
        metrics.log_scalar('margin_norm', margin_n / nsentences, 32, round=3)
        metrics.log_derived('ppl', lambda meters: round(2**meters['loss'].avg, 3))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
| 37.660606 | 100 | 0.60251 |
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('masked_adlm')
class MaskedAdLmLoss(FairseqCriterion):
def __init__(self, args, task):
super(MaskedAdLmLoss, self).__init__(args, task)
self.vocab = self.task.source_dictionary
print(len(self.vocab.count))
self.register_buffer('margins', torch.zeros((len(self.vocab.count), 1)))
self.margins.requires_grad = False
self.margin_lambda = args.margin_lambda
self.margin_lr = args.margin_lr
self.margin_norm = args.margin_norm
@staticmethod
def add_args(parser):
super(MaskedAdLmLoss,
MaskedAdLmLoss).add_args(parser)
parser.add_argument('--margin_lambda', default=0.5, type=float, metavar='D',
help='weight for the adaptive margin loss')
parser.add_argument('--margin_lr', default=0.0001, type=float, metavar='D',
help='weight for the adaptive margin loss')
parser.add_argument('--margin-norm', default='l1', type=str,
help='Type of margin norm in the loss')
def forward(self, model, sample, reduce=True):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
if sample_size == 0:
masked_tokens = None
logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
targets = model.get_targets(sample, [logits])
if sample_size != 0:
targets = targets[masked_tokens]
one_hot = F.one_hot(targets, len(self.vocab.count))
m = F.embedding(targets, self.margins)
margin = m * one_hot
logits_minus_margin = logits - margin
log_softmax = F.log_softmax(
logits_minus_margin.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
adm_loss = F.nll_loss(
log_softmax,
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
)
with torch.no_grad():
margin_log_grad = torch.gather(log_softmax.detach(), dim=-1,
index=targets.unsqueeze(-1))
margin_grad_cross = torch.exp(margin_log_grad) - \
torch.ones_like(margin_log_grad)
if self.margin_norm == 'l1':
margin_grad = margin_grad_cross - torch.ones_like(m) * self.margin_lambda
else:
margin_grad = margin_grad_cross - m * self.margin_lambda * 2.0
margin_update = -1.0 * margin_grad * self.margin_lr
self.margins.scatter_add_(0, targets.unsqueeze(-1), margin_update.half())
margin_norm = torch.mean(self.margins) * sample['nsentences']
normal_loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
)
logging_output = {
'loss': utils.item(normal_loss.data) if reduce else normal_loss.data,
'margin_n':utils.item(margin_norm.data) if reduce else margin_norm.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'admloss': utils.item(adm_loss.data) if reduce else adm_loss.data,
}
return adm_loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
admloss_sum = sum(log.get('admloss', 0) for log in logging_outputs)
margin_n = sum(log.get('margin_n', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('admloss', admloss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('margin_norm', margin_n / nsentences, 32, round=3)
metrics.log_derived('ppl', lambda meters: round(2**meters['loss'].avg, 3))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
return True
| true | true |
f726783468013df7d862175c11e8671b1275bee8 | 1,929 | py | Python | campy/cameras/emu.py | Wolfffff/campy | 988e02fea70c1a14221ad3a9c350afa52cc9bcf5 | [
"MIT"
] | null | null | null | campy/cameras/emu.py | Wolfffff/campy | 988e02fea70c1a14221ad3a9c350afa52cc9bcf5 | [
"MIT"
] | null | null | null | campy/cameras/emu.py | Wolfffff/campy | 988e02fea70c1a14221ad3a9c350afa52cc9bcf5 | [
"MIT"
] | 1 | 2021-09-02T20:01:25.000Z | 2021-09-02T20:01:25.000Z | """
"""
from campy.cameras import unicam
import os
import time
import logging
import sys
import numpy as np
from collections import deque
import csv
import imageio
def LoadSystem(params):
    """Emulation has no real SDK; the 'system' handle is the camera make string."""
    make = params["cameraMake"]
    return make


def GetDeviceList(system):
    """Under emulation the 'device list' is simply the system handle itself."""
    return system


def LoadDevice(cam_params):
    """Look up the emulated device handle stored in the camera parameters."""
    device = cam_params["device"]
    return device


def GetSerialNumber(device):
    """The emulated device handle doubles as its serial number."""
    return device


def GetModelName(camera):
    """Every emulated camera reports the same fixed model name."""
    return "Emulated_Camera"
def OpenCamera(cam_params, device):
    """Open a pre-recorded video file to replay it as a fake camera feed.

    Returns `(reader, cam_params)` with `frameWidth`/`frameHeight` filled in
    from the video metadata.
    """
    # Open video reader for emulation
    # NOTE(review): the [3:] slice drops the first three characters of
    # "videoFilename" -- presumably a fixed prefix; confirm against the
    # config code that produces this value.
    videoFileName = cam_params["videoFilename"][3 : len(cam_params["videoFilename"])]
    full_file_name = os.path.join(
        cam_params["videoFolder"], cam_params["cameraName"], videoFileName
    )
    camera = imageio.get_reader(full_file_name)

    # Set features manually or automatically, depending on configuration
    frame_size = camera.get_meta_data()["size"]  # (width, height)
    cam_params["frameWidth"] = frame_size[0]
    cam_params["frameHeight"] = frame_size[1]

    print("Opened {} emulation.".format(cam_params["cameraName"]))
    return camera, cam_params
def LoadSettings(cam_params, camera):
    """Nothing to configure under emulation; parameters pass through unchanged."""
    return cam_params


def StartGrabbing(camera):
    """Pretend acquisition started successfully; the reader is always ready."""
    return True


def GrabFrame(camera, frameNumber):
    """Fetch frame `frameNumber` from the backing video reader."""
    frame = camera.get_data(frameNumber)
    return frame


def GetImageArray(grabResult):
    """The emulated grab result already is the image array."""
    return grabResult


def GetTimeStamp(grabResult):
    """Stamp frames with the host's high-resolution clock (seconds, float)."""
    return time.perf_counter()
def DisplayImage(cam_params, dispQueue, grabResult):
    """Downsample the grabbed frame and push it onto the display queue.

    Args:
        cam_params: dict holding the integer "displayDownsample" stride.
        dispQueue: queue/deque-like object with an `append` method.
        grabResult: image array, (H, W) mono or (H, W, C) color.
    """
    step = cam_params["displayDownsample"]
    # Slice only the two spatial axes so both mono (H, W) and color
    # (H, W, C) frames work; the previous `[::step, ::step, :]` form
    # required exactly three axes and failed on grayscale frames.
    img = grabResult[::step, ::step]
    # Send to display queue
    dispQueue.append(img)
def ReleaseFrame(grabResult):
    """Drop the local reference; there is no driver buffer to hand back."""
    del grabResult


def CloseCamera(cam_params, camera):
    """Announce shutdown and drop the reader reference."""
    print("Closing {}... Please wait.".format(cam_params["cameraName"]))
    # Close camera after acquisition stops
    del camera


def CloseSystem(system, device_list):
    """Release the emulated system and device-list handles."""
    del system
    del device_list
| 18.028037 | 85 | 0.718507 |
from campy.cameras import unicam
import os
import time
import logging
import sys
import numpy as np
from collections import deque
import csv
import imageio
def LoadSystem(params):
return params["cameraMake"]
def GetDeviceList(system):
return system
def LoadDevice(cam_params):
return cam_params["device"]
def GetSerialNumber(device):
return device
def GetModelName(camera):
return "Emulated_Camera"
def OpenCamera(cam_params, device):
videoFileName = cam_params["videoFilename"][3 : len(cam_params["videoFilename"])]
full_file_name = os.path.join(
cam_params["videoFolder"], cam_params["cameraName"], videoFileName
)
camera = imageio.get_reader(full_file_name)
frame_size = camera.get_meta_data()["size"]
cam_params["frameWidth"] = frame_size[0]
cam_params["frameHeight"] = frame_size[1]
print("Opened {} emulation.".format(cam_params["cameraName"]))
return camera, cam_params
def LoadSettings(cam_params, camera):
return cam_params
def StartGrabbing(camera):
return True
def GrabFrame(camera, frameNumber):
return camera.get_data(frameNumber)
def GetImageArray(grabResult):
return grabResult
def GetTimeStamp(grabResult):
return time.perf_counter()
def DisplayImage(cam_params, dispQueue, grabResult):
img = grabResult[
:: cam_params["displayDownsample"], :: cam_params["displayDownsample"], :
]
dispQueue.append(img)
def ReleaseFrame(grabResult):
del grabResult
def CloseCamera(cam_params, camera):
print("Closing {}... Please wait.".format(cam_params["cameraName"]))
del camera
def CloseSystem(system, device_list):
del system
del device_list
| true | true |
f72678c8ed7735a73d4972ede696b84b944d05d8 | 5,881 | py | Python | src/odm_report_shot_coverage/models/reconstruction.py | terra-submersa/opensfm-camera-coverage | a9ad2bff799a5d0d07d7900fc7d1bf10bc489632 | [
"CNRI-Python"
] | null | null | null | src/odm_report_shot_coverage/models/reconstruction.py | terra-submersa/opensfm-camera-coverage | a9ad2bff799a5d0d07d7900fc7d1bf10bc489632 | [
"CNRI-Python"
] | null | null | null | src/odm_report_shot_coverage/models/reconstruction.py | terra-submersa/opensfm-camera-coverage | a9ad2bff799a5d0d07d7900fc7d1bf10bc489632 | [
"CNRI-Python"
] | null | null | null | import json
import logging
import geojson
import numpy as np
from tqdm import tqdm
from scipy import stats
from odm_report_shot_coverage.models.camera import Camera, json_parse_camera
from odm_report_shot_coverage.models.shot import Shot, shot_boundaries_from_points, Boundaries
from odm_report_shot_coverage.models.wavefront_25d import Wavefront25D, parse_wavefront_25d_obj
class Reconstruction:
    """One ODM reconstruction: camera models, shots (image + pose), the 2.5D
    mesh and the orthophoto boundaries."""

    def __init__(self):
        # Containers are created per instance. The previous version declared
        # them as class-level defaults ({} and []), so every Reconstruction
        # instance shared the same cameras dict and shots list (classic
        # mutable-class-attribute bug).
        self.cameras: 'dict[str, Camera]' = {}
        self._shots: 'list[Shot]' = []
        # Kept for compatibility with the original class body, which bound
        # the Wavefront25D *class* as the default (likely intended as an
        # annotation); parsing code overwrites it with a real instance.
        self.mesh = Wavefront25D

    # orthophoto_boundaries: Boundaries -- set by the parser before to_json().
    orthophoto_boundaries: Boundaries

    @property
    def shots(self) -> 'list[Shot]':
        """Shots sorted by image name (sorted in place on each access)."""
        self._shots.sort(key=lambda s: s.image_name)
        return self._shots

    def add_camera(self, name: str, camera: Camera):
        """Register a camera model under its ODM name."""
        self.cameras[name] = camera

    def add_shot(self, shot: Shot):
        """Append one shot; ordering is handled lazily by the `shots` property."""
        self._shots.append(shot)

    def to_json(self) -> dict:
        """JSON-serializable summary (cameras, shots, mesh/orthophoto bounds)."""
        return {
            'cameras': {n: c.to_json() for n, c in self.cameras.items()},
            'shots': [s.to_json() for s in self.shots],
            'boundaries': self.mesh.boundaries.to_json(),
            'orthophotoBoundaries': self.orthophoto_boundaries.to_json(),
        }

    def compute_shot_boundaries(self):
        """
        From shots and points, fill the shot_boundaries
        :rtype: None
        """
        for shot in tqdm(self.shots, desc='Computing shot boundaries'):
            # Keep every mesh point whose projection lands inside the frame.
            points = [
                point
                for point in self.mesh.points
                if shot.camera.in_frame(shot.camera_pixel(point))
            ]
            shot.boundaries = shot_boundaries_from_points(points)

    def find_camera_by_width_height(self, width: int, height: int) -> Camera:
        """Return the unique camera with the given frame size; raise otherwise."""
        cs = [c for c in self.cameras.values() if c.width == width and c.height == height]
        if len(cs) != 1:
            raise Exception('Not exactly one camera found with size %s x %s' % (width, height))
        return cs[0]
class ReconstructionCollection:
    """Ordered, indexable collection of Reconstruction objects."""

    def __init__(self):
        # Per-instance list. The previous class-level default ([]) was shared
        # by every ReconstructionCollection instance, so appending to one
        # collection silently grew all of them.
        self.reconstructions: 'list[Reconstruction]' = []

    def append(self, reconstruction: 'Reconstruction'):
        """Add one reconstruction to the end of the collection."""
        self.reconstructions.append(reconstruction)

    def __getitem__(self, i: int):
        return self.reconstructions[i]

    def __len__(self):
        return len(self.reconstructions)
def lin_reg(pairs: 'list[(float, float)]') -> (float, float, float, float):
    """Least-squares line fit through (x, y) pairs via scipy.stats.linregress.

    Returns the linregress result (slope, intercept, rvalue, pvalue, stderr).
    """
    xs = [pt[0] for pt in pairs]
    ys = [pt[1] for pt in pairs]
    return stats.linregress(xs, ys)
def _parse_point_cloud_boundaries(path: str) -> Boundaries:
    """Read the native-coordinate point-cloud bounding box from ODM's
    `odm_report/stats.json` under the project `path`."""
    with open('%s/odm_report/stats.json' % path, 'r') as fd:
        stats_json = json.load(fd)
    # ODM nests the native bbox under point_cloud_statistics/stats/bbox/native.
    bbox = stats_json['point_cloud_statistics']['stats']['bbox']['native']['bbox']
    return Boundaries(
        x_min=bbox['minx'],
        x_max=bbox['maxx'],
        y_min=bbox['miny'],
        y_max=bbox['maxy'],
        z_min=bbox['minz'],
        z_max=bbox['maxz'],
    )
def _parse_camera_shotgeojson(path: str, reconstruction: Reconstruction, native_to_25d_coordinates):
    """Populate *reconstruction* with cameras from cameras.json and shots from shots.geojson.

    :param path: ODM project root directory.
    :param reconstruction: target Reconstruction, filled in place.
    :param native_to_25d_coordinates: (tr_x, tr_y, tr_z) per-axis translation
        callables mapping native CRS coordinates into the 2.5D model frame.
    """
    with open('%s/cameras.json' % path, 'r') as fd:
        cameras_json = json.load(fd)
    for n, j in cameras_json.items():
        camera = json_parse_camera(n, j)
        reconstruction.add_camera(n, camera)
    (tr_x, tr_y, tr_z) = native_to_25d_coordinates
    with open('%s/odm_report/shots.geojson' % path, 'r') as fd:
        shots_geojson = geojson.load(fd)
    for feat in shots_geojson['features']:
        shot = Shot()
        props = feat['properties']
        # Shots carry no camera id here; match the camera by sensor resolution.
        shot.camera = reconstruction.find_camera_by_width_height(props['width'], props['height'])
        shot.image_name = props['filename']
        translation = props['translation']
        # Shift the native-CRS camera position into the 2.5D model frame.
        shot.translation = (tr_x(translation[0]), tr_y(translation[1]), tr_z(translation[2]))
        shot.rotation = props['rotation']
        reconstruction.add_shot(shot)
def _native_to_model_25d_coordinates(native_boundaries: Boundaries, model_25d_boundaries: Boundaries):
    """Build per-axis translations from native CRS coordinates to the 2.5D model frame.

    Both frames are assumed to differ only by a translation; each returned
    callable re-centers one coordinate around the 2.5D bounding box center.
    The relative size mismatch between the two boxes is logged as a sanity
    check (large values hint at more than a pure translation).

    :param native_boundaries: bounding box of the point cloud in the native CRS.
    :param model_25d_boundaries: bounding box of the 2.5D textured model.
    :return: (tr_x, tr_y, tr_z) translation functions, one per axis.
    """
    width_25d = model_25d_boundaries.x_max - model_25d_boundaries.x_min
    height_25d = model_25d_boundaries.y_max - model_25d_boundaries.y_min
    # Bug fix: elevation must compare the z extents (was copy-pasted from y).
    elevation_25d = model_25d_boundaries.z_max - model_25d_boundaries.z_min
    width_native = native_boundaries.x_max - native_boundaries.x_min
    height_native = native_boundaries.y_max - native_boundaries.y_min
    elevation_native = native_boundaries.z_max - native_boundaries.z_min
    width_ratio = np.abs(1 - width_native / width_25d)
    height_ratio = np.abs(1 - height_native / height_25d)
    elevation_ratio = np.abs(1 - elevation_native / elevation_25d)
    logging.info(
        'native/25d model boundaries discrepancies width=%.2f%% height=%.2f%% elevation=%.2f%%' % (
            width_ratio * 100, height_ratio * 100, elevation_ratio * 100))
    return (
        lambda x: (x - (native_boundaries.x_max + native_boundaries.x_min) / 2) + (
                model_25d_boundaries.x_max + model_25d_boundaries.x_min) / 2,
        lambda y: (y - (native_boundaries.y_max + native_boundaries.y_min) / 2) + (
                model_25d_boundaries.y_max + model_25d_boundaries.y_min) / 2,
        lambda z: (z - (native_boundaries.z_max + native_boundaries.z_min) / 2) + (
                model_25d_boundaries.z_max + model_25d_boundaries.z_min) / 2
    )
def parse_reconstruction(path: str) -> Reconstruction:
    """Assemble a Reconstruction from the ODM project directory at *path*."""
    result = Reconstruction()
    # The 2.5D textured mesh provides both the geometry and the orthophoto extent.
    mesh = parse_wavefront_25d_obj('%s/odm_texturing_25d/odm_textured_model_geo.obj' % path)
    result.mesh = mesh
    result.orthophoto_boundaries = mesh.boundaries
    native_boundaries = _parse_point_cloud_boundaries(path)
    to_model_frame = _native_to_model_25d_coordinates(native_boundaries, mesh.boundaries)
    _parse_camera_shotgeojson(path, result, to_model_frame)
    return result
| 38.94702 | 102 | 0.660602 | import json
import logging
import geojson
import numpy as np
from tqdm import tqdm
from scipy import stats
from odm_report_shot_coverage.models.camera import Camera, json_parse_camera
from odm_report_shot_coverage.models.shot import Shot, shot_boundaries_from_points, Boundaries
from odm_report_shot_coverage.models.wavefront_25d import Wavefront25D, parse_wavefront_25d_obj
class Reconstruction:
cameras: 'dict[str, Camera]' = {}
_shots: 'list[Shot]' = []
mesh = Wavefront25D
orthophoto_boundaries: Boundaries
@property
def shots(self) -> 'list[Shot]':
self._shots.sort(key=lambda s: s.image_name)
return self._shots
def add_camera(self, name: str, camera: Camera):
self.cameras[name] = camera
def add_shot(self, shot: Shot):
self._shots.append(shot)
def to_json(self) -> dict:
return {
'cameras': {n: c.to_json() for n, c in self.cameras.items()},
'shots': [s.to_json() for s in self.shots],
'boundaries': self.mesh.boundaries.to_json(),
'orthophotoBoundaries': self.orthophoto_boundaries.to_json(),
}
def compute_shot_boundaries(self):
for shot in tqdm(self.shots, desc='Computing shot boundaries'):
points = []
for i, point in enumerate(self.mesh.points):
pixel = shot.camera_pixel(point)
if shot.camera.in_frame(pixel):
points.append(point)
shot.boundaries = shot_boundaries_from_points(points)
def find_camera_by_width_height(self, width: int, height: int) -> Camera:
cs = [c for c in self.cameras.values() if c.width == width and c.height == height]
if len(cs) != 1:
raise Exception('Not exactly one camera found with size %s x %s' % (width, height))
return cs[0]
class ReconstructionCollection:
reconstructions: 'list[Reconstruction]' = []
def append(self, reconstruction: Reconstruction):
self.reconstructions.append(reconstruction)
def __getitem__(self, i: int):
return self.reconstructions[i]
def __len__(self):
return len(self.reconstructions)
def lin_reg(pairs: 'list[(float, float)]') -> (float, float, float, float):
x = [p[0] for p in pairs]
y = [p[1] for p in pairs]
return stats.linregress(x, y)
def _parse_point_cloud_boundaries(path: str) -> Boundaries:
with open('%s/odm_report/stats.json' % path, 'r') as fd:
stats_json = json.load(fd)
bbox = stats_json['point_cloud_statistics']['stats']['bbox']['native']['bbox']
return Boundaries(
x_min=bbox['minx'],
x_max=bbox['maxx'],
y_min=bbox['miny'],
y_max=bbox['maxy'],
z_min=bbox['minz'],
z_max=bbox['maxz'],
)
def _parse_camera_shotgeojson(path: str, reconstruction: Reconstruction, native_to_25d_coordinates):
with open('%s/cameras.json' % path, 'r') as fd:
cameras_json = json.load(fd)
for n, j in cameras_json.items():
camera = json_parse_camera(n, j)
reconstruction.add_camera(n, camera)
(tr_x, tr_y, tr_z) = native_to_25d_coordinates
with open('%s/odm_report/shots.geojson' % path, 'r') as fd:
shots_geojson = geojson.load(fd)
for feat in shots_geojson['features']:
shot = Shot()
props = feat['properties']
shot.camera = reconstruction.find_camera_by_width_height(props['width'], props['height'])
shot.image_name = props['filename']
translation = props['translation']
shot.translation = (tr_x(translation[0]), tr_y(translation[1]), tr_z(translation[2]))
shot.rotation = props['rotation']
reconstruction.add_shot(shot)
def _native_to_model_25d_coordinates(native_boundaries: Boundaries, model_25d_boundaries: Boundaries):
width_25d = model_25d_boundaries.x_max - model_25d_boundaries.x_min
height_25d = model_25d_boundaries.y_max - model_25d_boundaries.y_min
elevation_25d = model_25d_boundaries.y_max - model_25d_boundaries.y_min
width_native = native_boundaries.x_max - native_boundaries.x_min
height_native = native_boundaries.y_max - native_boundaries.y_min
elevation_native = native_boundaries.y_max - native_boundaries.y_min
width_ratio = np.abs(1 - width_native / width_25d)
height_ratio = np.abs(1 - height_native / height_25d)
elevation_ratio = np.abs(1 - elevation_native / elevation_25d)
logging.info(
'native/25d model boundaries discrepancies width=%.2f%% height=%.2f%% elevation=%.2f%%' % (
width_ratio * 100, height_ratio * 100, elevation_ratio * 100))
return (
lambda x: (x - (native_boundaries.x_max + native_boundaries.x_min) / 2) + (
model_25d_boundaries.x_max + model_25d_boundaries.x_min) / 2,
lambda y: (y - (native_boundaries.y_max + native_boundaries.y_min) / 2) + (
model_25d_boundaries.y_max + model_25d_boundaries.y_min) / 2,
lambda z: (z - (native_boundaries.z_max + native_boundaries.z_min) / 2) + (
model_25d_boundaries.z_max + model_25d_boundaries.z_min) / 2
)
def parse_reconstruction(path: str) -> Reconstruction:
reconstruction = Reconstruction()
wf = parse_wavefront_25d_obj('%s/odm_texturing_25d/odm_textured_model_geo.obj' % path)
reconstruction.mesh = wf
reconstruction.orthophoto_boundaries = wf.boundaries
native_boundaries = _parse_point_cloud_boundaries(path)
_parse_camera_shotgeojson(path, reconstruction,
_native_to_model_25d_coordinates(native_boundaries, wf.boundaries))
return reconstruction
| true | true |
f72678e2fc3806ded2ce42f11e552a9f29cbd3ae | 3,757 | py | Python | demo.py | SeongSuKim95/ReID-Baseline-swin | f30db86eb2690c20c4fbb0189eb52b57358705df | [
"MIT"
] | null | null | null | demo.py | SeongSuKim95/ReID-Baseline-swin | f30db86eb2690c20c4fbb0189eb52b57358705df | [
"MIT"
] | null | null | null | demo.py | SeongSuKim95/ReID-Baseline-swin | f30db86eb2690c20c4fbb0189eb52b57358705df | [
"MIT"
] | 1 | 2022-03-18T19:09:47.000Z | 2022-03-18T19:09:47.000Z | import argparse
import scipy.io
import torch
import numpy as np
import os
from torchvision import datasets
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
#######################################################################
# Evaluate
# Command-line options: which query image to rank, and where the prepared
# Market-1501-style dataset lives (expects 'gallery' and 'query' subfolders).
parser = argparse.ArgumentParser(description='Demo')
parser.add_argument('--query_index', default=777, type=int, help='test_image_index')
parser.add_argument('--test_dir',default='/mnt/hdd_data/Dataset/market1501_ss/pytorch',type=str, help='./test_data')
opts = parser.parse_args()
data_dir = opts.test_dir
# ImageFolder only provides file paths / labels here; features come from the .mat files below.
image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ) for x in ['gallery','query']}
#####################################################################
#Show result
def imshow(path, title=None):
    """Render the image file at *path* on the current pyplot axes."""
    image = plt.imread(path)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    # Brief pause so the figure actually refreshes in interactive backends.
    plt.pause(0.001)
######################################################################
# Load pre-extracted re-ID features and metadata produced by the test script.
# Arrays are stored as 1xN matrices in the .mat file, hence the [0] indexing.
result = scipy.io.loadmat('pytorch_result.mat')
query_feature = torch.FloatTensor(result['query_f'])
query_cam = result['query_cam'][0]
query_label = result['query_label'][0]
gallery_feature = torch.FloatTensor(result['gallery_f'])
gallery_cam = result['gallery_cam'][0]
gallery_label = result['gallery_label'][0]
# Optional multi-query features (only present when multi-query eval was run).
multi = os.path.isfile('multi_query.mat')
if multi:
    m_result = scipy.io.loadmat('multi_query.mat')
    mquery_feature = torch.FloatTensor(m_result['mquery_f'])
    mquery_cam = m_result['mquery_cam'][0]
    mquery_label = m_result['mquery_label'][0]
    mquery_feature = mquery_feature.cuda()
# Ranking below runs on GPU; assumes CUDA is available.
query_feature = query_feature.cuda()
gallery_feature = gallery_feature.cuda()
# sort the images
def sort_img(qf, ql, qc, gf, gl, gc):
    """Rank gallery images for one query by dot-product similarity.

    Gallery entries that are junk (label -1) or that share both label and
    camera with the query are removed from the returned ranking.

    :param qf: query feature vector; ql/qc: query label and camera id.
    :param gf: gallery feature matrix; gl/gc: gallery label and camera arrays.
    :return: numpy array of gallery indices, best match first.
    """
    # Similarity of every gallery feature against the query (higher = closer).
    similarity = torch.mm(gf, qf.view(-1, 1)).squeeze(1).cpu().numpy()
    # Gallery indices ordered from best to worst match.
    ranking = np.argsort(similarity)[::-1]
    same_label = np.argwhere(gl == ql)
    same_camera = np.argwhere(gc == qc)
    distractors = np.argwhere(gl == -1)
    # Same person seen by the same camera is junk, as are distractor images.
    junk = np.append(np.intersect1d(same_label, same_camera), distractors)
    keep = np.in1d(ranking, junk, invert=True)
    return ranking[keep]
# Rank the whole gallery against the selected query image.
i = opts.query_index
index = sort_img(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)
########################################################################
# Visualize the rank result
# Show the query plus its top-10 ranked gallery matches, then save the figure.
query_path, _ = image_datasets['query'].imgs[i]
query_label = query_label[i]
print(query_path)
print('Top 10 images are as follow:')
try:  # Visualize Ranking Result
    # Graphical User Interface is needed
    fig = plt.figure(figsize=(16, 4))
    ax = plt.subplot(1, 11, 1)
    ax.axis('off')
    imshow(query_path, 'query')
    for i in range(10):
        ax = plt.subplot(1, 11, i + 2)
        ax.axis('off')
        # ImageFolder.imgs entries are (file_path, class_index) pairs.
        img_path, _ = image_datasets['gallery'].imgs[index[i]]
        label = gallery_label[index[i]]
        imshow(img_path)
        # Green caption for a correct match, red for a mismatch.
        if label == query_label:
            ax.set_title('%d' % (i + 1), color='green')
        else:
            ax.set_title('%d' % (i + 1), color='red')
        print(img_path)
except RuntimeError:
    for i in range(10):
        # Bug fix: image_datasets is a dict keyed by split name and has no
        # .imgs attribute itself; look up the gallery dataset explicitly.
        img_path = image_datasets['gallery'].imgs[index[i]]
        print(img_path[0])
    print('If you want to see the visualization of the ranking result, graphical user interface is needed.')
fig.savefig("show.png")
| 33.846847 | 116 | 0.625233 | import argparse
import scipy.io
import torch
import numpy as np
import os
from torchvision import datasets
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
| true | true |
f72678eeeaa3ab01da38b3b2d287901a2acbed5a | 512 | py | Python | week_2/tests/lab8_p2.py | brown-ccv/workshop-python-2020 | e522527a077f68c4a0b11da9eb615a9f57d21b6d | [
"MIT"
] | 2 | 2020-11-20T07:43:40.000Z | 2021-05-14T14:40:41.000Z | week_2/tests/lab8_p2.py | brown-ccv/workshop-python-2020 | e522527a077f68c4a0b11da9eb615a9f57d21b6d | [
"MIT"
] | null | null | null | week_2/tests/lab8_p2.py | brown-ccv/workshop-python-2020 | e522527a077f68c4a0b11da9eb615a9f57d21b6d | [
"MIT"
] | 2 | 2020-06-18T20:35:40.000Z | 2020-09-27T02:54:31.000Z | test = {
'name': '8.2',
'suites': [
{
'cases': [
{
'code': r"""
>>> # It looks like one of your variables is not named
>>> # correctly. Maybe there's a typo?
>>> 'avg_flt' in vars()
True
"""
},
{
'code': r"""
>>> # It looks like your function returns the wrong result.
>>> # Check your function.
>>> avg_flt == 2.0
True
"""
},
]
}
]
} | 20.48 | 69 | 0.363281 | test = {
'name': '8.2',
'suites': [
{
'cases': [
{
'code': r"""
>>> # It looks like one of your variables is not named
>>> # correctly. Maybe there's a typo?
>>> 'avg_flt' in vars()
True
"""
},
{
'code': r"""
>>> # It looks like your function returns the wrong result.
>>> # Check your function.
>>> avg_flt == 2.0
True
"""
},
]
}
]
} | true | true |
f726790477f564ff202bf60ffff1010f2a6df250 | 3,687 | py | Python | rlkit/envs/point_robot.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 381 | 2019-03-19T22:55:14.000Z | 2022-03-26T18:56:17.000Z | rlkit/envs/point_robot.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 27 | 2019-04-30T04:04:51.000Z | 2022-03-03T18:20:11.000Z | rlkit/envs/point_robot.py | erinaldi/MetaRL | 6dfb8d2e63a1802ca7ef9c28f6ab1a758d07f871 | [
"MIT"
] | 107 | 2019-03-22T06:09:24.000Z | 2022-01-30T13:34:38.000Z | import numpy as np
from gym import spaces
from gym import Env
from . import register_env
@register_env('point-robot')
class PointEnv(Env):
    """
    Point robot on a 2-D plane with position control.

    Tasks (aka goals) are positions on the plane:
     - tasks sampled from the unit square (or hand-coded when not randomized)
     - reward is negative L2 distance to the goal
    """
    def __init__(self, randomize_tasks=False, n_tasks=2):
        if randomize_tasks:
            # Fixed seed so every run samples the same goal set.
            np.random.seed(1337)
            goals = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)] for _ in range(n_tasks)]
        else:
            # some hand-coded goals for debugging
            goals = [np.array([10, -10]),
                     np.array([10, 10]),
                     np.array([-10, 10]),
                     np.array([-10, -10]),
                     np.array([0, 0]),
                     np.array([7, 2]),
                     np.array([0, 4]),
                     np.array([-6, 9])
                     ]
            # Scale the debug goals into the unit square.
            goals = [g / 10. for g in goals]
        self.goals = goals
        self.reset_task(0)
        # Unbounded 2-D observation (position); actions are small position deltas.
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(2,))
        self.action_space = spaces.Box(low=-0.1, high=0.1, shape=(2,))

    def reset_task(self, idx):
        ''' reset goal AND reset the agent '''
        self._goal = self.goals[idx]
        self.reset()

    def get_all_task_idx(self):
        """Indices of all available tasks."""
        return range(len(self.goals))

    def reset_model(self):
        # reset to a random location on the unit square
        self._state = np.random.uniform(-1., 1., size=(2,))
        return self._get_obs()

    def reset(self):
        return self.reset_model()

    def _get_obs(self):
        # Copy so callers cannot mutate internal state through the observation.
        return np.copy(self._state)

    def step(self, action):
        """Apply the position delta *action*; reward is -distance to the goal."""
        self._state = self._state + action
        x, y = self._state
        x -= self._goal[0]
        y -= self._goal[1]
        reward = - (x ** 2 + y ** 2) ** 0.5
        done = False  # episodes never terminate from inside the environment
        ob = self._get_obs()
        return ob, reward, done, dict()

    def viewer_setup(self):
        print('no viewer')
        pass

    def render(self):
        print('current state:', self._state)
@register_env('sparse-point-robot')
class SparsePointEnv(PointEnv):
    '''
    Point robot with sparse reward:
     - tasks sampled from unit half-circle
     - reward is L2 distance given only within goal radius

    NOTE that `step()` returns the dense reward because this is used during meta-training
    the algorithm should call `sparsify_rewards()` to get the sparse rewards
    '''
    def __init__(self, randomize_tasks=False, n_tasks=2, goal_radius=0.2):
        super().__init__(randomize_tasks, n_tasks)
        self.goal_radius = goal_radius
        # Goals are always re-sampled on the unit half-circle below, overriding
        # the parent's goals; the seed only fixes the shuffle order.
        if randomize_tasks:
            np.random.seed(1337)
        radius = 1.0
        angles = np.linspace(0, np.pi, num=n_tasks)
        xs = radius * np.cos(angles)
        ys = radius * np.sin(angles)
        goals = np.stack([xs, ys], axis=1)
        np.random.shuffle(goals)
        goals = goals.tolist()
        self.goals = goals
        self.reset_task(0)

    def sparsify_rewards(self, r):
        ''' zero out rewards when outside the goal radius '''
        mask = (r >= -self.goal_radius).astype(np.float32)
        r = r * mask
        return r

    def reset_model(self):
        # Always start at the origin (center of the half-circle of goals).
        self._state = np.array([0, 0])
        return self._get_obs()

    def step(self, action):
        ob, reward, done, d = super().step(action)
        sparse_reward = self.sparsify_rewards(reward)
        # make sparse rewards positive
        if reward >= -self.goal_radius:
            sparse_reward += 1
        d.update({'sparse_reward': sparse_reward})
        return ob, reward, done, d
from gym import spaces
from gym import Env
from . import register_env
@register_env('point-robot')
class PointEnv(Env):
def __init__(self, randomize_tasks=False, n_tasks=2):
if randomize_tasks:
np.random.seed(1337)
goals = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)] for _ in range(n_tasks)]
else:
goals = [np.array([10, -10]),
np.array([10, 10]),
np.array([-10, 10]),
np.array([-10, -10]),
np.array([0, 0]),
np.array([7, 2]),
np.array([0, 4]),
np.array([-6, 9])
]
goals = [g / 10. for g in goals]
self.goals = goals
self.reset_task(0)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(2,))
self.action_space = spaces.Box(low=-0.1, high=0.1, shape=(2,))
def reset_task(self, idx):
self._goal = self.goals[idx]
self.reset()
def get_all_task_idx(self):
return range(len(self.goals))
def reset_model(self):
self._state = np.random.uniform(-1., 1., size=(2,))
return self._get_obs()
def reset(self):
return self.reset_model()
def _get_obs(self):
return np.copy(self._state)
def step(self, action):
self._state = self._state + action
x, y = self._state
x -= self._goal[0]
y -= self._goal[1]
reward = - (x ** 2 + y ** 2) ** 0.5
done = False
ob = self._get_obs()
return ob, reward, done, dict()
def viewer_setup(self):
print('no viewer')
pass
def render(self):
print('current state:', self._state)
@register_env('sparse-point-robot')
class SparsePointEnv(PointEnv):
def __init__(self, randomize_tasks=False, n_tasks=2, goal_radius=0.2):
super().__init__(randomize_tasks, n_tasks)
self.goal_radius = goal_radius
if randomize_tasks:
np.random.seed(1337)
radius = 1.0
angles = np.linspace(0, np.pi, num=n_tasks)
xs = radius * np.cos(angles)
ys = radius * np.sin(angles)
goals = np.stack([xs, ys], axis=1)
np.random.shuffle(goals)
goals = goals.tolist()
self.goals = goals
self.reset_task(0)
def sparsify_rewards(self, r):
mask = (r >= -self.goal_radius).astype(np.float32)
r = r * mask
return r
def reset_model(self):
self._state = np.array([0, 0])
return self._get_obs()
def step(self, action):
ob, reward, done, d = super().step(action)
sparse_reward = self.sparsify_rewards(reward)
if reward >= -self.goal_radius:
sparse_reward += 1
d.update({'sparse_reward': sparse_reward})
return ob, reward, done, d
| true | true |
f726796f43413d696b184ac1361a1c85145f966b | 217 | py | Python | vispy/gloo/gl/ext.py | mssurajkaiga/vispy-experiments | 0f3a19e0f4ac46608da792cbd36ebe59b036bce7 | [
"BSD-3-Clause"
] | 1 | 2017-06-12T16:24:11.000Z | 2017-06-12T16:24:11.000Z | vispy/gloo/gl/ext.py | mssurajkaiga/vispy-experiments | 0f3a19e0f4ac46608da792cbd36ebe59b036bce7 | [
"BSD-3-Clause"
] | null | null | null | vispy/gloo/gl/ext.py | mssurajkaiga/vispy-experiments | 0f3a19e0f4ac46608da792cbd36ebe59b036bce7 | [
"BSD-3-Clause"
] | null | null | null | """ Namespace for functions and constants corresponding to
OpenGL ES 2.0 extensions.
"""
from __future__ import division
from ._constants_ext import * # noqa
# Filled with functions when vispy.gloo.gl is imported
| 21.7 | 58 | 0.774194 |
from __future__ import division
from ._constants_ext import *
| true | true |
f72679f63807e54ba5c5c8c8ffd1b689e7012f08 | 297 | py | Python | lab/logger/destinations/__init__.py | gear/lab | ad1c5838acbcc98abb5d5d93d5c7a6c2b74bdfa2 | [
"MIT"
] | null | null | null | lab/logger/destinations/__init__.py | gear/lab | ad1c5838acbcc98abb5d5d93d5c7a6c2b74bdfa2 | [
"MIT"
] | null | null | null | lab/logger/destinations/__init__.py | gear/lab | ad1c5838acbcc98abb5d5d93d5c7a6c2b74bdfa2 | [
"MIT"
] | null | null | null | from typing import List, Union, Tuple
from lab.logger.colors import StyleCode
class Destination:
    """Abstract base for log output destinations (e.g. console, file)."""

    def log(self, parts: List[Union[str, Tuple[str, StyleCode]]], *,
            is_new_line=True):
        """Write *parts* — plain strings or (text, style) pairs — to the sink.

        :param is_new_line: whether this call starts on a fresh line.
        """
        raise NotImplementedError()

    def new_line(self):
        """Advance the destination to the next output line."""
        raise NotImplementedError()
| 22.846154 | 68 | 0.680135 | from typing import List, Union, Tuple
from lab.logger.colors import StyleCode
class Destination:
def log(self, parts: List[Union[str, Tuple[str, StyleCode]]], *,
is_new_line=True):
raise NotImplementedError()
def new_line(self):
raise NotImplementedError()
| true | true |
f7267a892bdf070ee3c7619d0af334682563f6a0 | 3,432 | py | Python | simulation_3.py | ballcarsen/Networks_3_Forwarding | af211612a097db15a1605311c5c537e8bd279b12 | [
"Apache-2.0"
] | null | null | null | simulation_3.py | ballcarsen/Networks_3_Forwarding | af211612a097db15a1605311c5c537e8bd279b12 | [
"Apache-2.0"
] | null | null | null | simulation_3.py | ballcarsen/Networks_3_Forwarding | af211612a097db15a1605311c5c537e8bd279b12 | [
"Apache-2.0"
] | null | null | null | '''
Created on Oct 12, 2016
@author: mwittie
'''
import network_3
import link_3
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 # 0 means unlimited
simulation_time = 1 # give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
    # Static forwarding tables: router name -> {address: outgoing interface}.
    # NOTE(review): router_a's keys are "1"/"2" (the source hosts) rather than
    # the destinations 3/4 — confirm against network_3.Router's table lookup.
    routing_dict = {'router_a': {"1" : 0, "2": 1}, 'router_d': {"3" : 0, "4": 1}}
    object_L = [] # keeps track of objects, so we can kill their threads
    # create network nodes
    host_1 = network_3.Host(1)
    object_L.append(host_1)
    host_2 = network_3.Host(2)
    object_L.append(host_2)
    host_3 = network_3.Host(3)
    object_L.append(host_3)
    host_4 = network_3.Host(4)
    object_L.append(host_4)
    # Routers a and d have two interfaces (fork/join points); b and c relay on one.
    router_a = network_3.Router(routing_dict,name='router_a', intf_count=2, max_queue_size=router_queue_size)
    object_L.append(router_a)
    router_b = network_3.Router(routing_dict, name='router_b', intf_count=1, max_queue_size=router_queue_size)
    object_L.append(router_b)
    router_c = network_3.Router(routing_dict, name='router_c', intf_count=1, max_queue_size=router_queue_size)
    object_L.append(router_c)
    router_d = network_3.Router(routing_dict, name='router_d', intf_count=2, max_queue_size=router_queue_size)
    object_L.append(router_d)
    # create a Link Layer to keep track of links between network nodes
    link_layer = link_3.LinkLayer()
    object_L.append(link_layer)
    # add all the links
    # link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
    # Topology: hosts 1,2 -> router_a -> (router_b | router_c) -> router_d -> hosts 3,4.
    link_layer.add_link(link_3.Link(host_1, 0, router_a, 0, 50))
    link_layer.add_link(link_3.Link(host_2, 0, router_a, 0, 50))
    link_layer.add_link(link_3.Link(router_a, 0,router_b, 0, 50))
    link_layer.add_link(link_3.Link(router_a, 1, router_c, 0, 50))
    link_layer.add_link(link_3.Link(router_b, 0, router_d, 0, 50))
    link_layer.add_link(link_3.Link(router_c, 0, router_d, 0, 50))
    link_layer.add_link(link_3.Link(router_d, 0, host_3, 0, 50))
    link_layer.add_link(link_3.Link(router_d, 1, host_4, 0, 50))
    # start all the objects
    thread_L = []
    thread_L.append(threading.Thread(name=host_1.__str__(), target=host_1.run))
    thread_L.append(threading.Thread(name=host_2.__str__(), target=host_2.run))
    thread_L.append(threading.Thread(name=host_3.__str__(), target=host_3.run))
    thread_L.append(threading.Thread(name=host_4.__str__(), target=host_4.run))
    thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
    thread_L.append(threading.Thread(name=router_b.__str__(), target=router_b.run))
    thread_L.append(threading.Thread(name=router_c.__str__(), target=router_c.run))
    thread_L.append(threading.Thread(name=router_d.__str__(), target=router_d.run))
    thread_L.append(threading.Thread(name="Network", target=link_layer.run))
    for t in thread_L:
        t.start()
    # create some send events
    for i in range(1):
        host_1.udt_send(1,4,"from 1 to 4")
        host_2.udt_send(2,4, "from 2 to 4")
        host_2.udt_send(2,3, "from 2 to 3")
        host_1.udt_send(1,3, "from 1 to 3")
    # give the network sufficient time to transfer all packets before quitting
    sleep(simulation_time)
    # join all threads
    for o in object_L:
        o.stop = True # cooperative shutdown flag polled by each node's run loop
    for t in thread_L:
        t.join()
    print("All simulation threads joined")
# writes to host periodically | 32.377358 | 110 | 0.708333 | import network_3
import link_3
import threading
from time import sleep
simulation_time = 1
if __name__ == '__main__':
routing_dict = {'router_a': {"1" : 0, "2": 1}, 'router_d': {"3" : 0, "4": 1}}
object_L = []
host_1 = network_3.Host(1)
object_L.append(host_1)
host_2 = network_3.Host(2)
object_L.append(host_2)
host_3 = network_3.Host(3)
object_L.append(host_3)
host_4 = network_3.Host(4)
object_L.append(host_4)
router_a = network_3.Router(routing_dict,name='router_a', intf_count=2, max_queue_size=router_queue_size)
object_L.append(router_a)
router_b = network_3.Router(routing_dict, name='router_b', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_b)
router_c = network_3.Router(routing_dict, name='router_c', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_c)
router_d = network_3.Router(routing_dict, name='router_d', intf_count=2, max_queue_size=router_queue_size)
object_L.append(router_d)
link_layer = link_3.LinkLayer()
object_L.append(link_layer)
link_layer.add_link(link_3.Link(host_1, 0, router_a, 0, 50))
link_layer.add_link(link_3.Link(host_2, 0, router_a, 0, 50))
link_layer.add_link(link_3.Link(router_a, 0,router_b, 0, 50))
link_layer.add_link(link_3.Link(router_a, 1, router_c, 0, 50))
link_layer.add_link(link_3.Link(router_b, 0, router_d, 0, 50))
link_layer.add_link(link_3.Link(router_c, 0, router_d, 0, 50))
link_layer.add_link(link_3.Link(router_d, 0, host_3, 0, 50))
link_layer.add_link(link_3.Link(router_d, 1, host_4, 0, 50))
thread_L = []
thread_L.append(threading.Thread(name=host_1.__str__(), target=host_1.run))
thread_L.append(threading.Thread(name=host_2.__str__(), target=host_2.run))
thread_L.append(threading.Thread(name=host_3.__str__(), target=host_3.run))
thread_L.append(threading.Thread(name=host_4.__str__(), target=host_4.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name=router_b.__str__(), target=router_b.run))
thread_L.append(threading.Thread(name=router_c.__str__(), target=router_c.run))
thread_L.append(threading.Thread(name=router_d.__str__(), target=router_d.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
for i in range(1):
host_1.udt_send(1,4,"from 1 to 4")
host_2.udt_send(2,4, "from 2 to 4")
host_2.udt_send(2,3, "from 2 to 3")
host_1.udt_send(1,3, "from 1 to 3")
sleep(simulation_time)
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
| true | true |
f7267aece711cefa0549e3630c2589663f50d1e2 | 847 | py | Python | credentials.py | vishnusudheendran/Disney-Fastpass-Bot | 0a1a95c0b8782701634c7e914235e789d0fa9c06 | [
"MIT"
] | 1 | 2019-03-05T13:34:06.000Z | 2019-03-05T13:34:06.000Z | credentials.py | vishnusudheendran/Disney-Fastpass-Bot | 0a1a95c0b8782701634c7e914235e789d0fa9c06 | [
"MIT"
] | null | null | null | credentials.py | vishnusudheendran/Disney-Fastpass-Bot | 0a1a95c0b8782701634c7e914235e789d0fa9c06 | [
"MIT"
] | null | null | null | """
This file contains the email and password for MyDisneyExperience and the guests in your party
Below, add your email and password in the parentheses that you use to login into mydisneyexperience.com
"""
email = ""
password = ""
"""
guests = ["guest-00000000-0000-0000-0000-000000000000-unselected", "guest-00000000-0000-0000-0000-000000000000-unselected"]
Above is an example of what the guests variable should look like. Add guest-00000000-0000-0000-0000-000000000000-unselected for how many guests you have.
Do NOT add yourself because you should already be selected at the top, only add the other guests in your party!
Watch this tutorial on how to find your guest's id's here: youtube.com/
"""
guests = [""]
#The path to chromedriver
"""
path = r"C:\Users\YOURUSER\Desktop\Disney Fastpass Bot\chromedriver.exe"
"""
path = r""
| 26.46875 | 153 | 0.753247 | """
This file contains the email and password for MyDisneyExperience and the guests in your party
Below, add your email and password in the parentheses that you use to login into mydisneyexperience.com
"""
email = ""
password = ""
"""
guests = ["guest-00000000-0000-0000-0000-000000000000-unselected", "guest-00000000-0000-0000-0000-000000000000-unselected"]
Above is an example of what the guests variable should look like. Add guest-00000000-0000-0000-0000-000000000000-unselected for how many guests you have.
Do NOT add yourself because you should already be selected at the top, only add the other guests in your party!
Watch this tutorial on how to find your guest's id's here: youtube.com/
"""
guests = [""]
"""
path = r"C:\Users\YOURUSER\Desktop\Disney Fastpass Bot\chromedriver.exe"
"""
path = r""
| false | true |
f7267bf3fd0336a906e4c44012962c01fabd366c | 799 | py | Python | Hackerrank_codes/arithmetic_operators.py | Vyshnavmt94/HackerRankTasks | 634c71ccf0bea7585498bcd7d63e34d0334b4678 | [
"MIT"
] | null | null | null | Hackerrank_codes/arithmetic_operators.py | Vyshnavmt94/HackerRankTasks | 634c71ccf0bea7585498bcd7d63e34d0334b4678 | [
"MIT"
] | null | null | null | Hackerrank_codes/arithmetic_operators.py | Vyshnavmt94/HackerRankTasks | 634c71ccf0bea7585498bcd7d63e34d0334b4678 | [
"MIT"
] | null | null | null | """
Task
The provided code stub reads two integers from STDIN, and . Add code to print three lines where:
The first line contains the sum of the two numbers.
The second line contains the difference of the two numbers (first - second).
The third line contains the product of the two numbers.
Example
Print the following:
8
-2
15
Input Format
The first line contains the first integer, .
The second line contains the second integer, .
Constraints
Output Format
Print the three lines as explained above.
Sample Input 0
3
2
Sample Output 0
5
1
6
Explanation 0
"""
def check(n):
if n>=1 and n<=10**10:
return True
if __name__ == '__main__':
a = int(input())
b = int(input())
if check(a) and check(b):
print(a+b)
print(a-b)
print(a*b) | 13.775862 | 97 | 0.677096 |
def check(n):
if n>=1 and n<=10**10:
return True
if __name__ == '__main__':
a = int(input())
b = int(input())
if check(a) and check(b):
print(a+b)
print(a-b)
print(a*b) | true | true |
f7267c622825114ec43fa387fd5603751837e983 | 3,783 | py | Python | allure-pytest/test/acceptance/status/base_teardown_status_test.py | ammarnajjar/allure-python | 975aaf94d75428330c07976c1cdfc364b9a3cafa | [
"Apache-2.0"
] | 5 | 2018-02-12T11:40:38.000Z | 2018-06-10T20:29:00.000Z | allure-pytest/test/acceptance/status/base_teardown_status_test.py | ammarnajjar/allure-python | 975aaf94d75428330c07976c1cdfc364b9a3cafa | [
"Apache-2.0"
] | null | null | null | allure-pytest/test/acceptance/status/base_teardown_status_test.py | ammarnajjar/allure-python | 975aaf94d75428330c07976c1cdfc364b9a3cafa | [
"Apache-2.0"
] | 1 | 2020-01-25T03:54:39.000Z | 2020-01-25T03:54:39.000Z | from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status
from allure_commons_test.result import has_status_details
from allure_commons_test.result import with_message_contains
from allure_commons_test.result import with_trace_contains
from allure_commons_test.container import has_container
from allure_commons_test.container import has_after
def test_failed_finalizer_fixture(executed_docstring_source):
    # NOTE: the docstring below is not documentation — the
    # `executed_docstring_source` fixture extracts it and runs it as a pytest
    # session, so its content must stay exactly as written.
    """
    >>> import pytest
    >>> @pytest.fixture
    ... def failed_finalizer_fixture(request):
    ...     def fixture_finalizer():
    ...         assert False
    ...     request.addfinalizer(fixture_finalizer)
    ...
    ... def test_failed_finalizer_fixture_example(failed_finalizer_fixture):
    ...     pass
    """
    # The assert in the finalizer should fail the test case itself and also be
    # recorded as a failed "after" fixture entry in the attached container.
    assert_that(executed_docstring_source.allure_report,
                has_test_case("test_failed_finalizer_fixture_example",
                              with_status("failed"),
                              has_status_details(with_message_contains("AssertionError"),
                                                 with_trace_contains("def fixture_finalizer():")
                                                 ),
                              has_container(executed_docstring_source.allure_report,
                                            has_after("{fixture}::{finalizer}".format(
                                                fixture="failed_finalizer_fixture",
                                                finalizer="fixture_finalizer"),
                                                with_status("failed"),
                                                has_status_details(with_message_contains("AssertionError"),
                                                                   with_trace_contains("fixture_finalizer")
                                                                   ),
                                            ),
                              )
                )
                )
def test_pytest_failed_finalizer_fixture(executed_docstring_source):
    """
    >>> import pytest
    >>> @pytest.fixture
    ... def pytest_failed_finalizer_fixture(request):
    ...     def fixture_finalizer():
    ...         pytest.fail()
    ...     request.addfinalizer(fixture_finalizer)
    >>> def test_pytest_failed_finalizer_fixture_example(pytest_failed_finalizer_fixture):
    ...     pass
    """
    # Same scenario as test_failed_finalizer_fixture, but the finalizer fails
    # via pytest.fail() instead of a bare assert; the report should therefore
    # carry pytest's "Failed: <Failed instance>" message rather than
    # "AssertionError".  The docstring is the executed test module source.
    assert_that(executed_docstring_source.allure_report,
                has_test_case("test_pytest_failed_finalizer_fixture_example",
                              with_status("failed"),
                              has_status_details(with_message_contains("Failed: <Failed instance>"),
                                                 with_trace_contains("def fixture_finalizer():")
                                                 ),
                              has_container(executed_docstring_source.allure_report,
                                            has_after("{fixture}::{finalizer}".format(
                                                fixture="pytest_failed_finalizer_fixture",
                                                finalizer="fixture_finalizer"),
                                                with_status("failed"),
                                                has_status_details(with_message_contains("Failed: <Failed instance>"),
                                                                   with_trace_contains("fixture_finalizer")
                                                                   ),
                                            ),
                              )
                )
                )
| 49.12987 | 118 | 0.487708 | from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status
from allure_commons_test.result import has_status_details
from allure_commons_test.result import with_message_contains
from allure_commons_test.result import with_trace_contains
from allure_commons_test.container import has_container
from allure_commons_test.container import has_after
def test_failed_finalizer_fixture(executed_docstring_source):
assert_that(executed_docstring_source.allure_report,
has_test_case("test_failed_finalizer_fixture_example",
with_status("failed"),
has_status_details(with_message_contains("AssertionError"),
with_trace_contains("def fixture_finalizer():")
),
has_container(executed_docstring_source.allure_report,
has_after("{fixture}::{finalizer}".format(
fixture="failed_finalizer_fixture",
finalizer="fixture_finalizer"),
with_status("failed"),
has_status_details(with_message_contains("AssertionError"),
with_trace_contains("fixture_finalizer")
),
),
)
)
)
def test_pytest_failed_finalizer_fixture(executed_docstring_source):
assert_that(executed_docstring_source.allure_report,
has_test_case("test_pytest_failed_finalizer_fixture_example",
with_status("failed"),
has_status_details(with_message_contains("Failed: <Failed instance>"),
with_trace_contains("def fixture_finalizer():")
),
has_container(executed_docstring_source.allure_report,
has_after("{fixture}::{finalizer}".format(
fixture="pytest_failed_finalizer_fixture",
finalizer="fixture_finalizer"),
with_status("failed"),
has_status_details(with_message_contains("Failed: <Failed instance>"),
with_trace_contains("fixture_finalizer")
),
),
)
)
)
| true | true |
f7267ca31364e2c221d467bdf1be6ce69d403ea2 | 5,588 | py | Python | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/compat/builtins.py | nawien-sharma/keyczar | c55563bbd70f4b6fefc7444e296aab9894475f9a | [
"Apache-2.0"
] | 30 | 2015-01-29T14:06:05.000Z | 2022-01-10T07:47:29.000Z | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/compat/builtins.py | nawien-sharma/keyczar | c55563bbd70f4b6fefc7444e296aab9894475f9a | [
"Apache-2.0"
] | 1 | 2017-02-20T20:57:48.000Z | 2018-12-19T23:44:38.000Z | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/compat/builtins.py | nawien-sharma/keyczar | c55563bbd70f4b6fefc7444e296aab9894475f9a | [
"Apache-2.0"
] | 15 | 2015-04-23T02:38:36.000Z | 2021-03-01T20:09:39.000Z | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Portions of the following are derived from the compat.py file in
# Twisted, under the following copyright:
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories
__doc__ = """
Compatibility idioms for __builtin__ names
This module adds names to the __builtin__ module for things that we want
to use in SCons but which don't show up until later Python versions than
the earliest ones we support.
This module checks for the following __builtin__ names:
all()
any()
bool()
dict()
True
False
zip()
Implementations of functions are *NOT* guaranteed to be fully compliant
with these functions in later versions of Python. We are only concerned
with adding functionality that we actually use in SCons, so be wary
if you lift this code for other uses. (That said, making these more
nearly the same as later, official versions is still a desirable goal,
we just don't need to be obsessive about it.)
If you're looking at this with pydoc and various names don't show up in
the FUNCTIONS or DATA output, that means those names are already built in
to this version of Python and we don't need to add them from this module.
"""
__revision__ = "src/engine/SCons/compat/builtins.py 4043 2009/02/23 09:06:45 scons"
import __builtin__
try:
all
except NameError:
# Pre-2.5 Python has no all() function.
def all(iterable):
"""
Returns True if all elements of the iterable are true.
"""
for element in iterable:
if not element:
return False
return True
__builtin__.all = all
all = all
try:
any
except NameError:
# Pre-2.5 Python has no any() function.
def any(iterable):
"""
Returns True if any element of the iterable is true.
"""
for element in iterable:
if element:
return True
return False
__builtin__.any = any
any = any
try:
bool
except NameError:
# Pre-2.2 Python has no bool() function.
def bool(value):
"""Demote a value to 0 or 1, depending on its truth value.
This is not to be confused with types.BooleanType, which is
way too hard to duplicate in early Python versions to be
worth the trouble.
"""
return not not value
__builtin__.bool = bool
bool = bool
try:
dict
except NameError:
# Pre-2.2 Python has no dict() keyword.
def dict(seq=[], **kwargs):
"""
New dictionary initialization.
"""
d = {}
for k, v in seq:
d[k] = v
d.update(kwargs)
return d
__builtin__.dict = dict
try:
False
except NameError:
# Pre-2.2 Python has no False keyword.
__builtin__.False = not 1
# Assign to False in this module namespace so it shows up in pydoc output.
False = False
try:
True
except NameError:
# Pre-2.2 Python has no True keyword.
__builtin__.True = not 0
# Assign to True in this module namespace so it shows up in pydoc output.
True = True
try:
file
except NameError:
# Pre-2.2 Python has no file() function.
__builtin__.file = open
#
try:
zip
except NameError:
# Pre-2.2 Python has no zip() function.
def zip(*lists):
"""
Emulates the behavior we need from the built-in zip() function
added in Python 2.2.
Returns a list of tuples, where each tuple contains the i-th
element rom each of the argument sequences. The returned
list is truncated in length to the length of the shortest
argument sequence.
"""
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
__builtin__.zip = zip
#if sys.version_info[:3] in ((2, 2, 0), (2, 2, 1)):
# def lstrip(s, c=string.whitespace):
# while s and s[0] in c:
# s = s[1:]
# return s
# def rstrip(s, c=string.whitespace):
# while s and s[-1] in c:
# s = s[:-1]
# return s
# def strip(s, c=string.whitespace, l=lstrip, r=rstrip):
# return l(r(s, c), c)
#
# object.__setattr__(str, 'lstrip', lstrip)
# object.__setattr__(str, 'rstrip', rstrip)
# object.__setattr__(str, 'strip', strip)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 29.723404 | 89 | 0.658912 |
__doc__ = """
Compatibility idioms for __builtin__ names
This module adds names to the __builtin__ module for things that we want
to use in SCons but which don't show up until later Python versions than
the earliest ones we support.
This module checks for the following __builtin__ names:
all()
any()
bool()
dict()
True
False
zip()
Implementations of functions are *NOT* guaranteed to be fully compliant
with these functions in later versions of Python. We are only concerned
with adding functionality that we actually use in SCons, so be wary
if you lift this code for other uses. (That said, making these more
nearly the same as later, official versions is still a desirable goal,
we just don't need to be obsessive about it.)
If you're looking at this with pydoc and various names don't show up in
the FUNCTIONS or DATA output, that means those names are already built in
to this version of Python and we don't need to add them from this module.
"""
__revision__ = "src/engine/SCons/compat/builtins.py 4043 2009/02/23 09:06:45 scons"
import __builtin__
try:
all
except NameError:
# Pre-2.5 Python has no all() function.
def all(iterable):
"""
Returns True if all elements of the iterable are true.
"""
for element in iterable:
if not element:
return False
return True
__builtin__.all = all
all = all
try:
any
except NameError:
# Pre-2.5 Python has no any() function.
def any(iterable):
"""
Returns True if any element of the iterable is true.
"""
for element in iterable:
if element:
return True
return False
__builtin__.any = any
any = any
try:
bool
except NameError:
# Pre-2.2 Python has no bool() function.
def bool(value):
"""Demote a value to 0 or 1, depending on its truth value.
This is not to be confused with types.BooleanType, which is
way too hard to duplicate in early Python versions to be
worth the trouble.
"""
return not not value
__builtin__.bool = bool
bool = bool
try:
dict
except NameError:
# Pre-2.2 Python has no dict() keyword.
def dict(seq=[], **kwargs):
"""
New dictionary initialization.
"""
d = {}
for k, v in seq:
d[k] = v
d.update(kwargs)
return d
__builtin__.dict = dict
try:
False
except NameError:
# Pre-2.2 Python has no False keyword.
__builtin__.False = not 1
# Assign to False in this module namespace so it shows up in pydoc output.
False = False
try:
True
except NameError:
# Pre-2.2 Python has no True keyword.
__builtin__.True = not 0
# Assign to True in this module namespace so it shows up in pydoc output.
True = True
try:
file
except NameError:
# Pre-2.2 Python has no file() function.
__builtin__.file = open
#
try:
zip
except NameError:
# Pre-2.2 Python has no zip() function.
def zip(*lists):
"""
Emulates the behavior we need from the built-in zip() function
added in Python 2.2.
Returns a list of tuples, where each tuple contains the i-th
element rom each of the argument sequences. The returned
list is truncated in length to the length of the shortest
argument sequence.
"""
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
__builtin__.zip = zip
#if sys.version_info[:3] in ((2, 2, 0), (2, 2, 1)):
# def lstrip(s, c=string.whitespace):
# while s and s[0] in c:
# s = s[1:]
# return s
# def rstrip(s, c=string.whitespace):
# while s and s[-1] in c:
# s = s[:-1]
# return s
# def strip(s, c=string.whitespace, l=lstrip, r=rstrip):
# return l(r(s, c), c)
#
# object.__setattr__(str, 'lstrip', lstrip)
# object.__setattr__(str, 'rstrip', rstrip)
# object.__setattr__(str, 'strip', strip)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| false | true |
f7267d5166a3b5c62a2fa38a85120010bbb91685 | 264 | py | Python | backend/authentication/admin.py | rajc1729/django-nextjs-realtime | 4d551f544729cc71a02878ad198dab665840987a | [
"MIT"
] | null | null | null | backend/authentication/admin.py | rajc1729/django-nextjs-realtime | 4d551f544729cc71a02878ad198dab665840987a | [
"MIT"
] | null | null | null | backend/authentication/admin.py | rajc1729/django-nextjs-realtime | 4d551f544729cc71a02878ad198dab665840987a | [
"MIT"
] | null | null | null | from django.contrib import admin
from authentication.models import CustomUser
# Register your models here.
class CustomUserAdmin(admin.ModelAdmin):
    """Admin options for CustomUser: show the main identity columns in the changelist."""
    list_display = [
        'username',
        'first_name',
        'last_name',
        'email',
    ]


admin.site.register(CustomUser, CustomUserAdmin)
from authentication.models import CustomUser
class CustomUserAdmin(admin.ModelAdmin):
list_display = ['username','first_name','last_name','email']
admin.site.register(CustomUser, CustomUserAdmin) | true | true |
f7267f0cc12c13e4532cd65a56c9b3d01c13894f | 14,738 | py | Python | selfdrive/locationd/locationd.py | sejongjoa/openpilot_083 | ff8277b6b51241af6b9ba37dcf55dcb6bfcc9d18 | [
"MIT"
] | 20 | 2020-12-04T12:20:57.000Z | 2022-03-31T00:40:15.000Z | selfdrive/locationd/locationd.py | wangyueguo/- | 301500dff6bd53e64257898cac939b24f56befac | [
"MIT"
] | 6 | 2020-03-06T18:13:55.000Z | 2020-07-20T05:10:20.000Z | selfdrive/locationd/locationd.py | wangyueguo/- | 301500dff6bd53e64257898cac939b24f56befac | [
"MIT"
] | 35 | 2021-03-18T23:28:11.000Z | 2021-06-24T17:36:22.000Z | #!/usr/bin/env python3
import json
import numpy as np
import sympy as sp
import cereal.messaging as messaging
from cereal import log
from common.params import Params
import common.transformations.coordinates as coord
from common.transformations.orientation import ecef_euler_from_ned, \
euler_from_quat, \
ned_euler_from_ecef, \
quat_from_euler, euler_from_rot, \
rot_from_quat, rot_from_euler
from rednose.helpers import KalmanError
from selfdrive.locationd.models.live_kf import LiveKalman, States, ObservationKind
from selfdrive.locationd.models.constants import GENERATED_DIR
from selfdrive.swaglog import cloudlog
#from datetime import datetime
#from laika.gps_time import GPSTime
from sympy.utilities.lambdify import lambdify
from rednose.helpers.sympy_helpers import euler_rotate
SensorSource = log.SensorEventData.SensorSource
VISION_DECIMATION = 2
SENSOR_DECIMATION = 10
POSENET_STD_HIST = 40
def to_float(arr):
  """Return the first three entries of *arr* as plain Python floats.

  Used when writing numpy vectors into capnp message fields, which do not
  accept numpy scalar types.
  """
  return [float(v) for v in arr[:3]]
def get_H():
  """Build a numeric jacobian evaluator for the local-velocity observation.

  The observation rotates the ECEF velocity into the device frame using the
  (roll, pitch, yaw) euler angles.  Returns a lambdified function
  H(roll, pitch, yaw, vx, vy, vz) giving the jacobian of that observation
  with respect to the six inputs.
  """
  roll, pitch, yaw, vx, vy, vz = args = sp.symbols('roll pitch yaw vx vy vz')
  vel_ecef = sp.Matrix([vx, vy, vz])
  # device-frame velocity: transpose of the euler rotation applied to ECEF vel
  obs = euler_rotate(roll, pitch, yaw).T * vel_ecef
  jac = obs.jacobian(sp.Matrix(args))
  return lambdify(list(args), jac)
class Localizer():
  """Live Kalman-filter localizer.

  Fuses GPS fixes, IMU samples (gyro/accel), wheel speed and visual odometry
  into one ECEF pose/velocity estimate, and serializes it into
  liveLocationKalman messages.
  """

  def __init__(self, disabled_logs=None, dog=None):
    # NOTE(review): `dog` is accepted but unused in this class.
    if disabled_logs is None:
      disabled_logs = []
    self.kf = LiveKalman(GENERATED_DIR)
    self.reset_kalman()
    self.max_age = .1  # seconds
    self.disabled_logs = disabled_logs
    # device <-> calibrated-frame rotations; identity until a
    # liveCalibration message arrives (see handle_live_calib).
    self.calib = np.zeros(3)
    self.device_from_calib = np.eye(3)
    self.calib_from_device = np.eye(3)
    self.calibrated = False
    self.H = get_H()  # jacobian evaluator for the device-frame velocity observation
    self.posenet_invalid_count = 0
    self.posenet_speed = 0
    self.car_speed = 0
    # rolling history of posenet translation stds, used for spike detection
    self.posenet_stds = 10*np.ones((POSENET_STD_HIST))
    self.converter = coord.LocalCoord.from_ecef(self.kf.x[States.ECEF_POS])
    self.unix_timestamp_millis = 0
    self.last_gps_fix = 0
    self.device_fell = False

  @staticmethod
  def msg_from_state(converter, calib_from_device, H, predicted_state, predicted_cov, calibrated):
    """Serialize a filter state/covariance into a LiveLocationKalman message.

    Args:
      converter: LocalCoord used for the NED velocity conversion.
      calib_from_device: 3x3 rotation from device to calibrated frame.
      H: jacobian evaluator from get_H(), used to propagate covariance
         into the device-frame velocity.
      predicted_state / predicted_cov: kalman state vector and covariance.
      calibrated: whether calibrated-frame fields should be marked valid.
    """
    # stds are the sqrt of the covariance diagonal
    predicted_std = np.sqrt(np.diagonal(predicted_cov))
    fix_ecef = predicted_state[States.ECEF_POS]
    fix_ecef_std = predicted_std[States.ECEF_POS_ERR]
    vel_ecef = predicted_state[States.ECEF_VELOCITY]
    vel_ecef_std = predicted_std[States.ECEF_VELOCITY_ERR]
    fix_pos_geo = coord.ecef2geodetic(fix_ecef)
    #fix_pos_geo_std = np.abs(coord.ecef2geodetic(fix_ecef + fix_ecef_std) - fix_pos_geo)
    orientation_ecef = euler_from_quat(predicted_state[States.ECEF_ORIENTATION])
    orientation_ecef_std = predicted_std[States.ECEF_ORIENTATION_ERR]
    device_from_ecef = rot_from_quat(predicted_state[States.ECEF_ORIENTATION]).T
    calibrated_orientation_ecef = euler_from_rot(calib_from_device.dot(device_from_ecef))
    # rotate acceleration / angular velocity (and their covariances) into
    # the calibrated frame: R * Cov * R^T
    acc_calib = calib_from_device.dot(predicted_state[States.ACCELERATION])
    acc_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
      predicted_cov[States.ACCELERATION_ERR, States.ACCELERATION_ERR]).dot(
      calib_from_device.T)))
    ang_vel_calib = calib_from_device.dot(predicted_state[States.ANGULAR_VELOCITY])
    ang_vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
      predicted_cov[States.ANGULAR_VELOCITY_ERR, States.ANGULAR_VELOCITY_ERR]).dot(
      calib_from_device.T)))
    vel_device = device_from_ecef.dot(vel_ecef)
    device_from_ecef_eul = euler_from_quat(predicted_state[States.ECEF_ORIENTATION]).T
    # propagate the orientation+velocity sub-covariance through the
    # device-velocity observation jacobian
    idxs = list(range(States.ECEF_ORIENTATION_ERR.start, States.ECEF_ORIENTATION_ERR.stop)) + \
           list(range(States.ECEF_VELOCITY_ERR.start, States.ECEF_VELOCITY_ERR.stop))
    condensed_cov = predicted_cov[idxs][:, idxs]
    HH = H(*list(np.concatenate([device_from_ecef_eul, vel_ecef])))
    vel_device_cov = HH.dot(condensed_cov).dot(HH.T)
    vel_device_std = np.sqrt(np.diagonal(vel_device_cov))
    vel_calib = calib_from_device.dot(vel_device)
    vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
      vel_device_cov).dot(calib_from_device.T)))
    orientation_ned = ned_euler_from_ecef(fix_ecef, orientation_ecef)
    #orientation_ned_std = ned_euler_from_ecef(fix_ecef, orientation_ecef + orientation_ecef_std) - orientation_ned
    ned_vel = converter.ecef2ned(fix_ecef + vel_ecef) - converter.ecef2ned(fix_ecef)
    #ned_vel_std = self.converter.ecef2ned(fix_ecef + vel_ecef + vel_ecef_std) - self.converter.ecef2ned(fix_ecef + vel_ecef)
    fix = messaging.log.LiveLocationKalman.new_message()
    # write measurements to msg
    measurements = [
      # measurement field, value, std, valid
      (fix.positionGeodetic, fix_pos_geo, np.nan*np.zeros(3), True),
      (fix.positionECEF, fix_ecef, fix_ecef_std, True),
      (fix.velocityECEF, vel_ecef, vel_ecef_std, True),
      (fix.velocityNED, ned_vel, np.nan*np.zeros(3), True),
      (fix.velocityDevice, vel_device, vel_device_std, True),
      (fix.accelerationDevice, predicted_state[States.ACCELERATION], predicted_std[States.ACCELERATION_ERR], True),
      (fix.orientationECEF, orientation_ecef, orientation_ecef_std, True),
      (fix.calibratedOrientationECEF, calibrated_orientation_ecef, np.nan*np.zeros(3), calibrated),
      (fix.orientationNED, orientation_ned, np.nan*np.zeros(3), True),
      (fix.angularVelocityDevice, predicted_state[States.ANGULAR_VELOCITY], predicted_std[States.ANGULAR_VELOCITY_ERR], True),
      (fix.velocityCalibrated, vel_calib, vel_calib_std, calibrated),
      (fix.angularVelocityCalibrated, ang_vel_calib, ang_vel_calib_std, calibrated),
      (fix.accelerationCalibrated, acc_calib, acc_calib_std, calibrated),
    ]
    for field, value, std, valid in measurements:
      # TODO: can we write the lists faster?
      field.value = to_float(value)
      field.std = to_float(std)
      field.valid = valid
    return fix

  def liveLocationMsg(self):
    """Build the outgoing message from the current filter state and set health flags."""
    fix = self.msg_from_state(self.converter, self.calib_from_device, self.H, self.kf.x, self.kf.P, self.calibrated)
    # experimentally found these values, no false positives in 20k minutes of driving
    # posenet is flagged bad when the recent std history spikes vs the older half
    old_mean, new_mean = np.mean(self.posenet_stds[:POSENET_STD_HIST//2]), np.mean(self.posenet_stds[POSENET_STD_HIST//2:])
    std_spike = new_mean/old_mean > 4 and new_mean > 7
    fix.posenetOK = not (std_spike and self.car_speed > 5)
    fix.deviceStable = not self.device_fell
    self.device_fell = False  # one-shot: cleared once reported
    #fix.gpsWeek = self.time.week
    #fix.gpsTimeOfWeek = self.time.tow
    fix.unixTimestampMillis = self.unix_timestamp_millis
    # position std below 50m gates 'valid'/'uncalibrated' vs 'uninitialized'
    if np.linalg.norm(fix.positionECEF.std) < 50 and self.calibrated:
      fix.status = 'valid'
    elif np.linalg.norm(fix.positionECEF.std) < 50:
      fix.status = 'uncalibrated'
    else:
      fix.status = 'uninitialized'
    return fix

  def update_kalman(self, time, kind, meas, R=None):
    """Feed one observation into the filter; reset the filter on KalmanError."""
    try:
      self.kf.predict_and_observe(time, kind, meas, R)
    except KalmanError:
      cloudlog.error("Error in predict and observe, kalman reset")
      self.reset_kalman()

  def handle_gps(self, current_time, log):
    """Process a gpsLocationExternal message: sanity-check against the
    filter state (resetting it on large disagreement) and observe
    ECEF position and velocity."""
    # ignore the message if the fix is invalid
    if log.flags % 2 == 0:
      return
    self.last_gps_fix = current_time
    self.converter = coord.LocalCoord.from_geodetic([log.latitude, log.longitude, log.altitude])
    ecef_pos = self.converter.ned2ecef([0, 0, 0])
    ecef_vel = self.converter.ned2ecef(np.array(log.vNED)) - ecef_pos
    # observation noise from reported GPS accuracies (3x vertical for position)
    ecef_pos_R = np.diag([(3*log.verticalAccuracy)**2]*3)
    ecef_vel_R = np.diag([(log.speedAccuracy)**2]*3)
    #self.time = GPSTime.from_datetime(datetime.utcfromtimestamp(log.timestamp*1e-3))
    self.unix_timestamp_millis = log.timestamp
    gps_est_error = np.sqrt((self.kf.x[0] - ecef_pos[0])**2 +
                            (self.kf.x[1] - ecef_pos[1])**2 +
                            (self.kf.x[2] - ecef_pos[2])**2)
    orientation_ecef = euler_from_quat(self.kf.x[States.ECEF_ORIENTATION])
    orientation_ned = ned_euler_from_ecef(ecef_pos, orientation_ecef)
    orientation_ned_gps = np.array([0, 0, np.radians(log.bearingDeg)])
    # wrap yaw difference into [-pi, pi)
    orientation_error = np.mod(orientation_ned - orientation_ned_gps - np.pi, 2*np.pi) - np.pi
    initial_pose_ecef_quat = quat_from_euler(ecef_euler_from_ned(ecef_pos, orientation_ned_gps))
    # orientation check only meaningful when moving (>5 m/s); >1 rad disagreement -> reset
    if np.linalg.norm(ecef_vel) > 5 and np.linalg.norm(orientation_error) > 1:
      cloudlog.error("Locationd vs ubloxLocation orientation difference too large, kalman reset")
      self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)
      self.update_kalman(current_time, ObservationKind.ECEF_ORIENTATION_FROM_GPS, initial_pose_ecef_quat)
    elif gps_est_error > 50:
      cloudlog.error("Locationd vs ubloxLocation position difference too large, kalman reset")
      self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)
    self.update_kalman(current_time, ObservationKind.ECEF_POS, ecef_pos, R=ecef_pos_R)
    self.update_kalman(current_time, ObservationKind.ECEF_VEL, ecef_vel, R=ecef_vel_R)

  def handle_car_state(self, current_time, log):
    """Observe wheel speed (decimated); when stopped, observe zero rotation."""
    self.speed_counter += 1
    if self.speed_counter % SENSOR_DECIMATION == 0:
      self.update_kalman(current_time, ObservationKind.ODOMETRIC_SPEED, [log.vEgo])
      self.car_speed = abs(log.vEgo)
      if log.vEgo == 0:
        self.update_kalman(current_time, ObservationKind.NO_ROT, [0, 0, 0])

  def handle_cam_odo(self, current_time, log):
    """Observe visual-odometry rotation and translation (decimated),
    rotated from the calibrated into the device frame."""
    self.cam_counter += 1
    if self.cam_counter % VISION_DECIMATION == 0:
      rot_device = self.device_from_calib.dot(log.rot)
      rot_device_std = self.device_from_calib.dot(log.rotStd)
      # stds are inflated 10x before being passed as observation noise
      self.update_kalman(current_time,
                         ObservationKind.CAMERA_ODO_ROTATION,
                         np.concatenate([rot_device, 10*rot_device_std]))
      trans_device = self.device_from_calib.dot(log.trans)
      trans_device_std = self.device_from_calib.dot(log.transStd)
      self.posenet_speed = np.linalg.norm(trans_device)
      # shift the posenet std history and append the newest value
      self.posenet_stds[:-1] = self.posenet_stds[1:]
      self.posenet_stds[-1] = trans_device_std[0]
      self.update_kalman(current_time,
                         ObservationKind.CAMERA_ODO_TRANSLATION,
                         np.concatenate([trans_device, 10*trans_device_std]))

  def handle_sensors(self, current_time, log):
    """Process a batch of sensorEvents: decimated gyro and accel observations,
    remapped from sensor axes into the filter's device frame."""
    # TODO does not yet account for double sensor readings in the log
    for sensor_reading in log:
      sensor_time = 1e-9 * sensor_reading.timestamp
      # TODO: handle messages from two IMUs at the same time
      if sensor_reading.source == SensorSource.lsm6ds3:
        continue
      # Gyro Uncalibrated
      if sensor_reading.sensor == 5 and sensor_reading.type == 16:
        self.gyro_counter += 1
        if self.gyro_counter % SENSOR_DECIMATION == 0:
          v = sensor_reading.gyroUncalibrated.v
          self.update_kalman(sensor_time, ObservationKind.PHONE_GYRO, [-v[2], -v[1], -v[0]])
      # Accelerometer
      if sensor_reading.sensor == 1 and sensor_reading.type == 1:
        # check if device fell, estimate 10 for g
        # 40m/s**2 is a good filter for falling detection, no false positives in 20k minutes of driving
        self.device_fell = self.device_fell or (np.linalg.norm(np.array(sensor_reading.acceleration.v) - np.array([10, 0, 0])) > 40)
        self.acc_counter += 1
        if self.acc_counter % SENSOR_DECIMATION == 0:
          v = sensor_reading.acceleration.v
          self.update_kalman(sensor_time, ObservationKind.PHONE_ACCEL, [-v[2], -v[1], -v[0]])

  def handle_live_calib(self, current_time, log):
    """Update the device<->calibrated rotations from a liveCalibration message."""
    if len(log.rpyCalib):
      self.calib = log.rpyCalib
      self.device_from_calib = rot_from_euler(self.calib)
      self.calib_from_device = self.device_from_calib.T
      self.calibrated = log.calStatus == 1

  def reset_kalman(self, current_time=None, init_orient=None, init_pos=None):
    """Re-initialize the filter, optionally seeding orientation and position."""
    self.filter_time = current_time
    init_x = LiveKalman.initial_x.copy()
    # too nonlinear to init on completely wrong
    if init_orient is not None:
      init_x[3:7] = init_orient
    if init_pos is not None:
      init_x[:3] = init_pos
    self.kf.init_state(init_x, covs=np.diag(LiveKalman.initial_P_diag), filter_time=current_time)
    self.observation_buffer = []
    # decimation counters for the per-source handlers
    self.gyro_counter = 0
    self.acc_counter = 0
    self.speed_counter = 0
    self.cam_counter = 0
def locationd_thread(sm, pm, disabled_logs=None):
  """Run the localizer event loop (never returns).

  Args:
    sm: SubMaster over the input sockets, or None to create a default one.
    pm: PubMaster for 'liveLocationKalman', or None to create one.
    disabled_logs: forwarded to Localizer.
  """
  if disabled_logs is None:
    disabled_logs = []
  if sm is None:
    socks = ['gpsLocationExternal', 'sensorEvents', 'cameraOdometry', 'liveCalibration', 'carState']
    # gpsLocationExternal is excluded from the alive check (fix may be absent)
    sm = messaging.SubMaster(socks, ignore_alive=['gpsLocationExternal'])
  if pm is None:
    pm = messaging.PubMaster(['liveLocationKalman'])
  params = Params()
  localizer = Localizer(disabled_logs=disabled_logs)
  while True:
    sm.update()
    # dispatch each freshly-updated, valid message to its handler
    for sock, updated in sm.updated.items():
      if updated and sm.valid[sock]:
        t = sm.logMonoTime[sock] * 1e-9  # mono time in seconds
        if sock == "sensorEvents":
          localizer.handle_sensors(t, sm[sock])
        elif sock == "gpsLocationExternal":
          localizer.handle_gps(t, sm[sock])
        elif sock == "carState":
          localizer.handle_car_state(t, sm[sock])
        elif sock == "cameraOdometry":
          localizer.handle_cam_odo(t, sm[sock])
        elif sock == "liveCalibration":
          localizer.handle_live_calib(t, sm[sock])
    # publish at cameraOdometry rate
    if sm.updated['cameraOdometry']:
      t = sm.logMonoTime['cameraOdometry']  # NOTE: nanoseconds here
      msg = messaging.new_message('liveLocationKalman')
      msg.logMonoTime = t
      msg.liveLocationKalman = localizer.liveLocationMsg()
      msg.liveLocationKalman.inputsOK = sm.all_alive_and_valid()
      msg.liveLocationKalman.sensorsOK = sm.alive['sensorEvents'] and sm.valid['sensorEvents']
      # gps is considered OK if the last valid fix is under a second old
      gps_age = (t / 1e9) - localizer.last_gps_fix
      msg.liveLocationKalman.gpsOK = gps_age < 1.0
      pm.send('liveLocationKalman', msg)
      if sm.frame % 1200 == 0 and msg.liveLocationKalman.gpsOK:  # once a minute
        # persist the latest geodetic position for use across restarts
        location = {
          'latitude': msg.liveLocationKalman.positionGeodetic.value[0],
          'longitude': msg.liveLocationKalman.positionGeodetic.value[1],
          'altitude': msg.liveLocationKalman.positionGeodetic.value[2],
        }
        params.put("LastGPSPosition", json.dumps(location))
def main(sm=None, pm=None):
  """Entry point: run the localizer loop, optionally with injected messaging masters."""
  locationd_thread(sm=sm, pm=pm)
if __name__ == "__main__":
  import os
  # restrict OpenMP to a single worker thread for this process
  os.environ["OMP_NUM_THREADS"] = "1"
  main()
| 41.988604 | 132 | 0.70688 |
import json
import numpy as np
import sympy as sp
import cereal.messaging as messaging
from cereal import log
from common.params import Params
import common.transformations.coordinates as coord
from common.transformations.orientation import ecef_euler_from_ned, \
euler_from_quat, \
ned_euler_from_ecef, \
quat_from_euler, euler_from_rot, \
rot_from_quat, rot_from_euler
from rednose.helpers import KalmanError
from selfdrive.locationd.models.live_kf import LiveKalman, States, ObservationKind
from selfdrive.locationd.models.constants import GENERATED_DIR
from selfdrive.swaglog import cloudlog
from sympy.utilities.lambdify import lambdify
from rednose.helpers.sympy_helpers import euler_rotate
SensorSource = log.SensorEventData.SensorSource
VISION_DECIMATION = 2
SENSOR_DECIMATION = 10
POSENET_STD_HIST = 40
def to_float(arr):
return [float(arr[0]), float(arr[1]), float(arr[2])]
def get_H():
roll = sp.Symbol('roll')
pitch = sp.Symbol('pitch')
yaw = sp.Symbol('yaw')
vx = sp.Symbol('vx')
vy = sp.Symbol('vy')
vz = sp.Symbol('vz')
h = euler_rotate(roll, pitch, yaw).T*(sp.Matrix([vx, vy, vz]))
H = h.jacobian(sp.Matrix([roll, pitch, yaw, vx, vy, vz]))
H_f = lambdify([roll, pitch, yaw, vx, vy, vz], H)
return H_f
class Localizer():
def __init__(self, disabled_logs=None, dog=None):
if disabled_logs is None:
disabled_logs = []
self.kf = LiveKalman(GENERATED_DIR)
self.reset_kalman()
self.max_age = .1
self.disabled_logs = disabled_logs
self.calib = np.zeros(3)
self.device_from_calib = np.eye(3)
self.calib_from_device = np.eye(3)
self.calibrated = False
self.H = get_H()
self.posenet_invalid_count = 0
self.posenet_speed = 0
self.car_speed = 0
self.posenet_stds = 10*np.ones((POSENET_STD_HIST))
self.converter = coord.LocalCoord.from_ecef(self.kf.x[States.ECEF_POS])
self.unix_timestamp_millis = 0
self.last_gps_fix = 0
self.device_fell = False
@staticmethod
def msg_from_state(converter, calib_from_device, H, predicted_state, predicted_cov, calibrated):
predicted_std = np.sqrt(np.diagonal(predicted_cov))
fix_ecef = predicted_state[States.ECEF_POS]
fix_ecef_std = predicted_std[States.ECEF_POS_ERR]
vel_ecef = predicted_state[States.ECEF_VELOCITY]
vel_ecef_std = predicted_std[States.ECEF_VELOCITY_ERR]
fix_pos_geo = coord.ecef2geodetic(fix_ecef)
orientation_ecef = euler_from_quat(predicted_state[States.ECEF_ORIENTATION])
orientation_ecef_std = predicted_std[States.ECEF_ORIENTATION_ERR]
device_from_ecef = rot_from_quat(predicted_state[States.ECEF_ORIENTATION]).T
calibrated_orientation_ecef = euler_from_rot(calib_from_device.dot(device_from_ecef))
acc_calib = calib_from_device.dot(predicted_state[States.ACCELERATION])
acc_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
predicted_cov[States.ACCELERATION_ERR, States.ACCELERATION_ERR]).dot(
calib_from_device.T)))
ang_vel_calib = calib_from_device.dot(predicted_state[States.ANGULAR_VELOCITY])
ang_vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
predicted_cov[States.ANGULAR_VELOCITY_ERR, States.ANGULAR_VELOCITY_ERR]).dot(
calib_from_device.T)))
vel_device = device_from_ecef.dot(vel_ecef)
device_from_ecef_eul = euler_from_quat(predicted_state[States.ECEF_ORIENTATION]).T
idxs = list(range(States.ECEF_ORIENTATION_ERR.start, States.ECEF_ORIENTATION_ERR.stop)) + \
list(range(States.ECEF_VELOCITY_ERR.start, States.ECEF_VELOCITY_ERR.stop))
condensed_cov = predicted_cov[idxs][:, idxs]
HH = H(*list(np.concatenate([device_from_ecef_eul, vel_ecef])))
vel_device_cov = HH.dot(condensed_cov).dot(HH.T)
vel_device_std = np.sqrt(np.diagonal(vel_device_cov))
vel_calib = calib_from_device.dot(vel_device)
vel_calib_std = np.sqrt(np.diagonal(calib_from_device.dot(
vel_device_cov).dot(calib_from_device.T)))
orientation_ned = ned_euler_from_ecef(fix_ecef, orientation_ecef)
ned_vel = converter.ecef2ned(fix_ecef + vel_ecef) - converter.ecef2ned(fix_ecef)
fix = messaging.log.LiveLocationKalman.new_message()
measurements = [
(fix.positionGeodetic, fix_pos_geo, np.nan*np.zeros(3), True),
(fix.positionECEF, fix_ecef, fix_ecef_std, True),
(fix.velocityECEF, vel_ecef, vel_ecef_std, True),
(fix.velocityNED, ned_vel, np.nan*np.zeros(3), True),
(fix.velocityDevice, vel_device, vel_device_std, True),
(fix.accelerationDevice, predicted_state[States.ACCELERATION], predicted_std[States.ACCELERATION_ERR], True),
(fix.orientationECEF, orientation_ecef, orientation_ecef_std, True),
(fix.calibratedOrientationECEF, calibrated_orientation_ecef, np.nan*np.zeros(3), calibrated),
(fix.orientationNED, orientation_ned, np.nan*np.zeros(3), True),
(fix.angularVelocityDevice, predicted_state[States.ANGULAR_VELOCITY], predicted_std[States.ANGULAR_VELOCITY_ERR], True),
(fix.velocityCalibrated, vel_calib, vel_calib_std, calibrated),
(fix.angularVelocityCalibrated, ang_vel_calib, ang_vel_calib_std, calibrated),
(fix.accelerationCalibrated, acc_calib, acc_calib_std, calibrated),
]
for field, value, std, valid in measurements:
field.value = to_float(value)
field.std = to_float(std)
field.valid = valid
return fix
def liveLocationMsg(self):
    """Assemble the liveLocationKalman message from the current filter state.

    Also derives the health flags: posenetOK (no recent spike in the visual
    odometry translation stds while moving), deviceStable (no detected drop,
    latched flag consumed here) and a coarse fix status string.
    """
    fix = self.msg_from_state(self.converter, self.calib_from_device, self.H,
                              self.kf.x, self.kf.P, self.calibrated)

    # Compare the older half of the posenet-std history against the newer
    # half; a sharp rise marks an unreliable posenet stretch.
    half = POSENET_STD_HIST // 2
    old_mean = np.mean(self.posenet_stds[:half])
    new_mean = np.mean(self.posenet_stds[half:])
    std_spike = new_mean / old_mean > 4 and new_mean > 7

    fix.posenetOK = not (std_spike and self.car_speed > 5)
    fix.deviceStable = not self.device_fell
    self.device_fell = False  # latched flag: reset once reported
    fix.unixTimestampMillis = self.unix_timestamp_millis

    # Status: position std under 50 m counts as a usable fix.
    pos_std_norm = np.linalg.norm(fix.positionECEF.std)
    if pos_std_norm < 50:
        fix.status = 'valid' if self.calibrated else 'uncalibrated'
    else:
        fix.status = 'uninitialized'
    return fix
def update_kalman(self, time, kind, meas, R=None):
    """Feed one observation into the Kalman filter.

    On an internal KalmanError the filter is re-initialized instead of
    letting the failure propagate, so one bad observation cannot take
    down the whole localizer.
    """
    try:
        self.kf.predict_and_observe(time, kind, meas, R)
    except KalmanError:
        cloudlog.error("Error in predict and observe, kalman reset")
        self.reset_kalman()
def handle_gps(self, current_time, log):
    """Ingest an external GPS fix.

    Refreshes the geodetic/local-frame converter, feeds position and
    velocity observations to the filter, and fully resets the filter when
    its estimate has diverged too far from the GPS solution.
    """
    # Bit 0 of flags signals a valid fix; ignore messages without one.
    if log.flags % 2 == 0:
        return

    self.last_gps_fix = current_time

    self.converter = coord.LocalCoord.from_geodetic([log.latitude, log.longitude, log.altitude])
    ecef_pos = self.converter.ned2ecef([0, 0, 0])
    ecef_vel = self.converter.ned2ecef(np.array(log.vNED)) - ecef_pos
    # Isotropic position covariance from 3x the reported vertical accuracy;
    # velocity covariance from the reported speed accuracy.
    ecef_pos_R = np.diag([(3*log.verticalAccuracy)**2]*3)
    ecef_vel_R = np.diag([(log.speedAccuracy)**2]*3)

    self.unix_timestamp_millis = log.timestamp
    gps_est_error = np.sqrt((self.kf.x[0] - ecef_pos[0])**2 +
                            (self.kf.x[1] - ecef_pos[1])**2 +
                            (self.kf.x[2] - ecef_pos[2])**2)

    orientation_ecef = euler_from_quat(self.kf.x[States.ECEF_ORIENTATION])
    orientation_ned = ned_euler_from_ecef(ecef_pos, orientation_ecef)
    # GPS bearing only constrains yaw; roll/pitch are taken as zero here.
    orientation_ned_gps = np.array([0, 0, np.radians(log.bearingDeg)])
    # Wrap the per-axis orientation difference into (-pi, pi].
    orientation_error = np.mod(orientation_ned - orientation_ned_gps - np.pi, 2*np.pi) - np.pi
    initial_pose_ecef_quat = quat_from_euler(ecef_euler_from_ned(ecef_pos, orientation_ned_gps))

    # Reset if heading disagrees by >1 rad while moving (>5 m/s), or if the
    # position estimate is off by more than 50 m.
    if np.linalg.norm(ecef_vel) > 5 and np.linalg.norm(orientation_error) > 1:
        cloudlog.error("Locationd vs ubloxLocation orientation difference too large, kalman reset")
        self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)
        self.update_kalman(current_time, ObservationKind.ECEF_ORIENTATION_FROM_GPS, initial_pose_ecef_quat)
    elif gps_est_error > 50:
        cloudlog.error("Locationd vs ubloxLocation position difference too large, kalman reset")
        self.reset_kalman(init_pos=ecef_pos, init_orient=initial_pose_ecef_quat)

    self.update_kalman(current_time, ObservationKind.ECEF_POS, ecef_pos, R=ecef_pos_R)
    self.update_kalman(current_time, ObservationKind.ECEF_VEL, ecef_vel, R=ecef_vel_R)
def handle_car_state(self, current_time, log):
    """Decimated ingestion of the vehicle's odometric speed.

    Every SENSOR_DECIMATION-th message contributes a speed observation;
    at standstill an additional zero-rotation observation is added.
    """
    self.speed_counter += 1
    if self.speed_counter % SENSOR_DECIMATION != 0:
        return

    speed = log.vEgo
    self.update_kalman(current_time, ObservationKind.ODOMETRIC_SPEED, [speed])
    self.car_speed = abs(speed)
    if speed == 0:
        # Standstill: constrain angular velocity to zero.
        self.update_kalman(current_time, ObservationKind.NO_ROT, [0, 0, 0])
def handle_cam_odo(self, current_time, log):
    """Decimated ingestion of visual odometry.

    Rotation and translation rates are rotated from the calibrated frame
    into the device frame and fed to the filter with their standard
    deviations inflated 10x.
    """
    self.cam_counter += 1
    if self.cam_counter % VISION_DECIMATION == 0:
        rot_device = self.device_from_calib.dot(log.rot)
        rot_device_std = self.device_from_calib.dot(log.rotStd)
        self.update_kalman(current_time,
                           ObservationKind.CAMERA_ODO_ROTATION,
                           np.concatenate([rot_device, 10*rot_device_std]))
        trans_device = self.device_from_calib.dot(log.trans)
        trans_device_std = self.device_from_calib.dot(log.transStd)
        self.posenet_speed = np.linalg.norm(trans_device)
        # Shift the fixed-size history of forward translation stds; it is
        # consumed by liveLocationMsg to detect posenet std spikes.
        self.posenet_stds[:-1] = self.posenet_stds[1:]
        self.posenet_stds[-1] = trans_device_std[0]
        self.update_kalman(current_time,
                           ObservationKind.CAMERA_ODO_TRANSLATION,
                           np.concatenate([trans_device, 10*trans_device_std]))
def handle_sensors(self, current_time, log):
    """Ingest raw IMU readings (decimated) and detect a device drop.

    Sensor axes are remapped into the device frame via [-v[2], -v[1], -v[0]].
    """
    # TODO: handle messages properly (original upstream note)
    for sensor_reading in log:
        sensor_time = 1e-9 * sensor_reading.timestamp
        # Readings from the lsm6ds3 IMU are skipped entirely.
        if sensor_reading.source == SensorSource.lsm6ds3:
            continue

        # sensor 5 / type 16: uncalibrated gyroscope.
        if sensor_reading.sensor == 5 and sensor_reading.type == 16:
            self.gyro_counter += 1
            if self.gyro_counter % SENSOR_DECIMATION == 0:
                v = sensor_reading.gyroUncalibrated.v
                self.update_kalman(sensor_time, ObservationKind.PHONE_GYRO, [-v[2], -v[1], -v[0]])

        # sensor 1 / type 1: accelerometer.
        if sensor_reading.sensor == 1 and sensor_reading.type == 1:
            # Large deviation from the resting acceleration vector [10, 0, 0]
            # suggests the device was knocked or fell; flag is latched until
            # liveLocationMsg reports and clears it.
            self.device_fell = self.device_fell or (np.linalg.norm(np.array(sensor_reading.acceleration.v) - np.array([10, 0, 0])) > 40)
            self.acc_counter += 1
            if self.acc_counter % SENSOR_DECIMATION == 0:
                v = sensor_reading.acceleration.v
                self.update_kalman(sensor_time, ObservationKind.PHONE_ACCEL, [-v[2], -v[1], -v[0]])
def handle_live_calib(self, current_time, log):
    """Adopt a new camera calibration (roll/pitch/yaw) when one is present.

    Updates both rotation matrices (calibrated<->device) and the
    'calibrated' flag derived from calStatus.
    """
    if not len(log.rpyCalib):
        return
    self.calib = log.rpyCalib
    self.device_from_calib = rot_from_euler(self.calib)
    self.calib_from_device = self.device_from_calib.T
    self.calibrated = log.calStatus == 1
def reset_kalman(self, current_time=None, init_orient=None, init_pos=None):
    """Re-initialize the Kalman filter from LiveKalman's defaults.

    Optionally overrides the initial ECEF position (state[:3]) and the
    orientation quaternion (state[3:7]); clears the observation buffer
    and all decimation counters.
    """
    self.filter_time = current_time
    init_x = LiveKalman.initial_x.copy()
    if init_orient is not None:
        init_x[3:7] = init_orient  # orientation quaternion slots
    if init_pos is not None:
        init_x[:3] = init_pos      # ECEF position slots
    self.kf.init_state(init_x, covs=np.diag(LiveKalman.initial_P_diag), filter_time=current_time)

    self.observation_buffer = []
    # Restart every decimation counter.
    self.gyro_counter = self.acc_counter = self.speed_counter = self.cam_counter = 0
def locationd_thread(sm, pm, disabled_logs=None):
    """Main loop: subscribe to GPS/IMU/odometry/calibration/car-state
    streams, feed them into the Localizer, and publish liveLocationKalman
    at the camera-odometry rate.

    sm/pm may be injected (e.g. for tests); when None they are created here.
    """
    if disabled_logs is None:
        disabled_logs = []

    if sm is None:
        socks = ['gpsLocationExternal', 'sensorEvents', 'cameraOdometry', 'liveCalibration', 'carState']
        # GPS may legitimately be absent, so it is excluded from liveness checks.
        sm = messaging.SubMaster(socks, ignore_alive=['gpsLocationExternal'])
    if pm is None:
        pm = messaging.PubMaster(['liveLocationKalman'])

    params = Params()
    localizer = Localizer(disabled_logs=disabled_logs)

    while True:
        sm.update()

        for sock, updated in sm.updated.items():
            if updated and sm.valid[sock]:
                t = sm.logMonoTime[sock] * 1e-9  # nanoseconds -> seconds
                if sock == "sensorEvents":
                    localizer.handle_sensors(t, sm[sock])
                elif sock == "gpsLocationExternal":
                    localizer.handle_gps(t, sm[sock])
                elif sock == "carState":
                    localizer.handle_car_state(t, sm[sock])
                elif sock == "cameraOdometry":
                    localizer.handle_cam_odo(t, sm[sock])
                elif sock == "liveCalibration":
                    localizer.handle_live_calib(t, sm[sock])

        # Publish once per camera-odometry update.
        if sm.updated['cameraOdometry']:
            t = sm.logMonoTime['cameraOdometry']  # note: nanoseconds here
            msg = messaging.new_message('liveLocationKalman')
            msg.logMonoTime = t

            msg.liveLocationKalman = localizer.liveLocationMsg()
            msg.liveLocationKalman.inputsOK = sm.all_alive_and_valid()
            msg.liveLocationKalman.sensorsOK = sm.alive['sensorEvents'] and sm.valid['sensorEvents']

            # GPS is considered OK when a fix arrived within the last second.
            gps_age = (t / 1e9) - localizer.last_gps_fix
            msg.liveLocationKalman.gpsOK = gps_age < 1.0
            pm.send('liveLocationKalman', msg)

            # Persist a coarse last-known position every 1200 frames while
            # GPS is fresh, so it survives restarts.
            if sm.frame % 1200 == 0 and msg.liveLocationKalman.gpsOK:
                location = {
                    'latitude': msg.liveLocationKalman.positionGeodetic.value[0],
                    'longitude': msg.liveLocationKalman.positionGeodetic.value[1],
                    'altitude': msg.liveLocationKalman.positionGeodetic.value[2],
                }
                params.put("LastGPSPosition", json.dumps(location))
def main(sm=None, pm=None):
    """Entry point; sm/pm allow tests to inject their own Sub/PubMaster."""
    locationd_thread(sm, pm)


if __name__ == "__main__":
    import os
    # Restrict OpenMP-backed libraries to a single worker thread.
    os.environ["OMP_NUM_THREADS"] = "1"
    main()
| true | true |
f7267fe4a54f406a59df97aff7b19b741d435024 | 3,773 | py | Python | OpenStack.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null | OpenStack.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null | OpenStack.py | minidfx/Cloud-Python- | c9e4741c4c4f7de77f439e2786cca7f03f70cad9 | [
"MIT"
] | null | null | null | # Getting started with OpenStack using libcloud
# http://developer.openstack.org/firstapp-libcloud/getting_started.html
from libcloud.compute.ssh import *
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from Cloud import Cloud
from settings import *
# noinspection PyPep8Naming
class OpenStack(Cloud):
    """Provision and tear down a three-node demo stack (MongoDB, REST server,
    REST client) on SwitchEngines through the libcloud OpenStack driver.

    Getting started with OpenStack using libcloud:
    http://developer.openstack.org/firstapp-libcloud/getting_started.html
    """

    def __init__(self):
        super().__init__()
        openstack = get_driver(Provider.OPENSTACK)
        # Credentials and endpoints come from settings.py.
        self.driver = openstack(user,
                                password,
                                ex_tenant_name=tenant_name,
                                ex_force_auth_url=auth_url,
                                ex_force_auth_version='2.0_password',
                                ex_force_service_region=service_region)
        # Floating IPs allocated by create(), released again in destroy().
        self.activeIps = []

    def create(self):
        """Boot the three instances and start the REST processes over SSH."""
        print('Retrieving infrastructure information from SwitchEngines ...')
        images = self.driver.list_images()
        sizes = self.driver.list_sizes()
        security_groups = self.driver.ex_list_security_groups()
        networks = self.driver.ex_list_networks()
        print('Done.')

        # Raises IndexError if any expected resource is missing.
        security_group = [s for s in security_groups if s.name == 'anywhere'][0]
        network = [s for s in networks if s.name == 'My network'][0]
        size = [s for s in sizes if s.name == 'c1.micro'][0]

        mongoDbIp = self.__run_instance('MongoDB', size, images, security_group, network)
        restServerIP = self.__run_instance('RESTServer', size, images, security_group, network)
        restClientIP = self.__run_instance('RESTClient', size, images, security_group, network)

        self.__additionalOperations(restServerIP, restClientIP, mongoDbIp)

    @staticmethod
    def _run_remote(host, command):
        """Run *command* on *host* as the 'ubuntu' user, always closing the
        SSH connection afterwards (was duplicated inline before)."""
        clientSSH = ShellOutSSHClient(host, username='ubuntu')
        clientSSH.connect()
        try:
            clientSSH.run(command)
        finally:
            clientSSH.close()

    @staticmethod
    def __additionalOperations(restServerIP, restClientIP, mongoDbIp):
        """Start the REST server and client, pointing both at MongoDB."""
        OpenStack._run_remote(restServerIP, 'python /home/ubuntu/Downloads/pyserver.py %s &' % mongoDbIp)
        OpenStack._run_remote(restClientIP, 'python /home/ubuntu/Downloads/pyclient.py %s &' % mongoDbIp)

    def __run_instance(self, instancename, size, images, security_group, network):
        """Boot one instance named *instancename*, wait for it, attach a
        floating IP and return the instance's private IP address."""
        print('Creating a new node ...')
        image = [s for s in images if s.name == instancename][0]
        node = self.driver.create_node(name=instancename,
                                       size=size,
                                       image=image,
                                       ex_security_groups=[security_group],
                                       ex_keyname='switch-engine',
                                       networks=[network])
        print('Done.')

        print("Waiting for %s ..." % instancename)
        self.driver.wait_until_running([node])
        self.activeNodes.append(node)

        # Re-fetch the node list to read the private IP assigned on boot.
        nodes = self.driver.list_nodes()
        instanceNode = [s for s in nodes if s.name == instancename][0]
        privateIp = instanceNode.private_ips[0]
        print('Instance ready.')

        print('Attaching a Public IP ...')
        ip = self.driver.ex_create_floating_ip()
        self.activeIps.append(ip)
        self.driver.ex_attach_floating_ip_to_node(node, ip)
        print('Done.')

        return privateIp

    def destroy(self):
        """Destroy all nodes created by this instance and release their
        floating IPs."""
        print('Destroying the instance on SwitchEngines ...')
        for node in self.activeNodes:
            node.destroy()
        for ip in self.activeIps:
            self.driver.ex_delete_floating_ip(ip)
        print('Done.')
| 35.59434 | 95 | 0.602703 |
from libcloud.compute.ssh import *
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from Cloud import Cloud
from settings import *
class OpenStack(Cloud):
def __init__(self):
super().__init__()
openstack = get_driver(Provider.OPENSTACK)
self.driver = openstack(user,
password,
ex_tenant_name = tenant_name,
ex_force_auth_url = auth_url,
ex_force_auth_version = '2.0_password',
ex_force_service_region = service_region)
self.activeIps = []
def create(self):
print('Retrieving infrastructure information from SwitchEngines ...')
images = self.driver.list_images()
sizes = self.driver.list_sizes()
security_groups = self.driver.ex_list_security_groups()
networks = self.driver.ex_list_networks()
print('Done.')
security_group = [s for s in security_groups if s.name == 'anywhere'][0]
network = [s for s in networks if s.name == 'My network'][0]
size = [s for s in sizes if s.name == 'c1.micro'][0]
mongoDbIp = self.__run_instance('MongoDB', size, images, security_group, network)
restServerIP = self.__run_instance('RESTServer', size, images, security_group, network)
restClientIP = self.__run_instance('RESTClient', size, images, security_group, network)
self.__additionalOperations(restServerIP, restClientIP, mongoDbIp)
@staticmethod
def __additionalOperations(restServerIP, restClientIP, mongoDbIp):
clientSSH = ShellOutSSHClient(restServerIP, username = 'ubuntu')
clientSSH.connect()
try:
clientSSH.run('python /home/ubuntu/Downloads/pyserver.py %s &' % mongoDbIp)
finally:
clientSSH.close()
clientSSH = ShellOutSSHClient(restClientIP, username = 'ubuntu')
clientSSH.connect()
try:
clientSSH.run('python /home/ubuntu/Downloads/pyclient.py %s &' % mongoDbIp)
finally:
clientSSH.close()
def __run_instance(self, instancename, size, images, security_group, network):
print('Creating a new node ...')
image = [s for s in images if s.name == instancename][0]
node = self.driver.create_node(name = instancename,
size = size,
image = image,
ex_security_groups = [security_group],
ex_keyname = 'switch-engine',
networks = [network])
print('Done.')
print("Waiting for %s ..." % instancename)
self.driver.wait_until_running([node])
self.activeNodes.append(node)
nodes = self.driver.list_nodes()
instanceNode = [s for s in nodes if s.name == instancename][0]
privateIp = instanceNode.private_ips[0]
print('Instance ready.')
print('Attaching a Public IP ...')
ip = self.driver.ex_create_floating_ip()
self.activeIps.append(ip)
self.driver.ex_attach_floating_ip_to_node(node, ip)
print('Done.')
return privateIp
def destroy(self):
print('Destroying the instance on SwitchEngines ...')
for node in self.activeNodes:
node.destroy()
for ip in self.activeIps:
self.driver.ex_delete_floating_ip(ip)
print('Done.')
| true | true |
f726800b4ff9c7c04844b758f410a70599a4f3f6 | 967 | py | Python | mergeforms/from cms10/test_wordTemplate.py | mbronstein/ssa412 | 32de4e44f16cf2044788428da2a3bab271ebcb9a | [
"MIT"
] | null | null | null | mergeforms/from cms10/test_wordTemplate.py | mbronstein/ssa412 | 32de4e44f16cf2044788428da2a3bab271ebcb9a | [
"MIT"
] | null | null | null | mergeforms/from cms10/test_wordTemplate.py | mbronstein/ssa412 | 32de4e44f16cf2044788428da2a3bab271ebcb9a | [
"MIT"
] | null | null | null | from unittest import TestCase, skip
import os
from docxtpl import DocxTemplate
class TestDocxTemplate(TestCase):
def setUp(self):
self.APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
self.FIXTURE_DIR = os.path.join(os.path.dirname(self.APP_DIR), 'fixtures')
self.FORM_DIR = os.path.join(self.APP_DIR, 'static', 'formlib')
def test_render1(self):
doc = DocxTemplate(os.path.join( self.FIXTURE_DIR, 'test_docx.docx'))
context = {'claimant_full_name':"Mary Smith", 'claimant_ssn': '111-22-1234'}
doc.render(context)
doc.save(os.path.join(self.FIXTURE_DIR, 'generated_doc.docx'))
def test_render2(self):
doc = DocxTemplate(os.path.join( self.FIXTURE_DIR,'do-letter-form.docx'))
context = {'claimant_full_name':"Mary Smith", 'claimant_ssn': '111-22-1234'}
doc.render(context)
doc.save(os.path.join(self.FIXTURE_DIR, 'generated_doc2.docx'))
| 34.535714 | 84 | 0.679421 | from unittest import TestCase, skip
import os
from docxtpl import DocxTemplate
class TestDocxTemplate(TestCase):
def setUp(self):
self.APP_DIR = os.path.abspath(os.path.dirname(__file__))
self.FIXTURE_DIR = os.path.join(os.path.dirname(self.APP_DIR), 'fixtures')
self.FORM_DIR = os.path.join(self.APP_DIR, 'static', 'formlib')
def test_render1(self):
doc = DocxTemplate(os.path.join( self.FIXTURE_DIR, 'test_docx.docx'))
context = {'claimant_full_name':"Mary Smith", 'claimant_ssn': '111-22-1234'}
doc.render(context)
doc.save(os.path.join(self.FIXTURE_DIR, 'generated_doc.docx'))
def test_render2(self):
doc = DocxTemplate(os.path.join( self.FIXTURE_DIR,'do-letter-form.docx'))
context = {'claimant_full_name':"Mary Smith", 'claimant_ssn': '111-22-1234'}
doc.render(context)
doc.save(os.path.join(self.FIXTURE_DIR, 'generated_doc2.docx'))
| true | true |
f726812fa813373e14802dbfe2defbcd7a440e6d | 4,637 | py | Python | apps/permissions.py | Github-shipchain/transmission | 867971cdc366ccfd6ef632f39652633c269e7969 | [
"Apache-2.0"
] | 1 | 2019-12-15T13:44:29.000Z | 2019-12-15T13:44:29.000Z | apps/permissions.py | Github-shipchain/transmission | 867971cdc366ccfd6ef632f39652633c269e7969 | [
"Apache-2.0"
] | null | null | null | apps/permissions.py | Github-shipchain/transmission | 867971cdc366ccfd6ef632f39652633c269e7969 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 ShipChain, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db.models import Q
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import permissions, status
from shipchain_common.authentication import get_jwt_from_request
from apps.shipments.models import Shipment, PermissionLink
PROFILES_WALLET_URL = f'{settings.PROFILES_URL}/api/v1/wallet'
def get_user(request):
    """Return (user_id, organization_id) for an authenticated request.

    organization_id is None when the token carries none; both values are
    None for anonymous requests.
    """
    user = request.user
    if not user.is_authenticated:
        return None, None
    return user.id, user.token.get('organization_id', None)
def shipment_owner_access_filter(request):
    """Q filter matching shipments owned by the requesting user or, when the
    user belongs to one, their organization."""
    user_id, organization_id = get_user(request)
    user_q = Q(shipment__owner_id=user_id)
    if organization_id:
        return Q(shipment__owner_id=organization_id) | user_q
    return user_q
def owner_access_filter(request):
    """Q filter matching objects owned by the requesting user or, when the
    user belongs to one, their organization."""
    user_id, organization_id = get_user(request)
    user_q = Q(owner_id=user_id)
    if organization_id:
        return Q(owner_id=organization_id) | user_q
    return user_q
def get_owner_id(request):
    """Effective owner id for new objects: the organization when the user
    belongs to one, otherwise the user id itself."""
    user_id, organization_id = get_user(request)
    if organization_id:
        return organization_id
    return user_id
def has_owner_access(request, obj):
    """True when obj is owned by the requester's organization (if any) or by
    the requester themselves."""
    user_id, organization_id = get_user(request)
    if organization_id and obj.owner_id == organization_id:
        return True
    return obj.owner_id == user_id
def is_carrier(request, shipment):
    """Carrier shipment access: the requester must actively hold the
    shipment's carrier wallet in Profiles, and only read/update methods
    (GET/PATCH) are allowed."""
    url = f'{PROFILES_WALLET_URL}/{shipment.carrier_wallet_id}/?is_active'
    headers = {'Authorization': f'JWT {get_jwt_from_request(request)}'}
    response = settings.REQUESTS_SESSION.get(url, headers=headers)

    wallet_held = response.status_code == status.HTTP_200_OK
    return wallet_held and request.method in ('GET', 'PATCH')
def is_moderator(request, shipment):
    """Moderator shipment access: only possible when the shipment has a
    moderator wallet the requester actively holds; read/update methods
    (GET/PATCH) only."""
    if not shipment.moderator_wallet_id:
        return False

    url = f'{PROFILES_WALLET_URL}/{shipment.moderator_wallet_id}/?is_active'
    headers = {'Authorization': f'JWT {get_jwt_from_request(request)}'}
    response = settings.REQUESTS_SESSION.get(url, headers=headers)

    wallet_held = response.status_code == status.HTTP_200_OK
    return wallet_held and request.method in ('GET', 'PATCH')
def is_shipper(request, shipment):
    """Shipper shipment access: the requester must actively hold the
    shipment's shipper wallet in Profiles; read/update methods (GET/PATCH)
    only."""
    url = f'{PROFILES_WALLET_URL}/{shipment.shipper_wallet_id}/?is_active'
    headers = {'Authorization': f'JWT {get_jwt_from_request(request)}'}
    response = settings.REQUESTS_SESSION.get(url, headers=headers)

    wallet_held = response.status_code == status.HTTP_200_OK
    return wallet_held and request.method in ('GET', 'PATCH')
def shipment_exists(shipment_id):
    """Return the Shipment for *shipment_id* (used by nested routes), or
    False when no such shipment exists."""
    try:
        return Shipment.objects.get(pk=shipment_id)
    except ObjectDoesNotExist:
        return False
def check_permission_link(request, shipment_obj):
    """Authorize access via an optional ?permission_link=<id> query param.

    A valid link grants read-only (GET) access to exactly the shipment it
    was created for; an expired link falls back to the regular owner/wallet
    checks; an unknown link id denies access outright. Without a link the
    regular checks apply.
    """
    permission_link_id = request.query_params.get('permission_link', None)
    if permission_link_id:
        try:
            permission_obj = PermissionLink.objects.get(pk=permission_link_id)
        except ObjectDoesNotExist:
            # Unknown link id: deny rather than fall back to owner checks.
            return False
        if not permission_obj.is_valid:
            # Expired/invalid link: behave as if no link had been supplied.
            return check_has_shipment_owner_access(request, shipment_obj)
        return shipment_obj.pk == permission_obj.shipment.pk and request.method == 'GET'
    return check_has_shipment_owner_access(request, shipment_obj)
def check_has_shipment_owner_access(request, obj):
    """Authenticated requesters with any shipment role (owner, shipper,
    carrier or moderator) get access; roles are checked in that order and
    evaluation stops at the first match."""
    role_checks = (has_owner_access, is_shipper, is_carrier, is_moderator)
    return request.user.is_authenticated and any(check(request, obj) for check in role_checks)
class IsOwner(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it
    """

    def has_object_permission(self, request, view, obj):
        # Permissions are only allowed to the owner of the shipment
        # (the requesting user, or their organization when they have one).
        return has_owner_access(request, obj)
| 35.669231 | 116 | 0.723528 |
from django.db.models import Q
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import permissions, status
from shipchain_common.authentication import get_jwt_from_request
from apps.shipments.models import Shipment, PermissionLink
PROFILES_WALLET_URL = f'{settings.PROFILES_URL}/api/v1/wallet'
def get_user(request):
if request.user.is_authenticated:
return request.user.id, request.user.token.get('organization_id', None)
return None, None
def shipment_owner_access_filter(request):
user_id, organization_id = get_user(request)
return Q(shipment__owner_id=organization_id) | Q(shipment__owner_id=user_id) if organization_id else \
Q(shipment__owner_id=user_id)
def owner_access_filter(request):
user_id, organization_id = get_user(request)
return Q(owner_id=organization_id) | Q(owner_id=user_id) if organization_id else Q(owner_id=user_id)
def get_owner_id(request):
user_id, organization_id = get_user(request)
return organization_id if organization_id else user_id
def has_owner_access(request, obj):
user_id, organization_id = get_user(request)
return (organization_id and obj.owner_id == organization_id) or obj.owner_id == user_id
def is_carrier(request, shipment):
response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.carrier_wallet_id}/?is_active',
headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
def is_moderator(request, shipment):
if shipment.moderator_wallet_id:
response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.moderator_wallet_id}/?is_active',
headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
return False
def is_shipper(request, shipment):
response = settings.REQUESTS_SESSION.get(f'{PROFILES_WALLET_URL}/{shipment.shipper_wallet_id}/?is_active',
headers={'Authorization': f'JWT {get_jwt_from_request(request)}'})
return response.status_code == status.HTTP_200_OK and request.method in ('GET', 'PATCH')
def shipment_exists(shipment_id):
try:
shipment_obj = Shipment.objects.get(pk=shipment_id)
except ObjectDoesNotExist:
return False
return shipment_obj
def check_permission_link(request, shipment_obj):
permission_link_id = request.query_params.get('permission_link', None)
if permission_link_id:
try:
permission_obj = PermissionLink.objects.get(pk=permission_link_id)
except ObjectDoesNotExist:
return False
if not permission_obj.is_valid:
return check_has_shipment_owner_access(request, shipment_obj)
return shipment_obj.pk == permission_obj.shipment.pk and request.method == 'GET'
return check_has_shipment_owner_access(request, shipment_obj)
def check_has_shipment_owner_access(request, obj):
return request.user.is_authenticated and (has_owner_access(request, obj) or is_shipper(request, obj) or
is_carrier(request, obj) or is_moderator(request, obj))
class IsOwner(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return has_owner_access(request, obj)
| true | true |
f7268157ef2a0bd8d69f9adfb748756be3029bea | 5,766 | py | Python | pgoapi/protos/pogoprotos/networking/requests/messages/get_asset_digest_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 14 | 2017-03-28T16:32:24.000Z | 2021-03-13T23:03:57.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/get_asset_digest_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 8 | 2017-03-01T07:56:09.000Z | 2017-08-15T07:37:12.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/get_asset_digest_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 15 | 2017-02-24T01:30:23.000Z | 2021-06-27T08:46:43.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/get_asset_digest_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.enums import platform_pb2 as pogoprotos_dot_enums_dot_platform__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/get_asset_digest_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nFpogoprotos/networking/requests/messages/get_asset_digest_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\x1fpogoprotos/enums/platform.proto\"\xdc\x01\n\x15GetAssetDigestMessage\x12,\n\x08platform\x18\x01 \x01(\x0e\x32\x1a.pogoprotos.enums.Platform\x12\x1b\n\x13\x64\x65vice_manufacturer\x18\x02 \x01(\t\x12\x14\n\x0c\x64\x65vice_model\x18\x03 \x01(\t\x12\x0e\n\x06locale\x18\x04 \x01(\t\x12\x13\n\x0b\x61pp_version\x18\x05 \x01(\r\x12\x10\n\x08paginate\x18\x06 \x01(\x08\x12\x13\n\x0bpage_offset\x18\x07 \x01(\x05\x12\x16\n\x0epage_timestamp\x18\x08 \x01(\x04\x62\x06proto3')
,
dependencies=[pogoprotos_dot_enums_dot_platform__pb2.DESCRIPTOR,])
_GETASSETDIGESTMESSAGE = _descriptor.Descriptor(
name='GetAssetDigestMessage',
full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='platform', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.platform', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_manufacturer', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.device_manufacturer', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_model', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.device_model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.locale', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='app_version', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.app_version', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paginate', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.paginate', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_offset', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.page_offset', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_timestamp', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.page_timestamp', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=149,
serialized_end=369,
)
_GETASSETDIGESTMESSAGE.fields_by_name['platform'].enum_type = pogoprotos_dot_enums_dot_platform__pb2._PLATFORM
DESCRIPTOR.message_types_by_name['GetAssetDigestMessage'] = _GETASSETDIGESTMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAssetDigestMessage = _reflection.GeneratedProtocolMessageType('GetAssetDigestMessage', (_message.Message,), dict(
DESCRIPTOR = _GETASSETDIGESTMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.get_asset_digest_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.GetAssetDigestMessage)
))
_sym_db.RegisterMessage(GetAssetDigestMessage)
# @@protoc_insertion_point(module_scope)
| 47.262295 | 613 | 0.772112 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from pogoprotos.enums import platform_pb2 as pogoprotos_dot_enums_dot_platform__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/get_asset_digest_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nFpogoprotos/networking/requests/messages/get_asset_digest_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\x1fpogoprotos/enums/platform.proto\"\xdc\x01\n\x15GetAssetDigestMessage\x12,\n\x08platform\x18\x01 \x01(\x0e\x32\x1a.pogoprotos.enums.Platform\x12\x1b\n\x13\x64\x65vice_manufacturer\x18\x02 \x01(\t\x12\x14\n\x0c\x64\x65vice_model\x18\x03 \x01(\t\x12\x0e\n\x06locale\x18\x04 \x01(\t\x12\x13\n\x0b\x61pp_version\x18\x05 \x01(\r\x12\x10\n\x08paginate\x18\x06 \x01(\x08\x12\x13\n\x0bpage_offset\x18\x07 \x01(\x05\x12\x16\n\x0epage_timestamp\x18\x08 \x01(\x04\x62\x06proto3')
,
dependencies=[pogoprotos_dot_enums_dot_platform__pb2.DESCRIPTOR,])
_GETASSETDIGESTMESSAGE = _descriptor.Descriptor(
name='GetAssetDigestMessage',
full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='platform', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.platform', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_manufacturer', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.device_manufacturer', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_model', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.device_model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.locale', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='app_version', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.app_version', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='paginate', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.paginate', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_offset', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.page_offset', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_timestamp', full_name='pogoprotos.networking.requests.messages.GetAssetDigestMessage.page_timestamp', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=149,
serialized_end=369,
)
# ---- Generated protobuf plumbing (protoc output; do not edit by hand) ----
# Late-bind the cross-file reference: the 'platform' field uses the Platform
# enum declared in pogoprotos/enums/platform.proto.
_GETASSETDIGESTMESSAGE.fields_by_name['platform'].enum_type = pogoprotos_dot_enums_dot_platform__pb2._PLATFORM
# Register the message descriptor under this file's descriptor and with the
# default symbol database so it can be resolved by full name.
DESCRIPTOR.message_types_by_name['GetAssetDigestMessage'] = _GETASSETDIGESTMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message class from its descriptor via the reflection API;
# the @@protoc_insertion_point markers are tooling anchors and must be kept.
GetAssetDigestMessage = _reflection.GeneratedProtocolMessageType('GetAssetDigestMessage', (_message.Message,), dict(
  DESCRIPTOR = _GETASSETDIGESTMESSAGE,
  __module__ = 'pogoprotos.networking.requests.messages.get_asset_digest_message_pb2'
  # @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.GetAssetDigestMessage)
  ))
_sym_db.RegisterMessage(GetAssetDigestMessage)

# @@protoc_insertion_point(module_scope)
| true | true |
f726815c9d9c875e611e685946fa5bb15c9a3094 | 13,843 | py | Python | 3dversion/g_model.py | ginobilinie/medSynthesis | fee24e955313b4032855901327a7485390866e91 | [
"MIT"
] | 77 | 2017-03-24T11:51:05.000Z | 2022-02-17T00:14:52.000Z | 3dversion/g_model.py | liu123-t/medSynthesis | 7373f2974356bf1a944ed25183273c3e132d63af | [
"MIT"
] | 19 | 2018-02-03T15:55:49.000Z | 2021-11-16T10:27:22.000Z | 3dversion/g_model.py | liu123-t/medSynthesis | 7373f2974356bf1a944ed25183273c3e132d63af | [
"MIT"
] | 27 | 2017-03-18T09:17:09.000Z | 2021-06-05T02:59:22.000Z | from __future__ import division
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from utils import *
from loss_functions import *
from scipy.misc import imsave
class MR2CT(object):
    """3D fully-convolutional MR-to-CT synthesis model (TensorFlow r0.x, Python 2).

    Maps 32x32x32 MR patches to smaller CT patches and trains with an Lp
    regression loss; `test_1_subject` reconstructs a whole CT volume by
    sliding-window prediction with overlap averaging.
    """

    def __init__(self, sess, batch_size=10, depth_MR=32, height_MR=32,
                 width_MR=32, depth_CT=32, height_CT=24,
                 width_CT=24, l_num=2, wd=0.0005, checkpoint_dir=None, path_patients_h5=None, learning_rate=2e-8):
        """
        Args:
            sess: TensorFlow session used to build and run the graph.
            batch_size: Number of patch pairs per training batch.
            depth_MR, height_MR, width_MR: Input MR patch size (default 32^3).
            depth_CT, height_CT, width_CT: Target CT patch size.
                NOTE(review): depth_CT defaults to 32 while height/width
                default to 24, yet test_1_subject uses 24^3 patches - confirm
                which default is intended.
            l_num: Order p of the Lp regression loss (2 -> squared error).
            wd: Weight-decay coefficient forwarded to the conv layers.
            checkpoint_dir: Directory for saving/restoring checkpoints.
            path_patients_h5: Path to the HDF5 training patches consumed by
                Generator_3D_patches.
            learning_rate: Adam learning rate used in train().
        """
        self.sess = sess
        self.l_num=l_num
        self.wd=wd
        self.learning_rate=learning_rate
        self.batch_size=batch_size
        self.depth_MR=depth_MR
        self.height_MR=height_MR
        self.width_MR=width_MR
        self.depth_CT=depth_CT
        self.height_CT=height_CT
        self.width_CT=width_CT
        self.checkpoint_dir = checkpoint_dir
        # Streaming batch generator over the HDF5 training patches
        # (Generator_3D_patches presumably comes from the `utils` star import).
        self.data_generator = Generator_3D_patches(path_patients_h5,self.batch_size)
        # The graph is built eagerly at construction time.
        self.build_model()

    def build_model(self):
        # Placeholders: 5-D NDHWC tensors with a single channel.
        self.inputMR=tf.placeholder(tf.float32, shape=[None, self.depth_MR, self.height_MR, self.width_MR, 1])
        self.CT_GT=tf.placeholder(tf.float32, shape=[None, self.depth_CT, self.height_CT, self.width_CT, 1])
        batch_size_tf = tf.shape(self.inputMR)[0]  #variable batchsize so we can test here
        # Toggles batch-norm train/inference behaviour inside the conv layers.
        self.train_phase = tf.placeholder(tf.bool, name='phase_train')
        self.G = self.generator(self.inputMR,batch_size_tf)
        print 'shape output G ',self.G.get_shape()
        # global_step is persisted in checkpoints so training can resume.
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        # Lp regression loss between predicted and ground-truth CT patches.
        self.g_loss=lp_loss(self.G, self.CT_GT, self.l_num, batch_size_tf)
        print 'learning rate ',self.learning_rate
        # The optimizer is deliberately created later in train() so only the
        # Adam slot variables can be re-initialized after a checkpoint restore.
        #self.g_optim =tf.train.AdamOptimizer(self.learning_rate).minimize(self.g_loss)
        #tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.g_loss)
        self.merged = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter("./summaries", self.sess.graph)
        self.saver = tf.train.Saver()

    def generator(self,inputMR,batch_size_tf):
        # 3D FCN regressor. The two VALID 5x5x5 convolutions each shrink every
        # spatial dim by 4 (32 -> 28 -> 24); SAME-padded 3x3x3 layers change
        # only the channel count. NOTE(review): the pre-existing trailing #NN
        # size comments (30/28/26/24) do not match that arithmetic - verify.
        # NOTE(review): batch_size_tf is unused here; presumably kept for
        # signature parity with a GAN variant - confirm before removing.
        ######## FCN for the 32x32x32 to 24x24x24 , added dilaion by yourself####################################
        conv1_a = conv_op_3d_bn(inputMR, name="conv1_a", kh=5, kw=5, kz=5, n_out=48, dh=1, dw=1, dz=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)#30
        conv2_a = conv_op_3d_bn(conv1_a, name="conv2_a", kh=3, kw=3, kz=3, n_out=96, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)
        conv3_a = conv_op_3d_bn(conv2_a, name="conv3_a", kh=3, kw=3, kz=3, n_out=128, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)#28
        conv4_a = conv_op_3d_bn(conv3_a, name="conv4_a", kh=5, kw=5, kz=5, n_out=96, dh=1, dw=1, dz=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)
        conv5_a = conv_op_3d_bn(conv4_a, name="conv5_a", kh=3, kw=3, kz=3, n_out=48, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)#26
        conv6_a = conv_op_3d_bn(conv5_a, name="conv6_a", kh=3, kw=3, kz=3, n_out=32, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)
        # Final 1-channel regression output: no ReLU so negative CT intensities
        # (Hounsfield-like values) are representable.
        #conv7_a = conv_op_3d_bn(conv6_a, name="conv7_a", kh=3, kw=3, kz=3, n_out=1, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)#24
        conv7_a = conv_op_3d_norelu(conv6_a, name="conv7_a", kh=3, kw=3, kz=3, n_out=1, dh=1, dw=1, dz=1, wd=self.wd, padding='SAME')#24 I modified it here,dong
        self.MR_16_downsampled=conv7_a#JUST FOR TEST
        return conv7_a

    def train(self, config):
        # Training loop driver. `config` must provide: iterations, show_every,
        # test_every, save_every.
        # NOTE(review): machine-specific hard-coded test-subject path.
        path_test='/home/dongnie/warehouse/prostate/ganData64to24Test'
        print 'global_step ', self.global_step.name
        print 'trainable vars '
        for v in tf.trainable_variables():
            print v.name
        # Try to resume from a checkpoint; only initialize from scratch when
        # no checkpoint could be restored.
        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
            self.sess.run(tf.initialize_all_variables())
        temp = set(tf.all_variables())
        start = self.global_step.eval() # get last global_step
        print("Start from:", start)
        ############ This is for only initializing adam vars####################
        # Snapshot existing variables, create the Adam optimizer (which adds
        # its slot variables), then initialize ONLY the newly created ones so
        # restored weights are not clobbered.
        # NOTE(review): `temp` is assigned twice; this second snapshot is the
        # one actually used below.
        temp = set(tf.all_variables())
        self.g_optim =tf.train.AdamOptimizer(self.learning_rate).minimize(self.g_loss)
        self.sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        print("Start after adam (should be the same):", start)
        #####################################
        for it in range(start,config.iterations):
            X,y=self.data_generator.next()
            # Update G network
            _, loss_eval, layer_out_eval = self.sess.run([self.g_optim, self.g_loss, self.MR_16_downsampled],
                    feed_dict={ self.inputMR: X, self.CT_GT:y, self.train_phase: True })
            self.global_step.assign(it).eval() # set and update(eval) global_step with index, i
            if it%config.show_every==0:#show loss every show_every its
                print 'it ',it,'loss ',loss_eval
                print 'layer min ', np.min(layer_out_eval)
                print 'layer max ', np.max(layer_out_eval)
                print 'layer mean ', np.mean(layer_out_eval)
                # Retained debugging code for inspecting per-variable gradients
                # and filter statistics:
                # print 'trainable vars '
                # for v in tf.trainable_variables():
                #     print v.name
                #     data_var=self.sess.run(v)
                #     grads = tf.gradients(self.g_loss, v)
                #     var_grad_val = self.sess.run(grads, feed_dict={self.inputMR: X, self.CT_GT:y })
                #     print 'grad min ', np.min(var_grad_val)
                #     print 'grad max ', np.max(var_grad_val)
                #     print 'grad mean ', np.mean(var_grad_val)
                #     #print 'shape ',data_var.shape
                #     print 'filter min ', np.min(data_var)
                #     print 'filter max ', np.max(data_var)
                #     print 'filter mean ', np.mean(data_var)
            #self.writer.add_summary(summary, it)
            # print 'trainable vars '
            # Periodic full-volume evaluation on one held-out subject
            # (sitk / psnr presumably come from the `utils` star import).
            if it%config.test_every==0 and it!=0:#==0:#test one subject
                mr_test_itk=sitk.ReadImage(os.path.join(path_test,'prostate_1to1_MRI.nii'))
                ct_test_itk=sitk.ReadImage(os.path.join(path_test,'prostate_1to1_CT.nii'))
                mrnp=sitk.GetArrayFromImage(mr_test_itk)
                #mu=np.mean(mrnp)
                #mrnp=(mrnp-mu)/(np.max(mrnp)-np.min(mrnp))
                ctnp=sitk.GetArrayFromImage(ct_test_itk)
                print mrnp.dtype
                print ctnp.dtype
                # MR patch 32^3, CT patch 24^3, sliding-window step [5,5,2].
                ct_estimated=self.test_1_subject(mrnp,ctnp,[32,32,32],[24,24,24],[5,5,2])
                psnrval=psnr(ct_estimated,ctnp)
                print ct_estimated.dtype
                print ctnp.dtype
                print 'psnr= ',psnrval
                volout=sitk.GetImageFromArray(ct_estimated)
                sitk.WriteImage(volout,'ct_estimated_{}'.format(it)+'.nii.gz')
            if it%config.save_every==0:#save weights every save_every iterations
                self.save(self.checkpoint_dir, it)

    def evaluate(self,patch_MR):
        """Predict the CT patch for a single 3-D MR patch.

        patch_MR is a np array of shape [D,H,W]; it is expanded to the 5-D
        [1,D,H,W,1] layout the graph expects, run through the generator in
        inference mode (train_phase=False), and the squeezed [D,H,W]
        prediction is returned.
        """
        patch_MR=np.expand_dims(patch_MR,axis=0)#[1,H,W,nchans]
        patch_MR=np.expand_dims(patch_MR,axis=4)#[1,H,W,nchans]
        patch_CT_pred, MR16_eval= self.sess.run([self.G,self.MR_16_downsampled],
                feed_dict={ self.inputMR: patch_MR, self.train_phase: False})
        patch_CT_pred=np.squeeze(patch_CT_pred)#[Z,H,W]
        #imsave('mr32.png',np.squeeze(MR16_eval[0,:,:,2]))
        #imsave('ctpred.png',np.squeeze(patch_CT_pred[0,:,:,0]))
        #print 'mean of layer ',np.mean(MR16_eval)
        #print 'min ct estimated ',np.min(patch_CT_pred)
        #print 'max ct estimated ',np.max(patch_CT_pred)
        #print 'mean of ctpatch estimated ',np.mean(patch_CT_pred)
        return patch_CT_pred

    def test_1_subject(self,MR_image,CT_GT,MR_patch_sz,CT_patch_sz,step):
        """
        receives an MR image and returns an estimated CT image of the same size

        The MR volume is mirror-padded by the half-difference between the MR
        and CT patch sizes, then scanned with a sliding window of stride
        `step`; overlapping patch predictions are accumulated and averaged.
        """
        matFA=MR_image
        matSeg=CT_GT
        dFA=MR_patch_sz
        dSeg=CT_patch_sz
        eps=1e-5
        [row,col,leng]=matFA.shape
        # Half of the MR/CT patch-size difference per axis: how much context
        # the network consumes around each output patch.
        margin1=int((dFA[0]-dSeg[0])/2)
        margin2=int((dFA[1]-dSeg[1])/2)
        margin3=int((dFA[2]-dSeg[2])/2)
        cubicCnt=0
        marginD=[margin1,margin2,margin3]
        print 'matFA shape is ',matFA.shape
        matFAOut=np.zeros([row+2*marginD[0],col+2*marginD[1],leng+2*marginD[2]])
        print 'matFAOut shape is ',matFAOut.shape
        # Centre the original volume inside the padded buffer.
        matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA
        # Earlier (non-mirrored) padding variant kept for reference:
        # matFAOut[0:marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[0:marginD[0],:,:] #we'd better flip it along the first dimension
        # matFAOut[row+marginD[0]:matFAOut.shape[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[row-marginD[0]:matFA.shape[0],:,:] #we'd better flip it along the 1st dimension
        # matFAOut[marginD[0]:row+marginD[0],0:marginD[1],marginD[2]:leng+marginD[2]]=matFA[:,0:marginD[1],:] #we'd better flip it along the 2nd dimension
        # matFAOut[marginD[0]:row+marginD[0],col+marginD[1]:matFAOut.shape[1],marginD[2]:leng+marginD[2]]=matFA[:,col-marginD[1]:matFA.shape[1],:] #we'd better to flip it along the 2nd dimension
        # matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],0:marginD[2]]=matFA[:,:,0:marginD[2]] #we'd better flip it along the 3rd dimension
        # matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]+leng:matFAOut.shape[2]]=matFA[:,:,leng-marginD[2]:matFA.shape[2]]
        # Mirror-pad each axis (reversed slices reflect the border voxels).
        if margin1!=0:
            matFAOut[0:marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[marginD[0]-1::-1,:,:] #reverse 0:marginD[0]
            matFAOut[row+marginD[0]:matFAOut.shape[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[matFA.shape[0]-1:row-marginD[0]-1:-1,:,:] #we'd better flip it along the 1st dimension
        if margin2!=0:
            matFAOut[marginD[0]:row+marginD[0],0:marginD[1],marginD[2]:leng+marginD[2]]=matFA[:,marginD[1]-1::-1,:] #we'd flip it along the 2nd dimension
            matFAOut[marginD[0]:row+marginD[0],col+marginD[1]:matFAOut.shape[1],marginD[2]:leng+marginD[2]]=matFA[:,matFA.shape[1]-1:col-marginD[1]-1:-1,:] #we'd flip it along the 2nd dimension
        if margin3!=0:
            matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],0:marginD[2]]=matFA[:,:,marginD[2]-1::-1] #we'd better flip it along the 3rd dimension
            matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]+leng:matFAOut.shape[2]]=matFA[:,:,matFA.shape[2]-1:leng-marginD[2]-1:-1]
        # matOut accumulates overlapping predictions; `used` counts how many
        # predictions covered each voxel (eps avoids division by zero at
        # voxels never visited by the sliding window).
        matOut=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))
        used=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))+eps
        #fid=open('trainxxx_list.txt','a');
        for i in range(0,row-dSeg[0],step[0]):
            for j in range(0,col-dSeg[1],step[1]):
                for k in range(0,leng-dSeg[2],step[2]):
                    volSeg=matSeg[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]
                    #print 'volSeg shape is ',volSeg.shape
                    # MR sub-volume includes the extra margin context.
                    volFA=matFAOut[i:i+dSeg[0]+2*marginD[0],j:j+dSeg[1]+2*marginD[1],k:k+dSeg[2]+2*marginD[2]]
                    #print 'volFA shape is ',volFA.shape
                    #mynet.blobs['dataMR'].data[0,0,...]=volFA
                    #mynet.forward()
                    #temppremat = mynet.blobs['softmax'].data[0].argmax(axis=0) #Note you have add softmax layer in deploy prototxt
                    temppremat=self.evaluate(volFA)
                    #print 'patchout shape ',temppremat.shape
                    #temppremat=volSeg
                    matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+temppremat;
                    used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+1;
        # Average the overlapping predictions per voxel.
        matOut=matOut/used
        return matOut

    def save(self, checkpoint_dir, step):
        """Save a checkpoint for the current session at training step `step`."""
        model_name = "MR2CT.model"
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess,
                        os.path.join(checkpoint_dir, model_name),
                        global_step=step)

    def load(self, checkpoint_dir):
        """Restore the latest checkpoint; return True on success, else False."""
        print(" [*] Reading checkpoints...")
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            return True
        else:
            return False
| 53.242308 | 200 | 0.60955 | from __future__ import division
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from utils import *
from loss_functions import *
from scipy.misc import imsave
class MR2CT(object):
def __init__(self, sess, batch_size=10, depth_MR=32, height_MR=32,
width_MR=32, depth_CT=32, height_CT=24,
width_CT=24, l_num=2, wd=0.0005, checkpoint_dir=None, path_patients_h5=None, learning_rate=2e-8):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
output_size: (optional) The resolution in pixels of the images. [64]
y_dim: (optional) Dimension of dim for y. [None]
z_dim: (optional) Dimension of dim for Z. [100]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.sess = sess
self.l_num=l_num
self.wd=wd
self.learning_rate=learning_rate
self.batch_size=batch_size
self.depth_MR=depth_MR
self.height_MR=height_MR
self.width_MR=width_MR
self.depth_CT=depth_CT
self.height_CT=height_CT
self.width_CT=width_CT
self.checkpoint_dir = checkpoint_dir
self.data_generator = Generator_3D_patches(path_patients_h5,self.batch_size)
self.build_model()
def build_model(self):
self.inputMR=tf.placeholder(tf.float32, shape=[None, self.depth_MR, self.height_MR, self.width_MR, 1])
self.CT_GT=tf.placeholder(tf.float32, shape=[None, self.depth_CT, self.height_CT, self.width_CT, 1])
batch_size_tf = tf.shape(self.inputMR)[0]
self.train_phase = tf.placeholder(tf.bool, name='phase_train')
self.G = self.generator(self.inputMR,batch_size_tf)
print 'shape output G ',self.G.get_shape()
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.g_loss=lp_loss(self.G, self.CT_GT, self.l_num, batch_size_tf)
print 'learning rate ',self.learning_rate
self.merged = tf.merge_all_summaries()
self.writer = tf.train.SummaryWriter("./summaries", self.sess.graph)
self.saver = tf.train.Saver()
def generator(self,inputMR,batch_size_tf):
tart from:", start)
ubject(mrnp,ctnp,[32,32,32],[24,24,24],[5,5,2])
psnrval=psnr(ct_estimated,ctnp)
print ct_estimated.dtype
print ctnp.dtype
print 'psnr= ',psnrval
volout=sitk.GetImageFromArray(ct_estimated)
sitk.WriteImage(volout,'ct_estimated_{}'.format(it)+'.nii.gz')
if it%config.save_every==0:
self.save(self.checkpoint_dir, it)
def evaluate(self,patch_MR):
""" patch_MR is a np array of shape [H,W,nchans]
"""
patch_MR=np.expand_dims(patch_MR,axis=0)
patch_MR=np.expand_dims(patch_MR,axis=4)
patch_CT_pred, MR16_eval= self.sess.run([self.G,self.MR_16_downsampled],
feed_dict={ self.inputMR: patch_MR, self.train_phase: False})
patch_CT_pred=np.squeeze(patch_CT_pred)
return patch_CT_pred
def test_1_subject(self,MR_image,CT_GT,MR_patch_sz,CT_patch_sz,step):
"""
receives an MR image and returns an estimated CT image of the same size
"""
matFA=MR_image
matSeg=CT_GT
dFA=MR_patch_sz
dSeg=CT_patch_sz
eps=1e-5
[row,col,leng]=matFA.shape
margin1=int((dFA[0]-dSeg[0])/2)
margin2=int((dFA[1]-dSeg[1])/2)
margin3=int((dFA[2]-dSeg[2])/2)
cubicCnt=0
marginD=[margin1,margin2,margin3]
print 'matFA shape is ',matFA.shape
matFAOut=np.zeros([row+2*marginD[0],col+2*marginD[1],leng+2*marginD[2]])
print 'matFAOut shape is ',matFAOut.shape
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA
ape[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[row-marginD[0]:matFA.shape[0],:,:] #we'd better flip it along the 1st dimension
],col+marginD[1]:matFAOut.shape[1],marginD[2]:leng+marginD[2]]=matFA[:,col-marginD[1]:matFA.shape[1],:] #we'd better to flip it along the 2nd dimension
],marginD[1]:col+marginD[1],marginD[2]+leng:matFAOut.shape[2]]=matFA[:,:,leng-marginD[2]:matFA.shape[2]]
if margin1!=0:
matFAOut[0:marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[marginD[0]-1::-1,:,:] #reverse 0:marginD[0]
matFAOut[row+marginD[0]:matFAOut.shape[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[matFA.shape[0]-1:row-marginD[0]-1:-1,:,:] #we'd better flip it along the 1st dimension
if margin2!=0:
matFAOut[marginD[0]:row+marginD[0],0:marginD[1],marginD[2]:leng+marginD[2]]=matFA[:,marginD[1]-1::-1,:]
matFAOut[marginD[0]:row+marginD[0],col+marginD[1]:matFAOut.shape[1],marginD[2]:leng+marginD[2]]=matFA[:,matFA.shape[1]-1:col-marginD[1]-1:-1,:] #we'd flip it along the 2nd dimension
if margin3!=0:
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],0:marginD[2]]=matFA[:,:,marginD[2]-1::-1]
matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]+leng:matFAOut.shape[2]]=matFA[:,:,matFA.shape[2]-1:leng-marginD[2]-1:-1]
matOut=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))
used=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))+eps
#fid=open('trainxxx_list.txt','a');
for i in range(0,row-dSeg[0],step[0]):
for j in range(0,col-dSeg[1],step[1]):
for k in range(0,leng-dSeg[2],step[2]):
volSeg=matSeg[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]
#print 'volSeg shape is ',volSeg.shape
volFA=matFAOut[i:i+dSeg[0]+2*marginD[0],j:j+dSeg[1]+2*marginD[1],k:k+dSeg[2]+2*marginD[2]]
#print 'volFA shape is ',volFA.shape
#mynet.blobs['dataMR'].data[0,0,...]=volFA
#mynet.forward()
#temppremat = mynet.blobs['softmax'].data[0].argmax(axis=0) #Note you have add softmax layer in deploy prototxt
temppremat=self.evaluate(volFA)
#print 'patchout shape ',temppremat.shape
#temppremat=volSeg
matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+temppremat;
used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+1;
matOut=matOut/used
return matOut
def save(self, checkpoint_dir, step):
model_name = "MR2CT.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
return True
else:
return False
| false | true |
f726822728d9822a12dc779ca59f2edd95742b2b | 812 | py | Python | vendimaenv/lib/python2.7/site-packages/funcy/py2.py | soru13/vendimia | ebcf85473855e6f990b1a49574ac669fdd4d443e | [
"MIT"
] | null | null | null | vendimaenv/lib/python2.7/site-packages/funcy/py2.py | soru13/vendimia | ebcf85473855e6f990b1a49574ac669fdd4d443e | [
"MIT"
] | 5 | 2020-02-11T23:26:24.000Z | 2022-01-13T00:39:54.000Z | vendimaenv/lib/python2.7/site-packages/funcy/py2.py | soru13/vendimia | ebcf85473855e6f990b1a49574ac669fdd4d443e | [
"MIT"
] | null | null | null | import sys
from .calc import *
from .colls import *
from .tree import *
from .decorators import *
from .funcolls import *
from .funcs import *
from .seqs import *
from .types import *
from .strings import *
from .flow import *
from .objects import *
from .debug import *
from .primitives import *
# Setup __all__: concatenate the __all__ of every submodule star-imported above
# so `from funcy import *` re-exports the full public API.
modules = ('calc', 'colls', 'tree', 'decorators', 'funcolls', 'funcs', 'seqs', 'types',
           'strings', 'flow', 'objects', 'debug', 'primitives')
__all__ = cat(sys.modules['funcy.' + m].__all__ for m in modules)

# Python 2 style zip() for Python 3: on Py3 the builtin returns an iterator,
# so shadow it with a list-returning wrapper for cross-version API parity.
from .cross import PY3
if PY3:
    _zip = zip
    def zip(*seqs):
        """List zip() version."""
        return list(_zip(*seqs))
    __all__ += ['zip']  # HACK: using this instead of .append() to not trigger PyCharm
else:
    # On Py2 the builtin already returns a list; alias it at module level so
    # `from funcy.py2 import zip` works the same on both versions.
    zip = zip
| 23.882353 | 87 | 0.64532 | import sys
from .calc import *
from .colls import *
from .tree import *
from .decorators import *
from .funcolls import *
from .funcs import *
from .seqs import *
from .types import *
from .strings import *
from .flow import *
from .objects import *
from .debug import *
from .primitives import *
modules = ('calc', 'colls', 'tree', 'decorators', 'funcolls', 'funcs', 'seqs', 'types',
'strings', 'flow', 'objects', 'debug', 'primitives')
__all__ = cat(sys.modules['funcy.' + m].__all__ for m in modules)
from .cross import PY3
if PY3:
_zip = zip
def zip(*seqs):
return list(_zip(*seqs))
__all__ += ['zip']
else:
zip = zip
| true | true |
f726841a3369bd4d21c176bdb301ddb10b209714 | 160 | py | Python | scenarios/api_key_delete/executable.py | timgates42/balanced-python | 1df86b45c36a97ec2e214480c6806c4df3c79860 | [
"MIT"
] | 12 | 2015-04-12T06:18:33.000Z | 2021-03-03T23:54:19.000Z | scenarios/api_key_delete/executable.py | timgates42/balanced-python | 1df86b45c36a97ec2e214480c6806c4df3c79860 | [
"MIT"
] | 1 | 2021-11-24T20:10:19.000Z | 2021-11-24T20:10:19.000Z | scenarios/api_key_delete/executable.py | timgates42/balanced-python | 1df86b45c36a97ec2e214480c6806c4df3c79860 | [
"MIT"
] | 14 | 2015-03-23T17:52:06.000Z | 2021-11-24T11:04:15.000Z | import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
key = balanced.APIKey.fetch('/api_keys/AK3DQGzROuoRYulKXMQdHBxX')
key.delete() | 26.666667 | 65 | 0.83125 | import balanced
balanced.configure('ak-test-2eKlj1ZDfAcZSARMf3NMhBHywDej0avSY')
key = balanced.APIKey.fetch('/api_keys/AK3DQGzROuoRYulKXMQdHBxX')
key.delete() | true | true |
f726841edd23cffe106d88311ba375ae4ca2b996 | 7,722 | py | Python | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | null | null | null | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | null | null | null | cornac/models/hft/recom_hft.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | 1 | 2020-03-19T13:58:33.000Z | 2020-03-19T13:58:33.000Z | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from ..recommender import Recommender
from ...exception import ScoreException
class HFT(Recommender):
    """Hidden Factors and Hidden Topics

    Parameters
    ----------
    name: string, default: 'HFT'
        The name of the recommender model.

    k: int, optional, default: 10
        The dimension of the latent factors.

    max_iter: int, optional, default: 50
        Maximum number of iterations for EM.

    grad_iter: int, optional, default: 50
        Maximum number of iterations for L-BFGS.

    lambda_text: float, default: 0.1
        Weight of corpus likelihood in objective function.

    l2_reg: float, default: 0.001
        Regularization for user item latent factors.

    vocab_size: int, optional, default: 8000
        Size of vocabulary for review text.

    init_params: dictionary, optional, default: None
        List of initial parameters, e.g., init_params = {'alpha': alpha, 'beta_u': beta_u,
        'beta_i': beta_i, 'gamma_u': gamma_u, 'gamma_i': gamma_i}

        alpha: float
            Model offset, optional initialization via init_params.

        beta_u: ndarray. shape (n_user, 1)
            User biases, optional initialization via init_params.

        beta_i: ndarray. shape (n_item, 1)
            Item biases, optional initialization via init_params.

        gamma_u: ndarray, shape (n_users,k)
            The user latent factors, optional initialization via init_params.

        gamma_i: ndarray, shape (n_items,k)
            The item latent factors, optional initialization via init_params.

    trainable: boolean, optional, default: True
        When False, the model will not be re-trained, and input of pre-trained parameters are required.

    verbose: boolean, optional, default: True
        When True, some running logs are displayed.

    seed: int, optional, default: None
        Random seed for weight initialization.

    References
    ----------
    Julian McAuley, Jure Leskovec. "Hidden Factors and Hidden Topics: Understanding Rating Dimensions with Review Text"
    RecSys '13 Proceedings of the 7th ACM conference on Recommender systems Pages 165-172
    """

    def __init__(self, name='HFT', k=10, max_iter=50, grad_iter=50,
                 lambda_text=0.1, l2_reg=0.001, vocab_size=8000,
                 init_params=None, trainable=True, verbose=True, seed=None):
        super().__init__(name=name, trainable=trainable, verbose=verbose)
        self.k = k
        self.lambda_text = lambda_text
        self.l2_reg = l2_reg
        self.grad_iter = grad_iter
        # NOTE(review): name/verbose are presumably already stored by the base
        # constructor above; these re-assignments look redundant but harmless.
        self.name = name
        self.max_iter = max_iter
        self.verbose = verbose
        # Treat a missing/None init_params as "no warm start".
        self.init_params = {} if not init_params else init_params
        self.seed = seed
        self.vocab_size = vocab_size

    def fit(self, train_set, val_set=None):
        """Fit the model to observations.

        Parameters
        ----------
        train_set: :obj:`cornac.data.Dataset`, required
            User-Item preference data as well as additional modalities.

        val_set: :obj:`cornac.data.Dataset`, optional, default: None
            User-Item preference data for model selection purposes (e.g., early stopping).

        Returns
        -------
        self : object
        """
        Recommender.fit(self, train_set, val_set)

        from ...utils.init_utils import normal

        self.n_item = self.train_set.num_items
        self.n_user = self.train_set.num_users

        # Warm-start each parameter from init_params when provided, otherwise
        # initialize with the global rating mean (alpha) or small Gaussian
        # noise seeded by self.seed (biases and factor matrices).
        self.alpha = self.init_params.get('alpha', train_set.global_mean)
        self.beta_u = self.init_params.get('beta_u', normal(self.n_user, std=0.01, random_state=self.seed))
        self.beta_i = self.init_params.get('beta_i', normal(self.n_item, std=0.01, random_state=self.seed))
        self.gamma_u = self.init_params.get('gamma_u', normal((self.n_user, self.k), std=0.01, random_state=self.seed))
        self.gamma_i = self.init_params.get('gamma_i', normal((self.n_item, self.k), std=0.01, random_state=self.seed))

        if self.trainable:
            self._fit_hft()

        return self

    @staticmethod
    def _build_data(csr_mat):
        # Decompose a CSR matrix into two parallel per-row lists:
        # index_list[i] holds the column indices of row i and rating_list[i]
        # the corresponding non-zero values (slices delimited by indptr).
        index_list = []
        rating_list = []
        for i in range(csr_mat.shape[0]):
            j, k = csr_mat.indptr[i], csr_mat.indptr[i + 1]
            index_list.append(csr_mat.indices[j:k])
            rating_list.append(csr_mat.data[j:k])
        return index_list, rating_list

    def _fit_hft(self):
        """Run the alternating topic-assignment / parameter-update training loop."""
        from .hft import Model
        from tqdm import trange

        # document data
        bow_mat = self.train_set.item_text.batch_bow(np.arange(self.n_item), keep_sparse=True)
        documents, _ = self._build_data(bow_mat)  # bag of word feature
        # Rating data: per-user and per-item (transposed) views of the matrix.
        user_data = self._build_data(self.train_set.matrix)
        item_data = self._build_data(self.train_set.matrix.T.tocsr())

        model = Model(n_user=self.n_user, n_item=self.n_item, alpha=self.alpha, beta_u=self.beta_u, beta_i=self.beta_i,
                      gamma_u=self.gamma_u, gamma_i=self.gamma_i, n_vocab=self.vocab_size, k=self.k,
                      lambda_text=self.lambda_text, l2_reg=self.l2_reg, grad_iter=self.grad_iter)

        model.init_count(docs=documents)

        # training: each outer iteration reassigns word topics (E-like step)
        # then updates the rating/text parameters (M-like step).
        loop = trange(self.max_iter, disable=not self.verbose)
        for _ in loop:
            model.assign_word_topics(docs=documents)
            loss = model.update_params(rating_data=(user_data, item_data))
            loop.set_postfix(loss=loss)

        # Pull the learned parameters back onto the recommender.
        self.alpha, self.beta_u, self.beta_i, self.gamma_u, self.gamma_i = model.get_parameter()

        if self.verbose:
            print('Learning completed!')

    def score(self, user_idx, item_idx=None):
        """Predict the scores/ratings of a user for an item.

        Parameters
        ----------
        user_idx: int, required
            The index of the user for whom to perform score prediction.

        item_idx: int, optional, default: None
            The index of the item for that to perform score prediction.
            If None, scores for all known items will be returned.

        Returns
        -------
        res : A scalar or a Numpy array
            Relative scores that the user gives to the item or to all known items
        """
        if item_idx is None:
            if self.train_set.is_unk_user(user_idx):
                raise ScoreException("Can't make score prediction for (user_id=%d)" % user_idx)

            # Vectorized over all items: alpha + b_u + b_i + <gamma_i, gamma_u>.
            known_item_scores = self.alpha + self.beta_u[user_idx] + self.beta_i + self.gamma_i.dot(
                self.gamma_u[user_idx, :])
            return known_item_scores
        else:
            if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
                raise ScoreException("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx))

            user_pred = self.alpha + self.beta_u[user_idx] + self.beta_i[item_idx] + self.gamma_i[item_idx, :].dot(
                self.gamma_u[user_idx, :])
            return user_pred
| 38.41791 | 119 | 0.642191 |
import numpy as np
from ..recommender import Recommender
from ...exception import ScoreException
class HFT(Recommender):
def __init__(self, name='HFT', k=10, max_iter=50, grad_iter=50,
lambda_text=0.1, l2_reg=0.001, vocab_size=8000,
init_params=None, trainable=True, verbose=True, seed=None):
super().__init__(name=name, trainable=trainable, verbose=verbose)
self.k = k
self.lambda_text = lambda_text
self.l2_reg = l2_reg
self.grad_iter = grad_iter
self.name = name
self.max_iter = max_iter
self.verbose = verbose
self.init_params = {} if not init_params else init_params
self.seed = seed
self.vocab_size = vocab_size
def fit(self, train_set, val_set=None):
Recommender.fit(self, train_set, val_set)
from ...utils.init_utils import normal
self.n_item = self.train_set.num_items
self.n_user = self.train_set.num_users
self.alpha = self.init_params.get('alpha', train_set.global_mean)
self.beta_u = self.init_params.get('beta_u', normal(self.n_user, std=0.01, random_state=self.seed))
self.beta_i = self.init_params.get('beta_i', normal(self.n_item, std=0.01, random_state=self.seed))
self.gamma_u = self.init_params.get('gamma_u', normal((self.n_user, self.k), std=0.01, random_state=self.seed))
self.gamma_i = self.init_params.get('gamma_i', normal((self.n_item, self.k), std=0.01, random_state=self.seed))
if self.trainable:
self._fit_hft()
return self
@staticmethod
def _build_data(csr_mat):
index_list = []
rating_list = []
for i in range(csr_mat.shape[0]):
j, k = csr_mat.indptr[i], csr_mat.indptr[i + 1]
index_list.append(csr_mat.indices[j:k])
rating_list.append(csr_mat.data[j:k])
return index_list, rating_list
def _fit_hft(self):
from .hft import Model
from tqdm import trange
bow_mat = self.train_set.item_text.batch_bow(np.arange(self.n_item), keep_sparse=True)
documents, _ = self._build_data(bow_mat)
user_data = self._build_data(self.train_set.matrix)
item_data = self._build_data(self.train_set.matrix.T.tocsr())
model = Model(n_user=self.n_user, n_item=self.n_item, alpha=self.alpha, beta_u=self.beta_u, beta_i=self.beta_i,
gamma_u=self.gamma_u, gamma_i=self.gamma_i, n_vocab=self.vocab_size, k=self.k,
lambda_text=self.lambda_text, l2_reg=self.l2_reg, grad_iter=self.grad_iter)
model.init_count(docs=documents)
loop = trange(self.max_iter, disable=not self.verbose)
for _ in loop:
model.assign_word_topics(docs=documents)
loss = model.update_params(rating_data=(user_data, item_data))
loop.set_postfix(loss=loss)
self.alpha, self.beta_u, self.beta_i, self.gamma_u, self.gamma_i = model.get_parameter()
if self.verbose:
print('Learning completed!')
def score(self, user_idx, item_idx=None):
if item_idx is None:
if self.train_set.is_unk_user(user_idx):
raise ScoreException("Can't make score prediction for (user_id=%d)" % user_idx)
known_item_scores = self.alpha + self.beta_u[user_idx] + self.beta_i + self.gamma_i.dot(
self.gamma_u[user_idx, :])
return known_item_scores
else:
if self.train_set.is_unk_user(user_idx) or self.train_set.is_unk_item(item_idx):
raise ScoreException("Can't make score prediction for (user_id=%d, item_id=%d)" % (user_idx, item_idx))
user_pred = self.alpha + self.beta_u[user_idx] + self.beta_i[item_idx] + self.gamma_i[item_idx, :].dot(
self.gamma_u[user_idx, :])
return user_pred
| true | true |
f726847a942ebeed78eb3cd894dacd72c3644eb4 | 101 | py | Python | main.py | marcacohen/mynewsfeed.io | b1debfab02c5dd6d618acd798792c906a4bb4c47 | [
"Apache-2.0"
] | 1 | 2020-11-10T16:07:24.000Z | 2020-11-10T16:07:24.000Z | main.py | marcacohen/mynewsfeed.io | b1debfab02c5dd6d618acd798792c906a4bb4c47 | [
"Apache-2.0"
] | null | null | null | main.py | marcacohen/mynewsfeed.io | b1debfab02c5dd6d618acd798792c906a4bb4c47 | [
"Apache-2.0"
] | null | null | null | from web import app
app.run(host='0.0.0.0',port=8080, use_reloader=True, debug=True, threaded=True)
| 25.25 | 79 | 0.742574 | from web import app
app.run(host='0.0.0.0',port=8080, use_reloader=True, debug=True, threaded=True)
| true | true |
f726861fe715da3c897ba6b113c2784dd07089b9 | 572 | py | Python | planutils/settings.py | AI-Planning/planning-utils | 2ab7015cfe52505c7972ed9e7066bdd3c769153f | [
"MIT"
] | 1 | 2020-04-18T15:30:58.000Z | 2020-04-18T15:30:58.000Z | planutils/settings.py | AI-Planning/planning-utils | 2ab7015cfe52505c7972ed9e7066bdd3c769153f | [
"MIT"
] | 3 | 2020-04-21T17:09:06.000Z | 2020-04-28T15:50:07.000Z | planutils/settings.py | AI-Planning/planning-utils | 2ab7015cfe52505c7972ed9e7066bdd3c769153f | [
"MIT"
] | null | null | null |
import json, os
from planutils import manifest_converter
# This should eventually be changed once the prefix is customizable
PLANUTILS_PREFIX = os.path.join(os.path.expanduser('~'), '.planutils')
SETTINGS_FILE = os.path.join(PLANUTILS_PREFIX, 'settings.json')
PAAS_SERVER = 'http://45.113.232.43:5001'
PAAS_SERVER_LIMIT = 100
def load():
    """Read and return the persisted planutils settings.

    Returns the JSON object parsed from ``SETTINGS_FILE``.
    """
    # json.load reads and parses the open stream in one step
    # (idiomatic replacement for json.loads(f.read())).
    with open(SETTINGS_FILE, 'r') as f:
        settings = json.load(f)
        return settings
def save(s):
    """Persist settings object *s* as JSON and regenerate the tool manifest."""
    with open(SETTINGS_FILE, 'w') as f:
        json.dump(s, f)
    # Keep the generated manifest in sync with the saved settings.
    manifest_converter.generate_manifest()
| 24.869565 | 70 | 0.713287 |
import json, os
from planutils import manifest_converter
PLANUTILS_PREFIX = os.path.join(os.path.expanduser('~'), '.planutils')
SETTINGS_FILE = os.path.join(PLANUTILS_PREFIX, 'settings.json')
PAAS_SERVER = 'http://45.113.232.43:5001'
PAAS_SERVER_LIMIT = 100
def load():
with open(SETTINGS_FILE, 'r') as f:
settings = json.loads(f.read())
return settings
def save(s):
with open(SETTINGS_FILE, 'w') as f:
f.write(json.dumps(s))
manifest_converter.generate_manifest()
| true | true |
f726862b7ddae2271ecd69d2a01c433b3d758f10 | 2,349 | py | Python | tools/mo/unit_tests/mo/load/loader_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | tools/mo/unit_tests/mo/load/loader_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | tools/mo/unit_tests/mo/load/loader_test.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from openvino.tools.mo.load.tf.loader import graph_or_sub_graph_has_nhwc_ops
from unit_tests.utils.graph import build_graph, result, regular_op, const, connect_front
class TFLoaderTest(unittest.TestCase):
    """Tests for graph_or_sub_graph_has_nhwc_ops.

    Verifies NHWC operation detection both in the main graph and inside the
    body sub-graph of a Loop operation.
    """

    @staticmethod
    def build_conv_graph():
        """Build a front-stage graph containing an NHWC Conv2D node."""
        node_attrs = {
            **const('weights', np.random.randn(1, 1, 1, 1)),
            **regular_op('input', {'op': 'Parameter'}),
            **regular_op('conv', {'op': 'Conv2D', 'layout': 'NHWC'}),
            **result('result'),
        }
        edge_list = [
            *connect_front('input', '0:conv'),
            *connect_front('weights', '1:conv'),
            *connect_front('conv:0', 'result'),
        ]
        g = build_graph(node_attrs, edge_list)
        g.stage = 'front'
        return g

    @staticmethod
    def build_parameter_result_graph():
        """Build a trivial front-stage graph with no NHWC operations."""
        node_attrs = {
            **regular_op('input', {'op': 'Parameter'}),
            **result('result'),
        }
        edge_list = [*connect_front('input', '0:result')]
        g = build_graph(node_attrs, edge_list)
        g.stage = 'front'
        return g

    @staticmethod
    def build_loop_graph(body_graph):
        """Wrap *body_graph* as the body sub-graph of a fake Loop operation."""
        node_attrs = {
            **regular_op('input', {'op': 'Parameter'}),
            **regular_op('loop', {'op': 'Loop', 'body': body_graph, 'sub_graphs': ['body']}),
            **result('result'),
        }
        edge_list = [
            *connect_front('input', '0:loop'),
            *connect_front('loop:0', 'result'),
        ]
        g = build_graph(node_attrs, edge_list)
        g.stage = 'front'
        return g

    def test_convolution_main_graph(self):
        self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_conv_graph()))

    def test_convolution_loop_body_graph(self):
        self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_conv_graph())))

    def test_no_convolution_main_graph(self):
        self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_parameter_result_graph()))

    def test_no_convolution_main_and_sub_graph(self):
        self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_parameter_result_graph())))
| 34.544118 | 117 | 0.613027 |
import unittest
import numpy as np
from openvino.tools.mo.load.tf.loader import graph_or_sub_graph_has_nhwc_ops
from unit_tests.utils.graph import build_graph, result, regular_op, const, connect_front
class TFLoaderTest(unittest.TestCase):
@staticmethod
def build_conv_graph():
nodes = {
**const('weights', np.random.randn(1, 1, 1, 1)),
**regular_op('input', {'op': 'Parameter'}),
**regular_op('conv', {'op': 'Conv2D', 'layout': 'NHWC'}),
**result('result'),
}
edges = [*connect_front('input', '0:conv'),
*connect_front('weights', '1:conv'),
*connect_front('conv:0', 'result'),
]
graph = build_graph(nodes, edges)
graph.stage = 'front'
return graph
@staticmethod
def build_parameter_result_graph():
nodes = {
**regular_op('input', {'op': 'Parameter'}),
**result('result'),
}
edges = [*connect_front('input', '0:result'),
]
graph = build_graph(nodes, edges)
graph.stage = 'front'
return graph
@staticmethod
def build_loop_graph(body_graph):
nodes = {
**regular_op('input', {'op': 'Parameter'}),
**regular_op('loop', {'op': 'Loop', 'body': body_graph, 'sub_graphs': ['body']}),
**result('result'),
}
edges = [*connect_front('input', '0:loop'),
*connect_front('loop:0', 'result'),
]
graph = build_graph(nodes, edges)
graph.stage = 'front'
return graph
def test_convolution_main_graph(self):
self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_conv_graph()))
def test_convolution_loop_body_graph(self):
self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_conv_graph())))
def test_no_convolution_main_graph(self):
self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_parameter_result_graph()))
def test_no_convolution_main_and_sub_graph(self):
self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_parameter_result_graph())))
| true | true |
f726869b16a3c31ae1abced42c8498b5f9a71273 | 390 | py | Python | shorty/wsgi.py | alazaro/shorty | 2a75c2f1351b3ada20c551159f8f22b04284ead1 | [
"MIT"
] | null | null | null | shorty/wsgi.py | alazaro/shorty | 2a75c2f1351b3ada20c551159f8f22b04284ead1 | [
"MIT"
] | 1 | 2021-06-10T19:02:19.000Z | 2021-06-10T19:02:19.000Z | shorty/wsgi.py | alazaro/shorty | 2a75c2f1351b3ada20c551159f8f22b04284ead1 | [
"MIT"
] | null | null | null | """
WSGI config for shorty project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the WSGI app is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shorty.settings")
# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| 22.941176 | 78 | 0.784615 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shorty.settings")
application = get_wsgi_application()
| true | true |
f72686bfb1c06e1c972ddd0550910b6f66064e90 | 1,361 | py | Python | other/password_generator.py | Pratiyush27/Python | be48a876c7746611099974e572ea82691a7cbb20 | [
"MIT"
] | 12 | 2020-02-11T22:18:10.000Z | 2021-06-23T02:56:07.000Z | other/password_generator.py | Pratiyush27/Python | be48a876c7746611099974e572ea82691a7cbb20 | [
"MIT"
] | 1 | 2019-09-26T08:03:36.000Z | 2019-09-26T08:03:36.000Z | other/password_generator.py | Pratiyush27/Python | be48a876c7746611099974e572ea82691a7cbb20 | [
"MIT"
] | 18 | 2020-02-09T13:00:11.000Z | 2021-03-11T08:47:36.000Z | """Password generator allows you to generate a random password of length N."""
from random import choice
from string import ascii_letters, digits, punctuation
def password_generator(length=8):
    """Generate a random password of *length* characters.

    Characters are drawn from ASCII letters, digits and punctuation using the
    ``secrets`` module, which is suitable for security-sensitive use (the
    previously used non-cryptographic ``random`` module is not).

    >>> len(password_generator())
    8
    >>> len(password_generator(length=16))
    16
    >>> len(password_generator(257))
    257
    >>> len(password_generator(length=0))
    0
    >>> len(password_generator(-1))
    0
    """
    import secrets  # local import keeps the module's import surface unchanged
    chars = tuple(ascii_letters) + tuple(digits) + tuple(punctuation)
    return "".join(secrets.choice(chars) for x in range(length))
# ALTERNATIVE METHODS (unimplemented placeholders)
# ctbi = characters that must be in the password
# i = desired length of the generated password
def alternative_password_generator(ctbi, i):
    # Intended design (per original notes): assemble the password from the
    # random_number, random_letters and random_characters helpers below.
    pass  # Put your code here...
def random_number(ctbi, i):
    # Placeholder — presumably contributes random digits; not implemented.
    pass  # Put your code here...
def random_letters(ctbi, i):
    # Placeholder — presumably contributes random letters; not implemented.
    pass  # Put your code here...
def random_characters(ctbi, i):
    # Placeholder — presumably contributes random punctuation; not implemented.
    pass  # Put your code here...
def main():
    """Prompt for a password length and print a freshly generated password."""
    length = int(input("Please indicate the max length of your password: ").strip())
    print("Password generated:", password_generator(length))
    # Fixed user-facing typo: "passsword" -> "password".
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
| 26.173077 | 84 | 0.68626 | from random import choice
from string import ascii_letters, digits, punctuation
def password_generator(length=8):
chars = tuple(ascii_letters) + tuple(digits) + tuple(punctuation)
return "".join(choice(chars) for x in range(length))
def alternative_password_generator(ctbi, i):
pass
def random_number(ctbi, i):
pass
def random_letters(ctbi, i):
pass
def random_characters(ctbi, i):
pass
def main():
length = int(input("Please indicate the max length of your password: ").strip())
print("Password generated:", password_generator(length))
print("[If you are thinking of using this passsword, You better save it.]")
if __name__ == "__main__":
main()
| true | true |
f726874106f79a0d94063c9a43eae7fc697a4f84 | 2,283 | py | Python | mindware/components/feature_engineering/transformations/rescaler/quantile_transformer.py | jhj0411jhj/soln-ml | 002ec06bf139b14bc059e0f0438501b31d9ed16a | [
"MIT"
] | 27 | 2021-07-19T09:03:34.000Z | 2022-03-31T06:19:23.000Z | mindware/components/feature_engineering/transformations/rescaler/quantile_transformer.py | jhj0411jhj/soln-ml | 002ec06bf139b14bc059e0f0438501b31d9ed16a | [
"MIT"
] | 4 | 2021-07-15T12:17:10.000Z | 2022-01-26T17:16:58.000Z | mindware/components/feature_engineering/transformations/rescaler/quantile_transformer.py | jhj0411jhj/soln-ml | 002ec06bf139b14bc059e0f0438501b31d9ed16a | [
"MIT"
] | 17 | 2020-05-12T20:24:50.000Z | 2021-07-11T03:31:38.000Z | from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformIntegerHyperparameter, \
CategoricalHyperparameter
from mindware.components.feature_engineering.transformations.base_transformer import *
class QuantileTransformation(Transformer):
type = 5
def __init__(self, n_quantiles=1000, output_distribution='uniform', random_state=1):
super().__init__("quantile_transformer")
self.input_type = [NUMERICAL, DISCRETE]
self.compound_mode = 'in_place'
self.output_type = NUMERICAL
self.output_distribution = output_distribution
self.n_quantiles = n_quantiles
self.random_state = random_state
@ease_trans
def operate(self, input_datanode, target_fields=None):
from mindware.components.feature_engineering.transformations.utils import QuantileTransformer
X, y = input_datanode.data
X_new = X[:, target_fields]
if not self.model:
self.model = QuantileTransformer(output_distribution=self.output_distribution,
n_quantiles=self.n_quantiles, copy=False,
random_state=self.random_state)
self.model.fit(X_new)
_X = self.model.transform(X_new)
return _X
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
# TODO parametrize like the Random Forest as n_quantiles = n_features^param
n_quantiles = UniformIntegerHyperparameter(
'n_quantiles', lower=10, upper=2000, default_value=1000
)
output_distribution = CategoricalHyperparameter(
'output_distribution', ['uniform', 'normal'], default_value="uniform"
)
cs.add_hyperparameters([n_quantiles, output_distribution])
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'n_quantiles': hp.randint('quantile_n_quantiles', 1990) + 10,
'output_distribution': hp.choice('quantile_output_distribution', ['uniform', 'normal'])}
return space
| 40.767857 | 109 | 0.65922 | from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformIntegerHyperparameter, \
CategoricalHyperparameter
from mindware.components.feature_engineering.transformations.base_transformer import *
class QuantileTransformation(Transformer):
type = 5
def __init__(self, n_quantiles=1000, output_distribution='uniform', random_state=1):
super().__init__("quantile_transformer")
self.input_type = [NUMERICAL, DISCRETE]
self.compound_mode = 'in_place'
self.output_type = NUMERICAL
self.output_distribution = output_distribution
self.n_quantiles = n_quantiles
self.random_state = random_state
@ease_trans
def operate(self, input_datanode, target_fields=None):
from mindware.components.feature_engineering.transformations.utils import QuantileTransformer
X, y = input_datanode.data
X_new = X[:, target_fields]
if not self.model:
self.model = QuantileTransformer(output_distribution=self.output_distribution,
n_quantiles=self.n_quantiles, copy=False,
random_state=self.random_state)
self.model.fit(X_new)
_X = self.model.transform(X_new)
return _X
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
n_quantiles = UniformIntegerHyperparameter(
'n_quantiles', lower=10, upper=2000, default_value=1000
)
output_distribution = CategoricalHyperparameter(
'output_distribution', ['uniform', 'normal'], default_value="uniform"
)
cs.add_hyperparameters([n_quantiles, output_distribution])
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'n_quantiles': hp.randint('quantile_n_quantiles', 1990) + 10,
'output_distribution': hp.choice('quantile_output_distribution', ['uniform', 'normal'])}
return space
| true | true |
f726877422782de259695a0ce16eb5bc2f697d80 | 360 | py | Python | resource_path.py | UAlbanyArchives/EADMachine-2.0 | 18e155f76374b295c287d32c6e54ef8ecabe29d2 | [
"MIT"
] | 5 | 2016-01-25T15:27:12.000Z | 2021-08-17T22:31:48.000Z | source/GUI/resource_path.py | gwiedeman/eadmachine | f6c0c0f92fc20ab6dcf4962fda827b7adb4749d4 | [
"Unlicense"
] | null | null | null | source/GUI/resource_path.py | gwiedeman/eadmachine | f6c0c0f92fc20ab6dcf4962fda827b7adb4749d4 | [
"Unlicense"
] | null | null | null | import os
import sys
def resource_path(relative_path):
    """Get the absolute path to a bundled resource.

    Works both in development (relative to the current directory) and inside
    a PyInstaller bundle, where files are unpacked to ``sys._MEIPASS``.
    """
    try:
        # PyInstaller creates a temp folder and stores its path in _MEIPASS.
        base_path = sys._MEIPASS
    except AttributeError:
        # Narrowed from a blanket `except Exception`: only the missing
        # attribute (non-bundled run) is expected here.
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
import sys
def resource_path(relative_path):
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path) | true | true |
f72687e80bced94fac9dfc6fc885ba3a3a16f55e | 1,067 | py | Python | plugins/threatstack/icon_threatstack/actions/get_rule/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/threatstack/icon_threatstack/actions/get_rule/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/threatstack/icon_threatstack/actions/get_rule/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import insightconnect_plugin_runtime
from .schema import GetRuleInput, GetRuleOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.helper import clean
from threatstack.errors import ThreatStackAPIError, ThreatStackClientError, APIRateLimitError
from insightconnect_plugin_runtime.exceptions import PluginException
class GetRule(insightconnect_plugin_runtime.Action):
    """Plugin action: fetch a single rule from a Threat Stack ruleset."""

    def __init__(self):
        # Name the class explicitly: `super(self.__class__, self)` recurses
        # infinitely if this class is ever subclassed.
        super(GetRule, self).__init__(
            name="get_rule",
            description=Component.DESCRIPTION,
            input=GetRuleInput(),
            output=GetRuleOutput(),
        )

    def run(self, params={}):  # framework-defined signature; params is never mutated
        """Fetch the rule identified by RULE_ID within RULESET_ID.

        :param params: action input containing Input.RULE_ID and Input.RULESET_ID
        :return: dict mapping Output.RULE to the cleaned rule payload
        :raises PluginException: when the Threat Stack API call fails
        """
        rule_id, ruleset_id = params.get(Input.RULE_ID), params.get(Input.RULESET_ID)
        try:
            rule = clean(self.connection.client.rulesets.rules(ruleset_id=ruleset_id, rule_id=rule_id))
        except (ThreatStackAPIError, ThreatStackClientError, APIRateLimitError) as e:
            raise PluginException(cause="An error occurred!", assistance=e)
        return {Output.RULE: rule}
| 38.107143 | 103 | 0.730084 | import insightconnect_plugin_runtime
from .schema import GetRuleInput, GetRuleOutput, Input, Output, Component
from insightconnect_plugin_runtime.helper import clean
from threatstack.errors import ThreatStackAPIError, ThreatStackClientError, APIRateLimitError
from insightconnect_plugin_runtime.exceptions import PluginException
class GetRule(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="get_rule",
description=Component.DESCRIPTION,
input=GetRuleInput(),
output=GetRuleOutput(),
)
def run(self, params={}):
rule_id, ruleset_id = params.get(Input.RULE_ID), params.get(Input.RULESET_ID)
try:
rule = clean(self.connection.client.rulesets.rules(ruleset_id=ruleset_id, rule_id=rule_id))
except (ThreatStackAPIError, ThreatStackClientError, APIRateLimitError) as e:
raise PluginException(cause="An error occurred!", assistance=e)
return {Output.RULE: rule}
| true | true |
f72688f4329669467a3225780b13734675bc50e3 | 383 | py | Python | geonames/exceptions.py | flyingdice/geonames-sqlite | acf51d9af723d46815c43509ce22712ce910a61e | [
"Apache-2.0"
] | null | null | null | geonames/exceptions.py | flyingdice/geonames-sqlite | acf51d9af723d46815c43509ce22712ce910a61e | [
"Apache-2.0"
] | null | null | null | geonames/exceptions.py | flyingdice/geonames-sqlite | acf51d9af723d46815c43509ce22712ce910a61e | [
"Apache-2.0"
] | null | null | null | """
geonames/exceptions
~~~~~~~~~~~~~~~~~~~
"""
from . import base
def ignore_foreign_key_constraint(db, options, record: base.T, exception: Exception) -> bool:
    """Return True when *exception* is a SQLite FOREIGN KEY constraint failure."""
    message = str(exception)
    return 'FOREIGN KEY constraint failed' in message
def ignore_unique_key_constraint(db, options, record: base.T, exception: Exception) -> bool:
    """Return True when *exception* is a SQLite UNIQUE constraint failure."""
    message = str(exception)
    return 'UNIQUE constraint failed' in message
| 27.357143 | 93 | 0.697128 | from . import base
def ignore_foreign_key_constraint(db, options, record: base.T, exception: Exception) -> bool:
return 'FOREIGN KEY constraint failed' in str(exception)
def ignore_unique_key_constraint(db, options, record: base.T, exception: Exception) -> bool:
return 'UNIQUE constraint failed' in str(exception)
| true | true |
f7268900a2f97b94ecc6e2a71419685ab31bc7d7 | 131,714 | py | Python | rpython/rlib/parsing/pypackrat.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | rpython/rlib/parsing/pypackrat.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | rpython/rlib/parsing/pypackrat.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null |
from rpython.rlib.parsing.tree import Nonterminal, Symbol
from rpython.rlib.parsing.makepackrat import PackratParser, BacktrackException, Status
class Parser(object):
    def NAME(self):
        """Match a NAME token at the current position and return its result."""
        return self._NAME().result
    def _NAME(self):
        """Packrat-memoized matcher for NAME; returns the Status memo cell."""
        # One Status memo entry per input position.
        _key = self._pos
        _status = self._dict_NAME.get(_key, None)
        if _status is None:
            _status = self._dict_NAME[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the recorded end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay it without re-parsing.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1074651696()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def SPACE(self):
        """Match a single space character and return its result."""
        return self._SPACE().result
    def _SPACE(self):
        """Packrat-memoized matcher for SPACE (one literal ' ')."""
        _key = self._pos
        _status = self._dict_SPACE.get(_key, None)
        if _status is None:
            _status = self._dict_SPACE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the recorded end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay it without re-parsing.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__(' ')
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def COMMENT(self):
        """Match a COMMENT token and return its result."""
        return self._COMMENT().result
    def _COMMENT(self):
        """Packrat-memoized matcher for COMMENT (generated regex rule)."""
        _key = self._pos
        _status = self._dict_COMMENT.get(_key, None)
        if _status is None:
            _status = self._dict_COMMENT[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the recorded end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay it without re-parsing.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex528667127()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def IGNORE(self):
        """Match an IGNORE token (skippable input) and return its result."""
        return self._IGNORE().result
    def _IGNORE(self):
        """Packrat-memoized matcher for IGNORE (generated regex rule)."""
        _key = self._pos
        _status = self._dict_IGNORE.get(_key, None)
        if _status is None:
            _status = self._dict_IGNORE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the recorded end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay it without re-parsing.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1979538501()
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def newline(self):
        """Match the newline rule (COMMENT or newline regex) and return its result."""
        return self._newline().result
    def _newline(self):
        """Packrat-memoized matcher for newline with left-recursion support.

        Tries COMMENT first, then the generated newline regex; the extra
        INPROGRESS/SOMESOLUTIONS states implement the seed-growing scheme
        for (potentially) left-recursive rules.
        """
        _key = self._pos
        _status = self._dict_newline.get(_key, None)
        if _status is None:
            _status = self._dict_newline[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the recorded end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay it without re-parsing.
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while already parsing at this position:
                # return the current seed result, or fail if none yet.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Ordered choice: COMMENT | newline-regex.
                _choice0 = self._pos
                try:
                    _call_status = self._COMMENT()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice1 = self._pos
                try:
                    _result = self._regex299149370()
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    raise BacktrackException(_error)
                _result = self._regex299149370()
                break
            if _status.status == _status.LEFTRECURSION:
                # Left-recursion seed growing: retry while the match extends.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._newline()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def REGEX(self):
        """Match a REGEX token and return its result (a Symbol tree node)."""
        return self._REGEX().result
    def _REGEX(self):
        """Packrat-memoized matcher for REGEX; wraps the text in Symbol('REGEX')."""
        _key = self._pos
        _status = self._dict_REGEX.get(_key, None)
        if _status is None:
            _status = self._dict_REGEX[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the recorded end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay it without re-parsing.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1006631623()
            r = _result
            # Build the AST node for the matched regex literal.
            _result = (Symbol('REGEX', r, None))
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def QUOTE(self):
        """Match a QUOTE token and return its result (a Symbol tree node)."""
        return self._QUOTE().result
    def _QUOTE(self):
        """Packrat-memoized matcher for QUOTE; wraps the text in Symbol('QUOTE')."""
        _key = self._pos
        _status = self._dict_QUOTE.get(_key, None)
        if _status is None:
            _status = self._dict_QUOTE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the recorded end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay it without re-parsing.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex1124192327()
            r = _result
            # Build the AST node for the matched quoted literal.
            _result = (Symbol('QUOTE', r, None))
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
    def PYTHONCODE(self):
        """Match a PYTHONCODE token and return its result (a Symbol tree node)."""
        return self._PYTHONCODE().result
    def _PYTHONCODE(self):
        """Packrat-memoized matcher for PYTHONCODE; wraps it in Symbol('PYTHONCODE')."""
        _key = self._pos
        _status = self._dict_PYTHONCODE.get(_key, None)
        if _status is None:
            _status = self._dict_PYTHONCODE[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cache hit: restore the recorded end position.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: replay it without re-parsing.
                raise BacktrackException(_status.error)
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self._regex291086639()
            r = _result
            # Build the AST node for the matched embedded Python code.
            _result = (Symbol('PYTHONCODE', r, None))
            assert _status.status != _status.LEFTRECURSION
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure for this position.
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def EOF(self):
return self._EOF().result
    def _EOF(self):
        """Memoized matcher for end-of-input: succeeds only when no character can be read.

        Implemented as a negative lookahead on __any__(): if any character
        matches, EOF fails (raises BacktrackException); if reading fails,
        EOF succeeds.  Includes the generated left-recursion bookkeeping
        (INPROGRESS / LEFTRECURSION / SOMESOLUTIONS states).
        """
        _key = self._pos
        _status = self._dict_EOF.get(_key, None)
        if _status is None:
            _status = self._dict_EOF[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Negative lookahead: EOF succeeds only if __any__() fails.
            _choice0 = self._pos
            _stored_result1 = _result
            try:
                _result = self.__any__()
            except BacktrackException:
                self._pos = _choice0
                _result = _stored_result1
            else:
                raise BacktrackException(None)
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._EOF()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = _exc.error
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def file(self):
return self._file().result
    def _file(self):
        """Memoized matcher for the `file` rule: IGNORE* list EOF.

        Skips leading ignorable input, parses the production list, then
        requires end of input; the list's parse tree is the result
        (the EOF match is discarded).
        """
        _key = self._pos
        _status = self._dict_file.get(_key, None)
        if _status is None:
            _status = self._dict_file[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # IGNORE* -- consume any leading ignorable tokens.
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._list()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            _before_discard2 = _result
            # EOF is required but its result is discarded.
            _call_status = self._EOF()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            _result = _before_discard2
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._file()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def list(self):
return self._list().result
    def _list(self):
        """Memoized matcher for the `list` rule: production+.

        Collects one or more productions and wraps them in a
        Nonterminal('list', ...) node.
        """
        _key = self._pos
        _status = self._dict_list.get(_key, None)
        if _status is None:
            _status = self._dict_list[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # production+ : one required match, then greedy repetition.
            _all0 = []
            _call_status = self._production()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._production()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            content = _result
            _result = (Nonterminal('list', content))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._list()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def production(self):
return self._production().result
    def _production(self):
        """Memoized matcher for the `production` rule.

        Grammar shape: NAME SPACE* productionargs ':' IGNORE* or_ IGNORE*
        ';' IGNORE*, producing Nonterminal('production', [name, args, what]).
        """
        _key = self._pos
        _status = self._dict_production.get(_key, None)
        if _status is None:
            _status = self._dict_production[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            name = _result
            # SPACE* after the production name.
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._productionargs()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            args = _result
            _result = self.__chars__(':')
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            # The production body: an ordered-choice expression.
            _call_status = self._or_()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            what = _result
            _all4 = []
            while 1:
                _choice5 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all4.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    break
            _result = _all4
            _result = self.__chars__(';')
            _all6 = []
            while 1:
                _choice7 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all6.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    break
            _result = _all6
            _result = (Nonterminal('production', [name, args, what]))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._production()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def productionargs(self):
return self._productionargs().result
    def _productionargs(self):
        """Memoized matcher for the `productionargs` rule.

        Ordered choice: either a parenthesized, comma-separated NAME list
        -> Nonterminal('productionargs', args + [arg]), or nothing
        -> Nonterminal('productionargs', []).
        """
        _key = self._pos
        _status = self._dict_productionargs.get(_key, None)
        if _status is None:
            _status = self._dict_productionargs[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Ordered choice loop: first alternative that matches wins.
            while 1:
                _choice0 = self._pos
                try:
                    _result = self.__chars__('(')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    # (NAME IGNORE* ',' IGNORE*)* -- all but the last argument.
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._NAME()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard5 = _result
                            _all6 = []
                            while 1:
                                _choice7 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all6.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice7
                                    break
                            _result = _all6
                            _result = self.__chars__(',')
                            _all8 = []
                            while 1:
                                _choice9 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all8.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    break
                            _result = _all8
                            _result = _before_discard5
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    args = _result
                    # Final argument (no trailing comma).
                    _call_status = self._NAME()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    arg = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = self.__chars__(')')
                    _all12 = []
                    while 1:
                        _choice13 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all12.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice13
                            break
                    _result = _all12
                    _result = (Nonterminal('productionargs', args + [arg]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice14 = self._pos
                try:
                    # Empty alternative: no argument list at all.
                    _result = (Nonterminal('productionargs', []))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice14
                    raise BacktrackException(_error)
                _result = (Nonterminal('productionargs', []))
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._productionargs()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def or_(self):
return self._or_().result
    def _or_(self):
        """Memoized matcher for the `or_` rule.

        Either (commands '|' IGNORE*)+ commands -> Nonterminal('or', ...),
        or a single commands expression passed through unchanged.
        """
        _key = self._pos
        _status = self._dict_or_.get(_key, None)
        if _status is None:
            _status = self._dict_or_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Ordered choice: multi-branch '|' form first, then a bare commands.
            while 1:
                _choice0 = self._pos
                try:
                    _all1 = []
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = _call_status.error
                    _before_discard2 = _result
                    _result = self.__chars__('|')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice5 = self._pos
                        try:
                            _call_status = self._commands()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard6 = _result
                            _result = self.__chars__('|')
                            _all7 = []
                            while 1:
                                _choice8 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all7.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice8
                                    break
                            _result = _all7
                            _result = _before_discard6
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice5
                            break
                    _result = _all1
                    l = _result
                    # Trailing alternative after the last '|'.
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = (Nonterminal('or', l + [last]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice9 = self._pos
                try:
                    _call_status = self._commands()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice9
                    raise BacktrackException(_error)
                _call_status = self._commands()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._or_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commands(self):
return self._commands().result
    def _commands(self):
        """Memoized matcher for the `commands` rule.

        Either command newline (command newline)+
        -> Nonterminal('commands', [cmd] + cmds), or a single command
        passed through unchanged.
        """
        _key = self._pos
        _status = self._dict_commands.get(_key, None)
        if _status is None:
            _status = self._dict_commands[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Ordered choice: newline-separated sequence first, bare command second.
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = _call_status.error
                    cmd = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all1 = []
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard2 = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice3 = self._pos
                        try:
                            _call_status = self._command()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard4 = _result
                            _call_status = self._newline()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _result = _before_discard4
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice3
                            break
                    _result = _all1
                    cmds = _result
                    _result = (Nonterminal('commands', [cmd] + cmds))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice5 = self._pos
                try:
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    raise BacktrackException(_error)
                _call_status = self._command()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commands()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def command(self):
return self._command().result
    def _command(self):
        """Memoized matcher for the `command` rule: delegates to simplecommand."""
        _key = self._pos
        _status = self._dict_command.get(_key, None)
        if _status is None:
            _status = self._dict_command[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def simplecommand(self):
return self._simplecommand().result
    def _simplecommand(self):
        """Memoized matcher for the `simplecommand` rule.

        Ordered choice over return_ / if_ / named_command / repetition /
        choose / negation; the first alternative that matches wins.
        """
        _key = self._pos
        _status = self._dict_simplecommand.get(_key, None)
        if _status is None:
            _status = self._dict_simplecommand[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._return_()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice1 = self._pos
                try:
                    _call_status = self._if_()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                _choice2 = self._pos
                try:
                    _call_status = self._named_command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice2
                _choice3 = self._pos
                try:
                    _call_status = self._repetition()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                _choice4 = self._pos
                try:
                    _call_status = self._choose()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice4
                _choice5 = self._pos
                try:
                    _call_status = self._negation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    raise BacktrackException(_error)
                _call_status = self._negation()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._simplecommand()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def return_(self):
return self._return_().result
    def _return_(self):
        """Memoized matcher for the `return_` rule.

        'return' SPACE* PYTHONCODE IGNORE* -> Nonterminal('return', [code]).
        """
        _key = self._pos
        _status = self._dict_return_.get(_key, None)
        if _status is None:
            _status = self._dict_return_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('return')
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            code = _result
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = (Nonterminal('return', [code]))
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._return_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def if_(self):
return self._if_().result
    def _if_(self):
        """Memoized matcher for the `if_` rule.

        Ordered choice between
        'do' newline command SPACE* 'if' SPACE* PYTHONCODE IGNORE*
        -> Nonterminal('if', [cmd, condition]), and
        'if' SPACE* PYTHONCODE IGNORE* -> Nonterminal('if', [condition]).
        """
        _key = self._pos
        _status = self._dict_if_.get(_key, None)
        if _status is None:
            _status = self._dict_if_[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    # Alternative 1: do <newline> <command> ... if <code>
                    _result = self.__chars__('do')
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = _call_status.error
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    cmd = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('if')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    condition = _result
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('if', [cmd, condition]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                _choice7 = self._pos
                try:
                    # Alternative 2: bare if <code>
                    _result = self.__chars__('if')
                    _all8 = []
                    while 1:
                        _choice9 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all8.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice9
                            break
                    _result = _all8
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    condition = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal('if', [condition]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    raise BacktrackException(_error)
                _result = self.__chars__('if')
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                _call_status = self._PYTHONCODE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                condition = _result
                _all14 = []
                while 1:
                    _choice15 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all14.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice15
                        break
                _result = _all14
                _result = (Nonterminal('if', [condition]))
                break
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._if_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def choose(self):
return self._choose().result
    def _choose(self):
        """Memoized (packrat) match of the grammar rule::

            choose: 'choose' SPACE* NAME SPACE* 'in' SPACE* PYTHONCODE
                    IGNORE* commands

        Auto-generated parsing code; returns a Status object whose ``result``
        is ``Nonterminal('choose', [name, expr, cmds])``.  Raises
        BacktrackException on failure.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_choose.get(_key, None)
        if _status is None:
            _status = self._dict_choose[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                # Cached success: restore the position it ended at and reuse it.
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                # Cached failure: fail fast with the stored error.
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                # Reuse the current seed result if one exists, else backtrack.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('choose')
            # SPACE* after the 'choose' keyword.
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._NAME()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            name = _result
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = self.__chars__('in')
            _all4 = []
            while 1:
                _choice5 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all4.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    break
            _result = _all4
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            expr = _result
            _all6 = []
            while 1:
                _choice7 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all6.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    break
            _result = _all6
            _call_status = self._commands()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmds = _result
            _result = (Nonterminal('choose', [name, expr, cmds]))
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed: retry from the start until the
                # match stops getting longer, then return the best result.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._choose()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commandchain(self):
return self._commandchain().result
    def _commandchain(self):
        """Memoized (packrat) match of the grammar rule::

            commandchain: simplecommand+

        Auto-generated parsing code; returns a Status object whose ``result``
        is ``Nonterminal('commands', result)`` holding the collected
        simplecommand nodes.  Raises BacktrackException on failure.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_commandchain.get(_key, None)
        if _status is None:
            _status = self._dict_commandchain[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # One mandatory simplecommand, then zero or more repeats.
            _all0 = []
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._simplecommand()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            result = _result
            _result = (Nonterminal('commands', result))
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commandchain()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def named_command(self):
return self._named_command().result
    def _named_command(self):
        """Memoized (packrat) match of the grammar rule::

            named_command: NAME SPACE* '=' SPACE* command

        Auto-generated parsing code; returns a Status object whose ``result``
        is ``Nonterminal('named_command', [name, cmd])``.  Raises
        BacktrackException on failure.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_named_command.get(_key, None)
        if _status is None:
            _status = self._dict_named_command[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            name = _result
            # SPACE* on either side of the '='.
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = self.__chars__('=')
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _call_status = self._command()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmd = _result
            _result = (Nonterminal('named_command', [name, cmd]))
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._named_command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def repetition(self):
return self._repetition().result
    def _repetition(self):
        """Memoized (packrat) match of the grammar rule::

            repetition: enclosed SPACE* '?' IGNORE*
                            -> Nonterminal('maybe', [what])
                      | enclosed SPACE* ('*' | '+') IGNORE*
                            -> Nonterminal('repetition', [op, what])

        Auto-generated parsing code; returns a Status object.  Raises
        BacktrackException on failure.  Note the generated alternatives
        contain unreachable duplicated tails after ``raise``/``break`` --
        an artifact of the code generator, left untouched.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_repetition.get(_key, None)
        if _status is None:
            _status = self._dict_repetition[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: enclosed SPACE* '?' IGNORE*  -> 'maybe'.
                _choice0 = self._pos
                try:
                    _call_status = self._enclosed()
                    _result = _call_status.result
                    _error = _call_status.error
                    what = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('?')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('maybe', [what]))
                    break
                except BacktrackException as _exc:
                    # Second alternative: enclosed SPACE* ('*'|'+') IGNORE*.
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice5 = self._pos
                    try:
                        _call_status = self._enclosed()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        what = _result
                        _all6 = []
                        while 1:
                            _choice7 = self._pos
                            try:
                                _call_status = self._SPACE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all6.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice7
                                break
                        _result = _all6
                        # Inner choice: '*' or '+'.
                        while 1:
                            _choice8 = self._pos
                            try:
                                _result = self.__chars__('*')
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice8
                                _choice9 = self._pos
                                try:
                                    _result = self.__chars__('+')
                                    break
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    raise BacktrackException(_error)
                            _result = self.__chars__('+')
                            break
                        repetition = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = (Nonterminal('repetition', [repetition, what]))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                # Generator artifact: duplicate of the second alternative,
                # only reachable if the loop above falls through.
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                what = _result
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                while 1:
                    _choice14 = self._pos
                    try:
                        _result = self.__chars__('*')
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice14
                        _choice15 = self._pos
                        try:
                            _result = self.__chars__('+')
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice15
                            raise BacktrackException(_error)
                    _result = self.__chars__('+')
                    break
                repetition = _result
                _all16 = []
                while 1:
                    _choice17 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all16.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice17
                        break
                _result = _all16
                _result = (Nonterminal('repetition', [repetition, what]))
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._repetition()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def negation(self):
return self._negation().result
    def _negation(self):
        """Memoized (packrat) match of the grammar rule::

            negation: '!' SPACE* negation IGNORE*
                          -> Nonterminal('negation', [what])
                    | enclosed

        Auto-generated parsing code; returns a Status object.  Raises
        BacktrackException on failure.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_negation.get(_key, None)
        if _status is None:
            _status = self._dict_negation[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: '!' SPACE* negation IGNORE*.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('!')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._negation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('negation', [what]))
                    break
                except BacktrackException as _exc:
                    # Fallback alternative: a plain enclosed expression.
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice5 = self._pos
                    try:
                        _call_status = self._enclosed()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                # Generator artifact: duplicate fallback, only reachable on
                # fall-through of the loop above.
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._negation()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def enclosed(self):
return self._enclosed().result
    def _enclosed(self):
        """Memoized (packrat) match of the grammar rule::

            enclosed: '<' IGNORE* primary IGNORE* '>' IGNORE*
                          -> Nonterminal('exclusive', [what])
                    | '[' IGNORE* or_ IGNORE* ']' IGNORE*
                          -> Nonterminal('ignore', [what])
                    | '(' IGNORE* or_ ')' IGNORE*   (returns the inner or_)
                    | primary

        Auto-generated parsing code; returns a Status object.  Raises
        BacktrackException on failure.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_enclosed.get(_key, None)
        if _status is None:
            _status = self._dict_enclosed[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: '<' ... '>'  -> 'exclusive'.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('<')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = self.__chars__('>')
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('exclusive', [what]))
                    break
                except BacktrackException as _exc:
                    # Second alternative: '[' ... ']'  -> 'ignore'.
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice7 = self._pos
                    try:
                        _result = self.__chars__('[')
                        _all8 = []
                        while 1:
                            _choice9 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all8.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice9
                                break
                        _result = _all8
                        _call_status = self._or_()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        what = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = self.__chars__(']')
                        _all12 = []
                        while 1:
                            _choice13 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all12.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice13
                                break
                        _result = _all12
                        _result = (Nonterminal('ignore', [what]))
                        break
                    except BacktrackException as _exc:
                        # Third alternative: '(' or_ ')' grouping; the
                        # _before_discard locals preserve the inner result
                        # across the discarded punctuation matches.
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice7
                        _choice14 = self._pos
                        try:
                            _before_discard15 = _result
                            _result = self.__chars__('(')
                            _all16 = []
                            while 1:
                                _choice17 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all16.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice17
                                    break
                            _result = _all16
                            _result = _before_discard15
                            _call_status = self._or_()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard18 = _result
                            _result = self.__chars__(')')
                            _all19 = []
                            while 1:
                                _choice20 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all19.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice20
                                    break
                            _result = _all19
                            _result = _before_discard18
                            break
                        except BacktrackException as _exc:
                            # Final alternative: a plain primary.
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice14
                            _choice21 = self._pos
                            try:
                                _call_status = self._primary()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice21
                                raise BacktrackException(_error)
                # Generator artifact: duplicate final alternative, only
                # reachable on fall-through of the loop above.
                _call_status = self._primary()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._enclosed()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def primary(self):
return self._primary().result
    def _primary(self):
        """Memoized (packrat) match of the grammar rule::

            primary: call | REGEX IGNORE* | QUOTE IGNORE*

        Auto-generated parsing code; returns a Status object.  Raises
        BacktrackException on failure.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_primary.get(_key, None)
        if _status is None:
            _status = self._dict_primary[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: a rule call.
                _choice0 = self._pos
                try:
                    _call_status = self._call()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    # Second alternative: REGEX followed by IGNORE*.
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice1 = self._pos
                    try:
                        _call_status = self._REGEX()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _before_discard2 = _result
                        _all3 = []
                        while 1:
                            _choice4 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all3.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice4
                                break
                        _result = _all3
                        _result = _before_discard2
                        break
                    except BacktrackException as _exc:
                        # Third alternative: QUOTE followed by IGNORE*.
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice1
                        _choice5 = self._pos
                        try:
                            _call_status = self._QUOTE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard6 = _result
                            _all7 = []
                            while 1:
                                _choice8 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all7.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice8
                                    break
                            _result = _all7
                            _result = _before_discard6
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice5
                            raise BacktrackException(_error)
                # Generator artifact: duplicate QUOTE alternative, only
                # reachable on fall-through of the loop above.
                _call_status = self._QUOTE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                _before_discard9 = _result
                _all10 = []
                while 1:
                    _choice11 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all10.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice11
                        break
                _result = _all10
                _result = _before_discard9
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._primary()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def call(self):
return self._call().result
    def _call(self):
        """Memoized (packrat) match of the grammar rule::

            call: NAME arguments IGNORE*  -> Nonterminal("call", [x, args])

        Auto-generated parsing code; returns a Status object.  Raises
        BacktrackException on failure.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_call.get(_key, None)
        if _status is None:
            _status = self._dict_call[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            x = _result
            _call_status = self._arguments()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            args = _result
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = (Nonterminal("call", [x, args]))
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._call()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def arguments(self):
return self._arguments().result
    def _arguments(self):
        """Memoized (packrat) match of the grammar rule::

            arguments: '(' IGNORE* (PYTHONCODE IGNORE* ',' IGNORE*)*
                           PYTHONCODE ')' IGNORE*
                           -> Nonterminal("args", args + [last])
                     |                       (empty)
                           -> Nonterminal("args", [])

        Auto-generated parsing code; returns a Status object.  Raises
        BacktrackException on failure.
        """
        # Memo-table lookup: reuse any previous parse attempt at this position.
        _key = self._pos
        _status = self._dict_arguments.get(_key, None)
        if _status is None:
            _status = self._dict_arguments[_key] = Status()
        else:
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while still parsing at this position: left recursion.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: a parenthesised, comma-separated list.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('(')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    # Zero or more "PYTHONCODE ','" items before the last one.
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._PYTHONCODE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard5 = _result
                            _all6 = []
                            while 1:
                                _choice7 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all6.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice7
                                    break
                            _result = _all6
                            _result = self.__chars__(',')
                            _all8 = []
                            while 1:
                                _choice9 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all8.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    break
                            _result = _all8
                            _result = _before_discard5
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    args = _result
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = self.__chars__(')')
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal("args", args + [last]))
                    break
                except BacktrackException as _exc:
                    # Fallback alternative: no argument list at all.
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice12 = self._pos
                    try:
                        _result = (Nonterminal("args", []))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice12
                        raise BacktrackException(_error)
                # Generator artifact: duplicate fallback, only reachable on
                # fall-through of the loop above.
                _result = (Nonterminal("args", []))
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow the left-recursive seed until the match stops growing.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._arguments()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def __init__(self, inputstream):
self._dict_NAME = {}
self._dict_SPACE = {}
self._dict_COMMENT = {}
self._dict_IGNORE = {}
self._dict_newline = {}
self._dict_REGEX = {}
self._dict_QUOTE = {}
self._dict_PYTHONCODE = {}
self._dict_EOF = {}
self._dict_file = {}
self._dict_list = {}
self._dict_production = {}
self._dict_productionargs = {}
self._dict_or_ = {}
self._dict_commands = {}
self._dict_command = {}
self._dict_simplecommand = {}
self._dict_return_ = {}
self._dict_if_ = {}
self._dict_choose = {}
self._dict_commandchain = {}
self._dict_named_command = {}
self._dict_repetition = {}
self._dict_negation = {}
self._dict_enclosed = {}
self._dict_primary = {}
self._dict_call = {}
self._dict_arguments = {}
self._pos = 0
self._inputstream = inputstream
def _regex299149370(self):
_choice13 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_299149370(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice13
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1006631623(self):
_choice14 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1006631623(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice14
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex528667127(self):
_choice15 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_528667127(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice15
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
    def _regex291086639(self):
        # Lex a '{...}' block containing no '}' (the PYTHONCODE token):
        # run the corresponding _Runner DFA at the current position.
        # On no match, restore self._pos and raise BacktrackException;
        # on a match, consume and return the matched slice.
        _choice16 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_291086639(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice16
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex1074651696(self):
        # Lex an identifier [A-Za-z_][A-Za-z0-9_]* (the NAME token):
        # run the corresponding _Runner DFA at the current position.
        # On no match, restore self._pos and raise BacktrackException;
        # on a match, consume and return the matched slice.
        _choice17 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1074651696(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice17
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex1124192327(self):
        # Lex a single-quoted literal, no escape handling (the QUOTE
        # token): run the corresponding _Runner DFA at the current
        # position.  On no match, restore self._pos and raise
        # BacktrackException; on a match, consume and return the slice.
        _choice18 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1124192327(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice18
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    def _regex1979538501(self):
        # Lex one ignorable unit: a single ' ', '\t' or '\n', or a '#'
        # comment running to its newline (the IGNORE token): run the
        # corresponding _Runner DFA at the current position.  On no match,
        # restore self._pos and raise BacktrackException; on a match,
        # consume and return the matched slice.
        _choice19 = self._pos
        _runner = self._Runner(self._inputstream, self._pos)
        _i = _runner.recognize_1979538501(self._pos)
        if _runner.last_matched_state == -1:
            self._pos = _choice19
            raise BacktrackException
        _upto = _runner.last_matched_index + 1
        _pos = self._pos
        assert _pos >= 0
        assert _upto >= 0
        _result = self._inputstream[_pos: _upto]
        self._pos = _upto
        return _result
    class _Runner(object):
        """Hand-unrolled DFA recognizers for the grammar's lexical regexes.

        Each ``recognize_<hash>`` method scans ``runner.text`` from index
        ``i``.  A non-negative return value is the index one past the last
        character consumed; a bitwise-complemented (negative) value ``~i``
        signals failure at position ``i``.  Accepting states record the
        most recent accepting position in ``last_matched_index`` /
        ``last_matched_state`` so that the ``_regex*`` callers can fall
        back to the longest successful match (``last_matched_state == -1``
        means no accepting state was ever reached).
        """
        def __init__(self, text, pos):
            self.text = text
            self.pos = pos
            self.last_matched_state = -1
            self.last_matched_index = -1
            self.state = -1
        # DFA for the `newline` token: runs of ' ' and '\n'; accepting
        # after zero characters or after any prefix that ends in '\n'.
        def recognize_299149370(runner, i):
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return i
                    if char == '\n':
                        state = 1
                    elif char == ' ':
                        state = 2
                    else:
                        break
                if state == 1:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return i
                    if char == '\n':
                        state = 1
                        continue
                    elif char == ' ':
                        state = 1
                        continue
                    else:
                        break
                if state == 2:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 2
                        return ~i
                    if char == '\n':
                        state = 1
                        continue
                    elif char == ' ':
                        state = 2
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # DFA for the REGEX token: a backtick-delimited literal in which
        # '\\' escapes the following byte (state 2 consumes it blindly).
        def recognize_1006631623(runner, i):
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == '`':
                        state = 3
                    else:
                        break
                if state == 2:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 2
                        return ~i
                    if '\x00' <= char <= '\xff':
                        state = 3
                    else:
                        break
                if state == 3:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 3
                        return ~i
                    if char == '`':
                        state = 1
                    elif char == '\\':
                        state = 2
                        continue
                    elif ']' <= char <= '_':
                        state = 3
                        continue
                    elif '\x00' <= char <= '[':
                        state = 3
                        continue
                    elif 'a' <= char <= '\xff':
                        state = 3
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # DFA for the COMMENT token: optional spaces, then a '#' comment
        # terminated by '\n'; accepting only after the newline (state 1),
        # from where another space/comment run may continue.
        def recognize_528667127(runner, i):
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == ' ':
                        state = 0
                        continue
                    elif char == '#':
                        state = 2
                    else:
                        break
                if state == 1:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return i
                    if char == ' ':
                        state = 0
                        continue
                    elif char == '#':
                        state = 2
                    else:
                        break
                if state == 2:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 2
                        return ~i
                    if char == '\n':
                        state = 1
                        continue
                    elif '\x00' <= char <= '\t':
                        state = 2
                        continue
                    elif '\x0b' <= char <= '\xff':
                        state = 2
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # DFA for the PYTHONCODE token: '{' then any bytes except '}'
        # (no nesting), closed by the first '}'.
        def recognize_291086639(runner, i):
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == '{':
                        state = 2
                    else:
                        break
                if state == 2:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 2
                        return ~i
                    if char == '}':
                        state = 1
                    elif '\x00' <= char <= '\t':
                        state = 2
                        continue
                    elif '\x0b' <= char <= '|':
                        state = 2
                        continue
                    elif '~' <= char <= '\xff':
                        state = 2
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # DFA for the NAME token: an identifier matching
        # [A-Za-z_][A-Za-z0-9_]*.
        def recognize_1074651696(runner, i):
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == '_':
                        state = 1
                    elif 'A' <= char <= 'Z':
                        state = 1
                    elif 'a' <= char <= 'z':
                        state = 1
                    else:
                        break
                if state == 1:
                    runner.last_matched_index = i - 1
                    runner.last_matched_state = state
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return i
                    if char == '_':
                        state = 1
                        continue
                    elif '0' <= char <= '9':
                        state = 1
                        continue
                    elif 'A' <= char <= 'Z':
                        state = 1
                        continue
                    elif 'a' <= char <= 'z':
                        state = 1
                        continue
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # DFA for the QUOTE token: a single-quoted literal with no escape
        # handling — any bytes except "'" up to the closing quote.
        def recognize_1124192327(runner, i):
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == "'":
                        state = 1
                    else:
                        break
                if state == 1:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '&':
                        state = 1
                        continue
                    elif '(' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == "'":
                        state = 2
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
        # DFA for the IGNORE token: either one whitespace character
        # (' ', '\t' or '\n') or a '#' comment running to its newline.
        def recognize_1979538501(runner, i):
            #auto-generated code, don't edit
            assert i >= 0
            input = runner.text
            state = 0
            while 1:
                if state == 0:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 0
                        return ~i
                    if char == '#':
                        state = 1
                    elif char == ' ':
                        state = 2
                    elif char == '\t':
                        state = 2
                    elif char == '\n':
                        state = 2
                    else:
                        break
                if state == 1:
                    try:
                        char = input[i]
                        i += 1
                    except IndexError:
                        runner.state = 1
                        return ~i
                    if '\x00' <= char <= '\t':
                        state = 1
                        continue
                    elif '\x0b' <= char <= '\xff':
                        state = 1
                        continue
                    elif char == '\n':
                        state = 2
                    else:
                        break
                runner.last_matched_state = state
                runner.last_matched_index = i - 1
                runner.state = state
                if i == len(input):
                    return i
                else:
                    return ~i
                break
            runner.state = state
            return ~i
class PyPackratSyntaxParser(PackratParser):
    """Public parser class for the PyPackrat grammar syntax.

    Defined with only ``__init__``; the grammar-rule methods are grafted
    onto it from ``Parser`` by the module-level loop that follows this
    class, and ``init_parser`` is bound to ``Parser.__init__`` there.
    """
    def __init__(self, stream):
        self.init_parser(stream)
# Graft every rule method (and helper) defined on Parser onto
# PyPackratSyntaxParser, so the public class exposes the full generated
# rule set.  Attributes in `forbidden` are implementation details that
# must stay per-class and are never copied; anything already defined on
# PyPackratSyntaxParser (e.g. its own __init__) is also left alone.
# (Fix: dropped the dead `initthere` assignment — it was computed but
# never read anywhere.)
forbidden = dict.fromkeys(("__weakref__ __doc__ "
                           "__dict__ __module__").split())
for key, value in Parser.__dict__.items():
    if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
        setattr(PyPackratSyntaxParser, key, value)
# PyPackratSyntaxParser.__init__ delegates to self.init_parser(stream):
# bind Parser.__init__'s underlying function under that name (Python 2:
# im_func unwraps the unbound method to a plain function).
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
| 42.014035 | 93 | 0.466374 |
from rpython.rlib.parsing.tree import Nonterminal, Symbol
from rpython.rlib.parsing.makepackrat import PackratParser, BacktrackException, Status
class Parser(object):
def NAME(self):
return self._NAME().result
def _NAME(self):
_key = self._pos
_status = self._dict_NAME.get(_key, None)
if _status is None:
_status = self._dict_NAME[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex1074651696()
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def SPACE(self):
return self._SPACE().result
def _SPACE(self):
_key = self._pos
_status = self._dict_SPACE.get(_key, None)
if _status is None:
_status = self._dict_SPACE[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self.__chars__(' ')
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def COMMENT(self):
return self._COMMENT().result
def _COMMENT(self):
_key = self._pos
_status = self._dict_COMMENT.get(_key, None)
if _status is None:
_status = self._dict_COMMENT[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex528667127()
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def IGNORE(self):
return self._IGNORE().result
def _IGNORE(self):
_key = self._pos
_status = self._dict_IGNORE.get(_key, None)
if _status is None:
_status = self._dict_IGNORE[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex1979538501()
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def newline(self):
return self._newline().result
def _newline(self):
_key = self._pos
_status = self._dict_newline.get(_key, None)
if _status is None:
_status = self._dict_newline[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
while 1:
_choice0 = self._pos
try:
_call_status = self._COMMENT()
_result = _call_status.result
_error = _call_status.error
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice1 = self._pos
try:
_result = self._regex299149370()
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
raise BacktrackException(_error)
_result = self._regex299149370()
break
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._newline()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def REGEX(self):
return self._REGEX().result
def _REGEX(self):
_key = self._pos
_status = self._dict_REGEX.get(_key, None)
if _status is None:
_status = self._dict_REGEX[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex1006631623()
r = _result
_result = (Symbol('REGEX', r, None))
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def QUOTE(self):
return self._QUOTE().result
def _QUOTE(self):
_key = self._pos
_status = self._dict_QUOTE.get(_key, None)
if _status is None:
_status = self._dict_QUOTE[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex1124192327()
r = _result
_result = (Symbol('QUOTE', r, None))
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def PYTHONCODE(self):
return self._PYTHONCODE().result
def _PYTHONCODE(self):
_key = self._pos
_status = self._dict_PYTHONCODE.get(_key, None)
if _status is None:
_status = self._dict_PYTHONCODE[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
_startingpos = self._pos
try:
_result = None
_error = None
_result = self._regex291086639()
r = _result
_result = (Symbol('PYTHONCODE', r, None))
assert _status.status != _status.LEFTRECURSION
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def EOF(self):
return self._EOF().result
def _EOF(self):
_key = self._pos
_status = self._dict_EOF.get(_key, None)
if _status is None:
_status = self._dict_EOF[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
_choice0 = self._pos
_stored_result1 = _result
try:
_result = self.__any__()
except BacktrackException:
self._pos = _choice0
_result = _stored_result1
else:
raise BacktrackException(None)
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._EOF()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = _exc.error
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def file(self):
return self._file().result
def _file(self):
_key = self._pos
_status = self._dict_file.get(_key, None)
if _status is None:
_status = self._dict_file[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
_all0 = []
while 1:
_choice1 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
_result = _all0
_call_status = self._list()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_before_discard2 = _result
_call_status = self._EOF()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_result = _before_discard2
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._file()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def list(self):
return self._list().result
def _list(self):
_key = self._pos
_status = self._dict_list.get(_key, None)
if _status is None:
_status = self._dict_list[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
_all0 = []
_call_status = self._production()
_result = _call_status.result
_error = _call_status.error
_all0.append(_result)
while 1:
_choice1 = self._pos
try:
_call_status = self._production()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
_result = _all0
content = _result
_result = (Nonterminal('list', content))
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._list()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def production(self):
return self._production().result
def _production(self):
_key = self._pos
_status = self._dict_production.get(_key, None)
if _status is None:
_status = self._dict_production[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
_call_status = self._NAME()
_result = _call_status.result
_error = _call_status.error
name = _result
_all0 = []
while 1:
_choice1 = self._pos
try:
_call_status = self._SPACE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all0.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice1
break
_result = _all0
_call_status = self._productionargs()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
args = _result
_result = self.__chars__(':')
_all2 = []
while 1:
_choice3 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all2.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice3
break
_result = _all2
_call_status = self._or_()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
what = _result
_all4 = []
while 1:
_choice5 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all4.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
_result = _all4
_result = self.__chars__(';')
_all6 = []
while 1:
_choice7 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
_result = _all6
_result = (Nonterminal('production', [name, args, what]))
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._production()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def productionargs(self):
return self._productionargs().result
def _productionargs(self):
_key = self._pos
_status = self._dict_productionargs.get(_key, None)
if _status is None:
_status = self._dict_productionargs[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
while 1:
_choice0 = self._pos
try:
_result = self.__chars__('(')
_all1 = []
while 1:
_choice2 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = _call_status.error
_all1.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice2
break
_result = _all1
_all3 = []
while 1:
_choice4 = self._pos
try:
_call_status = self._NAME()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_before_discard5 = _result
_all6 = []
while 1:
_choice7 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all6.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice7
break
_result = _all6
_result = self.__chars__(',')
_all8 = []
while 1:
_choice9 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all8.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
break
_result = _all8
_result = _before_discard5
_all3.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
_result = _all3
args = _result
_call_status = self._NAME()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
arg = _result
_all10 = []
while 1:
_choice11 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all10.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice11
break
_result = _all10
_result = self.__chars__(')')
_all12 = []
while 1:
_choice13 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all12.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice13
break
_result = _all12
_result = (Nonterminal('productionargs', args + [arg]))
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice14 = self._pos
try:
_result = (Nonterminal('productionargs', []))
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice14
raise BacktrackException(_error)
_result = (Nonterminal('productionargs', []))
break
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._productionargs()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def or_(self):
return self._or_().result
def _or_(self):
_key = self._pos
_status = self._dict_or_.get(_key, None)
if _status is None:
_status = self._dict_or_[_key] = Status()
else:
_statusstatus = _status.status
if _statusstatus == _status.NORMAL:
self._pos = _status.pos
return _status
elif _statusstatus == _status.ERROR:
raise BacktrackException(_status.error)
elif (_statusstatus == _status.INPROGRESS or
_statusstatus == _status.LEFTRECURSION):
_status.status = _status.LEFTRECURSION
if _status.result is not None:
self._pos = _status.pos
return _status
else:
raise BacktrackException(None)
elif _statusstatus == _status.SOMESOLUTIONS:
_status.status = _status.INPROGRESS
_startingpos = self._pos
try:
_result = None
_error = None
while 1:
_choice0 = self._pos
try:
_all1 = []
_call_status = self._commands()
_result = _call_status.result
_error = _call_status.error
_before_discard2 = _result
_result = self.__chars__('|')
_all3 = []
while 1:
_choice4 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all3.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice4
break
_result = _all3
_result = _before_discard2
_all1.append(_result)
while 1:
_choice5 = self._pos
try:
_call_status = self._commands()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_before_discard6 = _result
_result = self.__chars__('|')
_all7 = []
while 1:
_choice8 = self._pos
try:
_call_status = self._IGNORE()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
_all7.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice8
break
_result = _all7
_result = _before_discard6
_all1.append(_result)
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice5
break
_result = _all1
l = _result
_call_status = self._commands()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
last = _result
_result = (Nonterminal('or', l + [last]))
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice0
_choice9 = self._pos
try:
_call_status = self._commands()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
except BacktrackException as _exc:
_error = self._combine_errors(_error, _exc.error)
self._pos = _choice9
raise BacktrackException(_error)
_call_status = self._commands()
_result = _call_status.result
_error = self._combine_errors(_error, _call_status.error)
break
if _status.status == _status.LEFTRECURSION:
if _status.result is not None:
if _status.pos >= self._pos:
_status.status = _status.NORMAL
self._pos = _status.pos
return _status
_status.pos = self._pos
_status.status = _status.SOMESOLUTIONS
_status.result = _result
_status.error = _error
self._pos = _startingpos
return self._or_()
_status.status = _status.NORMAL
_status.pos = self._pos
_status.result = _result
_status.error = _error
return _status
except BacktrackException as _exc:
_status.pos = -1
_status.result = None
_error = self._combine_errors(_error, _exc.error)
_status.error = _error
_status.status = _status.ERROR
raise BacktrackException(_error)
def commands(self):
return self._commands().result
    def _commands(self):
        """Memoized (packrat) parser for grammar rule ``commands``.

        Recognizes either several newline-terminated commands or a single
        command, and wraps the matches in ``Nonterminal('commands', ...)``.
        NOTE(review): this template-heavy code appears machine-generated from
        a grammar description; prefer regenerating over hand-editing.
        """
        _key = self._pos
        _status = self._dict_commands.get(_key, None)
        if _status is None:
            _status = self._dict_commands[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                # Re-entered while already parsing here: the rule is
                # left-recursive; return the current seed result or fail.
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = _call_status.error
                    cmd = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all1 = []
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard2 = _result
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _result = _before_discard2
                    _all1.append(_result)
                    while 1:
                        _choice3 = self._pos
                        try:
                            _call_status = self._command()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard4 = _result
                            _call_status = self._newline()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _result = _before_discard4
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice3
                            break
                    _result = _all1
                    cmds = _result
                    _result = (Nonterminal('commands', [cmd] + cmds))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Fallback alternative: a single command.
                    _choice5 = self._pos
                    try:
                        _call_status = self._command()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                # Unreachable duplicate of the last alternative (generator artifact).
                _call_status = self._command()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commands()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def command(self):
return self._command().result
    def _command(self):
        """Memoized (packrat) parser for grammar rule ``command``.

        Delegates directly to ``_simplecommand``; exists so the grammar has a
        distinct memoized ``command`` entry point.  NOTE(review): appears
        machine-generated; prefer regenerating over hand-editing.
        """
        _key = self._pos
        _status = self._dict_command.get(_key, None)
        if _status is None:
            _status = self._dict_command[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def simplecommand(self):
return self._simplecommand().result
    def _simplecommand(self):
        """Memoized (packrat) parser for grammar rule ``simplecommand``.

        Ordered choice over the sub-rules ``return_``, ``if_``,
        ``named_command``, ``repetition``, ``choose`` and ``negation``; the
        first alternative that matches wins.  NOTE(review): appears
        machine-generated; prefer regenerating over hand-editing.
        """
        _key = self._pos
        _status = self._dict_simplecommand.get(_key, None)
        if _status is None:
            _status = self._dict_simplecommand[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # Each failed alternative rewinds self._pos before trying the next.
            while 1:
                _choice0 = self._pos
                try:
                    _call_status = self._return_()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    _choice1 = self._pos
                    try:
                        _call_status = self._if_()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice1
                        _choice2 = self._pos
                        try:
                            _call_status = self._named_command()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            _choice3 = self._pos
                            try:
                                _call_status = self._repetition()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice3
                                _choice4 = self._pos
                                try:
                                    _call_status = self._choose()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    break
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice4
                                    _choice5 = self._pos
                                    try:
                                        _call_status = self._negation()
                                        _result = _call_status.result
                                        _error = self._combine_errors(_error, _call_status.error)
                                        break
                                    except BacktrackException as _exc:
                                        _error = self._combine_errors(_error, _exc.error)
                                        self._pos = _choice5
                                        raise BacktrackException(_error)
                # Unreachable duplicate of the last alternative (generator artifact).
                _call_status = self._negation()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._simplecommand()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def return_(self):
return self._return_().result
    def _return_(self):
        """Memoized (packrat) parser for grammar rule ``return``.

        Matches ``'return' SPACE* PYTHONCODE IGNORE*`` and yields
        ``Nonterminal('return', [code])``.  NOTE(review): appears
        machine-generated; prefer regenerating over hand-editing.
        """
        _key = self._pos
        _status = self._dict_return_.get(_key, None)
        if _status is None:
            _status = self._dict_return_[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('return')
            # Zero-or-more SPACE tokens.
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            code = _result
            # Zero-or-more IGNORE tokens (skipped trailing layout).
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = (Nonterminal('return', [code]))
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._return_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def if_(self):
return self._if_().result
    def _if_(self):
        """Memoized (packrat) parser for grammar rule ``if``.

        Two alternatives: ``'do' newline command SPACE* 'if' SPACE*
        PYTHONCODE IGNORE*`` yielding ``Nonterminal('if', [cmd, condition])``,
        or plain ``'if' SPACE* PYTHONCODE IGNORE*`` yielding
        ``Nonterminal('if', [condition])``.  NOTE(review): appears
        machine-generated; prefer regenerating over hand-editing.
        """
        _key = self._pos
        _status = self._dict_if_.get(_key, None)
        if _status is None:
            _status = self._dict_if_[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: do <command> if <condition>.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('do')
                    _call_status = self._newline()
                    _result = _call_status.result
                    _error = _call_status.error
                    _call_status = self._command()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    cmd = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('if')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    condition = _result
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('if', [cmd, condition]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Second alternative: bare if <condition>.
                    _choice7 = self._pos
                    try:
                        _result = self.__chars__('if')
                        _all8 = []
                        while 1:
                            _choice9 = self._pos
                            try:
                                _call_status = self._SPACE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all8.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice9
                                break
                        _result = _all8
                        _call_status = self._PYTHONCODE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        condition = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = (Nonterminal('if', [condition]))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice7
                        raise BacktrackException(_error)
                # Unreachable duplicate of the last alternative (generator artifact).
                _result = self.__chars__('if')
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                _call_status = self._PYTHONCODE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                condition = _result
                _all14 = []
                while 1:
                    _choice15 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all14.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice15
                        break
                _result = _all14
                _result = (Nonterminal('if', [condition]))
                break
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._if_()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def choose(self):
return self._choose().result
    def _choose(self):
        """Memoized (packrat) parser for grammar rule ``choose``.

        Matches ``'choose' SPACE* NAME SPACE* 'in' SPACE* PYTHONCODE IGNORE*
        commands`` and yields ``Nonterminal('choose', [name, expr, cmds])``.
        NOTE(review): appears machine-generated; prefer regenerating over
        hand-editing.
        """
        _key = self._pos
        _status = self._dict_choose.get(_key, None)
        if _status is None:
            _status = self._dict_choose[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _result = self.__chars__('choose')
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = _call_status.error
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _call_status = self._NAME()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            name = _result
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _result = self.__chars__('in')
            _all4 = []
            while 1:
                _choice5 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all4.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    break
            _result = _all4
            _call_status = self._PYTHONCODE()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            expr = _result
            _all6 = []
            while 1:
                _choice7 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all6.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                    break
            _result = _all6
            _call_status = self._commands()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmds = _result
            _result = (Nonterminal('choose', [name, expr, cmds]))
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._choose()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def commandchain(self):
return self._commandchain().result
    def _commandchain(self):
        """Memoized (packrat) parser for grammar rule ``commandchain``.

        Matches one-or-more ``simplecommand`` and yields
        ``Nonterminal('commands', result)``.  NOTE(review): appears
        machine-generated; prefer regenerating over hand-editing.
        """
        _key = self._pos
        _status = self._dict_commandchain.get(_key, None)
        if _status is None:
            _status = self._dict_commandchain[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            # One mandatory simplecommand, then zero-or-more further ones.
            _all0 = []
            _call_status = self._simplecommand()
            _result = _call_status.result
            _error = _call_status.error
            _all0.append(_result)
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._simplecommand()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            result = _result
            _result = (Nonterminal('commands', result))
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._commandchain()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def named_command(self):
return self._named_command().result
    def _named_command(self):
        """Memoized (packrat) parser for grammar rule ``named_command``.

        Matches ``NAME SPACE* '=' SPACE* command`` and yields
        ``Nonterminal('named_command', [name, cmd])``.  NOTE(review): appears
        machine-generated; prefer regenerating over hand-editing.
        """
        _key = self._pos
        _status = self._dict_named_command.get(_key, None)
        if _status is None:
            _status = self._dict_named_command[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            name = _result
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = self.__chars__('=')
            _all2 = []
            while 1:
                _choice3 = self._pos
                try:
                    _call_status = self._SPACE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all2.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice3
                    break
            _result = _all2
            _call_status = self._command()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            cmd = _result
            _result = (Nonterminal('named_command', [name, cmd]))
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._named_command()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def repetition(self):
return self._repetition().result
    def _repetition(self):
        """Memoized (packrat) parser for grammar rule ``repetition``.

        Two alternatives: ``enclosed SPACE* '?' IGNORE*`` yielding
        ``Nonterminal('maybe', [what])``, or ``enclosed SPACE* ('*'|'+')
        IGNORE*`` yielding ``Nonterminal('repetition', [repetition, what])``.
        NOTE(review): appears machine-generated; prefer regenerating over
        hand-editing.
        """
        _key = self._pos
        _status = self._dict_repetition.get(_key, None)
        if _status is None:
            _status = self._dict_repetition[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: optional marker '?'.
                _choice0 = self._pos
                try:
                    _call_status = self._enclosed()
                    _result = _call_status.result
                    _error = _call_status.error
                    what = _result
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _result = self.__chars__('?')
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('maybe', [what]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Second alternative: repetition marker '*' or '+'.
                    _choice5 = self._pos
                    try:
                        _call_status = self._enclosed()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        what = _result
                        _all6 = []
                        while 1:
                            _choice7 = self._pos
                            try:
                                _call_status = self._SPACE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all6.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice7
                                break
                        _result = _all6
                        while 1:
                            _choice8 = self._pos
                            try:
                                _result = self.__chars__('*')
                                break
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice8
                                _choice9 = self._pos
                                try:
                                    _result = self.__chars__('+')
                                    break
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    raise BacktrackException(_error)
                            _result = self.__chars__('+')
                            break
                        repetition = _result
                        _all10 = []
                        while 1:
                            _choice11 = self._pos
                            try:
                                _call_status = self._IGNORE()
                                _result = _call_status.result
                                _error = self._combine_errors(_error, _call_status.error)
                                _all10.append(_result)
                            except BacktrackException as _exc:
                                _error = self._combine_errors(_error, _exc.error)
                                self._pos = _choice11
                                break
                        _result = _all10
                        _result = (Nonterminal('repetition', [repetition, what]))
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                # Unreachable duplicate of the last alternative (generator artifact).
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                what = _result
                _all12 = []
                while 1:
                    _choice13 = self._pos
                    try:
                        _call_status = self._SPACE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all12.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice13
                        break
                _result = _all12
                while 1:
                    _choice14 = self._pos
                    try:
                        _result = self.__chars__('*')
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice14
                        _choice15 = self._pos
                        try:
                            _result = self.__chars__('+')
                            break
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice15
                            raise BacktrackException(_error)
                    _result = self.__chars__('+')
                    break
                repetition = _result
                _all16 = []
                while 1:
                    _choice17 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all16.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice17
                        break
                _result = _all16
                _result = (Nonterminal('repetition', [repetition, what]))
                break
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._repetition()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def negation(self):
return self._negation().result
    def _negation(self):
        """Memoized (packrat) parser for grammar rule ``negation``.

        Matches ``'!' SPACE* negation IGNORE*`` (recursively) yielding
        ``Nonterminal('negation', [what])``, otherwise falls through to
        ``enclosed``.  NOTE(review): appears machine-generated; prefer
        regenerating over hand-editing.
        """
        _key = self._pos
        _status = self._dict_negation.get(_key, None)
        if _status is None:
            _status = self._dict_negation[_key] = Status()
        else:
            # Replay or handle the memoized outcome for this input position.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        # No usable memo entry: attempt a fresh parse from here.
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # First alternative: '!' prefixed (recursive) negation.
                _choice0 = self._pos
                try:
                    _result = self.__chars__('!')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._SPACE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._negation()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = (Nonterminal('negation', [what]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                    # Fallback alternative: a plain enclosed term.
                    _choice5 = self._pos
                    try:
                        _call_status = self._enclosed()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        break
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice5
                        raise BacktrackException(_error)
                # Unreachable duplicate of the last alternative (generator artifact).
                _call_status = self._enclosed()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            # Left-recursion support: grow the seed until the match stops growing.
            if _status.status == _status.LEFTRECURSION:
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._negation()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Memoize the failure so later attempts at this position fail fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def enclosed(self):
return self._enclosed().result
    def _enclosed(self):
        """Memoized packrat rule `enclosed` (auto-generated code, don't edit).

        Grammar alternatives, each followed by IGNORE-token skipping:
          '<' primary '>'  -> Nonterminal('exclusive', [what])
          '[' or_ ']'      -> Nonterminal('ignore', [what])
          '(' or_ ')'      -> the inner result, parentheses discarded
          primary          -> passed through
        Results are cached per input position in ``self._dict_enclosed`` via
        Status objects, which also drive left-recursion detection.
        """
        _key = self._pos
        _status = self._dict_enclosed.get(_key, None)
        if _status is None:
            _status = self._dict_enclosed[_key] = Status()
        else:
            # Cache hit: replay, fail, or handle an in-progress (recursive) call.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: '<' primary '>'
                _choice0 = self._pos
                try:
                    _result = self.__chars__('<')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = self.__chars__('>')
                    _all5 = []
                    while 1:
                        _choice6 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all5.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice6
                            break
                    _result = _all5
                    _result = (Nonterminal('exclusive', [what]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # Alternative 2: '[' or_ ']'
                _choice7 = self._pos
                try:
                    _result = self.__chars__('[')
                    _all8 = []
                    while 1:
                        _choice9 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all8.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice9
                            break
                    _result = _all8
                    _call_status = self._or_()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    what = _result
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = self.__chars__(']')
                    _all12 = []
                    while 1:
                        _choice13 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all12.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice13
                            break
                    _result = _all12
                    _result = (Nonterminal('ignore', [what]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice7
                # Alternative 3: '(' or_ ')' -- parentheses are discarded
                _choice14 = self._pos
                try:
                    _before_discard15 = _result
                    _result = self.__chars__('(')
                    _all16 = []
                    while 1:
                        _choice17 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all16.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice17
                            break
                    _result = _all16
                    _result = _before_discard15
                    _call_status = self._or_()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard18 = _result
                    _result = self.__chars__(')')
                    _all19 = []
                    while 1:
                        _choice20 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all19.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice20
                            break
                    _result = _all19
                    _result = _before_discard18
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice14
                # Alternative 4 (last): bare primary; failure here aborts the rule
                _choice21 = self._pos
                try:
                    _call_status = self._primary()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice21
                    raise BacktrackException(_error)
                # NOTE: generator emits this duplicated (unreachable) tail; keep as-is.
                _call_status = self._primary()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed loop for left recursion: retry until no progress.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._enclosed()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def primary(self):
return self._primary().result
    def _primary(self):
        """Memoized packrat rule `primary` (auto-generated code, don't edit).

        Grammar: call | REGEX IGNORE* | QUOTE IGNORE*.
        Results are cached per input position in ``self._dict_primary`` via
        Status objects, which also drive left-recursion detection.
        """
        _key = self._pos
        _status = self._dict_primary.get(_key, None)
        if _status is None:
            _status = self._dict_primary[_key] = Status()
        else:
            # Cache hit: replay, fail, or handle an in-progress (recursive) call.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: a rule call
                _choice0 = self._pos
                try:
                    _call_status = self._call()
                    _result = _call_status.result
                    _error = _call_status.error
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # Alternative 2: a REGEX token, trailing IGNORE skipped
                _choice1 = self._pos
                try:
                    _call_status = self._REGEX()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard2 = _result
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    _result = _before_discard2
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                # Alternative 3 (last): a QUOTE token; failure here aborts the rule
                _choice5 = self._pos
                try:
                    _call_status = self._QUOTE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _before_discard6 = _result
                    _all7 = []
                    while 1:
                        _choice8 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all7.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice8
                            break
                    _result = _all7
                    _result = _before_discard6
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice5
                    raise BacktrackException(_error)
                # NOTE: generator emits this duplicated (unreachable) tail; keep as-is.
                _call_status = self._QUOTE()
                _result = _call_status.result
                _error = self._combine_errors(_error, _call_status.error)
                _before_discard9 = _result
                _all10 = []
                while 1:
                    _choice11 = self._pos
                    try:
                        _call_status = self._IGNORE()
                        _result = _call_status.result
                        _error = self._combine_errors(_error, _call_status.error)
                        _all10.append(_result)
                    except BacktrackException as _exc:
                        _error = self._combine_errors(_error, _exc.error)
                        self._pos = _choice11
                        break
                _result = _all10
                _result = _before_discard9
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed loop for left recursion: retry until no progress.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._primary()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def call(self):
return self._call().result
    def _call(self):
        """Memoized packrat rule `call` (auto-generated code, don't edit).

        Grammar: NAME arguments IGNORE*  ->  Nonterminal("call", [name, args]).
        Results are cached per input position in ``self._dict_call`` via
        Status objects, which also drive left-recursion detection.
        """
        _key = self._pos
        _status = self._dict_call.get(_key, None)
        if _status is None:
            _status = self._dict_call[_key] = Status()
        else:
            # Cache hit: replay, fail, or handle an in-progress (recursive) call.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            _call_status = self._NAME()
            _result = _call_status.result
            _error = _call_status.error
            x = _result
            _call_status = self._arguments()
            _result = _call_status.result
            _error = self._combine_errors(_error, _call_status.error)
            args = _result
            _all0 = []
            while 1:
                _choice1 = self._pos
                try:
                    _call_status = self._IGNORE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    _all0.append(_result)
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice1
                    break
            _result = _all0
            _result = (Nonterminal("call", [x, args]))
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed loop for left recursion: retry until no progress.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._call()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def arguments(self):
return self._arguments().result
    def _arguments(self):
        """Memoized packrat rule `arguments` (auto-generated code, don't edit).

        Grammar:
          '(' IGNORE* (PYTHONCODE IGNORE* ',' IGNORE*)* PYTHONCODE ')' IGNORE*
              -> Nonterminal("args", [code, ..., last])
        | <empty> -> Nonterminal("args", [])
        Results are cached per input position in ``self._dict_arguments``.
        """
        _key = self._pos
        _status = self._dict_arguments.get(_key, None)
        if _status is None:
            _status = self._dict_arguments[_key] = Status()
        else:
            # Cache hit: replay, fail, or handle an in-progress (recursive) call.
            _statusstatus = _status.status
            if _statusstatus == _status.NORMAL:
                self._pos = _status.pos
                return _status
            elif _statusstatus == _status.ERROR:
                raise BacktrackException(_status.error)
            elif (_statusstatus == _status.INPROGRESS or
                  _statusstatus == _status.LEFTRECURSION):
                _status.status = _status.LEFTRECURSION
                if _status.result is not None:
                    self._pos = _status.pos
                    return _status
                else:
                    raise BacktrackException(None)
            elif _statusstatus == _status.SOMESOLUTIONS:
                _status.status = _status.INPROGRESS
        _startingpos = self._pos
        try:
            _result = None
            _error = None
            while 1:
                # Alternative 1: parenthesised, comma-separated PYTHONCODE list
                _choice0 = self._pos
                try:
                    _result = self.__chars__('(')
                    _all1 = []
                    while 1:
                        _choice2 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = _call_status.error
                            _all1.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice2
                            break
                    _result = _all1
                    _all3 = []
                    while 1:
                        _choice4 = self._pos
                        try:
                            _call_status = self._PYTHONCODE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _before_discard5 = _result
                            _all6 = []
                            while 1:
                                _choice7 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all6.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice7
                                    break
                            _result = _all6
                            _result = self.__chars__(',')
                            _all8 = []
                            while 1:
                                _choice9 = self._pos
                                try:
                                    _call_status = self._IGNORE()
                                    _result = _call_status.result
                                    _error = self._combine_errors(_error, _call_status.error)
                                    _all8.append(_result)
                                except BacktrackException as _exc:
                                    _error = self._combine_errors(_error, _exc.error)
                                    self._pos = _choice9
                                    break
                            _result = _all8
                            _result = _before_discard5
                            _all3.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice4
                            break
                    _result = _all3
                    args = _result
                    _call_status = self._PYTHONCODE()
                    _result = _call_status.result
                    _error = self._combine_errors(_error, _call_status.error)
                    last = _result
                    _result = self.__chars__(')')
                    _all10 = []
                    while 1:
                        _choice11 = self._pos
                        try:
                            _call_status = self._IGNORE()
                            _result = _call_status.result
                            _error = self._combine_errors(_error, _call_status.error)
                            _all10.append(_result)
                        except BacktrackException as _exc:
                            _error = self._combine_errors(_error, _exc.error)
                            self._pos = _choice11
                            break
                    _result = _all10
                    _result = (Nonterminal("args", args + [last]))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice0
                # Alternative 2 (last): no argument list at all
                _choice12 = self._pos
                try:
                    _result = (Nonterminal("args", []))
                    break
                except BacktrackException as _exc:
                    _error = self._combine_errors(_error, _exc.error)
                    self._pos = _choice12
                    raise BacktrackException(_error)
                # NOTE: generator emits this duplicated (unreachable) tail; keep as-is.
                _result = (Nonterminal("args", []))
                break
            if _status.status == _status.LEFTRECURSION:
                # Grow-the-seed loop for left recursion: retry until no progress.
                if _status.result is not None:
                    if _status.pos >= self._pos:
                        _status.status = _status.NORMAL
                        self._pos = _status.pos
                        return _status
                _status.pos = self._pos
                _status.status = _status.SOMESOLUTIONS
                _status.result = _result
                _status.error = _error
                self._pos = _startingpos
                return self._arguments()
            _status.status = _status.NORMAL
            _status.pos = self._pos
            _status.result = _result
            _status.error = _error
            return _status
        except BacktrackException as _exc:
            # Cache the failure so re-parsing this position fails fast.
            _status.pos = -1
            _status.result = None
            _error = self._combine_errors(_error, _exc.error)
            _status.error = _error
            _status.status = _status.ERROR
            raise BacktrackException(_error)
def __init__(self, inputstream):
self._dict_NAME = {}
self._dict_SPACE = {}
self._dict_COMMENT = {}
self._dict_IGNORE = {}
self._dict_newline = {}
self._dict_REGEX = {}
self._dict_QUOTE = {}
self._dict_PYTHONCODE = {}
self._dict_EOF = {}
self._dict_file = {}
self._dict_list = {}
self._dict_production = {}
self._dict_productionargs = {}
self._dict_or_ = {}
self._dict_commands = {}
self._dict_command = {}
self._dict_simplecommand = {}
self._dict_return_ = {}
self._dict_if_ = {}
self._dict_choose = {}
self._dict_commandchain = {}
self._dict_named_command = {}
self._dict_repetition = {}
self._dict_negation = {}
self._dict_enclosed = {}
self._dict_primary = {}
self._dict_call = {}
self._dict_arguments = {}
self._pos = 0
self._inputstream = inputstream
def _regex299149370(self):
_choice13 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_299149370(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice13
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1006631623(self):
_choice14 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1006631623(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice14
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex528667127(self):
_choice15 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_528667127(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice15
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex291086639(self):
_choice16 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_291086639(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice16
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1074651696(self):
_choice17 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1074651696(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice17
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1124192327(self):
_choice18 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1124192327(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice18
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
def _regex1979538501(self):
_choice19 = self._pos
_runner = self._Runner(self._inputstream, self._pos)
_i = _runner.recognize_1979538501(self._pos)
if _runner.last_matched_state == -1:
self._pos = _choice19
raise BacktrackException
_upto = _runner.last_matched_index + 1
_pos = self._pos
assert _pos >= 0
assert _upto >= 0
_result = self._inputstream[_pos: _upto]
self._pos = _upto
return _result
class _Runner(object):
def __init__(self, text, pos):
self.text = text
self.pos = pos
self.last_matched_state = -1
self.last_matched_index = -1
self.state = -1
def recognize_299149370(runner, i):
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
runner.last_matched_index = i - 1
runner.last_matched_state = state
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return i
if char == '\n':
state = 1
elif char == ' ':
state = 2
else:
break
if state == 1:
runner.last_matched_index = i - 1
runner.last_matched_state = state
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return i
if char == '\n':
state = 1
continue
elif char == ' ':
state = 1
continue
else:
break
if state == 2:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 2
return ~i
if char == '\n':
state = 1
continue
elif char == ' ':
state = 2
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_1006631623(runner, i):
#auto-generated code, don't edit
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == '`':
state = 3
else:
break
if state == 2:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 2
return ~i
if '\x00' <= char <= '\xff':
state = 3
else:
break
if state == 3:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 3
return ~i
if char == '`':
state = 1
elif char == '\\':
state = 2
continue
elif ']' <= char <= '_':
state = 3
continue
elif '\x00' <= char <= '[':
state = 3
continue
elif 'a' <= char <= '\xff':
state = 3
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_528667127(runner, i):
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == ' ':
state = 0
continue
elif char == '
state = 2
else:
break
if state == 1:
runner.last_matched_index = i - 1
runner.last_matched_state = state
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return i
if char == ' ':
state = 0
continue
elif char == '
state = 2
else:
break
if state == 2:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 2
return ~i
if char == '\n':
state = 1
continue
elif '\x00' <= char <= '\t':
state = 2
continue
elif '\x0b' <= char <= '\xff':
state = 2
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_291086639(runner, i):
#auto-generated code, don't edit
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == '{':
state = 2
else:
break
if state == 2:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 2
return ~i
if char == '}':
state = 1
elif '\x00' <= char <= '\t':
state = 2
continue
elif '\x0b' <= char <= '|':
state = 2
continue
elif '~' <= char <= '\xff':
state = 2
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_1074651696(runner, i):
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == '_':
state = 1
elif 'A' <= char <= 'Z':
state = 1
elif 'a' <= char <= 'z':
state = 1
else:
break
if state == 1:
runner.last_matched_index = i - 1
runner.last_matched_state = state
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return i
if char == '_':
state = 1
continue
elif '0' <= char <= '9':
state = 1
continue
elif 'A' <= char <= 'Z':
state = 1
continue
elif 'a' <= char <= 'z':
state = 1
continue
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_1124192327(runner, i):
#auto-generated code, don't edit
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == "'":
state = 1
else:
break
if state == 1:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return ~i
if '\x00' <= char <= '&':
state = 1
continue
elif '(' <= char <= '\xff':
state = 1
continue
elif char == "'":
state = 2
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
def recognize_1979538501(runner, i):
assert i >= 0
input = runner.text
state = 0
while 1:
if state == 0:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 0
return ~i
if char == '
state = 1
elif char == ' ':
state = 2
elif char == '\t':
state = 2
elif char == '\n':
state = 2
else:
break
if state == 1:
try:
char = input[i]
i += 1
except IndexError:
runner.state = 1
return ~i
if '\x00' <= char <= '\t':
state = 1
continue
elif '\x0b' <= char <= '\xff':
state = 1
continue
elif char == '\n':
state = 2
else:
break
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
return i
else:
return ~i
break
runner.state = state
return ~i
class PyPackratSyntaxParser(PackratParser):
    """Public parser facade.

    The generated rule methods are grafted onto this class by the
    module-level loop that follows the class definition.
    """
    def __init__(self, stream):
        # init_parser is Parser.__init__, installed below via im_func.
        self.init_parser(stream)
# Copy every generated Parser method/attribute onto PyPackratSyntaxParser,
# skipping class-special attributes that must not be shared between classes
# and anything PyPackratSyntaxParser already defines itself.
forbidden = dict.fromkeys(("__weakref__ __doc__ "
                "__dict__ __module__").split())
initthere = "__init__" in PyPackratSyntaxParser.__dict__
for key, value in Parser.__dict__.items():
    if key not in PyPackratSyntaxParser.__dict__ and key not in forbidden:
        setattr(PyPackratSyntaxParser, key, value)
# Expose Parser.__init__ under the init_parser name (Python 2: im_func
# unwraps the unbound method so it can be re-bound on another class).
PyPackratSyntaxParser.init_parser = Parser.__init__.im_func
| true | true |
f7268b17e5afdf9edaac16ec22aa1865bf00ab9e | 6,337 | py | Python | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 26 | 2018-10-30T12:47:58.000Z | 2022-03-29T08:39:00.000Z | RecoTracker/ConversionSeedGenerators/python/PhotonConversionTrajectorySeedProducerFromSingleLeg_cfi.py | p2l1pfp/cmssw | 9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | import FWCore.ParameterSet.Config as cms
from RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi import seedGeneratorFromRegionHitsEDProducer
# Reuse the standard cluster-multiplicity check from the common seed generator.
CommonClusterCheckPSet = seedGeneratorFromRegionHitsEDProducer.ClusterCheckPSet
# Seed producer for photon-conversion tracking using a single conversion leg.
photonConvTrajSeedFromSingleLeg = cms.EDProducer("PhotonConversionTrajectorySeedProducerFromSingleLeg",
                                                 TrackRefitter = cms.InputTag('TrackRefitter',''),
                                                 primaryVerticesTag = cms.InputTag("offlinePrimaryVertices"),
                                                 beamSpotInputTag = cms.InputTag("offlineBeamSpot"),
                                                 newSeedCandidates = cms.string("convSeedCandidates"),
                                                 xcheckSeedCandidates = cms.string("xcheckSeedCandidates"),
                                                 vtxMinDoF = cms.double(4),
                                                 maxDZSigmas = cms.double(10.),
                                                 maxNumSelVtx = cms.uint32(2),
                                                 applyTkVtxConstraint = cms.bool(True),
                                                 DoxcheckSeedCandidates = cms.bool(False),
                                                 OrderedHitsFactoryPSet = cms.PSet(
                                                     maxHitPairsPerTrackAndGenerator = cms.uint32(10),
                                                     maxElement = cms.uint32(40000),
                                                     SeedingLayers = cms.InputTag('convLayerPairs')
                                                     ),
                                                 SeedComparitorPSet = cms.PSet(
                                                     ComponentName = cms.string('none')
                                                     ),
                                                 ClusterCheckPSet = CommonClusterCheckPSet,
                                                 RegionFactoryPSet = cms.PSet(
                                                     RegionPSet = cms.PSet( precise = cms.bool(True),
                                                                            beamSpot = cms.InputTag("offlineBeamSpot"),
                                                                            originRadius = cms.double(3.0),
                                                                            ptMin = cms.double(0.2),
                                                                            originHalfLength = cms.double(12.0)
                                                                            ),
                                                     ComponentName = cms.string('GlobalRegionProducerFromBeamSpot')
                                                     ),
                                                 SeedCreatorPSet = cms.PSet(
                                                     ComponentName = cms.string('SeedForPhotonConversion1Leg'),
                                                     SeedMomentumForBOFF = cms.double(5.0),
                                                     propagator = cms.string('PropagatorWithMaterial'),
                                                     TTRHBuilder = cms.string('WithTrackAngle')
                                                     )
                                                 )
# Era customisation: low-pileup tracking uses tighter cluster limits.
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
trackingLowPU.toModify(photonConvTrajSeedFromSingleLeg,
    OrderedHitsFactoryPSet = dict(maxElement = 10000),
    ClusterCheckPSet = dict(
        MaxNumberOfCosmicClusters = 150000,
        MaxNumberOfPixelClusters = 20000,
        cut = "strip < 150000 && pixel < 20000 && (strip < 20000 + 7* pixel)"
    )
)
# Era customisation: Phase-2 PU140 raises the cluster limits and the ptMin cut.
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
trackingPhase2PU140.toModify(photonConvTrajSeedFromSingleLeg,
    ClusterCheckPSet = dict(
        MaxNumberOfCosmicClusters = 1000000,
        MaxNumberOfPixelClusters = 100000,
        cut = None
    ),
    OrderedHitsFactoryPSet = dict(maxElement = 100000),
    RegionFactoryPSet = dict(RegionPSet = dict(ptMin = 0.3)),
)
# Era customisation: peripheral PbPb collisions.
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
peripheralPbPb.toModify(photonConvTrajSeedFromSingleLeg,
    ClusterCheckPSet = dict(cut = "strip < 400000 && pixel < 40000 && (strip < 60000 + 7.0*pixel) && (pixel < 8000 + 0.14*strip)")
)
# Era customisation: heavy-ion runs (XeXe 2017, PbPb 2018).
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
       ClusterCheckPSet = dict(MaxNumberOfPixelClusters = 100000,
                               cut = "strip < 1000000 && pixel < 100000 && (strip < 50000 + 10*pixel) && (pixel < 5000 + strip/2.)"
                               ),
       OrderedHitsFactoryPSet = dict(maxElement = 100000)
)
# Heavy-ion runs also switch to a vertex-based tracking region.
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
                 RegionFactoryPSet = dict(ComponentName = 'GlobalTrackingRegionWithVerticesProducer',
                                          RegionPSet = _globalTrackingRegionWithVertices.RegionPSet.clone(
                                              originRadius = 0,
                                              originRScaling4BigEvts = True,
                                              minOriginR = 0,
                                              scalingStartNPix = 0,
                                              scalingEndNPix = 1#essentially turn off immediately
                                          ),
                 )
)
| 70.411111 | 153 | 0.453369 | import FWCore.ParameterSet.Config as cms
from RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi import seedGeneratorFromRegionHitsEDProducer
# Reuse the standard cluster-multiplicity check from the common seed generator.
CommonClusterCheckPSet = seedGeneratorFromRegionHitsEDProducer.ClusterCheckPSet
# Seed producer for photon-conversion tracking using a single conversion leg.
photonConvTrajSeedFromSingleLeg = cms.EDProducer("PhotonConversionTrajectorySeedProducerFromSingleLeg",
                                                 TrackRefitter = cms.InputTag('TrackRefitter',''),
                                                 primaryVerticesTag = cms.InputTag("offlinePrimaryVertices"),
                                                 beamSpotInputTag = cms.InputTag("offlineBeamSpot"),
                                                 newSeedCandidates = cms.string("convSeedCandidates"),
                                                 xcheckSeedCandidates = cms.string("xcheckSeedCandidates"),
                                                 vtxMinDoF = cms.double(4),
                                                 maxDZSigmas = cms.double(10.),
                                                 maxNumSelVtx = cms.uint32(2),
                                                 applyTkVtxConstraint = cms.bool(True),
                                                 DoxcheckSeedCandidates = cms.bool(False),
                                                 OrderedHitsFactoryPSet = cms.PSet(
                                                     maxHitPairsPerTrackAndGenerator = cms.uint32(10),
                                                     maxElement = cms.uint32(40000),
                                                     SeedingLayers = cms.InputTag('convLayerPairs')
                                                     ),
                                                 SeedComparitorPSet = cms.PSet(
                                                     ComponentName = cms.string('none')
                                                     ),
                                                 ClusterCheckPSet = CommonClusterCheckPSet,
                                                 RegionFactoryPSet = cms.PSet(
                                                     RegionPSet = cms.PSet( precise = cms.bool(True),
                                                                            beamSpot = cms.InputTag("offlineBeamSpot"),
                                                                            originRadius = cms.double(3.0),
                                                                            ptMin = cms.double(0.2),
                                                                            originHalfLength = cms.double(12.0)
                                                                            ),
                                                     ComponentName = cms.string('GlobalRegionProducerFromBeamSpot')
                                                     ),
                                                 SeedCreatorPSet = cms.PSet(
                                                     ComponentName = cms.string('SeedForPhotonConversion1Leg'),
                                                     SeedMomentumForBOFF = cms.double(5.0),
                                                     propagator = cms.string('PropagatorWithMaterial'),
                                                     TTRHBuilder = cms.string('WithTrackAngle')
                                                     )
                                                 )
# Era customisation: low-pileup tracking uses tighter cluster limits.
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
trackingLowPU.toModify(photonConvTrajSeedFromSingleLeg,
    OrderedHitsFactoryPSet = dict(maxElement = 10000),
    ClusterCheckPSet = dict(
        MaxNumberOfCosmicClusters = 150000,
        MaxNumberOfPixelClusters = 20000,
        cut = "strip < 150000 && pixel < 20000 && (strip < 20000 + 7* pixel)"
    )
)
# Era customisation: Phase-2 PU140 raises the cluster limits and the ptMin cut.
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
trackingPhase2PU140.toModify(photonConvTrajSeedFromSingleLeg,
    ClusterCheckPSet = dict(
        MaxNumberOfCosmicClusters = 1000000,
        MaxNumberOfPixelClusters = 100000,
        cut = None
    ),
    OrderedHitsFactoryPSet = dict(maxElement = 100000),
    RegionFactoryPSet = dict(RegionPSet = dict(ptMin = 0.3)),
)
# Era customisation: peripheral PbPb collisions.
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
peripheralPbPb.toModify(photonConvTrajSeedFromSingleLeg,
    ClusterCheckPSet = dict(cut = "strip < 400000 && pixel < 40000 && (strip < 60000 + 7.0*pixel) && (pixel < 8000 + 0.14*strip)")
)
# Era customisation: heavy-ion runs (XeXe 2017, PbPb 2018).
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
       ClusterCheckPSet = dict(MaxNumberOfPixelClusters = 100000,
                               cut = "strip < 1000000 && pixel < 100000 && (strip < 50000 + 10*pixel) && (pixel < 5000 + strip/2.)"
                               ),
       OrderedHitsFactoryPSet = dict(maxElement = 100000)
)
# Heavy-ion runs also switch to a vertex-based tracking region.
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
(pp_on_XeXe_2017 | pp_on_AA_2018 ).toModify(photonConvTrajSeedFromSingleLeg,
                 RegionFactoryPSet = dict(ComponentName = 'GlobalTrackingRegionWithVerticesProducer',
                                          RegionPSet = _globalTrackingRegionWithVertices.RegionPSet.clone(
                                              originRadius = 0,
                                              originRScaling4BigEvts = True,
                                              minOriginR = 0,
                                              scalingStartNPix = 0,
                                              scalingEndNPix = 1  # essentially turn off immediately
                                          ),
                 )
)
| true | true |
f7268b2e949d82a8bd564b36aff123357d5bc3a1 | 9,689 | py | Python | client_server_test/NEWGUI.py | hades208002/mdp-project | c242a8d00412cc3772d298986977f6acc47002ee | [
"MIT"
] | null | null | null | client_server_test/NEWGUI.py | hades208002/mdp-project | c242a8d00412cc3772d298986977f6acc47002ee | [
"MIT"
] | null | null | null | client_server_test/NEWGUI.py | hades208002/mdp-project | c242a8d00412cc3772d298986977f6acc47002ee | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import ttk
import tkinter.filedialog as fd
import pandas as pd
from LocalModelCommunication import LocalModelCommunication
from APP import APP
class GUI(object):
    """Tkinter wizard that walks a user through a five-step flow:

    1. import a local CSV file,
    2. match its columns to the feature list required by the central model,
    3. train a local model (via LocalModelCommunication),
    4. connect the trained model to the central server,
    5. wait for prediction calls.

    Each step lives in its own ttk.Notebook tab; the Next/Back buttons add the
    target tab and forget the current one, so exactly one tab is visible.
    """

    def __init__(self):
        # --- shared wizard state ---
        self.tabControl = None
        self.tab_step1 = None
        self.tab_step2 = None
        self.tab_step3 = None
        self.tab_step4 = None
        # NOTE(review): tab_step5 is only created in go_next_step5, unlike the
        # other tab attributes which are pre-declared here.
        self.dataframe = None
        # Placeholder image shown while a long-running operation is pending.
        self.img_wait = PhotoImage(file='test.GIF')
        # --- step 1: import ---
        self.fname = None
        self.data = None
        self.features = None
        # NOTE(review): 'lable' misspelling is kept — it is the attribute name
        # used consistently across this class.
        self.import_lable = None
        self.import_label_text = StringVar()
        self.import_label_text.set(' ')
        # --- step 2: feature matching ---
        # Column names the central model expects, in its required order.
        self.required = ['RR', 'QTm_old', 'sbjBeatConsidered', 'numRRaveraged', 'QR', 'QTn', 'QRS', 'IPG',
                         'PQ', 'PCpos', 'PCneg', 'patsex', 'AFclass', 'Age']
        # Same list with the positional index prefixed, e.g. "0: RR".
        self.required_ordered = []
        i = 0
        for item in self.required:
            self.required_ordered.append(str(i) + ': ' + item)
            i = i + 1
        self.leftbox = StringVar()
        self.rightbox = StringVar()
        self.rrightbox = StringVar()
        self.list_left = None
        self.list_right = None
        self.list_rright = None
        # --- step 3: model training ---
        self.model_label = None
        self.model_label_text = StringVar()
        self.model_label_text.set('Waiting for model training...')
        # Image swapped in by trainingdone() once training has finished.
        self.img_gif = PhotoImage(file='img.GIF')
        # --- step 4: server connection ---
        self.connect_label = None
        self.connect_label_text = StringVar()
        self.connect_label_text.set('Waiting for central server response...')
        # --- step 5 has no pre-declared state ---

    # help functions
    def add_tab(self, tabControl, tab_name):
        """Create a notebook tab labelled *tab_name* and return its frame."""
        tab = ttk.Frame(tabControl) # Create a tab
        tabControl.add(tab, text=tab_name)
        return tab

    # Callback functions
    ## step 1
    def get_csv(self): # open file system
        """Prompt for a .csv file, load it with pandas and display its columns."""
        self.fname = fd.askopenfilename(filetypes=[(".csv file", ".csv")])
        self.data = pd.read_csv(self.fname, delimiter=',')
        self.features = self.data.columns
        self.import_label_text.set('Import data from: ' + self.fname + '\n' + str(self.features))
        self.import_lable.pack(side=TOP)

    def go_next_step2(self):
        """Advance the wizard from step 1 (import) to step 2 (feature matching)."""
        self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
        self.tab_match(self.tab_step2)
        self.tabControl.select(self.tab_step2)
        self.tabControl.forget(self.tab_step1)

    ## step 2
    def move_to_right(self):
        """Move the selected local feature into the matched list (with index prefix)."""
        # NOTE(review): curselection() is empty when nothing is selected,
        # which would raise here — TODO confirm whether a guard is wanted.
        self.list_right.insert(END,
                               str(self.list_right.size()) + ': ' + self.list_left.get(self.list_left.curselection()))
        self.list_left.delete(self.list_left.curselection())

    def move_to_left(self):
        """Move the selected matched feature back to the local-features list."""
        content = self.list_right.get(self.list_right.curselection())
        # Strip the "index: " prefix that move_to_right/add_nan prepended.
        contents = content.split(': ')
        self.list_left.insert(END, contents[1])
        self.list_right.delete(self.list_right.curselection())

    def add_nan(self):
        """Insert a NAN placeholder for a required feature with no local match."""
        self.list_right.insert(END, str(self.list_right.size()) + ': ' + 'NAN')

    def go_next_step3(self):
        """Build the matched-column dataframe, then advance to step 3 (training)."""
        # prepare dataframe for localmodel: the rightbox StringVar holds a
        # tuple-like string such as "('0: RR', '1: NAN', ...)"; strip the
        # tuple punctuation, split the entries and drop the index prefixes.
        columns = []
        contents = self.rightbox.get()
        contents = contents.replace('(', '')
        contents = contents.replace(')', '')
        contents = contents.replace("'", '')
        item_list = contents.split(', ')
        for item in item_list:
            content = item.split(': ')[1]
            if content != 'NAN':
                columns.append(content)
        self.dataframe = self.data[columns]
        print(self.dataframe.head(2))
        self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
        # render tab3
        self.tab_model(self.tab_step3)
        self.tabControl.select(self.tab_step3)
        self.tabControl.forget(self.tab_step2)

    def go_back_step1(self):
        """Return from step 2 to step 1 (rebuilds the import tab)."""
        self.tab_step1 = self.add_tab(self.tabControl, "Step 1: Import Data")
        # render tab1
        self.tab_import(self.tab_step1, self.tabControl)
        self.tabControl.select(self.tab_step1)
        self.tabControl.forget(self.tab_step2)

    ## step 3
    def go_next_step4(self):
        """Advance from step 3 (training) to step 4 (server connection)."""
        self.tab_step4 = self.add_tab(self.tabControl, "Step 4: Connect to Central Server")
        # render tab4
        self.tab_connect(self.tab_step4)
        self.tabControl.select(self.tab_step4)
        self.tabControl.forget(self.tab_step3)

    def go_back_step2(self):
        """Return from step 3 to step 2 (rebuilds the matching tab)."""
        self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
        # render tab2
        self.tab_match(self.tab_step2)
        self.tabControl.select(self.tab_step2)
        self.tabControl.forget(self.tab_step3)

    ## step 4
    def go_next_step5(self):
        """Advance from step 4 (connection) to step 5 (await predictions)."""
        self.tab_step5 = self.add_tab(self.tabControl, "Step 5: Wait for Prediction Call")
        # render tab5
        self.tab_wait(self.tab_step5)
        self.tabControl.select(self.tab_step5)
        self.tabControl.forget(self.tab_step4)

    def go_back_step3(self):
        """Return from step 4 to step 3. NOTE(review): this re-runs tab_model,
        which re-trains the local model as a side effect — confirm intended."""
        self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
        # render tab3
        self.tab_model(self.tab_step3)
        self.tabControl.select(self.tab_step3)
        self.tabControl.forget(self.tab_step4)

    ## step 5
    # frames
    def tab_import(self, root, tabControl):
        """
        Render step 1: an Import button, a label that will show the loaded
        file's columns, and the Next button. Also stores the notebook and
        tab references used by the navigation callbacks.
        """
        self.tabControl = tabControl
        self.tab_step1 = root
        frame = Frame(root)
        frame.pack(side=TOP)
        Button(frame, text='Import Data', command=self.get_csv, width=16).pack(side=TOP)
        label_frame = ttk.LabelFrame(frame, text='Press Button to Import Data')
        label_frame.pack(side=TOP)
        self.import_lable = ttk.Label(label_frame, textvariable=self.import_label_text)
        self.import_lable.pack(side=TOP)
        frame = Frame(root)
        frame.pack(side=BOTTOM)
        Button(frame, text='Next>>', command=self.go_next_step2, width=16).pack(side=TOP)

    def tab_match(self, root):
        """
        Render step 2: three list boxes (local features, matched features,
        required features) with move/NAN buttons between the first two.
        """
        self.leftbox.set(sorted(self.features))
        self.rightbox.set('')
        self.rrightbox.set(self.required_ordered)
        frame = Frame(root)
        frame.pack(side=BOTTOM)
        Button(frame, text='Next>>', command=self.go_next_step3, width=16).pack(side=RIGHT)
        Button(frame, text='<<Back', command=self.go_back_step1, width=16).pack(side=LEFT)
        # Left column: features found in the imported CSV.
        frame = Frame(root)
        frame.pack(side=LEFT)
        column_head = ttk.Label(frame, text='Local Features')
        column_head.pack(side=TOP)
        self.list_left = Listbox(frame, listvariable=self.leftbox, width=25, height=20)
        self.list_left.pack(side=LEFT)
        scrollbar = Scrollbar(frame, orient="vertical")
        scrollbar.config(command=self.list_left.yview)
        scrollbar.pack(side="right", fill="y")
        # Middle column: transfer buttons.
        frame = Frame(root)
        frame.pack(side=LEFT)
        Button(frame, text='->', command=self.move_to_right, width=7).pack(side=TOP)
        Button(frame, text='<-', command=self.move_to_left, width=7).pack(side=TOP)
        Button(frame, text='NAN', command=self.add_nan, width=7).pack(side=TOP)
        # Matched column: user-assigned mapping, in required order.
        frame = Frame(root)
        frame.pack(side=LEFT)
        column_head = ttk.Label(frame, text='Matched Features')
        column_head.pack(side=TOP)
        self.list_right = Listbox(frame, listvariable=self.rightbox,height=20, width=25)
        self.list_right.pack(side=LEFT)
        scrollbar = Scrollbar(frame, orient="vertical")
        scrollbar.config(command=self.list_right.yview)
        scrollbar.pack(side="right", fill="y")
        # Right column: read-only reference list of required features.
        frame = Frame(root)
        frame.pack(side=RIGHT)
        column_head = ttk.Label(frame, text='Required Features')
        column_head.pack(side=TOP)
        self.list_rright = Listbox(frame, listvariable=self.rrightbox,height=20, width=25)
        self.list_rright.pack(side=LEFT)
        scrollbar = Scrollbar(frame, orient="vertical")
        scrollbar.config(command=self.list_rright.yview)
        scrollbar.pack(side="right", fill="y")

    def tab_model(self, root):
        """
        Render step 3 and immediately train the local model on the matched
        dataframe via LocalModelCommunication; swaps in the 'done' image
        when training finishes.

        NOTE(review): training runs synchronously on the Tk main thread, so
        the UI freezes until it completes — confirm acceptable.
        """
        frame = Frame(root)
        frame.pack(side=TOP)
        self.label_frame = ttk.LabelFrame(frame)
        self.label_frame.pack(side=TOP)
        self.model_label = ttk.Label(self.label_frame, textvariable=self.model_label_text)
        self.model_label.pack(side=TOP)
        self.label_img = ttk.Label(self.label_frame, image=self.img_wait)
        self.label_img.pack()
        frame = Frame(root)
        frame.pack(side=BOTTOM)
        Button(frame, text='Next>>', command=self.go_next_step4, width=16).pack(side=RIGHT)
        Button(frame, text='<<Back', command=self.go_back_step2, width=16).pack(side=LEFT)
        print ("MODEL TRAINED -> ")
        self.loca = LocalModelCommunication(data= self.dataframe)
        training_result = self.loca.chooseModel_with_crossValidation_and_train()
        print (training_result)
        self.trainingdone()

    def trainingdone(self):
        """Swap the waiting image for the 'training finished' image."""
        self.label_img.config(image=self.img_gif)
        self.label_img.pack()

    def tab_connect(self, root):
        """
        Render step 4 and attempt to connect the local model to the central
        server, logging the outcome to stdout.
        """
        frame = Frame(root)
        frame.pack(side=TOP)
        label_frame = ttk.LabelFrame(frame)
        label_frame.pack(side=TOP)
        self.connect_label = ttk.Label(label_frame, textvariable=self.connect_label_text)
        self.connect_label.pack(side=TOP)
        label_img = ttk.Label(label_frame, image=self.img_wait)
        label_img.pack()
        frame = Frame(root)
        frame.pack(side=BOTTOM)
        Button(frame, text='Next>>', command=self.go_next_step5, width=16).pack(side=RIGHT)
        Button(frame, text='<<Back', command=self.go_back_step3, width=16).pack(side=LEFT)
        ## cannot get fast responce! -> get false even if we are connected :]
        # NOTE(review): '== False' — Python idiom would be
        # 'if not self.loca.connectToCentral():'.
        if self.loca.connectToCentral() == False :
            print ("not connected")
        else :
            print ("connected")
        '''
        self.root = Tk()
        self.root.geometry("700x500")
        self.root.title("Doctor Application")
        self.root.resizable(width=False, height=False)
        self.app = APP(root)
        self.root.mainloop()
        '''

    def tab_wait(self, root):
        """
        Render step 5: placeholder tab for awaiting prediction calls
        (prediction handling is not implemented yet).
        """
        frame = Frame(root)
        frame.pack(side=TOP)
        label_frame = ttk.LabelFrame(frame)
        label_frame.pack(side=TOP)
        label = ttk.Label(label_frame, text='TODO')
        label.pack(side=TOP)
if __name__ == '__main__':
    # Build a fixed-size main window, create the notebook with the first
    # wizard tab, and hand both to GUI.tab_import; GUI manages the
    # remaining tabs itself.
    root = Tk()
    root.geometry("700x500")
    root.title("Modeling Tool GUI")
    root.resizable(width=False, height=False)
    tabControl = ttk.Notebook(root)
    tab_step1 = ttk.Frame(tabControl)
    tabControl.add(tab_step1, text="Step 1: Import Data")
    tabControl.pack(expand=1, fill="both")  # Pack to make visible
    gui = GUI()
    gui.tab_import(tab_step1, tabControl)
    root.mainloop()
| 31.254839 | 100 | 0.716999 | from tkinter import *
from tkinter import ttk
import tkinter.filedialog as fd
import pandas as pd
from LocalModelCommunication import LocalModelCommunication
from APP import APP
class GUI(object):
def __init__(self):
self.tabControl = None
self.tab_step1 = None
self.tab_step2 = None
self.tab_step3 = None
self.tab_step4 = None
self.dataframe = None
self.img_wait = PhotoImage(file='test.GIF')
self.fname = None
self.data = None
self.features = None
self.import_lable = None
self.import_label_text = StringVar()
self.import_label_text.set(' ')
self.required = ['RR', 'QTm_old', 'sbjBeatConsidered', 'numRRaveraged', 'QR', 'QTn', 'QRS', 'IPG',
'PQ', 'PCpos', 'PCneg', 'patsex', 'AFclass', 'Age']
self.required_ordered = []
i = 0
for item in self.required:
self.required_ordered.append(str(i) + ': ' + item)
i = i + 1
self.leftbox = StringVar()
self.rightbox = StringVar()
self.rrightbox = StringVar()
self.list_left = None
self.list_right = None
self.list_rright = None
self.model_label = None
self.model_label_text = StringVar()
self.model_label_text.set('Waiting for model training...')
self.img_gif = PhotoImage(file='img.GIF')
self.connect_label = None
self.connect_label_text = StringVar()
self.connect_label_text.set('Waiting for central server response...')
def add_tab(self, tabControl, tab_name):
tab = ttk.Frame(tabControl)
tabControl.add(tab, text=tab_name)
return tab
t_csv(self):
self.fname = fd.askopenfilename(filetypes=[(".csv file", ".csv")])
self.data = pd.read_csv(self.fname, delimiter=',')
self.features = self.data.columns
self.import_label_text.set('Import data from: ' + self.fname + '\n' + str(self.features))
self.import_lable.pack(side=TOP)
def go_next_step2(self):
self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
self.tab_match(self.tab_step2)
self.tabControl.select(self.tab_step2)
self.tabControl.forget(self.tab_step1)
ve_to_right(self):
self.list_right.insert(END,
str(self.list_right.size()) + ': ' + self.list_left.get(self.list_left.curselection()))
self.list_left.delete(self.list_left.curselection())
def move_to_left(self):
content = self.list_right.get(self.list_right.curselection())
contents = content.split(': ')
self.list_left.insert(END, contents[1])
self.list_right.delete(self.list_right.curselection())
def add_nan(self):
self.list_right.insert(END, str(self.list_right.size()) + ': ' + 'NAN')
def go_next_step3(self):
columns = []
contents = self.rightbox.get()
contents = contents.replace('(', '')
contents = contents.replace(')', '')
contents = contents.replace("'", '')
item_list = contents.split(', ')
for item in item_list:
content = item.split(': ')[1]
if content != 'NAN':
columns.append(content)
self.dataframe = self.data[columns]
print(self.dataframe.head(2))
self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
# render tab3
self.tab_model(self.tab_step3)
self.tabControl.select(self.tab_step3)
self.tabControl.forget(self.tab_step2)
def go_back_step1(self):
self.tab_step1 = self.add_tab(self.tabControl, "Step 1: Import Data")
# render tab1
self.tab_import(self.tab_step1, self.tabControl)
self.tabControl.select(self.tab_step1)
self.tabControl.forget(self.tab_step2)
## step 3
def go_next_step4(self):
self.tab_step4 = self.add_tab(self.tabControl, "Step 4: Connect to Central Server")
# render tab4
self.tab_connect(self.tab_step4)
self.tabControl.select(self.tab_step4)
self.tabControl.forget(self.tab_step3)
def go_back_step2(self):
self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
# render tab2
self.tab_match(self.tab_step2)
self.tabControl.select(self.tab_step2)
self.tabControl.forget(self.tab_step3)
## step 4
def go_next_step5(self):
self.tab_step5 = self.add_tab(self.tabControl, "Step 5: Wait for Prediction Call")
# render tab5
self.tab_wait(self.tab_step5)
self.tabControl.select(self.tab_step5)
self.tabControl.forget(self.tab_step4)
def go_back_step3(self):
self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
# render tab3
self.tab_model(self.tab_step3)
self.tabControl.select(self.tab_step3)
self.tabControl.forget(self.tab_step4)
## step 5
# frames
def tab_import(self, root, tabControl):
self.tabControl = tabControl
self.tab_step1 = root
frame = Frame(root)
frame.pack(side=TOP)
Button(frame, text='Import Data', command=self.get_csv, width=16).pack(side=TOP)
label_frame = ttk.LabelFrame(frame, text='Press Button to Import Data')
label_frame.pack(side=TOP)
self.import_lable = ttk.Label(label_frame, textvariable=self.import_label_text)
self.import_lable.pack(side=TOP)
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step2, width=16).pack(side=TOP)
def tab_match(self, root):
self.leftbox.set(sorted(self.features))
self.rightbox.set('')
self.rrightbox.set(self.required_ordered)
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step3, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step1, width=16).pack(side=LEFT)
frame = Frame(root)
frame.pack(side=LEFT)
column_head = ttk.Label(frame, text='Local Features')
column_head.pack(side=TOP)
self.list_left = Listbox(frame, listvariable=self.leftbox, width=25, height=20)
self.list_left.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_left.yview)
scrollbar.pack(side="right", fill="y")
frame = Frame(root)
frame.pack(side=LEFT)
Button(frame, text='->', command=self.move_to_right, width=7).pack(side=TOP)
Button(frame, text='<-', command=self.move_to_left, width=7).pack(side=TOP)
Button(frame, text='NAN', command=self.add_nan, width=7).pack(side=TOP)
frame = Frame(root)
frame.pack(side=LEFT)
column_head = ttk.Label(frame, text='Matched Features')
column_head.pack(side=TOP)
self.list_right = Listbox(frame, listvariable=self.rightbox,height=20, width=25)
self.list_right.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_right.yview)
scrollbar.pack(side="right", fill="y")
frame = Frame(root)
frame.pack(side=RIGHT)
column_head = ttk.Label(frame, text='Required Features')
column_head.pack(side=TOP)
self.list_rright = Listbox(frame, listvariable=self.rrightbox,height=20, width=25)
self.list_rright.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_rright.yview)
scrollbar.pack(side="right", fill="y")
def tab_model(self, root):
frame = Frame(root)
frame.pack(side=TOP)
self.label_frame = ttk.LabelFrame(frame)
self.label_frame.pack(side=TOP)
self.model_label = ttk.Label(self.label_frame, textvariable=self.model_label_text)
self.model_label.pack(side=TOP)
self.label_img = ttk.Label(self.label_frame, image=self.img_wait)
self.label_img.pack()
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step4, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step2, width=16).pack(side=LEFT)
print ("MODEL TRAINED -> ")
self.loca = LocalModelCommunication(data= self.dataframe)
training_result = self.loca.chooseModel_with_crossValidation_and_train()
print (training_result)
self.trainingdone()
def trainingdone(self):
self.label_img.config(image=self.img_gif)
self.label_img.pack()
def tab_connect(self, root):
frame = Frame(root)
frame.pack(side=TOP)
label_frame = ttk.LabelFrame(frame)
label_frame.pack(side=TOP)
self.connect_label = ttk.Label(label_frame, textvariable=self.connect_label_text)
self.connect_label.pack(side=TOP)
label_img = ttk.Label(label_frame, image=self.img_wait)
label_img.pack()
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step5, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step3, width=16).pack(side=LEFT)
## cannot get fast responce! -> get false even if we are connected :]
if self.loca.connectToCentral() == False :
print ("not connected")
else :
print ("connected")
def tab_wait(self, root):
frame = Frame(root)
frame.pack(side=TOP)
label_frame = ttk.LabelFrame(frame)
label_frame.pack(side=TOP)
label = ttk.Label(label_frame, text='TODO')
label.pack(side=TOP)
if __name__ == '__main__':
root = Tk()
root.geometry("700x500")
root.title("Modeling Tool GUI")
root.resizable(width=False, height=False)
tabControl = ttk.Notebook(root)
tab_step1 = ttk.Frame(tabControl)
tabControl.add(tab_step1, text="Step 1: Import Data")
tabControl.pack(expand=1, fill="both") # Pack to make visible
gui = GUI()
gui.tab_import(tab_step1, tabControl)
root.mainloop()
| true | true |
f7268c06513bb9fa62c71a99d8cdf748c117880d | 14,879 | py | Python | cifar_cnn_three_conv.py | sidneyp/bidirectional | d3d1dbb727e5a25b4980646f1eb500245072f079 | [
"BSD-3-Clause"
] | 8 | 2018-05-22T10:02:51.000Z | 2022-01-11T03:02:51.000Z | cifar_cnn_three_conv.py | sidneyp/bidirectional | d3d1dbb727e5a25b4980646f1eb500245072f079 | [
"BSD-3-Clause"
] | null | null | null | cifar_cnn_three_conv.py | sidneyp/bidirectional | d3d1dbb727e5a25b4980646f1eb500245072f079 | [
"BSD-3-Clause"
] | 2 | 2021-01-07T19:39:19.000Z | 2021-11-14T09:06:35.000Z | import tensorflow as tf
import keras
from keras.datasets import cifar10
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import sys
import csv
import utils_csv
import utils_tf as utils
from cleverhans.utils_tf import model_train, model_eval
from cleverhans.attacks import FastGradientMethod
from cleverhans.model import Model
print("Tensorflow version " + tf.__version__)
# Select the training variant from the command line (defaults to biprop).
config_num = int(sys.argv[1]) if len(sys.argv) > 1 else 1 # Choose type of learning technique according to config_dict
config_dict = {0: "backprop", 1: "biprop", 2: "halfbiprop", 3: "nobias_backprop", 4: "nobias_biprop", 5: "nobias_halfbiprop"}
num_classes = 10
# Model name combines the script name and the chosen variant, and is used
# to build the out/, csv/ and logs/ output paths below.
model_name = sys.argv[0].replace(".py", "") + "_" + config_dict[config_num]
print("Model name: " + model_name)
# load data: CIFAR-10 images scaled to [0, 1], labels one-hot encoded.
# https://github.com/BIGBALLON/cifar-10-cnn/blob/master/1_Lecun_Network/LeNet_keras.py
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# for reproducibility
np.random.seed(0)
tf.set_random_seed(0)
sess = tf.InteractiveSession()
# three convolutional layers with their channel counts, and a
# fully connected layer (tha last layer has 10 softmax neurons)
K = 4  # first convolutional layer output depth
L = 8  # second convolutional layer output depth
M = 12  # third convolutional layer
N = 200  # fully connected layer
with tf.name_scope("input"):
    # input X & output GX_: 32x32 RGB images, the first dimension (None) will index the images in the mini-batch
    X = tf.placeholder(tf.float32, [None, 32, 32, 3])
    X_noisy = tf.placeholder(tf.float32, [None, 32, 32, 3])
    X_adv = tf.placeholder(tf.float32, [None, 32, 32, 3])
    GX_ = tf.placeholder(tf.float32, [None, 32, 32, 3])
    # output Y_ & input GY: labels for classification and generation
    Y_ = tf.placeholder(tf.float32, [None, num_classes])
    GY = tf.placeholder(tf.float32, [None, num_classes])
    # variable learning rate
    lr = tf.placeholder(tf.float32)
    # variable batch size
    BS = tf.placeholder(tf.int32)
input_test_sum = tf.summary.image("input", X, num_classes)
input_noisy_sum = tf.summary.image("input-noisy", X_noisy, num_classes)
input_adv_sum = tf.summary.image("input-adv", X_adv, num_classes)
with tf.name_scope("classifier-generator"):
    # Convolution/FC weights shared by BOTH the classifier (forward) and the
    # generator (transposed) — this weight sharing is the "biprop" idea.
    C_W1 = utils.weight_variable([5, 5, 3, K], stddev=0.1, name="C_W1")
    C_W2 = utils.weight_variable([5, 5, K, L], stddev=0.1, name="C_W2")
    C_W3 = utils.weight_variable([4, 4, L, M], stddev=0.1, name="C_W3")
    C_W4 = utils.weight_variable([8 * 8 * M, N], stddev=0.1, name="C_W4")
    C_W5 = utils.weight_variable([N, num_classes], stddev=0.1, name="C_W5")
def classifier(x, reuse=None):
    """Forward CNN: 3 conv layers + 1 FC hidden layer + linear output.

    Uses the module-level shared weights C_W1..C_W5; only the biases are
    owned by the "classifier" variable scope (pass reuse=True to share them
    across multiple call sites).

    Returns (Ysoftmax, Ysigmoid, Ylogits), all of shape [batch, num_classes].
    """
    with tf.variable_scope("classifier", reuse=reuse) as scope_c:
        # Variables for classifier (biases only; weights are shared globals).
        C_B1 = utils.bias_variable([K], name="C_B1")
        C_B2 = utils.bias_variable([L], name="C_B2")
        C_B3 = utils.bias_variable([M], name="C_B3")
        C_B4 = utils.bias_variable([N], name="C_B4")
        C_B5 = utils.bias_variable([num_classes], name="C_B5")
        stride = 1  # output is 32x32
        H1 = tf.nn.relu(tf.nn.conv2d(x, C_W1, strides=[1, stride, stride, 1], padding='SAME') + C_B1)
        stride = 2  # output is 16x16
        H2 = tf.nn.relu(tf.nn.conv2d(H1, C_W2, strides=[1, stride, stride, 1], padding='SAME') + C_B2)
        stride = 2  # output is 8x8
        H3 = tf.nn.relu(tf.nn.conv2d(H2, C_W3, strides=[1, stride, stride, 1], padding='SAME') + C_B3)
        # reshape the output from the third convolution for the fully connected layer
        HH3 = tf.reshape(H3, shape=[-1, 8 * 8 * M])
        H4 = tf.nn.relu(tf.matmul(HH3, C_W4) + C_B4)
        Ylogits = tf.matmul(H4, C_W5) + C_B5
        Ysigmoid = tf.nn.sigmoid(Ylogits)
        Ysoftmax = tf.nn.softmax(Ylogits)
        return Ysoftmax, Ysigmoid, Ylogits
class ClassifierModel(Model):
    """cleverhans Model adapter: exposes the classifier's logits so attacks
    such as FastGradientMethod can differentiate through the network
    (reuse=True shares the already-created bias variables)."""

    def get_logits(self, x):
        Ysoftmax, Ysigmoid, Ylogits = classifier(x, reuse=True)
        return Ylogits
# Generator of random input reuses weights of classifier
def generator(y, bs, reuse=None):
    """Run the classifier backwards: map a one-hot label batch *y* to images.

    Reuses the transposed shared weights C_W5..C_W1 (FC as transposed matmul,
    convs as conv2d_transpose); only the generator-side biases live in the
    "generator" variable scope. *bs* is the (dynamic) batch size needed for
    the conv2d_transpose output shapes.

    Returns (GXsigmoid, GXlogits) of shape [bs, 32, 32, 3].
    """
    with tf.variable_scope("generator", reuse=reuse) as scope_g:
        # Variables for generator (biases only; weights are shared with classifier).
        G_B1 = utils.bias_variable([3], name="G_B1")
        G_B2 = utils.bias_variable([K], name="G_B2")
        G_B3 = utils.bias_variable([L], name="G_B3")
        G_B4 = utils.bias_variable([M*8*8], name="G_B4")
        G_B5 = utils.bias_variable([N], name="G_B5")
        GH4 = tf.nn.relu(tf.matmul(y, tf.transpose(C_W5)) + G_B5)
        GH3 = tf.nn.relu(tf.matmul(GH4, tf.transpose(C_W4)) + G_B4)
        GHH3 = tf.reshape(GH3, shape=[-1, 8, 8, M])
        stride = 2  # output is 16x16
        GH2 = tf.nn.relu(tf.nn.conv2d_transpose(GHH3, C_W3, output_shape=[bs, 16, 16, L], strides=[1, stride, stride, 1]) + G_B3) #deconv2 W3
        stride = 2  # output is 32x32
        GH1 = tf.nn.relu(tf.nn.conv2d_transpose(GH2, C_W2, output_shape=[bs, 32, 32, K], strides=[1, stride, stride, 1]) + G_B2)#deconv2 W2
        stride = 1  # output is 32x32
        GXlogits = tf.nn.conv2d_transpose(GH1, C_W1, output_shape=[bs, 32, 32, 3], strides=[1, stride, stride, 1]) + G_B1#deconv2 W1
        GXsigmoid = tf.nn.sigmoid(GXlogits)
        return GXsigmoid, GXlogits
def plot_generator(samples):
    """Lay out a batch of flattened 32x32x3 images on a matplotlib grid.

    Uses a 2x5 grid when num_classes == 10 (one image per class), otherwise
    a 10x10 grid. Returns the created figure (caller saves/closes it).
    """
    grid_rows, grid_cols, fig_size = (
        (2, 5, (5, 2)) if num_classes == 10 else (10, 10, (10, 10))
    )
    fig = plt.figure(figsize=fig_size)
    grid = gridspec.GridSpec(grid_rows, grid_cols)
    grid.update(wspace=0.05, hspace=0.05)
    for cell_idx, img in enumerate(samples):
        axis = plt.subplot(grid[cell_idx])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        plt.imshow(img.reshape((32, 32, 3)))
    return fig
# Instantiate generator (train + reuse/test copies) and three classifier
# views sharing the same variables: clean, noisy and adversarial inputs.
GXsigmoid, GXlogits = generator(GY, BS)
GXsigmoid_test, GXlogits_test = generator(GY, BS, reuse=True)
Ysoftmax, Ysigmoid, Ylogits = classifier(X)
model_classifier = ClassifierModel()
Ysoftmax_noisy, Ysigmoid_noisy, Ylogits_noisy = classifier(X_noisy, reuse=True)
Ysoftmax_adv, Ysigmoid_adv, Ylogits_adv = classifier(X_adv, reuse=True)
with tf.name_scope("loss"):
    # Classification: softmax cross-entropy against one-hot labels.
    c_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_))
    # Generation: per-pixel sigmoid cross-entropy against the real images.
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=GXlogits, labels=GX_))
    """ Summary """
    g_loss_sum = tf.summary.scalar("g_loss", g_loss)
    c_loss_sum = tf.summary.scalar("c_loss", c_loss)
# accuracy of the trained model, between 0 (worst) and 1 (best),
# measured separately on clean, noisy and adversarial inputs.
with tf.name_scope("accuracy"):
    with tf.name_scope("correct_prediction"):
        correct_prediction = tf.equal(tf.argmax(Ysoftmax, 1), tf.argmax(Y_, 1))
    with tf.name_scope("accuracy"):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    with tf.name_scope("correct_prediction_noisy"):
        correct_prediction_noisy = tf.equal(tf.argmax(Ysoftmax_noisy, 1), tf.argmax(Y_, 1))
    with tf.name_scope("accuracy_noisy"):
        accuracy_noisy = tf.reduce_mean(tf.cast(correct_prediction_noisy, tf.float32))
    with tf.name_scope("correct_prediction_adv"):
        correct_prediction_adv = tf.equal(tf.argmax(Ysoftmax_adv, 1), tf.argmax(Y_, 1))
    with tf.name_scope("accuracy_adv"):
        accuracy_adv = tf.reduce_mean(tf.cast(correct_prediction_adv, tf.float32))
    """ Summary """
    accuracy_sum = tf.summary.scalar("accuracy", accuracy)
    accuracy_noisy_sum = tf.summary.scalar("accuracy_noisy", accuracy_noisy)
    accuracy_adv_sum = tf.summary.scalar("accuracy_adv", accuracy_adv)
# Peak output activations — used to gauge model confidence on each input type.
with tf.name_scope("max_output"):
    with tf.name_scope("max_output_test"):
        max_output_sigmoid_test = tf.reduce_max(Ysigmoid)
        max_output_softmax_test = tf.reduce_max(Ysoftmax)
    with tf.name_scope("max_output_noise"):
        max_output_sigmoid_noise = tf.reduce_max(Ysigmoid_noisy)
        max_output_softmax_noise = tf.reduce_max(Ysoftmax_noisy)
    with tf.name_scope("max_output_adv"):
        max_output_sigmoid_adv = tf.reduce_max(Ysigmoid_adv)
        max_output_softmax_adv = tf.reduce_max(Ysoftmax_adv)
    """ Summary """
    max_output_sigmoid_test_sum = tf.summary.scalar("max_output_sigmoid_test", max_output_sigmoid_test)
    max_output_softmax_test_sum = tf.summary.scalar("max_output_softmax_test", max_output_softmax_test)
    max_output_sigmoid_noise_sum = tf.summary.scalar("max_output_sigmoid_noise", max_output_sigmoid_noise)
    max_output_softmax_noise_sum = tf.summary.scalar("max_output_softmax_noise", max_output_softmax_noise)
    max_output_sigmoid_adv_sum = tf.summary.scalar("max_output_sigmoid_adv", max_output_sigmoid_adv)
    max_output_softmax_adv_sum = tf.summary.scalar("max_output_softmax_adv", max_output_softmax_adv)
utils.show_all_variables()
t_vars = tf.trainable_variables()
# Variable partition depends on the configuration: "nobias" variants
# (config_num >= 3) train only the shared weights (C_W*), never the biases.
c_vars = [var for var in t_vars if 'C_' in var.name]\
    if config_num < 3 else [var for var in t_vars if 'C_W' in var.name]
g_vars = [var for var in t_vars if 'C_W' in var.name or 'G_' in var.name]\
    if config_num < 3 else c_vars
# training step
learning_rate_dis = lr
learning_rate_gen = lr
with tf.name_scope("train"):
    c_train = tf.train.AdamOptimizer(learning_rate_dis).minimize(c_loss, var_list=c_vars)
    g_train = tf.train.AdamOptimizer(learning_rate_gen).minimize(g_loss, var_list=g_vars)
# final summary operations
g_sum = tf.summary.merge([g_loss_sum])
c_sum = tf.summary.merge([input_test_sum, accuracy_sum, c_loss_sum, max_output_sigmoid_test_sum, max_output_softmax_test_sum])
noise_sum = tf.summary.merge([max_output_sigmoid_noise_sum, max_output_softmax_noise_sum])
noisy_sum = tf.summary.merge([input_noisy_sum, accuracy_noisy_sum])
adv_sum = tf.summary.merge([input_adv_sum, accuracy_adv_sum, max_output_sigmoid_adv_sum, max_output_softmax_adv_sum])
# Output directories: generated/adversarial images, CSV metrics, and
# TensorBoard event logs — all keyed by the model name.
folder_out = 'out/' + model_name + '/'
if not os.path.exists(folder_out):
    os.makedirs(folder_out)
folder_csv = 'csv/' + model_name + '/'
if not os.path.exists(folder_csv):
    os.makedirs(folder_csv)
folder_logs = 'logs/' + model_name
# Bug fix: this previously re-tested folder_csv (just created above), so the
# existence check was always False and the logs directory was never created
# by this guard.
if not os.path.exists(folder_logs):
    os.makedirs(folder_logs)
writer = tf.summary.FileWriter(folder_logs, sess.graph)
batch_size = 100
num_train_images = x_train.shape[0]
num_batches = num_train_images // batch_size
# One one-hot row per class — fed to the generator to render one image per class.
all_classes = np.eye(num_classes)
counter = 0
# FGSM attack strength and clipping to keep adversarial pixels in [0, 1].
fgsm_params = {'eps': 0.03,
               'clip_min': 0.,
               'clip_max': 1.}
# Fixed noise inputs, reused at every evaluation point for comparability.
random_noise = np.random.random_sample(x_test.shape)
test_image_with_noise = np.clip(x_test + 0.1*random_noise, 0., 1.)
accuracy_list = []
sigmoid_list = []
softmax_list = []
# initialize all variables
tf.global_variables_initializer().run()
for i in range(50001):
    # Reshuffle the training set at the start of each epoch.
    if i % num_batches == 0:
        idx_train = np.arange(x_train.shape[0])
        np.random.shuffle(idx_train)
        x_train, y_train = x_train[idx_train], y_train[idx_train]
    idx = i % num_batches
    batch_X = x_train[idx*batch_size:(idx+1)*batch_size]
    batch_Y = y_train[idx*batch_size:(idx+1)*batch_size]
    # learning rate decay: exponential from max to min over ~decay_speed steps.
    max_learning_rate = 0.003
    min_learning_rate = 0.0001
    decay_speed = 2000.0
    learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * np.exp(-i/decay_speed)
    # Periodic evaluation: save generated/adversarial images, log summaries
    # and record accuracy/confidence on clean, noisy and adversarial inputs.
    if i % 500 == 0 or i == 50000:
        counter += 1
        # Saves generated images
        samples = sess.run(GXsigmoid_test, feed_dict={GY: all_classes, BS: num_classes})
        fig = plot_generator(samples)
        plt.savefig(folder_out+"gen_"+str(i).zfill(6)+'.png', bbox_inches='tight')
        plt.close(fig)
        # Build FGSM adversarial examples against the current weights.
        attack_fgsm = FastGradientMethod(model_classifier, sess=sess)
        adv_x_np = attack_fgsm.generate_np(x_test, **fgsm_params)
        fig = plot_generator(adv_x_np[:num_classes])
        plt.savefig(folder_out+"adv_"+str(i).zfill(6)+'.png', bbox_inches='tight')
        plt.close(fig)
        accu_test, c_loss_test, sigmoid_test, softmax_test, sum_c = sess.run([accuracy, c_loss, max_output_sigmoid_test, max_output_softmax_test, c_sum], {X: x_test, Y_: y_test})
        writer.add_summary(sum_c, i)
        g_loss_test, sum_g = sess.run([g_loss, g_sum], {GY: batch_Y, GX_: batch_X, BS: batch_size})
        writer.add_summary(sum_g, i)
        print(str(i) + ": epoch " + str(i*batch_size//x_train.shape[0]+1)\
            + " - test loss class: " + str(c_loss_test) + " test loss gen: " + str(g_loss_test))
        print("Real test images - Sigmoid: " + str(sigmoid_test) + "\tSoftmax: " + str(softmax_test) + "\taccuracy: "+ str(accu_test))
        sigmoid_random, softmax_random, sum_random = sess.run([max_output_sigmoid_noise, max_output_softmax_noise, noise_sum], {X_noisy: random_noise})
        writer.add_summary(sum_random, i)
        accu_random, sum_noisy = sess.run([accuracy_noisy, noisy_sum], {X_noisy: test_image_with_noise, Y_: y_test})
        writer.add_summary(sum_noisy, i)
        print("Random noise images - Sigmoid: " + str(sigmoid_random) + "\tSoftmax: " + str(softmax_random) + "\taccuracy: "+ str(accu_random))
        accu_adv, sigmoid_adv, softmax_adv, sum_adv = sess.run([accuracy_adv, max_output_sigmoid_adv, max_output_softmax_adv, adv_sum], {X_adv: adv_x_np, Y_: y_test})
        writer.add_summary(sum_adv, i)
        print("Adversarial examples - Sigmoid: " + str(sigmoid_adv) + "\tSoftmax: " + str(softmax_adv) + "\taccuracy: "+ str(accu_adv))
        print()
        accuracy_list.append([i, accu_test, accu_random, accu_adv, counter])
        sigmoid_list.append([i, sigmoid_test, sigmoid_random, sigmoid_adv, counter])
        softmax_list.append([i, softmax_test, softmax_random, softmax_adv, counter])
    # Always train the classifier; train the generator only for "biprop"
    # variants (and only for the first half of training in "halfbiprop").
    sess.run(c_train, {X: batch_X, Y_: batch_Y, lr: learning_rate})
    if config_num == 1 or (config_num == 2 and i < 25000) or\
            config_num == 4 or (config_num == 5 and i < 25000):
        sess.run(g_train, {GY: batch_Y, GX_: batch_X, lr: learning_rate, BS: batch_size})
writer.close()
# Save data in csv (note: 'writer' is rebound from the TF FileWriter,
# which was closed above, to csv writers here).
with open(folder_csv+"accuracy.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    writer.writerows(accuracy_list)
with open(folder_csv+"sigmoid.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    writer.writerows(sigmoid_list)
with open(folder_csv+"softmax.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    writer.writerows(softmax_list)
# Load data in csv
accu_data = utils_csv.get_data_csv_file(folder_csv+"accuracy.csv")
sigmoid_data = utils_csv.get_data_csv_file(folder_csv+"sigmoid.csv")
softmax_data = utils_csv.get_data_csv_file(folder_csv+"softmax.csv")
# Print best values
utils_csv.print_best(accu_data, sigmoid_data, softmax_data, folder_csv+"summary.txt")
import keras
from keras.datasets import cifar10
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import sys
import csv
import utils_csv
import utils_tf as utils
from cleverhans.utils_tf import model_train, model_eval
from cleverhans.attacks import FastGradientMethod
from cleverhans.model import Model
print("Tensorflow version " + tf.__version__)
config_num = int(sys.argv[1]) if len(sys.argv) > 1 else 1
config_dict = {0: "backprop", 1: "biprop", 2: "halfbiprop", 3: "nobias_backprop", 4: "nobias_biprop", 5: "nobias_halfbiprop"}
num_classes = 10
model_name = sys.argv[0].replace(".py", "") + "_" + config_dict[config_num]
print("Model name: " + model_name)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
np.random.seed(0)
tf.set_random_seed(0)
sess = tf.InteractiveSession()
K = 4
L = 8
M = 12
N = 200
with tf.name_scope("input"):
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
X_noisy = tf.placeholder(tf.float32, [None, 32, 32, 3])
X_adv = tf.placeholder(tf.float32, [None, 32, 32, 3])
GX_ = tf.placeholder(tf.float32, [None, 32, 32, 3])
Y_ = tf.placeholder(tf.float32, [None, num_classes])
GY = tf.placeholder(tf.float32, [None, num_classes])
lr = tf.placeholder(tf.float32)
BS = tf.placeholder(tf.int32)
input_test_sum = tf.summary.image("input", X, num_classes)
input_noisy_sum = tf.summary.image("input-noisy", X_noisy, num_classes)
input_adv_sum = tf.summary.image("input-adv", X_adv, num_classes)
with tf.name_scope("classifier-generator"):
C_W1 = utils.weight_variable([5, 5, 3, K], stddev=0.1, name="C_W1")
C_W2 = utils.weight_variable([5, 5, K, L], stddev=0.1, name="C_W2")
C_W3 = utils.weight_variable([4, 4, L, M], stddev=0.1, name="C_W3")
C_W4 = utils.weight_variable([8 * 8 * M, N], stddev=0.1, name="C_W4")
C_W5 = utils.weight_variable([N, num_classes], stddev=0.1, name="C_W5")
def classifier(x, reuse=None):
    """Build the convolutional classifier graph for input batch `x`.

    Weights C_W1..C_W5 are module-level variables shared with the generator
    (tied weights); only the biases live inside the "classifier" scope, so
    pass reuse=True when instantiating the graph a second time.
    Returns (Ysoftmax, Ysigmoid, Ylogits) tensors over num_classes outputs.
    """
    with tf.variable_scope("classifier", reuse=reuse) as scope_c:
        C_B1 = utils.bias_variable([K], name="C_B1")
        C_B2 = utils.bias_variable([L], name="C_B2")
        C_B3 = utils.bias_variable([M], name="C_B3")
        C_B4 = utils.bias_variable([N], name="C_B4")
        C_B5 = utils.bias_variable([num_classes], name="C_B5")
        # 32x32x3 -> 32x32xK (stride 1)
        stride = 1
        H1 = tf.nn.relu(tf.nn.conv2d(x, C_W1, strides=[1, stride, stride, 1], padding='SAME') + C_B1)
        # 32x32xK -> 16x16xL (stride 2)
        stride = 2
        H2 = tf.nn.relu(tf.nn.conv2d(H1, C_W2, strides=[1, stride, stride, 1], padding='SAME') + C_B2)
        # 16x16xL -> 8x8xM (stride 2)
        stride = 2
        H3 = tf.nn.relu(tf.nn.conv2d(H2, C_W3, strides=[1, stride, stride, 1], padding='SAME') + C_B3)
        # flatten for the fully connected head
        HH3 = tf.reshape(H3, shape=[-1, 8 * 8 * M])
        H4 = tf.nn.relu(tf.matmul(HH3, C_W4) + C_B4)
        Ylogits = tf.matmul(H4, C_W5) + C_B5
        Ysigmoid = tf.nn.sigmoid(Ylogits)
        Ysoftmax = tf.nn.softmax(Ylogits)
        return Ysoftmax, Ysigmoid, Ylogits
class ClassifierModel(Model):
    """cleverhans Model adapter exposing the classifier's pre-softmax logits."""

    def get_logits(self, x):
        # classifier() returns (softmax, sigmoid, logits); attacks only need logits.
        _, _, logits = classifier(x, reuse=True)
        return logits
def generator(y, bs, reuse=None):
    """Build the generator graph mapping one-hot labels `y` to 32x32x3 images.

    The generator runs the classifier backwards with tied weights: dense
    layers use the transposed classifier matrices (C_W5, C_W4) and the
    deconvolutions reuse the classifier's conv kernels (C_W3..C_W1). Only the
    biases G_B1..G_B5 are generator-owned, inside the "generator" scope.
    `bs` is the batch size needed for conv2d_transpose output shapes.
    Returns (GXsigmoid, GXlogits).
    """
    with tf.variable_scope("generator", reuse=reuse) as scope_g:
        G_B1 = utils.bias_variable([3], name="G_B1")
        G_B2 = utils.bias_variable([K], name="G_B2")
        G_B3 = utils.bias_variable([L], name="G_B3")
        G_B4 = utils.bias_variable([M*8*8], name="G_B4")
        G_B5 = utils.bias_variable([N], name="G_B5")
        # dense head in reverse: labels -> N -> flattened 8x8xM feature map
        GH4 = tf.nn.relu(tf.matmul(y, tf.transpose(C_W5)) + G_B5)
        GH3 = tf.nn.relu(tf.matmul(GH4, tf.transpose(C_W4)) + G_B4)
        GHH3 = tf.reshape(GH3, shape=[-1, 8, 8, M])
        # 8x8xM -> 16x16xL
        stride = 2
        GH2 = tf.nn.relu(tf.nn.conv2d_transpose(GHH3, C_W3, output_shape=[bs, 16, 16, L], strides=[1, stride, stride, 1]) + G_B3)
        # 16x16xL -> 32x32xK
        stride = 2
        GH1 = tf.nn.relu(tf.nn.conv2d_transpose(GH2, C_W2, output_shape=[bs, 32, 32, K], strides=[1, stride, stride, 1]) + G_B2)
        # 32x32xK -> 32x32x3 (stride 1)
        stride = 1
        GXlogits = tf.nn.conv2d_transpose(GH1, C_W1, output_shape=[bs, 32, 32, 3], strides=[1, stride, stride, 1]) + G_B1
        GXsigmoid = tf.nn.sigmoid(GXlogits)
        return GXsigmoid, GXlogits
def plot_generator(samples):
    """Render `samples` (flattenable 32x32x3 images) on a tight grid; return the figure."""
    # Layout follows the module-level class count: 2x5 for 10 classes, 10x10 otherwise.
    if num_classes == 10:
        rows, cols, fig_size = 2, 5, (5, 2)
    else:
        rows, cols, fig_size = 10, 10, (10, 10)
    fig = plt.figure(figsize=fig_size)
    grid = gridspec.GridSpec(rows, cols)
    grid.update(wspace=0.05, hspace=0.05)
    for cell, image in enumerate(samples):
        axis = plt.subplot(grid[cell])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        plt.imshow(image.reshape((32, 32, 3)))
    return fig
GXsigmoid, GXlogits = generator(GY, BS)
GXsigmoid_test, GXlogits_test = generator(GY, BS, reuse=True)
Ysoftmax, Ysigmoid, Ylogits = classifier(X)
model_classifier = ClassifierModel()
Ysoftmax_noisy, Ysigmoid_noisy, Ylogits_noisy = classifier(X_noisy, reuse=True)
Ysoftmax_adv, Ysigmoid_adv, Ylogits_adv = classifier(X_adv, reuse=True)
with tf.name_scope("loss"):
c_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_))
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=GXlogits, labels=GX_))
g_loss_sum = tf.summary.scalar("g_loss", g_loss)
c_loss_sum = tf.summary.scalar("c_loss", c_loss)
with tf.name_scope("accuracy"):
with tf.name_scope("correct_prediction"):
correct_prediction = tf.equal(tf.argmax(Ysoftmax, 1), tf.argmax(Y_, 1))
with tf.name_scope("accuracy"):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.name_scope("correct_prediction_noisy"):
correct_prediction_noisy = tf.equal(tf.argmax(Ysoftmax_noisy, 1), tf.argmax(Y_, 1))
with tf.name_scope("accuracy_noisy"):
accuracy_noisy = tf.reduce_mean(tf.cast(correct_prediction_noisy, tf.float32))
with tf.name_scope("correct_prediction_adv"):
correct_prediction_adv = tf.equal(tf.argmax(Ysoftmax_adv, 1), tf.argmax(Y_, 1))
with tf.name_scope("accuracy_adv"):
accuracy_adv = tf.reduce_mean(tf.cast(correct_prediction_adv, tf.float32))
accuracy_sum = tf.summary.scalar("accuracy", accuracy)
accuracy_noisy_sum = tf.summary.scalar("accuracy_noisy", accuracy_noisy)
accuracy_adv_sum = tf.summary.scalar("accuracy_adv", accuracy_adv)
with tf.name_scope("max_output"):
with tf.name_scope("max_output_test"):
max_output_sigmoid_test = tf.reduce_max(Ysigmoid)
max_output_softmax_test = tf.reduce_max(Ysoftmax)
with tf.name_scope("max_output_noise"):
max_output_sigmoid_noise = tf.reduce_max(Ysigmoid_noisy)
max_output_softmax_noise = tf.reduce_max(Ysoftmax_noisy)
with tf.name_scope("max_output_adv"):
max_output_sigmoid_adv = tf.reduce_max(Ysigmoid_adv)
max_output_softmax_adv = tf.reduce_max(Ysoftmax_adv)
max_output_sigmoid_test_sum = tf.summary.scalar("max_output_sigmoid_test", max_output_sigmoid_test)
max_output_softmax_test_sum = tf.summary.scalar("max_output_softmax_test", max_output_softmax_test)
max_output_sigmoid_noise_sum = tf.summary.scalar("max_output_sigmoid_noise", max_output_sigmoid_noise)
max_output_softmax_noise_sum = tf.summary.scalar("max_output_softmax_noise", max_output_softmax_noise)
max_output_sigmoid_adv_sum = tf.summary.scalar("max_output_sigmoid_adv", max_output_sigmoid_adv)
max_output_softmax_adv_sum = tf.summary.scalar("max_output_softmax_adv", max_output_softmax_adv)
utils.show_all_variables()
t_vars = tf.trainable_variables()
c_vars = [var for var in t_vars if 'C_' in var.name]\
if config_num < 3 else [var for var in t_vars if 'C_W' in var.name]
g_vars = [var for var in t_vars if 'C_W' in var.name or 'G_' in var.name]\
if config_num < 3 else c_vars
learning_rate_dis = lr
learning_rate_gen = lr
with tf.name_scope("train"):
c_train = tf.train.AdamOptimizer(learning_rate_dis).minimize(c_loss, var_list=c_vars)
g_train = tf.train.AdamOptimizer(learning_rate_gen).minimize(g_loss, var_list=g_vars)
g_sum = tf.summary.merge([g_loss_sum])
c_sum = tf.summary.merge([input_test_sum, accuracy_sum, c_loss_sum, max_output_sigmoid_test_sum, max_output_softmax_test_sum])
noise_sum = tf.summary.merge([max_output_sigmoid_noise_sum, max_output_softmax_noise_sum])
noisy_sum = tf.summary.merge([input_noisy_sum, accuracy_noisy_sum])
adv_sum = tf.summary.merge([input_adv_sum, accuracy_adv_sum, max_output_sigmoid_adv_sum, max_output_softmax_adv_sum])
folder_out = 'out/' + model_name + '/'
if not os.path.exists(folder_out):
os.makedirs(folder_out)
folder_csv = 'csv/' + model_name + '/'
if not os.path.exists(folder_csv):
os.makedirs(folder_csv)
folder_logs = 'logs/' + model_name
if not os.path.exists(folder_csv):
os.makedirs(folder_logs)
writer = tf.summary.FileWriter(folder_logs, sess.graph)
batch_size = 100
num_train_images = x_train.shape[0]
num_batches = num_train_images // batch_size
all_classes = np.eye(num_classes)
counter = 0
fgsm_params = {'eps': 0.03,
'clip_min': 0.,
'clip_max': 1.}
random_noise = np.random.random_sample(x_test.shape)
test_image_with_noise = np.clip(x_test + 0.1*random_noise, 0., 1.)
accuracy_list = []
sigmoid_list = []
softmax_list = []
tf.global_variables_initializer().run()
for i in range(50001):
if i % num_batches == 0:
idx_train = np.arange(x_train.shape[0])
np.random.shuffle(idx_train)
x_train, y_train = x_train[idx_train], y_train[idx_train]
idx = i % num_batches
batch_X = x_train[idx*batch_size:(idx+1)*batch_size]
batch_Y = y_train[idx*batch_size:(idx+1)*batch_size]
max_learning_rate = 0.003
min_learning_rate = 0.0001
decay_speed = 2000.0
learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * np.exp(-i/decay_speed)
if i % 500 == 0 or i == 50000:
counter += 1
samples = sess.run(GXsigmoid_test, feed_dict={GY: all_classes, BS: num_classes})
fig = plot_generator(samples)
plt.savefig(folder_out+"gen_"+str(i).zfill(6)+'.png', bbox_inches='tight')
plt.close(fig)
attack_fgsm = FastGradientMethod(model_classifier, sess=sess)
adv_x_np = attack_fgsm.generate_np(x_test, **fgsm_params)
fig = plot_generator(adv_x_np[:num_classes])
plt.savefig(folder_out+"adv_"+str(i).zfill(6)+'.png', bbox_inches='tight')
plt.close(fig)
accu_test, c_loss_test, sigmoid_test, softmax_test, sum_c = sess.run([accuracy, c_loss, max_output_sigmoid_test, max_output_softmax_test, c_sum], {X: x_test, Y_: y_test})
writer.add_summary(sum_c, i)
g_loss_test, sum_g = sess.run([g_loss, g_sum], {GY: batch_Y, GX_: batch_X, BS: batch_size})
writer.add_summary(sum_g, i)
print(str(i) + ": epoch " + str(i*batch_size//x_train.shape[0]+1)\
+ " - test loss class: " + str(c_loss_test) + " test loss gen: " + str(g_loss_test))
print("Real test images - Sigmoid: " + str(sigmoid_test) + "\tSoftmax: " + str(softmax_test) + "\taccuracy: "+ str(accu_test))
sigmoid_random, softmax_random, sum_random = sess.run([max_output_sigmoid_noise, max_output_softmax_noise, noise_sum], {X_noisy: random_noise})
writer.add_summary(sum_random, i)
accu_random, sum_noisy = sess.run([accuracy_noisy, noisy_sum], {X_noisy: test_image_with_noise, Y_: y_test})
writer.add_summary(sum_noisy, i)
print("Random noise images - Sigmoid: " + str(sigmoid_random) + "\tSoftmax: " + str(softmax_random) + "\taccuracy: "+ str(accu_random))
accu_adv, sigmoid_adv, softmax_adv, sum_adv = sess.run([accuracy_adv, max_output_sigmoid_adv, max_output_softmax_adv, adv_sum], {X_adv: adv_x_np, Y_: y_test})
writer.add_summary(sum_adv, i)
print("Adversarial examples - Sigmoid: " + str(sigmoid_adv) + "\tSoftmax: " + str(softmax_adv) + "\taccuracy: "+ str(accu_adv))
print()
accuracy_list.append([i, accu_test, accu_random, accu_adv, counter])
sigmoid_list.append([i, sigmoid_test, sigmoid_random, sigmoid_adv, counter])
softmax_list.append([i, softmax_test, softmax_random, softmax_adv, counter])
sess.run(c_train, {X: batch_X, Y_: batch_Y, lr: learning_rate})
if config_num == 1 or (config_num == 2 and i < 25000) or\
config_num == 4 or (config_num == 5 and i < 25000):
sess.run(g_train, {GY: batch_Y, GX_: batch_X, lr: learning_rate, BS: batch_size})
writer.close()
with open(folder_csv+"accuracy.csv", "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(accuracy_list)
with open(folder_csv+"sigmoid.csv", "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(sigmoid_list)
with open(folder_csv+"softmax.csv", "w") as output:
writer = csv.writer(output, lineterminator='\n')
writer.writerows(softmax_list)
accu_data = utils_csv.get_data_csv_file(folder_csv+"accuracy.csv")
sigmoid_data = utils_csv.get_data_csv_file(folder_csv+"sigmoid.csv")
softmax_data = utils_csv.get_data_csv_file(folder_csv+"softmax.csv")
utils_csv.print_best(accu_data, sigmoid_data, softmax_data, folder_csv+"summary.txt")
| true | true |
f7268d97ecd869e307e958def5462cfcc567d433 | 13,947 | py | Python | discum/guild/guild.py | DJJ05/Discord-S.C.U.M | 498c2aebb71cef331b1c47a8bb6ffe65e88d2a34 | [
"MIT"
] | 1 | 2022-01-02T13:39:38.000Z | 2022-01-02T13:39:38.000Z | discum/guild/guild.py | DJJ05/Discord-S.C.U.M | 498c2aebb71cef331b1c47a8bb6ffe65e88d2a34 | [
"MIT"
] | null | null | null | discum/guild/guild.py | DJJ05/Discord-S.C.U.M | 498c2aebb71cef331b1c47a8bb6ffe65e88d2a34 | [
"MIT"
] | 2 | 2022-02-17T13:04:55.000Z | 2022-02-28T00:48:00.000Z | from ..RESTapiwrap import Wrapper
from ..utils.permissions import PERMS, Permissions
from ..utils.contextproperties import ContextProperties
import time
import base64
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
class Guild(object):
    """REST wrapper for guild (server) related Discord API endpoints.

    Covers invites, joining/leaving, moderation (kick/ban/roles), member
    verification, threads, school hubs, and misc guild lookups. Every method
    returns the response object produced by Wrapper.sendRequest.
    """
    __slots__ = ['discord', 's', 'log']
    def __init__(self, discord, s, log): #s is the requests session object
        self.discord = discord  # base API url
        self.s = s
        self.log = log

    '''
    invite codes / server info
    '''
    #get guild info from invite code
    def getInfoFromInviteCode(self, inviteCode, with_counts, with_expiration, fromJoinGuildNav):
        url = self.discord+"invites/"+inviteCode
        if (with_counts!=None or with_expiration!=None or fromJoinGuildNav):
            url += "?"
            data = {}
            if fromJoinGuildNav:
                data["inputValue"] = inviteCode
            if with_counts != None:
                data["with_counts"] = with_counts
            if with_expiration != None:
                data["with_expiration"] = with_expiration
            url += "&".join( "%s=%s" % (k, quote(repr(data[k]).lower())) for k in data)
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    #just the join guild endpoint, default location mimics joining a guild from the ([+]Add a Server) button
    def joinGuildRaw(self, inviteCode, guild_id=None, channel_id=None, channel_type=None, location="join guild"):
        url = self.discord+"invites/"+inviteCode
        if location in ("accept invite page", "join guild"):
            return Wrapper.sendRequest(self.s, 'post', url, {}, headerModifications={"update":{"X-Context-Properties":ContextProperties.get(location, guild_id=guild_id, channel_id=channel_id, channel_type=channel_type)}}, log=self.log)
        elif location == "markdown":
            return Wrapper.sendRequest(self.s, 'post', url, {}, headerModifications={"update":{"X-Context-Properties":ContextProperties.get("markdown")}}, log=self.log)

    def joinGuild(self, inviteCode, location, wait):
        location = location.lower()
        if location in ("accept invite page", "join guild"):
            #fetch invite metadata first to mimic real client behavior
            guildData = self.getInfoFromInviteCode(inviteCode, with_counts=True, with_expiration=True, fromJoinGuildNav=(location.lower()=="join guild")).json()
            if wait: time.sleep(wait)
            return self.joinGuildRaw(inviteCode, guildData["guild"]["id"], guildData["channel"]["id"], guildData["channel"]["type"], location)
        elif location == "markdown":
            return self.joinGuildRaw(inviteCode, location="markdown")

    def previewGuild(self, guildID, sessionID):
        url = self.discord+"guilds/"+guildID+"/members/@me?lurker=true"
        if sessionID != None:
            url += "&session_id="+sessionID
        #"e30=" is base64 of "{}" (empty context properties)
        return Wrapper.sendRequest(self.s, 'put', url, headerModifications={"update":{"X-Context-Properties":"e30="}}, log=self.log)

    def leaveGuild(self, guildID, lurking):
        url = self.discord+"users/@me/guilds/"+guildID
        body = {"lurking": lurking}
        return Wrapper.sendRequest(self.s, 'delete', url, body, log=self.log)

    def createInvite(self, channelID, max_age_seconds, max_uses, grantTempMembership, checkInvite, targetType): #has to be a channel thats in a guild. also checkInvite and targetType are basically useless.
        url = self.discord+"channels/"+channelID+"/invites"
        if max_age_seconds == False: #False (or 0) means "never expire"
            max_age_seconds = 0
        if max_uses == False: #False (or 0) means "unlimited uses"
            max_uses = 0
        body = {"max_age": max_age_seconds, "max_uses": max_uses, "temporary": grantTempMembership}
        if checkInvite != "":
            body["validate"] = checkInvite
        if targetType != "":
            body["target_type"] = targetType
        return Wrapper.sendRequest(self.s, 'post', url, body, headerModifications={"update":{"X-Context-Properties":ContextProperties.get("guild header")}}, log=self.log)

    def deleteInvite(self, inviteCode):
        url = self.discord+'invites/'+inviteCode
        return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)

    def getGuildInvites(self, guildID):
        url = self.discord+'guilds/'+guildID+'/invites'
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getChannelInvites(self, channelID):
        url = self.discord+'channels/'+channelID+'/invites'
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getGuilds(self, with_counts):
        url = self.discord+"users/@me/guilds"
        if with_counts != None:
            url += "?with_counts="+repr(with_counts).lower()
        #this request is tracked (X-Track) rather than sent with X-Super-Properties
        headerMods = {"update":{"X-Track":self.s.headers.get("X-Super-Properties")}, "remove":["X-Super-Properties"]}
        return Wrapper.sendRequest(self.s, 'get', url, headerModifications=headerMods, log=self.log)

    def getGuildChannels(self, guildID):
        url = self.discord+'guilds/'+guildID+'/channels'
        headerMods = {"update":{"X-Track":self.s.headers.get("X-Super-Properties")}, "remove":["X-Super-Properties"]}
        return Wrapper.sendRequest(self.s, 'get', url, headerModifications=headerMods, log=self.log)

    def getDiscoverableGuilds(self, offset, limit):
        url = self.discord+"discoverable-guilds?offset="+repr(offset)+"&limit="+repr(limit)
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getGuildRegions(self, guildID):
        url = self.discord+'guilds/'+guildID+'/regions'
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    '''
    server moderation and management
    '''
    #create a guild
    def createGuild(self, name, icon, channels, systemChannelID, template):
        url = self.discord+"guilds"
        body = {"name": name, "icon":icon, "channels":channels, "system_channel_id":systemChannelID, "guild_template_code":template}
        if icon != None:
            #icon is a path to an image file; send it base64-encoded as a data uri
            with open(icon, "rb") as image:
                encodedImage = base64.b64encode(image.read()).decode('utf-8')
            body["icon"] = "data:image/png;base64,"+encodedImage
        return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)

    #delete a guild (assuming you are the owner)
    def deleteGuild(self, guildID):
        url = self.discord+"guilds/%s/delete" % (guildID)
        body = {}
        return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)

    #kick a user
    def kick(self, guildID, userID, reason):
        url = self.discord+"guilds/%s/members/%s?reason=%s" % (guildID, userID, quote(reason))
        #only attach the audit-log header when a reason was actually supplied
        #(previous condition was inverted: it set the header only for reason=="")
        headerMods = {"update":{"X-Audit-Log-Reason":reason}} if reason != "" else {}
        return Wrapper.sendRequest(self.s, 'delete', url, headerModifications=headerMods, log=self.log)

    #ban a user
    def ban(self, guildID, userID, deleteMessagesDays, reason):
        url = self.discord+"guilds/%s/bans/%s" % (guildID, userID)
        body = {"delete_message_days": str(deleteMessagesDays), "reason": reason}
        #same inverted-condition fix as kick(): header only when a reason exists
        headerMods = {"update":{"X-Audit-Log-Reason":reason}} if reason != "" else {}
        return Wrapper.sendRequest(self.s, 'put', url, body, headerModifications=headerMods, log=self.log)

    def revokeBan(self, guildID, userID):
        url = self.discord+"guilds/"+guildID+"/bans/"+userID
        return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)

    #lookup a user in a guild. thx Echocage for finding this api endpoint
    '''
    removed as this is a bot-only request. Use bot.gateway.checkGuildMembers instead.
    def getGuildMember(self, guildID, userID):
        url = self.discord+"guilds/%s/members/%s" % (guildID, userID)
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
    '''

    def getRoleMemberCounts(self, guildID):
        url = self.discord+"guilds/"+guildID+"/roles/member-counts"
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getGuildIntegrations(self, guildID, include_applications):
        url = self.discord+"guilds/"+guildID+"/integrations"
        if include_applications != None:
            url += "?include_applications="+repr(include_applications).lower()
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getGuildTemplates(self, guildID):
        url = self.discord+"guilds/"+guildID+"/templates"
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getRoleMemberIDs(self, guildID, roleID):
        url = self.discord+"guilds/"+guildID+"/roles/"+roleID+"/member-ids"
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def addMembersToRole(self, guildID, roleID, memberIDs):
        if isinstance(memberIDs, str): #accept a single id or a list of ids
            memberIDs = [memberIDs]
        url = self.discord+"guilds/"+guildID+"/roles/"+roleID+"/members"
        body = {"member_ids":memberIDs}
        return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)

    def setMemberRoles(self, guildID, memberID, roleIDs):
        if isinstance(roleIDs, str): #accept a single id or a list of ids
            roleIDs = [roleIDs]
        url = self.discord+"guilds/"+guildID+"/members/"+memberID
        body = {"roles": roleIDs}
        return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)

    '''
    other stuff
    '''
    #get member verification data
    def getMemberVerificationData(self, guildID, with_guild, invite_code):
        url = self.discord+"guilds/"+guildID+"/member-verification?with_guild="+str(with_guild).lower()
        if invite_code != None:
            url += "&invite_code="+invite_code
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def agreeGuildRules(self, guildID, form_fields, version):
        url = self.discord+"guilds/"+guildID+"/requests/@me"
        form_fields[0]['response'] = True #mark the rules form as accepted
        body = {"version":version, "form_fields":form_fields}
        return Wrapper.sendRequest(self.s, 'put', url, body, log=self.log)

    ### threads
    #create thread
    def createThread(self, channelID, name, messageID, public, archiveAfter):
        url = self.discord+"channels/"+channelID
        if messageID:
            url += "/messages/"+messageID
        url += "/threads"
        choice = archiveAfter.lower()
        #auto_archive_duration is expressed in minutes
        if choice == '1 hour':
            archiveAfterMinutes = 60
        elif choice in ('24 hour', '24 hours', '1 day'):
            archiveAfterMinutes = 1440
        elif choice in ('3 day', '3 days'):
            archiveAfterMinutes = 4320
        elif choice in ('1 week', '7 day', '7 days'):
            archiveAfterMinutes = 10080
        else:
            #previously an unrecognized value fell through and raised an opaque
            #NameError on an unbound local; fail with a clear error instead
            raise ValueError("Invalid archiveAfter value: %s" % archiveAfter)
        threadType = 11 if public else 12 #11 = public thread, 12 = private thread
        body = {"name": name, "type": threadType, "auto_archive_duration": archiveAfterMinutes}
        return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)

    #leave thread
    def leaveThread(self, threadID, location):
        url = self.discord+"channels/"+threadID+"/thread-members/@me?location="+quote(location)
        return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)

    #join thread
    def joinThread(self, threadID, location):
        url = self.discord+"channels/"+threadID+"/thread-members/@me?location="+quote(location)
        return Wrapper.sendRequest(self.s, 'post', url, log=self.log)

    #archive thread
    def archiveThread(self, threadID, lock):
        url = self.discord+"channels/"+threadID
        body = {"archived": True, "locked": lock}
        return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)

    #unarchive thread
    def unarchiveThread(self, threadID, lock):
        url = self.discord+"channels/"+threadID
        body = {"archived": False, "locked": lock}
        return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)

    '''
    other
    '''
    #lookup school
    def lookupSchool(self, email, allowMultipleGuilds, useVerificationCode):
        url = self.discord+"guilds/automations/email-domain-lookup"
        body = {"email":email,"allow_multiple_guilds":allowMultipleGuilds}
        if useVerificationCode != None:
            body["use_verification_code"] = useVerificationCode
        return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)

    #https://discord.com/channels/hubID/mainChannelID
    def schoolHubWaitlistSignup(self, email, school):
        url = self.discord+"hub-waitlist/signup"
        body = {"email":email,"school":school}
        return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)

    def schoolHubSignup(self, email, hubID):
        url = self.discord+'guilds/automations/email-domain-lookup'
        body = {"email":email,"guild_id":hubID,"allow_multiple_guilds":True,"use_verification_code":True}
        return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)

    def verifySchoolHubSignup(self, hubID, email, code):
        url = self.discord+'guilds/automations/email-domain-lookup/verify-code'
        body = {"code":code,"guild_id":hubID,"email":email}
        return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)

    def getSchoolHubGuilds(self, hubID): #note, the "entity_id" returned in each entry is the guildID
        url = self.discord+'channels/'+hubID+'/directory-entries' #ik it says channels, but it's the hubID/"guildID".
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getSchoolHubDirectoryCounts(self, hubID): #this only returns the # of guilds/groups in each directory/category. This doesn't even return the category names
        url = self.discord+'channels/'+hubID+'/directory-entries/counts'
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def joinGuildFromSchoolHub(self, hubID, guildID):
        url = self.discord+'guilds/'+guildID+'/members/@me?lurker=false&directory_channel_id='+hubID
        headerMods = {"update":{"X-Context-Properties":ContextProperties.get("school hub guild")}}
        return Wrapper.sendRequest(self.s, 'put', url, headerModifications=headerMods, log=self.log)

    def searchSchoolHub(self, hubID, query):
        url = self.discord+'channels/'+hubID+'/directory-entries/search?query='+query
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getMySchoolHubGuilds(self, hubID): #or guilds you own that can potentially be added to the hub
        url = self.discord+'channels/'+hubID+'/directory-entries/list'
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def setSchoolHubGuildDetails(self, hubID, guildID, description, directoryID): #directoryID (int) is not a snowflake
        url = self.discord+'channels/'+hubID+'/directory-entry/'+guildID
        body = {"description":description,"primary_category_id":directoryID}
        return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)

    def getLiveStages(self, extra):
        url = self.discord+'stage-instances'
        if extra:
            url += '/extra'
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    #the only time this is observed in the client is in a guild
    def getChannel(self, channelID):
        url = self.discord+'channels/'+channelID
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)

    def getGuildActivitiesConfig(self, guildID):
        url = self.discord+'activities/guilds/'+guildID+'/config'
        return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
| 44.990323 | 226 | 0.723095 | from ..RESTapiwrap import Wrapper
from ..utils.permissions import PERMS, Permissions
from ..utils.contextproperties import ContextProperties
import time
import base64
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
class Guild(object):
__slots__ = ['discord', 's', 'log']
def __init__(self, discord, s, log):
self.discord = discord
self.s = s
self.log = log
def getInfoFromInviteCode(self, inviteCode, with_counts, with_expiration, fromJoinGuildNav):
url = self.discord+"invites/"+inviteCode
if (with_counts!=None or with_expiration!=None or fromJoinGuildNav):
url += "?"
data = {}
if fromJoinGuildNav:
data["inputValue"] = inviteCode
if with_counts != None:
data["with_counts"] = with_counts
if with_expiration != None:
data["with_expiration"] = with_expiration
url += "&".join( "%s=%s" % (k, quote(repr(data[k]).lower())) for k in data)
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def joinGuildRaw(self, inviteCode, guild_id=None, channel_id=None, channel_type=None, location="join guild"):
url = self.discord+"invites/"+inviteCode
if location in ("accept invite page", "join guild"):
return Wrapper.sendRequest(self.s, 'post', url, {}, headerModifications={"update":{"X-Context-Properties":ContextProperties.get(location, guild_id=guild_id, channel_id=channel_id, channel_type=channel_type)}}, log=self.log)
elif location == "markdown":
return Wrapper.sendRequest(self.s, 'post', url, {}, headerModifications={"update":{"X-Context-Properties":ContextProperties.get("markdown")}}, log=self.log)
def joinGuild(self, inviteCode, location, wait):
location = location.lower()
if location in ("accept invite page", "join guild"):
guildData = self.getInfoFromInviteCode(inviteCode, with_counts=True, with_expiration=True, fromJoinGuildNav=(location.lower()=="join guild")).json()
if wait: time.sleep(wait)
return self.joinGuildRaw(inviteCode, guildData["guild"]["id"], guildData["channel"]["id"], guildData["channel"]["type"], location)
elif location == "markdown":
return self.joinGuildRaw(inviteCode, location="markdown")
def previewGuild(self, guildID, sessionID):
url = self.discord+"guilds/"+guildID+"/members/@me?lurker=true"
if sessionID != None:
url += "&session_id="+sessionID
return Wrapper.sendRequest(self.s, 'put', url, headerModifications={"update":{"X-Context-Properties":"e30="}}, log=self.log)
def leaveGuild(self, guildID, lurking):
url = self.discord+"users/@me/guilds/"+guildID
body = {"lurking": lurking}
return Wrapper.sendRequest(self.s, 'delete', url, body, log=self.log)
def createInvite(self, channelID, max_age_seconds, max_uses, grantTempMembership, checkInvite, targetType):
url = self.discord+"channels/"+channelID+"/invites"
if max_age_seconds == False:
max_age_seconds = 0
if max_uses == False:
max_uses = 0
body = {"max_age": max_age_seconds, "max_uses": max_uses, "temporary": grantTempMembership}
if checkInvite != "":
body["validate"] = checkInvite
if targetType != "":
body["target_type"] = targetType
return Wrapper.sendRequest(self.s, 'post', url, body, headerModifications={"update":{"X-Context-Properties":ContextProperties.get("guild header")}}, log=self.log)
def deleteInvite(self, inviteCode):
url = self.discord+'invites/'+inviteCode
return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)
def getGuildInvites(self, guildID):
url = self.discord+'guilds/'+guildID+'/invites'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getChannelInvites(self, channelID):
url = self.discord+'channels/'+channelID+'/invites'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuilds(self, with_counts):
url = self.discord+"users/@me/guilds"
if with_counts != None:
url += "?with_counts="+repr(with_counts).lower()
headerMods = {"update":{"X-Track":self.s.headers.get("X-Super-Properties")}, "remove":["X-Super-Properties"]}
return Wrapper.sendRequest(self.s, 'get', url, headerModifications=headerMods, log=self.log)
def getGuildChannels(self, guildID):
url = self.discord+'guilds/'+guildID+'/channels'
headerMods = {"update":{"X-Track":self.s.headers.get("X-Super-Properties")}, "remove":["X-Super-Properties"]}
return Wrapper.sendRequest(self.s, 'get', url, headerModifications=headerMods, log=self.log)
def getDiscoverableGuilds(self, offset, limit):
url = self.discord+"discoverable-guilds?offset="+repr(offset)+"&limit="+repr(limit)
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuildRegions(self, guildID):
url = self.discord+'guilds/'+guildID+'/regions'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def createGuild(self, name, icon, channels, systemChannelID, template):
url = self.discord+"guilds"
body = {"name": name, "icon":icon, "channels":channels, "system_channel_id":systemChannelID, "guild_template_code":template}
if icon != None:
with open(icon, "rb") as image:
encodedImage = base64.b64encode(image.read()).decode('utf-8')
body["icon"] = "data:image/png;base64,"+encodedImage
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def deleteGuild(self, guildID):
url = self.discord+"guilds/%s/delete" % (guildID)
body = {}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def kick(self, guildID, userID, reason):
url = self.discord+"guilds/%s/members/%s?reason=%s" % (guildID, userID, quote(reason))
headerMods = {"update":{"X-Audit-Log-Reason":reason}} if reason=="" else {}
return Wrapper.sendRequest(self.s, 'delete', url, headerModifications=headerMods, log=self.log)
def ban(self, guildID, userID, deleteMessagesDays, reason):
url = self.discord+"guilds/%s/bans/%s" % (guildID, userID)
body = {"delete_message_days": str(deleteMessagesDays), "reason": reason}
headerMods = {"update":{"X-Audit-Log-Reason":reason}} if reason=="" else {}
return Wrapper.sendRequest(self.s, 'put', url, body, headerModifications=headerMods, log=self.log)
def revokeBan(self, guildID, userID):
url = self.discord+"guilds/"+guildID+"/bans/"+userID
return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)
def getRoleMemberCounts(self, guildID):
url = self.discord+"guilds/"+guildID+"/roles/member-counts"
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuildIntegrations(self, guildID, include_applications):
url = self.discord+"guilds/"+guildID+"/integrations"
if include_applications != None:
url += "?include_applications="+repr(include_applications).lower()
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuildTemplates(self, guildID):
url = self.discord+"guilds/"+guildID+"/templates"
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getRoleMemberIDs(self, guildID, roleID):
url = self.discord+"guilds/"+guildID+"/roles/"+roleID+"/member-ids"
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def addMembersToRole(self, guildID, roleID, memberIDs):
if isinstance(memberIDs, str):
memberIDs = [memberIDs]
url = self.discord+"guilds/"+guildID+"/roles/"+roleID+"/members"
body = {"member_ids":memberIDs}
return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)
def setMemberRoles(self, guildID, memberID, roleIDs):
if isinstance(roleIDs, str):
roleIDs = [roleIDs]
url = self.discord+"guilds/"+guildID+"/members/"+memberID
body = {"roles": roleIDs}
return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)
def getMemberVerificationData(self, guildID, with_guild, invite_code):
url = self.discord+"guilds/"+guildID+"/member-verification?with_guild="+str(with_guild).lower()
if invite_code != None:
url += "&invite_code="+invite_code
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def agreeGuildRules(self, guildID, form_fields, version):
url = self.discord+"guilds/"+guildID+"/requests/@me"
form_fields[0]['response'] = True
body = {"version":version, "form_fields":form_fields}
return Wrapper.sendRequest(self.s, 'put', url, body, log=self.log)
def startThread(self, channelID, name, messageID, public, archiveAfter):
    """Create a thread in a channel (POST .../threads).

    Restored the mangled signature: the original source line had been
    truncated to "d(self, ...)".

    If messageID is truthy the thread is started from that message.
    archiveAfter is a human-readable duration ('1 hour', '1 day',
    '3 days', '1 week', ...); Discord's auto_archive_duration field is
    expressed in minutes, which is why the values below are 60/1440/
    4320/10080 (the old local name "archiveAfterSeconds" was misleading).
    """
    url = self.discord+"channels/"+channelID
    if messageID:
        url += "/messages/"+messageID
    url += "/threads"
    choice = archiveAfter.lower()
    if choice == '1 hour':
        archiveAfterMinutes = 60
    elif choice in ('24 hour', '24 hours', '1 day'):
        archiveAfterMinutes = 1440
    elif choice in ('3 day', '3 days'):
        archiveAfterMinutes = 4320
    elif choice in ('1 week', '7 day', '7 days'):
        archiveAfterMinutes = 10080
    else:
        # Robustness fix: an unrecognised duration previously raised
        # UnboundLocalError; fall back to 24 hours instead.
        archiveAfterMinutes = 1440
    # Channel types: 11 = public thread, 12 = private thread.
    threadType = 11 if public else 12
    body = {"name": name, "type": threadType, "auto_archive_duration": archiveAfterMinutes}
    return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def leaveThread(self, threadID, location):
url = self.discord+"channels/"+threadID+"/thread-members/@me?location="+quote(location)
return Wrapper.sendRequest(self.s, 'delete', url, log=self.log)
def joinThread(self, threadID, location):
url = self.discord+"channels/"+threadID+"/thread-members/@me?location="+quote(location)
return Wrapper.sendRequest(self.s, 'post', url, log=self.log)
def archiveThread(self, threadID, lock):
url = self.discord+"channels/"+threadID
body = {"archived": True, "locked": lock}
return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)
def unarchiveThread(self, threadID, lock):
url = self.discord+"channels/"+threadID
body = {"archived": False, "locked": lock}
return Wrapper.sendRequest(self.s, 'patch', url, body, log=self.log)
def lookupSchool(self, email, allowMultipleGuilds, useVerificationCode):
url = self.discord+"guilds/automations/email-domain-lookup"
body = {"email":email,"allow_multiple_guilds":allowMultipleGuilds}
if useVerificationCode != None:
body["use_verification_code"] = useVerificationCode
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def schoolHubWaitlistSignup(self, email, school):
url = self.discord+"hub-waitlist/signup"
body = {"email":email,"school":school}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def schoolHubSignup(self, email, hubID):
url = self.discord+'guilds/automations/email-domain-lookup'
body = {"email":email,"guild_id":hubID,"allow_multiple_guilds":True,"use_verification_code":True}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def verifySchoolHubSignup(self, hubID, email, code):
url = self.discord+'guilds/automations/email-domain-lookup/verify-code'
body = {"code":code,"guild_id":hubID,"email":email}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def getSchoolHubGuilds(self, hubID):
url = self.discord+'channels/'+hubID+'/directory-entries'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getSchoolHubDirectoryCounts(self, hubID): #this only returns the # of guilds/groups in each directory/category. This doesn't even return the category names
url = self.discord+'channels/'+hubID+'/directory-entries/counts'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def joinGuildFromSchoolHub(self, hubID, guildID):
url = self.discord+'guilds/'+guildID+'/members/@me?lurker=false&directory_channel_id='+hubID
headerMods = {"update":{"X-Context-Properties":ContextProperties.get("school hub guild")}}
return Wrapper.sendRequest(self.s, 'put', url, headerModifications=headerMods, log=self.log)
def searchSchoolHub(self, hubID, query):
    """Search directory entries of a school hub (GET .../directory-entries/search)."""
    # Bug fix: query is free-form user text and must be URL-encoded, as is
    # already done for query parameters elsewhere in this wrapper
    # (e.g. leaveThread/joinThread use quote() on "location").
    url = self.discord+'channels/'+hubID+'/directory-entries/search?query='+quote(query)
    return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getMySchoolHubGuilds(self, hubID):
url = self.discord+'channels/'+hubID+'/directory-entries/list'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def setSchoolHubGuildDetails(self, hubID, guildID, description, directoryID):
url = self.discord+'channels/'+hubID+'/directory-entry/'+guildID
body = {"description":description,"primary_category_id":directoryID}
return Wrapper.sendRequest(self.s, 'post', url, body, log=self.log)
def getLiveStages(self, extra):
url = self.discord+'stage-instances'
if extra:
url += '/extra'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getChannel(self, channelID):
url = self.discord+'channels/'+channelID
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
def getGuildActivitiesConfig(self, guildID):
url = self.discord+'activities/guilds/'+guildID+'/config'
return Wrapper.sendRequest(self.s, 'get', url, log=self.log)
| true | true |
f7268dac2e06e129091c886ee83609a1a78deda9 | 2,558 | py | Python | sharpy/combat/terran/micro_liberators.py | DuncanDHall/sharpy-sc2 | 7a47a7538ad99214e3f0288b6213cac882551180 | [
"MIT"
] | null | null | null | sharpy/combat/terran/micro_liberators.py | DuncanDHall/sharpy-sc2 | 7a47a7538ad99214e3f0288b6213cac882551180 | [
"MIT"
] | null | null | null | sharpy/combat/terran/micro_liberators.py | DuncanDHall/sharpy-sc2 | 7a47a7538ad99214e3f0288b6213cac882551180 | [
"MIT"
] | null | null | null | from typing import Dict, Optional
from sc2.ids.ability_id import AbilityId
from sharpy.combat import Action, MoveType, MicroStep
from sc2.ids.unit_typeid import UnitTypeId
from sc2.unit import Unit
from sc2.position import Point2
class MicroLiberators(MicroStep):
    """Micro controller for Terran Liberators.

    Sieges (morphs to Liberator AG mode) when relevant ground targets are
    nearby, unsieges when none remain, and regroups with the ground army
    while out of combat.
    """

    def __init__(self, group_distance: float = -5):
        super().__init__()
        self.last_siege = 0
        self.group_distance = group_distance
        self.focus_fired: Dict[int, float] = dict()
        self.closest_units: Dict[int, Optional[Unit]] = dict()

    def unit_solve_combat(self, unit: Unit, current_command: Action) -> Action:
        base_action = super().unit_solve_combat(unit, current_command)
        return self.final_solve(unit, base_action)

    def final_solve(self, unit: Unit, command: Action) -> Action:
        now = self.knowledge.ai.time
        # TODO: When in AG mode, look for relevant enemies inside the sieged zone.
        ground_threats = self.cache.enemy_in_range(unit.position, 10).not_structure.not_flying.visible

        if self.move_type == MoveType.PanicRetreat:
            # TODO: Unsiege
            return command

        moving_types = (
            MoveType.Assault,
            MoveType.SearchAndDestroy,
            MoveType.DefensiveRetreat,
        )
        out_of_combat = (
            self.move_type in moving_types
            and self.engage_ratio < 0.5
            and self.can_engage_ratio < 0.5
            and len(self.closest_units) < 1
        )
        if out_of_combat and self.group.ground_units and isinstance(command.target, Point2):
            # TODO: Unsiege
            # Regroup with the ground army.
            return Action(self.group.center.towards(command.target, self.group_distance), False)

        if not ground_threats.exists:
            # Nothing to siege against: stay as-is in air mode, unsiege otherwise.
            if unit.type_id == UnitTypeId.LIBERATOR:
                return command
            if unit.type_id == UnitTypeId.LIBERATORAG:
                return Action(None, False, AbilityId.MORPH_LIBERATORAAMODE)
        elif unit.type_id == UnitTypeId.LIBERATOR:
            # Pick a ground enemy within siege range and morph onto its position.
            target: Optional[Unit] = None
            for enemy in ground_threats:
                if enemy.distance_to(unit) < 12:
                    target = enemy
            if target is not None:
                self.last_siege = now
                # TODO: Save position and zone link with current liberator to CooldownManager
                return Action(target.position, False, AbilityId.MORPH_LIBERATORAGMODE)
        return command
| 38.757576 | 111 | 0.635262 | from typing import Dict, Optional
from sc2.ids.ability_id import AbilityId
from sharpy.combat import Action, MoveType, MicroStep
from sc2.ids.unit_typeid import UnitTypeId
from sc2.unit import Unit
from sc2.position import Point2
class MicroLiberators(MicroStep):
def __init__(self, group_distance: float = -5):
super().__init__()
self.last_siege = 0
self.group_distance = group_distance
self.focus_fired: Dict[int, float] = dict()
self.closest_units: Dict[int, Optional[Unit]] = dict()
def unit_solve_combat(self, unit: Unit, current_command: Action) -> Action:
return self.final_solve(unit, super().unit_solve_combat(unit, current_command))
def final_solve(self, unit: Unit, command: Action) -> Action:
time = self.knowledge.ai.time
relevant_ground_enemies = self.cache.enemy_in_range(unit.position, 10).not_structure.not_flying.visible
if self.move_type == MoveType.PanicRetreat:
return command
if (
(
self.move_type == MoveType.Assault
or self.move_type == MoveType.SearchAndDestroy
or self.move_type == MoveType.DefensiveRetreat
)
and self.engage_ratio < 0.5
and self.can_engage_ratio < 0.5
and len(self.closest_units) < 1
):
if self.group.ground_units and isinstance(command.target, Point2):
return Action(self.group.center.towards(command.target, self.group_distance), False)
if not relevant_ground_enemies.exists:
if unit.type_id == UnitTypeId.LIBERATOR:
return command
if unit.type_id == UnitTypeId.LIBERATORAG:
return Action(None, False, AbilityId.MORPH_LIBERATORAAMODE)
if unit.type_id == UnitTypeId.LIBERATOR and relevant_ground_enemies.exists:
target: Optional[Unit] = None
enemy: Unit
for enemy in relevant_ground_enemies:
if enemy.distance_to(unit) < 12:
target = enemy
if target is not None:
self.last_siege = time
return Action(target.position, False, AbilityId.MORPH_LIBERATORAGMODE)
return command
| true | true |
f7268dd4e5fbd94440e9ab3dc5fe73f4c538070d | 6,616 | py | Python | bindings/python/ensmallen_graph/datasets/string/proteusmirabilis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/proteusmirabilis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/proteusmirabilis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Proteus mirabilis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:06:19.073950
The undirected graph Proteus mirabilis has 3626 nodes and 280355 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04266 and has 14 connected components, where the component with most
nodes has 3592 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 128, the mean node degree is 154.64, and
the node degree mode is 1. The top 5 most central nodes are 529507.PMI2600
(degree 1405), 529507.PMI2826 (degree 1179), 529507.PMI1545 (degree 1018),
529507.PMI3678 (degree 983) and 529507.PMI2101 (degree 965).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ProteusMirabilis
# Then load the graph
graph = ProteusMirabilis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ProteusMirabilis(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return new instance of the Proteus mirabilis graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs/string",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs forwarded to the graph constructor.

    Returns
    -----------------------
    Instance of Proteus mirabilis graph.

    References
    -----------------------
    Please cite STRING v11 (Szklarczyk et al., 2019, Nucleic Acids
    Research, 47(D1):D607-D613) when using this data.
    """
    # Build the retrieval helper first, then invoke it; the download and
    # graph construction only happen on the final call.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="ProteusMirabilis",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| 35.005291 | 223 | 0.700574 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def ProteusMirabilis(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
return AutomaticallyRetrievedGraph(
graph_name="ProteusMirabilis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f7268f4b4bb9d783f2b2aaad61a415d7093b910f | 65 | py | Python | tools/__init__.py | mchonan1996/safetyculture-sdk-python | 9737b8716698844dc09316baed0d50fc55c6ea8b | [
"Apache-2.0"
] | 22 | 2017-03-03T05:16:46.000Z | 2021-12-28T20:39:02.000Z | tools/__init__.py | mchonan1996/safetyculture-sdk-python | 9737b8716698844dc09316baed0d50fc55c6ea8b | [
"Apache-2.0"
] | 12 | 2017-02-28T06:29:07.000Z | 2021-03-25T21:42:14.000Z | tools/__init__.py | mchonan1996/safetyculture-sdk-python | 9737b8716698844dc09316baed0d50fc55c6ea8b | [
"Apache-2.0"
] | 15 | 2017-02-23T00:49:18.000Z | 2021-12-28T20:39:20.000Z | from .exporter import csvExporter
from .exporter import exporter
| 21.666667 | 33 | 0.846154 | from .exporter import csvExporter
from .exporter import exporter
| true | true |
f7268fd09962820734816832c033e7525fcd9ab8 | 432 | py | Python | website/util/time.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | website/util/time.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | website/util/time.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import time
from datetime import datetime
def get_timestamp():
    """Return the current Unix time, truncated to whole seconds."""
    seconds_since_epoch = time.time()
    return int(seconds_since_epoch)
def throttle_period_expired(timestamp, throttle):
    """Return True when more than `throttle` seconds have passed since `timestamp`.

    `timestamp` may be a naive UTC `datetime`, a Unix timestamp in
    seconds, or a falsy value (treated as "never recorded", i.e. expired).
    """
    if not timestamp:
        return True
    if isinstance(timestamp, datetime):
        elapsed = (datetime.utcnow() - timestamp).total_seconds()
    else:
        elapsed = get_timestamp() - timestamp
    return elapsed > throttle
| 24 | 73 | 0.699074 |
from __future__ import absolute_import
import time
from datetime import datetime
def get_timestamp():
return int(time.time())
def throttle_period_expired(timestamp, throttle):
if not timestamp:
return True
elif isinstance(timestamp, datetime):
return (datetime.utcnow() - timestamp).total_seconds() > throttle
else:
return (get_timestamp() - timestamp) > throttle
| true | true |
f726902376e280ba863a7c19c43b900218daf48a | 4,130 | py | Python | alipay/aop/api/request/AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignPromotionactivityCustomerReceiveModel import AlipayMarketingCampaignPromotionactivityCustomerReceiveModel
class AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest(object):
    """Request wrapper for the Alipay OpenAPI method
    alipay.marketing.campaign.promotionactivity.customer.receive.

    Holds the business payload (``biz_content``/``biz_model``) plus the
    common gateway parameters, and serializes them via :meth:`get_params`.
    Generated SDK boilerplate: the parameter keys and JSON serialization
    options must match what the Alipay gateway expects.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict, which is
        # converted into the model type.
        if isinstance(value, AlipayMarketingCampaignPromotionactivityCustomerReceiveModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMarketingCampaignPromotionactivityCustomerReceiveModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # NOTE(review): non-dict values are silently ignored here; this
        # mirrors the generated SDK behavior.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach an extra free-form text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Serialize all set fields into the flat gateway parameter dict.

        biz_model takes precedence for the P_BIZ_CONTENT key; biz_content
        (model or raw value) is written under the literal 'biz_content'
        key. JSON is emitted with sorted keys and compact separators so
        the signature computed over it is deterministic.
        """
        params = dict()
        params[P_METHOD] = 'alipay.marketing.campaign.promotionactivity.customer.receive'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Return file-upload parameters; this API has none."""
        multipart_params = dict()
        return multipart_params
| 28.482759 | 155 | 0.658111 |
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignPromotionactivityCustomerReceiveModel import AlipayMarketingCampaignPromotionactivityCustomerReceiveModel
class AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayMarketingCampaignPromotionactivityCustomerReceiveModel):
self._biz_content = value
else:
self._biz_content = AlipayMarketingCampaignPromotionactivityCustomerReceiveModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.marketing.campaign.promotionactivity.customer.receive'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| true | true |
f7269048fed394fc95bcb70e5e4e1c5ecb768bd1 | 2,752 | py | Python | py_pdf_term/analysis/_analysis/occurrences/term.py | kumachan-mis/pdf-slides-term | cf3319e4de723bd9424d23141803342d3c649103 | [
"MIT"
] | 1 | 2021-01-08T16:05:30.000Z | 2021-01-08T16:05:30.000Z | py_pdf_term/analysis/_analysis/occurrences/term.py | kumachan-mis/py-slides-term | 1e9337b97ae8968950489e728fc7aeeeb7eb1f4b | [
"MIT"
] | 21 | 2021-01-03T13:50:59.000Z | 2021-06-17T00:27:49.000Z | py_pdf_term/analysis/_analysis/occurrences/term.py | kumachan-mis/pdf-slides-term | cf3319e4de723bd9424d23141803342d3c649103 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Set, Dict
from ..runner import AnalysisRunner
from py_pdf_term.candidates import DomainCandidateTermList
from py_pdf_term._common.data import Term
@dataclass(frozen=True)
class DomainTermOccurrence:
    """Final occurrence statistics of candidate terms in one domain."""

    domain: str
    # unique domain name
    term_freq: Dict[str, int]
    # brute force counting of lemmatized term occurrences in the domain
    # count even if the lemmatized term occurs as a part of a lemmatized phrase
    doc_term_freq: Dict[str, int]
    # number of documents in the domain that contain the lemmatized term
    # count even if the lemmatized term occurs as a part of a lemmatized phrase
@dataclass(frozen=True)
class _DomainTermOccurrence:
    """Intermediate accumulator used while counting; holds raw document-ID
    sets that are later collapsed into per-term document counts."""

    domain: str
    # unique domain name
    term_freq: Dict[str, int]
    # brute force counting of lemmatized term occurrences in the domain
    # count even if the term occurs as a part of a lemmatized phrase
    doc_term_set: Dict[str, Set[int]]
    # set of document IDs in the domain that contain the lemmatized term
    # add even if the lemmatized term occurs as a part of a lemmatized phrase
class TermOccurrenceAnalyzer:
    """Counts, per domain, how often each candidate term occurs (by lemma)
    and in how many documents it appears."""

    def __init__(self, ignore_augmented: bool = True) -> None:
        self._runner = AnalysisRunner(ignore_augmented=ignore_augmented)

    def analyze(
        self, domain_candidates: DomainCandidateTermList
    ) -> DomainTermOccurrence:
        """Run the occurrence analysis over all subcandidates of the domain."""
        candidate_lemmas = domain_candidates.to_candidates_str_set(
            lambda candidate: candidate.lemma()
        )

        def accumulate(
            occ: _DomainTermOccurrence,
            pdf_id: int,
            page_num: int,
            subcandidate: Term,
        ) -> None:
            # Count only lemmas that are actual candidates of this domain.
            lemma = subcandidate.lemma()
            if lemma not in candidate_lemmas:
                return
            occ.term_freq[lemma] = occ.term_freq.get(lemma, 0) + 1
            occ.doc_term_set.setdefault(lemma, set()).add(pdf_id)

        raw_occurrence = self._runner.run_through_subcandidates(
            domain_candidates,
            _DomainTermOccurrence(domain_candidates.domain, dict(), dict()),
            accumulate,
        )
        return self._finalize(raw_occurrence)

    def _finalize(self, term_occ: _DomainTermOccurrence) -> DomainTermOccurrence:
        """Collapse the per-term document-ID sets into document counts."""
        doc_term_freq = {
            lemma: len(pdf_ids)
            for lemma, pdf_ids in term_occ.doc_term_set.items()
        }
        return DomainTermOccurrence(term_occ.domain, term_occ.term_freq, doc_term_freq)
| 37.189189 | 87 | 0.68968 | from dataclasses import dataclass
from typing import Set, Dict
from ..runner import AnalysisRunner
from py_pdf_term.candidates import DomainCandidateTermList
from py_pdf_term._common.data import Term
@dataclass(frozen=True)
class DomainTermOccurrence:
domain: str
term_freq: Dict[str, int]
doc_term_freq: Dict[str, int]
@dataclass(frozen=True)
class _DomainTermOccurrence:
domain: str
term_freq: Dict[str, int]
doc_term_set: Dict[str, Set[int]]
class TermOccurrenceAnalyzer:
    """Count how often each candidate term occurs in a domain's documents."""

    def __init__(self, ignore_augmented: bool = True) -> None:
        self._runner = AnalysisRunner(ignore_augmented=ignore_augmented)

    def analyze(
        self, domain_candidates: DomainCandidateTermList
    ) -> DomainTermOccurrence:
        """Return term and document frequencies for every candidate lemma."""
        lemma_set = domain_candidates.to_candidates_str_set(
            lambda candidate: candidate.lemma()
        )

        def accumulate(
            acc: _DomainTermOccurrence,
            pdf_id: int,
            page_num: int,
            subcandidate: Term,
        ) -> None:
            # Only lemmas that are actual domain candidates are counted.
            lemma = subcandidate.lemma()
            if lemma in lemma_set:
                acc.term_freq[lemma] = acc.term_freq.get(lemma, 0) + 1
                acc.doc_term_set.setdefault(lemma, set()).add(pdf_id)

        accumulated = self._runner.run_through_subcandidates(
            domain_candidates,
            _DomainTermOccurrence(domain_candidates.domain, dict(), dict()),
            accumulate,
        )
        return self._finalize(accumulated)

    def _finalize(self, term_occ: _DomainTermOccurrence) -> DomainTermOccurrence:
        # Turn each set of PDF ids into its size (document frequency).
        doc_term_freq = {
            lemma: len(pdf_ids)
            for lemma, pdf_ids in term_occ.doc_term_set.items()
        }
        return DomainTermOccurrence(term_occ.domain, term_occ.term_freq, doc_term_freq)
| true | true |
f72690a7b3248a10457383f38300fee9412153dd | 124,514 | py | Python | Compiler/types.py | lemonviv/Pivot-SPDZ | f3db87d8849e5f9fa39f321d85feec83107ee405 | [
"BSD-2-Clause"
] | null | null | null | Compiler/types.py | lemonviv/Pivot-SPDZ | f3db87d8849e5f9fa39f321d85feec83107ee405 | [
"BSD-2-Clause"
] | null | null | null | Compiler/types.py | lemonviv/Pivot-SPDZ | f3db87d8849e5f9fa39f321d85feec83107ee405 | [
"BSD-2-Clause"
] | null | null | null | from Compiler.program import Tape
from Compiler.exceptions import *
from Compiler.instructions import *
from Compiler.instructions_base import *
from .floatingpoint import two_power
from . import comparison, floatingpoint
import math
from . import util
import operator
from functools import reduce
class ClientMessageType:
    """Message-type tags for data sent to an external client.

    Each tag describes the layout of a message that may carry an array
    of n values.
    """
    # Untyped message, kept for backwards compatibility: the virtual
    # machine relies on this being 0.
    NoType = 0
    # Shares of multiplication triples: 3 x sint per value.
    TripleShares = 1
    # Clear integers modulo p: 1 x cint per value.
    ClearModpInt = 2
    # Native 32-bit integers: 1 x regint per value.
    Int32 = 3
    # Clear fixed-point values (left-shifted by the precision): 1 x cint
    # per value.
    ClearModpFix = 4
class MPCThread(object):
    """Thread wrapper giving a callable its own tape in the program.

    :param target: callable to run in the thread
    :param name: name suffix for the thread's tape
    :param args: arguments stored for the tape (defaults to none)
    :param runtime_arg: default runtime argument used by start()
    """
    def __init__(self, target, name, args = None, runtime_arg = None):
        """ Create a thread from a callable object. """
        if not callable(target):
            raise CompilerError('Target %s for thread %s is not callable' % (target,name))
        self.name = name
        self.tape = Tape(program.name + '-' + name, program)
        self.target = target
        # Avoid the shared mutable-default pitfall: each instance gets
        # its own fresh list when no arguments are given.
        self.args = [] if args is None else args
        self.runtime_arg = runtime_arg
        # Number of start() calls without a matching join().
        self.running = 0

    def start(self, runtime_arg = None):
        """Start the thread, optionally overriding the runtime argument."""
        self.running += 1
        program.start_thread(self, runtime_arg or self.runtime_arg)

    def join(self):
        """Stop the thread; raises CompilerError if it was never started."""
        if not self.running:
            raise CompilerError('Thread %s is not running' % self.name)
        self.running -= 1
        program.stop_thread(self)
def vectorize(operation):
    """Decorator propagating a register's vector size to ``operation``.

    Runs the operation with the global vector size set to ``self.size``
    so the emitted instructions inherit the register's width, and
    rejects a register (or sfloat) operand of a mismatching size.
    """
    def vectorized_operation(self, *args, **kwargs):
        if len(args):
            if (isinstance(args[0], Tape.Register) or isinstance(args[0], sfloat)) \
                    and args[0].size != self.size:
                raise CompilerError('Different vector sizes of operands')
        # Set unconditionally so the paired reset below never underflows
        # and zero-argument methods (e.g. reveal) are vectorized too.
        set_global_vector_size(self.size)
        res = operation(self, *args, **kwargs)
        reset_global_vector_size()
        return res
    return vectorized_operation
def vectorize_max(operation):
    """Decorator running ``operation`` under the largest vector size found
    among ``self`` and its arguments; arguments without a ``size``
    attribute are ignored."""
    def vectorized_operation(self, *args, **kwargs):
        widest = self.size
        for candidate in args:
            candidate_size = getattr(candidate, 'size', None)
            if candidate_size is not None:
                widest = max(widest, candidate_size)
        set_global_vector_size(widest)
        result = operation(self, *args, **kwargs)
        reset_global_vector_size()
        return result
    return vectorized_operation
def vectorized_classmethod(function):
    """Build a classmethod honouring an optional ``size`` keyword: the call
    runs with that global vector size, which is restored afterwards."""
    def vectorized_function(cls, *args, **kwargs):
        size = kwargs.pop('size', None)
        if not size:
            return function(cls, *args, **kwargs)
        set_global_vector_size(size)
        res = function(cls, *args, **kwargs)
        reset_global_vector_size()
        return res
    return classmethod(vectorized_function)
def vectorize_init(function):
    """Wrap a constructor so that initialisation from a register (or
    sfloat) inherits that value's vector size; an explicit conflicting
    ``size`` keyword raises CompilerError."""
    def vectorized_init(*args, **kwargs):
        size = None
        if len(args) > 1 and isinstance(args[1], (Tape.Register, sfloat)):
            size = args[1].size
            requested = kwargs.get('size')
            if requested is not None and requested != size:
                raise CompilerError('Mismatch in vector size')
        if kwargs.get('size'):
            size = kwargs['size']
        if size is None:
            return function(*args, **kwargs)
        set_global_vector_size(size)
        res = function(*args, **kwargs)
        reset_global_vector_size()
        return res
    return vectorized_init
def set_instruction_type(operation):
    """Run ``operation`` with the global instruction type taken from
    ``self.instruction_type`` ('modp' or 'gf2n'), restoring it after."""
    def instruction_typed_operation(self, *args, **kwargs):
        set_global_instruction_type(self.instruction_type)
        result = operation(self, *args, **kwargs)
        reset_global_instruction_type()
        return result
    return instruction_typed_operation
def read_mem_value(operation):
    """Decorator dereferencing a MemValue first argument before calling
    ``operation``, so methods only ever see plain register values."""
    def read_mem_operation(self, *args, **kwargs):
        if args and isinstance(args[0], MemValue):
            args = (args[0].read(),) + args[1:]
        return operation(self, *args, **kwargs)
    return read_mem_operation
class _number(object):
    """Mixin with generic arithmetic shared by all number-like registers."""
    def square(self):
        return self * self
    def __add__(self, other):
        # 'is 0' deliberately matches only the literal int 0 known at
        # compile time, so the addition can be skipped entirely; '== 0'
        # would invoke the (possibly secret) comparison protocol.
        if other is 0:
            return self
        else:
            return self.add(other)
    def __mul__(self, other):
        # Same identity trick: short-circuit multiplication by the
        # compile-time literals 0 and 1.
        if other is 0:
            return 0
        elif other is 1:
            return self
        else:
            return self.mul(other)
    __radd__ = __add__
    __rmul__ = __mul__
    @vectorize
    def __pow__(self, exp):
        """Exponentiation by a public non-negative int exponent using
        square-and-multiply; other exponents are unsupported."""
        if isinstance(exp, int) and exp >= 0:
            if exp == 0:
                return self.__class__(1)
            # bin(exp)[3:] drops '0b' and the leading 1 bit; that top
            # bit corresponds to the initial value res = self.
            exp = bin(exp)[3:]
            res = self
            for i in exp:
                res = res.square()
                if i == '1':
                    res *= self
            return res
        else:
            return NotImplemented
    def mul_no_reduce(self, other, res_params=None):
        # Default implementation: no delayed-reduction optimization.
        return self * other
    def reduce_after_mul(self):
        return self
    def pow2(self, bit_length=None, security=None):
        return 2**self
    def min(self, other):
        # Oblivious minimum via comparison bit and multiplexer.
        return (self < other).if_else(self, other)
    def max(self, other):
        return (self < other).if_else(other, self)
class _int(object):
    """Mixin for 0/1-valued integer registers: arithmetic multiplexing
    and conditional swapping without branching."""
    def if_else(self, a, b):
        # Returns a when self == 1 and b when self == 0. Types exposing
        # for_mux() supply their own pre/post-processing of the operands.
        if hasattr(a, 'for_mux'):
            f, a, b = a.for_mux(b)
            return f(self * (a - b) + b)
        return self * (a - b) + b
    def cond_swap(self, a, b):
        # Swaps the pair iff self == 1, using a single multiplication.
        diff = self * (a - b)
        return a - diff, b + diff
    def bit_xor(self, other):
        # x XOR y == x + y - 2xy for x, y in {0, 1}.
        return self + other - 2 * self * other
class _gf2n(object):
    """Mixin providing oblivious selection/swap for GF(2^n) types,
    where addition coincides with XOR."""
    def if_else(self, a, b):
        # self is a 0/1 mask: XOR-based multiplexer returning a or b.
        return b ^ self * self.hard_conv(a ^ b)
    def cond_swap(self, a, b, t=None):
        # Swap a and b iff self == 1; optionally convert the results to
        # type t.
        prod = self * self.hard_conv(a ^ b)
        res = a ^ prod, b ^ prod
        if t is None:
            return res
        else:
            return tuple(t.conv(r) for r in res)
    def bit_xor(self, other):
        # In GF(2^n), XOR of bits is plain field addition.
        return self ^ other
class _structure(object):
    """Mixin adding container factories (MemValue/Array/Matrix) to types."""
    # Wrap a converted value in a memory-backed cell of this type.
    MemValue = classmethod(lambda cls, value: MemValue(cls.conv(value)))
    @classmethod
    def Array(cls, size, *args, **kwargs):
        return Array(size, cls, *args, **kwargs)
    @classmethod
    def Matrix(cls, rows, columns, *args, **kwargs):
        return Matrix(rows, columns, cls, *args, **kwargs)
    @classmethod
    def row_matrix_mul(cls, row, matrix, res_params=None):
        # Vector-matrix product; reduction is delayed until after the
        # sum where the element type supports mul_no_reduce.
        return sum(row[k].mul_no_reduce(matrix[k].get_vector(),
                                        res_params) \
                   for k in range(len(row))).reduce_after_mul()
class _register(Tape.Register, _number, _structure):
    """Base class of all compile-time register values; operations emit
    virtual-machine instructions into the current tape."""
    @staticmethod
    def n_elements():
        return 1
    @vectorized_classmethod
    def conv(cls, val):
        """Convert ``val`` to this register type, recursing into
        containers; a MemValue is dereferenced first."""
        if isinstance(val, MemValue):
            val = val.read()
        if isinstance(val, cls):
            return val
        elif not isinstance(val, _register):
            try:
                # Convert element-wise, preserving the container type.
                return type(val)(cls.conv(v) for v in val)
            except TypeError:
                pass
            except CompilerError:
                pass
        return cls(val)
    @vectorized_classmethod
    @read_mem_value
    def hard_conv(cls, val):
        """Like conv(), but requires an exact type match to avoid a copy
        and lets the value convert itself via hard_conv_me()."""
        if type(val) == cls:
            return val
        elif not isinstance(val, _register):
            try:
                return val.hard_conv_me(cls)
            except AttributeError:
                try:
                    return type(val)(cls.hard_conv(v) for v in val)
                except TypeError:
                    pass
        return cls(val)
    @vectorized_classmethod
    @set_instruction_type
    def _load_mem(cls, address, direct_inst, indirect_inst):
        # Dispatch to the direct (constant address) or indirect
        # (register address) load instruction.
        res = cls()
        if isinstance(address, _register):
            indirect_inst(res, cls._expand_address(address,
                                                   get_global_vector_size()))
        else:
            direct_inst(res, address)
        return res
    @staticmethod
    def _expand_address(address, size):
        # A single address used at vector size > 1 is expanded to the
        # consecutive addresses address, address+1, ..., address+size-1.
        address = regint.conv(address)
        if size > 1 and address.size == 1:
            res = regint(size=size)
            for i in range(size):
                movint(res[i], address + regint(i, size=1))
            return res
        else:
            return address
    @set_instruction_type
    def _store_in_mem(self, address, direct_inst, indirect_inst):
        # Mirror image of _load_mem for stores.
        if isinstance(address, _register):
            indirect_inst(self, self._expand_address(address, self.size))
        else:
            direct_inst(self, address)
    @classmethod
    def prep_res(cls, other):
        return cls()
    @staticmethod
    def bit_compose(bits):
        # Little-endian composition: bits[i] contributes 2**i.
        return sum(b << i for i,b in enumerate(bits))
    @classmethod
    def malloc(cls, size):
        return program.malloc(size, cls)
    @set_instruction_type
    def __init__(self, reg_type, val, size):
        # A tuple/list initializer fixes the vector size to its length.
        if isinstance(val, (tuple, list)):
            size = len(val)
        super(_register, self).__init__(reg_type, program.curr_tape, size=size)
        if isinstance(val, int):
            self.load_int(val)
        elif isinstance(val, (tuple, list)):
            # Element-wise initialization of the vector components.
            for i, x in enumerate(val):
                self.mov(self[i], type(self)(x, size=1))
        elif val is not None:
            self.load_other(val)
    def sizeof(self):
        return self.size
    def extend(self, n):
        return self
    def expand_to_vector(self, size=None):
        """Broadcast a size-1 register to a vector of the given (or
        global) size by repeated moves."""
        if size is None:
            size = get_global_vector_size()
        if self.size == size:
            return self
        assert self.size == 1
        res = type(self)(size=size)
        for i in range(size):
            # NOTE(review): uses movs (secret move) unconditionally —
            # presumably only called on secret registers; confirm.
            movs(res[i], self)
        return res
class _clear(_register):
    """Base class for clear (public) register types; arithmetic is done
    via clear-only or clear-immediate instructions."""
    __slots__ = []
    mov = staticmethod(movc)
    @vectorized_classmethod
    @set_instruction_type
    def protect_memory(cls, start, end):
        program.curr_tape.start_new_basicblock(name='protect-memory')
        protectmemc(regint(start), regint(end))
    @set_instruction_type
    @vectorize
    def load_other(self, val):
        # Same-type values are moved directly; everything else goes
        # through integer conversion.
        if isinstance(val, type(self)):
            movc(self, val)
        else:
            self.convert_from(val)
    @vectorize
    @read_mem_value
    def convert_from(self, val):
        if not isinstance(val, regint):
            val = regint(val)
        convint(self, val)
    @set_instruction_type
    @vectorize
    def print_reg(self, comment=''):
        print_reg(self, comment)
    @set_instruction_type
    @vectorize
    def print_reg_plain(self):
        print_reg_plain(self)
    @set_instruction_type
    @vectorize
    def raw_output(self):
        raw_output(self)
    @set_instruction_type
    @read_mem_value
    @vectorize
    def clear_op(self, other, c_inst, ci_inst, reverse=False):
        """Emit a binary operation: register-register via c_inst, or the
        immediate form ci_inst when ``other`` is a small public int."""
        cls = self.__class__
        res = self.prep_res(other)
        if isinstance(other, cls):
            c_inst(res, self, other)
        elif isinstance(other, int):
            if self.in_immediate_range(other):
                ci_inst(res, self, other)
            else:
                # Immediate too large: load it into a register first.
                if reverse:
                    c_inst(res, cls(other), self)
                else:
                    c_inst(res, self, cls(other))
        else:
            return NotImplemented
        return res
    @set_instruction_type
    @read_mem_value
    @vectorize
    def coerce_op(self, other, inst, reverse=False):
        # Like clear_op but without an immediate variant: ints are
        # always loaded into a register.
        cls = self.__class__
        res = cls()
        if isinstance(other, int):
            other = cls(other)
        elif not isinstance(other, cls):
            return NotImplemented
        if reverse:
            inst(res, other, self)
        else:
            inst(res, self, other)
        return res
    def add(self, other):
        return self.clear_op(other, addc, addci)
    def mul(self, other):
        return self.clear_op(other, mulc, mulci)
    def __sub__(self, other):
        return self.clear_op(other, subc, subci)
    def __rsub__(self, other):
        return self.clear_op(other, subc, subcfi, True)
    def __truediv__(self, other):
        return self.clear_op(other, divc, divci)
    def __rtruediv__(self, other):
        return self.coerce_op(other, divc, True)
    def __eq__(self, other):
        # Comparison is delegated to the integer register type.
        if isinstance(other, (_clear,int)):
            return regint(self) == other
        else:
            return NotImplemented
    def __ne__(self, other):
        return 1 - (self == other)
    def __and__(self, other):
        return self.clear_op(other, andc, andci)
    def __xor__(self, other):
        return self.clear_op(other, xorc, xorci)
    def __or__(self, other):
        return self.clear_op(other, orc, orci)
    __rand__ = __and__
    __rxor__ = __xor__
    __ror__ = __or__
    def reveal(self):
        # Already public, so revealing is the identity.
        return self
class cint(_clear, _int):
    """ Clear mod p integer type. """
    __slots__ = []
    instruction_type = 'modp'
    reg_type = 'c'
    @vectorized_classmethod
    def read_from_socket(cls, client_id, n=1):
        """Receive n clear values from the client socket; a single value
        is returned unwrapped."""
        res = [cls() for i in range(n)]
        readsocketc(client_id, *res)
        if n == 1:
            return res[0]
        else:
            return res
    # NOTE(review): this instance method is immediately shadowed by the
    # classmethod of the same name below, so it is unreachable.
    @vectorize
    def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
        writesocketc(client_id, message_type, self)
    @vectorized_classmethod
    def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
        """ Send a list of modp integers to socket """
        writesocketc(client_id, message_type, *values)
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, ldmc, ldmci)
    def store_in_mem(self, address):
        self._store_in_mem(address, stmc, stmci)
    @staticmethod
    def in_immediate_range(value):
        # Immediates are signed 32-bit values.
        return value < 2**31 and value >= -2**31
    def __init__(self, val=None, size=None):
        super(cint, self).__init__('c', val=val, size=size)
    @vectorize
    def load_int(self, val):
        """Load a public integer constant, splitting values outside the
        immediate range into base-(2^31 - 1) chunks."""
        if val:
            # +1 for sign
            program.curr_tape.require_bit_length(1 + int(math.ceil(math.log(abs(val)))))
        if self.in_immediate_range(val):
            ldi(self, val)
        else:
            max = 2**31 - 1
            sign = abs(val) // val
            val = abs(val)
            chunks = []
            while val:
                mod = val % max
                val = (val - mod) // max
                chunks.append(mod)
            # Horner evaluation of the chunks; the final addition is
            # fused into an addci targeting self.
            sum = cint(sign * chunks.pop())
            for i,chunk in enumerate(reversed(chunks)):
                sum *= max
                if i == len(chunks) - 1:
                    addci(self, sum, sign * chunk)
                elif chunk:
                    sum += sign * chunk
    def to_regint(self, n_bits=None, dest=None):
        dest = regint() if dest is None else dest
        convmodp(dest, self, bitlength=n_bits)
        return dest
    def __mod__(self, other):
        return self.clear_op(other, modc, modci)
    def __rmod__(self, other):
        return self.coerce_op(other, modc, True)
    def __lt__(self, other):
        # Order comparisons are delegated to the integer register type.
        if isinstance(other, (type(self),int)):
            return regint(self) < other
        else:
            return NotImplemented
    def __gt__(self, other):
        if isinstance(other, (type(self),int)):
            return regint(self) > other
        else:
            return NotImplemented
    def __le__(self, other):
        return 1 - (self > other)
    def __ge__(self, other):
        return 1 - (self < other)
    @vectorize
    def __eq__(self, other):
        """Equality over the full bit length, compared in 64-bit limbs;
        the >>= only rebinds the local names, not the registers."""
        if not isinstance(other, (_clear, int)):
            return NotImplemented
        res = 1
        remaining = program.bit_length
        while remaining > 0:
            if isinstance(other, cint):
                o = other.to_regint(min(remaining, 64))
            else:
                o = other % 2 ** 64
            res *= (self.to_regint(min(remaining, 64)) == o)
            self >>= 64
            other >>= 64
            remaining -= 64
        return res
    def __lshift__(self, other):
        return self.clear_op(other, shlc, shlci)
    def __rshift__(self, other):
        return self.clear_op(other, shrc, shrci)
    def __neg__(self):
        return 0 - self
    def __abs__(self):
        return (self >= 0).if_else(self, -self)
    @vectorize
    def __invert__(self):
        res = cint()
        notc(res, self, program.bit_length)
        return res
    def __rpow__(self, base):
        # Only 2**x is supported, via a left shift.
        if base == 2:
            return 1 << self
        else:
            return NotImplemented
    @vectorize
    def __rlshift__(self, other):
        return cint(other) << self
    @vectorize
    def __rrshift__(self, other):
        return cint(other) >> self
    @read_mem_value
    def mod2m(self, other, bit_length=None, signed=None):
        return self % 2**other
    @read_mem_value
    def right_shift(self, other, bit_length=None):
        return self >> other
    @read_mem_value
    def greater_than(self, other, bit_length=None):
        return self > other
    def bit_decompose(self, bit_length=None):
        """Return the little-endian bits of this value."""
        if bit_length == 0:
            return []
        bit_length = bit_length or program.bit_length
        return floatingpoint.bits(self, bit_length)
    def legendre(self):
        # Legendre symbol modulo the prime.
        res = cint()
        legendrec(res, self)
        return res
    def digest(self, num_bytes):
        res = cint()
        digestc(res, self, num_bytes)
        return res
    def print_if(self, string):
        # Print the string only if this value is non-zero.
        cond_print_str(self, string)
class cgf2n(_clear, _gf2n):
    """Clear GF(2^n) register type."""
    __slots__ = []
    instruction_type = 'gf2n'
    reg_type = 'cg'
    @classmethod
    def bit_compose(cls, bits, step=None):
        """Compose bits at positions 0, step, 2*step, ... into one value."""
        size = bits[0].size
        res = cls(size=size)
        vgbitcom(size, res, step or 1, *bits)
        return res
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, gldmc, gldmci)
    def store_in_mem(self, address):
        self._store_in_mem(address, gstmc, gstmci)
    @staticmethod
    def in_immediate_range(value):
        # Immediates are unsigned 32-bit values.
        return value < 2**32 and value >= 0
    def __init__(self, val=None, size=None):
        super(cgf2n, self).__init__('cg', val=val, size=size)
    @vectorize
    def load_int(self, val):
        """Load a public constant, assembling values beyond the 32-bit
        immediate range from 32-bit chunks via shifts."""
        if val < 0:
            raise CompilerError('Negative GF2n immediate')
        if self.in_immediate_range(val):
            gldi(self, val)
        else:
            chunks = []
            while val:
                mod = val % 2**32
                val >>= 32
                chunks.append(mod)
            # Shift-and-add from the most significant chunk down; the
            # final addition is fused into gaddci targeting self.
            sum = cgf2n(chunks.pop())
            for i,chunk in enumerate(reversed(chunks)):
                sum <<= 32
                if i == len(chunks) - 1:
                    gaddci(self, sum, chunk)
                elif chunk:
                    sum += chunk
    def __mul__(self, other):
        return super(cgf2n, self).__mul__(other)
    def __neg__(self):
        # In characteristic 2, negation is the identity.
        return self
    @vectorize
    def __invert__(self):
        res = cgf2n()
        gnotc(res, self)
        return res
    @vectorize
    def __lshift__(self, other):
        # Shifts are only defined for public int distances.
        if isinstance(other, int):
            res = cgf2n()
            gshlci(res, self, other)
            return res
        else:
            return NotImplemented
    @vectorize
    def __rshift__(self, other):
        if isinstance(other, int):
            res = cgf2n()
            gshrci(res, self, other)
            return res
        else:
            return NotImplemented
    @vectorize
    def bit_decompose(self, bit_length=None, step=None):
        """Decompose into bit_length // step values taken every ``step``
        bit positions."""
        bit_length = bit_length or program.galois_length
        step = step or 1
        res = [type(self)() for _ in range(bit_length // step)]
        gbitdec(self, step, *res)
        return res
class regint(_register, _int):
    """Clear 64-bit integer register type."""
    __slots__ = []
    reg_type = 'ci'
    instruction_type = 'modp'
    mov = staticmethod(movint)
    @classmethod
    def protect_memory(cls, start, end):
        program.curr_tape.start_new_basicblock(name='protect-memory')
        protectmemint(regint(start), regint(end))
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, ldmint, ldminti)
    def store_in_mem(self, address):
        self._store_in_mem(address, stmint, stminti)
    @vectorized_classmethod
    def pop(cls):
        # Pop from the virtual machine's integer stack.
        res = cls()
        popint(res)
        return res
    @vectorized_classmethod
    def push(cls, value):
        pushint(cls.conv(value))
    @vectorized_classmethod
    def get_random(cls, bit_length):
        """ Public insecure randomness """
        if isinstance(bit_length, int):
            bit_length = regint(bit_length)
        res = cls()
        rand(res, bit_length)
        return res
    @vectorized_classmethod
    def read_from_socket(cls, client_id, n=1):
        """ Receive n register values from socket """
        res = [cls() for i in range(n)]
        readsocketint(client_id, *res)
        if n == 1:
            return res[0]
        else:
            return res
    @vectorized_classmethod
    def read_client_public_key(cls, client_id):
        """ Receive 8 register values from socket containing client public key."""
        res = [cls() for i in range(8)]
        readclientpublickey(client_id, *res)
        return res
    @vectorized_classmethod
    def init_secure_socket(cls, client_id, w1, w2, w3, w4, w5, w6, w7, w8):
        """ Use 8 register values containing client public key."""
        initsecuresocket(client_id, w1, w2, w3, w4, w5, w6, w7, w8)
    @vectorized_classmethod
    def resp_secure_socket(cls, client_id, w1, w2, w3, w4, w5, w6, w7, w8):
        """ Receive 8 register values from socket containing client public key."""
        respsecuresocket(client_id, w1, w2, w3, w4, w5, w6, w7, w8)
    # NOTE(review): this instance method is immediately shadowed by the
    # classmethod of the same name below, so it is unreachable.
    @vectorize
    def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
        writesocketint(client_id, message_type, self)
    @vectorized_classmethod
    def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
        """ Send a list of integers to socket """
        writesocketint(client_id, message_type, *values)
    @vectorize_init
    def __init__(self, val=None, size=None):
        super(regint, self).__init__(self.reg_type, val=val, size=size)
    def load_int(self, val):
        """Load a public constant; 64-bit values beyond the immediate
        range are split into a 32-bit upper and lower half."""
        if cint.in_immediate_range(val):
            ldint(self, val)
        else:
            lower = val % 2**32
            upper = val >> 32
            # Keep the lower half in signed immediate range.
            if lower >= 2**31:
                lower -= 2**32
                upper += 1
            # regint(2**16)**2 == 2**32 while staying within immediates.
            addint(self, regint(upper) * regint(2**16)**2, regint(lower))
    @read_mem_value
    def load_other(self, val):
        if isinstance(val, cgf2n):
            gconvgf2n(self, val)
        elif isinstance(val, regint):
            # Copy via addition with zero.
            addint(self, val, regint(0))
        else:
            try:
                val.to_regint(dest=self)
            except AttributeError:
                raise CompilerError("Cannot convert '%s' to integer" % \
                                    type(val))
    @vectorize
    @read_mem_value
    def int_op(self, other, inst, reverse=False):
        """Emit the binary integer instruction ``inst`` after converting
        the operand; returns NotImplemented on conversion failure."""
        try:
            other = self.conv(other)
        except:
            return NotImplemented
        res = regint()
        if reverse:
            inst(res, other, self)
        else:
            inst(res, self, other)
        return res
    def add(self, other):
        return self.int_op(other, addint)
    def __sub__(self, other):
        return self.int_op(other, subint)
    def __rsub__(self, other):
        return self.int_op(other, subint, True)
    def mul(self, other):
        return self.int_op(other, mulint)
    def __neg__(self):
        return 0 - self
    def __floordiv__(self, other):
        return self.int_op(other, divint)
    def __rfloordiv__(self, other):
        return self.int_op(other, divint, True)
    __truediv__ = __floordiv__
    __rtruediv__ = __rfloordiv__
    def __mod__(self, other):
        return self - (self / other) * other
    def __rmod__(self, other):
        return regint(other) % self
    def __rpow__(self, other):
        return other**cint(self)
    def __eq__(self, other):
        return self.int_op(other, eqc)
    def __ne__(self, other):
        return 1 - (self == other)
    def __lt__(self, other):
        return self.int_op(other, ltc)
    def __gt__(self, other):
        return self.int_op(other, gtc)
    def __le__(self, other):
        return 1 - (self > other)
    def __ge__(self, other):
        return 1 - (self < other)
    def __lshift__(self, other):
        # Public shift distances become multiplications/divisions by a
        # power of two; otherwise delegate to cint shifting.
        if isinstance(other, int):
            return self * 2**other
        else:
            return regint(cint(self) << other)
    def __rshift__(self, other):
        if isinstance(other, int):
            return self / 2**other
        else:
            return regint(cint(self) >> other)
    def __rlshift__(self, other):
        return regint(other << cint(self))
    def __rrshift__(self, other):
        return regint(other >> cint(self))
    def __and__(self, other):
        # Bitwise operations are performed in the cint domain.
        return regint(other & cint(self))
    def __or__(self, other):
        return regint(other | cint(self))
    def __xor__(self, other):
        return regint(other ^ cint(self))
    __rand__ = __and__
    __ror__ = __or__
    __rxor__ = __xor__
    def mod2m(self, *args, **kwargs):
        return cint(self).mod2m(*args, **kwargs)
    @vectorize
    def bit_decompose(self, bit_length=None):
        """Return the little-endian bits; limited to 64 bits."""
        bit_length = bit_length or min(64, program.bit_length)
        if bit_length > 64:
            raise CompilerError('too many bits demanded')
        res = [regint() for i in range(bit_length)]
        bitdecint(self, *res)
        return res
    @staticmethod
    def bit_compose(bits):
        # Horner evaluation from the most significant bit down.
        two = regint(2)
        res = 0
        for bit in reversed(bits):
            res *= two
            res += bit
        return res
    def reveal(self):
        # Already public, so revealing is the identity.
        return self
    def print_reg_plain(self):
        print_int(self)
    def print_if(self, string):
        cint(self).print_if(string)
class localint(object):
    """Local integer that must be prevented from leaking into the secure
    computation; wraps a regint internally. Comparisons return localint
    so their results stay local as well."""
    def __init__(self, value=None):
        self._v = regint(value)
        self.size = 1
    def output(self):
        self._v.print_reg_plain()
    def __lt__(self, other):
        return localint(self._v < other)
    def __le__(self, other):
        return localint(self._v <= other)
    def __gt__(self, other):
        return localint(self._v > other)
    def __ge__(self, other):
        return localint(self._v >= other)
    def __eq__(self, other):
        return localint(self._v == other)
    def __ne__(self, other):
        return localint(self._v != other)
class _secret(_register):
    """Base class for secret-shared register types; operations emit the
    corresponding secret instructions (preprocessing, input, openings)."""
    __slots__ = []
    mov = staticmethod(movs)
    PreOR = staticmethod(lambda l: floatingpoint.PreORC(l))
    PreOp = staticmethod(lambda op, l: floatingpoint.PreOpL(op, l))
    @vectorized_classmethod
    @set_instruction_type
    def protect_memory(cls, start, end):
        program.curr_tape.start_new_basicblock(name='protect-memory')
        protectmems(regint(start), regint(end))
    @vectorized_classmethod
    @set_instruction_type
    def get_input_from(cls, player):
        """ Secret input """
        res = cls()
        asm_input(res, player)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_triple(cls):
        """ Secret random triple according to security model """
        res = (cls(), cls(), cls())
        triple(*res)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_bit(cls):
        """ Secret random bit according to security model """
        res = cls()
        bit(res)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_square(cls):
        """ Secret random square according to security model """
        res = (cls(), cls())
        square(*res)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_inverse(cls):
        """ Secret random inverse according to security model """
        res = (cls(), cls())
        inverse(*res)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_input_mask_for(cls, player):
        # Random mask used to blind an input of the given player.
        res = cls()
        inputmask(res, player)
        return res
    @classmethod
    @set_instruction_type
    def dot_product(cls, x, y):
        """Inner product of two vectors via a single dotprods call."""
        x = list(x)
        set_global_vector_size(x[0].size)
        res = cls()
        dotprods(res, x, y)
        reset_global_vector_size()
        return res
    @classmethod
    @set_instruction_type
    def row_matrix_mul(cls, row, matrix, res_params=None):
        """Row vector times matrix: one dot product per output column,
        batched into a single dotprods instruction."""
        assert len(row) == len(matrix)
        size = len(matrix[0])
        res = cls(size=size)
        dotprods(*sum(([res[j], row, [matrix[k][j] for k in range(len(row))]]
                       for j in range(size)), []))
        return res
    @classmethod
    @set_instruction_type
    def matrix_mul(cls, A, B, n, res_params=None):
        """Multiply flattened matrices A (rows x n) and B (n x cols),
        emitting one fused dotprods instruction for all entries."""
        assert len(A) % n == 0
        assert len(B) % n == 0
        size = len(A) * len(B) // n**2
        res = cls(size=size)
        # n_rows is unused below; kept for symmetry with n_cols.
        n_rows = len(A) // n
        n_cols = len(B) // n
        dotprods(*sum(([res[j], [A[j // n_cols * n + k] for k in range(n)],
                        [B[k * n_cols + j % n_cols] for k in range(n)]]
                       for j in range(size)), []))
        return res
    def __init__(self, reg_type, val=None, size=None):
        # A clear-type initializer fixes the vector size.
        if isinstance(val, self.clear_type):
            size = val.size
        super(_secret, self).__init__(reg_type, val=val, size=size)
    @set_instruction_type
    @vectorize
    def load_int(self, val):
        if self.clear_type.in_immediate_range(val):
            ldsi(self, val)
        else:
            self.load_clear(self.clear_type(val))
    @vectorize
    def load_clear(self, val):
        # Share of a public value: zero share plus the clear value.
        addm(self, self.__class__(0), val)
    @set_instruction_type
    @read_mem_value
    @vectorize
    def load_other(self, val):
        if isinstance(val, self.clear_type):
            self.load_clear(val)
        elif isinstance(val, type(self)):
            movs(self, val)
        else:
            self.load_clear(self.clear_type(val))
    def _new_by_number(self, i):
        # Wrap an existing register number in a fresh size-1 register.
        res = type(self)(size=1)
        res.i = i
        res.program = self.program
        return res
    @set_instruction_type
    @read_mem_value
    @vectorize
    def secret_op(self, other, s_inst, m_inst, si_inst, reverse=False):
        """Emit a binary operation choosing between the secret-secret
        (s_inst), secret-clear (m_inst) and secret-immediate (si_inst)
        instruction variants."""
        cls = self.__class__
        res = self.prep_res(other)
        if isinstance(other, regint):
            other = res.clear_type(other)
        if isinstance(other, cls):
            s_inst(res, self, other)
        elif isinstance(other, res.clear_type):
            if reverse:
                m_inst(res, other, self)
            else:
                m_inst(res, self, other)
        elif isinstance(other, int):
            if self.clear_type.in_immediate_range(other):
                si_inst(res, self, other)
            else:
                # Immediate too large: load it into a clear register.
                if reverse:
                    m_inst(res, res.clear_type(other), self)
                else:
                    m_inst(res, self, res.clear_type(other))
        else:
            return NotImplemented
        return res
    def add(self, other):
        return self.secret_op(other, adds, addm, addsi)
    @set_instruction_type
    def mul(self, other):
        # Scalar x vector multiplication has its own instruction.
        if isinstance(other, _secret) and max(self.size, other.size) > 1 \
                and min(self.size, other.size) == 1:
            x, y = (other, self) if self.size < other.size else (self, other)
            res = type(self)(size=x.size)
            mulrs(res, x, y)
            return res
        return self.secret_op(other, muls, mulm, mulsi)
    def __sub__(self, other):
        return self.secret_op(other, subs, subml, subsi)
    def __rsub__(self, other):
        return self.secret_op(other, subs, submr, subsfi, True)
    @vectorize
    def __truediv__(self, other):
        return self * (self.clear_type(1) / other)
    @vectorize
    def __rtruediv__(self, other):
        # Division by a secret value via a random inverse pair (a, 1/a):
        # other / self = other * a / reveal(a * self).
        a,b = self.get_random_inverse()
        return other * a / (a * self).reveal()
    @set_instruction_type
    @vectorize
    def square(self):
        res = self.__class__()
        sqrs(res, self)
        return res
    @set_instruction_type
    @vectorize
    def reveal(self):
        """Open the secret value to all players."""
        res = self.clear_type()
        asm_open(res, self)
        return res
    @set_instruction_type
    def reveal_to(self, player):
        """Open the secret value to a single player via private output."""
        masked = self.__class__()
        startprivateoutput(masked, self, player)
        stopprivateoutput(masked.reveal(), player)
class sint(_secret, _int):
" Shared mod p integer type. """
__slots__ = []
instruction_type = 'modp'
clear_type = cint
reg_type = 's'
PreOp = staticmethod(floatingpoint.PreOpL)
PreOR = staticmethod(floatingpoint.PreOR)
get_type = staticmethod(lambda n: sint)
@vectorized_classmethod
def get_random_int(cls, bits):
""" Secret random n-bit number according to security model """
res = sint()
comparison.PRandInt(res, bits)
return res
@vectorized_classmethod
def get_input_from(cls, player):
""" Secret input """
res = cls()
inputmixed('int', res, player)
return res
@classmethod
def get_raw_input_from(cls, player):
res = cls()
startinput(player, 1)
stopinput(player, res)
return res
@classmethod
def receive_from_client(cls, n, client_id, message_type=ClientMessageType.NoType):
""" Securely obtain shares of n values input by a client """
# send shares of a triple to client
triples = list(itertools.chain(*(sint.get_random_triple() for i in range(n))))
sint.write_shares_to_socket(client_id, triples, message_type)
received = cint.read_from_socket(client_id, n)
y = [0] * n
for i in range(n):
y[i] = received[i] - triples[i * 3]
return y
@vectorized_classmethod
def read_from_socket(cls, client_id, n=1):
""" Receive n shares and MAC shares from socket """
res = [cls() for i in range(n)]
readsockets(client_id, *res)
if n == 1:
return res[0]
else:
return res
@vectorize
def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
""" Send share and MAC share to socket """
writesockets(client_id, message_type, self)
@vectorized_classmethod
def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
""" Send a list of shares and MAC shares to socket """
writesockets(client_id, message_type, *values)
@vectorize
def write_share_to_socket(self, client_id, message_type=ClientMessageType.NoType):
""" Send only share to socket """
writesocketshare(client_id, message_type, self)
@vectorized_classmethod
def write_shares_to_socket(cls, client_id, values, message_type=ClientMessageType.NoType, include_macs=False):
""" Send shares of a list of values to a specified client socket """
if include_macs:
writesockets(client_id, message_type, *values)
else:
writesocketshare(client_id, message_type, *values)
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
return cls._load_mem(address, ldms, ldmsi)
def store_in_mem(self, address):
self._store_in_mem(address, stms, stmsi)
def __init__(self, val=None, size=None):
super(sint, self).__init__('s', val=val, size=size)
@vectorize
def __neg__(self):
return 0 - self
@vectorize
def __abs__(self):
return (self >= 0).if_else(self, -self)
@read_mem_value
@vectorize
def __lt__(self, other, bit_length=None, security=None):
res = sint()
comparison.LTZ(res, self - other,
(bit_length or program.bit_length) + 1,
security or program.security)
return res
@read_mem_value
@vectorize
def __gt__(self, other, bit_length=None, security=None):
res = sint()
comparison.LTZ(res, other - self,
(bit_length or program.bit_length) + 1,
security or program.security)
return res
def __le__(self, other, bit_length=None, security=None):
return 1 - self.greater_than(other, bit_length, security)
def __ge__(self, other, bit_length=None, security=None):
return 1 - self.less_than(other, bit_length, security)
@read_mem_value
@vectorize
def __eq__(self, other, bit_length=None, security=None):
return floatingpoint.EQZ(self - other, bit_length or program.bit_length,
security or program.security)
def __ne__(self, other, bit_length=None, security=None):
return 1 - self.equal(other, bit_length, security)
less_than = __lt__
greater_than = __gt__
less_equal = __le__
greater_equal = __ge__
equal = __eq__
not_equal = __ne__
@vectorize
def __mod__(self, modulus):
if isinstance(modulus, int):
l = math.log(modulus, 2)
if 2**int(round(l)) == modulus:
return self.mod2m(int(l))
raise NotImplementedError('Modulo only implemented for powers of two.')
@read_mem_value
def mod2m(self, m, bit_length=None, security=None, signed=True):
bit_length = bit_length or program.bit_length
security = security or program.security
if isinstance(m, int):
if m == 0:
return 0
if m >= bit_length:
return self
res = sint()
comparison.Mod2m(res, self, bit_length, m, security, signed)
else:
res, pow2 = floatingpoint.Trunc(self, bit_length, m, security, True)
return res
@vectorize
def __rpow__(self, base):
if base == 2:
return self.pow2()
else:
return NotImplemented
@vectorize
def pow2(self, bit_length=None, security=None):
return floatingpoint.Pow2(self, bit_length or program.bit_length, \
security or program.security)
def __lshift__(self, other, bit_length=None, security=None):
return self * util.pow2_value(other, bit_length, security)
@vectorize
@read_mem_value
def __rshift__(self, other, bit_length=None, security=None):
bit_length = bit_length or program.bit_length
security = security or program.security
if isinstance(other, int):
if other == 0:
return self
res = sint()
comparison.Trunc(res, self, bit_length, other, security, True)
return res
elif isinstance(other, sint):
return floatingpoint.Trunc(self, bit_length, other, security)
else:
return floatingpoint.Trunc(self, bit_length, sint(other), security)
left_shift = __lshift__
right_shift = __rshift__
def __rlshift__(self, other):
return other * 2**self
@vectorize
def __rrshift__(self, other):
return floatingpoint.Trunc(other, program.bit_length, self, program.security)
def bit_decompose(self, bit_length=None, security=None):
if bit_length == 0:
return []
bit_length = bit_length or program.bit_length
security = security or program.security
return floatingpoint.BitDec(self, bit_length, bit_length, security)
def TruncMul(self, other, k, m, kappa=None, nearest=False):
return (self * other).round(k, m, kappa, nearest, signed=True)
    def TruncPr(self, k, m, kappa=None, signed=True):
        # probabilistic truncation of a k-bit value by m bits
        return floatingpoint.TruncPr(self, k, m, kappa, signed=signed)
    @vectorize
    def round(self, k, m, kappa=None, nearest=False, signed=False):
        """Truncate a k-bit value by m bits.

        m may be secret; rounding to nearest is only implemented for
        public m.
        """
        kappa = kappa or program.security
        secret = isinstance(m, sint)
        if nearest:
            if secret:
                raise NotImplementedError()
            return comparison.TruncRoundNearest(self, k, m, kappa,
                                                signed=signed)
        else:
            if secret:
                return floatingpoint.Trunc(self, k, m, kappa)
            return self.TruncPr(k, m, kappa, signed=signed)
    def Norm(self, k, f, kappa=None, simplex_flag=False):
        # normalization helper, delegated to the library module
        return library.Norm(self, k, f, kappa, simplex_flag)
    @vectorize
    def int_div(self, other, bit_length=None, security=None):
        """Secret integer division of self by other."""
        k = bit_length or program.bit_length
        kappa = security or program.security
        tmp = library.IntDiv(self, other, k, kappa)
        res = type(self)()
        # IntDiv yields a 2k-bit fixed-point quotient; truncate k bits
        comparison.Trunc(res, tmp, 2 * k, k, kappa, True)
        return res
    @staticmethod
    def two_power(n):
        # 2^n for public or secret n, delegated to the floating-point module
        return floatingpoint.two_power(n)
class sgf2n(_secret, _gf2n):
    """Secret GF(2^n) value.

    Arithmetic is in characteristic 2, so addition doubles as XOR and
    negation is the identity.
    """
    __slots__ = []
    instruction_type = 'gf2n'
    clear_type = cgf2n
    reg_type = 'sg'
    @classmethod
    def get_type(cls, length):
        # there is only one GF(2^n) secret type regardless of length
        return cls
    @classmethod
    def get_raw_input_from(cls, player):
        """Read one raw input value from the given player."""
        res = cls()
        gstartinput(player, 1)
        gstopinput(player, res)
        return res
    def add(self, other):
        # let sgf2nint handle mixed operations via reflected operators
        if isinstance(other, sgf2nint):
            return NotImplemented
        else:
            return super(sgf2n, self).add(other)
    def mul(self, other):
        # let sgf2nint handle mixed operations via reflected operators
        if isinstance(other, (sgf2nint)):
            return NotImplemented
        else:
            return super(sgf2n, self).mul(other)
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, gldms, gldmsi)
    def store_in_mem(self, address):
        self._store_in_mem(address, gstms, gstmsi)
    def __init__(self, val=None, size=None):
        super(sgf2n, self).__init__('sg', val=val, size=size)
    def __neg__(self):
        # -x == x in characteristic 2
        return self
    @vectorize
    def __invert__(self):
        # bitwise NOT = XOR with the all-ones element
        return self ^ cgf2n(2**program.galois_length - 1)
    def __xor__(self, other):
        # XOR is addition in GF(2^n); 'is 0' deliberately matches only the
        # Python int literal 0
        if other is 0:
            return self
        else:
            return super(sgf2n, self).add(other)
    __rxor__ = __xor__
    @vectorize
    def __and__(self, other):
        """Bitwise AND via bit decomposition (one secret multiplication per bit)."""
        if isinstance(other, int):
            other_bits = [(other >> i) & 1 \
                              for i in range(program.galois_length)]
        else:
            other_bits = other.bit_decompose()
        self_bits = self.bit_decompose()
        return sum((x * y) << i \
                       for i,(x,y) in enumerate(zip(self_bits, other_bits)))
    __rand__ = __and__
    @vectorize
    def __lshift__(self, other):
        # shifting is multiplication by a power of the generator
        return self * cgf2n(1 << other)
    @vectorize
    def right_shift(self, other, bit_length=None):
        # drop the low 'other' bits and recompose
        bits = self.bit_decompose(bit_length)
        return sum(b << i for i,b in enumerate(bits[other:]))
    def equal(self, other, bit_length=None, expand=1):
        """Equality test: AND of the complements of the difference bits."""
        bits = [1 - bit for bit in (self - other).bit_decompose(bit_length)][::expand]
        while len(bits) > 1:
            bits.insert(0, bits.pop() * bits.pop())
        return bits[0]
    def not_equal(self, other, bit_length=None):
        return 1 - self.equal(other, bit_length)
    __eq__ = equal
    __ne__ = not_equal
    @vectorize
    def bit_decompose(self, bit_length=None, step=1):
        """Decompose by masking with random bits, revealing, and unmasking."""
        if bit_length == 0:
            return []
        bit_length = bit_length or program.galois_length
        random_bits = [self.get_random_bit() \
                           for i in range(0, bit_length, step)]
        one = cgf2n(1)
        masked = sum([b * (one << (i * step)) for i,b in enumerate(random_bits)], self).reveal()
        masked_bits = masked.bit_decompose(bit_length,step=step)
        return [m + r for m,r in zip(masked_bits, random_bits)]
    @vectorize
    def bit_decompose_embedding(self):
        # decomposition for the 8-bit embedding at fixed 5-bit spacing
        random_bits = [self.get_random_bit() \
                           for i in range(8)]
        one = cgf2n(1)
        wanted_positions = [0, 5, 10, 15, 20, 25, 30, 35]
        masked = sum([b * (one << wanted_positions[i]) for i,b in enumerate(random_bits)], self).reveal()
        return [self.clear_type((masked >> wanted_positions[i]) & one) + r for i,r in enumerate(random_bits)]
# the secret types serve as their own bit, basic and default types
for t in (sint, sgf2n):
    t.bit_type = t
    t.basic_type = t
    t.default_type = t
class _bitint(object):
    """Mixin implementing integer arithmetic on bit-decomposed values.

    Subclasses provide n_bits, bin_type and the underlying register type;
    all circuits here operate on little-endian lists of (secret) bits.
    The class flags select the adder circuit (round/communication
    trade-off).

    Fixes relative to the previous revision:
    - carry_lookahead_adder iterated zip(a, b) while popping from a and b,
      which skipped pairs and silently dropped bits whenever more than one
      leading bit pair was a public constant; it now iterates a snapshot.
    - the reflected-division dunder was misspelt __truerdiv__ (Python never
      calls that name); it is now __rtruediv__, with the old name kept as
      an alias for any explicit callers.
    """
    bits = None
    log_rounds = False
    linear_rounds = False
    @classmethod
    def bit_adder(cls, a, b, carry_in=0, get_carry=False):
        """Add two little-endian bit vectors, zero-padding the shorter one."""
        a, b = list(a), list(b)
        a += [0] * (len(b) - len(a))
        b += [0] * (len(a) - len(b))
        return cls.bit_adder_selection(a, b, carry_in=carry_in,
                                       get_carry=get_carry)
    @classmethod
    def bit_adder_selection(cls, a, b, carry_in=0, get_carry=False):
        # pick the adder circuit according to the round-complexity flags
        if cls.log_rounds:
            return cls.carry_lookahead_adder(a, b, carry_in=carry_in)
        elif cls.linear_rounds:
            return cls.ripple_carry_adder(a, b, carry_in=carry_in)
        else:
            return cls.carry_select_adder(a, b, carry_in=carry_in)
    @classmethod
    def carry_lookahead_adder(cls, a, b, fewer_inv=False, carry_in=0,
                              get_carry=False):
        """Logarithmic-round adder based on a prefix carry computation."""
        lower = []
        # Strip leading positions where one summand bit is the public int 0
        # (the sum bit is just ai + bi and no carry can arise there).
        # Iterate over snapshots so popping from a and b cannot disturb
        # the iteration.
        for (ai, bi) in zip(a[:], b[:]):
            if ai is 0 or bi is 0:
                lower.append(ai + bi)
                a.pop(0)
                b.pop(0)
            else:
                break
        d = [cls.half_adder(ai, bi) for (ai,bi) in zip(a,b)]
        carry = floatingpoint.carry
        if fewer_inv:
            pre_op = floatingpoint.PreOpL2
        else:
            pre_op = floatingpoint.PreOpL
        if d:
            # prefix computation of all carries in log rounds
            carries = list(zip(*pre_op(carry, [(0, carry_in)] + d)))[1]
        else:
            carries = []
        res = lower + cls.sum_from_carries(a, b, carries)
        if get_carry:
            res += [carries[-1]]
        return res
    @staticmethod
    def sum_from_carries(a, b, carries):
        # sum bit = a XOR b XOR carry-in
        return [ai.bit_xor(bi).bit_xor(carry) \
                for (ai, bi, carry) in zip(a, b, carries)]
    @classmethod
    def carry_select_adder(cls, a, b, get_carry=False, carry_in=0):
        """Carry-select adder with increasing block sizes."""
        a += [0] * (len(b) - len(a))
        b += [0] * (len(a) - len(b))
        n = len(a)
        # find block sizes k, k+1, ..., m whose sum covers n bits
        for m in range(100):
            if sum(range(m + 1)) + 1 >= n:
                break
        for k in range(m, -1, -1):
            if sum(range(m, k - 1, -1)) + 1 >= n:
                break
        blocks = list(range(m, k, -1))
        blocks.append(n - sum(blocks))
        blocks.reverse()
        if len(blocks) > 1 and blocks[0] > blocks[1]:
            raise Exception('block size not increasing:', blocks)
        if sum(blocks) != n:
            raise Exception('blocks not summing up: %s != %s' % \
                            (sum(blocks), n))
        res = []
        carry = carry_in
        cin_one = util.long_one(a + b)
        for m in blocks:
            aa = a[:m]
            bb = b[:m]
            a = a[m:]
            b = b[m:]
            # compute each block for both possible carry-ins, then select
            cc = [cls.ripple_carry_adder(aa, bb, i) for i in (0, cin_one)]
            for i in range(m):
                res.append(util.if_else(carry, cc[1][i], cc[0][i]))
            carry = util.if_else(carry, cc[1][m], cc[0][m])
        if get_carry:
            res += [carry]
        return res
    @classmethod
    def ripple_carry_adder(cls, a, b, carry_in=0):
        """Linear-round adder; returns the sum bits plus the final carry."""
        carry = carry_in
        res = []
        for aa, bb in zip(a, b):
            cc, carry = cls.full_adder(aa, bb, carry)
            res.append(cc)
        res.append(carry)
        return res
    @staticmethod
    def full_adder(a, b, carry):
        # carry-out = carry if a != b else a
        s = a + b
        return s + carry, util.if_else(s, carry, a)
    @staticmethod
    def half_adder(a, b):
        return a + b, a & b
    @staticmethod
    def bit_comparator(a, b):
        """Return (b > a at highest differing bit, a != b) as secret bits."""
        long_one = util.long_one(a + b)
        op = lambda y,x,*args: (util.if_else(x[1], x[0], y[0]), \
                                util.if_else(x[1], long_one, y[1]))
        return floatingpoint.KOpL(op, [(bi, ai + bi) for (ai,bi) in zip(a,b)])
    @classmethod
    def bit_less_than(cls, a, b):
        x, not_equal = cls.bit_comparator(a, b)
        return util.if_else(not_equal, x, 0)
    @staticmethod
    def get_highest_different_bits(a, b, index):
        # constant-round comparison: isolate the highest differing bit and
        # return the corresponding bit of operand 'index'
        diff = [ai + bi for (ai,bi) in reversed(list(zip(a,b)))]
        preor = floatingpoint.PreOR(diff, raw=True)
        highest_diff = [x - y for (x,y) in reversed(list(zip(preor, [0] + preor)))]
        raw = sum(map(operator.mul, highest_diff, (a,b)[index]))
        return raw.bit_decompose()[0]
    def load_int(self, other):
        """Load a signed constant in two's-complement encoding."""
        if -2**(self.n_bits-1) <= other < 2**(self.n_bits-1):
            self.bin_type.load_int(self, other + 2**self.n_bits \
                                   if other < 0 else other)
        else:
            raise CompilerError('Invalid signed %d-bit integer: %d' % \
                                (self.n_bits, other))
    def add(self, other):
        if type(other) == self.bin_type:
            raise CompilerError('Unclear addition')
        a = self.bit_decompose()
        b = util.bit_decompose(other, self.n_bits)
        return self.compose(self.bit_adder(a, b))
    def mul(self, other):
        """Multiply via a Wallace tree over the partial-product matrix."""
        if type(other) == self.bin_type:
            raise CompilerError('Unclear multiplication')
        self_bits = self.bit_decompose()
        if isinstance(other, int):
            other_bits = util.bit_decompose(other, self.n_bits)
            bit_matrix = [[x * y for y in self_bits] for x in other_bits]
        else:
            try:
                other_bits = other.bit_decompose()
                if len(other_bits) == 1:
                    # single-bit factor: scalar multiplication suffices
                    return type(self)(other_bits[0] * self)
                if len(self_bits) != len(other_bits):
                    raise NotImplementedError('Multiplication of different lengths')
            except AttributeError:
                pass
            try:
                other = self.bin_type(other)
            except CompilerError:
                return NotImplemented
            products = [x * other for x in self_bits]
            bit_matrix = [util.bit_decompose(x, self.n_bits) for x in products]
        return self.compose(self.wallace_tree_from_matrix(bit_matrix, False))
    @classmethod
    def wallace_tree_from_matrix(cls, bit_matrix, get_carry=True):
        # arrange partial products into columns by bit weight
        columns = [[_f for _f in (bit_matrix[j][i-j] \
                                      for j in range(min(len(bit_matrix), i + 1))) if _f] \
                       for i in range(len(bit_matrix[0]))]
        return cls.wallace_tree_from_columns(columns, get_carry)
    @classmethod
    def wallace_tree_from_columns(cls, columns, get_carry=True):
        """Reduce addend columns with full/half adders, then one final adder."""
        self = cls
        while max(len(c) for c in columns) > 2:
            new_columns = [[] for i in range(len(columns) + 1)]
            for i,col in enumerate(columns):
                while len(col) > 2:
                    s, carry = self.full_adder(*(col.pop() for i in range(3)))
                    new_columns[i].append(s)
                    new_columns[i+1].append(carry)
                if len(col) == 2:
                    s, carry = self.half_adder(*(col.pop() for i in range(2)))
                    new_columns[i].append(s)
                    new_columns[i+1].append(carry)
                else:
                    new_columns[i].extend(col)
            if get_carry:
                columns = new_columns
            else:
                columns = new_columns[:-1]
        for col in columns:
            col.extend([0] * (2 - len(col)))
        return self.bit_adder(*list(zip(*columns)))
    @classmethod
    def wallace_tree(cls, rows):
        return cls.wallace_tree_from_columns([list(x) for x in zip(*rows)])
    def __sub__(self, other):
        """Subtract via a prefix borrow computation."""
        if type(other) == sgf2n:
            raise CompilerError('Unclear subtraction')
        a = self.bit_decompose()
        b = util.bit_decompose(other, self.n_bits)
        d = [(1 + ai + bi, (1 - ai) * bi) for (ai,bi) in zip(a,b)]
        borrow = lambda y,x,*args: \
            (x[0] * y[0], 1 - (1 - x[1]) * (1 - x[0] * y[1]))
        borrows = (0,) + list(zip(*floatingpoint.PreOpL(borrow, d)))[1]
        return self.compose(ai + bi + borrow \
                            for (ai,bi,borrow) in zip(a,b,borrows))
    def __rsub__(self, other):
        raise NotImplementedError()
    def __truediv__(self, other):
        raise NotImplementedError()
    def __rtruediv__(self, other):
        # renamed from the misspelt __truerdiv__, which Python never invokes
        raise NotImplementedError()
    __truerdiv__ = __rtruediv__
    def __lshift__(self, other):
        return self.compose(([0] * other + self.bit_decompose())[:self.n_bits])
    def __rshift__(self, other):
        return self.compose(self.bit_decompose()[other:])
    def bit_decompose(self, n_bits=None, *args):
        if self.bits is None:
            self.bits = self.force_bit_decompose(self.n_bits)
        if n_bits is None:
            return self.bits[:]
        else:
            # extend with the fill bit (sign or zero, per subclass)
            return self.bits[:n_bits] + [self.fill_bit()] * (n_bits - self.n_bits)
    def fill_bit(self):
        # signed default: replicate the top (sign) bit
        return self.bits[-1]
    @staticmethod
    def prep_comparison(a, b):
        # swap the sign bits so signed comparison reduces to unsigned
        a[-1], b[-1] = b[-1], a[-1]
    def comparison(self, other, const_rounds=False, index=None):
        a = self.bit_decompose()
        b = util.bit_decompose(other, self.n_bits)
        self.prep_comparison(a, b)
        if const_rounds:
            return self.get_highest_different_bits(a, b, index)
        else:
            return self.bit_comparator(a, b)
    def __lt__(self, other):
        if program.options.comparison == 'log':
            x, not_equal = self.comparison(other)
            return util.if_else(not_equal, x, 0)
        else:
            return self.comparison(other, True, 1)
    def __le__(self, other):
        if program.options.comparison == 'log':
            x, not_equal = self.comparison(other)
            return util.if_else(not_equal, x, 1)
        else:
            return 1 - self.comparison(other, True, 0)
    def __ge__(self, other):
        return 1 - (self < other)
    def __gt__(self, other):
        return 1 - (self <= other)
    def __eq__(self, other):
        # equal iff every bit of the XOR difference is zero
        diff = self ^ other
        diff_bits = [1 - x for x in diff.bit_decompose()]
        return floatingpoint.KMul(diff_bits)
    def __ne__(self, other):
        return 1 - (self == other)
    def __neg__(self):
        # two's complement: invert all bits and add one
        return 1 + self.compose(1 ^ b for b in self.bit_decompose())
    def __abs__(self):
        return util.if_else(self.bit_decompose()[-1], -self, self)
    less_than = lambda self, other, *args, **kwargs: self < other
    greater_than = lambda self, other, *args, **kwargs: self > other
    less_equal = lambda self, other, *args, **kwargs: self <= other
    greater_equal = lambda self, other, *args, **kwargs: self >= other
    equal = lambda self, other, *args, **kwargs: self == other
    not_equal = lambda self, other, *args, **kwargs: self != other
class intbitint(_bitint, sint):
    """Bit-decomposed integer over sint with arithmetic adder gates."""
    @staticmethod
    def full_adder(a, b, carry):
        # carry-out = carry if a != b else a
        s = a.bit_xor(b)
        return s.bit_xor(carry), util.if_else(s, carry, a)
    @staticmethod
    def half_adder(a, b):
        # XOR expressed arithmetically: a + b - 2ab
        carry = a * b
        return a + b - 2 * carry, carry
    @staticmethod
    def sum_from_carries(a, b, carries):
        # sum bit = a XOR b XOR carry-in, expressed arithmetically
        return [a[i] + b[i] + carries[i] - 2 * carries[i + 1] \
                for i in range(len(a))]
    @classmethod
    def bit_adder_selection(cls, a, b, carry_in=0, get_carry=False):
        if cls.linear_rounds:
            return cls.ripple_carry_adder(a, b, carry_in=carry_in)
        # experimental cut-off with dead code elimination
        elif len(a) < 122 or cls.log_rounds:
            return cls.carry_lookahead_adder(a, b, carry_in=carry_in,
                                             get_carry=get_carry)
        else:
            return cls.carry_select_adder(a, b, carry_in=carry_in)
class sgf2nint(_bitint, sgf2n):
    """Signed integer embedded bit-wise in GF(2^n)."""
    bin_type = sgf2n
    @classmethod
    def compose(cls, bits):
        """Build a value from a little-endian list of bits."""
        bits = list(bits)
        if len(bits) > cls.n_bits:
            raise CompilerError('Too many bits')
        res = cls()
        res.bits = bits + [0] * (cls.n_bits - len(bits))
        gmovs(res, sum(b << i for i,b in enumerate(bits)))
        return res
    def load_other(self, other):
        if isinstance(other, sgf2nint):
            gmovs(self, self.compose(other.bit_decompose(self.n_bits)))
        elif isinstance(other, sgf2n):
            gmovs(self, other)
        else:
            # public constant: add to a zero share
            gaddm(self, sgf2n(0), cgf2n(other))
    def force_bit_decompose(self, n_bits=None):
        return sgf2n(self).bit_decompose(n_bits)
class sgf2nuint(sgf2nint):
    """Unsigned variant: zero fill bit, no sign handling in comparison."""
    def load_int(self, other):
        if 0 <= other < 2**self.n_bits:
            sgf2n.load_int(self, other)
        else:
            raise CompilerError('Invalid unsigned %d-bit integer: %d' % \
                                (self.n_bits, other))
    def fill_bit(self):
        # zero extension for unsigned values
        return 0
    @staticmethod
    def prep_comparison(a, b):
        # unsigned comparison needs no sign-bit swap
        pass
class sgf2nuint32(sgf2nuint):
    # unsigned 32-bit specialisation
    n_bits = 32
class sgf2nint32(sgf2nint):
    # signed 32-bit specialisation
    n_bits = 32
def get_sgf2nint(n):
    """Return a signed GF(2^n)-embedded integer type with n bits."""
    class sgf2nint_spec(sgf2nint):
        n_bits = n
    #sgf2nint_spec.__name__ = 'sgf2unint' + str(n)
    return sgf2nint_spec
def get_sgf2nuint(n):
    """Return an unsigned GF(2^n)-embedded integer type with n bits.

    Fix: the spec class now derives from sgf2nuint (unsigned) rather than
    sgf2nint (signed), so the zero fill bit, the unsigned load_int range
    check and the unsigned comparison preparation apply, matching this
    factory's name and mirroring get_sgf2nint.
    """
    class sgf2nuint_spec(sgf2nuint):
        n_bits = n
    sgf2nuint_spec.__name__ = 'sgf2nuint' + str(n)
    return sgf2nuint_spec
class sgf2nfloat(sgf2n):
    """Floating-point number embedded in GF(2^n): mantissa v, exponent p,
    zero flag z and sign s packed into a single register."""
    @classmethod
    def set_precision(cls, vlen, plen):
        # vlen mantissa bits, plen exponent bits; v_type is wide enough
        # to hold an unnormalized double-length mantissa
        cls.vlen = vlen
        cls.plen = plen
        class v_type(sgf2nuint):
            n_bits = 2 * vlen + 1
        class p_type(sgf2nint):
            n_bits = plen
        class pdiff_type(sgf2nuint):
            n_bits = plen
        cls.v_type = v_type
        cls.p_type = p_type
        cls.pdiff_type = pdiff_type
    def __init__(self, val, p=None, z=None, s=None):
        super(sgf2nfloat, self).__init__()
        if p is None and type(val) == sgf2n:
            # unpack an already packed register
            bits = val.bit_decompose(self.vlen + self.plen + 1)
            self.v = self.v_type.compose(bits[:self.vlen])
            self.p = self.p_type.compose(bits[self.vlen:-1])
            self.s = bits[-1]
            # zero iff all mantissa bits are zero
            self.z = util.tree_reduce(operator.mul, (1 - b for b in self.v.bits))
        else:
            if p is None:
                # convert a constant via the sfloat representation
                v, p, z, s = sfloat.convert_float(val, self.vlen, self.plen)
                # correct sfloat
                p += self.vlen - 1
                v_bits = util.bit_decompose(v, self.vlen)
                p_bits = util.bit_decompose(p, self.plen)
                self.v = self.v_type.compose(v_bits)
                self.p = self.p_type.compose(p_bits)
                self.z = z
                self.s = s
            else:
                self.v, self.p, self.z, self.s = val, p, z, s
                v_bits = val.bit_decompose()[:self.vlen]
                p_bits = p.bit_decompose()[:self.plen]
            # pack components into this register
            gmovs(self, util.bit_compose(v_bits + p_bits + [self.s]))
    def add(self, other):
        """Floating-point addition: align exponents, add mantissas,
        renormalize, then patch the zero cases."""
        a = self.p < other.p
        b = self.p == other.p
        c = self.v < other.v
        other_dominates = (b.if_else(c, a))
        pmax, pmin = a.cond_swap(self.p, other.p, self.p_type)
        vmax, vmin = other_dominates.cond_swap(self.v, other.v, self.v_type)
        s3 = self.s ^ other.s
        pdiff = self.pdiff_type(pmax - pmin)
        d = self.vlen < pdiff
        pow_delta = util.pow2(d.if_else(0, pdiff).bit_decompose(util.log2(self.vlen)))
        v3 = vmax
        v4 = self.v_type(sgf2n(vmax) * pow_delta) + self.v_type(s3.if_else(-vmin, vmin))
        v = self.v_type(sgf2n(d.if_else(v3, v4) << self.vlen) / pow_delta)
        v >>= self.vlen - 1
        # locate the highest set bit to renormalize
        h = floatingpoint.PreOR(v.bits[self.vlen+1::-1])
        tmp = sum(util.if_else(b, 0, 1 << i) for i,b in enumerate(h))
        pow_p0 = 1 + self.v_type(tmp)
        v = (v * pow_p0) >> 2
        p = pmax - sum(self.p_type.compose([1 - b]) for b in h) + 1
        # zero operands pass the other operand through
        v = self.z.if_else(other.v, other.z.if_else(self.v, v))
        z = v == 0
        p = z.if_else(0, self.z.if_else(other.p, other.z.if_else(self.p, p)))
        s = other_dominates.if_else(other.s, self.s)
        s = self.z.if_else(other.s, other.z.if_else(self.s, s))
        return sgf2nfloat(v, p, z, s)
    def mul(self, other):
        """Floating-point multiplication with one-bit renormalization."""
        v = (self.v * other.v) >> (self.vlen - 1)
        b = v.bits[self.vlen]
        v = b.if_else(v >> 1, v)
        p = self.p + other.p + self.p_type.compose([b])
        s = self.s + other.s
        z = util.or_op(self.z, other.z)
        return sgf2nfloat(v, p, z, s)
# default: IEEE-single-like precision (24-bit mantissa, 8-bit exponent)
sgf2nfloat.set_precision(24, 8)
def parse_type(other, k=None, f=None):
    """Coerce a value to the matching fixed-point type.

    Scalars and cint become cfix; sint and sfloat become sfix; anything
    else is passed through unchanged.
    """
    if isinstance(other, cfix.scalars):
        return cfix(other, k=k, f=f)
    if isinstance(other, cint):
        res = cfix()
        res.load_int(other)
        return res
    if isinstance(other, sint):
        res = sfix()
        res.load_int(other)
        return res
    if isinstance(other, sfloat):
        return sfix(other)
    return other
class cfix(_number, _structure):
    """ Clear fixed point type, stored as the scaled integer
    self.v = round(value * 2 ** self.f). """
    __slots__ = ['value', 'f', 'k', 'size']
    reg_type = 'c'
    scalars = (int, float, regint)
    @classmethod
    def set_precision(cls, f, k = None):
        # k is the whole bitlength of fixed point
        # f is the bitlength of decimal part
        cls.f = f
        if k is None:
            cls.k = 2 * f
        else:
            cls.k = k
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        """ Load a cfix from memory at the given address. """
        res = []
        res.append(cint.load_mem(address))
        return cfix(*res)
    @vectorized_classmethod
    def read_from_socket(cls, client_id, n=1):
        """ Read one or more cfix values from a socket.
        Sender will have already bit shifted and sent as cints.

        Fix: this previously returned the undefined name 'cint_inputs',
        raising NameError on every call; it now uses the value read. """
        cint_input = cint.read_from_socket(client_id, n)
        if n == 1:
            return cfix(cint_input)
        else:
            return list(map(cfix, cint_input))
    @vectorize
    def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
        """ Send cfix to socket. Value is sent as bit shifted cint.

        NOTE(review): shadowed by the classmethod of the same name below,
        so this instance method is unreachable; kept to preserve the class
        layout -- consider renaming one of the two. """
        writesocketc(client_id, message_type, cint(self.v))
    @vectorized_classmethod
    def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
        """ Send a list of cfix values to socket. Values are sent as bit shifted cints. """
        def cfix_to_cint(fix_val):
            return cint(fix_val.v)
        cint_values = list(map(cfix_to_cint, values))
        writesocketc(client_id, message_type, *cint_values)
    @staticmethod
    def malloc(size):
        return program.malloc(size, cint)
    @staticmethod
    def n_elements():
        # one underlying cint per cfix
        return 1
    @vectorize_init
    def __init__(self, v=None, k=None, f=None, size=None):
        """ Initialise from a cint (already scaled), a Python scalar,
        another cfix or a MemValue. """
        f = f or self.f
        k = k or self.k
        self.f = f
        self.k = k
        self.size = get_global_vector_size()
        if isinstance(v, cint):
            self.v = cint(v, size=self.size)
        elif isinstance(v, cfix.scalars):
            v = v * (2 ** f)
            try:
                v = int(round(v))
            except TypeError:
                # regint scalars cannot be rounded at compile time
                pass
            self.v = cint(v, size=self.size)
        elif isinstance(v, cfix):
            self.v = v.v
        elif isinstance(v, MemValue):
            self.v = v
        elif v is None:
            self.v = cint(0)
        else:
            raise CompilerError('cannot initialize cfix with %s' % v)
    @vectorize
    def load_int(self, v):
        # scale the integer up by 2^f
        self.v = cint(v) * (2 ** self.f)
    @classmethod
    def conv(cls, other):
        """ Convert any compatible value to cfix. """
        if isinstance(other, cls):
            return other
        else:
            try:
                res = cfix()
                res.load_int(other)
                return res
            except (TypeError, CompilerError):
                pass
            return cls(other)
    def store_in_mem(self, address):
        self.v.store_in_mem(address)
    def sizeof(self):
        return self.size * 4
    @vectorize
    def add(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return cfix(self.v + other.v)
        else:
            # sfix (and friends) handle mixed addition via reflection
            return NotImplemented
    @vectorize
    def mul(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            assert self.f == other.f
            # shift the sign-corrected double-precision product back down
            sgn = cint(1 - 2 * (self.v * other.v < 0))
            absolute = self.v * other.v * sgn
            val = sgn * (absolute >> self.f)
            return cfix(val)
        elif isinstance(other, sfix):
            return NotImplemented
        else:
            raise CompilerError('Invalid type %s for cfix.__mul__' % type(other))
    @vectorize
    def __sub__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return cfix(self.v - other.v)
        elif isinstance(other, sfix):
            return sfix(self.v - other.v)
        else:
            raise NotImplementedError
    @vectorize
    def __neg__(self):
        # cfix type always has .v
        return cfix(-self.v)
    def __rsub__(self, other):
        return -self + other
    @vectorize
    def __eq__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v == other.v
        elif isinstance(other, sfix):
            return other.v.equal(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __lt__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v < other.v
        elif isinstance(other, sfix):
            if(self.k != other.k or self.f != other.f):
                raise TypeError('Incompatible fixed point types in comparison')
            # self < other  <=>  other > self
            return other.v.greater_than(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __le__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v <= other.v
        elif isinstance(other, sfix):
            return other.v.greater_equal(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __gt__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v > other.v
        elif isinstance(other, sfix):
            return other.v.less_than(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __ge__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v >= other.v
        elif isinstance(other, sfix):
            return other.v.less_equal(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __ne__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v != other.v
        elif isinstance(other, sfix):
            return other.v.not_equal(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __truediv__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return cfix(library.cint_cint_division(self.v, other.v, self.k, self.f))
        elif isinstance(other, sfix):
            return sfix(library.FPDiv(self.v, other.v, self.k, self.f,
                                      other.kappa, nearest=sfix.round_nearest))
        else:
            raise TypeError('Incompatible fixed point types in division')
    def print_plain(self):
        """ Print the decoded fixed-point value. """
        if self.k > 64:
            raise CompilerError('Printing of fixed-point numbers not ' +
                                'implemented for more than 64-bit precision')
        tmp = regint()
        convmodp(tmp, self.v, bitlength=self.k)
        sign = cint(tmp < 0)
        abs_v = sign.if_else(-self.v, self.v)
        print_float_plain(cint(abs_v), cint(-self.f), \
                          cint(0), cint(sign))
class _single(_number, _structure):
    """Representation as single integer preserving the order,
    e.g. fixed-point numbers."""
    __slots__ = ['v']
    # statistical security parameter for comparisons/truncation
    kappa = 40
    round_nearest = False
    @property
    @classmethod
    def reg_type(cls):
        # NOTE(review): stacking property over classmethod does not yield a
        # working class-level property in plain Python 3 -- presumably only
        # accessed in a way that tolerates this; confirm before relying on it
        return cls.int_type.reg_type
    @classmethod
    def receive_from_client(cls, n, client_id, message_type=ClientMessageType.NoType):
        """ Securely obtain shares of n values input by a client.
        Assumes client has already run bit shift to convert fixed point to integer."""
        sint_inputs = cls.int_type.receive_from_client(n, client_id, ClientMessageType.TripleShares)
        return list(map(cls, sint_inputs))
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._new(cls.int_type.load_mem(address))
    @classmethod
    @read_mem_value
    def conv(cls, other):
        """Convert any compatible value to this type."""
        if isinstance(other, cls):
            return other
        else:
            try:
                return cls.from_sint(other)
            except (TypeError, CompilerError):
                pass
            return cls(other)
    @classmethod
    def coerce(cls, other):
        return cls.conv(other)
    @classmethod
    def malloc(cls, size):
        return program.malloc(size, cls.int_type)
    @staticmethod
    def n_elements():
        return 1
    @classmethod
    def dot_product(cls, x, y, res_params=None):
        # dot product with a single reduction at the end
        return cls.unreduced_dot_product(x, y, res_params).reduce_after_mul()
    @classmethod
    def unreduced_dot_product(cls, x, y, res_params=None):
        dp = cls.int_type.dot_product([xx.pre_mul() for xx in x],
                                      [yy.pre_mul() for yy in y])
        return x[0].unreduced(dp, y[0], res_params, len(x))
    @classmethod
    def row_matrix_mul(cls, row, matrix, res_params=None):
        # row times matrix on the integer representation, reduced once
        int_matrix = [y.get_vector().pre_mul() for y in matrix]
        col = cls.int_type.row_matrix_mul([x.pre_mul() for x in row],
                                          int_matrix)
        res = row[0].unreduced(col, matrix[0][0], res_params,
                               len(row)).reduce_after_mul()
        return res
    @classmethod
    def matrix_mul(cls, A, B, n, res_params=None):
        # matrix product on the integer representation, reduced once
        AA = A.pre_mul()
        BB = B.pre_mul()
        CC = cls.int_type.matrix_mul(AA, BB, n)
        res = A.unreduced(CC, B, res_params, n).reduce_after_mul()
        return res
    def store_in_mem(self, address):
        self.v.store_in_mem(address)
    @property
    def size(self):
        return self.v.size
    def sizeof(self):
        return self.size
    def __len__(self):
        return len(self.v)
    @vectorize
    def __sub__(self, other):
        other = self.coerce(other)
        return self + (-other)
    def __rsub__(self, other):
        return -self + other
    @vectorize
    def __eq__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.equal(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __le__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.less_equal(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __lt__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.less_than(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __ge__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.greater_equal(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __gt__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.greater_than(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError
    @vectorize
    def __ne__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.not_equal(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError
class _fix(_single):
    """ Shared fixed point type. """
    __slots__ = ['v', 'f', 'k', 'size']
    @classmethod
    def set_precision(cls, f, k = None):
        """Set class-wide precision: f fractional bits of k total."""
        cls.f = f
        # default bitlength = 2*precision
        if k is None:
            cls.k = 2 * f
        else:
            if k < f:
                raise CompilerError('bit length cannot be less than precision')
            cls.k = k
    @classmethod
    def coerce(cls, other):
        if isinstance(other, (_fix, cls.clear_type)):
            return other
        else:
            return cls.conv(other)
    @classmethod
    def from_sint(cls, other, k=None, f=None):
        """Build from a secret integer (scaled up by 2^f)."""
        res = cls()
        res.f = f or cls.f
        res.k = k or cls.k
        res.load_int(cls.int_type.conv(other))
        return res
    @classmethod
    def _new(cls, other, k=None, f=None):
        # internal constructor from an already scaled integer
        res = cls(other)
        res.k = k or cls.k
        res.f = f or cls.f
        return res
    @vectorize_init
    def __init__(self, _v=None, size=None):
        self.size = get_global_vector_size()
        f = self.f
        k = self.k
        # warning: don't initialize a sfix from a sint, this is only used in internal methods;
        # for external initialization use load_int.
        if _v is None:
            self.v = self.int_type(0)
        elif isinstance(_v, self.int_type):
            self.v = _v
            self.size = _v.size
        elif isinstance(_v, cfix.scalars):
            self.v = self.int_type(int(round(_v * (2 ** f))), size=self.size)
        elif isinstance(_v, self.float_type):
            # shift the float mantissa into fixed-point position
            p = (f + _v.p)
            b = (p.greater_equal(0, _v.vlen))
            a = b*(_v.v << (p)) + (1-b)*(_v.v >> (-p))
            self.v = (1-2*_v.s)*a
        elif isinstance(_v, type(self)):
            self.v = _v.v
        elif isinstance(_v, (MemValue, MemFix)):
            #this is a memvalue object
            self.v = type(self)(_v.read()).v
        else:
            raise CompilerError('cannot convert %s to sfix' % _v)
        if not isinstance(self.v, self.int_type):
            raise CompilerError('sfix conversion failure: %s/%s' % (_v, self.v))
    @vectorize
    def load_int(self, v):
        # scale the integer up by 2^f
        self.v = self.int_type(v) << self.f
    def __getitem__(self, index):
        return self._new(self.v[index])
    @vectorize
    def add(self, other):
        other = self.coerce(other)
        if isinstance(other, (_fix, cfix)):
            return self._new(self.v + other.v, k=self.k, f=self.f)
        elif isinstance(other, cfix.scalars):
            tmp = cfix(other, k=self.k, f=self.f)
            return self + tmp
        else:
            return NotImplemented
    @vectorize
    def mul(self, other):
        """Multiply with truncation of the double-precision product."""
        if isinstance(other, (sint, cint, regint, int)):
            # integer factor: no truncation needed
            return self._new(self.v * other, k=self.k, f=self.f)
        elif isinstance(other, float):
            if int(other) == other:
                return self.mul(int(other))
            v = int(round(other * 2 ** self.f))
            if v == 0:
                return 0
            # strip trailing zero bits to minimize the truncation amount
            f = self.f
            while v % 2 == 0:
                f -= 1
                v //= 2
            k = len(bin(abs(v))) - 1
            other = self.multipliable(v, k, f)
        other = self.coerce(other)
        if isinstance(other, (_fix, self.clear_type)):
            val = self.v.TruncMul(other.v, self.k + other.k, other.f,
                                  self.kappa,
                                  self.round_nearest)
        if self.size >= other.size:
            return self._new(val, k=self.k, f=self.f)
        else:
            return self.vec._new(val, k=self.k, f=self.f)
        elif isinstance(other, cfix.scalars):
            scalar_fix = cfix(other)
            return self * scalar_fix
        else:
            return NotImplemented
    @vectorize
    def __neg__(self):
        return type(self)(-self.v)
    @vectorize
    def __truediv__(self, other):
        other = self.coerce(other)
        if isinstance(other, _fix):
            return type(self)(library.FPDiv(self.v, other.v, self.k, self.f,
                                            self.kappa,
                                            nearest=self.round_nearest))
        elif isinstance(other, cfix):
            return type(self)(library.sint_cint_division(self.v, other.v, self.k, self.f, self.kappa))
        else:
            raise TypeError('Incompatible fixed point types in division')
    @vectorize
    def __rtruediv__(self, other):
        return self.coerce(other) / self
    @vectorize
    def compute_reciprocal(self):
        # 1/x as fixed point (2^f / v)
        return type(self)(library.FPDiv(cint(2) ** self.f, self.v, self.k, self.f, self.kappa, True))
    def reveal(self):
        """Reveal to a clear fixed-point value with the same precision."""
        val = self.v.reveal()
        res = self.clear_type(val)
        res.f = self.f
        res.k = self.k
        return res
class sfix(_fix):
    """Secret fixed point over sint, revealed as cfix."""
    int_type = sint
    clear_type = cfix
    @vectorized_classmethod
    def get_input_from(cls, player):
        """Read a fixed-point input from the given player."""
        v = cls.int_type()
        inputmixed('fix', v, cls.f, player)
        return cls._new(v)
    @vectorized_classmethod
    def get_random(cls, lower, upper):
        """ Uniform random number around centre of bounds.
        Range can be smaller. """
        # sample a power-of-two range centred between the bounds
        log_range = int(math.log(upper - lower, 2))
        n_bits = log_range + cls.f
        average = lower + 0.5 * (upper - lower)
        lower = average - 0.5 * 2 ** log_range
        return cls._new(cls.int_type.get_random_int(n_bits)) + lower
    def coerce(self, other):
        return parse_type(other, k=self.k, f=self.f)
    def mul_no_reduce(self, other, res_params=None):
        # product without truncation, for deferred reduction
        assert self.f == other.f
        return self.unreduced(self.v * other.v)
    def pre_mul(self):
        return self.v
    def unreduced(self, v, other=None, res_params=None, n_summands=1):
        return unreduced_sfix(v, self.k * 2, self.f, self.kappa)
    @staticmethod
    def multipliable(v, k, f):
        return cfix(cint.conv(v), k, f)
class unreduced_sfix(_single):
    """Double-precision product of sfix values awaiting truncation."""
    int_type = sint
    @classmethod
    def _new(cls, v):
        return cls(v, 2 * sfix.k, sfix.f, sfix.kappa)
    def __init__(self, v, k, m, kappa):
        self.v = v
        self.k = k
        self.m = m
        self.kappa = kappa
    def __add__(self, other):
        # 'is 0' deliberately matches only the Python int literal 0
        if other is 0:
            return self
        assert self.k == other.k
        assert self.m == other.m
        assert self.kappa == other.kappa
        return unreduced_sfix(self.v + other.v, self.k, self.m, self.kappa)
    __radd__ = __add__
    @vectorize
    def reduce_after_mul(self):
        # single truncation back to sfix precision
        return sfix(sfix.int_type.round(self.v, self.k, self.m, self.kappa,
                                        nearest=sfix.round_nearest,
                                        signed=True))
sfix.unreduced_type = unreduced_sfix
# Default fixed-point precision: 20 fractional bits of 40 total.
# These constants have been chosen so that multiplications fit in a
# 128-bit prime field:
# (precision n1) 41 + (precision n2) 41 + (stat_sec) 40 = 82 + 40 = 122 <= 128
# with statistical security of 40.
fixed_lower = 20
fixed_upper = 40
sfix.set_precision(fixed_lower, fixed_upper)
cfix.set_precision(fixed_lower, fixed_upper)
class squant(_single):
    """ Quantization as in ArXiv:1712.05877v1
    (value = S * (q - Z) with quantized q, scale S, zero point Z). """
    __slots__ = ['params']
    int_type = sint
    clamp = True
    @classmethod
    def set_params(cls, S, Z=0, k=8):
        """Set class-wide scale S, zero point Z and bit width k."""
        cls.params = squant_params(S, Z, k)
    @classmethod
    def from_sint(cls, other):
        raise CompilerError('sint to squant conversion not implemented')
    @classmethod
    def _new(cls, value, params=None):
        # internal constructor from an already quantized integer
        res = cls(params=params)
        res.v = value
        return res
    @read_mem_value
    def __init__(self, value=None, params=None):
        if params is not None:
            self.params = params
        if value is None:
            # need to set v manually
            pass
        elif isinstance(value, cfix.scalars):
            # quantize a compile-time constant
            set_global_vector_size(1)
            q = util.round_to_int(value / self.S + self.Z)
            if util.is_constant(q) and (q < 0 or q >= 2**self.k):
                raise CompilerError('%f not quantizable' % value)
            self.v = self.int_type(q)
            reset_global_vector_size()
        elif isinstance(value, squant) and value.params == self.params:
            self.v = value.v
        else:
            raise CompilerError('cannot convert %s to squant' % value)
    def __getitem__(self, index):
        return type(self)._new(self.v[index], self.params)
    def get_params(self):
        return self.params
    @property
    def S(self):
        return self.params.S
    @property
    def Z(self):
        return self.params.Z
    @property
    def k(self):
        return self.params.k
    def coerce(self, other):
        other = self.conv(other)
        return self._new(util.expand(other.v, self.size), other.params)
    @vectorize
    def add(self, other):
        # same parameters: (q1 - Z) + (q2 - Z) + Z = q1 + q2 - Z
        other = self.coerce(other)
        assert self.get_params() == other.get_params()
        return self._new(self.v + other.v - util.expand(self.Z, self.v.size))
    def mul(self, other, res_params=None):
        return self.mul_no_reduce(other, res_params).reduce_after_mul()
    def mul_no_reduce(self, other, res_params=None):
        # product of zero-point-corrected values, reduction deferred
        if isinstance(other, (sint, cint, regint)):
            return self._new(other * (self.v - self.Z) + self.Z,
                             params=self.get_params())
        other = self.coerce(other)
        tmp = (self.v - self.Z) * (other.v - other.Z)
        return _unreduced_squant(tmp, (self.get_params(), other.get_params()),
                                 res_params=res_params)
    def pre_mul(self):
        return self.v - util.expand(self.Z, self.v.size)
    def unreduced(self, v, other, res_params=None, n_summands=1):
        return _unreduced_squant(v, (self.get_params(), other.get_params()),
                                 res_params, n_summands)
    @vectorize
    def for_mux(self, other):
        # helper returning (reconstructor, value pair) for multiplexing
        other = self.coerce(other)
        assert self.params == other.params
        f = lambda x: self._new(x, self.params)
        return f, self.v, other.v
    @vectorize
    def __neg__(self):
        # -(q - Z) + Z = -q + 2Z
        return self._new(-self.v + 2 * util.expand(self.Z, self.v.size))
class _unreduced_squant(object):
    """ Accumulator for squant products that have not yet been rescaled
    to the result's quantization parameters. """
    def __init__(self, v, params, res_params=None, n_summands=1):
        # v: raw integer sum of products
        # params: (left params, right params) of the multiplication
        # res_params: parameters of the eventual result
        # n_summands: how many products have been accumulated
        self.v = v
        self.params = params
        self.n_summands = n_summands
        self.res_params = res_params or params[0]
    def __add__(self, other):
        # Integer 0 is accepted so instances work with sum(); compare by
        # value instead of 'other is 0', which relied on CPython int
        # caching and raises SyntaxWarning on Python >= 3.8.
        if isinstance(other, int) and other == 0:
            return self
        assert self.params == other.params
        assert self.res_params == other.res_params
        return _unreduced_squant(self.v + other.v, self.params, self.res_params,
                                 self.n_summands + other.n_summands)
    __radd__ = __add__
    def reduce_after_mul(self):
        # rescale the accumulated sum to the result parameters
        return squant_params.conv(self.res_params).reduce(self)
class squant_params(object):
    """ Quantization parameters (scale S, zero point Z, bit length k)
    plus the machinery to rescale unreduced sums of products. """
    # upper bound on summands per reduction when using precomputation
    max_n_summands = 2048
    @staticmethod
    def conv(other):
        if isinstance(other, squant_params):
            return other
        else:
            return squant_params(*other)
    def __init__(self, S, Z=0, k=8):
        try:
            self.S = float(S)
        except:
            # NOTE(review): bare except keeps non-numeric S (e.g. a
            # runtime value) as-is; presumably only TypeError/ValueError
            # are expected here -- confirm
            self.S = S
        self.Z = MemValue.if_necessary(Z)
        self.k = k
        # cache of precomputed reduction constants
        self._store = {}
        if program.options.ring:
            # cheaper probabilistic truncation
            self.max_length = int(program.options.ring) - 1
        else:
            # safe choice for secret shift
            self.max_length = 71
    def __iter__(self):
        yield self.S
        yield self.Z
        yield self.k
    def is_constant(self):
        return util.is_constant_float(self.S) and util.is_constant(self.Z)
    def get(self, input_params, n_summands):
        # compute (shift amount, integer multiplier, shifted zero point)
        # for rescaling a sum of n_summands products with the given
        # input parameters to this parameter set
        p = input_params
        M = p[0].S * p[1].S / self.S
        logM = util.log2(M)
        n_shift = self.max_length - p[0].k - p[1].k - util.log2(n_summands)
        if util.is_constant_float(M):
            n_shift -= logM
            int_mult = int(round(M * 2 ** (n_shift)))
        else:
            int_mult = MemValue(M.v << (n_shift + M.p))
        shifted_Z = MemValue.if_necessary(self.Z << n_shift)
        return n_shift, int_mult, shifted_Z
    def precompute(self, *input_params):
        # precompute for the worst case (max_n_summands) so the result
        # can be reused inside loops
        self._store[input_params] = self.get(input_params, self.max_n_summands)
    def get_stored(self, unreduced):
        assert unreduced.n_summands <= self.max_n_summands
        return self._store[unreduced.params]
    def reduce(self, unreduced):
        """ Rescale an _unreduced_squant to these parameters,
        optionally clamping into the k-bit range. """
        ps = (self,) + unreduced.params
        if reduce(operator.and_, (p.is_constant() for p in ps)):
            n_shift, int_mult, shifted_Z = self.get(unreduced.params,
                                                    unreduced.n_summands)
        else:
            n_shift, int_mult, shifted_Z = self.get_stored(unreduced)
        size = unreduced.v.size
        n_shift = util.expand(n_shift, size)
        shifted_Z = util.expand(shifted_Z, size)
        int_mult = util.expand(int_mult, size)
        tmp = unreduced.v * int_mult + shifted_Z
        shifted = tmp.round(self.max_length, n_shift,
                            kappa=squant.kappa, nearest=squant.round_nearest,
                            signed=True)
        if squant.clamp:
            # saturate instead of wrapping around
            length = max(self.k, self.max_length - n_shift) + 1
            top = (1 << self.k) - 1
            over = shifted.greater_than(top, length, squant.kappa)
            under = shifted.less_than(0, length, squant.kappa)
            shifted = over.if_else(top, shifted)
            shifted = under.if_else(0, shifted)
        return squant._new(shifted, params=self)
class sfloat(_number, _structure):
    """ Shared floating point data type, representing
    (1 - 2s) * (1 - z) * v * 2^p with
    v: significand (vlen-bit, normalized unless zero)
    p: exponent (signed, plen bits)
    z: zero flag (1 iff the value is zero)
    s: sign bit
    All four components are secret-shared (sint).
    """
    __slots__ = ['v', 'p', 'z', 's', 'size']
    # single precision
    vlen = 24
    plen = 8
    # statistical security parameter for comparisons and truncations
    kappa = 40
    round_nearest = False
    @staticmethod
    def n_elements():
        # number of underlying registers per value
        return 4
    @classmethod
    def malloc(cls, size):
        return program.malloc(size * cls.n_elements(), sint)
    @classmethod
    def is_address_tuple(cls, address):
        # memory addresses can be one base address or one per component
        if isinstance(address, (list, tuple)):
            assert(len(address) == cls.n_elements())
            return True
        return False
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        """ Load from memory, either from a 4-tuple of addresses or from
        four consecutive blocks of 'size' words each. """
        size = get_global_vector_size()
        if cls.is_address_tuple(address):
            return sfloat(*(sint.load_mem(a, size=size) for a in address))
        res = []
        for i in range(4):
            res.append(sint.load_mem(address + i * size, size=size))
        return sfloat(*res)
    @classmethod
    def set_error(cls, error):
        # incompatible with loops
        #cls.error += error - cls.error * error
        cls.error = error
        pass
    @classmethod
    def conv(cls, other):
        if isinstance(other, cls):
            return other
        else:
            return cls(other)
    @classmethod
    def coerce(cls, other):
        return cls.conv(other)
    @staticmethod
    def convert_float(v, vlen, plen):
        """ Convert a Python float to components (v, p, z, s) with a
        vlen-bit significand and plen-bit exponent; raises on exponent
        overflow and truncates to zero (with a warning) on underflow. """
        if v < 0:
            s = 1
        else:
            s = 0
        if v == 0:
            v = 0
            p = 0
            z = 1
        else:
            p = int(math.floor(math.log(abs(v), 2))) - vlen + 1
            vv = v
            v = int(round(abs(v) * 2 ** (-p)))
            if v == 2 ** vlen:
                # rounding overflowed the significand; renormalize
                p += 1
                v //= 2
            z = 0
            if p < -2 ** (plen - 1):
                print('Warning: %e truncated to zero' % vv)
                v, p, z = 0, 0, 1
            if p >= 2 ** (plen - 1):
                raise CompilerError('Cannot convert %s to float ' \
                                        'with %d exponent bits' % (vv, plen))
        return v, p, z, s
    @vectorized_classmethod
    def get_input_from(cls, player):
        """ Secret floating-point input from the given player. """
        v = sint()
        p = sint()
        z = sint()
        s = sint()
        inputmixed('float', v, p, z, s, cls.vlen, player)
        return cls(v, p, z, s)
    @vectorize_init
    @read_mem_value
    def __init__(self, v, p=None, z=None, s=None, size=None):
        """ Construct from another sfloat/sfix/integer/Python float
        (v only) or from the four components (v, p, z, s). Integer
        components are range-checked at compile time. """
        self.size = get_global_vector_size()
        if p is None:
            if isinstance(v, sfloat):
                p = v.p
                z = v.z
                s = v.s
                v = v.v
            elif isinstance(v, sfix):
                # reinterpret the fixed-point integer, then shift the
                # exponent by the number of fractional bits
                f = v.f
                v, p, z, s = floatingpoint.Int2FL(v.v, v.k,
                                                  self.vlen, self.kappa)
                p = p - f
            elif util.is_constant_float(v):
                v, p, z, s = self.convert_float(v, self.vlen, self.plen)
            else:
                v, p, z, s = floatingpoint.Int2FL(sint.conv(v),
                                                  program.bit_length,
                                                  self.vlen, self.kappa)
        if isinstance(v, int):
            # significand must be normalized or zero
            if not ((v >= 2**(self.vlen-1) and v < 2**(self.vlen)) or v == 0):
                raise CompilerError('Floating point number malformed: significand')
            self.v = library.load_int_to_secret(v)
        else:
            self.v = v
        if isinstance(p, int):
            if not (p >= -2**(self.plen - 1) and p < 2**(self.plen - 1)):
                raise CompilerError('Floating point number malformed: exponent %d not unsigned %d-bit integer' % (p, self.plen))
            self.p = library.load_int_to_secret(p)
        else:
            self.p = p
        if isinstance(z, int):
            if not (z == 0 or z == 1):
                raise CompilerError('Floating point number malformed: zero bit')
            self.z = sint()
            ldsi(self.z, z)
        else:
            self.z = z
        if isinstance(s, int):
            if not (s == 0 or s == 1):
                raise CompilerError('Floating point number malformed: sign')
            self.s = sint()
            ldsi(self.s, s)
        else:
            self.s = s
    def __getitem__(self, index):
        return sfloat(*(x[index] for x in self))
    def __iter__(self):
        # component order matches the constructor
        yield self.v
        yield self.p
        yield self.z
        yield self.s
    def store_in_mem(self, address):
        if self.is_address_tuple(address):
            for a, x in zip(address, self):
                x.store_in_mem(a)
            return
        for i,x in enumerate((self.v, self.p, self.z, self.s)):
            x.store_in_mem(address + i * self.size)
    def sizeof(self):
        return self.size * self.n_elements()
    @vectorize
    def add(self, other):
        """ Secure floating-point addition.
        NOTE(review): appears to follow the secure floating-point
        addition protocol of Aliasgari et al. (NDSS 2013), with the
        deviations marked inline -- confirm against the paper. """
        other = self.conv(other)
        if isinstance(other, sfloat):
            a,c,d,e = [sint() for i in range(4)]
            t = sint()
            t2 = sint()
            v1 = self.v
            v2 = other.v
            p1 = self.p
            p2 = other.p
            s1 = self.s
            s2 = other.s
            z1 = self.z
            z2 = other.z
            # order operands by exponent (a) resp. significand (c)
            a = p1.less_than(p2, self.plen, self.kappa)
            b = floatingpoint.EQZ(p1 - p2, self.plen, self.kappa)
            c = v1.less_than(v2, self.vlen, self.kappa)
            ap1 = a*p1
            ap2 = a*p2
            aneg = 1 - a
            bneg = 1 - b
            cneg = 1 - c
            av1 = a*v1
            av2 = a*v2
            cv1 = c*v1
            cv2 = c*v2
            pmax = ap2 + p1 - ap1
            pmin = p2 - ap2 + ap1
            vmax = bneg*(av2 + v1 - av1) + b*(cv2 + v1 - cv1)
            vmin = bneg*(av1 + v2 - av2) + b*(cv1 + v2 - cv2)
            s3 = s1 + s2 - 2 * s1 * s2
            # d = 1 if the exponent gap is too large to matter
            comparison.LTZ(d, self.vlen + pmin - pmax + sfloat.round_nearest,
                           self.plen, self.kappa)
            pow_delta = floatingpoint.Pow2((1 - d) * (pmax - pmin),
                                           self.vlen + 1 + sfloat.round_nearest,
                                           self.kappa)
            # deviate from paper for more precision
            #v3 = 2 * (vmax - s3) + 1
            v3 = vmax
            v4 = vmax * pow_delta + (1 - 2 * s3) * vmin
            to_trunc = (d * v3 + (1 - d) * v4)
            if program.options.ring:
                to_trunc <<= 1 + sfloat.round_nearest
                v = floatingpoint.TruncInRing(to_trunc,
                                              2 * (self.vlen + 1 +
                                                   sfloat.round_nearest),
                                              pow_delta)
            else:
                to_trunc *= two_power(self.vlen + sfloat.round_nearest)
                v = to_trunc * floatingpoint.Inv(pow_delta)
                comparison.Trunc(t, v, 2 * self.vlen + 1 + sfloat.round_nearest,
                                 self.vlen - 1, self.kappa, False)
                v = t
            u = floatingpoint.BitDec(v, self.vlen + 2 + sfloat.round_nearest,
                                     self.vlen + 2 + sfloat.round_nearest, self.kappa,
                                     list(range(1 + sfloat.round_nearest,
                                                self.vlen + 2 + sfloat.round_nearest)))
            # using u[0] doesn't seem necessary
            h = floatingpoint.PreOR(u[:sfloat.round_nearest:-1], self.kappa)
            p0 = self.vlen + 1 - sum(h)
            pow_p0 = 1 + sum([two_power(i) * (1 - h[i]) for i in range(len(h))])
            if self.round_nearest:
                t2, overflow = \
                    floatingpoint.TruncRoundNearestAdjustOverflow(pow_p0 * v,
                                                                  self.vlen + 3,
                                                                  self.vlen,
                                                                  self.kappa)
                p0 = p0 - overflow
            else:
                comparison.Trunc(t2, pow_p0 * v, self.vlen + 2, 2, self.kappa, False)
            v = t2
            # deviate for more precision
            #p = pmax - p0 + 1 - d
            p = pmax - p0 + 1
            zz = self.z*other.z
            zprod = 1 - self.z - other.z + zz
            v = zprod*t2 + self.z*v2 + other.z*v1
            z = floatingpoint.EQZ(v, self.vlen, self.kappa)
            p = (zprod*p + self.z*p2 + other.z*p1)*(1 - z)
            s = (1 - b)*(a*other.s + aneg*self.s) + b*(c*other.s + cneg*self.s)
            s = zprod*s + (other.z - zz)*self.s + (self.z - zz)*other.s
            return sfloat(v, p, z, s)
        else:
            return NotImplemented
    @vectorize_max
    def mul(self, other):
        """ Secure floating-point multiplication: multiply significands,
        truncate back to vlen bits, adjust exponent, XOR signs, OR zero
        flags. """
        other = self.conv(other)
        if isinstance(other, sfloat):
            v1 = sint()
            v2 = sint()
            b = sint()
            c2expl = cint()
            comparison.ld2i(c2expl, self.vlen)
            if sfloat.round_nearest:
                v1 = comparison.TruncRoundNearest(self.v*other.v, 2*self.vlen,
                                                  self.vlen-1, self.kappa)
            else:
                comparison.Trunc(v1, self.v*other.v, 2*self.vlen, self.vlen-1, self.kappa, False)
            t = v1 - c2expl
            comparison.LTZ(b, t, self.vlen+1, self.kappa)
            # renormalize if the product lost a bit
            comparison.Trunc(v2, b*v1 + v1, self.vlen+1, 1, self.kappa, False)
            z1, z2, s1, s2, p1, p2 = (x.expand_to_vector() for x in \
                                      (self.z, other.z, self.s, other.s,
                                       self.p, other.p))
            z = z1 + z2 - self.z*other.z   # = OR(z1, z2)
            s = s1 + s2 - self.s*other.s*2 # = XOR(s1,s2)
            p = (p1 + p2 - b + self.vlen)*(1 - z)
            return sfloat(v2, p, z, s)
        else:
            return NotImplemented
    def __sub__(self, other):
        return self + -other
    def __rsub__(self, other):
        return -self + other
    def __truediv__(self, other):
        """ Secure floating-point division; sets the class error flag on
        division by zero (the divisor's zero flag). """
        other = self.conv(other)
        v = floatingpoint.SDiv(self.v, other.v + other.z * (2**self.vlen - 1),
                               self.vlen, self.kappa, self.round_nearest)
        b = v.less_than(two_power(self.vlen-1), self.vlen + 1, self.kappa)
        overflow = v.greater_equal(two_power(self.vlen), self.vlen + 1, self.kappa)
        underflow = v.less_than(two_power(self.vlen-2), self.vlen + 1, self.kappa)
        v = (v + b * v) * (1 - overflow) * (1 - underflow) + \
            overflow * (2**self.vlen - 1) + \
            underflow * (2**(self.vlen-1)) * (1 - self.z)
        p = (1 - self.z) * (self.p - other.p - self.vlen - b + 1)
        z = self.z
        s = self.s + other.s - 2 * self.s * other.s
        sfloat.set_error(other.z)
        return sfloat(v, p, z, s)
    def __rtruediv__(self, other):
        return self.conv(other) / self
    @vectorize
    def __neg__(self):
        # zeroes keep sign bit 0
        return sfloat(self.v, self.p, self.z, (1 - self.s) * (1 - self.z))
    @vectorize
    def __lt__(self, other):
        """ Secure comparison combining exponent order, signed
        significand order, and the zero/sign flags. """
        other = self.conv(other)
        if isinstance(other, sfloat):
            z1 = self.z
            z2 = other.z
            s1 = self.s
            s2 = other.s
            a = self.p.less_than(other.p, self.plen, self.kappa)
            c = floatingpoint.EQZ(self.p - other.p, self.plen, self.kappa)
            d = ((1 - 2*self.s)*self.v).less_than((1 - 2*other.s)*other.v, self.vlen + 1, self.kappa)
            cd = c*d
            ca = c*a
            b1 = cd + a - ca
            b2 = cd + 1 + ca - c - a
            s12 = self.s*other.s
            z12 = self.z*other.z
            b = (z1 - z12)*(1 - s2) + (z2 - z12)*s1 + (1 + z12 - z1 - z2)*(s1 - s12 + (1 + s12 - s1 - s2)*b1 + s12*b2)
            return b
        else:
            return NotImplemented
    def __ge__(self, other):
        return 1 - (self < other)
    def __gt__(self, other):
        return self.conv(other) < self
    def __le__(self, other):
        return self.conv(other) >= self
    @vectorize
    def __eq__(self, other):
        other = self.conv(other)
        # the sign can be both ways for zeroes
        both_zero = self.z * other.z
        return floatingpoint.EQZ(self.v - other.v, self.vlen, self.kappa) * \
            floatingpoint.EQZ(self.p - other.p, self.plen, self.kappa) * \
            (1 - self.s - other.s + 2 * self.s * other.s) * \
            (1 - both_zero) + both_zero
    def __ne__(self, other):
        return 1 - (self == other)
    def log2(self):
        # exponent of the most significant bit of the absolute value
        up = self.v.greater_than(1 << (self.vlen - 1), self.vlen, self.kappa)
        return self.p + self.vlen - 1 + up
    def round_to_int(self):
        """ Round to the nearest integer (as secret integer). """
        direction = self.p.greater_equal(-self.vlen, self.plen, self.kappa)
        right = self.v.right_shift(-self.p - 1, self.vlen + 1, self.kappa)
        up = right.mod2m(1, self.vlen + 1, self.kappa)
        right = right.right_shift(1, self.vlen + 1, self.kappa) + up
        abs_value = direction * right
        return self.s.if_else(-abs_value, abs_value)
    def value(self):
        """ Gets actual floating point value, if emulation is enabled. """
        return (1 - 2*self.s.value)*(1 - self.z.value)*self.v.value/float(2**self.p.value)
    def reveal(self):
        return cfloat(self.v.reveal(), self.p.reveal(), self.z.reveal(), self.s.reveal())
class cfloat(object):
    """ Helper holding the four revealed (clear) components of an
    sfloat, used for printing. """
    __slots__ = ['v', 'p', 'z', 's']
    def __init__(self, v, p, z, s):
        self.v = cint.conv(v)
        self.p = cint.conv(p)
        self.z = cint.conv(z)
        self.s = cint.conv(s)
    def print_float_plain(self):
        """ Output the value in decimal notation. """
        print_float_plain(self.v, self.p, self.z, self.s)
sfix.float_type = sfloat
# shorthand letters for register types, as used throughout the compiler
_types = {
    'c': cint,
    's': sint,
    'sg': sgf2n,
    'cg': cgf2n,
    'ci': regint,
}
def _get_type(t):
    """ Resolve a type shorthand ('c', 's', ...) to the actual class;
    anything that is not a known shorthand is returned unchanged. """
    return _types.get(t, t)
class Array(object):
    """ One-dimensional array of 'length' entries of 'value_type' in
    virtual-machine memory. 'length' may be None for single-element
    arrays of unknown length. """
    @classmethod
    def create_from(cls, l):
        """ Create an array from any iterable (the element type is taken
        from the first element); returns l unchanged if it already is an
        Array. """
        if isinstance(l, cls):
            return l
        tmp = list(l)
        res = cls(len(tmp), type(tmp[0]))
        res.assign(tmp)
        return res
    def __init__(self, length, value_type, address=None, debug=None):
        value_type = _get_type(value_type)
        self.address = address
        self.length = length
        self.value_type = value_type
        if address is None:
            self.address = self._malloc()
        # per-basic-block cache of computed entry addresses
        self.address_cache = {}
        self.debug = debug
    def _malloc(self):
        return self.value_type.malloc(self.length)
    def delete(self):
        if program:
            program.free(self.address, self.value_type.reg_type)
    def get_address(self, index):
        """ Return the memory address of entry 'index' (cached per basic
        block), supporting negative compile-time indices. """
        key = str(index)
        if isinstance(index, int) and self.length is not None:
            index += self.length * (index < 0)
            if index >= self.length or index < 0:
                raise IndexError('index %s, length %s' % \
                                     (str(index), str(self.length)))
        if (program.curr_block, key) not in self.address_cache:
            n = self.value_type.n_elements()
            length = self.length
            if n == 1:
                # length can be None for single-element arrays
                length = 0
            self.address_cache[program.curr_block, key] = \
                util.untuplify([self.address + index + i * length \
                                for i in range(n)])
            if self.debug:
                library.print_ln_if(index >= self.length, 'OF:' + self.debug)
                library.print_ln_if(self.address_cache[program.curr_block, key] >= program.allocated_mem[self.value_type.reg_type], 'AOF:' + self.debug)
        return self.address_cache[program.curr_block, key]
    def get_slice(self, index):
        # normalize a Python slice to (start, stop, step)
        if index.stop is None and self.length is None:
            raise CompilerError('Cannot slice array of unknown length')
        return index.start or 0, index.stop or self.length, index.step or 1
    def __getitem__(self, index):
        """ Read one entry, or a slice as a fresh Array copy. """
        if isinstance(index, slice):
            start, stop, step = self.get_slice(index)
            res_length = (stop - start - 1) // step + 1
            res = Array(res_length, self.value_type)
            @library.for_range(res_length)
            def f(i):
                res[i] = self[start+i*step]
            return res
        return self._load(self.get_address(index))
    def __setitem__(self, index, value):
        """ Write one entry, or a slice from any iterable. """
        if isinstance(index, slice):
            start, stop, step = self.get_slice(index)
            value = Array.create_from(value)
            source_index = MemValue(0)
            @library.for_range(start, stop, step)
            def f(i):
                self[i] = value[source_index]
                source_index.iadd(1)
            return
        self._store(value, self.get_address(index))
    # the following two are useful for compile-time lengths
    # and thus differ from the usual Python syntax
    def get_range(self, start, size):
        return [self[start + i] for i in range(size)]
    def set_range(self, start, values):
        for i, value in enumerate(values):
            self[start + i] = value
    def _load(self, address):
        return self.value_type.load_mem(address)
    def _store(self, value, address):
        self.value_type.conv(value).store_in_mem(address)
    def __len__(self):
        return self.length
    def __iter__(self):
        for i in range(self.length):
            yield self[i]
    def same_shape(self):
        # fresh array of the same length and type
        return Array(self.length, self.value_type)
    def assign(self, other, base=0):
        """ Assign from a vector, another array-like, or any iterable,
        starting at offset 'base'. """
        try:
            other = other.get_vector()
        except:
            # best effort: not everything assignable has get_vector()
            pass
        try:
            other.store_in_mem(self.get_address(base))
            assert len(self) >= other.size + base
        except AttributeError:
            # bug fix: the element-wise fallback used to ignore 'base'
            for i,j in enumerate(other):
                self[base + i] = j
        return self
    def assign_all(self, value, use_threads=True, conv=True):
        """ Fill the whole array with one value. """
        if conv:
            value = self.value_type.conv(value)
        mem_value = MemValue(value)
        # only spawn threads for large arrays
        n_threads = 8 if use_threads and len(self) > 2**20 else 1
        @library.for_range_multithread(n_threads, 1024, len(self))
        def f(i):
            self[i] = mem_value
        return self
    def get_vector(self, base=0, size=None):
        """ Read (part of) the array as a vector. """
        size = size or self.length
        return self.value_type.load_mem(self.get_address(base), size=size)
    def get_mem_value(self, index):
        return MemValue(self[index], self.get_address(index))
    def input_from(self, player, budget=None):
        """ Fill with secret input from a player.
        NOTE(review): 'budget' is accepted for interface compatibility
        with multi-arrays but unused here. """
        self.assign(self.value_type.get_input_from(player, size=len(self)))
    def __add__(self, other):
        # accept integer 0 so arrays work with sum(); avoid 'other is 0',
        # which is a SyntaxWarning on Python >= 3.8
        if isinstance(other, int) and other == 0:
            return self
        assert len(self) == len(other)
        return self.get_vector() + other
    def __sub__(self, other):
        assert len(self) == len(other)
        return self.get_vector() - other
    def __mul__(self, value):
        return self.get_vector() * value
    def __pow__(self, value):
        return self.get_vector() ** value
    __radd__ = __add__
    __rmul__ = __mul__
    def shuffle(self):
        """ In-place random permutation with public randomness
        (Fisher-Yates; note the slight modulo bias of '% (len - i)'). """
        @library.for_range(len(self))
        def _(i):
            j = regint.get_random(64) % (len(self) - i)
            tmp = self[i]
            self[i] = self[i + j]
            self[i + j] = tmp
    def reveal(self):
        """ Reveal all entries into a clear array. """
        return Array.create_from(x.reveal() for x in self)
# secret types use plain Arrays as their dynamic array implementation
sint.dynamic_array = Array
sgf2n.dynamic_array = Array
class SubMultiArray(object):
    """ View on part of a multidimensional array. Indexing yields an
    Array when two dimensions remain, otherwise a smaller
    SubMultiArray. """
    def __init__(self, sizes, value_type, address, index, debug=None):
        # sizes: tuple/list of dimensions; the view covers entry 'index'
        # of the first dimension of the parent at 'address'
        self.sizes = sizes
        self.value_type = _get_type(value_type)
        self.address = address + index * self.total_size()
        self.sub_cache = {}
        self.debug = debug
        if debug:
            library.print_ln_if(self.address + reduce(operator.mul, self.sizes) * self.value_type.n_elements() > program.allocated_mem[self.value_type.reg_type], 'AOF%d:' % len(self.sizes) + self.debug)
    def __getitem__(self, index):
        # raising StopIteration ends 'for' loops via the sequence
        # iteration protocol (no __iter__ defined)
        if util.is_constant(index) and index >= self.sizes[0]:
            raise StopIteration
        key = program.curr_block, str(index)
        if key not in self.sub_cache:
            if self.debug:
                library.print_ln_if(index >= self.sizes[0], \
                                    'OF%d:' % len(self.sizes) + self.debug)
            if len(self.sizes) == 2:
                self.sub_cache[key] = \
                    Array(self.sizes[1], self.value_type, \
                          self.address + index * self.sizes[1] *
                          self.value_type.n_elements(), \
                          debug=self.debug)
            else:
                self.sub_cache[key] = \
                    SubMultiArray(self.sizes[1:], self.value_type, \
                                  self.address, index, debug=self.debug)
        return self.sub_cache[key]
    def __setitem__(self, index, other):
        self[index].assign(other)
    def __len__(self):
        return self.sizes[0]
    def assign_all(self, value):
        """ Fill all entries with one value. """
        @library.for_range(self.sizes[0])
        def f(i):
            self[i].assign_all(value)
        return self
    def total_size(self):
        return reduce(operator.mul, self.sizes) * self.value_type.n_elements()
    def get_vector(self, base=0, size=None):
        """ Read the flattened content as one vector. """
        assert self.value_type.n_elements() == 1
        size = size or self.total_size()
        return self.value_type.load_mem(self.address + base, size=size)
    def assign_vector(self, vector, base=0):
        """ Assign the flattened content from one vector. """
        assert self.value_type.n_elements() == 1
        assert vector.size <= self.total_size()
        vector.store_in_mem(self.address + base)
    def assign(self, other):
        if self.value_type.n_elements() > 1:
            assert self.sizes == other.sizes
        self.assign_vector(other.get_vector())
    def same_shape(self):
        return MultiArray(self.sizes, self.value_type)
    def input_from(self, player, budget=None):
        """ Fill with secret input from a player, row by row. """
        @library.for_range_opt(self.sizes[0], budget=budget)
        def _(i):
            self[i].input_from(player, budget=budget)
    def schur(self, other):
        """ Element-wise (Schur/Hadamard) product. """
        assert self.sizes == other.sizes
        if len(self.sizes) == 2:
            res = Matrix(self.sizes[0], self.sizes[1], self.value_type)
        else:
            res = MultiArray(self.sizes, self.value_type)
        res.assign_vector(self.get_vector() * other.get_vector())
        return res
    def __add__(self, other):
        # accept integer 0 so instances work with sum(); avoid
        # 'other is 0', which is a SyntaxWarning on Python >= 3.8
        if isinstance(other, int) and other == 0:
            return self
        assert self.sizes == other.sizes
        if len(self.sizes) == 2:
            res = Matrix(self.sizes[0], self.sizes[1], self.value_type)
        else:
            res = MultiArray(self.sizes, self.value_type)
        res.assign_vector(self.get_vector() + other.get_vector())
        return res
    __radd__ = __add__
    def iadd(self, other):
        """ In-place element-wise addition. """
        assert self.sizes == other.sizes
        self.assign_vector(self.get_vector() + other.get_vector())
    def __mul__(self, other):
        return self.mul(other)
    def mul(self, other, res_params=None):
        """ Matrix(-vector) multiplication, with fallbacks for types
        without vectorized matrix support. """
        assert len(self.sizes) == 2
        if isinstance(other, Array):
            assert len(other) == self.sizes[1]
            if self.value_type.n_elements() == 1:
                # reinterpret the array as a column matrix in place
                matrix = Matrix(len(other), 1, other.value_type, \
                                address=other.address)
                res = self * matrix
                return Array(res.sizes[0], res.value_type, address=res.address)
            else:
                matrix = Matrix(len(other), 1, other.value_type)
                for i, x in enumerate(other):
                    matrix[i][0] = x
                res = self * matrix
                return Array.create_from(x[0] for x in res)
        elif isinstance(other, SubMultiArray):
            assert len(other.sizes) == 2
            assert other.sizes[0] == self.sizes[1]
            if res_params is not None:
                # result type with custom parameters (e.g. squant)
                class t(self.value_type):
                    pass
                t.params = res_params
            else:
                t = self.value_type
            res_matrix = Matrix(self.sizes[0], other.sizes[1], t)
            try:
                if max(res_matrix.sizes) > 1000:
                    raise AttributeError()
                A = self.get_vector()
                B = other.get_vector()
                res_matrix.assign_vector(
                    self.value_type.matrix_mul(A, B, self.sizes[1],
                                               res_params))
            except (AttributeError, AssertionError):
                # fallback for sfloat etc.
                @library.for_range_opt(self.sizes[0])
                def _(i):
                    try:
                        res_matrix[i] = self.value_type.row_matrix_mul(
                            self[i], other, res_params)
                    except AttributeError:
                        # fallback for binary circuits
                        @library.for_range(other.sizes[1])
                        def _(j):
                            res_matrix[i][j] = 0
                            @library.for_range(self.sizes[1])
                            def _(k):
                                res_matrix[i][j] += self[i][k] * other[k][j]
            return res_matrix
        else:
            raise NotImplementedError
    def budget_mul(self, other, n_rows, row, n_columns, column, reduce=True,
                   res=None):
        """ Generic matrix multiplication with accessor functions for
        rows and columns; 'reduce' selects reduced or unreduced dot
        products. """
        assert len(self.sizes) == 2
        assert len(other.sizes) == 2
        if res is None:
            if reduce:
                res_matrix = Matrix(n_rows, n_columns, self.value_type)
            else:
                res_matrix = Matrix(n_rows, n_columns, \
                                    self.value_type.unreduced_type)
        else:
            res_matrix = res
        @library.for_range_opt(n_rows)
        def _(i):
            @library.for_range_opt(n_columns)
            def _(j):
                col = column(other, j)
                r = row(self, i)
                if reduce:
                    res_matrix[i][j] = self.value_type.dot_product(r, col)
                else:
                    entry = self.value_type.unreduced_dot_product(r, col)
                    res_matrix[i][j] = entry
        return res_matrix
    def plain_mul(self, other, res=None):
        """ self * other """
        assert other.sizes[0] == self.sizes[1]
        return self.budget_mul(other, self.sizes[0], lambda x, i: x[i], \
                               other.sizes[1], \
                               lambda x, j: [x[k][j] for k in range(len(x))],
                               res=res)
    def mul_trans(self, other):
        """ self * other^T """
        assert other.sizes[1] == self.sizes[1]
        return self.budget_mul(other, self.sizes[0], lambda x, i: x[i], \
                               other.sizes[0], lambda x, j: x[j])
    def trans_mul(self, other, reduce=True, res=None):
        """ self^T * other """
        assert other.sizes[0] == self.sizes[0]
        return self.budget_mul(other, self.sizes[1], \
                               lambda x, j: [x[k][j] for k in range(len(x))], \
                               other.sizes[1], \
                               lambda x, j: [x[k][j] for k in range(len(x))],
                               reduce=reduce, res=res)
    def transpose(self):
        """ Return a transposed copy. """
        assert len(self.sizes) == 2
        res = Matrix(self.sizes[1], self.sizes[0], self.value_type)
        @library.for_range_opt(self.sizes[1])
        def _(i):
            @library.for_range_opt(self.sizes[0])
            def _(j):
                res[i][j] = self[j][i]
        return res
class MultiArray(SubMultiArray):
    """ Multidimensional array of at least two dimensions, backed by a
    single flat Array. """
    def __init__(self, sizes, value_type, debug=None, address=None):
        # bug fix: reject one-dimensional sizes *before* allocating the
        # backing memory (the old code leaked the allocation on error)
        if len(sizes) < 2:
            raise CompilerError('Use Array')
        if isinstance(address, Array):
            self.array = address
        else:
            self.array = Array(reduce(operator.mul, sizes), \
                               value_type, address=address)
        SubMultiArray.__init__(self, sizes, value_type, self.array.address, 0, \
                               debug=debug)
class Matrix(MultiArray):
    """ Two-dimensional MultiArray with 'rows' x 'columns' entries. """
    def __init__(self, rows, columns, value_type, debug=None, address=None):
        super(Matrix, self).__init__([rows, columns], value_type,
                                     debug=debug, address=address)
class VectorArray(object):
    """ Array of fixed-size vectors, stored contiguously; indexing reads
    or writes one whole vector at a time. """
    def __init__(self, length, value_type, vector_size, address=None):
        self.array = Array(length * vector_size, value_type, address)
        self.vector_size = vector_size
        self.value_type = value_type
    def __getitem__(self, index):
        # load vector number 'index'
        return self.value_type.load_mem(self.array.address + \
                                        index * self.vector_size,
                                        size=self.vector_size)
    def __setitem__(self, index, value):
        # store vector number 'index'; sizes must match exactly
        if value.size != self.vector_size:
            raise CompilerError('vector size mismatch')
        value.store_in_mem(self.array.address + index * self.vector_size)
class _mem(_number):
    """ Base class for values living in memory: every operator delegates
    to read()/write() (implemented by subclasses), so instances can be
    used like plain registers. In-place operators write back. """
    __add__ = lambda self,other: self.read() + other
    __sub__ = lambda self,other: self.read() - other
    __mul__ = lambda self,other: self.read() * other
    __truediv__ = lambda self,other: self.read() / other
    __mod__ = lambda self,other: self.read() % other
    __pow__ = lambda self,other: self.read() ** other
    __neg__ = lambda self,other: -self.read()
    __lt__ = lambda self,other: self.read() < other
    __gt__ = lambda self,other: self.read() > other
    __le__ = lambda self,other: self.read() <= other
    __ge__ = lambda self,other: self.read() >= other
    __eq__ = lambda self,other: self.read() == other
    __ne__ = lambda self,other: self.read() != other
    __and__ = lambda self,other: self.read() & other
    __xor__ = lambda self,other: self.read() ^ other
    __or__ = lambda self,other: self.read() | other
    __lshift__ = lambda self,other: self.read() << other
    __rshift__ = lambda self,other: self.read() >> other
    __radd__ = lambda self,other: other + self.read()
    __rsub__ = lambda self,other: other - self.read()
    __rmul__ = lambda self,other: other * self.read()
    __rtruediv__ = lambda self,other: other / self.read()
    __rmod__ = lambda self,other: other % self.read()
    __rand__ = lambda self,other: other & self.read()
    __rxor__ = lambda self,other: other ^ self.read()
    __ror__ = lambda self,other: other | self.read()
    # in-place operators read, compute, and write the result back
    __iadd__ = lambda self,other: self.write(self.read() + other)
    __isub__ = lambda self,other: self.write(self.read() - other)
    __imul__ = lambda self,other: self.write(self.read() * other)
    __idiv__ = lambda self,other: self.write(self.read() / other)
    __imod__ = lambda self,other: self.write(self.read() % other)
    __ipow__ = lambda self,other: self.write(self.read() ** other)
    __iand__ = lambda self,other: self.write(self.read() & other)
    __ixor__ = lambda self,other: self.write(self.read() ^ other)
    __ior__ = lambda self,other: self.write(self.read() | other)
    __ilshift__ = lambda self,other: self.write(self.read() << other)
    __irshift__ = lambda self,other: self.write(self.read() >> other)
    # named aliases for use where operator syntax is inconvenient
    iadd = __iadd__
    isub = __isub__
    imul = __imul__
    idiv = __idiv__
    imod = __imod__
    ipow = __ipow__
    iand = __iand__
    ixor = __ixor__
    ior = __ior__
    ilshift = __ilshift__
    irshift = __irshift__
    store_in_mem = lambda self,address: self.read().store_in_mem(address)
class MemValue(_mem):
    """ Single value in memory, cached in a register and re-loaded at
    most once per basic block. """
    # bug fix: 'value_type' is assigned in __init__ but was missing from
    # __slots__, so it silently landed in the instance __dict__,
    # defeating the purpose of __slots__
    __slots__ = ['last_write_block', 'reg_type', 'register', 'address',
                 'value_type', 'deleted']
    @classmethod
    def if_necessary(cls, value):
        # leave compile-time constants alone
        if util.is_constant_float(value):
            return value
        else:
            return cls(value)
    def __init__(self, value, address=None):
        # value: initial value (Python int becomes regint)
        # address: reuse an existing memory cell instead of allocating
        self.last_write_block = None
        if isinstance(value, int):
            self.value_type = regint
            value = regint(value)
        elif isinstance(value, MemValue):
            self.value_type = value.value_type
        else:
            self.value_type = type(value)
        self.deleted = False
        if address is None:
            self.address = self.value_type.malloc(1)
            self.write(value)
        else:
            self.address = address
    def delete(self):
        self.value_type.free(self.address)
        self.deleted = True
    def check(self):
        if self.deleted:
            raise CompilerError('MemValue deleted')
    def read(self):
        """ Return the value, loading from memory at most once per
        basic block. """
        self.check()
        if program.curr_block != self.last_write_block:
            self.register = library.load_mem(self.address, self.value_type)
            self.last_write_block = program.curr_block
        return self.register
    def write(self, value):
        """ Store a new value (converted to the stored type if it is a
        Python int or another MemValue). """
        self.check()
        if isinstance(value, MemValue):
            self.register = value.read()
        elif isinstance(value, int):
            self.register = self.value_type(value)
        else:
            self.register = value
        if not isinstance(self.register, self.value_type):
            raise CompilerError('Mismatch in register type, cannot write \
                %s to %s' % (type(self.register), self.value_type))
        self.register.store_in_mem(self.address)
        self.last_write_block = program.curr_block
        return self
    def reveal(self):
        return self.read().reveal()
    # comparison and utility methods simply delegate to the stored value
    less_than = lambda self,other,bit_length=None,security=None: \
        self.read().less_than(other,bit_length,security)
    greater_than = lambda self,other,bit_length=None,security=None: \
        self.read().greater_than(other,bit_length,security)
    less_equal = lambda self,other,bit_length=None,security=None: \
        self.read().less_equal(other,bit_length,security)
    greater_equal = lambda self,other,bit_length=None,security=None: \
        self.read().greater_equal(other,bit_length,security)
    equal = lambda self,other,bit_length=None,security=None: \
        self.read().equal(other,bit_length,security)
    not_equal = lambda self,other,bit_length=None,security=None: \
        self.read().not_equal(other,bit_length,security)
    pow2 = lambda self,*args,**kwargs: self.read().pow2(*args, **kwargs)
    mod2m = lambda self,*args,**kwargs: self.read().mod2m(*args, **kwargs)
    right_shift = lambda self,*args,**kwargs: self.read().right_shift(*args, **kwargs)
    bit_decompose = lambda self,*args,**kwargs: self.read().bit_decompose(*args, **kwargs)
    if_else = lambda self,*args,**kwargs: self.read().if_else(*args, **kwargs)
    expand_to_vector = lambda self,*args,**kwargs: \
        self.read().expand_to_vector(*args, **kwargs)
    def __repr__(self):
        return 'MemValue(%s,%d)' % (self.value_type, self.address)
class MemFloat(_mem):
    """ sfloat value in memory, stored as one MemValue per component. """
    def __init__(self, *args):
        val = sfloat(*args)
        self.v, self.p, self.z, self.s = \
            (MemValue(x) for x in (val.v, val.p, val.z, val.s))
    def write(self, *args):
        """ Replace the stored value with sfloat(*args). """
        val = sfloat(*args)
        for dst, src in zip((self.v, self.p, self.z, self.s),
                            (val.v, val.p, val.z, val.s)):
            dst.write(src)
    def read(self):
        """ Return the stored value as an sfloat. """
        return sfloat(self.v, self.p, self.z, self.s)
class MemFix(_mem):
    """ Fixed-point value (sfix or cfix) in memory; the stored integer
    representation determines which type read() returns. """
    def __init__(self, *args):
        # NOTE(review): type(*args) only behaves as a type check for
        # exactly one argument; with several arguments it would hit the
        # three-argument form of type() -- confirm intended usage
        arg_type = type(*args)
        if arg_type == sfix:
            value = sfix(*args)
        elif arg_type == cfix:
            value = cfix(*args)
        else:
            raise CompilerError('MemFix init argument error')
        self.reg_type = value.v.reg_type
        self.v = MemValue(value.v)
    def write(self, *args):
        # writing always goes through sfix
        value = sfix(*args)
        self.v.write(value.v)
    def reveal(self):
        return cfix(self.v.reveal())
    def read(self):
        # return sfix or cfix depending on what is currently stored
        val = self.v.read()
        if isinstance(val, sint):
            return sfix(val)
        else:
            return cfix(val)
def getNamedTupleType(*names):
    """ Return a named-tuple-like class with fields 'names' supporting
    element-wise arithmetic, plus an array type holding one Array per
    field. """
    class NamedTuple(object):
        class NamedTupleArray(object):
            # one Array per field, indexed jointly
            def __init__(self, size, t):
                from . import types
                self.arrays = [types.Array(size, t) for i in range(len(names))]
            def __getitem__(self, index):
                return NamedTuple(array[index] for array in self.arrays)
            def __setitem__(self, index, item):
                for array, value in zip(self.arrays, item):
                    array[index] = value
        @classmethod
        def get_array(cls, size, t):
            return cls.NamedTupleArray(size, t)
        def __init__(self, *args):
            # allow both NamedTuple(a, b, ...) and NamedTuple(iterable)
            if len(args) == 1:
                args = args[0]
            for name, value in zip(names, args):
                self.__dict__[name] = value
        def __iter__(self):
            for name in names:
                yield self.__dict__[name]
        def __add__(self, other):
            return NamedTuple(i + j for i, j in zip(self, other))
        def __sub__(self, other):
            return NamedTuple(i - j for i, j in zip(self, other))
        def __xor__(self, other):
            return NamedTuple(i ^ j for i, j in zip(self, other))
        def __mul__(self, other):
            return NamedTuple(other * i for i in self)
        __rmul__ = __mul__
        __rxor__ = __xor__
        def reveal(self):
            # bug fix: 'self.__type__' was never defined anywhere, so
            # reveal() always raised AttributeError; build the same
            # named tuple type from the revealed fields instead
            return NamedTuple(x.reveal() for x in self)
    return NamedTuple
from . import library
| 32.792731 | 202 | 0.561166 | from Compiler.program import Tape
from Compiler.exceptions import *
from Compiler.instructions import *
from Compiler.instructions_base import *
from .floatingpoint import two_power
from . import comparison, floatingpoint
import math
from . import util
import operator
from functools import reduce
class ClientMessageType:
    """Numeric tags identifying the kind of payload exchanged with
    external clients over sockets (passed as message_type to the
    *socket* instructions below)."""
    NoType = 0
    TripleShares = 1
    ClearModpInt = 2
    Int32 = 3
    ClearModpFix = 4
class MPCThread(object):
    """Wrapper around a callable to be run as a separate MPC thread on
    its own tape, managed via program.start_thread/stop_thread.

    :param target: callable to run in the thread
    :param name: thread name, also used for the tape name
    :param args: arguments stored for the thread
    :param runtime_arg: default runtime argument used by start()
    """
    def __init__(self, target, name, args=None, runtime_arg=None):
        if not callable(target):
            raise CompilerError('Target %s for thread %s is not callable' % (target,name))
        self.name = name
        self.tape = Tape(program.name + '-' + name, program)
        self.target = target
        # fix: avoid the shared-mutable-default pitfall of 'args=[]'
        self.args = [] if args is None else args
        self.runtime_arg = runtime_arg
        # number of outstanding start() calls not yet join()ed
        self.running = 0

    def start(self, runtime_arg=None):
        """Start (another) run of the thread."""
        self.running += 1
        program.start_thread(self, runtime_arg or self.runtime_arg)

    def join(self):
        """Account for one finished run; raises if none is running."""
        if not self.running:
            raise CompilerError('Thread %s is not running' % self.name)
        self.running -= 1
        program.stop_thread(self)
def vectorize(operation):
    """Decorator: run *operation* with the global vector size set to
    self.size, after checking that a register/sfloat first operand has a
    matching size."""
    def vectorized_operation(self, *args, **kwargs):
        if len(args):
            head = args[0]
            if isinstance(head, (Tape.Register, sfloat)) \
                    and head.size != self.size:
                raise CompilerError('Different vector sizes of operands')
        set_global_vector_size(self.size)
        result = operation(self, *args, **kwargs)
        reset_global_vector_size()
        return result
    return vectorized_operation
def vectorize_max(operation):
    """Decorator: run *operation* with the global vector size set to the
    maximum of self.size and any argument sizes (arguments without a
    size attribute are ignored)."""
    def vectorized_operation(self, *args, **kwargs):
        sizes = [self.size]
        for arg in args:
            try:
                sizes.append(arg.size)
            except AttributeError:
                pass
        set_global_vector_size(max(sizes))
        result = operation(self, *args, **kwargs)
        reset_global_vector_size()
        return result
    return vectorized_operation
def vectorized_classmethod(function):
    """Turn *function* into a classmethod that honours an optional
    'size' keyword by setting the global vector size around the call."""
    def vectorized_function(cls, *args, **kwargs):
        size = kwargs.pop('size', None)
        if not size:
            return function(cls, *args, **kwargs)
        set_global_vector_size(size)
        result = function(cls, *args, **kwargs)
        reset_global_vector_size()
        return result
    return classmethod(vectorized_function)
def vectorize_init(function):
    """Decorator for constructors: derive the vector size from a
    register/sfloat second argument and/or an explicit 'size' keyword,
    and run the constructor under that global vector size."""
    def vectorized_init(*args, **kwargs):
        size = None
        if len(args) > 1 and isinstance(args[1], (Tape.Register, sfloat)):
            size = args[1].size
            if kwargs.get('size') is not None and kwargs['size'] != size:
                raise CompilerError('Mismatch in vector size')
        if kwargs.get('size'):
            size = kwargs['size']
        if size is None:
            return function(*args, **kwargs)
        set_global_vector_size(size)
        result = function(*args, **kwargs)
        reset_global_vector_size()
        return result
    return vectorized_init
def set_instruction_type(operation):
    """Decorator: select the global instruction set (modp vs gf2n) from
    self.instruction_type for the duration of the call."""
    def instruction_typed_operation(self, *args, **kwargs):
        set_global_instruction_type(self.instruction_type)
        res = operation(self, *args, **kwargs)
        reset_global_instruction_type()
        return res
    return instruction_typed_operation
def read_mem_value(operation):
    """Decorator: if the first argument is a MemValue, transparently
    replace it with its read() result before calling *operation*."""
    def read_mem_operation(self, *args, **kwargs):
        if args and isinstance(args[0], MemValue):
            args = (args[0].read(),) + args[1:]
        return operation(self, *args, **kwargs)
    return read_mem_operation
class _number(object):
    """Mixin with generic arithmetic conveniences shared by number-like
    register types (which provide add/mul/square and comparisons)."""
    def square(self):
        return self * self
    def __add__(self, other):
        # NOTE: 'is 0' (identity on the literal int 0) is deliberate:
        # it short-circuits without emitting an addition, while '== 0'
        # could dispatch to a register comparison.  Same below.
        if other is 0:
            return self
        else:
            return self.add(other)
    def __mul__(self, other):
        if other is 0:
            return 0
        elif other is 1:
            return self
        else:
            return self.mul(other)
    __radd__ = __add__
    __rmul__ = __mul__
    @vectorize
    def __pow__(self, exp):
        # square-and-multiply for non-negative integer exponents only
        if isinstance(exp, int) and exp >= 0:
            if exp == 0:
                return self.__class__(1)
            # binary digits of exp after the implicit leading 1
            exp = bin(exp)[3:]
            res = self
            for i in exp:
                res = res.square()
                if i == '1':
                    res *= self
            return res
        else:
            return NotImplemented
    def mul_no_reduce(self, other, res_params=None):
        # default: plain multiplication (subclasses may defer reduction)
        return self * other
    def reduce_after_mul(self):
        return self
    def pow2(self, bit_length=None, security=None):
        return 2**self
    def min(self, other):
        # oblivious minimum via comparison + if_else
        return (self < other).if_else(self, other)
    def max(self, other):
        return (self < other).if_else(other, self)
class _int(object):
def if_else(self, a, b):
if hasattr(a, 'for_mux'):
f, a, b = a.for_mux(b)
else:
f = lambda x: x
return f(self * (a - b) + b)
def cond_swap(self, a, b):
prod = self * (a - b)
return a - prod, b + prod
def bit_xor(self, other):
return self + other - 2 * self * other
class _gf2n(object):
def if_else(self, a, b):
return b ^ self * self.hard_conv(a ^ b)
def cond_swap(self, a, b, t=None):
prod = self * self.hard_conv(a ^ b)
res = a ^ prod, b ^ prod
if t is None:
return res
else:
return tuple(t.conv(r) for r in res)
def bit_xor(self, other):
return self ^ other
class _structure(object):
    """Mixin providing container constructors for register types."""
    # wraps a converted value in a MemValue (the global class -- class
    # attributes are not in scope inside the lambda body)
    MemValue = classmethod(lambda cls, value: MemValue(cls.conv(value)))
    @classmethod
    def Array(cls, size, *args, **kwargs):
        """Array of this type."""
        return Array(size, cls, *args, **kwargs)
    @classmethod
    def Matrix(cls, rows, columns, *args, **kwargs):
        """Matrix of this type."""
        return Matrix(rows, columns, cls, *args, **kwargs)
    @classmethod
    def row_matrix_mul(cls, row, matrix, res_params=None):
        """Dot product of a row vector with a matrix (row of vectors)."""
        products = (row[k].mul_no_reduce(matrix[k].get_vector(), res_params)
                    for k in range(len(row)))
        return sum(products).reduce_after_mul()
class _register(Tape.Register, _number, _structure):
    """Common base of all compile-time register types (clear, secret,
    integer); handles conversion, memory access and vector expansion."""
    @staticmethod
    def n_elements():
        # one underlying register per logical element
        return 1
    @vectorized_classmethod
    def conv(cls, val):
        """Convert val to this type, recursing into containers; falls
        back to the constructor."""
        if isinstance(val, MemValue):
            val = val.read()
        if isinstance(val, cls):
            return val
        elif not isinstance(val, _register):
            try:
                # container: convert element-wise, keeping the container type
                return type(val)(cls.conv(v) for v in val)
            except TypeError:
                pass
            except CompilerError:
                pass
        return cls(val)
    @vectorized_classmethod
    @read_mem_value
    def hard_conv(cls, val):
        """Like conv(), but requires the exact class and lets the value
        convert itself via hard_conv_me() if it knows how."""
        if type(val) == cls:
            return val
        elif not isinstance(val, _register):
            try:
                return val.hard_conv_me(cls)
            except AttributeError:
                try:
                    return type(val)(cls.hard_conv(v) for v in val)
                except TypeError:
                    pass
        return cls(val)
    @vectorized_classmethod
    @set_instruction_type
    def _load_mem(cls, address, direct_inst, indirect_inst):
        # load from memory, using the indirect instruction for
        # register-valued (runtime) addresses
        res = cls()
        if isinstance(address, _register):
            indirect_inst(res, cls._expand_address(address,
                                                   get_global_vector_size()))
        else:
            direct_inst(res, address)
        return res
    @staticmethod
    def _expand_address(address, size):
        # expand a scalar address into a vector of consecutive addresses
        address = regint.conv(address)
        if size > 1 and address.size == 1:
            res = regint(size=size)
            for i in range(size):
                movint(res[i], address + regint(i, size=1))
            return res
        else:
            return address
    @set_instruction_type
    def _store_in_mem(self, address, direct_inst, indirect_inst):
        # counterpart of _load_mem for stores
        if isinstance(address, _register):
            indirect_inst(self, self._expand_address(address, self.size))
        else:
            direct_inst(self, address)
    @classmethod
    def prep_res(cls, other):
        return cls()
    @staticmethod
    def bit_compose(bits):
        """Compose an integer value from little-endian bits."""
        return sum(b << i for i,b in enumerate(bits))
    @classmethod
    def malloc(cls, size):
        return program.malloc(size, cls)
    @set_instruction_type
    def __init__(self, reg_type, val, size):
        # a tuple/list value fixes the vector size to its length
        if isinstance(val, (tuple, list)):
            size = len(val)
        super(_register, self).__init__(reg_type, program.curr_tape, size=size)
        if isinstance(val, int):
            self.load_int(val)
        elif isinstance(val, (tuple, list)):
            # element-wise move into the vector
            for i, x in enumerate(val):
                self.mov(self[i], type(self)(x, size=1))
        elif val is not None:
            self.load_other(val)
    def sizeof(self):
        return self.size
    def extend(self, n):
        # no-op here; subclasses may widen the bit length
        return self
    def expand_to_vector(self, size=None):
        """Replicate a size-1 register to a vector of the given (or
        global) size; requires self.size == 1 if sizes differ."""
        if size is None:
            size = get_global_vector_size()
        if self.size == size:
            return self
        assert self.size == 1
        res = type(self)(size=size)
        for i in range(size):
            movs(res[i], self)
        return res
class _clear(_register):
    """Base class of clear (public) register types; implements the
    generic clear-clear and clear-immediate instruction dispatch."""
    __slots__ = []
    mov = staticmethod(movc)
    @vectorized_classmethod
    @set_instruction_type
    def protect_memory(cls, start, end):
        program.curr_tape.start_new_basicblock(name='protect-memory')
        protectmemc(regint(start), regint(end))
    @set_instruction_type
    @vectorize
    def load_other(self, val):
        if isinstance(val, type(self)):
            movc(self, val)
        else:
            self.convert_from(val)
    @vectorize
    @read_mem_value
    def convert_from(self, val):
        # conversion goes via regint
        if not isinstance(val, regint):
            val = regint(val)
        convint(self, val)
    @set_instruction_type
    @vectorize
    def print_reg(self, comment=''):
        print_reg(self, comment)
    @set_instruction_type
    @vectorize
    def print_reg_plain(self):
        print_reg_plain(self)
    @set_instruction_type
    @vectorize
    def raw_output(self):
        raw_output(self)
    @set_instruction_type
    @read_mem_value
    @vectorize
    def clear_op(self, other, c_inst, ci_inst, reverse=False):
        """Dispatch a binary operation: register-register (c_inst),
        register-immediate (ci_inst) for small ints, else promote the
        int to a register first."""
        cls = self.__class__
        res = self.prep_res(other)
        if isinstance(other, cls):
            c_inst(res, self, other)
        elif isinstance(other, int):
            if self.in_immediate_range(other):
                ci_inst(res, self, other)
            else:
                if reverse:
                    c_inst(res, cls(other), self)
                else:
                    c_inst(res, self, cls(other))
        else:
            return NotImplemented
        return res
    @set_instruction_type
    @read_mem_value
    @vectorize
    def coerce_op(self, other, inst, reverse=False):
        """Like clear_op but always promotes an int operand to a
        register (no immediate form)."""
        cls = self.__class__
        res = cls()
        if isinstance(other, int):
            other = cls(other)
        elif not isinstance(other, cls):
            return NotImplemented
        if reverse:
            inst(res, other, self)
        else:
            inst(res, self, other)
        return res
    def add(self, other):
        return self.clear_op(other, addc, addci)
    def mul(self, other):
        return self.clear_op(other, mulc, mulci)
    def __sub__(self, other):
        return self.clear_op(other, subc, subci)
    def __rsub__(self, other):
        return self.clear_op(other, subc, subcfi, True)
    def __truediv__(self, other):
        return self.clear_op(other, divc, divci)
    def __rtruediv__(self, other):
        return self.coerce_op(other, divc, True)
    def __eq__(self, other):
        # equality is delegated to regint
        if isinstance(other, (_clear,int)):
            return regint(self) == other
        else:
            return NotImplemented
    def __ne__(self, other):
        return 1 - (self == other)
    def __and__(self, other):
        return self.clear_op(other, andc, andci)
    def __xor__(self, other):
        return self.clear_op(other, xorc, xorci)
    def __or__(self, other):
        return self.clear_op(other, orc, orci)
    __rand__ = __and__
    __rxor__ = __xor__
    __ror__ = __or__
    def reveal(self):
        # already clear
        return self
class cint(_clear, _int):
    """Clear integer modulo the prime (modp domain)."""
    __slots__ = []
    instruction_type = 'modp'
    reg_type = 'c'
    @vectorized_classmethod
    def read_from_socket(cls, client_id, n=1):
        """Read n clear values from a client socket; scalar if n == 1."""
        res = [cls() for i in range(n)]
        readsocketc(client_id, *res)
        if n == 1:
            return res[0]
        else:
            return res
    @vectorize
    def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
        writesocketc(client_id, message_type, self)
    # NOTE(review): this second definition shadows the instance method
    # of the same name directly above -- only this classmethod form is
    # reachable via the class attribute; confirm intended.
    @vectorized_classmethod
    def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
        writesocketc(client_id, message_type, *values)
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, ldmc, ldmci)
    def store_in_mem(self, address):
        self._store_in_mem(address, stmc, stmci)
    @staticmethod
    def in_immediate_range(value):
        # signed 32-bit immediate
        return value < 2**31 and value >= -2**31
    def __init__(self, val=None, size=None):
        super(cint, self).__init__('c', val=val, size=size)
    @vectorize
    def load_int(self, val):
        """Load an arbitrary-size int, splitting values outside the
        immediate range into base-(2^31-1) chunks."""
        if val:
            program.curr_tape.require_bit_length(1 + int(math.ceil(math.log(abs(val)))))
        if self.in_immediate_range(val):
            ldi(self, val)
        else:
            # NB: 'max' and 'sum' here shadow the builtins (locals only)
            max = 2**31 - 1
            sign = abs(val) // val
            val = abs(val)
            chunks = []
            while val:
                mod = val % max
                val = (val - mod) // max
                chunks.append(mod)
            sum = cint(sign * chunks.pop())
            for i,chunk in enumerate(reversed(chunks)):
                sum *= max
                if i == len(chunks) - 1:
                    addci(self, sum, sign * chunk)
                elif chunk:
                    sum += sign * chunk
    def to_regint(self, n_bits=None, dest=None):
        """Convert to regint, optionally into an existing destination."""
        dest = regint() if dest is None else dest
        convmodp(dest, self, bitlength=n_bits)
        return dest
    def __mod__(self, other):
        return self.clear_op(other, modc, modci)
    def __rmod__(self, other):
        return self.coerce_op(other, modc, True)
    def __lt__(self, other):
        # comparisons are delegated to regint
        if isinstance(other, (type(self),int)):
            return regint(self) < other
        else:
            return NotImplemented
    def __gt__(self, other):
        if isinstance(other, (type(self),int)):
            return regint(self) > other
        else:
            return NotImplemented
    def __le__(self, other):
        return 1 - (self > other)
    def __ge__(self, other):
        return 1 - (self < other)
    @vectorize
    def __eq__(self, other):
        """Equality over the full bit length, compared in 64-bit limbs.
        Note this rebinds the locals 'self'/'other' as it shifts."""
        if not isinstance(other, (_clear, int)):
            return NotImplemented
        res = 1
        remaining = program.bit_length
        while remaining > 0:
            if isinstance(other, cint):
                o = other.to_regint(min(remaining, 64))
            else:
                o = other % 2 ** 64
            res *= (self.to_regint(min(remaining, 64)) == o)
            self >>= 64
            other >>= 64
            remaining -= 64
        return res
    def __lshift__(self, other):
        return self.clear_op(other, shlc, shlci)
    def __rshift__(self, other):
        return self.clear_op(other, shrc, shrci)
    def __neg__(self):
        return 0 - self
    def __abs__(self):
        return (self >= 0).if_else(self, -self)
    @vectorize
    def __invert__(self):
        res = cint()
        notc(res, self, program.bit_length)
        return res
    def __rpow__(self, base):
        # only 2**x is supported
        if base == 2:
            return 1 << self
        else:
            return NotImplemented
    @vectorize
    def __rlshift__(self, other):
        return cint(other) << self
    @vectorize
    def __rrshift__(self, other):
        return cint(other) >> self
    @read_mem_value
    def mod2m(self, other, bit_length=None, signed=None):
        return self % 2**other
    @read_mem_value
    def right_shift(self, other, bit_length=None):
        return self >> other
    @read_mem_value
    def greater_than(self, other, bit_length=None):
        return self > other
    def bit_decompose(self, bit_length=None):
        """Little-endian bit decomposition."""
        if bit_length == 0:
            return []
        bit_length = bit_length or program.bit_length
        return floatingpoint.bits(self, bit_length)
    def legendre(self):
        """Legendre symbol of this value modulo the prime."""
        res = cint()
        legendrec(res, self)
        return res
    def digest(self, num_bytes):
        res = cint()
        digestc(res, self, num_bytes)
        return res
    def print_if(self, string):
        # print the string iff this value is non-zero
        cond_print_str(self, string)
class cgf2n(_clear, _gf2n):
    """Clear GF(2^n) value."""
    __slots__ = []
    instruction_type = 'gf2n'
    reg_type = 'cg'
    @classmethod
    def bit_compose(cls, bits, step=None):
        """Compose a value from bits placed 'step' positions apart."""
        size = bits[0].size
        res = cls(size=size)
        vgbitcom(size, res, step or 1, *bits)
        return res
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, gldmc, gldmci)
    def store_in_mem(self, address):
        self._store_in_mem(address, gstmc, gstmci)
    @staticmethod
    def in_immediate_range(value):
        # unsigned 32-bit immediate
        return value < 2**32 and value >= 0
    def __init__(self, val=None, size=None):
        super(cgf2n, self).__init__('cg', val=val, size=size)
    @vectorize
    def load_int(self, val):
        """Load a non-negative int, in 32-bit chunks if needed."""
        if val < 0:
            raise CompilerError('Negative GF2n immediate')
        if self.in_immediate_range(val):
            gldi(self, val)
        else:
            # NB: 'sum' here shadows the builtin (local only)
            chunks = []
            while val:
                mod = val % 2**32
                val >>= 32
                chunks.append(mod)
            sum = cgf2n(chunks.pop())
            for i,chunk in enumerate(reversed(chunks)):
                sum <<= 32
                if i == len(chunks) - 1:
                    gaddci(self, sum, chunk)
                elif chunk:
                    sum += chunk
    def __mul__(self, other):
        return super(cgf2n, self).__mul__(other)
    def __neg__(self):
        # negation is the identity in characteristic 2
        return self
    @vectorize
    def __invert__(self):
        res = cgf2n()
        gnotc(res, self)
        return res
    @vectorize
    def __lshift__(self, other):
        # shifts only by public int amounts
        if isinstance(other, int):
            res = cgf2n()
            gshlci(res, self, other)
            return res
        else:
            return NotImplemented
    @vectorize
    def __rshift__(self, other):
        if isinstance(other, int):
            res = cgf2n()
            gshrci(res, self, other)
            return res
        else:
            return NotImplemented
    @vectorize
    def bit_decompose(self, bit_length=None, step=None):
        """Decompose into bits taken every 'step' positions."""
        bit_length = bit_length or program.galois_length
        step = step or 1
        res = [type(self)() for _ in range(bit_length // step)]
        gbitdec(self, step, *res)
        return res
class regint(_register, _int):
    """Clear 64-bit integer register; arithmetic and comparisons use
    dedicated integer instructions."""
    __slots__ = []
    reg_type = 'ci'
    instruction_type = 'modp'
    mov = staticmethod(movint)
    @classmethod
    def protect_memory(cls, start, end):
        program.curr_tape.start_new_basicblock(name='protect-memory')
        protectmemint(regint(start), regint(end))
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, ldmint, ldminti)
    def store_in_mem(self, address):
        self._store_in_mem(address, stmint, stminti)
    @vectorized_classmethod
    def pop(cls):
        """Pop a value off the runtime integer stack."""
        res = cls()
        popint(res)
        return res
    @vectorized_classmethod
    def push(cls, value):
        """Push a value onto the runtime integer stack."""
        pushint(cls.conv(value))
    @vectorized_classmethod
    def get_random(cls, bit_length):
        """Local (insecure) random value of the given bit length."""
        if isinstance(bit_length, int):
            bit_length = regint(bit_length)
        res = cls()
        rand(res, bit_length)
        return res
    @vectorized_classmethod
    def read_from_socket(cls, client_id, n=1):
        """Read n integers from a client socket; scalar if n == 1."""
        res = [cls() for i in range(n)]
        readsocketint(client_id, *res)
        if n == 1:
            return res[0]
        else:
            return res
    @vectorized_classmethod
    def read_client_public_key(cls, client_id):
        # key material arrives as 8 registers
        res = [cls() for i in range(8)]
        readclientpublickey(client_id, *res)
        return res
    @vectorized_classmethod
    def init_secure_socket(cls, client_id, w1, w2, w3, w4, w5, w6, w7, w8):
        initsecuresocket(client_id, w1, w2, w3, w4, w5, w6, w7, w8)
    @vectorized_classmethod
    def resp_secure_socket(cls, client_id, w1, w2, w3, w4, w5, w6, w7, w8):
        respsecuresocket(client_id, w1, w2, w3, w4, w5, w6, w7, w8)
    @vectorize
    def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
        writesocketint(client_id, message_type, self)
    # NOTE(review): shadows the instance method of the same name above;
    # only this classmethod form is reachable -- confirm intended.
    @vectorized_classmethod
    def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
        writesocketint(client_id, message_type, *values)
    @vectorize_init
    def __init__(self, val=None, size=None):
        super(regint, self).__init__(self.reg_type, val=val, size=size)
    def load_int(self, val):
        """Load an int, composing 64-bit values from 32-bit halves."""
        if cint.in_immediate_range(val):
            ldint(self, val)
        else:
            lower = val % 2**32
            upper = val >> 32
            # keep the lower half in signed immediate range
            if lower >= 2**31:
                lower -= 2**32
                upper += 1
            addint(self, regint(upper) * regint(2**16)**2, regint(lower))
    @read_mem_value
    def load_other(self, val):
        if isinstance(val, cgf2n):
            gconvgf2n(self, val)
        elif isinstance(val, regint):
            # copy via addition with zero
            addint(self, val, regint(0))
        else:
            try:
                val.to_regint(dest=self)
            except AttributeError:
                raise CompilerError("Cannot convert '%s' to integer" % \
                                    type(val))
    @vectorize
    @read_mem_value
    def int_op(self, other, inst, reverse=False):
        """Generic binary integer instruction dispatch."""
        # NOTE(review): bare 'except' silently converts any conversion
        # failure into NotImplemented -- confirm intended.
        try:
            other = self.conv(other)
        except:
            return NotImplemented
        res = regint()
        if reverse:
            inst(res, other, self)
        else:
            inst(res, self, other)
        return res
    def add(self, other):
        return self.int_op(other, addint)
    def __sub__(self, other):
        return self.int_op(other, subint)
    def __rsub__(self, other):
        return self.int_op(other, subint, True)
    def mul(self, other):
        return self.int_op(other, mulint)
    def __neg__(self):
        return 0 - self
    def __floordiv__(self, other):
        return self.int_op(other, divint)
    def __rfloordiv__(self, other):
        return self.int_op(other, divint, True)
    # '/' behaves like '//' for integer registers
    __truediv__ = __floordiv__
    __rtruediv__ = __rfloordiv__
    def __mod__(self, other):
        return self - (self / other) * other
    def __rmod__(self, other):
        return regint(other) % self
    def __rpow__(self, other):
        return other**cint(self)
    def __eq__(self, other):
        return self.int_op(other, eqc)
    def __ne__(self, other):
        return 1 - (self == other)
    def __lt__(self, other):
        return self.int_op(other, ltc)
    def __gt__(self, other):
        return self.int_op(other, gtc)
    def __le__(self, other):
        return 1 - (self > other)
    def __ge__(self, other):
        return 1 - (self < other)
    def __lshift__(self, other):
        if isinstance(other, int):
            return self * 2**other
        else:
            # register shift amount: go via cint
            return regint(cint(self) << other)
    def __rshift__(self, other):
        if isinstance(other, int):
            return self / 2**other
        else:
            return regint(cint(self) >> other)
    def __rlshift__(self, other):
        return regint(other << cint(self))
    def __rrshift__(self, other):
        return regint(other >> cint(self))
    # bitwise operations are performed in the cint domain
    def __and__(self, other):
        return regint(other & cint(self))
    def __or__(self, other):
        return regint(other | cint(self))
    def __xor__(self, other):
        return regint(other ^ cint(self))
    __rand__ = __and__
    __ror__ = __or__
    __rxor__ = __xor__
    def mod2m(self, *args, **kwargs):
        return cint(self).mod2m(*args, **kwargs)
    @vectorize
    def bit_decompose(self, bit_length=None):
        """Little-endian bit decomposition, limited to 64 bits."""
        bit_length = bit_length or min(64, program.bit_length)
        if bit_length > 64:
            raise CompilerError('too many bits demanded')
        res = [regint() for i in range(bit_length)]
        bitdecint(self, *res)
        return res
    @staticmethod
    def bit_compose(bits):
        """Compose an integer from little-endian bits (Horner scheme)."""
        two = regint(2)
        res = 0
        for bit in reversed(bits):
            res *= two
            res += bit
        return res
    def reveal(self):
        # already public
        return self
    def print_reg_plain(self):
        print_int(self)
    def print_if(self, string):
        cint(self).print_if(string)
class localint(object):
    """Thin wrapper around regint whose comparison results are wrapped
    in localint again, keeping values in the local (non-shared) domain."""
    def __init__(self, value=None):
        self._v = regint(value)
        self.size = 1
    def output(self):
        """Print the wrapped value in plain text."""
        self._v.print_reg_plain()
    def __lt__(self, other):
        return localint(self._v < other)
    def __le__(self, other):
        return localint(self._v <= other)
    def __gt__(self, other):
        return localint(self._v > other)
    def __ge__(self, other):
        return localint(self._v >= other)
    def __eq__(self, other):
        return localint(self._v == other)
    def __ne__(self, other):
        return localint(self._v != other)
class _secret(_register):
    """Base class of secret-shared register types; implements randomness
    generation, secret/clear/immediate instruction dispatch, and
    opening."""
    __slots__ = []
    mov = staticmethod(movs)
    PreOR = staticmethod(lambda l: floatingpoint.PreORC(l))
    PreOp = staticmethod(lambda op, l: floatingpoint.PreOpL(op, l))
    @vectorized_classmethod
    @set_instruction_type
    def protect_memory(cls, start, end):
        program.curr_tape.start_new_basicblock(name='protect-memory')
        protectmems(regint(start), regint(end))
    @vectorized_classmethod
    @set_instruction_type
    def get_input_from(cls, player):
        """Secret input from the given player."""
        res = cls()
        asm_input(res, player)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_triple(cls):
        """Random multiplication triple (a, b, ab)."""
        res = (cls(), cls(), cls())
        triple(*res)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_bit(cls):
        """Random secret bit."""
        res = cls()
        bit(res)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_square(cls):
        """Random pair (a, a^2)."""
        res = (cls(), cls())
        square(*res)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_inverse(cls):
        """Random pair (a, a^-1)."""
        res = (cls(), cls())
        inverse(*res)
        return res
    @vectorized_classmethod
    @set_instruction_type
    def get_random_input_mask_for(cls, player):
        res = cls()
        inputmask(res, player)
        return res
    @classmethod
    @set_instruction_type
    def dot_product(cls, x, y):
        """Secret dot product of two vectors."""
        x = list(x)
        set_global_vector_size(x[0].size)
        res = cls()
        dotprods(res, x, y)
        reset_global_vector_size()
        return res
    @classmethod
    @set_instruction_type
    def row_matrix_mul(cls, row, matrix, res_params=None):
        """Row vector times matrix via per-column dot products."""
        assert len(row) == len(matrix)
        size = len(matrix[0])
        res = cls(size=size)
        dotprods(*sum(([res[j], row, [matrix[k][j] for k in range(len(row))]]
                       for j in range(size)), []))
        return res
    @classmethod
    @set_instruction_type
    def matrix_mul(cls, A, B, n, res_params=None):
        """Matrix product of flattened matrices A and B with inner
        dimension n, one dot product per output entry."""
        assert len(A) % n == 0
        assert len(B) % n == 0
        size = len(A) * len(B) // n**2
        res = cls(size=size)
        n_rows = len(A) // n
        n_cols = len(B) // n
        dotprods(*sum(([res[j], [A[j // n_cols * n + k] for k in range(n)],
                        [B[k * n_cols + j % n_cols] for k in range(n)]]
                       for j in range(size)), []))
        return res
    def __init__(self, reg_type, val=None, size=None):
        # a clear value fixes the vector size
        if isinstance(val, self.clear_type):
            size = val.size
        super(_secret, self).__init__(reg_type, val=val, size=size)
    @set_instruction_type
    @vectorize
    def load_int(self, val):
        if self.clear_type.in_immediate_range(val):
            ldsi(self, val)
        else:
            self.load_clear(self.clear_type(val))
    @vectorize
    def load_clear(self, val):
        # secret = 0 + clear
        addm(self, self.__class__(0), val)
    @set_instruction_type
    @read_mem_value
    @vectorize
    def load_other(self, val):
        if isinstance(val, self.clear_type):
            self.load_clear(val)
        elif isinstance(val, type(self)):
            movs(self, val)
        else:
            self.load_clear(self.clear_type(val))
    def _new_by_number(self, i):
        # clone metadata onto a fresh size-1 register with index i
        res = type(self)(size=1)
        res.i = i
        res.program = self.program
        return res
    @set_instruction_type
    @read_mem_value
    @vectorize
    def secret_op(self, other, s_inst, m_inst, si_inst, reverse=False):
        """Dispatch a binary operation: secret-secret (s_inst),
        secret-clear (m_inst), or secret-immediate (si_inst)."""
        cls = self.__class__
        res = self.prep_res(other)
        if isinstance(other, regint):
            other = res.clear_type(other)
        if isinstance(other, cls):
            s_inst(res, self, other)
        elif isinstance(other, res.clear_type):
            if reverse:
                m_inst(res, other, self)
            else:
                m_inst(res, self, other)
        elif isinstance(other, int):
            if self.clear_type.in_immediate_range(other):
                si_inst(res, self, other)
            else:
                if reverse:
                    m_inst(res, res.clear_type(other), self)
                else:
                    m_inst(res, self, res.clear_type(other))
        else:
            return NotImplemented
        return res
    def add(self, other):
        return self.secret_op(other, adds, addm, addsi)
    @set_instruction_type
    def mul(self, other):
        # vector times scalar uses the dedicated mulrs instruction
        if isinstance(other, _secret) and max(self.size, other.size) > 1 \
           and min(self.size, other.size) == 1:
            x, y = (other, self) if self.size < other.size else (self, other)
            res = type(self)(size=x.size)
            mulrs(res, x, y)
            return res
        return self.secret_op(other, muls, mulm, mulsi)
    def __sub__(self, other):
        return self.secret_op(other, subs, subml, subsi)
    def __rsub__(self, other):
        return self.secret_op(other, subs, submr, subsfi, True)
    @vectorize
    def __truediv__(self, other):
        return self * (self.clear_type(1) / other)
    @vectorize
    def __rtruediv__(self, other):
        # division via a random (a, a^-1) pair and one opening
        a,b = self.get_random_inverse()
        return other * a / (a * self).reveal()
    @set_instruction_type
    @vectorize
    def square(self):
        res = self.__class__()
        sqrs(res, self)
        return res
    @set_instruction_type
    @vectorize
    def reveal(self):
        """Open the value to all players as a clear register."""
        res = self.clear_type()
        asm_open(res, self)
        return res
    @set_instruction_type
    def reveal_to(self, player):
        """Reveal the value to one player only via private output."""
        masked = self.__class__()
        startprivateoutput(masked, self, player)
        stopprivateoutput(masked.reveal(), player)
class sint(_secret, _int):
    """Secret integer modulo the prime (modp domain), with comparison,
    truncation and bit-decomposition protocols."""
    __slots__ = []
    instruction_type = 'modp'
    clear_type = cint
    reg_type = 's'
    PreOp = staticmethod(floatingpoint.PreOpL)
    PreOR = staticmethod(floatingpoint.PreOR)
    get_type = staticmethod(lambda n: sint)
    @vectorized_classmethod
    def get_random_int(cls, bits):
        """Pseudo-random secret integer of the given bit length."""
        res = sint()
        comparison.PRandInt(res, bits)
        return res
    @vectorized_classmethod
    def get_input_from(cls, player):
        res = cls()
        inputmixed('int', res, player)
        return res
    @classmethod
    def get_raw_input_from(cls, player):
        res = cls()
        startinput(player, 1)
        stopinput(player, res)
        return res
    @classmethod
    def receive_from_client(cls, n, client_id, message_type=ClientMessageType.NoType):
        """Receive n secret inputs from a client, masked by triples.
        NOTE(review): 'itertools' is not in this module's visible import
        block -- presumably provided via a star import; verify."""
        triples = list(itertools.chain(*(sint.get_random_triple() for i in range(n))))
        sint.write_shares_to_socket(client_id, triples, message_type)
        received = cint.read_from_socket(client_id, n)
        y = [0] * n
        for i in range(n):
            # unmask: client sent value + triple share
            y[i] = received[i] - triples[i * 3]
        return y
    @vectorized_classmethod
    def read_from_socket(cls, client_id, n=1):
        """Read n secret shares from a socket; scalar if n == 1."""
        res = [cls() for i in range(n)]
        readsockets(client_id, *res)
        if n == 1:
            return res[0]
        else:
            return res
    @vectorize
    def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
        writesockets(client_id, message_type, self)
    # NOTE(review): shadows the instance method of the same name above;
    # only this classmethod form is reachable -- confirm intended.
    @vectorized_classmethod
    def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
        writesockets(client_id, message_type, *values)
    @vectorize
    def write_share_to_socket(self, client_id, message_type=ClientMessageType.NoType):
        """Send only the share part (no MACs)."""
        writesocketshare(client_id, message_type, self)
    @vectorized_classmethod
    def write_shares_to_socket(cls, client_id, values, message_type=ClientMessageType.NoType, include_macs=False):
        if include_macs:
            writesockets(client_id, message_type, *values)
        else:
            writesocketshare(client_id, message_type, *values)
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, ldms, ldmsi)
    def store_in_mem(self, address):
        self._store_in_mem(address, stms, stmsi)
    def __init__(self, val=None, size=None):
        super(sint, self).__init__('s', val=val, size=size)
    @vectorize
    def __neg__(self):
        return 0 - self
    @vectorize
    def __abs__(self):
        return (self >= 0).if_else(self, -self)
    @read_mem_value
    @vectorize
    def __lt__(self, other, bit_length=None, security=None):
        # secret comparison via the LTZ protocol on the difference
        res = sint()
        comparison.LTZ(res, self - other,
                       (bit_length or program.bit_length) + 1,
                       security or program.security)
        return res
    @read_mem_value
    @vectorize
    def __gt__(self, other, bit_length=None, security=None):
        res = sint()
        comparison.LTZ(res, other - self,
                       (bit_length or program.bit_length) + 1,
                       security or program.security)
        return res
    def __le__(self, other, bit_length=None, security=None):
        return 1 - self.greater_than(other, bit_length, security)
    def __ge__(self, other, bit_length=None, security=None):
        return 1 - self.less_than(other, bit_length, security)
    @read_mem_value
    @vectorize
    def __eq__(self, other, bit_length=None, security=None):
        return floatingpoint.EQZ(self - other, bit_length or program.bit_length,
                                 security or program.security)
    def __ne__(self, other, bit_length=None, security=None):
        return 1 - self.equal(other, bit_length, security)
    # named aliases that also expose bit_length/security parameters
    less_than = __lt__
    greater_than = __gt__
    less_equal = __le__
    greater_equal = __ge__
    equal = __eq__
    not_equal = __ne__
    @vectorize
    def __mod__(self, modulus):
        # only powers of two are supported (reduces to mod2m)
        if isinstance(modulus, int):
            l = math.log(modulus, 2)
            if 2**int(round(l)) == modulus:
                return self.mod2m(int(l))
        raise NotImplementedError('Modulo only implemented for powers of two.')
    @read_mem_value
    def mod2m(self, m, bit_length=None, security=None, signed=True):
        """Secret modulo 2^m; m may be public (int) or secret."""
        bit_length = bit_length or program.bit_length
        security = security or program.security
        if isinstance(m, int):
            if m == 0:
                return 0
            if m >= bit_length:
                return self
            res = sint()
            comparison.Mod2m(res, self, bit_length, m, security, signed)
        else:
            res, pow2 = floatingpoint.Trunc(self, bit_length, m, security, True)
        return res
    @vectorize
    def __rpow__(self, base):
        # only 2**x is supported
        if base == 2:
            return self.pow2()
        else:
            return NotImplemented
    @vectorize
    def pow2(self, bit_length=None, security=None):
        return floatingpoint.Pow2(self, bit_length or program.bit_length, \
                                  security or program.security)
    def __lshift__(self, other, bit_length=None, security=None):
        return self * util.pow2_value(other, bit_length, security)
    @vectorize
    @read_mem_value
    def __rshift__(self, other, bit_length=None, security=None):
        """Secret right shift (truncation); shift amount may be public
        or secret."""
        bit_length = bit_length or program.bit_length
        security = security or program.security
        if isinstance(other, int):
            if other == 0:
                return self
            res = sint()
            comparison.Trunc(res, self, bit_length, other, security, True)
            return res
        elif isinstance(other, sint):
            return floatingpoint.Trunc(self, bit_length, other, security)
        else:
            return floatingpoint.Trunc(self, bit_length, sint(other), security)
    left_shift = __lshift__
    right_shift = __rshift__
    def __rlshift__(self, other):
        return other * 2**self
    @vectorize
    def __rrshift__(self, other):
        return floatingpoint.Trunc(other, program.bit_length, self, program.security)
    def bit_decompose(self, bit_length=None, security=None):
        """Secret little-endian bit decomposition."""
        if bit_length == 0:
            return []
        bit_length = bit_length or program.bit_length
        security = security or program.security
        return floatingpoint.BitDec(self, bit_length, bit_length, security)
    def TruncMul(self, other, k, m, kappa=None, nearest=False):
        return (self * other).round(k, m, kappa, nearest, signed=True)
    def TruncPr(self, k, m, kappa=None, signed=True):
        return floatingpoint.TruncPr(self, k, m, kappa, signed=signed)
    @vectorize
    def round(self, k, m, kappa=None, nearest=False, signed=False):
        """Truncate by m bits: probabilistic by default, exact rounding
        with nearest=True (public m only)."""
        kappa = kappa or program.security
        secret = isinstance(m, sint)
        if nearest:
            if secret:
                raise NotImplementedError()
            return comparison.TruncRoundNearest(self, k, m, kappa,
                                                signed=signed)
        else:
            if secret:
                return floatingpoint.Trunc(self, k, m, kappa)
            return self.TruncPr(k, m, kappa, signed=signed)
    def Norm(self, k, f, kappa=None, simplex_flag=False):
        return library.Norm(self, k, f, kappa, simplex_flag)
    @vectorize
    def int_div(self, other, bit_length=None, security=None):
        """Secret integer division."""
        k = bit_length or program.bit_length
        kappa = security or program.security
        tmp = library.IntDiv(self, other, k, kappa)
        res = type(self)()
        comparison.Trunc(res, tmp, 2 * k, k, kappa, True)
        return res
    @staticmethod
    def two_power(n):
        return floatingpoint.two_power(n)
class sgf2n(_secret, _gf2n):
    """Secret GF(2^n) value."""
    __slots__ = []
    instruction_type = 'gf2n'
    clear_type = cgf2n
    reg_type = 'sg'
    @classmethod
    def get_type(cls, length):
        return cls
    @classmethod
    def get_raw_input_from(cls, player):
        res = cls()
        gstartinput(player, 1)
        gstopinput(player, res)
        return res
    def add(self, other):
        # let sgf2nint handle mixed operations itself
        if isinstance(other, sgf2nint):
            return NotImplemented
        else:
            return super(sgf2n, self).add(other)
    def mul(self, other):
        if isinstance(other, (sgf2nint)):
            return NotImplemented
        else:
            return super(sgf2n, self).mul(other)
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._load_mem(address, gldms, gldmsi)
    def store_in_mem(self, address):
        self._store_in_mem(address, gstms, gstmsi)
    def __init__(self, val=None, size=None):
        super(sgf2n, self).__init__('sg', val=val, size=size)
    def __neg__(self):
        # negation is the identity in characteristic 2
        return self
    @vectorize
    def __invert__(self):
        # bitwise NOT = XOR with all-ones
        return self ^ cgf2n(2**program.galois_length - 1)
    def __xor__(self, other):
        # NOTE: 'is 0' intentionally short-circuits only on the int
        # literal 0 (XOR equals addition in GF(2^n))
        if other is 0:
            return self
        else:
            return super(sgf2n, self).add(other)
    __rxor__ = __xor__
    @vectorize
    def __and__(self, other):
        # AND via bit decomposition and bit-wise products
        if isinstance(other, int):
            other_bits = [(other >> i) & 1 \
                              for i in range(program.galois_length)]
        else:
            other_bits = other.bit_decompose()
        self_bits = self.bit_decompose()
        return sum((x * y) << i \
                       for i,(x,y) in enumerate(zip(self_bits, other_bits)))
    __rand__ = __and__
    @vectorize
    def __lshift__(self, other):
        return self * cgf2n(1 << other)
    @vectorize
    def right_shift(self, other, bit_length=None):
        """Shift right by dropping the lowest bits after decomposition."""
        bits = self.bit_decompose(bit_length)
        return sum(b << i for i,b in enumerate(bits[other:]))
    def equal(self, other, bit_length=None, expand=1):
        """Equality test: AND together the complements of the
        difference's bits (pairwise, in a balanced order)."""
        bits = [1 - bit for bit in (self - other).bit_decompose(bit_length)][::expand]
        while len(bits) > 1:
            bits.insert(0, bits.pop() * bits.pop())
        return bits[0]
    def not_equal(self, other, bit_length=None):
        return 1 - self.equal(other, bit_length)
    __eq__ = equal
    __ne__ = not_equal
    @vectorize
    def bit_decompose(self, bit_length=None, step=1):
        """Secret bit decomposition: mask with random bits, open, then
        unmask the clear bits."""
        if bit_length == 0:
            return []
        bit_length = bit_length or program.galois_length
        random_bits = [self.get_random_bit() \
                           for i in range(0, bit_length, step)]
        one = cgf2n(1)
        masked = sum([b * (one << (i * step)) for i,b in enumerate(random_bits)], self).reveal()
        masked_bits = masked.bit_decompose(bit_length,step=step)
        return [m + r for m,r in zip(masked_bits, random_bits)]
    @vectorize
    def bit_decompose_embedding(self):
        # decomposition for the 8-bit embedding at fixed positions
        random_bits = [self.get_random_bit() \
                           for i in range(8)]
        one = cgf2n(1)
        wanted_positions = [0, 5, 10, 15, 20, 25, 30, 35]
        masked = sum([b * (one << wanted_positions[i]) for i,b in enumerate(random_bits)], self).reveal()
        return [self.clear_type((masked >> wanted_positions[i]) & one) + r for i,r in enumerate(random_bits)]
# Each basic secret type acts as its own bit, basic and default type.
for t in (sint, sgf2n):
    t.bit_type = t.basic_type = t.default_type = t
class _bitint(object):
    """Mixin providing integer arithmetic/comparison on values held as
    little-endian lists of (secret) bits.

    Subclasses supply the representation (force_bit_decompose, compose,
    bin_type, n_bits) and may override the adder primitives; this class
    provides the adder and comparator circuits.
    """
    bits = None
    log_rounds = False
    linear_rounds = False

    @classmethod
    def bit_adder(cls, a, b, carry_in=0, get_carry=False):
        """Add two little-endian bit vectors, zero-padding the shorter."""
        a, b = list(a), list(b)
        a += [0] * (len(b) - len(a))
        b += [0] * (len(a) - len(b))
        return cls.bit_adder_selection(a, b, carry_in=carry_in,
                                       get_carry=get_carry)

    @classmethod
    def bit_adder_selection(cls, a, b, carry_in=0, get_carry=False):
        # Pick the adder circuit by the configured round trade-off.
        if cls.log_rounds:
            return cls.carry_lookahead_adder(a, b, carry_in=carry_in)
        elif cls.linear_rounds:
            return cls.ripple_carry_adder(a, b, carry_in=carry_in)
        else:
            return cls.carry_select_adder(a, b, carry_in=carry_in)

    @classmethod
    def carry_lookahead_adder(cls, a, b, fewer_inv=False, carry_in=0,
                              get_carry=False):
        """Logarithmic-round adder via prefix computation of carries."""
        lower = []
        # Strip constant-zero low bits, which need no carry logic.
        # Fixed: explicit integer test instead of the CPython-specific
        # 'is 0' identity check.
        # NOTE(review): popping the fronts while iterating zip(a, b)
        # advances past shifted elements, so some constant zeros may
        # not be stripped; this only costs efficiency, not
        # correctness — verify before restructuring.
        for (ai,bi) in zip(a,b):
            if (isinstance(ai, int) and ai == 0) or \
               (isinstance(bi, int) and bi == 0):
                lower.append(ai + bi)
                a.pop(0)
                b.pop(0)
            else:
                break
        d = [cls.half_adder(ai, bi) for (ai,bi) in zip(a,b)]
        carry = floatingpoint.carry
        if fewer_inv:
            pre_op = floatingpoint.PreOpL2
        else:
            pre_op = floatingpoint.PreOpL
        if d:
            carries = list(zip(*pre_op(carry, [(0, carry_in)] + d)))[1]
        else:
            carries = []
        res = lower + cls.sum_from_carries(a, b, carries)
        if get_carry:
            res += [carries[-1]]
        return res

    @staticmethod
    def sum_from_carries(a, b, carries):
        # Sum bit i = a_i XOR b_i XOR carry_i.
        return [ai.bit_xor(bi).bit_xor(carry) \
                for (ai, bi, carry) in zip(a, b, carries)]

    @classmethod
    def carry_select_adder(cls, a, b, get_carry=False, carry_in=0):
        """Carry-select adder with blocks of increasing size."""
        a += [0] * (len(b) - len(a))
        b += [0] * (len(a) - len(b))
        n = len(a)
        # Smallest m such that blocks of sizes 1..m cover n bits.
        for m in range(100):
            if sum(range(m + 1)) + 1 >= n:
                break
        # Largest k such that blocks m, m-1, ..., k still cover n bits.
        for k in range(m, -1, -1):
            if sum(range(m, k - 1, -1)) + 1 >= n:
                break
        blocks = list(range(m, k, -1))
        blocks.append(n - sum(blocks))
        blocks.reverse()
        if len(blocks) > 1 and blocks[0] > blocks[1]:
            raise Exception('block size not increasing:', blocks)
        if sum(blocks) != n:
            raise Exception('blocks not summing up: %s != %s' % \
                            (sum(blocks), n))
        res = []
        carry = carry_in
        cin_one = util.long_one(a + b)
        for m in blocks:
            aa = a[:m]
            bb = b[:m]
            a = a[m:]
            b = b[m:]
            # Evaluate the block for both carry-in hypotheses, then
            # select by the actual carry.
            cc = [cls.ripple_carry_adder(aa, bb, i) for i in (0, cin_one)]
            for i in range(m):
                res.append(util.if_else(carry, cc[1][i], cc[0][i]))
            carry = util.if_else(carry, cc[1][m], cc[0][m])
        if get_carry:
            res += [carry]
        return res

    @classmethod
    def ripple_carry_adder(cls, a, b, carry_in=0):
        """Linear-round adder; returns sum bits plus the final carry."""
        carry = carry_in
        res = []
        for aa, bb in zip(a, b):
            cc, carry = cls.full_adder(aa, bb, carry)
            res.append(cc)
        res.append(carry)
        return res

    @staticmethod
    def full_adder(a, b, carry):
        # XOR-based full adder: sum = a^b^c, carry-out via multiplexer.
        s = a + b
        return s + carry, util.if_else(s, carry, a)

    @staticmethod
    def half_adder(a, b):
        return a + b, a & b

    @staticmethod
    def bit_comparator(a, b):
        """Return (a < b, a != b) as secret bits via prefix operator."""
        long_one = util.long_one(a + b)
        op = lambda y,x,*args: (util.if_else(x[1], x[0], y[0]), \
                                util.if_else(x[1], long_one, y[1]))
        return floatingpoint.KOpL(op, [(bi, ai + bi) for (ai,bi) in zip(a,b)])

    @classmethod
    def bit_less_than(cls, a, b):
        x, not_equal = cls.bit_comparator(a, b)
        return util.if_else(not_equal, x, 0)

    @staticmethod
    def get_highest_different_bits(a, b, index):
        """Constant-round comparison helper: the bit of operand
        'index' at the highest position where a and b differ."""
        diff = [ai + bi for (ai,bi) in reversed(list(zip(a,b)))]
        preor = floatingpoint.PreOR(diff, raw=True)
        highest_diff = [x - y for (x,y) in reversed(list(zip(preor, [0] + preor)))]
        raw = sum(map(operator.mul, highest_diff, (a,b)[index]))
        return raw.bit_decompose()[0]

    def load_int(self, other):
        """Load a signed integer in two's complement representation."""
        if -2**(self.n_bits-1) <= other < 2**(self.n_bits-1):
            self.bin_type.load_int(self, other + 2**self.n_bits \
                                   if other < 0 else other)
        else:
            raise CompilerError('Invalid signed %d-bit integer: %d' % \
                                (self.n_bits, other))

    def add(self, other):
        if type(other) == self.bin_type:
            raise CompilerError('Unclear addition')
        a = self.bit_decompose()
        b = util.bit_decompose(other, self.n_bits)
        return self.compose(self.bit_adder(a, b))

    def mul(self, other):
        """Multiplication via partial-product matrix + Wallace tree."""
        if type(other) == self.bin_type:
            raise CompilerError('Unclear multiplication')
        self_bits = self.bit_decompose()
        if isinstance(other, int):
            other_bits = util.bit_decompose(other, self.n_bits)
            bit_matrix = [[x * y for y in self_bits] for x in other_bits]
        else:
            try:
                other_bits = other.bit_decompose()
                if len(other_bits) == 1:
                    return type(self)(other_bits[0] * self)
                if len(self_bits) != len(other_bits):
                    raise NotImplementedError('Multiplication of different lengths')
            except AttributeError:
                pass
            try:
                other = self.bin_type(other)
            except CompilerError:
                return NotImplemented
            products = [x * other for x in self_bits]
            bit_matrix = [util.bit_decompose(x, self.n_bits) for x in products]
        return self.compose(self.wallace_tree_from_matrix(bit_matrix, False))

    @classmethod
    def wallace_tree_from_matrix(cls, bit_matrix, get_carry=True):
        # Rearrange the partial products into per-weight columns,
        # dropping falsy (constant-zero) entries.
        columns = [[_f for _f in (bit_matrix[j][i-j] \
                                  for j in range(min(len(bit_matrix), i + 1))) if _f] \
                   for i in range(len(bit_matrix[0]))]
        return cls.wallace_tree_from_columns(columns, get_carry)

    @classmethod
    def wallace_tree_from_columns(cls, columns, get_carry=True):
        """Compress addend columns with full/half adders until at most
        two rows remain, then finish with a bit adder."""
        self = cls
        while max(len(c) for c in columns) > 2:
            new_columns = [[] for i in range(len(columns) + 1)]
            for i,col in enumerate(columns):
                while len(col) > 2:
                    s, carry = self.full_adder(*(col.pop() for i in range(3)))
                    new_columns[i].append(s)
                    new_columns[i+1].append(carry)
                if len(col) == 2:
                    s, carry = self.half_adder(*(col.pop() for i in range(2)))
                    new_columns[i].append(s)
                    new_columns[i+1].append(carry)
                else:
                    new_columns[i].extend(col)
            if get_carry:
                columns = new_columns
            else:
                columns = new_columns[:-1]
        for col in columns:
            col.extend([0] * (2 - len(col)))
        return self.bit_adder(*list(zip(*columns)))

    @classmethod
    def wallace_tree(cls, rows):
        return cls.wallace_tree_from_columns([list(x) for x in zip(*rows)])

    def __sub__(self, other):
        """Subtraction via borrow propagation (prefix operator)."""
        # NOTE(review): checks against sgf2n rather than self.bin_type,
        # unlike add/mul — presumably historical; verify before changing.
        if type(other) == sgf2n:
            raise CompilerError('Unclear subtraction')
        a = self.bit_decompose()
        b = util.bit_decompose(other, self.n_bits)
        d = [(1 + ai + bi, (1 - ai) * bi) for (ai,bi) in zip(a,b)]
        borrow = lambda y,x,*args: \
            (x[0] * y[0], 1 - (1 - x[1]) * (1 - x[0] * y[1]))
        borrows = (0,) + list(zip(*floatingpoint.PreOpL(borrow, d)))[1]
        return self.compose(ai + bi + borrow \
                            for (ai,bi,borrow) in zip(a,b,borrows))

    def __rsub__(self, other):
        raise NotImplementedError()

    def __truediv__(self, other):
        raise NotImplementedError()

    def __rtruediv__(self, other):
        # Fixed: was misspelt '__truerdiv__', a name Python never
        # invokes; the reflected-division hook is '__rtruediv__'.
        raise NotImplementedError()

    def __lshift__(self, other):
        return self.compose(([0] * other + self.bit_decompose())[:self.n_bits])

    def __rshift__(self, other):
        return self.compose(self.bit_decompose()[other:])

    def bit_decompose(self, n_bits=None, *args):
        """Return (and cache) the little-endian bits, extended to
        n_bits using fill_bit()."""
        if self.bits is None:
            self.bits = self.force_bit_decompose(self.n_bits)
        if n_bits is None:
            return self.bits[:]
        else:
            return self.bits[:n_bits] + [self.fill_bit()] * (n_bits - self.n_bits)

    def fill_bit(self):
        # Sign extension: replicate the most significant bit.
        return self.bits[-1]

    @staticmethod
    def prep_comparison(a, b):
        # Swap sign bits so signed comparison reduces to unsigned.
        a[-1], b[-1] = b[-1], a[-1]

    def comparison(self, other, const_rounds=False, index=None):
        a = self.bit_decompose()
        b = util.bit_decompose(other, self.n_bits)
        self.prep_comparison(a, b)
        if const_rounds:
            return self.get_highest_different_bits(a, b, index)
        else:
            return self.bit_comparator(a, b)

    def __lt__(self, other):
        if program.options.comparison == 'log':
            x, not_equal = self.comparison(other)
            return util.if_else(not_equal, x, 0)
        else:
            return self.comparison(other, True, 1)

    def __le__(self, other):
        if program.options.comparison == 'log':
            x, not_equal = self.comparison(other)
            return util.if_else(not_equal, x, 1)
        else:
            return 1 - self.comparison(other, True, 0)

    def __ge__(self, other):
        return 1 - (self < other)

    def __gt__(self, other):
        return 1 - (self <= other)

    def __eq__(self, other):
        # Equality: AND over the complemented difference bits.
        diff = self ^ other
        diff_bits = [1 - x for x in diff.bit_decompose()]
        return floatingpoint.KMul(diff_bits)

    def __ne__(self, other):
        return 1 - (self == other)

    def __neg__(self):
        # Two's complement negation: invert all bits, then add one.
        return 1 + self.compose(1 ^ b for b in self.bit_decompose())

    def __abs__(self):
        return util.if_else(self.bit_decompose()[-1], -self, self)

    less_than = lambda self, other, *args, **kwargs: self < other
    greater_than = lambda self, other, *args, **kwargs: self > other
    less_equal = lambda self, other, *args, **kwargs: self <= other
    greater_equal = lambda self, other, *args, **kwargs: self >= other
    equal = lambda self, other, *args, **kwargs: self == other
    not_equal = lambda self, other, *args, **kwargs: self != other
class intbitint(_bitint, sint):
    """Bit-decomposed integer arithmetic on top of sint shares.

    Overrides the generic binary-circuit primitives of _bitint with
    arithmetic (mod-p) formulations that are cheaper for sint.
    """
    @staticmethod
    def full_adder(a, b, carry):
        # s = a XOR b; carry-out selected by multiplexing on s.
        s = a.bit_xor(b)
        return s.bit_xor(carry), util.if_else(s, carry, a)

    @staticmethod
    def half_adder(a, b):
        # XOR and AND expressed arithmetically: a + b - 2ab, ab.
        carry = a * b
        return a + b - 2 * carry, carry

    @staticmethod
    def sum_from_carries(a, b, carries):
        # Arithmetic XOR of summands and carry-in: a + b + c_in - 2*c_out.
        return [a[i] + b[i] + carries[i] - 2 * carries[i + 1] \
                for i in range(len(a))]

    @classmethod
    def bit_adder_selection(cls, a, b, carry_in=0, get_carry=False):
        # Circuit choice: carry-lookahead for short inputs or when
        # logarithmic rounds are requested, otherwise carry-select.
        if cls.linear_rounds:
            return cls.ripple_carry_adder(a, b, carry_in=carry_in)
        elif len(a) < 122 or cls.log_rounds:
            return cls.carry_lookahead_adder(a, b, carry_in=carry_in,
                                             get_carry=get_carry)
        else:
            return cls.carry_select_adder(a, b, carry_in=carry_in)
class sgf2nint(_bitint, sgf2n):
    """Signed integer embedded bitwise in GF(2^n) elements."""
    bin_type = sgf2n

    @classmethod
    def compose(cls, bits):
        """Build a value from a little-endian list of bits."""
        bits = list(bits)
        if len(bits) > cls.n_bits:
            raise CompilerError('Too many bits')
        res = cls()
        res.bits = bits + [0] * (cls.n_bits - len(bits))
        gmovs(res, sum(b << i for i,b in enumerate(bits)))
        return res

    def load_other(self, other):
        # Accepts another sgf2nint (re-composed to this width), a plain
        # sgf2n (moved), or a public value (added to a zero share).
        if isinstance(other, sgf2nint):
            gmovs(self, self.compose(other.bit_decompose(self.n_bits)))
        elif isinstance(other, sgf2n):
            gmovs(self, other)
        else:
            gaddm(self, sgf2n(0), cgf2n(other))

    def force_bit_decompose(self, n_bits=None):
        # Fall back to the generic sgf2n bit decomposition.
        return sgf2n(self).bit_decompose(n_bits)
class sgf2nuint(sgf2nint):
    """Unsigned variant of sgf2nint."""
    def load_int(self, other):
        # Unsigned range check: [0, 2^n).
        if 0 <= other < 2**self.n_bits:
            sgf2n.load_int(self, other)
        else:
            raise CompilerError('Invalid unsigned %d-bit integer: %d' % \
                                (self.n_bits, other))

    def fill_bit(self):
        # Zero-extend instead of sign-extend.
        return 0

    @staticmethod
    def prep_comparison(a, b):
        # No sign-bit swap needed for unsigned comparison.
        pass
class sgf2nuint32(sgf2nuint):
    # 32-bit unsigned specialisation.
    n_bits = 32
class sgf2nint32(sgf2nint):
    # 32-bit signed specialisation.
    n_bits = 32
def get_sgf2nint(n):
    """Return a signed GF(2^n)-embedded integer type with n bits."""
    class sgf2nint_spec(sgf2nint):
        n_bits = n
    return sgf2nint_spec
def get_sgf2nuint(n):
    """Return an unsigned GF(2^n)-embedded integer type with n bits.

    Fixed: the generated class now derives from sgf2nuint (unsigned)
    rather than sgf2nint (signed) — a copy-paste error that made this
    factory return signed semantics, unlike its get_sgf2nint sibling.
    """
    class sgf2nuint_spec(sgf2nuint):
        n_bits = n
    return sgf2nuint_spec
class sgf2nfloat(sgf2n):
    """Binary floating-point number embedded in GF(2^n).

    Packs significand (v), exponent (p) and sign (s) into the bits of
    one sgf2n element; the zero flag (z) is derived on demand.
    """
    @classmethod
    def set_precision(cls, vlen, plen):
        """Create the integer helper types for the chosen precision."""
        cls.vlen = vlen
        cls.plen = plen
        class v_type(sgf2nuint):
            n_bits = 2 * vlen + 1
        class p_type(sgf2nint):
            n_bits = plen
        class pdiff_type(sgf2nuint):
            n_bits = plen
        cls.v_type = v_type
        cls.p_type = p_type
        cls.pdiff_type = pdiff_type

    def __init__(self, val, p=None, z=None, s=None):
        super(sgf2nfloat, self).__init__()
        if p is None and type(val) == sgf2n:
            # Unpack v, p, s from an existing packed element.
            bits = val.bit_decompose(self.vlen + self.plen + 1)
            self.v = self.v_type.compose(bits[:self.vlen])
            self.p = self.p_type.compose(bits[self.vlen:-1])
            self.s = bits[-1]
            # Zero flag: product of the complemented significand bits.
            self.z = util.tree_reduce(operator.mul, (1 - b for b in self.v.bits))
        else:
            if p is None:
                # Convert a public constant to float components.
                v, p, z, s = sfloat.convert_float(val, self.vlen, self.plen)
                p += self.vlen - 1
                v_bits = util.bit_decompose(v, self.vlen)
                p_bits = util.bit_decompose(p, self.plen)
                self.v = self.v_type.compose(v_bits)
                self.p = self.p_type.compose(p_bits)
                self.z = z
                self.s = s
            else:
                self.v, self.p, self.z, self.s = val, p, z, s
                v_bits = val.bit_decompose()[:self.vlen]
                p_bits = p.bit_decompose()[:self.plen]
            # Repack the components into this element's bits.
            gmovs(self, util.bit_compose(v_bits + p_bits + [self.s]))

    def add(self, other):
        """Floating-point addition: align exponents, add significands,
        renormalise, then patch up zero operands/results."""
        a = self.p < other.p
        b = self.p == other.p
        c = self.v < other.v
        # 'other' dominates on larger exponent, or equal exponent with
        # larger significand.
        other_dominates = (b.if_else(c, a))
        pmax, pmin = a.cond_swap(self.p, other.p, self.p_type)
        vmax, vmin = other_dominates.cond_swap(self.v, other.v, self.v_type)
        s3 = self.s ^ other.s
        pdiff = self.pdiff_type(pmax - pmin)
        d = self.vlen < pdiff
        pow_delta = util.pow2(d.if_else(0, pdiff).bit_decompose(util.log2(self.vlen)))
        v3 = vmax
        v4 = self.v_type(sgf2n(vmax) * pow_delta) + self.v_type(s3.if_else(-vmin, vmin))
        v = self.v_type(sgf2n(d.if_else(v3, v4) << self.vlen) / pow_delta)
        v >>= self.vlen - 1
        # Renormalise: locate the highest set bit via prefix OR.
        h = floatingpoint.PreOR(v.bits[self.vlen+1::-1])
        tmp = sum(util.if_else(b, 0, 1 << i) for i,b in enumerate(h))
        pow_p0 = 1 + self.v_type(tmp)
        v = (v * pow_p0) >> 2
        p = pmax - sum(self.p_type.compose([1 - b]) for b in h) + 1
        # Patch zero operands and a zero result.
        v = self.z.if_else(other.v, other.z.if_else(self.v, v))
        z = v == 0
        p = z.if_else(0, self.z.if_else(other.p, other.z.if_else(self.p, p)))
        s = other_dominates.if_else(other.s, self.s)
        s = self.z.if_else(other.s, other.z.if_else(self.s, s))
        return sgf2nfloat(v, p, z, s)

    def mul(self, other):
        """Floating-point multiplication with 1-bit renormalisation."""
        v = (self.v * other.v) >> (self.vlen - 1)
        b = v.bits[self.vlen]
        v = b.if_else(v >> 1, v)
        p = self.p + other.p + self.p_type.compose([b])
        s = self.s + other.s
        z = util.or_op(self.z, other.z)
        return sgf2nfloat(v, p, z, s)
# Default GF(2^n) float precision: 24-bit significand, 8-bit exponent.
sgf2nfloat.set_precision(24, 8)
def parse_type(other, k=None, f=None):
    """Promote a public or secret value to the matching fixed-point
    type (cfix for public, sfix for secret); pass through otherwise."""
    if isinstance(other, cfix.scalars):
        return cfix(other, k=k, f=f)
    if isinstance(other, cint):
        res = cfix()
        res.load_int(other)
        return res
    if isinstance(other, sint):
        res = sfix()
        res.load_int(other)
        return res
    if isinstance(other, sfloat):
        return sfix(other)
    return other
class cfix(_number, _structure):
    """Clear (public) fixed-point number: a cint scaled by 2^f within
    a k-bit representation."""
    __slots__ = ['value', 'f', 'k', 'size']
    reg_type = 'c'
    scalars = (int, float, regint)

    @classmethod
    def set_precision(cls, f, k = None):
        """Set class precision: f fractional bits, k total bits
        (default 2*f)."""
        cls.f = f
        if k is None:
            cls.k = 2 * f
        else:
            cls.k = k

    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        res = []
        res.append(cint.load_mem(address))
        return cfix(*res)

    @vectorized_classmethod
    def read_from_socket(cls, client_id, n=1):
        """Read n clear fixed-point values from a client socket.

        Fixed: the local was named 'cint_input' while both uses read
        'cint_inputs', so every call raised NameError; the names now
        agree.
        """
        cint_inputs = cint.read_from_socket(client_id, n)
        if n == 1:
            return cfix(cint_inputs)
        else:
            return list(map(cfix, cint_inputs))

    @vectorize
    def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
        # NOTE(review): shadowed by the classmethod of the same name
        # below (the later definition wins at class creation); kept
        # byte-for-byte for compatibility — verify which is intended.
        writesocketc(client_id, message_type, cint(self.v))

    @vectorized_classmethod
    def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
        """Send a list of cfix values to a client as raw cints."""
        def cfix_to_cint(fix_val):
            return cint(fix_val.v)
        cint_values = list(map(cfix_to_cint, values))
        writesocketc(client_id, message_type, *cint_values)

    @staticmethod
    def malloc(size):
        return program.malloc(size, cint)

    @staticmethod
    def n_elements():
        # One underlying register per value.
        return 1

    @vectorize_init
    def __init__(self, v=None, k=None, f=None, size=None):
        """Initialise from a cint (already scaled), a public scalar
        (scaled by 2^f), another cfix, or a MemValue."""
        f = f or self.f
        k = k or self.k
        self.f = f
        self.k = k
        self.size = get_global_vector_size()
        if isinstance(v, cint):
            self.v = cint(v,size=self.size)
        elif isinstance(v, cfix.scalars):
            v = v * (2 ** f)
            try:
                v = int(round(v))
            except TypeError:
                # regint scalars cannot be rounded at compile time.
                pass
            self.v = cint(v, size=self.size)
        elif isinstance(v, cfix):
            self.v = v.v
        elif isinstance(v, MemValue):
            self.v = v
        elif v is None:
            self.v = cint(0)
        else:
            raise CompilerError('cannot initialize cfix with %s' % v)

    @vectorize
    def load_int(self, v):
        # Scale an integer into the fixed-point representation.
        self.v = cint(v) * (2 ** self.f)

    @classmethod
    def conv(cls, other):
        if isinstance(other, cls):
            return other
        else:
            try:
                res = cfix()
                res.load_int(other)
                return res
            except (TypeError, CompilerError):
                pass
        return cls(other)

    def store_in_mem(self, address):
        self.v.store_in_mem(address)

    def sizeof(self):
        # Size in bytes: 4 bytes per element.
        return self.size * 4

    @vectorize
    def add(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return cfix(self.v + other.v)
        else:
            return NotImplemented

    @vectorize
    def mul(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            assert self.f == other.f
            # Multiply absolute values so truncation rounds towards
            # zero, then restore the sign.
            sgn = cint(1 - 2 * (self.v * other.v < 0))
            absolute = self.v * other.v * sgn
            val = sgn * (absolute >> self.f)
            return cfix(val)
        elif isinstance(other, sfix):
            # Let sfix handle the mixed multiplication.
            return NotImplemented
        else:
            raise CompilerError('Invalid type %s for cfix.__mul__' % type(other))

    @vectorize
    def __sub__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return cfix(self.v - other.v)
        elif isinstance(other, sfix):
            return sfix(self.v - other.v)
        else:
            raise NotImplementedError

    @vectorize
    def __neg__(self):
        return cfix(-self.v)

    def __rsub__(self, other):
        return -self + other

    @vectorize
    def __eq__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v == other.v
        elif isinstance(other, sfix):
            return other.v.equal(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __lt__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v < other.v
        elif isinstance(other, sfix):
            if(self.k != other.k or self.f != other.f):
                raise TypeError('Incompatible fixed point types in comparison')
            # Reversed operand order: self < other <=> other > self.
            return other.v.greater_than(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __le__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v <= other.v
        elif isinstance(other, sfix):
            return other.v.greater_equal(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __gt__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v > other.v
        elif isinstance(other, sfix):
            return other.v.less_than(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __ge__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v >= other.v
        elif isinstance(other, sfix):
            return other.v.less_equal(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __ne__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return self.v != other.v
        elif isinstance(other, sfix):
            return other.v.not_equal(self.v, self.k, other.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __truediv__(self, other):
        other = parse_type(other)
        if isinstance(other, cfix):
            return cfix(library.cint_cint_division(self.v, other.v, self.k, self.f))
        elif isinstance(other, sfix):
            return sfix(library.FPDiv(self.v, other.v, self.k, self.f,
                                      other.kappa, nearest=sfix.round_nearest))
        else:
            raise TypeError('Incompatible fixed point types in division')

    def print_plain(self):
        """Print the value as a float at runtime (max 64-bit k)."""
        if self.k > 64:
            raise CompilerError('Printing of fixed-point numbers not ' +
                                'implemented for more than 64-bit precision')
        tmp = regint()
        convmodp(tmp, self.v, bitlength=self.k)
        sign = cint(tmp < 0)
        abs_v = sign.if_else(-self.v, self.v)
        print_float_plain(cint(abs_v), cint(-self.f), \
                          cint(0), cint(sign))
class _single(_number, _structure):
    """Shared base for types backed by a single secret integer (sfix,
    squant): memory access, dot/matrix products and comparisons."""
    __slots__ = ['v']
    # Statistical security parameter for truncation/comparison.
    kappa = 40
    round_nearest = False

    @property
    @classmethod
    def reg_type(cls):
        # NOTE(review): stacking @property on @classmethod does not
        # yield a working class-level property on older Pythons —
        # verify this attribute is actually exercised.
        return cls.int_type.reg_type

    @classmethod
    def receive_from_client(cls, n, client_id, message_type=ClientMessageType.NoType):
        """Receive n secret-shared values from a client."""
        sint_inputs = cls.int_type.receive_from_client(n, client_id, ClientMessageType.TripleShares)
        return list(map(cls, sint_inputs))

    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        return cls._new(cls.int_type.load_mem(address))

    @classmethod
    @read_mem_value
    def conv(cls, other):
        if isinstance(other, cls):
            return other
        else:
            try:
                return cls.from_sint(other)
            except (TypeError, CompilerError):
                pass
        return cls(other)

    @classmethod
    def coerce(cls, other):
        return cls.conv(other)

    @classmethod
    def malloc(cls, size):
        return program.malloc(size, cls.int_type)

    @staticmethod
    def n_elements():
        return 1

    @classmethod
    def dot_product(cls, x, y, res_params=None):
        """Dot product with a single reduction at the end."""
        return cls.unreduced_dot_product(x, y, res_params).reduce_after_mul()

    @classmethod
    def unreduced_dot_product(cls, x, y, res_params=None):
        # Multiply pre-processed representations; defer the reduction.
        dp = cls.int_type.dot_product([xx.pre_mul() for xx in x],
                                      [yy.pre_mul() for yy in y])
        return x[0].unreduced(dp, y[0], res_params, len(x))

    @classmethod
    def row_matrix_mul(cls, row, matrix, res_params=None):
        """Vector-matrix product, reduced once at the end."""
        int_matrix = [y.get_vector().pre_mul() for y in matrix]
        col = cls.int_type.row_matrix_mul([x.pre_mul() for x in row],
                                          int_matrix)
        res = row[0].unreduced(col, matrix[0][0], res_params,
                               len(row)).reduce_after_mul()
        return res

    @classmethod
    def matrix_mul(cls, A, B, n, res_params=None):
        """Matrix product on pre-multiplied representations."""
        AA = A.pre_mul()
        BB = B.pre_mul()
        CC = cls.int_type.matrix_mul(AA, BB, n)
        res = A.unreduced(CC, B, res_params, n).reduce_after_mul()
        return res

    def store_in_mem(self, address):
        self.v.store_in_mem(address)

    @property
    def size(self):
        return self.v.size

    def sizeof(self):
        return self.size

    def __len__(self):
        return len(self.v)

    @vectorize
    def __sub__(self, other):
        other = self.coerce(other)
        return self + (-other)

    def __rsub__(self, other):
        return -self + other

    @vectorize
    def __eq__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.equal(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __le__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.less_equal(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __lt__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.less_than(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __ge__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.greater_equal(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __gt__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.greater_than(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError

    @vectorize
    def __ne__(self, other):
        other = self.coerce(other)
        if isinstance(other, (cfix, _single)):
            return self.v.not_equal(other.v, self.k, self.kappa)
        else:
            raise NotImplementedError
class _fix(_single):
    """Abstract fixed-point type: a secret integer v scaled by 2^f in
    a k-bit representation."""
    __slots__ = ['v', 'f', 'k', 'size']

    @classmethod
    def set_precision(cls, f, k = None):
        """Set class precision: f fractional bits, k total bits
        (default 2*f; must be at least f)."""
        cls.f = f
        if k is None:
            cls.k = 2 * f
        else:
            if k < f:
                raise CompilerError('bit length cannot be less than precision')
            cls.k = k

    @classmethod
    def coerce(cls, other):
        if isinstance(other, (_fix, cls.clear_type)):
            return other
        else:
            return cls.conv(other)

    @classmethod
    def from_sint(cls, other, k=None, f=None):
        """Build from a secret integer, scaling it by 2^f."""
        res = cls()
        res.f = f or cls.f
        res.k = k or cls.k
        res.load_int(cls.int_type.conv(other))
        return res

    @classmethod
    def _new(cls, other, k=None, f=None):
        # Wrap an already-scaled integer without further shifting.
        res = cls(other)
        res.k = k or cls.k
        res.f = f or cls.f
        return res

    @vectorize_init
    def __init__(self, _v=None, size=None):
        """Initialise from a secret integer (already scaled), a public
        scalar, a secret float, another instance or a MemValue."""
        self.size = get_global_vector_size()
        f = self.f
        k = self.k
        # for external initialization use load_int.
        if _v is None:
            self.v = self.int_type(0)
        elif isinstance(_v, self.int_type):
            self.v = _v
            self.size = _v.size
        elif isinstance(_v, cfix.scalars):
            self.v = self.int_type(int(round(_v * (2 ** f))), size=self.size)
        elif isinstance(_v, self.float_type):
            # Shift the float's significand according to its exponent
            # and apply the sign.
            p = (f + _v.p)
            b = (p.greater_equal(0, _v.vlen))
            a = b*(_v.v << (p)) + (1-b)*(_v.v >> (-p))
            self.v = (1-2*_v.s)*a
        elif isinstance(_v, type(self)):
            self.v = _v.v
        elif isinstance(_v, (MemValue, MemFix)):
            #this is a memvalue object
            self.v = type(self)(_v.read()).v
        else:
            raise CompilerError('cannot convert %s to sfix' % _v)
        if not isinstance(self.v, self.int_type):
            raise CompilerError('sfix conversion failure: %s/%s' % (_v, self.v))

    @vectorize
    def load_int(self, v):
        # Scale an integer into fixed-point representation.
        self.v = self.int_type(v) << self.f

    def __getitem__(self, index):
        return self._new(self.v[index])

    @vectorize
    def add(self, other):
        other = self.coerce(other)
        if isinstance(other, (_fix, cfix)):
            return self._new(self.v + other.v, k=self.k, f=self.f)
        elif isinstance(other, cfix.scalars):
            tmp = cfix(other, k=self.k, f=self.f)
            return self + tmp
        else:
            return NotImplemented

    @vectorize
    def mul(self, other):
        """Multiplication with truncation of extra fractional bits."""
        if isinstance(other, (sint, cint, regint, int)):
            # Integer factor: no truncation needed.
            return self._new(self.v * other, k=self.k, f=self.f)
        elif isinstance(other, float):
            if int(other) == other:
                return self.mul(int(other))
            # Reduce the float constant to its shortest exact
            # fixed-point representation to cheapen the truncation.
            v = int(round(other * 2 ** self.f))
            if v == 0:
                return 0
            f = self.f
            while v % 2 == 0:
                f -= 1
                v //= 2
            k = len(bin(abs(v))) - 1
            other = self.multipliable(v, k, f)
        other = self.coerce(other)
        if isinstance(other, (_fix, self.clear_type)):
            val = self.v.TruncMul(other.v, self.k + other.k, other.f,
                                  self.kappa,
                                  self.round_nearest)
            if self.size >= other.size:
                return self._new(val, k=self.k, f=self.f)
            else:
                return self.vec._new(val, k=self.k, f=self.f)
        elif isinstance(other, cfix.scalars):
            scalar_fix = cfix(other)
            return self * scalar_fix
        else:
            return NotImplemented

    @vectorize
    def __neg__(self):
        return type(self)(-self.v)

    @vectorize
    def __truediv__(self, other):
        other = self.coerce(other)
        if isinstance(other, _fix):
            return type(self)(library.FPDiv(self.v, other.v, self.k, self.f,
                                            self.kappa,
                                            nearest=self.round_nearest))
        elif isinstance(other, cfix):
            return type(self)(library.sint_cint_division(self.v, other.v, self.k, self.f, self.kappa))
        else:
            raise TypeError('Incompatible fixed point types in division')

    @vectorize
    def __rtruediv__(self, other):
        return self.coerce(other) / self

    @vectorize
    def compute_reciprocal(self):
        """Return 1/self as the same fixed-point type."""
        return type(self)(library.FPDiv(cint(2) ** self.f, self.v, self.k, self.f, self.kappa, True))

    def reveal(self):
        """Open the value, returning the clear counterpart type with
        matching precision."""
        val = self.v.reveal()
        res = self.clear_type(val)
        res.f = self.f
        res.k = self.k
        return res
class sfix(_fix):
    """Secret fixed-point number backed by sint."""
    int_type = sint
    clear_type = cfix

    @vectorized_classmethod
    def get_input_from(cls, player):
        """Read a fixed-point private input from the given player."""
        v = cls.int_type()
        inputmixed('fix', v, cls.f, player)
        return cls._new(v)

    @vectorized_classmethod
    def get_random(cls, lower, upper):
        """Uniform random value in a power-of-two window centred on
        the requested range (width rounded down to 2^log_range)."""
        log_range = int(math.log(upper - lower, 2))
        n_bits = log_range + cls.f
        average = lower + 0.5 * (upper - lower)
        lower = average - 0.5 * 2 ** log_range
        return cls._new(cls.int_type.get_random_int(n_bits)) + lower

    def coerce(self, other):
        return parse_type(other, k=self.k, f=self.f)

    def mul_no_reduce(self, other, res_params=None):
        # Defer the truncation so products can be summed first.
        assert self.f == other.f
        return self.unreduced(self.v * other.v)

    def pre_mul(self):
        return self.v

    def unreduced(self, v, other=None, res_params=None, n_summands=1):
        return unreduced_sfix(v, self.k * 2, self.f, self.kappa)

    @staticmethod
    def multipliable(v, k, f):
        return cfix(cint.conv(v), k, f)
class unreduced_sfix(_single):
    """Product of sfix values before the truncation/reduction step,
    allowing sums of products with one final reduction."""
    int_type = sint

    @classmethod
    def _new(cls, v):
        return cls(v, 2 * sfix.k, sfix.f, sfix.kappa)

    def __init__(self, v, k, m, kappa):
        self.v = v          # unreduced integer representation
        self.k = k          # total bit length
        self.m = m          # fractional bits to truncate
        self.kappa = kappa  # statistical security parameter

    def __add__(self, other):
        # Accept the integer 0 as neutral element so sum() works.
        # Fixed: explicit integer-zero test instead of the fragile
        # 'other is 0' identity check.
        if isinstance(other, int) and other == 0:
            return self
        assert self.k == other.k
        assert self.m == other.m
        assert self.kappa == other.kappa
        return unreduced_sfix(self.v + other.v, self.k, self.m, self.kappa)
    __radd__ = __add__

    @vectorize
    def reduce_after_mul(self):
        """Truncate to obtain a proper sfix."""
        return sfix(sfix.int_type.round(self.v, self.k, self.m, self.kappa,
                                        nearest=sfix.round_nearest,
                                        signed=True))
sfix.unreduced_type = unreduced_sfix
# Default precision: 20-bit fraction in a 40-bit representation.
# Chosen so products fit in a 128-bit prime field:
# (precision n1) 41 + (precision n2) 41 + (stat_sec) 40 = 122 <= 128,
# with statistical security of 40.
fixed_lower = 20
fixed_upper = 40
sfix.set_precision(fixed_lower, fixed_upper)
cfix.set_precision(fixed_lower, fixed_upper)
class squant(_single):
    """Secret quantised number: integer share q representing
    S * (q - Z), with scale S, zero point Z and bit width k."""
    __slots__ = ['params']
    int_type = sint
    # Clamp requantisation results to the representable [0, 2^k).
    clamp = True

    @classmethod
    def set_params(cls, S, Z=0, k=8):
        cls.params = squant_params(S, Z, k)

    @classmethod
    def from_sint(cls, other):
        raise CompilerError('sint to squant conversion not implemented')

    @classmethod
    def _new(cls, value, params=None):
        # Wrap an existing integer share without re-quantising.
        res = cls(params=params)
        res.v = value
        return res

    @read_mem_value
    def __init__(self, value=None, params=None):
        if params is not None:
            self.params = params
        if value is None:
            # need to set v manually
            pass
        elif isinstance(value, cfix.scalars):
            set_global_vector_size(1)
            q = util.round_to_int(value / self.S + self.Z)
            if util.is_constant(q) and (q < 0 or q >= 2**self.k):
                raise CompilerError('%f not quantizable' % value)
            self.v = self.int_type(q)
            reset_global_vector_size()
        elif isinstance(value, squant) and value.params == self.params:
            self.v = value.v
        else:
            raise CompilerError('cannot convert %s to squant' % value)

    def __getitem__(self, index):
        return type(self)._new(self.v[index], self.params)

    def get_params(self):
        return self.params

    @property
    def S(self):
        return self.params.S
    @property
    def Z(self):
        return self.params.Z
    @property
    def k(self):
        return self.params.k

    def coerce(self, other):
        other = self.conv(other)
        return self._new(util.expand(other.v, self.size), other.params)

    @vectorize
    def add(self, other):
        other = self.coerce(other)
        assert self.get_params() == other.get_params()
        # (q1 - Z) + (q2 - Z) maps back to share q1 + q2 - Z.
        return self._new(self.v + other.v - util.expand(self.Z, self.v.size))

    def mul(self, other, res_params=None):
        return self.mul_no_reduce(other, res_params).reduce_after_mul()

    def mul_no_reduce(self, other, res_params=None):
        if isinstance(other, (sint, cint, regint)):
            return self._new(other * (self.v - self.Z) + self.Z,
                             params=self.get_params())
        other = self.coerce(other)
        # Multiply zero-point-free representations; defer reduction.
        tmp = (self.v - self.Z) * (other.v - other.Z)
        return _unreduced_squant(tmp, (self.get_params(), other.get_params()),
                                 res_params=res_params)

    def pre_mul(self):
        return self.v - util.expand(self.Z, self.v.size)

    def unreduced(self, v, other, res_params=None, n_summands=1):
        return _unreduced_squant(v, (self.get_params(), other.get_params()),
                                 res_params, n_summands)

    @vectorize
    def for_mux(self, other):
        """Return (wrapper, share, other share) for multiplexing."""
        other = self.coerce(other)
        assert self.params == other.params
        f = lambda x: self._new(x, self.params)
        return f, self.v, other.v

    @vectorize
    def __neg__(self):
        # -(q - Z) maps back to share -q + 2Z.
        return self._new(-self.v + 2 * util.expand(self.Z, self.v.size))
class _unreduced_squant(object):
    """Sum-capable wrapper for unreduced squant products."""
    def __init__(self, v, params, res_params=None, n_summands=1):
        self.v = v                  # unreduced integer value
        self.params = params        # (left, right) parameter pair
        self.n_summands = n_summands
        self.res_params = res_params or params[0]

    def __add__(self, other):
        # Accept the integer 0 as neutral element so sum() works.
        # Fixed: explicit integer-zero test instead of the fragile
        # 'other is 0' identity check.
        if isinstance(other, int) and other == 0:
            return self
        assert self.params == other.params
        assert self.res_params == other.res_params
        return _unreduced_squant(self.v + other.v, self.params, self.res_params,
                                 self.n_summands + other.n_summands)
    __radd__ = __add__

    def reduce_after_mul(self):
        """Requantise to the result parameters."""
        return squant_params.conv(self.res_params).reduce(self)
class squant_params(object):
    """Quantisation parameters (scale S, zero point Z, bit width k)
    plus cached requantisation constants."""
    max_n_summands = 2048

    @staticmethod
    def conv(other):
        if isinstance(other, squant_params):
            return other
        else:
            return squant_params(*other)

    def __init__(self, S, Z=0, k=8):
        # Fixed: catch only the conversion errors float() can raise
        # instead of a bare 'except:' that also swallowed
        # KeyboardInterrupt/SystemExit.
        try:
            self.S = float(S)
        except (TypeError, ValueError):
            # S stays symbolic (e.g. a run-time value).
            self.S = S
        self.Z = MemValue.if_necessary(Z)
        self.k = k
        self._store = {}
        if program.options.ring:
            # cheaper probabilistic truncation
            self.max_length = int(program.options.ring) - 1
        else:
            # safe choice for secret shift
            self.max_length = 71

    def __iter__(self):
        yield self.S
        yield self.Z
        yield self.k

    def is_constant(self):
        return util.is_constant_float(self.S) and util.is_constant(self.Z)

    def get(self, input_params, n_summands):
        """Compute (shift, integer multiplier, shifted zero point) for
        requantising a sum of n_summands products."""
        p = input_params
        M = p[0].S * p[1].S / self.S
        logM = util.log2(M)
        n_shift = self.max_length - p[0].k - p[1].k - util.log2(n_summands)
        if util.is_constant_float(M):
            n_shift -= logM
            int_mult = int(round(M * 2 ** (n_shift)))
        else:
            int_mult = MemValue(M.v << (n_shift + M.p))
        shifted_Z = MemValue.if_necessary(self.Z << n_shift)
        return n_shift, int_mult, shifted_Z

    def precompute(self, *input_params):
        self._store[input_params] = self.get(input_params, self.max_n_summands)

    def get_stored(self, unreduced):
        assert unreduced.n_summands <= self.max_n_summands
        return self._store[unreduced.params]

    def reduce(self, unreduced):
        """Requantise an unreduced value, clamping to [0, 2^k) when
        squant.clamp is set."""
        ps = (self,) + unreduced.params
        if reduce(operator.and_, (p.is_constant() for p in ps)):
            n_shift, int_mult, shifted_Z = self.get(unreduced.params,
                                                    unreduced.n_summands)
        else:
            n_shift, int_mult, shifted_Z = self.get_stored(unreduced)
        size = unreduced.v.size
        n_shift = util.expand(n_shift, size)
        shifted_Z = util.expand(shifted_Z, size)
        int_mult = util.expand(int_mult, size)
        tmp = unreduced.v * int_mult + shifted_Z
        shifted = tmp.round(self.max_length, n_shift,
                            kappa=squant.kappa, nearest=squant.round_nearest,
                            signed=True)
        if squant.clamp:
            length = max(self.k, self.max_length - n_shift) + 1
            top = (1 << self.k) - 1
            over = shifted.greater_than(top, length, squant.kappa)
            under = shifted.less_than(0, length, squant.kappa)
            shifted = over.if_else(top, shifted)
            shifted = under.if_else(0, shifted)
        return squant._new(shifted, params=self)
class sfloat(_number, _structure):
    """Secret-shared floating-point number, stored as four secret
    integers: significand ``v``, exponent ``p``, zero flag ``z`` and
    sign bit ``s``, so the value is ``(1 - 2s) * (1 - z) * v * 2**p``.
    """
    __slots__ = ['v', 'p', 'z', 's', 'size']
    # single precision
    vlen = 24   # significand bit length
    plen = 8    # exponent bit length
    kappa = 40  # statistical security parameter
    round_nearest = False
    @staticmethod
    def n_elements():
        # four secret integers per value (v, p, z, s)
        return 4
    @classmethod
    def malloc(cls, size):
        return program.malloc(size * cls.n_elements(), sint)
    @classmethod
    def is_address_tuple(cls, address):
        # an address may be one base address or one address per component
        if isinstance(address, (list, tuple)):
            assert(len(address) == cls.n_elements())
            return True
        return False
    @vectorized_classmethod
    def load_mem(cls, address, mem_type=None):
        """Load a (vectorized) sfloat from memory; components are stored
        in four consecutive size-sized chunks unless separate addresses
        are given."""
        size = get_global_vector_size()
        if cls.is_address_tuple(address):
            return sfloat(*(sint.load_mem(a, size=size) for a in address))
        res = []
        for i in range(4):
            res.append(sint.load_mem(address + i * size, size=size))
        return sfloat(*res)
    @classmethod
    def set_error(cls, error):
        # incompatible with loops
        #cls.error += error - cls.error * error
        cls.error = error
        pass
    @classmethod
    def conv(cls, other):
        if isinstance(other, cls):
            return other
        else:
            return cls(other)
    @classmethod
    def coerce(cls, other):
        return cls.conv(other)
    @staticmethod
    def convert_float(v, vlen, plen):
        """Convert a Python float to (v, p, z, s) representation with
        *vlen* significand bits and *plen* exponent bits.

        Raises :class:`CompilerError` on exponent overflow; underflow is
        truncated to zero with a warning."""
        if v < 0:
            s = 1
        else:
            s = 0
        if v == 0:
            v = 0
            p = 0
            z = 1
        else:
            p = int(math.floor(math.log(abs(v), 2))) - vlen + 1
            vv = v
            v = int(round(abs(v) * 2 ** (-p)))
            if v == 2 ** vlen:
                # rounding pushed the significand out of range
                p += 1
                v //= 2
            z = 0
            if p < -2 ** (plen - 1):
                print('Warning: %e truncated to zero' % vv)
                v, p, z = 0, 0, 1
            if p >= 2 ** (plen - 1):
                raise CompilerError('Cannot convert %s to float ' \
                                    'with %d exponent bits' % (vv, plen))
        return v, p, z, s
    @vectorized_classmethod
    def get_input_from(cls, player):
        """Read a secret float from the given player's input."""
        v = sint()
        p = sint()
        z = sint()
        s = sint()
        inputmixed('float', v, p, z, s, cls.vlen, player)
        return cls(v, p, z, s)
    @vectorize_init
    @read_mem_value
    def __init__(self, v, p=None, z=None, s=None, size=None):
        """Construct from another sfloat/sfix, a Python float, a secret
        integer, or explicit (v, p, z, s) components."""
        self.size = get_global_vector_size()
        if p is None:
            if isinstance(v, sfloat):
                p = v.p
                z = v.z
                s = v.s
                v = v.v
            elif isinstance(v, sfix):
                # convert fixed point: shift exponent by the precision f
                f = v.f
                v, p, z, s = floatingpoint.Int2FL(v.v, v.k,
                                                  self.vlen, self.kappa)
                p = p - f
            elif util.is_constant_float(v):
                v, p, z, s = self.convert_float(v, self.vlen, self.plen)
            else:
                v, p, z, s = floatingpoint.Int2FL(sint.conv(v),
                                                  program.bit_length,
                                                  self.vlen, self.kappa)
        # sanity-check and load any compile-time constant components
        if isinstance(v, int):
            if not ((v >= 2**(self.vlen-1) and v < 2**(self.vlen)) or v == 0):
                raise CompilerError('Floating point number malformed: significand')
            self.v = library.load_int_to_secret(v)
        else:
            self.v = v
        if isinstance(p, int):
            if not (p >= -2**(self.plen - 1) and p < 2**(self.plen - 1)):
                raise CompilerError('Floating point number malformed: exponent %d not unsigned %d-bit integer' % (p, self.plen))
            self.p = library.load_int_to_secret(p)
        else:
            self.p = p
        if isinstance(z, int):
            if not (z == 0 or z == 1):
                raise CompilerError('Floating point number malformed: zero bit')
            self.z = sint()
            ldsi(self.z, z)
        else:
            self.z = z
        if isinstance(s, int):
            if not (s == 0 or s == 1):
                raise CompilerError('Floating point number malformed: sign')
            self.s = sint()
            ldsi(self.s, s)
        else:
            self.s = s
    def __getitem__(self, index):
        return sfloat(*(x[index] for x in self))
    def __iter__(self):
        # component order matches memory layout
        yield self.v
        yield self.p
        yield self.z
        yield self.s
    def store_in_mem(self, address):
        if self.is_address_tuple(address):
            for a, x in zip(address, self):
                x.store_in_mem(a)
            return
        for i,x in enumerate((self.v, self.p, self.z, self.s)):
            x.store_in_mem(address + i * self.size)
    def sizeof(self):
        return self.size * self.n_elements()
    @vectorize
    def add(self, other):
        """Secure floating-point addition following the protocol of
        Aliasgari et al., with a precision tweak (see comment below)."""
        other = self.conv(other)
        if isinstance(other, sfloat):
            a,c,d,e = [sint() for i in range(4)]
            t = sint()
            t2 = sint()
            v1 = self.v
            v2 = other.v
            p1 = self.p
            p2 = other.p
            s1 = self.s
            s2 = other.s
            z1 = self.z
            z2 = other.z
            # order the operands by exponent/significand obliviously
            a = p1.less_than(p2, self.plen, self.kappa)
            b = floatingpoint.EQZ(p1 - p2, self.plen, self.kappa)
            c = v1.less_than(v2, self.vlen, self.kappa)
            ap1 = a*p1
            ap2 = a*p2
            aneg = 1 - a
            bneg = 1 - b
            cneg = 1 - c
            av1 = a*v1
            av2 = a*v2
            cv1 = c*v1
            cv2 = c*v2
            pmax = ap2 + p1 - ap1
            pmin = p2 - ap2 + ap1
            vmax = bneg*(av2 + v1 - av1) + b*(cv2 + v1 - cv1)
            vmin = bneg*(av1 + v2 - av2) + b*(cv1 + v2 - cv2)
            s3 = s1 + s2 - 2 * s1 * s2
            comparison.LTZ(d, self.vlen + pmin - pmax + sfloat.round_nearest,
                           self.plen, self.kappa)
            pow_delta = floatingpoint.Pow2((1 - d) * (pmax - pmin),
                                           self.vlen + 1 + sfloat.round_nearest,
                                           self.kappa)
            # deviate from paper for more precision
            #v3 = 2 * (vmax - s3) + 1
            v3 = vmax
            v4 = vmax * pow_delta + (1 - 2 * s3) * vmin
            to_trunc = (d * v3 + (1 - d) * v4)
            if program.options.ring:
                to_trunc <<= 1 + sfloat.round_nearest
                v = floatingpoint.TruncInRing(to_trunc,
                                              2 * (self.vlen + 1 +
                                                   sfloat.round_nearest),
                                              pow_delta)
            else:
                to_trunc *= two_power(self.vlen + sfloat.round_nearest)
                v = to_trunc * floatingpoint.Inv(pow_delta)
                comparison.Trunc(t, v, 2 * self.vlen + 1 + sfloat.round_nearest,
                                 self.vlen - 1, self.kappa, False)
                v = t
            # normalize: find the most significant bit position
            u = floatingpoint.BitDec(v, self.vlen + 2 + sfloat.round_nearest,
                                     self.vlen + 2 + sfloat.round_nearest, self.kappa,
                                     list(range(1 + sfloat.round_nearest,
                                                self.vlen + 2 + sfloat.round_nearest)))
            # using u[0] doesn't seem necessary
            h = floatingpoint.PreOR(u[:sfloat.round_nearest:-1], self.kappa)
            p0 = self.vlen + 1 - sum(h)
            pow_p0 = 1 + sum([two_power(i) * (1 - h[i]) for i in range(len(h))])
            if self.round_nearest:
                t2, overflow = \
                    floatingpoint.TruncRoundNearestAdjustOverflow(pow_p0 * v,
                                                                  self.vlen + 3,
                                                                  self.vlen,
                                                                  self.kappa)
                p0 = p0 - overflow
            else:
                comparison.Trunc(t2, pow_p0 * v, self.vlen + 2, 2, self.kappa, False)
            v = t2
            p = pmax - p0 + 1
            # handle operands that are zero
            zz = self.z*other.z
            zprod = 1 - self.z - other.z + zz
            v = zprod*t2 + self.z*v2 + other.z*v1
            z = floatingpoint.EQZ(v, self.vlen, self.kappa)
            p = (zprod*p + self.z*p2 + other.z*p1)*(1 - z)
            s = (1 - b)*(a*other.s + aneg*self.s) + b*(c*other.s + cneg*self.s)
            s = zprod*s + (other.z - zz)*self.s + (self.z - zz)*other.s
            return sfloat(v, p, z, s)
        else:
            return NotImplemented
    @vectorize_max
    def mul(self, other):
        """Secure floating-point multiplication."""
        other = self.conv(other)
        if isinstance(other, sfloat):
            v1 = sint()
            v2 = sint()
            b = sint()
            c2expl = cint()
            comparison.ld2i(c2expl, self.vlen)
            # multiply significands and truncate back to vlen bits
            if sfloat.round_nearest:
                v1 = comparison.TruncRoundNearest(self.v*other.v, 2*self.vlen,
                                                  self.vlen-1, self.kappa)
            else:
                comparison.Trunc(v1, self.v*other.v, 2*self.vlen, self.vlen-1, self.kappa, False)
            t = v1 - c2expl
            comparison.LTZ(b, t, self.vlen+1, self.kappa)
            comparison.Trunc(v2, b*v1 + v1, self.vlen+1, 1, self.kappa, False)
            z1, z2, s1, s2, p1, p2 = (x.expand_to_vector() for x in \
                                      (self.z, other.z, self.s, other.s,
                                       self.p, other.p))
            z = z1 + z2 - self.z*other.z
            s = s1 + s2 - self.s*other.s*2
            p = (p1 + p2 - b + self.vlen)*(1 - z)
            return sfloat(v2, p, z, s)
        else:
            return NotImplemented
    def __sub__(self, other):
        return self + -other
    def __rsub__(self, other):
        return -self + other
    def __truediv__(self, other):
        """Secure floating-point division; sets the error flag if the
        divisor is zero."""
        other = self.conv(other)
        v = floatingpoint.SDiv(self.v, other.v + other.z * (2**self.vlen - 1),
                               self.vlen, self.kappa, self.round_nearest)
        b = v.less_than(two_power(self.vlen-1), self.vlen + 1, self.kappa)
        overflow = v.greater_equal(two_power(self.vlen), self.vlen + 1, self.kappa)
        underflow = v.less_than(two_power(self.vlen-2), self.vlen + 1, self.kappa)
        v = (v + b * v) * (1 - overflow) * (1 - underflow) + \
            overflow * (2**self.vlen - 1) + \
            underflow * (2**(self.vlen-1)) * (1 - self.z)
        p = (1 - self.z) * (self.p - other.p - self.vlen - b + 1)
        z = self.z
        s = self.s + other.s - 2 * self.s * other.s
        sfloat.set_error(other.z)
        return sfloat(v, p, z, s)
    def __rtruediv__(self, other):
        return self.conv(other) / self
    @vectorize
    def __neg__(self):
        # flip the sign bit unless the value is zero
        return sfloat(self.v, self.p, self.z, (1 - self.s) * (1 - self.z))
    @vectorize
    def __lt__(self, other):
        """Secure less-than comparison; returns a secret bit."""
        other = self.conv(other)
        if isinstance(other, sfloat):
            z1 = self.z
            z2 = other.z
            s1 = self.s
            s2 = other.s
            a = self.p.less_than(other.p, self.plen, self.kappa)
            c = floatingpoint.EQZ(self.p - other.p, self.plen, self.kappa)
            d = ((1 - 2*self.s)*self.v).less_than((1 - 2*other.s)*other.v, self.vlen + 1, self.kappa)
            cd = c*d
            ca = c*a
            b1 = cd + a - ca
            b2 = cd + 1 + ca - c - a
            s12 = self.s*other.s
            z12 = self.z*other.z
            b = (z1 - z12)*(1 - s2) + (z2 - z12)*s1 + (1 + z12 - z1 - z2)*(s1 - s12 + (1 + s12 - s1 - s2)*b1 + s12*b2)
            return b
        else:
            return NotImplemented
    def __ge__(self, other):
        return 1 - (self < other)
    def __gt__(self, other):
        return self.conv(other) < self
    def __le__(self, other):
        return self.conv(other) >= self
    @vectorize
    def __eq__(self, other):
        other = self.conv(other)
        # the sign can be both ways for zeroes
        both_zero = self.z * other.z
        return floatingpoint.EQZ(self.v - other.v, self.vlen, self.kappa) * \
            floatingpoint.EQZ(self.p - other.p, self.plen, self.kappa) * \
            (1 - self.s - other.s + 2 * self.s * other.s) * \
            (1 - both_zero) + both_zero
    def __ne__(self, other):
        return 1 - (self == other)
    def log2(self):
        # position of the most significant bit of the significand
        up = self.v.greater_than(1 << (self.vlen - 1), self.vlen, self.kappa)
        return self.p + self.vlen - 1 + up
    def round_to_int(self):
        """Round to the nearest integer (as a secret integer)."""
        direction = self.p.greater_equal(-self.vlen, self.plen, self.kappa)
        right = self.v.right_shift(-self.p - 1, self.vlen + 1, self.kappa)
        up = right.mod2m(1, self.vlen + 1, self.kappa)
        right = right.right_shift(1, self.vlen + 1, self.kappa) + up
        abs_value = direction * right
        return self.s.if_else(-abs_value, abs_value)
    def value(self):
        """Debugging helper -- only works if the underlying registers
        expose cleartext values."""
        return (1 - 2*self.s.value)*(1 - self.z.value)*self.v.value/float(2**self.p.value)
    def reveal(self):
        return cfloat(self.v.reveal(), self.p.reveal(), self.z.reveal(), self.s.reveal())
class cfloat(object):
    """Cleartext counterpart of sfloat, holding the four revealed
    components (significand, exponent, zero flag, sign)."""
    __slots__ = ['v', 'p', 'z', 's']
    def __init__(self, v, p, z, s):
        self.v = cint.conv(v)
        self.p = cint.conv(p)
        self.z = cint.conv(z)
        self.s = cint.conv(s)
    def print_float_plain(self):
        """Output the value as a float."""
        print_float_plain(self.v, self.p, self.z, self.s)
# sfix falls back to sfloat where fixed-point precision is insufficient
sfix.float_type = sfloat
# short type codes used throughout the compiler
_types = {
    'c': cint,
    's': sint,
    'sg': sgf2n,
    'cg': cgf2n,
    'ci': regint,
}
def _get_type(t):
    """Resolve a short type code (e.g. 's') to its class; anything that
    is not a known code is returned unchanged."""
    return _types.get(t, t)
class Array(object):
    """One-dimensional array in compile-time allocated memory."""
    @classmethod
    def create_from(cls, l):
        """Build an Array from an iterable; the element type is taken
        from the first element."""
        if isinstance(l, cls):
            return l
        tmp = list(l)
        res = cls(len(tmp), type(tmp[0]))
        res.assign(tmp)
        return res
    def __init__(self, length, value_type, address=None, debug=None):
        value_type = _get_type(value_type)
        self.address = address
        self.length = length
        self.value_type = value_type
        if address is None:
            self.address = self._malloc()
        self.address_cache = {}
        self.debug = debug
    def _malloc(self):
        return self.value_type.malloc(self.length)
    def delete(self):
        if program:
            program.free(self.address, self.value_type.reg_type)
    def get_address(self, index):
        """Return the memory address(es) for *index*, caching per basic
        block; supports negative indices and bounds-checks constants."""
        key = str(index)
        if isinstance(index, int) and self.length is not None:
            index += self.length * (index < 0)
            if index >= self.length or index < 0:
                raise IndexError('index %s, length %s' % \
                                 (str(index), str(self.length)))
        if (program.curr_block, key) not in self.address_cache:
            n = self.value_type.n_elements()
            length = self.length
            if n == 1:
                # length can be None for single-element values
                length = 0
            self.address_cache[program.curr_block, key] = \
                util.untuplify([self.address + index + i * length \
                                for i in range(n)])
            if self.debug:
                library.print_ln_if(index >= self.length, 'OF:' + self.debug)
                library.print_ln_if(self.address_cache[program.curr_block, key] >= program.allocated_mem[self.value_type.reg_type], 'AOF:' + self.debug)
        return self.address_cache[program.curr_block, key]
    def get_slice(self, index):
        if index.stop is None and self.length is None:
            raise CompilerError('Cannot slice array of unknown length')
        return index.start or 0, index.stop or self.length, index.step or 1
    def __getitem__(self, index):
        if isinstance(index, slice):
            start, stop, step = self.get_slice(index)
            res_length = (stop - start - 1) // step + 1
            res = Array(res_length, self.value_type)
            @library.for_range(res_length)
            def f(i):
                res[i] = self[start+i*step]
            return res
        return self._load(self.get_address(index))
    def __setitem__(self, index, value):
        if isinstance(index, slice):
            start, stop, step = self.get_slice(index)
            value = Array.create_from(value)
            source_index = MemValue(0)
            @library.for_range(start, stop, step)
            def f(i):
                self[i] = value[source_index]
                source_index.iadd(1)
            return
        self._store(value, self.get_address(index))
    def get_range(self, start, size):
        return [self[start + i] for i in range(size)]
    def set_range(self, start, values):
        for i, value in enumerate(values):
            self[start + i] = value
    def _load(self, address):
        return self.value_type.load_mem(address)
    def _store(self, value, address):
        self.value_type.conv(value).store_in_mem(address)
    def __len__(self):
        return self.length
    def __iter__(self):
        for i in range(self.length):
            yield self[i]
    def same_shape(self):
        return Array(self.length, self.value_type)
    def assign(self, other, base=0):
        """Copy *other* into this array, starting at *base*."""
        try:
            other = other.get_vector()
        except (AttributeError, AssertionError):
            # no single-vector representation available (was a bare
            # ``except:``) -- fall through to the generic paths below
            pass
        try:
            other.store_in_mem(self.get_address(base))
            assert len(self) >= other.size + base
        except AttributeError:
            # element-wise fallback; honor *base* here too
            # (previously wrote from index 0 regardless of base)
            for i, j in enumerate(other):
                self[base + i] = j
        return self
    def assign_all(self, value, use_threads=True, conv=True):
        """Fill the whole array with *value*."""
        if conv:
            value = self.value_type.conv(value)
        mem_value = MemValue(value)
        n_threads = 8 if use_threads and len(self) > 2**20 else 1
        @library.for_range_multithread(n_threads, 1024, len(self))
        def f(i):
            self[i] = mem_value
        return self
    def get_vector(self, base=0, size=None):
        size = size or self.length
        return self.value_type.load_mem(self.get_address(base), size=size)
    def get_mem_value(self, index):
        return MemValue(self[index], self.get_address(index))
    def input_from(self, player, budget=None):
        # NOTE(review): *budget* is accepted for interface parity but not
        # used here
        self.assign(self.value_type.get_input_from(player, size=len(self)))
    def __add__(self, other):
        # allow sum(), which seeds with the integer 0
        # (was ``other is 0``)
        if isinstance(other, int) and other == 0:
            return self
        assert len(self) == len(other)
        return self.get_vector() + other
    def __sub__(self, other):
        assert len(self) == len(other)
        return self.get_vector() - other
    def __mul__(self, value):
        return self.get_vector() * value
    def __pow__(self, value):
        return self.get_vector() ** value
    __radd__ = __add__
    __rmul__ = __mul__
    def shuffle(self):
        """Fisher-Yates shuffle using public randomness.

        NOTE(review): ``% (len(self) - i)`` introduces modulo bias for
        lengths that do not divide 2^64."""
        @library.for_range(len(self))
        def _(i):
            j = regint.get_random(64) % (len(self) - i)
            tmp = self[i]
            self[i] = self[i + j]
            self[i + j] = tmp
    def reveal(self):
        return Array.create_from(x.reveal() for x in self)
# default container type for dynamically allocated secret arrays
sint.dynamic_array = Array
sgf2n.dynamic_array = Array
class SubMultiArray(object):
    """View on a multidimensional array at a given index of the first
    dimension; also the base class of MultiArray."""
    def __init__(self, sizes, value_type, address, index, debug=None):
        self.sizes = sizes
        self.value_type = _get_type(value_type)
        self.address = address + index * self.total_size()
        self.sub_cache = {}
        self.debug = debug
        if debug:
            library.print_ln_if(self.address + reduce(operator.mul, self.sizes) * self.value_type.n_elements() > program.allocated_mem[self.value_type.reg_type], 'AOF%d:' % len(self.sizes) + self.debug)
    def __getitem__(self, index):
        """Return the sub-array (or Array for the last-but-one level) at
        *index*, caching views per basic block."""
        if util.is_constant(index) and index >= self.sizes[0]:
            # legacy __getitem__-based iteration protocol: CPython stops
            # iterating on StopIteration (as well as IndexError)
            raise StopIteration
        key = program.curr_block, str(index)
        if key not in self.sub_cache:
            if self.debug:
                library.print_ln_if(index >= self.sizes[0], \
                                    'OF%d:' % len(self.sizes) + self.debug)
            if len(self.sizes) == 2:
                self.sub_cache[key] = \
                    Array(self.sizes[1], self.value_type, \
                          self.address + index * self.sizes[1] *
                          self.value_type.n_elements(), \
                          debug=self.debug)
            else:
                self.sub_cache[key] = \
                    SubMultiArray(self.sizes[1:], self.value_type, \
                                  self.address, index, debug=self.debug)
        return self.sub_cache[key]
    def __setitem__(self, index, other):
        self[index].assign(other)
    def __len__(self):
        return self.sizes[0]
    def assign_all(self, value):
        @library.for_range(self.sizes[0])
        def f(i):
            self[i].assign_all(value)
        return self
    def total_size(self):
        return reduce(operator.mul, self.sizes) * self.value_type.n_elements()
    def get_vector(self, base=0, size=None):
        assert self.value_type.n_elements() == 1
        size = size or self.total_size()
        return self.value_type.load_mem(self.address + base, size=size)
    def assign_vector(self, vector, base=0):
        assert self.value_type.n_elements() == 1
        assert vector.size <= self.total_size()
        vector.store_in_mem(self.address + base)
    def assign(self, other):
        if self.value_type.n_elements() > 1:
            assert self.sizes == other.sizes
        self.assign_vector(other.get_vector())
    def same_shape(self):
        return MultiArray(self.sizes, self.value_type)
    def input_from(self, player, budget=None):
        @library.for_range_opt(self.sizes[0], budget=budget)
        def _(i):
            self[i].input_from(player, budget=budget)
    def schur(self, other):
        """Element-wise (Hadamard) product."""
        assert self.sizes == other.sizes
        if len(self.sizes) == 2:
            res = Matrix(self.sizes[0], self.sizes[1], self.value_type)
        else:
            res = MultiArray(self.sizes, self.value_type)
        res.assign_vector(self.get_vector() * other.get_vector())
        return res
    def __add__(self, other):
        # allow sum(), which seeds with the integer 0
        # (was ``other is 0``)
        if isinstance(other, int) and other == 0:
            return self
        assert self.sizes == other.sizes
        if len(self.sizes) == 2:
            res = Matrix(self.sizes[0], self.sizes[1], self.value_type)
        else:
            res = MultiArray(self.sizes, self.value_type)
        res.assign_vector(self.get_vector() + other.get_vector())
        return res
    __radd__ = __add__
    def iadd(self, other):
        assert self.sizes == other.sizes
        self.assign_vector(self.get_vector() + other.get_vector())
    def __mul__(self, other):
        return self.mul(other)
    def mul(self, other, res_params=None):
        """Matrix(-vector) multiplication, preferring a single vectorized
        dot product and falling back to row- or element-wise loops."""
        assert len(self.sizes) == 2
        if isinstance(other, Array):
            assert len(other) == self.sizes[1]
            if self.value_type.n_elements() == 1:
                # reinterpret the array as a column matrix in place
                matrix = Matrix(len(other), 1, other.value_type, \
                                address=other.address)
                res = self * matrix
                return Array(res.sizes[0], res.value_type, address=res.address)
            else:
                matrix = Matrix(len(other), 1, other.value_type)
                for i, x in enumerate(other):
                    matrix[i][0] = x
                res = self * matrix
                return Array.create_from(x[0] for x in res)
        elif isinstance(other, SubMultiArray):
            assert len(other.sizes) == 2
            assert other.sizes[0] == self.sizes[1]
            if res_params is not None:
                # subtype carrying the result's quantization parameters
                class t(self.value_type):
                    pass
                t.params = res_params
            else:
                t = self.value_type
            res_matrix = Matrix(self.sizes[0], other.sizes[1], t)
            try:
                if max(res_matrix.sizes) > 1000:
                    raise AttributeError()
                A = self.get_vector()
                B = other.get_vector()
                res_matrix.assign_vector(
                    self.value_type.matrix_mul(A, B, self.sizes[1],
                                               res_params))
            except (AttributeError, AssertionError):
                # fall back to row-wise multiplication
                @library.for_range_opt(self.sizes[0])
                def _(i):
                    try:
                        res_matrix[i] = self.value_type.row_matrix_mul(
                            self[i], other, res_params)
                    except AttributeError:
                        # final fallback: plain triple loop
                        @library.for_range(other.sizes[1])
                        def _(j):
                            res_matrix[i][j] = 0
                            @library.for_range(self.sizes[1])
                            def _(k):
                                res_matrix[i][j] += self[i][k] * other[k][j]
            return res_matrix
        else:
            raise NotImplementedError
    def budget_mul(self, other, n_rows, row, n_columns, column, reduce=True,
                   res=None):
        """Generic matrix product with row/column accessor callables, used
        by plain_mul/mul_trans/trans_mul."""
        assert len(self.sizes) == 2
        assert len(other.sizes) == 2
        if res is None:
            if reduce:
                res_matrix = Matrix(n_rows, n_columns, self.value_type)
            else:
                res_matrix = Matrix(n_rows, n_columns, \
                                    self.value_type.unreduced_type)
        else:
            res_matrix = res
        @library.for_range_opt(n_rows)
        def _(i):
            @library.for_range_opt(n_columns)
            def _(j):
                col = column(other, j)
                r = row(self, i)
                if reduce:
                    res_matrix[i][j] = self.value_type.dot_product(r, col)
                else:
                    entry = self.value_type.unreduced_dot_product(r, col)
                    res_matrix[i][j] = entry
        return res_matrix
    def plain_mul(self, other, res=None):
        """self * other"""
        assert other.sizes[0] == self.sizes[1]
        return self.budget_mul(other, self.sizes[0], lambda x, i: x[i], \
                               other.sizes[1], \
                               lambda x, j: [x[k][j] for k in range(len(x))],
                               res=res)
    def mul_trans(self, other):
        """self * other^T"""
        assert other.sizes[1] == self.sizes[1]
        return self.budget_mul(other, self.sizes[0], lambda x, i: x[i], \
                               other.sizes[0], lambda x, j: x[j])
    def trans_mul(self, other, reduce=True, res=None):
        """self^T * other"""
        assert other.sizes[0] == self.sizes[0]
        return self.budget_mul(other, self.sizes[1], \
                               lambda x, j: [x[k][j] for k in range(len(x))], \
                               other.sizes[1], \
                               lambda x, j: [x[k][j] for k in range(len(x))],
                               reduce=reduce, res=res)
    def transpose(self):
        assert len(self.sizes) == 2
        res = Matrix(self.sizes[1], self.sizes[0], self.value_type)
        @library.for_range_opt(self.sizes[1])
        def _(i):
            @library.for_range_opt(self.sizes[0])
            def _(j):
                res[i][j] = self[j][i]
        return res
class MultiArray(SubMultiArray):
    """Multidimensional array that owns (or wraps) its backing storage."""
    def __init__(self, sizes, value_type, debug=None, address=None):
        if isinstance(address, Array):
            backing = address
        else:
            n_total = reduce(operator.mul, sizes)
            backing = Array(n_total, value_type, address=address)
        self.array = backing
        SubMultiArray.__init__(self, sizes, value_type, backing.address, 0, \
                               debug=debug)
        if len(sizes) < 2:
            raise CompilerError('Use Array')
class Matrix(MultiArray):
    """Two-dimensional MultiArray."""
    def __init__(self, rows, columns, value_type, debug=None, address=None):
        super(Matrix, self).__init__([rows, columns], value_type,
                                     debug=debug, address=address)
class VectorArray(object):
    """Array of equally sized vectors stored contiguously in memory."""
    def __init__(self, length, value_type, vector_size, address=None):
        self.array = Array(length * vector_size, value_type, address)
        self.vector_size = vector_size
        self.value_type = value_type
    def _element_address(self, index):
        # start address of the vector at *index*
        return self.array.address + index * self.vector_size
    def __getitem__(self, index):
        return self.value_type.load_mem(self._element_address(index),
                                        size=self.vector_size)
    def __setitem__(self, index, value):
        if value.size != self.vector_size:
            raise CompilerError('vector size mismatch')
        value.store_in_mem(self._element_address(index))
class _mem(_number):
    """Mixin for memory-backed values: binary operators act on read(),
    in-place operators do read-modify-write()."""
    __add__ = lambda self,other: self.read() + other
    __sub__ = lambda self,other: self.read() - other
    __mul__ = lambda self,other: self.read() * other
    __truediv__ = lambda self,other: self.read() / other
    __mod__ = lambda self,other: self.read() % other
    __pow__ = lambda self,other: self.read() ** other
    # fixed: __neg__ is unary -- the spurious ``other`` parameter made
    # ``-mem_value`` raise TypeError
    __neg__ = lambda self: -self.read()
    __lt__ = lambda self,other: self.read() < other
    __gt__ = lambda self,other: self.read() > other
    __le__ = lambda self,other: self.read() <= other
    __ge__ = lambda self,other: self.read() >= other
    __eq__ = lambda self,other: self.read() == other
    __ne__ = lambda self,other: self.read() != other
    __and__ = lambda self,other: self.read() & other
    __xor__ = lambda self,other: self.read() ^ other
    __or__ = lambda self,other: self.read() | other
    __lshift__ = lambda self,other: self.read() << other
    __rshift__ = lambda self,other: self.read() >> other
    __radd__ = lambda self,other: other + self.read()
    __rsub__ = lambda self,other: other - self.read()
    __rmul__ = lambda self,other: other * self.read()
    __rtruediv__ = lambda self,other: other / self.read()
    __rmod__ = lambda self,other: other % self.read()
    __rand__ = lambda self,other: other & self.read()
    __rxor__ = lambda self,other: other ^ self.read()
    __ror__ = lambda self,other: other | self.read()
    __iadd__ = lambda self,other: self.write(self.read() + other)
    __isub__ = lambda self,other: self.write(self.read() - other)
    __imul__ = lambda self,other: self.write(self.read() * other)
    __idiv__ = lambda self,other: self.write(self.read() / other)
    __imod__ = lambda self,other: self.write(self.read() % other)
    __ipow__ = lambda self,other: self.write(self.read() ** other)
    __iand__ = lambda self,other: self.write(self.read() & other)
    __ixor__ = lambda self,other: self.write(self.read() ^ other)
    __ior__ = lambda self,other: self.write(self.read() | other)
    __ilshift__ = lambda self,other: self.write(self.read() << other)
    __irshift__ = lambda self,other: self.write(self.read() >> other)
    # named aliases for the in-place operators
    iadd = __iadd__
    isub = __isub__
    imul = __imul__
    idiv = __idiv__
    imod = __imod__
    ipow = __ipow__
    iand = __iand__
    ixor = __ixor__
    ior = __ior__
    ilshift = __ilshift__
    irshift = __irshift__
    store_in_mem = lambda self,address: self.read().store_in_mem(address)
class MemValue(_mem):
    """Single value backed by memory and cached in a register, so it
    stays valid across basic-block boundaries (e.g. loop iterations)."""
    # 'value_type' added: __init__ assigns it but it was missing from
    # __slots__ (only worked because base classes have no __slots__)
    __slots__ = ['last_write_block', 'reg_type', 'value_type', 'register',
                 'address', 'deleted']
    @classmethod
    def if_necessary(cls, value):
        # wrap only run-time values; compile-time constants stay as is
        if util.is_constant_float(value):
            return value
        else:
            return cls(value)
    def __init__(self, value, address=None):
        self.last_write_block = None
        if isinstance(value, int):
            self.value_type = regint
            value = regint(value)
        elif isinstance(value, MemValue):
            self.value_type = value.value_type
        else:
            self.value_type = type(value)
        self.deleted = False
        if address is None:
            self.address = self.value_type.malloc(1)
            self.write(value)
        else:
            # attach to existing memory without initializing it
            self.address = address
    def delete(self):
        self.value_type.free(self.address)
        self.deleted = True
    def check(self):
        if self.deleted:
            raise CompilerError('MemValue deleted')
    def read(self):
        """Return the cached register, reloading from memory if the last
        access happened in a different basic block."""
        self.check()
        if program.curr_block != self.last_write_block:
            self.register = library.load_mem(self.address, self.value_type)
            self.last_write_block = program.curr_block
        return self.register
    def write(self, value):
        """Store *value* to memory and update the register cache."""
        self.check()
        if isinstance(value, MemValue):
            self.register = value.read()
        elif isinstance(value, int):
            self.register = self.value_type(value)
        else:
            self.register = value
        if not isinstance(self.register, self.value_type):
            raise CompilerError('Mismatch in register type, cannot write \
%s to %s' % (type(self.register), self.value_type))
        self.register.store_in_mem(self.address)
        self.last_write_block = program.curr_block
        return self
    def reveal(self):
        return self.read().reveal()
    # comparison and utility operations forwarded to the current value
    less_than = lambda self,other,bit_length=None,security=None: \
        self.read().less_than(other,bit_length,security)
    greater_than = lambda self,other,bit_length=None,security=None: \
        self.read().greater_than(other,bit_length,security)
    less_equal = lambda self,other,bit_length=None,security=None: \
        self.read().less_equal(other,bit_length,security)
    greater_equal = lambda self,other,bit_length=None,security=None: \
        self.read().greater_equal(other,bit_length,security)
    equal = lambda self,other,bit_length=None,security=None: \
        self.read().equal(other,bit_length,security)
    not_equal = lambda self,other,bit_length=None,security=None: \
        self.read().not_equal(other,bit_length,security)
    pow2 = lambda self,*args,**kwargs: self.read().pow2(*args, **kwargs)
    mod2m = lambda self,*args,**kwargs: self.read().mod2m(*args, **kwargs)
    right_shift = lambda self,*args,**kwargs: self.read().right_shift(*args, **kwargs)
    bit_decompose = lambda self,*args,**kwargs: self.read().bit_decompose(*args, **kwargs)
    if_else = lambda self,*args,**kwargs: self.read().if_else(*args, **kwargs)
    expand_to_vector = lambda self,*args,**kwargs: \
        self.read().expand_to_vector(*args, **kwargs)
    def __repr__(self):
        return 'MemValue(%s,%d)' % (self.value_type, self.address)
class MemFloat(_mem):
    """Memory-resident sfloat, one MemValue per component."""
    def __init__(self, *args):
        initial = sfloat(*args)
        # sfloat iterates as (v, p, z, s)
        self.v, self.p, self.z, self.s = (MemValue(part) for part in initial)
    def write(self, *args):
        new_value = sfloat(*args)
        for dest, part in zip((self.v, self.p, self.z, self.s), new_value):
            dest.write(part)
    def read(self):
        return sfloat(self.v, self.p, self.z, self.s)
class MemFix(_mem):
    """Memory-resident fixed-point value (sfix or cfix)."""
    def __init__(self, *args):
        if not args:
            raise CompilerError('MemFix init argument error')
        # was ``type(*args)``: with three arguments that silently creates
        # a new class instead of inspecting the value
        arg_type = type(args[0])
        if arg_type == sfix:
            value = sfix(*args)
        elif arg_type == cfix:
            value = cfix(*args)
        else:
            raise CompilerError('MemFix init argument error')
        self.reg_type = value.v.reg_type
        self.v = MemValue(value.v)
    def write(self, *args):
        value = sfix(*args)
        self.v.write(value.v)
    def reveal(self):
        return cfix(self.v.reveal())
    def read(self):
        # reconstruct the right wrapper type from the stored register
        val = self.v.read()
        if isinstance(val, sint):
            return sfix(val)
        else:
            return cfix(val)
def getNamedTupleType(*names):
    """Return a named-tuple-like class with fields *names* supporting
    element-wise arithmetic and an array container."""
    class NamedTuple(object):
        class NamedTupleArray(object):
            """One Array per field, indexed jointly."""
            def __init__(self, size, t):
                from . import types
                self.arrays = [types.Array(size, t) for i in range(len(names))]
            def __getitem__(self, index):
                return NamedTuple(array[index] for array in self.arrays)
            def __setitem__(self, index, item):
                for array, value in zip(self.arrays, item):
                    array[index] = value
        @classmethod
        def get_array(cls, size, t):
            return cls.NamedTupleArray(size, t)
        def __init__(self, *args):
            # accept either one iterable or one value per field
            if len(args) == 1:
                args = args[0]
            for name, value in zip(names, args):
                self.__dict__[name] = value
        def __iter__(self):
            for name in names:
                yield self.__dict__[name]
        def __add__(self, other):
            return NamedTuple(i + j for i, j in zip(self, other))
        def __sub__(self, other):
            return NamedTuple(i - j for i, j in zip(self, other))
        def __xor__(self, other):
            return NamedTuple(i ^ j for i, j in zip(self, other))
        def __mul__(self, other):
            return NamedTuple(other * i for i in self)
        __rmul__ = __mul__
        __rxor__ = __xor__
        def reveal(self):
            # was ``self.__type__(...)`` -- that attribute never exists,
            # so reveal() always raised AttributeError
            return NamedTuple(x.reveal() for x in self)
    return NamedTuple
from . import library
| true | true |
f72692b82fed413b8a80f995369aec1ebc838715 | 668 | py | Python | onlineassessmentsystem/blog/migrations/0001_initial.py | nevilparmar11/SDP_Online_Assessment_System | 012a1ec7dfca2973a5e03b0f970394cfa674b61e | [
"MIT"
] | 2 | 2021-05-22T15:44:19.000Z | 2021-05-22T17:59:58.000Z | onlineassessmentsystem/blog/migrations/0001_initial.py | jwalit21/SDP_Online_Assessment_System | a778a0e0ae264fe74037a5f0b210d205ebc18d98 | [
"MIT"
] | 7 | 2021-01-18T06:06:38.000Z | 2021-03-03T15:09:17.000Z | onlineassessmentsystem/blog/migrations/0001_initial.py | jwalit21/SDP_Online_Assessment_System | a778a0e0ae264fe74037a5f0b210d205ebc18d98 | [
"MIT"
] | 2 | 2021-03-05T12:28:28.000Z | 2021-05-24T16:10:07.000Z | # Generated by Django 3.1.5 on 2021-01-09 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Blog model with an auto primary
    key, title, description and a file attachment stored under blogs/."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('blogId', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(default='DEFAULT-BLOG', max_length=50)),
                ('description', models.CharField(default='Default Blog description', max_length=1000)),
                ('attachmentPath', models.FileField(max_length=254, upload_to='blogs/')),
            ],
        ),
    ]
| 27.833333 | 103 | 0.58982 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('blogId', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(default='DEFAULT-BLOG', max_length=50)),
('description', models.CharField(default='Default Blog description', max_length=1000)),
('attachmentPath', models.FileField(max_length=254, upload_to='blogs/')),
],
),
]
| true | true |
f72693b15cffa1a04a5fc7222fdd4715cbead461 | 716 | py | Python | tests/test_mag_orms.py | nestauk/ai_research | 19fd193b098dc68706b945e959fad29c4bfed781 | [
"MIT"
] | 3 | 2020-02-24T19:25:39.000Z | 2021-06-29T10:38:29.000Z | tests/test_mag_orms.py | nestauk/ai_research | 19fd193b098dc68706b945e959fad29c4bfed781 | [
"MIT"
] | 19 | 2020-02-24T07:48:52.000Z | 2020-12-21T10:50:14.000Z | tests/test_mag_orms.py | nestauk/ai_research | 19fd193b098dc68706b945e959fad29c4bfed781 | [
"MIT"
] | null | null | null | import pytest
import unittest
import os
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from ai_research.mag.mag_orm import Base
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
class TestMag(unittest.TestCase):
    """Check that the MAG ORM works as expected"""
    # connection configured via the test_postgresdb environment variable
    # (evaluated at class-definition time)
    engine = create_engine(os.getenv("test_postgresdb"))
    Session = sessionmaker(engine)
    def setUp(self):
        """Create the temporary table"""
        Base.metadata.create_all(self.engine)
    def tearDown(self):
        """Drop the temporary table"""
        Base.metadata.drop_all(self.engine)
    def test_build(self):
        # placeholder: passing means table creation/teardown worked
        pass
if __name__ == "__main__":
unittest.main()
| 22.375 | 56 | 0.710894 | import pytest
import unittest
import os
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from ai_research.mag.mag_orm import Base
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
class TestMag(unittest.TestCase):
engine = create_engine(os.getenv("test_postgresdb"))
Session = sessionmaker(engine)
def setUp(self):
Base.metadata.create_all(self.engine)
def tearDown(self):
Base.metadata.drop_all(self.engine)
def test_build(self):
pass
if __name__ == "__main__":
unittest.main()
| true | true |
f72693b16d34b944f5bb4a1349f76575267e7ffa | 1,136 | py | Python | examples/hacker_news/hacker_news/resources/s3_notebook_io_manager.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | examples/hacker_news/hacker_news/resources/s3_notebook_io_manager.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | examples/hacker_news/hacker_news/resources/s3_notebook_io_manager.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | from dagstermill.io_managers import OutputNotebookIOManager
from dagster import io_manager
from .fixed_s3_pickle_io_manager import s3_client
class S3OutputNotebookIOManager(OutputNotebookIOManager):
"""Defines an IOManager that will store dagstermill output notebooks on s3"""
def _get_key(self, context) -> str:
return "notebooks/" + "_".join(context.get_run_scoped_output_identifier())
def load_input(self, context) -> bytes:
key = self._get_key(context.upstream_output)
bucket = context.resources.s3_bucket
context.log.info("loading from: s3_bucket[%s], s3_key[%s]", bucket, key)
return s3_client().get_object(Bucket=bucket, Key=key)["Body"].read()
def handle_output(self, context, obj: bytes):
key = self._get_key(context)
bucket = context.resources.s3_bucket
context.log.info("storing to: s3_bucket[%s], s3_key[%s]", bucket, key)
s3_client().put_object(Bucket=bucket, Key=key, Body=obj)
@io_manager(required_resource_keys={"s3_bucket"})
def s3_notebook_io_manager(_) -> OutputNotebookIOManager:
return S3OutputNotebookIOManager()
| 37.866667 | 82 | 0.727113 | from dagstermill.io_managers import OutputNotebookIOManager
from dagster import io_manager
from .fixed_s3_pickle_io_manager import s3_client
class S3OutputNotebookIOManager(OutputNotebookIOManager):
def _get_key(self, context) -> str:
return "notebooks/" + "_".join(context.get_run_scoped_output_identifier())
def load_input(self, context) -> bytes:
key = self._get_key(context.upstream_output)
bucket = context.resources.s3_bucket
context.log.info("loading from: s3_bucket[%s], s3_key[%s]", bucket, key)
return s3_client().get_object(Bucket=bucket, Key=key)["Body"].read()
def handle_output(self, context, obj: bytes):
key = self._get_key(context)
bucket = context.resources.s3_bucket
context.log.info("storing to: s3_bucket[%s], s3_key[%s]", bucket, key)
s3_client().put_object(Bucket=bucket, Key=key, Body=obj)
@io_manager(required_resource_keys={"s3_bucket"})
def s3_notebook_io_manager(_) -> OutputNotebookIOManager:
return S3OutputNotebookIOManager()
| true | true |
f72695448dd7c1288748bbad417bedf5678eb8a3 | 3,495 | py | Python | src/wpusher/vdesk.py | brenttaylor/WindowPusher | d6ecb9aa1ad69e954cba5632ee56fd6f6c1f8c06 | [
"BSD-3-Clause"
] | null | null | null | src/wpusher/vdesk.py | brenttaylor/WindowPusher | d6ecb9aa1ad69e954cba5632ee56fd6f6c1f8c06 | [
"BSD-3-Clause"
] | null | null | null | src/wpusher/vdesk.py | brenttaylor/WindowPusher | d6ecb9aa1ad69e954cba5632ee56fd6f6c1f8c06 | [
"BSD-3-Clause"
] | null | null | null | import user32
import win32con
import ctypes
import collections
class VirtualDesktopException(Exception):
pass
class NoForegroundWindow(VirtualDesktopException):
pass
class VirtualDesktop(object):
def __init__(self):
self.window = []
self.removed_windows = []
def remove_foreground_window(self):
foreground_window = user32.GetForegroundWindow()
if user32.IsWindowVisible(foreground_window):
self.removed_windows.append(foreground_window)
user32.ShowWindow(foreground_window, win32con.SW_HIDE)
return foreground_window
raise NoForegroundWindow("This Desktop is empty of windows.")
def add_window(self, window):
self.window.append(window)
def show(self):
self.removed_windows = []
for Window in self.window:
user32.ShowWindow(Window, win32con.SW_SHOW)
if len(self.window) > 0:
user32.SetForegroundWindow(self.window[-1])
def hide(self):
self.window = []
def enum_windows_proc(hWnd, lParam):
if not hWnd: return True
if not user32.IsWindowVisible(hWnd): return True
# Get Window Title
length = user32.SendMessage(hWnd, win32con.WM_GETTEXTLENGTH, 0, 0)
buffer = ctypes.create_unicode_buffer(length + 1)
if not user32.SendMessage(hWnd, win32con.WM_GETTEXT, length + 1, ctypes.byref(buffer)):
return True
if buffer.value != "Program Manager":
if not (hWnd in self.removed_windows):
if hWnd == user32.GetForegroundWindow():
self.window.append(hWnd)
else:
self.window.insert(0, hWnd)
user32.ShowWindow(hWnd, win32con.SW_HIDE)
return True
user32.EnumWindows(enum_windows_proc, 0)
def __del__(self):
self.show()
class DesktopManager(object):
__Previous = 1
__Next = -1
def __init__(self, desktop_count=4):
self.Desktops = collections.deque([VirtualDesktop() for x in xrange(desktop_count)])
self.Index = collections.deque(range(desktop_count))
def _move(self, direction):
self.Desktops.rotate(direction)
self.Index.rotate(direction)
def _display_desktop(self, direction):
self.Desktops[0].hide()
self._move(direction)
self.Desktops[0].show()
def _move_window_to(self, direction, HideWindow=True):
foreground_window = self.Desktops[0].remove_foreground_window()
self._move(direction)
self.Desktops[0].add_window(foreground_window)
self._move(-direction)
def display_next(self):
self._display_desktop(self.__Next)
def display_previous(self):
self._display_desktop(self.__Previous)
def move_window_to_next_desktop(self):
self._move_window_to(self.__Next)
def move_window_to_previous_desktop(self):
self._move_window_to(self.__Previous)
def move_window_to_next_desktop_and_display(self):
self._move_window_to(self.__Next)
self._display_desktop(self.__Next)
def move_window_to_previous_desktop_and_display(self):
self._move_window_to(self.__Previous)
self._display_desktop(self.__Previous)
def get_current_desktop_number(self):
return self.Index[0]
def show_all_windows(self):
[Desktop.show() for Desktop in self.Desktops]
| 29.871795 | 99 | 0.654649 | import user32
import win32con
import ctypes
import collections
class VirtualDesktopException(Exception):
pass
class NoForegroundWindow(VirtualDesktopException):
pass
class VirtualDesktop(object):
def __init__(self):
self.window = []
self.removed_windows = []
def remove_foreground_window(self):
foreground_window = user32.GetForegroundWindow()
if user32.IsWindowVisible(foreground_window):
self.removed_windows.append(foreground_window)
user32.ShowWindow(foreground_window, win32con.SW_HIDE)
return foreground_window
raise NoForegroundWindow("This Desktop is empty of windows.")
def add_window(self, window):
self.window.append(window)
def show(self):
self.removed_windows = []
for Window in self.window:
user32.ShowWindow(Window, win32con.SW_SHOW)
if len(self.window) > 0:
user32.SetForegroundWindow(self.window[-1])
def hide(self):
self.window = []
def enum_windows_proc(hWnd, lParam):
if not hWnd: return True
if not user32.IsWindowVisible(hWnd): return True
length = user32.SendMessage(hWnd, win32con.WM_GETTEXTLENGTH, 0, 0)
buffer = ctypes.create_unicode_buffer(length + 1)
if not user32.SendMessage(hWnd, win32con.WM_GETTEXT, length + 1, ctypes.byref(buffer)):
return True
if buffer.value != "Program Manager":
if not (hWnd in self.removed_windows):
if hWnd == user32.GetForegroundWindow():
self.window.append(hWnd)
else:
self.window.insert(0, hWnd)
user32.ShowWindow(hWnd, win32con.SW_HIDE)
return True
user32.EnumWindows(enum_windows_proc, 0)
def __del__(self):
self.show()
class DesktopManager(object):
__Previous = 1
__Next = -1
def __init__(self, desktop_count=4):
self.Desktops = collections.deque([VirtualDesktop() for x in xrange(desktop_count)])
self.Index = collections.deque(range(desktop_count))
def _move(self, direction):
self.Desktops.rotate(direction)
self.Index.rotate(direction)
def _display_desktop(self, direction):
self.Desktops[0].hide()
self._move(direction)
self.Desktops[0].show()
def _move_window_to(self, direction, HideWindow=True):
foreground_window = self.Desktops[0].remove_foreground_window()
self._move(direction)
self.Desktops[0].add_window(foreground_window)
self._move(-direction)
def display_next(self):
self._display_desktop(self.__Next)
def display_previous(self):
self._display_desktop(self.__Previous)
def move_window_to_next_desktop(self):
self._move_window_to(self.__Next)
def move_window_to_previous_desktop(self):
self._move_window_to(self.__Previous)
def move_window_to_next_desktop_and_display(self):
self._move_window_to(self.__Next)
self._display_desktop(self.__Next)
def move_window_to_previous_desktop_and_display(self):
self._move_window_to(self.__Previous)
self._display_desktop(self.__Previous)
def get_current_desktop_number(self):
return self.Index[0]
def show_all_windows(self):
[Desktop.show() for Desktop in self.Desktops]
| true | true |
f7269554e70b05b444508f2c600f6f0487716659 | 2,187 | py | Python | src/jobhunt_prod/scrape/multiprocess_simply.py | smiller20/CareerCentral | 455df0910dff1a1883fd56365a7a4feeb7726b22 | [
"MIT"
] | null | null | null | src/jobhunt_prod/scrape/multiprocess_simply.py | smiller20/CareerCentral | 455df0910dff1a1883fd56365a7a4feeb7726b22 | [
"MIT"
] | null | null | null | src/jobhunt_prod/scrape/multiprocess_simply.py | smiller20/CareerCentral | 455df0910dff1a1883fd56365a7a4feeb7726b22 | [
"MIT"
] | 1 | 2020-12-04T22:57:24.000Z | 2020-12-04T22:57:24.000Z | """
simply hired using multi process design
scrape through 11 pages of simply using a multi processing for each I/O (4x faster)
"""
from requests import get
from bs4 import BeautifulSoup
from threading import Thread
import multiprocessing
from os import getpid
import psutil
def get_simply(url, role ):
alldata={}
response = get(url, headers={'User-Agent': 'Mozilla/5.0'})
try:
soup = BeautifulSoup(response.text, 'html.parser')
content_container= soup.find_all('div', {'class': ['SerpJob-jobCard']})
link= 'https://www.simplyhired.com'
except AttributeError:
pass
for content in content_container:
title, href=None , None
try:
title=content.a.text
href=content.a['href']
company=content.span.text
summary=content.p.text
except TypeError:
pass
except AttributeError:
pass
if title is not None and role.upper() in title.upper():
if href is not None:
href=link+href
alldata[href]=[title, company, summary, href]
return alldata
def getrole_simply(role, location):
test_data ={}
if "," in location:
location=location.split(',')
location= location[0].strip()+ "," + location[1].strip()
url_first= 'https://www.simplyhired.com/search?q='+role+'&l='+location
url= 'https://www.simplyhired.com/search?q='+role+'&l='+location + '&pn='
processor_count= multiprocessing.cpu_count() #get cpu count
pool=multiprocessing.Pool(11)
iterable = zip( [ url +str(i) if i != 0 else url_first for i in range(1,30) ], [role for i in range(1,30) ] )
result_pool=pool.starmap( get_simply, iterable)
pool.close()
pool.join()
for i, p in enumerate(result_pool):
for key, value in p.items():
if value not in test_data.values():
test_data[key]= value
return test_data
'''
process = psutil.Process(getpid())
print('total memory usage: ' , process.memory_info().rss , psutil.cpu_percent()) # in bytes
'''
if __name__ == "__main__":
getrole_simply('python', 'new jersey') | 33.646154 | 120 | 0.621856 |
from requests import get
from bs4 import BeautifulSoup
from threading import Thread
import multiprocessing
from os import getpid
import psutil
def get_simply(url, role ):
alldata={}
response = get(url, headers={'User-Agent': 'Mozilla/5.0'})
try:
soup = BeautifulSoup(response.text, 'html.parser')
content_container= soup.find_all('div', {'class': ['SerpJob-jobCard']})
link= 'https://www.simplyhired.com'
except AttributeError:
pass
for content in content_container:
title, href=None , None
try:
title=content.a.text
href=content.a['href']
company=content.span.text
summary=content.p.text
except TypeError:
pass
except AttributeError:
pass
if title is not None and role.upper() in title.upper():
if href is not None:
href=link+href
alldata[href]=[title, company, summary, href]
return alldata
def getrole_simply(role, location):
test_data ={}
if "," in location:
location=location.split(',')
location= location[0].strip()+ "," + location[1].strip()
url_first= 'https://www.simplyhired.com/search?q='+role+'&l='+location
url= 'https://www.simplyhired.com/search?q='+role+'&l='+location + '&pn='
processor_count= multiprocessing.cpu_count()
pool=multiprocessing.Pool(11)
iterable = zip( [ url +str(i) if i != 0 else url_first for i in range(1,30) ], [role for i in range(1,30) ] )
result_pool=pool.starmap( get_simply, iterable)
pool.close()
pool.join()
for i, p in enumerate(result_pool):
for key, value in p.items():
if value not in test_data.values():
test_data[key]= value
return test_data
if __name__ == "__main__":
getrole_simply('python', 'new jersey') | true | true |
f7269559f7d7eaf9efb701f4e1ac759e3c36654f | 502 | py | Python | aardvark/__about__.py | mbaciu-gpsw/aardvark | c2a0797bf3769ba819dcbacd4a80f4e9764d035e | [
"Apache-2.0"
] | null | null | null | aardvark/__about__.py | mbaciu-gpsw/aardvark | c2a0797bf3769ba819dcbacd4a80f4e9764d035e | [
"Apache-2.0"
] | 10 | 2019-07-23T09:03:02.000Z | 2019-10-15T14:53:14.000Z | aardvark/__about__.py | mbaciu-gpsw/aardvark | c2a0797bf3769ba819dcbacd4a80f4e9764d035e | [
"Apache-2.0"
] | 1 | 2022-01-11T13:06:32.000Z | 2022-01-11T13:06:32.000Z | __all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "aardvark"
__summary__ = ("Multi-Account AWS IAM Access Advisor API")
__uri__ = "https://github.com/Netflix-Skunkworks/aardvark"
__version__ = "0.2.1"
__author__ = "Patrick Kelley, Travis McPeak"
__email__ = "pkelley@netflix.com, tmcpeak@netflix.com"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2017 {0}".format(__author__)
| 29.529412 | 71 | 0.7251 | __all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "aardvark"
__summary__ = ("Multi-Account AWS IAM Access Advisor API")
__uri__ = "https://github.com/Netflix-Skunkworks/aardvark"
__version__ = "0.2.1"
__author__ = "Patrick Kelley, Travis McPeak"
__email__ = "pkelley@netflix.com, tmcpeak@netflix.com"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2017 {0}".format(__author__)
| true | true |
f726956703146c7de18a5d8a95ca243d34616547 | 12,727 | py | Python | pydmd/hankeldmd.py | kathryn-garside/PyDMD-fork | 0158c4144019f0899ce34ec44286b0f700c56b38 | [
"MIT"
] | null | null | null | pydmd/hankeldmd.py | kathryn-garside/PyDMD-fork | 0158c4144019f0899ce34ec44286b0f700c56b38 | [
"MIT"
] | null | null | null | pydmd/hankeldmd.py | kathryn-garside/PyDMD-fork | 0158c4144019f0899ce34ec44286b0f700c56b38 | [
"MIT"
] | null | null | null | """
Derived module from dmdbase.py for hankel dmd.
Reference:
- H. Arbabi, I. Mezic, Ergodic theory, dynamic mode decomposition, and
computation of spectral properties of the Koopman operator. SIAM Journal on
Applied Dynamical Systems, 2017, 16.4: 2096-2126.
"""
from copy import copy
import numpy as np
from .dmdbase import DMDBase
from .dmd import DMD
class HankelDMD(DMDBase):
"""
Hankel Dynamic Mode Decomposition
:param svd_rank: the rank for the truncation; If 0, the method computes the
optimal rank and uses it for truncation; if positive interger, the
method uses the argument for the truncation; if float between 0 and 1,
the rank is the number of the biggest singular values that are needed
to reach the 'energy' specified by `svd_rank`; if -1, the method does
not compute truncation.
:type svd_rank: int or float
:param int tlsq_rank: rank truncation computing Total Least Square. Default
is 0, that means no truncation.
:param bool exact: flag to compute either exact DMD or projected DMD.
Default is False.
:param opt: argument to control the computation of DMD modes amplitudes.
See :class:`DMDBase`. Default is False.
:type opt: bool or int
:param rescale_mode: Scale Atilde as shown in
10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its
eigendecomposition. None means no rescaling, 'auto' means automatic
rescaling using singular values, otherwise the scaling factors.
:type rescale_mode: {'auto'} or None or numpy.ndarray
:param bool forward_backward: If True, the low-rank operator is computed
like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is
False.
:param int d: the new order for spatial dimension of the input snapshots.
Default is 1.
:param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by
magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary
part to break ties) if `sorted_eigs='real'`. Default: False.
:type sorted_eigs: {'real', 'abs'} or False
:param reconstruction_method: Method used to reconstruct the snapshots of
the dynamical system from the multiple versions available due to how
HankelDMD is conceived. If `'first'` (default) the first version
available is selected (i.e. the nearest to the 0-th row in the
augmented matrix). If `'mean'` we compute the element-wise mean. If
`reconstruction_method` is an array of float values we compute the
weighted average (for each snapshots) using the given values as weights
(the number of weights must be equal to `d`).
:type reconstruction_method: {'first', 'mean'} or array-like
"""
def __init__(
self,
svd_rank=0,
tlsq_rank=0,
exact=False,
opt=False,
rescale_mode=None,
forward_backward=False,
d=1,
sorted_eigs=False,
reconstruction_method="first",
):
super().__init__(
svd_rank=svd_rank,
tlsq_rank=tlsq_rank,
exact=exact,
opt=opt,
rescale_mode=rescale_mode,
sorted_eigs=sorted_eigs,
)
self._d = d
if isinstance(reconstruction_method, list):
if len(reconstruction_method) != d:
raise ValueError(
"The length of the array of weights must be equal to d"
)
elif isinstance(reconstruction_method, np.ndarray):
if (
reconstruction_method.ndim > 1
or reconstruction_method.shape[0] != d
):
raise ValueError(
"The length of the array of weights must be equal to d"
)
self._reconstruction_method = reconstruction_method
self._sub_dmd = DMD(
svd_rank=svd_rank,
tlsq_rank=tlsq_rank,
exact=exact,
opt=opt,
rescale_mode=rescale_mode,
forward_backward=forward_backward,
sorted_eigs=sorted_eigs,
)
@property
def d(self):
"""The new order for spatial dimension of the input snapshots."""
return self._d
def _hankel_first_occurrence(self, time):
r"""
For a given `t` such that there is :math:`k \in \mathbb{N}` such that
:math:`t = t_0 + k dt`, return the index of the first column in Hankel
pseudo matrix (see also :func:`_pseudo_hankel_matrix`) which contains
the snapshot corresponding to `t`.
:param time: The time corresponding to the requested snapshot.
:return: The index of the first appeareance of `time` in the columns of
Hankel pseudo matrix.
:rtype: int
"""
return max(
0,
(time - self.original_time["t0"]) // self.dmd_time["dt"]
- (self.original_time["t0"] + self.d - 1),
)
def _update_sub_dmd_time(self):
"""
Update the time dictionaries (`dmd_time` and `original_time`) of
the auxiliary DMD instance `HankelDMD._sub_dmd` after an update of the
time dictionaries of the time dictionaries of this instance of the
higher level instance of `HankelDMD`.
"""
self._sub_dmd.dmd_time["t0"] = self._hankel_first_occurrence(
self.dmd_time["t0"]
)
self._sub_dmd.dmd_time["tend"] = self._hankel_first_occurrence(
self.dmd_time["tend"]
)
def reconstructions_of_timeindex(self, timeindex=None):
"""
Build a collection of all the available versions of the given
`timeindex`. The indexing of time instants is the same used for
:func:`reconstructed_data`. For each time instant there are at least
one and at most `d` versions. If `timeindex` is `None` the function
returns the whole collection, for all the time instants.
:param int timeindex: The index of the time snapshot.
:return: a collection of all the available versions for the given
time snapshot, or for all the time snapshots if `timeindex` is
`None` (in the second case, time varies along the first dimension
of the array returned).
:rtype: numpy.ndarray or list
"""
self._update_sub_dmd_time()
rec = self._sub_dmd.reconstructed_data
space_dim = rec.shape[0] // self.d
time_instants = rec.shape[1] + self.d - 1
# for each time instance, we collect all its appearences. each
# snapshot appears at most d times (for instance, the first appears
# only once).
reconstructed_snapshots = np.full(
(time_instants, self.d, space_dim), np.nan, dtype=rec.dtype
)
c_idxes = (
np.array(range(self.d))[:, None]
.repeat(2, axis=1)[None, :]
.repeat(rec.shape[1], axis=0)
)
c_idxes[:, :, 0] += np.array(range(rec.shape[1]))[:, None]
reconstructed_snapshots[c_idxes[:, :, 0], c_idxes[:, :, 1]] = np.array(
np.swapaxes(np.split(rec.T, self.d, axis=1), 0, 1)
)
if timeindex is None:
return reconstructed_snapshots
return reconstructed_snapshots[timeindex]
def _first_reconstructions(self, reconstructions):
"""Return the first occurrence of each snapshot available in the given
matrix (which must be the result of `self._sub_dmd.reconstructed_data`,
or have the same shape).
:param reconstructions: A matrix of (higher-order) snapshots having
shape `(space*self.d, time_instants)`
:type reconstructions: np.ndarray
:return: The first snapshot that occurs in `reconstructions` for each
available time instant.
:rtype: np.ndarray
"""
first_nonmasked_idx = np.repeat(
np.array(range(reconstructions.shape[0]))[:, None], 2, axis=1
)
first_nonmasked_idx[self.d - 1 :, 1] = self.d - 1
return reconstructions[
first_nonmasked_idx[:, 0], first_nonmasked_idx[:, 1]
].T
@property
def reconstructed_data(self):
self._update_sub_dmd_time()
rec = self.reconstructions_of_timeindex()
rec = np.ma.array(rec, mask=np.isnan(rec))
if self._reconstruction_method == "first":
result = self._first_reconstructions(rec)
elif self._reconstruction_method == "mean":
result = np.mean(rec, axis=1).T
elif isinstance(self._reconstruction_method, (np.ndarray, list)):
result = np.average(
rec, axis=1, weights=self._reconstruction_method
).T
else:
raise ValueError(
"The reconstruction method wasn't recognized: {}".format(
self._reconstruction_method
)
)
# we want to return only the requested timesteps
time_index = min(
self.d - 1,
int(
(self.dmd_time["t0"] - self.original_time["t0"])
// self.dmd_time["dt"]
),
)
result = result[:, time_index : time_index + len(self.dmd_timesteps)]
return result.filled(fill_value=0)
def _pseudo_hankel_matrix(self, X):
"""
Method for arranging the input snapshots `X` into the (pseudo) Hankel
matrix. The attribute `d` controls the shape of the output matrix.
:Example:
>>> from pydmd import HankelDMD
>>> dmd = HankelDMD(d=2)
>>> a = np.array([[1, 2, 3, 4, 5]])
>>> dmd._pseudo_hankel_matrix(a)
array([[1, 2, 3, 4],
[2, 3, 4, 5]])
>>> dmd = pydmd.hankeldmd.HankelDMD(d=4)
>>> dmd._pseudo_hankel_matrix(a)
array([[1, 2],
[2, 3],
[3, 4],
[4, 5]])
"""
return np.concatenate(
[X[:, i : X.shape[1] - self.d + i + 1] for i in range(self.d)],
axis=0,
)
@property
def modes(self):
return self._sub_dmd.modes
@property
def eigs(self):
return self._sub_dmd.eigs
@property
def amplitudes(self):
return self._sub_dmd.amplitudes
@property
def operator(self):
return self._sub_dmd.operator
@property
def svd_rank(self):
return self._sub_dmd.svd_rank
@property
def modes_activation_bitmask(self):
return self._sub_dmd.modes_activation_bitmask
@modes_activation_bitmask.setter
def modes_activation_bitmask(self, value):
self._sub_dmd.modes_activation_bitmask = value
# due to how we implemented HankelDMD we need an alternative implementation
# of __getitem__
def __getitem__(self, key):
"""
Restrict the DMD modes used by this instance to a subset of indexes
specified by keys. The value returned is a shallow copy of this DMD
instance, with a different value in :func:`modes_activation_bitmask`.
Therefore assignments to attributes are not reflected into the original
instance.
However the DMD instance returned should not be used for low-level
manipulations on DMD modes, since the underlying DMD operator is shared
with the original instance. For this reasons modifications to NumPy
arrays may result in unwanted and unspecified situations which should
be avoided in principle.
:param key: An index (integer), slice or list of indexes.
:type key: int or slice or list or np.ndarray
:return: A shallow copy of this DMD instance having only a subset of
DMD modes which are those indexed by `key`.
:rtype: HankelDMD
"""
sub_dmd_copy = copy(self._sub_dmd)
sub_dmd_copy.allocate_proxy()
shallow_copy = copy(self)
shallow_copy._sub_dmd = sub_dmd_copy
return DMDBase.__getitem__(shallow_copy, key)
def fit(self, X):
"""
Compute the Dynamic Modes Decomposition to the input data.
:param X: the input snapshots.
:type X: numpy.ndarray or iterable
"""
snp, self._snapshots_shape = self._col_major_2darray(X)
self._snapshots = self._pseudo_hankel_matrix(snp)
self._sub_dmd.fit(self._snapshots)
# Default timesteps
n_samples = snp.shape[1]
self._set_initial_time_dictionary(
{"t0": 0, "tend": n_samples - 1, "dt": 1}
)
return self
| 36.997093 | 79 | 0.613735 | from copy import copy
import numpy as np
from .dmdbase import DMDBase
from .dmd import DMD
class HankelDMD(DMDBase):
def __init__(
self,
svd_rank=0,
tlsq_rank=0,
exact=False,
opt=False,
rescale_mode=None,
forward_backward=False,
d=1,
sorted_eigs=False,
reconstruction_method="first",
):
super().__init__(
svd_rank=svd_rank,
tlsq_rank=tlsq_rank,
exact=exact,
opt=opt,
rescale_mode=rescale_mode,
sorted_eigs=sorted_eigs,
)
self._d = d
if isinstance(reconstruction_method, list):
if len(reconstruction_method) != d:
raise ValueError(
"The length of the array of weights must be equal to d"
)
elif isinstance(reconstruction_method, np.ndarray):
if (
reconstruction_method.ndim > 1
or reconstruction_method.shape[0] != d
):
raise ValueError(
"The length of the array of weights must be equal to d"
)
self._reconstruction_method = reconstruction_method
self._sub_dmd = DMD(
svd_rank=svd_rank,
tlsq_rank=tlsq_rank,
exact=exact,
opt=opt,
rescale_mode=rescale_mode,
forward_backward=forward_backward,
sorted_eigs=sorted_eigs,
)
@property
def d(self):
return self._d
def _hankel_first_occurrence(self, time):
return max(
0,
(time - self.original_time["t0"]) // self.dmd_time["dt"]
- (self.original_time["t0"] + self.d - 1),
)
def _update_sub_dmd_time(self):
self._sub_dmd.dmd_time["t0"] = self._hankel_first_occurrence(
self.dmd_time["t0"]
)
self._sub_dmd.dmd_time["tend"] = self._hankel_first_occurrence(
self.dmd_time["tend"]
)
def reconstructions_of_timeindex(self, timeindex=None):
self._update_sub_dmd_time()
rec = self._sub_dmd.reconstructed_data
space_dim = rec.shape[0] // self.d
time_instants = rec.shape[1] + self.d - 1
reconstructed_snapshots = np.full(
(time_instants, self.d, space_dim), np.nan, dtype=rec.dtype
)
c_idxes = (
np.array(range(self.d))[:, None]
.repeat(2, axis=1)[None, :]
.repeat(rec.shape[1], axis=0)
)
c_idxes[:, :, 0] += np.array(range(rec.shape[1]))[:, None]
reconstructed_snapshots[c_idxes[:, :, 0], c_idxes[:, :, 1]] = np.array(
np.swapaxes(np.split(rec.T, self.d, axis=1), 0, 1)
)
if timeindex is None:
return reconstructed_snapshots
return reconstructed_snapshots[timeindex]
def _first_reconstructions(self, reconstructions):
first_nonmasked_idx = np.repeat(
np.array(range(reconstructions.shape[0]))[:, None], 2, axis=1
)
first_nonmasked_idx[self.d - 1 :, 1] = self.d - 1
return reconstructions[
first_nonmasked_idx[:, 0], first_nonmasked_idx[:, 1]
].T
@property
def reconstructed_data(self):
self._update_sub_dmd_time()
rec = self.reconstructions_of_timeindex()
rec = np.ma.array(rec, mask=np.isnan(rec))
if self._reconstruction_method == "first":
result = self._first_reconstructions(rec)
elif self._reconstruction_method == "mean":
result = np.mean(rec, axis=1).T
elif isinstance(self._reconstruction_method, (np.ndarray, list)):
result = np.average(
rec, axis=1, weights=self._reconstruction_method
).T
else:
raise ValueError(
"The reconstruction method wasn't recognized: {}".format(
self._reconstruction_method
)
)
# we want to return only the requested timesteps
time_index = min(
self.d - 1,
int(
(self.dmd_time["t0"] - self.original_time["t0"])
// self.dmd_time["dt"]
),
)
result = result[:, time_index : time_index + len(self.dmd_timesteps)]
return result.filled(fill_value=0)
def _pseudo_hankel_matrix(self, X):
return np.concatenate(
[X[:, i : X.shape[1] - self.d + i + 1] for i in range(self.d)],
axis=0,
)
@property
def modes(self):
return self._sub_dmd.modes
@property
def eigs(self):
return self._sub_dmd.eigs
@property
def amplitudes(self):
return self._sub_dmd.amplitudes
@property
def operator(self):
return self._sub_dmd.operator
@property
def svd_rank(self):
return self._sub_dmd.svd_rank
@property
def modes_activation_bitmask(self):
return self._sub_dmd.modes_activation_bitmask
@modes_activation_bitmask.setter
def modes_activation_bitmask(self, value):
self._sub_dmd.modes_activation_bitmask = value
# due to how we implemented HankelDMD we need an alternative implementation
# of __getitem__
def __getitem__(self, key):
sub_dmd_copy = copy(self._sub_dmd)
sub_dmd_copy.allocate_proxy()
shallow_copy = copy(self)
shallow_copy._sub_dmd = sub_dmd_copy
return DMDBase.__getitem__(shallow_copy, key)
def fit(self, X):
snp, self._snapshots_shape = self._col_major_2darray(X)
self._snapshots = self._pseudo_hankel_matrix(snp)
self._sub_dmd.fit(self._snapshots)
# Default timesteps
n_samples = snp.shape[1]
self._set_initial_time_dictionary(
{"t0": 0, "tend": n_samples - 1, "dt": 1}
)
return self
| true | true |
f72695c880ab96368ab83d470e012d516b14bf5a | 1,438 | py | Python | pfrl/policies/softmax_policy.py | tkelestemur/pfrl | 388855fb30313185d43ae0d0f4b694be647a5c43 | [
"MIT"
] | null | null | null | pfrl/policies/softmax_policy.py | tkelestemur/pfrl | 388855fb30313185d43ae0d0f4b694be647a5c43 | [
"MIT"
] | 1 | 2021-05-14T20:53:26.000Z | 2021-05-20T15:58:32.000Z | pfrl/policies/softmax_policy.py | tkelestemur/pfrl | 388855fb30313185d43ae0d0f4b694be647a5c43 | [
"MIT"
] | 1 | 2021-06-09T03:17:34.000Z | 2021-06-09T03:17:34.000Z | import torch
from torch import nn
from torch.distributions import Categorical
class SoftmaxCategoricalHead(nn.Module):
def forward(self, logits):
return torch.distributions.Categorical(logits=logits)
# class MultiSoftmaxCategoricalHead(nn.Module):
# def forward(self, logits):
# return Independent(Categorical(logits=logits), reinterpreted_batch_ndims=1)
class MultiCategorical():
def __init__(self, dims=None, logits=None):
self.dims = dims
logits = torch.split(logits, tuple(dims), dim=1)
self.dists = [Categorical(logits=logits_dim) for logits_dim in logits]
def log_prob(self, actions):
actions = torch.unbind(actions, dim=1)
logprobs = torch.stack([
dist.log_prob(action) for dist, action in zip(self.dists, actions)
], dim=1)
return logprobs.sum(dim=1)
def entropy(self):
return torch.stack([dist.entropy() for dist in self.dists], dim=1).sum(dim=1)
def sample(self):
return torch.stack([dist.sample() for dist in self.dists], dim=1)
def mode(self):
return torch.stack([
torch.argmax(dist.probs, dim=1) for dist in self.dists
], dim=1)
class MultiSoftmaxCategoricalHead(nn.Module):
def __init__(self, dims=None):
self.dims = dims
super().__init__()
def forward(self, logits):
return MultiCategorical(dims=self.dims, logits=logits)
| 29.958333 | 85 | 0.665508 | import torch
from torch import nn
from torch.distributions import Categorical
class SoftmaxCategoricalHead(nn.Module):
def forward(self, logits):
return torch.distributions.Categorical(logits=logits)
class MultiCategorical():
def __init__(self, dims=None, logits=None):
self.dims = dims
logits = torch.split(logits, tuple(dims), dim=1)
self.dists = [Categorical(logits=logits_dim) for logits_dim in logits]
def log_prob(self, actions):
actions = torch.unbind(actions, dim=1)
logprobs = torch.stack([
dist.log_prob(action) for dist, action in zip(self.dists, actions)
], dim=1)
return logprobs.sum(dim=1)
def entropy(self):
return torch.stack([dist.entropy() for dist in self.dists], dim=1).sum(dim=1)
def sample(self):
return torch.stack([dist.sample() for dist in self.dists], dim=1)
def mode(self):
return torch.stack([
torch.argmax(dist.probs, dim=1) for dist in self.dists
], dim=1)
class MultiSoftmaxCategoricalHead(nn.Module):
def __init__(self, dims=None):
self.dims = dims
super().__init__()
def forward(self, logits):
return MultiCategorical(dims=self.dims, logits=logits)
| true | true |
f72695ed24f4c5a0ae1409ac5bdd45ac4dd38389 | 27,191 | py | Python | .qt_for_python/rcc/application.py | RodrooMtz/app_escritorio | e918f086d2b3a0a9749c8afb20e11845773cd117 | [
"MIT"
] | null | null | null | .qt_for_python/rcc/application.py | RodrooMtz/app_escritorio | e918f086d2b3a0a9749c8afb20e11845773cd117 | [
"MIT"
] | null | null | null | .qt_for_python/rcc/application.py | RodrooMtz/app_escritorio | e918f086d2b3a0a9749c8afb20e11845773cd117 | [
"MIT"
] | null | null | null | # Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.0.4
# WARNING! All changes made in this file will be lost!
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x08\x19\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x07\xabIDATX\xc3\xad\
W[P\x93g\x1a\xf6\xca\xce\xec\xcc\xf6b/\xbc\xd9\
\xe9\xce\xecn\xbd\xda\xd9\x9b\xb5\xce\xba;{\xb0\xad\xcc\
z\xb1\xce\xce:\xb3vTpu\xdb\xe2\x81\xd6\xb6T\
\x04\xbb\xa5 m\xc1\x82\x06\x08\x07QB\x80\x80\x80\x02\
!\x81\x10\x92@H\x10s$!gr\x80\x04B \
\x9c\x09G\xb5Tx\xf6\xfb~\x13\x160X\x8b}g\
\x9e\xf9/\x92\xfc\xcf\xfb>\xcf\xfb\xbe\xdf\x97]\x00v\
\xfd\x98 \xf1\x0b\x82\x14\x02\x03\xc1u\x82\x03\xcf\xfd\xfe\
\x8fH\xbc\x9b \xe1W\xaf\xef\xb5*\x8c\xd6e\xdb\x02\
`\x19\x1e[\x09'\xf13\xfa\x19\x81\x22\xfc\xdc>v\
H~\x8a\xa0\xb9\xb6Y\x1c2\xcf\xadB9\xfe\x1dD\
\xf6Q\xd8\xc7\xe6\xe8\x87\x86={\xf6XSR\xae,\
\xca::\x10N\xe2\xe5I\xc3\xc41\x04\xb7>I\xf9\
,`\x9b]YSM\x03M\xb6\x114\xeb\xfb 1\
y`\x19\x9d\xc5\xbb\xef\xbe?\xc5\xab\xbe\x83\xf1\x89)\
LO\xcf\xae\x92\xef\xd7\xbct\x02\x11\x9f\x0f\xbe\x1d\xe3\
\xb2\x04CO\xb43@\x8b{\x06\xcd=.4\xeb\xec\
\xa8W\xf6 \x87S\x852^5C\xbc\xb0\xf4\x90\x81\
\xc1`\x5c&\xbfK|\xe1\x04H\x1c$8A\xfd\xdd\
\xeas'\xf1\xb9'\x04H\x87\x97\xc1\xd7\xbb \x22U\
7\xdc7\xa2\xb8N\x88,V>\xccV\xdb:q\x04\
,\x16k,\xfc\xce\xe7'\x10\x916\x93\x95?F}\
\xa5\xfe\x12\xc4o\xf4Y1\xb6\x02~\xef Z{\x9c\
\xe0?0\xa1L(CF\x0e\x1b\xb2\x0e\xf9&\xd2\xf9\
\xc5e\xcc-,!4\xbf\x88\xbd{\xf7Z\xc9;~\
\xbam\x02$~C\x90F=5\x13iu\xb3\x80\xd2\
?\x0f\xcb\xc4\xe2\x9aP\xa1Z\xb4l\xf1Y\xa0\xb6\xa0\
\xa6]\x8d/\xb2sq\xb7\x9e\xff\x0c1%\x9d\x09\xcd\
cbj\x06\x83C\x81'\xe4\xdd\xbc-\xd3\xb0;\x92\
\x033&\xd4S\xb5\xd3\xfbXO\x88\xc5\x03!\x88,\
CP\xbaF\xd0\xed\x09B\xe5\x9bB\x9bs\xfc\xa9\xcf\
Z\x1b\xee*t\xc8\xbc\xc9E\x09\xa7l\x93\xcf\x9b\x88\
'\xa7\x11\x18\x1d\xc3\x80o\x08\xa2\xd6\xd6%\xc2Q\xdb\
(\x12\x87\xc6\x1f\xaf\x82/b\x94M\x89$\x90\x22\xea\
R-\x9aB\xab\xe8\x18y\x04\xa1\xc5\xcf\x10St\xf6\
\x0d\xa3\xd3\xe1\x87\xd4<\x80\x16\xbd\x03\x0d]\x06\x14\xd5\
\x0a\x90\x91\x95\x0d/y\xf1\xc6\xaa\xa9\xd4\xb3s\x0bL\
\xc5\x94\xd8\xdd\xef\x85\xc9b\x05\xb7\xbc\x12\xa5\xe5\x95K\
\x13\xf3\xcb\xab#\x0f\x017\xd9\x11\xe6\xd9\x15\x84\x97\x15\
\x13\x06\xcb<\xd0h\xf2\xa3\xdd\xee_'\x96;\x86 \
\xb3x\xd7}\xe6\x08\xa4\xf8<3\x1b*\x8d6\xaa\xdc\
S3!\x8c\x8e\x8d3\x15\xd3&\xe47\x09\xf1\xc1\xc5\
\x8fQs\xaf\x01\xbee`\xfc\x11\xa0#\x13#\xf2\xce\
\xa1\xbe]\xb9\xb8Q\x01\x83\x81ttM\xa7\x1e\x0ag\
\x80\xa9\xb8\xdd\xea\x83\xd8\xe8B\x93\xca\xcc\xf8|\xe5\xcb\
,\x88\xda$Q\x89\xa7g\xe7\x18\x1b\x86\x86G`w\
8I\x82:$|\xf8!\xae\xb3\x0b\xe1\x99\x5c\x80o\
\x09\xd0\x90\xde\xe1\x0f,\x81\xab\x1f\xc4}\xef\x04\xdd\x07\
\x1da\xeb\xff\x9f\xc0\x1d\xb9\x16\x1d\xf6!H\xcc\xfdO\
}\xee\xd4\x22\x9dU\x84\xaa\x9a\xbaM>G\xe4\x8e\xf8\
<<\x12\x84\xd3\xdd\x0f\xbd\xc1\x88\xc2\xe2b\x9c~/\
\x1e=\x03\x01\xf4/\x02\x83\x84\xbc\xc5\xff-\xee:C\
(Q\x91\xf7\xf6\x05\xf1N\xdc\xbf}\x843i\xe3 \
\x18\xf43\xab\xe0\xc9Th58\xd1\xd8\xdd\x0b\x9eX\
\x89\xac\x5c\xf63>G\xaa\x9e\x9c\x9ee\xe4\xee\xf7\x0e\
\xa2\xd7lAC\x03\x1f'b\xe3 \xe9\xd6\xc0E\xcf\
\x01R\x90$\xb8\x86\xb2\x9e\x00n\xb4\xdbP\xd1\x1bD\
\x85\xce\x8bJ~\x0bm\xbe\x9b['\xd1\xa0\x99\xf8\x16\
e\x22\x05\xee)\xf4(\x13\xc8\x90x5\x0b\x1a\xad>\
\xaa\xdcc\x13\x93\xf0\x0d\x0d\xc3f\xef\x83\xb4]\x8e\xc4\
K\x97\x90\xc3\xca\xc3\xd4c\xc0NzI1N\xfa\x89\
\x94\x7f[;\x84|\x85\x13%j\x1fJ\xd5\x03\xe8\xf2\
0\xa3(\x22\xf8\xf93\x09t\x8f.\xa1\xa8\xbe\x15\xa5\
|\x09\xb2J*\xf0\xcf\xe3qQ\xe5\xf6\x07F\xd1\xe7\
\xf2@\xab7 \xfdj\x06\x92\xbfH\x83\xcd7\x02'\
\xa9\xda@\x1aL\xe0{\x88R\x9d\x1fE\xdd\xfd\x0cq\
A\x97\x1b\xc5\xdd\x1e\x88\x9cA\xfc\xf9\xcd\xb7]\x84\xeb\
l\xb4C\xd0(\xf7N#\xa7\xfc\x1e\xb2K\xab\xf1Q\
\xeaWH\xfeo\xea\xfaXQ\xb9G\x82\xe3\xf0\x0c\xf8\
`4\x99Q\xc9\xab\xc2\xfbg\xcfA\xfe@\x03?\xe9\
n\xb2\x8d\x19\xb9oi\x06\x19\xd2\x9b*/r\xe5\x0e\
\xe4u\xf6\xa1\xf0\xbe\x1b\x1c\x95\x1b\xf9\x9c\xca)\xc2S\
\xb8\xdd)\xdc+v\x04\x90Q\xc8\xc5\x95ky8\x11\
\x9f\x80\x9b\xb7n3c\x15\x91\xdbjs@\x22m\xc7\
\x85\x84\x0fPt\xbb\x0c\xf3+\x80\x9f4X\xf7$ \
\x1c|\x84J\xd3\x188\xfaa\x86\x9cV\xfdU\xb3\x1e\
\xac\x0e;\xb8:\x1f\xd9!\x1ez/\xe0\x13\xbc\xba]\
\x02&\xbe\xc1\x83\x94o\xd88\x9f\x9c\x8a\x03\x7f=\x04\
c\xaf\x99\xe9n*\xb7F\xd7\x83\xa4\xcb\xc9H\xff:\
\x8b\x8c\xd5<S\xb5q\xf6\xa9\xdc5\xf6i\x5c\x97Y\
\x19\xd9\xbfn!\xa7\xa0\xd4\x82t\xbe\x1aW\x9b4`\
\xc9\xcc\x10\xbb\x82\xf8\xe5\xaf_\xa7g\xc0;\xe1u\x1f\
5\xcc5\xddf|\x94\x96\x85\xb8s\x17\xf1\x97C1\
L\xd5t\x99\xf0\xaa\xaaq\xfa\xf4\x19h\xcc\x0e\x8c\x92\
-6\x14\x1e\xabZ\xc7\x0cx\xe6qp\x0d#L\xa3\
e\x8a\x0c\x8c\xec\xb4\xfa\x9c\xb6^\x94t9\xd0f\xf7\
\xaf\x1e=\x11KG.o\xc3y\x135,\x5c\x99\x1a\
\xf1\x97>\xc7\xd1\xd83\xf881\x09\x86^\x13\x1a\x9b\
\x04\xf8\xdd\x1b\xfbQO\xd4\xf1\x90\x99\xee\x9a\x00\xaa\xad\
\x93`+]\x0c9\xf5\xbc\xf0\xbeg\xbd\xea\xcc\x16=\
JU\x1e\x08m\x01\x94\xd4\xf1C\xe1eS@\xf0\xca\
\xf7%`+nj\xc7\xa9\x84D\xc4\x1c9\x8a\xdc|\
6ZZ\xc58\x14\x13\x83/95\xc8\x14j\x98\xe6\
\xa2\xd5\xd2'\xf5\x9azL\x13\xa1Id\xb7\x99\x90\xdb\
nF\xb9\xda\x8d\x06\xa5v9,9=\xf9N\x13\xec\
\xd9r\xd4G\x0d;\xabF\x88c\xff9\x8f\xdf\xee\xfb\
=\x1a\xf9\x02\x9c\xbf\x90\x80\x93\xf1\x17p\xa3\xad\x07\x19\
\xc4OJ\x14\xe9n\xbaX\xa8\xef,\xfa\x94\x98P(\
\xb7@\xe9\x0e<\xf9W\xec)*w-\xc1g\x04\xfb\
\xb6\xb9\xe4D\x8d\xbe\xcc\xb2Z\xfc\xe3\xe4\x19\x1c<\xf4\
7\xb0r\xf3\xb0\xef\xc0\x1fP \xd1!\x89'e*\
\xa6K\x85>\xbf!\xd5F\xe4.\x90[!\xb0\x0c\xae\
\xe5\xdc\xe2\xd2\x11\x13\x13\xe4\x87o<\xaf<\xe7\x96\x15\
5\x9ciE\xe5\xf8\xfb\xb1X\x1c?\x19\x877\xf6\xef\
\xc7\x8d:\x11\x92\xab\xa4\x0c!\xedp\xea5U!\x8b\
4[\xc9\x037*4n\xd4I:\x17\xc3rs\x08\
\x8em\x95\xfb\x87$\xe0Jesp\xe4\xf8)\x1c>\
|\x98\x8cc.2\x05*\x5c\x22\xd5\xd3]~M\xdc\
\x0b6\xe9tv\xa7\x1dw\x8c\xe4\x88\xb6\xf9\x9e\x84\xb7\
\x1a\x95\xfb\x22\xbdI\xfd\x80\x0bm\xf4\x042JxL\
\x0f\x9cKI\xc3\xb5\xa6.|\xc2me6Y\xf1\x83\
\x01\x5c\x97\x9a\xc1Q{ \xf3\x04\xd7\xce%&\x056\
\xc8\xfd\xc7\x9d\xc8\x1d\xd5\x82\xdc\x1a\x01\xce^NE\x81\
X\x85x\xf6]\x5c\xa9U\x90\xaa\xfb\xc0\x96\xdbP\xad\
u\xe3\xaeTA/\x10\xca\x0dr\xbf\xba\xd3j\xa3\x05\
\xb7\xa2Q\xf8\x1d\xafC\x8dO\xb9-\x88\xcb\xe6\xe1\x9a\
H\x8f\xaa\x1e/\x9a5\xe6\xc7\x7fz\xf3-Wx\xac\
\xa8\xdc\xaf\xbd\xac\xdc\xd1\xe2\x08\xdd\x05\x5cu\x1f\xde\xcb\
\xafE\xb9v\x002g`\xf5\xc2\xa7\x97\xa9\xdc\xf7\x08\
\xd2\xa9\xdc;\xf8\x03\xf3\xc2\xf1\x13\x82\xca\x1c\xee\x9dP\
\x0b9\x94\xb8\x0d\xc2\xc8\x16\xa3\x17\x87\xc3/\x22\xf7\x0e\
\xff\xdam\x8a\xdda\x99\xd5\x1b\xb6\xd8k\xbb^2\xbe\
/\x89\xff\x01f\xb9_\xfc\x11\x80=\xcf\x00\x00\x00\x00\
IEND\xaeB`\x82\
\x00\x00\x03T\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x02\xe6IDATX\xc3\xd5\
\x97\xcdN\x13a\x14\x86\xeb5\x94\x95{q\xe1\xd2\xc4\
\xe0\x05\xb8\xe2\x0e\x5c\xb8\xf4\x02\x5c\xb10\xea\x05\x18\x96\
&bX\xb8\xb0\x91X \xd1\x9d\xbf\x89\xa4\x14\xb1R\
\xa4HE\x94\xfe\xd0\x02C\xff\xa6\x9d\x19\xa6e\x80\xe3\
y{\xfa\x85QJ\x82\xc9!\x86I\xde\x9c3\xa7\xf3\
\xcd\xfb\x9c\xf3M\x9bN\x84\x88\x22\xffS\x91s\x01\xc0\
\xc7\xd5\x90n\xff\xa5\xfb\xac\xc7==d\x0d\xa9\x02\xf0\
12<<\xbcj4::\xba\x19V<\x1e\xaf&\
\x93\xc9V:\x9dv\x13\x89Dk`` \xcdkn\
h\x02\xa48\xd2\xe1\xe1q\x99\xba\xef\xb7\xc9\xb2,\xda\
\xdf\xdf'\x86\xf1x\xcd\x18\xeb\x8a\x1a@?\xf3\xb0\x1c\
\xc7\xa5Lf\xb9\x0b\x14\x04\x01\xc5b\xb1:\xaf{p\
\x1a\x88S\x01\x1c\x1c\x10ww\xb2l\xdb\xa1\xf9\xf9\xcf\
d\x0e\xd7u\xe9\xf9\xc4D\x17B\x05\x00&{\xc1\xc9\
\xaa7\x1cJ\xce\xcdS\xf8p]\x0f\x8b\x17T\x00\x82\
\x10@gO\x14\xce\xed\xa6G\x1fgf\xe9\xf5\x9b\xb7\
\x14\x9f\x9c\xa4\xa9\xa9iz\xf7\xfe\x03E\xa3\xd1e^\
\x7fA\x05\xc0\xef\x10\xed\xb6%\x86\x85\x9a\xe3\x05\x94]\
\xcd\xd1\xe4\xf4+z2\xfe\x94\x9e\xc5^\xd0Lb\x0e\
\x8b\x17U\x00\xda\x81\x18\xf5\x13 <\xff\x90j\xcd6\
\x157\xab\x94/nS\x89c\x8d\xb7\x85\xd7~Q\x01\
\xf0y\xcc\xcd]\x1e\xb5\xc7{\xdb\xee\x9f;\xbe\xe4\x88\
]\xb8\xbd\xee\xe2\x94\xca3\xe0u\xe4\xc6uWb\xd8\
\x109\xea\xe63D\xd4\x01\xa7\x06\xe0\xf4:\xad9\x22\
\x98\x98hr\x80\x98kPS\x9d\x00\x00*-\xb91\
\xe2NS\x8c\x10\x0d\x04\xf2m\xfb(\xb6|E\x00\x9b\
;\xdbj\xfci\x8e<l\x88\x1a\xae9\x13\x80:\x8f\
\xb7T#*\xd7\xc5\x04\x06\x06\x005(\x9c\x17\xab\xbc\
%\xbb\xca\x13\xc0Ma\x0e\x15*rn\xcc~Z\x02\
hj\xdd\xad\xf1\x94'\x00S\xdc\x1cqm[@`\
\x9a\xab\x1cu\x9e\xeb\x81A\x15G\x11\xc0j\x891\x0c\
\xd6w\x04 \x0cd&b\xb6iu\x8b\xa8\xaa\x09P\
\xb6\xc5\xbc\xd0\x03\xf8\xbe)c\x87)`\x0c\x18\x84\x1c\
\x00[ME\x00t\x03S\x98\xad\x94\xc5\x1c\xe7F\xe6\
\x1c\x00\xc8q]\xa9\xa1\x08\x80\xfd\xfcV\x12s3\x01\
\x085\x18B\xe8\xda|\x8e)\xa8N\x00[\x00\x03\xc8\
\x98g6\x04\x002\xe6\x85\xde\xf8\x17\x0b\xfc,\xd8\x8a\
\x00\x18g:O\xb4T\x14#\x98\x02\x00\x02\x0c>\xfb\
\xc5S(\xf0C\xb8fI\xf7k\xf9R\x87\xd7\xbeT\
\x01\xc8U\x8f\xbaN\xadK\x0e\x90\xaf\x85\xde\xb7\xc2\x92\
=O\xa6\xb3\xde\xa3\xb1q\xeb\xda\xd0\xf5\x15\x98\xb3n\
\xa9\x00l4\xa4k\x18\xff\xe0\x11\x7fZ\x17S\xd4\x13\
\x0bYo\xe4\xee\xbd\xe2\xa5\xc1\xcbK|m\x8cu\x87\
5\xa8\xfa\xb7\x1c\xdde\xd9<\x8f\x1f\x19\xfe\x9e\xcf\x1e\
7\xbd\xc9\xbax&oF\x00h\xf2\xff\x81\x99\x94\x9e\
\xe9?\xbf\x19\x01B\xd3\xf4\xfc\xbd\x9c\x9e\xa5~\x03Q\
l%\xa1\x92\x95\x0aw\x00\x00\x00\x00IEND\xae\
B`\x82\
\x00\x00\x05:\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x04\xccIDATX\xc3\xb5\
\x97]L[e\x1c\xc6wo\xbc\xd9\xe5\x12I q\
\xd7&\xe3N\x13\xb8p\xd1\x85D\xbdP\xe3\x10\x18\xe5\
+.&J\x04'\x86\xaa\x8b\x99\xe0\xd0\xa2l\x19\x86\
9\x17\xdc\x1a\x16\x98\x80@l\xa6C\xca +\x83\x1e\
(\xcc\xda\xd1\x96\xd2\xd2J{\xfa\x01\xa5\xd0\xef\x16\x1e\
\xdf\xff\xdb\x1d\xc7\xcc\x04*\x87\x93<9o!\x9c\xe7\
\xf7<\xefG\x0f\x87\x00\x1c\xcaF\xcf\xbd\xfa\xe9\xbbL\
Z&a\x0fj`\xca\xd9\xe9y\xd9\x9a?]P\xf2\
\xa5\xc1\xe9\x8f\xa7W\xc3@0\x02\x84\xa2\x19\xad\xc72\
\x8a'\x81X\x22s\xbfyk\xdaK\x10r\x02\x1c{\
\xe7\xac\xda\x1c\xd8\xc8\x98\x12@\x84\x99\x85\xe3\x19\x911\
)\x1aKa%\x94D8\x9aBs\x87\xc6\xbe\x13\xc4\
\xff\x02\x90\x12\x93y$\xf1\xc8X\x92\xcf\x1f\x84]\x8c\
\xc2\xe5\x09\x22\x12K\xa3\xf4\xc3\xefM4uY\x01\xb0\
\xeb\xd86\xd5\x90\x9e:\xfc\xcc\xb9\xe7_.\x11?V\
\x9eEEU\x0d*\x99\xde\xaf\xad\xc3\x9d\xb1\x89\xc7\x00\
\xac\xb6%\xfc\xb9\xe8\x87k\x15X\xf6\x04\x10\x08\xc6\xd2\
\xaf\x9c\xbep\x9fA\x1c\xd9\x15\x80]\x87\x99\x1a\x8a\x8a\
\x8a\xcc\x92Z[[\xdd\xa4\xafU\xad\xfe\xafT\xdf\xa6\
\x06\x06\x06195\x85\xd9\xb99\xe8&&PPP\
\x80!\xcdo|\xdeI\xa6\xf9\x05\xcc\x98\x5c\x1c\xc0\xe1\
OA\xf4\x85\xf0C\xaf\xce\xcd\x00j\xf6\x02PCf\
\xd8\xe5\x8a\xc7\xe3\xf0z\xbdH\xa7\xd3\x98\x9c\x9cDe\
e5fg\x8d\xbc\x81\x07f\x1bt\xd3\x16\x0e@2\
-x\xf0\xdd\x8dQ\x8f\xac\x00\xe1p\x18F\xa3\x91\x8f\
S\xa9\x14~\xea\xedE\xe3'\x9fa\x86A8\x96\xdc\
Pwu\xe3LC#\xce5\x9d\xc7\xed\x91q\x5c\xbc\
>,/\xc0\xc6\xc6\x06\xf4z\xfdc@}}\xfdP\
2\x88\xd0F\x1cf\x9b\x0b\x82\xc1\x88\xa9\x19\x13\xac\x0e\
\x11\x97\xbadn\x80\x00\xa6\xd8:\xd8~E\x22\x11\x94\
+*0\xae\x13@\xe7\x04mW\xda\xaa4\xbe|S\
\xe65@f:\x9d\x0e\xc3\xc3\xc3\xe8e\xf5\xf7\xf7\xf7\
C\xab\xd5\xa2\xaa\xba\x06cw\xf5\x90\x0e*w\x90\xed\
\x04\xb6\x0e\xda\xbbe\x06\xa0y\xb7\xdb\xed\x18\x1a\x1aB\
gg'zzz8PIi\x19ni\xf5\x10\xd7\
\x00o\x08\xb0\xf9\x00g\x00\xb8\xd0%3\xc0\xd6\xd6\x16\
\xdf\x09\x81@\x00\xa2(\xc2\xef\xf7cmm\x0d\xa7\x14\
\x95\xd0\xfc\xae\xe7\xa9\xc9|\xc1\x0b\x98=@\x9b\xdc\x00\
\xdbA677\xf9v\xa4V\x14\x15\xd5\xe8\xfbU\xe0\
\xa9\x1d\x81G\x00\xe7;\x0f\x00\x80\xcc%\x80$3O\
$\x12(+\xaf\xe2\x00\x7f\xb8\x00\x8b\x98\x01\xa06Z\
\xd5\x070\x05\xff\x98'\x93<=MI\xc9\xa9J\x0e\
\xa0\xb7\xb3\x03\x89=\xc5\xf8\x170\xb1\x00|q\xf5\x00\
\x00\xa4\xea\xc9\x98\x14\x8b\xc5P\xa6\xa8\x82zH\xc0\x98\
\x19\xb8k\x05\xe6\x9c\x99\xfb\xe7Wd\x04\x90\xd2Sj\
\x02\x88F\xa3\xdc<\x14\x0a\xa1\xb8\xb4\x02\xd7\x06\x05\xdc\
f\x87\xe4\xa0\x01\x1cd\xc4\x04(;d\x06H=\x9c\
s\x12\x99\xd3\xb9@ \xc5eU\xb8\xd8-\xa0\x7f:\
c\xae}\x90i\xe0\xa3v\x99\x00\xfe]=\xa5&\xad\
\xae\xaer\x88\xb7J*p\xb9W\xc0=\x1b\xb8~\x9e\
\x01\xee\xcc\x03g.\xed\x13@\xaa\x9dD\x8b\x8e\x92\xd3\
qL\xdf\x01+++X__\xe7\x10'Y\x03\xdf\
t\x09PO\x00\xbf\xcce\x1a\xb82\x064\xec\xa7\x01\
\xc9X\xda\xebdNi)9\x1dD\x04@\xf5\xd3\xcf\
\xde|[\x81\x96\xeb\x02O~u\x1c\xb8q\x0f\xf8q\
,\x9e~\xbdNm\xa67\xaa\xac\x00\x9ed,m7\
2%\x00\xd1#\xf2\xe4\x12\xcc\x1b'\x15h\xef\x11\xa0\
\xbcf[\x7fO5\xe2<q\x9a\xbf\x8ei\xf7\xfcJ\
&\x01\x90\xa9$i\xb5SB2\x0f\x06\x83p\xb9\x5c\
\xdc\x90^J\xe8\xb3\xc7\xe3\x81\xdb\xed\xc6\xf1\x13\xaf%\
\x9f}\xa1\x9cL;\x98\x8a\x99\x8e>\xc9xG\x00\x95\
J\xc5\x01\xa4\x15.\xcd7\x19RR:\xf7)\xb5\xc3\
\xe1\xe0\x22\xe3\xc5\xc5E\x0e\xf5\xe2\xf1\x97\x5c\xf4\x1e\xb9\
\x93\xe9\xae\x00---n\xe9`\xa1\xd4\xd2\x97\x0d\x8d\
\x97\x97\x97\xe1\xf3\xf9`\xb3\xd9\xf8}ii\x89C\x10\
\x00\x8d\x0b\x0b\x0b\xcd\xb2\x00\xd0\xa2\x92R\x93\x11\x8d\xe9\
N\xdfxT;5`\xb5Zy\xf5\xd4\x0a\xfd\xce`\
0$\xf2\xf2\xf2\xee\xb3g\x1c\xd9\x17@SS\x93[\
\x9agJO\x22\x13\xaa\x9a\xc6\x16\x8b\x997@\x9fG\
GG#mmm\xde\xfc\xfc|\x13\xfb\xdbA\xa6\xb2\
\xbd\x9a\xff'@ss3\x9f\x02JG\x10T?U\
???\xcf\xeb\xd6h4\x91\xba\xba:\xe7\xc3\xb4]\
L\x1f0\x1d\xcd\xc6xG\x00\xa5R\xe9v:\x9d\xbc\
bJJo>\x94\xb4\xbe\xbe\xde\x99\x93\x93#\x99\x16\
gSuV\x00\x8d\x8d\x8dn\x8b\xc5\x82\x81\x81\x81H\
mm\xad377WV\xd3\xdd\x00\xf8\x7fFL\xc2\
A\x99n\xd7\xdfC9V\x18\x85p\xc8\x04\x00\x00\x00\
\x00IEND\xaeB`\x82\
\x00\x00\x05+\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x04\xbdIDATX\xc3\xed\
WkL\x93W\x18>#q\xc92\xe9\x16\x97\xa8T\
e8\x9d\x02\x15\xf6\x03\x872\x93\x01f,[p\xc4\
0\xff`\xa2.\x1a:\x1dN\x03\xba1\x89[\xb3\x80\
\xd9\x0c\x84\x02\x19X\x1c\x14\x8b\x85\xb2\x82\x95^\xe4f\
\x0b\x8e1\xf8\xc3F\xcb-\x81\x15\xdc\xa8\xc2\x1c\x1b\xb7\
ji\x91\xf2\xee\xbc\x87\xaf\x0c\xdc\xb8\x0da\xd9\xb2\x93\
<\xed\x97\xf3}\xfd\xde\xe7\xbc\xef\xf3^J\x00\x80\xfc\
\x93 \xff\x0a\x02t\x09(D\x14\xd9\x14q\x14\x01+\
F\x80\xae\xddd\xdd\xc6f\x22L\xf8\x95\xc4\x8bG\xc8\
\xa1\xd3\xf7\xc8\x8e\x97;82a+A \x85\x9c\xbe\
0H.\xdd\x80\x19@2\xabyM\xf4\xbe\xfbr\x13\
hd\x06\x91\x04^\xa3Q\xf4\x06\xee\x85G\xf5\xd0\xbd\
\x83\xcbM \x9b\x9d\xf6@t/\xbd\x162= \x89\
?H\xa5,\x1b\x01\x8c1y\xc1\xbb\x9d\x88K\xc6\xd7\
\xc6&\x0e\xa0\x10\xb9\xfdB\xfe\xc5+6F\x8c\x12\x5c\
N\x02\x93\xa7\xa7\xa7\x0d\xcc\xd39\xb9\x98c6\x14\x0a\
\xd2\xe4\xa3+A \x8c)\x9e*\xdf7G\xeb\xdc{\
\xb5\xcc\x89\x9e@D\x96T\x83+,\x0b6FH\x08\
\x13\xf5d*{.T\x03\x01\xf8\x037\xbf\xc0\x0e4\
*T\xdfb\x88R\xd5,X\x03t\x1d\x16\x08\x04z\
EU\xf5\xc8\xa0mt\xc2\xd4s\xf7!\xbesQ\x95\
\x90\xae\x8f\xd0\x13\xcf\xe5\x94\x83\x87\xb4\x02\x9e\xcc.\x03\
\xd4\x06\xdd\xaf\x99\xcb\xb0\xaf\xaf\xaf>\xbf\xd2`\xb5\xdb\
\xed\x80\xf8y\xe4>\xc4^\xab\xb4\xb9\x88/\x86\x80'\
\xd3\xc0g\xf9\x8e\x19\xf5`\xd7^3\xbav\xdas\xee\
h\xd8\xc7\xc7G\x9f\xab\xab\xb0\x0e\x0f\x0d\xc1\x10\x87\xb2\
\xf6.\xe7\x967\xf7wsa\xd8\xbd\xe8^\x80/f\
\x9a\xa0\x86\xdf\xa96B\xf7\xf0\x03\xd8\x19\x9f\xd4\xcf\xa5\
\xe7\x1a\x8a\x98-~\xfem\x97T\x1ak__\x1f\xb8\
\xd0\xd1s\x07br\x15VN\xc4\x87\x97\xd4\x8c0\x14\
\xe9\x15\xb7\x1e8\x1c\x0e@\xa4\xd6\x191\x9e\x85\x9b\x05\
~m\xa9%\x1a[\x97\xd9\x0c\xe6.\x0a\xf3$\x14\xdf\
6\x8e{\xbd\x1e\xd1\xcdB\xc8\x09o\xa9\x04<\xd1\xbd\
V\xab\x15\x10w\x7f\x1b\x84\xf3\x92\x5c\xbbR\xa9\x84\xfa\
\xfaz0\x99L\x0cu\xdf5\xc1Q\xb1d\x18\xc9Q\
D>\xb6v\xcc\xb4@O\x93_~\xd3\xd6\xdf\xdf\x0f\
2\x99\x0cD\x22\x11\xa8T*\x90J\xa5\xa0\xd1h \
K[9\xbe\xe9\x95\xe0\x1f\xb8S\xafy,\xf3\x00\x97\
\x8e\x22\x9e\xc7\x86\xe6S)\x19\xf6\x82\x82\x02\xe6\xe2\xa0\
\xa0 \xe0\xf1x`\xb1X@[^\x01\xfb\xcf&\x0c\
-\xa6S\xceg\x94\xcf\x09L\x83\xe2[{\xe6\xc2`\
\x9a\xb2\x14\x14\x0a\x05\x88\xc5b\xc8\xcc\xcc\x84\xa2\xa2\x22\
P\xab\xd5\xd0\xd9\xd9\xc9`\xec\xfe\xc9\xb9\xc9\xdb\xa7u\
.\xb7\xcfK\x80\xae\xb7\xd8)p\x0e\xc0j\x97\xacx\
\x88\xca\x7f\x82\xe2)\x89\x0e>\x97+![\x96\x0f\x07\
c\xe3G\x84\x1f&\xd8\x92rd\x8eo\x1a\xbf\x07\xa3\
\xd1\x08-\xad-\xf0\xcb\xc0 \x1c8\xf1\xbe\x05\xb3b\
\xc1\x04\x5ci\x84\x85\x85\x84F\xdc&\xe72\xac,\xcf\
3\xb5\x13\xec;\xe3\xba\xd33\xaf\x82\xe5\xfez\x89\x06\
\x9e\xde\xfcb\x1b\xf7<\x92\x8d{f\xabO[\xca5\
\xedXCC=444\x80\xa5\xb7\x172\x14\xc5\xc3\
\xf3\xe9\xc0e<\x92\xe5(\x9e6]\xe5\x9c*2x\
}\xf4\x83.Zl\x121\x0c\x1b%\xeaq\xf7/\xcb\
'\xef\x05\x87_\xfe\xd3\xe4D\x0bLh\xf4\xc9>u\
\x95\x1e\x0c\x06\x03\xb4\xb7\xb7\xc3\xd7\xc6\x961\xae\x81\x09\
f\xf16m8h<I::e\xf8b\x81\x83D\
\xbdWC\xb6\x0a^\x9b*\xc3\x94\x5c\xb0B\x0f\xab$\
\xb4\x04\x9fJ\xaa\x9bC71(\xd4O\xf2\x0a\xc7t\
:\x1d\xd4\xd6\xd6\x82\xc9|\xdb\xb9a\x9b\xf7_\xeab\
\xb2\xe5~\x9cu\x1f\x0d\xf3\xb2\xd4N\xf2\xf6\xb1\xeb.\
\xb6\xae\x94\xc3\x90l\x97U\xc1KW\xab\x80\x9cMn\
Z\xd0\x1cI\xbd\xb1\xe7\x88\xb0\xef\xcaW\xc5PZZ\
\x0a\x1d?\xf6L\x04\x06\x87t<\xaa\x0b\xc2\x84F\x8d\
\x07\xc8o\x02\xd9\xf9\xaa~\x9a\xf10F\x8e6 \xaf\
\xbcJxCi\x00\x92(\x1d\x98\xcd\x95\xb3y\xc3}\
=\xbf\xf9Dj\xa6].\x97CSK+D\x1c{\
\xf7\xce\xf4\x14%\xae\xf1\x8a\xf5w\x9c\xf5p\x02\xc2\xd9\
\x0f\x89\xd1\x81\x03O\x8e\xf7\xdc\xd2i\xe7\xf3\xdfu\xfc\
o\x14.6\xd2\xef\xd8\x17iI\xbe,\x9d\xc8\xd3\x96\
;\xa7\x0f1\x8c%\xc6\xdf\x9f\xbaw_q5\xa0A\
l\xb5\x08\x8c\xf9\x94\xf1\xe0\xf03K\x9a|h\x13Z\
\xbd\xce\xa3\xd9kOH\xf7\x0c\x0f\xb0\x0f\xfe\xf3\x87\xc8\
\xf9/\xee\xb9In\x00\xf6{>\xed\xf7\x08\x1e*>\
]\xe5X\xaa\xf1GZ\xf5\xb6Y\x0b\x11\x1d\xb3C\xc9\
\x918\x099\xf9\xa9\x96!\xfa\x5c\x1a\x0d\xcf\xb3\xff\xff\
7\xfcO\x13\xf8\x1d\xe7\x87\x19\xb9D\xc3\x01\xcf\x00\x00\
\x00\x00IEND\xaeB`\x82\
\x00\x00\x06m\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x064IDATx^\xad\x97[lT\xc7\
\x1d\xc6\x7fs\xce\xd9\x8b\xbd\xf6\xfa\x16\xa0\xbe\x00\x0e\xb2\
ic$BJ!\x22\xa1-\x95b\xa5/\xeeKh\
+\x95\xa6U\xa5\xc6`U\xaa\xda\xb4\xaa\xfaV\x09U\
\xca\x03\x94'\xda\x07\x84\x14)\xad\xc4\x8b\xa5R\x83y\
\x08\xc5\x189\x0ei\xd3\x84\x9a\x9bcj\xec\xb2\x04\x1b\
;\xbb\xf6z\x8f\xbd\xbb\xde\xb3g\xa6\xc3h\x85\xe5r\
l\x88\xc9'}\xfa\x9f\x9d\x87\xfd~\xf3\x9f\x99s\x11\
J)\x82$\x84x\x05x\x9e\xc7kH)\xf5w\xd6\
(' \xb8C\xbb\x01h\x97R\xbe\xc6cdY\xd6\
\x07\x1a\xf6\xbb@\xb7\x069\xff\x14\x00&\xfc\xb7\xed\xf5\
\xe2`]DDn\xce\x89\x8a+W\xaeP]S\x8d\
@\x00\xa0P\x08e(A)f\xd3i^\xa9\x17/\
\xbc\xb4Nl;\xf1\x1f\xb9G\x83|[CL<M\
\x07\xf6\xff`\x8b\xdd,%\xf8J2<<Lee\
%+\xc9u]\x1e\xc0n\xa9\xb0\x22\x1b\xa2*r?\
\xa7\xea\x81\xb5\x03\x08-\x05H\xa1\x0d\xf4]\xbcH.\
\x97\xc3/\x16QJ\x91\xcf\xe7Y\x5c\x5c\xa4P(P\
\xd4c\xb5\xb5\xb5\x94\x01X\x80\xf8\x82\xf6\x80\x01\x006\
D\x05\x1f\x0f\xbcK>;\x8f\x85D\x952\xe2\xb6\xc4\
\xb6\x04!!p>Sl\x8c;\x80D*\x04\xf0\x9c\
\x10\x02\xe0\xcb@\x05P\x0f4`\xc4Hi\x9f$\x02\
\x01N\x9c8!\x00\x81\x05\xd2\x87\x96\x96g\x09em\
\x14\xe5(\xa5\xb4A\x08XW\x19%\xe2\xd8DB\x16\
\xc3\x13s\x5c\xbc=A\xf7X\x8e\x5c$\xbe\xa9\xbd}\
\xf7\xef-\xcbZ\xdc\xb1cGYUU\x95\xd3\xd8\xd8\
\x18~\xe0\x86\x86\x86\xd0\xa5K\x97\xdc\xae\xae\xae\x08\xf0\
\xd6\xaa\x1d\x00\x13DU,\xc2s\xd51\xf2\x9eO\xa1\
(\x91Ja\x09A\xd8\xb1\x88\x86l\xe6r\x05\x12\xa2\
\x8e?\x9f\xff+\x0dM\x1b\x01\x22\xc0f\x96\x84\xef\xfb\
x\x9eGuu\xb5\x9ePK\xf4\xea\xd5\xab\x87\x84\x10\
(\xa5\xdeZ\x11\xc0\xb2A\x00\xb6-\x90\xda\xb6\x148\
\x08\xa4\x12X\xc2\x8c\x1b\x8fL\xb9\xec{\xf5;\xd47\
6\x11|/\xc1\x84g2\x19\xca\xcb\xcb\xcdf>v\
\xec\xd8&\xbd\x7f\x0e.A,\x01\xd0\xd9\xd9\xa9\x0e\x1d\
:\xa4l!\x08Y\x10\xb6-\x1c\xc7\xc6BP\xb4\xcd\
\x1a\x1b\x00\xc7\xb2\x888\x96\xae\x02`Yx\x10\xc0\xdc\
\xdc\x1c555\x06 \x1a\x8dr\xe4\xc8\x91\xcd\xc0\x03\
\x88\x1b\x1a\xa2\xc7b\xb9\xb0mt0f\x8d\xcb#6\
\xb1\xa8\xa3\xc7,2\x8b\x1e\x93\x99\x1cc\xa9y\xee\xcc\
.\xe8\xdfEr\xf9<\xab\xc8,A6\x9b5\xa7f\
\xe9\xffm\x0e\x1c8\xb0\x1e\xe8\x00X\x06\xa0\xb4t\x16\
\x8e\x0d\xe1\x90\xc0S\x8a\xb1\xa4\xcb\x8d\x8c\x83\xd3\xb2\x97\
\xa6}\xaf\xb3\xb5\xe3\x17\xac\xdb\xfb:\x0d/\xb4s\xfb\
\xce$\xfd\xfd\xfd$\x93I\x94R\xe6\xfa\xf8\xf1\xe3\xe8\
\xba\xac3\xe7\xce\x9d\xe3\xe8\xd1\xa3\x1c>|\x98\xde\xde\
^\x12\x89\x84\x04,\xa1\x15\xdc\x01\xed\xff\xce\xe6\xf8\xe7\
\x94Ok\xc7\xcf\xf8\xe6/\xdf&\xf6\xf57\x99|\xa6\
\x83k\xfe.\xae\xf1-dk\x17\xad{\x7fN^V\
s\xfaog\xd1wM\xee\xdc\x9d\xe2\x1b\xafvr\xfd\
\xfau\x03\xa0gk\xd6?\x16\x8b\x99\xebx<\x8e\xe3\
8%8\x04\xc0#\x00\x96%\x98\xcaA:\xde\xca\xfe\
\xdf\xbdM\xd5\xae\xd7(\x84b\x08\xdbBY\x82lA\
r\x7ff\x91O\xeef\x18\xb8\xear\xfa\x1fad\xd5\
^\xae\x8f\xdcg2\xd7\xc6\x85\x0f\xee\x9b\x00\xed\x87\xa1\
\xcd\xcd\xcd\xb4\xb5\xb5\x19755\xa1\xa1\x14 \x83\x1f\
F\x16\xdcq\x15\xdf\xff\xe9o\xa8l\xd8H\xe2\xec;\
L\x8f^\xc3\x89\x94\xb1\xb5y\x07\x9b[\xb6\xf3Iy\
%c\x09\x97\xcff\xf2\xdc\x9d\xce2\xa1\xed\x88\x0dL\
'\xe7\xd8\xb7+\xca\xfa%\x003{=k\xea\xea\xea\
\x00\xccu*\x952\x00J+\x10\xa0\xb9Zp\xe1\x9d\
c(,\xca\xe6\xc6\xd9\x10\x8fR\x94\x92{\xc3}$\
e\x05\xdb\xda\x7fLM\xdb\xcb|<\x9cf\xd2_\xc0\
\xcdx,\xcck/x \x00\xb5t:B\xa1\x90\x09\
-\xdd\xea\x1f\x8e\x01*\xf8>`\xc1\xc6\xb8\xa0P\x1c\
#\x1c\x8bS\xb7\xa5\x96\x92xv}\x05\xe9\xac\xc7h\
\xff\x9f\x98\xae\xbcL\xcb\xf6\x83\xb8\x0ba\xbc\x82\xa4X\
\x94x\xda!\xc7B-\xaa\x80\xe3i\xa0\x96\xd5\x15\x01\
\x00\xd6\xc7C\x84\xca#\xfc\xbfjc!\x9e\xa9\x0cs\
\xe1\xdf\x83\xec\xd9\xf9\x13\xca\xa3\x0e\xb92G\x03(\x03\
ak\x00\x16K!\xa5\x1c%0*\x15\xa4\x5c\x05@\
X\xa5*\xcc\xf5#\xfapl\x86\xf1Y\x8f\xef\xfd\xfa\
\x8f\xdc\xca\xd4\xe0D\x5c\xa2\x11\x1b\xcf\x93\x14=\x07\xd3\
\x01\xa5\x90R\xf2PjY\x01V\x05\x10\x08L\x0d\x04\
\x18\x9dv\xf9\xd5_\x86\x18\xbd\xb7\x80=\x93g\xd3\xba\
2\xf2y_\xbbh\xea\xce\xaf\xd4p\xf9\xdd\xe0%\x00\
\x9ex\x09L\xb8\x10<\xa2\xd6/U\xf2\x87\x1f>\xcf\
\xf5O3D\x1b\xb7\xb1\xf3\xc5\x97Y\x12\x5cN`\x8e\
\xdbS\x01(\xc0\x12%\x00m\xd4R}\xb1\xb5\x96\xdd\
[\xe2t\xbf\x97\xa5j\xf7W\xf9\xd1\x1bo\x10\xa0\xb5\
\x03\x98\xb57\xd5\xd8\x08\x01\xd2\xcbSpSx\xf33\
\x14\xb3i\x0a\x19\x1f%\xfd\xd5\x82\xd6\x08\xf0\xf0)\xe7\
\xe3\xe73\x14\xe6u\xa8\x0e\xd6\x00\xcb\xf7\x89\x10\xc13\
}\xfa\xd7r\x8c\xb2\x137\x03\xc7\x01\xb2\x1e\xfe\xad\x94\
\xcco\xf7DT\x03\xd8_p\x07\x08\x92\x09\xfd\xd7=\
?\xfd~B\xa6\xcf\xdf\xf6\xef\x02\xeev;\xfc\x92\x06\
\xa8\xe3s\xcau]\x1fpW\xed\x00@2\xab\x0a\x1f\
~*\xd3\xbd\xb7\xfc\xd4\xcdi9\x05\xf4\x03\x97th\
\xbf\x10\xa2\xd3\xb6\xed\xaf}\x9e%XXX\xf0\x07\x06\
\x06\xd2'O\x9e\x9c\x06\xba\x83\x00>\x1aI\xca\xad\xe3\
\xb3*\xd7;\xe2\xa7nL\xcb\xd1R\xe8Y\x1dt\x8b\
\x00=\x09\xc0\xd0\xd0\x90\xdb\xd3\xd3\x93\xd2N\xcf\xce\xce\
\x9e.\xbd\x1d\xdf\x08\x02\xe8\xee\xea)\x00\x8c\x04\x84\x06\
\x85\xaf\x08055U\xd0/\x22\xa9S\xa7N%\xc7\
\xc7\xc7/\x03g\x81~\x1d\xec\xae\xb8\x09K\xdfv\xda\
O&\x85\x01@\x08@aZ\xfc\xde\xe0`\xba\xbb\xbb\
;\xa5\xdf\x8a\xcc$\xd0^\xeds\xcda\xed\x9aw3\
n\x11`p\xf0\xfdt___\xfa\xcc\x993\xa6\xc5\
\xa5\xd0\x8fx\x02\x89\xb5\x9ec!D\x18x\x13\xd8O\
is\x06\xb4\xf8\xb1\xfa\x1f\xbd\xfa*_\xf2\xd8\x15\x9d\
\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x04\xa3\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x045IDATX\xc3\xe5\
\x97\xcd\x8fTE\x14\xc5\x7f\xb7\xea\xd6{\xaf\xdbn\xc7\
\xf9@\x9d\x89FM4\x99D\x8d\x1aH\x98\xc4\x8c\x1f\
\x1b\xfe\x02L\x5c\xf1\x07\x18\x16.M\x5ckX\xc3\x8e\
\xc4\x8d\x1b\x17\xce\x82htA\x5c\x18\x0d\xe2\xc4\xc6\x00\
=`PQ\x19`\x02\xa2\x0e\x0c\x83\xd3\xfd^\xf7\x94\
\x8b\xaa\xee\xf9`\xe6\x0d\x84Q\x16VR\xa9\xce{\xb7\
\xeb\x9e:\xf7\xd4\xa9z\xea\xbd\xe7~6\xe5>\xb7>\
\x80]\xbbv\xbd\x03\xec\xfd\x8f\xf2N5\x1a\x8d\x03\xeb\
\x19\xd8\xbb\xef\xbd\xa3;\x1f\x1fv\x00\x9c<:\xcf\xcc\
\x977X\x9c\xef\xdcS\xa6\xda\xa0\xf2\xdck\x03\xbc\xb8\
g\x10\x80\x8b\x7f\x16|\xf8\xee\x1e\x80\xdb\x00p\xfc\xec\
\x1c\xdf?0\x04x.\xfd\xb8\xc0\xfe\xb7\xceo\xcbr\
\x0f\x1dy\x9a\x0b#\x96\xd3\x9f\x1fd\xfc\xd5}\x9bk\
@E\xb0\x16@xp,#\xcb\xb2m\x0100\x96\
a\x8dP\x1b|\x14#%\x22\x14+\xd8\x18\x91\xd5\x95\
s\xe7\xce\x83*\xb8\x04\xd2\x14\xb2\x0c\xd2,\x8cI\x0a\
I\x12\xdew:\x90\xe7\x90\xb7\xa1\xd5\x82v+\x8em\
(r\xb2\xfa8\xd6\x0a\xe3\xaf\xbcIk\xf1\xfa\xe6\x00\
\xac\x15\xac\x15\x04\xb0F\xd8\xbd{\xe7\x16k\xeb\x86\xae\
\x80Z\xa8V\x81\xeamQ\x8d\xaf\x04\xb5\x82\xf7\xa0\xa6\
\x84\x01g\x055\x82\x08\xa8\x0a\x95,\xc3# \x1e\x08\
\xc0\xf0\x1e/\x02\xde#\x12&\x15|\x88#\xc4!\x1e\
<!^@MX\x18@\xd7J\x89\x06\xac\xa0\xdac\
\x00\x9a3\xbf\x05\x8aS\x07i\x02\x95\x04\xb24\xf6\x04\
\x12\x07N\xa1\xe8@^@+\x8f\xbd\x05K9\xb4s\
\xc8\x0bT\x87q=\x00*\xe5%p1@\xd509\
\xf9\xd2\xd6\x0a\xf3>\xd0\xaf\x16\xaa\x1b\x8b\xf6\xd8'a\
a\xbd\x1c%% \x00\xf0\x81\x8d4M\xa3:\xc3\xb3\
\x98\x11\x89l\x07\xdac\x09V\x98_)F\xfca\xcd\
r\x7fa\x1d-\xd1\x80:\x09TI\x18O4/\xe0\
\x9d\x85\xc4!\x89\xc3g\x09\x92i\xd8\x11\x89\xe2\x13\x87\
X\x8b\xefv\x91\xbc\x80\xbc\x03\xed\x02\xdfj#\xed\x02\
\xf2\x02\x9fwP\x1dE\xd5 x:\xebTx\x9b\x06\
\x9c3x\x0f\x03\x8f$\xbc\xfe\xf2\xf3wh\xe86h\
\xa4\xbe\xf1\xeb\xc6\xfc\xdf\xb1\x04R^\x82DM_\x84\
\x8f\x0d\xa58\xe7\xb6\xc5\x88\x9e\x18K\xb9v\xb3\x03\x08\
\x9dR\x11\xaa\x90\xb8P\xefZ\xc50}\xb1\xcb@\xc5\
\xb0\x0e\xf4&\xadW\xf9U.\xe1\xe1\xc6\xd22\xf5\xcc\
p}\xc9\x84-\xe9J\x19\x10\x9c\x1a\xc0s\xe5f\x97\
+7\xbb\xacQW?\xd7\xaad~\xc5'\xa2)\xac\
\x05\x15\xc3\x9c\x0b\xb5w\xa6l\x17\xa8\xc1\xa9 \xc8\x1a\
5\xaf\x9b5\x1a\x8fY1\x9e\xfe{\xe9\xef\x14\x00\xf1\
\x82\xef\x9bX0+WV\x02U!\xd1\x90\xfc\xe7S\
\xdf\xf2\xeb\x99\x13,-\xde\xb8\xa7\xfaWj\x03<\xf5\
\xecN\x9eya\x02\x0f\xa83[1\x10\x03|\x87\xf7\
\xf7\xbf\xc1\xc2\xc2\x02\xb7n\xdd\xa2(\x0aD\x04k-\
\xd6ZT\x15U\xc59\x87\xaab\xad\xc5\x98\xf0\xdf\xe5\
\xe5e\xf2<\xef\xf7#\xcd\xf9\xb8\xf2-\x18pVP\
\x17\x18\xdc1:\xb6rO8~\x9c\xe9\xe9i\x8c1\
x\xef\x99\x98\x98`rr\xf2\x8eY\xd81:\xd6\xdf\
\x86\xae\xd4\x09Up6\xac\xa2V\xaf\xf7k933\
\xc3\xd0\xd0\x10\xd6Z\xbc\xf74\x9b\xcd\xbb\x02P\xab\xd7\
p\xd1\x88\xb4\xd4\x88\x14\x9c\x0b'\x5c\xa0*\x00\xa8V\
\xabdY\xd6\xa7\xb87\xdeis\x1a\xa9\x17AK\xad\
8\x1e\xc7\xbd#\xb4\xd7\x8c1\x88D\xdf\x8f:\xb8\xab\
\x9b\xaf5\xa8\x0d\xf3\xf6\x18.=\x8e\x83)m\xe3\xd5\
\xdb\x12\xa9\xf7\xe5Vl\xad\xf4\x91\x0e\x8e\x0c\xc3\xf2\xef\
\xdb\x02\xe0\xa1\x91a\xd4\xc2\xb5+\x97Y\x9c\xbf\xbe\x05\
\x036\xf8\xc0`\xad\x02\x0b\xdb\xc3\xc0P\xad\xc2\xec\xc5\
K\x9c\xfd\xee\x1b\xce\x9f\x9c\x9e\x03\xa66\x04`$^\
J\x05\x12\x0b\xed\x91'\xa9=\x0co\x1f8\xc8f\xc7\
\x81':\xf1*\xe75\x1e2\x81\x14(\xbap\xf9\xea\
U\xce4\x8e\xd1\xfc\xfa\x8b\xb9\xd9\x1fN\x1d\x02\x0eo\
\x08\xe0\xb3\x8f>\xe0\xa7\xd3'W\x99\xe9\xda\xa3\x86U\
\xe6\xbb\x1e\x04\x1b<_\x1do|w\xee\x8f\xd9_\x0e\
\x01\x87\x1b\x8d\xc6_\x1b\x01\x98\x9a\xfe\xf4\xe3\x7f\xf5s\
l}\xf25\x00\xe2\xb7\xda\x81\xff\xdd\xd7\xf1?M\xf0\
K\xb9\xe8F\x89\xaf\x00\x00\x00\x00IEND\xaeB\
`\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03}\xc3\
\x00i\
\x00m\x00a\x00g\x00e\x00s\
\x00\x08\
\x06\xc1Y\x87\
\x00o\
\x00p\x00e\x00n\x00.\x00p\x00n\x00g\
\x00\x07\
\x04\xcaW\xa7\
\x00n\
\x00e\x00w\x00.\x00p\x00n\x00g\
\x00\x08\
\x06|Z\x07\
\x00c\
\x00o\x00p\x00y\x00.\x00p\x00n\x00g\
\x00\x07\
\x0a\xc7W\x87\
\x00c\
\x00u\x00t\x00.\x00p\x00n\x00g\
\x00\x09\
\x0a\xa8\xbaG\
\x00p\
\x00a\x00s\x00t\x00e\x00.\x00p\x00n\x00g\
\x00\x08\
\x08\xc8Xg\
\x00s\
\x00a\x00v\x00e\x00.\x00p\x00n\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00(\x00\x00\x00\x00\x00\x01\x00\x00\x08\x1d\
\x00\x00\x01yCj\xf6\xed\
\x00\x00\x00<\x00\x00\x00\x00\x00\x01\x00\x00\x0bu\
\x00\x00\x01yCj\xf6\xec\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01yCj\xf6\xee\
\x00\x00\x00~\x00\x00\x00\x00\x00\x01\x00\x00\x1cS\
\x00\x00\x01yCj\xf6\xf0\
\x00\x00\x00f\x00\x00\x00\x00\x00\x01\x00\x00\x15\xe2\
\x00\x00\x01yCj\xf6\xee\
\x00\x00\x00R\x00\x00\x00\x00\x00\x01\x00\x00\x10\xb3\
\x00\x00\x01yCj\xf6\xed\
"
def qInitResources():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 44.648604 | 96 | 0.710603 |
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x08\x19\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x07\xabIDATX\xc3\xad\
W[P\x93g\x1a\xf6\xca\xce\xec\xcc\xf6b/\xbc\xd9\
\xe9\xce\xecn\xbd\xda\xd9\x9b\xb5\xce\xba;{\xb0\xad\xcc\
z\xb1\xce\xce:\xb3vTpu\xdb\xe2\x81\xd6\xb6T\
\x04\xbb\xa5 m\xc1\x82\x06\x08\x07QB\x80\x80\x80\x02\
!\x81\x10\x92@H\x10s$!gr\x80\x04B \
\x9c\x09G\xb5Tx\xf6\xfb~\x13\x160X\x8b}g\
\x9e\xf9/\x92\xfc\xcf\xfb>\xcf\xfb\xbe\xdf\x97]\x00v\
\xfd\x98 \xf1\x0b\x82\x14\x02\x03\xc1u\x82\x03\xcf\xfd\xfe\
\x8fH\xbc\x9b \xe1W\xaf\xef\xb5*\x8c\xd6e\xdb\x02\
`\x19\x1e[\x09'\xf13\xfa\x19\x81\x22\xfc\xdc>v\
H~\x8a\xa0\xb9\xb6Y\x1c2\xcf\xadB9\xfe\x1dD\
\xf6Q\xd8\xc7\xe6\xe8\x87\x86={\xf6XSR\xae,\
\xca::\x10N\xe2\xe5I\xc3\xc41\x04\xb7>I\xf9\
,`\x9b]YSM\x03M\xb6\x114\xeb\xfb 1\
y`\x19\x9d\xc5\xbb\xef\xbe?\xc5\xab\xbe\x83\xf1\x89)\
LO\xcf\xae\x92\xef\xd7\xbct\x02\x11\x9f\x0f\xbe\x1d\xe3\
\xb2\x04CO\xb43@\x8b{\x06\xcd=.4\xeb\xec\
\xa8W\xf6 \x87S\x852^5C\xbc\xb0\xf4\x90\x81\
\xc1`\x5c&\xbfK|\xe1\x04H\x1c$8A\xfd\xdd\
\xeas'\xf1\xb9'\x04H\x87\x97\xc1\xd7\xbb \x22U\
7\xdc7\xa2\xb8N\x88,V>\xccV\xdb:q\x04\
,\x16k,\xfc\xce\xe7'\x10\x916\x93\x95?F}\
\xa5\xfe\x12\xc4o\xf4Y1\xb6\x02~\xef Z{\x9c\
\xe0?0\xa1L(CF\x0e\x1b\xb2\x0e\xf9&\xd2\xf9\
\xc5e\xcc-,!4\xbf\x88\xbd{\xf7Z\xc9;~\
\xbam\x02$~C\x90F=5\x13iu\xb3\x80\xd2\
?\x0f\xcb\xc4\xe2\x9aP\xa1Z\xb4l\xf1Y\xa0\xb6\xa0\
\xa6]\x8d/\xb2sq\xb7\x9e\xff\x0c1%\x9d\x09\xcd\
cbj\x06\x83C\x81'\xe4\xdd\xbc-\xd3\xb0;\x92\
\x033&\xd4S\xb5\xd3\xfbXO\x88\xc5\x03!\x88,\
CP\xbaF\xd0\xed\x09B\xe5\x9bB\x9bs\xfc\xa9\xcf\
Z\x1b\xee*t\xc8\xbc\xc9E\x09\xa7l\x93\xcf\x9b\x88\
'\xa7\x11\x18\x1d\xc3\x80o\x08\xa2\xd6\xd6%\xc2Q\xdb\
(\x12\x87\xc6\x1f\xaf\x82/b\x94M\x89$\x90\x22\xea\
R-\x9aB\xab\xe8\x18y\x04\xa1\xc5\xcf\x10St\xf6\
\x0d\xa3\xd3\xe1\x87\xd4<\x80\x16\xbd\x03\x0d]\x06\x14\xd5\
\x0a\x90\x91\x95\x0d/y\xf1\xc6\xaa\xa9\xd4\xb3s\x0bL\
\xc5\x94\xd8\xdd\xef\x85\xc9b\x05\xb7\xbc\x12\xa5\xe5\x95K\
\x13\xf3\xcb\xab#\x0f\x017\xd9\x11\xe6\xd9\x15\x84\x97\x15\
\x13\x06\xcb<\xd0h\xf2\xa3\xdd\xee_'\x96;\x86 \
\xb3x\xd7}\xe6\x08\xa4\xf8<3\x1b*\x8d6\xaa\xdc\
S3!\x8c\x8e\x8d3\x15\xd3&\xe47\x09\xf1\xc1\xc5\
\x8fQs\xaf\x01\xbee`\xfc\x11\xa0#\x13#\xf2\xce\
\xa1\xbe]\xb9\xb8Q\x01\x83\x81ttM\xa7\x1e\x0ag\
\x80\xa9\xb8\xdd\xea\x83\xd8\xe8B\x93\xca\xcc\xf8|\xe5\xcb\
,\x88\xda$Q\x89\xa7g\xe7\x18\x1b\x86\x86G`w\
8I\x82:$|\xf8!\xae\xb3\x0b\xe1\x99\x5c\x80o\
\x09\xd0\x90\xde\xe1\x0f,\x81\xab\x1f\xc4}\xef\x04\xdd\x07\
\x1da\xeb\xff\x9f\xc0\x1d\xb9\x16\x1d\xf6!H\xcc\xfdO\
}\xee\xd4\x22\x9dU\x84\xaa\x9a\xbaM>G\xe4\x8e\xf8\
<<\x12\x84\xd3\xdd\x0f\xbd\xc1\x88\xc2\xe2b\x9c~/\
\x1e=\x03\x01\xf4/\x02\x83\x84\xbc\xc5\xff-\xee:C\
(Q\x91\xf7\xf6\x05\xf1N\xdc\xbf}\x843i\xe3 \
\x18\xf43\xab\xe0\xc9Th58\xd1\xd8\xdd\x0b\x9eX\
\x89\xac\x5c\xf63>G\xaa\x9e\x9c\x9ee\xe4\xee\xf7\x0e\
\xa2\xd7lAC\x03\x1f'b\xe3 \xe9\xd6\xc0E\xcf\
\x01R\x90$\xb8\x86\xb2\x9e\x00n\xb4\xdbP\xd1\x1bD\
\x85\xce\x8bJ~\x0bm\xbe\x9b['\xd1\xa0\x99\xf8\x16\
e\x22\x05\xee)\xf4(\x13\xc8\x90x5\x0b\x1a\xad>\
\xaa\xdcc\x13\x93\xf0\x0d\x0d\xc3f\xef\x83\xb4]\x8e\xc4\
K\x97\x90\xc3\xca\xc3\xd4c\xc0NzI1N\xfa\x89\
\x94\x7f[;\x84|\x85\x13%j\x1fJ\xd5\x03\xe8\xf2\
0\xa3(\x22\xf8\xf93\x09t\x8f.\xa1\xa8\xbe\x15\xa5\
|\x09\xb2J*\xf0\xcf\xe3qQ\xe5\xf6\x07F\xd1\xe7\
\xf2@\xab7 \xfdj\x06\x92\xbfH\x83\xcd7\x02'\
\xa9\xda@\x1aL\xe0{\x88R\x9d\x1fE\xdd\xfd\x0cq\
A\x97\x1b\xc5\xdd\x1e\x88\x9cA\xfc\xf9\xcd\xb7]\x84\xeb\
l\xb4C\xd0(\xf7N#\xa7\xfc\x1e\xb2K\xab\xf1Q\
\xeaWH\xfeo\xea\xfaXQ\xb9G\x82\xe3\xf0\x0c\xf8\
`4\x99Q\xc9\xab\xc2\xfbg\xcfA\xfe@\x03?\xe9\
n\xb2\x8d\x19\xb9oi\x06\x19\xd2\x9b*/r\xe5\x0e\
\xe4u\xf6\xa1\xf0\xbe\x1b\x1c\x95\x1b\xf9\x9c\xca)\xc2S\
\xb8\xdd)\xdc+v\x04\x90Q\xc8\xc5\x95ky8\x11\
\x9f\x80\x9b\xb7n3c\x15\x91\xdbjs@\x22m\xc7\
\x85\x84\x0fPt\xbb\x0c\xf3+\x80\x9f4X\xf7$ \
\x1c|\x84J\xd3\x188\xfaa\x86\x9cV\xfdU\xb3\x1e\
\xac\x0e;\xb8:\x1f\xd9!\x1ez/\xe0\x13\xbc\xba]\
\x02&\xbe\xc1\x83\x94o\xd88\x9f\x9c\x8a\x03\x7f=\x04\
c\xaf\x99\xe9n*\xb7F\xd7\x83\xa4\xcb\xc9H\xff:\
\x8b\x8c\xd5<S\xb5q\xf6\xa9\xdc5\xf6i\x5c\x97Y\
\x19\xd9\xbfn!\xa7\xa0\xd4\x82t\xbe\x1aW\x9b4`\
\xc9\xcc\x10\xbb\x82\xf8\xe5\xaf_\xa7g\xc0;\xe1u\x1f\
5\xcc5\xddf|\x94\x96\x85\xb8s\x17\xf1\x97C1\
L\xd5t\x99\xf0\xaa\xaaq\xfa\xf4\x19h\xcc\x0e\x8c\x92\
-6\x14\x1e\xabZ\xc7\x0cx\xe6qp\x0d#L\xa3\
e\x8a\x0c\x8c\xec\xb4\xfa\x9c\xb6^\x94t9\xd0f\xf7\
\xaf\x1e=\x11KG.o\xc3y\x135,\x5c\x99\x1a\
\xf1\x97>\xc7\xd1\xd83\xf881\x09\x86^\x13\x1a\x9b\
\x04\xf8\xdd\x1b\xfbQO\xd4\xf1\x90\x99\xee\x9a\x00\xaa\xad\
\x93`+]\x0c9\xf5\xbc\xf0\xbeg\xbd\xea\xcc\x16=\
JU\x1e\x08m\x01\x94\xd4\xf1C\xe1eS@\xf0\xca\
\xf7%`+nj\xc7\xa9\x84D\xc4\x1c9\x8a\xdc|\
6ZZ\xc58\x14\x13\x83/95\xc8\x14j\x98\xe6\
\xa2\xd5\xd2'\xf5\x9azL\x13\xa1Id\xb7\x99\x90\xdb\
nF\xb9\xda\x8d\x06\xa5v9,9=\xf9N\x13\xec\
\xd9r\xd4G\x0d;\xabF\x88c\xff9\x8f\xdf\xee\xfb\
=\x1a\xf9\x02\x9c\xbf\x90\x80\x93\xf1\x17p\xa3\xad\x07\x19\
\xc4OJ\x14\xe9n\xbaX\xa8\xef,\xfa\x94\x98P(\
\xb7@\xe9\x0e<\xf9W\xec)*w-\xc1g\x04\xfb\
\xb6\xb9\xe4D\x8d\xbe\xcc\xb2Z\xfc\xe3\xe4\x19\x1c<\xf4\
7\xb0r\xf3\xb0\xef\xc0\x1fP \xd1!\x89'e*\
\xa6K\x85>\xbf!\xd5F\xe4.\x90[!\xb0\x0c\xae\
\xe5\xdc\xe2\xd2\x11\x13\x13\xe4\x87o<\xaf<\xe7\x96\x15\
5\x9ciE\xe5\xf8\xfb\xb1X\x1c?\x19\x877\xf6\xef\
\xc7\x8d:\x11\x92\xab\xa4\x0c!\xedp\xea5U!\x8b\
4[\xc9\x037*4n\xd4I:\x17\xc3rs\x08\
\x8em\x95\xfb\x87$\xe0Jesp\xe4\xf8)\x1c>\
|\x98\x8cc.2\x05*\x5c\x22\xd5\xd3]~M\xdc\
\x0b6\xe9tv\xa7\x1dw\x8c\xe4\x88\xb6\xf9\x9e\x84\xb7\
\x1a\x95\xfb\x22\xbdI\xfd\x80\x0bm\xf4\x042JxL\
\x0f\x9cKI\xc3\xb5\xa6.|\xc2me6Y\xf1\x83\
\x01\x5c\x97\x9a\xc1Q{ \xf3\x04\xd7\xce%&\x056\
\xc8\xfd\xc7\x9d\xc8\x1d\xd5\x82\xdc\x1a\x01\xce^NE\x81\
X\x85x\xf6]\x5c\xa9U\x90\xaa\xfb\xc0\x96\xdbP\xad\
u\xe3\xaeTA/\x10\xca\x0dr\xbf\xba\xd3j\xa3\x05\
\xb7\xa2Q\xf8\x1d\xafC\x8dO\xb9-\x88\xcb\xe6\xe1\x9a\
H\x8f\xaa\x1e/\x9a5\xe6\xc7\x7fz\xf3-Wx\xac\
\xa8\xdc\xaf\xbd\xac\xdc\xd1\xe2\x08\xdd\x05\x5cu\x1f\xde\xcb\
\xafE\xb9v\x002g`\xf5\xc2\xa7\x97\xa9\xdc\xf7\x08\
\xd2\xa9\xdc;\xf8\x03\xf3\xc2\xf1\x13\x82\xca\x1c\xee\x9dP\
\x0b9\x94\xb8\x0d\xc2\xc8\x16\xa3\x17\x87\xc3/\x22\xf7\x0e\
\xff\xdam\x8a\xdda\x99\xd5\x1b\xb6\xd8k\xbb^2\xbe\
/\x89\xff\x01f\xb9_\xfc\x11\x80=\xcf\x00\x00\x00\x00\
IEND\xaeB`\x82\
\x00\x00\x03T\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x02\xe6IDATX\xc3\xd5\
\x97\xcdN\x13a\x14\x86\xeb5\x94\x95{q\xe1\xd2\xc4\
\xe0\x05\xb8\xe2\x0e\x5c\xb8\xf4\x02\x5c\xb10\xea\x05\x18\x96\
&bX\xb8\xb0\x91X \xd1\x9d\xbf\x89\xa4\x14\xb1R\
\xa4HE\x94\xfe\xd0\x02C\xff\xa6\x9d\x19\xa6e\x80\xe3\
y{\xfa\x85QJ\x82\xc9!\x86I\xde\x9c3\xa7\xf3\
\xcd\xfb\x9c\xf3M\x9bN\x84\x88\x22\xffS\x91s\x01\xc0\
\xc7\xd5\x90n\xff\xa5\xfb\xac\xc7==d\x0d\xa9\x02\xf0\
12<<\xbcj4::\xba\x19V<\x1e\xaf&\
\x93\xc9V:\x9dv\x13\x89Dk`` \xcdkn\
h\x02\xa48\xd2\xe1\xe1q\x99\xba\xef\xb7\xc9\xb2,\xda\
\xdf\xdf'\x86\xf1x\xcd\x18\xeb\x8a\x1a@?\xf3\xb0\x1c\
\xc7\xa5Lf\xb9\x0b\x14\x04\x01\xc5b\xb1:\xaf{p\
\x1a\x88S\x01\x1c\x1c\x10ww\xb2l\xdb\xa1\xf9\xf9\xcf\
d\x0e\xd7u\xe9\xf9\xc4D\x17B\x05\x00&{\xc1\xc9\
\xaa7\x1cJ\xce\xcdS\xf8p]\x0f\x8b\x17T\x00\x82\
\x10@gO\x14\xce\xed\xa6G\x1fgf\xe9\xf5\x9b\xb7\
\x14\x9f\x9c\xa4\xa9\xa9iz\xf7\xfe\x03E\xa3\xd1e^\
\x7fA\x05\xc0\xef\x10\xed\xb6%\x86\x85\x9a\xe3\x05\x94]\
\xcd\xd1\xe4\xf4+z2\xfe\x94\x9e\xc5^\xd0Lb\x0e\
\x8b\x17U\x00\xda\x81\x18\xf5\x13 <\xff\x90j\xcd6\
\x157\xab\x94/nS\x89c\x8d\xb7\x85\xd7~Q\x01\
\xf0y\xcc\xcd]\x1e\xb5\xc7{\xdb\xee\x9f;\xbe\xe4\x88\
]\xb8\xbd\xee\xe2\x94\xca3\xe0u\xe4\xc6uWb\xd8\
\x109\xea\xe63D\xd4\x01\xa7\x06\xe0\xf4:\xad9\x22\
\x98\x98hr\x80\x98kPS\x9d\x00\x00*-\xb91\
\xe2NS\x8c\x10\x0d\x04\xf2m\xfb(\xb6|E\x00\x9b\
;\xdbj\xfci\x8e<l\x88\x1a\xae9\x13\x80:\x8f\
\xb7T#*\xd7\xc5\x04\x06\x06\x005(\x9c\x17\xab\xbc\
%\xbb\xca\x13\xc0Ma\x0e\x15*rn\xcc~Z\x02\
hj\xdd\xad\xf1\x94'\x00S\xdc\x1cqm[@`\
\x9a\xab\x1cu\x9e\xeb\x81A\x15G\x11\xc0j\x891\x0c\
\xd6w\x04 \x0cd&b\xb6iu\x8b\xa8\xaa\x09P\
\xb6\xc5\xbc\xd0\x03\xf8\xbe)c\x87)`\x0c\x18\x84\x1c\
\x00[ME\x00t\x03S\x98\xad\x94\xc5\x1c\xe7F\xe6\
\x1c\x00\xc8q]\xa9\xa1\x08\x80\xfd\xfcV\x12s3\x01\
\x085\x18B\xe8\xda|\x8e)\xa8N\x00[\x00\x03\xc8\
\x98g6\x04\x002\xe6\x85\xde\xf8\x17\x0b\xfc,\xd8\x8a\
\x00\x18g:O\xb4T\x14#\x98\x02\x00\x02\x0c>\xfb\
\xc5S(\xf0C\xb8fI\xf7k\xf9R\x87\xd7\xbeT\
\x01\xc8U\x8f\xbaN\xadK\x0e\x90\xaf\x85\xde\xb7\xc2\x92\
=O\xa6\xb3\xde\xa3\xb1q\xeb\xda\xd0\xf5\x15\x98\xb3n\
\xa9\x00l4\xa4k\x18\xff\xe0\x11\x7fZ\x17S\xd4\x13\
\x0bYo\xe4\xee\xbd\xe2\xa5\xc1\xcbK|m\x8cu\x87\
5\xa8\xfa\xb7\x1c\xdde\xd9<\x8f\x1f\x19\xfe\x9e\xcf\x1e\
7\xbd\xc9\xbax&oF\x00h\xf2\xff\x81\x99\x94\x9e\
\xe9?\xbf\x19\x01B\xd3\xf4\xfc\xbd\x9c\x9e\xa5~\x03Q\
l%\xa1\x92\x95\x0aw\x00\x00\x00\x00IEND\xae\
B`\x82\
\x00\x00\x05:\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x04\xccIDATX\xc3\xb5\
\x97]L[e\x1c\xc6wo\xbc\xd9\xe5\x12I q\
\xd7&\xe3N\x13\xb8p\xd1\x85D\xbdP\xe3\x10\x18\xe5\
+.&J\x04'\x86\xaa\x8b\x99\xe0\xd0\xa2l\x19\x86\
9\x17\xdc\x1a\x16\x98\x80@l\xa6C\xca +\x83\x1e\
(\xcc\xda\xd1\x96\xd2\xd2J{\xfa\x01\xa5\xd0\xef\x16\x1e\
\xdf\xff\xdb\x1d\xc7\xcc\x04*\x87\x93<9o!\x9c\xe7\
\xf7<\xefG\x0f\x87\x00\x1c\xcaF\xcf\xbd\xfa\xe9\xbbL\
Z&a\x0fj`\xca\xd9\xe9y\xd9\x9a?]P\xf2\
\xa5\xc1\xe9\x8f\xa7W\xc3@0\x02\x84\xa2\x19\xad\xc72\
\x8a'\x81X\x22s\xbfyk\xdaK\x10r\x02\x1c{\
\xe7\xac\xda\x1c\xd8\xc8\x98\x12@\x84\x99\x85\xe3\x19\x911\
)\x1aKa%\x94D8\x9aBs\x87\xc6\xbe\x13\xc4\
\xff\x02\x90\x12\x93y$\xf1\xc8X\x92\xcf\x1f\x84]\x8c\
\xc2\xe5\x09\x22\x12K\xa3\xf4\xc3\xefM4uY\x01\xb0\
\xeb\xd86\xd5\x90\x9e:\xfc\xcc\xb9\xe7_.\x11?V\
\x9eEEU\x0d*\x99\xde\xaf\xad\xc3\x9d\xb1\x89\xc7\x00\
\xac\xb6%\xfc\xb9\xe8\x87k\x15X\xf6\x04\x10\x08\xc6\xd2\
\xaf\x9c\xbep\x9fA\x1c\xd9\x15\x80]\x87\x99\x1a\x8a\x8a\
\x8a\xcc\x92Z[[\xdd\xa4\xafU\xad\xfe\xafT\xdf\xa6\
\x06\x06\x06195\x85\xd9\xb99\xe8&&PPP\
\x80!\xcdo|\xdeI\xa6\xf9\x05\xcc\x98\x5c\x1c\xc0\xe1\
OA\xf4\x85\xf0C\xaf\xce\xcd\x00j\xf6\x02PCf\
\xd8\xe5\x8a\xc7\xe3\xf0z\xbdH\xa7\xd3\x98\x9c\x9cDe\
e5fg\x8d\xbc\x81\x07f\x1bt\xd3\x16\x0e@2\
-x\xf0\xdd\x8dQ\x8f\xac\x00\xe1p\x18F\xa3\x91\x8f\
S\xa9\x14~\xea\xedE\xe3'\x9fa\x86A8\x96\xdc\
Pwu\xe3LC#\xce5\x9d\xc7\xed\x91q\x5c\xbc\
>,/\xc0\xc6\xc6\x06\xf4z\xfdc@}}\xfdP\
2\x88\xd0F\x1cf\x9b\x0b\x82\xc1\x88\xa9\x19\x13\xac\x0e\
\x11\x97\xbadn\x80\x00\xa6\xd8:\xd8~E\x22\x11\x94\
+*0\xae\x13@\xe7\x04mW\xda\xaa4\xbe|S\
\xe65@f:\x9d\x0e\xc3\xc3\xc3\xe8e\xf5\xf7\xf7\xf7\
C\xab\xd5\xa2\xaa\xba\x06cw\xf5\x90\x0e*w\x90\xed\
\x04\xb6\x0e\xda\xbbe\x06\xa0y\xb7\xdb\xed\x18\x1a\x1aB\
gg'zzz8PIi\x19ni\xf5\x10\xd7\
\x00o\x08\xb0\xf9\x00g\x00\xb8\xd0%3\xc0\xd6\xd6\x16\
\xdf\x09\x81@\x00\xa2(\xc2\xef\xf7cmm\x0d\xa7\x14\
\x95\xd0\xfc\xae\xe7\xa9\xc9|\xc1\x0b\x98=@\x9b\xdc\x00\
\xdbA677\xf9v\xa4V\x14\x15\xd5\xe8\xfbU\xe0\
\xa9\x1d\x81G\x00\xe7;\x0f\x00\x80\xcc%\x80$3O\
$\x12(+\xaf\xe2\x00\x7f\xb8\x00\x8b\x98\x01\xa06Z\
\xd5\x070\x05\xff\x98'\x93<=MI\xc9\xa9J\x0e\
\xa0\xb7\xb3\x03\x89=\xc5\xf8\x170\xb1\x00|q\xf5\x00\
\x00\xa4\xea\xc9\x98\x14\x8b\xc5P\xa6\xa8\x82zH\xc0\x98\
\x19\xb8k\x05\xe6\x9c\x99\xfb\xe7Wd\x04\x90\xd2Sj\
\x02\x88F\xa3\xdc<\x14\x0a\xa1\xb8\xb4\x02\xd7\x06\x05\xdc\
f\x87\xe4\xa0\x01\x1cd\xc4\x04(;d\x06H=\x9c\
s\x12\x99\xd3\xb9@ \xc5eU\xb8\xd8-\xa0\x7f:\
c\xae}\x90i\xe0\xa3v\x99\x00\xfe]=\xa5&\xad\
\xae\xaer\x88\xb7J*p\xb9W\xc0=\x1b\xb8~\x9e\
\x01\xee\xcc\x03g.\xed\x13@\xaa\x9dD\x8b\x8e\x92\xd3\
qL\xdf\x01+++X__\xe7\x10'Y\x03\xdf\
t\x09PO\x00\xbf\xcce\x1a\xb82\x064\xec\xa7\x01\
\xc9X\xda\xebdNi)9\x1dD\x04@\xf5\xd3\xcf\
\xde|[\x81\x96\xeb\x02O~u\x1c\xb8q\x0f\xf8q\
,\x9e~\xbdNm\xa67\xaa\xac\x00\x9ed,m7\
2%\x00\xd1#\xf2\xe4\x12\xcc\x1b'\x15h\xef\x11\xa0\
\xbcf[\x7fO5\xe2<q\x9a\xbf\x8ei\xf7\xfcJ\
&\x01\x90\xa9$i\xb5SB2\x0f\x06\x83p\xb9\x5c\
\xdc\x90^J\xe8\xb3\xc7\xe3\x81\xdb\xed\xc6\xf1\x13\xaf%\
\x9f}\xa1\x9cL;\x98\x8a\x99\x8e>\xc9xG\x00\x95\
J\xc5\x01\xa4\x15.\xcd7\x19RR:\xf7)\xb5\xc3\
\xe1\xe0\x22\xe3\xc5\xc5E\x0e\xf5\xe2\xf1\x97\x5c\xf4\x1e\xb9\
\x93\xe9\xae\x00---n\xe9`\xa1\xd4\xd2\x97\x0d\x8d\
\x97\x97\x97\xe1\xf3\xf9`\xb3\xd9\xf8}ii\x89C\x10\
\x00\x8d\x0b\x0b\x0b\xcd\xb2\x00\xd0\xa2\x92R\x93\x11\x8d\xe9\
N\xdfxT;5`\xb5Zy\xf5\xd4\x0a\xfd\xce`\
0$\xf2\xf2\xf2\xee\xb3g\x1c\xd9\x17@SS\x93[\
\x9agJO\x22\x13\xaa\x9a\xc6\x16\x8b\x997@\x9fG\
GG#mmm\xde\xfc\xfc|\x13\xfb\xdbA\xa6\xb2\
\xbd\x9a\xff'@ss3\x9f\x02JG\x10T?U\
???\xcf\xeb\xd6h4\x91\xba\xba:\xe7\xc3\xb4]\
L\x1f0\x1d\xcd\xc6xG\x00\xa5R\xe9v:\x9d\xbc\
bJJo>\x94\xb4\xbe\xbe\xde\x99\x93\x93#\x99\x16\
gSuV\x00\x8d\x8d\x8dn\x8b\xc5\x82\x81\x81\x81H\
mm\xad377WV\xd3\xdd\x00\xf8\x7fFL\xc2\
A\x99n\xd7\xdfC9V\x18\x85p\xc8\x04\x00\x00\x00\
\x00IEND\xaeB`\x82\
\x00\x00\x05+\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x04\xbdIDATX\xc3\xed\
WkL\x93W\x18>#q\xc92\xe9\x16\x97\xa8T\
e8\x9d\x02\x15\xf6\x03\x872\x93\x01f,[p\xc4\
0\xff`\xa2.\x1a:\x1dN\x03\xba1\x89[\xb3\x80\
\xd9\x0c\x84\x02\x19X\x1c\x14\x8b\x85\xb2\x82\x95^\xe4f\
\x0b\x8e1\xf8\xc3F\xcb-\x81\x15\xdc\xa8\xc2\x1c\x1b\xb7\
ji\x91\xf2\xee\xbc\x87\xaf\x0c\xdc\xb8\x0da\xd9\xb2\x93\
<\xed\x97\xf3}\xfd\xde\xe7\xbc\xef\xf3^J\x00\x80\xfc\
\x93 \xff\x0a\x02t\x09(D\x14\xd9\x14q\x14\x01+\
F\x80\xae\xddd\xdd\xc6f\x22L\xf8\x95\xc4\x8bG\xc8\
\xa1\xd3\xf7\xc8\x8e\x97;82a+A \x85\x9c\xbe\
0H.\xdd\x80\x19@2\xabyM\xf4\xbe\xfbr\x13\
hd\x06\x91\x04^\xa3Q\xf4\x06\xee\x85G\xf5\xd0\xbd\
\x83\xcbM \x9b\x9d\xf6@t/\xbd\x162= \x89\
?H\xa5,\x1b\x01\x8c1y\xc1\xbb\x9d\x88K\xc6\xd7\
\xc6&\x0e\xa0\x10\xb9\xfdB\xfe\xc5+6F\x8c\x12\x5c\
N\x02\x93\xa7\xa7\xa7\x0d\xcc\xd39\xb9\x98c6\x14\x0a\
\xd2\xe4\xa3+A \x8c)\x9e*\xdf7G\xeb\xdc{\
\xb5\xcc\x89\x9e@D\x96T\x83+,\x0b6FH\x08\
\x13\xf5d*{.T\x03\x01\xf8\x037\xbf\xc0\x0e4\
*T\xdfb\x88R\xd5,X\x03t\x1d\x16\x08\x04z\
EU\xf5\xc8\xa0mt\xc2\xd4s\xf7!\xbesQ\x95\
\x90\xae\x8f\xd0\x13\xcf\xe5\x94\x83\x87\xb4\x02\x9e\xcc.\x03\
\xd4\x06\xdd\xaf\x99\xcb\xb0\xaf\xaf\xaf>\xbf\xd2`\xb5\xdb\
\xed\x80\xf8y\xe4>\xc4^\xab\xb4\xb9\x88/\x86\x80'\
\xd3\xc0g\xf9\x8e\x19\xf5`\xd7^3\xbav\xdas\xee\
h\xd8\xc7\xc7G\x9f\xab\xab\xb0\x0e\x0f\x0d\xc1\x10\x87\xb2\
\xf6.\xe7\x967\xf7wsa\xd8\xbd\xe8^\x80/f\
\x9a\xa0\x86\xdf\xa96B\xf7\xf0\x03\xd8\x19\x9f\xd4\xcf\xa5\
\xe7\x1a\x8a\x98-~\xfem\x97T\x1ak__\x1f\xb8\
\xd0\xd1s\x07br\x15VN\xc4\x87\x97\xd4\x8c0\x14\
\xe9\x15\xb7\x1e8\x1c\x0e@\xa4\xd6\x191\x9e\x85\x9b\x05\
~m\xa9%\x1a[\x97\xd9\x0c\xe6.\x0a\xf3$\x14\xdf\
6\x8e{\xbd\x1e\xd1\xcdB\xc8\x09o\xa9\x04<\xd1\xbd\
V\xab\x15\x10w\x7f\x1b\x84\xf3\x92\x5c\xbbR\xa9\x84\xfa\
\xfaz0\x99L\x0cu\xdf5\xc1Q\xb1d\x18\xc9Q\
D>\xb6v\xcc\xb4@O\x93_~\xd3\xd6\xdf\xdf\x0f\
2\x99\x0cD\x22\x11\xa8T*\x90J\xa5\xa0\xd1h \
K[9\xbe\xe9\x95\xe0\x1f\xb8S\xafy,\xf3\x00\x97\
\x8e\x22\x9e\xc7\x86\xe6S)\x19\xf6\x82\x82\x02\xe6\xe2\xa0\
\xa0 \xe0\xf1x`\xb1X@[^\x01\xfb\xcf&\x0c\
-\xa6S\xceg\x94\xcf\x09L\x83\xe2[{\xe6\xc2`\
\x9a\xb2\x14\x14\x0a\x05\x88\xc5b\xc8\xcc\xcc\x84\xa2\xa2\x22\
P\xab\xd5\xd0\xd9\xd9\xc9`\xec\xfe\xc9\xb9\xc9\xdb\xa7u\
.\xb7\xcfK\x80\xae\xb7\xd8)p\x0e\xc0j\x97\xacx\
\x88\xca\x7f\x82\xe2)\x89\x0e>\x97+![\x96\x0f\x07\
c\xe3G\x84\x1f&\xd8\x92rd\x8eo\x1a\xbf\x07\xa3\
\xd1\x08-\xad-\xf0\xcb\xc0 \x1c8\xf1\xbe\x05\xb3b\
\xc1\x04\x5ci\x84\x85\x85\x84F\xdc&\xe72\xac,\xcf\
3\xb5\x13\xec;\xe3\xba\xd33\xaf\x82\xe5\xfez\x89\x06\
\x9e\xde\xfcb\x1b\xf7<\x92\x8d{f\xabO[\xca5\
\xedXCC=444\x80\xa5\xb7\x172\x14\xc5\xc3\
\xf3\xe9\xc0e<\x92\xe5(\x9e6]\xe5\x9c*2x\
}\xf4\x83.Zl\x121\x0c\x1b%\xeaq\xf7/\xcb\
'\xef\x05\x87_\xfe\xd3\xe4D\x0bLh\xf4\xc9>u\
\x95\x1e\x0c\x06\x03\xb4\xb7\xb7\xc3\xd7\xc6\x961\xae\x81\x09\
f\xf16m8h<I::e\xf8b\x81\x83D\
\xbdWC\xb6\x0a^\x9b*\xc3\x94\x5c\xb0B\x0f\xab$\
\xb4\x04\x9fJ\xaa\x9bC71(\xd4O\xf2\x0a\xc7t\
:\x1d\xd4\xd6\xd6\x82\xc9|\xdb\xb9a\x9b\xf7_\xeab\
\xb2\xe5~\x9cu\x1f\x0d\xf3\xb2\xd4N\xf2\xf6\xb1\xeb.\
\xb6\xae\x94\xc3\x90l\x97U\xc1KW\xab\x80\x9cMn\
Z\xd0\x1cI\xbd\xb1\xe7\x88\xb0\xef\xcaW\xc5PZZ\
\x0a\x1d?\xf6L\x04\x06\x87t<\xaa\x0b\xc2\x84F\x8d\
\x07\xc8o\x02\xd9\xf9\xaa~\x9a\xf10F\x8e6 \xaf\
\xbcJxCi\x00\x92(\x1d\x98\xcd\x95\xb3y\xc3}\
=\xbf\xf9Dj\xa6].\x97CSK+D\x1c{\
\xf7\xce\xf4\x14%\xae\xf1\x8a\xf5w\x9c\xf5p\x02\xc2\xd9\
\x0f\x89\xd1\x81\x03O\x8e\xf7\xdc\xd2i\xe7\xf3\xdfu\xfc\
o\x14.6\xd2\xef\xd8\x17iI\xbe,\x9d\xc8\xd3\x96\
;\xa7\x0f1\x8c%\xc6\xdf\x9f\xbaw_q5\xa0A\
l\xb5\x08\x8c\xf9\x94\xf1\xe0\xf03K\x9a|h\x13Z\
\xbd\xce\xa3\xd9kOH\xf7\x0c\x0f\xb0\x0f\xfe\xf3\x87\xc8\
\xf9/\xee\xb9In\x00\xf6{>\xed\xf7\x08\x1e*>\
]\xe5X\xaa\xf1GZ\xf5\xb6Y\x0b\x11\x1d\xb3C\xc9\
\x918\x099\xf9\xa9\x96!\xfa\x5c\x1a\x0d\xcf\xb3\xff\xff\
7\xfcO\x13\xf8\x1d\xe7\x87\x19\xb9D\xc3\x01\xcf\x00\x00\
\x00\x00IEND\xaeB`\x82\
\x00\x00\x06m\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x064IDATx^\xad\x97[lT\xc7\
\x1d\xc6\x7fs\xce\xd9\x8b\xbd\xf6\xfa\x16\xa0\xbe\x00\x0e\xb2\
ic$BJ!\x22\xa1-\x95b\xa5/\xeeKh\
+\x95\xa6U\xa5\xc6`U\xaa\xda\xb4\xaa\xfaV\x09U\
\xca\x03\x94'\xda\x07\x84\x14)\xad\xc4\x8b\xa5R\x83y\
\x08\xc5\x189\x0ei\xd3\x84\x9a\x9bcj\xec\xb2\x04\x1b\
;\xbb\xf6z\x8f\xbd\xbb\xde\xb3g\xa6\xc3h\x85\xe5r\
l\x88\xc9'}\xfa\x9f\x9d\x87\xfd~\xf3\x9f\x99s\x11\
J)\x82$\x84x\x05x\x9e\xc7kH)\xf5w\xd6\
(' \xb8C\xbb\x01h\x97R\xbe\xc6cdY\xd6\
\x07\x1a\xf6\xbb@\xb7\x069\xff\x14\x00&\xfc\xb7\xed\xf5\
\xe2`]DDn\xce\x89\x8a+W\xaeP]S\x8d\
@\x00\xa0P\x08e(A)f\xd3i^\xa9\x17/\
\xbc\xb4Nl;\xf1\x1f\xb9G\x83|[CL<M\
\x07\xf6\xff`\x8b\xdd,%\xf8J2<<Lee\
%+\xc9u]\x1e\xc0n\xa9\xb0\x22\x1b\xa2*r?\
\xa7\xea\x81\xb5\x03\x08-\x05H\xa1\x0d\xf4]\xbcH.\
\x97\xc3/\x16QJ\x91\xcf\xe7Y\x5c\x5c\xa4P(P\
\xd4c\xb5\xb5\xb5\x94\x01X\x80\xf8\x82\xf6\x80\x01\x006\
D\x05\x1f\x0f\xbcK>;\x8f\x85D\x952\xe2\xb6\xc4\
\xb6\x04!!p>Sl\x8c;\x80D*\x04\xf0\x9c\
\x10\x02\xe0\xcb@\x05P\x0f4`\xc4Hi\x9f$\x02\
\x01N\x9c8!\x00\x81\x05\xd2\x87\x96\x96g\x09em\
\x14\xe5(\xa5\xb4A\x08XW\x19%\xe2\xd8DB\x16\
\xc3\x13s\x5c\xbc=A\xf7X\x8e\x5c$\xbe\xa9\xbd}\
\xf7\xef-\xcbZ\xdc\xb1cGYUU\x95\xd3\xd8\xd8\
\x18~\xe0\x86\x86\x86\xd0\xa5K\x97\xdc\xae\xae\xae\x08\xf0\
\xd6\xaa\x1d\x00\x13DU,\xc2s\xd51\xf2\x9eO\xa1\
(\x91Ja\x09A\xd8\xb1\x88\x86l\xe6r\x05\x12\xa2\
\x8e?\x9f\xff+\x0dM\x1b\x01\x22\xc0f\x96\x84\xef\xfb\
x\x9eGuu\xb5\x9ePK\xf4\xea\xd5\xab\x87\x84\x10\
(\xa5\xdeZ\x11\xc0\xb2A\x00\xb6-\x90\xda\xb6\x148\
\x08\xa4\x12X\xc2\x8c\x1b\x8fL\xb9\xec{\xf5;\xd47\
6\x11|/\xc1\x84g2\x19\xca\xcb\xcb\xcdf>v\
\xec\xd8&\xbd\x7f\x0e.A,\x01\xd0\xd9\xd9\xa9\x0e\x1d\
:\xa4l!\x08Y\x10\xb6-\x1c\xc7\xc6BP\xb4\xcd\
\x1a\x1b\x00\xc7\xb2\x888\x96\xae\x02`Yx\x10\xc0\xdc\
\xdc\x1c555\x06 \x1a\x8dr\xe4\xc8\x91\xcd\xc0\x03\
\x88\x1b\x1a\xa2\xc7b\xb9\xb0mt0f\x8d\xcb#6\
\xb1\xa8\xa3\xc7,2\x8b\x1e\x93\x99\x1cc\xa9y\xee\xcc\
.\xe8\xdfEr\xf9<\xab\xc8,A6\x9b5\xa7f\
\xe9\xffm\x0e\x1c8\xb0\x1e\xe8\x00X\x06\xa0\xb4t\x16\
\x8e\x0d\xe1\x90\xc0S\x8a\xb1\xa4\xcb\x8d\x8c\x83\xd3\xb2\x97\
\xa6}\xaf\xb3\xb5\xe3\x17\xac\xdb\xfb:\x0d/\xb4s\xfb\
\xce$\xfd\xfd\xfd$\x93I\x94R\xe6\xfa\xf8\xf1\xe3\xe8\
\xba\xac3\xe7\xce\x9d\xe3\xe8\xd1\xa3\x1c>|\x98\xde\xde\
^\x12\x89\x84\x04,\xa1\x15\xdc\x01\xed\xff\xce\xe6\xf8\xe7\
\x94Ok\xc7\xcf\xf8\xe6/\xdf&\xf6\xf57\x99|\xa6\
\x83k\xfe.\xae\xf1-dk\x17\xad{\x7fN^V\
s\xfaog\xd1wM\xee\xdc\x9d\xe2\x1b\xafvr\xfd\
\xfau\x03\xa0gk\xd6?\x16\x8b\x99\xebx<\x8e\xe3\
8%8\x04\xc0#\x00\x96%\x98\xcaA:\xde\xca\xfe\
\xdf\xbdM\xd5\xae\xd7(\x84b\x08\xdbBY\x82lA\
r\x7ff\x91O\xeef\x18\xb8\xear\xfa\x1fad\xd5\
^\xae\x8f\xdcg2\xd7\xc6\x85\x0f\xee\x9b\x00\xed\x87\xa1\
\xcd\xcd\xcd\xb4\xb5\xb5\x19755\xa1\xa1\x14 \x83\x1f\
F\x16\xdcq\x15\xdf\xff\xe9o\xa8l\xd8H\xe2\xec;\
L\x8f^\xc3\x89\x94\xb1\xb5y\x07\x9b[\xb6\xf3Iy\
%c\x09\x97\xcff\xf2\xdc\x9d\xce2\xa1\xed\x88\x0dL\
'\xe7\xd8\xb7+\xca\xfa%\x003{=k\xea\xea\xea\
\x00\xccu*\x952\x00J+\x10\xa0\xb9Zp\xe1\x9d\
c(,\xca\xe6\xc6\xd9\x10\x8fR\x94\x92{\xc3}$\
e\x05\xdb\xda\x7fLM\xdb\xcb|<\x9cf\xd2_\xc0\
\xcdx,\xcck/x \x00\xb5t:B\xa1\x90\x09\
-\xdd\xea\x1f\x8e\x01*\xf8>`\xc1\xc6\xb8\xa0P\x1c\
#\x1c\x8bS\xb7\xa5\x96\x92xv}\x05\xe9\xac\xc7h\
\xff\x9f\x98\xae\xbcL\xcb\xf6\x83\xb8\x0ba\xbc\x82\xa4X\
\x94x\xda!\xc7B-\xaa\x80\xe3i\xa0\x96\xd5\x15\x01\
\x00\xd6\xc7C\x84\xca#\xfc\xbfjc!\x9e\xa9\x0cs\
\xe1\xdf\x83\xec\xd9\xf9\x13\xca\xa3\x0e\xb92G\x03(\x03\
ak\x00\x16K!\xa5\x1c%0*\x15\xa4\x5c\x05@\
X\xa5*\xcc\xf5#\xfapl\x86\xf1Y\x8f\xef\xfd\xfa\
\x8f\xdc\xca\xd4\xe0D\x5c\xa2\x11\x1b\xcf\x93\x14=\x07\xd3\
\x01\xa5\x90R\xf2PjY\x01V\x05\x10\x08L\x0d\x04\
\x18\x9dv\xf9\xd5_\x86\x18\xbd\xb7\x80=\x93g\xd3\xba\
2\xf2y_\xbbh\xea\xce\xaf\xd4p\xf9\xdd\xe0%\x00\
\x9ex\x09L\xb8\x10<\xa2\xd6/U\xf2\x87\x1f>\xcf\
\xf5O3D\x1b\xb7\xb1\xf3\xc5\x97Y\x12\x5cN`\x8e\
\xdbS\x01(\xc0\x12%\x00m\xd4R}\xb1\xb5\x96\xdd\
[\xe2t\xbf\x97\xa5j\xf7W\xf9\xd1\x1bo\x10\xa0\xb5\
\x03\x98\xb57\xd5\xd8\x08\x01\xd2\xcbSpSx\xf33\
\x14\xb3i\x0a\x19\x1f%\xfd\xd5\x82\xd6\x08\xf0\xf0)\xe7\
\xe3\xe73\x14\xe6u\xa8\x0e\xd6\x00\xcb\xf7\x89\x10\xc13\
}\xfa\xd7r\x8c\xb2\x137\x03\xc7\x01\xb2\x1e\xfe\xad\x94\
\xcco\xf7DT\x03\xd8_p\x07\x08\x92\x09\xfd\xd7=\
?\xfd~B\xa6\xcf\xdf\xf6\xef\x02\xeev;\xfc\x92\x06\
\xa8\xe3s\xcau]\x1fpW\xed\x00@2\xab\x0a\x1f\
~*\xd3\xbd\xb7\xfc\xd4\xcdi9\x05\xf4\x03\x97th\
\xbf\x10\xa2\xd3\xb6\xed\xaf}\x9e%XXX\xf0\x07\x06\
\x06\xd2'O\x9e\x9c\x06\xba\x83\x00>\x1aI\xca\xad\xe3\
\xb3*\xd7;\xe2\xa7nL\xcb\xd1R\xe8Y\x1dt\x8b\
\x00=\x09\xc0\xd0\xd0\x90\xdb\xd3\xd3\x93\xd2N\xcf\xce\xce\
\x9e.\xbd\x1d\xdf\x08\x02\xe8\xee\xea)\x00\x8c\x04\x84\x06\
\x85\xaf\x08055U\xd0/\x22\xa9S\xa7N%\xc7\
\xc7\xc7/\x03g\x81~\x1d\xec\xae\xb8\x09K\xdfv\xda\
O&\x85\x01@\x08@aZ\xfc\xde\xe0`\xba\xbb\xbb\
;\xa5\xdf\x8a\xcc$\xd0^\xeds\xcda\xed\x9aw3\
n\x11`p\xf0\xfdt___\xfa\xcc\x993\xa6\xc5\
\xa5\xd0\x8fx\x02\x89\xb5\x9ec!D\x18x\x13\xd8O\
is\x06\xb4\xf8\xb1\xfa\x1f\xbd\xfa*_\xf2\xd8\x15\x9d\
\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x04\xa3\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\
\x00\x00\x00\x04gAMA\x00\x00\xd6\xd8\xd4OX2\
\x00\x00\x00\x19tEXtSoftware\
\x00Adobe ImageRead\
yq\xc9e<\x00\x00\x045IDATX\xc3\xe5\
\x97\xcd\x8fTE\x14\xc5\x7f\xb7\xea\xd6{\xaf\xdbn\xc7\
\xf9@\x9d\x89FM4\x99D\x8d\x1aH\x98\xc4\x8c\x1f\
\x1b\xfe\x02L\x5c\xf1\x07\x18\x16.M\x5ckX\xc3\x8e\
\xc4\x8d\x1b\x17\xce\x82htA\x5c\x18\x0d\xe2\xc4\xc6\x00\
=`PQ\x19`\x02\xa2\x0e\x0c\x83\xd3\xfd^\xf7\x94\
\x8b\xaa\xee\xf9`\xe6\x0d\x84Q\x16VR\xa9\xce{\xb7\
\xeb\x9e:\xf7\xd4\xa9z\xea\xbd\xe7~6\xe5>\xb7>\
\x80]\xbbv\xbd\x03\xec\xfd\x8f\xf2N5\x1a\x8d\x03\xeb\
\x19\xd8\xbb\xef\xbd\xa3;\x1f\x1fv\x00\x9c<:\xcf\xcc\
\x977X\x9c\xef\xdcS\xa6\xda\xa0\xf2\xdck\x03\xbc\xb8\
g\x10\x80\x8b\x7f\x16|\xf8\xee\x1e\x80\xdb\x00p\xfc\xec\
\x1c\xdf?0\x04x.\xfd\xb8\xc0\xfe\xb7\xceo\xcbr\
\x0f\x1dy\x9a\x0b#\x96\xd3\x9f\x1fd\xfc\xd5}\x9bk\
@E\xb0\x16@xp,#\xcb\xb2m\x0100\x96\
a\x8dP\x1b|\x14#%\x22\x14+\xd8\x18\x91\xd5\x95\
s\xe7\xce\x83*\xb8\x04\xd2\x14\xb2\x0c\xd2,\x8cI\x0a\
I\x12\xdew:\x90\xe7\x90\xb7\xa1\xd5\x82v+\x8em\
(r\xb2\xfa8\xd6\x0a\xe3\xaf\xbcIk\xf1\xfa\xe6\x00\
\xac\x15\xac\x15\x04\xb0F\xd8\xbd{\xe7\x16k\xeb\x86\xae\
\x80Z\xa8V\x81\xeamQ\x8d\xaf\x04\xb5\x82\xf7\xa0\xa6\
\x84\x01g\x055\x82\x08\xa8\x0a\x95,\xc3# \x1e\x08\
\xc0\xf0\x1e/\x02\xde#\x12&\x15|\x88#\xc4!\x1e\
<!^@MX\x18@\xd7J\x89\x06\xac\xa0\xdac\
\x00\x9a3\xbf\x05\x8aS\x07i\x02\x95\x04\xb24\xf6\x04\
\x12\x07N\xa1\xe8@^@+\x8f\xbd\x05K9\xb4s\
\xc8\x0bT\x87q=\x00*\xe5%p1@\xd509\
\xf9\xd2\xd6\x0a\xf3>\xd0\xaf\x16\xaa\x1b\x8b\xf6\xd8'a\
a\xbd\x1c%% \x00\xf0\x81\x8d4M\xa3:\xc3\xb3\
\x98\x11\x89l\x07\xdac\x09V\x98_)F\xfca\xcd\
r\x7fa\x1d-\xd1\x80:\x09TI\x18O4/\xe0\
\x9d\x85\xc4!\x89\xc3g\x09\x92i\xd8\x11\x89\xe2\x13\x87\
X\x8b\xefv\x91\xbc\x80\xbc\x03\xed\x02\xdfj#\xed\x02\
\xf2\x02\x9fwP\x1dE\xd5 x:\xebTx\x9b\x06\
\x9c3x\x0f\x03\x8f$\xbc\xfe\xf2\xf3wh\xe86h\
\xa4\xbe\xf1\xeb\xc6\xfc\xdf\xb1\x04R^\x82DM_\x84\
\x8f\x0d\xa58\xe7\xb6\xc5\x88\x9e\x18K\xb9v\xb3\x03\x08\
\x9dR\x11\xaa\x90\xb8P\xefZ\xc50}\xb1\xcb@\xc5\
\xb0\x0e\xf4&\xadW\xf9U.\xe1\xe1\xc6\xd22\xf5\xcc\
p}\xc9\x84-\xe9J\x19\x10\x9c\x1a\xc0s\xe5f\x97\
+7\xbb\xacQW?\xd7\xaad~\xc5'\xa2)\xac\
\x05\x15\xc3\x9c\x0b\xb5w\xa6l\x17\xa8\xc1\xa9 \xc8\x1a\
5\xaf\x9b5\x1a\x8fY1\x9e\xfe{\xe9\xef\x14\x00\xf1\
\x82\xef\x9bX0+WV\x02U!\xd1\x90\xfc\xe7S\
\xdf\xf2\xeb\x99\x13,-\xde\xb8\xa7\xfaWj\x03<\xf5\
\xecN\x9eya\x02\x0f\xa83[1\x10\x03|\x87\xf7\
\xf7\xbf\xc1\xc2\xc2\x02\xb7n\xdd\xa2(\x0aD\x04k-\
\xd6ZT\x15U\xc59\x87\xaab\xad\xc5\x98\xf0\xdf\xe5\
\xe5e\xf2<\xef\xf7#\xcd\xf9\xb8\xf2-\x18pVP\
\x17\x18\xdc1:\xb6rO8~\x9c\xe9\xe9i\x8c1\
x\xef\x99\x98\x98`rr\xf2\x8eY\xd81:\xd6\xdf\
\x86\xae\xd4\x09Up6\xac\xa2V\xaf\xf7k933\
\xc3\xd0\xd0\x10\xd6Z\xbc\xf74\x9b\xcd\xbb\x02P\xab\xd7\
p\xd1\x88\xb4\xd4\x88\x14\x9c\x0b'\x5c\xa0*\x00\xa8V\
\xabdY\xd6\xa7\xb87\xdeis\x1a\xa9\x17AK\xad\
8\x1e\xc7\xbd#\xb4\xd7\x8c1\x88D\xdf\x8f:\xb8\xab\
\x9b\xaf5\xa8\x0d\xf3\xf6\x18.=\x8e\x83)m\xe3\xd5\
\xdb\x12\xa9\xf7\xe5Vl\xad\xf4\x91\x0e\x8e\x0c\xc3\xf2\xef\
\xdb\x02\xe0\xa1\x91a\xd4\xc2\xb5+\x97Y\x9c\xbf\xbe\x05\
\x036\xf8\xc0`\xad\x02\x0b\xdb\xc3\xc0P\xad\xc2\xec\xc5\
K\x9c\xfd\xee\x1b\xce\x9f\x9c\x9e\x03\xa66\x04`$^\
J\x05\x12\x0b\xed\x91'\xa9=\x0co\x1f8\xc8f\xc7\
\x81':\xf1*\xe75\x1e2\x81\x14(\xbap\xf9\xea\
U\xce4\x8e\xd1\xfc\xfa\x8b\xb9\xd9\x1fN\x1d\x02\x0eo\
\x08\xe0\xb3\x8f>\xe0\xa7\xd3'W\x99\xe9\xda\xa3\x86U\
\xe6\xbb\x1e\x04\x1b<_\x1do|w\xee\x8f\xd9_\x0e\
\x01\x87\x1b\x8d\xc6_\x1b\x01\x98\x9a\xfe\xf4\xe3\x7f\xf5s\
l}\xf25\x00\xe2\xb7\xda\x81\xff\xdd\xd7\xf1?M\xf0\
K\xb9\xe8F\x89\xaf\x00\x00\x00\x00IEND\xaeB\
`\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03}\xc3\
\x00i\
\x00m\x00a\x00g\x00e\x00s\
\x00\x08\
\x06\xc1Y\x87\
\x00o\
\x00p\x00e\x00n\x00.\x00p\x00n\x00g\
\x00\x07\
\x04\xcaW\xa7\
\x00n\
\x00e\x00w\x00.\x00p\x00n\x00g\
\x00\x08\
\x06|Z\x07\
\x00c\
\x00o\x00p\x00y\x00.\x00p\x00n\x00g\
\x00\x07\
\x0a\xc7W\x87\
\x00c\
\x00u\x00t\x00.\x00p\x00n\x00g\
\x00\x09\
\x0a\xa8\xbaG\
\x00p\
\x00a\x00s\x00t\x00e\x00.\x00p\x00n\x00g\
\x00\x08\
\x08\xc8Xg\
\x00s\
\x00a\x00v\x00e\x00.\x00p\x00n\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00(\x00\x00\x00\x00\x00\x01\x00\x00\x08\x1d\
\x00\x00\x01yCj\xf6\xed\
\x00\x00\x00<\x00\x00\x00\x00\x00\x01\x00\x00\x0bu\
\x00\x00\x01yCj\xf6\xec\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01yCj\xf6\xee\
\x00\x00\x00~\x00\x00\x00\x00\x00\x01\x00\x00\x1cS\
\x00\x00\x01yCj\xf6\xf0\
\x00\x00\x00f\x00\x00\x00\x00\x00\x01\x00\x00\x15\xe2\
\x00\x00\x01yCj\xf6\xee\
\x00\x00\x00R\x00\x00\x00\x00\x00\x01\x00\x00\x10\xb3\
\x00\x00\x01yCj\xf6\xed\
"
def qInitResources():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| true | true |
f72696fc0cf591521dd786bc37d9a5424a0b4f8b | 298 | py | Python | tests/test_singleton_container.py | ClanPlay/Python_IoC | fad7a3053a7a2193474caa5918508307cfd7b79a | [
"MIT"
] | 4 | 2019-02-20T19:44:32.000Z | 2019-02-22T13:32:25.000Z | tests/test_singleton_container.py | ClanPlay-Market/clanplay_Python_IoC | fad7a3053a7a2193474caa5918508307cfd7b79a | [
"MIT"
] | 1 | 2019-02-20T14:48:04.000Z | 2019-02-20T14:48:23.000Z | tests/test_singleton_container.py | ClanPlay/Python_IoC | fad7a3053a7a2193474caa5918508307cfd7b79a | [
"MIT"
] | null | null | null | from flying_ioc import IocManager
class TSingleton1:
def __init__(self):
pass
def test_singleton_container():
ioc = IocManager(stats=True)
ioc.set_class(name='singleton1', cls=TSingleton1, singleton=True)
assert ioc.singleton1 is ioc.singleton1
ioc.print_stats()
| 17.529412 | 69 | 0.721477 | from flying_ioc import IocManager
class TSingleton1:
def __init__(self):
pass
def test_singleton_container():
ioc = IocManager(stats=True)
ioc.set_class(name='singleton1', cls=TSingleton1, singleton=True)
assert ioc.singleton1 is ioc.singleton1
ioc.print_stats()
| true | true |
f72698682f1d9db8336712cdc43ed2619483c604 | 1,583 | py | Python | 61dust.py | krikor-s/homework | 63c6711fe0ea64f5d3087673ac931dd0503be546 | [
"MIT"
] | null | null | null | 61dust.py | krikor-s/homework | 63c6711fe0ea64f5d3087673ac931dd0503be546 | [
"MIT"
] | null | null | null | 61dust.py | krikor-s/homework | 63c6711fe0ea64f5d3087673ac931dd0503be546 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# 61dust.py
import argparse
from fileinput import filename
import mcb185 as mcb
# Write a program that finds and masks low entropy sequence
# Use argparse for the following parameters
# sequence file
# window size
# entropy threshold
# lowercase or N-based masking
# The program should output a FASTA file (but with Ns or lowercase)
# Use argparse
# Use the mcb185.read_fasta() function
# Put more functions in your mcb185.py library
parser = argparse.ArgumentParser(description='Outputs of masked low entropy sequence.')
# required arguments
parser.add_argument('--fasta', required=True, type=str,
metavar='<str>', help='required FASTA file')
parser.add_argument('--wins', required=True, type=int,
metavar='<int>', help='required window size')
parser.add_argument('--ethresh', required=True, type=float,
metavar='<float>', help='required entropy threshold')
# switches
parser.add_argument('--lowercase', action='store_true',
help='lowercase or N based masking. N-based default')
# finalization
arg = parser.parse_args()
file = arg.fasta
wins = arg.wins
ethresh = arg.ethresh
lcase = arg.lowercase
for name, seq in mcb.read_fasta(file):
seq = seq.upper()
#create mask sequence
output = ''
for i in range(0, len(seq)-wins+1, 1):
prob = mcb.ntprobs(seq[i:i+wins])
entropy = mcb.e_calc(prob)
if entropy > ethresh:
output += seq[i]
else:
if lcase:
output += seq[i].lower()
else:
output += 'n'
output += seq[-wins+1:]
#output fasta file
print(f'>{name}')
for i in range(0, len(output), 60):
print(output[i:i+60]) | 28.267857 | 87 | 0.713834 |
import argparse
from fileinput import filename
import mcb185 as mcb
parser = argparse.ArgumentParser(description='Outputs of masked low entropy sequence.')
parser.add_argument('--fasta', required=True, type=str,
metavar='<str>', help='required FASTA file')
parser.add_argument('--wins', required=True, type=int,
metavar='<int>', help='required window size')
parser.add_argument('--ethresh', required=True, type=float,
metavar='<float>', help='required entropy threshold')
parser.add_argument('--lowercase', action='store_true',
help='lowercase or N based masking. N-based default')
arg = parser.parse_args()
file = arg.fasta
wins = arg.wins
ethresh = arg.ethresh
lcase = arg.lowercase
for name, seq in mcb.read_fasta(file):
seq = seq.upper()
output = ''
for i in range(0, len(seq)-wins+1, 1):
prob = mcb.ntprobs(seq[i:i+wins])
entropy = mcb.e_calc(prob)
if entropy > ethresh:
output += seq[i]
else:
if lcase:
output += seq[i].lower()
else:
output += 'n'
output += seq[-wins+1:]
print(f'>{name}')
for i in range(0, len(output), 60):
print(output[i:i+60]) | true | true |
f726986d83879eef6d553b88636ef538361fe2f9 | 14,545 | py | Python | qa/rpc-tests/dip4-coinbasemerkleroots.py | Wildfire-new/Wildfire-MN-POW | 0434339c79fa8525408b8edd1ff003be4f367e1c | [
"MIT"
] | 1 | 2021-01-22T17:22:27.000Z | 2021-01-22T17:22:27.000Z | qa/rpc-tests/dip4-coinbasemerkleroots.py | Wildfire-new/Wildfire-MN-POW | 0434339c79fa8525408b8edd1ff003be4f367e1c | [
"MIT"
] | 4 | 2020-12-01T01:38:47.000Z | 2021-01-21T19:19:57.000Z | qa/rpc-tests/dip4-coinbasemerkleroots.py | Wildfire-new/Wildfire-MN-POW | 0434339c79fa8525408b8edd1ff003be4f367e1c | [
"MIT"
] | 1 | 2021-08-25T06:44:01.000Z | 2021-08-25T06:44:01.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from collections import namedtuple
from test_framework.mininode import *
from test_framework.test_framework import WildfireTestFramework
from test_framework.util import *
from time import *
'''
dip4-coinbasemerkleroots.py
Checks DIP4 merkle roots in coinbases
'''
class TestNode(SingleNodeConnCB):
    """Minimal P2P client that caches the most recent MNLISTDIFF reply."""

    def __init__(self):
        SingleNodeConnCB.__init__(self)
        self.last_mnlistdiff = None

    def on_mnlistdiff(self, conn, message):
        # Framework callback: remember the latest mnlistdiff message.
        self.last_mnlistdiff = message

    def wait_for_mnlistdiff(self, timeout=30):
        """Clear the cache, then block until a fresh mnlistdiff arrives."""
        self.last_mnlistdiff = None
        return wait_until(lambda: self.last_mnlistdiff is not None,
                          timeout=timeout)

    def getmnlistdiff(self, baseBlockHash, blockHash):
        """Send GETMNLISTD for the given block range and return the reply."""
        self.send_message(msg_getmnlistd(baseBlockHash, blockHash))
        self.wait_for_mnlistdiff()
        return self.last_mnlistdiff
class LLMQCoinbaseCommitmentsTest(WildfireTestFramework):
    """Functional test for DIP4 coinbase merkle roots.

    Verifies that the coinbase transaction's merkle roots over the
    deterministic masternode list and the active quorum commitments are
    consistent with what the P2P MNLISTDIFF message and the 'protx diff'
    RPC report.
    """
    def __init__(self):
        # 6 nodes total, 5 of them masternodes; fast DIP3 enforcement
        # shortens the setup phase.
        super().__init__(6, 5, [], fast_dip3_enforcement=True)
    def run_test(self):
        self.test_node = TestNode()
        self.test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
        NetworkThread().start()  # Start up network handling in another thread
        self.test_node.wait_for_verack()
        self.confirm_mns()
        null_hash = format(0, "064x")
        # Check if a diff with the genesis block as base returns all MNs
        expectedUpdated = [mn.proTxHash for mn in self.mninfo]
        mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated)
        expectedUpdated2 = expectedUpdated + []
        # Register one more MN, but don't start it (that would fail as WildfireTestFramework doesn't support this atm)
        baseBlockHash = self.nodes[0].getbestblockhash()
        self.prepare_masternode(self.mn_count)
        new_mn = self.mninfo[self.mn_count]
        # Now test if that MN appears in a diff when the base block is the one just before MN registration
        expectedDeleted = []
        expectedUpdated = [new_mn.proTxHash]
        mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
        assert(mnList[new_mn.proTxHash].confirmedHash == 0)
        # Now let the MN get enough confirmations and verify that the MNLISTDIFF now has confirmedHash != 0
        self.confirm_mns()
        mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
        assert(mnList[new_mn.proTxHash].confirmedHash != 0)
        # Spend the collateral of the previously added MN and test if it appears in "deletedMNs"
        expectedDeleted = [new_mn.proTxHash]
        expectedUpdated = []
        baseBlockHash2 = self.nodes[0].getbestblockhash()
        # NOTE(review): 'remove_mastermode' looks like a typo for
        # 'remove_masternode', but it must match the framework's method
        # name — confirm against WildfireTestFramework before renaming.
        self.remove_mastermode(self.mn_count)
        mnList = self.test_getmnlistdiff(baseBlockHash2, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
        # When comparing genesis and best block, we shouldn't see the previously added and then deleted MN
        mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated2)
        #############################
        # Now start testing quorum commitment merkle roots
        self.nodes[0].generate(1)
        oldhash = self.nodes[0].getbestblockhash()
        # Test DIP8 activation once with a pre-existing quorum and once without (we don't know in which order it will activate on mainnet)
        self.test_dip8_quorum_merkle_root_activation(True)
        for n in self.nodes:
            n.invalidateblock(oldhash)
        self.sync_all()
        first_quorum = self.test_dip8_quorum_merkle_root_activation(False)
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()
        # Verify that the first quorum appears in MNLISTDIFF
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(first_quorum, 16))]
        quorumList = self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
        baseBlockHash = self.nodes[0].getbestblockhash()
        second_quorum = self.mine_quorum()
        # Verify that the second quorum appears in MNLISTDIFF
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(second_quorum, 16))]
        quorums_before_third = self.test_getmnlistdiff_quorums(baseBlockHash, self.nodes[0].getbestblockhash(), quorumList, expectedDeleted, expectedNew)
        block_before_third = self.nodes[0].getbestblockhash()
        third_quorum = self.mine_quorum()
        # Verify that the first quorum is deleted and the third quorum is added in MNLISTDIFF (the first got inactive)
        expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
        expectedNew = [QuorumId(100, int(third_quorum, 16))]
        self.test_getmnlistdiff_quorums(block_before_third, self.nodes[0].getbestblockhash(), quorums_before_third, expectedDeleted, expectedNew)
        # Verify that the diff between genesis and best block is the current active set (second and third quorum)
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(second_quorum, 16)), QuorumId(100, int(third_quorum, 16))]
        self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
        # Now verify that diffs are correct around the block that mined the third quorum.
        # This tests the logic in CalcCbTxMerkleRootQuorums, which has to manually add the commitment from the current
        # block
        mined_in_block = self.nodes[0].quorum("info", 100, third_quorum)["minedBlock"]
        prev_block = self.nodes[0].getblock(mined_in_block)["previousblockhash"]
        prev_block2 = self.nodes[0].getblock(prev_block)["previousblockhash"]
        next_block = self.nodes[0].getblock(mined_in_block)["nextblockhash"]
        # Bug fix: next_block2 must be the successor of next_block.  The
        # original fetched mined_in_block's successor twice, so next_block2
        # was always equal to next_block and the checks below were weakened.
        next_block2 = self.nodes[0].getblock(next_block)["nextblockhash"]
        # The 2 block before the quorum was mined should both give an empty diff
        expectedDeleted = []
        expectedNew = []
        self.test_getmnlistdiff_quorums(block_before_third, prev_block2, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, prev_block, quorums_before_third, expectedDeleted, expectedNew)
        # The block in which the quorum was mined and the 2 after that should all give the same diff
        expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
        expectedNew = [QuorumId(100, int(third_quorum, 16))]
        quorums_with_third = self.test_getmnlistdiff_quorums(block_before_third, mined_in_block, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, next_block, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, next_block2, quorums_before_third, expectedDeleted, expectedNew)
        # A diff between the two block that happened after the quorum was mined should give an empty diff
        expectedDeleted = []
        expectedNew = []
        self.test_getmnlistdiff_quorums(mined_in_block, next_block, quorums_with_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(mined_in_block, next_block2, quorums_with_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block, next_block2, quorums_with_third, expectedDeleted, expectedNew)
        # Using the same block for baseBlockHash and blockHash should give empty diffs
        self.test_getmnlistdiff_quorums(prev_block, prev_block, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(prev_block2, prev_block2, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(mined_in_block, mined_in_block, quorums_with_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block, next_block, quorums_with_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block2, next_block2, quorums_with_third, expectedDeleted, expectedNew)
    def test_getmnlistdiff(self, baseBlockHash, blockHash, baseMNList, expectedDeleted, expectedUpdated):
        """Fetch a diff, check deleted/updated MNs, and verify the coinbase
        merkle root over the resulting MN list.  Returns the new MN list."""
        d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
        # Assert that the deletedMNs and mnList fields are what we expected
        assert_equal(set(d.deletedMNs), set([int(e, 16) for e in expectedDeleted]))
        assert_equal(set([e.proRegTxHash for e in d.mnList]), set(int(e, 16) for e in expectedUpdated))
        # Build a new list based on the old list and the info from the diff
        newMNList = baseMNList.copy()
        for e in d.deletedMNs:
            newMNList.pop(format(e, '064x'))
        for e in d.mnList:
            newMNList[format(e.proRegTxHash, '064x')] = e
        cbtx = CCbTx()
        cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
        # Verify that the merkle root matches what we locally calculate
        hashes = []
        for mn in sorted(newMNList.values(), key=lambda mn: ser_uint256(mn.proRegTxHash)):
            hashes.append(hash256(mn.serialize()))
        merkleRoot = CBlock.get_merkle_root(hashes)
        assert_equal(merkleRoot, cbtx.merkleRootMNList)
        return newMNList
    def test_getmnlistdiff_quorums(self, baseBlockHash, blockHash, baseQuorumList, expectedDeleted, expectedNew):
        """Fetch a diff, check deleted/new quorums, and verify the coinbase
        quorum merkle root (CbTx v2+).  Returns the new quorum list."""
        d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
        assert_equal(set(d.deletedQuorums), set(expectedDeleted))
        assert_equal(set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]), set(expectedNew))
        newQuorumList = baseQuorumList.copy()
        for e in d.deletedQuorums:
            newQuorumList.pop(e)
        for e in d.newQuorums:
            newQuorumList[QuorumId(e.llmqType, e.quorumHash)] = e
        cbtx = CCbTx()
        cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
        # merkleRootQuorums only exists from CbTx version 2 on (DIP8 active).
        if cbtx.version >= 2:
            hashes = []
            for qc in newQuorumList.values():
                hashes.append(hash256(qc.serialize()))
            hashes.sort()
            merkleRoot = CBlock.get_merkle_root(hashes)
            assert_equal(merkleRoot, cbtx.merkleRootQuorums)
        return newQuorumList
    def test_getmnlistdiff_base(self, baseBlockHash, blockHash):
        """Fetch MNLISTDIFF via P2P, validate the coinbase merkle proof,
        and cross-check every field against the 'protx diff' RPC."""
        hexstr = self.nodes[0].getblockheader(blockHash, False)
        header = FromHex(CBlockHeader(), hexstr)
        d = self.test_node.getmnlistdiff(int(baseBlockHash, 16), int(blockHash, 16))
        assert_equal(d.baseBlockHash, int(baseBlockHash, 16))
        assert_equal(d.blockHash, int(blockHash, 16))
        # Check that the merkle proof is valid
        proof = CMerkleBlock(header, d.merkleProof)
        proof = proof.serialize().hex()
        assert_equal(self.nodes[0].verifytxoutproof(proof), [d.cbTx.hash])
        # Check if P2P messages match with RPCs
        d2 = self.nodes[0].protx("diff", baseBlockHash, blockHash)
        assert_equal(d2["baseBlockHash"], baseBlockHash)
        assert_equal(d2["blockHash"], blockHash)
        assert_equal(d2["cbTxMerkleTree"], d.merkleProof.serialize().hex())
        assert_equal(d2["cbTx"], d.cbTx.serialize().hex())
        assert_equal(set([int(e, 16) for e in d2["deletedMNs"]]), set(d.deletedMNs))
        assert_equal(set([int(e["proRegTxHash"], 16) for e in d2["mnList"]]), set([e.proRegTxHash for e in d.mnList]))
        assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["deletedQuorums"]]), set(d.deletedQuorums))
        assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["newQuorums"]]), set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]))
        return d
    def test_dip8_quorum_merkle_root_activation(self, with_initial_quorum):
        """Activate DIP8 (optionally with a quorum mined beforehand) and
        verify the CbTx version bump and merkleRootQuorums behaviour.
        Returns the hash of the quorum mined after activation."""
        if with_initial_quorum:
            self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
            self.wait_for_sporks_same()
            # Mine one quorum before dip8 is activated
            self.mine_quorum()
            self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 4070908800)
            self.wait_for_sporks_same()
        cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert(cbtx["cbTx"]["version"] == 1)
        assert(self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active")
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(4)
        self.sync_all()
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # Assert that merkleRootQuorums is present and 0 (we have no quorums yet)
        cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert_equal(cbtx["cbTx"]["version"], 2)
        assert("merkleRootQuorums" in cbtx["cbTx"])
        merkleRootQuorums = int(cbtx["cbTx"]["merkleRootQuorums"], 16)
        if with_initial_quorum:
            assert(merkleRootQuorums != 0)
        else:
            assert_equal(merkleRootQuorums, 0)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()
        # Mine quorum and verify that merkleRootQuorums has changed
        quorum = self.mine_quorum()
        cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert(int(cbtx["cbTx"]["merkleRootQuorums"], 16) != merkleRootQuorums)
        return quorum
    def confirm_mns(self):
        """Mine blocks until every masternode in 'protx diff' is confirmed."""
        while True:
            diff = self.nodes[0].protx("diff", 1, self.nodes[0].getblockcount())
            found_unconfirmed = False
            for mn in diff["mnList"]:
                if int(mn["confirmedHash"], 16) == 0:
                    found_unconfirmed = True
                    break
            if not found_unconfirmed:
                break
            self.nodes[0].generate(1)
        sync_blocks(self.nodes)
# Script entry point: run the DIP4 coinbase-merkle-root functional test.
if __name__ == '__main__':
    LLMQCoinbaseCommitmentsTest().main()
| 48.645485 | 169 | 0.691097 |
from collections import namedtuple
from test_framework.mininode import *
from test_framework.test_framework import WildfireTestFramework
from test_framework.util import *
from time import *
class TestNode(SingleNodeConnCB):
    # Minimal P2P client: caches the last MNLISTDIFF message received.
    def __init__(self):
        SingleNodeConnCB.__init__(self)
        self.last_mnlistdiff = None
    def on_mnlistdiff(self, conn, message):
        # Message-handler callback invoked by the mininode framework.
        self.last_mnlistdiff = message
    def wait_for_mnlistdiff(self, timeout=30):
        # Clear the cache, then poll until a fresh mnlistdiff arrives.
        self.last_mnlistdiff = None
        def received_mnlistdiff():
            return self.last_mnlistdiff is not None
        return wait_until(received_mnlistdiff, timeout=timeout)
    def getmnlistdiff(self, baseBlockHash, blockHash):
        # Send GETMNLISTD for the given block range, block for the reply.
        msg = msg_getmnlistd(baseBlockHash, blockHash)
        self.send_message(msg)
        self.wait_for_mnlistdiff()
        return self.last_mnlistdiff
class LLMQCoinbaseCommitmentsTest(WildfireTestFramework):
    """Checks DIP4 coinbase merkle roots (MN list and quorum commitments)
    via P2P MNLISTDIFF messages and the 'protx diff' RPC."""
    def __init__(self):
        # 6 nodes, 5 masternodes, fast DIP3 enforcement.
        super().__init__(6, 5, [], fast_dip3_enforcement=True)
    def run_test(self):
        self.test_node = TestNode()
        self.test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
        NetworkThread().start()
        self.test_node.wait_for_verack()
        self.confirm_mns()
        null_hash = format(0, "064x")
        # Diff against genesis must contain every masternode.
        expectedUpdated = [mn.proTxHash for mn in self.mninfo]
        mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated)
        expectedUpdated2 = expectedUpdated + []
        # Register one more MN without starting it; it must show up in the diff.
        baseBlockHash = self.nodes[0].getbestblockhash()
        self.prepare_masternode(self.mn_count)
        new_mn = self.mninfo[self.mn_count]
        expectedDeleted = []
        expectedUpdated = [new_mn.proTxHash]
        mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
        assert(mnList[new_mn.proTxHash].confirmedHash == 0)
        # After enough confirmations the diff must carry a non-zero confirmedHash.
        self.confirm_mns()
        mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
        assert(mnList[new_mn.proTxHash].confirmedHash != 0)
        # Spending the collateral must surface the MN in "deletedMNs".
        expectedDeleted = [new_mn.proTxHash]
        expectedUpdated = []
        baseBlockHash2 = self.nodes[0].getbestblockhash()
        # NOTE(review): 'remove_mastermode' must match the framework's method
        # name even though it reads like a typo of 'remove_masternode'.
        self.remove_mastermode(self.mn_count)
        mnList = self.test_getmnlistdiff(baseBlockHash2, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
        # Genesis-to-tip diff must not include the added-then-deleted MN.
        mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated2)
        #############################
        # Now start testing quorum commitment merkle roots
        self.nodes[0].generate(1)
        oldhash = self.nodes[0].getbestblockhash()
        # Test DIP8 activation once with a pre-existing quorum and once without (we don't know in which order it will activate on mainnet)
        self.test_dip8_quorum_merkle_root_activation(True)
        for n in self.nodes:
            n.invalidateblock(oldhash)
        self.sync_all()
        first_quorum = self.test_dip8_quorum_merkle_root_activation(False)
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()
        # First quorum must appear in MNLISTDIFF.
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(first_quorum, 16))]
        quorumList = self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
        baseBlockHash = self.nodes[0].getbestblockhash()
        second_quorum = self.mine_quorum()
        # Second quorum must appear in MNLISTDIFF.
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(second_quorum, 16))]
        quorums_before_third = self.test_getmnlistdiff_quorums(baseBlockHash, self.nodes[0].getbestblockhash(), quorumList, expectedDeleted, expectedNew)
        block_before_third = self.nodes[0].getbestblockhash()
        third_quorum = self.mine_quorum()
        # First quorum rotates out, third rotates in.
        expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
        expectedNew = [QuorumId(100, int(third_quorum, 16))]
        self.test_getmnlistdiff_quorums(block_before_third, self.nodes[0].getbestblockhash(), quorums_before_third, expectedDeleted, expectedNew)
        # Genesis-to-tip diff is the current active set (second and third).
        expectedDeleted = []
        expectedNew = [QuorumId(100, int(second_quorum, 16)), QuorumId(100, int(third_quorum, 16))]
        self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
        # Check diffs around the block that mined the third quorum
        # (exercises CalcCbTxMerkleRootQuorums adding the current block's commitment).
        mined_in_block = self.nodes[0].quorum("info", 100, third_quorum)["minedBlock"]
        prev_block = self.nodes[0].getblock(mined_in_block)["previousblockhash"]
        prev_block2 = self.nodes[0].getblock(prev_block)["previousblockhash"]
        next_block = self.nodes[0].getblock(mined_in_block)["nextblockhash"]
        # Bug fix: next_block2 is the successor of next_block; the original
        # fetched mined_in_block's successor twice (next_block2 == next_block).
        next_block2 = self.nodes[0].getblock(next_block)["nextblockhash"]
        # The two blocks before the quorum was mined: empty diffs.
        expectedDeleted = []
        expectedNew = []
        self.test_getmnlistdiff_quorums(block_before_third, prev_block2, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, prev_block, quorums_before_third, expectedDeleted, expectedNew)
        # The mining block and the two after it: identical diffs.
        expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
        expectedNew = [QuorumId(100, int(third_quorum, 16))]
        quorums_with_third = self.test_getmnlistdiff_quorums(block_before_third, mined_in_block, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, next_block, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(block_before_third, next_block2, quorums_before_third, expectedDeleted, expectedNew)
        # Diffs between post-mining blocks: empty.
        expectedDeleted = []
        expectedNew = []
        self.test_getmnlistdiff_quorums(mined_in_block, next_block, quorums_with_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(mined_in_block, next_block2, quorums_with_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block, next_block2, quorums_with_third, expectedDeleted, expectedNew)
        # Identical base and target block: empty diffs.
        self.test_getmnlistdiff_quorums(prev_block, prev_block, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(prev_block2, prev_block2, quorums_before_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(mined_in_block, mined_in_block, quorums_with_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block, next_block, quorums_with_third, expectedDeleted, expectedNew)
        self.test_getmnlistdiff_quorums(next_block2, next_block2, quorums_with_third, expectedDeleted, expectedNew)
    def test_getmnlistdiff(self, baseBlockHash, blockHash, baseMNList, expectedDeleted, expectedUpdated):
        # Fetch a diff, check deleted/updated MNs and the MN-list merkle root.
        d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
        assert_equal(set(d.deletedMNs), set([int(e, 16) for e in expectedDeleted]))
        assert_equal(set([e.proRegTxHash for e in d.mnList]), set(int(e, 16) for e in expectedUpdated))
        newMNList = baseMNList.copy()
        for e in d.deletedMNs:
            newMNList.pop(format(e, '064x'))
        for e in d.mnList:
            newMNList[format(e.proRegTxHash, '064x')] = e
        cbtx = CCbTx()
        cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
        # Recompute the merkle root locally and compare against the coinbase.
        hashes = []
        for mn in sorted(newMNList.values(), key=lambda mn: ser_uint256(mn.proRegTxHash)):
            hashes.append(hash256(mn.serialize()))
        merkleRoot = CBlock.get_merkle_root(hashes)
        assert_equal(merkleRoot, cbtx.merkleRootMNList)
        return newMNList
    def test_getmnlistdiff_quorums(self, baseBlockHash, blockHash, baseQuorumList, expectedDeleted, expectedNew):
        # Fetch a diff, check deleted/new quorums and the quorum merkle root.
        d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
        assert_equal(set(d.deletedQuorums), set(expectedDeleted))
        assert_equal(set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]), set(expectedNew))
        newQuorumList = baseQuorumList.copy()
        for e in d.deletedQuorums:
            newQuorumList.pop(e)
        for e in d.newQuorums:
            newQuorumList[QuorumId(e.llmqType, e.quorumHash)] = e
        cbtx = CCbTx()
        cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
        # merkleRootQuorums only exists from CbTx version 2 on (DIP8 active).
        if cbtx.version >= 2:
            hashes = []
            for qc in newQuorumList.values():
                hashes.append(hash256(qc.serialize()))
            hashes.sort()
            merkleRoot = CBlock.get_merkle_root(hashes)
            assert_equal(merkleRoot, cbtx.merkleRootQuorums)
        return newQuorumList
    def test_getmnlistdiff_base(self, baseBlockHash, blockHash):
        # Fetch MNLISTDIFF via P2P, verify the merkle proof, and cross-check
        # every field against the 'protx diff' RPC.
        hexstr = self.nodes[0].getblockheader(blockHash, False)
        header = FromHex(CBlockHeader(), hexstr)
        d = self.test_node.getmnlistdiff(int(baseBlockHash, 16), int(blockHash, 16))
        assert_equal(d.baseBlockHash, int(baseBlockHash, 16))
        assert_equal(d.blockHash, int(blockHash, 16))
        proof = CMerkleBlock(header, d.merkleProof)
        proof = proof.serialize().hex()
        assert_equal(self.nodes[0].verifytxoutproof(proof), [d.cbTx.hash])
        d2 = self.nodes[0].protx("diff", baseBlockHash, blockHash)
        assert_equal(d2["baseBlockHash"], baseBlockHash)
        assert_equal(d2["blockHash"], blockHash)
        assert_equal(d2["cbTxMerkleTree"], d.merkleProof.serialize().hex())
        assert_equal(d2["cbTx"], d.cbTx.serialize().hex())
        assert_equal(set([int(e, 16) for e in d2["deletedMNs"]]), set(d.deletedMNs))
        assert_equal(set([int(e["proRegTxHash"], 16) for e in d2["mnList"]]), set([e.proRegTxHash for e in d.mnList]))
        assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["deletedQuorums"]]), set(d.deletedQuorums))
        assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["newQuorums"]]), set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]))
        return d
    def test_dip8_quorum_merkle_root_activation(self, with_initial_quorum):
        # Activate DIP8 (optionally after mining a quorum first) and verify
        # the CbTx version bump plus merkleRootQuorums behaviour.
        if with_initial_quorum:
            self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
            self.wait_for_sporks_same()
            self.mine_quorum()
            self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 4070908800)
            self.wait_for_sporks_same()
        cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert(cbtx["cbTx"]["version"] == 1)
        assert(self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active")
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(4)
        self.sync_all()
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert_equal(cbtx["cbTx"]["version"], 2)
        assert("merkleRootQuorums" in cbtx["cbTx"])
        merkleRootQuorums = int(cbtx["cbTx"]["merkleRootQuorums"], 16)
        if with_initial_quorum:
            assert(merkleRootQuorums != 0)
        else:
            assert_equal(merkleRootQuorums, 0)
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()
        # Mining a quorum must change merkleRootQuorums.
        quorum = self.mine_quorum()
        cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
        assert(int(cbtx["cbTx"]["merkleRootQuorums"], 16) != merkleRootQuorums)
        return quorum
    def confirm_mns(self):
        # Mine blocks until every MN in 'protx diff' has a confirmedHash.
        while True:
            diff = self.nodes[0].protx("diff", 1, self.nodes[0].getblockcount())
            found_unconfirmed = False
            for mn in diff["mnList"]:
                if int(mn["confirmedHash"], 16) == 0:
                    found_unconfirmed = True
                    break
            if not found_unconfirmed:
                break
            self.nodes[0].generate(1)
        sync_blocks(self.nodes)
# Script entry point: run the DIP4 coinbase-merkle-root functional test.
if __name__ == '__main__':
    LLMQCoinbaseCommitmentsTest().main()
| true | true |
f72698df2982562089d403ee92680cbf783ca0e2 | 31,875 | py | Python | examples/pxScene2d/external/libnode-v6.9.0/tools/comtypes/typeinfo.py | madanagopaltcomcast/pxCore | c4a3a40a190521c8b6383d126c87612eca5b3c42 | [
"Apache-2.0"
] | 46 | 2017-09-07T14:59:22.000Z | 2020-10-31T20:34:12.000Z | examples/pxScene2d/external/libnode-v6.9.0/tools/comtypes/typeinfo.py | madanagopaltcomcast/pxCore | c4a3a40a190521c8b6383d126c87612eca5b3c42 | [
"Apache-2.0"
] | 1,432 | 2017-06-21T04:08:48.000Z | 2020-08-25T16:21:15.000Z | examples/pxScene2d/external/libnode-v6.9.0/tools/comtypes/typeinfo.py | madanagopaltcomcast/pxCore | c4a3a40a190521c8b6383d126c87612eca5b3c42 | [
"Apache-2.0"
] | 317 | 2017-06-20T19:57:17.000Z | 2020-09-16T10:28:30.000Z | # XXX Should convert from STDMETHOD to COMMETHOD.
# generated by 'xml2py'
# flags '..\tools\windows.xml -m comtypes -m comtypes.automation -w -r .*TypeLibEx -r .*TypeLib -o typeinfo.py'
# then hacked manually
import os
import sys
import weakref
from ctypes import *
from ctypes.wintypes import ULONG
from comtypes import STDMETHOD
from comtypes import COMMETHOD
from comtypes import _GUID, GUID
# XXX should import more stuff from ctypes.wintypes...
from comtypes.automation import BSTR
from comtypes.automation import DISPID
from comtypes.automation import DISPPARAMS
from comtypes.automation import DWORD
from comtypes.automation import EXCEPINFO
from comtypes.automation import HRESULT
from comtypes.automation import IID
from comtypes.automation import IUnknown
from comtypes.automation import LCID
from comtypes.automation import LONG
from comtypes.automation import SCODE
from comtypes.automation import UINT
from comtypes.automation import VARIANT
from comtypes.automation import VARIANTARG
from comtypes.automation import VARTYPE
from comtypes.automation import WCHAR
from comtypes.automation import WORD
from comtypes.automation import tagVARIANT
is_64_bit = sys.maxsize > 2**32
BOOL = c_int
HREFTYPE = DWORD
INT = c_int
MEMBERID = DISPID
OLECHAR = WCHAR
PVOID = c_void_p
SHORT = c_short
# See https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751(v=vs.85).aspx#ULONG_PTR # noqa
ULONG_PTR = c_uint64 if is_64_bit else c_ulong
USHORT = c_ushort
LPOLESTR = POINTER(OLECHAR)
################################################################
# enums
tagSYSKIND = c_int # enum
SYS_WIN16 = 0
SYS_WIN32 = 1
SYS_MAC = 2
SYS_WIN64 = 3
SYSKIND = tagSYSKIND
tagREGKIND = c_int # enum
REGKIND_DEFAULT = 0
REGKIND_REGISTER = 1
REGKIND_NONE = 2
REGKIND = tagREGKIND
tagTYPEKIND = c_int # enum
TKIND_ENUM = 0
TKIND_RECORD = 1
TKIND_MODULE = 2
TKIND_INTERFACE = 3
TKIND_DISPATCH = 4
TKIND_COCLASS = 5
TKIND_ALIAS = 6
TKIND_UNION = 7
TKIND_MAX = 8
TYPEKIND = tagTYPEKIND
tagINVOKEKIND = c_int # enum
INVOKE_FUNC = 1
INVOKE_PROPERTYGET = 2
INVOKE_PROPERTYPUT = 4
INVOKE_PROPERTYPUTREF = 8
INVOKEKIND = tagINVOKEKIND
tagDESCKIND = c_int # enum
DESCKIND_NONE = 0
DESCKIND_FUNCDESC = 1
DESCKIND_VARDESC = 2
DESCKIND_TYPECOMP = 3
DESCKIND_IMPLICITAPPOBJ = 4
DESCKIND_MAX = 5
DESCKIND = tagDESCKIND
tagVARKIND = c_int # enum
VAR_PERINSTANCE = 0
VAR_STATIC = 1
VAR_CONST = 2
VAR_DISPATCH = 3
VARKIND = tagVARKIND
tagFUNCKIND = c_int # enum
FUNC_VIRTUAL = 0
FUNC_PUREVIRTUAL = 1
FUNC_NONVIRTUAL = 2
FUNC_STATIC = 3
FUNC_DISPATCH = 4
FUNCKIND = tagFUNCKIND
tagCALLCONV = c_int # enum
CC_FASTCALL = 0
CC_CDECL = 1
CC_MSCPASCAL = 2
CC_PASCAL = 2
CC_MACPASCAL = 3
CC_STDCALL = 4
CC_FPFASTCALL = 5
CC_SYSCALL = 6
CC_MPWCDECL = 7
CC_MPWPASCAL = 8
CC_MAX = 9
CALLCONV = tagCALLCONV
IMPLTYPEFLAG_FDEFAULT = 1
IMPLTYPEFLAG_FSOURCE = 2
IMPLTYPEFLAG_FRESTRICTED = 4
IMPLTYPEFLAG_FDEFAULTVTABLE = 8
tagTYPEFLAGS = c_int # enum
TYPEFLAG_FAPPOBJECT = 1
TYPEFLAG_FCANCREATE = 2
TYPEFLAG_FLICENSED = 4
TYPEFLAG_FPREDECLID = 8
TYPEFLAG_FHIDDEN = 16
TYPEFLAG_FCONTROL = 32
TYPEFLAG_FDUAL = 64
TYPEFLAG_FNONEXTENSIBLE = 128
TYPEFLAG_FOLEAUTOMATION = 256
TYPEFLAG_FRESTRICTED = 512
TYPEFLAG_FAGGREGATABLE = 1024
TYPEFLAG_FREPLACEABLE = 2048
TYPEFLAG_FDISPATCHABLE = 4096
TYPEFLAG_FREVERSEBIND = 8192
TYPEFLAG_FPROXY = 16384
TYPEFLAGS = tagTYPEFLAGS
tagFUNCFLAGS = c_int # enum
FUNCFLAG_FRESTRICTED = 1
FUNCFLAG_FSOURCE = 2
FUNCFLAG_FBINDABLE = 4
FUNCFLAG_FREQUESTEDIT = 8
FUNCFLAG_FDISPLAYBIND = 16
FUNCFLAG_FDEFAULTBIND = 32
FUNCFLAG_FHIDDEN = 64
FUNCFLAG_FUSESGETLASTERROR = 128
FUNCFLAG_FDEFAULTCOLLELEM = 256
FUNCFLAG_FUIDEFAULT = 512
FUNCFLAG_FNONBROWSABLE = 1024
FUNCFLAG_FREPLACEABLE = 2048
FUNCFLAG_FIMMEDIATEBIND = 4096
FUNCFLAGS = tagFUNCFLAGS
tagVARFLAGS = c_int # enum
VARFLAG_FREADONLY = 1
VARFLAG_FSOURCE = 2
VARFLAG_FBINDABLE = 4
VARFLAG_FREQUESTEDIT = 8
VARFLAG_FDISPLAYBIND = 16
VARFLAG_FDEFAULTBIND = 32
VARFLAG_FHIDDEN = 64
VARFLAG_FRESTRICTED = 128
VARFLAG_FDEFAULTCOLLELEM = 256
VARFLAG_FUIDEFAULT = 512
VARFLAG_FNONBROWSABLE = 1024
VARFLAG_FREPLACEABLE = 2048
VARFLAG_FIMMEDIATEBIND = 4096
VARFLAGS = tagVARFLAGS
PARAMFLAG_NONE = 0
PARAMFLAG_FIN = 1
PARAMFLAG_FOUT = 2
PARAMFLAG_FLCID = 4
PARAMFLAG_FRETVAL = 8
PARAMFLAG_FOPT = 16
PARAMFLAG_FHASDEFAULT = 32
PARAMFLAG_FHASCUSTDATA = 64
################################################################
# a helper
def _deref_with_release(ptr, release):
# Given a POINTER instance, return the pointed to value.
# Call the 'release' function with 'ptr' to release resources
# when the value is no longer needed.
result = ptr[0]
result.__ref__ = weakref.ref(result, lambda dead: release(ptr))
return result
# interfaces
class ITypeLib(IUnknown):
    """COM ITypeLib interface: access to the type descriptions in a type library."""
    _iid_ = GUID("{00020402-0000-0000-C000-000000000046}")
    # Commented out methods use the default implementation that comtypes
    # automatically creates for COM methods.
    ## def GetTypeInfoCount(self):
    ##     "Return the number of type informations"
    ## def GetTypeInfo(self, index):
    ##     "Load type info by index"
    ## def GetTypeInfoType(self, index):
    ##     "Return the TYPEKIND of type information"
    ## def GetTypeInfoOfGuid(self, guid):
    ##     "Return type information for a guid"
    def GetLibAttr(self):
        "Return type library attributes"
        # _deref_with_release ties ReleaseTLibAttr to the returned value's
        # lifetime, so the caller never frees the TLIBATTR explicitly.
        return _deref_with_release(self._GetLibAttr(), self.ReleaseTLibAttr)
    ## def GetTypeComp(self):
    ##     "Return an ITypeComp pointer."
    ## def GetDocumentation(self, index):
    ##     "Return documentation for a type description."
    def IsName(self, name, lHashVal=0):
        """Check if there is type information for this name.
        Returns the name with capitalization found in the type
        library, or None.
        """
        from ctypes import create_unicode_buffer
        namebuf = create_unicode_buffer(name)
        found = BOOL()
        # __com_IsName is the raw COM method; it may rewrite namebuf with
        # the library's capitalization of the name.
        self.__com_IsName(namebuf, lHashVal, byref(found))
        if found.value:
            # Truncate at the buffer's terminating NUL.
            return namebuf[:].split("\0", 1)[0]
        return None
    def FindName(self, name, lHashVal=0):
        # Hm...
        # Could search for more than one name - should we support this?
        # found is in/out: request at most one match, receive the match count.
        found = c_ushort(1)
        tinfo = POINTER(ITypeInfo)()
        memid = MEMBERID()
        self.__com_FindName(name, lHashVal, byref(tinfo), byref(memid), byref(found))
        if found.value:
            return memid.value, tinfo
        # Implicitly returns None when the name was not found.
        return None
    ## def ReleaseTLibAttr(self, ptla):
    ##     "Release TLIBATTR"
################
def fix_name(name):
    """Return *name* truncated at the first embedded NUL character.

    Some typelibs contain BSTRs with embedded NULs (probably the stored
    BSTR length is wrong); None passes through unchanged.
    """
    if name is None:
        return None
    nul = name.find("\0")
    return name if nul < 0 else name[:nul]
class ITypeInfo(IUnknown):
    """COM ITypeInfo interface: describes a single type from a type library."""
    _iid_ = GUID("{00020401-0000-0000-C000-000000000046}")
    def GetTypeAttr(self):
        "Return the TYPEATTR for this type"
        # ReleaseTypeAttr is invoked automatically when the value is collected.
        return _deref_with_release(self._GetTypeAttr(), self.ReleaseTypeAttr)
    ## def GetTypeComp(self):
    ##     "Return ITypeComp pointer for this type"
    def GetDocumentation(self, memid):
        """Return name, docstring, helpcontext, and helpfile for 'memid'."""
        name, doc, helpcontext, helpfile = self._GetDocumentation(memid)
        # Strip embedded NULs that some typelibs put into BSTRs.
        return fix_name(name), fix_name(doc), helpcontext, fix_name(helpfile)
    def GetFuncDesc(self, index):
        "Return FUNCDESC for index"
        return _deref_with_release(self._GetFuncDesc(index), self.ReleaseFuncDesc)
    def GetVarDesc(self, index):
        "Return VARDESC for index"
        return _deref_with_release(self._GetVarDesc(index), self.ReleaseVarDesc)
    def GetNames(self, memid, count=1):
        "Return names for memid"
        names = (BSTR * count)()
        cnames = c_uint()
        self.__com_GetNames(memid, names, count, byref(cnames))
        # Only the first cnames entries are valid.
        return names[:cnames.value]
    ## def GetRefTypeOfImplType(self, index):
    ##     "Get the reftype of an implemented type"
    ## def GetImplTypeFlags(self, index):
    ##     "Get IMPLTYPEFLAGS"
    def GetIDsOfNames(self, *names):
        "Maps function and argument names to identifiers"
        rgsznames = (c_wchar_p * len(names))(*names)
        ids = (MEMBERID * len(names))()
        self.__com_GetIDsOfNames(rgsznames, len(names), ids)
        return ids[:]
    # not yet wrapped
    ## STDMETHOD(HRESULT, 'Invoke', [PVOID, MEMBERID, WORD, POINTER(DISPPARAMS), POINTER(VARIANT), POINTER(EXCEPINFO), POINTER(UINT)]),
    ## def GetDllEntry(self, memid, invkind):
    ##     "Return the dll name, function name, and ordinal for a function and invkind."
    ## def GetRefTypeInfo(self, href):
    ##     "Get type info for reftype"
    def AddressOfMember(self, memid, invkind):
        "Get the address of a function in a dll"
        # Bug fix: the original used 'raise "Check Me"'.  Raising a string
        # is not supported (it raises TypeError at runtime); raise a proper
        # exception instead.  The code below is deliberately unreachable
        # until this wrapper has been verified.
        raise NotImplementedError("Check Me")
        p = c_void_p()
        self.__com_AddressOfMember(memid, invkind, byref(p))
        # XXX Would the default impl return the value of p?
        return p.value
    def CreateInstance(self, punkouter=None, interface=IUnknown, iid=None):
        """Create an instance of the coclass described by this type info.

        'iid' defaults to the interface's IID when not given.
        """
        if iid is None:
            iid = interface._iid_
        # Bug fix: the original passed byref(interface._iid_) and silently
        # ignored an explicitly supplied 'iid'; honor the parameter.
        return self._CreateInstance(punkouter, byref(iid))
    ## def GetMops(self, index):
    ##     "Get marshalling opcodes (whatever that is...)"
    ## def GetContainingTypeLib(self):
    ##     "Return index into and the containing type lib itself"
    ## def ReleaseTypeAttr(self, pta):
    ## def ReleaseFuncDesc(self, pfd):
    ## def ReleaseVarDesc(self, pvd):
################
class ITypeComp(IUnknown):
    """COM ITypeComp wrapper: binds names to the things they describe
    (functions, variables, types) inside a type library/type info."""
    _iid_ = GUID("{00020403-0000-0000-C000-000000000046}")

    def Bind(self, name, flags=0, lHashVal=0):
        "Bind to a name"
        # __com_Bind is the raw vtable slot generated from
        # ITypeComp._methods_ (assigned later in this module).
        bindptr = BINDPTR()
        desckind = DESCKIND()
        ti = POINTER(ITypeInfo)()
        self.__com_Bind(name, lHashVal, flags, byref(ti), byref(desckind), byref(bindptr))
        kind = desckind.value
        if kind == DESCKIND_FUNCDESC:
            # Tie ReleaseFuncDesc to garbage collection of the returned
            # FUNCDESC so callers need not release it explicitly.
            fd = bindptr.lpfuncdesc[0]
            fd.__ref__ = weakref.ref(fd, lambda dead: ti.ReleaseFuncDesc(bindptr.lpfuncdesc))
            return "function", fd
        elif kind == DESCKIND_VARDESC:
            vd = bindptr.lpvardesc[0]
            vd.__ref__ = weakref.ref(vd, lambda dead: ti.ReleaseVarDesc(bindptr.lpvardesc))
            return "variable", vd
        elif kind == DESCKIND_TYPECOMP:
            return "type", bindptr.lptcomp
        elif kind == DESCKIND_IMPLICITAPPOBJ:
            raise NotImplementedError
        elif kind == DESCKIND_NONE:
            raise NameError("Name %s not found" % name)
        # NOTE(review): any other DESCKIND value falls through and returns
        # None implicitly.

    def BindType(self, name, lHashVal=0):
        "Bind a type, and return both the typeinfo and typecomp for it."
        ti = POINTER(ITypeInfo)()
        tc = POINTER(ITypeComp)()
        self.__com_BindType(name, lHashVal, byref(ti), byref(tc))
        return ti, tc
################
class ICreateTypeLib(IUnknown):
    """Creation-side interface for building a type library.
    The vtable (_methods_) is assigned later in this module."""
    _iid_ = GUID("{00020406-0000-0000-C000-000000000046}")
# C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 2149
class ICreateTypeLib2(ICreateTypeLib):
    """ICreateTypeLib2 extends ICreateTypeLib (DeleteTypeInfo, SetCustData,
    ...); the additional vtable slots are assigned later in this module."""
    _iid_ = GUID("{0002040F-0000-0000-C000-000000000046}")
class ICreateTypeInfo(IUnknown):
    """Creation-side counterpart of ITypeInfo; vtable assigned later."""
    _iid_ = GUID("{00020405-0000-0000-C000-000000000046}")

    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 915
    def SetFuncAndParamNames(self, index, *names):
        """Set the function name and parameter names for function *index*."""
        # Build the OLESTR array in one step instead of filling it in a loop.
        name_array = (c_wchar_p * len(names))(*names)
        return self._SetFuncAndParamNames(index, name_array, len(names))
class IRecordInfo(IUnknown):
    # C:/vc98/include/OAIDL.H 5974
    """COM IRecordInfo wrapper: describes a user-defined record (UDT)."""
    _iid_ = GUID("{0000002F-0000-0000-C000-000000000046}")

    def GetFieldNames(self, *args):
        # Standard two-call COM pattern: the first call (NULL array) fills
        # `count` with the number of field names, the second retrieves them.
        # NOTE(review): *args is accepted but never used.
        count = c_ulong()
        self.__com_GetFieldNames(count, None)
        array = (BSTR * count.value)()
        self.__com_GetFieldNames(count, array)
        result = array[:]
        # XXX Should SysFreeString the array contents. How to?
        return result
# Vtable layout for IRecordInfo.  Slot order is dictated by the COM
# interface definition in OAIDL.H -- never reorder these entries.
IRecordInfo._methods_ = [
    COMMETHOD([], HRESULT, 'RecordInit',
              (['in'], c_void_p, 'pvNew')),
    COMMETHOD([], HRESULT, 'RecordClear',
              (['in'], c_void_p, 'pvExisting')),
    COMMETHOD([], HRESULT, 'RecordCopy',
              (['in'], c_void_p, 'pvExisting'),
              (['in'], c_void_p, 'pvNew')),
    COMMETHOD([], HRESULT, 'GetGuid',
              (['out'], POINTER(GUID), 'pguid')),
    COMMETHOD([], HRESULT, 'GetName',
              (['out'], POINTER(BSTR), 'pbstrName')),
    COMMETHOD([], HRESULT, 'GetSize',
              (['out'], POINTER(c_ulong), 'pcbSize')),
    COMMETHOD([], HRESULT, 'GetTypeInfo',
              (['out'], POINTER(POINTER(ITypeInfo)), 'ppTypeInfo')),
    COMMETHOD([], HRESULT, 'GetField',
              (['in'], c_void_p, 'pvData'),
              (['in'], c_wchar_p, 'szFieldName'),
              (['out'], POINTER(VARIANT), 'pvarField')),
    COMMETHOD([], HRESULT, 'GetFieldNoCopy',
              (['in'], c_void_p, 'pvData'),
              (['in'], c_wchar_p, 'szFieldName'),
              (['out'], POINTER(VARIANT), 'pvarField'),
              (['out'], POINTER(c_void_p), 'ppvDataCArray')),
    COMMETHOD([], HRESULT, 'PutField',
              (['in'], c_ulong, 'wFlags'),
              (['in'], c_void_p, 'pvData'),
              (['in'], c_wchar_p, 'szFieldName'),
              (['in'], POINTER(VARIANT), 'pvarField')),
    COMMETHOD([], HRESULT, 'PutFieldNoCopy',
              (['in'], c_ulong, 'wFlags'),
              (['in'], c_void_p, 'pvData'),
              (['in'], c_wchar_p, 'szFieldName'),
              (['in'], POINTER(VARIANT), 'pvarField')),
    # 'in','out' on pcNames supports the two-call size query used by
    # IRecordInfo.GetFieldNames above.
    COMMETHOD([], HRESULT, 'GetFieldNames',
              (['in', 'out'], POINTER(c_ulong), 'pcNames'),
              (['in'], POINTER(BSTR), 'rgBstrNames')),
    COMMETHOD([], BOOL, 'IsMatchingType',
              (['in'], POINTER(IRecordInfo))),
    COMMETHOD([], HRESULT, 'RecordCreate'),
    COMMETHOD([], HRESULT, 'RecordCreateCopy',
              (['in'], c_void_p, 'pvSource'),
              (['out'], POINTER(c_void_p), 'ppvDest')),
    COMMETHOD([], HRESULT, 'RecordDestroy',
              (['in'], c_void_p, 'pvRecord'))]
################################################################
# functions
# oledll wrapper: every function checks the returned HRESULT and raises
# WindowsError on failure, so the helpers below need no explicit checks.
_oleaut32 = oledll.oleaut32

def GetRecordInfoFromTypeInfo(tinfo):
    "Return an IRecordInfo pointer to the UDT described in tinfo"
    ri = POINTER(IRecordInfo)()
    _oleaut32.GetRecordInfoFromTypeInfo(tinfo, byref(ri))
    return ri
def GetRecordInfoFromGuids(rGuidTypeLib, verMajor, verMinor, lcid, rGuidTypeInfo):
    """Return an IRecordInfo pointer for the UDT identified by the type
    library GUID/version and the type info GUID."""
    record_info = POINTER(IRecordInfo)()
    typelib_guid = GUID(rGuidTypeLib)
    typeinfo_guid = GUID(rGuidTypeInfo)
    _oleaut32.GetRecordInfoFromGuids(
        byref(typelib_guid), verMajor, verMinor, lcid,
        byref(typeinfo_guid), byref(record_info))
    return record_info
def LoadRegTypeLib(guid, wMajorVerNum, wMinorVerNum, lcid=0):
    "Load a registered type library"
    lib_guid = GUID(guid)
    result = POINTER(ITypeLib)()
    _oleaut32.LoadRegTypeLib(byref(lib_guid), wMajorVerNum, wMinorVerNum,
                             lcid, byref(result))
    return result
# Pick an implementation at import time depending on whether this
# platform's oleaut32 exports LoadTypeLibEx (Windows CE does not).
if hasattr(_oleaut32, "LoadTypeLibEx"):
    def LoadTypeLibEx(szFile, regkind=REGKIND_NONE):
        "Load, and optionally register a type library file"
        ptl = POINTER(ITypeLib)()
        _oleaut32.LoadTypeLibEx(c_wchar_p(szFile), regkind, byref(ptl))
        return ptl
else:
    def LoadTypeLibEx(szFile, regkind=REGKIND_NONE):
        "Load, and optionally register a type library file"
        # NOTE(review): this fallback silently ignores `regkind` because
        # plain LoadTypeLib takes no registration-kind argument.
        ptl = POINTER(ITypeLib)()
        _oleaut32.LoadTypeLib(c_wchar_p(szFile), byref(ptl))
        return ptl
def LoadTypeLib(szFile):
    "Load and register a type library file"
    result = POINTER(ITypeLib)()
    path = c_wchar_p(szFile)
    _oleaut32.LoadTypeLib(path, byref(result))
    return result
def UnRegisterTypeLib(libID, wVerMajor, wVerMinor, lcid=0, syskind=SYS_WIN32):
    "Unregister a registered type library"
    lib_guid = GUID(libID)
    return _oleaut32.UnRegisterTypeLib(byref(lib_guid), wVerMajor,
                                       wVerMinor, lcid, syskind)
def RegisterTypeLib(tlib, fullpath, helpdir=None):
    "Register a type library in the registry"
    # c_wchar_p(None) marshals as a NULL pointer when no helpdir is given.
    path_arg = c_wchar_p(fullpath)
    help_arg = c_wchar_p(helpdir)
    return _oleaut32.RegisterTypeLib(tlib, path_arg, help_arg)
def CreateTypeLib(filename, syskind=SYS_WIN32):
    "Return a ICreateTypeLib2 pointer"
    creator = POINTER(ICreateTypeLib2)()
    _oleaut32.CreateTypeLib2(syskind, c_wchar_p(filename), byref(creator))
    return creator
if os.name == "ce":
    # See also:
    # http://blogs.msdn.com/larryosterman/archive/2006/01/09/510856.aspx
    #
    # windows CE does not have QueryPathOfRegTypeLib. Emulate by reading the registry:
    def QueryPathOfRegTypeLib(libid, wVerMajor, wVerMinor, lcid=0):
        "Return the path of a registered type library"
        import _winreg
        try:
            hkey = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, r"Typelib\%s\%s.%s\%x\win32" % (libid, wVerMajor, wVerMinor, lcid))
        except WindowsError:
            # On CE, some typelib names are not in the ..\win32 subkey:
            hkey = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, r"Typelib\%s\%s.%s\%x" % (libid, wVerMajor, wVerMinor, lcid))
        # Default value of the key is the typelib path.
        return _winreg.QueryValueEx(hkey, "")[0]
else:
    def QueryPathOfRegTypeLib(libid, wVerMajor, wVerMinor, lcid=0):
        "Return the path of a registered type library"
        pathname = BSTR()
        _oleaut32.QueryPathOfRegTypeLib(byref(GUID(libid)), wVerMajor, wVerMinor, lcid, byref(pathname))
        # Truncate at the first embedded NUL (the BSTR length can be wrong).
        return pathname.value.split("\0")[0]
################################################################
# Structures
class tagTLIBATTR(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 4437
    """Type library attributes (TLIBATTR); _fields_ are assigned later in
    this module because of forward references."""
    def __repr__(self):
        # BUG FIX: the label was misspelled "FLags" in the original.
        return "TLIBATTR(GUID=%s, Version=%s.%s, LCID=%s, Flags=0x%x)" % \
               (self.guid, self.wMajorVerNum, self.wMinorVerNum, self.lcid, self.wLibFlags)
TLIBATTR = tagTLIBATTR
class tagTYPEATTR(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 672
    """TYPEATTR structure; field layout is assigned later in this module."""
    def __repr__(self):
        template = "TYPEATTR(GUID=%s, typekind=%s, funcs=%s, vars=%s, impltypes=%s)"
        values = (self.guid, self.typekind, self.cFuncs, self.cVars, self.cImplTypes)
        return template % values
TYPEATTR = tagTYPEATTR
class tagFUNCDESC(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 769
    """FUNCDESC structure; field layout is assigned later in this module."""
    def __repr__(self):
        template = ("FUNCDESC(memid=%s, cParams=%s, cParamsOpt=%s, "
                    "callconv=%s, invkind=%s, funckind=%s)")
        values = (self.memid, self.cParams, self.cParamsOpt,
                  self.callconv, self.invkind, self.funckind)
        return template % values
FUNCDESC = tagFUNCDESC
# Forward declarations.  The _fields_ layouts are assigned near the end of
# the module because these structures reference each other.
class tagVARDESC(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 803
    pass
VARDESC = tagVARDESC
class tagBINDPTR(Union):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 3075
    pass
BINDPTR = tagBINDPTR
class tagTYPEDESC(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 582
    pass
TYPEDESC = tagTYPEDESC
class tagIDLDESC(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 633
    pass
IDLDESC = tagIDLDESC
class tagARRAYDESC(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 594
    pass
################################################################
# interface vtbl definitions
# Vtable layouts for the creation-side interfaces.  Slot order is ABI --
# it must match the COM interface definitions; never reorder.
ICreateTypeLib._methods_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 2149
    COMMETHOD([], HRESULT, 'CreateTypeInfo',
              (['in'], LPOLESTR, 'szName'),
              (['in'], TYPEKIND, 'tkind'),
              (['out'], POINTER(POINTER(ICreateTypeInfo)), 'ppCTInfo')),
    STDMETHOD(HRESULT, 'SetName', [LPOLESTR]),
    STDMETHOD(HRESULT, 'SetVersion', [WORD, WORD]),
    STDMETHOD(HRESULT, 'SetGuid', [POINTER(GUID)]),
    STDMETHOD(HRESULT, 'SetDocString', [LPOLESTR]),
    STDMETHOD(HRESULT, 'SetHelpFileName', [LPOLESTR]),
    STDMETHOD(HRESULT, 'SetHelpContext', [DWORD]),
    STDMETHOD(HRESULT, 'SetLcid', [LCID]),
    STDMETHOD(HRESULT, 'SetLibFlags', [UINT]),
    STDMETHOD(HRESULT, 'SaveAllChanges', []),
]
# ICreateTypeLib2 appends its own slots after the inherited ones.
ICreateTypeLib2._methods_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 2444
    STDMETHOD(HRESULT, 'DeleteTypeInfo', [POINTER(ITypeInfo)]),
    STDMETHOD(HRESULT, 'SetCustData', [POINTER(GUID), POINTER(VARIANT)]),
    STDMETHOD(HRESULT, 'SetHelpStringContext', [ULONG]),
    STDMETHOD(HRESULT, 'SetHelpStringDll', [LPOLESTR]),
]
# Vtable layout for ITypeLib; slot order must match OAIDL.H -- never reorder.
ITypeLib._methods_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 4455
    COMMETHOD([], UINT, 'GetTypeInfoCount'),
    COMMETHOD([], HRESULT, 'GetTypeInfo',
              (['in'], UINT, 'index'),
              (['out'], POINTER(POINTER(ITypeInfo)))),
    COMMETHOD([], HRESULT, 'GetTypeInfoType',
              (['in'], UINT, 'index'),
              (['out'], POINTER(TYPEKIND))),
    COMMETHOD([], HRESULT, 'GetTypeInfoOfGuid',
              (['in'], POINTER(GUID)),
              (['out'], POINTER(POINTER(ITypeInfo)))),
    COMMETHOD([], HRESULT, 'GetLibAttr',
              (['out'], POINTER(POINTER(TLIBATTR)))),
    COMMETHOD([], HRESULT, 'GetTypeComp',
              (['out'], POINTER(POINTER(ITypeComp)))),
    COMMETHOD([], HRESULT, 'GetDocumentation',
              (['in'], INT, 'index'),
              (['out'], POINTER(BSTR)),
              (['out'], POINTER(BSTR)),
              (['out'], POINTER(DWORD)),
              (['out'], POINTER(BSTR))),
    COMMETHOD([], HRESULT, 'IsName',
              # IsName changes the casing of the passed in name to
              # match that in the type library. In the automatically
              # wrapped version of this method, ctypes would pass a
              # Python unicode string which would then be changed -
              # very bad. So we have (see above) to implement the
              # IsName method manually.
              (['in', 'out'], LPOLESTR, 'name'),
              (['in', 'optional'], DWORD, 'lHashVal', 0),
              (['out'], POINTER(BOOL))),
    STDMETHOD(HRESULT, 'FindName', [LPOLESTR, DWORD, POINTER(POINTER(ITypeInfo)),
                                    POINTER(MEMBERID), POINTER(USHORT)]),
    COMMETHOD([], None, 'ReleaseTLibAttr',
              (['in'], POINTER(TLIBATTR)))
]
# Vtable layout for ITypeInfo; slot order must match OAIDL.H -- never reorder.
ITypeInfo._methods_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 3230
    COMMETHOD([], HRESULT, 'GetTypeAttr',
              (['out'], POINTER(POINTER(TYPEATTR)), 'ppTypeAttr')),
    COMMETHOD([], HRESULT, 'GetTypeComp',
              (['out'], POINTER(POINTER(ITypeComp)))),
    COMMETHOD([], HRESULT, 'GetFuncDesc',
              (['in'], UINT, 'index'),
              (['out'], POINTER(POINTER(FUNCDESC)))),
    COMMETHOD([], HRESULT, 'GetVarDesc',
              (['in'], UINT, 'index'),
              (['out'], POINTER(POINTER(VARDESC)))),
    STDMETHOD(HRESULT, 'GetNames', [MEMBERID, POINTER(BSTR), UINT, POINTER(UINT)]),
    COMMETHOD([], HRESULT, 'GetRefTypeOfImplType',
              (['in'], UINT, 'index'),
              (['out'], POINTER(HREFTYPE))),
    COMMETHOD([], HRESULT, 'GetImplTypeFlags',
              (['in'], UINT, 'index'),
              (['out'], POINTER(INT))),
    ## STDMETHOD(HRESULT, 'GetIDsOfNames', [POINTER(LPOLESTR), UINT, POINTER(MEMBERID)]),
    # this one changed, to accept c_wchar_p array
    STDMETHOD(HRESULT, 'GetIDsOfNames', [POINTER(c_wchar_p), UINT, POINTER(MEMBERID)]),
    STDMETHOD(HRESULT, 'Invoke', [PVOID, MEMBERID, WORD, POINTER(DISPPARAMS), POINTER(VARIANT), POINTER(EXCEPINFO), POINTER(UINT)]),
    COMMETHOD([], HRESULT, 'GetDocumentation',
              (['in'], MEMBERID, 'memid'),
              (['out'], POINTER(BSTR), 'pBstrName'),
              (['out'], POINTER(BSTR), 'pBstrDocString'),
              (['out'], POINTER(DWORD), 'pdwHelpContext'),
              (['out'], POINTER(BSTR), 'pBstrHelpFile')),
    COMMETHOD([], HRESULT, 'GetDllEntry',
              (['in'], MEMBERID, 'index'),
              (['in'], INVOKEKIND, 'invkind'),
              (['out'], POINTER(BSTR), 'pBstrDllName'),
              (['out'], POINTER(BSTR), 'pBstrName'),
              (['out'], POINTER(WORD), 'pwOrdinal')),
    COMMETHOD([], HRESULT, 'GetRefTypeInfo',
              (['in'], HREFTYPE, 'hRefType'),
              (['out'], POINTER(POINTER(ITypeInfo)))),
    STDMETHOD(HRESULT, 'AddressOfMember', [MEMBERID, INVOKEKIND, POINTER(PVOID)]),
    COMMETHOD([], HRESULT, 'CreateInstance',
              (['in'], POINTER(IUnknown), 'pUnkOuter'),
              (['in'], POINTER(IID), 'refiid'),
              (['out'], POINTER(POINTER(IUnknown)))),
    COMMETHOD([], HRESULT, 'GetMops',
              (['in'], MEMBERID, 'memid'),
              (['out'], POINTER(BSTR))),
    COMMETHOD([], HRESULT, 'GetContainingTypeLib',
              (['out'], POINTER(POINTER(ITypeLib))),
              (['out'], POINTER(UINT))),
    COMMETHOD([], None, 'ReleaseTypeAttr',
              (['in'], POINTER(TYPEATTR))),
    COMMETHOD([], None, 'ReleaseFuncDesc',
              (['in'], POINTER(FUNCDESC))),
    COMMETHOD([], None, 'ReleaseVarDesc',
              (['in'], POINTER(VARDESC))),
]
# Vtable layout for ITypeComp; wrapped manually by Bind/BindType above.
ITypeComp._methods_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 3090
    STDMETHOD(HRESULT, 'Bind',
              [LPOLESTR, DWORD, WORD, POINTER(POINTER(ITypeInfo)),
               POINTER(DESCKIND), POINTER(BINDPTR)]),
    STDMETHOD(HRESULT, 'BindType',
              [LPOLESTR, DWORD, POINTER(POINTER(ITypeInfo)), POINTER(POINTER(ITypeComp))]),
]
# Vtable layout for ICreateTypeInfo; slot order must match OAIDL.H.
ICreateTypeInfo._methods_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 915
    STDMETHOD(HRESULT, 'SetGuid', [POINTER(GUID)]),
    STDMETHOD(HRESULT, 'SetTypeFlags', [UINT]),
    STDMETHOD(HRESULT, 'SetDocString', [LPOLESTR]),
    STDMETHOD(HRESULT, 'SetHelpContext', [DWORD]),
    STDMETHOD(HRESULT, 'SetVersion', [WORD, WORD]),
    # STDMETHOD(HRESULT, 'AddRefTypeInfo', [POINTER(ITypeInfo), POINTER(HREFTYPE)]),
    COMMETHOD([], HRESULT, 'AddRefTypeInfo',
              (['in'], POINTER(ITypeInfo)),
              (['out'], POINTER(HREFTYPE))),
    STDMETHOD(HRESULT, 'AddFuncDesc', [UINT, POINTER(FUNCDESC)]),
    STDMETHOD(HRESULT, 'AddImplType', [UINT, HREFTYPE]),
    STDMETHOD(HRESULT, 'SetImplTypeFlags', [UINT, INT]),
    STDMETHOD(HRESULT, 'SetAlignment', [WORD]),
    STDMETHOD(HRESULT, 'SetSchema', [LPOLESTR]),
    STDMETHOD(HRESULT, 'AddVarDesc', [UINT, POINTER(VARDESC)]),
    # Wrapped manually by SetFuncAndParamNames above (c_wchar_p array).
    STDMETHOD(HRESULT, 'SetFuncAndParamNames', [UINT, POINTER(c_wchar_p), UINT]),
    STDMETHOD(HRESULT, 'SetVarName', [UINT, LPOLESTR]),
    STDMETHOD(HRESULT, 'SetTypeDescAlias', [POINTER(TYPEDESC)]),
    STDMETHOD(HRESULT, 'DefineFuncAsDllEntry', [UINT, LPOLESTR, LPOLESTR]),
    STDMETHOD(HRESULT, 'SetFuncDocString', [UINT, LPOLESTR]),
    STDMETHOD(HRESULT, 'SetVarDocString', [UINT, LPOLESTR]),
    STDMETHOD(HRESULT, 'SetFuncHelpContext', [UINT, DWORD]),
    STDMETHOD(HRESULT, 'SetVarHelpContext', [UINT, DWORD]),
    STDMETHOD(HRESULT, 'SetMops', [UINT, BSTR]),
    STDMETHOD(HRESULT, 'SetTypeIdldesc', [POINTER(IDLDESC)]),
    STDMETHOD(HRESULT, 'LayOut', []),
]
class IProvideClassInfo(IUnknown):
    """COM IProvideClassInfo: access to an object's coclass type info."""
    _iid_ = GUID("{B196B283-BAB4-101A-B69C-00AA00341D07}")
    _methods_ = [
        # Returns the ITypeInfo interface for the object's coclass type information.
        COMMETHOD([], HRESULT, "GetClassInfo",
                  ( ['out'], POINTER(POINTER(ITypeInfo)), "ppTI" ) )
        ]
class IProvideClassInfo2(IProvideClassInfo):
    """IProvideClassInfo2 adds GetGUID (e.g. the default event-set IID)."""
    _iid_ = GUID("{A6BC3AC0-DBAA-11CE-9DE3-00AA004BB851}")
    _methods_ = [
        # Returns the GUID for the object's outgoing IID for its default event set.
        COMMETHOD([], HRESULT, "GetGUID",
                  ( ['in'], DWORD, "dwGuidKind" ),
                  ( ['out', 'retval'], POINTER(GUID), "pGUID" ))
        ]
################################################################
# Structure fields
# Field layouts.  Order and types are ABI (must match OAIDL.H) -- never
# reorder or retype these entries.
tagTLIBATTR._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 4437
    ('guid', GUID),
    ('lcid', LCID),
    ('syskind', SYSKIND),
    ('wMajorVerNum', WORD),
    ('wMinorVerNum', WORD),
    ('wLibFlags', WORD),
]
# gccxml-generated name for the anonymous union inside TYPEDESC.
class N11tagTYPEDESC5DOLLAR_203E(Union):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 584
    pass
N11tagTYPEDESC5DOLLAR_203E._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 584
    ('lptdesc', POINTER(tagTYPEDESC)),
    ('lpadesc', POINTER(tagARRAYDESC)),
    ('hreftype', HREFTYPE),
]
tagTYPEDESC._anonymous_ = ('_',)
tagTYPEDESC._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 582
    # Unnamed field renamed to '_'
    ('_', N11tagTYPEDESC5DOLLAR_203E),
    ('vt', VARTYPE),
]
tagIDLDESC._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 633
    ('dwReserved', ULONG_PTR),
    ('wIDLFlags', USHORT),
]
tagTYPEATTR._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 672
    ('guid', GUID),
    ('lcid', LCID),
    ('dwReserved', DWORD),
    ('memidConstructor', MEMBERID),
    ('memidDestructor', MEMBERID),
    ('lpstrSchema', LPOLESTR),
    ('cbSizeInstance', DWORD),
    ('typekind', TYPEKIND),
    ('cFuncs', WORD),
    ('cVars', WORD),
    ('cImplTypes', WORD),
    ('cbSizeVft', WORD),
    ('cbAlignment', WORD),
    ('wTypeFlags', WORD),
    ('wMajorVerNum', WORD),
    ('wMinorVerNum', WORD),
    ('tdescAlias', TYPEDESC),
    ('idldescType', IDLDESC),
]
# gccxml-generated name for the anonymous union inside VARDESC.
class N10tagVARDESC5DOLLAR_205E(Union):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 807
    pass
N10tagVARDESC5DOLLAR_205E._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 807
    ('oInst', DWORD),
    ('lpvarValue', POINTER(VARIANT)),
]
class tagELEMDESC(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 661
    pass
# gccxml-generated name for the anonymous union inside ELEMDESC.
class N11tagELEMDESC5DOLLAR_204E(Union):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 663
    pass
class tagPARAMDESC(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 609
    pass
class tagPARAMDESCEX(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 601
    pass
LPPARAMDESCEX = POINTER(tagPARAMDESCEX)
tagPARAMDESC._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 609
    ('pparamdescex', LPPARAMDESCEX),
    ('wParamFlags', USHORT),
]
PARAMDESC = tagPARAMDESC
N11tagELEMDESC5DOLLAR_204E._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 663
    ('idldesc', IDLDESC),
    ('paramdesc', PARAMDESC),
]
tagELEMDESC._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 661
    ('tdesc', TYPEDESC),
    # Unnamed field renamed to '_'
    ('_', N11tagELEMDESC5DOLLAR_204E),
]
ELEMDESC = tagELEMDESC
tagVARDESC._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 803
    ('memid', MEMBERID),
    ('lpstrSchema', LPOLESTR),
    # Unnamed field renamed to '_'
    ('_', N10tagVARDESC5DOLLAR_205E),
    ('elemdescVar', ELEMDESC),
    ('wVarFlags', WORD),
    ('varkind', VARKIND),
]
tagBINDPTR._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 3075
    ('lpfuncdesc', POINTER(FUNCDESC)),
    ('lpvardesc', POINTER(VARDESC)),
    ('lptcomp', POINTER(ITypeComp)),
]
tagFUNCDESC._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 769
    ('memid', MEMBERID),
    ('lprgscode', POINTER(SCODE)),
    ('lprgelemdescParam', POINTER(ELEMDESC)),
    ('funckind', FUNCKIND),
    ('invkind', INVOKEKIND),
    ('callconv', CALLCONV),
    ('cParams', SHORT),
    ('cParamsOpt', SHORT),
    ('oVft', SHORT),
    ('cScodes', SHORT),
    ('elemdescFunc', ELEMDESC),
    ('wFuncFlags', WORD),
]
tagPARAMDESCEX._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 601
    ('cBytes', DWORD),
    ('varDefaultValue', VARIANTARG),
]
class tagSAFEARRAYBOUND(Structure):
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 226
    _fields_ = [
        ('cElements', DWORD),
        ('lLbound', LONG),
    ]
SAFEARRAYBOUND = tagSAFEARRAYBOUND
tagARRAYDESC._fields_ = [
    # C:/Programme/gccxml/bin/Vc71/PlatformSDK/oaidl.h 594
    ('tdescElem', TYPEDESC),
    ('cDims', USHORT),
    # Variable-length in C; declared with one element here.
    ('rgbounds', SAFEARRAYBOUND * 1),
]
| 34.874179 | 134 | 0.637427 |
import os
import sys
import weakref
from ctypes import *
from ctypes.wintypes import ULONG
from comtypes import STDMETHOD
from comtypes import COMMETHOD
from comtypes import _GUID, GUID
from comtypes.automation import BSTR
from comtypes.automation import DISPID
from comtypes.automation import DISPPARAMS
from comtypes.automation import DWORD
from comtypes.automation import EXCEPINFO
from comtypes.automation import HRESULT
from comtypes.automation import IID
from comtypes.automation import IUnknown
from comtypes.automation import LCID
from comtypes.automation import LONG
from comtypes.automation import SCODE
from comtypes.automation import UINT
from comtypes.automation import VARIANT
from comtypes.automation import VARIANTARG
from comtypes.automation import VARTYPE
from comtypes.automation import WCHAR
from comtypes.automation import WORD
from comtypes.automation import tagVARIANT
is_64_bit = sys.maxsize > 2**32
BOOL = c_int
HREFTYPE = DWORD
INT = c_int
MEMBERID = DISPID
OLECHAR = WCHAR
PVOID = c_void_p
SHORT = c_short
is_64_bit else c_ulong
USHORT = c_ushort
LPOLESTR = POINTER(OLECHAR)
WSABLE = 1024
FUNCFLAG_FREPLACEABLE = 2048
FUNCFLAG_FIMMEDIATEBIND = 4096
FUNCFLAGS = tagFUNCFLAGS
tagVARFLAGS = c_int
VARFLAG_FREADONLY = 1
VARFLAG_FSOURCE = 2
VARFLAG_FBINDABLE = 4
VARFLAG_FREQUESTEDIT = 8
VARFLAG_FDISPLAYBIND = 16
VARFLAG_FDEFAULTBIND = 32
VARFLAG_FHIDDEN = 64
VARFLAG_FRESTRICTED = 128
VARFLAG_FDEFAULTCOLLELEM = 256
VARFLAG_FUIDEFAULT = 512
VARFLAG_FNONBROWSABLE = 1024
VARFLAG_FREPLACEABLE = 2048
VARFLAG_FIMMEDIATEBIND = 4096
VARFLAGS = tagVARFLAGS
PARAMFLAG_NONE = 0
PARAMFLAG_FIN = 1
PARAMFLAG_FOUT = 2
PARAMFLAG_FLCID = 4
PARAMFLAG_FRETVAL = 8
PARAMFLAG_FOPT = 16
PARAMFLAG_FHASDEFAULT = 32
PARAMFLAG_FHASCUSTDATA = 64
R(ITypeInfo)()
tc = POINTER(ITypeComp)()
self.__com_BindType(name, lHashVal, byref(ti), byref(tc))
return ti, tc
(ICreateTypeLib):
_iid_ = GUID("{0002040F-0000-0000-C000-000000000046}")
class ICreateTypeInfo(IUnknown):
_iid_ = GUID("{00020405-0000-0000-C000-000000000046}")
def SetFuncAndParamNames(self, index, *names):
rgszNames = (c_wchar_p * len(names))()
for i, n in enumerate(names):
rgszNames[i] = n
return self._SetFuncAndParamNames(index, rgszNames, len(names))
class IRecordInfo(IUnknown):
_iid_ = GUID("{0000002F-0000-0000-C000-000000000046}")
def GetFieldNames(self, *args):
count = c_ulong()
self.__com_GetFieldNames(count, None)
array = (BSTR * count.value)()
self.__com_GetFieldNames(count, array)
result = array[:]
return result
IRecordInfo. _methods_ = [
COMMETHOD([], HRESULT, 'RecordInit',
(['in'], c_void_p, 'pvNew')),
COMMETHOD([], HRESULT, 'RecordClear',
(['in'], c_void_p, 'pvExisting')),
COMMETHOD([], HRESULT, 'RecordCopy',
(['in'], c_void_p, 'pvExisting'),
(['in'], c_void_p, 'pvNew')),
COMMETHOD([], HRESULT, 'GetGuid',
(['out'], POINTER(GUID), 'pguid')),
COMMETHOD([], HRESULT, 'GetName',
(['out'], POINTER(BSTR), 'pbstrName')),
COMMETHOD([], HRESULT, 'GetSize',
(['out'], POINTER(c_ulong), 'pcbSize')),
COMMETHOD([], HRESULT, 'GetTypeInfo',
(['out'], POINTER(POINTER(ITypeInfo)), 'ppTypeInfo')),
COMMETHOD([], HRESULT, 'GetField',
(['in'], c_void_p, 'pvData'),
(['in'], c_wchar_p, 'szFieldName'),
(['out'], POINTER(VARIANT), 'pvarField')),
COMMETHOD([], HRESULT, 'GetFieldNoCopy',
(['in'], c_void_p, 'pvData'),
(['in'], c_wchar_p, 'szFieldName'),
(['out'], POINTER(VARIANT), 'pvarField'),
(['out'], POINTER(c_void_p), 'ppvDataCArray')),
COMMETHOD([], HRESULT, 'PutField',
(['in'], c_ulong, 'wFlags'),
(['in'], c_void_p, 'pvData'),
(['in'], c_wchar_p, 'szFieldName'),
(['in'], POINTER(VARIANT), 'pvarField')),
COMMETHOD([], HRESULT, 'PutFieldNoCopy',
(['in'], c_ulong, 'wFlags'),
(['in'], c_void_p, 'pvData'),
(['in'], c_wchar_p, 'szFieldName'),
(['in'], POINTER(VARIANT), 'pvarField')),
COMMETHOD([], HRESULT, 'GetFieldNames',
(['in', 'out'], POINTER(c_ulong), 'pcNames'),
(['in'], POINTER(BSTR), 'rgBstrNames')),
COMMETHOD([], BOOL, 'IsMatchingType',
(['in'], POINTER(IRecordInfo))),
COMMETHOD([], HRESULT, 'RecordCreate'),
COMMETHOD([], HRESULT, 'RecordCreateCopy',
(['in'], c_void_p, 'pvSource'),
(['out'], POINTER(c_void_p), 'ppvDest')),
COMMETHOD([], HRESULT, 'RecordDestroy',
(['in'], c_void_p, 'pvRecord'))]
ASSES_ROOT, r"Typelib\%s\%s.%s\%x\win32" % (libid, wVerMajor, wVerMinor, lcid))
except WindowsError:
hkey = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, r"Typelib\%s\%s.%s\%x" % (libid, wVerMajor, wVerMinor, lcid))
return _winreg.QueryValueEx(hkey, "")[0]
else:
def QueryPathOfRegTypeLib(libid, wVerMajor, wVerMinor, lcid=0):
"Return the path of a registered type library"
pathname = BSTR()
_oleaut32.QueryPathOfRegTypeLib(byref(GUID(libid)), wVerMajor, wVerMinor, lcid, byref(pathname))
return pathname.value.split("\0")[0]
, 'GetNames', [MEMBERID, POINTER(BSTR), UINT, POINTER(UINT)]),
COMMETHOD([], HRESULT, 'GetRefTypeOfImplType',
(['in'], UINT, 'index'),
(['out'], POINTER(HREFTYPE))),
COMMETHOD([], HRESULT, 'GetImplTypeFlags',
(['in'], UINT, 'index'),
(['out'], POINTER(INT))),
ID)]),
STDMETHOD(HRESULT, 'Invoke', [PVOID, MEMBERID, WORD, POINTER(DISPPARAMS), POINTER(VARIANT), POINTER(EXCEPINFO), POINTER(UINT)]),
COMMETHOD([], HRESULT, 'GetDocumentation',
(['in'], MEMBERID, 'memid'),
(['out'], POINTER(BSTR), 'pBstrName'),
(['out'], POINTER(BSTR), 'pBstrDocString'),
(['out'], POINTER(DWORD), 'pdwHelpContext'),
(['out'], POINTER(BSTR), 'pBstrHelpFile')),
COMMETHOD([], HRESULT, 'GetDllEntry',
(['in'], MEMBERID, 'index'),
(['in'], INVOKEKIND, 'invkind'),
(['out'], POINTER(BSTR), 'pBstrDllName'),
(['out'], POINTER(BSTR), 'pBstrName'),
(['out'], POINTER(WORD), 'pwOrdinal')),
COMMETHOD([], HRESULT, 'GetRefTypeInfo',
(['in'], HREFTYPE, 'hRefType'),
(['out'], POINTER(POINTER(ITypeInfo)))),
STDMETHOD(HRESULT, 'AddressOfMember', [MEMBERID, INVOKEKIND, POINTER(PVOID)]),
COMMETHOD([], HRESULT, 'CreateInstance',
(['in'], POINTER(IUnknown), 'pUnkOuter'),
(['in'], POINTER(IID), 'refiid'),
(['out'], POINTER(POINTER(IUnknown)))),
COMMETHOD([], HRESULT, 'GetMops',
(['in'], MEMBERID, 'memid'),
(['out'], POINTER(BSTR))),
COMMETHOD([], HRESULT, 'GetContainingTypeLib',
(['out'], POINTER(POINTER(ITypeLib))),
(['out'], POINTER(UINT))),
COMMETHOD([], None, 'ReleaseTypeAttr',
(['in'], POINTER(TYPEATTR))),
COMMETHOD([], None, 'ReleaseFuncDesc',
(['in'], POINTER(FUNCDESC))),
COMMETHOD([], None, 'ReleaseVarDesc',
(['in'], POINTER(VARDESC))),
]
ITypeComp._methods_ = [
STDMETHOD(HRESULT, 'Bind',
[LPOLESTR, DWORD, WORD, POINTER(POINTER(ITypeInfo)),
POINTER(DESCKIND), POINTER(BINDPTR)]),
STDMETHOD(HRESULT, 'BindType',
[LPOLESTR, DWORD, POINTER(POINTER(ITypeInfo)), POINTER(POINTER(ITypeComp))]),
]
ICreateTypeInfo._methods_ = [
STDMETHOD(HRESULT, 'SetGuid', [POINTER(GUID)]),
STDMETHOD(HRESULT, 'SetTypeFlags', [UINT]),
STDMETHOD(HRESULT, 'SetDocString', [LPOLESTR]),
STDMETHOD(HRESULT, 'SetHelpContext', [DWORD]),
STDMETHOD(HRESULT, 'SetVersion', [WORD, WORD]),
COMMETHOD([], HRESULT, 'AddRefTypeInfo',
(['in'], POINTER(ITypeInfo)),
(['out'], POINTER(HREFTYPE))),
STDMETHOD(HRESULT, 'AddFuncDesc', [UINT, POINTER(FUNCDESC)]),
STDMETHOD(HRESULT, 'AddImplType', [UINT, HREFTYPE]),
STDMETHOD(HRESULT, 'SetImplTypeFlags', [UINT, INT]),
STDMETHOD(HRESULT, 'SetAlignment', [WORD]),
STDMETHOD(HRESULT, 'SetSchema', [LPOLESTR]),
STDMETHOD(HRESULT, 'AddVarDesc', [UINT, POINTER(VARDESC)]),
STDMETHOD(HRESULT, 'SetFuncAndParamNames', [UINT, POINTER(c_wchar_p), UINT]),
STDMETHOD(HRESULT, 'SetVarName', [UINT, LPOLESTR]),
STDMETHOD(HRESULT, 'SetTypeDescAlias', [POINTER(TYPEDESC)]),
STDMETHOD(HRESULT, 'DefineFuncAsDllEntry', [UINT, LPOLESTR, LPOLESTR]),
STDMETHOD(HRESULT, 'SetFuncDocString', [UINT, LPOLESTR]),
STDMETHOD(HRESULT, 'SetVarDocString', [UINT, LPOLESTR]),
STDMETHOD(HRESULT, 'SetFuncHelpContext', [UINT, DWORD]),
STDMETHOD(HRESULT, 'SetVarHelpContext', [UINT, DWORD]),
STDMETHOD(HRESULT, 'SetMops', [UINT, BSTR]),
STDMETHOD(HRESULT, 'SetTypeIdldesc', [POINTER(IDLDESC)]),
STDMETHOD(HRESULT, 'LayOut', []),
]
class IProvideClassInfo(IUnknown):
_iid_ = GUID("{B196B283-BAB4-101A-B69C-00AA00341D07}")
_methods_ = [
COMMETHOD([], HRESULT, "GetClassInfo",
( ['out'], POINTER(POINTER(ITypeInfo)), "ppTI" ) )
]
class IProvideClassInfo2(IProvideClassInfo):
_iid_ = GUID("{A6BC3AC0-DBAA-11CE-9DE3-00AA004BB851}")
_methods_ = [
# Returns the GUID for the object's outgoing IID for its default event set.
COMMETHOD([], HRESULT, "GetGUID",
( ['in'], DWORD, "dwGuidKind" ),
( ['out', 'retval'], POINTER(GUID), "pGUID" ))
]
emdescVar', ELEMDESC),
('wVarFlags', WORD),
('varkind', VARKIND),
]
tagBINDPTR._fields_ = [
('lpfuncdesc', POINTER(FUNCDESC)),
('lpvardesc', POINTER(VARDESC)),
('lptcomp', POINTER(ITypeComp)),
]
tagFUNCDESC._fields_ = [
('memid', MEMBERID),
('lprgscode', POINTER(SCODE)),
('lprgelemdescParam', POINTER(ELEMDESC)),
('funckind', FUNCKIND),
('invkind', INVOKEKIND),
('callconv', CALLCONV),
('cParams', SHORT),
('cParamsOpt', SHORT),
('oVft', SHORT),
('cScodes', SHORT),
('elemdescFunc', ELEMDESC),
('wFuncFlags', WORD),
]
tagPARAMDESCEX._fields_ = [
('cBytes', DWORD),
('varDefaultValue', VARIANTARG),
]
class tagSAFEARRAYBOUND(Structure):
_fields_ = [
('cElements', DWORD),
('lLbound', LONG),
]
SAFEARRAYBOUND = tagSAFEARRAYBOUND
tagARRAYDESC._fields_ = [
('tdescElem', TYPEDESC),
('cDims', USHORT),
('rgbounds', SAFEARRAYBOUND * 1),
]
| true | true |
f72698ecbc016f7724c2386a1d6649e19cd6da72 | 5,206 | py | Python | manimlib/animation/growing.py | SidewayOutput/Basic-Manim | 4dea4b00daa7b6f66ed7b26659045f67609d83b6 | [
"MIT"
] | null | null | null | manimlib/animation/growing.py | SidewayOutput/Basic-Manim | 4dea4b00daa7b6f66ed7b26659045f67609d83b6 | [
"MIT"
] | null | null | null | manimlib/animation/growing.py | SidewayOutput/Basic-Manim | 4dea4b00daa7b6f66ed7b26659045f67609d83b6 | [
"MIT"
] | null | null | null | from manimlib.animation.transform import Transform
from manimlib.basic.basic_function import to_expand_lists, to_get_point
from manimlib.constants import PI
from manimlib.utils.config_ops import generate_args, merge_config_kwargs
class GrowFromPoint(Transform):
    """Transform-based animation: the mobject grows from a point (or from
    another mobject's location) up to its full size.

    Extra positional ``args`` map onto ``point_color`` and ``scale``
    (defaults ``None`` and ``0``) via the project's ``generate_args``
    helper; matching keyword arguments are merged by
    ``merge_config_kwargs``.
    """
    def __init__(self, mobject, mobject_or_point="get_center()", *args, **kwargs):
        # Resolve the growth origin.  The string default is evaluated
        # against `mobject` by to_get_point (presumably calling
        # mobject.get_center()) -- TODO confirm to_get_point semantics.
        self.mobject_or_point = to_get_point(mobject_or_point, mobject)
        self.args_name = ["point_color", "scale"]
        self.args = [None, 0]
        [self.point_color, self.scale] = \
            generate_args(self, args, self.args)
        kwargs = merge_config_kwargs(self, kwargs, self.args_name)
        super().__init__(mobject, **kwargs)

    def create_target(self):
        # The animation ends at the mobject in its original state.
        return self.mobject

    def create_starting_mobject(self):
        # Start from a scaled-down copy at the origin point; with the
        # default scale=0 the stroke width collapses to 0 as well.
        mobject = self.create_initial_mobject()
        mobject.set_stroke(
            width=(mobject.stroke_width*self.scale))
        if self.point_color:
            mobject.set_color(self.point_color)
        return mobject

    def create_initial_mobject(self):
        mobject = super().create_starting_mobject()
        return mobject.scale(self.scale).move_to(self.mobject_or_point)
class GrowFromCenter(GrowFromPoint):
    """Animate *mobject* growing outward from its own center point."""

    def __init__(self, mobject, *args, **kwargs):
        center_point = mobject.get_center()
        super().__init__(mobject, center_point, *args, **kwargs)
class GrowFromEdge(GrowFromPoint):
    """Animate *mobject* growing from one of its critical (edge/corner)
    points, selected by the ``edge`` direction vector."""

    def __init__(self, mobject, edge=[-1, 1, 0], *args, **kwargs):
        anchor = mobject.get_critical_point(edge)
        super().__init__(mobject, anchor, *args, **kwargs)
class GrowFromSide(GrowFromPoint):
    """Grow by stretching out from one side of the mobject.

    ``side`` selects both the anchor critical point and the axes to
    squash; ``center=True`` keeps the squashed start copy centred on the
    mobject instead of pinned to the side.
    """
    def __init__(self, mobject, side=[0, 1, 0], center=False, *args, **kwargs):
        self.side = side
        self.center = center
        super().__init__(mobject, mobject.get_critical_point(side), *args, **kwargs)

    def create_initial_mobject(self):
        mobject = self.mobject.copy()
        # Axes with a non-zero `side` component get squashed by `scale`.
        dim = [i for i, each in enumerate(self.side) if each]
        mobject.stretch_to_fit(to_expand_lists(self.scale, dim), dim)
        if not self.center:
            mobject.move_to(self.mobject_or_point)
        else:
            mobject.move_to(self.mobject.get_center())
        return mobject
class DiminishToPoint(GrowFromPoint):
    """Inverse of GrowFromPoint: shrink the mobject down into a point."""
    def __init__(self, mobject, mobject_or_point="get_center()", *args, **kwargs):
        super().__init__(mobject, mobject_or_point, *args, **kwargs)

    def create_target(self):
        # End state: the scaled-down copy at the target point.
        mobject = self.create_final_mobject()
        mobject.set_stroke(
            width=(mobject.stroke_width*self.scale))
        if self.point_color:
            mobject.set_color(self.point_color)
        return mobject

    def create_starting_mobject(self):
        # Start from the mobject exactly as it is.
        mobject = self.mobject.copy()
        return mobject

    def create_final_mobject(self):
        mobject = self.mobject.copy()
        return mobject.scale(self.scale).move_to(self.mobject_or_point)
class DiminishToCenter(DiminishToPoint):
    """Shrink *mobject* down into its own center point."""

    def __init__(self, mobject, *args, **kwargs):
        center_point = mobject.get_center()
        super().__init__(mobject, center_point, *args, **kwargs)
class DiminishToEdge(DiminishToPoint):
    """Shrink *mobject* toward one of its critical (edge/corner) points,
    selected by the ``edge`` direction vector."""

    def __init__(self, mobject, edge=[-1, 1, 0], *args, **kwargs):
        anchor = mobject.get_critical_point(edge)
        super().__init__(mobject, anchor, *args, **kwargs)
class DiminishToSide(DiminishToPoint):
def __init__(self, mobject, side=[0, 1, 0], center=False, *args, **kwargs):
self.side = side
self.center = center
super().__init__(mobject, mobject.get_critical_point(side), *args, **kwargs)
def create_final_mobject(self):
mobject = self.mobject.copy()
dim = [i for i, each in enumerate(self.side) if each]
mobject.stretch_to_fit(to_expand_lists(self.scale, dim), dim)
if not self.center:
mobject.move_to(self.mobject_or_point)
else:
mobject.move_to(self.mobject.get_center())
return mobject
class GrowArrow(GrowFromPoint):
def __init__(self, arrow, point_by_ratio=0, *args, **kwargs):
super().__init__(arrow, point_by_ratio, *args, **kwargs)
class ExpandArrow(GrowArrow):
def __init__(self, arrow, point_by_ratio=1, *args, **kwargs):
super().__init__(arrow, point_by_ratio, *args, **kwargs)
class DiminishArrow(DiminishToPoint):
def __init__(self, arrow, point_by_ratio=1, *args, **kwargs):
super().__init__(arrow, point_by_ratio, *args, **kwargs)
class RetractArrow(DiminishToPoint):
def __init__(self, arrow, point_by_ratio=0, *args, **kwargs):
super().__init__(arrow, point_by_ratio, *args, **kwargs)
class SpinInFromNothing(GrowFromCenter):
CONFIG = {
"path_arc": PI,
}
class SpinInFrom(GrowFromPoint):
CONFIG = {
"path_arc": PI,
}
def __init__(self, mobject, point="get_center()", *args, **kwargs):
super().__init__(mobject, point, *args, **kwargs)
class SpinOutFrom(SpinInFrom):
CONFIG = {
"path_arc": -PI,
}
class SpinInTo(DiminishToPoint):
CONFIG = {
"path_arc": PI,
}
def __init__(self, mobject, point="get_center()", *args, **kwargs):
super().__init__(mobject, point, *args, **kwargs)
class SpinOutTo(SpinInTo):
CONFIG = {
"path_arc": -PI,
}
| 32.135802 | 84 | 0.664426 | from manimlib.animation.transform import Transform
from manimlib.basic.basic_function import to_expand_lists, to_get_point
from manimlib.constants import PI
from manimlib.utils.config_ops import generate_args, merge_config_kwargs
class GrowFromPoint(Transform):
def __init__(self, mobject, mobject_or_point="get_center()", *args, **kwargs):
self.mobject_or_point = to_get_point(mobject_or_point, mobject)
self.args_name = ["point_color", "scale"]
self.args = [None, 0]
[self.point_color, self.scale] = \
generate_args(self, args, self.args)
kwargs = merge_config_kwargs(self, kwargs, self.args_name)
super().__init__(mobject, **kwargs)
def create_target(self):
return self.mobject
def create_starting_mobject(self):
mobject = self.create_initial_mobject()
mobject.set_stroke(
width=(mobject.stroke_width*self.scale))
if self.point_color:
mobject.set_color(self.point_color)
return mobject
def create_initial_mobject(self):
mobject = super().create_starting_mobject()
return mobject.scale(self.scale).move_to(self.mobject_or_point)
class GrowFromCenter(GrowFromPoint):
def __init__(self, mobject, *args, **kwargs):
super().__init__(mobject, mobject.get_center(), *args, **kwargs)
class GrowFromEdge(GrowFromPoint):
def __init__(self, mobject, edge=[-1, 1, 0], *args, **kwargs):
super().__init__(mobject, mobject.get_critical_point(edge), *args, **kwargs)
class GrowFromSide(GrowFromPoint):
def __init__(self, mobject, side=[0, 1, 0], center=False, *args, **kwargs):
self.side = side
self.center = center
super().__init__(mobject, mobject.get_critical_point(side), *args, **kwargs)
def create_initial_mobject(self):
mobject = self.mobject.copy()
dim = [i for i, each in enumerate(self.side) if each]
mobject.stretch_to_fit(to_expand_lists(self.scale, dim), dim)
if not self.center:
mobject.move_to(self.mobject_or_point)
else:
mobject.move_to(self.mobject.get_center())
return mobject
class DiminishToPoint(GrowFromPoint):
def __init__(self, mobject, mobject_or_point="get_center()", *args, **kwargs):
super().__init__(mobject, mobject_or_point, *args, **kwargs)
def create_target(self):
mobject = self.create_final_mobject()
mobject.set_stroke(
width=(mobject.stroke_width*self.scale))
if self.point_color:
mobject.set_color(self.point_color)
return mobject
def create_starting_mobject(self):
mobject = self.mobject.copy()
return mobject
def create_final_mobject(self):
mobject = self.mobject.copy()
return mobject.scale(self.scale).move_to(self.mobject_or_point)
class DiminishToCenter(DiminishToPoint):
def __init__(self, mobject, *args, **kwargs):
super().__init__(mobject, mobject.get_center(), *args, **kwargs)
class DiminishToEdge(DiminishToPoint):
def __init__(self, mobject, edge=[-1, 1, 0], *args, **kwargs):
super().__init__(mobject, mobject.get_critical_point(edge), *args, **kwargs)
class DiminishToSide(DiminishToPoint):
def __init__(self, mobject, side=[0, 1, 0], center=False, *args, **kwargs):
self.side = side
self.center = center
super().__init__(mobject, mobject.get_critical_point(side), *args, **kwargs)
def create_final_mobject(self):
mobject = self.mobject.copy()
dim = [i for i, each in enumerate(self.side) if each]
mobject.stretch_to_fit(to_expand_lists(self.scale, dim), dim)
if not self.center:
mobject.move_to(self.mobject_or_point)
else:
mobject.move_to(self.mobject.get_center())
return mobject
class GrowArrow(GrowFromPoint):
def __init__(self, arrow, point_by_ratio=0, *args, **kwargs):
super().__init__(arrow, point_by_ratio, *args, **kwargs)
class ExpandArrow(GrowArrow):
def __init__(self, arrow, point_by_ratio=1, *args, **kwargs):
super().__init__(arrow, point_by_ratio, *args, **kwargs)
class DiminishArrow(DiminishToPoint):
def __init__(self, arrow, point_by_ratio=1, *args, **kwargs):
super().__init__(arrow, point_by_ratio, *args, **kwargs)
class RetractArrow(DiminishToPoint):
def __init__(self, arrow, point_by_ratio=0, *args, **kwargs):
super().__init__(arrow, point_by_ratio, *args, **kwargs)
class SpinInFromNothing(GrowFromCenter):
CONFIG = {
"path_arc": PI,
}
class SpinInFrom(GrowFromPoint):
CONFIG = {
"path_arc": PI,
}
def __init__(self, mobject, point="get_center()", *args, **kwargs):
super().__init__(mobject, point, *args, **kwargs)
class SpinOutFrom(SpinInFrom):
CONFIG = {
"path_arc": -PI,
}
class SpinInTo(DiminishToPoint):
CONFIG = {
"path_arc": PI,
}
def __init__(self, mobject, point="get_center()", *args, **kwargs):
super().__init__(mobject, point, *args, **kwargs)
class SpinOutTo(SpinInTo):
CONFIG = {
"path_arc": -PI,
}
| true | true |
f726991caedc24166bb6ed9a085571aa0555465e | 4,333 | py | Python | sdks/python/appcenter_sdk/models/InternalHockeyAppCutoverStatusResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/appcenter_sdk/models/InternalHockeyAppCutoverStatusResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/appcenter_sdk/models/InternalHockeyAppCutoverStatusResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class InternalHockeyAppCutoverStatusResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
not_requested = "not_requested"
requested = "requested"
in_progress = "in_progress"
completed = "completed"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'string',
'status': 'string'
}
attribute_map = {
'id': 'id',
'status': 'status'
}
def __init__(self, id=None, status=None): # noqa: E501
"""InternalHockeyAppCutoverStatusResponse - a model defined in Swagger""" # noqa: E501
self._id = None
self._status = None
self.discriminator = None
self.id = id
if status is not None:
self.status = status
@property
def id(self):
"""Gets the id of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
The ID of the app # noqa: E501
:return: The id of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
:rtype: string
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this InternalHockeyAppCutoverStatusResponse.
The ID of the app # noqa: E501
:param id: The id of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
:type: string
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def status(self):
"""Gets the status of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
Does the HockeyApp app have crashes from within the last 90 days? # noqa: E501
:return: The status of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
:rtype: string
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this InternalHockeyAppCutoverStatusResponse.
Does the HockeyApp app have crashes from within the last 90 days? # noqa: E501
:param status: The status of this InternalHockeyAppCutoverStatusResponse. # noqa: E501
:type: string
"""
allowed_values = [undefinedundefinedundefinedundefined] # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InternalHockeyAppCutoverStatusResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.080537 | 95 | 0.587122 |
import pprint
import re
import six
class InternalHockeyAppCutoverStatusResponse(object):
not_requested = "not_requested"
requested = "requested"
in_progress = "in_progress"
completed = "completed"
swagger_types = {
'id': 'string',
'status': 'string'
}
attribute_map = {
'id': 'id',
'status': 'status'
}
def __init__(self, id=None, status=None):
self._id = None
self._status = None
self.discriminator = None
self.id = id
if status is not None:
self.status = status
@property
def id(self):
return self._id
@id.setter
def id(self, id):
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`")
self._id = id
@property
def status(self):
return self._status
@status.setter
def status(self, status):
allowed_values = [undefinedundefinedundefinedundefined]
self._status = status
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, InternalHockeyAppCutoverStatusResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7269945af61fa78d01b7ddd1fc00faa4eb56f7f | 8,120 | py | Python | utils/swift_build_support/swift_build_support/targets.py | DougGregor/swift | 16b686989c12bb1acf9d1a490c1f301d71428f47 | [
"Apache-2.0"
] | 11 | 2016-01-26T22:56:55.000Z | 2022-03-28T05:57:56.000Z | utils/swift_build_support/swift_build_support/targets.py | danielgalasko/swift | e2501ad9f9e53a7156148d6da5c7796e487723f8 | [
"Apache-2.0"
] | 2 | 2019-04-11T21:36:21.000Z | 2021-04-14T06:09:10.000Z | utils/swift_build_support/swift_build_support/targets.py | DougGregor/swift | 16b686989c12bb1acf9d1a490c1f301d71428f47 | [
"Apache-2.0"
] | 1 | 2016-08-24T17:30:38.000Z | 2016-08-24T17:30:38.000Z | # swift_build_support/targets.py - Build target helpers -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import os
import platform
class Platform(object):
"""
Abstract representation of a platform Swift can run on.
"""
def __init__(self, name, archs, sdk_name=None):
"""
Create a platform with the given name and list of architectures.
"""
self.name = name
self.targets = [Target(self, arch) for arch in archs]
# FIXME: Eliminate this argument; apparently the SDK names are
# internally a private implementation detail of the build script, so we
# should just make them the same as the platform name.
self.sdk_name = name.upper() if sdk_name is None else sdk_name
# Add a property for each arch.
for target in self.targets:
setattr(self, target.arch, target)
@property
def is_darwin(self):
"""Convenience function for checking if this is a Darwin platform."""
return isinstance(self, DarwinPlatform)
@property
def supports_benchmark(self):
# By default, we don't support benchmarks on most platforms.
return False
def contains(self, target_name):
"""
Returns True if the given target name belongs to a one of this
platform's targets.
"""
for target in self.targets:
if target.name == target_name:
return True
return False
class DarwinPlatform(Platform):
def __init__(self, name, archs, sdk_name=None, is_simulator=False):
self.is_simulator = is_simulator
super(DarwinPlatform, self).__init__(name, archs, sdk_name)
@property
def is_embedded(self):
"""Check if this is a Darwin platform for embedded devices."""
return self.name != "macosx"
@property
def supports_benchmark(self):
# By default, on Darwin we support benchmarks on all non-simulator
# platforms.
return not self.is_simulator
class Target(object):
"""
Abstract representation of a target Swift can run on.
"""
def __init__(self, platform, arch):
self.platform = platform
self.arch = arch
# Delegate to the platform, this is usually not arch specific.
self.supports_benchmark = self.platform.supports_benchmark
@property
def name(self):
return "{}-{}".format(self.platform.name, self.arch)
class StdlibDeploymentTarget(object):
OSX = DarwinPlatform("macosx", archs=["x86_64"],
sdk_name="OSX")
iOS = DarwinPlatform("iphoneos", archs=["armv7", "armv7s", "arm64"],
sdk_name="IOS")
iOSSimulator = DarwinPlatform("iphonesimulator", archs=["i386", "x86_64"],
sdk_name="IOS_SIMULATOR",
is_simulator=True)
# Never build/test benchmarks on iOS armv7s.
iOS.armv7s.supports_benchmark = False
AppleTV = DarwinPlatform("appletvos", archs=["arm64"],
sdk_name="TVOS")
AppleTVSimulator = DarwinPlatform("appletvsimulator", archs=["x86_64"],
sdk_name="TVOS_SIMULATOR",
is_simulator=True)
AppleWatch = DarwinPlatform("watchos", archs=["armv7k"],
sdk_name="WATCHOS")
AppleWatchSimulator = DarwinPlatform("watchsimulator", archs=["i386"],
sdk_name="WATCHOS_SIMULATOR",
is_simulator=True)
Linux = Platform("linux", archs=[
"x86_64",
"armv6",
"armv7",
"aarch64",
"ppc64",
"ppc64le",
"s390x"])
FreeBSD = Platform("freebsd", archs=["x86_64"])
Cygwin = Platform("cygwin", archs=["x86_64"])
Android = Platform("android", archs=["armv7"])
# The list of known platforms.
known_platforms = [
OSX,
iOS, iOSSimulator,
AppleTV, AppleTVSimulator,
AppleWatch, AppleWatchSimulator,
Linux,
FreeBSD,
Cygwin,
Android]
# Cache of targets by name.
_targets_by_name = dict((target.name, target)
for platform in known_platforms
for target in platform.targets)
@staticmethod
def host_target():
"""
Return the host target for the build machine, if it is one of
the recognized targets. Otherwise, return None.
"""
system = platform.system()
machine = platform.machine()
if system == 'Linux':
if machine == 'x86_64':
return StdlibDeploymentTarget.Linux.x86_64
elif machine.startswith('armv7'):
# linux-armv7* is canonicalized to 'linux-armv7'
return StdlibDeploymentTarget.Linux.armv7
elif machine.startswith('armv6'):
# linux-armv6* is canonicalized to 'linux-armv6'
return StdlibDeploymentTarget.Linux.armv6
elif machine == 'aarch64':
return StdlibDeploymentTarget.Linux.aarch64
elif machine == 'ppc64':
return StdlibDeploymentTarget.Linux.ppc64
elif machine == 'ppc64le':
return StdlibDeploymentTarget.Linux.ppc64le
elif machine == 's390x':
return StdlibDeploymentTarget.Linux.s390x
elif system == 'Darwin':
if machine == 'x86_64':
return StdlibDeploymentTarget.OSX.x86_64
elif system == 'FreeBSD':
if machine == 'amd64':
return StdlibDeploymentTarget.FreeBSD.x86_64
elif system == 'CYGWIN_NT-10.0':
if machine == 'x86_64':
return StdlibDeploymentTarget.Cygwin.x86_64
return None
@staticmethod
def default_stdlib_deployment_targets():
"""
Return targets for the Swift stdlib, based on the build machine.
If the build machine is not one of the recognized ones, return None.
"""
host_target = StdlibDeploymentTarget.host_target()
if host_target is None:
return None
# OS X build machines configure all Darwin platforms by default.
# Put iOS native targets last so that we test them last
# (it takes a long time).
if host_target == StdlibDeploymentTarget.OSX.x86_64:
return [host_target] + \
StdlibDeploymentTarget.iOSSimulator.targets + \
StdlibDeploymentTarget.AppleTVSimulator.targets + \
StdlibDeploymentTarget.AppleWatchSimulator.targets + \
StdlibDeploymentTarget.iOS.targets + \
StdlibDeploymentTarget.AppleTV.targets + \
StdlibDeploymentTarget.AppleWatch.targets
else:
# All other machines only configure their host stdlib by default.
return [host_target]
@classmethod
def get_target_for_name(cls, name):
return cls._targets_by_name.get(name)
def install_prefix():
"""
Returns the default path at which built Swift products (like bin, lib,
and include) will be installed, based on the host machine's operating
system.
"""
if platform.system() == 'Darwin':
return '/Applications/Xcode.app/Contents/Developer/Toolchains/' + \
'XcodeDefault.xctoolchain/usr'
else:
return '/usr'
def darwin_toolchain_prefix(darwin_install_prefix):
"""
Given the install prefix for a Darwin system, and assuming that that path
is to a .xctoolchain directory, return the path to the .xctoolchain
directory.
"""
return os.path.split(darwin_install_prefix)[0]
| 34.261603 | 79 | 0.607759 |
import os
import platform
class Platform(object):
def __init__(self, name, archs, sdk_name=None):
self.name = name
self.targets = [Target(self, arch) for arch in archs]
self.sdk_name = name.upper() if sdk_name is None else sdk_name
for target in self.targets:
setattr(self, target.arch, target)
@property
def is_darwin(self):
return isinstance(self, DarwinPlatform)
@property
def supports_benchmark(self):
return False
def contains(self, target_name):
for target in self.targets:
if target.name == target_name:
return True
return False
class DarwinPlatform(Platform):
def __init__(self, name, archs, sdk_name=None, is_simulator=False):
self.is_simulator = is_simulator
super(DarwinPlatform, self).__init__(name, archs, sdk_name)
@property
def is_embedded(self):
return self.name != "macosx"
@property
def supports_benchmark(self):
# By default, on Darwin we support benchmarks on all non-simulator
# platforms.
return not self.is_simulator
class Target(object):
def __init__(self, platform, arch):
self.platform = platform
self.arch = arch
# Delegate to the platform, this is usually not arch specific.
self.supports_benchmark = self.platform.supports_benchmark
@property
def name(self):
return "{}-{}".format(self.platform.name, self.arch)
class StdlibDeploymentTarget(object):
OSX = DarwinPlatform("macosx", archs=["x86_64"],
sdk_name="OSX")
iOS = DarwinPlatform("iphoneos", archs=["armv7", "armv7s", "arm64"],
sdk_name="IOS")
iOSSimulator = DarwinPlatform("iphonesimulator", archs=["i386", "x86_64"],
sdk_name="IOS_SIMULATOR",
is_simulator=True)
# Never build/test benchmarks on iOS armv7s.
iOS.armv7s.supports_benchmark = False
AppleTV = DarwinPlatform("appletvos", archs=["arm64"],
sdk_name="TVOS")
AppleTVSimulator = DarwinPlatform("appletvsimulator", archs=["x86_64"],
sdk_name="TVOS_SIMULATOR",
is_simulator=True)
AppleWatch = DarwinPlatform("watchos", archs=["armv7k"],
sdk_name="WATCHOS")
AppleWatchSimulator = DarwinPlatform("watchsimulator", archs=["i386"],
sdk_name="WATCHOS_SIMULATOR",
is_simulator=True)
Linux = Platform("linux", archs=[
"x86_64",
"armv6",
"armv7",
"aarch64",
"ppc64",
"ppc64le",
"s390x"])
FreeBSD = Platform("freebsd", archs=["x86_64"])
Cygwin = Platform("cygwin", archs=["x86_64"])
Android = Platform("android", archs=["armv7"])
# The list of known platforms.
known_platforms = [
OSX,
iOS, iOSSimulator,
AppleTV, AppleTVSimulator,
AppleWatch, AppleWatchSimulator,
Linux,
FreeBSD,
Cygwin,
Android]
# Cache of targets by name.
_targets_by_name = dict((target.name, target)
for platform in known_platforms
for target in platform.targets)
@staticmethod
def host_target():
system = platform.system()
machine = platform.machine()
if system == 'Linux':
if machine == 'x86_64':
return StdlibDeploymentTarget.Linux.x86_64
elif machine.startswith('armv7'):
# linux-armv7* is canonicalized to 'linux-armv7'
return StdlibDeploymentTarget.Linux.armv7
elif machine.startswith('armv6'):
# linux-armv6* is canonicalized to 'linux-armv6'
return StdlibDeploymentTarget.Linux.armv6
elif machine == 'aarch64':
return StdlibDeploymentTarget.Linux.aarch64
elif machine == 'ppc64':
return StdlibDeploymentTarget.Linux.ppc64
elif machine == 'ppc64le':
return StdlibDeploymentTarget.Linux.ppc64le
elif machine == 's390x':
return StdlibDeploymentTarget.Linux.s390x
elif system == 'Darwin':
if machine == 'x86_64':
return StdlibDeploymentTarget.OSX.x86_64
elif system == 'FreeBSD':
if machine == 'amd64':
return StdlibDeploymentTarget.FreeBSD.x86_64
elif system == 'CYGWIN_NT-10.0':
if machine == 'x86_64':
return StdlibDeploymentTarget.Cygwin.x86_64
return None
@staticmethod
def default_stdlib_deployment_targets():
host_target = StdlibDeploymentTarget.host_target()
if host_target is None:
return None
# OS X build machines configure all Darwin platforms by default.
# Put iOS native targets last so that we test them last
# (it takes a long time).
if host_target == StdlibDeploymentTarget.OSX.x86_64:
return [host_target] + \
StdlibDeploymentTarget.iOSSimulator.targets + \
StdlibDeploymentTarget.AppleTVSimulator.targets + \
StdlibDeploymentTarget.AppleWatchSimulator.targets + \
StdlibDeploymentTarget.iOS.targets + \
StdlibDeploymentTarget.AppleTV.targets + \
StdlibDeploymentTarget.AppleWatch.targets
else:
# All other machines only configure their host stdlib by default.
return [host_target]
@classmethod
def get_target_for_name(cls, name):
return cls._targets_by_name.get(name)
def install_prefix():
if platform.system() == 'Darwin':
return '/Applications/Xcode.app/Contents/Developer/Toolchains/' + \
'XcodeDefault.xctoolchain/usr'
else:
return '/usr'
def darwin_toolchain_prefix(darwin_install_prefix):
return os.path.split(darwin_install_prefix)[0]
| true | true |
f7269969627b886f2d9ff179c1f78a4abf30f3d0 | 1,426 | py | Python | Python/DataStructures/Trie.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | 3 | 2021-03-22T14:13:56.000Z | 2022-03-01T03:06:22.000Z | Python/DataStructures/Trie.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | null | null | null | Python/DataStructures/Trie.py | AndrewMcShane/DevMakingSource | fe58fa093e0ce2d2748cb3826d27be6b0ac34149 | [
"MIT"
] | null | null | null | class TrieNode:
def __init__(self):
self.children = {}
self.isWord = False
class Trie:
def __init__(self):
self.root = TrieNode()
def put(self, word):
current = self.root
for i in range(0, len(word)):
child = word[i]
tmp = None
try:
tmp = current.children[child]
except KeyError:
tmp = TrieNode()
current.children[child] = tmp
current = tmp
current.isWord = True
def contains(self, word):
current = self.root
for i in range(0, len(word)):
child = word[i]
try:
current = current.children[child]
except KeyError:
return False
return current.isWord
def remove(self, word):
self._removeRecursive(self.root, word, 0)
def _removeRecursive(self, current, word, depth):
if current is None:
return None
if depth == len(word):
current.isWord = False
else:
child = word[depth]
if child in current.children:
self._removeRecursive(current.children[child], word, depth + 1)
else:
del current.children[child]
if not bool(current.children):
return current
return None
| 26.90566 | 80 | 0.497896 | class TrieNode:
def __init__(self):
self.children = {}
self.isWord = False
class Trie:
def __init__(self):
self.root = TrieNode()
def put(self, word):
current = self.root
for i in range(0, len(word)):
child = word[i]
tmp = None
try:
tmp = current.children[child]
except KeyError:
tmp = TrieNode()
current.children[child] = tmp
current = tmp
current.isWord = True
def contains(self, word):
current = self.root
for i in range(0, len(word)):
child = word[i]
try:
current = current.children[child]
except KeyError:
return False
return current.isWord
def remove(self, word):
self._removeRecursive(self.root, word, 0)
def _removeRecursive(self, current, word, depth):
if current is None:
return None
if depth == len(word):
current.isWord = False
else:
child = word[depth]
if child in current.children:
self._removeRecursive(current.children[child], word, depth + 1)
else:
del current.children[child]
if not bool(current.children):
return current
return None
| true | true |
f72699b30c150ef217cd9689772840359f84dd62 | 8,999 | py | Python | sunpy/coordinates/wcs_utils.py | LaudateCorpus1/sunpy | f7bdf22e5229a577c5851c1e05502f0d68b4b369 | [
"BSD-2-Clause"
] | 628 | 2015-01-14T17:34:10.000Z | 2022-03-29T06:07:50.000Z | sunpy/coordinates/wcs_utils.py | wtbarnes/sunpy | f7bdf22e5229a577c5851c1e05502f0d68b4b369 | [
"BSD-2-Clause"
] | 3,983 | 2015-01-03T11:16:21.000Z | 2022-03-31T16:55:38.000Z | sunpy/coordinates/wcs_utils.py | wtbarnes/sunpy | f7bdf22e5229a577c5851c1e05502f0d68b4b369 | [
"BSD-2-Clause"
] | 582 | 2015-01-14T10:09:24.000Z | 2022-03-29T06:07:12.000Z | import numpy as np
import astropy.units as u
import astropy.wcs.utils
from astropy.coordinates import (
ITRS,
BaseCoordinateFrame,
CartesianRepresentation,
SkyCoord,
SphericalRepresentation,
)
from astropy.wcs import WCS
from sunpy import log
from .frames import (
BaseCoordinateFrame,
Heliocentric,
HeliographicCarrington,
HeliographicStonyhurst,
Helioprojective,
SunPyBaseCoordinateFrame,
)
__all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']
try:
# TODO: Remove vendored version after Astropy 5.0
from astropy.wcs.utils import obsgeo_to_frame
except ImportError:
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an `~builtin_frames.ITRS` coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
The time assiociated with the coordinate, will be passed to
`~.builtin_frames.ITRS` as the obstime keyword.
Returns
-------
`~.builtin_frames.ITRS`
An `~.builtin_frames.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the second 3 are the coordinate in a spherical
representation.
This function priorities reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
"""
if (obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
): # NOQA
raise ValueError(f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array")
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
def solar_wcs_frame_mapping(wcs):
    """
    This function registers the coordinates frames to their FITS-WCS coordinate
    type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.

    Parameters
    ----------
    wcs : astropy.wcs.WCS
        The WCS to construct a coordinate frame for.

    Returns
    -------
    astropy.coordinates.BaseCoordinateFrame
        The frame corresponding to the WCS ctypes, or `None` (implicitly) if
        the ctypes are not recognized as a solar coordinate system.
    """
    # A frame attached directly to the WCS object takes precedence over
    # anything encoded in the WCS keywords.
    if hasattr(wcs, "coordinate_frame"):
        return wcs.coordinate_frame

    # An empty DATE-OBS string is normalized to None here.
    dateobs = wcs.wcs.dateobs or None

    # Get observer coordinate from the WCS auxiliary information.
    # All three keywords of a set must be present for that frame to be used.
    required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],
                      HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}

    # Get rsun from the WCS auxiliary information (RSUN_REF is stored in meters)
    rsun = wcs.wcs.aux.rsun_ref
    if rsun is not None:
        rsun *= u.m

    # TODO: remove these errors in sunpy 4.1
    bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']
                 if hasattr(wcs, attr)]
    if len(bad_attrs):
        raise ValueError(f"The {' and '.join(bad_attrs)} attribute(s) on a WCS "
                         "are no longer supported.")

    # Build the observer from the auxiliary keywords.  If both frames have a
    # complete keyword set, the later dict entry (Carrington) overwrites the
    # earlier one.
    observer = None
    for frame, attr_names in required_attrs.items():
        attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]
        if all([attr is not None for attr in attrs]):
            kwargs = {'obstime': dateobs}
            if rsun is not None:
                kwargs['rsun'] = rsun
            if issubclass(frame, HeliographicCarrington):
                # For Carrington, the coordinate acts as its own observer
                kwargs['observer'] = 'self'
            observer = frame(attrs[0] * u.deg,
                             attrs[1] * u.deg,
                             attrs[2] * u.m,
                             **kwargs)

    # Read the observer out of obsgeo for ground based observers
    if observer is None:
        try:
            observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)
            observer = SkyCoord(observer, rsun=rsun)
        except ValueError as e:
            # The helper function assumes you know the obsgeo coords you are
            # parsing are good, we are not sure, so catch the error.
            # This approach could lead to an invalid observer (i.e. one of the
            # coords being NaN), but only if the WCS has been constructed like that.
            log.debug(f"Could not parse obsgeo coordinates from WCS:\n{e}")

    # Collect all of the possible frame attributes, although some may be removed later
    frame_args = {'obstime': dateobs}
    if observer is not None:
        frame_args['observer'] = observer
    if rsun is not None:
        frame_args['rsun'] = rsun

    frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)

    if frame_class:
        # Remove attributes that do not apply to the target frame class
        if frame_class == HeliographicStonyhurst:
            frame_args.pop('observer', None)
        if frame_class == Heliocentric:
            frame_args.pop('rsun', None)

        return frame_class(**frame_args)
def _sunpy_frame_class_from_ctypes(ctypes):
    """
    Return the sunpy frame class matching a set of FITS-WCS ctype values,
    or `None` when no known pair of axis types is present.
    """
    # Only the first four characters identify the coordinate type; the rest
    # of a ctype (e.g. "HPLN-TAN") encodes the projection.
    axis_types = {ctype[:4] for ctype in ctypes}

    frame_by_axes = (
        ({'HPLN', 'HPLT'}, Helioprojective),
        ({'HGLN', 'HGLT'}, HeliographicStonyhurst),
        ({'CRLN', 'CRLT'}, HeliographicCarrington),
        ({'SOLX', 'SOLY'}, Heliocentric),
    )
    return next(
        (frame for axes, frame in frame_by_axes if axes.issubset(axis_types)),
        None,
    )
def _set_wcs_aux_obs_coord(wcs, obs_frame):
    """
    Store the observer location carried by *obs_frame* in-place on ``wcs.wcs.aux``.

    Parameters
    ----------
    wcs : astropy.wcs.WCS
    obs_frame : astropy.coordinates.SkyCoord, astropy.coordinates.CoordinateFrame

    Raises
    ------
    ValueError
        If the observer is not in a Stonyhurst or Carrington heliographic frame.
    """
    # A SkyCoord wraps its frame; unwrap so the attribute access below is uniform.
    frame = getattr(obs_frame, 'frame', obs_frame)
    if isinstance(frame, HeliographicStonyhurst):
        wcs.wcs.aux.hgln_obs = frame.lon.to_value(u.deg)
    elif isinstance(frame, HeliographicCarrington):
        wcs.wcs.aux.crln_obs = frame.lon.to_value(u.deg)
    else:
        raise ValueError('obs_coord must be in a Stonyhurst or Carrington frame')
    # Latitude and observer distance use the same FITS keywords for both
    # heliographic variants.
    wcs.wcs.aux.hglt_obs = frame.lat.to_value(u.deg)
    wcs.wcs.aux.dsun_obs = frame.radius.to_value(u.m)
def solar_frame_to_wcs_mapping(frame, projection='TAN'):
    """
    For a given frame, this function returns the corresponding WCS object.
    It registers the WCS coordinates types from their associated frame in the
    `astropy.wcs.utils.celestial_frame_to_wcs` registry.

    Parameters
    ----------
    frame : astropy.coordinates.BaseCoordinateFrame
    projection : str, optional

    Returns
    -------
    astropy.wcs.WCS
        A two-axis WCS describing *frame*, or None for unrecognised frames.
    """
    wcs = WCS(naxis=2)
    if hasattr(frame, 'rsun'):
        wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)
    if getattr(frame, 'observer', None) is not None:
        # 'self' means the frame is its own observer (Carrington convention).
        if isinstance(frame.observer, BaseCoordinateFrame):
            observer = frame.observer
        elif frame.observer == 'self':
            observer = frame
        _set_wcs_aux_obs_coord(wcs, observer)
    if isinstance(frame, SunPyBaseCoordinateFrame) and frame.obstime:
        wcs.wcs.dateobs = frame.obstime.utc.isot
    # (frame class, x-ctype stem, y-ctype stem, axis unit, carries projection?)
    # Order matters: Carrington must be tested before Stonyhurst in case of
    # subclass relationships between the heliographic frames.
    axis_table = (
        (Helioprojective, 'HPLN', 'HPLT', 'arcsec', True),
        (Heliocentric, 'SOLX', 'SOLY', 'deg', False),
        (HeliographicCarrington, 'CRLN', 'CRLT', 'deg', True),
        (HeliographicStonyhurst, 'HGLN', 'HGLT', 'deg', True),
    )
    for frame_class, x_stem, y_stem, unit, projected in axis_table:
        if isinstance(frame, frame_class):
            suffix = '-' + projection if projected else ''
            wcs.wcs.cunit = [unit, unit]
            wcs.wcs.ctype = [x_stem + suffix, y_stem + suffix]
            return wcs
    # Not a solar frame this module knows about.
    return None
# Register the sunpy converters with astropy's WCS<->frame registries so that
# astropy's wcs_to_celestial_frame / celestial_frame_to_wcs helpers can resolve
# the solar coordinate frames defined in this package.
astropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])
astropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])
| 33.830827 | 88 | 0.628737 | import numpy as np
import astropy.units as u
import astropy.wcs.utils
from astropy.coordinates import (
ITRS,
BaseCoordinateFrame,
CartesianRepresentation,
SkyCoord,
SphericalRepresentation,
)
from astropy.wcs import WCS
from sunpy import log
from .frames import (
BaseCoordinateFrame,
Heliocentric,
HeliographicCarrington,
HeliographicStonyhurst,
Helioprojective,
SunPyBaseCoordinateFrame,
)
__all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']
try:
from astropy.wcs.utils import obsgeo_to_frame
except ImportError:
def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an `~builtin_frames.ITRS` coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
The time assiociated with the coordinate, will be passed to
`~.builtin_frames.ITRS` as the obstime keyword.
Returns
-------
`~.builtin_frames.ITRS`
An `~.builtin_frames.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the second 3 are the coordinate in a spherical
representation.
This function priorities reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
"""
if (obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
):
raise ValueError(f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array")
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
def solar_wcs_frame_mapping(wcs):
if hasattr(wcs, "coordinate_frame"):
return wcs.coordinate_frame
dateobs = wcs.wcs.dateobs or None
required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],
HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}
rsun = wcs.wcs.aux.rsun_ref
if rsun is not None:
rsun *= u.m
bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']
if hasattr(wcs, attr)]
if len(bad_attrs):
raise ValueError(f"The {' and '.join(bad_attrs)} attribute(s) on a WCS "
"are no longer supported.")
observer = None
for frame, attr_names in required_attrs.items():
attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]
if all([attr is not None for attr in attrs]):
kwargs = {'obstime': dateobs}
if rsun is not None:
kwargs['rsun'] = rsun
if issubclass(frame, HeliographicCarrington):
kwargs['observer'] = 'self'
observer = frame(attrs[0] * u.deg,
attrs[1] * u.deg,
attrs[2] * u.m,
**kwargs)
if observer is None:
try:
observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)
observer = SkyCoord(observer, rsun=rsun)
except ValueError as e:
log.debug(f"Could not parse obsgeo coordinates from WCS:\n{e}")
frame_args = {'obstime': dateobs}
if observer is not None:
frame_args['observer'] = observer
if rsun is not None:
frame_args['rsun'] = rsun
frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)
if frame_class:
if frame_class == HeliographicStonyhurst:
frame_args.pop('observer', None)
if frame_class == Heliocentric:
frame_args.pop('rsun', None)
return frame_class(**frame_args)
def _sunpy_frame_class_from_ctypes(ctypes):
ctypes = {c[:4] for c in ctypes}
mapping = {
Helioprojective: {'HPLN', 'HPLT'},
HeliographicStonyhurst: {'HGLN', 'HGLT'},
HeliographicCarrington: {'CRLN', 'CRLT'},
Heliocentric: {'SOLX', 'SOLY'},
}
for frame_class, ctype_pair in mapping.items():
if ctype_pair <= ctypes:
return frame_class
def _set_wcs_aux_obs_coord(wcs, obs_frame):
if hasattr(obs_frame, 'frame'):
obs_frame = obs_frame.frame
if isinstance(obs_frame, HeliographicStonyhurst):
wcs.wcs.aux.hgln_obs = obs_frame.lon.to_value(u.deg)
elif isinstance(obs_frame, HeliographicCarrington):
wcs.wcs.aux.crln_obs = obs_frame.lon.to_value(u.deg)
else:
raise ValueError('obs_coord must be in a Stonyhurst or Carrington frame')
wcs.wcs.aux.hglt_obs = obs_frame.lat.to_value(u.deg)
wcs.wcs.aux.dsun_obs = obs_frame.radius.to_value(u.m)
def solar_frame_to_wcs_mapping(frame, projection='TAN'):
wcs = WCS(naxis=2)
if hasattr(frame, 'rsun'):
wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)
if hasattr(frame, 'observer') and frame.observer is not None:
if isinstance(frame.observer, BaseCoordinateFrame):
observer = frame.observer
elif frame.observer == 'self':
observer = frame
_set_wcs_aux_obs_coord(wcs, observer)
if isinstance(frame, SunPyBaseCoordinateFrame):
if frame.obstime:
wcs.wcs.dateobs = frame.obstime.utc.isot
if isinstance(frame, Helioprojective):
xcoord = 'HPLN' + '-' + projection
ycoord = 'HPLT' + '-' + projection
wcs.wcs.cunit = ['arcsec', 'arcsec']
elif isinstance(frame, Heliocentric):
xcoord = 'SOLX'
ycoord = 'SOLY'
wcs.wcs.cunit = ['deg', 'deg']
elif isinstance(frame, HeliographicCarrington):
xcoord = 'CRLN' + '-' + projection
ycoord = 'CRLT' + '-' + projection
wcs.wcs.cunit = ['deg', 'deg']
elif isinstance(frame, HeliographicStonyhurst):
xcoord = 'HGLN' + '-' + projection
ycoord = 'HGLT' + '-' + projection
wcs.wcs.cunit = ['deg', 'deg']
else:
return None
wcs.wcs.ctype = [xcoord, ycoord]
return wcs
astropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])
astropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])
| true | true |
f7269aeac3ba7da133f1493f4f5259f6dcf883c0 | 26,097 | py | Python | WfaUWTSCore/src/core/configurator.py | Wi-FiTestSuite/Wi-FiTestSuite10.0.0beta | 06fe5ec068cf24e3b202f7eb76ddc2b0249eb112 | [
"0BSD"
] | 12 | 2016-07-11T19:21:14.000Z | 2022-02-18T09:25:14.000Z | WfaUWTSCore/src/core/configurator.py | Wi-FiTestSuite/Wi-FiTestSuite10.0.0beta | 06fe5ec068cf24e3b202f7eb76ddc2b0249eb112 | [
"0BSD"
] | 3 | 2021-01-06T19:16:31.000Z | 2022-02-25T02:19:37.000Z | WfaUWTSCore/src/core/configurator.py | Wi-FiTestSuite/Wi-FiTestSuite10.0.0beta | 06fe5ec068cf24e3b202f7eb76ddc2b0249eb112 | [
"0BSD"
] | 9 | 2016-10-19T11:46:22.000Z | 2022-02-22T11:30:51.000Z | ###############################################################################
#
# Copyright (c) 2016 Wi-Fi Alliance
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
# USE OR PERFORMANCE OF THIS SOFTWARE.
#
###############################################################################
#!/usr/bin/env python
import os
import time
from StringIO import StringIO
import HTML
import core.parser
from core.symtable import TestScriptSymbolTable
from scriptinfo.scriptelement import TestScriptElementType
import common
#from dbaccess import historydb
from scriptinfo.scriptsource import TestLogFileSource
from features.validation import Validation
class TestConfigContext:
    """Stores the test configuration context; most flags derive from the user's
    input command.

    All attributes start inactive/empty and are toggled by command processing
    elsewhere in the suite. Semantics marked TODO are inferred from the
    attribute name only and should be confirmed against the command handler.

    Attributes:
        is_testbed_validation_set (bool): when True, configure() emits the
            device-ID validation file for the whole testbed instead of InitEnv.
        is_testcase_validation_set (bool): as above, but only for the devices
            of the current test case.
        is_asd_mode_set (bool): ASD mode requested -- TODO confirm semantics.
        is_tms_mode_set (bool): TMS mode requested -- TODO confirm semantics.
        is_group_run_set (bool): a group run was requested.
        group_file_name (str): file naming the group of test cases to run.
        is_test_case_file (bool): the script source being parsed is a test
            case file (enables config-service wiring in configure()).
        is_all_init_config_file (bool): source is the all-init config file.
        is_all_init_command_file (bool): source is the all-init command file
            -- TODO confirm (not referenced in this module).
        user_mode (str): user mode string -- TODO confirm possible values.
        is_app_id_set (bool): app id supplied -- TODO confirm.
        is_script_protection_set (bool): script protection -- TODO confirm.
        is_result_protection_set (bool): result protection -- TODO confirm.
        is_edit_script_flag_set (bool): edit-script flag -- TODO confirm.
        is_testcase_ignored (bool): test case ignored -- TODO confirm.
    """
    def __init__(self):
        # Defaults: every flag off, names/modes empty.
        self.is_testbed_validation_set = False
        self.is_testcase_validation_set = False
        self.is_asd_mode_set = False
        self.is_tms_mode_set = False
        self.is_group_run_set = False
        self.group_file_name = ""
        self.is_test_case_file = False
        self.is_all_init_config_file = False
        self.is_all_init_command_file = False
        self.user_mode = ""
        self.is_app_id_set = False
        self.is_script_protection_set = False
        self.is_result_protection_set = False
        self.is_edit_script_flag_set = False
        self.is_testcase_ignored = False
class TestConfigurator:
"""Constructs domain data objects based on the information on linked list and generates initenv file
Attributes:
prog_name (str): Description of `attr1`.
tc_name (str): Description of `attr2`.
test_config_cxt (Optional[int]): Description of `attr2`.
eap_method_list (Optional[int]): Description of `attr2`.
test_mngr_initr (Optional[int]): Description of `attr2`.
eap_file_path (Optional[int]): Description of `attr2`.
"""
    def __init__(self, prog_name, tc_name, test_config_cxt):
        """Initializes the configurator for one test case.

        Args:
            prog_name (str): test program name (e.g. "P2P", "HS2", "VHT");
                compared against literal program names in
                generate_feat_html_file().
            tc_name (str): test case identifier, written into the generated
                InitEnv file header.
            test_config_cxt (TestConfigContext): command-line derived flags.
        """
        self.prog_name = prog_name
        self.tc_name = tc_name
        self.test_case = None          # not used in this module -- TODO confirm callers set it
        self.testbed_device = None     # not used in this module -- TODO confirm callers set it
        self.test_config_cxt = test_config_cxt
        self.eap_method_list = []      # filled by load_eap_method_list()
        self.test_mngr_initr = None    # set by configure()
        self.eap_file_path = None      # cached by is_eap_method_list_file_exist()
    def configure(self, test_mngr_initr, file_parser):
        """Moves all test related information from linkedlist to domain data objects.

        Dispatches on the concrete parser type: an XmlFileParser feeds the
        test-config-info manager (master test info), while a TestScriptParser
        feeds either the feature manager (DUTInfo file) or the testbed-device
        manager (InitConfig file). Unless a validation run was requested, the
        InitEnv file is (re)generated at the end of the script path.

        Attributes:
            test_mngr_initr (object): The object of TestManagerInitializer class.
            file_parser (object): The object of XMLFileParser class or TestScriptParser class.
        """
        self.test_mngr_initr = test_mngr_initr
        file_parser.init_parser()
        file_parser.parse() # parse the data into data structure
        if isinstance(file_parser, core.parser.XmlFileParser):
            # Hand the parsed singly-linked list to the config-info manager.
            test_mngr_initr.test_config_info_mngr.sll = file_parser.sll
            test_mngr_initr.test_config_info_mngr.test_mngr_initr = test_mngr_initr
            test_mngr_initr.test_config_info_mngr.set_test_config_info()
            if self.test_config_cxt.is_testbed_validation_set or self.test_config_cxt.is_testcase_validation_set:
                # Validation runs stop after config extraction; no report/InitEnv.
                return
            self.generate_feat_html_file()
            ###output = self.create_init_env_file()
            ##bufferStr = output.getvalue()
            ##history = historyService.History(self.prog_name, self.tc_name)
            ##history_service = historyService.HistoryService(history)
            ##history_service.updateHistoryByInitEnv(bufferStr)
            ###output.close()
            ##raw_input("Writing InitEnv file to database completed, press return to exit.")
            ##sys.exit(0)
        if isinstance(file_parser, core.parser.TestScriptParser):
            if self.test_config_cxt.is_test_case_file or self.test_config_cxt.is_all_init_config_file:
                test_mngr_initr.test_config_service.test_mngr_initr = test_mngr_initr
                # DUTInfo file: program features plus the DUT's EAP method.
                if file_parser.test_script_scanner.test_script_source.test_script_source_name == common.GlobalConfigFiles.dut_info_file:
                    test_mngr_initr.test_feat_mngr.sll = file_parser.sll
                    test_mngr_initr.test_feat_mngr.test_mngr_initr = test_mngr_initr
                    test_mngr_initr.test_feat_mngr.set_test_prog_feat()
                    if self.is_eap_method_list_file_exist():
                        self.load_eap_method_list()
                    self.set_other_dut_info()
                    TestLogFileSource.log_file_source_obj_list[0].write_log_message("Input Files - \n MasterTestInfo = %s \n DUTInfoFile =%s \n" %(common.GlobalConfigFiles.master_test_info_file, common.GlobalConfigFiles.dut_info_file), TestLogFileSource.log_file_hdl_list[0])
                # InitConfig file: testbed devices, RADIUS credentials, streams.
                if file_parser.test_script_scanner.test_script_source.test_script_source_name == common.GlobalConfigFiles.init_config_file:
                    test_mngr_initr.testbed_dev_mngr.ill = file_parser.ill
                    test_mngr_initr.testbed_dev_mngr.test_mngr_initr = test_mngr_initr
                    test_mngr_initr.testbed_dev_mngr.set_testbed_device()
                    test_mngr_initr.testbed_dev_mngr.set_rs_eap_cred()
                    # Throughput programs additionally carry data-stream info.
                    if test_mngr_initr.test_prog_mngr.test_prog.is_tp_prog:
                        test_mngr_initr.test_data_strm_mngr.ill = file_parser.ill
                        test_mngr_initr.test_data_strm_mngr.set_data_strm_info()
            if self.test_config_cxt.is_testbed_validation_set or self.test_config_cxt.is_testcase_validation_set:
                # Validation run: emit the device-ID file instead of InitEnv.
                self.create_device_id_file()
                return
            output = self.create_init_env_file()
            output.close()
    def create_device_id_file(self):
        """Writes the device-ID validation file (name from
        GlobalConfigFiles.device_id_file) into the program's script folder.

        For every validation routine of this program, emits one line per
        matching testbed device: either raw CAPI command lines
        (``<name>!<capi>``) or an external-function invocation
        (``<name>!ExternalFunc!<func>!DEFAULT``). Which devices are covered
        depends on whether testbed-wide or per-test-case validation was
        requested.
        """
        device_id_file_path = self.test_mngr_initr.test_script_mngr.prog_script_folder_path + "\\" + common.GlobalConfigFiles.device_id_file
        device_id_file = open(device_id_file_path, 'w')
        device_id_file.write("# This is an auto generated file - %s \n# For test program - %s\n#DO NOT modify this file manually \n\n" %(time.strftime("%b-%d-%y_%H:%M:%S", time.localtime()), self.prog_name))
        val = Validation()
        # Maps validation device alias -> command descriptor for this program.
        val_data = val.get_validation_routines(self.prog_name)
        testbed_device_list = []
        if self.test_config_cxt.is_testbed_validation_set:
            testbed_device_list = self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list
        if self.test_config_cxt.is_testcase_validation_set:
            testbed_device_list = self.test_mngr_initr.test_case_mngr.test_case.testbed_device_list
        for tbd_name, v_cmd_data in val_data.items():
            cmd_type = v_cmd_data['command'].get('command_type').lower()
            for tbd in testbed_device_list:
                # assume the tbd_name always uses the format of wfa_control_agent_**
                true_dev_name = tbd_name.replace('wfa_control_agent_', '').lower()
                # When the device's alias differs from the validation alias,
                # the emitted line is keyed by dev_name + role suffix instead.
                if tbd_name.endswith('_sta') and tbd.dev_type == "STA":
                    true_sta_name = true_dev_name.replace('_sta', '')
                    if tbd.dev_name.lower() == true_sta_name:
                        if tbd.ctrlipaddr != '':
                            if cmd_type == 'capi':
                                cmd_list = v_cmd_data['command'].get('command').split('\n')
                                for cmd in cmd_list:
                                    if tbd.alias.lower() == tbd_name.lower():
                                        device_id_file.write('\n%s!%s\n' % (tbd_name, cmd))
                                    else:
                                        device_id_file.write('\n%s!%s\n' % (tbd.dev_name + 'STA', cmd))
                            elif cmd_type == 'external':
                                func_name = v_cmd_data['command'].get('command')
                                if tbd.alias.lower() == tbd_name.lower():
                                    device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd_name, func_name))
                                else:
                                    device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd.dev_name + 'STA', func_name))
                        else:
                            # STA without a control address cannot be probed.
                            TestLogFileSource.log_file_source_obj_list[0].write_log_message("Device %s IP address not exist" % tbd.dev_name, TestLogFileSource.log_file_hdl_list[0])
                if tbd_name.endswith('_ap') and tbd.dev_type == "AP":
                    true_ap_name = true_dev_name.replace('_ap', '')
                    if tbd.dev_name.lower() == true_ap_name:
                        if cmd_type == 'external':
                            func_name = v_cmd_data['command'].get('command')
                            if tbd.alias.lower() == tbd_name.lower():
                                device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd_name, func_name))
                            else:
                                device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd.dev_name + 'AP', func_name))
                        elif cmd_type == 'capi':
                            cmd_list = v_cmd_data['command'].get('command').split('\n')
                            #self.test_mngr_initr.testbed_dev_mngr.num_of_aps_capi = len(cmd_list)
                            for cmd in cmd_list:
                                if tbd.alias.lower() == tbd_name.lower():
                                    device_id_file.write('\n%s!%s\n' % (tbd_name, cmd))
                                else:
                                    device_id_file.write('\n%s!%s\n' % (tbd.dev_name + 'AP', cmd))
                # The sniffer entry has a fixed alias, not the control-agent pattern.
                if tbd_name == 'wfa_sniffer' and tbd.dev_type == "SNIFFER":
                    if cmd_type == 'external':
                        func_name = v_cmd_data['command'].get('command')
                        if tbd.alias.lower() == tbd_name.lower():
                            device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd_name, func_name))
                        else:
                            device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd.dev_name, func_name))
                    elif cmd_type == 'capi':
                        cmd_list = v_cmd_data['command'].get('command').split('\n')
                        for cmd in cmd_list:
                            if tbd.alias.lower() == tbd_name.lower():
                                device_id_file.write('\n%s!%s\n' % (tbd_name, cmd))
                            else:
                                device_id_file.write('\n%s!%s\n' % (tbd.dev_name, cmd))
        device_id_file.close()
def create_init_env_file(self):
"""Generates the initenv.txt file with defined configuraiton variables.
"""
output = StringIO()
init_env_file_path = self.test_mngr_initr.test_script_mngr.prog_script_folder_path + "\\" + common.GlobalConfigFiles.init_env_file
ucc_init_file = open(init_env_file_path, 'w')
ucc_init_file.write("# This is an auto generated file - %s \n# For test case - %s\n#DO NOT modify this file manually \n\n" %(time.strftime("%b-%d-%y_%H:%M:%S", time.localtime()), self.tc_name))
output.write("# This is an auto generated file - %s \n# For test case - %s\n#DO NOT modify this file manually \n\n" %(time.strftime("%b-%d-%y_%H:%M:%S", time.localtime()), self.tc_name))
ucc_init_file.write("\ndefine!$tcID!%s!\n" % (self.tc_name))
output.write("\ndefine!$tcID!%s!\n" % (self.tc_name))
ucc_init_file.write(self.format_env_variables())
output.write(self.format_env_variables())
i = 0
for tbd in self.test_mngr_initr.test_case_mngr.test_case.testbed_device_list:
if tbd.dev_type == "AP":
i += 1
for p in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:
if p.dev_type == "AP":## and p.number != "":
if p.number == "":
i += 1
p.number = "AP%s" % i
p.state = "off"
ucc_init_file.write(self.format_testbed_ap(p))
output.write(self.format_testbed_ap(p))
if p.dev_type == "RADIUSSERVER":
rsName = TestScriptSymbolTable.get_value_from_sym_tab("RadiusServerName", TestScriptSymbolTable.test_script_sym_tab)
#if rsName != '0' and rsName is not None:
if rsName is not None:
if rsName == p.dev_name or rsName == '0':
ucc_init_file.write(self.format_radius_server(p))
output.write(self.format_radius_server(p))
#Writing other variables
for var in self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list:
ucc_init_file.write("\ndefine!$%s!%s!\n"%(var, self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list[var]))
output.write("\ndefine!$%s!%s!\n"%(var, self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list[var]))
ucc_init_file.write("#EOF")
output.write("#EOF")
ucc_init_file.close()
return output
def format_radius_server(self, rs):
"""Formats the variables in RadiusServer testbed device objects for output.
Attributes:
rs (object): The RadiusServer testbed device object.
"""
return "\n\ndefine!$RADIUSIPAddress!%s!\ndefine!$RADIUSPort!%s!\ndefine!$RADIUSSharedSecret!%s!\ndefine!$SupplicantName!%s!\ndefine!$STASupplicantName!%s!\n" % \
(rs.testipaddr, rs.testport, rs.shared_secret, rs.supplicant, rs.sta_supplicant)
def format_testbed_ap(self, ap):
"""Formats the variables in AP testbed device objects for output.
Attributes:
ap (object): The AP testbed device object.
"""
value = None
if TestScriptSymbolTable.lookup_sym_tab("$" + ap.number, TestScriptSymbolTable.test_script_sym_tab):
value = TestScriptSymbolTable.get_value_from_sym_tab("$" + ap.number, TestScriptSymbolTable.test_script_sym_tab)
# define!$AP1!Marvell11nAP!
return "\n\ndefine!$%s!%s!\ndefine!$%sPowerSwitchPort!%s!\ndefine!$%sState!%s!\ndefine!$%sIPAddress!%s!\n" % \
(ap.number, value if value else ap.dev_name, ap.number, ap.pwrswtport, ap.number, ap.state, ap.number, (ap.ctrlipaddr if ap.is_emd_ctrl_agt else ap.testipaddr))
    def format_env_variables(self):
        """Outputs variables per the specified format.

        Returns a single string of define! directives for channels, band,
        SSIDs, the first three testbed STAs, and the CAPI file names, reading
        everything from the current test-config-info object. Missing channel
        or SSID keys render as empty values.
        """
        TSTA = []
        sta_count = 0
        # Collect STA device names in slot order (STA1, STA2, STA3, ...).
        for tbd in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:
            if tbd.dev_type == "STA" and tbd.number == "STA" + ("%s" % (sta_count + 1)):
                TSTA.append(tbd.dev_name)
                sta_count += 1
        # NOTE(review): $STAn below is only filled when len(TSTA) == n exactly
        # (e.g. with two STAs, $STA1 is emitted empty) -- looks intentional in
        # downstream scripts but worth confirming.
        return "define!$Channel!%s!\ndefine!$Channel_1!%s!\ndefine!$Channel_2!%s!\ndefine!$Channel_3!%s!\ndefine!$Band!%s!\ndefine!$SSID!%s!\ndefine!$SSID_1!%s!\ndefine!$SSID_2!%s!\ndefine!$SSID_3!%s!\ndefine!$STA1!%s!\ndefine!$STA2!%s!\ndefine!$STA3!%s!\ndefine!$TestbedConfigCAPIFile!%s!\ndefine!$DUTConfigCAPIFile!%s!\ndefine!$STAConfigCAPIFile!%s!\ndefine!$WLANTestCAPIFile!%s!\n" % \
            ("" if not "Channel" in self.test_mngr_initr.test_config_info_mngr.test_config_info.channel else self.test_mngr_initr.test_config_info_mngr.test_config_info.channel["Channel"],
             "" if not "Channel_1" in self.test_mngr_initr.test_config_info_mngr.test_config_info.channel else self.test_mngr_initr.test_config_info_mngr.test_config_info.channel["Channel_1"],
             "" if not "Channel_2" in self.test_mngr_initr.test_config_info_mngr.test_config_info.channel else self.test_mngr_initr.test_config_info_mngr.test_config_info.channel["Channel_2"],
             "" if not "Channel_3" in self.test_mngr_initr.test_config_info_mngr.test_config_info.channel else self.test_mngr_initr.test_config_info_mngr.test_config_info.channel["Channel_3"],
             self.test_mngr_initr.test_config_info_mngr.test_config_info.band,
             "" if not "SSID" in self.test_mngr_initr.test_config_info_mngr.test_config_info.ssid else self.test_mngr_initr.test_config_info_mngr.test_config_info.ssid["SSID"],
             "" if not "SSID_1" in self.test_mngr_initr.test_config_info_mngr.test_config_info.ssid else self.test_mngr_initr.test_config_info_mngr.test_config_info.ssid["SSID_1"],
             "" if not "SSID_2" in self.test_mngr_initr.test_config_info_mngr.test_config_info.ssid else self.test_mngr_initr.test_config_info_mngr.test_config_info.ssid["SSID_2"],
             "" if not "SSID_3" in self.test_mngr_initr.test_config_info_mngr.test_config_info.ssid else self.test_mngr_initr.test_config_info_mngr.test_config_info.ssid["SSID_3"],
             TSTA[0] if len(TSTA) == 1 else "",
             TSTA[1] if len(TSTA) == 2 else "",
             TSTA[2] if len(TSTA) == 3 else "",
             self.test_mngr_initr.test_config_info_mngr.test_config_info.testbed_dev_config_file,
             self.test_mngr_initr.test_config_info_mngr.test_config_info.dut_config_file,
             self.test_mngr_initr.test_config_info_mngr.test_config_info.sta_config_file,
             self.test_mngr_initr.test_config_info_mngr.test_config_info.wlan_test_capi_file)
def is_eap_method_list_file_exist(self):
"""Checks if the EAP method list file exists.
"""
self.eap_file_path = self.test_mngr_initr.test_script_mngr.prog_script_folder_path + "\\" + common.GlobalConfigFiles.eap_method_listFile
if os.path.exists(self.eap_file_path):
return True
else:
return False
def load_eap_method_list(self):
"""Loads EAP method list from file system.
"""
eap_file = open(self.eap_file_path, 'r')
for item in eap_file.readlines():
eap_name = item.rstrip("\n")
if item.startswith('#'):
continue
self.eap_method_list.append(eap_name)
if eap_file:
eap_file.close()
    def set_other_dut_info(self):
        """Sets other DUT related information into var_list dictionary.

        Picks the DUT's EAP method: TTLS is installed as the default symbol,
        then the first entry of the EAP method list that the DUT reports as
        supported (feature value 1) overrides it. Also presets APUT_state for
        specific N-series test cases.
        """
        if self.is_eap_method_list_file_exist():
            # Default to TTLS until a supported method is found below.
            ttls_name = TestScriptElementType.get_test_feat_key_from_val(TestScriptElementType.TTLS)
            TestScriptSymbolTable.insert_sym_tab("DUTEAPMethod", ttls_name, TestScriptSymbolTable.test_script_sym_tab)
            found_eap_method = False
            # The file order defines priority; take the first supported method.
            for eap in self.eap_method_list:
                if found_eap_method:
                    break
                for feature in self.test_mngr_initr.test_prog_mngr.test_prog.feat_list:
                    if feature.feat_name == eap and int(feature.feat_value) == 1:
                        TestScriptSymbolTable.insert_sym_tab("DUTEAPMethod", eap, TestScriptSymbolTable.test_script_sym_tab)
                        found_eap_method = True
                        break
        # Hard-coded APUT state for particular 11n test cases (setdefault, so
        # an existing value is never overwritten).
        if "N-4.2" in common.GlobalConfigFiles.curr_tc_name or "N-ExA" in common.GlobalConfigFiles.curr_tc_name:
            self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault("APUT_state", "on")
        if "N-5.2" in common.GlobalConfigFiles.curr_tc_name or "N-5.3" in common.GlobalConfigFiles.curr_tc_name or "N-ExS" in common.GlobalConfigFiles.curr_tc_name:
            self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault("APUT_state", "off")
    def generate_feat_html_file(self):
        """Generates a html DUT feature file while loading DUT features into var_list dictionary.

        Builds a two-column feature/support table, records every feature into
        var_list, flags the test as N/A ("TestNA") when a required optional
        feature is unsupported, and writes the HTML to log sink index 3
        (presumably the HTML report file -- TODO confirm).
        """
        # Feature reporting only applies to these programs.
        if self.prog_name == "P2P" or self.prog_name == "TDLS" or self.prog_name == "PMF" or self.prog_name == "HS2" or self.prog_name == "WFD" \
        or self.prog_name == "WFDS" or self.prog_name == "VHT" or self.prog_name == "HS2-R2" or self.prog_name == "WMMPS" or self.prog_name == "NAN":
            #fFile = self.test_mngr_initr.test_script_mngr.create_log_file(AllImports.GlobalConfigFiles.html_file, 'w')
            T = HTML.Table(col_width=['70%', '30%'])
            R1 = HTML.TableRow(cells=['Optional Feature', 'DUT Support'], bgcolor="Gray", header="True")
            T.rows.append(R1)
            # First group: programs whose required feature is named per test
            # case in the master info list (sll.search on the feature name).
            if self.prog_name == "P2P" or self.prog_name == "TDLS" or self.prog_name == "HS2" or self.prog_name == "WFD" or self.prog_name == "WFDS" or self.prog_name == "HS2-R2" or self.prog_name == "NAN":
                p2p_var_list = self.test_mngr_initr.test_feat_mngr.get_dut_feat_list()
                if p2p_var_list != -1:
                    p2p_var_list = p2p_var_list.split('!')
                    TestLogFileSource.log_file_source_obj_list[0].write_log_message("P2P Supported Features = %s" % p2p_var_list, TestLogFileSource.log_file_hdl_list[0])
                for var in p2p_var_list:
                    if var != "":
                        # Each entry is "name,value".
                        v = var.split(',')
                        self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault(v[0], v[1])
                        feat_supt = self.test_mngr_initr.test_config_info_mngr.sll.search(v[0])
                        if feat_supt is not None:
                            TestLogFileSource.log_file_source_obj_list[0].write_log_message("%s-%s" % (feat_supt, v[1]), TestLogFileSource.log_file_hdl_list[0])
                            if feat_supt != v[1]:
                                # Test case requires this feature; mark N/A.
                                TestLogFileSource.log_file_source_obj_list[0].write_log_message("DUT does not support the feature", TestLogFileSource.log_file_hdl_list[0])
                                self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault("TestNA", "DUT does not support the feature")
                        # Render raw 0/1 flags as No/Yes; other values pass through.
                        if v[1] == "0":
                            dis = "No"
                        elif v[1] == "1":
                            dis = "Yes"
                        else:
                            dis = v[1]
                        # Internal DUT_* variables are loaded but not displayed.
                        if "DUT_" not in v[0]:
                            T.rows.append([v[0], dis])
            else:
                # Second group: a single "check_feat_flag" entry names the
                # feature this test case depends on.
                prog_var_list = self.test_mngr_initr.test_feat_mngr.get_dut_feat_list()
                if prog_var_list != -1:
                    prog_var_list = prog_var_list.split('!')
                    TestLogFileSource.log_file_source_obj_list[0].write_log_message("%s Supported Features = %s" % (self.prog_name, prog_var_list), TestLogFileSource.log_file_hdl_list[0])
                    check_feat_flag = self.test_mngr_initr.test_config_info_mngr.sll.search("check_feat_flag")
                    #self.test_config_info_mngr.test_mngr_initr.test_case_mngr.test_case.opt_test_feat
                    TestLogFileSource.log_file_source_obj_list[0].write_log_message("check_feat_flag = %s" % check_feat_flag, TestLogFileSource.log_file_hdl_list[0])
                    for var in prog_var_list:
                        if var != "":
                            v = var.split(',')
                            self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault(v[0], v[1])
                            feat_supt = self.test_mngr_initr.test_config_info_mngr.sll.search(v[0])
                            #LogMsg("Feature Support = %s" % feat_supt)
                            if check_feat_flag == v[0]:
                                TestLogFileSource.log_file_source_obj_list[0].write_log_message("%s-%s"%(check_feat_flag, v[1]), TestLogFileSource.log_file_hdl_list[0])
                                if v[1] != "1":
                                    TestLogFileSource.log_file_source_obj_list[0].write_log_message("DUT does not support the feature", TestLogFileSource.log_file_hdl_list[0])
                                    self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault("TestNA", "DUT does not support the feature")
                            if v[1] == "0":
                                dis = "No"
                            elif v[1] == "1":
                                dis = "Yes"
                            else:
                                dis = v[1]
                            if "DUT_" not in v[0]:
                                T.rows.append([v[0], dis])
            html_code = str(T)
            TestLogFileSource.log_file_source_obj_list[3].write_log_message(html_code, TestLogFileSource.log_file_hdl_list[3])
            TestLogFileSource.log_file_source_obj_list[3].write_log_message('<p>', TestLogFileSource.log_file_hdl_list[3])
            TestLogFileSource.log_file_source_obj_list[3].close_log_file(TestLogFileSource.log_file_hdl_list[3])
            #fFile.write(html_code)
            #fFile.write('<p>')
| 58.252232 | 388 | 0.619573 | d_type = v_cmd_data['command'].get('command_type').lower()
for tbd in testbed_device_list:
true_dev_name = tbd_name.replace('wfa_control_agent_', '').lower()
if tbd_name.endswith('_sta') and tbd.dev_type == "STA":
true_sta_name = true_dev_name.replace('_sta', '')
if tbd.dev_name.lower() == true_sta_name:
if tbd.ctrlipaddr != '':
if cmd_type == 'capi':
cmd_list = v_cmd_data['command'].get('command').split('\n')
for cmd in cmd_list:
if tbd.alias.lower() == tbd_name.lower():
device_id_file.write('\n%s!%s\n' % (tbd_name, cmd))
else:
device_id_file.write('\n%s!%s\n' % (tbd.dev_name + 'STA', cmd))
elif cmd_type == 'external':
func_name = v_cmd_data['command'].get('command')
if tbd.alias.lower() == tbd_name.lower():
device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd_name, func_name))
else:
device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd.dev_name + 'STA', func_name))
else:
TestLogFileSource.log_file_source_obj_list[0].write_log_message("Device %s IP address not exist" % tbd.dev_name, TestLogFileSource.log_file_hdl_list[0])
if tbd_name.endswith('_ap') and tbd.dev_type == "AP":
true_ap_name = true_dev_name.replace('_ap', '')
if tbd.dev_name.lower() == true_ap_name:
if cmd_type == 'external':
func_name = v_cmd_data['command'].get('command')
if tbd.alias.lower() == tbd_name.lower():
device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd_name, func_name))
else:
device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd.dev_name + 'AP', func_name))
elif cmd_type == 'capi':
cmd_list = v_cmd_data['command'].get('command').split('\n')
for cmd in cmd_list:
if tbd.alias.lower() == tbd_name.lower():
device_id_file.write('\n%s!%s\n' % (tbd_name, cmd))
else:
device_id_file.write('\n%s!%s\n' % (tbd.dev_name + 'AP', cmd))
if tbd_name == 'wfa_sniffer' and tbd.dev_type == "SNIFFER":
if cmd_type == 'external':
func_name = v_cmd_data['command'].get('command')
if tbd.alias.lower() == tbd_name.lower():
device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd_name, func_name))
else:
device_id_file.write('\n%s!ExternalFunc!%s!DEFAULT\n' % (tbd.dev_name, func_name))
elif cmd_type == 'capi':
cmd_list = v_cmd_data['command'].get('command').split('\n')
for cmd in cmd_list:
if tbd.alias.lower() == tbd_name.lower():
device_id_file.write('\n%s!%s\n' % (tbd_name, cmd))
else:
device_id_file.write('\n%s!%s\n' % (tbd.dev_name, cmd))
device_id_file.close()
def create_init_env_file(self):
    """Generate the initenv.txt file with the defined configuration variables.

    Writes an auto-generated header, the test case id, the common environment
    defines, testbed AP / RADIUS-server defines and any extra configured
    variables both to the initenv file on disk and to an in-memory copy.

    Returns:
        StringIO: in-memory copy of everything written to the file.
    """
    output = StringIO()
    init_env_file_path = self.test_mngr_initr.test_script_mngr.prog_script_folder_path + "\\" + common.GlobalConfigFiles.init_env_file
    ucc_init_file = open(init_env_file_path, 'w')
    header = "# This is an auto generated file - %s \n# For test case - %s\n#DO NOT modify this file manually \n\n" % (time.strftime("%b-%d-%y_%H:%M:%S", time.localtime()), self.tc_name)
    ucc_init_file.write(header)
    output.write(header)
    ucc_init_file.write("\ndefine!$tcID!%s!\n" % (self.tc_name))
    output.write("\ndefine!$tcID!%s!\n" % (self.tc_name))
    ucc_init_file.write(self.format_env_variables())
    output.write(self.format_env_variables())
    # Count the APs declared by the test case itself so that any un-numbered
    # APs from the program testbed list get numbers continuing after them.
    i = 0
    for tbd in self.test_mngr_initr.test_case_mngr.test_case.testbed_device_list:
        if tbd.dev_type == "AP":
            i += 1
    for p in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:
        # BUGFIX: the previous revision contained a mangled/merged condition
        # ("if p.dev_type == \"AP\":.number == \"\":") which was a syntax error.
        # NOTE(review): reconstructed as "number APs that lack one, then emit
        # defines for every AP" — confirm against the upstream history.
        if p.dev_type == "AP":
            if p.number == "":
                i += 1
                p.number = "AP%s" % i
                p.state = "off"
            ucc_init_file.write(self.format_testbed_ap(p))
            output.write(self.format_testbed_ap(p))
        if p.dev_type == "RADIUSSERVER":
            rsName = TestScriptSymbolTable.get_value_from_sym_tab("RadiusServerName", TestScriptSymbolTable.test_script_sym_tab)
            if rsName is not None:
                # '0' acts as a wildcard meaning "any radius server".
                if rsName == p.dev_name or rsName == '0':
                    ucc_init_file.write(self.format_radius_server(p))
                    output.write(self.format_radius_server(p))
    for var in self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list:
        ucc_init_file.write("\ndefine!$%s!%s!\n" % (var, self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list[var]))
        output.write("\ndefine!$%s!%s!\n" % (var, self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list[var]))
    ucc_init_file.write("#EOF")
    output.write("#EOF")
    ucc_init_file.close()
    return output
def format_radius_server(self, rs):
    """Render the initenv ``define!`` directives for a RADIUS server device.

    Args:
        rs (object): RADIUS server testbed device; must expose
            testipaddr, testport, shared_secret, supplicant and
            sta_supplicant attributes.

    Returns:
        str: newline-separated ``define!$Name!value!`` directives.
    """
    return (
        "\n\n"
        "define!$RADIUSIPAddress!%s!\n"
        "define!$RADIUSPort!%s!\n"
        "define!$RADIUSSharedSecret!%s!\n"
        "define!$SupplicantName!%s!\n"
        "define!$STASupplicantName!%s!\n"
        % (rs.testipaddr, rs.testport, rs.shared_secret, rs.supplicant, rs.sta_supplicant)
    )
def format_testbed_ap(self, ap):
    """Render the initenv ``define!`` directives for an AP testbed device.

    The AP alias define prefers an override registered in the script symbol
    table under ``$<number>``; otherwise the device name is used.  The IP
    address define uses the control address for embedded control agents.

    Args:
        ap (object): AP testbed device.

    Returns:
        str: newline-separated ``define!$Name!value!`` directives.
    """
    key = "$" + ap.number
    alias = None
    if TestScriptSymbolTable.lookup_sym_tab(key, TestScriptSymbolTable.test_script_sym_tab):
        alias = TestScriptSymbolTable.get_value_from_sym_tab(key, TestScriptSymbolTable.test_script_sym_tab)
    # Fall back to the device name when no (truthy) override is registered.
    if not alias:
        alias = ap.dev_name
    ip_addr = ap.ctrlipaddr if ap.is_emd_ctrl_agt else ap.testipaddr
    return "\n\ndefine!$%s!%s!\ndefine!$%sPowerSwitchPort!%s!\ndefine!$%sState!%s!\ndefine!$%sIPAddress!%s!\n" % (
        ap.number, alias, ap.number, ap.pwrswtport, ap.number, ap.state, ap.number, ip_addr)
def format_env_variables(self):
    """Render the common environment defines (channels, SSIDs, STAs, CAPI files).

    Missing channel/SSID entries are emitted with an empty value.  STA1..STA3
    are taken from program testbed STA devices whose ``number`` matches their
    position in the list.

    Returns:
        str: newline-separated ``define!$Name!value!`` directives.
    """
    cfg = self.test_mngr_initr.test_config_info_mngr.test_config_info
    sta_names = []
    for dev in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:
        if dev.dev_type == "STA" and dev.number == "STA%s" % (len(sta_names) + 1):
            sta_names.append(dev.dev_name)

    def chan(key):
        # Missing keys map to an empty define value.
        return cfg.channel[key] if key in cfg.channel else ""

    def ssid(key):
        return cfg.ssid[key] if key in cfg.ssid else ""

    # NOTE(review): as in the original, STA1/STA2/STA3 are only filled when
    # the STA count is exactly 1/2/3 respectively.
    fields = (chan("Channel"), chan("Channel_1"), chan("Channel_2"), chan("Channel_3"),
              cfg.band,
              ssid("SSID"), ssid("SSID_1"), ssid("SSID_2"), ssid("SSID_3"),
              sta_names[0] if len(sta_names) == 1 else "",
              sta_names[1] if len(sta_names) == 2 else "",
              sta_names[2] if len(sta_names) == 3 else "",
              cfg.testbed_dev_config_file, cfg.dut_config_file,
              cfg.sta_config_file, cfg.wlan_test_capi_file)
    return ("define!$Channel!%s!\ndefine!$Channel_1!%s!\ndefine!$Channel_2!%s!\n"
            "define!$Channel_3!%s!\ndefine!$Band!%s!\ndefine!$SSID!%s!\n"
            "define!$SSID_1!%s!\ndefine!$SSID_2!%s!\ndefine!$SSID_3!%s!\n"
            "define!$STA1!%s!\ndefine!$STA2!%s!\ndefine!$STA3!%s!\n"
            "define!$TestbedConfigCAPIFile!%s!\ndefine!$DUTConfigCAPIFile!%s!\n"
            "define!$STAConfigCAPIFile!%s!\ndefine!$WLANTestCAPIFile!%s!\n") % fields
def is_eap_method_list_file_exist(self):
    """Check whether the EAP method list file exists on disk.

    Side effect: stores the resolved path on ``self.eap_file_path`` for
    later use by load_eap_method_list().

    Returns:
        bool: True when the file is present.
    """
    self.eap_file_path = self.test_mngr_initr.test_script_mngr.prog_script_folder_path + "\\" + common.GlobalConfigFiles.eap_method_listFile
    return os.path.exists(self.eap_file_path)
def load_eap_method_list(self):
    """Load the EAP method names from the file at ``self.eap_file_path``.

    Lines beginning with '#' are treated as comments and skipped; every
    other line (minus its trailing newline) is appended to
    ``self.eap_method_list``.

    Requires is_eap_method_list_file_exist() to have been called first so
    that ``self.eap_file_path`` is set.
    """
    # 'with' guarantees the file is closed even on error (the previous
    # revision closed it manually and only behind an "if eap_file:" check).
    with open(self.eap_file_path, 'r') as eap_file:
        for item in eap_file:
            # Skip comment lines before doing any further work on them.
            if item.startswith('#'):
                continue
            self.eap_method_list.append(item.rstrip("\n"))
def set_other_dut_info(self):
    """Store additional DUT information in the symbol table and var_list.

    Picks the first EAP method from the configured method list that the DUT
    reports as supported (defaulting to TTLS) and records the APUT state
    expected by the N-series test cases.
    """
    if self.is_eap_method_list_file_exist():
        # Default to TTLS until a supported method is found in the list.
        default_method = TestScriptElementType.get_test_feat_key_from_val(TestScriptElementType.TTLS)
        TestScriptSymbolTable.insert_sym_tab("DUTEAPMethod", default_method, TestScriptSymbolTable.test_script_sym_tab)
        supported = None
        feat_list = self.test_mngr_initr.test_prog_mngr.test_prog.feat_list
        for eap in self.eap_method_list:
            for feature in feat_list:
                if feature.feat_name == eap and int(feature.feat_value) == 1:
                    supported = eap
                    break
            if supported is not None:
                break
        if supported is not None:
            TestScriptSymbolTable.insert_sym_tab("DUTEAPMethod", supported, TestScriptSymbolTable.test_script_sym_tab)
    tc_name = common.GlobalConfigFiles.curr_tc_name
    var_list = self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list
    # APUT starts powered on for N-4.2 / N-ExA, off for N-5.2 / N-5.3 / N-ExS.
    if "N-4.2" in tc_name or "N-ExA" in tc_name:
        var_list.setdefault("APUT_state", "on")
    if "N-5.2" in tc_name or "N-5.3" in tc_name or "N-ExS" in tc_name:
        var_list.setdefault("APUT_state", "off")
def generate_feat_html_file(self):
    """Generates a html DUT feature file while loading DUT features into var_list dictionary.

    Builds a two-column HTML table (feature / DUT support), records every
    reported feature value in test_config_info.var_list, and marks the test
    as not applicable ("TestNA") when the DUT lacks a required feature.
    The rendered table is appended to the HTML log file source (index 3).
    """
    # Only these certification programs publish an optional-feature table.
    if self.prog_name == "P2P" or self.prog_name == "TDLS" or self.prog_name == "PMF" or self.prog_name == "HS2" or self.prog_name == "WFD" \
            or self.prog_name == "WFDS" or self.prog_name == "VHT" or self.prog_name == "HS2-R2" or self.prog_name == "WMMPS" or self.prog_name == "NAN":
        T = HTML.Table(col_width=['70%', '30%'])
        R1 = HTML.TableRow(cells=['Optional Feature', 'DUT Support'], bgcolor="Gray", header="True")
        T.rows.append(R1)
        if self.prog_name == "P2P" or self.prog_name == "TDLS" or self.prog_name == "HS2" or self.prog_name == "WFD" or self.prog_name == "WFDS" or self.prog_name == "HS2-R2" or self.prog_name == "NAN":
            # Feature list comes back as "name,value!name,value!..." or -1.
            p2p_var_list = self.test_mngr_initr.test_feat_mngr.get_dut_feat_list()
            if p2p_var_list != -1:
                p2p_var_list = p2p_var_list.split('!')
                TestLogFileSource.log_file_source_obj_list[0].write_log_message("P2P Supported Features = %s" % p2p_var_list, TestLogFileSource.log_file_hdl_list[0])
                for var in p2p_var_list:
                    if var != "":
                        v = var.split(',')
                        self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault(v[0], v[1])
                        # sll holds the feature values this test case requires;
                        # a mismatch with the DUT's value makes the test N/A.
                        feat_supt = self.test_mngr_initr.test_config_info_mngr.sll.search(v[0])
                        if feat_supt is not None:
                            TestLogFileSource.log_file_source_obj_list[0].write_log_message("%s-%s" % (feat_supt, v[1]), TestLogFileSource.log_file_hdl_list[0])
                            if feat_supt != v[1]:
                                TestLogFileSource.log_file_source_obj_list[0].write_log_message("DUT does not support the feature", TestLogFileSource.log_file_hdl_list[0])
                                self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault("TestNA", "DUT does not support the feature")
                        # Map "0"/"1" values to human readable No/Yes for the table.
                        if v[1] == "0":
                            dis = "No"
                        elif v[1] == "1":
                            dis = "Yes"
                        else:
                            dis = v[1]
                        # Internal "DUT_"-prefixed entries are not displayed.
                        if "DUT_" not in v[0]:
                            T.rows.append([v[0], dis])
        else:
            prog_var_list = self.test_mngr_initr.test_feat_mngr.get_dut_feat_list()
            if prog_var_list != -1:
                prog_var_list = prog_var_list.split('!')
                TestLogFileSource.log_file_source_obj_list[0].write_log_message("%s Supported Features = %s" % (self.prog_name, prog_var_list), TestLogFileSource.log_file_hdl_list[0])
                # Name of the single feature whose support decides TestNA
                # for this program family.
                check_feat_flag = self.test_mngr_initr.test_config_info_mngr.sll.search("check_feat_flag")
                TestLogFileSource.log_file_source_obj_list[0].write_log_message("check_feat_flag = %s" % check_feat_flag, TestLogFileSource.log_file_hdl_list[0])
                for var in prog_var_list:
                    if var != "":
                        v = var.split(',')
                        self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault(v[0], v[1])
                        # NOTE(review): feat_supt is assigned but never used in
                        # this branch — possibly leftover from the P2P branch.
                        feat_supt = self.test_mngr_initr.test_config_info_mngr.sll.search(v[0])
                        if check_feat_flag == v[0]:
                            TestLogFileSource.log_file_source_obj_list[0].write_log_message("%s-%s"%(check_feat_flag, v[1]), TestLogFileSource.log_file_hdl_list[0])
                            if v[1] != "1":
                                TestLogFileSource.log_file_source_obj_list[0].write_log_message("DUT does not support the feature", TestLogFileSource.log_file_hdl_list[0])
                                self.test_mngr_initr.test_config_info_mngr.test_config_info.var_list.setdefault("TestNA", "DUT does not support the feature")
                        if v[1] == "0":
                            dis = "No"
                        elif v[1] == "1":
                            dis = "Yes"
                        else:
                            dis = v[1]
                        if "DUT_" not in v[0]:
                            T.rows.append([v[0], dis])
        html_code = str(T)
        TestLogFileSource.log_file_source_obj_list[3].write_log_message(html_code, TestLogFileSource.log_file_hdl_list[3])
        TestLogFileSource.log_file_source_obj_list[3].write_log_message('<p>', TestLogFileSource.log_file_hdl_list[3])
        TestLogFileSource.log_file_source_obj_list[3].close_log_file(TestLogFileSource.log_file_hdl_list[3])
| false | true |
f7269bbfbe9e6440da662f049b38c023e316d1dd | 17,900 | py | Python | backend/main.py | Plane-walker/fabric-draw | 7a3b4baef4d59a47c046e68d1fc5f20aeddd4c11 | [
"MIT"
] | 1 | 2021-09-01T06:31:26.000Z | 2021-09-01T06:31:26.000Z | backend/main.py | Plane-walker/fabric-draw | 7a3b4baef4d59a47c046e68d1fc5f20aeddd4c11 | [
"MIT"
] | 20 | 2021-09-22T13:04:07.000Z | 2021-10-11T12:27:12.000Z | backend/main.py | Plane-walker/fabric-draw | 7a3b4baef4d59a47c046e68d1fc5f20aeddd4c11 | [
"MIT"
] | null | null | null | import paramiko
import time
import io
import os
import stat
from yaml_generator import CAYamlGenerator, OrderYamlGenerator, PeerYamlGenerator, ConfigTXYamlGenerator
def sftp_get_r(sftp_client, remote_path, local_path):
    """Recursively download a remote directory tree over SFTP.

    Silently returns when remote_path does not exist on the server.
    Local directories are created as needed and files fetched one by one.
    """
    try:
        sftp_client.stat(remote_path)
    except IOError:
        # Nothing to copy when the remote path is absent.
        return
    if not os.path.exists(local_path):
        os.mkdir(local_path)
    for entry in sftp_client.listdir(remote_path):
        remote_child = f'{remote_path}/{entry}'
        local_child = os.path.join(local_path, entry)
        if stat.S_ISDIR(sftp_client.stat(remote_child).st_mode):
            sftp_get_r(sftp_client, remote_child, local_child)
        else:
            sftp_client.get(remote_child, local_child)
def sftp_put_r(sftp_client, local_path, remote_path):
    """Recursively upload a local directory tree over SFTP.

    Does nothing when local_path does not exist.  Every component of
    remote_path is created on the server when missing; files are uploaded
    and sub-directories recursed into.
    """
    if not os.path.exists(local_path):
        return
    # Create each ancestor of remote_path that is not present yet.
    partial = ""
    for part in remote_path.split("/"):
        if not part:
            continue
        partial = f"{partial}/{part}"
        try:
            sftp_client.listdir(partial)
        except IOError:
            sftp_client.mkdir(partial)
    try:
        sftp_client.stat(remote_path)
    except IOError:
        sftp_client.mkdir(remote_path)
    for entry in os.listdir(local_path):
        local_child = os.path.join(local_path, entry)
        remote_child = f'{remote_path}/{entry}'
        if os.path.isfile(local_child):
            sftp_client.put(local_child, remote_child)
        else:
            sftp_put_r(sftp_client, local_child, remote_child)
def generate_ca(ca_id, ca_information, fabric_name, target_host, crypto_base):
    """Start a fabric-ca container for one group on its remote host.

    Over SSH: creates the remote working directory, uploads node_build.py,
    initializes the docker swarm (orderer group) or joins it (other groups),
    generates and uploads the CA docker-compose file, starts the container
    and downloads the CA's TLS certificate into the local workspace.

    Args:
        ca_id: fully qualified CA node id, e.g. "ca.org0.test.com".
        ca_information: node record with an 'address' dict
            (host, ssh_port, fabric_port, sk = SSH private key text).
        fabric_name: network name passed to the swarm/compose setup.
        target_host: swarm manager host (the orderer CA's host).
        crypto_base: remote working directory, e.g. "/root/opt".
    """
    node_name, group_name, domain = ca_id.split('.', 2)
    address = ca_information['address']
    key_file = io.StringIO(address['sk'])
    private_key = paramiko.RSAKey.from_private_key(key_file)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=address['host'], port=address['ssh_port'], username='root', pkey=private_key)
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base} ]; then mkdir -p {crypto_base}; fi')
    stdout.channel.recv_exit_status()
    ftp_client = ssh.open_sftp()
    file_name = 'node_build.py'
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    if group_name == 'orderer':
        # The orderer group's CA host becomes the swarm manager; keep its
        # join token locally so the other hosts can join later.
        stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name init_docker_swarm {target_host} {fabric_name} {crypto_base}')
        stdout.channel.recv_exit_status()
        ftp_client.get(f'{crypto_base}/token', 'token')
    else:
        # A missing token file on the host means it has not joined the swarm.
        try:
            ftp_client.stat(f'{crypto_base}/token')
        except IOError:
            node_host = address['host']
            ftp_client.put('token', f'{crypto_base}/token')
            stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name join_docker_swarm {node_host} {target_host} {crypto_base}')
            stdout.channel.recv_exit_status()
    ca_yaml_generator = CAYamlGenerator()
    file_name = ca_yaml_generator.generate(ca_id, group_name, fabric_name, address['fabric_port'], crypto_base)
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    stdin, stdout, stderr = ssh.exec_command(f'docker-compose -f {crypto_base}/{file_name} up -d')
    stdout.channel.recv_exit_status()
    # Give the CA container a moment to write its TLS certificate.
    time.sleep(4)
    tls_cert_path = f'organizations/fabric-ca/{group_name}'
    if not os.path.exists(tls_cert_path):
        os.makedirs(tls_cert_path)
    ftp_client.get(f'{crypto_base}/{tls_cert_path}/tls-cert.pem', f'{tls_cert_path}/tls-cert.pem')
    ftp_client.close()
    ssh.close()
def generate_order_msp(order_id, order_information, ca_port, crypto_base):
    """Enroll an orderer with its org CA and fetch the resulting MSP material.

    Connects to the orderer host over SSH, uploads the CA's TLS certificate
    and the node_build.py helper, runs org- and node-level MSP enrollment
    remotely, then downloads the org TLS CA certificate and the orderer's
    TLS server certificate into the local workspace.

    Args:
        order_id: fully qualified node id, e.g. "orderer0.orderer.test.com".
        order_information: node record with an 'address' dict
            (host, ssh_port, fabric_port, sk = SSH private key text).
        ca_port: fabric-ca port of the group's CA.
        crypto_base: remote working directory, e.g. "/root/opt".
    """
    node_name, group_name, domain = order_id.split('.', 2)
    address = order_information['address']
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key_file = io.StringIO(address['sk'])
    private_key = paramiko.RSAKey.from_private_key(key_file)
    ssh.connect(hostname=address['host'], port=address['ssh_port'], username='root', pkey=private_key)
    tls_cert_path = f'organizations/fabric-ca/{group_name}'
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base}/{tls_cert_path} ]; then mkdir -p {crypto_base}/{tls_cert_path}; fi')
    stdout.channel.recv_exit_status()
    ftp_client = ssh.open_sftp()
    ftp_client.put(f'{tls_cert_path}/tls-cert.pem', f'{crypto_base}/{tls_cert_path}/tls-cert.pem')
    file_name = 'node_build.py'
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    # Org-level then node-level enrollment on the remote host.
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name org_msp_generate {group_name} {domain} {ca_port} {crypto_base}')
    stdout.channel.recv_exit_status()
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name peer_msp_generate {node_name} {group_name} {domain} {ca_port} {crypto_base}')
    stdout.channel.recv_exit_status()
    tls_ca_path = f'organizations/{group_name}.{domain}/tlsca'
    if not os.path.exists(tls_ca_path):
        os.makedirs(tls_ca_path)
    ftp_client.get(f'{crypto_base}/{tls_ca_path}/tlsca.{group_name}.{domain}-cert.pem', f'{tls_ca_path}/tlsca.{group_name}.{domain}-cert.pem')
    server_path = f'organizations/{group_name}.{domain}/peers/{order_id}/tls'
    if not os.path.exists(server_path):
        os.makedirs(server_path)
    ftp_client.get(f'{crypto_base}/{server_path}/server.crt', f'{server_path}/server.crt')
    # NOTE(review): only the SFTP channel is closed here; the SSH connection
    # itself is never closed.
    ftp_client.close()
def generate_peer(peer_id, peer_information, order_group_id, fabric_name, target_host, ca_port, crypto_base):
    """Provision a peer node: join the swarm, enroll MSP and start the container.

    Uploads the org CA's TLS certificate and the orderer org's TLS CA to the
    peer host, joins the host to the docker swarm when it has no token yet,
    runs org- and peer-level MSP enrollment via node_build.py, starts the
    peer with docker-compose, and finally downloads the whole org crypto
    directory back into the local workspace.

    Args:
        peer_id: fully qualified node id, e.g. "peer0.org0.test.com".
        peer_information: node record with an 'address' dict
            (host, ssh_port, fabric_port, sk = SSH private key text).
        order_group_id: orderer group id, e.g. "orderer.test.com".
        fabric_name: network name used by the compose file.
        target_host: swarm manager host to join.
        ca_port: fabric-ca port of the peer org's CA.
        crypto_base: remote working directory, e.g. "/root/opt".
    """
    node_name, group_name, domain = peer_id.split('.', 2)
    address = peer_information['address']
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key_file = io.StringIO(address['sk'])
    private_key = paramiko.RSAKey.from_private_key(key_file)
    ssh.connect(hostname=address['host'], port=address['ssh_port'], username='root', pkey=private_key)
    tls_cert_path = f'organizations/fabric-ca/{group_name}'
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base}/{tls_cert_path} ]; then mkdir -p {crypto_base}/{tls_cert_path}; fi')
    stdout.channel.recv_exit_status()
    ftp_client = ssh.open_sftp()
    ftp_client.put(f'{tls_cert_path}/tls-cert.pem', f'{crypto_base}/{tls_cert_path}/tls-cert.pem')
    tls_ca_path = f'organizations/{order_group_id}/tlsca'
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base}/{tls_ca_path} ]; then mkdir -p {crypto_base}/{tls_ca_path}; fi')
    stdout.channel.recv_exit_status()
    ftp_client.put(f'{tls_ca_path}/tlsca.{order_group_id}-cert.pem', f'{crypto_base}/{tls_ca_path}/tlsca.{order_group_id}-cert.pem')
    file_name = 'node_build.py'
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    # A missing token file means this host has not joined the swarm yet.
    try:
        ftp_client.stat(f'{crypto_base}/token')
    except IOError:
        node_host = address['host']
        ftp_client.put('token', f'{crypto_base}/token')
        stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name join_docker_swarm {node_host} {target_host} {crypto_base}')
        stdout.channel.recv_exit_status()
    peer_yaml_generator = PeerYamlGenerator()
    file_name = peer_yaml_generator.generate(peer_id, fabric_name, address['fabric_port'], crypto_base)
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name org_msp_generate {group_name} {domain} {ca_port} {crypto_base}')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name peer_msp_generate {node_name} {group_name} {domain} {ca_port} {crypto_base}')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    stdin, stdout, stderr = ssh.exec_command(f'docker-compose -f {crypto_base}/{file_name} up -d')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    # Give the peer container a moment to come up before fetching the
    # generated crypto material.
    time.sleep(3)
    peer_path = f'organizations/{group_name}.{domain}'
    if not os.path.exists(peer_path):
        os.makedirs(peer_path)
    sftp_get_r(ftp_client, f'{crypto_base}/{peer_path}', peer_path)
    # NOTE(review): only the SFTP channel is closed here; the SSH connection
    # itself is never closed.
    ftp_client.close()
def generate_order(order_id, order_information, fabric_name, channel_id, peer_group_ids, configtx_filename: str, crypto_base='/root/opt'):
    """Start an orderer container and create the channel artifacts.

    Uploads the orderer's MSP material and every peer org's CA certs to the
    orderer host, generates and uploads the orderer docker-compose file and
    configtx.yaml, waits until both files are visible over SFTP, then runs
    the channel-artifact generation and starts the orderer container.

    Args:
        order_id: fully qualified node id, e.g. "orderer0.orderer.test.com".
        order_information: node record with an 'address' dict
            (host, ssh_port, fabric_port, sk = SSH private key text).
        fabric_name: network name used by the compose file.
        channel_id: channel whose artifacts are generated.
        peer_group_ids: peer org ids whose MSP cacerts must be uploaded.
        configtx_filename: local path of the generated configtx.yaml.
        crypto_base: remote working directory (default "/root/opt").
    """
    node_name, group_name, domain = order_id.split('.', 2)
    address = order_information['address']
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key_file = io.StringIO(address['sk'])
    private_key = paramiko.RSAKey.from_private_key(key_file)
    ssh.connect(hostname=address['host'], port=address['ssh_port'], username='root', pkey=private_key)
    ssh.exec_command(f'if [ ! -d {crypto_base}/channel-artifacts ]; then mkdir -p {crypto_base}/channel-artifacts; fi')
    ftp_client = ssh.open_sftp()
    sftp_put_r(ftp_client, f"organizations/{group_name}.{domain}/peers", f"{crypto_base}/organizations/{group_name}.{domain}/peers")
    for peer in peer_group_ids:
        sftp_put_r(ftp_client, f"organizations/{peer}/msp/cacerts", f"{crypto_base}/organizations/{peer}/msp/cacerts")
        ftp_client.put(f"organizations/{peer}/msp/config.yaml", f"{crypto_base}/organizations/{peer}/msp/config.yaml")
    orderer_yaml_generator = OrderYamlGenerator()
    filename = orderer_yaml_generator.generate(order_id, group_name, node_name, fabric_name, address["fabric_port"], crypto_base)
    # BUGFIX: the compose file must be uploaded/checked/run under its
    # generated name; the previous revision had corrupted "(unknown)"
    # placeholders here instead of {filename}.
    ftp_client.put(filename, f"{crypto_base}/{filename}")
    ftp_client.put(configtx_filename, f'{crypto_base}/{configtx_filename}')
    # Poll until both uploads are visible on the remote side.
    while True:
        try:
            ftp_client.stat(f'{crypto_base}/{configtx_filename}')
            ftp_client.stat(f'{crypto_base}/{filename}')
            print("File exists.")
            break
        except IOError:
            print("File not exists.")
            time.sleep(2)
    ftp_client.close()
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name init_channel_artifacts {fabric_name} {channel_id} "{crypto_base}" {peer_group_ids} ')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    stdin, stdout, stderr = ssh.exec_command(f'docker-compose -f {crypto_base}/{filename} up -d')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    time.sleep(4)
    ssh.close()
def generate_configtx(groups: dict, nodes: dict, orderers: dict, net_name: str, crypto_base: str):
    """Render configtx.yaml from the template for the given topology.

    Args:
        groups: group id -> group record mapping.
        nodes: node id -> node record mapping.
        orderers: orderer node id -> node record mapping.
        net_name: fabric network name.
        crypto_base: remote working directory used in rendered paths.

    Returns:
        The generated file name (as reported by the generator).
    """
    generator = ConfigTXYamlGenerator(net_name, crypto_base)
    rendered = generator.input_from("./template/configtx.yaml").generate(groups, nodes, orderers).output_to("./configtx.yaml")
    return rendered.get_filename()
def parse_json(network_topology_json):
    """Build the whole Fabric network described by a topology JSON document.

    Order of operations: one CA per group, orderer MSP enrollment, peers
    (swarm join, MSP, container), configtx.yaml generation, then the
    orderer containers plus the channel artifacts.

    NOTE(review): generate_ca is called inside the first loop using
    target_host, which is only set once the orderer group has been seen —
    this relies on the orderer group appearing before the peer groups in
    the (insertion-ordered) 'groups' dict.  Confirm the input always lists
    the orderer group first.
    """
    order_group_id = ''
    order_ca_port = ''
    target_host = ''
    peer_group_ids = []
    for group_id, group_information in network_topology_json['groups'].items():
        if group_id.split('.', 1)[0] == 'orderer':
            order_group_id = group_id
            order_ca_port = network_topology_json['nodes'][group_information['nodes']['ca']]['address']['fabric_port']
            # The orderer CA's host doubles as the docker swarm manager.
            target_host = network_topology_json['nodes'][network_topology_json['groups'][group_id]['nodes']['ca']]['address']['host']
        else:
            peer_group_ids.append(group_id)
        generate_ca(group_information['nodes']['ca'], network_topology_json['nodes'][group_information['nodes']['ca']], network_topology_json['blockchains']['fabric-1']['name'], target_host, '/root/opt')
    for order_id in network_topology_json['groups'][order_group_id]['nodes']['orderer']:
        generate_order_msp(order_id, network_topology_json['nodes'][order_id], order_ca_port, '/root/opt')
    for org_id in peer_group_ids:
        peer_ca_port = network_topology_json['nodes'][network_topology_json['groups'][org_id]['nodes']['ca']]['address']['fabric_port']
        leader_peers_ids = network_topology_json['groups'][org_id]['nodes']['leader_peers']
        anchor_peers_ids = network_topology_json['groups'][org_id]['nodes']['anchor_peers']
        committing_peers_ids = network_topology_json['groups'][org_id]['nodes']['committing_peers']
        endorsing_peers_ids = network_topology_json['groups'][org_id]['nodes']['endorsing_peers']
        # A peer may play several roles; build the de-duplicated union so
        # each peer is provisioned exactly once.
        peer_ids = list(set(leader_peers_ids).union(set(anchor_peers_ids).union(set(committing_peers_ids)).union(set(endorsing_peers_ids))))
        for peer_id in peer_ids:
            generate_peer(peer_id, network_topology_json['nodes'][peer_id], order_group_id, network_topology_json['blockchains']['fabric-1']['name'], target_host, peer_ca_port, '/root/opt')
    # Collect every node whose type list includes "orderer" for configtx.
    orderers = dict()
    for node in network_topology_json["nodes"]:
        if "orderer" in network_topology_json["nodes"][node]["type"]:
            orderers[node] = network_topology_json["nodes"][node]
    configtx_filename = generate_configtx(network_topology_json["groups"], network_topology_json["nodes"], orderers, network_topology_json["blockchains"]["fabric-1"]["name"], "/root/opt")
    for order_id in network_topology_json['groups'][order_group_id]['nodes']['orderer']:
        generate_order(order_id, network_topology_json['nodes'][order_id], network_topology_json['blockchains']['fabric-1']['name'], network_topology_json['blockchains']['fabric-1']['channels'][0], peer_group_ids, configtx_filename)
if __name__ == '__main__':
    # Example topology: three hosts, one orderer org (3 orderers + CA) and
    # three peer orgs (one CA and one peer each), all on channel-1.
    network_json = {
        "groups": {
            "orderer.test.com": {
                "nodes": {
                    "ca": "ca.orderer.test.com",
                    "orderer": ["orderer0.orderer.test.com", "orderer1.orderer.test.com", "orderer2.orderer.test.com"]
                },
                "blockchains": "fabric-1"
            },
            "org0.test.com": {
                "nodes": {
                    "ca": "ca.org0.test.com",
                    "leader_peers": ["peer0.org0.test.com"],
                    "anchor_peers": ["peer0.org0.test.com"],
                    "committing_peers": ["peer0.org0.test.com"],
                    "endorsing_peers": ["peer0.org0.test.com"]
                },
                "blockchains": "fabric-1",
                "channel": ["channel-1"]
            },
            "org1.test.com": {
                "nodes": {
                    "ca": "ca.org1.test.com",
                    "leader_peers": ["peer0.org1.test.com"],
                    "anchor_peers": ["peer0.org1.test.com"],
                    "committing_peers": ["peer0.org1.test.com"],
                    "endorsing_peers": ["peer0.org1.test.com"]
                },
                "blockchains": "fabric-1",
                "channel": ["channel-1"]
            },
            "org2.test.com": {
                "nodes": {
                    "ca": "ca.org2.test.com",
                    "leader_peers": ["peer0.org2.test.com"],
                    "anchor_peers": ["peer0.org2.test.com"],
                    "committing_peers": ["peer0.org2.test.com"],
                    "endorsing_peers": ["peer0.org2.test.com"]
                },
                "blockchains": "fabric-1",
                "channel": ["channel-1"]
            }
        },
        # 'sk' fields are filled in below from ./id_rsa before use.
        "nodes": {
            "ca.orderer.test.com": {
                "address": {"host": "10.134.68.98", "ssh_port": "22", "fabric_port": "7054", "sk": ""},
                "type": ["ca"]
            },
            "orderer0.orderer.test.com": {
                "address": {"host": "10.134.68.98", "ssh_port": "22", "fabric_port": "7050", "sk": ""},
                "type": ["orderer"]
            },
            "orderer1.orderer.test.com": {
                "address": {"host": "10.134.50.142", "ssh_port": "22", "fabric_port": "7050", "sk": ""},
                "type": ["orderer"]
            },
            "orderer2.orderer.test.com": {
                "address": {"host": "10.134.50.70", "ssh_port": "22", "fabric_port": "7050", "sk": ""},
                "type": ["orderer"]
            },
            "ca.org0.test.com": {
                "address": {"host": "10.134.68.98", "ssh_port": "22", "fabric_port": "8054", "sk": ""},
                "type": ["ca"]
            },
            "peer0.org0.test.com": {
                "address": {"host": "10.134.68.98", "ssh_port": "22", "fabric_port": "7051", "sk": ""},
                "bootstrap": ["127.0.0.1:7051"],
                "type": ["leader_peer", "anchor_peer", "committing_peer", "endorsing_peers"]
            },
            "ca.org1.test.com": {
                "address": {"host": "10.134.50.142", "ssh_port": "22", "fabric_port": "7054", "sk": ""},
                "type": ["ca"]
            },
            "peer0.org1.test.com": {
                "address": {"host": "10.134.50.142", "ssh_port": "22", "fabric_port": "7051", "sk": ""},
                "bootstrap": ["127.0.0.1:7051"],
                "type": ["leader_peer", "anchor_peer", "committing_peer", "endorsing_peers"]
            },
            "ca.org2.test.com": {
                "address": {"host": "10.134.50.70", "ssh_port": "22", "fabric_port": "7054", "sk": ""},
                "type": ["ca"]
            },
            "peer0.org2.test.com": {
                "address": {"host": "10.134.50.70", "ssh_port": "22", "fabric_port": "7051", "sk": ""},
                "bootstrap": ["127.0.0.1:7051"],
                "type": ["leader_peer", "anchor_peer", "committing_peer", "endorsing_peers"]
            },
        },
        "blockchains": {
            "fabric-1": {
                "name": "FabricDraw",
                "channels": ["channel-1"]
            }
        }
    }
    # Every host uses the same SSH private key, read from ./id_rsa.
    with open('id_rsa', 'r') as file:
        sk = file.read()
    for node_id in network_json['nodes'].keys():
        network_json['nodes'][node_id]['address']['sk'] = sk
    parse_json(network_json)
| 52.339181 | 232 | 0.632458 | import paramiko
import time
import io
import os
import stat
from yaml_generator import CAYamlGenerator, OrderYamlGenerator, PeerYamlGenerator, ConfigTXYamlGenerator
def sftp_get_r(sftp_client, remote_path, local_path):
    """Recursively download a remote directory tree over SFTP.

    Silently returns when remote_path does not exist on the server.
    Local directories are created as needed and files fetched one by one.
    """
    try:
        sftp_client.stat(remote_path)
    except IOError:
        # Nothing to copy when the remote path is absent.
        return
    if not os.path.exists(local_path):
        os.mkdir(local_path)
    for entry in sftp_client.listdir(remote_path):
        remote_child = f'{remote_path}/{entry}'
        local_child = os.path.join(local_path, entry)
        if stat.S_ISDIR(sftp_client.stat(remote_child).st_mode):
            sftp_get_r(sftp_client, remote_child, local_child)
        else:
            sftp_client.get(remote_child, local_child)
def sftp_put_r(sftp_client, local_path, remote_path):
    """Recursively upload a local directory tree over SFTP.

    Does nothing when local_path does not exist.  Every component of
    remote_path is created on the server when missing; files are uploaded
    and sub-directories recursed into.
    """
    if not os.path.exists(local_path):
        return
    # Create each ancestor of remote_path that is not present yet.
    partial = ""
    for part in remote_path.split("/"):
        if not part:
            continue
        partial = f"{partial}/{part}"
        try:
            sftp_client.listdir(partial)
        except IOError:
            sftp_client.mkdir(partial)
    try:
        sftp_client.stat(remote_path)
    except IOError:
        sftp_client.mkdir(remote_path)
    for entry in os.listdir(local_path):
        local_child = os.path.join(local_path, entry)
        remote_child = f'{remote_path}/{entry}'
        if os.path.isfile(local_child):
            sftp_client.put(local_child, remote_child)
        else:
            sftp_put_r(sftp_client, local_child, remote_child)
def generate_ca(ca_id, ca_information, fabric_name, target_host, crypto_base):
    """Start a fabric-ca container for one group on its remote host.

    NOTE(review): this file appears to contain two copies of this function;
    at import time a later definition replaces an earlier identical one.

    Over SSH: creates the remote working directory, uploads node_build.py,
    initializes the docker swarm (orderer group) or joins it (other groups),
    generates and uploads the CA docker-compose file, starts the container
    and downloads the CA's TLS certificate into the local workspace.

    Args:
        ca_id: fully qualified CA node id, e.g. "ca.org0.test.com".
        ca_information: node record with an 'address' dict
            (host, ssh_port, fabric_port, sk = SSH private key text).
        fabric_name: network name passed to the swarm/compose setup.
        target_host: swarm manager host (the orderer CA's host).
        crypto_base: remote working directory, e.g. "/root/opt".
    """
    node_name, group_name, domain = ca_id.split('.', 2)
    address = ca_information['address']
    key_file = io.StringIO(address['sk'])
    private_key = paramiko.RSAKey.from_private_key(key_file)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=address['host'], port=address['ssh_port'], username='root', pkey=private_key)
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base} ]; then mkdir -p {crypto_base}; fi')
    stdout.channel.recv_exit_status()
    ftp_client = ssh.open_sftp()
    file_name = 'node_build.py'
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    if group_name == 'orderer':
        # The orderer group's CA host becomes the swarm manager; keep its
        # join token locally so the other hosts can join later.
        stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name init_docker_swarm {target_host} {fabric_name} {crypto_base}')
        stdout.channel.recv_exit_status()
        ftp_client.get(f'{crypto_base}/token', 'token')
    else:
        # A missing token file on the host means it has not joined the swarm.
        try:
            ftp_client.stat(f'{crypto_base}/token')
        except IOError:
            node_host = address['host']
            ftp_client.put('token', f'{crypto_base}/token')
            stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name join_docker_swarm {node_host} {target_host} {crypto_base}')
            stdout.channel.recv_exit_status()
    ca_yaml_generator = CAYamlGenerator()
    file_name = ca_yaml_generator.generate(ca_id, group_name, fabric_name, address['fabric_port'], crypto_base)
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    stdin, stdout, stderr = ssh.exec_command(f'docker-compose -f {crypto_base}/{file_name} up -d')
    stdout.channel.recv_exit_status()
    # Give the CA container a moment to write its TLS certificate.
    time.sleep(4)
    tls_cert_path = f'organizations/fabric-ca/{group_name}'
    if not os.path.exists(tls_cert_path):
        os.makedirs(tls_cert_path)
    ftp_client.get(f'{crypto_base}/{tls_cert_path}/tls-cert.pem', f'{tls_cert_path}/tls-cert.pem')
    ftp_client.close()
    ssh.close()
def generate_order_msp(order_id, order_information, ca_port, crypto_base):
    """Enroll an orderer with its org CA and fetch the resulting MSP material.

    NOTE(review): this file appears to contain two copies of this function;
    at import time a later definition replaces an earlier identical one.

    Connects to the orderer host over SSH, uploads the CA's TLS certificate
    and the node_build.py helper, runs org- and node-level MSP enrollment
    remotely, then downloads the org TLS CA certificate and the orderer's
    TLS server certificate into the local workspace.

    Args:
        order_id: fully qualified node id, e.g. "orderer0.orderer.test.com".
        order_information: node record with an 'address' dict
            (host, ssh_port, fabric_port, sk = SSH private key text).
        ca_port: fabric-ca port of the group's CA.
        crypto_base: remote working directory, e.g. "/root/opt".
    """
    node_name, group_name, domain = order_id.split('.', 2)
    address = order_information['address']
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key_file = io.StringIO(address['sk'])
    private_key = paramiko.RSAKey.from_private_key(key_file)
    ssh.connect(hostname=address['host'], port=address['ssh_port'], username='root', pkey=private_key)
    tls_cert_path = f'organizations/fabric-ca/{group_name}'
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base}/{tls_cert_path} ]; then mkdir -p {crypto_base}/{tls_cert_path}; fi')
    stdout.channel.recv_exit_status()
    ftp_client = ssh.open_sftp()
    ftp_client.put(f'{tls_cert_path}/tls-cert.pem', f'{crypto_base}/{tls_cert_path}/tls-cert.pem')
    file_name = 'node_build.py'
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    # Org-level then node-level enrollment on the remote host.
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name org_msp_generate {group_name} {domain} {ca_port} {crypto_base}')
    stdout.channel.recv_exit_status()
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name peer_msp_generate {node_name} {group_name} {domain} {ca_port} {crypto_base}')
    stdout.channel.recv_exit_status()
    tls_ca_path = f'organizations/{group_name}.{domain}/tlsca'
    if not os.path.exists(tls_ca_path):
        os.makedirs(tls_ca_path)
    ftp_client.get(f'{crypto_base}/{tls_ca_path}/tlsca.{group_name}.{domain}-cert.pem', f'{tls_ca_path}/tlsca.{group_name}.{domain}-cert.pem')
    server_path = f'organizations/{group_name}.{domain}/peers/{order_id}/tls'
    if not os.path.exists(server_path):
        os.makedirs(server_path)
    ftp_client.get(f'{crypto_base}/{server_path}/server.crt', f'{server_path}/server.crt')
    # NOTE(review): only the SFTP channel is closed here; the SSH connection
    # itself is never closed.
    ftp_client.close()
def generate_peer(peer_id, peer_information, order_group_id, fabric_name, target_host, ca_port, crypto_base):
    """Provision and start a peer node container on its remote host.

    Uploads CA TLS material and helpers, joins the host to the docker
    swarm on first contact (a remote 'token' file marks membership),
    generates org and peer MSPs, brings up the peer via docker-compose,
    and finally mirrors the generated organization tree back locally.

    Args:
        peer_id: Fully qualified node id, e.g. 'peer0.org0.test.com'.
        peer_information: Node record holding the 'address' dict.
        order_group_id: Id of the orderer group (for its TLS CA cert).
        fabric_name: Network name used in generated compose files.
        target_host: Swarm manager host to join.
        ca_port: Fabric CA port of this peer's organization.
        crypto_base: Remote base directory for crypto material.
    """
    node_name, group_name, domain = peer_id.split('.', 2)
    address = peer_information['address']
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key_file = io.StringIO(address['sk'])
    private_key = paramiko.RSAKey.from_private_key(key_file)
    ssh.connect(hostname=address['host'], port=address['ssh_port'], username='root', pkey=private_key)
    tls_cert_path = f'organizations/fabric-ca/{group_name}'
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base}/{tls_cert_path} ]; then mkdir -p {crypto_base}/{tls_cert_path}; fi')
    stdout.channel.recv_exit_status()
    ftp_client = ssh.open_sftp()
    ftp_client.put(f'{tls_cert_path}/tls-cert.pem', f'{crypto_base}/{tls_cert_path}/tls-cert.pem')
    tls_ca_path = f'organizations/{order_group_id}/tlsca'
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base}/{tls_ca_path} ]; then mkdir -p {crypto_base}/{tls_ca_path}; fi')
    stdout.channel.recv_exit_status()
    ftp_client.put(f'{tls_ca_path}/tlsca.{order_group_id}-cert.pem', f'{crypto_base}/{tls_ca_path}/tlsca.{order_group_id}-cert.pem')
    file_name = 'node_build.py'
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    try:
        # The presence of a remote 'token' file means this host already
        # joined the docker swarm; stat() raises IOError when absent.
        ftp_client.stat(f'{crypto_base}/token')
    except IOError:
        node_host = address['host']
        ftp_client.put('token', f'{crypto_base}/token')
        stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name join_docker_swarm {node_host} {target_host} {crypto_base}')
        stdout.channel.recv_exit_status()
    peer_yaml_generator = PeerYamlGenerator()
    file_name = peer_yaml_generator.generate(peer_id, fabric_name, address['fabric_port'], crypto_base)
    ftp_client.put(file_name, f'{crypto_base}/{file_name}')
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name org_msp_generate {group_name} {domain} {ca_port} {crypto_base}')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name peer_msp_generate {node_name} {group_name} {domain} {ca_port} {crypto_base}')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    stdin, stdout, stderr = ssh.exec_command(f'docker-compose -f {crypto_base}/{file_name} up -d')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    # Give the container a moment to create its crypto material.
    time.sleep(3)
    peer_path = f'organizations/{group_name}.{domain}'
    if not os.path.exists(peer_path):
        os.makedirs(peer_path)
    sftp_get_r(ftp_client, f'{crypto_base}/{peer_path}', peer_path)
    ftp_client.close()
    # Fix: the SSH connection was previously leaked (never closed).
    ssh.close()
def generate_order(order_id, order_information, fabric_name, channel_id, peer_group_ids, configtx_filename: str, crypto_base='/root/opt'):
    """Start an orderer container and create the channel artifacts.

    Pushes the orderer's MSP tree, every peer org's CA certs, the
    generated compose file and configtx.yaml to the remote host, runs
    the remote channel-artifact bootstrap, then brings the orderer up.

    Args:
        order_id: Fully qualified orderer node id.
        order_information: Node record holding the 'address' dict.
        fabric_name: Network name used in generated compose files.
        channel_id: Channel to create genesis/anchor artifacts for.
        peer_group_ids: Ids of all peer organizations on the channel.
        configtx_filename: Locally generated configtx.yaml filename.
        crypto_base: Remote base directory for crypto material.
    """
    node_name, group_name, domain = order_id.split('.', 2)
    address = order_information['address']
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key_file = io.StringIO(address['sk'])
    private_key = paramiko.RSAKey.from_private_key(key_file)
    ssh.connect(hostname=address['host'], port=address['ssh_port'], username='root', pkey=private_key)
    # Fix: wait for the mkdir to finish before any SFTP/exec that may
    # depend on it (exec_command is asynchronous).
    stdin, stdout, stderr = ssh.exec_command(f'if [ ! -d {crypto_base}/channel-artifacts ]; then mkdir -p {crypto_base}/channel-artifacts; fi')
    stdout.channel.recv_exit_status()
    ftp_client = ssh.open_sftp()
    sftp_put_r(ftp_client, f"organizations/{group_name}.{domain}/peers", f"{crypto_base}/organizations/{group_name}.{domain}/peers")
    for peer in peer_group_ids:
        sftp_put_r(ftp_client, f"organizations/{peer}/msp/cacerts", f"{crypto_base}/organizations/{peer}/msp/cacerts")
        ftp_client.put(f"organizations/{peer}/msp/config.yaml", f"{crypto_base}/organizations/{peer}/msp/config.yaml")
    orderer_yaml_generator = OrderYamlGenerator()
    filename = orderer_yaml_generator.generate(order_id, group_name, node_name, fabric_name, address["fabric_port"], crypto_base)
    # Fix: the upload/stat/compose paths used a corrupted "(unknown)"
    # literal; they must reference the generated compose filename.
    ftp_client.put(filename, f"{crypto_base}/{filename}")
    ftp_client.put(configtx_filename, f'{crypto_base}/{configtx_filename}')
    while True:
        try:
            ftp_client.stat(f'{crypto_base}/{configtx_filename}')
            ftp_client.stat(f'{crypto_base}/{filename}')
            print("File exists.")
            break
        except IOError:
            print("File not exists.")
            time.sleep(2)
    ftp_client.close()
    stdin, stdout, stderr = ssh.exec_command(f'python {crypto_base}/node_build.py --func_name init_channel_artifacts {fabric_name} {channel_id} "{crypto_base}" {peer_group_ids} ')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    stdin, stdout, stderr = ssh.exec_command(f'docker-compose -f {crypto_base}/{filename} up -d')
    stdout.channel.recv_exit_status()
    print(stderr.readlines())
    time.sleep(4)
    ssh.close()
def generate_configtx(groups: dict, nodes: dict, orderers: dict, net_name: str, crypto_base: str):
    """Render ./configtx.yaml from the template and return its filename.

    Loads ./template/configtx.yaml, fills it in from the topology
    (groups, nodes, orderers), writes ./configtx.yaml, and returns the
    name of the written file.
    """
    generator = ConfigTXYamlGenerator(net_name, crypto_base)
    loaded = generator.input_from("./template/configtx.yaml")
    rendered = loaded.generate(groups, nodes, orderers)
    written = rendered.output_to("./configtx.yaml")
    return written.get_filename()
def parse_json(network_topology_json):
    """Drive full Fabric network bootstrap from a topology description.

    Walks the topology dict and, in order: generates CAs, orderer MSPs,
    peers, configtx.yaml, and finally the orderer containers plus
    channel artifacts. All remote material is rooted at /root/opt.
    NOTE(review): assumes exactly one orderer group (id begins with
    'orderer') and a blockchain entry keyed 'fabric-1' -- confirm.
    """
    order_group_id = ''
    order_ca_port = ''
    target_host = ''
    peer_group_ids = []
    # Pass 1: create each group's CA; record the orderer group's details.
    for group_id, group_information in network_topology_json['groups'].items():
        if group_id.split('.', 1)[0] == 'orderer':
            order_group_id = group_id
            order_ca_port = network_topology_json['nodes'][group_information['nodes']['ca']]['address']['fabric_port']
            # The orderer CA's host doubles as the docker swarm target.
            target_host = network_topology_json['nodes'][network_topology_json['groups'][group_id]['nodes']['ca']]['address']['host']
        else:
            peer_group_ids.append(group_id)
        generate_ca(group_information['nodes']['ca'], network_topology_json['nodes'][group_information['nodes']['ca']], network_topology_json['blockchains']['fabric-1']['name'], target_host, '/root/opt')
    # Pass 2: MSP material for every orderer node.
    for order_id in network_topology_json['groups'][order_group_id]['nodes']['orderer']:
        generate_order_msp(order_id, network_topology_json['nodes'][order_id], order_ca_port, '/root/opt')
    # Pass 3: bring up each distinct peer (union of all four peer roles).
    for org_id in peer_group_ids:
        peer_ca_port = network_topology_json['nodes'][network_topology_json['groups'][org_id]['nodes']['ca']]['address']['fabric_port']
        leader_peers_ids = network_topology_json['groups'][org_id]['nodes']['leader_peers']
        anchor_peers_ids = network_topology_json['groups'][org_id]['nodes']['anchor_peers']
        committing_peers_ids = network_topology_json['groups'][org_id]['nodes']['committing_peers']
        endorsing_peers_ids = network_topology_json['groups'][org_id]['nodes']['endorsing_peers']
        peer_ids = list(set(leader_peers_ids).union(set(anchor_peers_ids).union(set(committing_peers_ids)).union(set(endorsing_peers_ids))))
        for peer_id in peer_ids:
            generate_peer(peer_id, network_topology_json['nodes'][peer_id], order_group_id, network_topology_json['blockchains']['fabric-1']['name'], target_host, peer_ca_port, '/root/opt')
    # Pass 4: collect every orderer node record for configtx generation.
    orderers = dict()
    for node in network_topology_json["nodes"]:
        if "orderer" in network_topology_json["nodes"][node]["type"]:
            orderers[node] = network_topology_json["nodes"][node]
    configtx_filename = generate_configtx(network_topology_json["groups"], network_topology_json["nodes"], orderers, network_topology_json["blockchains"]["fabric-1"]["name"], "/root/opt")
    # Pass 5: start orderers and create the channel artifacts.
    for order_id in network_topology_json['groups'][order_group_id]['nodes']['orderer']:
        generate_order(order_id, network_topology_json['nodes'][order_id], network_topology_json['blockchains']['fabric-1']['name'], network_topology_json['blockchains']['fabric-1']['channels'][0], peer_group_ids, configtx_filename)
if __name__ == '__main__':
    # Example topology: one orderer org (three orderers) plus three peer
    # orgs, each with its own CA and a single peer filling every role.
    network_json = {
        "groups": {
            "orderer.test.com": {
                "nodes": {
                    "ca": "ca.orderer.test.com",
                    "orderer": ["orderer0.orderer.test.com", "orderer1.orderer.test.com", "orderer2.orderer.test.com"]
                },
                "blockchains": "fabric-1"
            },
            "org0.test.com": {
                "nodes": {
                    "ca": "ca.org0.test.com",
                    "leader_peers": ["peer0.org0.test.com"],
                    "anchor_peers": ["peer0.org0.test.com"],
                    "committing_peers": ["peer0.org0.test.com"],
                    "endorsing_peers": ["peer0.org0.test.com"]
                },
                "blockchains": "fabric-1",
                "channel": ["channel-1"]
            },
            "org1.test.com": {
                "nodes": {
                    "ca": "ca.org1.test.com",
                    "leader_peers": ["peer0.org1.test.com"],
                    "anchor_peers": ["peer0.org1.test.com"],
                    "committing_peers": ["peer0.org1.test.com"],
                    "endorsing_peers": ["peer0.org1.test.com"]
                },
                "blockchains": "fabric-1",
                "channel": ["channel-1"]
            },
            "org2.test.com": {
                "nodes": {
                    "ca": "ca.org2.test.com",
                    "leader_peers": ["peer0.org2.test.com"],
                    "anchor_peers": ["peer0.org2.test.com"],
                    "committing_peers": ["peer0.org2.test.com"],
                    "endorsing_peers": ["peer0.org2.test.com"]
                },
                "blockchains": "fabric-1",
                "channel": ["channel-1"]
            }
        },
        "nodes": {
            "ca.orderer.test.com": {
                "address": {"host": "10.134.68.98", "ssh_port": "22", "fabric_port": "7054", "sk": ""},
                "type": ["ca"]
            },
            "orderer0.orderer.test.com": {
                "address": {"host": "10.134.68.98", "ssh_port": "22", "fabric_port": "7050", "sk": ""},
                "type": ["orderer"]
            },
            "orderer1.orderer.test.com": {
                "address": {"host": "10.134.50.142", "ssh_port": "22", "fabric_port": "7050", "sk": ""},
                "type": ["orderer"]
            },
            "orderer2.orderer.test.com": {
                "address": {"host": "10.134.50.70", "ssh_port": "22", "fabric_port": "7050", "sk": ""},
                "type": ["orderer"]
            },
            "ca.org0.test.com": {
                "address": {"host": "10.134.68.98", "ssh_port": "22", "fabric_port": "8054", "sk": ""},
                "type": ["ca"]
            },
            "peer0.org0.test.com": {
                "address": {"host": "10.134.68.98", "ssh_port": "22", "fabric_port": "7051", "sk": ""},
                "bootstrap": ["127.0.0.1:7051"],
                "type": ["leader_peer", "anchor_peer", "committing_peer", "endorsing_peers"]
            },
            "ca.org1.test.com": {
                "address": {"host": "10.134.50.142", "ssh_port": "22", "fabric_port": "7054", "sk": ""},
                "type": ["ca"]
            },
            "peer0.org1.test.com": {
                "address": {"host": "10.134.50.142", "ssh_port": "22", "fabric_port": "7051", "sk": ""},
                "bootstrap": ["127.0.0.1:7051"],
                "type": ["leader_peer", "anchor_peer", "committing_peer", "endorsing_peers"]
            },
            "ca.org2.test.com": {
                "address": {"host": "10.134.50.70", "ssh_port": "22", "fabric_port": "7054", "sk": ""},
                "type": ["ca"]
            },
            "peer0.org2.test.com": {
                "address": {"host": "10.134.50.70", "ssh_port": "22", "fabric_port": "7051", "sk": ""},
                "bootstrap": ["127.0.0.1:7051"],
                "type": ["leader_peer", "anchor_peer", "committing_peer", "endorsing_peers"]
            },
        },
        "blockchains": {
            "fabric-1": {
                "name": "FabricDraw",
                "channels": ["channel-1"]
            }
        }
    }
    # Load the operator's SSH private key and inject it into every node
    # record so the generate_* helpers can authenticate as root.
    with open('id_rsa', 'r') as file:
        sk = file.read()
    for node_id in network_json['nodes'].keys():
        network_json['nodes'][node_id]['address']['sk'] = sk
    parse_json(network_json)
| true | true |
f7269dbe969bf3a3bdd44ac8ab46929aa9601789 | 2,670 | py | Python | import_metadata.py | arru/plex-utilities | b5ea04c090f1fdc008ae39239c1b03b435036acb | [
"BSD-3-Clause"
] | 1 | 2020-12-26T14:45:53.000Z | 2020-12-26T14:45:53.000Z | import_metadata.py | arru/plex-utilities | b5ea04c090f1fdc008ae39239c1b03b435036acb | [
"BSD-3-Clause"
] | null | null | null | import_metadata.py | arru/plex-utilities | b5ea04c090f1fdc008ae39239c1b03b435036acb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/local/bin/python3
# https://python-plexapi.readthedocs.io/en/latest/modules/media.html
# https://github.com/liamks/libpytunes/blob/master/README.md
from os import path
import ImportUtils
CONFIGURATION = ImportUtils.get_configuration()
class FakePlexTrack:
    """In-memory stand-in mirroring the Plex track fields this script
    reads and writes, snapshotted from a real Plex track object.
    """

    # Class-level defaults double as the list of mirrored fields.
    originalTitle = ""
    userRating = 0.0
    year = None
    addedAt = ImportUtils.CURRENT_DATE
    index = 0
    lastViewedAt = None
    title = ""
    titleSort = None
    viewCount = 0

    _MIRRORED_ATTRS = (
        'originalTitle', 'userRating', 'year', 'addedAt', 'index',
        'lastViewedAt', 'title', 'titleSort', 'viewCount',
    )

    def __init__(self, plex_track):
        """Copy each mirrored attribute from the given Plex track."""
        for attr in self._MIRRORED_ATTRS:
            setattr(self, attr, getattr(plex_track, attr))
# Sync ratings, years and track numbers from an iTunes library into Plex.
plex = ImportUtils.PlexWrapper(CONFIGURATION)
PLEX_TRACKS = plex.server.library.section('Music').searchTracks()
itunes = ImportUtils.ItunesWrapper(CONFIGURATION)
itunes_tracks = itunes.get_tracks_dict()
# Free the parsed iTunes library once the path->track dict is built.
del itunes
libraryMisses = 0
for plex_track_real in PLEX_TRACKS:
    # Match Plex and iTunes tracks by the underlying media file path.
    plex_path = plex_track_real.media[0].parts[0].file
    if not plex_path in itunes_tracks:
        # print("'%s' not found in itunes_tracks" % plex_path)
        libraryMisses += 1
        continue
    itunesTrack = itunes_tracks[plex_path]
    assert path.isfile(plex_path)
    # plex_track = FakePlexTrack(plex_track_real)
    plex_track = plex_track_real
    ImportUtils.validatePlexTrack(plex_track)
    if itunesTrack.rating:
        # iTunes rates 0-100; Plex userRating is 0.0-10.0 (0-5 stars).
        plex_rating = float(itunesTrack.rating)/10.0
        # (float) - Rating of this track (0.0 - 10.0) equaling (0 stars - 5 stars)
        #.value
        plex_track.edit(**{'userRating.value': plex_rating})
    # (int) - Year this track was released.
    if itunesTrack.year:
        plex_track.edit(**{'year.value': itunesTrack.year})
    # addedAt (datetime) - Datetime this item was added to the library.
    # index (sting) - Index Number (often the track number).
    if itunesTrack.track_number:
        plex_track.edit(**{'index.value': itunesTrack.track_number})
    # lastViewedAt (datetime) - Datetime item was last accessed.
    # title (str) - Artist, Album or Track title. (Jason Mraz, We Sing, Lucky, etc.)
    # originalTitle (str) - Track artist.
    # titleSort (str) - Title to use when sorting (defaults to title).
    # viewCount (int) - Count of times this item was accessed.
    ImportUtils.validatePlexTrack(plex_track)
if libraryMisses > 0:
    print ("[WARNING] %d Plex tracks not found in iTunes metadata" % libraryMisses)
| 30.689655 | 84 | 0.696629 |
from os import path
import ImportUtils
CONFIGURATION = ImportUtils.get_configuration()
class FakePlexTrack:
    """In-memory stand-in mirroring the Plex track fields this script
    reads and writes, snapshotted from a real Plex track object.
    """
    originalTitle = ""
    userRating = 0.0
    year = None
    addedAt = ImportUtils.CURRENT_DATE
    index = 0
    lastViewedAt = None
    title = ""
    titleSort = None
    viewCount = 0
    def __init__(self, plex_track):
        """Copy each mirrored attribute from the given Plex track."""
        self.originalTitle = plex_track.originalTitle
        self.userRating = plex_track.userRating
        self.year = plex_track.year
        self.addedAt = plex_track.addedAt
        self.index = plex_track.index
        self.lastViewedAt = plex_track.lastViewedAt
        self.title = plex_track.title
        self.titleSort = plex_track.titleSort
        self.viewCount = plex_track.viewCount
# Sync ratings, years and track numbers from an iTunes library into Plex.
plex = ImportUtils.PlexWrapper(CONFIGURATION)
PLEX_TRACKS = plex.server.library.section('Music').searchTracks()
itunes = ImportUtils.ItunesWrapper(CONFIGURATION)
itunes_tracks = itunes.get_tracks_dict()
# Free the parsed iTunes library once the path->track dict is built.
del itunes
libraryMisses = 0
for plex_track_real in PLEX_TRACKS:
    # Match Plex and iTunes tracks by the underlying media file path.
    plex_path = plex_track_real.media[0].parts[0].file
    if not plex_path in itunes_tracks:
        libraryMisses += 1
        continue
    itunesTrack = itunes_tracks[plex_path]
    assert path.isfile(plex_path)
    plex_track = plex_track_real
    ImportUtils.validatePlexTrack(plex_track)
    if itunesTrack.rating:
        # iTunes rates 0-100; Plex userRating is 0.0-10.0 (0-5 stars).
        plex_rating = float(itunesTrack.rating)/10.0
        plex_track.edit(**{'userRating.value': plex_rating})
    if itunesTrack.year:
        plex_track.edit(**{'year.value': itunesTrack.year})
    if itunesTrack.track_number:
        plex_track.edit(**{'index.value': itunesTrack.track_number})
    ImportUtils.validatePlexTrack(plex_track)
if libraryMisses > 0:
    print ("[WARNING] %d Plex tracks not found in iTunes metadata" % libraryMisses)
| true | true |
f7269e3d1d7c351c9df854a7024d0200b141ab6d | 5,084 | py | Python | sdk/python/pulumi_azure_nextgen/authorization/latest/get_management_lock_at_resource_level.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/authorization/latest/get_management_lock_at_resource_level.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/authorization/latest/get_management_lock_at_resource_level.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetManagementLockAtResourceLevelResult',
'AwaitableGetManagementLockAtResourceLevelResult',
'get_management_lock_at_resource_level',
]
@pulumi.output_type
class GetManagementLockAtResourceLevelResult:
    """
    The lock information.
    """
    def __init__(__self__, level=None, name=None, notes=None, owners=None, type=None):
        # Generated constructor: validate each field's runtime type, then
        # store it via pulumi.set so the @pulumi.output_type machinery can
        # serve it through the matching @property getters below.
        if level and not isinstance(level, str):
            raise TypeError("Expected argument 'level' to be a str")
        pulumi.set(__self__, "level", level)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if notes and not isinstance(notes, str):
            raise TypeError("Expected argument 'notes' to be a str")
        pulumi.set(__self__, "notes", notes)
        if owners and not isinstance(owners, list):
            raise TypeError("Expected argument 'owners' to be a list")
        pulumi.set(__self__, "owners", owners)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def level(self) -> str:
        """
        The level of the lock. Possible values are: NotSpecified, CanNotDelete, ReadOnly. CanNotDelete means authorized users are able to read and modify the resources, but not delete. ReadOnly means authorized users can only read from a resource, but they can't modify or delete it.
        """
        return pulumi.get(self, "level")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the lock.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def notes(self) -> Optional[str]:
        """
        Notes about the lock. Maximum of 512 characters.
        """
        return pulumi.get(self, "notes")
    @property
    @pulumi.getter
    def owners(self) -> Optional[Sequence['outputs.ManagementLockOwnerResponse']]:
        """
        The owners of the lock.
        """
        return pulumi.get(self, "owners")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The resource type of the lock - Microsoft.Authorization/locks.
        """
        return pulumi.get(self, "type")
class AwaitableGetManagementLockAtResourceLevelResult(GetManagementLockAtResourceLevelResult):
    """Awaitable wrapper: lets callers `await` the result even though it
    is computed synchronously."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generator that never actually yields -- awaiting resolves
        # immediately to a plain result copy.
        if False:
            yield self
        return GetManagementLockAtResourceLevelResult(
            level=self.level,
            name=self.name,
            notes=self.notes,
            owners=self.owners,
            type=self.type)
def get_management_lock_at_resource_level(lock_name: Optional[str] = None,
                                          parent_resource_path: Optional[str] = None,
                                          resource_group_name: Optional[str] = None,
                                          resource_name: Optional[str] = None,
                                          resource_provider_namespace: Optional[str] = None,
                                          resource_type: Optional[str] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagementLockAtResourceLevelResult:
    """
    Use this data source to access information about an existing resource.

    :param str lock_name: The name of lock.
    :param str parent_resource_path: An extra path parameter needed in some services, like SQL Databases.
    :param str resource_group_name: The name of the resource group.
    :param str resource_name: The name of the resource.
    :param str resource_provider_namespace: The namespace of the resource provider.
    :param str resource_type: The type of the resource.
    """
    # Map snake_case arguments to the camelCase keys the provider expects.
    __args__ = dict()
    __args__['lockName'] = lock_name
    __args__['parentResourcePath'] = parent_resource_path
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    __args__['resourceProviderNamespace'] = resource_provider_namespace
    __args__['resourceType'] = resource_type
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the provider; wrapped so it can also be
    # awaited by async callers.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:authorization/latest:getManagementLockAtResourceLevel', __args__, opts=opts, typ=GetManagementLockAtResourceLevelResult).value
    return AwaitableGetManagementLockAtResourceLevelResult(
        level=__ret__.level,
        name=__ret__.name,
        notes=__ret__.notes,
        owners=__ret__.owners,
        type=__ret__.type)
| 39.107692 | 283 | 0.648308 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetManagementLockAtResourceLevelResult',
'AwaitableGetManagementLockAtResourceLevelResult',
'get_management_lock_at_resource_level',
]
@pulumi.output_type
class GetManagementLockAtResourceLevelResult:
    """Generated result type describing a management lock on a resource."""
    def __init__(__self__, level=None, name=None, notes=None, owners=None, type=None):
        # Generated constructor: validate each field's runtime type, then
        # store it via pulumi.set for the @property getters below.
        if level and not isinstance(level, str):
            raise TypeError("Expected argument 'level' to be a str")
        pulumi.set(__self__, "level", level)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if notes and not isinstance(notes, str):
            raise TypeError("Expected argument 'notes' to be a str")
        pulumi.set(__self__, "notes", notes)
        if owners and not isinstance(owners, list):
            raise TypeError("Expected argument 'owners' to be a list")
        pulumi.set(__self__, "owners", owners)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def level(self) -> str:
        """Lock level: NotSpecified, CanNotDelete, or ReadOnly."""
        return pulumi.get(self, "level")
    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the lock."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def notes(self) -> Optional[str]:
        """Notes about the lock (max 512 characters)."""
        return pulumi.get(self, "notes")
    @property
    @pulumi.getter
    def owners(self) -> Optional[Sequence['outputs.ManagementLockOwnerResponse']]:
        """The owners of the lock."""
        return pulumi.get(self, "owners")
    @property
    @pulumi.getter
    def type(self) -> str:
        """The resource type of the lock - Microsoft.Authorization/locks."""
        return pulumi.get(self, "type")
class AwaitableGetManagementLockAtResourceLevelResult(GetManagementLockAtResourceLevelResult):
    """Awaitable wrapper: lets callers `await` the result even though it
    is computed synchronously."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generator that never actually yields -- awaiting resolves
        # immediately to a plain result copy.
        if False:
            yield self
        return GetManagementLockAtResourceLevelResult(
            level=self.level,
            name=self.name,
            notes=self.notes,
            owners=self.owners,
            type=self.type)
def get_management_lock_at_resource_level(lock_name: Optional[str] = None,
                                          parent_resource_path: Optional[str] = None,
                                          resource_group_name: Optional[str] = None,
                                          resource_name: Optional[str] = None,
                                          resource_provider_namespace: Optional[str] = None,
                                          resource_type: Optional[str] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagementLockAtResourceLevelResult:
    """Fetch the management lock applied at the level of one resource."""
    # Map snake_case arguments to the camelCase keys the provider expects.
    __args__ = dict()
    __args__['lockName'] = lock_name
    __args__['parentResourcePath'] = parent_resource_path
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    __args__['resourceProviderNamespace'] = resource_provider_namespace
    __args__['resourceType'] = resource_type
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:authorization/latest:getManagementLockAtResourceLevel', __args__, opts=opts, typ=GetManagementLockAtResourceLevelResult).value
    return AwaitableGetManagementLockAtResourceLevelResult(
        level=__ret__.level,
        name=__ret__.name,
        notes=__ret__.notes,
        owners=__ret__.owners,
        type=__ret__.type)
| true | true |
f7269ea2093e6b0c5b7f64bb94961bc815827e61 | 6,713 | py | Python | misc/webdriver-w3c-tests/client/driver.py | zhuyongyong/crosswalk-test-suite | 24f3f8cfa663a365b0a22685d5bd096a637f72db | [
"BSD-3-Clause"
] | null | null | null | misc/webdriver-w3c-tests/client/driver.py | zhuyongyong/crosswalk-test-suite | 24f3f8cfa663a365b0a22685d5bd096a637f72db | [
"BSD-3-Clause"
] | null | null | null | misc/webdriver-w3c-tests/client/driver.py | zhuyongyong/crosswalk-test-suite | 24f3f8cfa663a365b0a22685d5bd096a637f72db | [
"BSD-3-Clause"
] | null | null | null | """Entry point for WebDriver."""
import alert
import command
import searchcontext
import webelement
import base64
class WebDriver(searchcontext.SearchContext):
    """Controls a web browser over the WebDriver wire protocol.

    `mode` selects the endpoint dialect: 'strict' (W3C) or
    'compatibility' (Selenium JSON wire protocol).
    """

    def __init__(self, host, required, desired, mode='strict'):
        """Open a new session on the remote end at `host` using the
        given required/desired capability dicts."""
        args = {'desiredCapabilities': desired}
        if required:
            args['requiredCapabilities'] = required
        self._executor = command.CommandExecutor(host, mode)
        resp = self._executor.execute(
            'POST', '/session', None, 'newSession', args)
        self.capabilities = resp['value']
        self._session_id = resp['sessionId']
        self.mode = mode

    def execute(self, method, path, name, parameters=None):
        """Execute a command against the current WebDriver session."""
        data = self._executor.execute(
            method,
            '/session/' + self._session_id + path,
            self._session_id,
            name,
            parameters,
            self._object_hook)
        if data:
            return data['value']

    def get(self, url):
        """Navigate to url."""
        self.execute('POST', '/url', 'get', {'url': url})

    def get_current_url(self):
        """Get the current value of the location bar."""
        return self.execute('GET', '/url', 'getCurrentUrl')

    def go_back(self):
        """Hit the browser back button."""
        self.execute('POST', '/back', 'goBack')

    def go_forward(self):
        """Hit the browser forward button."""
        self.execute('POST', '/forward', 'goForward')

    def refresh(self):
        """Refresh the current page in the browser."""
        self.execute('POST', '/refresh', 'refresh')

    def quit(self):
        """Shutdown the current WebDriver session."""
        self.execute('DELETE', '', 'quit')

    def get_window_handle(self):
        """Get the handle for the browser window/tab currently accepting
        commands.
        """
        return self.execute('GET', '/window_handle', 'getWindowHandle')

    def get_window_handles(self):
        """Get handles for all open windows/tabs."""
        return self.execute('GET', '/window_handles', 'getWindowHandles')

    def close(self):
        """Close the current tab or window.

        If this is the last tab or window, then this is the same as
        calling quit.
        """
        self.execute('DELETE', '/window', 'close')

    def maximize_window(self):
        """Maximize the current window."""
        return self._window_command('POST', '/maximize', 'maximize')

    def get_window_size(self):
        """Get the dimensions of the current window as a dict with
        'height' and 'width' keys."""
        result = self._window_command('GET', '/size', 'getWindowSize')
        # Bug fix: previously indexed with the undefined bare names
        # `height`/`width` (NameError); the response is keyed by strings.
        return {'height': result['height'], 'width': result['width']}

    def set_window_size(self, height, width):
        """Set the size of the current window."""
        self._window_command(
            'POST',
            '/size',
            'setWindowSize',
            {'height': height, 'width': width})

    def fullscreen_window(self):
        """Make the current window fullscreen."""
        pass  # implement when end point is defined

    def switch_to_window(self, name):
        """Switch to the window with the given handle or name."""
        self.execute('POST', '/window', 'switchToWindow', {'name': name})

    def switch_to_frame(self, id):
        """Switch to a frame.

        id can be either a WebElement or an integer.
        """
        self.execute('POST', '/frame', 'switchToFrame', {'id': id})

    def switch_to_parent_frame(self):
        """Move to the browsing context containing the currently selected frame.

        If in the top-level browsing context, this is a no-op.
        """
        self.execute('POST', '/frame/parent', 'switchToParentFrame')

    def switch_to_alert(self):
        """Return an Alert object to interact with a modal dialog."""
        alert_ = alert.Alert(self)
        # Touch the alert so a missing dialog fails here, not later.
        alert_.get_text()
        return alert_

    def execute_script(self, script, args=None):
        """Execute a Javascript script in the current browsing context."""
        # Fix: `args=[]` was a mutable default argument; None is the
        # sentinel and means "no script arguments".
        return self.execute(
            'POST',
            '/execute',
            'executeScript',
            {'script': script, 'args': args if args is not None else []})

    def execute_script_async(self, script, args=None):
        """Execute an asynchronous Javascript script in the current
        browsing context."""
        return self.execute(
            'POST',
            '/execute_async',
            'executeScriptAsync',
            {'script': script, 'args': args if args is not None else []})

    def take_screenshot(self, element=None):
        """Take a screenshot, returned as raw PNG bytes.

        If element is not provided, the screenshot should be of the
        current page, otherwise the screenshot should be of the given element.
        """
        if self.mode == 'strict':
            pass  # implement when endpoint is defined
        elif self.mode == 'compatibility':
            if element:
                pass  # element screenshots are unsupported in compatibility
            else:
                return base64.standard_b64decode(
                    self.execute('GET', '/screenshot', 'takeScreenshot'))

    def add_cookie(self, cookie):
        """Add a cookie to the browser."""
        self.execute('POST', '/cookie', 'addCookie', {'cookie': cookie})

    def get_cookie(self, name=None):
        """Get the cookies accessible from the current page, optionally
        filtered to those whose name matches `name`."""
        if self.mode == 'compatibility':
            cookies = self.execute('GET', '/cookie', 'getCookie')
            if name:
                return [cookie for cookie in cookies if cookie['name'] == name]
            return cookies
        elif self.mode == 'strict':
            pass  # implement when wire protocol for this has been defined

    def set_implicit_timeout(self, ms):
        """Set the implicit element-location timeout, in milliseconds."""
        self._set_timeout('implicit', ms)

    def set_page_load_timeout(self, ms):
        """Set the page-load timeout, in milliseconds."""
        self._set_timeout('page load', ms)

    def set_script_timeout(self, ms):
        """Set the async-script timeout, in milliseconds."""
        self._set_timeout('script', ms)

    def _set_timeout(self, type, ms):
        # Shared helper behind the three public timeout setters.
        params = {'type': type, 'ms': ms}
        self.execute('POST', '/timeouts', 'timeouts', params)

    def _window_command(self, method, path, name, parameters=None):
        # Route window commands to the mode-appropriate endpoint.
        if self.mode == 'compatibility':
            return self.execute(
                method, '/window/current' + path, name, parameters)
        elif self.mode == 'strict':
            pass  # implement this when end-points are defined in doc

    def _object_hook(self, obj):
        # JSON object hook: inflate {'ELEMENT': id} dicts to WebElements.
        if 'ELEMENT' in obj:
            return webelement.WebElement(self, obj['ELEMENT'])
        return obj
| 33.565 | 80 | 0.58558 |
import alert
import command
import searchcontext
import webelement
import base64
class WebDriver(searchcontext.SearchContext):
def __init__(self, host, required, desired, mode='strict'):
args = {'desiredCapabilities': desired}
if required:
args['requiredCapabilities'] = required
self._executor = command.CommandExecutor(host, mode)
resp = self._executor.execute(
'POST', '/session', None, 'newSession', args)
self.capabilities = resp['value']
self._session_id = resp['sessionId']
self.mode = mode
def execute(self, method, path, name, parameters=None):
data = self._executor.execute(
method,
'/session/' + self._session_id + path,
self._session_id,
name,
parameters,
self._object_hook)
if data:
return data['value']
def get(self, url):
self.execute('POST', '/url', 'get', {'url': url})
def get_current_url(self):
return self.execute('GET', '/url', 'getCurrentUrl')
def go_back(self):
self.execute('POST', '/back', 'goBack')
def go_forward(self):
self.execute('POST', '/forward', 'goForward')
def refresh(self):
self.execute('POST', '/refresh', 'refresh')
def quit(self):
self.execute('DELETE', '', 'quit')
def get_window_handle(self):
return self.execute('GET', '/window_handle', 'getWindowHandle')
def get_window_handles(self):
return self.execute('GET', '/window_handles', 'getWindowHandles')
def close(self):
self.execute('DELETE', '/window', 'close')
def maximize_window(self):
return self._window_command('POST', '/maximize', 'maximize')
def get_window_size(self):
result = self._window_command('GET', '/size', 'getWindowSize')
return {'height': result[height], 'width': result[width]}
def set_window_size(self, height, width):
self._window_command(
'POST',
'/size',
'setWindowSize',
{'height': height, 'width': width})
def fullscreen_window(self):
pass
def switch_to_window(self, name):
self.execute('POST', '/window', 'switchToWindow', {'name': name})
def switch_to_frame(self, id):
self.execute('POST', '/frame', 'switchToFrame', {'id': id})
def switch_to_parent_frame(self):
self.execute('POST', '/frame/parent', 'switchToParentFrame')
def switch_to_alert(self):
alert_ = alert.Alert(self)
alert_.get_text()
return alert_
def execute_script(self, script, args=[]):
return self.execute(
'POST',
'/execute',
'executeScript',
{'script': script, 'args': args})
def execute_script_async(self, script, args=[]):
return self.execute(
'POST',
'/execute_async',
'executeScriptAsync',
{'script': script, 'args': args})
def take_screenshot(self, element=None):
if self.mode == 'strict':
pass
elif self.mode == 'compatibility':
if element:
pass
else:
return base64.standard_b64decode(
self.execute('GET', '/screenshot', 'takeScreenshot'))
def add_cookie(self, cookie):
self.execute('POST', '/cookie', 'addCookie', {'cookie': cookie})
def get_cookie(self, name=None):
if self.mode == 'compatibility':
cookies = self.execute('GET', '/cookie', 'getCookie')
if name:
cookies_ = []
for cookie in cookies:
if cookie['name'] == name:
cookies_.append(cookie)
return cookies_
return cookies
elif self.mode == 'strict':
pass
def set_implicit_timeout(self, ms):
self._set_timeout('implicit', ms)
def set_page_load_timeout(self, ms):
self._set_timeout('page load', ms)
def set_script_timeout(self, ms):
self._set_timeout('script', ms)
def _set_timeout(self, type, ms):
params = {'type': type, 'ms': ms}
self.execute('POST', '/timeouts', 'timeouts', params)
    def _window_command(self, method, path, name, parameters=None):
        """Dispatch a window command against the current window.

        Compatibility mode prefixes the legacy ``/window/current`` path;
        strict mode is not implemented and returns ``None``.
        """
        if self.mode == 'compatibility':
            return self.execute(
                method, '/window/current' + path, name, parameters)
        elif self.mode == 'strict':
            pass  # strict-protocol window commands not implemented yet
def _object_hook(self, obj):
if 'ELEMENT' in obj:
return webelement.WebElement(self, obj['ELEMENT'])
return obj
| true | true |
f7269ecfb8cc07dc1f1a6af43bc49083c18df9d2 | 938 | py | Python | mobilecoind/clients/python/lib/setup.py | mccobr/mobilecoin | cd7753a0aed838097b456d230151fb34e8cff034 | [
"Apache-2.0"
] | 140 | 2020-04-15T17:51:12.000Z | 2020-10-02T19:51:57.000Z | mobilecoind/clients/python/lib/setup.py | mccobr/mobilecoin | cd7753a0aed838097b456d230151fb34e8cff034 | [
"Apache-2.0"
] | 292 | 2020-10-22T00:34:35.000Z | 2022-03-29T09:29:14.000Z | mobilecoind/clients/python/lib/setup.py | mccobr/mobilecoin | cd7753a0aed838097b456d230151fb34e8cff034 | [
"Apache-2.0"
] | 32 | 2020-04-15T18:17:07.000Z | 2020-10-19T23:25:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import setuptools

# Read the long description from the README next to this file.  The encoding
# is pinned to UTF-8: the previous bare open() decoded with the locale's
# default codec and could fail on non-ASCII README content.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="mobilecoin",
    version="0.3.3",
    author="MobileCoin",
    author_email="support@mobilecoin.com",
    description="Python bindings for the MobileCoin daemon API.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mobilecoinfoundation/mobilecoin/tree/master/mobilecoind/clients/python/lib",
    package_data={'mobilecoin': ['py.typed']},
    packages=['mobilecoin'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    test_suite='nose.collector',
    tests_require=['nose'],
    install_requires=['grpcio', 'grpcio-tools'],
)
| 31.266667 | 104 | 0.670576 |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mobilecoin",
version="0.3.3",
author="MobileCoin",
author_email="support@mobilecoin.com",
description="Python bindings for the MobileCoin daemon API.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mobilecoinfoundation/mobilecoin/tree/master/mobilecoind/clients/python/lib",
package_data={'mobilecoin': ['py.typed']},
packages=['mobilecoin'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
test_suite='nose.collector',
tests_require=['nose'],
install_requires=['grpcio', 'grpcio-tools'],
)
| true | true |
f7269f404c1a317ded575623966d09871ec4b217 | 9,554 | py | Python | samsungtvws/remote.py | chemelli74/samsung-tv-ws-api | bffbdd1796c95d5147117a5fc6583a803c310cd4 | [
"MIT"
] | null | null | null | samsungtvws/remote.py | chemelli74/samsung-tv-ws-api | bffbdd1796c95d5147117a5fc6583a803c310cd4 | [
"MIT"
] | null | null | null | samsungtvws/remote.py | chemelli74/samsung-tv-ws-api | bffbdd1796c95d5147117a5fc6583a803c310cd4 | [
"MIT"
] | null | null | null | """
SamsungTVWS - Samsung Smart TV WS API wrapper
Copyright (C) 2019 Xchwarze
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import base64
import json
import logging
import time
import ssl
import websocket
import requests
from . import exceptions
from . import shortcuts
_LOGGING = logging.getLogger(__name__)
class SamsungTVWS:
    """WebSocket/REST remote-control client for Samsung Smart TVs.

    Port 8001 speaks plain ws/http; port 8002 speaks wss/https and carries a
    pairing token.  Instances are usable as context managers so the websocket
    is always released.
    """

    _URL_FORMAT = 'ws://{host}:{port}/api/v2/channels/samsung.remote.control?name={name}'
    _SSL_URL_FORMAT = 'wss://{host}:{port}/api/v2/channels/samsung.remote.control?name={name}&token={token}'
    _REST_URL_FORMAT = '{protocol}://{host}:{port}/api/v2/{route}'

    def __init__(self, host, token=None, token_file=None, port=8001, timeout=None, key_press_delay=1,
                 name='SamsungTvRemote'):
        """Store connection settings; no network traffic happens here.

        A ``timeout`` of 0 is normalized to ``None`` (block indefinitely).
        ``token`` and ``token_file`` are alternative ways to persist the
        pairing token; ``token_file`` wins when both are set.
        """
        self.host = host
        self.token = token
        self.token_file = token_file
        self.port = port
        self.timeout = None if timeout == 0 else timeout
        self.key_press_delay = key_press_delay
        self.name = name
        self.connection = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the websocket, even when the with-body raised.
        self.close()

    def _serialize_string(self, string):
        """Return *string* base64-encoded, as required by the name/token URL params."""
        if isinstance(string, str):
            string = str.encode(string)
        return base64.b64encode(string).decode('utf-8')

    def _is_ssl_connection(self):
        """True when the TLS port (8002) is configured."""
        return self.port == 8002

    def _format_websocket_url(self):
        """Build the ws:// or wss:// control-channel URL for this TV."""
        params = {
            'host': self.host,
            'port': self.port,
            'name': self._serialize_string(self.name),
            'token': self._get_token(),
        }
        if self._is_ssl_connection():
            return self._SSL_URL_FORMAT.format(**params)
        else:
            # The plain-ws template simply ignores the unused 'token' entry.
            return self._URL_FORMAT.format(**params)

    def _format_rest_url(self, route=''):
        """Build the http(s) REST URL for *route*."""
        params = {
            'protocol': 'https' if self._is_ssl_connection() else 'http',
            'host': self.host,
            'port': self.port,
            'route': route,
        }
        return self._REST_URL_FORMAT.format(**params)

    def _get_token(self):
        """Return the pairing token, preferring the token file when configured.

        File problems yield an empty token so a fresh pairing can be
        negotiated with the TV.
        """
        if self.token_file is not None:
            try:
                with open(self.token_file, 'r') as token_file:
                    return token_file.readline()
            except OSError:
                # Was a bare ``except:``; narrowed so KeyboardInterrupt and
                # programming errors are no longer silently swallowed.
                return ''
        else:
            return self.token

    def _set_token(self, token):
        """Persist a freshly issued pairing token (file or attribute)."""
        _LOGGING.info('New token %s', token)
        if self.token_file is not None:
            _LOGGING.debug('Save token to file: %s', token)
            with open(self.token_file, 'w') as token_file:
                token_file.write(token)
        else:
            self.token = token

    def _ws_send(self, command, key_press_delay=None):
        """Send *command* (a dict) over the websocket, connecting on demand.

        Sleeps afterwards so the TV can process the message; the instance
        default delay is used when *key_press_delay* is ``None``.
        """
        if self.connection is None:
            self.open()
        payload = json.dumps(command)
        self.connection.send(payload)
        delay = self.key_press_delay if key_press_delay is None else key_press_delay
        time.sleep(delay)

    def _rest_request(self, target, method='GET'):
        """Issue a REST call and return the raw ``requests`` response.

        Raises :class:`exceptions.HttpApiError` when the TV is unreachable.
        """
        url = self._format_rest_url(target)
        try:
            if method == 'POST':
                return requests.post(url, timeout=self.timeout, verify=False)
            elif method == 'PUT':
                return requests.put(url, timeout=self.timeout, verify=False)
            elif method == 'DELETE':
                return requests.delete(url, timeout=self.timeout, verify=False)
            else:
                return requests.get(url, timeout=self.timeout, verify=False)
        except requests.ConnectionError:
            raise exceptions.HttpApiError('TV unreachable or feature not supported on this model.')

    def _process_api_response(self, response):
        """Decode a JSON payload from the TV, raising ResponseError on garbage."""
        try:
            return json.loads(response)
        except json.JSONDecodeError:
            _LOGGING.debug('Failed to parse response from TV. response text: %s', response)
            raise exceptions.ResponseError('Failed to parse response from TV. Maybe feature not supported on this model')

    def open(self):
        """Open the websocket control channel and perform the handshake.

        Stores any token the TV hands back and raises ConnectionFailure when
        the first event is not ``ms.channel.connect``.
        """
        url = self._format_websocket_url()
        sslopt = {'cert_reqs': ssl.CERT_NONE} if self._is_ssl_connection() else {}
        _LOGGING.debug('WS url %s', url)
        # Only for debug use!
        # websocket.enableTrace(True)
        self.connection = websocket.create_connection(
            url,
            self.timeout,
            sslopt=sslopt,
            # Use 'connection' for fix websocket-client 0.57 bug
            # header={'Connection': 'Upgrade'}
            connection='Connection: Upgrade'
        )
        response = self._process_api_response(self.connection.recv())
        if response.get('data') and response.get('data').get('token'):
            token = response.get('data').get('token')
            _LOGGING.debug('Got token %s', token)
            self._set_token(token)
        if response['event'] != 'ms.channel.connect':
            self.close()
            raise exceptions.ConnectionFailure(response)

    def close(self):
        """Close the websocket if open; safe to call repeatedly."""
        if self.connection:
            self.connection.close()
        self.connection = None
        _LOGGING.debug('Connection closed.')

    def send_key(self, key, times=1, key_press_delay=None, cmd='Click'):
        """Send remote-control *key* *times* times (cmd: Click/Press/Release)."""
        for _ in range(times):
            _LOGGING.debug('Sending key %s', key)
            self._ws_send(
                {
                    'method': 'ms.remote.control',
                    'params': {
                        'Cmd': cmd,
                        'DataOfCmd': key,
                        'Option': 'false',
                        'TypeOfRemote': 'SendRemoteKey'
                    }
                },
                key_press_delay
            )

    def hold_key(self, key, seconds):
        """Hold *key* down for *seconds*: Press, wait, Release."""
        self.send_key(key, cmd='Press')
        time.sleep(seconds)
        self.send_key(key, cmd='Release')

    def move_cursor(self, x, y, duration=0):
        """Move the on-screen pointer; sent with zero inter-key delay."""
        self._ws_send(
            {
                'method': 'ms.remote.control',
                'params': {
                    'Cmd': 'Move',
                    'Position': {
                        'x': x,
                        'y': y,
                        'Time': str(duration)
                    },
                    'TypeOfRemote': 'ProcessMouseDevice'
                }
            },
            key_press_delay=0
        )

    def run_app(self, app_id, app_type='DEEP_LINK', meta_tag=''):
        """Launch an app; *app_type* is DEEP_LINK or NATIVE_LAUNCH."""
        _LOGGING.debug('Sending run app app_id: %s app_type: %s meta_tag: %s', app_id, app_type, meta_tag)
        self._ws_send({
            'method': 'ms.channel.emit',
            'params': {
                'event': 'ed.apps.launch',
                'to': 'host',
                'data': {
                    # action_type: NATIVE_LAUNCH / DEEP_LINK
                    'action_type': app_type,
                    'appId': app_id,
                    'metaTag': meta_tag
                }
            }
        })

    def open_browser(self, url):
        """Open *url* in the TV's built-in Tizen browser."""
        _LOGGING.debug('Opening url in browser %s', url)
        self.run_app(
            'org.tizen.browser',
            'NATIVE_LAUNCH',
            url
        )

    def app_list(self):
        """Request the installed-app list; falls back to the raw reply."""
        _LOGGING.debug('Get app list')
        self._ws_send({
            'method': 'ms.channel.emit',
            'params': {
                'event': 'ed.installedApp.get',
                'to': 'host'
            }
        })
        response = self._process_api_response(self.connection.recv())
        if response.get('data') and response.get('data').get('data'):
            return response.get('data').get('data')
        else:
            return response

    def rest_device_info(self):
        """Fetch device information via the REST API."""
        _LOGGING.debug('Get device info via rest api')
        response = self._rest_request('')
        return self._process_api_response(response.text)

    def rest_app_status(self, app_id):
        """Fetch the status of *app_id* via the REST API."""
        _LOGGING.debug('Get app %s status via rest api', app_id)
        response = self._rest_request('applications/' + app_id)
        return self._process_api_response(response.text)

    def rest_app_run(self, app_id):
        """Start *app_id* via the REST API."""
        _LOGGING.debug('Run app %s via rest api', app_id)
        response = self._rest_request('applications/' + app_id, 'POST')
        return self._process_api_response(response.text)

    def rest_app_close(self, app_id):
        """Close *app_id* via the REST API."""
        _LOGGING.debug('Close app %s via rest api', app_id)
        response = self._rest_request('applications/' + app_id, 'DELETE')
        return self._process_api_response(response.text)

    def rest_app_install(self, app_id):
        """Install *app_id* via the REST API."""
        _LOGGING.debug('Install app %s via rest api', app_id)
        response = self._rest_request('applications/' + app_id, 'PUT')
        return self._process_api_response(response.text)

    def shortcuts(self):
        """Return a SamsungTVShortcuts helper bound to this client."""
        return shortcuts.SamsungTVShortcuts(self)
| 34.366906 | 121 | 0.57578 | import base64
import json
import logging
import time
import ssl
import websocket
import requests
from . import exceptions
from . import shortcuts
_LOGGING = logging.getLogger(__name__)
class SamsungTVWS:
_URL_FORMAT = 'ws://{host}:{port}/api/v2/channels/samsung.remote.control?name={name}'
_SSL_URL_FORMAT = 'wss://{host}:{port}/api/v2/channels/samsung.remote.control?name={name}&token={token}'
_REST_URL_FORMAT = '{protocol}://{host}:{port}/api/v2/{route}'
def __init__(self, host, token=None, token_file=None, port=8001, timeout=None, key_press_delay=1,
name='SamsungTvRemote'):
self.host = host
self.token = token
self.token_file = token_file
self.port = port
self.timeout = None if timeout == 0 else timeout
self.key_press_delay = key_press_delay
self.name = name
self.connection = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _serialize_string(self, string):
if isinstance(string, str):
string = str.encode(string)
return base64.b64encode(string).decode('utf-8')
def _is_ssl_connection(self):
return self.port == 8002
def _format_websocket_url(self):
params = {
'host': self.host,
'port': self.port,
'name': self._serialize_string(self.name),
'token': self._get_token(),
}
if self._is_ssl_connection():
return self._SSL_URL_FORMAT.format(**params)
else:
return self._URL_FORMAT.format(**params)
def _format_rest_url(self, route=''):
params = {
'protocol': 'https' if self._is_ssl_connection() else 'http',
'host': self.host,
'port': self.port,
'route': route,
}
return self._REST_URL_FORMAT.format(**params)
def _get_token(self):
if self.token_file is not None:
try:
with open(self.token_file, 'r') as token_file:
return token_file.readline()
except:
return ''
else:
return self.token
def _set_token(self, token):
_LOGGING.info('New token %s', token)
if self.token_file is not None:
_LOGGING.debug('Save token to file: %s', token)
with open(self.token_file, 'w') as token_file:
token_file.write(token)
else:
self.token = token
def _ws_send(self, command, key_press_delay=None):
if self.connection is None:
self.open()
payload = json.dumps(command)
self.connection.send(payload)
delay = self.key_press_delay if key_press_delay is None else key_press_delay
time.sleep(delay)
def _rest_request(self, target, method='GET'):
url = self._format_rest_url(target)
try:
if method == 'POST':
return requests.post(url, timeout=self.timeout, verify=False)
elif method == 'PUT':
return requests.put(url, timeout=self.timeout, verify=False)
elif method == 'DELETE':
return requests.delete(url, timeout=self.timeout, verify=False)
else:
return requests.get(url, timeout=self.timeout, verify=False)
except requests.ConnectionError:
raise exceptions.HttpApiError('TV unreachable or feature not supported on this model.')
def _process_api_response(self, response):
try:
return json.loads(response)
except json.JSONDecodeError:
_LOGGING.debug('Failed to parse response from TV. response text: %s', response)
raise exceptions.ResponseError('Failed to parse response from TV. Maybe feature not supported on this model')
def open(self):
url = self._format_websocket_url()
sslopt = {'cert_reqs': ssl.CERT_NONE} if self._is_ssl_connection() else {}
_LOGGING.debug('WS url %s', url)
self.connection = websocket.create_connection(
url,
self.timeout,
sslopt=sslopt,
connection='Connection: Upgrade'
)
response = self._process_api_response(self.connection.recv())
if response.get('data') and response.get('data').get('token'):
token = response.get('data').get('token')
_LOGGING.debug('Got token %s', token)
self._set_token(token)
if response['event'] != 'ms.channel.connect':
self.close()
raise exceptions.ConnectionFailure(response)
def close(self):
if self.connection:
self.connection.close()
self.connection = None
_LOGGING.debug('Connection closed.')
def send_key(self, key, times=1, key_press_delay=None, cmd='Click'):
for _ in range(times):
_LOGGING.debug('Sending key %s', key)
self._ws_send(
{
'method': 'ms.remote.control',
'params': {
'Cmd': cmd,
'DataOfCmd': key,
'Option': 'false',
'TypeOfRemote': 'SendRemoteKey'
}
},
key_press_delay
)
def hold_key(self, key, seconds):
self.send_key(key, cmd='Press')
time.sleep(seconds)
self.send_key(key, cmd='Release')
def move_cursor(self, x, y, duration=0):
self._ws_send(
{
'method': 'ms.remote.control',
'params': {
'Cmd': 'Move',
'Position': {
'x': x,
'y': y,
'Time': str(duration)
},
'TypeOfRemote': 'ProcessMouseDevice'
}
},
key_press_delay=0
)
def run_app(self, app_id, app_type='DEEP_LINK', meta_tag=''):
_LOGGING.debug('Sending run app app_id: %s app_type: %s meta_tag: %s', app_id, app_type, meta_tag)
self._ws_send({
'method': 'ms.channel.emit',
'params': {
'event': 'ed.apps.launch',
'to': 'host',
'data': {
'action_type': app_type,
'appId': app_id,
'metaTag': meta_tag
}
}
})
def open_browser(self, url):
_LOGGING.debug('Opening url in browser %s', url)
self.run_app(
'org.tizen.browser',
'NATIVE_LAUNCH',
url
)
def app_list(self):
_LOGGING.debug('Get app list')
self._ws_send({
'method': 'ms.channel.emit',
'params': {
'event': 'ed.installedApp.get',
'to': 'host'
}
})
response = self._process_api_response(self.connection.recv())
if response.get('data') and response.get('data').get('data'):
return response.get('data').get('data')
else:
return response
def rest_device_info(self):
_LOGGING.debug('Get device info via rest api')
response = self._rest_request('')
return self._process_api_response(response.text)
def rest_app_status(self, app_id):
_LOGGING.debug('Get app %s status via rest api', app_id)
response = self._rest_request('applications/' + app_id)
return self._process_api_response(response.text)
def rest_app_run(self, app_id):
_LOGGING.debug('Run app %s via rest api', app_id)
response = self._rest_request('applications/' + app_id, 'POST')
return self._process_api_response(response.text)
def rest_app_close(self, app_id):
_LOGGING.debug('Close app %s via rest api', app_id)
response = self._rest_request('applications/' + app_id, 'DELETE')
return self._process_api_response(response.text)
def rest_app_install(self, app_id):
_LOGGING.debug('Install app %s via rest api', app_id)
response = self._rest_request('applications/' + app_id, 'PUT')
return self._process_api_response(response.text)
def shortcuts(self):
return shortcuts.SamsungTVShortcuts(self)
| true | true |
f726a064dd1bdc39f3586c546ec4b50d380b3919 | 2,492 | py | Python | tests/components/spc/test_init.py | tuxbox/home-assistant | df74272ba6311d527fd07198929c80a45d9fed15 | [
"Apache-2.0"
] | 1 | 2019-12-26T15:06:02.000Z | 2019-12-26T15:06:02.000Z | tests/components/spc/test_init.py | tuxbox/home-assistant | df74272ba6311d527fd07198929c80a45d9fed15 | [
"Apache-2.0"
] | null | null | null | tests/components/spc/test_init.py | tuxbox/home-assistant | df74272ba6311d527fd07198929c80a45d9fed15 | [
"Apache-2.0"
] | null | null | null | """Tests for Vanderbilt SPC component."""
from unittest.mock import patch, PropertyMock, Mock
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.spc import DATA_API
from homeassistant.const import STATE_ALARM_ARMED_AWAY, STATE_ALARM_DISARMED
from tests.common import mock_coro
async def test_valid_device_config(hass, monkeypatch):
    """Setup succeeds when both api_url and ws_url are configured."""
    config = {"spc": {"api_url": "http://localhost/", "ws_url": "ws://localhost/"}}

    load_params = patch(
        "homeassistant.components.spc.SpcWebGateway.async_load_parameters",
        return_value=mock_coro(True),
    )
    with load_params:
        assert await async_setup_component(hass, "spc", config) is True
async def test_invalid_device_config(hass, monkeypatch):
    """Test that setup fails when the required ws_url is missing."""
    config = {"spc": {"api_url": "http://localhost/"}}

    with patch(
        "homeassistant.components.spc.SpcWebGateway.async_load_parameters",
        return_value=mock_coro(True),
    ):
        assert await async_setup_component(hass, "spc", config) is False
async def test_update_alarm_device(hass):
    """Test that alarm panel state changes on incoming websocket data."""
    import pyspcwebgw
    from pyspcwebgw.const import AreaMode

    config = {"spc": {"api_url": "http://localhost/", "ws_url": "ws://localhost/"}}

    # Fake SPC area that starts fully armed.
    area_mock = Mock(
        spec=pyspcwebgw.area.Area,
        id="1",
        mode=AreaMode.FULL_SET,
        last_changed_by="Sven",
    )
    # ``name`` must be assigned after construction because Mock() reserves
    # the ``name`` keyword for itself.
    area_mock.name = "House"
    area_mock.verified_alarm = False

    with patch(
        "homeassistant.components.spc.SpcWebGateway.areas", new_callable=PropertyMock
    ) as mock_areas:
        mock_areas.return_value = {"1": area_mock}
        with patch(
            "homeassistant.components.spc.SpcWebGateway.async_load_parameters",
            return_value=mock_coro(True),
        ):
            assert await async_setup_component(hass, "spc", config) is True

        await hass.async_block_till_done()

    entity_id = "alarm_control_panel.house"
    assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
    assert hass.states.get(entity_id).attributes["changed_by"] == "Sven"

    # Simulate an incoming websocket update: the area is disarmed by Anna.
    area_mock.mode = AreaMode.UNSET
    area_mock.last_changed_by = "Anna"
    await hass.data[DATA_API]._async_callback(area_mock)
    await hass.async_block_till_done()

    assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
    assert hass.states.get(entity_id).attributes["changed_by"] == "Anna"
| 34.136986 | 85 | 0.702648 | from unittest.mock import patch, PropertyMock, Mock
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.spc import DATA_API
from homeassistant.const import STATE_ALARM_ARMED_AWAY, STATE_ALARM_DISARMED
from tests.common import mock_coro
async def test_valid_device_config(hass, monkeypatch):
config = {"spc": {"api_url": "http://localhost/", "ws_url": "ws://localhost/"}}
with patch(
"homeassistant.components.spc.SpcWebGateway.async_load_parameters",
return_value=mock_coro(True),
):
assert await async_setup_component(hass, "spc", config) is True
async def test_invalid_device_config(hass, monkeypatch):
config = {"spc": {"api_url": "http://localhost/"}}
with patch(
"homeassistant.components.spc.SpcWebGateway.async_load_parameters",
return_value=mock_coro(True),
):
assert await async_setup_component(hass, "spc", config) is False
async def test_update_alarm_device(hass):
import pyspcwebgw
from pyspcwebgw.const import AreaMode
config = {"spc": {"api_url": "http://localhost/", "ws_url": "ws://localhost/"}}
area_mock = Mock(
spec=pyspcwebgw.area.Area,
id="1",
mode=AreaMode.FULL_SET,
last_changed_by="Sven",
)
area_mock.name = "House"
area_mock.verified_alarm = False
with patch(
"homeassistant.components.spc.SpcWebGateway.areas", new_callable=PropertyMock
) as mock_areas:
mock_areas.return_value = {"1": area_mock}
with patch(
"homeassistant.components.spc.SpcWebGateway.async_load_parameters",
return_value=mock_coro(True),
):
assert await async_setup_component(hass, "spc", config) is True
await hass.async_block_till_done()
entity_id = "alarm_control_panel.house"
assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
assert hass.states.get(entity_id).attributes["changed_by"] == "Sven"
area_mock.mode = AreaMode.UNSET
area_mock.last_changed_by = "Anna"
await hass.data[DATA_API]._async_callback(area_mock)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
assert hass.states.get(entity_id).attributes["changed_by"] == "Anna"
| true | true |
f726a0e6a992cdb370f4db8314ec60bd087084f1 | 13,279 | py | Python | game-simulation/ctf01d-assistent.py | freehackquest/fhq-jury-ad | c839eefe98e5a6ccec6b182417f13929ebfe733a | [
"MIT"
] | 12 | 2018-09-28T10:57:10.000Z | 2020-03-30T15:53:05.000Z | game-simulation/ctf01d-assistent.py | freehackquest/fhq-jury-ad | c839eefe98e5a6ccec6b182417f13929ebfe733a | [
"MIT"
] | 32 | 2018-09-28T14:10:51.000Z | 2020-08-31T08:54:21.000Z | game-simulation/ctf01d-assistent.py | freehackquest/fhq-jury-ad | c839eefe98e5a6ccec6b182417f13929ebfe733a | [
"MIT"
] | 3 | 2018-09-28T14:21:41.000Z | 2019-11-02T14:14:34.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import docker
import sys
import os
import time
import datetime

# Shared Docker SDK client used by every helper below.
# https://docker-py.readthedocs.io/en/latest/
client = docker.from_env()
progname = sys.argv[0]

# Static game topology; TODO read from data/config.yml
# Each team gets its own /24 bridge network derived from ip_prefix.
teams = [
    {
        "name": "team1",
        "ip_prefix": "10.10.11"
    }, {
        "name": "team2",
        "ip_prefix": "10.10.12"
    }, {
        "name": "team3",
        "ip_prefix": "10.10.13"
    }
]
# /24 prefix of the jury-side network.
jury_net_prefix = "10.10.100"
# Name prefixes shared with the "clean" command's filters below.
ntwrk_name_prefix = "ctf01d_net_"
services_list = ["service1_py", "service2_go", "service3_php", "service4_cpp"]
img_name_prefix = "ctf01d-game-simulation/"
# Container names the watchdog restarts when they exit.
watchdog_containers_list = []
def printHelp():
    """Print the CLI usage banner.

    Fixes user-facing typos in the original text ("assistent",
    "manupulate"); the command list itself is unchanged.
    """
    print(
        "\n"
        + progname + " - assistant for manipulating the game simulation\n"
        "commands:\n"
        "\t clean - remove all containers, images, networks and etc...\n"
        "\t start - prepare networks, images, containers and etc \n"
        "\n"
    )
# Entry guard: a subcommand is mandatory; show usage and bail out otherwise.
if len(sys.argv) < 2:
    printHelp()
    exit(1)
command = sys.argv[1]
def stopAndRemoveAllContainers():
    """Stop and remove every container whose name starts with ``ctf01d_``."""
    for container in client.containers.list(all=True):
        if not container.name.startswith("ctf01d_"):
            continue
        print("Stopping container " + container.name)
        container.stop()
        print("Removing container " + container.name)
        container.remove()
def createNetwork(network_list, network_name, ip_prefix):
    """Create a /24 bridge network ``<ip_prefix>.0/24`` named *network_name*.

    *network_list* is the pre-fetched result of ``client.networks.list()``;
    creation is skipped when a network with that name already exists.
    """
    for n in network_list:
        if n.name == network_name:
            print("Network already exists " + network_name)
            return
    print("Creating network " + network_name)
    # Reserve .1 for the gateway and .2 for the vulnerable server.
    ipam_pool = docker.types.IPAMPool(
        subnet=ip_prefix + '.0/24',
        iprange=ip_prefix + '.0/24',
        gateway=ip_prefix + '.1',
        aux_addresses={
            'vuln_server': ip_prefix + '.2'
        }
    )
    ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
    ret = client.networks.create(
        network_name,
        driver="bridge",
        check_duplicate=True,
        ipam=ipam_config
    )
    """
    simular command:
    $ docker network create \
        --driver bridge \
        --gateway 10.10.11.1 \
        --subnet=10.10.11.0/24 \
        --ip-range=10.10.11.3/24 \
        "ctf01d_net_team1"
    """
    print(" -> Done." + str(ret))
def createTeamAndJuryNetworks():
    """Create the jury network plus one bridge network per team."""
    print("\n ===> Create networks")
    existing = client.networks.list()
    createNetwork(existing, ntwrk_name_prefix + "jury", jury_net_prefix)
    for team in teams:
        createNetwork(existing, ntwrk_name_prefix + team['name'], team['ip_prefix'])
def buildImage(images_list, image_tag, pathWithDockerfile):
    """Build *image_tag* from *pathWithDockerfile* unless it already exists.

    *images_list* is the pre-fetched result of ``client.images.list()``.
    Exits the whole script when the build context or its Dockerfile is
    missing (fail-fast, as before).

    Fixes: the original called ``exit(-1)`` and then had unreachable
    ``return`` statements (dead code); the site-injected ``exit`` is also
    replaced with the canonical ``sys.exit``.
    """
    if not os.path.isdir(pathWithDockerfile):
        print(pathWithDockerfile + " - not exists")
        sys.exit(-1)
    path_dockerfile = pathWithDockerfile + "/Dockerfile"
    if not os.path.isfile(path_dockerfile):
        print(path_dockerfile + " - not exists")
        sys.exit(-1)
    for i in images_list:
        tag = i.tags[0] if i.tags else ""
        if tag == image_tag:
            print(image_tag + " - Skip. Image already exists.")
            return
    print("Building image " + image_tag + "...")
    ret = client.images.build(
        path=pathWithDockerfile,
        tag=image_tag,
        forcerm=True  # always remove intermediate containers, even on failure
    )
    print(" -> Done." + str(ret))
def buildJuryAndServiceImages():
    """Build the jury image and one image per vulnerable service."""
    print("\n ===> Docker images for Services")
    existing_images = client.images.list()
    # basic image with jury
    buildImage(existing_images, "sea5kg/ctf01d:latest", "..")
    for service_name in services_list:
        buildImage(existing_images,
                   img_name_prefix + service_name + ":latest",
                   "./vulnbox/" + service_name)
def _removeContainerIfExists(container_name):
    # Helper: stop and remove any existing container named *container_name*.
    # Extracted from five near-identical copy-pasted loops.
    for c in client.containers.list(all=True):
        if c.name == container_name:
            print("Stopping container " + c.name)
            c.stop()
            print("Removing container " + c.name)
            c.remove()


def _ensureDir(path):
    # Helper: create *path* if it does not exist yet and return it.
    if not os.path.isdir(path):
        os.mkdir(path)
    return path


def runAllService1Py():
    """Start one service1_py container per team, with its flags dir bind-mounted."""
    print(" ===> Starting all service1_py")
    service_name = "service1_py"
    img_name = img_name_prefix + service_name + ":latest"
    for t in teams:
        dirname_flags = _ensureDir(
            os.getcwd() + "/./tmp/" + t['name'] + "_" + service_name + "_flags")
        network_name_team = ntwrk_name_prefix + t['name']
        container_name = "ctf01d_" + t['name'] + "_" + service_name
        _removeContainerIfExists(container_name)
        print("Starting " + container_name)
        mount_flags = docker.types.Mount(
            target="/root/flags",
            source=dirname_flags,
            type="bind"
        )
        container = client.containers.run(
            img_name,
            mem_limit="128M",
            memswap_limit="128M",
            mounts=[mount_flags],
            network=network_name_team,
            # The service listens on 4101, published on the team gateway IP.
            ports={"4101/tcp": (t['ip_prefix'] + ".1", 4101)},
            name=container_name,
            detach=True
        )
        watchdog_containers_list.append(container_name)
        print(container)


def runAllService2GoDatabase():
    """Start one MySQL 5.7 backing container per team for service2_go."""
    print(" ===> Starting all service2_go_db")
    service_name = "service2_go_db"
    for t in teams:
        dirname_mysql = _ensureDir(
            os.getcwd() + "/./tmp/" + t['name'] + "_" + service_name + "_mysql")
        network_name_team = ntwrk_name_prefix + t['name']
        container_name = "ctf01d_" + t['name'] + "_" + service_name
        _removeContainerIfExists(container_name)
        print("Starting " + container_name)
        mount_mysql = docker.types.Mount(
            target="/var/lib/mysql",
            source=dirname_mysql,
            type="bind"
        )
        # Seed SQL is mounted into the official image's init directory.
        mount_sql = docker.types.Mount(
            target="/docker-entrypoint-initdb.d",
            source=os.getcwd() + "/./vulnbox/service2_go/sql",
            type="bind"
        )
        container = client.containers.run(
            "mysql:5.7",
            mem_limit="128M",
            memswap_limit="128M",
            mounts=[mount_mysql, mount_sql],
            environment={
                "MYSQL_ROOT_PASSWORD": "service2_go",
                "MYSQL_DATABASE": "service2_go",
                "MYSQL_USER": "service2_go",
                "MYSQL_PASSWORD": "service2_go",
            },
            network=network_name_team,
            name=container_name,
            detach=True
        )
        watchdog_containers_list.append(container_name)
        print(container)


def runAllService2Go():
    """Start one service2_go container per team, wired to its _db container."""
    print(" ===> Starting all service2_go")
    service_name = "service2_go"
    img_name = img_name_prefix + service_name + ":latest"
    for t in teams:
        # The flags directory is created for parity with service1_py even
        # though this service does not bind-mount it.
        _ensureDir(os.getcwd() + "/./tmp/" + t['name'] + "_" + service_name + "_flags")
        network_name_team = ntwrk_name_prefix + t['name']
        container_name = "ctf01d_" + t['name'] + "_" + service_name
        _removeContainerIfExists(container_name)
        print("Starting " + container_name)
        container = client.containers.run(
            img_name,
            mem_limit="128M",
            memswap_limit="128M",
            network=network_name_team,
            environment={
                # Matches the container name chosen by runAllService2GoDatabase.
                "SERVICE2_GO_MYSQL_HOST": container_name + "_db",
                "SERVICE2_GO_MYSQL_DBNAME": "service2_go",
                "SERVICE2_GO_MYSQL_USER": "service2_go",
                "SERVICE2_GO_MYSQL_PASSWORD": "service2_go",
            },
            ports={"4102/tcp": (t['ip_prefix'] + ".1", 4102)},
            name=container_name,
            detach=True
        )
        watchdog_containers_list.append(container_name)
        print(container)


def runCtf01dJuryDb():
    """Start the MySQL 8 database used by the ctf01d jury."""
    print(" ===> Starting ctf01d-jury db")
    dirname_mysql = _ensureDir(os.getcwd() + "/./tmp/jury_db_mysql")
    network_name_team = ntwrk_name_prefix + "jury"
    container_name = "ctf01d_jury_db"
    _removeContainerIfExists(container_name)
    print("Starting " + container_name)
    mount_mysql = docker.types.Mount(
        target="/var/lib/mysql",
        source=dirname_mysql,
        type="bind"
    )
    container = client.containers.run(
        "mysql:8",
        mem_limit="128M",
        memswap_limit="128M",
        mounts=[mount_mysql],
        environment={
            "MYSQL_ROOT_PASSWORD": "KzhyntJxwt",
            "MYSQL_DATABASE": "ctf01d",
            "MYSQL_USER": "ctf01d",
            "MYSQL_PASSWORD": "ctf01d",
        },
        network=network_name_team,
        name=container_name,
        detach=True,
        # Legacy auth plugin — presumably for older MySQL client
        # compatibility in the jury; kept as in the original.
        command="mysqld --default-authentication-plugin=mysql_native_password"
    )
    watchdog_containers_list.append(container_name)
    print(container)


def runCtf01dJury():
    """Start the ctf01d jury container, web UI published on localhost:8080."""
    print(" ===> Starting ctf01d-jury")
    dirname_data = os.getcwd() + "/data/"
    network_name_jury = ntwrk_name_prefix + "jury"
    container_name = "ctf01d_jury"
    _removeContainerIfExists(container_name)
    print("Starting " + container_name)
    mount_data = docker.types.Mount(
        target="/root/data",
        source=dirname_data,
        type="bind"
    )
    container = client.containers.run(
        "sea5kg/ctf01d:latest",
        mem_limit="256M",
        memswap_limit="256M",
        mounts=[mount_data],
        network=network_name_jury,
        ports={"8080/tcp": ("localhost", 8080)},
        name=container_name,
        detach=True
    )
    watchdog_containers_list.append(container_name)
    print(container)
def startWatchDog():
    """Poll every 15s and restart any watched container that has exited.

    Ctrl-C stops the watchdog and tears down all game containers.
    """
    try:
        print(" ===> Starting watch dog")
        while True:
            containers = client.containers.list(all=True)
            for watched_name in watchdog_containers_list:
                for c in containers:
                    if c.name != watched_name or c.status != "exited":
                        continue
                    stamp = datetime.datetime.today().strftime("%Y%m%d-%H%M%S")
                    print(stamp + " Container " + watched_name + " is exited. Try start again")
                    c.start()
            time.sleep(15)
    except KeyboardInterrupt:
        print('Bye! Write me letters!')
        stopAndRemoveAllContainers()
# ---- command dispatch ----
# Plain if-chain (not elif): "stop" intentionally falls through to the
# subsequent checks, which simply don't match.
if command == "stop":
    stopAndRemoveAllContainers()
if command == "clean":
    # "clean" implies "stop": remove containers first, then networks/images.
    stopAndRemoveAllContainers()
    ntwrks = client.networks.list()
    for n in ntwrks:
        if n.name.startswith(ntwrk_name_prefix):
            print("Removing network " + n.name)
            n.remove()
    # find and remove images
    imgs = client.images.list()
    for i in imgs:
        tag = ""
        if len(i.tags) > 0:
            tag = i.tags[0]
        if tag.startswith(img_name_prefix):
            print("Removing image " + tag)
            client.images.remove(image=tag)
    exit(0)
if command == "start":
    # Full bring-up: networks, images, per-team services, jury, then the
    # blocking watchdog loop (exits on Ctrl-C).
    if not os.path.isdir("./tmp"):
        os.mkdir("./tmp")
    createTeamAndJuryNetworks()
    buildJuryAndServiceImages()
    runAllService1Py()
    runAllService2GoDatabase()
    runAllService2Go()
    runCtf01dJuryDb()
    runCtf01dJury()
    startWatchDog()
import docker
import sys
import os
import time
import datetime
# Docker client bound to the local daemon (honours DOCKER_* env vars).
client = docker.from_env()
progname = sys.argv[0]
# Participating teams; each team gets its own /24 bridge network.
teams = [
    {
        "name": "team1",
        "ip_prefix": "10.10.11"
    }, {
        "name": "team2",
        "ip_prefix": "10.10.12"
    }, {
        "name": "team3",
        "ip_prefix": "10.10.13"
    }
]
# Subnet prefix of the jury (checksystem) network.
jury_net_prefix = "10.10.100"
# Common name prefix of every docker network created by this script.
ntwrk_name_prefix = "ctf01d_net_"
# Vulnerable services shipped in ./vulnbox/ (one docker image each).
services_list = ["service1_py", "service2_go", "service3_php", "service4_cpp"]
img_name_prefix = "ctf01d-game-simulation/"
# Container names the watch dog should keep alive.
watchdog_containers_list = []
def printHelp():
    """Print usage information for this helper script."""
    # Fixed typos ("assistent for manupulate") and documented the "stop"
    # command, which is handled below but was missing from the help text.
    print("\n"
        + progname + " - assistant for manipulating the game-simulation\n"
        "commands:\n"
        "\t clean - remove all containers, images, networks and etc...\n"
        "\t start - prepare networks, images, containers and etc \n"
        "\t stop - stop and remove all game containers\n"
        "\n"
    )
# A sub-command is mandatory; bail out with usage info otherwise.
if len(sys.argv) < 2:
    printHelp()
    exit(1)
command = sys.argv[1]
def stopAndRemoveAllContainers():
    """Stop and remove every container created by this script.

    All game containers share the ``ctf01d_`` name prefix; anything else
    is left untouched.
    """
    for cont in client.containers.list(all=True):
        if not cont.name.startswith("ctf01d_"):
            continue
        print("Stopping container " + cont.name)
        cont.stop()
        print("Removing container " + cont.name)
        cont.remove()
def createNetwork(network_list, network_name, ip_prefix):
    """Create a bridge network ``<ip_prefix>.0/24`` unless it already exists.

    ``network_list`` is a pre-fetched snapshot of existing networks so that
    repeated calls do not hit the docker daemon again.
    """
    for n in network_list:
        if n.name == network_name:
            print("Network already exists " + network_name)
            return
    print("Creating network " + network_name)
    ipam_pool = docker.types.IPAMPool(
        subnet=ip_prefix + '.0/24',
        iprange=ip_prefix + '.0/24',
        gateway=ip_prefix + '.1',
        aux_addresses={
            # .2 is reserved for the vulnerable-service host.
            'vuln_server': ip_prefix + '.2'
        }
    )
    ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
    ret = client.networks.create(
        network_name,
        driver="bridge",
        check_duplicate=True,
        ipam=ipam_config
    )
    print(" -> Done." + str(ret))
def createTeamAndJuryNetworks():
    """Create the jury network plus one isolated network per team."""
    print("\n ===> Create networks")
    existing = client.networks.list()
    createNetwork(existing, ntwrk_name_prefix + "jury", jury_net_prefix)
    for team in teams:
        createNetwork(existing, ntwrk_name_prefix + team['name'], team['ip_prefix'])
def buildImage(images_list, image_tag, pathWithDockerfile):
    """Build ``image_tag`` from ``pathWithDockerfile`` unless it already exists.

    Exits the whole script when the build context or its Dockerfile is
    missing. ``images_list`` is a pre-fetched snapshot of local images used
    to skip rebuilds.
    """
    # Removed the unreachable `return` statements that followed exit(-1).
    if not os.path.isdir(pathWithDockerfile):
        print(pathWithDockerfile + " - not exists")
        exit(-1)
    path_dockerfile = pathWithDockerfile + "/Dockerfile"
    if not os.path.isfile(path_dockerfile):
        print(path_dockerfile + " - not exists")
        exit(-1)
    for i in images_list:
        # Dangling images may have no tags at all; treat them as no match.
        tag = i.tags[0] if len(i.tags) > 0 else ""
        if tag == image_tag:
            print(image_tag + " - Skip. Image already exists.")
            return
    print("Building image " + image_tag + "...")
    ret = client.images.build(
        path=pathWithDockerfile,
        tag=image_tag,
        forcerm=True
    )
    print(" -> Done." + str(ret))
def buildJuryAndServiceImages():
    """Build the jury image plus one image per vulnerable service."""
    print("\n ===> Docker images for Services")
    available = client.images.list()
    # The jury image is built from the repository root (one level up).
    buildImage(available, "sea5kg/ctf01d:latest", "..")
    for svc in services_list:
        buildImage(available, img_name_prefix + svc + ":latest", "./vulnbox/" + svc)
def runAllService1Py():
    """(Re)start one service1_py container per team on the team's network."""
    print(" ===> Starting all service1_py")
    service_name = "service1_py"
    img_name = img_name_prefix + service_name + ":latest"
    for t in teams:
        # Per-team flag store, bind-mounted into the container.
        dirname_flags = os.getcwd() + "/./tmp/" + t['name'] + "_" + service_name + "_flags"
        if not os.path.isdir(dirname_flags):
            os.mkdir(dirname_flags)
        network_name_team = ntwrk_name_prefix + t['name']
        container_name = "ctf01d_" + t['name'] + "_" + service_name
        cntrs = client.containers.list(all=True)
        for c in cntrs:
            # Replace a stale instance from a previous run, if any.
            if c.name == container_name:
                print("Stopping container " + c.name)
                c.stop()
                print("Removing container " + c.name)
                c.remove()
        print("Starting " + container_name)
        mount_flags = docker.types.Mount(
            target="/root/flags",
            source=dirname_flags,
            type="bind"
        )
        container = client.containers.run(
            img_name,
            mem_limit="128M",
            memswap_limit="128M",
            mounts=[mount_flags],
            network=network_name_team,
            # Service port published on the team's gateway address.
            ports={"4101/tcp": (t['ip_prefix'] + ".1", 4101) },
            name=container_name,
            detach=True
        )
        watchdog_containers_list.append(container_name)
        print(container)
def runAllService2GoDatabase():
    """(Re)start one MySQL 5.7 backing database per team for service2_go."""
    print(" ===> Starting all service2_go_db")
    service_name = "service2_go_db"
    for t in teams:
        # Persistent MySQL data directory per team, bind-mounted in.
        dirname_mysql = os.getcwd() + "/./tmp/" + t['name'] + "_" + service_name + "_mysql"
        if not os.path.isdir(dirname_mysql):
            os.mkdir(dirname_mysql)
        network_name_team = ntwrk_name_prefix + t['name']
        container_name = "ctf01d_" + t['name'] + "_" + service_name
        cntrs = client.containers.list(all=True)
        for c in cntrs:
            # Replace a stale instance from a previous run, if any.
            if c.name == container_name:
                print("Stopping container " + c.name)
                c.stop()
                print("Removing container " + c.name)
                c.remove()
        print("Starting " + container_name)
        mount_mysql = docker.types.Mount(
            target="/var/lib/mysql",
            source=dirname_mysql,
            type="bind"
        )
        # Init scripts in /docker-entrypoint-initdb.d run on first start.
        mount_sql = docker.types.Mount(
            target="/docker-entrypoint-initdb.d",
            source=os.getcwd() + "/./vulnbox/service2_go/sql",
            type="bind"
        )
        container = client.containers.run(
            "mysql:5.7",
            mem_limit="128M",
            memswap_limit="128M",
            mounts=[mount_mysql, mount_sql],
            environment={
                "MYSQL_ROOT_PASSWORD": "service2_go",
                "MYSQL_DATABASE": "service2_go",
                "MYSQL_USER": "service2_go",
                "MYSQL_PASSWORD": "service2_go",
            },
            network=network_name_team,
            name=container_name,
            detach=True
        )
        watchdog_containers_list.append(container_name)
        print(container)
def runAllService2Go():
    """(Re)start one service2_go container per team, wired to its database."""
    print(" ===> Starting all service2_go")
    service_name = "service2_go"
    img_name = img_name_prefix + service_name + ":latest"
    for t in teams:
        dirname_flags = os.getcwd() + "/./tmp/" + t['name'] + "_" + service_name + "_flags"
        if not os.path.isdir(dirname_flags):
            os.mkdir(dirname_flags)
        network_name_team = ntwrk_name_prefix + t['name']
        container_name = "ctf01d_" + t['name'] + "_" + service_name
        cntrs = client.containers.list(all=True)
        for c in cntrs:
            # Replace a stale instance from a previous run, if any.
            if c.name == container_name:
                print("Stopping container " + c.name)
                c.stop()
                print("Removing container " + c.name)
                c.remove()
        print("Starting " + container_name)
        container = client.containers.run(
            img_name,
            mem_limit="128M",
            memswap_limit="128M",
            network=network_name_team,
            environment={
                # "<container_name>_db" matches the per-team DB container
                # started by runAllService2GoDatabase().
                "SERVICE2_GO_MYSQL_HOST": container_name + "_db",
                "SERVICE2_GO_MYSQL_DBNAME": "service2_go",
                "SERVICE2_GO_MYSQL_USER": "service2_go",
                "SERVICE2_GO_MYSQL_PASSWORD": "service2_go",
            },
            ports={"4102/tcp": (t['ip_prefix'] + ".1", 4102) },
            name=container_name,
            detach=True
        )
        watchdog_containers_list.append(container_name)
        print(container)
def runCtf01dJuryDb():
    """(Re)start the MySQL 8 database used by the ctf01d jury."""
    print(" ===> Starting ctf01d-jury db")
    # Persistent data directory for the jury database.
    dirname_mysql = os.getcwd() + "/./tmp/jury_db_mysql"
    if not os.path.isdir(dirname_mysql):
        os.mkdir(dirname_mysql)
    network_name_team = ntwrk_name_prefix + "jury"
    container_name = "ctf01d_jury_db"
    cntrs = client.containers.list(all=True)
    for c in cntrs:
        # Replace a stale instance from a previous run, if any.
        if c.name == container_name:
            print("Stopping container " + c.name)
            c.stop()
            print("Removing container " + c.name)
            c.remove()
    print("Starting " + container_name)
    mount_mysql = docker.types.Mount(
        target="/var/lib/mysql",
        source=dirname_mysql,
        type="bind"
    )
    container = client.containers.run(
        "mysql:8",
        mem_limit="128M",
        memswap_limit="128M",
        mounts=[mount_mysql],
        environment={
            "MYSQL_ROOT_PASSWORD": "KzhyntJxwt",
            "MYSQL_DATABASE": "ctf01d",
            "MYSQL_USER": "ctf01d",
            "MYSQL_PASSWORD": "ctf01d",
        },
        network=network_name_team,
        name=container_name,
        detach=True,
        # MySQL 8 defaults to caching_sha2_password; the jury client only
        # speaks the legacy native-password auth plugin.
        command="mysqld --default-authentication-plugin=mysql_native_password"
    )
    watchdog_containers_list.append(container_name)
    print(container)
def runCtf01dJury():
    """(Re)start the ctf01d jury (checksystem) container on the jury network."""
    print(" ===> Starting ctf01d-jury")
    # Host directory bind-mounted into the container for jury configuration/state.
    dirname_data = os.getcwd() + "/data/"
    network_name_jury = ntwrk_name_prefix + "jury"
    container_name = "ctf01d_jury"
    cntrs = client.containers.list(all=True)
    for c in cntrs:
        # Replace a stale instance from a previous run, if any.
        if c.name == container_name:
            print("Stopping container " + c.name)
            c.stop()
            print("Removing container " + c.name)
            c.remove()
    print("Starting " + container_name)
    mount_data = docker.types.Mount(
        target="/root/data",
        source=dirname_data,
        type="bind"
    )
    container = client.containers.run(
        "sea5kg/ctf01d:latest",
        mem_limit="256M",
        memswap_limit="256M",
        mounts=[mount_data],
        network=network_name_jury,
        # Jury web UI exposed on the host at localhost:8080.
        ports={"8080/tcp": ("localhost", 8080) },
        name=container_name,
        detach=True
    )
    watchdog_containers_list.append(container_name)
    print(container)
def startWatchDog():
    """Restart exited game containers every 15 seconds until Ctrl+C.

    On KeyboardInterrupt the whole game is torn down via
    stopAndRemoveAllContainers().
    """
    try:
        print(" ===> Starting watch dog")
        while True:
            cntrs = client.containers.list(all=True)
            for wc in watchdog_containers_list:
                for c in cntrs:
                    if c.name == wc and c.status == "exited":
                        today = datetime.datetime.today()
                        print(today.strftime("%Y%m%d-%H%M%S") + " Container " + wc + " is exited. Try start again")
                        c.start()
            time.sleep(15)
    except KeyboardInterrupt:
        print('Bye! Write me letters!')
        stopAndRemoveAllContainers()
# "stop": just stop and remove the game containers.
if command == "stop":
    stopAndRemoveAllContainers()
# "clean": full teardown - containers, then our networks, then built images.
if command == "clean":
    stopAndRemoveAllContainers()
    ntwrks = client.networks.list()
    for n in ntwrks:
        if n.name.startswith(ntwrk_name_prefix):
            print("Removing network " + n.name)
            n.remove()
    # find and remove images built by this script
    imgs = client.images.list()
    for i in imgs:
        tag = ""
        # dangling images may have no tags at all
        if len(i.tags) > 0:
            tag = i.tags[0]
        if tag.startswith(img_name_prefix):
            print("Removing image " + tag)
            client.images.remove(image=tag)
    exit(0)
# "start": build everything, bring the whole game up, then babysit it.
if command == "start":
    # Local scratch directory used for per-team bind mounts.
    if not os.path.isdir("./tmp"):
        os.mkdir("./tmp")
    createTeamAndJuryNetworks()
    buildJuryAndServiceImages()
    runAllService1Py()
    runAllService2GoDatabase()
    runAllService2Go()
    runCtf01dJuryDb()
    runCtf01dJury()
startWatchDog() | true | true |
f726a156e670ac369a860bc003d31ce52ead8fb6 | 9,807 | py | Python | sickbeard/lib/subliminal/services/itasa.py | Branlala/docker-sickbeardfr | 3ac85092dc4cc8a4171fb3c83e9682162245e13e | [
"MIT"
] | null | null | null | sickbeard/lib/subliminal/services/itasa.py | Branlala/docker-sickbeardfr | 3ac85092dc4cc8a4171fb3c83e9682162245e13e | [
"MIT"
] | null | null | null | sickbeard/lib/subliminal/services/itasa.py | Branlala/docker-sickbeardfr | 3ac85092dc4cc8a4171fb3c83e9682162245e13e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2012 Mr_Orange <mr_orange@hotmail.it>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import DownloadFailedError, ServiceError
from ..cache import cachedmethod
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle, EXTENSIONS
from ..utils import get_keywords
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import re
import os
import requests
import zipfile
import StringIO
import guessit
from sickbeard.common import Quality
logger = logging.getLogger("subliminal")
class Itasa(ServiceBase):
    """Subliminal service that scrapes Italian subtitles from italiansubs.net.

    The site has no API (``api_based = False``); every lookup walks the
    com_remository download pages with BeautifulSoup.
    """
    server_url = 'http://www.italiansubs.net/'
    site_url = 'http://www.italiansubs.net/'
    api_based = False
    languages = language_set(['it'])
    videos = [Episode]
    require_video = False
    required_features = ['permissive']
    # Maps Sick Beard quality constants to the label(s) used by the site's
    # per-quality subfolders; SDTV maps to '' because SD subs live directly
    # on the season page.
    quality_dict = {Quality.SDTV : '',
                    Quality.SDDVD : 'dvdrip',
                    Quality.RAWHDTV : '1080i',
                    Quality.HDTV : '720p',
                    Quality.FULLHDTV : ('1080p','720p'),
                    Quality.HDWEBDL : 'web-dl',
                    Quality.FULLHDWEBDL : 'web-dl',
                    Quality.HDBLURAY : ('bdrip', 'bluray'),
                    Quality.FULLHDBLURAY : ('bdrip', 'bluray'),
                    Quality.UNKNOWN : 'unknown' #Any subtitle will be downloaded
                    }
    def init(self):
        """Log in to italiansubs.net and keep the authenticated session."""
        super(Itasa, self).init()
        # The login form embeds a per-page hidden token; scrape its name and
        # value so the POST below passes the site's CSRF-style check.
        login_pattern = '<input type="hidden" name="return" value="([^\n\r\t ]+?)" /><input type="hidden" name="([^\n\r\t ]+?)" value="([^\n\r\t ]+?)" />'
        response = requests.get(self.server_url + 'index.php')
        if response.status_code != 200:
            raise ServiceError('Initiate failed')
        match = re.search(login_pattern, response.content, re.IGNORECASE | re.DOTALL)
        if not match:
            raise ServiceError('Can not find unique id parameter on page')
        login_parameter = {'username': 'sickbeard',
                           'passwd': 'subliminal',
                           'remember': 'yes',
                           'Submit': 'Login',
                           'remember': 'yes',
                           'option': 'com_user',
                           'task': 'login',
                           'silent': 'true',
                           'return': match.group(1),
                           match.group(2): match.group(3)
                           }
        self.session = requests.session()
        r = self.session.post(self.server_url + 'index.php', data=login_parameter)
        # The logout icon only appears for authenticated users.
        if not re.search('logouticon.png', r.content, re.IGNORECASE | re.DOTALL):
            raise ServiceError('Itasa Login Failed')
    @cachedmethod
    def get_series_id(self, name):
        """Get the show page and cache every show found in it"""
        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=9')
        soup = BeautifulSoup(r.content, self.required_features)
        all_series = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
        for tv_series in all_series.find_all(href=re.compile('func=select')):
            series_name = tv_series.text.lower().strip().replace(':','')
            match = re.search('&id=([0-9]+)', tv_series['href'])
            if match is None:
                continue
            series_id = int(match.group(1))
            # Cache all shows seen on the page, not just the requested one.
            self.cache_for(self.get_series_id, args=(series_name,), result=series_id)
        return self.cached_value(self.get_series_id, args=(name,))
    def get_episode_id(self, series, series_id, season, episode, quality):
        """Get the id subtitle for episode with the given quality"""
        season_link = None
        quality_link = None
        episode_id = None
        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=select&id=' + str(series_id))
        soup = BeautifulSoup(r.content, self.required_features)
        all_seasons = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
        for seasons in all_seasons.find_all(href=re.compile('func=select')):
            # Season folders are named "Stagione <n>" (Italian).
            if seasons.text.lower().strip() == 'stagione %s' % str(season):
                season_link = seasons['href']
                break
        if not season_link:
            # NOTE(review): format args look swapped here - `series` fills the
            # season slot and vice versa; log-message only, confirm and fix.
            logger.debug(u'Could not find season %s for series %s' % (series, str(season)))
            return None
        r = self.session.get(season_link)
        soup = BeautifulSoup(r.content, self.required_features)
        all_qualities = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
        for qualities in all_qualities.find_all(href=re.compile('func=select')):
            if qualities.text.lower().strip() in self.quality_dict[quality]:
                quality_link = qualities['href']
                r = self.session.get(qualities['href'])
                soup = BeautifulSoup(r.content, self.required_features)
                break
        #If we want SDTV we are just on the right page so quality link will be None
        if not quality == Quality.SDTV and not quality_link:
            logger.debug(u'Could not find a subtitle with required quality for series %s season %s' % (series, str(season)))
            return None
        all_episodes = soup.find('div', attrs = {'id' : 'remositoryfilelisting'})
        for episodes in all_episodes.find_all(href=re.compile('func=fileinfo')):
            # Match "SxEE" in the entry name, or a season pack ("... completa").
            ep_string = "%(seasonnumber)dx%(episodenumber)02d" % {'seasonnumber': season, 'episodenumber': episode}
            if re.search(ep_string, episodes.text, re.I) or re.search('completa$', episodes.text, re.I):
                match = re.search('&id=([0-9]+)', episodes['href'])
                if match:
                    episode_id = match.group(1)
                    return episode_id
        return episode_id
    def list_checked(self, video, languages):
        # Delegate to query() with metadata extracted from the video object.
        return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
    def query(self, filepath, languages, keywords, series, season, episode):
        """Return a list with the single matching ResultSubtitle, or []."""
        logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
        self.init_cache()
        try:
            series = series.lower().replace('(','').replace(')','')
            series_id = self.get_series_id(series)
        except KeyError:
            logger.debug(u'Could not find series id for %s' % series)
            return []
        episode_id = self.get_episode_id(series, series_id, season, episode, Quality.nameQuality(filepath))
        if not episode_id:
            logger.debug(u'Could not find subtitle for series %s' % series)
            return []
        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=fileinfo&id=' + episode_id)
        soup = BeautifulSoup(r.content)
        sub_link = soup.find('div', attrs = {'id' : 'remositoryfileinfo'}).find(href=re.compile('func=download'))['href']
        sub_language = self.get_language('it')
        path = get_subtitle_path(filepath, sub_language, self.config.multi)
        subtitle = ResultSubtitle(path, sub_language, self.__class__.__name__.lower(), sub_link)
        return [subtitle]
    def download(self, subtitle):
        """Download the zipped subtitle and extract the right file to subtitle.path.

        Raises DownloadFailedError (and removes any partial file) on failure.
        """
        logger.info(u'Downloading %s in %s' % (subtitle.link, subtitle.path))
        try:
            r = self.session.get(subtitle.link, headers={'Referer': self.server_url, 'User-Agent': self.user_agent})
            zipcontent = StringIO.StringIO(r.content)
            zipsub = zipfile.ZipFile(zipcontent)
#            if not zipsub.is_zipfile(zipcontent):
#                raise DownloadFailedError('Downloaded file is not a zip file')
            subfile = ''
            if len(zipsub.namelist()) == 1:
                subfile = zipsub.namelist()[0]
            else:
                #Season Zip Retrive Season and episode Numbers from path
                guess = guessit.guess_file_info(subtitle.path, 'episode')
                ep_string = "s%(seasonnumber)02de%(episodenumber)02d" % {'seasonnumber': guess['season'], 'episodenumber': guess['episodeNumber']}
                for file in zipsub.namelist():
                    if re.search(ep_string, file, re.I):
                        subfile = file
                        break
            if os.path.splitext(subfile)[1] in EXTENSIONS:
                with open(subtitle.path, 'wb') as f:
                    f.write(zipsub.open(subfile).read())
            else:
                zipsub.close()
                raise DownloadFailedError('No subtitles found in zip file')
            zipsub.close()
        except Exception as e:
            # Don't leave a partial/corrupt subtitle file behind.
            if os.path.exists(subtitle.path):
                os.remove(subtitle.path)
            raise DownloadFailedError(str(e))
        logger.debug(u'Download finished')
Service = Itasa | 45.402778 | 154 | 0.593862 |
from . import ServiceBase
from ..exceptions import DownloadFailedError, ServiceError
from ..cache import cachedmethod
from ..language import language_set, Language
from ..subtitles import get_subtitle_path, ResultSubtitle, EXTENSIONS
from ..utils import get_keywords
from ..videos import Episode
from bs4 import BeautifulSoup
import logging
import re
import os
import requests
import zipfile
import StringIO
import guessit
from sickbeard.common import Quality
logger = logging.getLogger("subliminal")
class Itasa(ServiceBase):
    """Subliminal service that scrapes Italian subtitles from italiansubs.net.

    The site has no API (``api_based = False``); every lookup walks the
    com_remository download pages with BeautifulSoup.
    """
    server_url = 'http://www.italiansubs.net/'
    site_url = 'http://www.italiansubs.net/'
    api_based = False
    languages = language_set(['it'])
    videos = [Episode]
    require_video = False
    required_features = ['permissive']
    # Maps Sick Beard quality constants to the label(s) used by the site's
    # per-quality subfolders; SDTV maps to '' because SD subs live directly
    # on the season page. UNKNOWN downloads any subtitle.
    quality_dict = {Quality.SDTV : '',
                    Quality.SDDVD : 'dvdrip',
                    Quality.RAWHDTV : '1080i',
                    Quality.HDTV : '720p',
                    Quality.FULLHDTV : ('1080p','720p'),
                    Quality.HDWEBDL : 'web-dl',
                    Quality.FULLHDWEBDL : 'web-dl',
                    Quality.HDBLURAY : ('bdrip', 'bluray'),
                    Quality.FULLHDBLURAY : ('bdrip', 'bluray'),
                    Quality.UNKNOWN : 'unknown'
                    }
    def init(self):
        """Log in to italiansubs.net and keep the authenticated session."""
        super(Itasa, self).init()
        # The login form embeds a per-page hidden token; scrape its name and
        # value so the POST below passes the site's CSRF-style check.
        login_pattern = '<input type="hidden" name="return" value="([^\n\r\t ]+?)" /><input type="hidden" name="([^\n\r\t ]+?)" value="([^\n\r\t ]+?)" />'
        response = requests.get(self.server_url + 'index.php')
        if response.status_code != 200:
            raise ServiceError('Initiate failed')
        match = re.search(login_pattern, response.content, re.IGNORECASE | re.DOTALL)
        if not match:
            raise ServiceError('Can not find unique id parameter on page')
        login_parameter = {'username': 'sickbeard',
                           'passwd': 'subliminal',
                           'remember': 'yes',
                           'Submit': 'Login',
                           'remember': 'yes',
                           'option': 'com_user',
                           'task': 'login',
                           'silent': 'true',
                           'return': match.group(1),
                           match.group(2): match.group(3)
                           }
        self.session = requests.session()
        r = self.session.post(self.server_url + 'index.php', data=login_parameter)
        # The logout icon only appears for authenticated users.
        if not re.search('logouticon.png', r.content, re.IGNORECASE | re.DOTALL):
            raise ServiceError('Itasa Login Failed')
    @cachedmethod
    def get_series_id(self, name):
        """Fetch the show index page and cache every show id found on it."""
        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=9')
        soup = BeautifulSoup(r.content, self.required_features)
        all_series = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
        for tv_series in all_series.find_all(href=re.compile('func=select')):
            series_name = tv_series.text.lower().strip().replace(':','')
            match = re.search('&id=([0-9]+)', tv_series['href'])
            if match is None:
                continue
            series_id = int(match.group(1))
            # Cache all shows seen on the page, not just the requested one.
            self.cache_for(self.get_series_id, args=(series_name,), result=series_id)
        return self.cached_value(self.get_series_id, args=(name,))
    def get_episode_id(self, series, series_id, season, episode, quality):
        """Return the site's file id for the episode at the given quality, or None."""
        season_link = None
        quality_link = None
        episode_id = None
        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=select&id=' + str(series_id))
        soup = BeautifulSoup(r.content, self.required_features)
        all_seasons = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
        for seasons in all_seasons.find_all(href=re.compile('func=select')):
            # Season folders are named "Stagione <n>" (Italian).
            if seasons.text.lower().strip() == 'stagione %s' % str(season):
                season_link = seasons['href']
                break
        if not season_link:
            # NOTE(review): format args look swapped here - `series` fills the
            # season slot and vice versa; log-message only, confirm and fix.
            logger.debug(u'Could not find season %s for series %s' % (series, str(season)))
            return None
        r = self.session.get(season_link)
        soup = BeautifulSoup(r.content, self.required_features)
        all_qualities = soup.find('div', attrs = {'id' : 'remositorycontainerlist'})
        for qualities in all_qualities.find_all(href=re.compile('func=select')):
            if qualities.text.lower().strip() in self.quality_dict[quality]:
                quality_link = qualities['href']
                r = self.session.get(qualities['href'])
                soup = BeautifulSoup(r.content, self.required_features)
                break
        # For SDTV the season page itself holds the subs, so quality_link
        # legitimately stays None in that case only.
        if not quality == Quality.SDTV and not quality_link:
            logger.debug(u'Could not find a subtitle with required quality for series %s season %s' % (series, str(season)))
            return None
        all_episodes = soup.find('div', attrs = {'id' : 'remositoryfilelisting'})
        for episodes in all_episodes.find_all(href=re.compile('func=fileinfo')):
            # Match "SxEE" in the entry name, or a season pack ("... completa").
            ep_string = "%(seasonnumber)dx%(episodenumber)02d" % {'seasonnumber': season, 'episodenumber': episode}
            if re.search(ep_string, episodes.text, re.I) or re.search('completa$', episodes.text, re.I):
                match = re.search('&id=([0-9]+)', episodes['href'])
                if match:
                    episode_id = match.group(1)
                    return episode_id
        return episode_id
    def list_checked(self, video, languages):
        # Delegate to query() with metadata extracted from the video object.
        return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode)
    def query(self, filepath, languages, keywords, series, season, episode):
        """Return a list with the single matching ResultSubtitle, or []."""
        logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
        self.init_cache()
        try:
            series = series.lower().replace('(','').replace(')','')
            series_id = self.get_series_id(series)
        except KeyError:
            logger.debug(u'Could not find series id for %s' % series)
            return []
        episode_id = self.get_episode_id(series, series_id, season, episode, Quality.nameQuality(filepath))
        if not episode_id:
            logger.debug(u'Could not find subtitle for series %s' % series)
            return []
        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=fileinfo&id=' + episode_id)
        soup = BeautifulSoup(r.content)
        sub_link = soup.find('div', attrs = {'id' : 'remositoryfileinfo'}).find(href=re.compile('func=download'))['href']
        sub_language = self.get_language('it')
        path = get_subtitle_path(filepath, sub_language, self.config.multi)
        subtitle = ResultSubtitle(path, sub_language, self.__class__.__name__.lower(), sub_link)
        return [subtitle]
    def download(self, subtitle):
        """Download the zipped subtitle and extract the right file to subtitle.path.

        Raises DownloadFailedError (and removes any partial file) on failure.
        """
        logger.info(u'Downloading %s in %s' % (subtitle.link, subtitle.path))
        try:
            r = self.session.get(subtitle.link, headers={'Referer': self.server_url, 'User-Agent': self.user_agent})
            zipcontent = StringIO.StringIO(r.content)
            zipsub = zipfile.ZipFile(zipcontent)
            subfile = ''
            if len(zipsub.namelist()) == 1:
                subfile = zipsub.namelist()[0]
            else:
                # Season pack: derive the SxxEyy pattern from the target path
                # and pick the matching member from the archive.
                guess = guessit.guess_file_info(subtitle.path, 'episode')
                ep_string = "s%(seasonnumber)02de%(episodenumber)02d" % {'seasonnumber': guess['season'], 'episodenumber': guess['episodeNumber']}
                for file in zipsub.namelist():
                    if re.search(ep_string, file, re.I):
                        subfile = file
                        break
            if os.path.splitext(subfile)[1] in EXTENSIONS:
                with open(subtitle.path, 'wb') as f:
                    f.write(zipsub.open(subfile).read())
            else:
                zipsub.close()
                raise DownloadFailedError('No subtitles found in zip file')
            zipsub.close()
        except Exception as e:
            # Don't leave a partial/corrupt subtitle file behind.
            if os.path.exists(subtitle.path):
                os.remove(subtitle.path)
            raise DownloadFailedError(str(e))
        logger.debug(u'Download finished')
Service = Itasa | true | true |
f726a158af6f84e4cbe621f9839c0a52a862440f | 478 | py | Python | Data Science With Python/06-importing-data-in-python-(part-2)/1-importing-data-from-the-internet/parsing-html-with-beautifulsoup.py | aimanahmedmoin1997/DataCamp | c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d | [
"MIT"
] | 3 | 2019-05-12T04:49:24.000Z | 2020-05-06T00:40:28.000Z | Data Science With Python/06-importing-data-in-python-(part-2)/1-importing-data-from-the-internet/parsing-html-with-beautifulsoup.py | aimanahmedmoin1997/DataCamp | c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d | [
"MIT"
] | null | null | null | Data Science With Python/06-importing-data-in-python-(part-2)/1-importing-data-from-the-internet/parsing-html-with-beautifulsoup.py | aimanahmedmoin1997/DataCamp | c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d | [
"MIT"
] | 7 | 2018-11-06T17:43:31.000Z | 2020-11-07T21:08:16.000Z | # Import packages
# Import packages
import requests
from bs4 import BeautifulSoup

# Specify url: url
url = 'https://www.python.org/~guido/'

# Package the request, send the request and catch the response: r
r = requests.get(url)

# Extracts the response as html: html_doc
html_doc = r.text

# Create a BeautifulSoup object from the HTML: soup
# An explicit parser avoids bs4's GuessedAtParserWarning and makes the
# output identical across environments (lxml vs html.parser etc.).
soup = BeautifulSoup(html_doc, 'html.parser')

# Prettify the BeautifulSoup object: pretty_soup
pretty_soup = soup.prettify()

# Print the response
print(pretty_soup)
| 21.727273 | 65 | 0.76569 |
import requests
from bs4 import BeautifulSoup
# Page to fetch and pretty-print.
url = 'https://www.python.org/~guido/'
# Send the GET request and catch the response.
r = requests.get(url)
# Extract the response body as HTML text.
html_doc = r.text
# Parse the HTML into a BeautifulSoup tree.
soup = BeautifulSoup(html_doc)
# Re-serialize the tree with indentation.
pretty_soup = soup.prettify()
print(pretty_soup)
| true | true |
f726a169158c8afc5ef59a42f7606019f51270fd | 7,006 | py | Python | experiments/vitchyr/goal_distribution/representation_learning/exps_20_08_14/exp1_oracle_pygame_latent_reward_1ob.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2020-10-23T14:40:09.000Z | 2020-10-23T14:40:09.000Z | experiments/vitchyr/goal_distribution/representation_learning/exps_20_08_14/exp1_oracle_pygame_latent_reward_1ob.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/vitchyr/goal_distribution/representation_learning/exps_20_08_14/exp1_oracle_pygame_latent_reward_1ob.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2021-05-27T20:38:45.000Z | 2021-05-27T20:38:45.000Z | import rlkit.misc.hyperparameter as hyp
from multiworld.envs.pygame import PickAndPlaceEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.sets.rl_launcher import disco_experiment
if __name__ == "__main__":
    # Base hyperparameter configuration; search_space below overrides
    # individual (dot-separated) keys per experiment variant.
    variant = dict(
        env_class=PickAndPlaceEnv,
        env_kwargs=dict(
            # Environment dynamics
            action_scale=1.0,
            boundary_dist=4,
            ball_radius=1.5,
            object_radius=1.,
            ball_visual_radius=1.5,
            object_visual_radius=1.,
            min_grab_distance=1.,
            walls=None,
            # Rewards
            action_l2norm_penalty=0,
            reward_type="dense",
            success_threshold=0.60,
            # Reset settings
            fixed_goal=None,
            # Visualization settings
            images_are_rgb=True,
            render_dt_msec=0,
            render_onscreen=False,
            render_size=84,
            show_goal=False,
            goal_samplers=None,
            goal_sampling_mode='random',
            num_presampled_goals=10000,
            object_reward_only=False,
            init_position_strategy='random',
            num_objects=1,
        ),
        qf_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        policy_kwargs=dict(
            hidden_sizes=[400, 300],
        ),
        sac_trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=1e-3,
            target_update_period=1,
            use_automatic_entropy_tuning=True,
            reward_scale='auto_normalize_by_max_magnitude',
        ),
        max_path_length=100,
        algo_kwargs=dict(
            batch_size=128,
            num_epochs=501,
            num_eval_steps_per_epoch=3000,
            num_expl_steps_per_train_loop=1000,
            num_trains_per_train_loop=1000,
            min_num_steps_before_training=1000,
        ),
        # Tiny debug settings (kept for reference; local mode applies an
        # equivalent override programmatically in the loop below).
        # max_path_length=2,
        # algo_kwargs=dict(
        #     batch_size=5,
        #     num_epochs=1,
        #     num_eval_steps_per_epoch=2*20,
        #     num_expl_steps_per_train_loop=2*20,
        #     num_trains_per_train_loop=10,
        #     min_num_steps_before_training=10,
        # ),
        replay_buffer_kwargs=dict(
            fraction_future_context=0.0,
            fraction_distribution_context=0.8,
            max_size=int(1e6),
        ),
        save_video=True,
        save_video_kwargs=dict(
            save_video_period=10,
            pad_color=50,
            subpad_length=1,
            pad_length=1,
            num_columns_per_rollout=2,
            num_imgs=8,
            # rows=2,
            # columns=9,
        ),
        renderer_kwargs=dict(
            # create_image_format='HWC',
            # output_image_format='CWH',
            output_image_format='CHW',
            # flatten_image=True,
            # normalize_image=False,
        ),
        create_vae_kwargs=dict(
            latent_dim=128,
            encoder_cnn_kwargs=dict(
                kernel_sizes=[5, 3, 3],
                n_channels=[16, 32, 64],
                strides=[3, 2, 2],
                paddings=[0, 0, 0],
                pool_type='none',
                hidden_activation='relu',
                normalization_type='layer',
            ),
            encoder_mlp_kwargs=dict(
                hidden_sizes=[],
            ),
            decoder_dcnn_kwargs=dict(
                kernel_sizes=[3, 3, 6],
                n_channels=[32, 16, 3],
                strides=[2, 2, 3],
                paddings=[0, 0, 0],
            ),
            decoder_mlp_kwargs=dict(
                hidden_sizes=[256, 256],
            ),
            use_fancy_architecture=True,
            decoder_distribution='gaussian_learned_global_scalar_variance',
        ),
        vae_trainer_kwargs=dict(
            vae_lr=1e-3,
            vae_visualization_config=dict(
                num_recons=5,
                num_samples=20,
                # debug_period=50,
                debug_period=20,
                unnormalize_images=True,
                image_format='CHW',
            ),
            beta=1,
            set_loss_weight=0,
        ),
        data_loader_kwargs=dict(
            batch_size=128,
        ),
        vae_algo_kwargs=dict(
            num_iters=501,
            num_epochs_per_iter=1,
            progress_csv_file_name='vae_progress.csv',
        ),
        generate_set_for_vae_pretraining_kwargs=dict(
            num_sets=3,
            num_samples_per_set=128,
        ),
        generate_set_for_rl_kwargs=dict(
            num_sets=3,
            num_samples_per_set=128,
            # save_to_filename='3sets128samples_2objs.pickle',
            saved_filename='/global/scratch/vitchyr/doodad-log-since-07-10-2020/manual-upload/sets/hand2xy_hand2x_1obj2xy_1obj2x_num_objs_1.pickle',
        ),
        num_ungrouped_images=12800,
        reward_fn_kwargs=dict(
            drop_log_det_term=True,
            sqrt_reward=True,
        ),
        rig=False,
        rig_goal_setter_kwargs=dict(
            use_random_goal=True,
        ),
        use_ground_truth_reward=True,
    )
    # Dev defaults; immediately overridden by the launch settings below.
    n_seeds = 1
    mode = 'local'
    exp_prefix = 'dev-{}'.format(
        __file__.replace('/', '-').replace('_', '-').split('.')[0]
    )
    # Actual launch configuration (cluster mode, 3 seeds).
    n_seeds = 3
    mode = 'sss'
    exp_prefix = 'exp2-oracle-pygame-latent-reward-1-obj'
    # Keys are dot-paths into `variant`; lists are swept combinatorially.
    search_space = {
        'vae_algo_kwargs.num_iters': [501],
        # 'algo_kwargs.num_epochs': [1],
        'observation_key': [
            'state_observation',
        ],
        'use_ground_truth_reward': [
            False,
        ],
        'use_onehot_set_embedding': [
            True,
        ],
        'use_dummy_model': [
            False,
        ],
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    variants = list(sweeper.iterate_hyperparameters())
    for _ in range(n_seeds):
        for exp_id, variant in enumerate(variants):
            if mode == 'local':
                # Shrink the run drastically for quick local smoke tests.
                variant['vae_algo_kwargs']['num_iters'] = 0
                variant['generate_set_for_rl_kwargs']['saved_filename'] = (
                    'manual-upload/sets/hand2xy_hand2x_1obj2xy_1obj2x_num_objs_1.pickle'
                )
                variant['algo_kwargs'] = dict(
                    batch_size=5,
                    num_epochs=1,
                    num_eval_steps_per_epoch=2*20,
                    num_expl_steps_per_train_loop=2*20,
                    num_trains_per_train_loop=10,
                    min_num_steps_before_training=10,
                )
                variant['max_path_length'] = 2
            run_experiment(
                disco_experiment,
                exp_name=exp_prefix,
                num_exps_per_instance=2,
                mode=mode,
                variant=variant,
                # slurm_config_name='cpu',
                use_gpu=True,
                # gpu_id=1,
            )
| 32.137615 | 148 | 0.529403 | import rlkit.misc.hyperparameter as hyp
from multiworld.envs.pygame import PickAndPlaceEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.sets.rl_launcher import disco_experiment
if __name__ == "__main__":
variant = dict(
env_class=PickAndPlaceEnv,
env_kwargs=dict(
action_scale=1.0,
boundary_dist=4,
ball_radius=1.5,
object_radius=1.,
ball_visual_radius=1.5,
object_visual_radius=1.,
min_grab_distance=1.,
walls=None,
action_l2norm_penalty=0,
reward_type="dense",
success_threshold=0.60,
fixed_goal=None,
images_are_rgb=True,
render_dt_msec=0,
render_onscreen=False,
render_size=84,
show_goal=False,
goal_samplers=None,
goal_sampling_mode='random',
num_presampled_goals=10000,
object_reward_only=False,
init_position_strategy='random',
num_objects=1,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
sac_trainer_kwargs=dict(
discount=0.99,
soft_target_tau=1e-3,
target_update_period=1,
use_automatic_entropy_tuning=True,
reward_scale='auto_normalize_by_max_magnitude',
),
max_path_length=100,
algo_kwargs=dict(
batch_size=128,
num_epochs=501,
num_eval_steps_per_epoch=3000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
),
replay_buffer_kwargs=dict(
fraction_future_context=0.0,
fraction_distribution_context=0.8,
max_size=int(1e6),
),
save_video=True,
save_video_kwargs=dict(
save_video_period=10,
pad_color=50,
subpad_length=1,
pad_length=1,
num_columns_per_rollout=2,
num_imgs=8,
),
renderer_kwargs=dict(
output_image_format='CHW',
),
create_vae_kwargs=dict(
latent_dim=128,
encoder_cnn_kwargs=dict(
kernel_sizes=[5, 3, 3],
n_channels=[16, 32, 64],
strides=[3, 2, 2],
paddings=[0, 0, 0],
pool_type='none',
hidden_activation='relu',
normalization_type='layer',
),
encoder_mlp_kwargs=dict(
hidden_sizes=[],
),
decoder_dcnn_kwargs=dict(
kernel_sizes=[3, 3, 6],
n_channels=[32, 16, 3],
strides=[2, 2, 3],
paddings=[0, 0, 0],
),
decoder_mlp_kwargs=dict(
hidden_sizes=[256, 256],
),
use_fancy_architecture=True,
decoder_distribution='gaussian_learned_global_scalar_variance',
),
vae_trainer_kwargs=dict(
vae_lr=1e-3,
vae_visualization_config=dict(
num_recons=5,
num_samples=20,
debug_period=20,
unnormalize_images=True,
image_format='CHW',
),
beta=1,
set_loss_weight=0,
),
data_loader_kwargs=dict(
batch_size=128,
),
vae_algo_kwargs=dict(
num_iters=501,
num_epochs_per_iter=1,
progress_csv_file_name='vae_progress.csv',
),
generate_set_for_vae_pretraining_kwargs=dict(
num_sets=3,
num_samples_per_set=128,
),
generate_set_for_rl_kwargs=dict(
num_sets=3,
num_samples_per_set=128,
saved_filename='/global/scratch/vitchyr/doodad-log-since-07-10-2020/manual-upload/sets/hand2xy_hand2x_1obj2xy_1obj2x_num_objs_1.pickle',
),
num_ungrouped_images=12800,
reward_fn_kwargs=dict(
drop_log_det_term=True,
sqrt_reward=True,
),
rig=False,
rig_goal_setter_kwargs=dict(
use_random_goal=True,
),
use_ground_truth_reward=True,
)
n_seeds = 1
mode = 'local'
exp_prefix = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
n_seeds = 3
mode = 'sss'
exp_prefix = 'exp2-oracle-pygame-latent-reward-1-obj'
search_space = {
'vae_algo_kwargs.num_iters': [501],
'observation_key': [
'state_observation',
],
'use_ground_truth_reward': [
False,
],
'use_onehot_set_embedding': [
True,
],
'use_dummy_model': [
False,
],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = list(sweeper.iterate_hyperparameters())
for _ in range(n_seeds):
for exp_id, variant in enumerate(variants):
if mode == 'local':
variant['vae_algo_kwargs']['num_iters'] = 0
variant['generate_set_for_rl_kwargs']['saved_filename'] = (
'manual-upload/sets/hand2xy_hand2x_1obj2xy_1obj2x_num_objs_1.pickle'
)
variant['algo_kwargs'] = dict(
batch_size=5,
num_epochs=1,
num_eval_steps_per_epoch=2*20,
num_expl_steps_per_train_loop=2*20,
num_trains_per_train_loop=10,
min_num_steps_before_training=10,
)
variant['max_path_length'] = 2
run_experiment(
disco_experiment,
exp_name=exp_prefix,
num_exps_per_instance=2,
mode=mode,
variant=variant,
use_gpu=True,
)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.