text string | size int64 | token_count int64 |
|---|---|---|
"""
TensorFlow 2 OscoNet code
"""
import numpy as np
import pytest
import tensorflow as tf
from OscopeBootstrap import qvalue
from OscopeBootstrap.create_edge_network_represention import create_edge_network_representation
from OscopeBootstrap.oscope_tf import PRECISION_fp, calc_e2, calc_e2_many_genes, find_best_psi_for_each_gene_pair, \
PRECISION_int, get_permuted_cost, get_pvalues, flatten_upper_triangular, get_symmetric_matrix_from_upper_triangular
from OscopeBootstrap.SyntheticDataset import GetSimISyntheticData, true_adj_matrix
from OscopeBootstrap.oscope_tf import bootstrap_hypothesis_test, get_accuracy, get_metrics_for_different_qvalue_thresholds
def calc_e2_np(X, Y, psi):
    """NumPy reference for the Oscope e2 cost between genes X, Y at phase shift psi."""
    residual = X ** 2 + Y ** 2 - 2.0 * X * Y * np.cos(psi) - np.sin(psi) ** 2
    return np.sum(residual ** 2)
def calc_e2_many_genes_np(X_many_genes: np.ndarray, psi_ng: np.ndarray):
    '''
    NumPy reference: sum the pairwise e2 cost over every ordered (gene, gene) pair.

    :param X_many_genes: G X N tensor of gene expression
    :param psi_ng: G X G tensor of phase shift - should be symmetric
    :return: total cost across all genes
    '''
    n_genes = X_many_genes.shape[0]
    total = 0
    for row in range(n_genes):
        for col in range(n_genes):
            total += calc_e2_np(X_many_genes[row, :], X_many_genes[col, :], psi_ng[row, col])
    return total
def create_single_group_example(N, std_noise, phase_shift):
    """Build a toy 4-gene x N-cell matrix: genes 0 and 1 are sinusoids separated by
    phase_shift, genes 2 and 3 are pure Gaussian noise of scale std_noise."""
    t = np.linspace(0, 2 * np.pi, N)

    def noise():
        return std_noise * np.random.randn(N)

    # Rows are built top-down, so random draws happen in the same order as before.
    return np.vstack([
        np.sin(t) + noise(),
        np.sin(t + phase_shift) + noise(),
        noise(),
        noise(),
    ])
def test_get_symmetric_matrix_from_upper_triangular():
    """A matrix rebuilt from its upper triangle must equal its own transpose."""
    upper_entries = np.array([1, 2, 3, 4, 5, 6])
    rebuilt = get_symmetric_matrix_from_upper_triangular(4, upper_entries)
    np.testing.assert_equal(rebuilt, rebuilt.T)
def test_calc_e2():
    """The TF implementation of the e2 cost must match the NumPy reference."""
    np.random.seed(42)
    n_cells = 10
    x = tf.constant(np.random.randn(n_cells, ), dtype=PRECISION_fp)
    y = tf.constant(np.random.randn(n_cells, ), dtype=PRECISION_fp)
    psi = tf.constant(np.array(3.), dtype=PRECISION_fp)
    zero_psi = tf.constant(np.array(0.), dtype=PRECISION_fp)
    # Identical genes with no phase shift have exactly zero cost.
    assert calc_e2(x, x, zero_psi) == 0, 'must get minimum cost for identical gene with 0 phase'
    cost_tf = calc_e2(x, y, psi)
    cost_np = calc_e2_np(x.numpy(), y.numpy(), psi.numpy())
    np.testing.assert_almost_equal(cost_tf, cost_np, decimal=1)
def test_calc_e2_many_genes():
    """Smoke test: total pairwise cost on random data is strictly positive."""
    n_genes, n_cells = 5, 10
    expression = tf.constant(np.random.randn(n_genes, n_cells), dtype=PRECISION_fp)
    # make sure we include 0 as possible phase
    phase_matrix = tf.constant(np.random.randn(n_genes, n_genes), dtype=PRECISION_fp)
    total_cost = calc_e2_many_genes(expression, phase_matrix)
    reference_cost = calc_e2_many_genes_np(expression.numpy(), phase_matrix.numpy())
    # np.testing.assert_almost_equal(total_cost, reference_cost) Big differences!
    assert np.all(total_cost > 0)
def test_find_best_psi_for_each_gene_pair():
    """End-to-end check of the phase-fitting pipeline on a tiny synthetic example.

    Genes 0 and 1 are sinusoids `phase_shift` apart; genes 2 and 3 are noise.
    The pipeline should recover `phase_shift` for the (0, 1) pair and, after
    permutation p-values and q-value correction, flag only that pair.
    """
    np.random.seed(42)
    tf.random.set_seed(42)
    # construct example
    phase_shift = np.pi
    N = 10
    G = 4
    data_np = create_single_group_example(N, 0.1, phase_shift=phase_shift)
    data = tf.constant(data_np, dtype=PRECISION_fp)
    # candidate_psi = tf.linspace(0, 2 * tf.constant(np.pi), dtype=PRECISION)
    # Candidate grid deliberately contains the true phase shift plus a decoy.
    candidate_psi = tf.constant(np.array([phase_shift, phase_shift/2]), dtype=PRECISION_fp)
    n_permutations = tf.constant(np.array(20), dtype=PRECISION_int)
    # Output buffers filled in-place by the pipeline below.
    # NOTE(review): psi_ng starts as zeros * inf, which is NaN, not inf —
    # presumably any finite sentinel would do here; confirm intent.
    psi_ng = tf.Variable(tf.zeros((G, G), dtype=PRECISION_fp) * tf.constant(np.inf, dtype=PRECISION_fp))
    cost_ng = tf.Variable(tf.ones((G, G), dtype=PRECISION_fp) * tf.constant(np.inf, dtype=PRECISION_fp))
    cost_permuted = tf.Variable(tf.ones((G, G, n_permutations), dtype=PRECISION_fp) * tf.constant(np.inf, dtype=PRECISION_fp))
    pvalues = tf.Variable(tf.ones((G, G), dtype=PRECISION_fp) * tf.constant(np.inf, dtype=PRECISION_fp))
    find_best_psi_for_each_gene_pair(psi_ng, cost_ng, data, candidate_psi=candidate_psi)
    assert psi_ng[0, 1] == phase_shift, 'why picked the other phase shift?'
    get_permuted_cost(cost_permuted, data, candidate_psi, n_permutations)
    get_pvalues(pvalues, cost_ng, cost_permuted)
    # then q-values
    # then check we find the right pair
    pvalue_flatten = flatten_upper_triangular(pvalues.numpy())
    qvalues_flatten, pi0 = qvalue.estimate(pvalue_flatten, verbose=True)
    qvalues = get_symmetric_matrix_from_upper_triangular(pvalues.shape[0], qvalues_flatten)
    # Threshold q-values to get the significant-pair adjacency matrix.
    adjacency_matrix = qvalues < 0.01
    assert adjacency_matrix[0, 1]
    assert adjacency_matrix[1, 0]
    assert adjacency_matrix.sum() == 2, 'Only one significant pair should exist (0, 1)'
    gene_names = [f'gene{i}' for i in range(4)]
    # Edge weights are inverse costs (lower cost => stronger co-oscillation).
    a = create_edge_network_representation(adjacency_matrix, 1/cost_ng.numpy(), gene_names)
    assert a.shape[1] == 3, 'must have gene1, gene2, weight columns'
    assert a.shape[0] == 1, 'only one gene pair is significant'
@pytest.mark.slow
def test_bootstrap():
    # This is a slow test (>10 secs) so need to run with `pytest --runslow -rs`
    """Run the full bootstrap hypothesis test on noise-free simulated data and
    check output shapes, NaN-freeness, and accuracy/TPR/FDR/FPR against the
    ground-truth adjacency matrix."""
    np.random.seed(42)
    tf.random.set_seed(42)
    NG = 5
    G = 20
    N = 100
    ngroups = 1
    alpha = 0.01  # significance level for test
    data_df, phaseG, angularSpeed = GetSimISyntheticData(NG=NG, G=G, ngroups=ngroups,
                                                        N=N, noiseLevel=0)
    adjacency_matrix, qvalues, cost = bootstrap_hypothesis_test(n_bootstrap=30,
                                                               data=data_df.values,
                                                               alpha=alpha,
                                                               grid_points_in_search=30)
    # All outputs are gene-by-gene matrices with no undefined entries.
    assert qvalues.shape == (G, G)
    assert adjacency_matrix.shape == (G, G)
    assert np.all(~np.isnan(qvalues))
    assert np.all(~np.isnan(adjacency_matrix))
    assert cost.shape == (G, G)
    adjacency_matrix_true = true_adj_matrix(G, angularSpeed)
    correct_ratio = get_accuracy(adjacency_matrix, adjacency_matrix_true)
    assert correct_ratio > .98
    TPR, FDR, FPR = get_metrics_for_different_qvalue_thresholds(qvalues,
                                                                adjacency_matrix_true,
                                                                np.array([alpha]))
    # To get appropriate values we need to increase number of bootstrap samples
    assert TPR > 0.75
    assert FDR < 0.3
    assert FPR < 0.1
| 6,373 | 2,423 |
from django.test import TestCase
from .models import Profile,Image,Comments
import datetime as dt
# Create your tests here.
| 127 | 35 |
from texts.text_info import NewsSentenceInfo
class BaseObjectCache:
    """
    Base Cache for NER data (API).

    Subclasses must implement the lookup hooks below.
    """

    # Sentence index used for news titles, mirrored from NewsSentenceInfo.
    TITLE_SENT_IND = NewsSentenceInfo.TITLE_SENT_IND

    def __init__(self):
        pass

    def is_news_registered(self, news_id):
        """Return whether the given news item is present in the cache."""
        raise NotImplementedError()

    def try_get(self, filename, s_ind):
        """Return the cached entry for (filename, s_ind), if any."""
        raise NotImplementedError()
| 370 | 128 |
class Solution(object):
    def isPalindrome(self, x):
        """
        Return True if integer ``x`` reads the same forwards and backwards.

        :type x: int
        :rtype: bool
        """
        # Negative numbers can never be palindromes ("-121" vs "121-").
        if x < 0:
            return False
        remaining = x
        reversed_digits = 0
        while remaining != 0:
            # 1. Append the last digit of `remaining` to `reversed_digits`.
            reversed_digits = reversed_digits * 10 + remaining % 10
            # 2. Drop the last digit. FIX: the original used `a = a/10`, which is
            # float division in Python 3 — `a` never becomes exactly 0, so the
            # result was wrong. Integer floor division is required.
            remaining //= 10
        # Compare the original with its digit-reversal.
        return x == reversed_digits
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
# Auto-generated snapshot assertions (snapshottest). Regenerate with
# `pytest --snapshot-update` rather than editing the recorded values by hand.
snapshots = Snapshot()

snapshots["test_signup_notification 1"] = [
    """kukkuu@example.com|['michellewalker@example.net']|SIGNUP-notifikaation aihe|
SIGNUP-notifikaation sisältö tekstimuodossa.
Lapset: [<Child: Matti Mainio (2020-01-01)>, <Child: Jussi Juonio (2020-02-02)>]
Huoltaja: Gulle Guardian (michellewalker@example.net)"""
]

snapshots["test_signup_notification_language[EN] 1"] = [
    """kukkuu@example.com|['michellewalker@example.net']|SIGNUP notification subject|
SIGNUP notification body text.
Children: [<Child: Matti Mainio (2020-01-01)>, <Child: Jussi Juonio (2020-02-02)>]
Guardian: Gulle Guardian (michellewalker@example.net)"""
]

snapshots["test_signup_notification_language[FI] 1"] = [
    """kukkuu@example.com|['michellewalker@example.net']|SIGNUP-notifikaation aihe|
SIGNUP-notifikaation sisältö tekstimuodossa.
Lapset: [<Child: Matti Mainio (2020-01-01)>, <Child: Jussi Juonio (2020-02-02)>]
Huoltaja: Gulle Guardian (michellewalker@example.net)"""
]

# NOTE(review): the SV snapshot records the Finnish text — presumably the Swedish
# translation was missing when this was captured; confirm before relying on it.
snapshots["test_signup_notification_language[SV] 1"] = [
    """kukkuu@example.com|['michellewalker@example.net']|SIGNUP-notifikaation aihe|
SIGNUP-notifikaation sisältö tekstimuodossa.
Lapset: [<Child: Matti Mainio (2020-01-01)>, <Child: Jussi Juonio (2020-02-02)>]
Huoltaja: Gulle Guardian (michellewalker@example.net)"""
]
| 1,451 | 635 |
# Arda Mavi
import os
import cv2
import platform
import numpy as np
from predict import predict
from scipy.misc import imresize
from multiprocessing import Process
from keras.models import model_from_json
img_size = 64
channel_size = 1
def main():
    """Live sign-language recognition loop: grab webcam frames, classify them,
    and (on macOS) speak confident predictions in a background process."""
    # Load model architecture from JSON (with-block closes the file reliably):
    with open('Data/Model/model.json', 'r') as model_file:
        model = model_from_json(model_file.read())
    # Getting weights
    model.load_weights("Data/Model/weights.h5")

    print('Press "ESC" button for exit.')

    # Get image from camera, get predict and say it with another process, repeat.
    cap = cv2.VideoCapture(0)
    old_char = ''
    while True:
        ret, img = cap.read()
        if not ret:
            # Frame grab failed (camera unplugged/busy); stop instead of crashing below.
            break
        # Cropping image to a centered square:
        img_height, img_width = img.shape[:2]
        side_width = int((img_width - img_height) / 2)
        img = img[0:img_height, side_width:side_width + img_height]
        # Show window:
        cv2.imshow('VSL', cv2.flip(img, 1))  # cv2.flip(img,1) : Flip(mirror effect) for easy handling.
        # Preprocess: grayscale, resize, invert and scale to [0, 1].
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # FIX: scipy.misc.imresize was removed in SciPy 1.3; cv2.resize (already
        # imported) performs the equivalent resize on the grayscale frame.
        img = cv2.resize(img, (img_size, img_size))
        img = 1 - np.array(img).astype('float32') / 255.
        img = img.reshape(1, img_size, img_size, channel_size)
        Y_string, Y_possibility = predict(model, img)
        if Y_possibility < 0.4:  # For secondary vocalization
            old_char = ''
        if (platform.system() == 'Darwin') and old_char != Y_string and Y_possibility > 0.6:
            print(Y_string, Y_possibility)
            arg = 'say {0}'.format(Y_string)
            # Say predict with multiprocessing
            Process(target=os.system, args=(arg,)).start()
            old_char = Y_string
        if cv2.waitKey(200) == 27:  # Decimal 27 = Esc
            break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| 1,918 | 651 |
# Twitcaspy
# Copyright 2021 Alma-field
# See LICENSE for details.
# Before running this code, run the following command:
# このコードを実行する前に、以下のコマンドを実行してください。
# pip install twitcaspy[webhook]
from flask import Flask, request, make_response, jsonify, abort
app = Flask(__name__)
from twitcaspy import api, TwitcaspyException
@app.route('/', methods=['GET', 'POST'])
def main():
    """Receive Twitcasting webhook POSTs; answer GET with a simple status.

    FIX: the original returned None for GET requests, which makes Flask
    raise a 500 even though GET is in the allowed methods.
    """
    if request.method == 'POST':
        try:
            webhook = api.incoming_webhook(request.json)
        except TwitcaspyException:
            # Malformed or unverifiable payload -> 400 instead of a 500 traceback.
            abort(400)
        # Show Parse Result
        print(f'signature : {webhook.signature}')
        print(f'user_id : {webhook.broadcaster.id}')
        print(f'title : {webhook.movie.title}')
        return make_response(jsonify({'message': 'OK'}))
    # Plain GET (e.g. a health check) gets the same OK envelope.
    return make_response(jsonify({'message': 'OK'}))
if __name__ == '__main__':
    import json

    # Load recorded webhook test data and prime the API signature before serving.
    cassettes_file = '../../cassettes/testincomingwebhook.json'
    with open(cassettes_file, "r", encoding='utf-8') as cassette:
        recorded = json.load(cassette)
    api.signature = recorded['signature']
    app.run(debug=True)
| 1,028 | 364 |
import sqlite3
class UserModel:
    """In-memory representation of a row in the ``users`` table of ``data.db``."""

    def __init__(self, _id, username, password):
        self.id = _id
        self.username = username
        self.password = password

    @classmethod
    def _find_one(cls, query, params):
        """Run ``query`` with ``params`` and map the first row to a UserModel (or None).

        Shared by the two finders below (the original duplicated this logic).
        The connection is always closed, even if the query raises.
        """
        connection = sqlite3.connect('data.db')
        try:
            cursor = connection.cursor()
            # Parameters MUST ALWAYS be in form of a TUPLE!
            row = cursor.execute(query, params).fetchone()
            # row is None when the result set is empty; *row unpacks the columns.
            return cls(*row) if row is not None else None
        finally:
            connection.close()

    @classmethod
    def find_by_username(cls, username):
        """Return the user with this username, or None if absent."""
        return cls._find_one("SELECT * FROM users WHERE username = ?", (username, ))

    @classmethod
    def find_by_id(cls, id):
        """Return the user with this primary key, or None if absent."""
        return cls._find_one("SELECT * FROM users WHERE id = ?", (id, ))
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
Created on Fri Oct 23 13:31:34 2020
@author: Admin
"""
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import MaxPooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import Concatenate
from keras.layers import concatenate
from keras.layers import add
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Lambda
from keras import backend as K
from keras.models import Model
from keras.utils import plot_model
# Channel axis for BatchNormalization/Concatenate:
# 1 when Keras stores tensors channels-first, else the trailing axis.
if K.image_data_format() == 'channels_first':
    bn_axis = 1
else:
    bn_axis = -1
def _grouped_conv_block(input_tensor, cardinality, output_filters, kernel_size, block):
    '''
    Grouped convolution followed by two 1x1 "mix" convolutions.

    The input channels are split into `cardinality` groups, each convolved
    independently with `kernel_size`, then concatenated and mixed.

    kernel_size = 3
    cardinality = 2
    '''
    base_name = 'ek_block_' + str(block) + '_'
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []
    input_filters = input_tensor._keras_shape[channel_axis]
    grouped_filters = int(input_filters / cardinality)
    for c in range(cardinality):
        # FIX: bind `c` and `grouped_filters` as default arguments so the Lambda
        # does not late-bind the loop variable. A plain closure would read the
        # *final* value of `c` if Keras re-invokes the lambda (e.g. when the
        # model is serialized/reloaded or the layer graph is rebuilt).
        if K.image_data_format() == 'channels_last':
            x = Lambda(lambda z, c=c, g=grouped_filters: z[:, :, :, c * g:(c + 1) * g])(input_tensor)
        else:
            x = Lambda(lambda z, c=c, g=grouped_filters: z[:, c * g:(c + 1) * g, :, :])(input_tensor)
        x = Conv2D(filters = output_filters // cardinality,
                   kernel_size = kernel_size,
                   strides = (1, 1),
                   padding = 'same',
                   name = base_name + 'grouped_conv_' + str(c))(x)
        group_list.append(x)
    group_merge = concatenate(group_list, axis = channel_axis)
    # The shape of group_merge: b, h, w, output_filters
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'grouped_conv_bn')(group_merge)
    x_c = Activation('relu')(x_c)
    x_c = Conv2D(filters = output_filters,
                 kernel_size = (1, 1),
                 strides = (1, 1),
                 name = base_name + 'mix_conv_1')(x_c)
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'mix_bn_1')(x_c)
    x_c = Activation('relu')(x_c)
    x_c = Conv2D(filters = output_filters,
                 kernel_size = (1, 1),
                 strides = (1, 1),
                 name = base_name + 'mix_conv_2')(x_c)
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'mix_bn_2')(x_c)
    x_c = Activation('relu')(x_c)
    return x_c
def _select_kernel(inputs, kernels, filters, cardinality, block):
    '''
    Select-kernel block: each channel group is convolved with both kernel sizes,
    the two kernel paths are summed per group, concatenated, then mixed by two
    1x1 convolutions.

    kernels = [3, 5]
    cardinality = 2
    '''
    base_name = 'sk_block_' + str(block) + '_'
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []
    input_filters = inputs._keras_shape[channel_axis]
    grouped_filters = int(input_filters / cardinality)
    for c in range(cardinality):
        # FIX: bind `c` and `grouped_filters` as default arguments so the Lambda
        # does not late-bind the loop variable (a plain closure would use the
        # final value of `c` if Keras re-invokes the lambda, e.g. on reload).
        if K.image_data_format() == 'channels_last':
            x = Lambda(lambda z, c=c, g=grouped_filters: z[:, :, :, c * g:(c + 1) * g])(inputs)
        else:
            x = Lambda(lambda z, c=c, g=grouped_filters: z[:, c * g:(c + 1) * g, :, :])(inputs)
        x_1 = Conv2D(filters = filters // cardinality,
                     kernel_size = kernels[0],
                     strides = (1, 1),
                     padding = 'same',
                     name = base_name + 'grouped_conv1_' + str(c))(x)
        group_list.append(x_1)
        x_2 = Conv2D(filters = filters // cardinality,
                     kernel_size = kernels[1],
                     strides = (1, 1),
                     padding = 'same',
                     name = base_name + 'grouped_conv2_' + str(c))(x)
        group_list.append(x_2)
    # Sum the two kernel paths group-wise (assumes cardinality == 2, as documented).
    o_1 = add([group_list[0], group_list[2]])
    o_2 = add([group_list[1], group_list[3]])
    # The shape of o_1, o_2: b, h, w, filters // cardinality
    group_merge = concatenate([o_1, o_2], axis = channel_axis)
    # The shape of group_merge is: b, h, w, filters
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'grouped_conv_bn')(group_merge)
    x_c = Activation('relu')(x_c)
    x_c = Conv2D(filters = filters,
                 kernel_size = (1, 1),
                 strides = (1, 1),
                 name = base_name + 'mix_conv_1')(x_c)
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'mix_bn_1')(x_c)
    x_c = Activation('relu')(x_c)
    x_c = Conv2D(filters = filters,
                 kernel_size = (1, 1),
                 strides = (1, 1),
                 name = base_name + 'mix_conv_2')(x_c)
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'mix_bn_2')(x_c)
    x_c = Activation('relu')(x_c)
    return x_c
def _initial_conv_block(inputs):
    """Network stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (spatial size / 4)."""
    stem = Conv2D(filters=32,
                  kernel_size=(7, 7),
                  strides=(2, 2),
                  padding='same',
                  name='init_conv')(inputs)
    stem = BatchNormalization(axis=bn_axis, name='init_conv_bn')(stem)
    stem = Activation('relu')(stem)
    return MaxPooling2D(pool_size=(3, 3),
                        strides=(2, 2),
                        padding='same',
                        name='init_MaxPool')(stem)
def Weakly_DenseNet(input_shape, classes):
    """Build the 'weakly dense' select-kernel classifier.

    Select-kernel blocks are chained with DenseNet-style concatenation of
    earlier feature maps; the inline shape comments assume a 224x224x3 input.

    :param input_shape: input tensor shape, e.g. (224, 224, 3)
    :param classes: number of softmax output classes
    :return: an uncompiled Keras Model
    """
    inputs = Input(shape = input_shape)
    # The shape of inputs: 224 x 224 x 3
    x_1 = _initial_conv_block(inputs)
    # The shape of x_1: 56 x 56 x 32
    x_2 = _select_kernel(x_1, [3, 5], 64, 2, 1)
    # The shape of x_2: 56 x 56 x 64
    pool_1 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_2)
    # The shape of pool_1: 28 x 28 x 64
    x_3 = Concatenate(axis = bn_axis)([x_1, x_2])
    # The shape of x_3: 56 x 56 x 96
    x_4 = _select_kernel(x_3, [3, 5], 128, 2, 2)
    # The shape of x_4: 56 x 56 x 128
    pool_2 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_4)
    # The shape of pool_2: 28 x 28 x 128
    x_5 = Concatenate(axis = bn_axis)([pool_1, pool_2])
    # The shape of x_5: 28 x 28 x 192
    x_6 = _select_kernel(x_5, [3, 5], 256, 2, 3)
    # The shape of x_6: 28 x 28 x 256
    pool_3 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_6)
    # The shape of pool_3: 14 x 14 x 256
    x_7 = Concatenate(axis = bn_axis)([pool_2, x_6])
    # The shape of x_7: 28 x 28 x 384
    x_8 = _select_kernel(x_7, [3, 5], 512, 2, 4)
    # The shape of x_8: 28 x 28 x 512
    pool_4 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_8)
    # The shape of pool_4: 14 x 14 x 512
    x_9 = Concatenate(axis = bn_axis)([pool_3, pool_4])
    # The shape of x_9: 14 x 14 x 768
    x_10 = _select_kernel(x_9, [3, 5], 512, 2, 5)
    # The shape of x_10: 14 x 14 x 512
    pool_5 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_10)
    # The shape of pool_5: 7 x 7 x 512
    # Classifier head: global pooling -> FC(512) -> dropout -> softmax.
    output = GlobalAveragePooling2D()(pool_5)
    output = Dense(512, activation = 'relu', name = 'fc_1')(output)
    output = Dropout(rate = 0.5, name = 'dropout')(output)
    output = Dense(classes, activation = 'softmax', name = 'fc_2')(output)
    model = Model(inputs = inputs, outputs = output, name = 'Grouped_Weakly_Densenet_19')
    return model
if __name__ == '__main__':
    # Build a toy 10-class model, save a diagram of it, and report its structure.
    model = Weakly_DenseNet((224, 224, 3), 10)
    plot_model(model, to_file='model_SK_Net.png', show_shapes=True, show_layer_names=True)
    print(len(model.layers))
    model.summary()
| 7,814 | 3,125 |
# isort: skip_file
from .partitioned_job import my_partitioned_config
from dagster import HourlyPartitionsDefinition
# start_marker
from dagster import build_schedule_from_partitioned_job, job
@job(config=my_partitioned_config)
def do_stuff_partitioned():
    # Body intentionally empty: this snippet only demonstrates building a
    # schedule from a partitioned job's config.
    ...
do_stuff_partitioned_schedule = build_schedule_from_partitioned_job(
do_stuff_partitioned,
)
# end_marker
# start_partitioned_asset_schedule
from dagster import define_asset_job
partitioned_asset_job = define_asset_job(
"partitioned_job",
selection="*",
partitions_def=HourlyPartitionsDefinition(start_date="2022-05-31", fmt="%Y-%m-%d"),
)
asset_partitioned_schedule = build_schedule_from_partitioned_job(
partitioned_asset_job,
)
# end_partitioned_asset_schedule
from .static_partitioned_job import continent_job, CONTINENTS
# start_static_partition
from dagster import schedule
@schedule(cron_schedule="0 0 * * *", job=continent_job)
def continent_schedule():
    """Every midnight, request one run per continent partition."""
    for continent in CONTINENTS:
        yield continent_job.run_request_for_partition(
            partition_key=continent, run_key=continent
        )
# end_static_partition
# start_single_partition
@schedule(cron_schedule="0 0 * * *", job=continent_job)
def antarctica_schedule():
    """Daily schedule that always targets the 'Antarctica' partition."""
    yield continent_job.run_request_for_partition(
        partition_key="Antarctica", run_key=None
    )
# end_single_partition
| 1,386 | 480 |
from django.apps import apps
from django.contrib import admin
from django.contrib.admin.sites import AlreadyRegistered
# Auto-register every model of the "annotator" app with the default admin site,
# skipping any model that was already registered explicitly elsewhere.
for model in apps.get_app_config("annotator").get_models():
    try:
        admin.site.register(model)
    except AlreadyRegistered:
        pass
| 267 | 74 |
import pyfirmata
# Arduino UNO pin map: digital pins 0-13; analog pins A0-A5 occupy Firmata
# indices 18-23.  # NOTE(review): exact index mapping depends on the board
# layout pyfirmata reports — confirm for non-UNO boards.
dPins = range(14)
aPins = range(18,24)
A0,A1,A2,A3,A4,A5 = aPins
# Arduino-style level / direction constants.
HIGH,OUT = (1,1)
LOW,IN = (0,0)
class Driver():
    """Thin Arduino-style wrapper around a pyfirmata board."""

    def __init__(self, device):
        self.board = pyfirmata.Arduino(device)
        # Setup Analog Pins: an Iterator thread must run so analog readings update.
        it = pyfirmata.util.Iterator(self.board)
        it.start()
        for pin in aPins:
            self.board.analog[pin - aPins[0]].enable_reporting()
        # Delay 1 sec to let the board settle before use.
        self.board.pass_time(1)

    def analogRead(self):
        pass  # TODO: not implemented yet

    def analogWrite(self):
        pass  # TODO: not implemented yet

    def digitalRead(self):
        pass  # TODO: not implemented yet

    def digitalWrite(self, pin, state):
        """Write HIGH/LOW ``state`` to digital ``pin``."""
        self.board.digital[pin].write(state)

    def pinMode(self):
        pass  # TODO: not implemented yet

    def serialPrintln(self, msg):
        # FIX: the original used the Python 2 statement `print msg`, which is a
        # SyntaxError under Python 3; the call form works on both versions.
        print(msg)

    def exit(self):
        """Release the serial connection to the board."""
        self.board.exit()
# -*- coding: utf-8 -*-
from contnext_viewer.models import Network, engine
from sqlalchemy.orm import sessionmaker
def create_json_file(id, node):
    """Build d3-style node/link dicts for the neighbourhood of ``node`` in network ``id``.

    Returns ([], []) when the network cannot be found or loaded.
    """
    # Start database session
    Session = sessionmaker(bind=engine)
    sqlsession = Session()
    try:
        # FIX: the original ran the same query twice and indexed [0] on .all();
        # one .first() call fetches both columns and avoids the IndexError path.
        record = sqlsession.query(Network).filter(Network.identifier == id).first()
        if record is None:
            return [], []
        g = record.data
        properties = record.properties
    except Exception:
        # Keep the original best-effort contract (was a bare `except:`):
        # any lookup failure yields an empty result.
        return [], []
    finally:
        # FIX: the original leaked the session on every call.
        sqlsession.close()
    # Get edges linked to nodes:
    edges = list(g.edges(node))
    node_list = list(set([i[1] for i in edges] + [i[0] for i in edges]))
    nodes_dic = {name: index for index, name in enumerate(node_list)}
    nodes = [{'id': nodes_dic[str(i)], 'name': str(i),
              'connections': properties.get(i).get('connections'),
              'rank': properties.get(i).get('rank'),
              'housekeeping': properties.get(i).get('housekeeping')}
             for i in node_list]
    links = [{'source': nodes_dic[u[0]], 'target': nodes_dic[u[1]]} for u in edges]
    return nodes, links
| 1,019 | 369 |
# -*- coding: utf-8 -*-
"""
Linear algebra operations and helpers.
Inspired by Christoph Gohlke's transformation.py <http://www.lfd.uci.edu/~gohlke/>
This module is not directly exported by the `crystals` library. Use it with caution.
"""
import math
import numpy as np
# standard basis
e1, e2, e3 = np.eye(3)
def affine_map(array):
    """
    Extends 3x3 transform matrices to 4x4, i.e. general affine transforms.

    Parameters
    ----------
    array : ndarray, shape {(3,3), (4,4)}
        Transformation matrix. If shape = (4,4), returned intact.

    Returns
    -------
    extended : ndarray, shape (4,4)
        Extended array

    Raises
    ------
    ValueError : If the transformation matrix is neither 3x3 or 4x4
    """
    shape = array.shape
    if shape == (4, 4):
        # Already an affine map; return the input unchanged.
        return array
    if shape == (3, 3):
        # Embed the 3x3 block in the top-left corner; homogeneous corner is 1.
        extended = np.zeros(shape=(4, 4), dtype=array.dtype)
        extended[:3, :3] = array
        extended[-1, -1] = 1
        return extended
    raise ValueError(
        "Array shape not 3x3 or 4x4, and thus is not a transformation matrix."
    )
def transform(matrix, array):
    """
    Applies a matrix transform on an array.

    Parameters
    ----------
    matrix : ndarray, shape {(3,3), (4,4)}
        Transformation matrix.
    array : ndarray, shape {(3,), (3,3), (4,4)}
        Array to be transformed. Either a 1x3 vector, or a transformation
        matrix in 3x3 or 4x4 shape.

    Returns
    -------
    transformed : ndarray
        Transformed array, either a 1D vector or a 4x4 transformation matrix

    Raises
    ------
    ValueError : If the transformation matrix is neither 3x3 or 4x4
    """
    array = np.asarray(array)
    if matrix.shape not in [(3, 3), (4, 4)]:
        raise ValueError(
            f"Input matrix is neither a 3x3 or 4x4 matrix, but \
            rather of shape {matrix.shape}."
        )

    matrix = affine_map(matrix)
    if array.ndim == 1:
        # Promote the 3-vector to homogeneous coordinates, apply, project back.
        homogeneous = np.array([0, 0, 0, 1], dtype=array.dtype)
        homogeneous[:3] = array
        return np.dot(matrix, homogeneous)[:3]
    # Otherwise the operand is itself a transformation matrix: compose them.
    return np.dot(matrix, affine_map(array))
def translation_matrix(direction):
    """
    Return matrix to translate by direction vector.

    Parameters
    ----------
    direction : array_like, shape (3,)

    Returns
    -------
    translation : `~numpy.ndarray`, shape (4,4)
        4x4 translation matrix.
    """
    translation = np.eye(4)
    # Only the first three components of `direction` are used.
    translation[:3, 3] = np.asarray(direction)[:3]
    return translation
def change_of_basis(basis1, basis2=(e1, e2, e3)):
    """
    Returns the matrix that transforms vectors expressed in one basis
    to vectors expressed in another basis.

    Parameters
    ----------
    basis1 : list of array_like, shape (3,)
        First basis
    basis2 : list of array_like, shape (3,), optional
        Second basis. By default, this is the standard basis

    Returns
    -------
    cob : `~numpy.ndarray`, shape (3,3)
        Change-of-basis matrix.
    """
    # Columns of this matrix are the basis-1 vectors in standard coordinates.
    to_standard = np.hstack(tuple(np.asarray(v).reshape(3, 1) for v in basis1))
    # Inverting the basis-2 column matrix maps standard coords into basis 2.
    from_standard = np.linalg.inv(np.hstack(tuple(np.asarray(v).reshape(3, 1) for v in basis2)))
    return np.dot(from_standard, to_standard)
def is_basis(basis):
    """
    Returns true if the set of vectors forms a basis. This is done by checking
    whether basis vectors are independent via an eigenvalue calculation.

    Parameters
    ----------
    basis : list of array-like, shape (3,)

    Returns
    -------
    out : bool
        Whether or not the basis is valid.
    """
    # A singular (dependent) set has at least one zero eigenvalue.
    eigenvalues = np.linalg.eigvals(np.asarray(basis))
    return 0 not in eigenvalues
def is_rotation_matrix(matrix):
    """
    Checks whether a matrix is orthogonal with unit determinant (1 or -1),
    properties of rotation matrices.

    Parameters
    ----------
    matrix : ndarray, shape {(3,3), (4,4)}
        Rotation matrix candidate. If (4,4) matrix is provided,
        only the top-left block matrix of (3,) is checked

    Returns
    -------
    result : bool
        If True, input could be a rotation matrix.
    """
    # TODO: is this necessary? should a composite transformation
    #       of translation and rotation return True?
    # if matrix.shape == (4,4):
    #     matrix = matrix[:3,:3]
    inverse_equals_transpose = np.allclose(np.linalg.inv(matrix), np.transpose(matrix))
    determinant_is_unit = np.allclose(abs(np.linalg.det(matrix)), 1)
    return inverse_equals_transpose and determinant_is_unit
def rotation_matrix(angle, axis=(0, 0, 1)):
    """
    Return matrix to rotate about axis defined by direction around the origin [0,0,0].

    Parameters
    ----------
    angle : float
        Rotation angle [rad]
    axis : array-like of length 3
        Axis about which to rotate

    Returns
    -------
    matrix : `~numpy.ndarray`, shape (3,3)
        Rotation matrix.

    See also
    --------
    translation_rotation_matrix

    Notes
    -----
    To combine rotation and translations, see
    http://www.euclideanspace.com/maths/geometry/affine/matrix4x4/index.htm
    """
    sin_a = math.sin(angle)
    cos_a = math.cos(angle)

    # Normalize the rotation axis to a unit vector.
    unit = np.asarray(axis)
    unit = unit / np.linalg.norm(unit)

    # Rodrigues' rotation formula:
    #   R = cos(a) I + (1 - cos(a)) u u^T + sin(a) [u]_x
    ux, uy, uz = unit * sin_a
    cross_term = np.array(
        [
            [0.0, -uz, uy],
            [uz, 0.0, -ux],
            [-uy, ux, 0.0],
        ]
    )
    return np.diag([cos_a, cos_a, cos_a]) + np.outer(unit, unit) * (1.0 - cos_a) + cross_term
def translation_rotation_matrix(angle, axis, translation):
    """
    Returns a 4x4 matrix that includes a rotation and a translation.

    Parameters
    ----------
    angle : float
        Rotation angle [rad]
    axis : array-like of length 3
        Axis about which to rotate
    translation : array_like, shape (3,)
        Translation vector

    Returns
    -------
    matrix : `~numpy.ndarray`, shape (4,4)
        Affine transform matrix.
    """
    # Start from the pure rotation, then write the translation into the last column.
    combined = affine_map(rotation_matrix(angle=angle, axis=axis))
    combined[:3, 3] = np.asarray(translation)
    return combined
def change_basis_mesh(xx, yy, zz, basis1, basis2):
    """
    Changes the basis of meshgrid arrays.

    Parameters
    ----------
    xx, yy, zz : ndarrays
        Arrays of equal shape, such as produced by numpy.meshgrid.
    basis1 : list of ndarrays, shape(3,)
        Basis of the mesh
    basis2 : list of ndarrays, shape(3,)
        Basis in which to express the mesh

    Returns
    -------
    XX, YY, ZZ : `~numpy.ndarray`
    """
    # Build coordinate array row-wise.
    # FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented equivalent dtype.
    changed = np.empty(shape=(3, xx.size), dtype=float)
    linearized = np.empty(shape=(3, xx.size), dtype=float)
    linearized[0, :] = xx.ravel()
    linearized[1, :] = yy.ravel()
    linearized[2, :] = zz.ravel()

    # Change the basis at each row
    COB = change_of_basis(basis1, basis2)
    np.dot(COB, linearized, out=changed)

    return (
        changed[0, :].reshape(xx.shape),
        changed[1, :].reshape(yy.shape),
        changed[2, :].reshape(zz.shape),
    )
def minimum_image_distance(xx, yy, zz, lattice):
    """
    Returns a periodic array according to the minimum image convention.

    Parameters
    ----------
    xx, yy, zz : ndarrays
        Arrays of equal shape, such as produced by numpy.meshgrid.
    lattice : list of ndarrays, shape(3,)
        Basis of the mesh

    Returns
    -------
    r : `~numpy.ndarray`
        Minimum image distance over the lattice
    """
    COB = change_of_basis(np.eye(3), lattice)
    # FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented equivalent dtype.
    linearized = np.empty(shape=(3, xx.size), dtype=float)  # In the standard basis
    ulinearized = np.empty_like(linearized)  # In the unitcell basis

    linearized[0, :] = xx.ravel()
    linearized[1, :] = yy.ravel()
    linearized[2, :] = zz.ravel()

    # Go to unitcell basis, where the cell is cubic of side length 1
    np.dot(COB, linearized, out=ulinearized)
    # Wrap each coordinate to the nearest periodic image.
    ulinearized -= np.rint(ulinearized)
    np.dot(np.linalg.inv(COB), ulinearized, out=linearized)

    return np.reshape(np.linalg.norm(linearized, axis=0), xx.shape)
| 8,302 | 2,879 |
import json
from django.db import models
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that keeps non-ASCII characters verbatim in its output."""

    def __init__(self, *args, **kwargs):
        # Force ensure_ascii off so e.g. accented characters are not \u-escaped.
        kwargs['ensure_ascii'] = False
        super().__init__(*args, **kwargs)
class Document(models.Model):
    """Abstract base model that mirrors keys of the ``document`` JSON field
    onto same-named instance attributes when loading an existing row."""

    # JSON payload; encoded without ASCII-escaping (see JSONEncoder above).
    document = models.JSONField(encoder=JSONEncoder, default=dict)
    # Class-level placeholder; a fresh list is assigned per instance in __init__.
    images = None

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        self.images = []
        super().__init__(*args, **kwargs)
        if self.id:
            for k in self.document:
                if hasattr(self, k):
                    setattr(self, k, self.document[k])
                else:
                    # FIX: include the offending key in the error message; the
                    # original raised a bare, uninformative KeyError.
                    raise KeyError(k)
| 675 | 198 |
import sys
import getpass
import subprocess
import pkg_resources
def enter_key_only():
    """Block until the user presses Enter, echoing nothing to the terminal."""
    # getpass suppresses the echo of whatever is typed before Enter.
    getpass.getpass("")
def enter_key_confirmation():
    """Prompt the user to confirm with Enter (or abort with CTRL+C)."""
    print("Press \'Enter\' to continue or \'CTRL+C\' to abort the program", end="", flush=True)
    # Enter is read via getpass so nothing typed is echoed.
    getpass.getpass("")
def input_option():
    """Read one line of user input and return it as a string."""
    choice = input("")
    return str(choice)
def exception_translator():
    """Return (name, explanation) describing the exception currently being handled.

    Must be called from inside an ``except`` block so ``sys.exc_info()`` is set.
    """
    exc_type, exc_value, _ = sys.exc_info()
    name = exc_type.__name__
    explanation = str(exc_value)
    # Substitute a stock message when the exception carried no text.
    if not explanation:
        explanation = "There's no explanation provided for this exception."
    return name, explanation
def module_verifier(module):
    """Return ``(name, version)`` of an installed module, or ``(False, False)``
    when the distribution cannot be found.

    Lookup goes through ``pkg_resources`` (setuptools).
    """
    try:
        # One lookup instead of the original's two identical calls.
        distribution = pkg_resources.get_distribution(module)
        return distribution.key, distribution.version
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed. Typical failure: pkg_resources.DistributionNotFound.
        return False, False
def install_requirements(module):
    """pip-install ``module`` quietly; return True on success, False otherwise."""
    print("Installing required module: ", end="", flush=True)
    try:
        # Run pip in a child process, discarding its output.
        pip_command = [sys.executable, "-m", "pip", "install", module]
        subprocess.check_call(pip_command, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        print("Done, " + module + " " + module_verifier(module)[1], flush=True)
        return True
    # Module installations failed due to an Internet problem
    except:
        print("Failed (" + module + ")", flush=True)
        return False
def program_requirements():
    """Verify required third-party modules are installed; install any missing ones.

    If anything had to be installed (whether or not every install succeeded),
    the program exits so it can be restarted with the complete environment.
    """
    # Required modules
    prerequisites = ["netmiko", "pythonping", "bcrypt", "cffi", "cryptography", "future", "ntc-templates", "paramiko", "pycparser", "pynacl", "pyserial", "scp", "setuptools", "six", "tenacity", "textfsm"]
    # Collect the prerequisites that are not currently installed. This replaces
    # the original's `all([...append(...)...])` truthiness trick, which buried
    # the side effect of building the missing-module list inside an all() call.
    missing_modules = [module for module in prerequisites if not module_verifier(module)[0]]
    if not missing_modules:
        # All required modules are installed
        return
    print("\n \ Self-diagnostics and Self-recovery")
    print(" \___________________________________________________________________\n")
    # Try to install every missing module, remembering each outcome.
    install_results = [install_requirements(module) for module in missing_modules]
    if all(install_results):
        print("\nDiagnostics and recovery are completed")
    else:
        # Module installations failed due to an Internet problem
        print("\nPlease check the Internet connection and try again!")
        print("Alternatively, please perform manual module installation!")
    # Exit program (in either case) so it restarts with the new modules.
    sys.exit()
def powered_by(module):
    """Return a display string combining the installed module's name and version."""
    name, version = module_verifier(module)
    return name + " " + version
def program_cancellation():
    """Print a goodbye message and terminate the program."""
    print("\nEXIT: I'll see you again :)")
    # Exit program
    sys.exit()
| 4,194 | 1,166 |
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http.response import JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.forms import modelformset_factory
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponseForbidden, HttpResponse
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.contrib import messages
from django.utils import timezone
from django.db import transaction
import csv
from utils.validators import liu_id_validator
from .forms import EventForm, CheckForm, ImportEntriesForm, RejectionForm, AttachmentForm, \
ImageAttachmentForm, DeleteForm
from .models import Event, EntryAsPreRegistered, EntryAsReserve, EntryAsParticipant, OtherAttachment, \
ImageAttachment
from .exceptions import CouldNotRegisterException
from user_managements.models import IUser
from django.utils.translation import ugettext as _
# Create your views here.
from iportalen import settings
from utils.time import six_months_back
@login_required()
def summarise_noshow(request,pk):
    """Close an event and flag every absent preregistered user as a no-show.

    Sends a warning e-mail to users reaching their second recorded no-show
    and a suspension notice at their third. Only event administrators may
    call this; processing happens only the first time the event is closed.
    """
    event = get_object_or_404(Event,pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    # Guard so the flags and e-mails are applied only once per event.
    if not event.finished:
        event.finished = True
        # presumably event.no_show yields the users who did not check in — TODO confirm
        noshows = event.no_show
        # Mark each absent user's preregistration entry as a no-show.
        for user in noshows:
            noshow = EntryAsPreRegistered.objects.get(event=event, user=user)
            noshow.no_show = True
            noshow.save()
        # Warn/suspend users hitting their 2nd or 3rd no-show.
        # NOTE(review): get_noshow() is queried twice per user; presumably it
        # counts no-shows within the last six months — verify in the manager.
        for user in noshows:
            if len(EntryAsPreRegistered.objects.get_noshow(user=user)) == 2:
                subject = "Du har nu missat ditt andra event"
                body = "<p>Hej du har missat 2 event som du har anmält dig på. Om du missar en tredje gång så blir vi tvungna att stänga av dig från " \
                       "framtida event fram tills ett halv år framåt.</p>"
                send_mail(subject, "", settings.EMAIL_HOST_USER, [user.email, ], fail_silently=False, html_message=body)
            elif len(EntryAsPreRegistered.objects.get_noshow(user=user)) == 3:
                subject = "Du har nu missat ditt tredje event"
                body = "<p>Hej igen du har missat 3 event som du har anmält dig på. Du kommer härmed att blir avstängd från " \
                       "framtida event fram tills ett halv år framåt. Ha en bra dag :)</p>"
                send_mail(subject, "", settings.EMAIL_HOST_USER, [user.email, ], fail_silently=False, html_message=body)
        event.save()
    return redirect("events:administer event", pk=pk)
def view_event(request, pk):
    """Render the event detail page, honouring the event's visibility rules."""
    event = get_object_or_404(Event, pk=pk)
    publicly_visible = event.status == Event.APPROVED and event.show_event_before_experation
    if publicly_visible or event.can_administer(request.user):
        return render(request, "events/event.html", {"event": event})
    raise PermissionDenied
@login_required()
def register_to_event(request, pk):
    """Register the current user on an event (POST only), then redirect back."""
    if request.method == "POST":
        event = get_object_or_404(Event, pk=pk)
        try:
            event.register_user(request.user)
        except CouldNotRegisterException as err:
            error_text = _("Fel, kunde inte registrera dig på ") + err.event.headline + _(" för att ") + err.reason + "."
            messages.error(request, error_text)
        else:
            messages.success(request, _("Du är nu registrerad på eventet."))
    return redirect("events:event", pk=pk)
@login_required()
@transaction.atomic
def import_registrations(request, pk):
    """Bulk-register users on an event from a newline-separated list of LiU ids.

    Only event administrators may use this view. Each id is processed
    independently; failures are reported through the messages framework.
    """
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    if request.method == 'POST':
        form = ImportEntriesForm(request.POST)
        if form.is_valid():
            list_of_liu_id = form.cleaned_data['users'].splitlines()
            for liu_id in list_of_liu_id:
                try:
                    event.register_user(IUser.objects.get(username=liu_id))
                except CouldNotRegisterException as err:
                    messages.error(
                        request,
                        "".join([_("Fel, kunde inte registrera"),
                                 " {liu_id} ",
                                 _("på"),
                                 " {hedline} ",
                                 _("för att"),
                                 " {reason}."]).format(
                            liu_id=liu_id,
                            hedline=err.event.headline,
                            reason=err.reason))
                except ObjectDoesNotExist:
                    # BUG FIX: the template uses the named placeholder {liu_id},
                    # so format() must receive it as a keyword argument — the
                    # original positional .format(liu_id) raised KeyError.
                    messages.error(request, "".join(["{liu_id} ", _("finns inte i databasen.")]).format(liu_id=liu_id))
    else:
        form = ImportEntriesForm()
    return render(request, "events/import_users.html", {'form': form})
@login_required()
def register_as_reserve(request, pk):
    """Put the current user on the event's reserve list (POST only)."""
    if request.method == "POST":
        event = get_object_or_404(Event, pk=pk)
        entry = event.register_reserve(request.user)
        position_text = _("Du är nu anmäld som reserv på eventet, du har plats nr. ") + str(entry.position()) + "."
        messages.success(request, position_text)
    return redirect("events:event", pk=pk)
@login_required()
def administer_event(request, pk):
    """Show the administration page for an event to its administrators."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied  # Nope.
    form = DeleteForm(request.POST or None, request.FILES or None,)
    return render(request, 'events/administer_event.html', {'event': event, 'form': form})
@login_required()
def preregistrations_list(request, pk):
    """List an event's preregistrations (administrators only)."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied  # Nope.
    return render(request, 'events/event_preregistrations.html', {'event': event})
@login_required()
def participants_list(request, pk):
    """List an event's checked-in participants (administrators only)."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied  # Nope.
    return render(request, 'events/event_participants.html', {'event': event})
@login_required()
def speech_nr_list(request, pk):
    """Show the speech-number list for an event (administrators only)."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied  # Nope.
    return render(request, 'events/event_speech_nr_list.html', {'event': event})
@login_required()
def reserves_list(request, pk):
    """List the reserve entries of an event (administrators only)."""
    event = get_object_or_404(Event, pk=pk)
    event_reserves = event.reserves_object()
    if not event.can_administer(request.user):
        raise PermissionDenied  # Nope.
    context = {'event': event, 'event_reserves': event_reserves}
    return render(request, 'events/event_reserves.html', context)
@login_required()
def check_in(request, pk):
    """Render the manual check-in page for an event (administrators only)."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    # Reaching this point implies administration rights, hence True below.
    context = {'form': CheckForm(), 'event': event, "can_administer": True}
    return render(request, 'events/event_check_in.html', context)
@login_required()
def check_in_api(request, pk):
    """JSON endpoint backing the check-in page.

    Expects a POST with a CheckForm (LiU id or RFID number, plus the optional
    ``force_check_in`` flag). Creates an EntryAsParticipant with a unique
    speech number and answers ``{"status": ..., "message": ...}``.

    Raises PermissionDenied when the requester cannot administer the event.
    """
    if request.method == 'POST':
        # BUG FIX: the original bare "except:" wrapped both the lookup and the
        # permission check, so the PermissionDenied raised here was swallowed
        # and answered with a misleading "no such event" JSON. Catch only the
        # missing-event case and let PermissionDenied propagate as a 403.
        try:
            event = Event.objects.get(pk=pk)
        except Event.DoesNotExist:
            return JsonResponse({"status": "error", "message": _("Inget event med detta idnummer.")})
        if not event.can_administer(request.user):
            raise PermissionDenied
        form = CheckForm(request.POST)
        if form.is_valid():
            form_user = form.cleaned_data["user"]
            # Resolve the user: by LiU id first, then by RFID number.
            try:
                event_user = IUser.objects.get(username=form_user)
            except ObjectDoesNotExist:
                try:
                    event_user = IUser.objects.get(rfid_number=form_user)
                except ObjectDoesNotExist:
                    # NOTE(review): this message talks about an event, but the
                    # failed lookup is the *user* — looks like a copy-paste.
                    # Left unchanged to keep user-visible text stable.
                    return JsonResponse({"status": "error", "message": _("Inget event med detta idnummer.")})
            prereg = None
            try:
                # Preregistered
                prereg = EntryAsPreRegistered.objects.get(event=event, user=event_user)
            except ObjectDoesNotExist:
                try:
                    prereg = EntryAsReserve.objects.get(event=event, user=event_user)
                    if not form.cleaned_data["force_check_in"]:
                        return JsonResponse({"status": "error", "message": "".join(["{0} {1} ", _("är anmäld som reserv")]).format(
                            event_user.first_name.capitalize(), event_user.last_name.capitalize())})
                except ObjectDoesNotExist:
                    if not form.cleaned_data["force_check_in"]:
                        return JsonResponse({"status": "error", "message": "".join(["{0} {1} ", _("är inte anmäld på eventet")]).format(
                            event_user.first_name.capitalize(), event_user.last_name.capitalize())})
            # Refuse a double check-in.
            try:
                EntryAsParticipant.objects.get(event=event, user=event_user)
                return JsonResponse({"status": "error", "message": _("Redan incheckad.")})
            except ObjectDoesNotExist:
                pass
            participant = EntryAsParticipant(user=event_user, event=event)
            participant.add_speech_nr()
            participant.save()
            # Re-draw until the speech number is unique within this event.
            while EntryAsParticipant.objects.filter(event=event, speech_nr=participant.speech_nr).count() > 1:
                participant.add_speech_nr()
                participant.save()
            if event.extra_deadline:
                # prereg may be None (forced check-in of an unregistered user)
                # or a reserve entry — both fall back to no extra text.
                try:
                    if prereg.timestamp < event.extra_deadline:
                        extra_str = _("<br>Anmälde sig i tid för att ") + event.extra_deadline_text + "."
                    else:
                        extra_str = _("<br><span class='errorlist'>Anmälde sig ej i tid för att ") + \
                                    event.extra_deadline_text + ".</span>"
                except (AttributeError, TypeError):
                    extra_str = ""
            else:
                extra_str = ""
            return JsonResponse({"status": "success", "message": "".join(["{0} {1} ",
                                                                          _("checkades in med talarnummer:"),
                                                                          " {2}{3}"]).format(
                event_user.first_name.capitalize(),
                event_user.last_name.capitalize(),
                participant.speech_nr,
                extra_str
            )})
        return JsonResponse({"status": "error", "message": _("Fyll i Liu-id eller RFID.")})
    return JsonResponse({})
@login_required()
def all_unapproved_events(request):
    """List events awaiting approval or cancellation (reviewers only)."""
    if not request.user.has_perm("events.can_approve_event"):
        raise PermissionDenied
    pending = Event.objects.filter(status=Event.BEING_REVIEWED, end__gte=timezone.now())
    pending_cancellations = Event.objects.filter(status=Event.BEING_CANCELD, end__gte=timezone.now())
    return render(request, 'events/approve_event.html',
                  {'events': pending, 'events_to_delete': pending_cancellations})
@login_required()
@transaction.atomic
def approve_event(request, event_id):
    """Approve an event under review.

    Returns 404 for an unknown id (consistent with the other views, which all
    use get_object_or_404 — the original ``Event.objects.get`` produced a 500)
    and 403 when the approval is refused.
    """
    event = get_object_or_404(Event, pk=event_id)
    if event.approve(request.user):
        return redirect(reverse('events:unapproved'))
    raise PermissionDenied
@login_required()
def unapprove_event(request, pk):
    """Reject an event under review, storing the reviewer's rejection message."""
    event = Event.objects.get(pk=pk)
    form = RejectionForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        if not event.reject(request.user, form.cleaned_data['rejection_message']):
            raise PermissionDenied
        messages.success(request, _("Eventet har avslagits."))
        return redirect('events:unapproved')
    return render(request, 'events/reject.html', {'form': form, 'event': event})
@login_required()
def CSV_view_participants(request, pk):
    """Stream the event's participants as a CSV attachment."""
    event = get_object_or_404(Event, pk=pk)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="participants.txt"'
    writer = csv.writer(response)
    writer.writerow(['These are your participants:'])
    for member in event.participants:
        writer.writerow([member.username, member.first_name, member.last_name, member.email])
    return response
@login_required()
def CSV_view_preregistrations(request, pk):
    """Stream the event's preregistrations as a CSV attachment."""
    event = get_object_or_404(Event, pk=pk)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="preregistrations.txt"'
    writer = csv.writer(response)
    writer.writerow(['These are your preregistrations:'])
    for member in event.preregistrations:
        writer.writerow([member.username, member.first_name, member.last_name, member.email])
    return response
@login_required()
def unregister(request, pk):
    """Remove the current user's registration from an event (POST only)."""
    if request.method == "POST":
        event = get_object_or_404(Event, pk=pk)
        try:
            event.deregister_user(request.user)
        except CouldNotRegisterException as err:
            error_text = "".join([_("Fel, kunde inte avregistrera dig på "),
                                  err.event.headline,
                                  _(" för att "),
                                  err.reason,
                                  "."])
            messages.error(request, error_text)
        else:
            messages.success(request, _("Du är nu avregistrerad på eventet."))
    return redirect("events:event", pk=pk)
def event_calender(request):
    """Render the event calendar page."""
    return render(request, "events/calender.html")
def event_calender_view(request):
    """Render the calendar view with all published events, ordered by start time."""
    published = Event.objects.published().order_by('start')
    return render(request, "events/calendar_view.html", {'events': published})
@login_required()
def registered_on_events(request):
    """List the current user's upcoming preregistrations and reserve spots."""
    preregistrations_events = [
        entry for entry in EntryAsPreRegistered.objects.filter(user=request.user)
        if entry.event.end >= timezone.now()
    ]
    reserve_events = [
        entry for entry in EntryAsReserve.objects.filter(user=request.user)
        if entry.event.end >= timezone.now()
    ]
    return render(request, "events/registerd_on_events.html",
                  {"reserve_events": reserve_events, "preregistrations_events": preregistrations_events})
@login_required()
def events_by_user(request):
    """Show all events owned by the current user."""
    return render(request, 'events/my_events.html',
                  {'user_events': Event.objects.user(request.user)})
@login_required()
def create_or_modify_event(request, pk=None):  # TODO: Reduce complexity
    """Create a new event, or propose changes to an existing one.

    With *pk* set, the existing event is edited; saving a reviewed event
    creates a *replacement* copy (replacing_id points at the original) so the
    approved version stays live until the change is approved. Without *pk*, a
    brand-new event is created. Drafts are saved directly; anything else is
    sent for review and the reviewers are notified by e-mail.
    """
    if pk:  # if pk is set we modify an existing event.
        # Warn if someone already proposed a replacement for this event.
        duplicates = Event.objects.filter(replacing_id=pk)
        if duplicates:
            links = ""
            for d in duplicates:
                links += "<a href='{0}'>{1}</a><br>".format(d.get_absolute_url(), d.headline)
            messages.error(request,
                           "".join([_("Det finns redan en ändrad version av det här arrangemanget! "
                                      "Är du säker på att du vill ändra den här?<br>"
                                      "Följande ändringar är redan föreslagna: <br> "),
                                    "{:}"]).format(links),
                           extra_tags='safe')
        event = get_object_or_404(Event, pk=pk)
        if not event.can_administer(request.user):
            raise PermissionDenied
        form = EventForm(request.POST or None, request.FILES or None, instance=event)
    else:  # new event.
        form = EventForm(request.POST or None, request.FILES or None)
    if request.method == 'POST':
        if form.is_valid():
            event = form.save(commit=False)
            # Normalize the draft checkbox to a plain bool.
            if form.cleaned_data['draft']:
                draft = True
            else:
                draft = False
            # get_new_status decides both the new status and whether a
            # replacement copy must be created ("new") — TODO confirm contract.
            status = event.get_new_status(draft)
            event.status = status["status"]
            event.user = request.user
            if status["new"]:
                # Save as a *copy*: clearing the id makes Django insert a new
                # row that points back at the original via replacing_id.
                event.replacing_id = event.id
                event.id = None
            event.save()
            form.save_m2m()
            if event.status == Event.DRAFT:
                messages.success(request, _("Dina ändringar har sparats i ett utkast."))
            elif event.status == Event.BEING_REVIEWED:
                # Notify the reviewers that something awaits approval.
                body = "<h1>Hej!</h1><br><br><p>Det finns nya artiklar att godkänna på i-Portalen.<br><a href='https://www.i-portalen.se/article/unapproved/'>Klicka här!</a></p><br><br><p>Med vänliga hälsningar, <br><br>Admins @ webgroup"
                send_mail('Ny artikel att godkänna', '', settings.EMAIL_HOST_USER, ['infowebb@isektionen.se'], fail_silently=False, html_message=body)
                messages.success(request, _("Dina ändringar har skickats för granskning."))
            return redirect('events:by user')
        else:
            messages.error(request, _("Det uppstod ett fel, se detaljer nedan."))
            return render(request, 'events/create_event.html', {
                'form': form,
            })
    return render(request, 'events/create_event.html', {
        'form': form,
    })
def upload_attachments(request, pk):
    """Manage the file attachments of an event via a model formset.

    Administrators can add, replace and delete OtherAttachment rows in one
    POST. NOTE(review): unlike upload_attachments_images below, this view has
    no @login_required() decorator — confirm whether that is intentional.
    """
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    AttachmentFormset = modelformset_factory(OtherAttachment,
                                             form=AttachmentForm,
                                             max_num=30,
                                             extra=3,
                                             can_delete=True,
                                             )
    if request.method == 'POST':
        formset = AttachmentFormset(request.POST, request.FILES, queryset=OtherAttachment.objects.filter(event=event))
        if formset.is_valid():
            for entry in formset.cleaned_data:
                # Blank extra forms produce empty dicts — skip them.
                if not entry == {}:
                    if entry['DELETE']:
                        try:
                            entry['id'].delete()  # TODO: Remove the clear option from html-widget (or make it work).
                        except AttributeError:
                            # entry['id'] is None for a new, immediately-deleted row.
                            pass
                    else:
                        # Reuse the existing row when editing, else create one.
                        if entry['id']:
                            attachment = entry['id']
                        else:
                            attachment = OtherAttachment(event=event)
                        attachment.file_name = entry['file'].name
                        attachment.file = entry['file']
                        attachment.display_name = entry['display_name']
                        attachment.modified_by = request.user
                        attachment.save()
            messages.success(request, 'Dina bilagor har sparats.')
            return redirect('events:manage attachments', pk=event.pk)
        else:
            # Invalid formset: re-render with the bound errors.
            return render(request, "events/attachments.html", {
                'event': event,
                'formset': formset,
            })
    formset = AttachmentFormset(queryset=OtherAttachment.objects.filter(event=event))
    return render(request, "events/attachments.html", {
        'event': event,
        'formset': formset,
    })
@login_required()
def upload_attachments_images(request, pk):
    """Manage the image attachments of an event via a model formset.

    Mirrors upload_attachments above, but for ImageAttachment rows
    (image file plus caption) and redirects to the event page on success.
    """
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    AttachmentFormset = modelformset_factory(ImageAttachment,
                                             form=ImageAttachmentForm,
                                             max_num=30,
                                             extra=3,
                                             can_delete=True,
                                             )
    if request.method == 'POST':
        formset = AttachmentFormset(request.POST,
                                    request.FILES,
                                    queryset=ImageAttachment.objects.filter(event=event)
                                    )
        if formset.is_valid():
            for entry in formset.cleaned_data:
                # Blank extra forms produce empty dicts — skip them.
                if not entry == {}:
                    if entry['DELETE']:
                        try:
                            entry['id'].delete()  # TODO: Remove the clear option from html-widget (or make it work).
                        except AttributeError:
                            # entry['id'] is None for a new, immediately-deleted row.
                            pass
                    else:
                        # Reuse the existing row when editing, else create one.
                        if entry['id']:
                            attachment = entry['id']
                        else:
                            attachment = ImageAttachment(event=event)
                        attachment.img = entry['img']
                        attachment.caption = entry['caption']
                        attachment.modified_by = request.user
                        attachment.save()
            messages.success(request, 'Dina bilagor har sparats.')
            return redirect('events:event', event.pk)
        else:
            # Invalid formset: re-render with the bound errors.
            return render(request, "events/attach_images.html", {
                'event': event,
                'formset': formset,
            })
    formset = AttachmentFormset(queryset=ImageAttachment.objects.filter(event=event))
    return render(request, "events/attach_images.html", {
        'event': event,
        'formset': formset,
    })
@login_required()
def user_view(request, pk):
    """Render the participant's view of an event.

    Raises PermissionDenied unless the current user is checked in as a
    participant of the event.
    """
    event = get_object_or_404(Event, pk=pk)
    # Pure membership check — the entry object itself was never used
    # (the original bound it to an unused local).
    try:
        EntryAsParticipant.objects.get(event=event, user=request.user)
    except EntryAsParticipant.DoesNotExist:
        raise PermissionDenied
    return render(request, "events/user_view.html", {'event': event})
def calendar_feed(request):
    """Serve the public iCalendar feed of all published events."""
    published = Event.objects.published()
    response = render(request,
                      template_name='events/feed.ics',
                      context={'events': published},
                      content_type='text/calendar; charset=UTF-8')
    response['Filename'] = 'feed.ics'
    response['Content-Disposition'] = 'attachment; filename=feed.ics'
    return response
def personal_calendar_feed(request, liu_id):
    """Serve an iCalendar feed of the events a specific user is involved in."""
    feed_user = get_object_or_404(IUser, username=liu_id)
    user_events = Event.objects.events_by_user(feed_user)
    response = render(request,
                      template_name='events/feed.ics',
                      context={'liu_user': feed_user, 'events': user_events},
                      content_type='text/calendar; charset=UTF-8')
    response['Filename'] = 'feed.ics'
    response['Content-Disposition'] = 'attachment; filename=feed.ics'
    return response
@login_required()
@permission_required('events.can_view_no_shows')
def show_noshows(request):
    """Show recent no-shows grouped per user with a per-user count.

    Relies on order_by("user") so that each user's no-show rows are adjacent;
    the loop below is a run-length grouping over that sorted stream.
    """
    user = request.user
    # presumably six_months_back is a datetime cutoff from utils.time — confirm
    no_shows = EntryAsPreRegistered.objects.filter(no_show = True, timestamp__gte= six_months_back).order_by("user")
    result = []
    # Accumulator for the group currently being built ("user" is None before
    # the first row so the first iteration always starts a new group).
    tempuser = {"user": None, "count": 0, "no_shows": []}
    for no_show in no_shows:
        if tempuser["user"] == no_show.user:
            # Same user as the previous row: extend the current group.
            tempuser["count"] += 1
        else:
            # New user: flush the finished group (if any) and start a new one.
            if tempuser["user"]:
                result.append(tempuser)
            tempuser = {"user": no_show.user, "count":1, "no_shows": []}
        tempuser["no_shows"].append(no_show)
    # Flush the trailing group.
    if tempuser["user"]:
        result.append(tempuser)
    return render(request, "events/show_noshows.html", {"user": user, "no_shows": result})
@login_required()
@permission_required('events.can_remove_no_shows')
def remove_noshow(request):
    """Clear a single no-show flag (POST with ``user_id`` and ``event_id``).

    Answers a JSON status: 'OK' when exactly one matching entry was cleared,
    otherwise an explanatory error status.
    """
    if request.method != 'POST':
        return JsonResponse({'status': 'fel request'})
    # QueryDict.get never raises, so the original try/except here was dead code.
    user_id = request.POST.get('user_id')
    event_id = request.POST.get('event_id')
    no_shows = EntryAsPreRegistered.objects.filter(user_id=user_id, event_id=event_id, no_show=True)
    # len() evaluates and caches the queryset once; also removed the leftover
    # debug print() from the original.
    count = len(no_shows)
    if count == 1:
        # Bind the entry once so the mutation and the save hit the same object.
        entry = no_shows[0]
        entry.no_show = False
        entry.save()
        return JsonResponse({'status': 'OK'})
    elif count == 0:
        return JsonResponse({'status': 'Ingen no show hittades'})
    return JsonResponse({'status': 'Error: fler än ett no show hittades'})
@login_required()
def cancel(request, pk=None):
    """Request cancellation of an event and notify the admins by e-mail.

    POST with a DeleteForm sets the event to BEING_CANCELD and mails the
    reason; a GET renders the administration page. Administrators only.
    """
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    # BUG FIX: the original only created `form`/`form_user` on the POST path,
    # so the final render on a GET request raised NameError. Initialize both
    # up front.
    form = DeleteForm(request.POST or None)
    form_user = ""
    if request.method == 'POST':
        if form.is_valid():
            event.status = Event.BEING_CANCELD
            event.cancel_message = form.cleaned_data["cancel"]
            event.save()
            form_user = form.cleaned_data["cancel"]
            body = "<h1>Hej!</h1><br><br><p>Det finns nya event att ställa in på i-Portalen.<br><a href='https://www.i-portalen.se/article/unapproved/'>Klicka här!</a></p><br><br><p>Med vänliga hälsningar, <br><br>Admins @ webgroup" + form_user
            send_mail('Nytt event att ställa in', '', settings.EMAIL_HOST_USER, ['admin@isektionen.se'], fail_silently=False, html_message=body)
            messages.success(request, _("Dina ändringar har skickats för granskning."))
            # vill låsa radera knapp
        else:
            messages.error(request, _("Det har ej fyllts i varför eventet önskas raderas."))
        return redirect("events:administer event", pk=pk)
    # vill stanna kvar på sidan
    return render(request, 'events/administer_event.html', {'event': event, 'form': form, 'form_user': form_user, })
| 25,987 | 7,555 |
# Read numbers from stdin until the sentinel 'done', then report the count,
# total and average of the valid entries.
intNum = 0
fltTotal = 0.0
while True:
    strVal = input('Enter a number: ')
    if strVal == 'done':
        break
    try:
        fltVal = float(strVal)
    except ValueError:
        print('Invalid Input value, continuing...')
        continue
    intNum += 1
    fltTotal += fltVal
# BUG FIX: guard against ZeroDivisionError when 'done' is entered before any
# valid number.
if intNum:
    print("The number of valid lines:{}, the total:{}, the average:{}".format(intNum, fltTotal, fltTotal / intNum))
else:
    print("No valid numbers were entered.")
| 411 | 135 |
#!/usr/bin/env python
# This program is public domain
#
# Phase inversion author: Norm Berk
# Translated from Mathematica by Paul Kienzle
#
# Phase reconstruction author: Charles Majkrzak
# Converted from Fortran by Paul Kienzle
#
# Reflectivity calculation author: Paul Kienzle
#
# The National Institute of Standards and Technology makes no representations
# concerning this particular software and is not bound in any way to correct
# possible errors or to provide extensions, upgrades or any form of support.
#
# This disclaimer must accompany any public distribution of this software.
# Note: save this file as invert to run as a stand-alone program.
"""
Core classes and functions:
* :class:`Interpolator`
Class that performs data interpolation.
* :class:`Inversion`
Class that implements the inversion calculator.
* :class:`SurroundVariation`
Class that performs the surround variation calculation.
* :func:`refl`
Reflectometry as a function of Qz and wavelength.
* :func:`reconstruct`
Phase reconstruction by surround variation magic.
* :func:`valid_f`
Calculate vector function using only the finite elements of the array.
Command line phase reconstruction and phase inversion::
invert -u 2.07 -v 6.33 0 --Qmin 0.014 --thickness 1000 qrd1.refl qrd2.refl
Command line phase + inversion only::
invert --thickness=150 --Qmax 0.35 wsh02_re.dat
Scripts can use :func:`reconstruct` and :func:`invert`. For example:
.. doctest::
>>> from direfl.invert import reconstruct, invert
>>> substrate = 2.07
>>> f1, f2 = 0, -0.53
>>> phase = reconstruct("file1", "file2", substrate, f1, f2)
>>> inversion = invert(data=(phase.Q, phase.RealR), thickness=200)
>>> inversion.plot()
>>> inversion.save("profile.dat")
The resulting profile has attributes for the input (*Q*, *RealR*) and the
output (*z*, *rho*, *drho*). There are methods for plotting (*plot*,
*plot_residual*) and storing (*save*). The analysis can be rerun with
different attributes (*run(key=val, ...)*).
See :func:`reconstruct` and :class:`Inversion` for details.
The phase reconstruction algorithm is described in [Majkrzak2003]_. The
phase inversion algorithm is described in [Berk2009]_ and references therein.
It is based on the partial differential equation solver described
in [Sacks1993]_.
References
==========
.. [Majkrzak2003] C. F. Majkrzak, N. F. Berk and U. A. Perez-Salas,
"Phase-Sensitive Neutron Reflectometry", *Langmuir* 19, 7796-7810 (2003).
.. [Berk2009] N. F. Berk and C. F. Majkrzak, "Statistical analysis of
phase-inversion neutron specular reflectivity", *Langmuir* 25, 4132-4144 (2009).
.. [Sacks1993] P.E. Sacks, *Wave Motion* 18, 21-30 (1993).
"""
from __future__ import division, print_function
import os
from functools import reduce
import numpy as np
from numpy import (
pi, inf, nan, sqrt, exp, sin, cos, tan, log,
ceil, floor, real, imag, sign, isinf, isnan, isfinite,
diff, mean, std, arange, diag, isscalar)
from numpy.fft import fft
# The following line is temporarily commented out because Sphinx on Windows
# tries to document the three modules as part of inversion.api.invert when it
# should be skipping over them. The problem may be caused by numpy shipping
# these modules in a dll (mtrand.pyd) instead of in .pyc or .pyo files.
# Furthermore, Sphinx 1.0 generates non-fatal error messages when processing
# these imports and Sphinx 0.6.7 generates fatal errors and will not create the
# documentation. Sphinx on Linux does not exhibit these problems. The
# workaround is to use implicit imports in the functions or methods that use
# these functions.
#from numpy.random import uniform, poisson, normal
from .calc import convolve
from .util import isstr
# Custom colors
DARK_RED = "#990000"
# Common SLDs (scattering length densities, in units of 10^-6 inv A^2,
# matching the units documented for Inversion.rho)
silicon = Si = 2.07
sapphire = Al2O3 = 5.0
water = H2O = -0.56
heavywater = D2O = 6.33
lightheavywater = HDO = 2.9  # 50-50 mixture of H2O and D2O
def invert(**kw):
    """
    Invert data returning an :class:`Inversion` object.

    If *outfile* is specified, save z, rho, drho to the named file.
    If *plot* is True, show a plot before returning.
    All remaining keyword arguments are forwarded to :class:`Inversion`.
    """
    show_plot = kw.pop('plot', True)
    save_path = kw.pop('outfile', None)
    inverter = Inversion(**kw)
    inverter.run()
    if save_path is not None:
        inverter.save(save_path)
    if show_plot:
        import pylab
        inverter.plot()
        # Block until the user clicks, so the figure stays visible.
        pylab.ginput(show_clicks=False)
    return inverter
class Inversion():
"""
Class that implements the inversion calculator.
This object holds the data and results associated with the direct inversion
of the real value of the phase from a reflected signal.
Inversion converts a real reflectivity amplitude as computed by
:func:`reconstruct` into a step profile of scattering length density
as a function of depth. This process will only work for real-valued
scattering potentials - with non-negligible absorption the results
will be incorrect. With X-rays, the absorption is too high for this
technique to be used successfully. For details on the underlying
theory, see [Berk2009]_.
The following attributes and methods are of most interest:
**Inputs:**
================= =========================================================
Input Parameters Description
================= =========================================================
*data* The name of an input file or a pair of vectors (Q, RealR)
where RealR is the real portion of the complex
reflectivity amplitude.input filename or Q, RealR data
(required).
*thickness* (400) Defines the total thickness of the film of interest. If
the value chosen is too small, the inverted profile will
not be able to match the input reflection signal. If
the thickness is too large, the film of interest should
be properly reconstructed, but will be extended into a
reconstructed substrate below the film.film thickness.
*substrate* (0) It is the scattering length density of the substrate. The
inversion calculation determines the scattering length
densities (SLDs) within the profile relative to the SLD
of the substrate. Entering the correct value of
substrate will shift the profile back to the correct
values.
*bse* (0) It is the bound state energy correction factor. Films
with large negative potentials at their base sometimes
produce an incorrect inversion, as indicated by an
incorrect value for the substrate portion of a film. A
value of substrate SLD - bound state SLD seems to correct
the reconstruction.
*Qmin* (0) Minimum Q to use from data. Reduce *Qmax* to avoid
contamination from noise at high Q and improve precision.
However, doing this will reduce the size of the features
that you are sensitive to in your profile.
*Qmax* (None) Maximum Q to use from data. Increase *Qmin* to avoid
values at low Q which will not have the correct phase
reconstruction when Q is less than Qc^2 for both surround
variation measurements used in the phase reconstruction
calculation. Use this technique sparingly --- the overall
shape of the profile is sensitive to data at low Q.
*backrefl* (True) Reflection measured through the substrate. It is True if
the film is measured with an incident beam through the
substrate rather than the surface.
================= =========================================================
**Uncertainty controls:**
Uncertainty is handled by averaging over *stages* inversions with noise
added to the input data for each inversion. Usually the measurement
uncertainty is estimated during data reduction and phase reconstruction,
and Gaussian noise is added to the data. This is scaled by a factor of
*noise* so the effects of noisier or quieter input are easy to estimate.
If the uncertainty estimate is not available, 5% relative noise per point
is assumed.
If *monitor* is specified, then Poisson noise is used instead, according to
the following::
*noise* U[-1, 1] (poisson(*monitor* |real R|)/*monitor* - |real R|)
That is, a value is pulled from the Poisson distribution of the expected
counts, and the noise is the difference between this and the actual counts.
This is further scaled by a fudge factor of *noise* and a further random
uniform in [-1, 1].
==================== =======================================================
Uncertainty controls Description
==================== =======================================================
*stages* (4) number of inversions to average over
*noise* (1) noise scale factor
*monitor* (None) incident beam intensity (poisson noise source)
==================== =======================================================
**Inversion controls:**
=================== ========================================================
Inversions controls Description
=================== ========================================================
*rhopoints* (128) number of steps in the returned profile. If this value
is too low, the profile will be coarse. If it is too
high, the computation will take a long time. The
additional smoothness generated by a high value of
*rhopoints* is illusory --- the information content of
the profile is limited by the number of Q points which
have been measured. Set *rhopoints* to (1/*dz*) for a
step size near *dz* in the profile.
*calcpoints* (4) number of internal steps per profile step. It is used
internally to improve the accuracy of the calculation.
For larger values of *rhopoints*, smaller values of
*calcpoints* are feasible.
*iters* (6) number of iterations to use for inversion. A value of 6
seems to work well. You can observe this by setting
*showiters* to True and looking at the convergence of
each stage of the averaging calculation.
*showiters* (False) set to true to show inversion converging. Click the
graph to move to the next stage.
*ctf_window* (0) cosine transform smoothing. In practice, it is set to 0
for no smoothing.
=================== ========================================================
**Computed profile:**
The reflectivity computed from *z*, *rho* will not match the input data
because the effect of the substrate has been removed in the process of
reconstructing the phase. Instead, you will need to compute reflectivity
from *rho*-*substrate* on the reversed profile. This is done in
:meth:`refl` when no surround material is selected, and can be used to show
the difference between measured and inverted reflectivity. You may need to
increase *calcpoints* or modify *thickness* to get a close match.
====================== ===========================================================
Computed profile Description
====================== ===========================================================
*Qinput*, *RealRinput* input data. The input data *Qinput*, *RealRinput* need to
be placed on an even grid going from 0 to *Qmax* using
linear interpolation. Values below *Qmin* are set to
zero, and the number of points between *Qmin* and *Qmax*
is preserved. This resampling works best when the input
data are equally spaced, starting at k*dQ for some k.
*Q*, *RealR*, *dRealR* output data. The returned *Q*, *RealR*, *dRealR* are the
values averaged over multiple stages with added noise.
The plots show this as the range of input variation used
in approximating the profile variation.
*z* represents the depth into the profile. *z* equals
*thickness* at the substrate. If the thickness is correct,
then *z* will be zero at the top of the film, but in
practice the *thickness* value provided will be larger
than the actual film thickness, and a portion of the vacuum
will be included at the beginning of the profile.
*rho* It is the SLD at depth *z* in units of 10^-6 inv A^2. It
is calculated from the average of the inverted profiles
from the noisy data sets, and includes the correction for
the substrate SLD defined by *substrate*. The inverted
*rho* will contain artifacts from the abrupt cutoff in the
signal at *Qmin* and *Qmax*.
*drho* It is the uncertainty in the SLD profile at depth *z*. It
is calculated from the standard deviation of the inverted
profiles from the noisy data sets. The uncertainty *drho*
does not take into account the possible variation in the
signal above *Qmax*.
*signals* It is a list of the noisy (Q, RealR) input signals generated
by the uncertainty controls.
*profiles* It is a list of the corresponding (z, rho) profiles. The
first stage is computed without noise, so *signals[0]*
contains the meshed input and *profiles[0]* contains the
output of the inversion process without additional noise.
====================== ===========================================================
**Output methods:**
The primary output methods are
============== ===========================================================
Output methods Description
============== ===========================================================
*save* save the profile to a file.
*show* show the profile on the screen.
*plot* plot data and profile.
*refl* compute reflectivity from profile.
*run* run or rerun the inversion with new settings.
============== ===========================================================
**Additional methods for finer control of plots:**
=============== ===========================================================
Output methods Description
=============== ===========================================================
*plot_data* plot just the data.
*plot_profile* plot just the profile.
*plot_residual* plot data minus theory.
=============== ===========================================================
"""
# Global parameters for the class and their default values
substrate = 0        # substrate SLD (10^-6 inv A^2)
thickness = 400      # film thickness (A); should exceed the actual film thickness
calcpoints = 4       # inversion mesh density relative to rhopoints
rhopoints = 128      # number of depth steps in the returned profile
Qmin = 0             # input data below Qmin are set to zero
Qmax = None          # input data above Qmax are trimmed; None keeps all points
iters = 6            # number of inversion iterations
stages = 10          # number of noisy resynthesis stages to average over
ctf_window = 0       # cosine transform smoothing; 0 = no smoothing in practice
backrefl = True      # True if measured through the substrate (back reflectivity)
noise = 1            # noise scale factor for the resynthesis stages
bse = 0              # bound-state energy correction (10^-6 inv A^2)
showiters = False    # True to show the inversion converging stage by stage
monitor = None       # incident beam monitor counts; enables Poisson noise model
def __init__(self, data=None, **kw):
    """
    Load *data* (a filename or (Q, RealR[, dRealR]) vectors) and apply
    any keyword settings.
    """
    # A string is a filename to load; anything else is assumed to be
    # a pair, e.g., a tuple, a list, or an Nx2 array.
    loader = self._loaddata if isstr(data) else self._setdata
    loader(data)
    # Run with current keywords
    self._set(**kw)
def _loaddata(self, file):
    """
    Load data from a file of Q, real(R), dreal(R) columns.
    """
    columns = np.loadtxt(file).T
    self._setdata(columns, name=file)
def _setdata(self, data, name="data"):
    """
    Set *Qinput*, *RealRinput* from Q, real(R) vectors.

    *data* is either (q, rer) or (q, rer, drer); the uncertainty
    column is optional.
    """
    self.name = name
    drer = None
    if len(data) == 3:
        q, rer, drer = data
    else:
        q, rer = data
    self.Qinput = np.asarray(q)
    self.RealRinput = np.asarray(rer)
    self.dRealRinput = None if drer is None else np.asarray(drer)
def _remesh(self):
    """
    Return Qmeshed, RealRmeshed, dRealRmeshed.

    Resamples the data on an even grid, setting values below Qmin and above
    Qmax to zero.  The number of points between Qmin and Qmax is preserved.
    This works best when data are equally spaced to begin with, starting at
    k*dQ for some k.
    """
    q, rer, drer = self.Qinput, self.RealRinput, self.dRealRinput
    if drer is None:
        drer = 0*rer
    # Trim from Qmin to Qmax
    if self.Qmin is not None:
        idx = q >= self.Qmin
        q, rer, drer = q[idx], rer[idx], drer[idx]
    if self.Qmax is not None:
        idx = q <= self.Qmax
        q, rer, drer = q[idx], rer[idx], drer[idx]
    # Resample on an even-spaced grid from 0 to q[-1], preserving
    # approximately the number of points between Qmin and Qmax.
    dq = (q[-1]-q[0])/(len(q) - 1)
    npts = int(q[-1]/dq + 1.5)
    # Keep the trimmed abscissae: the uncertainty must be interpolated
    # against the SAME grid as the data.  (The previous code remeshed
    # drer against the already-remeshed q, handing np.interp vectors of
    # mismatched length.)
    q_trimmed = q
    q, rer = remesh([q_trimmed, rer], 0, q_trimmed[-1], npts, left=0, right=0)
    # Process uncertainty
    if self.dRealRinput is not None:
        _, drer = remesh([q_trimmed, drer], 0, q_trimmed[-1], npts,
                         left=0, right=0)
    else:
        drer = None
    return q, rer, drer
def run(self, **kw):
    """
    Run multiple inversions with resynthesized data for each.

    All control keywords from the constructor can be used, except
    *data* and *outfile*.

    Sets *signals* to the list of noisy (Q, RealR) signals and sets
    *profiles* to the list of generated (z, rho) profiles.
    """
    from numpy.random import uniform, poisson, normal
    self._set(**kw)
    q, rer, drer = self._remesh()
    signals = []
    profiles = []
    # Stage 0 is always noise free; extra noisy stages only when requested.
    stages = self.stages if self.noise > 0 else 1
    for i in range(stages):
        if i == 0:
            # Use data noise for the first stage
            noisyR = rer
        elif self.monitor is not None:
            # Use incident beam as noise source
            pnoise = poisson(self.monitor*abs(rer))/self.monitor - abs(rer)
            unoise = uniform(-1, 1, rer.shape)
            noisyR = rer + self.noise*unoise*pnoise
        elif drer is not None:
            # Use gaussian uncertainty estimate as noise source
            noisyR = rer + normal(0, 1)*self.noise*drer
        else:
            # Use 5% relative amplitude as noise source
            noisyR = rer + normal(0, 1)*self.noise*0.05*abs(rer)
        # Invert the noisy signal: cosine transform, then iteration.
        ctf = self._transform(noisyR, Qmax=q[-1],
                              bse=self.bse, porder=1)
        qp = self._invert(ctf, iters=self.iters)
        if self.showiters: # Show individual iterations
            import pylab
            pylab.cla()
            for qpi in qp:
                pylab.plot(qpi[0], qpi[1])
            pylab.ginput(show_clicks=False)
        # Resample the final iterate onto the requested depth grid.
        z, rho = remesh(qp[-1], 0, self.thickness, self.rhopoints)
        if not self.backrefl:
            # Front reflectivity: present the profile surface first.
            z, rho = z[::-1], rho[::-1]
        signals.append((q, noisyR))
        profiles.append((z, rho))
    self.signals, self.profiles = signals, profiles
def chisq(self):
    """
    Compute the normalized sum squared difference between the original
    real R and the real R for the inverted profile.

    Points with negligible uncertainty are excluded to avoid division
    by zero.
    """
    idx = self.dRealR > 1e-15
    q, rer, drer = self.Q[idx], self.RealR[idx], self.dRealR[idx]
    rerinv = real(self.refl(q))
    chisq = np.sum(((rer - rerinv)/drer)**2)/len(q)
    return chisq
# Computed attributes, derived on demand from the inversion stages.
@property
def z(self):
    """Inverted SLD profile depth in Angstroms."""
    return self.profiles[0][0]

@property
def rho(self):
    """Inverted SLD profile in 10^-6 * inv A^2 units."""
    # Average over all stages, restoring the substrate SLD that was
    # removed during phase reconstruction.
    return mean([p[1] for p in self.profiles], axis=0) + self.substrate

@property
def drho(self):
    """Inverted SLD profile uncertainty."""
    return std([p[1] for p in self.profiles], axis=0)

@property
def Q(self):
    """Inverted profile calculation points."""
    return self.signals[0][0]

@property
def RealR(self):
    """Average inversion free film reflectivity input."""
    return mean([p[1] for p in self.signals], axis=0)

@property
def dRealR(self):
    """Free film reflectivity input uncertainty."""
    return std([p[1] for p in self.signals], axis=0)
def show(self):
    """Print z, rho, drho to the screen."""
    print("# %9s %11s %11s"%("z", "rho", "drho"))
    for zi, rhoi, drhoi in zip(self.z, self.rho, self.drho):
        print("%11.4f %11.4f %11.4f"%(zi, rhoi, drhoi))
def save(self, outfile=None):
    """
    Save z, rho, drho to a three column text file named *outfile*.

    **Parameters:**

        *outfile:* string or None
            If *outfile* is not provided, the name of the input file
            will be used, but with the extension replaced by '.amp'.

    **Returns:**

        *None*
    """
    if outfile is None:
        basefile = os.path.splitext(os.path.basename(self.name))[0]
        outfile = basefile+os.extsep+"amp"
    # Context manager guarantees the file is closed even on write errors.
    with open(outfile, "w") as fid:
        fid.write("# Z Rho dRho\n")
        np.savetxt(fid, np.array([self.z, self.rho, self.drho]).T)
def refl(self, Q=None, surround=None):
    """
    Return the complex reflectivity amplitude.

    **Parameters:**

        *Q:* vector or None
            Use *Q* if provided, otherwise use the evenly spaced Q values
            used for the inversion.

        *surround:* float or None
            If *surround* is provided, compute the reflectivity for the free
            film in the context of the substrate and the surround, otherwise
            compute the reflectivity of the reversed free film embedded in
            the substrate to match against the reflectivity amplitude
            supplied as input.

    **Returns:**

        *r* complex reflectivity amplitude at each Q
    """
    if Q is None:
        Q = self.Q
    if self.backrefl:
        # Back reflectivity is equivalent to -Q inputs
        Q = -Q
    if surround is None:
        # Phase reconstructed free film reflectivity is reversed,
        # and has an implicit substrate in front and behind.
        surround = self.substrate
        Q = -Q
    # Semi-infinite media at both ends get zero width.
    dz = np.hstack((0, diff(self.z), 0))
    rho = np.hstack((surround, self.rho[1:], self.substrate))
    r = refl(Q, dz, rho)
    return r
def plot(self, details=False, phase=None):
    """
    Plot the data and the inversion.

    **Parameters:**

        *details:* boolean
            If *details* is True, then plot the individual stages used to
            calculate the average, otherwise just plot the envelope.

        *phase:* phase reconstruction object or None
            If *phase* is a phase reconstruction object, plot the original
            measurements.

    **Returns:**

        *None*
    """
    import pylab
    if phase:
        # Show measurement and imaginary amplitude from the
        # reconstruction alongside the inversion results.
        pylab.subplot(221)
        phase.plot_measurement(profile=(self.z, self.rho))
        pylab.subplot(223)
        phase.plot_imaginary()
    # 2x2 grid when phase plots are present, otherwise stacked 2x1.
    pylab.subplot(222 if phase else 211)
    self.plot_profile(details=details)
    pylab.subplot(224 if phase else 212)
    self.plot_input(details=details)
def plot6(self, details=False, phase=None):
    # This is an alternate to plot for evaluation purposes, using a 3x2
    # grid which additionally shows the phase and the residuals.
    import pylab
    if phase:
        pylab.subplot(321)
        phase.plot_measurement(profile=(self.z, self.rho))
        pylab.subplot(323)
        phase.plot_imaginary()
        pylab.subplot(325)
        phase.plot_phase()
    pylab.subplot(322 if phase else 311)
    self.plot_profile(details=details)
    pylab.subplot(324 if phase else 312)
    self.plot_input(details=details)
    pylab.subplot(326 if phase else 313)
    self.plot_residual()
def plot_input(self, details=False, lowQ_inset=0):
    """
    Plot the real R vs. the real R computed from inversion.

    **Parameters**

        *details:* boolean
            If *details* is True, then plot the individual stages used to
            calculate the average, otherwise just plot the envelope.

        *lowQ_inset:* integer
            If *lowQ_inset* > 0, then plot a graph of Q, real R values
            below lowQ_inset, without scaling by Q**2.

    **Returns:**

        *None*
    """
    from matplotlib.font_manager import FontProperties
    import pylab
    if details:
        plotamp(self.Qinput, self.RealRinput)
        for p in self.signals:
            plotamp(self.Q, p[1])
    else:
        # Envelope of the noisy stages, the original input, and the
        # reflectivity recomputed from the inverted profile.
        plotamp(self.Q, self.RealR, dr=self.dRealR, label=None,
                linestyle='', color="blue")
        plotamp(self.Qinput, self.RealRinput, label="Input",
                color="blue")
        Rinverted = real(self.refl(self.Qinput))
        plotamp(self.Qinput, Rinverted, color=DARK_RED, label="Inverted")
        pylab.legend(prop=FontProperties(size='medium'))
        chisq = self.chisq() # Note: cache calculated profile?
        pylab.text(0.01, 0.01, "chisq=%.1f"%chisq,
                   transform=pylab.gca().transAxes,
                   ha='left', va='bottom')
        if lowQ_inset > 0:
            # Low Q inset, drawn without the Q^2 scaling
            orig = pylab.gca()
            box = orig.get_position()
            ax = pylab.axes([box.xmin+0.02, box.ymin+0.02,
                             box.width/4, box.height/4],
                            axisbg=[0.95, 0.95, 0.65, 0.85])
            ax.plot(self.Qinput, self.RealRinput, color="blue")
            ax.plot(self.Qinput, Rinverted)
            ax.text(0.99, 0.01, "Q, Real R for Q<%g"%lowQ_inset,
                    transform=ax.transAxes, ha='right', va='bottom')
            qmax = lowQ_inset
            ymax = max(max(self.RealRinput[self.Qinput < qmax]),
                       max(Rinverted[self.Qinput < qmax]))
            pylab.setp(ax, xticks=[], yticks=[],
                       xlim=[0, qmax], ylim=[-1, 1.1*(ymax+1)-1])
            pylab.axes(orig)
    plottitle('Reconstructed Phase')
def plot_profile(self, details=False, **kw):
    """
    Plot the computed profiles.

    **Parameters:**

        *details:* boolean
            If *details* is True, then plot the individual stages used to
            calculate the average, otherwise just plot the envelope.

    **Returns:**

        *None*
    """
    import pylab
    pylab.grid(True)
    if details:
        for p in self.profiles:
            pylab.plot(p[0], p[1]+self.substrate)
    else:
        # Mean profile with a +/- drho uncertainty band.
        z, rho, drho = self.z, self.rho, self.drho
        [h] = pylab.plot(z, rho, color=DARK_RED, **kw)
        pylab.fill_between(z, rho-drho, rho+drho,
                           color=h.get_color(), alpha=0.2)
        #pylab.plot(z, rho+drho, '--', color=h.get_color())
        #pylab.plot(z, rho-drho, '--', color=h.get_color())
    pylab.text(0.01, 0.01, 'surface',
               transform=pylab.gca().transAxes,
               ha='left', va='bottom')
    pylab.text(0.99, 0.01, 'substrate',
               transform=pylab.gca().transAxes,
               ha='right', va='bottom')
    pylab.ylabel('SLD (inv A^2)')
    pylab.xlabel('Depth (A)')
    plottitle('Depth Profile')
def plot_residual(self, details=False):
    """
    Plot the residuals (inversion minus input).

    **Parameters:**

        *details:* boolean
            If *details* is True, then plot the individual stages used to
            calculate the average, otherwise just plot the envelope.

    **Returns:**

        *None*
    """
    import pylab
    q_in, rer_in = self.Qinput, self.RealRinput
    amplitude = self.refl(q_in)
    # Scale by Q^2 so the high-Q residuals remain visible.
    pylab.plot(q_in, q_in**2*(real(amplitude)-rer_in))
    pylab.ylabel('Residuals [Q^2 * (Real R - input)]')
    pylab.xlabel("Q (inv A)")
    plottitle('Phase Residuals')
def _set(self, **kw):
    """
    Set a group of attributes from keyword arguments.

    Raises ValueError for a keyword which does not match an existing
    attribute.
    """
    for k, v in kw.items():
        if not hasattr(self, k):
            # Name the offending keyword so the caller can correct it.
            raise ValueError("Invalid keyword argument %r for Inversion class"
                             % k)
        setattr(self, k, v)
    # Conversion factor to SLD units of 10^-6 inv A^2 (hence the 1e6);
    # depends on thickness, so recompute whenever settings change.
    self.rhoscale = 1e6 / (4 * pi * self.thickness**2)
def _transform(self, RealR, Qmax=None, bse=0, porder=1):
    """
    Returns the cosine transform function used by inversion.

    *bse* is bound-state energy, with units of 10^-6 inv A^2. It was used
    in the past to handle profiles with negative SLD at the beginning, but
    the plain correction of bse=0 has since been found to be good
    enough for the profiles we are looking at. *porder* is the order of the
    interpolating polynomial, which must be 1 for the current interpolation
    class.
    """
    if not 0 <= porder <= 6:
        raise ValueError("Polynomial order must be between 0 and 6")
    # Step sizes for the Q grid and the depth grid of the transform.
    npts = len(RealR)
    dK = 0.5 * Qmax / npts
    kappa = sqrt(bse*1e-6)
    dx = self.thickness/self.rhopoints
    xs = dx*arange(2*self.rhopoints)
    dim = int(2*pi/(dx*dK))
    if dim < len(xs):
        raise ValueError("Q spacing is too low for the given thickness")
    # 1/sqrt(dim) is the normalization convention for Mathematica FFT
    ct = real(fft(RealR, dim)/sqrt(dim))
    convertfac = 2*dK/pi * sqrt(dim) * self.thickness
    ctdatax = convertfac * ct[:len(xs)] # * rhoscale
    ## PAK <--
    ## Mathematica guarantees that the interpolation function
    ## goes through the points, so Interpolator(xs, ctall)(xs)
    ## is just the same as ctall, and so newctall is just ctdatax.
    ## Furthermore, "ctf[x_] := newctif[x]" is an identity transform
    ## and is not necessary. In the end, we only need one
    ## interpolator plus the correction for ctf[0] == 0.
    #ctall = ctdatax
    #ctif = Interpolation(xs, ctall, InterpolationOrder -> porder)
    #newctall = ctif(xs)
    #newctif = Interpolation(xs, newctall, InterpolationOrder -> porder)
    #ctf[x_] := newctif[x]
    # This is the uncorrected Cosine Transform
    #newctf[x_] := ctf[x] - exp(-kappa*x) * ctf[0]
    # This is the boundstate-corrected Cosine Transform
    ## PAK -->
    # This is the uncorrected Cosine Transform
    raw_ctf = Interpolator(xs, ctdatax, porder=porder)
    # This is the boundstate-corrected Cosine Transform
    ctf = lambda x: raw_ctf(x) - exp(-kappa*x) * raw_ctf(0)
    return ctf
def _invert(self, ctf, iters):
    """
    Perform the inversion.

    *ctf* is the cosine transform function returned by :meth:`_transform`;
    *iters* is the number of refinement passes.  Returns the list of
    (depth, scaled q) profiles, one per iteration, with the last entry
    being the final profile (see :meth:`run`, which uses qp[-1]).
    """
    dz = 2/(self.calcpoints*self.rhopoints)
    x = arange(0, ceil(2/dz))*dz
    maxm = len(x)
    # Force an odd number of mesh points.
    if maxm%2 == 0:
        maxm += 1
    mx = int(maxm/2+0.5)
    h = 2/(2*mx-3)
    # Sample the cosine transform, padding with zeros at the far end.
    g = np.hstack((ctf(x[:-1]*self.thickness), 0, 0, 0))
    q = 2 * diff(g[:-2])/h
    q[-1] = 0
    ut = arange(2*mx-2)*h*self.thickness/2
    if self.ctf_window > 0:
        # Smooth ctf with 3-sample approximation
        du = self.ctf_window*h*self.thickness/2
        qinter = Interpolator(ut, q, porder=1)
        q = (qinter(ut - du) + qinter(ut) + qinter(ut + du))/3
    q = np.hstack((q, 0))
    qp = [(ut, -2*q*self.rhoscale)]
    Delta = np.zeros((mx, 2*mx), 'd')
    for iter in range(iters):
        # Build the correction matrix Delta row by row; each row depends
        # on the two previous rows, so the order is significant.
        for m in range(2, mx):
            n = np.array(range(m, 2*mx-(m+1)))
            Delta[m, n] = (
                h**2 * q[m-1] * (g[m+n] + Delta[m-1, n])
                + Delta[m-1, n+1] + Delta[m-1, n-1] - Delta[m-2, n])
        # Refine q from the corrected diagonal and rescale the depth grid.
        udiag = -g[:2*mx-2:2] - diag(Delta)[:mx-1]
        mup = len(udiag) - 2
        h = 1/mup
        ut = arange(mup)*h*self.thickness
        q = 2 * diff(udiag[:-1])/h
        qp.append((ut, self.rhoscale*q))
        q = np.hstack((q, 0, 0))
    return qp
def plottitle(title):
    """Draw *title* above the current axes."""
    import pylab
    # The title is drawn as text rather than as a title object so that it
    # is not overlapped by the legend, and so that it stays as close as
    # possible to the plot when the window is resized to a smaller size.
    axes = pylab.gca()
    pylab.text(0.5, 1.07, title,
               fontsize='medium', ha='center', va='top',
               transform=axes.transAxes,
               backgroundcolor=(0.9, 0.9, 0.6))
def plotamp(Q, r, dr=None, scaled=True, ylabel="Real R", **kw):
    """
    Plot Q, realR data.

    **Parameters:**

        *Q*, *r* data to plot
        *dr* optional uncertainty band on *r*
        *scaled* if True, scale the amplitude by (100 Q)^2
        *ylabel* label for the amplitude axis
    """
    import pylab
    scale = 1e4*Q**2 if scaled else 1
    if scaled:
        ylabel = "(100 Q)^2 "+ylabel
    [h] = pylab.plot(Q, scale*r, **kw)
    if dr is not None:
        pylab.fill_between(Q, scale*(r-dr), scale*(r+dr),
                           color=h.get_color(), alpha=0.2)
    pylab.ylabel(ylabel)
    # Raw string so \A in the TeX label is not treated as a string escape
    # (same bytes at runtime; avoids the invalid-escape SyntaxWarning).
    pylab.xlabel(r"Q $[\AA^{-1}]$")
class Interpolator():
    """
    Linear interpolation function built from pairs (xi, yi).

    Evaluation outside the data range is clamped to the end values.
    """
    def __init__(self, xi, yi, porder=1):
        if len(xi) != len(yi):
            raise ValueError("xi:%d and yi:%d must have the same length"
                             %(len(xi), len(yi)))
        self.xi, self.yi = xi, yi
        self.porder = porder
        # np.interp is piecewise linear; higher orders are unsupported.
        if porder != 1:
            raise NotImplementedError(
                "Interpolator only supports polynomial order of 1")

    def __call__(self, x):
        return np.interp(x, self.xi, self.yi)
def phase_shift(q, r, shift=0):
    """Apply the linear phase factor exp(i*shift*q) to the amplitude *r*."""
    factor = exp(1j*shift*q)
    return r*factor
def remesh(data, xmin, xmax, npts, left=None, right=None):
    """
    Resample (x, y) data onto an even grid of *npts* points spanning
    [*xmin*, *xmax*] using linear interpolation.

    *left*, *right* are the fill values used outside the data range
    (np.interp uses the end values when these are None).  If *npts*
    exceeds the number of data points, it is clamped.  Returns the
    2 x npts array [newx, newy].
    """
    x, y = data
    x, y = np.asarray(x), np.asarray(y)
    # Drop points where EITHER coordinate is non-finite.  A single
    # combined mask keeps x and y the same length; filtering each array
    # by its own finiteness (as before) desynchronized them and made
    # np.interp fail whenever only one coordinate contained a NaN.
    keep = np.isfinite(x) & np.isfinite(y)
    x, y = x[keep], y[keep]
    if npts > len(x):
        npts = len(x)
    newx = np.linspace(xmin, xmax, npts)
    newy = np.interp(newx, x, y, left=left, right=right)
    return np.array((newx, newy))
# This program is public domain.
# Author: Paul Kienzle
"""
Optical matrix form of the reflectivity calculation.
O.S. Heavens, Optical Properties of Thin Solid Films
"""
def refl(Qz, depth, rho, mu=0, wavelength=1, sigma=0):
    """
    Reflectivity amplitude as a function of Qz and wavelength.

    **Parameters:**

        *Qz:* float|A
            Scattering vector 4*pi*sin(theta)/wavelength. This is an array.

        *depth:* float|A
            Thickness of each layer. The thickness of the incident medium
            and substrate are ignored.

        *rho, mu (uNb):* (float, float)|
            Scattering length density and absorption of each layer.

        *wavelength:* float|A
            Incident wavelength (angstrom).

        *sigma:* float|A
            Interfacial roughness. This is the roughness between a layer
            and the subsequent layer. There is no interface associated
            with the substrate. The sigma array should have at least n-1
            entries, though it may have n with the last entry ignored.

    **Returns:**

        *r* array of complex reflection amplitude, one value per Qz
    """
    if isscalar(Qz):
        Qz = np.array([Qz], 'd')
    n = len(rho)
    nQ = len(Qz)
    # Make everything into arrays
    kz = np.asarray(Qz, 'd')/2
    depth = np.asarray(depth, 'd')
    rho = np.asarray(rho, 'd')
    mu = mu*np.ones(n, 'd') if isscalar(mu) else np.asarray(mu, 'd')
    wavelength = wavelength*np.ones(nQ, 'd') \
        if isscalar(wavelength) else np.asarray(wavelength, 'd')
    sigma = sigma*np.ones(n-1, 'd') if isscalar(sigma) else np.asarray(sigma, 'd')
    # Scale units
    rho = rho*1e-6
    mu = mu*1e-6
    ## For kz < 0 we need to reverse the order of the layers
    ## Note that the interface array sigma is conceptually one
    ## shorter than rho, mu so when reversing it, start at n-1.
    ## This allows the caller to provide an array of length n
    ## corresponding to rho, mu or of length n-1.
    idx = (kz >= 0)
    r = np.empty(len(kz), 'D')
    r[idx] = _refl_calc(kz[idx], wavelength[idx], depth, rho, mu, sigma)
    r[~idx] = _refl_calc(
        abs(kz[~idx]), wavelength[~idx],
        depth[-1::-1], rho[-1::-1], mu[-1::-1],
        sigma[n-2::-1])
    r[abs(kz) < 1.e-6] = -1 # reflectivity at kz=0 is -1
    return r
def _refl_calc(kz, wavelength, depth, rho, mu, sigma):
    """Abeles matrix calculation of the reflection amplitude.

    Inputs are as prepared by :func:`refl`: *kz* and *wavelength* are
    per-point arrays; *depth*, *rho*, *mu* describe the layers from the
    incident medium to the substrate; *sigma* holds the interfacial
    roughnesses.
    """
    if len(kz) == 0:
        return kz
    ## Complex index of refraction is relative to the incident medium.
    ## We can get the same effect using kz_rel^2 = kz^2 + 4*pi*rho_o
    ## in place of kz^2, and ignoring rho_o.
    kz_sq = kz**2 + 4*pi*rho[0]
    k = kz
    # According to Heavens, the initial matrix should be [ 1 F; F 1],
    # which we do by setting B=I and M0 to [1 F; F 1]. An extra matrix
    # multiply versus some coding convenience.
    B11 = 1
    B22 = 1
    B21 = 0
    B12 = 0
    for i in range(0, len(rho)-1):
        # Wavevector inside the next layer, including absorption.
        k_next = sqrt(kz_sq - (4*pi*rho[i+1] + 2j*pi*mu[i+1]/wavelength))
        # Fresnel coefficient at the interface with an exponential
        # roughness attenuation factor.
        F = (k - k_next) / (k + k_next)
        F *= exp(-2*k*k_next*sigma[i]**2)
        # Phase factors across the layer; the incident medium (i == 0)
        # contributes no phase.
        M11 = exp(1j*k*depth[i]) if i > 0 else 1
        M22 = exp(-1j*k*depth[i]) if i > 0 else 1
        M21 = F*M11
        M12 = F*M22
        # Accumulate the matrix product B = B*M in place.
        C1 = B11*M11 + B21*M12
        C2 = B11*M21 + B21*M22
        B11 = C1
        B21 = C2
        C1 = B12*M11 + B22*M12
        C2 = B12*M21 + B22*M22
        B12 = C1
        B22 = C2
        k = k_next
    r = B12/B11
    return r
def reconstruct(file1, file2, u, v1, v2, stages=100):
    r"""
    Reconstruct the complex reflection amplitude from two
    surround-variation measurements.

    Two reflectivity measurements of a film with different surrounding media
    :math:`|r_1|^2` and :math:`|r_2|^2` can be combined to compute the expected
    complex reflection amplitude r_reversed of the free standing film measured
    from the opposite side. The calculation can be done by varying the fronting
    media or by varying the backing media. For this code we only support
    measurements through a uniform substrate *u*, on two varying surrounding
    materials *v1*, *v2*.

    We have to be careful about terminology. We will use the term substrate to
    mean the base on which we deposit our film of interest, and surface to be
    the material we put on the other side. The fronting or incident medium is
    the material through which the beam enters the sample. The backing
    material is the material on the other side. In back reflectivity, the
    fronting material is the substrate and the backing material is the surface.
    We are using u for the uniform substrate and v for the varying surface
    material.

    In the experimental setup at the NCNR, we have a liquid reservoir which we
    can place above the film. We measure first with one liquid in the reservoir
    such as heavy water (D2O) and again with air or a contrasting liquid such
    as water (H2O). At approximately 100 um, the reservoir depth is much
    thicker than the effective coherence length of the neutron in the z
    direction, and so can be treated as a semi-infinite substrate, even when it
    is empty.

    .. Note:: You cannot simulate a semi-infinite substrate using a large but
       finitely thick material using the reflectometry calculation; at
       best the resulting reflection will be a high frequency signal which
       smooths after applying the resolution correction to a magnitude
       that is twice the reflection from a semi-infinite substrate.

    The incident beam is measured through the substrate, and thus subject to
    the same absorption as the reflected beam. Refraction on entering and
    leaving the substrate is accounted for by a small adjustment to Q
    inside the reflectivity calculation.

    When measuring reflectivity through the substrate, the beam enters the
    substrate from the side, refracts a little because of the steep angle of
    entry, reflects off the sample, and leaves through the other side of the
    substrate with an equal but opposite refraction. The reflectivity
    calculation takes this into account. Traveling through several centimeters
    of substrate, some of the beam will get absorbed. We account for this
    either by entering an incident medium transmission coefficient in the
    reduction process, or by measuring the incident beam through the substrate
    so that it is subject to approximately the same absorption.

    The phase cannot be properly computed for Q values which are below the
    critical edge Qc^2 for both surround variations. This problem can be
    avoided by choosing a substrate which is smaller than the surround on at
    least one of the measurements. This measurement will not have a critical
    edge at positive Q. In order to do a correct footprint correction the
    other measurement should use a substrate SLD greater than the surround SLD.

    If the input file records uncertainty in the measurement, we perform a
    Monte Carlo uncertainty estimate of the reconstructed complex amplitude.

    **Inputs:**

    ================ =============================================================
    Input parameters Description
    ================ =============================================================
    *file1*, *file2* reflectivity measurements at identical Q values. *file1*
                     and *file2* can be pairs of vectors (q1, r1), (q2, r2) or files
                     containing at least two columns (q, r), with the remaining
                     columns such as dr, dq, and lambda ignored. If a third
                     vector, dr, is present in both datasets, then an uncertainty
                     estimate will be calculated for the reconstructed phase.
    *v1*, *v2*       SLD of varying surrounds in *file1* and *file2*
    *u*              SLD of the uniform substrate
    *stages*         number of trials in Monte Carlo uncertainty estimate
    ================ =============================================================

    Returns a :class:`SurroundVariation` object with the following attributes:

    ================== =========================================
    Attributes         Description
    ================== =========================================
    *RealR*, *ImagR*   real and imaginary reflectivity
    *dRealR*, *dImagR* Monte Carlo uncertainty estimate
    *name1*, *name2*   names of the input files
    *save(file)*       save Q, RealR, ImagR to a file
    *show()*, *plot()* display the results
    ================== =========================================

    **Notes:**

    There is a question of how beam effects (scale, background, resolution)
    will show up in the phase reconstruction. To understand this we can play
    with the reverse problem applying beam effects (intensity=A, background=B,
    resolution=G) to the reflectivity amplitude $r$ such that the computed
    $|r|^2$ matches the measured $R = A G*|r|^2 + B$, where $*$ is the
    convolution operator.

    There is a reasonably pretty solution for intensity and background: set
    $s = r \surd A + i r \surd B / |r|$ so that
    $|s|^2 = A |r|^2 + |r|^2 B/|r|^2 = A |r|^2 + B$. Since $r$ is complex,
    the intensity and background will show up in both real and imaginary
    channels of the phase reconstruction.

    It is not so pretty for resolution since the sum of the squares does not
    match the square of the sum:

    .. math::

        G * |r|^2 = \int G(q'-q)|r(q)|^2 dq \ne |\int G(q'-q)r(q)dq|^2 = |G*r|^2

    This is an area that may have been investigated in the 90's when the theory
    of neutron phase reconstruction and inversion was developing, but this
    reconstruction code does not do anything to take resolution into account.
    Given that we know $\Delta q$ for each measured $R$ we should be able to
    deconvolute using a matrix approximation to the integral:

    .. math::

        R = G R' \Rightarrow R' = G^{-1} R

    where each row of $G$ is the gaussian weights $G(q_k - q)$ with width
    $\Delta q_k$ evaluated at all measured points $q$. Trying this didn't
    produce a useful (or believable) result. Maybe it was a problem with the
    test code, or maybe it is an effect of applying an ill-conditioned
    linear operator over data that varies by orders of magnitude.
    So question: are there techniques for deconvoluting reflectivity curves?

    Going the other direction, we can apply a resolution function to $Re(r)$
    and $Im(r)$ to see how well they reproduce the resolution applied to
    $|r|^2$. The answer is that it does a pretty good job, but the overall
    smoothing is somewhat less than expected.

    .. figure:: ../images/resolution.png
        :alt: Reflectivity after applying resolution to amplitude.

        Amplitude effects of applying a 2% $\Delta Q/Q$ resolution to the
        complex amplitude prior to squaring.

    I'm guessing that our reconstructed amplitude is going to show a similar
    decay due to resolution. This ought to show up as a rounding off of edges
    in the inverted profile (guessing again from the effects of applying
    windowing functions to reduce ringing in the Fourier transform). This is
    intuitive: poor resolution should show less detail in the profile.
    """
    return SurroundVariation(file1, file2, u, v1, v2, stages=stages)
class SurroundVariation():
"""
See :func:`reconstruct` for details.
**Attributes:**
===================== ========================================
Attributes Description
===================== ========================================
*Q*, *RealR*, *ImagR* real and imaginary reflectivity
*dRealR*, *dImagR* Monte Carlo uncertainty estimate or None
*Qin*, *R1*, *R2* input data
*dR1*, *dR2* input uncertainty or None
*name1*, *name2* input file names
*save(file)* save output
*show()*, *plot()* show Q, RealR, ImagR
===================== ========================================
"""
backrefl = True
def __init__(self, file1, file2, u, v1, v2, stages=100):
    # See :func:`reconstruct` for parameter descriptions.
    self.u = u
    self.v1, self.v2 = v1, v2
    self._load(file1, file2)        # read the two measurements
    self._calc()                    # reconstruct RealR, ImagR
    self._calc_err(stages=stages)   # Monte Carlo uncertainty estimate
    self.clean()                    # drop NaN/Inf points from the result
def optimize(self, z, rho_initial):
    """
    Run a quasi-Newton optimizer on a discretized profile.

    **Parameters:**

        *z:* vector
            Represents the depth into the profile. z equals thickness at
            the substrate.

        *rho_initial:* vector
            The initial profile *rho_initial* should come from direct
            inversion.

    **Returns:**

        *(z, rho):* (vector, vector)
            Returns the final profile rho which minimizes chisq.
    """
    from scipy.optimize import fmin_l_bfgs_b as fmin

    def cost(rho):
        # Sum of squared weighted residuals against both measurements.
        R1, R2 = self.refl(z, rho, resid=True)
        return np.sum(R1**2) + np.sum(R2**2)

    # Gradient is estimated numerically, so keep the evaluation budget low.
    rho_final, f, d = fmin(cost, rho_initial, approx_grad=True, maxfun=20)
    return z, rho_final
def refl(self, z, rho, resid=False):
    """
    Return the reflectivities R1 and R2 for the film *z*, *rho* in the
    context of the substrate and surround variation.

    **Parameters:**

        *z:* vector
            Represents the depth into the profile. z equals thickness at
            the substrate.

        *rho:* vector
            SLD of the film at each depth *z*.

        *resid:* boolean
            If *resid* is True, then return the weighted residuals vector.

    **Returns:**

        *R1, R2:* (vector, vector)
            Return the reflectivities R1 and R2 for the film *z*, *rho*.
    """
    w = np.hstack((0, np.diff(z), 0))
    # Slot 0 holds the varying surround; the substrate is appended at
    # the far end.  The same profile is reused for both surrounds.
    rho = np.hstack((0, rho[1:], self.u))
    rho[0] = self.v1
    R1 = self._calc_refl(w, rho)
    rho[0] = self.v2
    R2 = self._calc_refl(w, rho)
    if resid:
        # Weight by measurement uncertainty to form chisq residuals.
        R1 = (self.R1in-R1)/self.dR1in
        R2 = (self.R2in-R2)/self.dR2in
    return R1, R2
def _calc_free(self, z, rho):
    """
    Real and imaginary reflection amplitude for the film *z*, *rho*
    embedded in the substrate medium on both sides.
    """
    # This is more or less cloned code that should be written just once.
    w = np.hstack((0, np.diff(z), 0))
    # Substrate on both sides of the film; hstack already places self.u
    # in slot 0, so no separate rho[0] assignment is needed.
    rho = np.hstack((self.u, rho[1:], self.u))
    Q = -self.Qin
    if self.backrefl:
        # Back reflectivity is equivalent to -Q inputs
        Q = -Q
    r = refl(Q, w, rho)
    return r.real, r.imag
def _calc_refl(self, w, rho):
    """
    Reflectivity |r|^2 for layer widths *w* and SLD profile *rho*,
    resolution-smeared when *dQin* is available.
    """
    Q, dQ = self.Qin, self.dQin
    # Back reflectivity is equivalent to -Q inputs
    if self.backrefl:
        Q = -Q
    r = refl(Q, w, rho)
    if dQ is not None:
        # NOTE(review): convolve is defined elsewhere in the package;
        # presumably it smears |r|^2 over the dQ resolution -- confirm.
        R = convolve(Q, abs(r)**2, Q, dQ)
    else:
        R = abs(r)**2
    return R
def clean(self):
    """
    Remove points which are NaN or Inf from the computed phase.
    """
    Q, re, im = self.Qin, self.RealR, self.ImagR
    # A point survives only when both the real and imaginary parts
    # are finite; the same mask is applied to every output vector.
    keep = reduce(lambda y, x: isfinite(x)&y, [re, im], True)
    if self.dRealR is not None:
        dre, dim = self.dRealR, self.dImagR
        self.Q, self.RealR, self.dRealR, self.ImagR, self.dImagR \
            = [v[keep] for v in (Q, re, dre, im, dim)]
    else:
        self.Q, self.RealR, self.ImagR = [v[keep] for v in (Q, re, im)]
def save(self, outfile=None, uncertainty=True):
    """
    Save Q, RealR, ImagR to a three column text file named *outfile*, or
    save Q, RealR, ImagR, dRealR, dImagR to a five column text file.

    **Parameters:**

        *outfile:* string or None
            Output file name.  If *outfile* is not provided, the name of
            the first input file will be used, but with the extension
            replaced by '.amp'.

        *uncertainty:* boolean
            Include dRealR, dImagR if they exist and if *uncertainty*
            is True, making a five column file.

    **Returns:**

        *None*
    """
    if outfile is None:
        basefile = os.path.splitext(os.path.basename(self.name1))[0]
        outfile = basefile+os.extsep+"amp"
    header = "# Q RealR ImagR"
    columns = [self.Q, self.RealR, self.ImagR]
    if self.dRealR is not None and uncertainty:
        header += " dRealR dImagR"
        columns += [self.dRealR, self.dImagR]
    # Context manager guarantees the file is closed even on write errors.
    with open(outfile, "w") as fid:
        fid.write(header+"\n")
        np.savetxt(fid, np.array(columns).T)
def save_inverted(self, outfile=None, profile=None):
    """
    Save Q, R1, R2, RealR, ImagR of the inverted profile (z, rho) to the
    five column text file *outfile*.
    """
    R1, R2 = self.refl(*profile)
    rer, imr = self._calc_free(*profile)
    data = np.vstack((self.Qin, R1, R2, rer, imr))
    # Context manager guarantees the file is closed even on write errors.
    with open(outfile, "w") as fid:
        fid.write("# Q R1 R2 RealR ImagR\n")
        np.savetxt(fid, data.T)
def show(self):
    """Print Q, RealR, ImagR to the screen."""
    print("# %9s %11s %11s"%("Q", "RealR", "ImagR"))
    for q, rer, imr in zip(self.Q, self.RealR, self.ImagR):
        print("%11.4g %11.4g %11.4g"%(q, rer, imr))
def plot_measurement(self, profile=None):
    """Plot the data, and if available, the inverted theory."""
    from matplotlib.font_manager import FontProperties
    import pylab

    def plot1(Q, R, dR, Rth, surround, label, color):
        # Normalize by the Fresnel reflectivity of the bare
        # substrate/surround interface so film features are visible.
        if self.backrefl:
            F = abs(refl(Q, [0, 0], [self.u, surround]))**2
        else:
            F = abs(refl(Q, [0, 0], [surround, self.u]))**2
        pylab.plot(Q, R/F, '.', label=label, color=color)
        if Rth is not None:
            pylab.plot(Q, Rth/F, '-', label=None, color=color)
        if dR is not None:
            pylab.fill_between(Q, (R-dR)/F, (R+dR)/F,
                               color=color, alpha=0.2)
            if Rth is not None:
                chisq = np.sum(((R-Rth)/dR)**2)
            else:
                chisq = 0
            return chisq, len(Q)
        else:
            # Doesn't make sense to compute chisq for unweighted
            # reflectivity since there are several orders of magnitude
            # differences between the data points.
            return 0, 1

    if profile is not None:
        R1, R2 = self.refl(*profile)
    else:
        R1, R2 = None, None
    # Only show file.ext portion of the file specification
    name1 = os.path.basename(self.name1)
    name2 = os.path.basename(self.name2)
    pylab.cla()
    chisq1, n1 = plot1(self.Qin, self.R1in, self.dR1in, R1,
                       self.v1, name1, 'blue')
    chisq2, n2 = plot1(self.Qin, self.R2in, self.dR2in, R2,
                       self.v2, name2, 'green')
    pylab.legend(prop=FontProperties(size='medium'))
    # Combined normalized chisq across both measurements.
    chisq = (chisq1+chisq2)/(n1+n2)
    if chisq != 0:
        pylab.text(0.01, 0.01, "chisq=%.1f"%chisq,
                   transform=pylab.gca().transAxes,
                   ha='left', va='bottom')
    pylab.ylabel('R / Fresnel_R')
    pylab.xlabel('Q (inv A)')
    plottitle('Reflectivity Measurements')
def plot_phase(self):
    """Plot the reconstructed real and imaginary reflectivity amplitude."""
    from matplotlib.font_manager import FontProperties
    import pylab
    # NOTE(review): `plotamp` and `DARK_RED` are module-level helpers
    # defined elsewhere in this file.
    plotamp(self.Q, self.ImagR, dr=self.dImagR,
            color='blue', label='Imag R')
    plotamp(self.Q, self.RealR, dr=self.dRealR,
            color=DARK_RED, label='Real R')
    pylab.legend(prop=FontProperties(size='medium'))
    plottitle('Reconstructed Phase')
def plot_imaginary(self):
    """Plot the +/- branches of the reconstructed imaginary amplitude."""
    from matplotlib.font_manager import FontProperties
    import pylab
    # NOTE(review): `plotamp` is a module-level helper defined elsewhere
    # in this file; the y-label suggests it scales by (100*Q)^2 -- confirm.
    plotamp(self.Q, -self.ImagR, dr=self.dImagR,
            color='blue', label='Imag R+')
    plotamp(self.Q, self.ImagR, dr=self.dImagR,
            color='green', label='Imag R-')
    pylab.legend(prop=FontProperties(size='medium'))
    pylab.ylabel("(100 Q)^2 Imag R")
    pylab.xlabel("Q (inv A)")
    plottitle('Reconstructed Phase')
def _load(self, file1, file2):
    """
    Load the data from files or from tuples of (Q, R) or (Q, R, dR),
    (Q, dQ, R, dR) or (Q, dQ, R, dR, L).

    Stores *Qin*, *dQin*, *R1in*, *R2in*, *dR1in*, *dR2in* and the
    display names *name1*, *name2* on self.  Both datasets must be
    measured on identical Q points.

    :raises ValueError: If a dataset has fewer than two columns or the
        Q points of the two datasets do not match.
    """
    # This code assumes the following data file formats:
    # 2-column data: Q, R
    # 3-column data: Q, R, dR
    # 4-column data: Q, dQ, R, dR
    # 5-column data: Q, dQ, R, dR, Lambda
    if isstr(file1):
        d1 = np.loadtxt(file1).T
        name1 = file1
    else:
        d1 = file1
        name1 = "SimData1"

    if isstr(file2):
        d2 = np.loadtxt(file2).T
        name2 = file2
    else:
        d2 = file2
        name2 = "SimData2"

    # Column count of the first dataset selects the layout for both.
    ncols = len(d1)
    if ncols <= 1:
        raise ValueError("Data file has less than two columns")
    elif ncols == 2:
        q1, r1 = d1[0:2]
        q2, r2 = d2[0:2]
        dr1 = dr2 = None
        dq1 = dq2 = None
    elif ncols == 3:
        q1, r1, dr1 = d1[0:3]
        q2, r2, dr2 = d2[0:3]
        dq1 = dq2 = None
    elif ncols == 4:
        q1, dq1, r1, dr1 = d1[0:4]
        q2, dq2, r2, dr2 = d2[0:4]
    else:  # ncols >= 5; extra columns beyond the wavelength are ignored
        # Fixed the original's 'lanbda2' typo; both wavelengths are unused.
        q1, dq1, r1, dr1, lambda1 = d1[0:5]
        q2, dq2, r2, dr2, lambda2 = d2[0:5]

    if not q1.shape == q2.shape or not (q1 == q2).all():
        raise ValueError("Q points do not match in data files")

    # Note that q2, dq2, lambda1, and lambda2 are currently discarded.
    self.name1, self.name2 = name1, name2
    self.Qin, self.dQin = q1, dq1
    self.R1in, self.R2in = r1, r2
    self.dR1in, self.dR2in = dr1, dr2
def _calc(self):
    """
    Run the phase reconstruction calculator on the stored inputs.

    Fills in RealR/ImagR from the two measurements and copies the input
    Q grid to self.Q.
    """
    real_r, imag_r = _phase_reconstruction(
        self.Qin, self.R1in, self.R2in, self.u, self.v1, self.v2)
    self.RealR, self.ImagR = real_r, imag_r
    self.Q = self.Qin
def _calc_err(self, stages):
    """
    Monte Carlo estimate of the reconstruction uncertainty.

    Repeats the phase reconstruction *stages* times with the
    measurements perturbed by their reported uncertainties, then stores
    the per-point mean and standard deviation of the finite samples.
    No-op when no uncertainties were supplied.
    """
    if self.dR1in is None:
        return
    from numpy.random import normal
    # One reconstruction per stage; arguments are evaluated left to
    # right, so the R1/R2 draws occur in the same order as before.
    samples = [
        _phase_reconstruction(self.Qin,
                              normal(self.R1in, self.dR1in),
                              normal(self.R2in, self.dR2in),
                              self.u, self.v1, self.v2)
        for _ in range(stages)
    ]
    reals, imags = zip(*samples)
    self.RealR = valid_f(mean, reals)
    self.ImagR = valid_f(mean, imags)
    self.dRealR = valid_f(std, reals)
    self.dImagR = valid_f(std, imags)
def valid_f(f, A, axis=0):
    """
    Apply the vector function *f* to the finite elements of array *A*.

    *axis* is the axis over which the calculation should be performed,
    or None if the calculation should summarize the entire array.
    """
    arr = np.asarray(A)
    # Mask NaN/Inf entries so f only sees valid samples.
    masked = np.ma.masked_array(arr, mask=~isfinite(arr))
    return np.asarray(f(masked, axis=axis))
def _phase_reconstruction(Q, R1sq, R2sq, rho_u, rho_v1, rho_v2):
    """
    Compute phase reconstruction from back reflectivity on paired samples
    with varying surface materials.

    "Fixed Nonvacuum Fronting, Variable Backing"
    Uses eq. (31), (32) from [Majkrzak2003].

    Inputs::

        *Q* is the measurement positions
        *R1sq*, *R2sq* are the measurements in the two conditions
        *rho_v1*, *rho_v2* are the backing media SLDs for *R1sq* and *R2sq*
        *rho_u* is the fronting medium SLD

    Returns RealR, ImagR
    """
    # The used notation here is different from the paper [Majkrzak2003].
    # To more easily understand the code, take a look at the following translation table
    #
    # Paper | Code
    # f^2 = usq
    # f^2(a^2 + f^2b^2) = alpha
    # f^2(d^2 + c^2) = beta
    # \Sigma^{fh_i} = sigmai with i = 1, 2
    # h_1^2, h_1^2 = v1sq, v2sq
    #
    # NOTE(review): `pi` and `sqrt` come from this module's numpy imports.
    # Q^2 shifted by the fronting potential; SLD inputs are in 1e-6 A^-2.
    Qsq = Q**2 + 16.*pi*rho_u*1e-6
    # Reduced wavevector factors for fronting and the two backings.
    usq, v1sq, v2sq = [(1-16*pi*rho*1e-6/Qsq) for rho in (rho_u, rho_v1, rho_v2)]
    # Below the critical edge the square roots produce NaNs; suppress the
    # warnings and let callers strip non-finite points (see clean()).
    with np.errstate(invalid='ignore'):
        sigma1 = 2 * sqrt(v1sq*usq) * (1+R1sq) / (1-R1sq)
        sigma2 = 2 * sqrt(v2sq*usq) * (1+R2sq) / (1-R2sq)
        alpha = usq * (sigma1-sigma2) / (v1sq-v2sq)
        beta = (v2sq*sigma1-v1sq*sigma2) / (v2sq-v1sq)
        gamma = sqrt(alpha*beta - usq**2)
        # Real and imaginary parts of the reflection amplitude.
        Rre = (alpha-beta) / (2*usq+alpha+beta)
        Rim = -2*gamma / (2*usq+alpha+beta)
    return Rre, Rim
def main():
    """
    Drive phase reconstruction and direct inversion from the command line.

    Call with a single reconstructed amplitude file AMP, or with a pair
    of reflectivity files RF1 RF2 (plus --surround and --substrate) to
    reconstruct the phase first and then invert it.
    """
    import os
    from optparse import OptionParser, OptionGroup
    description = """\
Compute the scattering length density profile from the real portion of the
phase reconstructed reflectivity. Call with a phase reconstructed reflectivity
dataset AMP, or with a pair of reduced reflectivity datasets RF1 and RF2 for
complete phase inversion. Phase inversion requires two surrounding materials
and one substrate material to be specified. The measurement is assumed to come
through the substrate."""
    parser = OptionParser(usage="%prog [options] AMP or RF1 RF2",
                          description=description,
                          version="%prog 1.0")
    inversion_keys = []  # Collect the keywords we are using
    group = OptionGroup(parser, "Sample description", description=None)
    group.add_option("-t", "--thickness", dest="thickness",
                     default=Inversion.thickness, type="float",
                     help="sample thickness (A)")
    group.add_option("-u", "--substrate", dest="substrate",
                     default=Inversion.substrate, type="float",
                     help="sample substrate material (10^6 * SLD)")
    group.add_option("-v", "--surround", dest="surround",
                     type="float", nargs=2,
                     help="varying materials v1 v2 (10^6 * SLD) [for phase]")
    # fronting is not an inversion key
    inversion_keys += ['thickness', 'substrate']
    parser.add_option_group(group)
    group = OptionGroup(parser, "Data description", description=None)
    group.add_option("--Qmin", dest="Qmin",
                     default=Inversion.Qmin, type="float",
                     help="minimum Q value to use from the data")
    group.add_option("--Qmax", dest="Qmax",
                     default=Inversion.Qmax, type="float",
                     help="maximum Q value to use from the data")
    group.add_option("-n", "--noise", dest="noise",
                     default=Inversion.noise, type="float",
                     help="noise scaling")
    group.add_option("-M", "--monitor", dest="monitor",
                     default=Inversion.monitor, type="int",
                     help="monitor counts used for measurement")
    inversion_keys += ['Qmin', 'Qmax', 'noise', 'monitor']
    parser.add_option_group(group)
    group = OptionGroup(parser, "Outputs", description=None)
    group.add_option("-o", "--outfile", dest="outfile", default=None,
                     help="profile file (infile.prf), use '-' for console")
    group.add_option("--ampfile", dest="ampfile", default=None,
                     help="amplitude file (infile.amp)")
    # -p and -q deliberately share dest="doplot" with opposite actions.
    group.add_option("-p", "--plot", dest="doplot",
                     action="store_true",
                     help="show plot of result")
    group.add_option("-q", "--quiet", dest="doplot",
                     action="store_false", default=True,
                     help="don't show output plot")
    # doplot is a post inversion option
    parser.add_option_group(group)
    group = OptionGroup(parser, "Calculation controls", description=None)
    group.add_option("--rhopoints", dest="rhopoints",
                     default=Inversion.rhopoints, type="int",
                     help="number of profile steps [dz=thickness/rhopoints]")
    group.add_option("-z", "--dz", dest="dz",
                     default=None, type="float",
                     help="max profile step size (A) [rhopoints=thickness/dz]")
    group.add_option("--calcpoints", dest="calcpoints",
                     default=Inversion.calcpoints, type="int",
                     help="number of calculation points per profile step")
    group.add_option("--stages", dest="stages",
                     default=Inversion.stages, type="int",
                     help="number of inversions to average over")
    group.add_option("-a", dest="amp_only", default=False,
                     action="store_true",
                     help="calculate amplitude and stop")
    inversion_keys += ['rhopoints', 'calcpoints', 'stages']
    parser.add_option_group(group)

    (options, args) = parser.parse_args()
    if len(args) < 1 or len(args) > 2:
        parser.error("Need real R data file or pair of reflectivities")
    basefile = os.path.splitext(os.path.basename(args[0]))[0]

    if len(args) == 1:
        # A single file is a pre-computed amplitude; no phase step.
        phase = None
        data = args[0]
    else:
        if not options.surround or not options.substrate:
            parser.error("Need fronting and backing for phase inversion")
        v1, v2 = options.surround
        u = options.substrate
        phase = SurroundVariation(args[0], args[1], u=u, v1=v1, v2=v2)
        data = phase.Q, phase.RealR, phase.dRealR

    # Amplitude outputs only exist when phase reconstruction was done;
    # previously these paths crashed with AttributeError when a single
    # AMP file was given together with --ampfile or -a -p.
    if phase is not None and options.ampfile:
        phase.save(options.ampfile)
    if options.amp_only:
        if phase is not None and options.doplot:
            import pylab
            phase.plot()
            pylab.show()
        return

    if options.dz:
        # NOTE(review): the -z help text says rhopoints=thickness/dz but
        # this computes ceil(1/dz) -- confirm whether thickness should
        # appear in the numerator before changing it.
        options.rhopoints = ceil(1/options.dz)

    # Rather than trying to remember which control parameters I
    # have options for, I update the list of parameters that I
    # allow for each group of parameters, and pull the returned
    # values out below.
    res = Inversion(data=data, **dict((key, getattr(options, key))
                                      for key in inversion_keys))
    res.run(showiters=False)

    if options.outfile is None:
        options.outfile = basefile + os.path.extsep + "prf"
    if options.outfile == "-":
        res.show()
    else:
        res.save(options.outfile)
    if options.doplot:
        import pylab
        res.plot(phase=phase)
        pylab.show()


if __name__ == "__main__":
    main()
| 66,608 | 20,405 |
import numpy as np
import matplotlib.pyplot as plt
def TestTrace(IntModel='jrm33', ExtModel='Con2020', fig=None, maps=None, color='green'):
    """
    Trace a fan of field lines along the x axis and plot them in the x-z plane.

    :param IntModel: internal field model name (skipped in the label if 'none')
    :param ExtModel: external field model name (skipped in the label if 'none')
    :param fig: existing figure/axes to draw into, forwarded to PlotXZ
    :param maps: subplot map forwarded to PlotXZ; defaults to [1, 1, 0, 0]
    :param color: line color for the traces
    :return: the axes returned by TraceField.PlotXZ
    """
    from ..TraceField import TraceField
    from ..Con2020 import Config

    # Fixed the mutable default argument: a shared list default could be
    # mutated by callees and leak state between calls.
    if maps is None:
        maps = [1, 1, 0, 0]

    # set the starting coords: n points on each side of the planet along x
    n = 7
    x = np.linspace(2.0, 30.0, n)
    x = np.append(-x[::-1], x)
    y = np.zeros(n*2)
    z = np.zeros(n*2)

    # get the trace with the analytic Con2020 equations, restoring the
    # previous configuration afterwards
    cfg = Config()
    Config(equation_type='analytic')
    T = TraceField(x, y, z, Verbose=True, IntModel=IntModel, ExtModel=ExtModel)
    Config(cfg)

    # build a label such as 'jrm33 + Con2020', skipping 'none' models
    parts = []
    if IntModel.upper() != 'NONE':
        parts.append(IntModel)
    if ExtModel.upper() != 'NONE':
        parts.append(ExtModel)
    lab = ' + '.join(parts)

    # plot it
    ax = T.PlotXZ(fig=fig, maps=maps, label=lab, color=color)
    return ax
def CompareTrace():
    """
    Compare field line traces with and without the Con2020 external field.

    Traces the same set of starting points using JRM33 alone and
    JRM33 + Con2020, overlaying both in the rho-z plane.

    :return: the axes containing both traces
    """
    from ..TraceField import TraceField
    from ..Con2020 import Config

    # starting coordinates: unit-radius points at southern colatitudes
    npts = 8
    colat = (180.0 - np.linspace(21, 35, npts))*np.pi/180.0
    radius = np.ones(npts)
    x0 = radius*np.sin(colat)
    y0 = np.zeros(npts)
    z0 = radius*np.cos(colat)

    # trace with the internal model alone, then internal + external,
    # using the analytic Con2020 equations and restoring the config after
    cfg = Config()
    Config(equation_type='analytic')
    T0 = TraceField(x0, y0, z0, Verbose=True, IntModel='jrm33', ExtModel='none')
    T1 = TraceField(x0, y0, z0, Verbose=True, IntModel='jrm33', ExtModel='Con2020')
    Config(cfg)

    # overlay both traces in the rho-z plane
    ax = T0.PlotRhoZ(label='JRM33', color='black')
    ax = T1.PlotRhoZ(fig=ax, label='JRM33 + Con2020', color='red')
    ax.set_xlim(-2.0, 25.0)
    ax.set_ylim(-10.0, 10.0)
    return ax
| 1,429 | 664 |
class CustomException(Exception):
    """Exception carrying an explicit ``message`` attribute."""

    def __init__(self, message: str):
        # Forward the message to Exception so str(), repr() and pickling
        # behave normally; the original skipped this, leaving str(exc) empty.
        super().__init__(message)
        self.message = message
def throw(err: Exception):
    """Immediately raise the given exception instance.

    Useful as a first-class callable (e.g. in lambdas or dispatch
    tables) where the ``raise`` statement itself cannot appear.
    """
    raise err
| 146 | 41 |
from __future__ import annotations
from typing import Callable, Optional, Tuple, List
from enum import Enum, Flag, auto
from threading import Thread
import datetime
import json
import websocket
class SIStatus(Enum):
    """
    Status of operations on the OpenStuder gateway.

    - **SIStatus.SUCCESS**: Operation was successfully completed.
    - **SIStatus.IN_PROGRESS**: Operation is already in progress or another operation is occupying the resource.
    - **SIStatus.ERROR**: General (unspecified) error.
    - **SIStatus.NO_PROPERTY**: The property does not exist or the user's access level does not allow to access the property.
    - **SIStatus.NO_DEVICE**: The device does not exist.
    - **SIStatus.NO_DEVICE_ACCESS**: The device access instance does not exist.
    - **SIStatus.TIMEOUT**: A timeout occurred when waiting for the completion of the operation.
    - **SIStatus.INVALID_VALUE**: A invalid value was passed.
    """

    SUCCESS = 0
    IN_PROGRESS = 1
    ERROR = -1
    NO_PROPERTY = -2
    NO_DEVICE = -3
    NO_DEVICE_ACCESS = -4
    TIMEOUT = -5
    INVALID_VALUE = -6

    @staticmethod
    def from_string(string: str) -> SIStatus:
        """
        Map a wire-format status string to its enum member.

        Unrecognized strings map to SIStatus.ERROR, matching the
        gateway's catch-all behavior.
        """
        return {
            'Success': SIStatus.SUCCESS,
            'InProgress': SIStatus.IN_PROGRESS,
            'Error': SIStatus.ERROR,
            'NoProperty': SIStatus.NO_PROPERTY,
            'NoDevice': SIStatus.NO_DEVICE,
            'NoDeviceAccess': SIStatus.NO_DEVICE_ACCESS,
            'Timeout': SIStatus.TIMEOUT,
            'InvalidValue': SIStatus.INVALID_VALUE,
        }.get(string, SIStatus.ERROR)
class SIConnectionState(Enum):
    """
    State of the connection to the OpenStuder gateway.

    - **SIConnectionState.DISCONNECTED**: The client is not connected.
    - **SIConnectionState.CONNECTING**: The client is establishing the WebSocket connection to the gateway.
    - **SIConnectionState.AUTHORIZING**: The WebSocket connection to the gateway has been established and the client is authorizing.
    - **SIConnectionState.CONNECTED**: The WebSocket connection is established and the client is authorized, ready to use.
    """

    # Values come from auto(); only member identity matters, not numbers.
    DISCONNECTED = auto()
    CONNECTING = auto()
    AUTHORIZING = auto()
    CONNECTED = auto()
class SIAccessLevel(Enum):
    """
    Level of access granted to a client from the OpenStuder gateway.

    - **NONE**: No access at all.
    - **BASIC**: Basic access to device information properties (configuration excluded).
    - **INSTALLER**: Basic access + additional access to most common configuration properties.
    - **EXPERT**: Installer + additional advanced configuration properties.
    - **QUALIFIED_SERVICE_PERSONNEL**: Expert and all configuration and service properties only for qualified service personnel.
    """

    NONE = 0
    BASIC = auto()
    INSTALLER = auto()
    EXPERT = auto()
    QUALIFIED_SERVICE_PERSONNEL = auto()

    @staticmethod
    def from_string(string: str) -> SIAccessLevel:
        """
        Map a wire-format access-level string to its enum member.

        Unrecognized strings map to SIAccessLevel.NONE (least privilege).
        """
        return {
            'None': SIAccessLevel.NONE,
            'Basic': SIAccessLevel.BASIC,
            'Installer': SIAccessLevel.INSTALLER,
            'Expert': SIAccessLevel.EXPERT,
            'QSP': SIAccessLevel.QUALIFIED_SERVICE_PERSONNEL,
        }.get(string, SIAccessLevel.NONE)
class SIDescriptionFlags(Flag):
    """
    Flags to control the format of the **DESCRIBE** functionality.

    - **SIDescriptionFlags.NONE**: No description flags.
    - **SIDescriptionFlags.INCLUDE_ACCESS_INFORMATION**: Includes device access instances information.
    - **SIDescriptionFlags.INCLUDE_DEVICE_INFORMATION**: Include device information.
    - **SIDescriptionFlags.INCLUDE_PROPERTY_INFORMATION**: Include device property information.
    - **SIDescriptionFlags.INCLUDE_DRIVER_INFORMATION**: Include device access driver information.
    """

    NONE = 0
    INCLUDE_ACCESS_INFORMATION = auto()
    INCLUDE_DEVICE_INFORMATION = auto()
    INCLUDE_PROPERTY_INFORMATION = auto()
    INCLUDE_DRIVER_INFORMATION = auto()
class SIWriteFlags(Flag):
    """
    Flags to control write property operation.

    - **SIWriteFlags.NONE**: No write flags.
    - **SIWriteFlags.PERMANENT**: Write the change to persistent storage, i.e. the change survives reboots.
    """

    NONE = 0
    PERMANENT = auto()
class SIProtocolError(IOError):
    """
    Exception raised for all OpenStuder protocol errors.
    """

    def __init__(self, message):
        super().__init__(message)

    def reason(self) -> str:
        """
        Return the actual reason for the error.

        :return: Reason for the error.
        """
        return self.args[0]
class SIDeviceMessage:
    """
    A message broadcast by a device connected to the OpenStuder gateway.
    """

    def __init__(self, access_id: str, device_id: str, message_id: str, message: str, timestamp: datetime.datetime):
        # Timestamp when the device message was received by the gateway.
        self.timestamp = timestamp
        # ID of the device access driver that received the message.
        self.access_id = access_id
        # ID of the device that broadcast the message.
        self.device_id = device_id
        # Message ID.
        self.message_id = message_id
        # String representation of the message.
        self.message = message

    @staticmethod
    def from_dict(d: dict) -> SIDeviceMessage:
        """
        Build an SIDeviceMessage from a decoded JSON dictionary.

        The 'timestamp' value is ISO-8601; a trailing 'Z' is converted to
        an explicit UTC offset for fromisoformat().

        :raises SIProtocolError: If a required key is missing.
        """
        try:
            when = datetime.datetime.fromisoformat(d['timestamp'].replace("Z", "+00:00"))
            return SIDeviceMessage(d['access_id'], d['device_id'], d['message_id'], d['message'], when)
        except KeyError:
            raise SIProtocolError('invalid json body')
class SIPropertyReadResult:
    """
    Outcome of a single property read operation.
    """

    def __init__(self, status: SIStatus, id_: str, value: Optional[any]):
        # Status of the property read operation.
        self.status = status
        # ID of the property read.
        self.id = id_
        # Value that was read from the property, optional.
        self.value = value

    def to_tuple(self) -> Tuple[SIStatus, str, Optional[any]]:
        """Return the result as a (status, id, value) tuple."""
        return self.status, self.id, self.value

    @staticmethod
    def from_dict(d: dict) -> SIPropertyReadResult:
        """
        Build a result from a decoded JSON dictionary.

        Values are coerced: numeric strings become float, 'true'/'false'
        become booleans, anything else stays a lowercase string.

        :raises SIProtocolError: If a required key is missing.
        """
        try:
            result = SIPropertyReadResult(SIStatus.from_string(d['status']), d['id'], None)
            raw = d.get('value')
            if raw is not None:
                try:
                    result.value = float(raw)
                except ValueError:
                    lowered = raw.lower()
                    result.value = {'true': True, 'false': False}.get(lowered, lowered)
            return result
        except KeyError:
            raise SIProtocolError('invalid json body')
class SIPropertySubscriptionResult:
    """
    Outcome of a property subscribe or unsubscribe operation.
    """

    def __init__(self, status: SIStatus, id_: str):
        # Status of the property subscribe or unsubscribe operation.
        self.status = status
        # ID of the property.
        self.id = id_

    def to_tuple(self) -> Tuple[SIStatus, str]:
        """Return the result as a (status, id) tuple."""
        return self.status, self.id

    @staticmethod
    def from_dict(d: dict) -> SIPropertySubscriptionResult:
        """
        Build a result from a decoded JSON dictionary.

        :raises SIProtocolError: If a required key is missing.
        """
        try:
            return SIPropertySubscriptionResult(SIStatus.from_string(d['status']), d['id'])
        except KeyError:
            raise SIProtocolError('invalid json body')
class _SIAbstractGatewayClient:
    """
    Frame encoding/decoding shared by the OpenStuder gateway clients.

    The wire protocol is plain text: a command line, zero or more
    'key:value' header lines, a blank line, then an optional body.
    All methods are static helpers; concrete clients drive the WebSocket
    and call these to build and parse frames.
    """

    def __init__(self):
        super(_SIAbstractGatewayClient, self).__init__()

    @staticmethod
    def encode_authorize_frame_without_credentials() -> str:
        """Encode an AUTHORIZE frame for guest (credential-less) access."""
        return 'AUTHORIZE\nprotocol_version:1\n\n'

    @staticmethod
    def encode_authorize_frame_with_credentials(user: str, password: str) -> str:
        """Encode an AUTHORIZE frame carrying username and password."""
        return 'AUTHORIZE\nuser:{user}\npassword:{password}\nprotocol_version:1\n\n'.format(user=user, password=password)

    @staticmethod
    def decode_authorized_frame(frame: str) -> Tuple[SIAccessLevel, str]:
        """Decode AUTHORIZED into (access level, gateway version).

        :raises SIProtocolError: On ERROR frames, protocol-version
            mismatch, or malformed responses.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'AUTHORIZED' and 'access_level' in headers and 'protocol_version' in headers and 'gateway_version' in headers:
            if headers['protocol_version'] == '1':
                return SIAccessLevel.from_string(headers['access_level']), headers['gateway_version']
            else:
                raise SIProtocolError('protocol version 1 not supported by server')
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during authorization')

    @staticmethod
    def encode_enumerate_frame() -> str:
        """Encode an ENUMERATE frame (refresh device enumeration)."""
        return 'ENUMERATE\n\n'

    @staticmethod
    def decode_enumerated_frame(frame: str) -> Tuple[SIStatus, int]:
        """Decode ENUMERATED into (status, device count)."""
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'ENUMERATED' and 'status' in headers and 'device_count' in headers:
            return SIStatus.from_string(headers['status']), int(headers['device_count'])
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during device enumeration')

    @staticmethod
    def encode_describe_frame(device_access_id: Optional[str], device_id: Optional[str], property_id: Optional[int], flags: Optional[SIDescriptionFlags]) -> str:
        """Encode a DESCRIBE frame.

        The optional id header is a dotted path access.device.property;
        each deeper component only applies when its parent is given.
        """
        frame = 'DESCRIBE\n'
        if device_access_id is not None:
            frame += 'id:{device_access_id}'.format(device_access_id=device_access_id)
            if device_id is not None:
                frame += '.{device_id}'.format(device_id=device_id)
                if property_id is not None:
                    frame += '.{property_id}'.format(property_id=property_id)
            frame += '\n'
        if flags is not None and isinstance(flags, SIDescriptionFlags):
            frame += 'flags:'
            if flags & SIDescriptionFlags.INCLUDE_ACCESS_INFORMATION:
                frame += 'IncludeAccessInformation,'
            if flags & SIDescriptionFlags.INCLUDE_DEVICE_INFORMATION:
                frame += 'IncludeDeviceInformation,'
            if flags & SIDescriptionFlags.INCLUDE_PROPERTY_INFORMATION:
                frame += 'IncludePropertyInformation,'
            if flags & SIDescriptionFlags.INCLUDE_DRIVER_INFORMATION:
                frame += 'IncludeDriverInformation,'
            # Strip the trailing comma left by the last matched flag.
            # NOTE(review): with flags == SIDescriptionFlags.NONE this
            # strips the ':' instead, emitting a bare 'flags' line --
            # confirm the gateway tolerates that.
            frame = frame[:-1]
            frame += '\n'
        frame += '\n'
        return frame

    @staticmethod
    def decode_description_frame(frame: str) -> Tuple[SIStatus, Optional[str], object]:
        """Decode DESCRIPTION into (status, optional id, JSON description)."""
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'DESCRIPTION' and 'status' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                description = json.loads(body)
                return status, headers.get('id', None), description
            else:
                return status, headers.get('id', None), {}
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during description')

    @staticmethod
    def encode_find_properties_frame(property_id: str) -> str:
        """Encode a FIND PROPERTIES frame for the given (wildcard) id."""
        return 'FIND PROPERTIES\nid:{property_id}\n\n'.format(property_id=property_id)

    @staticmethod
    def decode_properties_found_frame(frame: str) -> (SIStatus, str, int, List[str]):
        """Decode PROPERTIES FOUND into (status, id, count, property ids)."""
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTIES FOUND' and 'status' in headers and 'id' in headers and 'count' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                properties = json.loads(body)
                return status, headers.get('id'), int(headers.get('count', 0)), properties
            else:
                return status, headers.get('id'), int(headers.get('count', 0)), []
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during finding properties')

    @staticmethod
    def encode_read_property_frame(property_id: str) -> str:
        """Encode a READ PROPERTY frame for a single property id."""
        return 'READ PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)

    @staticmethod
    def decode_property_read_frame(frame: str) -> SIPropertyReadResult:
        """Decode PROPERTY READ into an SIPropertyReadResult.

        Header values are coerced: float if numeric, bool for
        'true'/'false', otherwise the lowercase string.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY READ' and 'status' in headers and 'id' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS and 'value' in headers:
                try:
                    value = float(headers['value'])
                except ValueError:
                    string = headers['value'].lower()
                    if string == 'true':
                        value = True
                    elif string == 'false':
                        value = False
                    else:
                        value = string
                return SIPropertyReadResult(status, headers['id'], value)
            else:
                return SIPropertyReadResult(status, headers['id'], None)
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during property read')

    @staticmethod
    def encode_read_properties_frame(property_ids: List[str]) -> str:
        """Encode READ PROPERTIES with a JSON list of ids as body."""
        return 'READ PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))

    @staticmethod
    def decode_properties_read_frame(frame: str) -> List[SIPropertyReadResult]:
        """Decode PROPERTIES READ into a list of SIPropertyReadResult.

        The JSON body is parsed with SIPropertyReadResult.from_dict as
        object_hook, so every JSON object in the body goes through it.
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTIES READ' and 'status' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                return json.loads(body, object_hook=SIPropertyReadResult.from_dict)
            else:
                raise SIProtocolError(f'error during property read, status={headers["status"]}')
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during properties read')

    @staticmethod
    def encode_write_property_frame(property_id: str, value: Optional[any], flags: Optional[SIWriteFlags]) -> str:
        """Encode a WRITE PROPERTY frame with optional value and flags."""
        frame = 'WRITE PROPERTY\nid:{property_id}\n'.format(property_id=property_id)
        if flags is not None and isinstance(flags, SIWriteFlags):
            frame += 'flags:'
            if flags & SIWriteFlags.PERMANENT:
                frame += 'Permanent'
            frame += '\n'
        if value is not None:
            frame += 'value:{value}\n'.format(value=value)
        frame += '\n'
        return frame

    @staticmethod
    def decode_property_written_frame(frame: str) -> Tuple[SIStatus, str]:
        """Decode PROPERTY WRITTEN into (status, property id)."""
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY WRITTEN' and 'status' in headers and 'id' in headers:
            return SIStatus.from_string(headers['status']), headers['id']
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during property write')

    @staticmethod
    def encode_subscribe_property_frame(property_id: str) -> str:
        """Encode a SUBSCRIBE PROPERTY frame for a single property id."""
        return 'SUBSCRIBE PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)

    @staticmethod
    def decode_property_subscribed_frame(frame: str) -> Tuple[SIStatus, str]:
        """Decode PROPERTY SUBSCRIBED into (status, property id)."""
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY SUBSCRIBED' and 'status' in headers and 'id' in headers:
            return SIStatus.from_string(headers['status']), headers['id']
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during property subscribe')

    @staticmethod
    def encode_subscribe_properties_frame(property_ids: List[str]) -> str:
        """Encode SUBSCRIBE PROPERTIES with a JSON list of ids as body."""
        return 'SUBSCRIBE PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))

    @staticmethod
    def decode_properties_subscribed_frame(frame: str) -> List[SIPropertySubscriptionResult]:
        """Decode PROPERTIES SUBSCRIBED into subscription results."""
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTIES SUBSCRIBED' and 'status' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                return json.loads(body, object_hook=SIPropertySubscriptionResult.from_dict)
            else:
                # NOTE(review): message says 'properties read' in the
                # subscribe path -- looks like a copy-paste; confirm
                # before changing the user-visible text.
                raise SIProtocolError(f'error during properties read, status={headers["status"]}')
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during properties subscribe')

    @staticmethod
    def encode_unsubscribe_property_frame(property_id: str) -> str:
        """Encode an UNSUBSCRIBE PROPERTY frame for a single property id."""
        return 'UNSUBSCRIBE PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)

    @staticmethod
    def decode_property_unsubscribed_frame(frame: str) -> Tuple[SIStatus, str]:
        """Decode PROPERTY UNSUBSCRIBED into (status, property id)."""
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY UNSUBSCRIBED' and 'status' in headers and 'id' in headers:
            return SIStatus.from_string(headers['status']), headers['id']
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during property unsubscribe')

    @staticmethod
    def encode_unsubscribe_properties_frame(property_ids: List[str]) -> str:
        """Encode UNSUBSCRIBE PROPERTIES with a JSON list of ids as body."""
        return 'UNSUBSCRIBE PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))

    @staticmethod
    def decode_properties_unsubscribed_frame(frame: str) -> List[SIPropertySubscriptionResult]:
        """Decode PROPERTIES UNSUBSCRIBED into subscription results."""
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTIES UNSUBSCRIBED' and 'status' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                return json.loads(body, object_hook=SIPropertySubscriptionResult.from_dict)
            else:
                raise SIProtocolError(f'error during properties unsubscribe, status={headers["status"]}')
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during properties unsubscribe')

    @staticmethod
    def decode_property_update_frame(frame: str) -> Tuple[str, any]:
        """Decode a PROPERTY UPDATE indication into (id, coerced value)."""
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY UPDATE' and 'id' in headers and 'value' in headers:
            # Same coercion as property reads: float, bool, else string.
            try:
                value = float(headers['value'])
            except ValueError:
                string = headers['value'].lower()
                if string == 'true':
                    value = True
                elif string == 'false':
                    value = False
                else:
                    value = string
            return headers['id'], value
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error receiving property update')

    @staticmethod
    def encode_read_datalog_frame(property_id: Optional[str], from_: Optional[datetime.datetime], to: Optional[datetime.datetime], limit: Optional[int]) -> str:
        """Encode READ DATALOG with optional id, time window and limit."""
        frame = 'READ DATALOG\n'
        if property_id is not None:
            frame += 'id:{property_id}\n'.format(property_id=property_id)
        frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('from', from_)
        frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('to', to)
        if limit is not None:
            frame += 'limit:{limit}\n'.format(limit=limit)
        frame += '\n'
        return frame

    @staticmethod
    def decode_datalog_read_frame(frame: str) -> Tuple[SIStatus, Optional[str], int, str]:
        """Decode DATALOG READ into (status, id, count, raw body).

        The body is returned unparsed (CSV-like text from the gateway).
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'DATALOG READ' and 'status' in headers and 'count' in headers:
            return SIStatus.from_string(headers['status']), headers.get('id'), int(headers['count']), body
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error receiving datalog read')

    @staticmethod
    def encode_read_messages_frame(from_: Optional[datetime.datetime], to: Optional[datetime.datetime], limit: Optional[int]) -> str:
        """Encode READ MESSAGES with optional time window and limit."""
        frame = 'READ MESSAGES\n'
        frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('from', from_)
        frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('to', to)
        if limit is not None:
            frame += 'limit:{limit}\n'.format(limit=limit)
        frame += '\n'
        return frame

    @staticmethod
    def decode_messages_read_frame(frame: str) -> Tuple[SIStatus, int, List[SIDeviceMessage]]:
        """Decode MESSAGES READ into (status, count, device messages)."""
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'MESSAGES READ' and 'status' in headers and 'count' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                messages = json.loads(body, object_hook=SIDeviceMessage.from_dict)
                return status, int(headers['count']), messages
            else:
                return status, int(headers['count']), []
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            # NOTE(review): 'during description' looks like a copy-paste
            # from decode_description_frame -- confirm before changing
            # the user-visible text.
            raise SIProtocolError('unknown error during description')

    @staticmethod
    def decode_device_message_frame(frame: str) -> SIDeviceMessage:
        """Decode a DEVICE MESSAGE indication into an SIDeviceMessage."""
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'DEVICE MESSAGE' and 'access_id' in headers and 'device_id' in headers and 'message_id' in headers and 'message' in headers and 'timestamp' in headers:
            return SIDeviceMessage.from_dict(headers)
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error receiving device message')

    @staticmethod
    def peek_frame_command(frame: str) -> str:
        """Return the command (first line) of a frame without full decode.

        Raises ValueError (from str.index) if the frame has no newline.
        """
        return frame[:frame.index('\n')]

    @staticmethod
    def decode_frame(frame: str) -> Tuple[str, dict, str]:
        """Split a frame into (command, headers dict, body).

        Headers are 'key:value' lines up to the first blank line; values
        containing ':' are re-joined.  Lines without ':' are ignored.

        :raises SIProtocolError: If the frame has no command line or no
            blank separator line before the body.
        """
        lines = frame.split('\n')
        if len(lines) < 2:
            raise SIProtocolError('invalid frame')
        command = lines[0]
        line = 1
        headers = {}
        while line < len(lines) and lines[line]:
            components = lines[line].split(':')
            if len(components) >= 2:
                headers[components[0]] = ':'.join(components[1:])
            line += 1
        # Skip the blank separator line between headers and body.
        line += 1
        if line >= len(lines):
            raise SIProtocolError('invalid frame')
        body = '\n'.join(lines[line:])
        return command, headers, body

    @staticmethod
    def get_timestamp_header_if_present(key: str, timestamp: Optional[datetime.datetime]):
        """Return '{key}:{iso}\\n' for a datetime, or '' when absent.

        Microseconds are dropped before ISO formatting.
        """
        if timestamp is not None and isinstance(timestamp, datetime.datetime):
            return '{key}:{timestamp}\n'.format(key=key, timestamp=timestamp.replace(microsecond=0).isoformat())
        else:
            return ''
class SIGatewayClient(_SIAbstractGatewayClient):
    """
    Simple, synchronous (blocking) OpenStuder gateway client.

    Every operation blocks the calling thread until the gateway has answered, which makes this client very easy to
    use. The trade-off is that device message indications are silently discarded and subscriptions to property
    changes are not possible - use the asynchronous client for those features.
    """

    def __init__(self):
        super().__init__()
        # Connection bookkeeping: state machine, WebSocket handle and data gathered during authorization.
        self.__state: SIConnectionState = SIConnectionState.DISCONNECTED
        self.__ws: Optional[websocket.WebSocket] = None
        self.__access_level: SIAccessLevel = SIAccessLevel.NONE
        self.__gateway_version: str = ''

    def connect(self, host: str, port: int = 1987, user: str = None, password: str = None) -> SIAccessLevel:
        """
        Opens the WebSocket connection to the OpenStuder gateway and runs the authorization handshake. The calling
        thread is blocked until authorization has completed or failed.

        :param host: Hostname or IP address of the OpenStuder gateway to connect to.
        :param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
        :param user: Username send to the gateway used for authorization.
        :param password: Password send to the gateway used for authorization.
        :return: Access Level granted to the client.
        :raises SIProtocolError: If the connection could not be established, or the authorization was refused.
        """
        # Connecting is only allowed from the DISCONNECTED state.
        self.__ensure_in_state(SIConnectionState.DISCONNECTED)

        # Open the WebSocket connection to the gateway.
        self.__state = SIConnectionState.CONNECTING
        self.__ws = websocket.create_connection('ws://{host}:{port}'.format(host=host, port=port))

        # Send the AUTHORIZE frame - anonymously when either credential is missing.
        self.__state = SIConnectionState.AUTHORIZING
        if user is None or password is None:
            authorize_frame = super().encode_authorize_frame_without_credentials()
        else:
            authorize_frame = super().encode_authorize_frame_with_credentials(user, password)
        self.__ws.send(authorize_frame)
        try:
            self.__access_level, self.__gateway_version = super().decode_authorized_frame(self.__ws.recv())
        except ConnectionRefusedError:
            self.__state = SIConnectionState.DISCONNECTED
            raise SIProtocolError('WebSocket connection refused')

        # Authorization succeeded: the client is now connected.
        self.__state = SIConnectionState.CONNECTED
        return self.__access_level

    def state(self) -> SIConnectionState:
        """
        Returns the current state of the client. See **SIConnectionState** for details.

        :return: Current state of the client.
        """
        return self.__state

    def access_level(self) -> SIAccessLevel:
        """
        Returns the access level the client has been granted by the connected gateway. See **SIAccessLevel** for
        details.

        :return: Access level granted to client.
        """
        return self.__access_level

    def gateway_version(self) -> str:
        """
        Returns the software version of the OpenStuder gateway this client is connected to.

        :return: Version of the gateway software.
        """
        return self.__gateway_version

    def enumerate(self) -> Tuple[SIStatus, int]:
        """
        Asks the gateway to rescan every configured and functional device access driver for new devices and to drop
        devices that do not respond anymore. Blocks until the gateway reports the result.

        :return: Returns two values. 1: operation status, 2: the number of devices present.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_enumerate_frame())
        return super().decode_enumerated_frame(self.__receive_frame_until_commands(['ENUMERATED', 'ERROR']))

    def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> Tuple[SIStatus, Optional[str], object]:
        """
        Retrieves information about the available devices and their properties from the connected gateway. Depending
        on which of the optional device_access_id, device_id and property_id parameters are given, the request covers
        the whole topology, one device access instance, one device or a single property. The flags control the level
        of detail in the gateway's response.

        :param device_access_id: Device access ID for which the description should be retrieved.
        :param device_id: Device ID for which the description should be retrieved. Note that device_access_id must be present too.
        :param property_id: Property ID for which the description should be retrieved. Note that device_access_id and device_id must be present too.
        :param flags: Flags to control level of detail of the response.
        :return: Returns three values. 1: Status of the operation, 2: the subject's id, 3: the description object.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_describe_frame(device_access_id, device_id, property_id, flags))
        return super().decode_description_frame(self.__receive_frame_until_commands(['DESCRIPTION', 'ERROR']))

    def find_properties(self, property_id: str) -> Tuple[SIStatus, str, int, List[str]]:
        """
        Retrieves the list of existing properties matching the given property ID in the form
        "<device access ID>.<device ID>.<property ID>". The wildcard character "*" is supported for the
        <device access ID> and <device ID> fields, so for example "*.*.3136" matches all properties with ID 3136 on
        any device reachable through any device access.

        :param property_id: The search wildcard ID.
        :return: Returns four values: 1: Status of the find operation, 2: the searched ID (including wildcard character), 3: the number of properties found,
                 4: List of the property IDs.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_find_properties_frame(property_id))
        return super().decode_properties_found_frame(self.__receive_frame_until_commands(['PROPERTIES FOUND', 'ERROR']))

    def read_property(self, property_id: str) -> Tuple[SIStatus, str, Optional[any]]:
        """
        Reads the actual value of the given property from the connected gateway.

        :param property_id: The ID of the property to read in the form '{device access ID}.{device ID}.{property ID}'.
        :return: Returns three values: 1: Status of the read operation, 2: the ID of the property read, 3: the value read.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_read_property_frame(property_id))
        return super().decode_property_read_frame(self.__receive_frame_until_commands(['PROPERTY READ', 'ERROR'])).to_tuple()

    def read_properties(self, property_ids: List[str]) -> List[SIPropertyReadResult]:
        """
        Reads the actual values of multiple properties at the same time from the connected gateway.

        :param property_ids: The IDs of the properties to read in the form '{device access ID}.{device ID}.{property ID}'.
        :return: Returns one value: 1: List of statuses and values of all read properties.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_read_properties_frame(property_ids))
        return super().decode_properties_read_frame(self.__receive_frame_until_commands(['PROPERTIES READ', 'ERROR']))

    def write_property(self, property_id: str, value: any = None, flags: SIWriteFlags = None) -> Tuple[SIStatus, str]:
        """
        Changes the actual value of the given property. The value parameter is optional because properties of the
        data type "Signal" carry no actual value - writing to them just triggers an action on the device.

        :param property_id: The ID of the property to write in the form '{device access ID}.{<device ID}.{<property ID}'.
        :param value: Optional value to write.
        :param flags: Write flags, See SIWriteFlags for details, if not provided the flags are not send by the client, and the gateway uses the default flags
                      (SIWriteFlags.PERMANENT).
        :return: Returns two values: 1: Status of the write operation, 2: the ID of the property written.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_write_property_frame(property_id, value, flags))
        return super().decode_property_written_frame(self.__receive_frame_until_commands(['PROPERTY WRITTEN', 'ERROR']))

    def read_datalog_properties(self, from_: datetime.datetime = None, to: datetime.datetime = None) -> Tuple[SIStatus, List[str]]:
        """
        Retrieves the list of IDs of all properties for which data is logged on the gateway. When a time window is
        given using from_ and to, only data inside that window is considered.

        :param from_: Optional date and time of the start of the time window to be considered.
        :param to: Optional date and time of the end of the time window to be considered.
        :return: Returns two values: 1: Status of the operation, 2: List of all properties for whom data is logged on the gateway in the optional time window.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_read_datalog_frame(None, from_, to, None))
        # The property list arrives in the body as one ID per line.
        status, _, _, parameters = super().decode_datalog_read_frame(self.__receive_frame_until_commands(['DATALOG READ', 'ERROR']))
        return status, parameters.splitlines()

    def read_datalog_csv(self, property_id: str, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> Tuple[SIStatus, str, int, str]:
        """
        Retrieves all or a subset of the logged data of the given property from the gateway.

        :param property_id: Global ID of the property for which the logged data should be retrieved. It has to be in the form '{device access ID}.{device ID}.{property ID}'.
        :param from_: Optional date and time from which the data has to be retrieved, defaults to the oldest value logged.
        :param to: Optional date and time to which the data has to be retrieved, defaults to the current time on the gateway.
        :param limit: Using this optional parameter you can limit the number of results retrieved in total.
        :return: Returns four values: 1: Status of the operation, 2: id of the property, 3: number of entries, 4: Properties data in CSV format whereas the first column is the
                 date and time in ISO 8601 extended format, and the second column contains the actual values.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_read_datalog_frame(property_id, from_, to, limit))
        return super().decode_datalog_read_frame(self.__receive_frame_until_commands(['DATALOG READ', 'ERROR']))

    def read_messages(self, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> Tuple[SIStatus, int, List[SIDeviceMessage]]:
        """
        Retrieves all or a subset of the stored messages sent by devices on all buses in the past from the gateway.

        :param from_: Optional date and time from which the messages have to be retrieved, defaults to the oldest message saved.
        :param to: Optional date and time to which the messages have to be retrieved, defaults to the current time on the gateway.
        :param limit: Using this optional parameter you can limit the number of messages retrieved in total.
        :return: Returns three values. 1: the status of the operation, 2: the number of messages, 3: the list of retrieved messages.
        :raises SIProtocolError: On a connection, protocol of framing error.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        self.__ws.send(super().encode_read_messages_frame(from_, to, limit))
        return super().decode_messages_read_frame(self.__receive_frame_until_commands(['MESSAGES READ', 'ERROR']))

    def disconnect(self) -> None:
        """
        Disconnects the client from the gateway.
        """
        self.__ensure_in_state(SIConnectionState.CONNECTED)
        # Flip the state first so concurrent calls see DISCONNECTED before the socket actually closes.
        self.__state = SIConnectionState.DISCONNECTED
        self.__ws.close()

    def __ensure_in_state(self, state: SIConnectionState) -> None:
        # Every public operation is valid in exactly one connection state; anything else is a protocol error.
        if self.__state != state:
            raise SIProtocolError("invalid client state")

    def __receive_frame_until_commands(self, commands: list) -> str:
        # Drop any frame (e.g. device message indications) whose command is not one of the expected answers.
        frame = self.__ws.recv()
        while super().peek_frame_command(frame) not in commands:
            frame = self.__ws.recv()
        return frame
class SIAsyncGatewayClientCallbacks:
    """
    Base class containing all callback methods that can be called by the SIAsyncGatewayClient. You can use this as your base class and register it using
    SIAsyncGatewayClient.set_callbacks().
    """

    def on_connected(self, access_level: SIAccessLevel, gateway_version: str) -> None:
        """
        This method is called once the connection to the gateway could be established and the user has been successfully authorized.

        :param access_level: Access level that was granted to the user during authorization.
        :param gateway_version: Version of the OpenStuder software running on the gateway.
        """
        pass

    def on_disconnected(self) -> None:
        """
        Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.
        """
        pass

    def on_error(self, reason) -> None:
        """
        Called on severe errors.

        :param reason: Exception that caused the erroneous behavior.
        """
        pass

    def on_enumerated(self, status: SIStatus, device_count: int) -> None:
        """
        Called when the enumeration operation started using enumerate() has completed on the gateway.

        :param status: Operation status.
        :param device_count: Number of devices present.
        """
        pass

    def on_description(self, status: SIStatus, id_: Optional[str], description: object) -> None:
        """
        Called when the gateway returned the description requested using the describe() method.

        :param status: Status of the operation.
        :param id_: Subject's ID.
        :param description: Description object.
        """
        pass

    def on_properties_found(self, status: SIStatus, id_: str, count: int, properties: List[str]) -> None:
        """
        Called when the gateway returned the list of found properties requested using the find_properties() method.

        :param status: Status of the find operation.
        :param id_: The searched ID (including wildcard character).
        :param count: The number of properties found.
        :param properties: List of the property IDs.
        """
        pass

    def on_property_read(self, status: SIStatus, property_id: str, value: Optional[any]) -> None:
        """
        Called when the property read operation started using read_property() has completed on the gateway.

        :param status: Status of the read operation.
        :param property_id: ID of the property read.
        :param value: The value read.
        """
        pass

    def on_properties_read(self, results: List[SIPropertyReadResult]) -> None:
        """
        Called when the multiple properties read operation started using read_properties() has completed on the gateway.

        :param results: List of all results of the operation.
        """
        pass

    def on_property_written(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the property write operation started using write_property() has completed on the gateway.

        :param status: Status of the write operation.
        :param property_id: ID of the property written.
        """
        pass

    def on_property_subscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.

        :param status: The status of the subscription.
        :param property_id: ID of the property.
        """
        pass

    def on_properties_subscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.

        :param statuses: The statuses of the individual subscriptions.
        """
        pass

    def on_property_unsubscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.

        :param status: The status of the unsubscription.
        :param property_id: ID of the property.
        """
        pass

    def on_properties_unsubscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.

        :param statuses: The statuses of the individual unsubscriptions.
        """
        pass

    def on_property_updated(self, property_id: str, value: any) -> None:
        """
        This callback is called whenever the gateway send a property update.

        :param property_id: ID of the updated property.
        :param value: The current value of the property.
        """
        pass

    def on_datalog_properties_read(self, status: SIStatus, properties: List[str]) -> None:
        """
        Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.

        :param status: Status of the operation.
        :param properties: List of the IDs of the properties for whom data is available in the data log.
        """
        pass

    def on_datalog_read_csv(self, status: SIStatus, property_id: str, count: int, values: str) -> None:
        """
        Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the method returns the data in CSV format suitable to
        be written to a file.

        :param status: Status of the operation.
        :param property_id: ID of the property.
        :param count: Number of entries.
        :param values: Properties data in CSV format whereas the first column is the date and time in ISO 8601 extended format and the second column contains the actual values.
        """
        pass

    def on_device_message(self, message: SIDeviceMessage) -> None:
        """
        This callback is called whenever the gateway send a device message indication.

        :param message: The device message received.
        """
        pass

    def on_messages_read(self, status: SIStatus, count: int, messages: List[SIDeviceMessage]) -> None:
        """
        Called when the gateway returned the status of the read messages operation using the read_messages() method.

        :param status: The status of the operation.
        :param count: Number of messages retrieved.
        :param messages: List of retrieved messages.
        """
        pass
class SIAsyncGatewayClient(_SIAbstractGatewayClient):
"""
Complete, asynchronous (non-blocking) OpenStuder gateway client.
This client uses an asynchronous model which has the disadvantage to be a bit harder to use than the synchronous version. The advantages are that long operations do not block
the main thread as all results are reported using callbacks, device message indications are supported and subscriptions to property changes are possible.
"""
def __init__(self):
    """
    Constructs a new asynchronous gateway client with all callbacks unset and the connection state DISCONNECTED.
    """
    super(SIAsyncGatewayClient, self).__init__()
    self.__state: SIConnectionState = SIConnectionState.DISCONNECTED
    self.__ws: Optional[websocket.WebSocketApp] = None
    self.__thread: Optional[Thread] = None
    self.__access_level: SIAccessLevel = SIAccessLevel.NONE
    self.__gateway_version: str = ''
    # Credentials are saved here by connect() so the WebSocket on-open handler can send the AUTHORIZE frame later.
    self.__user: Optional[str] = None
    self.__password: Optional[str] = None
    self.on_connected: Optional[Callable[[SIAccessLevel, str], None]] = None
    """
    This callback is called once the connection to the gateway could be established and the user has been successfully authorized.
    The callback takes two arguments. 1: the access level that was granted to the user during authorization, 2: the version of the OpenStuder software running on the gateway.
    """
    self.on_disconnected: Optional[Callable[[], None]] = None
    """
    Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.
    This callback has no parameters.
    """
    self.on_error: Optional[Callable[[Exception], None]] = None
    """
    Called on severe errors.
    The single parameter passed to the callback is the exception that caused the erroneous behavior.
    """
    self.on_enumerated: Optional[Callable[[SIStatus, int], None]] = None
    """
    Called when the enumeration operation started using enumerate() has completed on the gateway.
    The callback takes two arguments. 1: operation status, 2: the number of devices present.
    """
    self.on_description: Optional[Callable[[SIStatus, Optional[str], object], None]] = None
    """
    Called when the gateway returned the description requested using the describe() method.
    The callback takes three parameters: 1: Status of the operation, 2: the subject's ID, 3: the description object.
    """
    self.on_properties_found: Optional[Callable[[SIStatus, str, int, List[str]], None]] = None
    """
    Called when the gateway returned the list of found properties requested using the find_properties() method.
    The callback takes four parameters: 1: Status of the find operation, 2: the searched ID (including wildcard character), 3: the number of properties found,
    4: List of the property IDs.
    """
    self.on_property_read: Optional[Callable[[SIStatus, str, Optional[any]], None]] = None
    """
    Called when the property read operation started using read_property() has completed on the gateway.
    The callback takes three parameters: 1: Status of the read operation, 2: the ID of the property read, 3: the value read.
    """
    self.on_properties_read: Optional[Callable[[List[SIPropertyReadResult]], None]] = None
    """
    Called when the multiple properties read operation started using read_properties() has completed on the gateway.
    The callback takes one parameters: 1: List of all results of the operation.
    """
    self.on_property_written: Optional[Callable[[SIStatus, str], None]] = None
    """
    Called when the property write operation started using write_property() has completed on the gateway.
    The callback takes two parameters: 1: Status of the write operation, 2: the ID of the property written.
    """
    self.on_property_subscribed: Optional[Callable[[SIStatus, str], None]] = None
    """
    Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.
    The callback takes two parameters: 1: The status of the subscription, 2: The ID of the property.
    """
    self.on_properties_subscribed: Optional[Callable[[List[SIPropertySubscriptionResult]], None]] = None
    """
    Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.
    The callback takes one parameter: 1: List of statuses of individual subscription requests.
    """
    self.on_property_unsubscribed: Optional[Callable[[SIStatus, str], None]] = None
    """
    Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.
    The callback takes two parameters: 1: The status of the unsubscription, 2: The ID of the property.
    """
    self.on_properties_unsubscribed: Optional[Callable[[List[SIPropertySubscriptionResult]], None]] = None
    """
    Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.
    The callback takes one parameter: 1: List of statuses of individual unsubscription requests.
    """
    self.on_property_updated: Optional[Callable[[str, any], None]] = None
    """
    This callback is called whenever the gateway send a property update.
    The callback takes two parameters: 1: the ID of the property that has updated, 2: the actual value.
    """
    self.on_datalog_properties_read: Optional[Callable[[SIStatus, List[str]], None]] = None
    """
    Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.
    The callback takes 2 parameters: 1: Status of the operation, 2: List of the IDs of the properties for whom data is available in the data log.
    """
    self.on_datalog_read_csv: Optional[Callable[[SIStatus, str, int, str], None]] = None
    """
    Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the callback returns the data in CSV format suitable to
    be written to a file.
    The callback takes four parameters: 1: Status of the operation, 2: ID of the property, 3: number of entries, 4: properties data in CSV format whereas the first column is
    the date and time in ISO 8601 extended format and the second column contains the actual values.
    """
    self.on_device_message: Optional[Callable[[SIDeviceMessage], None]] = None
    """
    This callback is called whenever the gateway send a device message indication.
    The callback takes one parameter, the device message object.
    """
    self.on_messages_read: Optional[Callable[[SIStatus, int, List[SIDeviceMessage]], None]] = None
    """
    Called when the gateway returned the status of the read messages operation using the read_messages() method.
    The callback takes three parameters: 1: the status of the operation, 2: the number of messages retrieved, 3: the list of retrieved messages.
    """
def connect(self, host: str, port: int = 1987, user: str = None, password: str = None, background: bool = True) -> None:
    """
    Establishes the WebSocket connection to the OpenStuder gateway and runs the user authorization process once the
    connection is open. This method returns immediately and does not block the current thread.
    The outcome of the connection attempt is reported either by the on_connected() callback on success or the
    on_error() callback if the connection could not be established or the authorisation for the given user was
    rejected by the gateway.

    :param host: Hostname or IP address of the OpenStuder gateway to connect to.
    :param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
    :param user: Username send to the gateway used for authorization.
    :param password: Password send to the gateway used for authorization.
    :param background: If true, the handling of the WebSocket connection is done in the background, if false the current thread is took over.
    :raises SIProtocolError: If there was an error initiating the WebSocket connection.
    """
    # Ensure that the client is in the DISCONNECTED state.
    self.__ensure_in_state(SIConnectionState.DISCONNECTED)

    # Save the credentials: they are sent by the WebSocket on-open handler once the connection is established.
    self.__user = user
    self.__password = password

    # Connect to WebSocket server.
    self.__state = SIConnectionState.CONNECTING
    self.__ws = websocket.WebSocketApp('ws://{host}:{port}'.format(host=host, port=port),
                                       on_open=self.__on_open,
                                       on_message=self.__on_message,
                                       on_error=self.__on_error,
                                       on_close=self.__on_close
                                       )

    # TODO: Start connection timeout.

    # If background mode is selected, start a daemon thread for the connection handling, otherwise take over the current thread.
    if background:
        # daemon=True at construction time replaces Thread.setDaemon(), which is deprecated since Python 3.10.
        self.__thread = Thread(target=self.__ws.run_forever, daemon=True)
        self.__thread.start()
    else:
        self.__ws.run_forever()
def set_callbacks(self, callbacks: SIAsyncGatewayClientCallbacks) -> None:
    """
    Installs every callback of the passed callback object at once, so you do not have to assign each callback
    attribute individually.

    :param callbacks: Object derived from SIAsyncGatewayClientCallbacks to be used for all callbacks.
    """
    # Objects that are not derived from the callbacks base class are silently ignored (same contract as before).
    if not isinstance(callbacks, SIAsyncGatewayClientCallbacks):
        return
    for callback_name in ('on_connected', 'on_disconnected', 'on_error', 'on_enumerated', 'on_description',
                          'on_properties_found', 'on_property_read', 'on_properties_read', 'on_property_written',
                          'on_property_subscribed', 'on_properties_subscribed', 'on_property_unsubscribed',
                          'on_properties_unsubscribed', 'on_property_updated', 'on_datalog_properties_read',
                          'on_datalog_read_csv', 'on_device_message', 'on_messages_read'):
        setattr(self, callback_name, getattr(callbacks, callback_name))
def state(self) -> SIConnectionState:
"""
Returns the current state of the client. See **SIConnectionState** for details.
:return: Current state of the client.
"""
return self.__state
def access_level(self) -> SIAccessLevel:
"""
Return the access level the client has gained on the gateway connected. See **SIAccessLevel** for details.
:return: Access level granted to client.
"""
return self.__access_level
def gateway_version(self) -> str:
    """
    Returns the software version of the OpenStuder gateway this client is connected to.

    :return: Version of the gateway software.
    """
    version = self.__gateway_version
    return version
def enumerate(self) -> None:
    """
    Asks the gateway to rescan every configured and functional device access driver,
    picking up new devices and dropping unresponsive ones. The outcome (status and
    device count) is delivered via the on_enumerated() callback.

    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the ENUMERATE frame.
    frame = super(SIAsyncGatewayClient, self).encode_enumerate_frame()
    self.__ws.send(frame)
def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> None:
    """
    Retrieves information about the devices and properties available on the connected
    gateway. Depending on which optional parameters are given, the description covers
    the whole topology, one device access instance, one device or one property; flags
    select the level of detail. The result is delivered via the on_description() callback.

    :param device_access_id: Device access ID for which the description should be retrieved.
    :param device_id: Device ID for which the description should be retrieved; requires device_access_id too.
    :param property_id: Property ID for which the description should be retrieved; requires device_access_id and device_id too.
    :param flags: Flags to control the level of detail of the response.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the DESCRIBE frame.
    frame = super(SIAsyncGatewayClient, self).encode_describe_frame(device_access_id, device_id, property_id, flags)
    self.__ws.send(frame)
def find_properties(self, property_id: str) -> None:
    """
    Retrieves the list of existing properties matching the given wildcard property ID of the
    form "<device access ID>.<device ID>.<property ID>". The "*" wildcard is supported for
    the <device access ID> and <device ID> fields: "*.inv.3136" matches property 3136 on
    device "inv" behind any device access, "demo.*.3136" matches property 3136 on any device
    behind "demo", and "*.*.3136" matches property 3136 anywhere.
    The result is delivered via the on_properties_found() callback.

    :param property_id: The search wildcard ID.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the FIND PROPERTIES frame.
    frame = super(SIAsyncGatewayClient, self).encode_find_properties_frame(property_id)
    self.__ws.send(frame)
def read_property(self, property_id: str) -> None:
    """
    Requests the current value of the given property from the connected gateway.
    The status of the read and the value are delivered via the on_property_read() callback.

    :param property_id: ID of the property to read, in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the READ PROPERTY frame.
    frame = super(SIAsyncGatewayClient, self).encode_read_property_frame(property_id)
    self.__ws.send(frame)
def read_properties(self, property_ids: List[str]) -> None:
    """
    Requests the current values of several properties at once from the connected gateway.
    The statuses and values are delivered via the on_properties_read() callback.

    :param property_ids: IDs of the properties to read, each in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the READ PROPERTIES frame.
    frame = super(SIAsyncGatewayClient, self).encode_read_properties_frame(property_ids)
    self.__ws.send(frame)
def write_property(self, property_id: str, value: any = None, flags: SIWriteFlags = None) -> None:
    """
    Changes the actual value of the given property. The value parameter is optional because
    properties of data type "Signal" carry no value: writing them just triggers an action on
    the device. The outcome is delivered via the on_property_written() callback.

    :param property_id: ID of the property to write, in the form '{device access ID}.{device ID}.{property ID}'.
    :param value: Optional value to write.
    :param flags: Write flags, see SIWriteFlags for details; if omitted the flags are not sent
                  and the gateway uses its default (SIWriteFlags.PERMANENT).
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the WRITE PROPERTY frame.
    frame = super(SIAsyncGatewayClient, self).encode_write_property_frame(property_id, value, flags)
    self.__ws.send(frame)
def subscribe_to_property(self, property_id: str) -> None:
    """
    Subscribes to value updates of the given property on the connected gateway.
    The subscription status is delivered via the on_property_subscribed() callback.

    :param property_id: ID of the property to subscribe to, in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the SUBSCRIBE PROPERTY frame.
    frame = super(SIAsyncGatewayClient, self).encode_subscribe_property_frame(property_id)
    self.__ws.send(frame)
def subscribe_to_properties(self, property_ids: List[str]) -> None:
    """
    Subscribes to value updates of several properties at once on the connected gateway.
    The subscription statuses are delivered via the on_properties_subscribed() callback.

    :param property_ids: IDs of the properties to subscribe to, each in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the SUBSCRIBE PROPERTIES frame.
    frame = super(SIAsyncGatewayClient, self).encode_subscribe_properties_frame(property_ids)
    self.__ws.send(frame)
def unsubscribe_from_property(self, property_id: str) -> None:
    """
    Cancels the subscription to the given property on the connected gateway.
    The outcome is delivered via the on_property_unsubscribed() callback.

    :param property_id: ID of the property to unsubscribe from, in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the UNSUBSCRIBE PROPERTY frame.
    frame = super(SIAsyncGatewayClient, self).encode_unsubscribe_property_frame(property_id)
    self.__ws.send(frame)
def unsubscribe_from_properties(self, property_ids: List[str]) -> None:
    """
    Cancels the subscriptions to several properties at once on the connected gateway.
    The outcomes are delivered via the on_properties_unsubscribed() callback.

    :param property_ids: IDs of the properties to unsubscribe from, each in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the UNSUBSCRIBE PROPERTIES frame.
    frame = super(SIAsyncGatewayClient, self).encode_unsubscribe_properties_frame(property_ids)
    self.__ws.send(frame)
def read_datalog_properties(self, from_: datetime.datetime = None, to: datetime.datetime = None) -> None:
    """
    Retrieves the IDs of all properties for which logged data exists on the gateway,
    optionally restricted to the time window [from_, to]. The status and the property
    list are delivered via the on_datalog_properties_read() callback.

    :param from_: Optional start of the time window to consider.
    :param to: Optional end of the time window to consider.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # A READ DATALOG frame without a property ID requests the property list.
    frame = super(SIAsyncGatewayClient, self).encode_read_datalog_frame(None, from_, to, None)
    self.__ws.send(frame)
def read_datalog(self, property_id: str, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> None:
    """
    Retrieves all or a subset of the logged data of one property from the gateway.
    The status and the values are delivered via the on_datalog_read_csv() callback.

    :param property_id: Global ID of the property, in the form '{device access ID}.{device ID}.{property ID}'.
    :param from_: Optional start date/time; defaults to the oldest value logged.
    :param to: Optional end date/time; defaults to the current time on the gateway.
    :param limit: Optional cap on the total number of results retrieved.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the READ DATALOG frame.
    frame = super(SIAsyncGatewayClient, self).encode_read_datalog_frame(property_id, from_, to, limit)
    self.__ws.send(frame)
def read_messages(self, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> None:
    """
    Retrieves all or a subset of the device messages stored on the gateway.
    The status and the messages are delivered via the on_messages_read() callback.

    :param from_: Optional start date/time; defaults to the oldest message saved.
    :param to: Optional end date/time; defaults to the current time on the gateway.
    :param limit: Optional cap on the total number of messages retrieved.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only valid once fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build and transmit the READ MESSAGES frame.
    frame = super(SIAsyncGatewayClient, self).encode_read_messages_frame(from_, to, limit)
    self.__ws.send(frame)
def disconnect(self) -> None:
    """
    Disconnects the client from the gateway by closing the underlying WebSocket.

    :raises SIProtocolError: If the client is not currently connected.
    """
    # Disconnecting only makes sense from the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    self.__ws.close()
def __ensure_in_state(self, state: SIConnectionState) -> None:
    """
    Guard helper: raises unless the client is currently in the given state.

    :param state: Required connection state.
    :raises SIProtocolError: If the client is in any other state.
    """
    if self.__state == state:
        return
    raise SIProtocolError("invalid client state")
def __on_open(self, ws) -> None:
    """
    WebSocket open handler: starts the authorization handshake with the gateway.

    :param ws: WebSocket instance (unused, supplied by the WebSocket library).
    """
    self.__state = SIConnectionState.AUTHORIZING
    # Authorize with credentials when both user and password are known, anonymously otherwise.
    if self.__user is not None and self.__password is not None:
        frame = super(SIAsyncGatewayClient, self).encode_authorize_frame_with_credentials(self.__user, self.__password)
    else:
        frame = super(SIAsyncGatewayClient, self).encode_authorize_frame_without_credentials()
    self.__ws.send(frame)
def __on_message(self, ws, frame: str) -> None:
    """
    WebSocket message handler: decodes the received frame and dispatches it to the
    matching callback. While authorizing, only AUTHORIZED frames are expected; once
    connected, every other command is handled. Protocol errors are reported via the
    on_error() callback, and a failure during authorization closes the connection.

    :param ws: WebSocket instance (unused, supplied by the WebSocket library).
    :param frame: Raw text frame received from the gateway.
    """
    # Determine the actual command.
    command = super(SIAsyncGatewayClient, self).peek_frame_command(frame)
    try:
        # In AUTHORIZING state we only handle AUTHORIZED messages.
        if self.__state == SIConnectionState.AUTHORIZING:
            self.__access_level, self.__gateway_version = super(SIAsyncGatewayClient, self).decode_authorized_frame(frame)
            # Change state to CONNECTED.
            self.__state = SIConnectionState.CONNECTED
            # Call callback if present.
            if callable(self.on_connected):
                self.on_connected(self.__access_level, self.__gateway_version)
        # In CONNECTED state we handle all messages except the AUTHORIZED message.
        else:
            if command == 'ERROR':
                if callable(self.on_error):
                    _, headers, _ = super(SIAsyncGatewayClient, self).decode_frame(frame)
                    self.on_error(SIProtocolError(headers['reason']))
            elif command == 'ENUMERATED':
                status, device_count = super(SIAsyncGatewayClient, self).decode_enumerated_frame(frame)
                if callable(self.on_enumerated):
                    self.on_enumerated(status, device_count)
            elif command == 'DESCRIPTION':
                status, id_, description = super(SIAsyncGatewayClient, self).decode_description_frame(frame)
                if callable(self.on_description):
                    self.on_description(status, id_, description)
            elif command == 'PROPERTIES FOUND':
                # Renamed local from "list" to avoid shadowing the builtin.
                status, id_, count, properties = super(SIAsyncGatewayClient, self).decode_properties_found_frame(frame)
                if callable(self.on_properties_found):
                    self.on_properties_found(status, id_, count, properties)
            elif command == 'PROPERTY READ':
                result = super(SIAsyncGatewayClient, self).decode_property_read_frame(frame)
                if callable(self.on_property_read):
                    self.on_property_read(result.status, result.id, result.value)
            elif command == 'PROPERTIES READ':
                results = super(SIAsyncGatewayClient, self).decode_properties_read_frame(frame)
                if callable(self.on_properties_read):
                    self.on_properties_read(results)
            elif command == 'PROPERTY WRITTEN':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_written_frame(frame)
                if callable(self.on_property_written):
                    self.on_property_written(status, id_)
            elif command == 'PROPERTY SUBSCRIBED':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_subscribed_frame(frame)
                if callable(self.on_property_subscribed):
                    self.on_property_subscribed(status, id_)
            elif command == 'PROPERTIES SUBSCRIBED':
                statuses = super(SIAsyncGatewayClient, self).decode_properties_subscribed_frame(frame)
                if callable(self.on_properties_subscribed):
                    self.on_properties_subscribed(statuses)
            elif command == 'PROPERTY UNSUBSCRIBED':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_unsubscribed_frame(frame)
                if callable(self.on_property_unsubscribed):
                    self.on_property_unsubscribed(status, id_)
            elif command == 'PROPERTIES UNSUBSCRIBED':
                statuses = super(SIAsyncGatewayClient, self).decode_properties_unsubscribed_frame(frame)
                if callable(self.on_properties_unsubscribed):
                    self.on_properties_unsubscribed(statuses)
            elif command == 'PROPERTY UPDATE':
                id_, value = super(SIAsyncGatewayClient, self).decode_property_update_frame(frame)
                if callable(self.on_property_updated):
                    self.on_property_updated(id_, value)
            elif command == 'DATALOG READ':
                status, id_, count, values = super(SIAsyncGatewayClient, self).decode_datalog_read_frame(frame)
                # Without a property ID the frame answers a datalog property-list
                # request; with one it carries CSV values for that property.
                if id_ is None:
                    if callable(self.on_datalog_properties_read):
                        self.on_datalog_properties_read(status, values.splitlines())
                else:
                    if callable(self.on_datalog_read_csv):
                        self.on_datalog_read_csv(status, id_, count, values)
            elif command == 'DEVICE MESSAGE':
                message = super(SIAsyncGatewayClient, self).decode_device_message_frame(frame)
                if callable(self.on_device_message):
                    self.on_device_message(message)
            elif command == 'MESSAGES READ':
                status, count, messages = super(SIAsyncGatewayClient, self).decode_messages_read_frame(frame)
                if callable(self.on_messages_read):
                    self.on_messages_read(status, count, messages)
            else:
                if callable(self.on_error):
                    self.on_error(SIProtocolError('unsupported frame command: {command}'.format(command=command)))
    except SIProtocolError as error:
        if callable(self.on_error):
            self.on_error(error)
        # A failure while authorizing is fatal: drop the connection.
        if self.__state == SIConnectionState.AUTHORIZING:
            self.__ws.close()
            self.__state = SIConnectionState.DISCONNECTED
def __on_error(self, ws, error: Exception) -> None:
    """
    WebSocket error handler: forwards the error to the on_error() callback.

    :param ws: WebSocket instance (unused, supplied by the WebSocket library).
    :param error: Exception raised by the WebSocket layer.
    """
    if callable(self.on_error):
        # BUGFIX: error.args is not guaranteed to have two elements (e.g. an
        # exception constructed with a single message), which made the original
        # "error.args[1]" raise IndexError inside the error handler itself.
        # Fall back to the exception's string form in that case.
        message = error.args[1] if len(error.args) > 1 else str(error)
        self.on_error(SIProtocolError(message))
def __on_close(self, ws) -> None:
    """
    WebSocket close handler: resets the client state and notifies via on_disconnected().

    :param ws: WebSocket instance (unused, supplied by the WebSocket library).
    """
    # Reset connection state and revoke the granted access level.
    self.__state = SIConnectionState.DISCONNECTED
    self.__access_level = SIAccessLevel.NONE
    if callable(self.on_disconnected):
        self.on_disconnected()
    # Wait for the background WebSocket thread to terminate.
    self.__thread.join()
| 81,938 | 21,428 |
# jsb/rest/client.py
#
#
""" Rest Client class """
## jsb imports
from jsb.utils.url import geturl4, posturl, deleteurl, useragent
from jsb.utils.generic import toenc
from jsb.utils.exception import handle_exception, exceptionmsg
from jsb.utils.locking import lockdec
from jsb.utils.lazydict import LazyDict
from jsb.imports import getjson
json = getjson()
## basic imports
from urllib2 import HTTPError, URLError
from httplib import InvalidURL
from urlparse import urlparse
import socket
import asynchat
import urllib
import sys
import thread
import re
import asyncore
import time
import logging
## defines
# Module-wide lock guarding REST client critical sections; applied to methods
# via the @locked decorator produced by lockdec().
restlock = thread.allocate_lock()
locked = lockdec(restlock)
## RestResult class
class RestResult(LazyDict):
    """Outcome of a single REST call: target url, client name, decoded data,
    error information and the HTTP status/reason."""
    def __init__(self, url="", name=""):
        LazyDict.__init__(self)
        self.url = url        # url the request was sent to
        self.name = name      # optional client name
        self.data = None      # decoded JSON payload on success
        self.error = None     # error text or HTTP status on failure
        self.status = None    # HTTP status code
        self.reason = ""      # HTTP reason phrase
## RestClient class
class RestClient(object):
""" Provide a REST client that works in sync mode. """
def __init__(self, url, keyfile=None, certfile=None, port=None):
if not url.endswith('/'): url += '/'
try:
u = urlparse(url)
splitted = u[1].split(':')
if len(splitted) == 2: host, port = splitted
else:
host = splitted[0]
port = port or 9999
path = u[2]
except Exception, ex: raise
self.host = host
try: self.ip = socket.gethostbyname(self.host)
except Exception, ex: handle_exception()
self.path = path
self.port = port
self.url = url
self.keyfile = keyfile
self.certfile = certfile
self.callbacks = []
def addcb(self, callback):
""" add a callback. """
if not callback: return
self.callbacks.append(callback)
logging.debug('rest.client - added callback %s' % str(callback))
return self
def delcb(self, callback):
""" delete callback. """
try:
del self.callbacks[callback]
logging.debug('rest.client - deleted callback %s' % str(callback))
except ValueError: pass
def do(self, func, url, *args, **kwargs):
""" perform a rest request. """
result = RestResult(url)
try:
logging.info("rest.client - %s - calling %s" % (url, str(func)))
res = func(url, {}, kwargs, self.keyfile, self.certfile, self.port)
result.status = res.status
result.reason = res.reason
if result.status >= 400: result.error = result.status
else: result.error = None
if result.status == 200:
r = res.read()
result.data = json.loads(r)
else: result.data = None
logging.info("rest.client - %s - result: %s" % (url, str(result)))
except Exception, ex:
result.error = str(ex)
result.data = None
for cb in self.callbacks:
try:
cb(self, result)
logging.info('rest.client - %s - called callback %s' % (url, str(cb)))
except Exception, ex:
handle_exception()
return result
def post(self, *args, **kwargs):
""" do a POST request. """
return self.do(posturl, self.url, *args, **kwargs)
def add(self, *args, **kwargs):
""" add an REST item. """
return self.do(posturl, self.url, *args, **kwargs)
def delete(self, nr=None):
""" delete a REST item. """
if nr: return self.do(deleteurl, self.url + '/' + str(nr))
else: return self.do(deleteurl, self.url)
def get(self, nr=None):
""" get a REST item. """
if not nr: return self.do(geturl4, self.url)
else: return self.do(geturl4, self.url + '/' + str(nr))
## RestClientAsync class
class RestClientAsync(RestClient, asynchat.async_chat):
""" Async REST client. """
def __init__(self, url, name=""):
RestClient.__init__(self, url)
asynchat.async_chat.__init__(self)
self.set_terminator("\r\n\r\n")
self.reading_headers = True
self.error = None
self.buffer = ''
self.name = name or self.url
self.headers = {}
self.status = None
def handle_error(self):
""" take care of errors. """
exctype, excvalue, tb = sys.exc_info()
if exctype == socket.error:
try:
errno, errtxt = excvalue
if errno in [11, 35, 9]:
logging.error("res.client - %s - %s %s" % (self.url, errno, errtxt))
return
except ValueError: pass
self.error = str(excvalue)
else:
logging.error("%s - %s" % (self.name, exceptionmsg()))
self.error = exceptionmsg()
self.buffer = ''
result = RestResult(self.url, self.name)
result.error = self.error
result.data = None
for cb in self.callbacks:
try:
cb(self, result)
logging.info('rest.client - %s - called callback %s' % (url, str(cb)))
except Exception, ex: handle_exception()
self.close()
def handle_expt(self):
""" handle an exception. """
handle_exception()
def handle_connect(self):
""" called after succesfull connect. """
logging.info('rest.client - %s - connected %s' % (self.url, str(self)))
def start(self):
""" start the client loop. """
assert(self.host)
assert(int(self.port))
try:
logging.info('rest.client - %s - starting client' % self.url)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((self.ip, int(self.port)))
except socket.error, ex:
self.error = str(ex)
try:
self.connect((self.ip, int(self.port)))
except socket.error, ex: self.error = str(ex)
except Exception, ex: self.error = str(ex)
if self.error: self.warn("rest.client - %s - can't start %s" % (self.url, self.error))
else: return True
@locked
def found_terminator(self):
""" called when terminator is found. """
logging.info('rest.client - %s - found terminator' % self.url)
if self.reading_headers:
self.reading_headers = False
try:
self.headers = self.buffer.split('\r\n')
self.status = int(self.headers[0].split()[1])
except (ValueError, IndexError):
logging.warn("rest.client - %s - can't parse headers %s" % (self.url, self.headers))
return
self.set_terminator(None)
self.buffer = ''
logging.info('rest.client - %s - headers: %s' % (self.url, self.headers))
def collect_incoming_data(self, data):
""" aggregate seperate data chunks. """
self.buffer = self.buffer + data
def handle_close(self):
""" called on connection close. """
self.reading_headers = False
self.handle_incoming()
logging.info('rest.client - %s - closed' % self.url)
self.close()
def handle_incoming(self):
""" handle incoming data. """
logging.info("rest.client - %s - incoming: %s" % (self.url, self.buffer))
if not self.reading_headers:
result = RestResult(self.url, self.name)
if self.status >= 400:
logging.warn('rest.client - %s - error status: %s' % (self.url, self.status))
result.error = self.status
result.data = None
elif self.error:
result.error = self.error
result.data = None
elif self.buffer == "":
result.data = ""
result.error = None
else:
try:
res = json.loads(self.buffer)
if not res:
self.buffer = ''
return
result.data = res
result.error = None
except ValueError, ex:
logging.info("rest.client - %s - can't decode %s" % (self.url, self.buffer))
result.error = str(ex)
except Exception, ex:
logging.error("rest.client - %s - %s" % (self.url, exceptionmsg()))
result.error = exceptionmsg()
result.data = None
for cb in self.callbacks:
try:
cb(self, result)
logging.info('rest.client - %s - called callback %s' % (self.url, str(cb)))
except Exception, ex: handle_exception()
self.buffer = ''
@locked
def dorequest(self, method, path, postdata={}, headers={}):
if postdata: postdata = urllib.urlencode(postdata)
if headers:
if not headers.has_key('Content-Length'): headers['Content-Length'] = len(postdata)
headerstxt = ""
for i,j in headers.iteritems(): headerstxt += "%s: %s\r\n" % (i.lower(), j)
else: headerstxt = ""
if method == 'POST': s = toenc("%s %s HTTP/1.0\r\n%s\r\n%s\r\n\r\n" % (method, path, headerstxt, postdata), 'ascii')
else: s = toenc("%s %s HTTP/1.0\r\n\r\n" % (method, path), 'ascii')
if self.start():
logging.info('rest.client - %s - sending %s' % (self.url, s))
self.push(s)
def sendpost(self, postdata):
headers = {'Content-Type': 'application/x-www-form-urlencoded', \
'Accept': 'text/plain; text/html', 'User-Agent': useragent()}
self.dorequest('POST', self.path, postdata, headers)
def sendget(self):
""" send a GET request. """
self.dorequest('GET', self.path)
def post(self, *args, **kwargs):
""" do a POST request. """
self.sendpost(kwargs)
def get(self):
""" call GET request. """
self.sendget()
| 10,172 | 2,975 |
import timeit
W = 32 #Number of bits in word
M = 1 << W  # 2**32
FF = M - 1 #0xFFFFFFFF (for performing addition mod 2**32)
#Constants from SHA256 definition: cube-root-derived round constants K[0..63]
K_t = (0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2)
#Initial values for compression func (square-root-derived H[0..7])
H_t = (0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19)
#Block Padding
# NOTE(review): this table appears unused by the code visible here — Pad()
# builds its padding bytes inline; confirm before removing.
padding = (
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
# 32-bit bitwise rotate right
def RR(x, b):
    """Rotate the W-bit word x right by b bits, masked back to W bits."""
    rotated = (x >> b) | (x << (W - b))
    return rotated & FF
# Pads a message and converts to byte array
def Pad(W):
    """Pad the ASCII string W per SHA-256: append 0x80, zero bytes, and the
    64-bit big-endian bit length so the total length is a multiple of 64."""
    tail = len(W) % 64
    bit_len = (len(W) << 3).to_bytes(8, 'big')       # message length in bits
    # Pad to 64-byte alignment; an extra block is needed when tail >= 56.
    zeros = 55 - tail if tail < 56 else 119 - tail
    return bytes(W, 'ascii') + b'\x80' + b'\x00' * zeros + bit_len
# Compression Function
def Sha256CF(Wt, Kt, A, B, C, D, E, F, G, H):
    """One SHA-256 round: mixes schedule word Wt and constant Kt into the
    eight-word state and returns the rotated new state."""
    choice = (E & F) ^ (~E & G)                       # Ch
    majority = (A & B) ^ (A & C) ^ (B & C)            # Maj
    sigma0 = RR(A, 2) ^ RR(A, 13) ^ RR(A, 22)         # Sigma_0
    sigma1 = RR(E, 6) ^ RR(E, 11) ^ RR(E, 25)         # Sigma_1
    T1 = H + sigma1 + choice + Wt + Kt
    return (T1 + sigma0 + majority) & FF, A, B, C, (D + T1) & FF, E, F, G
def Sha256(M):
    '''
    Compute the SHA-256 digest of an input string.

    M: the string to process
    return: the 32-byte binary digest
    '''
    padded = Pad(M)             # length is now a multiple of 64 bytes
    digest = list(H_t)          # running hash state: 8 32-bit words
    for base in range(0, len(padded), 64):
        chunk = padded[base:base + 64]
        # Message schedule: 16 words from the chunk, expanded to 64.
        sched = [int.from_bytes(chunk[i:i + 4], 'big') for i in range(0, 64, 4)] + [0] * 48
        for i in range(16, 64):
            s0 = RR(sched[i - 15], 7) ^ RR(sched[i - 15], 18) ^ (sched[i - 15] >> 3)
            s1 = RR(sched[i - 2], 17) ^ RR(sched[i - 2], 19) ^ (sched[i - 2] >> 10)
            sched[i] = (sched[i - 16] + s0 + sched[i - 7] + s1) & FF
        # 64 compression rounds, then feed the result forward into the state.
        A, B, C, D, E, F, G, H = digest
        for i in range(64):
            A, B, C, D, E, F, G, H = Sha256CF(sched[i], K_t[i], A, B, C, D, E, F, G, H)
        digest = [(x + y) & FF for x, y in zip(digest, (A, B, C, D, E, F, G, H))]
    return b''.join(word.to_bytes(4, 'big') for word in digest)
if __name__ == "__main__":
    # Simple benchmark followed by an interactive hashing loop.
    print('\n' * 10)
    print("Running Benchmark for software\n")
    elapsed = timeit.timeit("Sha256('Bitcoin Miner!')", number=10000, globals=globals())
    print(f'Python Software Encryption Speed: {10000 / elapsed} H/s\n')
    while True:
        msg = input("Enter msg:")
        digest = Sha256(msg)
        print(''.join('{:02x}'.format(i) for i in digest))
# Reads three segment lengths and classifies the triangle they form (if any).
lado1 = float(input("Digite um numero: "))
lado2 = float(input("Digite outro numero: "))
lado3 = float(input("Digite outro numero: "))
# Triangle inequality: each side must be shorter than the sum of the other two.
forma_triangulo = lado1 < lado2 + lado3 and lado2 < lado1 + lado3 and lado3 < lado1 + lado2
if forma_triangulo:
    print("Os seguimentos acima é um triangulo", end='')
    if lado1 == lado2 == lado3:
        print("equilatero")
    elif lado1 != lado2 and lado2 != lado3 and lado3 != lado1:
        print("escaleno")
    else:
        print("isoceles")
else:
    print("Os seguimentos acima n é um triangulo")
#### Purpose:
# Parse D*R files.
# Individual envelope formats are handled elsewhere (dxr_envelope etc.).
import struct
from shockabsorber.model.sections import Section, SectionMap, AssociationTable
from shockabsorber.model.cast import CastLibrary, CastLibraryTable
from shockabsorber.model.movie import Movie
from shockabsorber.loader.util import SeqBuffer, rev
from . import script_parser
from . import score_parser
import shockabsorber.loader.dxr_envelope
import shockabsorber.loader.dcr_envelope
class LoaderContext: #------------------------------
    """Carries per-file decoding parameters: the file's tag and whether its
    multi-byte fields are little-endian."""
    def __init__(self, file_tag, is_little_endian):
        self.file_tag = file_tag              # 4CC identifying the file format
        self.is_little_endian = is_little_endian  # byte order of section data
#--------------------------------------------------
def parse_assoc_table(blob, loader_context):
    """Decode a 'KEY*' section into an AssociationTable, linking each owned
    section either to a cast library or to an owning cast member."""
    buf = SeqBuffer(blob, loader_context.is_little_endian)
    [v1, v2, entry_count, valid_count] = buf.unpack('>HHii', '<HHii')
    print("KEY* header: %s" % [v1, v2, entry_count, valid_count])
    # v1 = table start offset, v2 = table entry size?
    atable = AssociationTable()
    for entry_nr in range(valid_count):
        [owned_section_id, composite_id] = buf.unpack('>ii', '<ii')
        tag = buf.readTag()
        # The composite id packs the castlib association in the high 16 bits
        # and the owner section in the low 16 bits.
        castlib_assoc_id = composite_id >> 16
        owner_section_id = composite_id & 0xFFFF
        print("DB| KEY* entry #%d: %s" % (entry_nr, [tag, owned_section_id, castlib_assoc_id, owner_section_id]))
        # Owner id 1024 marks a cast-library-level section; anything else is
        # media owned by an individual cast member.
        if owner_section_id == 1024:
            atable.add_library_section(castlib_assoc_id, owned_section_id, tag)
        else:
            atable.add_cast_media(owner_section_id, owned_section_id, tag)
    return atable
def parse_cast_table_section(blob, loader_context):
    """Parse a cast table section: a packed run of big-endian int32 values,
    read until the buffer is exhausted."""
    # NOTE(review): loader_context is accepted but unused here, unlike
    # parse_assoc_table — confirm the section is always big-endian.
    buf = SeqBuffer(blob)
    entries = []
    while not buf.at_eof():
        (section_nr,) = buf.unpack('>i')
        entries.append(section_nr)
    return entries
#--------------------------------------------------
class CastMember: #------------------------------
    """One cast-library member: a common header (type, name, attribute blobs),
    type-specific cast data, and media sections attached later by tag."""
    def __init__(self, section_nr, type, name, attrs, castdata):
        self.media = {}            # tag -> media section data, filled via add_media()
        self.type = type           # numeric cast type (1 = image, 11 = script, ...)
        self.name = name           # member name, or None
        self.attrs = attrs         # raw attribute byte slices from the common header
        self.section_nr = section_nr  # section number this member was parsed from
        self.castdata = castdata   # type-specific parsed data
    def __repr__(self):
        return "<CastMember (@%d) type=%d name=\"%s\" attrs=%s meta=%s media=%s>" % \
            (self.section_nr, self.type, self.name, self.attrs, self.castdata, self.media)
    def add_media(self,tag,data):
        # Attach a media section's data under its tag.
        self.media[tag] = data
    def get_name(self): return self.name
    @staticmethod
    def parse(blob,snr, loader_context):
        """Parse a cast-member section: big-endian header (type, common-part
        length), then the common blob (an offset table delimiting attribute
        slices), then the type-specific remainder."""
        buf = SeqBuffer(blob)
        [type,common_length,v2] = buf.unpack('>3i')
        common_blob = buf.readBytes(common_length)
        buf2 = SeqBuffer(common_blob)
        [v3,v4,v5,v6,cast_id,nElems] = buf2.unpack('>5iH')
        # nElems+1 offsets delimit nElems attribute slices in the bytes that follow.
        offsets = []
        for i in range(nElems+1):
            [tmp] = buf2.unpack('>i')
            offsets.append(tmp)
        blob_after_table=buf2.peek_bytes_left()
        attrs = []
        for i in range(len(offsets)-1):
            attr = blob_after_table[offsets[i]:offsets[i+1]]
            print "DB| Cast member attr #%d: <%s>" % (i, attr)
            attrs.append(attr)
        # Attribute slot 1 holds the member name (decoded via unpackString8 —
        # presumably a length-prefixed string; confirm against SeqBuffer).
        if len(attrs)>=2 and len(attrs[1])>0:
            name = SeqBuffer(attrs[1]).unpackString8()
        else:
            name = None
        print "DB| Cast-member common: name=\"%s\" attrs=%s misc=%s" % (
            name, attrs, [v2,v3,v4,v5,v6, cast_id])
        # Whatever follows the common part is type-specific.
        noncommon = buf.peek_bytes_left()
        castdata = CastMember.parse_castdata(type, cast_id, SeqBuffer(noncommon), attrs)
        res = CastMember(snr,type, name, attrs, castdata)
        return res
    @staticmethod
    def parse_castdata(type, cast_id, buf, attrs):
        """Dispatch on the numeric cast type; unknown types are kept raw."""
        if type==1:
            return ImageCastType.parse(buf)
        elif type==11:
            return ScriptCastType.parse(buf, cast_id)
        else:
            return ("Unknown cast type", cast_id, attrs, buf.peek_bytes_left())
class CastType: #--------------------
    """Base class for parsed type-specific cast-member data."""
    def __repr__(self):
        # Subclasses contribute their details through repr_extra().
        return "<{}{}>".format(type(self).__name__, self.repr_extra())
    def repr_extra(self):
        """Extra detail string appended inside the repr; empty by default."""
        return ""
class ImageCastType(CastType): #--------------------
    """Cast data for an image member (type 1): dimensions, anchor point, depth."""
    def __init__(self, dims, total_dims, anchor, bpp, misc):
        self.dims = dims              # (width, height) of the image
        self.total_dims = total_dims  # (total_width, height); see parse()
        self.anchor = anchor          # (anchor_x, anchor_y) registration point
        self.bpp = bpp # Bits per pixel
        print "DB| ImageCastType: misc=%s\n  dims=%s total_dims=%s anchor=%s" % (misc, dims, total_dims, anchor)
        self.misc = misc              # unidentified header fields, kept for debugging
    def repr_extra(self):
        return " dims=%s anchor=%s bpp=%d misc=%s" % (
            self.dims, self.anchor, self.bpp, self.misc)
    def get_anchor(self): return self.anchor
    @staticmethod
    def parse(buf):
        """Parse the type-specific tail of an image cast member."""
        [v10,v11, height,width,v12,v13,v14, anchor_x,anchor_y,
         v15,bits_per_pixel,v17
        ] = buf.unpack('>Hi HH ihh hh bbi')
        # The top bit of v10 is a flag; the low 15 bits are the row width.
        total_width = v10 & 0x7FFF
        v10 = "0x%x" % v10
        v12 = "0x%x" % v12
        print "DB| ImageCastType.parse: ILE=%s %s" % (buf.is_little_endian, [(width, height), (total_width,height), bits_per_pixel])
        misc = ((v10,v11), (v12,v13,v14), (v15,v17))
        return ImageCastType((width, height),
                             (total_width,height),
                             (anchor_x, anchor_y),
                             bits_per_pixel,
                             misc)
#--------------------------------------------------
class ScriptCastType(CastType): #--------------------
    """Cast data for a script member (type 11); id is the script's cast id."""
    def __init__(self, id, misc):
        self.id = id
        self.misc = misc  # unidentified header field(s), kept for debugging
        print "DB| ScriptCastType: id=#%d misc=%s" % (id, misc)
    def repr_extra(self):
        return " id=#%d misc=%s" % (self.id, self.misc)
    @staticmethod
    def parse(buf, script_id):
        """Parse the (small) type-specific tail of a script cast member."""
        [v30] = buf.unpack('>H')
        misc = [v30]
        return ScriptCastType(script_id, misc)
#--------------------------------------------------
class Media: #------------------------------
    """A piece of media data read from a numbered file section."""
    def __init__(self, snr, tag, data):
        self.snr = snr    # section number the media came from
        self.data = data  # raw section bytes
        self.tag = tag    # four-character section tag
    def __repr__(self):
        return "<{} (@{}){}>".format(type(self).__name__, self.snr, self.repr_extra())
    def repr_extra(self):
        """Extra detail appended inside the repr; subclasses may override."""
        return ""
    @staticmethod
    def parse(snr, tag, blob):
        """Build the Media subclass matching *tag* ("BITD" bitmaps get BITDMedia)."""
        media_cls = BITDMedia if tag == "BITD" else Media
        return media_cls(snr, tag, blob)
class BITDMedia(Media): #------------------------------
    """Bitmap ('BITD') media. Decoding of the pixel data is not implemented yet."""
    def __init__(self,snr,tag,blob):
        Media.__init__(self,snr,tag,blob)
        buf = SeqBuffer(blob)
        # Placeholder: the bitmap payload in `buf` is not parsed yet.
        "TODO"
#--------------------------------------------------
def load_movie(filename):
    """Load a Director movie file and return a Movie with casts and score.

    NOTE(review): the file is opened in text mode; on Python 2/POSIX that is
    byte-identical to binary, but confirm 'rb' is not needed for portability.
    """
    with open(filename) as f:
        (loader_context, sections_map, castlibs, castidx_order) = load_file(f)
        script_ctx = script_parser.create_script_context(sections_map, loader_context)
        frame_labels = score_parser.parse_frame_label_section(sections_map, loader_context)
        score = score_parser.parse_score_section(sections_map, loader_context)
        # NOTE(review): script_ctx, frame_labels and castidx_order are computed
        # but not passed to Movie (scripts is still "TODO") — confirm intent.
        return Movie(castlibs=castlibs, frames=score, scripts="TODO")
def load_cast_library(filename):
    """Load a standalone cast-library file and return its (single) cast library."""
    print "DB| load_cast_library: filename=%s" % filename
    with open(filename) as f:
        (loader_context, sections_map, castlibs, castidx_order) = load_file(f)
        # TODO script_ctx = script_parser.create_script_context(sections_map, loader_context)
        print "DB| load_cast_library: filename=%s" % filename
        return castlibs.get_cast_library(0)
def load_file(f):
    """Read a RIFX container from open file *f* and load its section map and casts.

    Returns (loader_context, sections_map, castlibs, castidx_order).
    NOTE(review): relies on the module-level `struct` import (not visible in
    this chunk) — confirm it is imported at the top of the file.
    """
    # 12-byte container header: magic, total size, format tag.
    xheader = f.read(12)
    [magic,size,tag] = struct.unpack('!4si4s', xheader)
    # "XFIR" is "RIFX" byte-swapped: the whole file is little-endian.
    is_little_endian = (magic == "XFIR")
    if is_little_endian:
        tag = rev(tag)
        magic = rev(magic)
    if magic != "RIFX":
        raise Exception("Bad file type")
    loader_context = LoaderContext(tag, is_little_endian)
    print "DB| Loader context: %s / %s" % (tag, is_little_endian)
    # MV93 = uncompressed (DIR/DXR) envelope; FGDM = compressed (DCR) envelope.
    if (tag=="MV93"):
        sections_map = shockabsorber.loader.dxr_envelope.create_section_map(f, loader_context)
    elif (tag=="FGDM"):
        sections_map = shockabsorber.loader.dcr_envelope.create_section_map(f, loader_context)
    else:
        raise Exception("Bad file type")
    (castlibs, assoc_table) = read_singletons(sections_map, loader_context)
    populate_cast_libraries(castlibs, assoc_table, sections_map, loader_context)
    # for e in sections_map.entries:
    #     tag = e.tag
    #     if tag=="STXT" or tag=="Sord" or tag=="XMED" or tag=="VWSC" or tag=="VWFI" or tag=="VWLB" or tag=="SCRF" or tag=="DRCF" or tag=="MCsL" or tag=="Cinf":
    #         print "section bytes for %s (len=%d): <%s>" % (tag, len(e.bytes()), e.bytes())
    # The optional 'Sord' section gives the cast members' ordering.
    castorder_section_id = assoc_table.get_library_sections(0).get("Sord")
    if castorder_section_id == None:
        castidx_order = None
    else:
        castorder_e = sections_map[castorder_section_id]
        castidx_order = parse_cast_order_section(castorder_e.bytes(), loader_context)
        for i,k in enumerate(castidx_order):
            (clnr, cmnr) = k
            # cmnr is 1-based, the castmember_table is 0-based.
            print "DB| Cast order #%d: %s -> %s" % (i, k, castlibs.by_nr[clnr].castmember_table[cmnr-1])
    return (loader_context, sections_map, castlibs, castidx_order)
def read_singletons(sections_map, loader_context):
    """Parse the per-file singleton sections: 'MCsL' (cast library list,
    optional — a default single library is synthesized when absent) and
    'KEY*' (the section association table)."""
    mcsl_e = sections_map.entry_by_tag("MCsL")
    castlib_table = (CastLibraryTable([CastLibrary(0,None,None,0,None,1024)]) if mcsl_e==None else
                     parse_cast_lib_section(mcsl_e.bytes(), loader_context))
    keys_e = sections_map.entry_by_tag("KEY*")
    assoc_table = parse_assoc_table(keys_e.bytes(), loader_context)
    return (castlib_table, assoc_table)
def populate_cast_libraries(castlibs, assoc_table, sections_map, loader_context):
    """Fill each cast library with its cast members, read via its 'CAS*' index."""
    for cl in castlibs.iter_by_nr():
        # Read cast list:
        assoc_id = cl.assoc_id
        # Skip named external libraries with no local association id.
        # (Python 2 `<>` is `!=`.)
        if assoc_id==0 and cl.name<>None: continue
        print "DB| populate_cast_libraries: sections: %s" % (assoc_table.get_library_sections(assoc_id),)
        castlist_section_id = assoc_table.get_library_sections(cl.assoc_id).get("CAS*")
        if castlist_section_id==None: continue
        print "DB| populate_cast_libraries: CAS*-id=%d" % (castlist_section_id,)
        castlist_e = sections_map[castlist_section_id]
        cast_idx_table = parse_cast_table_section(castlist_e.bytes(), loader_context)
        print "DB| populate_cast_libraries: idx_table=%s" % (cast_idx_table,)
        def section_nr_to_cast_member(nr):
            # Section nr 0 marks an empty cast slot.
            if nr==0: return None
            cast_section = sections_map[nr].bytes()
            castmember = CastMember.parse(cast_section,nr, loader_context)
            populate_cast_member_media(castmember, cl.assoc_id, nr,
                                       assoc_table, sections_map)
            return castmember
        # NOTE(review): relies on Python 2 `map` returning a list; under
        # Python 3 this would need list(map(...)).
        cast_table = map(section_nr_to_cast_member, cast_idx_table)
        print "DB| populate_cast_libraries: cast_table=%s" % (cast_table,)
        cl.set_castmember_table(cast_table)
def populate_cast_member_media(castmember, castlib_assoc_id, castmember_section_id, assoc_table, sections_map):
    """Attach to *castmember* every media section associated with its section id."""
    medias = assoc_table.get_cast_media(castmember_section_id)
    print "DB| populate_cast_member_media: %d,%d -> %s" % (castlib_assoc_id,castmember_section_id,medias)
    for tag,media_section_id in medias.iteritems():
        media_section_e = sections_map[media_section_id]
        # Sections may be absent (e.g. not present in this envelope); skip them.
        if media_section_e == None: continue
        # TODO: Load media more lazily.
        media_section = media_section_e.bytes()
        media = Media.parse(media_section_id, tag, media_section)
        castmember.add_media(tag, media)
def parse_cast_lib_section(blob, loader_context):
    """Parse an 'MCsL' (movie cast list) section into a CastLibraryTable.

    The section is an offset-indexed table of nElems entries, each made of
    ofsPerElem sub-blobs: name, path, a zero field, and an index/id quad.
    """
    # Read header:
    buf = SeqBuffer(blob)
    [v1,nElems,ofsPerElem,nOffsets,v5] = buf.unpack('>iiHii')
    print "DB| Cast lib section header: nElems=%d, nOffsets=%d, ofsPerElem=%d, misc=%s" % (nElems, nOffsets, ofsPerElem, [v1,v5])
    # Read offset table:
    offsets = []
    for i in range(nOffsets):
        [offset] = buf.unpack('>i')
        offsets.append(offset)
    # Offsets are relative to the end of the offset table.
    base = buf.tell()
    #print "DB| Cast lib section: offsets=%s" % offsets
    offnr = 0
    table = []
    for enr in range(nElems):
        entry = []
        for i in range(ofsPerElem):
            # Each consecutive offset pair delimits one sub-blob.
            subblob = buf.buf[base + offsets[offnr]:base + offsets[offnr+1]]
            offnr += 1
            #print "DB| i=%d subblob=<%s>" % (i,subblob)
            buf2 = SeqBuffer(subblob)
            if i==0:
                item = buf2.unpackString8()      # library name
            elif i==1:
                if buf2.bytes_left()>0:
                    item = buf2.unpackString8()  # external file path (optional)
                else:
                    item = None
            elif i==2:
                [item] = buf2.unpack('>h')
            elif i==3:
                [w1,w2,w3,w4] = buf2.unpack('>hhhh')
                item = (w1,w2,w3,w4)
            else:
                item = subblob                   # unknown extra fields: keep raw
            entry.append(item)
        print "DB| Cast lib table entry #%d: %s" % (enr+1,entry)
        [name, path, _zero, (low_idx,high_idx, assoc_id, self_idx)] = entry
        # Library numbers are 1-based.
        table.append(CastLibrary(enr+1, name, path, assoc_id, (low_idx,high_idx), self_idx))
    return CastLibraryTable(table)
def parse_cast_order_section(blob, loader_context):
print "DB| parse_cast_order_section..."
buf = SeqBuffer(blob, loader_context)
[_zero1, _zero2, nElems, nElems2, v5] = buf.unpack('>5i')
print "DB| parse_cast_order_section: header: %s" % ([_zero1, _zero2, nElems, nElems2, v5],)
table = []
for i in range(nElems):
[castlib_nr, castmember_nr] = buf.unpack('>HH')
print "DB| parse_cast_order_section #%d: %s" % (i, (castlib_nr,castmember_nr))
table.append((castlib_nr,castmember_nr))
return table
| 13,839 | 4,736 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: parse_bpmnxml.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from flowable_service_sdk.model.flowable_service import bpmn_sequence_flow_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__sequence__flow__pb2
from flowable_service_sdk.model.flowable_service import bpmn_exclusive_gateway_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__exclusive__gateway__pb2
from flowable_service_sdk.model.flowable_service import bpmn_start_event_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__start__event__pb2
from flowable_service_sdk.model.flowable_service import bpmn_end_event_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__end__event__pb2
from flowable_service_sdk.model.flowable_service import bpmn_user_task_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__user__task__pb2
from flowable_service_sdk.model.flowable_service import bpmn_process_pb2 as flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__process__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='parse_bpmnxml.proto',
package='process_definition',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x13parse_bpmnxml.proto\x12\x12process_definition\x1a\x44\x66lowable_service_sdk/model/flowable_service/bpmn_sequence_flow.proto\x1aHflowable_service_sdk/model/flowable_service/bpmn_exclusive_gateway.proto\x1a\x42\x66lowable_service_sdk/model/flowable_service/bpmn_start_event.proto\x1a@flowable_service_sdk/model/flowable_service/bpmn_end_event.proto\x1a@flowable_service_sdk/model/flowable_service/bpmn_user_task.proto\x1a>flowable_service_sdk/model/flowable_service/bpmn_process.proto\"&\n\x13ParseBPMNXMLRequest\x12\x0f\n\x07\x62pmnXML\x18\x01 \x01(\t\"|\n\x1bParseBPMNXMLResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12+\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x1d.flowable_service.BPMNProcessb\x06proto3')
,
dependencies=[flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__sequence__flow__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__exclusive__gateway__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__start__event__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__end__event__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__user__task__pb2.DESCRIPTOR,flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__process__pb2.DESCRIPTOR,])
_PARSEBPMNXMLREQUEST = _descriptor.Descriptor(
name='ParseBPMNXMLRequest',
full_name='process_definition.ParseBPMNXMLRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bpmnXML', full_name='process_definition.ParseBPMNXMLRequest.bpmnXML', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=451,
serialized_end=489,
)
_PARSEBPMNXMLRESPONSEWRAPPER = _descriptor.Descriptor(
name='ParseBPMNXMLResponseWrapper',
full_name='process_definition.ParseBPMNXMLResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='process_definition.ParseBPMNXMLResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='process_definition.ParseBPMNXMLResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='process_definition.ParseBPMNXMLResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='process_definition.ParseBPMNXMLResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=491,
serialized_end=615,
)
_PARSEBPMNXMLRESPONSEWRAPPER.fields_by_name['data'].message_type = flowable__service__sdk_dot_model_dot_flowable__service_dot_bpmn__process__pb2._BPMNPROCESS
DESCRIPTOR.message_types_by_name['ParseBPMNXMLRequest'] = _PARSEBPMNXMLREQUEST
DESCRIPTOR.message_types_by_name['ParseBPMNXMLResponseWrapper'] = _PARSEBPMNXMLRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ParseBPMNXMLRequest = _reflection.GeneratedProtocolMessageType('ParseBPMNXMLRequest', (_message.Message,), {
'DESCRIPTOR' : _PARSEBPMNXMLREQUEST,
'__module__' : 'parse_bpmnxml_pb2'
# @@protoc_insertion_point(class_scope:process_definition.ParseBPMNXMLRequest)
})
_sym_db.RegisterMessage(ParseBPMNXMLRequest)
ParseBPMNXMLResponseWrapper = _reflection.GeneratedProtocolMessageType('ParseBPMNXMLResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _PARSEBPMNXMLRESPONSEWRAPPER,
'__module__' : 'parse_bpmnxml_pb2'
# @@protoc_insertion_point(class_scope:process_definition.ParseBPMNXMLResponseWrapper)
})
_sym_db.RegisterMessage(ParseBPMNXMLResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 6,976 | 2,623 |
# Generated by Django 2.1.7 on 2019-03-31 14:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional `description` and `site_url` text fields to Conference."""

    dependencies = [
        ('conferences', '0002_auto_20190329_1326'),
    ]

    operations = [
        # Both fields default to the empty string and may be left blank in forms.
        migrations.AddField(
            model_name='conference',
            name='description',
            field=models.TextField(blank=True, default='', verbose_name='Medium length description of the conference'),
        ),
        migrations.AddField(
            model_name='conference',
            name='site_url',
            field=models.URLField(blank=True, default='', verbose_name='Conference informational site'),
        ),
    ]
| 677 | 208 |
from spider import Spider
class PageSync(object):
    """One-way mirror of wiki pages from an EU (source) MediaWiki to a CN
    (target) MediaWiki.

    Pages are discovered by title prefix (0-9, a-z) and copied to the target
    when a page with the same title does not already exist there.
    """

    def __init__(self, cn_url, cn_username, cn_password, eu_url, eu_username=None, eu_password=None):
        # The CN spider performs edits, so it must log in; the EU spider is
        # read-only and its credentials are optional.
        self.spider_cn = Spider(url=cn_url, username=cn_username, password=cn_password)
        self.spider_cn.login()
        self.spider_eu = Spider(url=eu_url, username=eu_username, password=eu_password)

    @staticmethod
    def get_page_title_by_prefix(spider: Spider, keyword, option=None):
        """Return every page title starting with *keyword*, following the
        MediaWiki `apcontinue` continuation token until the listing is done."""
        if option is None:
            option = {}
        page_list, continue_key = spider.get_page_list(keyword=keyword, limit=500, option=option)
        title_list = [each_page["title"] for each_page in page_list]
        while continue_key:
            page_list, continue_key = spider.get_page_list(
                keyword=keyword, limit=500, option={**option, "apcontinue": continue_key}
            )
            title_list.extend([each_page["title"] for each_page in page_list])
        return title_list

    def run(self, namespace=None):
        """Sync all pages whose titles start with a digit or an ascii letter.

        :param namespace: optional MediaWiki namespace id to restrict the sync.
        """
        dig_list = "0123456789"
        str_list = "abcdefghijklmnopqrstuvwxyz"
        merged_list = []
        passed_list = []
        error_list = []
        option = {"apnamespace": namespace} if namespace else {}
        for letter in dig_list + str_list:
            origin_titles = self.get_page_title_by_prefix(self.spider_eu, letter, option=option)
            # Set for O(1) membership tests in the loop below (was an O(n)
            # list scan per source page).
            current_titles = set(self.get_page_title_by_prefix(self.spider_cn, letter, option=option))
            for title in origin_titles:
                if title in current_titles:
                    passed_list.append(title)
                    print('page "%s" passed' % title)
                else:
                    content = self.spider_eu.get_page_text(title)
                    try:
                        self.spider_cn.edit(title=title, text=content, summary="merge from offical wiki")
                        merged_list.append(title)
                        print('page "%s" merged successful' % title)
                    except Exception:
                        # Best-effort sync: record the failure and continue with
                        # the next page. (Was a bare `except:`, which also
                        # swallowed KeyboardInterrupt/SystemExit.)
                        error_list.append(title)
            print("letter %s checked, %i pages." % (letter, len(origin_titles)))
        print("merged number:", len(merged_list))
        print("passed number:", len(passed_list))
        print("ERROR:", error_list)
if __name__ == "__main__":
    # Credentials are placeholders ("用户名"/"密码" mean username/password);
    # fill in real values before running.
    p = PageSync(
        cn_url="https://ck3.parawikis.com/api.php",
        cn_username="用户名",
        cn_password="密码",
        eu_url="https://ck3.paradoxwikis.com/api.php",
    )
    # Sync the main namespace, then namespaces 6, 10 and 14 (presumably the
    # MediaWiki defaults File/Template/Category — confirm on the target wiki).
    p.run()
    p.run(namespace=6)
    p.run(namespace=10)
    p.run(namespace=14)
| 2,572 | 807 |
import boto3
from datetime import datetime, timezone
class SnapshotException(Exception):
    """Raised when no exportable ('available') snapshot matches the run date."""
    pass
def lambda_handler(event, context):
    """Return the ARN of the automated Aurora cluster snapshot created on
    event["run_date"] (today, UTC, when empty), for export to S3.

    Raises SnapshotException when no 'available' snapshot matches the date.
    """
    # Input from Cloudwatch event rule
    aurora_cluster_id=event["aurora_cluster_id"]
    # NOTE(review): the three values below and export_list are read but not
    # used here — presumably consumed by a later step of the export workflow;
    # confirm they are required in this event payload.
    s3_bucket_for_rds_snap_exp=event["s3_bucket_for_rds_snap_exp"]
    iam_role_for_rds_snap_exp = event["iam_role_for_rds_snap_exp"]
    kms_key_id_for_rds_snap_exp = event["kms_key_id_for_rds_snap_exp"]
    export_list = event["export_list"]
    run_date=event["run_date"]
    #Get run_date for which snapshot export needs to happen.
    if run_date == "":
        run_date= datetime.now(timezone.utc).strftime('%Y-%m-%d')
    print('Run date is:' + run_date)
    # Assume the export role; the RDS calls below are made with its credentials.
    stsclient = boto3.client('sts')
    response = stsclient.assume_role(
        DurationSeconds=3600,
        RoleArn=iam_role_for_rds_snap_exp,
        RoleSessionName='snapshot-export-demo-session'
    )
    ACCESS_KEY = response['Credentials']['AccessKeyId']
    SECRET_KEY = response['Credentials']['SecretAccessKey']
    SESSION_TOKEN = response['Credentials']['SessionToken']
    session = boto3.session.Session(
        aws_access_key_id=ACCESS_KEY,
        aws_secret_access_key=SECRET_KEY,
        aws_session_token=SESSION_TOKEN
    )
    rdsclient = session.client('rds')
    response = rdsclient.describe_db_cluster_snapshots(
        DBClusterIdentifier=aurora_cluster_id,
        SnapshotType='automated'
    )
    DBClusterSnapshots=response['DBClusterSnapshots']
    # Find a snapshot matching the run_date
    export_snapshot_arn = ''
    for DBClusterSnapshot in DBClusterSnapshots:
        snapshot_arn = DBClusterSnapshot['DBClusterSnapshotArn']
        snapshot_status = DBClusterSnapshot['Status']
        # Compare on the snapshot's creation date (UTC day granularity).
        snapshot_date = datetime.strftime(DBClusterSnapshot['SnapshotCreateTime'], '%Y-%m-%d')
        #print (snapshot_arn,snapshot_status,snapshot_date)
        if snapshot_status == 'available' and snapshot_date == run_date:
            export_snapshot_arn = snapshot_arn
            print ('A valid snapshot to be exported matching the run date found: ' + snapshot_arn)
            break
    if export_snapshot_arn == '':
        print ('Valid snapshot to export not found. Exiting...')
        raise SnapshotException("Snapshot Not Found")
    else:
        return export_snapshot_arn
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################train lstm-crf example on CoNLL2000########################
"""
import os
from copy import deepcopy
import numpy as np
from src.util import F1, get_chunks, get_label_lists
from src.model_utils.config import config
from src.dataset import get_data_set
from src.LSTM_CRF import Lstm_CRF
from src.imdb import ImdbParser
from mindspore import Tensor, Model, context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
def modelarts_process():
    # On ModelArts cloud runs, rebase the checkpoint file under the mounted
    # output directory.
    # NOTE(review): this rewrites config.ckpt_file, but eval_lstm_crf loads
    # os.path.join(config.ckpt_save_path, config.ckpt_path) — confirm which
    # config key the checkpoint path is expected to live in.
    config.ckpt_file = os.path.join(config.output_path, config.ckpt_file)
def eval_lstm_crf():
    """Evaluate the LSTM-CRF tagger on the CoNLL2000 test split.

    Loads embeddings and the test dataset, restores the checkpoint named by
    config, runs prediction batch by batch, and prints token accuracy and
    chunk-level F1.
    """
    print('\neval.py config: \n', config)
    context.set_context(
        mode=context.GRAPH_MODE,
        save_graphs=False,
        device_id=config.device_id,
        device_target=config.device_target
    )
    embeddings_size = config.embed_size
    parser = ImdbParser(config.data_CoNLL_path,
                        config.glove_path,
                        config.data_CoNLL_path,
                        embed_size=config.embed_size
                        )
    embeddings, sequence_length, _, _, sequence_index, sequence_tag_index, tags_to_index_map \
        = parser.get_datas_embeddings(seg=['test'], build_data=False)
    embeddings_table = embeddings.astype(np.float32)
    # DynamicRNN in this network on Ascend platform only support the condition that the shape of input_size
    # and hiddle_size is multiples of 16, this problem will be solved later.
    if config.device_target == 'Ascend':
        # Zero-pad the embedding dimension up to the next multiple of 16.
        pad_num = int(np.ceil(config.embed_size / 16) * 16 - config.embed_size)
        if pad_num > 0:
            embeddings_table = np.pad(embeddings_table, [(0, 0), (0, pad_num)], 'constant')
        embeddings_size = int(np.ceil(config.embed_size / 16) * 16)
    ds_test = get_data_set(sequence_index, sequence_tag_index, config.batch_size)
    network = Lstm_CRF(vocab_size=embeddings.shape[0],
                       tag_to_index=tags_to_index_map,
                       embedding_size=embeddings_size,
                       hidden_size=config.num_hiddens,
                       num_layers=config.num_layers,
                       weight=Tensor(embeddings_table),
                       bidirectional=config.bidirectional,
                       batch_size=config.batch_size,
                       seq_length=sequence_length,
                       is_training=False)
    callback = F1(len(tags_to_index_map))
    model = Model(network)
    param_dict = load_checkpoint(os.path.join(config.ckpt_save_path, config.ckpt_path))
    load_param_into_net(network, param_dict)
    print("============== Starting Testing ==============")
    # Accumulate gold and predicted label sequences over all batches.
    rest_golds_list = list()
    rest_preds_list = list()
    columns_list = ["feature", "label"]
    for data in ds_test.create_dict_iterator(num_epochs=1):
        input_data = []
        for i in columns_list:
            input_data.append(data[i])
        feature, label = input_data
        logits = model.predict(feature, label)
        logit_ids, label_ids = callback.update(logits, label)
        rest_preds = np.array(logit_ids)
        rest_preds = np.expand_dims(rest_preds, 0)
        rest_labels = deepcopy(label_ids)
        label_ids = np.expand_dims(label_ids, 0)
        rest_labels = np.expand_dims(rest_labels, 0)
        rest_golds, rest_preds = get_label_lists(rest_labels, rest_preds, label_ids)
        rest_golds_list += rest_golds
        rest_preds_list += rest_preds
    # Token accuracy plus chunk-level precision/recall/F1 (CoNLL-style).
    accs = []
    correct_preds, total_correct, total_preds = 0., 0., 0.
    for golds, preds in zip(rest_golds_list, rest_preds_list):
        accs += [a == b for (a, b) in zip(golds, preds)]
        golds_chunks = set(get_chunks(golds, tags_to_index_map))
        preds_chunks = set(get_chunks(preds, tags_to_index_map))
        correct_preds += len(golds_chunks & preds_chunks)
        total_preds += len(preds_chunks)
        total_correct += len(golds_chunks)
    # Guard against division by zero when nothing was predicted correctly.
    p = correct_preds / total_preds if correct_preds > 0 else 0
    r = correct_preds / total_correct if correct_preds > 0 else 0
    f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
    acc = np.mean(accs)
    print("acc: {:.6f}%, F1: {:.6f}% ".format(acc*100, f1*100))
if __name__ == '__main__':
    # Script entry point: run evaluation with the settings from src.model_utils.config.
    eval_lstm_crf()
| 4,932 | 1,641 |
# import libraries
from pyrevit import EXEC_PARAMS
from pyrevit import forms
# prevent the tool, await input
mip = forms.alert("No modelling in place!", options = ["Oops, my bad...", "But you see, I am an artiste"], title = "Not going to happen", footer = "Uhoh")
# process the outcome
if mip == "Oops, my bad...":
    # if they concede, cancel the command
    EXEC_PARAMS.event_args.Cancel = True
elif mip == "But you see, I am an artiste":
    # if they challenge the command, demand the password
    pw = forms.GetValueWindow.show('Input password', title='Input password', width=500, height=600, default="")
    if pw != "Interior designer":
        # if they get it wrong, block the command
        # (the original comments on these two branches were swapped)
        EXEC_PARAMS.event_args.Cancel = True
    else:
        # if they get it right, let the command through
        EXEC_PARAMS.event_args.Cancel = False
else:
    # dialog dismissed: cancel the command
    EXEC_PARAMS.event_args.Cancel = True
import cv2
import numpy as np
import tensorflow as tf
import time
import statistics
import h5py
# Extracts (dino_y, speed, obstacle_x, obstacle_y, control) training samples
# from a recorded Chrome dinosaur game video.
vid_file = '/home/vijayaganesh/Videos/Google Chrome Dinosaur Game [Bird Update] BEST SCORE OF THE WORLD (No hack).mp4'
data_file = 'training_data.txt'
# Region of interest: the game strip inside the video frame.
roi_x = 320
roi_y = 120
roi_w = 459
roi_h = 112
font = cv2.FONT_HERSHEY_SIMPLEX
vid = cv2.VideoCapture(vid_file)
### jump Case
# Bounding box the dinosaur's contour occupies while jumping.
jx = 0
jy = 48
jw = 30
jh = 40
# tx = 0
# ty = 30
# tw = 30
# th = 41
### Duck Case
dx = 0
dy = 102
dw = 45
dh = 10
### Idle Case
tx = 0
ty = 68
tw = 30
th = 27
### Variables to store state of jump
prev_j = ty
### Obstacle List
# prev_j_1 = ty
dist = 500
prev_dist = 500
frame_count = 1
speed_list = list()
speed = 0
dino_y = 0
control = ''
# Renamed from `file` (shadowed the builtin) and closed in the finally
# block below (the original never closed it).
out_file = open(data_file, 'w')
try:
    while vid.isOpened():
        ret, frame = vid.read()
        # Fix: stop cleanly at end-of-stream. vid.read() returns (False, None)
        # there; the original unpacked blindly and crashed in cvtColor.
        if not ret:
            break
        roi_rgb = frame[roi_y:roi_y+roi_h, roi_x:roi_x+roi_w]
        roi = cv2.cvtColor(roi_rgb, cv2.COLOR_BGR2GRAY)
        print(frame.shape[:2])
        _, roi_thresh = cv2.threshold(roi, 150, 255, cv2.THRESH_BINARY_INV)
        # NOTE(review): the 3-value return is the OpenCV 3.x findContours API;
        # OpenCV 2.x/4.x return 2 values.
        _, contours, hierarchy = cv2.findContours(roi_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        obstacle_x, obstacle_y = 500, 500
        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            if(w < 7 and h < 7):
                continue  # noise
            if(x > 340 and y == 4):
                continue  # skip the score display region
            if(x == jx and w == jw):
                # Dinosaur column: rising between y=45 and y=67 means a jump.
                if(prev_j - y > 0 and y < 67 and y > 45):
                    control = 'u'
                prev_j = y
                dino_y = y
            elif(x == dx and y == dy and w == dw and h == dh):
                control = 'd'
                dino_y = y
            elif(x == dx):
                control = 'na'
                dino_y = y
            if(x > 40):
                # Anything right of the dinosaur is an obstacle; track the nearest.
                cv2.rectangle(frame, (x+roi_x, y+roi_y), (roi_x+x+w, roi_y+y+h), (0, 255, 0), 2)
                if(x < obstacle_x):
                    obstacle_x = x
                    obstacle_y = y
        dist = obstacle_x
        cv2.putText(frame, 'x = '+repr(obstacle_x)+","+repr(obstacle_y), (10, 600), font, 4, (255, 0, 0), 2, cv2.LINE_AA)
        # Estimate horizontal speed as the modal per-frame displacement over
        # a 30-frame window.
        if(frame_count < 30):
            speed_list.append(prev_dist - dist)
        else:
            speed = max(speed_list, key=speed_list.count)
            speed_list = list()
            frame_count = 0
        cv2.putText(frame, repr(dino_y), (10, 400), font, 4, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, control, (10, 500), font, 4, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, 'dx/dt = '+repr(speed), (10, 700), font, 4, (255, 0, 0), 2, cv2.LINE_AA)
        prev_dist = dist
        out_file.write(repr(dino_y)+","+repr(speed)+","+repr(obstacle_x)+","+repr(obstacle_y)+","+control+"\n")
        cv2.imshow('roi', frame)
        # time.sleep(0.1)
        frame_count += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Release resources even on error or early exit.
    out_file.close()
    vid.release()
    cv2.destroyAllWindows()
| 2,682 | 1,250 |
from unittest.mock import AsyncMock, Mock, call
import pytest
from galo_startup_commands import DependencyGraphNodeStartupCommand, startup_command
def test_without_parameters() -> None:
    """A bare @startup_command leaves all scheduling metadata unset."""
    @startup_command
    def startup():
        pass

    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    assert command.name is None
    assert command.after is None
    assert command.before is None
    assert command.order is None
def test_with_name_parameter() -> None:
    """The name= argument is stored on the attached command."""
    @startup_command(name="test")
    def startup():
        pass

    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    assert command.name == "test"
def test_with_after_parameter() -> None:
    """The after= argument is stored on the attached command."""
    @startup_command(after=[])
    def startup():
        pass

    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    assert command.after == []
def test_with_before_parameter() -> None:
    """The before= argument is stored on the attached command."""
    @startup_command(before=[])
    def startup():
        pass

    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    assert command.before == []
def test_with_order_parameter() -> None:
    """The order= argument is stored on the attached command."""
    @startup_command(order=0)
    def startup():
        pass

    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    assert command.order == 0
def test_function() -> None:
    """A plain function body runs on startup(); shutdown() is then a no-op."""
    @startup_command(order=0)
    def startup():
        mock.startup()

    # `mock` is bound after the decorated function; the closure resolves it
    # lazily at call time.
    mock = Mock()
    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    command.startup()
    command.shutdown()
    mock.startup.assert_called_once_with()
def test_generator_function() -> None:
    """A generator runs its pre-yield code on startup() and its post-yield
    code on shutdown()."""
    @startup_command(order=0)
    def startup():
        mock.startup()
        yield
        mock.shutdown()

    # `mock` is bound after the decorated function; resolved lazily at call time.
    mock = Mock()
    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    command.startup()
    mock.assert_has_calls([call.startup()])
    command.shutdown()
    mock.assert_has_calls(
        [
            call.startup(),
            call.shutdown(),
        ]
    )
@pytest.mark.asyncio
async def test_async_function() -> None:
    """An async function is awaited by startup_async(); shutdown_async() is a no-op."""
    @startup_command(order=0)
    async def startup():
        await mock.startup_async()

    mock = AsyncMock()
    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    await command.startup_async()
    await command.shutdown_async()
    mock.startup_async.assert_called_once_with()
@pytest.mark.asyncio
async def test_async_generator_function() -> None:
    """An async generator runs its pre-yield code on startup_async() and its
    post-yield code on shutdown_async()."""
    @startup_command(order=0)
    async def startup():
        await mock.startup_async()
        yield
        await mock.shutdown_async()

    mock = AsyncMock()
    command = getattr(startup, "startup_command")
    assert isinstance(command, DependencyGraphNodeStartupCommand)
    await command.startup_async()
    mock.assert_has_calls([call.startup_async()])
    await command.shutdown_async()
    mock.assert_has_calls(
        [
            call.startup_async(),
            call.shutdown_async(),
        ]
    )
def test_not_a_function() -> None:
    """Decorating a non-function object is rejected with TypeError."""
    with pytest.raises(TypeError):
        startup_command(Mock())
| 3,436 | 1,049 |
import subprocess
import os.path
#CONFIGURES BASH STUFF
#TODO: maybe to all users?

# Resolve the current user's home once. Fixes three related bugs in the
# original: subprocess (without a shell) does not tilde-expand "~/...",
# and neither open() nor os.path.exists() expand "~" — so every call
# operated on a literal, non-existent "~/..." path.
HOME_DIR = os.path.expanduser("~")

#CONFIGURE .bashrc FIRST
#=======================
bashrc_path = os.path.join(HOME_DIR, ".bashrc")
if not os.path.exists(bashrc_path):
    #CREATE IT
    #===========
    subprocess.call(["touch", bashrc_path])

# Append the history-hardening directives to .bashrc.
# Done *before* the chattr loop below: once .bashrc is append-only,
# seek/truncate rewrites would fail.
with open(bashrc_path, "r+") as bashrc:
    lines = bashrc.read().strip("\n").split("\n")
    lines.append("shopt -s histappend")
    lines.append('readonly PROMPT_COMMAND="history -a" ')
    lines.append("readonly HISTFILE")
    lines.append("readonly HISTFILESIZE")
    lines.append("readonly HISTSIZE")
    lines.append("readonly HISTCMD")
    lines.append("readonly HISTCONTROL")
    lines.append("readonly HISTIGNORE")
    bashrc.seek(0)
    bashrc.write("\n".join(lines))
    bashrc.truncate()

#PREVENT TAMPERING WITH THESE FILES
#==================================
append_only = [".bash_history", ".bash_profile", ".bash_login", ".profile", ".bash_logout", ".bashrc"]
for name in append_only:
    # Set the append-only attribute so history/profile files cannot be truncated.
    subprocess.call(["chattr", "+a", os.path.join(HOME_DIR, name)])

#DISABLE OTHER SHELLS
#=====================
# Fixed: the original called .spit() (AttributeError) on these strings.
# NOTE(review): the paths are relative ("csh", not "/bin/csh") — confirm the
# intended working directory or switch to absolute shell paths.
subprocess.call(["chmod", "750", "csh"])
subprocess.call(["chmod", "750", "tcsh"])
subprocess.call(["chmod", "750", "ksh"])
| 1,122 | 417 |
from autonmt.search.beam_search import beam_search
from autonmt.search.greedy_search import greedy_search
# Action identifiers: 0 = take the action, 1 = pass.
# Fixed: stripped garbled extraction residue ("| 31 | 14 |") that was fused
# onto the second line and made the module unimportable.
take_action = 0
pass_action = 1
import importlib
def find_trainer_using_name(model_name):
    """Import trainers/<model_name>_trainer.py and return the trainer class.

    The module must define a class named <model_name> (underscores removed)
    followed by "trainer", compared case-insensitively.  Exits the process
    with a non-zero status when no such class exists.
    """
    model_filename = "trainers." + model_name + "_trainer"
    modellib = importlib.import_module(model_filename)
    # In the file, the class called ModelNameModel() will
    # be instantiated. It has to be a subclass of torch.nn.Module,
    # and it is case-insensitive.
    model = None
    target_model_name = model_name.replace('_', '') + 'trainer'
    wanted = target_model_name.lower()
    for name, cls in modellib.__dict__.items():
        if name.lower() == wanted:
            model = cls  # keep scanning: the last matching name wins, as before
    if model is None:
        print("In %s.py, there should be a subclass of torch.nn.Module with class name that matches %s in lowercase." % (model_filename, target_model_name))
        # Fixed: exit(0) reported SUCCESS to the calling shell even though the
        # lookup failed; exit non-zero instead.
        exit(1)
    return model
def create_trainer(opt):
    """Instantiate the trainer class selected by ``opt.trainer``, passing *opt*."""
    trainer_cls = find_trainer_using_name(opt.trainer)
    instance = trainer_cls(opt)
    print("model [%s] was created" % (type(instance).__name__))
    return instance
# -*- coding: utf-8 -*-
from ..expr import *
def_Topic(
Title("Imaginary unit"),
Section("Definitions"),
Entries(
"be8e05",
),
Section("Domain"),
Entries(
"88ad6f",
"cd8a07",
"a08fb9",
),
Section("Quadratic equations"),
Entries(
"08ad28",
),
Section("Numerical value"),
Entries(
"72cef9",
"27586f",
),
Section("Complex parts"),
Entries(
"65bbd6",
"249fd6",
"61784f",
"735409",
"089f85",
"09c107",
),
Section("Transformations"),
Entries(
"31b0df",
"8be138",
"e0425a",
"c12a41",
"44ae4a",
"67c262",
"f8a56f",
"15f92d",
"0ad836",
"a39534",
),
Section("Special functions at this value"),
Entries(
"c331da", # log
"9c93bb",
"3ac0ce",
"208da7",
),
),
# Entry definitions for the topic above.
make_entry(ID("be8e05"),
    SymbolDefinition(ConstI, ConstI, "Imaginary unit"),
    Description("Represents the constant", i, ", the imaginary unit."))

# Domain
make_entry(ID("88ad6f"),
    Formula(Element(ConstI, CC)))

make_entry(ID("cd8a07"),
    Formula(Element(ConstI, AlgebraicNumbers)))

make_entry(ID("a08fb9"),
    Formula(NotElement(ConstI, RR)))

# Quadratic equations
make_entry(ID("08ad28"),
    Formula(Equal(Solutions(Brackets(Equal(x**2 + 1, 0)), ForElement(x, CC)), Set(ConstI, -ConstI))))

# Numerical value
make_entry(ID("72cef9"),
    Formula(Equal(ConstI, Sqrt(-1))))

make_entry(ID("27586f"),
    Formula(Equal(ConstI, Pow(-1, Div(1,2)))))

# Complex parts
make_entry(ID("65bbd6"),
    Formula(Equal(Abs(ConstI), 1)))

make_entry(ID("249fd6"),
    Formula(Equal(Re(ConstI), 0)))

make_entry(ID("61784f"),
    Formula(Equal(Im(ConstI), 1)))

make_entry(ID("09c107"),
    Formula(Equal(Sign(ConstI), ConstI)))

# Transformations (this section's entries are listed under "Transformations"
# in the topic above; the comment previously said "Operations")
make_entry(ID("31b0df"),
    Formula(Equal(ConstI**2, -1)))

make_entry(ID("8be138"),
    Formula(Equal(ConstI**3, -ConstI)))

make_entry(ID("e0425a"),
    Formula(Equal(ConstI**4, 1)))

# Integer powers of i cycle with period 4.
make_entry(ID("c12a41"),
    Formula(Equal(ConstI**n, Cases(
        Tuple(1, CongruentMod(n, 0, 4)),
        Tuple(ConstI, CongruentMod(n, 1, 4)),
        Tuple(-1, CongruentMod(n, 2, 4)),
        Tuple(-ConstI, CongruentMod(n, 3, 4))))),
    Variables(n),
    Assumptions(Element(n, ZZ)))

make_entry(ID("44ae4a"),
    Formula(Equal(Conjugate(ConstI), -ConstI)))

make_entry(ID("67c262"),
    Formula(Equal(1/ConstI, -ConstI)))

make_entry(ID("f8a56f"),
    Formula(Equal(ConstI**z, Exp(Pi*ConstI*z/2))),
    Variables(z),
    Assumptions(Element(z, CC)))

make_entry(ID("15f92d"),
    Formula(Equal(ConstI**z, Cos(Pi/2 * z) + Sin(Pi/2 * z) * ConstI)),
    Variables(z),
    Assumptions(Element(z, CC)))

make_entry(ID("a39534"),
    Formula(Equal(ConstI**ConstI, Exp(-(Pi/2)))))

# Special functions at this value
make_entry(ID("9c93bb"),
    Formula(Equal(Abs(Gamma(ConstI)), Sqrt(Pi/Sinh(Pi)))))

make_entry(ID("3ac0ce"),
    Formula(Equal(Im(DigammaFunction(ConstI)), Div(1,2)*(Pi*Coth(Pi) + 1))))

make_entry(ID("208da7"),
    Formula(Equal(PolyLog(2, ConstI), -(Pi**2/48) + ConstCatalan*ConstI)))
"""
19773f
35e09c
40f42c
22c52e
7c4b00
daaa7a
efe0fb
i**n
(actuall n in CC)
# log(i)
#
"""
import csv
from utils import to_sds_date
# Single-school export: every generated row references this fixed school id.
SCHOOL_ID = '1'

# Output file names -- presumably the names expected by a School Data Sync
# style CSV import; confirm against the consumer.
SCHOOL_FILENAME = 'School.csv'
SECTION_FILENAME = 'Section.csv'
STUDENT_FILENAME = 'Student.csv'
TEACHER_FILENAME = 'Teacher.csv'
STUDENT_ENROLLMENT_FILENAME = 'StudentEnrollment.csv'
TEACHER_ROSTER_FILENAME = 'TeacherRoster.csv'
class Writer:
    """Base CSV writer: emits HEADERS, then one row per datasource item."""

    HEADERS = []

    def __init__(self, csvfile, datascource):
        self.csvwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
        self.datasource = datascource

    def row_iterator(self, sourcerow):
        # Identity mapping; subclasses turn a source record into a CSV row.
        return sourcerow

    def generate(self, *args, **kwargs):
        """Write the header row, then every transformed datasource row."""
        self.csvwriter.writerow(self.HEADERS)
        rows = (self.row_iterator(item, *args, **kwargs) for item in self.datasource)
        for row in rows:
            self.csvwriter.writerow(row)
class School(Writer):
    """Single-row writer describing the school itself."""

    HEADERS = ['SIS ID', 'Name']

    def __init__(self, csvfile, schoolname):
        rows = [[SCHOOL_ID, schoolname]]
        super(School, self).__init__(csvfile, rows)
class Section(Writer):
    """One row per class section, keyed by the section's year-specific id."""

    HEADERS = ['SIS ID', 'School SIS ID', 'Section Name']

    def __init__(self, csvfile, datascource):
        super(Section, self).__init__(csvfile, datascource)

    def row_iterator(self, sourcerow, schoolyear=2020):
        section_name = sourcerow.course + ' ' + str(sourcerow.grade)
        return [sourcerow.get_id(schoolyear), SCHOOL_ID, section_name]
class Student(Writer):
    """One row per student."""

    HEADERS = ['SIS ID', 'School SIS ID', 'Username', 'Grade']

    def __init__(self, csvfile, datascource):
        super(Student, self).__init__(csvfile, datascource)

    def row_iterator(self, sourcerow):
        return [
            sourcerow.shortname,
            SCHOOL_ID,
            sourcerow.get_username(),
            sourcerow.grade,
        ]
class Teacher(Writer):
    """One row per teacher."""

    HEADERS = ['SIS ID', 'School SIS ID', 'Username']

    def __init__(self, csvfile, datascource):
        super(Teacher, self).__init__(csvfile, datascource)

    def row_iterator(self, sourcerow):
        return [
            sourcerow.shortname,
            SCHOOL_ID,
            sourcerow.get_username(),
        ]
class StudentEnrollment(Writer):
    """One row per (section, student) enrollment pairing."""

    HEADERS = ['Section SIS ID', 'SIS ID']

    def __init__(self, csvfile, datascource):
        super(StudentEnrollment, self).__init__(csvfile, datascource)

    def row_iterator(self, sourcerow, schoolyear=2020):
        section_id = sourcerow.get_id(schoolyear)
        return [section_id, sourcerow.student]
class TeacherRoaster(Writer):
    """One row per (section, teacher) pairing.

    NOTE(review): the class name is presumably a typo for "TeacherRoster",
    but it is part of the public interface, so it is kept unchanged.
    """

    HEADERS = ['Section SIS ID', 'SIS ID']

    def __init__(self, csvfile, datascource):
        super(TeacherRoaster, self).__init__(csvfile, datascource)

    def row_iterator(self, sourcerow, schoolyear=2020):
        section_id = sourcerow.get_id(schoolyear)
        return [section_id, sourcerow.teacher]
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import io
import re
import os
import glob
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Read and return the text of a file located relative to this script.

    *names* are path segments joined onto this file's directory; ``encoding``
    may be passed as a keyword (default "utf8").
    """
    encoding = kwargs.get('encoding', 'utf8')
    path = os.path.join(os.path.dirname(__file__), *names)
    with io.open(path, encoding=encoding) as fh:
        return fh.read()
here = os.path.abspath(os.path.dirname(__file__))

# requirements.txt: one runtime requirement per line.
with open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as requirements_file:
    requirements = requirements_file.read().splitlines()

# requirements_dev.txt: setup requirements, ONE blank line, then test requirements.
with open(os.path.join(here, 'requirements_dev.txt'), encoding='utf-8') as requirements_dev_file:
    requirements_dev = requirements_dev_file.read().splitlines()

# split the developer requirements into setup and test requirements
if not requirements_dev.count("") == 1 or requirements_dev.index("") == 0:
    raise SyntaxError("requirements_dev.txt has the wrong format: setup and test "
                      "requirements have to be separated by one blank line.")
requirements_dev_split = requirements_dev.index("")

setup_requirements = ["pip>9",
                      "setuptools_scm",
                      "setuptools_scm_git_archive"]
test_requirements = requirements_dev[requirements_dev_split + 1:]  # +1: skip empty line

setup(
    name='zfit-flavour',
    # Version derived from git tags via setuptools_scm.
    use_scm_version={
        'local_scheme': 'dirty-tag',
        'write_to': 'src/zfit_flavour/_version.py',
        'fallback_version': '0.0.1',
    },
    license='BSD-3-Clause',
    description='Flavour physics for zfit',
    # Long description: README with the badges block stripped, plus CHANGELOG
    # with Sphinx roles flattened to plain double-backtick literals.
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    author='Jonas Eschle, Rafael Silva Coutinho',
    author_email='Jonas.Eschle@cern.ch, rafael.silva.coutinho@cern.ch',
    url='https://github.com/zfit/zfit-flavour',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    # NOTE(review): package_dir points at 'src', but this glob looks in
    # 'zfit_flavour/*.py' relative to the repo root and so likely matches
    # nothing -- presumably 'src/*.py' (the usual src-layout template) was
    # intended; confirm.
    py_modules=[os.path.splitext(os.path.basename(path))[0] for path in glob.glob('zfit_flavour/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        # uncomment if you test on these interpreters:
        # 'Programming Language :: Python :: Implementation :: IronPython',
        # 'Programming Language :: Python :: Implementation :: Jython',
        # 'Programming Language :: Python :: Implementation :: Stackless',
        'Topic :: Utilities',
    ],
    project_urls={
        'Documentation': 'https://zfit-flavour.readthedocs.io/',
        'Changelog': 'https://zfit-flavour.readthedocs.io/en/latest/changelog.html',
        'Issue Tracker': 'https://github.com/zfit/zfit-flavour/issues',
    },
    keywords=[
        'flavour', 'zfit', 'model fitting'
    ],
    python_requires='>=3.6',
    install_requires=requirements,
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
)
from collections import deque
class TaskScheduler:
    """Round-robin scheduler for generator-based cooperative tasks."""

    def __init__(self):
        self._task_deque = deque()

    def new_task(self, task):
        """Admit a newly started task (a generator that cooperates via ``yield``)."""
        self._task_deque.append(task)

    def run(self):
        """Drive every admitted task to completion, one yield-step at a time."""
        while self._task_deque:
            current = self._task_deque.popleft()
            try:
                next(current)  # advance to the task's next yield
            except StopIteration:
                continue  # generator finished; drop it
            self._task_deque.append(current)  # still running: requeue at the back
# Two simple generator functions
def __countdown(n):
    """Count down from *n* to 1, yielding control after each tick."""
    for remaining in range(n, 0, -1):
        print('T-minus', remaining)
        yield
    print('Blastoff!')
def __countup(n):
    """Count from 0 up to *n* - 1, yielding control after each tick."""
    for current in range(n):
        print('Counting up', current)
        yield
if __name__ == "__main__":
    # Example use
    sched = TaskScheduler()
    sched.new_task(__countdown(10))
    sched.new_task(__countdown(5))
    sched.new_task(__countup(15))
    # The three tasks run interleaved, one yield-step each per round:
    sched.run()
    # output:
    # T-minus 10
    # T-minus 5
    # Counting up 0
    # T-minus 9
    # T-minus 4
    # Counting up 1
    # T-minus 8
    # T-minus 3
    # Counting up 2
    # T-minus 7
    # T-minus 2
    # Counting up 3
    # T-minus 6
    # T-minus 1
    # Counting up 4
    # T-minus 5
    # Blastoff!
    # Counting up 5
    # T-minus 4
    # Counting up 6
    # T-minus 3
    # Counting up 7
    # T-minus 2
    # Counting up 8
    # T-minus 1
    # Counting up 9
    # Blastoff!
    # Counting up 10
    # Counting up 11
    # Counting up 12
    # Counting up 13
    # Counting up 14
class Solution:
    def intToRoman(self, num: int) -> str:
        """Return the Roman-numeral spelling of *num* (expects 1..3999).

        Greedy conversion driven by a descending value table (the original
        iterated a dict in reversed insertion order; this table is that order
        written out explicitly).
        """
        table = [
            ('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
            ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
            ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1),
        ]
        pieces = []
        for symbol, value in table:
            # Emit each symbol as many times as it fits.
            while num >= value:
                pieces.append(symbol)
                num -= value
        return "".join(pieces)
# Quick manual checks (printed, not asserted):
sol = Solution()
print(sol.intToRoman(1994))
print(sol.intToRoman(562))
print(sol.intToRoman(42))
print(sol.intToRoman(724))
print("59 -> ", sol.intToRoman(59))
class Solution3:
    def intToRoman(self, num: int) -> str:
        """Roman conversion using a repeat-count multiplication per symbol."""
        table = [["M", 1000], ["CM", 900], ["D", 500], ["CD", 400], ["C", 100],
                 ["XC", 90], ["L", 50], ["XL", 40], ["X", 10], ["IX", 9],
                 ["V", 5], ["IV", 4], ["I", 1]]
        out = []
        for symbol, value in table:
            repeat = num // value
            if repeat:
                out.append(symbol * repeat)
                num %= value
        return ''.join(out)
# Quick manual check for the table-driven variant:
sol3 = Solution3()
print(sol3.intToRoman(625))
class Solution2:
    def intToRoman(self, num: int) -> str:
        """Roman conversion by place value: thousands first, then each lower
        decade with explicit 4/9 subtractive cases."""
        symbol_map = {1: 'I',
                      5: 'V',
                      10: 'X',
                      50: 'L',
                      100: 'C',
                      500: 'D',
                      1000: 'M'}
        pieces = [(num // 1000) * symbol_map[1000]]
        remainder = num % 1000
        for unit in (100, 10, 1):
            digit = remainder // unit
            one, five = symbol_map[unit], symbol_map[unit * 5]
            if digit == 4:          # e.g. 40 -> XL
                pieces.append(one + five)
            elif digit == 9:        # e.g. 90 -> XC
                pieces.append(one + symbol_map[unit * 10])
            else:                   # 0-3 and 5-8: optional "five" plus repeats
                pieces.append((digit >= 5) * five + (digit % 5) * one)
            remainder %= unit
        return "".join(pieces)
import zlib
import zmq
import simplejson
import sys
import time
import pprint
import math
pp = pprint.PrettyPrinter(indent=4)  # used for the (commented-out) debug dump below

"""
 "  Configuration
"""
__relayEDDN = 'tcp://eddn.edcd.io:9500'  # public EDDN relay endpoint
__timeoutEDDN = 600000  # ZeroMQ receive timeout in milliseconds

"""
 "  Start
"""
"""
def distance_finder(input_coords):
    """Return [distance to Colonia, distance to Ogmar] for an (x, y, z)
    galactic-coordinate triple."""
    colonia_coords = [-9530.5, -910.28125, 19808.125]
    ogmar_coords = [-9534, -905.28125, 19802.03125]

    def euclid(ref):
        # Same term order as the original expanded formula.
        return math.sqrt(sum((a - b) ** 2 for a, b in zip(ref, input_coords)))

    return [euclid(colonia_coords), euclid(ogmar_coords)]
def main():
    """Subscribe to the EDDN relay and print the distance to Ogmar for every
    message carrying a star position; reconnects on ZMQ errors/timeouts."""
    context = zmq.Context()
    subscriber = context.socket(zmq.SUB)

    subscriber.setsockopt(zmq.SUBSCRIBE, b"")           # receive every topic
    subscriber.setsockopt(zmq.RCVTIMEO, __timeoutEDDN)

    while True:
        try:
            subscriber.connect(__relayEDDN)

            while True:
                __message = subscriber.recv()
                # Fixed: the original tested "__message == False", but recv()
                # returns bytes, so the branch never fired; treat any empty
                # payload as a signal to reconnect.
                if not __message:
                    subscriber.disconnect(__relayEDDN)
                    break

                __message = zlib.decompress(__message)
                __json = simplejson.loads(__message)
                # call dumps() to ensure double quotes in output
                #pp.pprint(__json)
                try:
                    star_system = __json['message']['StarSystem']
                    star_pos = __json['message']['StarPos']
                    timestamp = __json['header']['gatewayTimestamp']
                    softwarename = __json['header']['softwareName']
                    distances = distance_finder(star_pos)
                    print(f'{timestamp} {star_system} {distances[1]}')
                except (KeyError, TypeError):
                    # Fixed: was a bare "except:" that swallowed everything,
                    # including KeyboardInterrupt; only missing/malformed
                    # journal fields are expected here.
                    print('data missing')

                sys.stdout.flush()

        except zmq.ZMQError as e:
            print('ZMQSocketException: ' + str(e))
            sys.stdout.flush()
            subscriber.disconnect(__relayEDDN)
            time.sleep(5)

        time.sleep(.1)
# Fixed: stripped garbled extraction residue ("| 2,310 | 762 |") fused onto
# the main() call.
if __name__ == '__main__':
    main()
from __future__ import print_function, division
import warnings; warnings.filterwarnings("ignore")
from nilmtk import DataSet
import pandas as pd
import numpy as np
import datetime
import time
import math
import glob
from sklearn.tree import DecisionTreeRegressor
# Bring packages onto the path
import sys, os
sys.path.append(os.path.abspath('../bayesian_optimization/'))
from utils import metrics_np
from utils.metrics_np import Metrics
# import argparse
def decision_tree(dataset_path, train_building, train_start, train_end, val_building, val_start, val_end, test_building, test_start, test_end, meter_key, sample_period, criterion, min_sample_split):
    """Train a DecisionTreeRegressor to disaggregate one appliance from mains.

    Loads train/val/test windows from a NILMTK HDF5 dataset, fits a tree on
    (mains -> appliance) samples, and returns a dict with val/test metrics and
    the wall-clock time taken.

    NOTE(review): this uses pandas ``.ix``, which was removed in pandas 1.0 --
    it only runs on an old pandas; ``.loc`` would be the modern equivalent.
    """
    # Start tracking time
    start = time.time()

    # Prepare dataset and options
    dataset_path = dataset_path
    train = DataSet(dataset_path)
    train.set_window(start=train_start, end=train_end)
    val = DataSet(dataset_path)
    val.set_window(start=val_start, end=val_end)
    test = DataSet(dataset_path)
    test.set_window(start=test_start, end=test_end)
    train_building = train_building
    val_building = val_building
    test_building = test_building
    meter_key = meter_key
    sample_period = sample_period

    train_elec = train.buildings[train_building].elec
    val_elec = val.buildings[val_building].elec
    test_elec = test.buildings[test_building].elec

    try:  # REDD
        # X: mains power, y: the target appliance's power, resampled and NaN-filled.
        X_train = next(train_elec.mains().all_meters()[0].load(sample_period=sample_period)).fillna(0)
        y_train = next(train_elec[meter_key].load(sample_period=sample_period)).fillna(0)
        X_test = next(test_elec.mains().all_meters()[0].load(sample_period=sample_period)).fillna(0)
        y_test = next(test_elec[meter_key].load(sample_period=sample_period)).fillna(0)
        X_val = next(val_elec.mains().all_meters()[0].load(sample_period=sample_period)).fillna(0)
        y_val = next(val_elec[meter_key].load(sample_period=sample_period)).fillna(0)

        # Intersect between two dataframe - to make sure same trining instances in X and y
        # Train set
        intersect_index = pd.Index(np.sort(list(set(X_train.index).intersection(set(y_train.index)))))
        X_train = X_train.ix[intersect_index]
        y_train = y_train.ix[intersect_index]
        # Test set
        intersect_index = pd.Index(np.sort(list(set(X_test.index).intersection(set(y_test.index)))))
        X_test = X_test.ix[intersect_index]
        y_test = y_test.ix[intersect_index]
        # Val set
        intersect_index = pd.Index(np.sort(list(set(X_val.index).intersection(set(y_val.index)))))
        X_val = X_val.ix[intersect_index]
        y_val = y_val.ix[intersect_index]

        # Get values from numpy array
        X_train = X_train.values
        y_train = y_train.values
        X_test = X_test.values
        y_test = y_test.values
        X_val = X_val.values
        y_val = y_val.values
    except AttributeError:  # UKDALE
        # NOTE(review): this branch never assigns X_val/y_val, so the
        # predict/metrics code below raises NameError for UKDALE inputs --
        # confirm and add a val split here.
        X_train = train_elec.mains().power_series_all_data(sample_period=sample_period).fillna(0)
        y_train = next(train_elec[meter_key].power_series(sample_period=sample_period)).fillna(0)
        X_test = test_elec.mains().power_series_all_data(sample_period=sample_period).fillna(0)
        y_test = next(test_elec[meter_key].power_series(sample_period=sample_period)).fillna(0)

        # Intersect between two dataframe - to make sure same trining instances in X and y
        # Train set
        intersect_index = pd.Index(np.sort(list(set(X_train.index).intersection(set(y_train.index)))))
        X_train = X_train.ix[intersect_index]
        y_train = y_train.ix[intersect_index]
        # Test set
        intersect_index = pd.Index(np.sort(list(set(X_test.index).intersection(set(y_test.index)))))
        X_test = X_test.ix[intersect_index]
        y_test = y_test.ix[intersect_index]

        # X_train = X_train.reshape(-1, 1)
        # y_train = y_train.reshape(-1, 1)
        # X_test = X_test.reshape(-1, 1)
        # y_test = y_test.reshape(-1, 1)

        # Get values from numpy array - Avoid server error
        X_train = X_train.values.reshape(-1, 1)
        y_train = y_train.values.reshape(-1, 1)
        X_test = X_test.values.reshape(-1, 1)
        y_test = y_test.values.reshape(-1, 1)

    # Model settings and hyperparameters
    # NOTE(review): min_samples_split is assigned but unused; the raw
    # min_sample_split argument is what is actually passed to the model.
    min_samples_split = min_sample_split
    tree_clf = DecisionTreeRegressor(criterion = criterion, min_samples_split = min_sample_split)

    # print("========== TRAIN ============")
    tree_clf.fit(X_train, y_train)

    # print("========== DISAGGREGATE ============")
    y_val_predict = tree_clf.predict(X_val)
    y_test_predict = tree_clf.predict(X_test)

    # print("========== RESULTS ============")
    # me = Metrics(state_boundaries=[10])
    # On/off state boundary taken from the appliance's own threshold.
    on_power_threshold = train_elec[meter_key].on_power_threshold()
    me = Metrics(state_boundaries=[on_power_threshold])
    val_metrics_results_dict = Metrics.compute_metrics(me, y_val_predict, y_val.flatten())
    test_metrics_results_dict = Metrics.compute_metrics(me, y_test_predict, y_test.flatten())

    # end tracking time
    end = time.time()
    time_taken = end-start  # in seconds

    model_result_data = {
        'val_metrics': val_metrics_results_dict,
        'test_metrics': test_metrics_results_dict,
        'time_taken': format(time_taken, '.2f'),
        'epochs': None,
    }

    # Close Dataset files
    train.store.close()
    val.store.close()
    test.store.close()

    return model_result_data
# def main():
#
# # Take in arguments from command line
# parser = argparse.ArgumentParser(description='Decision Tree Regressor')
# parser.add_argument('--datapath', '-d', type=str, required=True,
# help='hd5 filepath')
#
# parser.add_argument('--train_building', type=int, required=True)
# parser.add_argument('--train_start', type=str, default=None, help='YYYY-MM-DD')
# parser.add_argument('--train_end', type=str, required=True, help='YYYY-MM-DD')
#
# parser.add_argument('--test_building', type=int, required=True)
# parser.add_argument('--test_start', type=str, required=True, help='YYYY-MM-DD')
# parser.add_argument('--test_end', type=str, default=None, help='YYYY-MM-DD')
#
# parser.add_argument('--appliance', type=str, required=True)
# parser.add_argument('--sampling_rate', type=int, required=True)
#
# # Model specific options and hyperparameters
# parser.add_argument('--min_sample_split', type=int, default=100)
# args = parser.parse_args()
#
# hd5_filepath = args.datapath
# train_building = args.train_building
# train_start = pd.Timestamp(args.train_start) if args.train_start != None else None
# train_end = pd.Timestamp(args.train_end)
# test_building = args.test_building
# test_start = pd.Timestamp(args.test_start)
# test_end = pd.Timestamp(args.test_end) if args.test_end != None else None
# appliance = args.appliance
# downsampling_period = args.sampling_rate
# min_sample_split = args.min_sample_split
#
#
# model_result_data = decision_tree(
# dataset_path=hd5_filepath,
# train_building=train_building, train_start=train_start, train_end=train_end,
# test_building=test_building, test_start=test_start, test_end=test_end,
# meter_key=appliance,
# sample_period=downsampling_period,
# criterion="mae",
# min_sample_split=min_sample_split)
#
# # # Write options and results to file
# # with open('dt_json.json', 'a+') as outfile:
# # json.dump(model_result_data, outfile, sort_keys=True,
# # indent=4, separators=(',', ': '))
# print(model_result_data)
#
# if __name__ == "__main__":
# main()
# python algorithms/dt.py --datapath ../data/REDD/redd.h5 --train_building 1 --train_building 1 --train_end 2011-05-10 --test_building 1 --test_start 2011-05-10 --appliance fridge --sampling_rate 20 --min_sample_split 100
# python dt.py --datapath ../data/REDD/redd.h5 --train_building 1 --train_building 1 --train_end 2011-05-10 --test_building 1 --test_start 2011-05-10 --appliance fridge --sampling_rate 20 --min_sample_split 100
# python dt.py --datapath /mnt/data/datasets/wattanavaekin/REDD/redd.h5 --train_building 1 --train_end 2011-05-10 --test_building 1 --test_start 2011-05-10 --appliance fridge --sampling_rate 20 --min_sample_split 100
# python dt.py --datapath /mnt/data/datasets/wattanavaekin/UKDALE/ukdale-2017.h5 --train_building 2 --train_end 2013-08-02 --test_building 2 --test_start 2013-08-02 --appliance fridge --sampling_rate 120 --min_sample_split 100
# Scratch exploration: flatten a FASTA file (already read into a list of
# lines) into one string, then split it back into records on '>'.
difficultlist = ['>tr|Q9IQN3|Q9IQN3_9HIV1 Protein Rev (Fragment) OS=Human immunodeficiency virus 1 OX=11676 GN=rev PE=4 SV=1', 'PPPSSEGTRQARRNRRRRWRERQRQIRRISGWILSNHLGGLTEPVPLQLPPLERLTLDCN', 'EDCGTSGTQGVGSPQIPVESPTVLESGTKE', '>sp|O95218|ZRAB2_HUMAN Zinc finger Ran-binding domain-containing protein 2 OS=Homo sapiens OX=9606 GN=ZRANB2 PE=1 SV=2', 'MSTKNFRVSDGDWICPDKKCGNVNFARRTSCNRCGREKTTEAKMMKAGGTEIGKTLAEKS', 'RGLFSANDWQCKTCSNVNWARRSECNMCNTPKYAKLEERTGYGGGFNERENVEYIEREES', 'DGEYDEFGRKKKKYRGKAVGPASILKEVEDKESEGEEEDEDEDLSKYKLDEDEDEDDADL', 'SKYNLDASEEEDSNKKKSNRRSRSKSRSSHSRSSSRSSSPSSSRSRSRSRSRSSSSSQSR', 'SRSSSRERSRSRGSKSRSSSRSHRGSSSPRKRSYSSSSSSPERNRKRSRSRSSSSGDRKK', 'RRTRSRSPERRHRSSSGSSHSGSRSSSKKK', '']
difficultlist
saveseq = []
easystring = ''
seq1 = ''
seq2=''
for i in range(len(difficultlist)):
    easystring += difficultlist[i]  # string without newlines now I can use the method split() again
seqlist = easystring.split('>', 2)  # splits into list of 3 items [0]='' [1]=seq1 [2]=seq2 (remember to test if it also works on file with more than 2 seq)
print(seqlist[1])
print(easystring)
print(seqlist)
# if difficultlist[i].startswith('>') != True:
#     saveseq.append(difficultlist[i])
# print(saveseq)
my_list = ["Hello", "world"]
# NOTE(review): works, but the idiomatic spelling is '-'.join(my_list).
print(str.join('-', my_list))
# return filist
# def split_any_fasta(fastafile):
# filist=[]
# with open('./'+fastafile, 'r') as rfile:
# for line in rfile:
# filist.append(line.split("\n"))
# print(filist)
# return filist
# def list_to_string(filist):
# stringed = ''
# for i in range(len(filist)):
# for j in range(len(filist)):
# if str(filist[i][j].startswith('>')) == True:
# continue
# else:
# print(filist[i][j])
# list_to_string(listoflists)
# def fasta_to_string(fastaf):
# with open('./'+fastaf, 'r') as rfile:
# aaline=''
# seq1=''
# seq2=''
# for line in rfile:
# if line.find(' ') != None:
# continue
# else:
# aaline = rfile.readline()
# print(aaline)
# return aaline
# fi = "Q9IQN3.fasta"
# print(fasta_to_string(fi))
# #return (seq1, seq2)
# # le = len(rfile.readlines())
# # for line in range(1, le):
# # if line != '>':
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import urllib2, base64
import json
from optparse import OptionParser
from rangerrest import RangerRestHelper
def foo_callback(option, opt, value, parser):
    '''optparse callback: split a comma-separated value into a list and store
    it on the parser under the option's destination name.'''
    parts = value.split(',')
    setattr(parser.values, option.dest, parts)
def option_parser():
    '''Build the command-line parser for the Ranger policy tool.'''
    parser = OptionParser()
    # Re-purpose -h for --host; help stays reachable via -?/--help.
    parser.remove_option('-h')
    parser.add_option('-?', '--help', action='help')
    parser.add_option('-h', '--host', dest="host", default='localhost',
                      help='host of the ranger server')
    parser.add_option('-p', '--port', dest="port", type='int', default=6080,
                      help='port of the ranger server')
    parser.add_option('-U', '--rangeruser', dest="rangerusername", default='admin',
                      help='ranger username')
    parser.add_option('-w', '--rangerpassword', dest="rangerpassword", default='admin',
                      help='ranger password')
    parser.add_option('-d', '--detelepolicy', dest="deletedpolicyname", default='',
                      help='delete a policy in ranger')
    parser.add_option('-a', '--addpolicy', dest="newpolicyfilename", default='',
                      help='add a policy in ranger by json file')
    return parser
def create_policy(policy_json_file_name, rangerhelper):
    # Create a Ranger policy from a JSON file; on a duplicate-name error,
    # fetch the existing policy and merge the new policy items into it.
    # Returns (policyname, is_success).  (Python 2 module: print statements.)
    if policy_json_file_name != '':
        jsonfile = open(policy_json_file_name, "r")
        json_decode=json.load(jsonfile)
        policyname = json_decode['name']
        #print json_decode
        response, is_success = rangerhelper.create_policy(json.dumps(json_decode))
        # is there is a duplicate policy error, we try to update policy.
        if is_success == False:
            #get duplicate policy name
            # The duplicate name is scraped out of the error text between
            # "policy-name=[" and "], service=[".
            policy_start_pos = response.find("policy-name=[")
            response = response[policy_start_pos+13:]
            policy_end_pos = response.find("], service=[")
            dup_policy_name = response[0:policy_end_pos]
            #get duplicate policy and add privilege item.
            service_name = 'hawq'
            print "find duplicate policy, try to update [%s]" % (dup_policy_name)
            response, is_success = rangerhelper.get_policy(service_name, dup_policy_name)
            if is_success:
                response_dict = json.load(response)
                # Append every item list from the new policy onto the
                # existing one, then push the merged policy back.
                for new_policy_item in json_decode['policyItems']:
                    response_dict["policyItems"].append(new_policy_item)
                for new_policy_item in json_decode['denyPolicyItems']:
                    response_dict["denyPolicyItems"].append(new_policy_item)
                for new_policy_item in json_decode['allowExceptions']:
                    response_dict["allowExceptions"].append(new_policy_item)
                for new_policy_item in json_decode['denyExceptions']:
                    response_dict["denyExceptions"].append(new_policy_item)
                response, is_success = rangerhelper.update_policy(service_name, dup_policy_name, \
                    json.dumps(response_dict))
            else:
                return policyname, False
        # NOTE(review): when policy_json_file_name is '' the function falls
        # through and returns None implicitly -- confirm callers never pass ''.
        return policyname, is_success
def delete_policy(delete_policy_name, rangerhelper):
    '''Delete the named policy from the "hawq" service; return the success flag.'''
    _response, is_success = rangerhelper.delete_policy("hawq", delete_policy_name)
    return is_success
if __name__ == '__main__':
    #parse argument
    parser = option_parser()
    (options, args) = parser.parse_args()
    rangeruser = options.rangerusername
    rangerpasswd= options.rangerpassword
    host = options.host
    port = str(options.port)
    new_policy_json_file_name = options.newpolicyfilename
    delete_policy_name = options.deletedpolicyname

    #init rangerresthelper
    helper = RangerRestHelper(host, port, rangeruser, rangerpasswd)

    # Add and/or delete, depending on which options were supplied; exit
    # non-zero on the first failure.  (Python 2 print statements.)
    if new_policy_json_file_name != "":
        policyname, is_success = create_policy(new_policy_json_file_name, helper)
        if is_success:
            print "policy {} created".format(policyname)
        else:
            print "policy {} create failed".format(policyname)
            sys.exit(-1)
    if delete_policy_name != "":
        is_success = delete_policy(delete_policy_name, helper)
        if is_success:
            print "policy {} deleted".format(delete_policy_name)
        else:
            print "policy {} delete failed".format(delete_policy_name)
            sys.exit(-1)
    sys.exit(0)
#MenuTitle: New Tab with Special Layers
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from builtins import str
__doc__="""
Opens a new Edit tab containing all special (bracket & brace) layers.
"""
# Glyphs, Message, etc. are injected into scope by the GlyphsApp macro runner.
Glyphs.clearLog() # clears log of Macro window

thisFont = Glyphs.font # frontmost font
affectedLayers = []
for thisGlyph in thisFont.glyphs: # loop through all glyphs
    for thisLayer in thisGlyph.layers: # loop through all layers
        # collect affected layers:
        if thisLayer.isSpecialLayer:
            affectedLayers.append(thisLayer)

# open a new tab with the affected layers:
if affectedLayers:
    newTab = thisFont.newTab()
    newTab.layers = affectedLayers
# otherwise send a message:
else:
    Message(
        title = "Nothing Found",
        message = "Could not find any bracket or brace layers in the font.",
        OKButton = None
    )
from typing import Optional
from clvm_tools.binutils import assemble
from chinilla.types.blockchain_format.program import Program
from chinilla.types.blockchain_format.sized_bytes import bytes32
from chinilla.util.ints import uint16
from chinilla.wallet.nft_wallet.ownership_outer_puzzle import puzzle_for_ownership_layer
from chinilla.wallet.nft_wallet.transfer_program_puzzle import puzzle_for_transfer_program
from chinilla.wallet.outer_puzzles import (
construct_puzzle,
create_asset_id,
get_inner_puzzle,
get_inner_solution,
match_puzzle,
solve_puzzle,
)
from chinilla.wallet.puzzle_drivers import PuzzleInfo, Solver
def test_ownership_outer_puzzle() -> None:
    """End-to-end check of the NFT ownership-layer puzzle driver: matching,
    reconstruction, inner-puzzle extraction, and solving."""
    ACS = Program.to(1)   # "anything can spend" inner puzzle
    NIL = Program.to([])
    owner = bytes32([0] * 32)
    # (mod (current_owner conditions solution)
    #   (list current_owner () conditions)
    # )
    transfer_program = assemble(  # type: ignore
        """
        (c 2 (c () (c 5 ())))
        """
    )
    transfer_program_default: Program = puzzle_for_transfer_program(bytes32([1] * 32), bytes32([2] * 32), uint16(5000))
    # Three variants: explicit owner, empty owner, and the default royalty
    # transfer program.
    ownership_puzzle: Program = puzzle_for_ownership_layer(owner, transfer_program, ACS)
    ownership_puzzle_empty: Program = puzzle_for_ownership_layer(NIL, transfer_program, ACS)
    ownership_puzzle_default: Program = puzzle_for_ownership_layer(owner, transfer_program_default, ACS)
    ownership_driver: Optional[PuzzleInfo] = match_puzzle(ownership_puzzle)
    ownership_driver_empty: Optional[PuzzleInfo] = match_puzzle(ownership_puzzle_empty)
    ownership_driver_default: Optional[PuzzleInfo] = match_puzzle(ownership_puzzle_default)
    transfer_program_driver: Optional[PuzzleInfo] = match_puzzle(transfer_program_default)

    # All four puzzles must be recognized by the matcher.
    assert ownership_driver is not None
    assert ownership_driver_empty is not None
    assert ownership_driver_default is not None
    assert transfer_program_driver is not None
    # Matched drivers expose the constructor arguments.
    assert ownership_driver.type() == "ownership"
    assert ownership_driver["owner"] == owner
    assert ownership_driver_empty["owner"] == NIL
    assert ownership_driver["transfer_program"] == transfer_program
    assert ownership_driver_default["transfer_program"] == transfer_program_driver
    assert transfer_program_driver.type() == "royalty transfer program"
    assert transfer_program_driver["launcher_id"] == bytes32([1] * 32)
    assert transfer_program_driver["royalty_address"] == bytes32([2] * 32)
    assert transfer_program_driver["royalty_percentage"] == 5000
    # Driver + inner puzzle must round-trip back to the original puzzle.
    assert construct_puzzle(ownership_driver, ACS) == ownership_puzzle
    assert construct_puzzle(ownership_driver_empty, ACS) == ownership_puzzle_empty
    assert construct_puzzle(ownership_driver_default, ACS) == ownership_puzzle_default
    assert get_inner_puzzle(ownership_driver, ownership_puzzle) == ACS
    assert create_asset_id(ownership_driver) is None

    # Set up for solve
    inner_solution = Program.to(
        [
            [51, ACS.get_tree_hash(), 1],
            [-10],
        ]
    )
    solution: Program = solve_puzzle(
        ownership_driver,
        Solver({}),
        ACS,
        inner_solution,
    )
    # Running the puzzle with the generated solution must not raise, and the
    # inner solution must be recoverable from the full solution.
    ownership_puzzle.run(solution)
    assert get_inner_solution(ownership_driver, solution) == inner_solution
import math
import random
import torch
import numpy as np
from torch.utils.data.dataset import Dataset
from PIL import Image
import os
from torchvision import transforms
from utils import *
# 1 x n_class x height x width tensor
def decode_output_to_label(temp):
    """Convert a 1 x n_class x H x W score tensor to a 1 x 1 x H x W label map.

    The class dimension is moved last, argmax is taken over it, and the
    result is reshaped back to NCHW with a single channel (int64 labels).
    """
    n, c, h, w = temp.size()
    # Move channels last so argmax over the class scores is the final axis.
    temp = temp.transpose(1, 2).transpose(2, 3).squeeze(0).view(h, w, c)
    # Fix: the original tested torch.cuda.is_available(), which moved (or
    # failed to move) the tensor based on the machine, not on where this
    # tensor actually lives.
    if temp.is_cuda:
        temp = temp.cpu()
    temp = temp.argmax(-1)
    # argmax already yields int64; .long() replaces the deprecated
    # torch.LongTensor(tensor) constructor without changing the values.
    return temp.view(1, 1, h, w).long()
# heightxwidth
class OrganSeg(Dataset):
    """Training dataset of 2-D organ-segmentation slices.

    Reads the fold's training image list plus a global per-slice index file
    and keeps only slices that (a) belong to a training image and (b) contain
    at least ``min_pixels`` labelled pixels of the target organ.
    """

    def __init__(self, current_fold, list_path, n_class, organ_id, slice_threshold=0, transforms=True):
        """
        :param current_fold: cross-validation fold selecting the image list
        :param list_path: directory containing the list files
        :param n_class: number of segmentation classes
        :param organ_id: organ column index in the slice list (1-based)
        :param slice_threshold: in (0, 1): keep that fraction of organ-bearing
            slices ranked by organ pixel count; otherwise used directly as an
            absolute minimum pixel count (0 keeps everything)
        :param transforms: when not None, slices are center-cropped to 256x256
        """
        self.organ_ID = int(organ_id)
        self.n_class = int(n_class)
        self.transforms = transforms
        self.augmentations = None
        image_list = open(training_set_filename(list_path, current_fold), 'r').read().splitlines()
        # dtype=int replaces np.int, which was removed in NumPy 1.24
        # (int is exactly what np.int aliased).
        self.training_image_set = np.zeros((len(image_list)), dtype=int)
        for i in range(len(image_list)):
            s = image_list[i].split(' ')
            self.training_image_set[i] = int(s[0])
        slice_list = open(list_training_all(list_path), 'r').read().splitlines()
        self.slices = len(slice_list)
        self.image_ID = np.zeros(self.slices, dtype=int)
        self.slice_ID = np.zeros(self.slices, dtype=int)
        self.image_filename = ['' for l in range(self.slices)]
        self.label_filename = ['' for l in range(self.slices)]
        self.average = np.zeros(self.slices)
        self.pixels = np.zeros(self.slices, dtype=int)
        for l in range(self.slices):
            s = slice_list[l].split(' ')
            self.image_ID[l] = s[0]
            self.slice_ID[l] = s[1]
            self.image_filename[l] = s[2]  # important
            self.label_filename[l] = s[3]  # important
            self.average[l] = float(s[4])  # pixel value avg
            self.pixels[l] = int(s[organ_id + 5 - 1])  # sum of label
        if 0 < slice_threshold < 1:  # e.g. 0.98
            # Keep the top slice_threshold fraction of organ-bearing slices,
            # ranked by organ pixel count.
            pixels_index = sorted(range(self.slices), key=lambda l: self.pixels[l])
            last_index = int(math.floor((self.pixels > 0).sum() * slice_threshold))
            min_pixels = self.pixels[pixels_index[-last_index]]
        else:  # or set up directly
            min_pixels = slice_threshold
        # slice_threshold = min_pixels = 0 means all organ
        self.active_index = [l for l, p in enumerate(self.pixels)
                             if p >= min_pixels and self.image_ID[l] in self.training_image_set]  # true active
        colors = [  #
            [0, 0, 0],
            [128, 64, 128],
            [244, 35, 232],
            [70, 70, 70],
            [102, 102, 156],
            [190, 153, 153],
            [153, 153, 153],
            [250, 170, 30],
            [220, 220, 0],
            [107, 142, 35],
        ]
        self.label_colours = dict(zip(range(self.n_class), colors))

    def __getitem__(self, index):
        """Return (3 x 256 x 256 float32 image, 1 x 256 x 256 int64 label)."""
        self.index1 = self.active_index[index]
        if '.dcm' in self.image_filename[self.index1]:
            image1 = dcm2npy(self.image_filename[self.index1]).astype(np.float32)
        elif '.npy' in self.image_filename[self.index1]:
            image1 = npy2npy(self.image_filename[self.index1]).astype(np.float32)
        else:
            # Previously fell through and hit an UnboundLocalError below.
            raise ValueError('unsupported image file: %s' % self.image_filename[self.index1])
        if 'T1DUAL' in self.image_filename[self.index1]:
            self.low_range = 0.0
            self.high_range = 1200.0
        elif 'T2SPIR' in self.image_filename[self.index1]:
            self.low_range = 0.0
            self.high_range = 1800.0
        else:
            # Previously an unknown modality silently reused the previous
            # slice's range (or crashed on the first call).
            raise ValueError('unknown modality in: %s' % self.image_filename[self.index1])
        # Clip intensities in place to the modality's dynamic range
        # (equivalent to the original chained np.minimum/np.maximum).
        np.clip(image1, self.low_range, self.high_range, out=image1)
        if random.randint(0, 1) == 1:
            # Augmentation: randomly invert intensities within the range.
            image1 = self.high_range + self.low_range - image1
        if '.png' in self.label_filename[self.index1]:
            label1 = png2npy(self.label_filename[self.index1])
        elif '.npy' in self.label_filename[self.index1]:
            label1 = npy2npy(self.label_filename[self.index1], mask=True)
        else:
            raise ValueError('unsupported label file: %s' % self.label_filename[self.index1])
        width = label1.shape[0]
        height = label1.shape[1]
        lbl = label1.reshape(1, width, height)
        img = image1.reshape(1, width, height)
        # NOTE(review): the default transforms=True also passes this check;
        # only transforms=None disables the center crop — confirm intended.
        if self.transforms is not None:
            img, lbl = self.transform(img, lbl)
        width, height = 256, 256
        lbl = lbl.reshape(width, height)
        img = img.reshape(width, height)
        img = np.repeat(img.reshape(1, width, height), 3, axis=0)  # 1ch -> 3ch
        lbl = lbl.reshape(1, width, height)
        if self.augmentations is not None:
            img, lbl = self.augmentations(img, lbl)
        img = np.ascontiguousarray(img, dtype=np.float32)
        lbl = np.ascontiguousarray(lbl, dtype=np.int64)
        return img, lbl

    def transform(self, img, lbl):
        """Center-crop (C, H, W) image and label arrays to 256 x 256."""
        W = 256
        H = 256
        if lbl.shape[1] > H and lbl.shape[2] > W:
            X = int((lbl.shape[1] - H) / 2)
            Y = int((lbl.shape[2] - W) / 2)
            lbl = lbl[:, X:X + H, Y:Y + W]
        if img.shape[1] > H and img.shape[2] > W:
            X = int((img.shape[1] - H) / 2)
            Y = int((img.shape[2] - W) / 2)
            img = img[:, X:X + H, Y:Y + W]
        return img, lbl

    def decode_segmap(self, temp, bias=0):
        """Map a 1 x 1 x H x W label tensor to a 3 x H x W RGB array."""
        n, c, h, w = temp.size()
        temp = temp.view(h, w)
        temp = temp.numpy()
        # np.uint8 instead of np.int8: colour components go up to 250, which
        # overflows a signed 8-bit integer and corrupted the palette.
        temp = temp.astype(np.uint8)
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_class):
            # NOTE(review): bias*3 indexes beyond the 3-element colour triples
            # for bias > 0 — presumably only bias=0 is used; verify callers.
            r[temp == l] = self.label_colours[l][0 + bias * 3]
            g[temp == l] = self.label_colours[l][1 + bias * 3]
            b[temp == l] = self.label_colours[l][2 + bias * 3]
        rgb = np.zeros((3, temp.shape[0], temp.shape[1]))
        rgb[0, :, :] = r
        rgb[1, :, :] = g
        rgb[2, :, :] = b
        return rgb

    def __len__(self):
        # Number of active (organ-bearing, training-fold) slices.
        return len(self.active_index)
class OrganTest(Dataset):
    """Per-slice test dataset: yields every slice belonging to the fold's
    test images, with intensity clipping but no cropping or augmentation."""

    def __init__(self, current_fold, list_path, transforms=None):
        self.augmentations = None
        self.transforms = transforms
        image_list = open(testing_set_filename(list_path, current_fold), 'r').read().splitlines()
        # dtype=int replaces np.int, which was removed in NumPy 1.24.
        self.testing_image_set = np.zeros((len(image_list)), dtype=int)
        for i in range(len(image_list)):
            s = image_list[i].split(' ')
            self.testing_image_set[i] = int(s[0])
        slice_list = open(list_training_all(list_path), 'r').read().splitlines()
        self.slices = len(slice_list)
        self.image_ID = np.zeros(self.slices, dtype=int)
        self.pixels = np.zeros(self.slices, dtype=int)
        self.image_filename = ['' for l in range(self.slices)]
        self.label_filename = ['' for l in range(self.slices)]
        for l in range(self.slices):
            s = slice_list[l].split(' ')
            self.image_ID[l] = s[0]
            self.image_filename[l] = s[2]  # important
            self.label_filename[l] = s[3]  # important
        self.active_index = [l for l, p in enumerate(self.pixels)
                             if self.image_ID[l] in self.testing_image_set]  # true active

    def __getitem__(self, index):
        """Return (3 x H x W float32 image, 1 x H x W label) for one slice."""
        self.index1 = self.active_index[index]
        image1 = dcm2npy(self.image_filename[self.index1]).astype(np.float32)
        if 'T1DUAL' in self.image_filename[self.index1]:
            self.low_range = 0.0
            self.high_range = 1200.0
        elif 'T2SPIR' in self.image_filename[self.index1]:
            self.low_range = 0.0
            self.high_range = 1800.0
        else:
            # Previously an unknown modality reused stale ranges from the
            # last slice, or raised AttributeError on the first call.
            raise ValueError('unknown modality in: %s' % self.image_filename[self.index1])
        # Clip intensities in place to the modality's dynamic range.
        np.clip(image1, self.low_range, self.high_range, out=image1)
        label1 = png2npy(self.label_filename[self.index1])
        width = label1.shape[0]
        height = label1.shape[1]
        img = np.repeat(image1.reshape(1, width, height), 3, axis=0)  # 1ch -> 3ch
        lbl = label1.reshape(1, width, height)
        if self.augmentations is not None:
            img, lbl = self.augmentations(img, lbl)
        if self.transforms is not None:
            img = self.transforms(img)
            lbl = self.transforms(lbl)
        return img, lbl

    def __len__(self):
        return len(self.active_index)
class OrganVolTest(Dataset):
    """Volume-level test dataset: each item stacks every slice of one test
    image into (num_slices, 3, H, W), together with a label volume."""

    def __init__(self, current_fold, list_path, transforms=None):
        self.augmentations = None
        self.n_class = 5
        self.transforms = transforms
        image_list = open(testing_set_filename(list_path, current_fold), 'r').read().splitlines()
        # dtype=int replaces np.int, which was removed in NumPy 1.24.
        self.testing_image_set = np.zeros((len(image_list)), dtype=int)
        for i in range(len(image_list)):
            s = image_list[i].split(' ')
            self.testing_image_set[i] = int(s[0])
        slice_list = open(list_training_all(list_path), 'r').read().splitlines()
        self.slices = len(slice_list)
        self.image_ID = np.zeros(self.slices, dtype=int)
        self.pixels = np.zeros(self.slices, dtype=int)
        self.image_filename = ['' for l in range(self.slices)]
        self.label_filename = ['' for l in range(self.slices)]
        for l in range(self.slices):
            s = slice_list[l].split(' ')
            self.image_ID[l] = s[0]
            self.image_filename[l] = s[2]  # important
            self.label_filename[l] = s[3]  # important
        # Grey levels for the 5 organ classes, then extra colours usable via
        # decode_segmap's bias offset.
        colors = [  #
            [0, 0, 0],
            [63, 63, 63],
            [126, 126, 126],
            [189, 189, 189],
            [252, 252, 252],
            [128, 64, 128],
            [102, 102, 156],
            [190, 153, 153],
            [250, 170, 30],
            [220, 220, 0],
            [107, 142, 35],
            [244, 35, 32],
            [152, 251, 52],
            [0, 130, 80],
            [244, 35, 232],
            [152, 251, 152],
            [0, 130, 180],
            [220, 20, 60],
            [255, 0, 0],
            [0, 0, 142],
            [0, 0, 70],
            [0, 60, 100],
            [0, 80, 100],
            [0, 0, 230],
            [119, 11, 32],
        ]
        self.label_colours = colors

    def __getitem__(self, index):
        """Return (img_vol, lbl_vol, image_id, width) for one test image."""
        self.index1 = self.testing_image_set[index]
        # All slices of the requested image, in list order.
        self.active_index = [l for l, p in enumerate(self.pixels)
                             if self.image_ID[l] == self.index1]  # true active
        if '.dcm' in self.image_filename[self.active_index[0]]:
            tmp = dcm2npy(self.image_filename[self.active_index[0]]).astype(np.float32)
        elif '.npy' in self.image_filename[self.active_index[0]]:
            tmp = npy2npy(self.image_filename[self.active_index[0]]).astype(np.float32)
        else:
            # Previously fell through and hit an UnboundLocalError below.
            raise ValueError('unsupported image file: %s' % self.image_filename[self.active_index[0]])
        width = tmp.shape[0]
        height = tmp.shape[1]
        print(width, height)
        W = 384
        H = 384
        # NOTE(review): when a slice is smaller than 384x384 the crop below is
        # skipped and these assignments will fail on shape mismatch — the
        # data is presumably always larger; verify.
        img_vol = np.zeros((len(self.active_index), 3, H, W), dtype=np.float32)
        lbl_vol = np.zeros((len(self.active_index), height, width), dtype=np.int64)
        for idx, id in enumerate(self.active_index):
            if '.dcm' in self.image_filename[id]:
                image1 = dcm2npy(self.image_filename[id]).astype(np.float32)
            elif '.npy' in self.image_filename[id]:
                image1 = npy2npy(self.image_filename[id]).astype(np.float32)
            else:
                raise ValueError('unsupported image file: %s' % self.image_filename[id])
            if '.png' in self.label_filename[id]:
                label1 = png2npy(self.label_filename[id])
            elif '.npy' in self.label_filename[id]:
                label1 = npy2npy(self.label_filename[id], mask=True)
            else:
                raise ValueError('unsupported label file: %s' % self.label_filename[id])
            img = np.repeat(image1.reshape(1, width, height), 3, axis=0)
            # NOTE(review): the label volume is filled with the image's first
            # channel, not with label1 (whose reshape was commented out in the
            # original) — looks like a debugging leftover; verify.
            lbl = img[0]
            W = 384
            H = 384
            if height > H and width > W:
                X = int((height - H) / 2)
                Y = int((width - W) / 2)
                img = img[:, X:X + H, Y:Y + W]
            img_vol[idx, :] = img
            lbl_vol[idx, :] = lbl
            if self.augmentations is not None:
                img, lbl = self.augmentations(img, lbl)
            if self.transforms is not None:
                img = self.transforms(img)
                lbl = self.transforms(lbl)
        return img_vol, lbl_vol, self.index1, width

    def __len__(self):
        return len(self.testing_image_set)

    def decode_segmap(self, temp, bias=0):
        """Map a 1 x C x H x W label tensor to a (C, 3, H, W) uint8 RGB array."""
        n, c, h, w = temp.size()
        temp = temp.view(c, h, w)
        temp = temp.numpy()
        temp = temp.astype(np.uint8)
        r = temp.copy()
        g = temp.copy()
        b = temp.copy()
        for l in range(0, self.n_class):
            # bias selects an alternative block of n_class palette entries.
            r[temp == l] = self.label_colours[l + bias * self.n_class][0]
            g[temp == l] = self.label_colours[l + bias * self.n_class][1]
            b[temp == l] = self.label_colours[l + bias * self.n_class][2]
        # Background (label 0) always uses the base palette entry.
        l = 0
        r[temp == l] = self.label_colours[l][0]
        g[temp == l] = self.label_colours[l][1]
        b[temp == l] = self.label_colours[l][2]
        rgb = np.zeros((c, 3, h, w)).astype(np.uint8)
        rgb[:, 0, :, :] = r
        rgb[:, 1, :, :] = g
        rgb[:, 2, :, :] = b
        return rgb
| 15,095 | 5,556 |
def waterfvf(temp, p):
    """Water formation volume factor Bw (res bbl/STB) via McCain's correlation.

    :param temp: temperature in degrees Fahrenheit
    :param p: pressure in psia
    :return: Bw, dimensionless (typically ~1.0-1.07)

    Fixes transcription errors against the published coefficients: the
    original used 1.33391E-2 (published 1.33391E-4), -1E-2 (published
    -1.0001E-2) and 3.588922E-7 (published 3.58922E-7), which produced
    physically impossible Bw values of 2 and above.
    """
    # Volume change due to pressure ...
    Vwp = (-1.95301E-9 * p * temp) - (1.72834E-13 * (p**2) * temp) - (3.58922E-7 * p) - (2.25341E-10 * p**2)
    # ... and due to temperature.
    Vwt = (-1.0001E-2) + (1.33391E-4 * temp) + (5.50654E-7 * temp**2)
    Bw = (1 + Vwt) * (1 + Vwp)
    return Bw
| 301 | 184 |
import torch
import torch.nn as nn
from pcfv.layers.ConvBlock import ConvBlock
from pcfv.layers.ScalingBlock import ScalingBlock
class UNet(nn.Module):
    '''
    Implementation of UNet (Ronneberger et al. U-Net: Convolutional Networks for Biomedical Image Segmentation)
    '''

    def __init__(self, in_channels, out_channels, inter_channel=64):
        '''
        :param in_channels: channel count of the network input
        :param out_channels: channel count of the network output
        :param inter_channel: width of the first encoder stage; deeper
            stages double it up to 16x
        '''
        super(UNet, self).__init__()
        self.scale_in = ScalingBlock(in_channels)
        self.scale_out = ScalingBlock(out_channels)
        # Encoder (contracting) path.
        self.conv_block1 = ConvBlock(in_channels=in_channels, out_channels=inter_channel)
        self.conv_block2 = ConvBlock(in_channels=inter_channel, out_channels=inter_channel*2)
        self.conv_block3 = ConvBlock(in_channels=inter_channel*2, out_channels=inter_channel*4)
        self.conv_block4 = ConvBlock(in_channels=inter_channel*4, out_channels=inter_channel*8)
        self.conv_block5 = ConvBlock(in_channels=inter_channel*8, out_channels=inter_channel*16)
        # Decoder (expanding) path; inputs are concatenated skip connections.
        self.conv_block6 = ConvBlock(in_channels=inter_channel*16, out_channels=inter_channel*8)
        self.conv_block7 = ConvBlock(in_channels=inter_channel*8, out_channels=inter_channel*4)
        self.conv_block8 = ConvBlock(in_channels=inter_channel*4, out_channels=inter_channel*2)
        self.conv_block9 = ConvBlock(in_channels=inter_channel*2, out_channels=inter_channel)
        self.max_pooling1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.max_pooling2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.max_pooling3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.max_pooling4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv_transpose1 = nn.ConvTranspose2d(in_channels=inter_channel*16, out_channels=inter_channel*8, kernel_size=2, stride=2)
        self.conv_transpose2 = nn.ConvTranspose2d(in_channels=inter_channel*8, out_channels=inter_channel*4, kernel_size=2, stride=2)
        self.conv_transpose3 = nn.ConvTranspose2d(in_channels=inter_channel*4, out_channels=inter_channel*2, kernel_size=2, stride=2)
        self.conv_transpose4 = nn.ConvTranspose2d(in_channels=inter_channel*2, out_channels=inter_channel, kernel_size=2, stride=2)
        self.final_conv = nn.Conv2d(in_channels=inter_channel, out_channels=out_channels, kernel_size=(1, 1))

    def forward(self, x):
        x = self.scale_in(x)
        tmp1 = self.conv_block1(x)
        # Fix: the original reused max_pooling1 for every stage, leaving
        # max_pooling2-4 dead.  MaxPool2d is stateless, so using the
        # dedicated instances is behaviour-identical but consistent with
        # the declared modules.
        tmp2 = self.conv_block2(self.max_pooling1(tmp1))
        tmp3 = self.conv_block3(self.max_pooling2(tmp2))
        tmp4 = self.conv_block4(self.max_pooling3(tmp3))
        tmp5 = self.conv_block5(self.max_pooling4(tmp4))
        tmp6 = self.conv_transpose1(tmp5)
        tmp7 = self.conv_block6(torch.cat((tmp6, tmp4), dim=1))
        tmp8 = self.conv_transpose2(tmp7)
        tmp9 = self.conv_block7(torch.cat((tmp8, tmp3), dim=1))
        tmp10 = self.conv_transpose3(tmp9)
        tmp11 = self.conv_block8(torch.cat((tmp10, tmp2), dim=1))
        tmp12 = self.conv_transpose4(tmp11)
        tmp13 = self.conv_block9(torch.cat((tmp12, tmp1), dim=1))
        y = self.final_conv(tmp13)
        y = self.scale_out(y)
        return y

    def normalized_input(self, x):
        # Expose the input scaling on its own (e.g. for inspection).
        x = self.scale_in(x)
        return x
import torch
import torch.nn as nn
from typing import Dict, Optional, Tuple, List, Union
from ding.torch_utils import MLP
class BEVSpeedConvEncoder(nn.Module):
    """
    Convolutional encoder of Bird-eye View image and speed input. It takes a BeV image and a speed scalar as input.
    The BeV image is encoded by a convolutional encoder, to get a embedding feature which is half size of the
    embedding length. Then the speed value is repeated for half embedding length time, and concated to the above
    feature to get a final feature.
    :Arguments:
        - obs_shape (Tuple): BeV image shape.
        - hidden_dim_list (List): Conv encoder hidden layer dimension list.
        - embedding_size (int): Embedding feature dimensions.
        - kernel_size (Tuple, optional): Conv kernel size for each layer. Defaults to (8, 4, 3).
        - stride (Tuple, optional): Conv stride for each layer. Defaults to (4, 2, 1).
    """

    def __init__(
            self,
            obs_shape: Tuple,
            hidden_dim_list: List,
            embedding_size: int,
            kernel_size: Tuple = (8, 4, 3),
            stride: Tuple = (4, 2, 1),
    ) -> None:
        # Tuples replace the original mutable-list default arguments;
        # indexing behaviour is unchanged and lists are still accepted.
        super().__init__()
        assert len(kernel_size) == len(stride), (kernel_size, stride)
        self._obs_shape = obs_shape
        self._embedding_size = embedding_size
        self._relu = nn.ReLU()
        layers = []
        input_dim = obs_shape[0]
        for i in range(len(hidden_dim_list)):
            layers.append(nn.Conv2d(input_dim, hidden_dim_list[i], kernel_size[i], stride[i]))
            layers.append(self._relu)
            input_dim = hidden_dim_list[i]
        layers.append(nn.Flatten())
        self._model = nn.Sequential(*layers)
        flatten_size = self._get_flatten_size()
        # Image features occupy half of the embedding; speed fills the rest.
        self._mid = nn.Linear(flatten_size, self._embedding_size // 2)

    def _get_flatten_size(self) -> int:
        # Run a dummy batch through the conv stack to size the FC layer.
        test_data = torch.randn(1, *self._obs_shape)
        with torch.no_grad():
            output = self._model(test_data)
        return output.shape[1]

    def forward(self, data: Dict) -> torch.tensor:
        """
        Forward computation of encoder
        :Arguments:
            - data (Dict): Input data, must contain 'birdview' (NHWC) and 'speed'
        :Returns:
            torch.tensor: Embedding feature of shape (batch, embedding_size).
        """
        image = data['birdview'].permute(0, 3, 1, 2)  # NHWC -> NCHW
        speed = data['speed']
        x = self._model(image)
        x = self._mid(x)
        speed_embedding_size = self._embedding_size - self._embedding_size // 2
        speed_vec = torch.unsqueeze(speed, 1).repeat(1, speed_embedding_size)
        h = torch.cat((x, speed_vec), dim=1)
        return h
return h
class FCContinuousNet(nn.Module):
    """
    Overview:
        Fully-connected continuous-action network used in ``QAC``. When
        ``final_tanh`` is set, a tanh squashes the output into (-1, 1).
    Interface:
        __init__, forward
    """

    def __init__(
            self,
            input_size: int,
            output_size: int,
            embedding_size: int = 64,
            final_tanh: bool = False,
            layer_num: int = 1,
    ) -> None:
        super(FCContinuousNet, self).__init__()
        self._act = nn.ReLU()
        # MLP trunk followed by a linear projection to the action size.
        self._main = nn.Sequential(
            MLP(input_size, embedding_size, embedding_size, layer_num + 1, activation=self._act),
            nn.Linear(embedding_size, output_size)
        )
        self._final_tanh = final_tanh

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self._main(x)
        if self._final_tanh:
            out = torch.tanh(out)
        # Collapse a trailing singleton action dimension.
        return out.squeeze(1) if out.shape[1] == 1 else out
class BEVSpeedDeterminateNet(nn.Module):
    """
    Deterministic actor/critic network over BeV image + speed observations.
    The shared ``BEVSpeedConvEncoder`` embedding feeds a fully-connected
    head: as a critic the head also consumes the action and outputs a Q
    value; as an actor it outputs a tanh-squashed action.
    :Arguments:
        - obs_shape (Tuple, optional): BeV image shape. Defaults to [5, 32, 32].
        - action_shape (Union[int, tuple], optional): Action shape. Defaults to 2.
        - encoder_hidden_dim_list (List, optional): Conv encoder hidden layer dimension list.
            Defaults to [64, 128, 256].
        - encoder_embedding_size (int, optional): Encoder output embedding size. Defaults to 512.
        - head_embedding_dim (int, optional): FC hidden layer dimension. Defaults to 512.
        - is_critic (bool, optional): Whether used as critic. Defaults to False.
    """

    def __init__(
            self,
            obs_shape: Tuple = [5, 32, 32],
            action_shape: Union[int, tuple] = 2,
            encoder_hidden_dim_list: List = [64, 128, 256],
            encoder_embedding_size: int = 512,
            head_embedding_dim: int = 512,
            is_critic: bool = False,
    ) -> None:
        super().__init__()
        self._obs_shape = obs_shape
        self._act_shape = action_shape
        self._is_critic = is_critic
        self._encoder = BEVSpeedConvEncoder(
            self._obs_shape, encoder_hidden_dim_list, encoder_embedding_size, [3, 3, 3], [2, 2, 2]
        )
        if is_critic:
            # Critic consumes embedding + action, outputs one Q value.
            self._head = FCContinuousNet(encoder_embedding_size + self._act_shape, 1, head_embedding_dim)
        else:
            # Actor outputs a tanh-squashed action vector.
            self._head = FCContinuousNet(encoder_embedding_size, self._act_shape, head_embedding_dim, final_tanh=True)

    def forward(self, obs: Dict, action: Optional[Dict] = None) -> torch.tensor:
        """
        Run the network. When acting as a critic, ``action`` must not be None.
        :Arguments:
            - obs (Dict): Observation dict.
            - action (Dict, optional): Action dict. Defaults to None.
        :Returns:
            torch.tensor: Actions (actor) or Q value (critic).
        """
        embedding = self._encoder(obs)
        if not self._is_critic:
            return self._head(embedding)
        assert action is not None
        return self._head(torch.cat([embedding, action], dim=1))
class BEVSpeedStochasticNet(nn.Module):
    """
    Stochastic actor network over BeV image + speed observations. A
    ``BEVSpeedConvEncoder`` produces an embedding which two linear heads map
    to the mean and (clamped) log-std of a Gaussian action distribution.
    :Arguments:
        - obs_shape (Tuple, optional): BeV image shape. Defaults to [5, 32, 32].
        - action_shape (Union[int, tuple], optional): Action shape. Defaults to 2.
        - encoder_hidden_dim_list (List, optional): Conv encoder hidden layer dimension list.
            Defaults to [64, 128, 256].
        - policy_hideen_size (int, optional): Encoder output embedding size. Defaults to 512.
        - log_std_min (int, optional): Log std min value. Defaults to -20.
        - log_std_max (int, optional): Log std max value. Defaults to 2.
        - init_w (float, optional): Clip value of mean and std layer weights. Defaults to 3e-3.
    """

    def __init__(
            self,
            obs_shape: Tuple = [5, 32, 32],
            action_shape: Union[int, tuple] = 2,
            encoder_hidden_dim_list: List = [64, 128, 256],
            policy_hideen_size: int = 512,
            log_std_min: int = -20,
            log_std_max: int = 2,
            init_w: float = 3e-3,
    ) -> None:
        super().__init__()
        self._obs_shape = obs_shape
        self._act_shape = action_shape
        self._log_std_min = log_std_min
        self._log_std_max = log_std_max
        self._encoder = BEVSpeedConvEncoder(
            self._obs_shape, encoder_hidden_dim_list, policy_hideen_size, [3, 3, 3], [2, 2, 2]
        )
        # Small symmetric init keeps initial actions near zero.
        self._mean_layer = nn.Linear(policy_hideen_size, action_shape)
        self._mean_layer.weight.data.uniform_(-init_w, init_w)
        self._mean_layer.bias.data.uniform_(-init_w, init_w)
        self._log_std_layer = nn.Linear(policy_hideen_size, action_shape)
        self._log_std_layer.weight.data.uniform_(-init_w, init_w)
        self._log_std_layer.bias.data.uniform_(-init_w, init_w)

    def forward(self, obs: Dict) -> Tuple[torch.tensor, torch.tensor]:
        """
        Compute the Gaussian action-distribution parameters for ``obs``.
        :Arguments:
            - obs (Dict): Observation dict.
        :Returns:
            Tuple[torch.tensor, torch.tensor]: Mean and clamped log-std.
        """
        feat = self._encoder(obs)
        mean = self._mean_layer(feat)
        log_std = self._log_std_layer(feat).clamp(self._log_std_min, self._log_std_max)
        return mean, log_std
class BEVSpeedSoftQNet(nn.Module):
    """Soft Q network: encodes a BeV+speed observation, concatenates the
    action and maps the result to a scalar Q value through a linear layer
    initialised with small symmetric weights."""

    def __init__(
            self,
            obs_shape: Tuple = [5, 32, 32],
            action_shape: Union[int, tuple] = 2,
            encoder_hidden_dim_list: List = [64, 128, 256],
            soft_q_hidden_size: int = 512,
            init_w: float = 3e-3,
    ) -> None:
        super().__init__()
        self._obs_shape = obs_shape
        self._act_shape = action_shape
        self._encoder = BEVSpeedConvEncoder(
            self._obs_shape, encoder_hidden_dim_list, soft_q_hidden_size, [3, 3, 3], [2, 2, 2]
        )
        self._output_layer = nn.Linear(soft_q_hidden_size + self._act_shape, 1)
        self._output_layer.weight.data.uniform_(-init_w, init_w)
        self._output_layer.bias.data.uniform_(-init_w, init_w)

    def forward(self, obs, action):
        """Return Q(obs, action) as a (batch, 1) tensor."""
        feat = self._encoder(obs)
        return self._output_layer(torch.cat([feat, action], dim=1))
class BEVSpeedProximalNet(nn.Module):
    """
    PPO-style actor/critic network over BeV image + speed observations.
    A shared ``BEVSpeedConvEncoder`` feeds either an actor head (one MLP per
    action dimension when ``action_shape`` is a tuple) or a scalar critic head.
    :Arguments:
        - obs_shape (Tuple, optional): BeV image shape. Defaults to [5, 32, 32].
        - action_shape (Union[int, tuple], optional): Action shape. Defaults to 2.
        - encoder_embedding_size (int, optional): Encoder output embedding size. Defaults to 512.
        - encoder_hidden_dim_list (List, optional): Conv encoder hidden dims. Defaults to [64, 128, 256].
        - head_hidden_size (int, optional): Head hidden layer width. Defaults to 128.
        - head_layer_num (int, optional): Number of hidden layers in the head. Defaults to 2.
        - is_critic (bool, optional): Build a critic head instead of an actor head. Defaults to False.
    """

    def __init__(
            self,
            obs_shape: Tuple = [5, 32, 32],
            action_shape: Union[int, tuple] = 2,
            encoder_embedding_size: int = 512,
            encoder_hidden_dim_list: List = [64, 128, 256],
            head_hidden_size=128,
            head_layer_num=2,
            is_critic=False,
    ) -> None:
        super().__init__()
        self._obs_shape = obs_shape
        self._act_shape = action_shape
        self._encoder_embedding_size = encoder_embedding_size
        self._head_hidden_size = head_hidden_size
        self._head_layer_num = head_layer_num
        self._encoder = BEVSpeedConvEncoder(
            self._obs_shape, encoder_hidden_dim_list, encoder_embedding_size, [3, 3, 3], [2, 2, 2]
        )
        self._is_critic = is_critic
        if self._is_critic:
            self._head = self._setup_critic()
        else:
            self._head = self._setup_actor()

    def _setup_actor(self):
        # Tuple action shapes get one independent head per action dimension.
        if isinstance(self._act_shape, tuple):
            return nn.ModuleList([self._setup_1dim_actor(a) for a in self._act_shape])
        else:
            return self._setup_1dim_actor(self._act_shape)

    def _setup_critic(self):
        # A critic head is structurally an actor head with a single (value)
        # output, so reuse the builder instead of duplicating it.
        return self._setup_1dim_actor(1)

    def _setup_1dim_actor(self, act_shape: int) -> torch.nn.Module:
        """Build an MLP head: (Linear + ReLU) * head_layer_num -> act_shape."""
        input_size = self._encoder_embedding_size
        layers = []
        for _ in range(self._head_layer_num):
            layers.append(nn.Linear(input_size, self._head_hidden_size))
            layers.append(nn.ReLU())
            input_size = self._head_hidden_size
        layers.append(nn.Linear(input_size, act_shape))
        return nn.Sequential(*layers)

    def forward(self, obs):
        embedding = self._encoder(obs)
        # Because we use the value AC, the actor and critic heads consume the
        # same embedding — the original if/else had two identical branches,
        # collapsed here.
        return self._head(embedding)
| 12,191 | 3,976 |
from stem.descriptor.remote import DescriptorDownloader

# Fetch the current Tor network consensus and print basic relay details.
downloader = DescriptorDownloader()
relays = downloader.get_consensus().run()
for relay in relays:
    print('Nickname:', relay.nickname)
    print('Fingerprint:', relay.fingerprint)
    print('Address:', relay.address)
    print('Bandwidth:', relay.bandwidth)
| 351 | 107 |
import getpass
import paramiko
class SSHConnection(object):
    """Context manager wrapping a paramiko SSH client connection.

    Connects on __enter__ (yielding the underlying SSHClient) and closes
    the client on __exit__.
    """

    def __init__(self, host, username, password):
        self.host = host
        self.username = username
        self.password = password
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    def __enter__(self):
        self.ssh.connect(self.host,
                         username=self.username, password=self.password)
        return self.ssh

    def __exit__(self, exc_type, exc_value, traceback):
        # Fix: the context-manager protocol passes exception info to
        # __exit__; the original (self)-only signature raised TypeError on
        # every `with` exit.  Exceptions are not suppressed.
        self.ssh.close()
def hostname(host, username, password=None):
    """Run `hostname` on the remote host and print its stdout then stderr.

    Fixes: Python-2 `print` statements (a syntax error on Python 3);
    stderr was never read (stdout was iterated twice under the name
    `error`); and the password default called getpass.getpass at function
    *definition* time, prompting on import — it is now prompted lazily
    only when no password is supplied.
    """
    if password is None:
        password = getpass.getpass("Enter pass: ")
    with SSHConnection(host, username, password) as ssh:
        stdin, stdout, stderr = ssh.exec_command('hostname')
        for line in stdout:
            print(line)
        for line in stderr:
            print(line)


if __name__ == "__main__":
    # Guarded so importing this module no longer opens an SSH connection.
    hostname('localhost', '529567')
| 920 | 255 |
import os
from sqlalchemy.orm import Session
from perception.database import SessionLocal, engine
from perception import models, schemas
from perception.core.faiss_helper import FaissCore
models.Base.metadata.create_all(bind=engine)
# Dependency
def get_db():
    """Yield a database session and guarantee it is closed afterwards."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
def get_food_by_index_id(db: Session, index_id: int):
    """Return the first Food row matching index_id; None if absent or on error."""
    try:
        matches = db.query(models.Food).filter(models.Food.index_id == index_id)
        return matches.first()
    except Exception as error:
        # Best-effort lookup: report the failure, fall through to None.
        print(repr(error))
def check_file_id(db: Session, file_id: int):
    """Return the first Food row matching file_id; None if absent or on error."""
    try:
        matches = db.query(models.Food).filter(models.Food.file_id == file_id)
        return matches.first()
    except Exception as error:
        # Best-effort lookup: report the failure, fall through to None.
        print(repr(error))
if __name__ == "__main__":
    # Fix: was `db = db = SessionLocal()` (duplicated assignment).
    db = SessionLocal()
    result = get_food_by_index_id(db, 0)
    # Guard against a missing row: the lookup returns None on miss/error,
    # which previously crashed on attribute access.
    if result is None:
        print("no Food row with index_id=0")
    else:
        print(result.index_id)
    db.close()
import os
import json
import requests
import click
from .lamblayer import Lamblayer
class Init(Lamblayer):
    """``lamblayer init``: create function.json for an existing Lambda
    function and optionally download its layer zip contents."""

    def __init__(self, profile, region, log_level):
        super().__init__(profile, region, log_level)

    def __call__(self, function_name, download):
        self.init(function_name, download)

    def init(self, function_name, download):
        """
        Initialize function config file, and download layer zip contents.
        Params
        ======
        function_name: str
            the name of function for initialize
        download: bool
            download all layer zip contents, or not.
        """
        self.logger.info(f"starting init {function_name}")
        response = self.session.client("lambda").get_function(
            FunctionName=function_name
        )
        try:
            layers = response["Configuration"]["Layers"]
            layer_version_arns = [layer["Arn"] for layer in layers]
        except KeyError:
            # The function has no layers attached.
            layer_version_arns = []
        # Log-message typo fixed: "createing" -> "creating".
        self.logger.info("creating function.json")
        self.logger.debug(f"function_name: {function_name}")
        self.logger.debug(f"layers: {layer_version_arns}")
        self._gen_function_json(function_name, layer_version_arns)
        if download:
            # Log-message typo fixed: "starging" -> "starting".
            self.logger.info("starting download layers")
            for layer_version_arn in layer_version_arns:
                self.logger.info(f"downloading {layer_version_arn}")
                layer_content_url = self._get_layer_url(layer_version_arn)
                self._download_layer(layer_content_url)

    def _gen_function_json(self, function_name, layer_version_arns):
        """
        Generate a function config file.
        Params
        ======
        function_name: str
            the name of the function
        layer_version_arns: str
            the ARN of the layer version
        """
        FUNCTION = "function.json"
        config = {
            "FunctionName": function_name,
            "Layers": layer_version_arns,
        }
        if os.path.exists(FUNCTION):
            if not click.confirm(f"Overwrite existing file {FUNCTION}?"):
                # Log-message typo fixed: "chanceled" -> "canceled".
                self.logger.info("canceled")
                return 0
        with open(FUNCTION, "w") as f:
            json.dump(config, f, indent=4)

    def _get_layer_url(self, layer_version_arn):
        """
        Return a layer zip content url.
        Params
        ======
        layer_version_arn: str
            the ARN of the layer version
        Returns
        =======
        content_url: str
            a url of layer zip content
        """
        # A layer-version ARN ends with ":<version>"; everything before that
        # is the layer ARN accepted by get_layer_version.
        version = int(layer_version_arn.split(":")[-1])
        layer_arn = layer_version_arn.rsplit(":", 1)[0]
        response = self.session.client("lambda").get_layer_version(
            LayerName=layer_arn,
            VersionNumber=version,
        )
        content_url = response["Content"]["Location"]
        return content_url

    def _download_layer(self, layer_content_url):
        """
        Download layer zip contents.
        save path format : ./{layer name}-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.zip
        Params
        ======
        layer_content_url: str
            a url of layer zip content
        """
        # Strip the query string from the pre-signed URL to build the name.
        save_path = layer_content_url.split("/")[-1].split("?")[0] + ".zip"
        response = requests.get(layer_content_url)
        with open(save_path, "wb") as f:
            f.write(response.content)
| 3,462 | 976 |
# coding=utf-8
# Copyright 2021 The Deadunits Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Implements various utility functions for loading and transforming models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deadunits import data
from deadunits import generic_convnet
from deadunits import model_defs
import gin
from six.moves import zip
import tensorflow.compat.v2 as tf
INPUT_SHAPES = {'cub200': (2, 224, 224, 3),
'cifar10': (2, 32, 32, 3),
'imagenet': (2, 224, 224, 3)}
@gin.configurable
def get_model(model_arch_name=gin.REQUIRED,
              dataset_name=gin.REQUIRED,
              load_path=None,
              prepare_for_pruning=False):
  """Creates or loads the model and returns it.

  Restoring a checkpoint does not verify architecture agreement, so
  mismatched variables can pass silently: CHECK YOUR VARIABLES.

  Args:
    model_arch_name: str, definition from .model_defs.py file.
    dataset_name: str, either 'cifar10' or 'imagenet'.
    load_path: str, checkpoint name/path to be load.
    prepare_for_pruning: bool, if True the loaded model is copied in-to one
      with TaylorScorer layer and layers are wrapped with MaskedLayer.

  Returns:
    generic_convnet.GenericConvnet, initialized or loaded model.

  Raises:
    ValueError: when the args doesn't match the specs.
    IOError: when there is no checkpoint found at the path given.
  """
  if dataset_name not in INPUT_SHAPES:
    raise ValueError('Dataset_name: %s is not one of %s' %
                     (dataset_name, list(INPUT_SHAPES.keys())))
  if not hasattr(model_defs, model_arch_name):
    raise ValueError('Model name: %s...not in model_defs.py' % model_arch_name)
  # Append the output layer sized for the dataset's class count.
  num_classes = data.N_CLASSES_BY_DATASET[dataset_name]
  arch_spec = getattr(model_defs, model_arch_name) + [['O', num_classes]]
  model = generic_convnet.GenericConvnet(
      model_arch=arch_spec, name=model_arch_name)
  dummy_input = tf.zeros(INPUT_SHAPES[dataset_name])
  model(dummy_input)  # Build the variables before any restore.
  if load_path is not None:
    tf.train.Checkpoint(model=model).restore(load_path)
  if prepare_for_pruning:
    # Rebuild with scorer/mask wrappers, then copy the trained weights over.
    trained_model = model
    model = generic_convnet.GenericConvnet(
        model_arch=arch_spec, name=model_arch_name,
        use_taylor_scorer=True,
        use_masked_layers=True)
    model(dummy_input)
    for src, dst in zip(trained_model.trainable_variables,
                        model.trainable_variables):
      dst.assign(src)
  return model
| 3,135 | 1,036 |
# Generated by Django 2.1.5 on 2019-02-07 15:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Link programmes and rooms to their paikkala (seat reservation) counterparts."""

    dependencies = [
        ('paikkala', '0010_nonblank_room'),
        ('programme', '0075_auto_20181019_1918'),
    ]

    operations = [
        # Opt-in flag: does this programme offer reservable seats at all?
        migrations.AddField(
            model_name='programme',
            name='is_using_paikkala',
            field=models.BooleanField(
                default=False,
                help_text='If selected, reserved seats for this programme will be offered.',
                verbose_name='Reservable seats',
            ),
        ),
        # One-to-one link from a programme to its paikkala Program.
        migrations.AddField(
            model_name='programme',
            name='paikkala_program',
            field=models.OneToOneField(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='kompassi_programme',
                to='paikkala.Program',
            ),
        ),
        # Optional link from a room to its paikkala Room.
        migrations.AddField(
            model_name='room',
            name='paikkala_room',
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to='paikkala.Room',
            ),
        ),
    ]
| 1,077 | 352 |
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from .views import CreateView, ListView, RetrieveView, DestroyView, UpdateView
# NOTE: the previous revision declared the patterns inside a set literal
# (unordered -- URL resolution order was nondeterministic) and gave all five
# views the identical regex, so only one route could ever match; the detail
# views (retrieve/destroy/update) also had no primary-key capture group.
# TODO(review): confirm the detail/collection path layout against the
# frontend's expectations before deploying.
urlpatterns = [
    url(r'^mentorrequests/$', ListView.as_view(), name="list"),
    url(r'^mentorrequests/create/$', CreateView.as_view(), name="create"),
    url(r'^mentorrequests/(?P<pk>[0-9]+)/$', RetrieveView.as_view(), name="retrieve"),
    url(r'^mentorrequests/(?P<pk>[0-9]+)/delete/$', DestroyView.as_view(), name="destroy"),
    url(r'^mentorrequests/(?P<pk>[0-9]+)/update/$', UpdateView.as_view(), name="update"),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 587 | 197 |
import logging
import os
import torch
import numpy as np
from fairseq import utils, options, tasks, progress_bar, checkpoint_utils
from fairseq.data.knowledge_distillation import TeacherOutputDataset
logger = logging.getLogger(__name__)
def gen_outputs(args):
    """Run the teacher model over ``args.gen_subset`` and collect top-k outputs.

    Returns:
        list indexed by example id; each entry is
        ``[topk_token_ids, topk_scores]`` (both nested lists) with positions
        whose target token is padding stripped out.
    """
    use_cuda = torch.cuda.is_available() and not args.cpu
    # Load dataset splits
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    dataset = task.dataset(args.gen_subset)
    logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
    # Load ensemble
    logger.info('loading model(s) from {}'.format(args.path))
    # NOTE(review): eval() on a command-line string executes arbitrary code.
    # Acceptable for a trusted CLI, but never expose this to untrusted input.
    models, _ = checkpoint_utils.load_model_ensemble(
        args.path.split(':'), task=task, arg_overrides=eval(args.model_overrides))
    # Distillation expects exactly one teacher, not a real ensemble.
    assert len(models) == 1
    model = models[0]
    # Optimize ensemble for generation
    model.make_generation_fast_(
        beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
        need_attn=args.print_alignment,
    )
    if args.fp16:
        model.half()
    if use_cuda:
        model.cuda()
    # Load dataset (possibly sharded)
    itr = task.get_batch_iterator(
        dataset=dataset,
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(
            task.max_positions(),
            model.max_positions()
        ),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=8,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
    ).next_epoch_itr(shuffle=False)
    # One slot per example so results can be written back by example id
    # regardless of batching order.
    outputs = [None for _ in range(len(dataset))]
    with progress_bar.build_progress_bar(args, itr) as t:
        for sample in t:
            s = utils.move_to_cuda(sample) if use_cuda else sample
            if 'net_input' not in s:
                continue
            # We assume the target is already present and known
            assert s['target'] is not None
            targets = s['target']
            with torch.no_grad():
                net_output = model(**s['net_input'])
            # net_output[0] holds the model's output scores; keep the k best
            # per position.  Resulting shape: B, T, k.
            topk_outs, topk_idx = torch.topk(net_output[0], args.distill_topk, dim=-1)  # B, T, k
            # Boolean mask of non-padding target positions per sentence.
            non_padding_mask = targets.ne(task.target_dictionary.pad()).cpu().numpy().astype(bool)
            topk_idx = topk_idx.cpu().numpy()
            topk_outs = topk_outs.cpu().numpy()
            for i, id_s in enumerate(s['id'].data):
                outputs[id_s] = [
                    topk_idx[i, non_padding_mask[i]].tolist(),
                    topk_outs[i, non_padding_mask[i]].tolist()]
    return outputs
def save_expert_outputs(args, expert_outputs):
    """Persist the teacher's top-k indices and scores as TeacherOutputDataset bins."""
    logger.info("Start saving expert outputs..")
    file_prefix = '{}.{}-{}.{}'.format(
        args.gen_subset, args.source_lang, args.target_lang, args.target_lang)
    # expert_outputs entries are [topk_idx, topk_out] pairs; write each
    # column to its own binary file with the appropriate dtype.
    for suffix, column, dtype in (('idx', 0, np.int32), ('out', 1, np.float32)):
        path = os.path.join(
            args.data,
            '{}.top{}_{}'.format(file_prefix, args.distill_topk, suffix))
        TeacherOutputDataset.save_bin(path, [o[column] for o in expert_outputs], dtype)
        logger.info("Written {}".format(path))
if __name__ == '__main__':
    # Standard fairseq generation CLI, extended with the distillation top-k size.
    parser = options.get_generation_parser()
    parser.add_argument('--distill-topk', default=8, type=int)
    args = options.parse_args_and_arch(parser)
    # Sanity-check mutually dependent generation flags before doing any work.
    assert args.path is not None, '--path required for generation!'
    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert args.replace_unk is None or args.raw_text, \
        '--replace-unk requires a raw text dataset (--raw-text)'
    if args.max_tokens is None and args.max_sentences is None:
        args.max_tokens = 12000
    logger.info(args)
    expert_outputs = gen_outputs(args)
    save_expert_outputs(args, expert_outputs)
| 4,012 | 1,341 |
import os, sys
from sqlalchemy import Column, ForeignKey, Integer, String, Unicode
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
# from sqlalchemy_imageattach.entity import Image, image_attachment
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
    """A user of the catalog, identified by name/email with an optional avatar."""

    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)       # surrogate key
    name = Column(String(250), nullable=False)
    email = Column(String(250), nullable=False)
    picture = Column(String(250))                # avatar URL, may be NULL

    @property
    def serialize(self):
        """return object data in easily serializable format"""
        return {
            'id': self.id,
            'name': self.name,
            'email': self.email,
            'picture': self.picture,
        }
class Category(Base):
    """A top-level catalog category owned by a user."""

    __tablename__ = 'category'

    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'))  # owning user
    user = relationship(User)

    @property
    def serialize(self):
        """return object data in easily serializable format"""
        return {
            'id': self.id,
            'name': self.name,
        }
class CategoryItems(Base):
    """An item inside a category, with a description and usage notes."""

    __tablename__ = 'category_item'

    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    description = Column(String(120))
    usage = Column(String(1000))
    category_id = Column(Integer, ForeignKey('category.id'))  # parent category
    category = relationship(Category)
    user_id = Column(Integer, ForeignKey('user.id'))          # owning user
    user = relationship(User)

    @property
    def serialize(self):
        """return object data in easily serializable format"""
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'usage': self.usage,
        }
#### insert at the end of file ####
# NOTE(review): importing this module creates/updates catalogitems.db as a
# side effect; consider guarding with `if __name__ == '__main__':`.
engine = create_engine('sqlite:///catalogitems.db')
Base.metadata.create_all(engine)
| 1,796 | 634 |
#!/usr/bin/env python
import rospkg
import rospy
import yaml
from std_msgs.msg import Int8
from duckietown_msgs.msg import PatrolBot, BoolStamped, RobotName
import numpy as np
import tf.transformations as tr
from geometry_msgs.msg import PoseStamped
import time
class PatrollingNode(object):
    """ROS node that steers patrol bots toward the most-idle waypoint.

    Eight waypoints (left1..left4, right1..right4) each carry a cost equal
    to the seconds since they were last visited.  When a bot reports the
    tag it has reached (on /patrol), the node clears that waypoint, marks
    the adjacent waypoint with the higher cost as the next target, and
    publishes a command code to the bot's timer node
    (1 = forward, 2 = turn around).
    """
    def __init__(self):
        #initial
        self.start = False
        self.node_name = "patrolling_node"
        #cost of each node
        self.left1_cost = 0
        self.right1_cost = 0
        self.left2_cost = 0
        self.right2_cost = 0
        self.left3_cost = 0
        self.right3_cost = 0
        self.left4_cost = 0
        self.right4_cost = 0
        #to see each node are target or not
        self.left1_target = False
        self.right1_target = False
        self.left2_target = False
        self.right2_target = False
        self.left3_target = False
        self.right3_target = False
        self.left4_target = False
        self.right4_target = False
        print "initial"
        '''#iniital starting time of each node
        self.left1_start = self.timer_start
        self.right1_start = self.timer_start
        self.left2_start = self.timer_start
        self.right2_start = self.timer_start
        self.left3_start = self.timer_start
        self.right3_start = self.timer_start
        self.left4_start = self.timer_start
        self.right4_start = self.timer_start'''
        #======Subscriber======
        self.sub_robot_info = rospy.Subscriber("/patrol", PatrolBot, self.sub_robot)
        self.sub_set_pub = rospy.Subscriber("~setpub", RobotName, self.sub_setpub)
        self.sub_reset = rospy.Subscriber("~reset", BoolStamped, self.reset)
        #======Publisher======
        # NOTE(review): the /arg4 publisher is immediately overwritten by the
        # /master one on the next line, so it is never used -- confirm whether
        # both topics were meant to be kept as separate attributes.
        self.pub_command = rospy.Publisher("/arg4/timer_node/command", Int8, queue_size=1)
        self.pub_command = rospy.Publisher("/master/timer_node/command", Int8, queue_size=1)
        #======start to count the time======
        self.start_time()
    def sub_setpub(self, msg):
        """Retarget the command publisher at the named robot's timer node."""
        self.pub_command = rospy.Publisher("/"+msg.robot_name+"/timer_node/command", Int8, queue_size=1)
    def reset(self, msg):
        """Clear all targets and costs and restart the idleness timers."""
        if msg.data:
            self.start = False
            self.left1_target = False
            self.right1_target = False
            self.left2_target = False
            self.right2_target = False
            self.left3_target = False
            self.right3_target = False
            self.left4_target = False
            self.right4_target = False
            self.left1_cost = 0
            self.right1_cost = 0
            self.left2_cost = 0
            self.right2_cost = 0
            self.left3_cost = 0
            self.right3_cost = 0
            self.left4_cost = 0
            self.right4_cost = 0
            self.start_time()
            print "initial"
    #suppose msg.name--> robotrname msg.tag--> current tag ex:left1, right3
    def sub_robot(self, msg):
        """Handle a bot's arrival report: pick its next target and publish a command.

        For the reported waypoint, compares the idleness costs of the two
        candidate successors and sets the costlier one as the new target;
        cmd 1 continues forward, cmd 2 turns the bot around.
        """
        self.count_cost()
        cmd = Int8()
        cmd.data = 0 # 1=forward 2=turnaround
        #tar = "self." + msg.name + "_target"
        #vars()[tar] = False
        self.count_target()
        if msg.tag == "left1":
            self.left1_target = False
            if self.right1_cost >= self.right2_cost:
                self.right1_start = time.time()
                self.left4_target = True
                cmd.data = 1
            else:
                self.right2_target = True
                cmd.data = 2
                self.left1_start = time.time()
        elif msg.tag == "right1":
            self.right1_target = False
            if self.left1_cost >= self.left4_cost:
                self.right2_target = True
                self.left1_start = time.time()
                cmd.data = 1
            else:
                self.left4_target = True
                cmd.data = 2
                self.right1_start = time.time()
        elif msg.tag == "left2":
            self.left2_target = False
            if self.right2_cost >= self.right3_cost:
                self.right2_start = time.time()
                self.left1_target = True
                cmd.data = 1
            else:
                self.right3_target = True
                cmd.data = 2
                self.left2_start = time.time()
        elif msg.tag == "right2":
            self.right2_target = False
            if self.left2_cost >= self.left1_cost:
                self.right3_target = True
                self.left2_start = time.time()
                cmd.data = 1
            else:
                self.left1_target = True
                cmd.data = 2
                self.right2_start = time.time()
        elif msg.tag == "left3":
            self.left3_target = False
            if self.right3_cost >= self.right4_cost:
                self.right3_start = time.time()
                self.left2_target = True
                cmd.data = 1
            else:
                self.right4_target = True
                cmd.data = 2
                self.left3_start = time.time()
        elif msg.tag == "right3":
            self.right3_target = False
            if self.left3_cost >= self.left2_cost:
                self.right4_target = True
                self.left3_start = time.time()
                cmd.data = 1
            else:
                self.left2_target = True
                cmd.data = 2
                self.right3_start = time.time()
        elif msg.tag == "left4":
            self.left4_target = False
            if self.right4_cost >= self.right1_cost:
                self.right4_start = time.time()
                self.left3_target = True
                cmd.data = 1
            else:
                self.right1_target = True
                cmd.data = 2
                self.left4_start = time.time()
        elif msg.tag == "right4":
            self.right4_target = False
            if self.left4_cost >= self.left3_cost:
                self.right1_target = True
                self.left4_start = time.time()
                cmd.data = 1
            else:
                self.left3_target = True
                cmd.data = 2
                self.right4_start = time.time()
        self.count_target()
        self.print_cost()
        # NOTE(review): pubcom() creates a brand-new Publisher and the very
        # next line publishes on it; a freshly created rospy publisher may
        # drop messages sent before subscriber connections are established --
        # verify the command actually reaches the bot.
        self.pubcom(msg.name)
        #self.pub_command = rospy.Publisher("/"+msg.name+"/timer_node/command", Int8, queue_size=1)
        self.pub_command.publish(cmd)
    def pubcom(self, pub):
        """Point the command publisher at the given robot's timer node."""
        self.pub_command = rospy.Publisher("/"+pub+"/timer_node/command", Int8, queue_size=1)
    def print_cost(self):
        """Dump every waypoint's current cost, flagging the active targets."""
        if self.left1_target:
            print "left1 --> " + str(self.left1_cost) + " (target)"
        else:
            print "left1 --> " + str(self.left1_cost)
        if self.right1_target:
            print "right1 --> " + str(self.right1_cost) + " (target)"
        else:
            print "right1 --> " + str(self.right1_cost)
        if self.left2_target:
            print "left2 --> " + str(self.left2_cost) + " (target)"
        else:
            print "left2 --> " + str(self.left2_cost)
        if self.right2_target:
            print "right2 --> " + str(self.right2_cost) + " (target)"
        else:
            print "right2 --> " + str(self.right2_cost)
        if self.left3_target:
            print "left3 --> " + str(self.left3_cost) + " (target)"
        else:
            print "left3 --> " + str(self.left3_cost)
        if self.right3_target:
            print "right3 --> " + str(self.right3_cost) + " (target)"
        else:
            print "right3 --> " + str(self.right3_cost)
        if self.left4_target:
            print "left4 --> " + str(self.left4_cost) + " (target)"
        else:
            print "left4 --> " + str(self.left4_cost)
        if self.right4_target:
            print "right4 --> " + str(self.right4_cost) + " (target)"
        else:
            print "right4 --> " + str(self.right4_cost)
        print "---------------------"
        print "---------------------"
        print ""
    #count the cost of each node (idleness)
    def count_cost(self):
        """Refresh every waypoint's cost from its last-visit timestamp."""
        self.left1_cost = self.count_time(self.left1_start)
        self.right1_cost = self.count_time(self.right1_start)
        self.left2_cost = self.count_time(self.left2_start)
        self.right2_cost = self.count_time(self.right2_start)
        self.left3_cost = self.count_time(self.left3_start)
        self.right3_cost = self.count_time(self.right3_start)
        self.left4_cost = self.count_time(self.left4_start)
        self.right4_cost = self.count_time(self.right4_start)
    #initial time of all the nodes
    def start_time(self):
        """Start (once) the shared timer and stamp every waypoint with it."""
        if not self.start: # if timer not start yet
            self.timer_start = time.time() # record start time
            self.left1_start = self.timer_start
            self.right1_start = self.timer_start
            self.left2_start = self.timer_start
            self.right2_start = self.timer_start
            self.left3_start = self.timer_start
            self.right3_start = self.timer_start
            self.left4_start = self.timer_start
            self.right4_start = self.timer_start
            self.start = True # change timer state to start
    #return current time - starting time
    def count_time(self, t):
        """Return whole seconds elapsed since timestamp ``t``."""
        return int(time.time()-t)
    def count_target(self):
        """Zero the cost of every waypoint that is currently a target."""
        if self.left1_target:
            self.left1_cost = 0
        if self.right1_target:
            self.right1_cost = 0
        if self.left2_target:
            self.left2_cost = 0
        if self.right2_target:
            self.right2_cost = 0
        if self.left3_target:
            self.left3_cost = 0
        if self.right3_target:
            self.right3_cost = 0
        if self.left4_target:
            self.left4_cost = 0
        if self.right4_target:
            self.right4_cost = 0
if __name__ == '__main__':
    # Register the node with the ROS master (fixed name, not anonymised),
    # construct the patrolling logic, and block until shutdown.
    rospy.init_node('PatrollingNode',anonymous=False)
    node = PatrollingNode()
    rospy.spin()
from module.util.got.manager.TweetCriteria import TweetCriteria
from module.util.got.manager.TweetManager import TweetManager | 125 | 36 |
# generate an nxn matrix with cells above the diagonal blocked out
def generate_matrix(n):
    """Return an n-by-n grid: 1 on/below the main diagonal, -1 (blocked) above it."""
    return [[1 if row >= col else -1 for col in range(n)]
            for row in range(n)]
# receives a matrix and row and col parameters indicating the
# starting point in the matrix
# recursively traverses the given matrix, skipping over blocked cells
# counts the number of paths on the way to the opposite corner
def count_paths(matrix, row=0, col=0):
    """Count monotone paths from (row, col) to the bottom-right corner.

    Only moves that increase row or col are taken, and cells marked -1
    (those above the diagonal in generate_matrix's output) are skipped.

    NOTE: the previous revision named its accumulators `right`/`down` but had
    them swapped relative to the move each one actually performed (and the
    comments repeated the swap); the names below match the real moves.
    Behavior is unchanged. An empty matrix now returns 0 instead of raising.
    """
    if not matrix:
        return 0
    # base case: the last cell contributes exactly one path
    if row == len(matrix) - 1 and col == len(matrix) - 1:
        return 1
    down = 0
    right = 0
    # move down one row if we are not already on the last row and the
    # cell below is not blocked
    if row != len(matrix) - 1 and matrix[row + 1][col] != -1:
        down = count_paths(matrix, row + 1, col)
    # move right one column if we are not already in the last column and
    # the cell to the right is not blocked
    if col != len(matrix) - 1 and matrix[row][col + 1] != -1:
        right = count_paths(matrix, row, col + 1)
    return down + right
'''
Below is a more efficient combinatorial solution.

If you count the number of valid paths (those that never cross the
diagonal) as the matrix dimension grows, the counts follow a well-known
sequence called the Catalan numbers:
https://en.wikipedia.org/wiki/Catalan_number

So another way to solve this problem is simply to compute, for a given
dimension n, the nth Catalan number. Doing so requires defining a
combinatoric function to calculate nCr (n choose r).
'''
import operator as op
from functools import reduce
# n choose r function
def ncr(n, r):
    """Return the binomial coefficient C(n, r) as an exact int."""
    # Exploit symmetry C(n, r) == C(n, n-r) to shorten the products.
    r = min(r, n-r)
    numerator = reduce(op.mul, range(n, n-r, -1), 1)
    denominator = reduce(op.mul, range(1, r+1), 1)
    # Floor division: the quotient is always an exact integer, whereas the
    # previous `/` returned a float that loses precision for large n.
    return numerator // denominator
# The nth Catalan number follows the formula ((2*n) choose n) / (n + 1).
def count_paths_combinatorics(n):
    """Return the number of valid paths for an n-by-n grid via Catalan numbers."""
    # Shift to zero-based indexing: a grid of dimension n corresponds to
    # the (n-1)th Catalan number.
    m = n - 1
    return int(ncr(2 * m, m) / (m + 1))
# Both approaches agree: a 5x5 grid admits 14 (= Catalan number 4) valid paths.
print(count_paths(generate_matrix(5), 0, 0))
print(count_paths_combinatorics(5))
from DTL.qt import QtCore, QtGui
from DTL.qt.QtCore import Qt
#------------------------------------------------------------
#------------------------------------------------------------
class TableModel(QtCore.QAbstractTableModel):
    """Editable 2-D list-of-lists Qt table model.

    Fixes over the previous revision:
      * mutable default arguments (``data=[[]]``, ``headers=[]``) were shared
        across every instance constructed without arguments, so edits to one
        model leaked into all others; fresh objects are now created per
        instance.
      * ``removeRows`` removed the first row *equal* to the target row
        (``list.remove``) rather than the row at ``position``.
      * ``removeColumns`` notified the view with begin/endRemove **Rows**
        instead of begin/endRemove **Columns**, and removed the first
        matching value in each row instead of the cell at ``position``.
    """
    #------------------------------------------------------------
    def __init__(self, data=None, headers=None, parent=None):
        """data: list of row lists; headers: horizontal header labels."""
        super(TableModel, self).__init__(parent)
        # Default to a single empty row (the old signature's intent) without
        # sharing mutable state between instances.
        self.__data = [[]] if data is None else data
        self.__headers = [] if headers is None else headers
    #------------------------------------------------------------
    def rowCount(self, parent):
        return len(self.__data)
    #------------------------------------------------------------
    def columnCount(self, parent):
        return len(self.__data[0])
    #------------------------------------------------------------
    def flags(self, index):
        return Qt.ItemIsEditable | Qt.ItemIsEnabled | Qt.ItemIsSelectable
    #------------------------------------------------------------
    def headerData(self, section, orientation, role):
        """Horizontal headers come from self.__headers; vertical ones are row numbers."""
        if role == Qt.DisplayRole:
            if orientation == Qt.Horizontal:
                if section < len(self.__headers):
                    return self.__headers[section]
                else:
                    return 'NONE'
            else:
                return section
    #------------------------------------------------------------
    def data(self, index, role):
        """Expose the cell value for display, editing and tooltips alike."""
        row = index.row()
        column = index.column()
        value = self.__data[row][column]
        if role in (Qt.EditRole, Qt.DisplayRole, Qt.ToolTipRole):
            return value
    #------------------------------------------------------------
    def setData(self, index, value, role=Qt.EditRole):
        """Write a cell and emit dataChanged; returns True on success."""
        if index.isValid() and role == Qt.EditRole:
            self.__data[index.row()][index.column()] = value
            self.dataChanged.emit(index, index)
            return True
        return False
    #------------------------------------------------------------
    def insertRows(self, position, rows, parent=QtCore.QModelIndex()):
        """Insert `rows` empty rows starting at `position`."""
        self.beginInsertRows(parent, position, position + rows - 1)
        for _ in range(rows):
            default_values = ['' for _ in range(self.columnCount(None))]
            self.__data.insert(position, default_values)
        self.endInsertRows()
        return True
    #------------------------------------------------------------
    def removeRows(self, position, rows, parent=QtCore.QModelIndex()):
        """Remove `rows` rows starting at `position`."""
        self.beginRemoveRows(parent, position, position + rows - 1)
        for _ in range(rows):
            # Delete by index, not by value: list.remove() would delete the
            # first row that compares equal, which may not be this one.
            del self.__data[position]
        self.endRemoveRows()
        return True
    #------------------------------------------------------------
    def insertColumns(self, position, columns, parent=QtCore.QModelIndex()):
        """Insert `columns` empty columns starting at `position` in every row."""
        self.beginInsertColumns(parent, position, position + columns - 1)
        rowCount = len(self.__data)
        for _ in range(columns):
            for j in range(rowCount):
                self.__data[j].insert(position, '')
        self.endInsertColumns()
        return True
    #------------------------------------------------------------
    def removeColumns(self, position, columns, parent=QtCore.QModelIndex()):
        """Remove `columns` columns starting at `position` from every row."""
        # Must pair beginRemoveColumns/endRemoveColumns (not ...Rows) so
        # attached views update their column layout correctly.
        self.beginRemoveColumns(parent, position, position + columns - 1)
        for _ in range(columns):
            for row in self.__data:
                del row[position]
        self.endRemoveColumns()
        return True
from HashableDict import HashableDict
| 38 | 10 |
import os
#https://stackoverflow.com/questions/3751900/create-file-path-from-variables
import subprocess

# List the filesystem drives via PowerShell.
# Fix: the previous command string was 'Get -PSDrive' (stray space), which
# PowerShell rejects -- the cmdlet is 'Get-PSDrive'.
process = subprocess.Popen(
    ['powershell', '-c', 'Get-PSDrive -PSProvider "Filesystem"'],
    stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout)
| 296 | 98 |
import asyncio
from nicett6.emulator.cover_emulator import TT6CoverEmulator
from nicett6.emulator.line_handler import (
LineHandler,
CMD_STOP,
CMD_MOVE_DOWN,
CMD_MOVE_UP,
)
from nicett6.ttbus_device import TTBusDeviceAddress
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock, MagicMock, PropertyMock
RCV_EOL = b"\r"
class TestHandleWebOnCommands(IsolatedAsyncioTestCase):
    """Test the behaviour of handle_line for web_on commands with mock controller"""

    @staticmethod
    def _make_handler(web_on):
        """Return (controller, writer, handler) with controller.web_on preset."""
        controller = AsyncMock()
        controller.web_on = web_on
        wrapped_writer = AsyncMock()
        return controller, wrapped_writer, LineHandler(wrapped_writer, controller)

    async def test_handle_web_on(self):
        controller, writer, handler = self._make_handler(False)
        await handler.handle_line(b"WEB_ON" + RCV_EOL)
        self.assertTrue(controller.web_on)
        writer.write_msg.assert_awaited_once_with(LineHandler.MSG_WEB_COMMANDS_ON)

    async def test_handle_web_on_err(self):
        # Trailing junk after WEB_ON must be rejected and leave web_on unset.
        controller, writer, handler = self._make_handler(False)
        await handler.handle_line(b"WEB_ON BAD" + RCV_EOL)
        self.assertFalse(controller.web_on)
        writer.write_msg.assert_awaited_once_with(LineHandler.MSG_INVALID_COMMAND_ERROR)

    async def test_handle_web_off(self):
        controller, writer, handler = self._make_handler(True)
        await handler.handle_line(b"WEB_OFF" + RCV_EOL)
        self.assertFalse(controller.web_on)
        writer.write_msg.assert_awaited_once_with(LineHandler.MSG_WEB_COMMANDS_OFF)

    async def test_handle_web_off_whitespace(self):
        # Surrounding whitespace (including a leading newline) is ignored.
        controller, writer, handler = self._make_handler(True)
        await handler.handle_line(b"\n  WEB_OFF  " + RCV_EOL)
        self.assertFalse(controller.web_on)
        writer.write_msg.assert_awaited_once_with(LineHandler.MSG_WEB_COMMANDS_OFF)

    async def test_handle_web_cmd_while_web_off(self):
        # POS-style (web) commands are invalid while web commands are off.
        controller, writer, handler = self._make_handler(False)
        await handler.handle_line(b"POS < 02 04 FFFF FFFF FF" + RCV_EOL)
        writer.write_msg.assert_awaited_once_with(LineHandler.MSG_INVALID_COMMAND_ERROR)

    async def test_handle_quit(self):
        controller = AsyncMock()
        controller.stop_server = MagicMock()  # stop_server is synchronous
        wrapped_writer = AsyncMock()
        handler = LineHandler(wrapped_writer, controller)
        await handler.handle_line(b"QUIT" + RCV_EOL)
        controller.stop_server.assert_called_once_with()
        wrapped_writer.write_msg.assert_not_awaited()
class TestHandleMovementCommands(IsolatedAsyncioTestCase):
    """Test the behaviour of handle_line for movement commands using mock cover"""
    async def asyncSetUp(self):
        # Spec'd mock cover so unexpected attribute access fails fast.
        self.cover = AsyncMock(spec=TT6CoverEmulator)
        self.cover.tt_addr = TTBusDeviceAddress(0x02, 0x04)
        self.cover.name = "test_cover"
        self.controller = AsyncMock()
        # Web commands off by default; individual tests opt in below.
        self.controller.web_on = False
        self.controller.lookup_device = MagicMock(return_value=self.cover)
        self.wrapped_writer = AsyncMock()
        self.line_handler = LineHandler(self.wrapped_writer, self.controller)
    async def test_handle_move_up(self):
        # Command code 05 (move up) should be echoed back as an RSP message.
        line_bytes = b"CMD 02 04 05" + RCV_EOL
        await self.line_handler.handle_line(line_bytes)
        self.cover.move_up.assert_awaited_once_with()
        self.wrapped_writer.write_msg.assert_awaited_once_with("RSP 2 4 5")
    async def test_handle_read_hex_pos(self):
        line_bytes = b"CMD 02 04 45" + RCV_EOL
        # NOTE(review): assigning to type(self.cover) patches the mock's
        # class; presumably safe because asyncSetUp builds a fresh mock per
        # test, but verify the property does not leak between tests.
        percent_pos = PropertyMock(return_value=0xAB / 0xFF)
        type(self.cover).percent_pos = percent_pos
        await self.line_handler.handle_line(line_bytes)
        percent_pos.assert_called_once_with()
        self.wrapped_writer.write_msg.assert_awaited_once_with("RSP 2 4 45 AB")
    async def test_handle_move_hex_pos(self):
        # 0xAB on a 0x00..0xFF scale is forwarded as a fractional position.
        line_bytes = b"CMD 02 04 40 AB" + RCV_EOL
        await self.line_handler.handle_line(line_bytes)
        self.cover.move_to_percent_pos.assert_awaited_once_with(0xAB / 0xFF)
        self.wrapped_writer.write_msg.assert_awaited_once_with("RSP 2 4 40 AB")
    async def test_handle_read_pct_pos(self):
        # POS-style (web) commands require web_on.
        line_bytes = b"POS < 02 04 FFFF FFFF FF" + RCV_EOL
        self.controller.web_on = True
        percent_pos = PropertyMock(return_value=0.5)
        type(self.cover).percent_pos = percent_pos
        await self.line_handler.handle_line(line_bytes)
        percent_pos.assert_called_once_with()
        self.wrapped_writer.write_msg.assert_awaited_once_with(
            "POS * 02 04 0500 FFFF FF"
        )
    async def test_handle_move_pct_pos(self):
        line_bytes = b"POS > 02 04 0500 FFFF FF" + RCV_EOL
        self.controller.web_on = True
        await self.line_handler.handle_line(line_bytes)
        self.cover.move_to_percent_pos.assert_awaited_once_with(0.5)
class TestMovementCommands(IsolatedAsyncioTestCase):
    """Test the behaviour of handle_line for movement commands using a cover emulator"""
    async def asyncSetUp(self):
        # NOTE(review): the positional emulator args (0.01, 1.77, 0.08, 1.0)
        # are undocumented here -- presumably timing/extent/speed parameters;
        # check TT6CoverEmulator's signature before changing them.
        self.cover = TT6CoverEmulator(
            "test_cover", TTBusDeviceAddress(0x02, 0x04), 0.01, 1.77, 0.08, 1.0
        )
        self.controller = AsyncMock()
        self.controller.web_on = False
        self.controller.lookup_device = MagicMock(return_value=self.cover)
        self.wrapped_writer = AsyncMock()
        self.line_handler = LineHandler(self.wrapped_writer, self.controller)
    async def test_stop(self):
        # Start a real (emulated) downward move in the background...
        mover = asyncio.create_task(
            self.line_handler.handle_line(
                f"CMD 02 04 {CMD_MOVE_DOWN:02X}".encode("utf-8") + RCV_EOL
            )
        )
        # ...let it run for a while, then issue STOP.
        # NOTE(review): real 3 s sleeps make this class slow, and the drop
        # bounds below are timing-sensitive (potentially flaky under load).
        delay = 3
        await asyncio.sleep(delay)
        await self.line_handler.handle_line(
            f"CMD 02 04 {CMD_STOP:02X}".encode("utf-8") + RCV_EOL
        )
        await mover
        self.assertGreater(self.cover.drop, 0.19)
        self.assertLess(self.cover.drop, 0.24)
    async def test_move_while_moving(self):
        mover = asyncio.create_task(
            self.line_handler.handle_line(
                f"CMD 02 04 {CMD_MOVE_DOWN:02X}".encode("utf-8") + RCV_EOL
            )
        )
        delay = 3
        await asyncio.sleep(delay)
        self.assertGreater(self.cover.drop, 0.19)
        self.assertLess(self.cover.drop, 0.24)
        # A MOVE_UP issued mid-move should pre-empt the down move and
        # return the cover fully up (drop back to 0).
        await self.line_handler.handle_line(
            f"CMD 02 04 {CMD_MOVE_UP:02X}".encode("utf-8") + RCV_EOL
        )
        await mover
        self.assertEqual(self.cover.drop, 0)
# -*- coding: utf-8 -*-
"""
Harvest Time Tracking API Client
~~~~~~~~~~~~~~~~
:copyright: © 2012 Aurora Software LLC
:license: Apache 2.0, see LICENSE for more details.
"""
from .metadata import (
__author__,
__copyright__,
__email__,
__license__,
__maintainer__,
__version__,
)
from .harvest import *
# Public API: the re-exported package metadata plus the `harvest` submodule.
__all__ = [
    '__author__', '__copyright__', '__email__', '__license__',
    '__maintainer__', '__version__', 'harvest'
]
| 455 | 178 |
class IPBlocker:
    """Interface for services that can block and unblock IP addresses.

    Concrete implementations must override both methods; the base class
    raises NotImplementedError unconditionally.
    """

    def block(self, ip: str) -> bool:
        """Block ``ip``; return True on success."""
        raise NotImplementedError()

    def unblock(self, ip: str) -> bool:
        """Lift the block on ``ip``; return True on success."""
        raise NotImplementedError()
| 168 | 55 |
import os
import sys
import asyncio
from faker import Faker
faker = Faker()
sys.path.insert(0, os.path.abspath(os.curdir))
from config import init_db
from wendy.models import *
__all__ = [
'ChairFaker',
'seed_chair'
]
class ChairFaker(object):
    """Factory that persists Chair rows with arbitrary field overrides."""

    async def generate(self, **kwargs):
        """Ensure the DB is initialised, then create, save and return one Chair."""
        await init_db()
        chair = Chair(**kwargs)
        await chair.save()
        return chair
def seed_chair():
    """Seed the chair table with a fixed Leader/Dev pair for room 1.

    The previous implementation passed bare coroutine objects to
    ``asyncio.wait`` via a manually managed event loop; that usage has been
    deprecated since Python 3.8 and raises TypeError on 3.11+.
    ``asyncio.run`` + ``asyncio.gather`` is the supported equivalent and
    still runs both inserts concurrently.
    """
    async def _seed():
        await asyncio.gather(
            ChairFaker().generate(
                position="Leader",
                room_id=1
            ),
            ChairFaker().generate(
                position="Dev",
                room_id=1
            ),
        )

    asyncio.run(_seed())
| 706 | 236 |
# Copyright 2017 Carnegie Mellon University. See LICENSE.md file for terms.
import platform
try:
import win32com.client
except ImportError:
# Tasks must be importable on any platform.
pass
import api
from tasks import outlook
class OutlookSend(outlook.Outlook):
    """ Interact with Outlook to send emails. Requires Outlook and OutlookRedemption to be installed. Windows-only.
    """
    def __init__(self, config):
        """Store the validated task config and grab the shared Outlook session."""
        if not platform.system() == 'Windows':
            raise OSError('This task is only compatible with Windows.')
        self._config = config
        self._outlook = outlook.SharedOutlook()
    def __call__(self):
        """Execute the task: compose and send one email."""
        self._send_message()
    def _send_message(self):
        """Compose the mail item in the Outbox and send it via Redemption."""
        subject, body = self._get_content()
        # Attempted workaround for emails sitting in Outbox. May not actually work correctly.
        if self._outlook.outlook_application.Explorers.Count == 0:
            folder = self._outlook.mapi_namespace.GetDefaultFolder(win32com.client.constants.olFolderOutbox)
            folder.Display()
        # NOTE(review): _exchange_check() is defined on the parent class
        # (not visible here); presumably verifies the Exchange connection.
        self._exchange_check()
        # TODO: Make sure new order works.
        # Create the item, then move it into the Outbox before filling it in.
        outbox = self._outlook.mapi_namespace.GetDefaultFolder(win32com.client.constants.olFolderOutbox)
        outlook_mail_item = self._outlook.outlook_application.CreateItem(win32com.client.constants.olMailItem)
        outlook_mail_item = outlook_mail_item.Move(outbox)
        outlook_mail_item.Subject = subject
        outlook_mail_item.Body = body
        outlook_mail_item.Save()
        for file_ in self._config['attachments']:
            outlook_mail_item.Attachments.Add(file_)
        # Need to use Redemption to actually get it to send correctly.
        new_email = win32com.client.Dispatch('Redemption.SafeMailItem')
        new_email.Item = outlook_mail_item
        new_email.Recipients.Add(self._config['destination'])
        new_email.Recipients.ResolveAll()
        new_email.Send()
    def _get_content(self):
        """ Get subject and body.
        Returns:
            str, str: First return value is email subject and second value is email body.
        """
        if self._config['dynamic']:
            # Dynamic generation is a stub: placeholder text is sent as-is.
            subject = 'DYNAMIC OPTION NOT YET IMPLEMENTED'
            body = 'DYNAMIC OPTION NOT YET IMPLEMENTED'
        else:
            subject = self._config['subject']
            body = self._config['body']
        return subject, body
    @classmethod
    def parameters(cls):
        """ Information about this task's configuration.
        Returns:
            dict: With keys 'required' and 'optional', whose values are dicts with the task's required and optional
                config keys, and whose values are human-readable strings giving information about that key.
        """
        config = {}
        required = {'username': 'str| The "From" address.',
                    'destination': 'str| The "To" address.',
                    'subject': 'str| Subject line. Specify empty string if optional parameter "dynamic" is used.',
                    'body': 'str| Message body. Specify empty string if optional parameter "dynamic" is used.'}
        optional = {'attachments': '[str]| A list of paths to files that should be attached.',
                    'dynamic': 'bool| Generate subject and body. Default False.'}
        config['required'] = required
        config['optional'] = optional
        return config
    @classmethod
    def validate(cls, config):
        """ Validate the task configuration.
        Raises:
            KeyError: If a required key is missing.
            ValueError: If a key's value is not valid.
        """
        defaults = {'attachments': [],
                    'dynamic': False}
        config = api.check_config(config, cls.parameters(), defaults)
        return config
| 3,814 | 1,054 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/lbleier/cFS/tools/cFS-GroundSystem/Subsystems/cmdGui/ParameterDialog.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated layout for the command parameter-entry dialog.

    Do not hand-edit widget geometry: the file header warns that this module
    is regenerated from ParameterDialog.ui and manual changes will be lost.
    """

    def setupUi(self, Dialog):
        """Create and position every widget on *Dialog* (fixed 782x550 layout)."""
        Dialog.setObjectName("Dialog")
        Dialog.setEnabled(True)
        Dialog.resize(782, 550)
        # Title and instruction labels above the parameter table.
        self.label_title = QtWidgets.QLabel(Dialog)
        self.label_title.setGeometry(QtCore.QRect(330, 120, 91, 31))
        font = QtGui.QFont()
        font.setFamily("Sans Serif")
        font.setPointSize(10)
        self.label_title.setFont(font)
        self.label_title.setAlignment(QtCore.Qt.AlignCenter)
        self.label_title.setObjectName("label_title")
        self.label_instructions = QtWidgets.QLabel(Dialog)
        self.label_instructions.setGeometry(QtCore.QRect(120, 140, 551, 31))
        self.label_instructions.setAlignment(QtCore.Qt.AlignCenter)
        self.label_instructions.setObjectName("label_instructions")
        # Close button box in the lower-right corner.
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(670, 490, 101, 31))
        self.buttonBox.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
        self.buttonBox.setCenterButtons(True)
        self.buttonBox.setObjectName("buttonBox")
        # Status display and Send button along the top row.
        self.status_box = QtWidgets.QTextBrowser(Dialog)
        self.status_box.setGeometry(QtCore.QRect(480, 40, 201, 41))
        self.status_box.setAutoFillBackground(False)
        self.status_box.setObjectName("status_box")
        self.label_param_title_2 = QtWidgets.QLabel(Dialog)
        self.label_param_title_2.setGeometry(QtCore.QRect(480, 10, 61, 21))
        self.label_param_title_2.setObjectName("label_param_title_2")
        self.SendButton_1 = QtWidgets.QPushButton(Dialog)
        self.SendButton_1.setGeometry(QtCore.QRect(690, 47, 71, 27))
        self.SendButton_1.setAutoDefault(False)
        self.SendButton_1.setDefault(True)
        self.SendButton_1.setObjectName("SendButton_1")
        self.label_5 = QtWidgets.QLabel(Dialog)
        self.label_5.setGeometry(QtCore.QRect(260, 10, 81, 20))
        self.label_5.setObjectName("label_5")
        # Read-only browsers showing the subsystem and command address.
        self.subSystemCommandPageLabel = QtWidgets.QLabel(Dialog)
        self.subSystemCommandPageLabel.setGeometry(QtCore.QRect(30, 10, 91, 24))
        self.subSystemCommandPageLabel.setObjectName("subSystemCommandPageLabel")
        self.subSystemTextBrowser = QtWidgets.QTextBrowser(Dialog)
        self.subSystemTextBrowser.setGeometry(QtCore.QRect(30, 40, 221, 41))
        self.subSystemTextBrowser.setObjectName("subSystemTextBrowser")
        self.commandAddressTextBrowser = QtWidgets.QTextBrowser(Dialog)
        self.commandAddressTextBrowser.setGeometry(QtCore.QRect(260, 40, 211, 41))
        self.commandAddressTextBrowser.setObjectName("commandAddressTextBrowser")
        # Three-column parameter table (headers filled in retranslateUi).
        self.tblParameters = QtWidgets.QTableWidget(Dialog)
        self.tblParameters.setGeometry(QtCore.QRect(20, 180, 731, 301))
        self.tblParameters.setObjectName("tblParameters")
        self.tblParameters.setColumnCount(3)
        self.tblParameters.setRowCount(0)
        item = QtWidgets.QTableWidgetItem()
        self.tblParameters.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tblParameters.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tblParameters.setHorizontalHeaderItem(2, item)
        self.tblParameters.verticalHeader().setVisible(False)
        self.retranslateUi(Dialog)
        self.buttonBox.clicked['QAbstractButton*'].connect(Dialog.close)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        Dialog.setTabOrder(self.status_box, self.SendButton_1)
        Dialog.setTabOrder(self.SendButton_1, self.subSystemTextBrowser)
        Dialog.setTabOrder(self.subSystemTextBrowser, self.commandAddressTextBrowser)

    def retranslateUi(self, Dialog):
        """Assign all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label_title.setText(_translate("Dialog", "Parameters"))
        self.label_instructions.setText(_translate("Dialog", "Please enter the following parameters then click \'Send\':"))
        self.label_param_title_2.setText(_translate("Dialog", "Status:"))
        self.SendButton_1.setText(_translate("Dialog", "Send"))
        self.label_5.setText(_translate("Dialog", "Command:"))
        self.subSystemCommandPageLabel.setText(_translate("Dialog", "Subsystem:"))
        item = self.tblParameters.horizontalHeaderItem(0)
        item.setText(_translate("Dialog", "Parameter"))
        item = self.tblParameters.horizontalHeaderItem(1)
        item.setText(_translate("Dialog", "Description"))
        item = self.tblParameters.horizontalHeaderItem(2)
        item.setText(_translate("Dialog", "Input"))
| 5,093 | 1,653 |
from abc import ABC, abstractmethod
from .utils import Device, cast_tuple
class EvaluationFunction(ABC):
    """Callable interface for evaluation routines run over a data loader."""

    @abstractmethod
    def __call__(self, mode, data_loader, device=Device.CPU, **kwargs):
        raise NotImplementedError

    @staticmethod
    def filter_call_rval(rval, return_dict=None, return_keys=None, key_for_non_dict=None):
        """
        Filter through what is returned by __call__.

        Args:
            rval: raw return value of __call__ (a dict or a single value).
            return_dict: None passes rval through untouched; False forces a
                non-dict result; True forces a dict result.
            return_keys: key(s) selecting entries when rval is a dict; None
                or '__all__' keeps every entry.
            key_for_non_dict: key used to wrap a non-dict rval when a dict
                result is requested.

        Raises:
            ValueError: if the requested shape cannot be produced from rval.
        """
        assert return_dict in (None, False, True), "'return_dict' should be None, False or True"
        # a simple fallthrough
        if return_dict is None:
            return rval
        # the caller does not want a dict
        if not return_dict:
            if not isinstance(rval, dict):
                return rval
            if len(rval) == 1:
                return list(rval.values())[0]
            # cast_tuple presumably normalizes return_keys into a tuple -- TODO confirm
            return_keys = cast_tuple(return_keys)
            if len(return_keys) != 1:
                raise ValueError(
                    "__call__ returned a dict but 'return_dict' is False, 'return_keys' has to be "
                    "convertible in a single key (ex.: a single element iterable with the hashable key, "
                    "the hashable key, etc)")
            return rval[return_keys[0]]
        # at this point, the caller wants a dict but rval is not a dict
        if not isinstance(rval, dict):
            if key_for_non_dict is not None:
                return {key_for_non_dict: rval}
            raise ValueError(
                "__call__ did not return a dict but 'return_dict' is True, 'key_for_non_dict' has "
                "to be provided")
        # at this point, the caller wants a dict and rval is a dict
        if return_keys is None:
            return_keys = '__all__'
        if return_keys == '__all__':
            return rval
        return_keys = cast_tuple(return_keys)
        return {k: rval[k] for k in return_keys}
| 1,936 | 547 |
# Read a salary from the user and apply a fixed percentage raise.
RAISE_PERCENT = 15  # named constant instead of a magic number in the formula

salario = float(input('\033[32m Quanto é o salário? R$'))
novo = salario + (salario * RAISE_PERCENT / 100)
print('\033[36m Um funcionário que ganhava R${:.2f}, com aumento de 15% agora recebe R${:.2f}'.format(salario, novo))
| 213 | 98 |
# 2. Generate a name-list file: one line per annotation XML, filename without extension.
import os


def build_name_list(xml_dir="./annotations/xmls/", out_path="./annotations/trainval_person.txt"):
    """Write the basename (extension stripped) of every file in *xml_dir* to *out_path*.

    Args:
        xml_dir: directory whose entries are listed (defaults preserve the
            original hard-coded paths, so running as a script is unchanged).
        out_path: destination text file, one name per line.
    Returns:
        list[str]: the names written, in directory-listing order.
    """
    names = []
    with open(out_path, "w") as ff:
        for img_path in os.listdir(xml_dir):
            # splitext keeps dots inside the base name; the original
            # split(".")[0] truncated names like "a.b.xml" to "a".
            name = os.path.splitext(img_path)[0]
            print(name)
            ff.write(name + "\n")
            names.append(name)
    return names


if __name__ == "__main__":
    build_name_list()
from flask import current_app
import pickle
import os
import time
import fcntl
class FileLock(object):
    """Context manager taking an exclusive fcntl flock on *filename*.

    The open()/sameopenfile() loop guards against the file being replaced
    (unlinked and recreated) between our open() and flock(): if the path no
    longer refers to the object we locked, we retry on the new file.
    """
    def __init__(self, filename, *args, **kwargs):
        self.filename = filename
        # Extra open() arguments (mode, buffering, ...) are forwarded verbatim.
        self.open_args = args
        self.open_kwargs = kwargs
        self.fileobj = None
    def __enter__(self):
        f = open(self.filename, *self.open_args, **self.open_kwargs)
        while True:
            fcntl.flock(f, fcntl.LOCK_EX)
            # Re-open and compare: was the path swapped while we waited for the lock?
            fnew = open(self.filename, *self.open_args, **self.open_kwargs)
            if os.path.sameopenfile(f.fileno(), fnew.fileno()):
                fnew.close()
                break
            else:
                f.close()
                f = fnew
        self.fileobj = f
        return f
    def __exit__(self, _exc_type, _exc_value, _trackback):
        # Closing the descriptor also releases the flock.
        self.fileobj.close()
CACHE_FILE = "disk_cache"


class SimpleCache(object):
    """
    A simple disk cache with file lock.

    All entries live in a single pickled dict stored in CACHE_FILE; every
    read/write takes an exclusive flock via FileLock.
    """
    def __init__(self):
        # Seed the cache file on first use so later "rb+" opens succeed.
        # pickle produces bytes, so the file must be opened in binary mode
        # (the original text-mode "w" open raises TypeError on Python 3).
        if not os.path.exists(CACHE_FILE):
            with open(CACHE_FILE, "wb") as f:
                f.write(pickle.dumps({"testCache": "testCache"}))

    @classmethod
    def create_instance(cls):
        """Return the process-wide singleton, creating it on first call.

        Bug fix: the original checked hasattr(cls, '__instance'), but the
        string '__instance' is not name-mangled while the assignment inside
        the class body is, so the check never matched and a new instance was
        created on every call.
        """
        if not hasattr(cls, '_instance'):
            cls._instance = cls()
        return cls._instance

    @staticmethod
    def _load(fileobj):
        # Read and unpickle the whole cache dict from an open binary file.
        return pickle.loads(fileobj.read())

    def __setitem__(self, key, value):
        with FileLock(CACHE_FILE, "rb+") as f:
            cache = self._load(f)
            cache[key] = value
            dumps_result = pickle.dumps(cache)
            f.seek(0)
            f.write(dumps_result)
            # Bug fix: drop stale tail bytes when the new pickle is shorter
            # than the old one, otherwise the file becomes unreadable.
            f.truncate()
            f.flush()
        current_app.logger.info('set key: %s, value: %s' % (key, value))

    def __getitem__(self, key):
        with FileLock(CACHE_FILE, "rb") as f:
            cache = self._load(f)
        current_app.logger.info("get key: %s, value: %s" % (key, cache.get(key)))
        return cache.get(key)

    def __len__(self):
        # Bug fix: the original referenced a nonexistent self.__cache and
        # always raised AttributeError; count entries from the file instead.
        with FileLock(CACHE_FILE, "rb") as f:
            return len(self._load(f))


cache = SimpleCache.create_instance()
| 2,146 | 684 |
from .metadata import Metadata, MetaProxy
from .mondata import MonProxy
from .data_source import DataTransformer, SourceCatalog, SourceItem
| 140 | 36 |
"""
User models, which are responsible for authorization and all of the
user's business data.
"""
from sqlalchemy import func
from server.framework.db import (
attribute_presetter,
BaseModel,
)
from server.framework.db.fields import (
IntegerField,
StringField,
DateTimeField,
RandomStringField,
PasswordField,
CoefficientField,
BooleanField,
PositiveIntegerField,
OnoToOneField,
)
from server.settings import settings
__all__ = ["UserModel", "PersonModel"]
class UserModel(BaseModel):
    """
    A user model that only does user authorization, but does it well.
    """
    # Identity and credentials.
    login = StringField(50, nullable=False)
    name = StringField(50, nullable=False)
    password = PasswordField(nullable=False)
    # Per-user random value mixed into the password hash (see generate_password).
    pepper = RandomStringField(48)
    token = RandomStringField(128)
    # Bookkeeping timestamps and soft-delete flag.
    created = DateTimeField(default=func.now())
    last_login = DateTimeField()
    is_deleted = BooleanField(default=False, nullable=False)

    @attribute_presetter("password")
    def password_setter(self, value):
        # Hash the plain-text value before it is stored on the model.
        return self.generate_password(value)

    def generate_password(self, password):
        """Hash *password* using this user's pepper and the global salt."""
        return UserModel.password.generate(
            password,
            self.pepper,
            settings.hash_salt
        )

    def check_auth(self):
        # No-op placeholder -- TODO: implement or document the auth check.
        pass
class PersonModel(BaseModel):
    """A user model containing business logic."""
    # NOTE(review): 'OnoToOneField' looks like a typo of 'OneToOneField', but it
    # matches the name imported from server.framework.db.fields -- confirm there.
    user = OnoToOneField(UserModel)
    type = IntegerField(nullable=False)
    # Progression / economy counters.
    level = IntegerField(default=1, nullable=False)
    experience = IntegerField(default=0, nullable=False)
    money = IntegerField(default=0, nullable=False)
    rating = IntegerField(default=0, nullable=False)
    kill_ratio = CoefficientField(default=0.0, nullable=False)
    fights_count = PositiveIntegerField(default=0, nullable=False)
| 1,806 | 530 |
"""Module for resources"""
| 27 | 8 |
from django.contrib import admin
from national_id.models import NationalId
# Register your models here.
# Expose NationalId in the Django admin with the default ModelAdmin options.
admin.site.register(NationalId)
| 139 | 38 |
# coding=utf-8
# /usr/bin/env python
'''
Author: wenqiangw
Email: wenqiangw@opera.com
Date: 2020-07-28 15:07
Desc: 数据分布画图
'''
from .trajectory_playback import Trajectory as Trajectory_his
from .trajectory_playback_v2 import Trajectory as Trajectory | 251 | 110 |
import numpy as np  # local import: this snippet did not bring np into scope

A = np.ones((3, 3))
# print() keeps this demo valid on Python 3 as well
# (the original bare print statement is a SyntaxError there).
print(3 * A - 1)
# [[ 2.  2.  2.]
#  [ 2.  2.  2.]
#  [ 2.  2.  2.]]
"""
This program builds a two-layer neural network for the Iris dataset.
The first layer is a relu layer with 10 units, and the second one is
a softmax layer. The network structure is specified in the "train" function.
The parameters are learned using SGD. The forward propagation and backward
propagation are carried out in the "compute_neural_net_loss" function.
"""
import numpy as np
import os, sys
import math
# Data sets
IRIS_TRAINING = os.getcwd() + "/data/iris_training.csv"
IRIS_TEST = os.getcwd() + "/data/iris_test.csv"
def get_data():
    """Load the iris training/test CSVs from disk.

    Returns:
        (train_x, train_y, test_x, test_y): features are the first four
        columns; labels are column 4 cast to int64.
    """
    # Load datasets.
    train_data = np.genfromtxt(IRIS_TRAINING, skip_header=1,
        dtype=float, delimiter=',')
    test_data = np.genfromtxt(IRIS_TEST, skip_header=1,
        dtype=float, delimiter=',')
    train_x = train_data[:, :4]
    train_y = train_data[:, 4].astype(np.int64)
    test_x = test_data[:, :4]
    test_y = test_data[:, 4].astype(np.int64)
    return train_x, train_y, test_x, test_y
def compute_neural_net_loss(params, X, y, reg=0.0):
    """
    Neural network loss function (relu hidden layer + softmax output).

    Fully vectorized: the original per-sample/per-class Python loops are
    replaced by the equivalent matrix expressions, and the softmax uses a
    max-shift for numerical stability (the shift cancels in the ratio, so
    probabilities and loss are mathematically unchanged).

    Inputs:
    - params: dictionary of parameters, including "W1", "b1", "W2", "b2"
    - X: N x D array of training data. Each row is a D-dimensional point.
    - y: 1-d array of shape (N, ) for the training labels.
    - reg: L2 regularization strength.
    Returns:
    - loss: the softmax loss with regularization
    - grads: dictionary of gradients for the parameters in params
    """
    # Unpack variables from the params dictionary
    W1, b1 = params['W1'], params['b1']
    W2, b2 = params['W2'], params['b2']
    N = X.shape[0]

    # forward propagation
    z1 = X.dot(W1) + b1
    u1 = np.maximum(z1, 0)  # relu
    z2 = u1.dot(W2) + b2
    shifted = z2 - z2.max(axis=1, keepdims=True)
    exp_scores = np.exp(shifted)
    probs = exp_scores / exp_scores.sum(axis=1, keepdims=True)
    nll = -np.log(probs[np.arange(N), y]).sum()
    loss = nll / N + 0.5 * reg * ((W1 ** 2).sum() + (W2 ** 2).sum())

    # backward propagation
    delta2 = probs.copy()
    delta2[np.arange(N), y] -= 1.0        # dL/dz2 per sample
    dW2 = u1.T.dot(delta2) / N + reg * W2
    db2 = delta2.sum(axis=0) / N
    # relu subgradient: keep the original convention of 1 at z == 0.
    delta1 = delta2.dot(W2.T) * (z1 >= 0)
    dW1 = X.T.dot(delta1) / N + reg * W1
    db1 = delta1.sum(axis=0) / N

    grads = {'W1': dW1, 'W2': dW2, 'b1': db1, 'b2': db2}
    return loss, grads
def predict(params, X):
    """
    Use the trained weights of this classifier to predict labels for
    data points.
    Inputs:
    - params: dictionary of parameters, including "W1", "b1", "W2", "b2"
    - X: N x D array of data. Each row is a D-dimensional point.
    Returns:
    - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
      array of length N, and each element is an integer giving the predicted
      class.
    """
    # Unpack variables from the params dictionary
    W1, b1 = params['W1'], params['b1']
    W2, b2 = params['W2'], params['b2']
    # Forward pass only; softmax is monotonic, so the argmax of the logits z2
    # equals the argmax of the class probabilities.
    # (A dead `y_pred = np.zeros(X.shape[1])` pre-initialization -- wrong
    # shape and immediately overwritten -- was removed.)
    relu = lambda x: x * (x > 0)
    z1 = np.dot(X, W1) + b1
    u1 = relu(z1)
    z2 = np.dot(u1, W2) + b2
    return np.argmax(z2, axis=1)
def acc(ylabel, y_pred):
    """Fraction of predictions that exactly match the labels."""
    matches = (ylabel == y_pred)
    return np.mean(matches)
def sgd_update(params, grads, learning_rate):
    """
    Perform an in-place SGD step on every parameter in params.
    """
    for name in params:
        params[name] -= learning_rate * grads[name]
def train(X, y, Xtest, ytest, learning_rate=1e-3, reg=1e-5, epochs=100, batch_size=20):
    """Train the two-layer network with mini-batch SGD.

    Inputs:
    - X, y: training features/labels; Xtest, ytest: held-out evaluation set.
    - learning_rate, reg: SGD step size and L2 regularization strength.
    - epochs: number of passes over the training data. Bug fix: the loop
      previously iterated over the module-level global ``max_epochs`` and
      silently ignored this parameter.
    - batch_size: mini-batch size.
    Returns:
    - params: dict of learned weights "W1", "b1", "W2", "b2".
    """
    num_train, dim = X.shape
    num_classes = np.max(y) + 1  # assume y takes values 0...K-1 where K is number of classes
    num_iters_per_epoch = int(math.floor(1.0 * num_train / batch_size))

    # Small random initialization for weights, zeros for biases.
    params = {}
    std = 0.001
    params['W1'] = std * np.random.randn(dim, 10)
    params['b1'] = np.zeros(10)
    params['W2'] = std * np.random.randn(10, num_classes)
    params['b2'] = np.zeros(num_classes)

    for epoch in range(epochs):
        perm_idx = np.random.permutation(num_train)
        # perform mini-batch SGD update
        for it in range(num_iters_per_epoch):
            idx = perm_idx[it * batch_size:(it + 1) * batch_size]
            batch_x = X[idx]
            batch_y = y[idx]
            # evaluate loss and gradient
            loss, grads = compute_neural_net_loss(params, batch_x, batch_y, reg)
            # update parameters
            sgd_update(params, grads, learning_rate)
        # evaluate and print every 10 epochs
        if epoch % 10 == 0:
            train_acc = acc(y, predict(params, X))
            test_acc = acc(ytest, predict(params, Xtest))
            print('Epoch %4d: loss = %.2f, train_acc = %.4f, test_acc = %.4f'
                  % (epoch, loss, train_acc, test_acc))
    return params
# Hyper-parameters for the training run below.
max_epochs = 200
batch_size = 20
learning_rate = 0.1
reg = 0.001
# get training and testing data
train_x, train_y, test_x, test_y = get_data()
params = train(train_x, train_y, test_x, test_y, learning_rate, reg, max_epochs, batch_size)
# Classify two new flower samples.
def new_samples():
    """Return two unlabeled iris measurements (shape (2, 4), float32)."""
    samples = [[6.4, 3.2, 4.5, 1.5],
               [5.8, 3.1, 5.0, 1.7]]
    return np.array(samples, dtype=np.float32)
# Run the two held-out samples through the trained network and report classes.
new_x = new_samples()
predictions = predict(params, new_x)
print("New Samples, Class Predictions: {}\n".format(predictions))
| 5,901 | 2,320 |
import sublime
import sublime_plugin
from os.path import isfile
from ..core import oa_setting, setup_new_override_view
from ..core import PackageListCollectionThread, ContextHelper
###----------------------------------------------------------------------------
class OverrideAuditContextCreateOverrideCommand(ContextHelper,sublime_plugin.TextCommand):
    """
    When invoked on a read-only view that represents a package resource that
    does not yet exist on disk (e.g. as opened by 'View Package Resource' in
    the command palette), promote that view to be a potential new override.
    """
    def run(self, edit, **kwargs):
        target = self.view_target(self.view, **kwargs)
        # self.package is populated by _ctx_package() (called from is_enabled).
        if self.package is not None:
            target.window().run_command("override_audit_create_override", {
                "package": self.package
            })
        else:
            setup_new_override_view(target, reposition=False)
    def description(self, **kwargs):
        # Menu caption: name the package when triggered from a package context.
        if self.package is not None:
            return self.caption("Create Override in '%s'" % (self.package), **kwargs)
        return self.caption("Override this resource", **kwargs)
    def _ctx_package(self, **kwargs):
        """
        Check the context of the command to see if it's being triggered on the
        name of a package (only) which can contain overrides. If so, store the
        name in the tracking variable and return it. Otherwise, reset the
        tracking variable and return None.
        """
        target = self.view_target(self.view, **kwargs)
        ctx = self.view_context(target, False, **kwargs)
        self.package = ctx.package if self.package_overrides_possible(target, ctx) else None
        return self.package
    def is_visible(self, **kwargs):
        if self.always_visible(**kwargs):
            return True
        return self.package is not None or self.is_enabled(**kwargs)
    def is_enabled(self, **kwargs):
        # Always enabled if we're invoked via a context action on a package
        # that can contain overrides.
        if self._ctx_package(**kwargs) is not None:
            return True
        # The current buffer needs to be eligible to promote to an override.
        spp = sublime.packages_path()
        view = self.view_target(self.view, **kwargs)
        name = view.file_name()
        # Unnamed or editable buffers can't represent new overrides, and neither
        # can files not in the packages folder or files that already exist.
        if (name is None or not view.is_read_only() or
                not name.startswith(spp) or isfile(name)):
            return False
        # We can only enable the command if this file represents a resource
        # that actually exists in the package.
        res = name[len(spp) + 1:].replace("\\", "/")
        if "Packages/" + res not in sublime.find_resources(res.split('/')[-1]):
            return False
        return True
###----------------------------------------------------------------------------
| 3,021 | 780 |
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Johannes Schwarz <johannes.schwarz@frm2.tum.de>
#
# *****************************************************************************
"""Auxiliary classes for the sample changer."""
from nicos.core import Attach, Moveable, Override, Readable, oneof, status
class SamplePusher(Moveable):
    """Move the sample up/down inside the sample changer device."""

    valuetype = oneof('down', 'up')

    attached_devices = {
        'actuator': Attach('Actuator to perform the switch', Moveable),
        'sensort': Attach('Sensor at top of the tube.', Readable),
        'sensorl': Attach('Sensor at down of the tube', Readable),
    }

    parameter_overrides = {
        'unit': Override(default=''),
        'fmtstr': Override(default='%s'),
    }

    def doInit(self, mode):
        # Sensor we are currently moving towards; None when no move is pending.
        self._target_sens = None

    def doStart(self, target):
        self._attached_actuator.move(target)
        # Remember which end sensor confirms arrival at the requested position.
        if target == 'up':
            self._target_sens = self._attached_sensort
        elif target == 'down':
            self._target_sens = self._attached_sensorl

    def doStatus(self, maxage=0):
        # it is a local object so poller gives wrong state here but maw works
        if self._target_sens:
            if self._target_sens.read(maxage) == 0:
                # Destination sensor not yet triggered -> still moving.
                return status.BUSY, 'moving'
            elif self._target_sens.read(maxage) == 1:
                self._target_sens = None
        return status.OK, 'idle'

    def doRead(self, maxage=0):
        # Position derived from whichever end sensor is currently active.
        if self._attached_sensort.read(maxage):
            return 'up'
        elif self._attached_sensorl.read(maxage):
            return 'down'
| 2,557 | 780 |
__author__ = 'shuai'
class Solution:
    # @param {integer[]} nums
    # @return {string}
    def largestNumber(self, nums):
        """Reorder nums (in place) so their concatenation is maximal; return it.

        Replaces the original O(n^2) selection-style swap loops with an
        O(n log n) sort using the classic pairwise comparator: a goes before
        b iff str(a)+str(b) >= str(b)+str(a). The list is still sorted in
        place, and the all-zero input still collapses to '0'.
        """
        from functools import cmp_to_key

        def compare(a, b):
            # Negative -> a first (descending by concatenation value).
            sa, sb = str(a), str(b)
            if sa + sb > sb + sa:
                return -1
            if sa + sb < sb + sa:
                return 1
            return 0

        nums.sort(key=cmp_to_key(compare))
        # If the largest element is 0, every element is 0.
        if nums and nums[0] == 0:
            return '0'
        return ''.join(str(x) for x in nums)
# Demo run; print() form is valid on both Python 2 and Python 3
# (the original bare print statement is a SyntaxError on Python 3).
sol = Solution()
print(sol.largestNumber([3, 30, 34, 5, 9]))
| 685 | 243 |
"""
Test the steps needed to generate wild type and mutant data for use in the statistical analysis
Usage: pytest -v -m "not notest" test_data_generation.py
The use of -m "not notest" is to be able to omit certain tests with the @pytest.mark.notest decorator
"""
from pathlib import Path
from lama.registration_pipeline import run_lama
import os
import shutil
import pytest
from scripts import lama_job_runner
from . import (registration_root, mut_registration_dir, wt_registration_dir)
@pytest.fixture
def delete_previous_files():
    """
    Remove the output generated from previous tests. This does not occur directly after the test as we may want to
    look at the results.
    """
    def delete(root: Path):
        shutil.rmtree(root / 'output', ignore_errors=True)
        stale_suffixes = ('.log', 'jobs.csv', 'csv.lock', '.yaml')
        for entry in root.iterdir():
            if str(entry).endswith(stale_suffixes):
                entry.unlink()
    for reg_dir in (wt_registration_dir, mut_registration_dir):
        delete(reg_dir)
def test_make_jobs_file(delete_previous_files):
    """Create a consumable jobs file for both the WT and mutant runs."""
    config_file = registration_root / 'registration_config.toml'
    for reg_dir in (wt_registration_dir, mut_registration_dir):
        lama_job_runner.lama_job_runner(config_file, reg_dir, make_job_file=True)
def test_lama_job_runner():
    """
    Test the lama job runner which was made to utilise multiple machines or the grid.
    This test just uses one machine for the tests at the moment.
    test_make_jobs_file() should run before this to create a jobs file that can be consumed.
    This test should be run before the stats test as it creates data that the stats test needs.
    """
    config_file = registration_root / 'registration_config.toml'
    for reg_dir in (wt_registration_dir, mut_registration_dir):
        assert lama_job_runner.lama_job_runner(config_file, reg_dir) is True
| 1,907 | 596 |
    def install(self):
        """Delegate installation to the attached SCM handler (self.scm)."""
        self.scm.install()
| 42 | 16 |
"""Config flow for pioneer_async integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_TIMEOUT,
)
from homeassistant.core import callback
from .pioneer_avr import PioneerAVR # pylint: disable=import-error
from .const import (
DATA_SCHEMA,
OPTIONS_DEFAULTS,
CONF_UNIQUE_ID,
CONF_COMMAND_DELAY,
CONF_VOLUME_WORKAROUND,
)
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
    """
    Validate the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.
    Returns the input data augmented with the derived unique id.
    Raises:
        CannotConnect: if the AVR could not be constructed or reached.
    """
    _LOGGER.debug(">> validate_input(%s)", data)
    try:
        pioneer = PioneerAVR(data[CONF_HOST], data[CONF_PORT])
        await pioneer.connect()
    except Exception as exc:  # pylint: disable=broad-except
        # Narrowed from a bare "except:" so asyncio.CancelledError and
        # KeyboardInterrupt are no longer swallowed; the cause is chained
        # so the real connection error shows up in the logs.
        raise CannotConnect from exc
    await pioneer.shutdown()
    del pioneer
    # Return info that you want to store in the config entry.
    device_unique_id = data[CONF_HOST] + ":" + str(data[CONF_PORT])
    return {
        **data,
        CONF_UNIQUE_ID: device_unique_id,
    }
class PioneerAVRFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle Pioneer AVR config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH

    async def async_step_user(self, user_input=None):
        """Handle the initial step."""
        _LOGGER.debug(">> config.async_step_user(%s)", user_input)
        errors = {}
        if user_input is not None:
            try:
                info = await validate_input(self.hass, user_input)
                # One entry per host:port -- abort if already configured.
                await self.async_set_unique_id(info[CONF_UNIQUE_ID])
                self._abort_if_unique_id_configured()
                return self.async_create_entry(
                    title=info[CONF_UNIQUE_ID], data=user_input
                )
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
        # First visit or validation failed: (re)show the form with any errors.
        return self.async_show_form(
            step_id="user", data_schema=DATA_SCHEMA, errors=errors
        )

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return PioneerAVROptionsFlowHandler(config_entry)
class PioneerAVROptionsFlowHandler(config_entries.OptionsFlow):
    """Handle an options flow for the Pioneer AVR integration."""

    def __init__(self, config_entry: config_entries.ConfigEntry):
        """Initialize options flow."""
        _LOGGER.debug(">> options.__init__(%s)", config_entry)
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Handle options flow."""
        _LOGGER.debug(">> options.async_step_init(%s)", user_input)
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
        ## Get current set of options and build options schema
        ## (defaults layered under any options already stored on the entry).
        options = {
            **OPTIONS_DEFAULTS,
            **(self.config_entry.options if self.config_entry.options else {}),
        }
        data_schema = vol.Schema(
            {
                ## TODO: add sources option: how to ask the user for a dictionary in config flow?
                vol.Optional(
                    CONF_SCAN_INTERVAL, default=options[CONF_SCAN_INTERVAL]
                ): int,
                vol.Optional(CONF_TIMEOUT, default=options[CONF_TIMEOUT]): vol.Coerce(
                    float
                ),
                vol.Optional(
                    CONF_COMMAND_DELAY, default=options[CONF_COMMAND_DELAY]
                ): vol.Coerce(float),
                vol.Optional(
                    CONF_VOLUME_WORKAROUND, default=options[CONF_VOLUME_WORKAROUND]
                ): bool,
            }
        )
        return self.async_show_form(step_id="init", data_schema=data_schema)
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect to the AVR."""
| 4,265 | 1,301 |
from __future__ import absolute_import
from django.contrib import admin
from .models import Invitation
@admin.register(Invitation)
class InvitationAdmin(admin.ModelAdmin):
    """Admin list view for invitations (decorator form of site registration)."""
    list_display = ('user', 'email', 'expiration_date')
| 253 | 72 |
import argparse
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle as pkl
import common.config as cfg
from common.utils import Struct
# Global plot styling shared by all three 3-D subplots below.
matplotlib.rcParams.update({'font.size': 24})
matplotlib.rcParams['lines.linewidth'] = 2.5
matplotlib.rcParams['lines.markersize'] = 4
# Command-line interface: which histories/baselines to plot and how to render them.
ap = argparse.ArgumentParser()
ap.add_argument('--dataset', type=str, required=False, default='mnist')
ap.add_argument('--num-nodes', type=int, required=False, default=125)
ap.add_argument('--epochs', type=int, required=False)
ap.add_argument('--histories', type=str, nargs='+', required=True)
ap.add_argument('--baselines', type=str, nargs='+', required=True)
ap.add_argument('--labels', type=str, nargs='+', required=True)
ap.add_argument('--name', type=str, required=True)
ap.add_argument('--ncols', type=int, required=True)
ap.add_argument('--dpi', type=int, required=True)
ap.add_argument('--colors', type=str, nargs='+', required=False, default=[])
ap.add_argument('--fracs', type=float, nargs='+', required=False, default=[])
ap.add_argument('--accuracy', type=float, required=False)
args = vars(ap.parse_args())
args = Struct(**args)  # attribute-style access to the parsed arguments
# One row of three 3-D bar charts (cost, power, delay panels).
fig = plt.figure(figsize=(30, 7.5))
ax1 = fig.add_subplot(131, projection='3d')
ax2 = fig.add_subplot(132, projection='3d')
ax3 = fig.add_subplot(133, projection='3d')
colors = ['k.-', 'r.:', 'm.:', 'b.:', 'g.:', 'c.:', 'y.:', 'k.:', 'r', 'b']
if len(args.colors):
    colors = args.colors
def get_milestone_epoch(mile_list, milestone):
    """Return the 1-based index of the first value above *milestone* (None if none)."""
    for epoch, value in enumerate(mile_list, 1):
        if value > milestone:
            return epoch
def calculate_num_euts(eut_schedule, mile):
    """Count schedule entries that occur at or before *mile*."""
    return sum(1 for entry in eut_schedule if entry <= mile)
# Accumulators keyed by 'E_{e_frac}_D_{d_frac}' tags (and baseline names below).
milestones = {}
power = {}
delay = {}
cost = {}
# Cost-model coefficients for the energy, delay, and EUT-interval terms.
c1, c2, c3 = 10**(-4), 10**(2), 0.5*10**(4)
for idx, history in enumerate(args.histories):
    # Each history pickle has a companion '<name>_aux.pkl' with run settings.
    aux = history[:-4] + '_aux.pkl'
    x_ax, y_ax, l_test, rounds, eps, eta_phi = pkl.load(
        open('../ckpts/{}_{}/history/{}'.format(
            args.dataset, args.num_nodes, history), 'rb'))
    train_args, eut_schedule = pkl.load(
        open('../ckpts/{}_{}/history/{}'.format(
            args.dataset, args.num_nodes, aux), 'rb'))
    nc = train_args.num_clusters[0]
    nw = train_args.num_workers
    # Global vs device-to-device energy/delay, scaled by the run's fractions.
    e_glob, e_d2d = cfg.E_glob, cfg.E_glob*train_args.e_frac
    d_glob, d_d2d = cfg.D_glob, cfg.D_glob*train_args.d_frac
    alpha = 1600
    # Epoch at which the run first crossed the target accuracy.
    miles = get_milestone_epoch(y_ax, args.accuracy)
    tag = 'E_{}_D_{}'.format(train_args.e_frac, train_args.d_frac)
    milestones[tag] = miles
    rounds = sum(rounds[:miles])*train_args.num_clusters[0]
    num_eut = calculate_num_euts(eut_schedule, miles)
    cost[tag] = c1*(num_eut*nc*e_glob + nw*rounds*e_d2d) + \
        c2*(num_eut*d_glob + rounds*d_d2d) + \
        sum([
            c3*(1-(eut_schedule[i-1]+alpha)/(
                eut_schedule[i-1]+eut_schedule[i]+alpha)
                ) for i in range(1, len(eut_schedule))
        ])
    power[tag] = (num_eut*nc*e_glob*d_glob) + (nw*rounds*e_d2d*d_d2d)
    delay[tag] = (num_eut*d_glob) + (rounds*d_d2d)
# NOTE(review): ('central') is a plain string, not a tuple, so zip iterates its
# characters and n takes values 'c', 'e', ...; with a single baseline only
# n == 'c' is produced, and later code does index milestones['c'] / power['c'] /
# delay['c'] -- confirm this string-iteration behaviour is intended.
for (idx, history), n in zip(enumerate(args.baselines), ('central')):
    x_ax, y_ax, l_test, rounds, eps, eta_phi, beta, mu = pkl.load(
        open('../ckpts/{}_{}/history/{}'.format(
            args.dataset, args.num_nodes, history), 'rb'))
    miles = get_milestone_epoch(y_ax, args.accuracy)
    milestones[n] = miles
    # cost[n] = c1*(train_args.epochs*nw*e_glob) + c2*(train_args.epochs*d_glob)
    power[n] = miles*nw*e_glob*d_glob
    delay[n] = miles*d_glob
# Arrange the per-(energy frac, delay frac) metrics into n x n grids; the
# delay axis is reversed (n-j-1) so labels read outward from the origin.
fracs = args.fracs
n = len(fracs)
power_mat = np.zeros((n, n))
delay_mat = np.zeros((n, n))
miles_mat = np.zeros((n, n))
costs_mat = np.zeros((n, n))
for i, ie in enumerate(fracs):
    for j, jd in enumerate(fracs):
        tag = 'E_{}_D_{}'.format(ie, jd)
        power_mat[i,n-j-1] = power[tag]
        delay_mat[i,n-j-1] = delay[tag]
        miles_mat[i,n-j-1] = milestones[tag]
        costs_mat[i,n-j-1] = cost[tag]
column_names = list(map(str, fracs[::-1]))
row_names = list(map(str, fracs))
# Bar geometry for the 3-D bar charts (0.5-wide bars offset by 0.25).
r, c = len(fracs), len(fracs)
xpos = np.arange(0, r, 1)
ypos = np.arange(0, c, 1)
xpos, ypos = np.meshgrid(xpos+0.25, ypos+0.25)
x, y = np.meshgrid(np.arange(0, r+1, 1),
                   np.arange(0, c+1, 1))
xpos = xpos.flatten()
ypos = ypos.flatten()
zpos = np.zeros(r*c)
dx = 0.5 * np.ones_like(zpos)
dy = dx.copy()
dz = costs_mat.flatten()/(10**4)
flat = np.ones((r+1, c+1))*milestones['c']
cs = ['m', 'b', 'g', 'c'] * c
# Panel (a): cumulative cost.
ax1.bar3d(xpos, ypos, zpos, dx, dy, dz, color=cs)
# ax1.plot_surface(x, y, flat, alpha=0.4, color='k')
ax1.w_xaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax1.w_xaxis.set_ticklabels(column_names)
ax1.w_yaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax1.w_yaxis.set_ticklabels(row_names)
ax1.set_xlabel('delay fraction', labelpad=25)
ax1.set_ylabel('energy fraction', labelpad=25)
ax1.set_zlabel('cumm. cost ($x 10^4$)', labelpad=10)
# Panel (b): cumulative power, rescaled for readable z-ticks.
k=(10**6)
dz = power_mat.flatten()/k
flat = np.ones((r+1, c+1))*power['c']/k
ax2.bar3d(xpos, ypos, zpos, dx, dy, dz, color=cs)
# ax2.plot_surface(x, y, flat, alpha=0.6, color='k')
ax2.w_xaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax2.w_xaxis.set_ticklabels(column_names)
ax2.w_yaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax2.w_yaxis.set_ticklabels(row_names)
ax2.set_xlabel('delay fraction', labelpad=25)
ax2.set_ylabel('energy fraction', labelpad=25)
ax2.set_zlabel('cumm. power ($x 10^6$ J)', labelpad=10)
# Panel (c): cumulative delay.
k=100
dz = delay_mat.flatten()/k
flat = np.ones((r+1,c+1))*delay['c']/k
ax3.bar3d(xpos, ypos, zpos, dx, dy, dz, color=cs)
# ax3.plot_surface(x, y, flat, alpha=0.6, color='k')
ax3.w_xaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax3.w_xaxis.set_ticklabels(column_names)
ax3.w_yaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax3.w_yaxis.set_ticklabels(row_names)
ax3.set_xlabel('delay fraction', labelpad=25)
ax3.set_ylabel('energy fraction', labelpad=25)
ax3.set_zlabel('cumm. delay ($10^2$ s)', labelpad=10)
ax1.set_title('(a)', y=-0.2)
ax2.set_title('(b)', y=-0.2)
ax3.set_title('(c)', y=-0.2)
# Render and save next to the checkpoints for this dataset/node count.
args.name = args.name.format(args.accuracy)
print('Saving: ', args.name)
fig.subplots_adjust(wspace=0.025)
plt.savefig('../ckpts/{}_{}/plots/{}'.format(
    args.dataset, args.num_nodes, args.name),
    bbox_inches='tight', pad_inches=0.5, dpi=args.dpi)
# Package version tuple.
VERSION = (0, 2, 2, 1)
| 23 | 18 |
import asyncio
import time

import aiohttp

URLS = [
    'http://127.0.0.1:8000',
    'http://127.0.0.1:8000',
    'http://127.0.0.1:8000',
]


async def request_greetings():
    """Fetch every URL in URLS concurrently and return the bodies joined by newlines.

    BUG FIXES vs. the original:
    - ``@asyncio.coroutine`` / ``yield from`` were removed in Python 3.11;
      use native ``async def`` / ``await``.
    - ``aiohttp.get`` was removed in aiohttp 2.0; requests must go through
      an ``aiohttp.ClientSession``.
    - ``asyncio.wait`` no longer accepts bare coroutines (Python 3.11) and
      returns *sets*, so the original joined the responses in completion
      order, not URL order. ``asyncio.gather`` preserves input order.
    """
    async with aiohttp.ClientSession() as session:

        async def fetch(url):
            # One GET per URL; response body decoded as text.
            async with session.get(url) as response:
                return await response.text()

        texts = await asyncio.gather(*(fetch(url) for url in URLS))
    return '\n'.join(texts)


t1 = time.time()
# asyncio.run creates, runs, and closes the event loop (replaces the manual
# get_event_loop / run_until_complete / close dance).
greetings = asyncio.run(request_greetings())
print(time.time() - t1, 'seconds passed')
print(greetings)
| 638 | 261 |
from mongoengine import *
from models import *
def get_symbol_data(symbol):
    """Return the OHLCV price history for *symbol*, ordered oldest first.

    :param symbol: ticker symbol used to filter StockPrice documents.
    :return: list of dicts with keys date/open/high/low/close/volume,
        one per StockPrice row, sorted ascending by date.

    BUG FIX: the original leaked the MongoDB connection if the query
    raised -- close it in a ``finally`` block so it is released on every path.
    """
    db_client = connect(db='stocks_db')
    try:
        # Build the result with a comprehension; one dict per price row.
        return [
            {
                'date': sp.date,
                'open': sp.open,
                'high': sp.high,
                'low': sp.low,
                'close': sp.close,
                'volume': sp.volume,
            }
            for sp in StockPrice.objects(symbol=symbol).order_by('date')
        ]
    finally:
        db_client.close()
# ---------------------------------------------------------------------
# HP.GbE2.get_mac_address_table
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetmacaddresstable import IGetMACAddressTable
from noc.core.text import parse_table
class Script(BaseScript):
    name = "HP.GbE2.get_mac_address_table"
    interface = IGetMACAddressTable

    def execute(self, interface=None, vlan=None, mac=None):
        """Fetch the switch forwarding database and return MAC table rows.

        Builds the narrowest /info/l2/fdb sub-command the filters allow,
        then re-filters the parsed CLI output, since the device may return
        a superset of what was requested.
        """
        cmd = "/info/l2/fdb"
        svlan = None
        if vlan:
            cmd += "/vlan %d" % vlan
            svlan = str(vlan)
        elif mac:
            cmd += "/find %s" % mac
        elif interface:
            cmd += "/port %s" % interface
        else:
            cmd += "/dump"
        result = []
        for row_mac, row_vlan, port, trunk, _state in parse_table(self.cli(cmd)):
            # Skip separator/blank rows emitted by the table parser.
            if not row_mac:
                continue
            # Re-apply the mac/vlan filters on the parsed rows.
            if mac and row_mac.upper() != mac:
                continue
            if vlan and row_vlan != svlan:
                continue
            # Prefer the trunk column when present, else the port column.
            iface = trunk if trunk else port
            if interface and interface != iface:
                continue
            if row_vlan == "4095":  # Built-in vlans on port 19
                continue
            result.append({"vlan_id": row_vlan, "mac": row_mac, "interfaces": [iface], "type": "D"})
        return result
| 1,434 | 422 |
import setuptools

# BUG FIX: the original guard compared __name__ against "main", which never
# matches -- when run as a script __name__ is "__main__" -- so setup() was
# never invoked and the package could not be built.
if __name__ == "__main__":
    setuptools.setup()
| 65 | 24 |
import os
import logging
from modules.utils.helpers import parse_size, tobool, validate_max_size
from modules.model_zoo.getter import prepare_backend
from modules.configs import Configs
from env_parser import EnvConfigs
# Configure root logging once at import time; the LOG_LEVEL environment
# variable overrides the default INFO level (e.g. LOG_LEVEL=DEBUG).
log_level = os.getenv('LOG_LEVEL', 'INFO')
logging.basicConfig(
    level=log_level,
    format='%(asctime)s %(levelname)s - %(message)s',
    datefmt='[%H:%M:%S]',
)
def prepare_models(root_dir: str = '/models'):
    """Prepare the backend for every configured model.

    Reads the model selection from environment configs and per-model
    settings from the on-disk model configs under *root_dir*, then builds
    each configured backend in turn.

    :param root_dir: directory holding the model configuration files.
    """
    model_configs = Configs(models_dir=root_dir)
    env_configs = EnvConfigs()

    det_name = env_configs.models.det_name
    rec_name = env_configs.models.rec_name
    ga_name = env_configs.models.ga_name
    mask_detector = env_configs.models.mask_detector

    # Fall back to a 640x640 input size when none is configured.
    max_size = env_configs.defaults.max_size
    if max_size is None:
        max_size = [640, 640]
    max_size = validate_max_size(max_size)

    # Preserve order: detection, recognition, genderage, mask detector.
    selected = (m for m in (det_name, rec_name, ga_name, mask_detector) if m is not None)
    for model in selected:
        # Batch size 1 unless the model config explicitly allows batching;
        # the detector has its own batch-size setting, everything else
        # shares the recognition batch size.
        if not model_configs.models[model].get('allow_batching'):
            batch_size = 1
        elif model == det_name:
            batch_size = env_configs.models.det_batch_size
        else:
            batch_size = env_configs.models.rec_batch_size
        logging.info(f"Preparing '{model}' model...")
        prepare_backend(model_name=model, backend_name=env_configs.models.backend_name, im_size=max_size,
                        force_fp16=env_configs.models.fp16,
                        max_batch_size=batch_size, config=model_configs)
        logging.info(f"'{model}' model ready!")


if __name__ == "__main__":
    prepare_models()
| 1,628 | 559 |
#!/usr/bin/env python
"""Creates the Script menu.
To Do:
- add html help; note that this will have to be fed to ScriptWdg,
RO.ScriptWdg has no idea of TUI help
History:
2004-07-19 ROwen
2004-08-11 ROwen Modified for updated RO.Wdg.Toplevel.
2004-08-23 ROwen Added some diagnostic print statements (commented out).
2004-10-11 ROwen Modified to reject files whose names begin with ".".
2004-10-28 ROwen Bug fix: Open... was broken.
2005-09-22 ROwen Fix PR 272: standard scripts not available on Mac;
this was broken by the packaging overhaul for TUI 1.0.1.
Fix PR 132: Script menu may not load at first on MacOS X;
this was fixed via a hideous hack.
Modified to check/rebuild the entire menu when the root
menu is shown, instead of using lazy check/rebuild;
this simplified the hack for PR 132.
Modified to prebuild the menu at startup.
Modified test code to show a standard pull-down menu.
2011-06-16 ROwen Ditched obsolete "except (SystemExit, KeyboardInterrupt): raise" code
2012-07-18 ROwen Removed use of update_idletasks and an ugly Mac workaround that is no longer required.
2014-02-12 ROwen Moved some code to TUI.Base.ScriptLoader so other users could get to it more easily.
2015-03-18 ROwen Removed _RootNode.isAqua because it was not being used.
"""
import os
import Tkinter
import tkFileDialog
import RO.Alg
from TUI.Base.ScriptLoader import getScriptDirs, ScriptLoader
__all__ = ["getScriptMenu"]
def getScriptMenu(master):
    """Build the root Scripts menu for *master* and return the Tk menu widget."""
    root = _RootNode(master=master, label="", pathList=getScriptDirs())
    # Prebuild the whole tree so the menu is populated before first post.
    root.checkMenu(recurse=True)
    return root.menu
class _MenuNode:
    """Menu and related information about one sub-menu of the Scripts menu.

    Each node represents one level of hierarchy in the various scripts directories.
    The contents of a given subdir are dynamically tested, but the existence
    of a particular subdirectory is not. NOTE(review): that looks like a flaw --
    if a given subdir exists in any scripts dir, it should be checked every time
    in all scripts dirs.
    """
    def __init__(self, parentNode, label, pathList):
        """Construct a _MenuNode

        Inputs:
        - parentNode: parent menu node
        - label: label of this sub-menu
        - pathList: list of paths to this subdirectory in the script hierarchy
            (one entry for each of the following, but only if the subdir exists:
            built-in scripts dir, local TUIAddtions/Scripts and shared TUIAdditions/Scripts)
        """
        # print "_MenuNode(%r, %r, %r)" % (parentNode, label, pathList)
        self.parentNode = parentNode
        self.label = label
        self.pathList = pathList
        # script label (file name without extension) -> full path of the .py file
        self.itemDict = {}
        # subdir name -> list of full paths (one entry per scripts dir it appears in)
        self.subDict = RO.Alg.ListDict()
        # child _MenuNode instances, rebuilt by _fillMenu
        self.subNodeList = []
        self._setMenu()
    def _setMenu(self):
        # Create self.menu and attach it to the parent node's menu as a cascade.
        self.menu = Tkinter.Menu(
            self.parentNode.menu,
            tearoff = False,
            # postcommand = self.checkMenu,
        )
        self.parentNode.menu.add_cascade(
            label = self.label,
            menu = self.menu,
        )
    def checkMenu(self, recurse=True):
        """Check contents of menu and rebuild if anything has changed.

        Re-scans pathList, compares the scan result to the cached
        itemDict/subDict, and rebuilds the menu only on a difference.
        Return True if anything rebuilt (here or, with recurse, in a child).
        """
        # print "%s checkMenu" % (self,)
        newItemDict = {}
        newSubDict = RO.Alg.ListDict()
        didRebuild = False
        for path in self.pathList:
            for baseName in os.listdir(path):
                # reject files that would be invisible on unix
                if baseName.startswith("."):
                    continue
                baseBody, baseExt = os.path.splitext(baseName)
                fullPath = os.path.normpath(os.path.join(path, baseName))
                if os.path.isfile(fullPath) and baseExt.lower() == ".py":
                    # a runnable script file
                    # print "checkMenu newItem[%r] = %r" % (baseBody, fullPath)
                    newItemDict[baseBody] = fullPath
                elif os.path.isdir(fullPath) and baseExt.lower() != ".py":
                    # a sub-directory (directories named *.py are ignored)
                    # print "checkMenu newSubDir[%r] = %r" % (baseBody, fullPath)
                    newSubDict[baseName] = fullPath
                # else:
                #     print "checkMenu ignoring %r = %r" % (baseName, fullPath)
        if (self.itemDict != newItemDict) or (self.subDict != newSubDict):
            didRebuild = True
            # rebuild contents
            # print "checkMenu rebuild contents"
            self.itemDict = newItemDict
            self.subDict = newSubDict
            self.menu.delete(0, "end")
            self.subNodeList = []
            self._fillMenu()
        # else:
        #     print "checkMenu do not rebuild contents"
        if recurse:
            # check children even if this level did not change
            for subNode in self.subNodeList:
                subRebuilt = subNode.checkMenu(recurse=True)
                didRebuild = didRebuild or subRebuilt
        return didRebuild
    def _fillMenu(self):
        """Fill the menu: one command per script, then one cascade per subdir.
        """
        # print "%s _fillMenu"
        # Python 2 idiom: dict.keys() returns a list, sorted in place
        itemKeys = self.itemDict.keys()
        itemKeys.sort()
        # print "%s found items: %s" % (self, itemKeys)
        for label in itemKeys:
            subPathList = list(self.getLabels()) + [label]
            fullPath = self.itemDict[label]
            # print "adding script %r: %r" % (label, fullPath)
            self.menu.add_command(
                label = label,
                command = ScriptLoader(subPathList=subPathList, fullPath=fullPath),
            )
        subdirList = self.subDict.keys()
        subdirList.sort()
        # print "%s found subdirs: %s" % (self, subdirList)
        for subdir in subdirList:
            pathList = self.subDict[subdir]
            # print "adding submenu %r: %r" % (subdir, pathList)
            self.subNodeList.append(_MenuNode(self, subdir, pathList))
    def getLabels(self):
        """Return a list of labels all the way up to, but not including, the root node.
        """
        retVal = self.parentNode.getLabels()
        retVal.append(self.label)
        return retVal
    def __str__(self):
        # e.g. "_MenuNode sub1:sub2"
        return "%s %s" % (self.__class__.__name__, ":".join(self.getLabels()))
class _RootNode(_MenuNode):
    """The main scripts menu and related information.

    Unlike _MenuNode, the root attaches directly to a master widget and
    re-checks its contents each time the menu is posted.
    """
    def __init__(self, master, label, pathList):
        """Construct the _RootNode

        Inputs:
        - master: master widget for the root menu
        - label: label of this sub-menu
        - pathList: list of paths to scripts, as returned by TUI.Base.ScriptLoader.getScriptDirs()
        """
        self.master = master
        _MenuNode.__init__(self, None, label, pathList)

    def _setMenu(self):
        # The root menu hangs off master (there is no parent menu) and
        # refreshes its contents via postcommand whenever it is posted.
        self.menu = Tkinter.Menu(
            self.master,
            tearoff = False,
            postcommand = self.checkMenu,
        )

    def _fillMenu(self):
        """Fill the menu, prepending the Open... item before the scripts."""
        self.menu.add_command(label="Open...", command=self.doOpen)
        _MenuNode._fillMenu(self)

    def doOpen(self):
        """Handle the Open... menu item: prompt for a script file and run it."""
        homeDir = os.path.expanduser("~")
        if homeDir == "~":
            # expanduser failed; let the dialog pick its own default
            homeDir = None
        chosenPath = tkFileDialog.askopenfilename(
            master = self.master,
            initialdir = homeDir,
            title="TUI Script",
            filetypes = [("Python", "*.py")],
        )
        if chosenPath:
            ScriptLoader(subPathList=os.path.split(chosenPath), fullPath=chosenPath)()

    def getLabels(self):
        """The root node contributes no labels to the path."""
        return []
if __name__ == "__main__":
    # Manual smoke test: show the Scripts menu on a standard pull-down
    # menu bar in a bare Tk window.
    import RO.Wdg
    root = Tkinter.Tk()
    menuBar = Tkinter.Menu(root)
    root["menu"] = menuBar
    scriptMenu = getScriptMenu(menuBar)
    menuBar.add_cascade(label="Scripts", menu=scriptMenu)
    root.mainloop()
| 8,202 | 2,416 |