text stringlengths 38 1.54M |
|---|
# Ask for the visitor's name and record it in guest.txt (overwriting any
# previous entry).
filename = "guest.txt"
user_name = input("Please enter your name: ")
with open(filename, "w") as guest_file:
    guest_file.write(user_name)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Support module generated by PAGE version 4.23a
# in conjunction with Tcl version 8.6
# Jul 03, 2019 01:16:17 PM -03 platform: Windows NT
import sys
import controllers.Gestos as ctrl
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def set_Tk_var():
    """Create the module-level Tk variables that back the gesture form widgets."""
    global descripcion
    descripcion = tk.StringVar()  # gesture description text field
    global punto1
    punto1 = tk.IntVar()  # first point selector
    global punto2
    punto2 = tk.IntVar()  # second point selector
def nuevoGesto():
    """Create a new gesture from the form fields and refresh the gestures view.

    Reads the module-level Tk variables (descripcion, punto1, punto2), asks the
    controller to persist the gesture and, on success, appends it to the view's
    list and clears the form.
    """
    success = ctrl.crearGesto(descripcion.get(), punto1.get(), punto2.get())
    # Idiom fix: compare against None with "is not None", not "!=".
    if success is not None:
        print("Se creo exitosamente el gesto")
        # Imported lazily to avoid a circular import with the view module.
        import views.gestos as gestos
        gestos.top.insertarUltimo(success)
        gestos.top.clear()
    else:
        print("Hubo un error al crear el gesto")
def getAll():
    """Thin wrapper around the controller's buscarAllGestos() lookup."""
    return ctrl.buscarAllGestos()
def destroy_window():
    # PAGE-generated stub that only logs the call; it is superseded by the
    # destroy_window defined later in this module, which actually closes
    # the window.
    print('gestos_support.destroy_window')
    sys.stdout.flush()
def init(top, gui, *args, **kwargs):
    """PAGE support hook: stash the GUI object and the top-level window in
    module globals so the other support functions can reach them."""
    global w, top_level, root
    w = gui
    top_level = top
    root = top
def destroy_window():
    # Function which closes the window.
    # NOTE(review): this redefines the logging-only destroy_window above;
    # after import only this version is visible.
    global top_level
    top_level.destroy()
    top_level = None
if __name__ == '__main__':
    # Running this support module directly launches the full GUI.
    import gestos
    gestos.vp_start_gui()
|
# Kimberly Vo collab with stephen chew, julie nguyen, megan Van Rafelghem
# kv3nw.... ssc6ae, jqn5xk. mtv2mn
# Module-level cache of professor names. NOTE(review): it is shared across
# calls to instructors(), so repeated calls accumulate results.
instructor_list = []
import urllib.request
def instructors(department):
    '''
    returns an alphabetized list of professors that teach in the department without repeating
    :param department: which department
    :return: list of teachers
    '''
    dept = department
    url = 'http://cs1110.cs.virginia.edu/files/louslist/' + str(dept)
    f = urllib.request.urlopen(url)
    # Bug fix: collect into a local list instead of the module-level
    # instructor_list, so repeated calls do not accumulate professors from
    # previously queried departments.
    found = []
    for line in f:
        # Bug fix: decode the raw bytes instead of str(line), which wrapped
        # every record in a "b'...'" literal.
        x = line.decode('utf-8')
        x = x.strip().split('|')
        professor = x[4]
        extra = professor.find("+")
        if extra != -1:
            # Presumably trims a "+N" multi-instructor marker -- kept as in
            # the original; TODO confirm against the data format.
            professor = professor[0:-2]
        if professor not in found:
            found.append(professor)
    return sorted(found)
def class_search(dept_name, has_seats_available=True, level=None, not_before=None, not_after=None):
    '''
    searches for the classes that fit all the requirements given
    :param dept_name: name of department
    :param has_seats_available: seats available in the class
    :param level: level of difficulty
    :param not_before: not before a time
    :param not_after: not after a time
    :return: list of classes that meet parameters
    '''
    # Fetch the whole department listing and split it into pipe-delimited rows.
    url = 'http://cs1110.cs.virginia.edu/files/louslist/' + dept_name
    raw = urllib.request.urlopen(url=url).read().decode('utf-8')
    matched_classes = []
    for record in raw.split('\n'):
        fields = record.split('|')
        if len(fields) <= 1:
            continue  # skip blank / malformed rows
        # A class matches only when every active filter accepts it.
        if (availability(has_seats_available, fields)
                and class_before(not_before, fields)
                and class_after(not_after, fields)
                and class_level(level, fields)):
            matched_classes.append(fields)
    return matched_classes
def class_level(level, line):
    '''
    True when the course number's leading digit matches the requested level.
    :param level: level given (None means "any level")
    :param line: one pipe-split course record; line[1] holds the course number
    :return: bool
    '''
    if level is None:
        return True
    return str(line[1])[0] == str(level)[0]
def availability(has_seats_available, line):
    '''
    True when the class still has open seats (or when seats were not required).
    :param has_seats_available: whether the caller requires open seats
    :param line: one pipe-split course record; line[15]/line[16] appear to be
                 current enrollment and capacity -- TODO confirm field layout
    :return: bool
    '''
    if has_seats_available:
        # Bug fix: the fields are strings and were compared lexicographically,
        # which is wrong for numbers (e.g. '9' < '10' is False as strings).
        # Compare as integers instead.
        return int(line[15]) < int(line[16])
    else:
        return True
def class_before(not_before, line):
    '''
    True when the class does not start before the given time.
    :param not_before: earliest acceptable start time (e.g. 900), or None
    :param line: one pipe-split course record; line[12] holds the start time
    :return: bool
    '''
    if not_before is None:
        return True
    # Robustness: parse the start time only when the filter is active, so an
    # unparsable field cannot raise ValueError when no filter was requested.
    return int(line[12]) >= not_before
def class_after(not_after, line):
    '''
    True when the class does not start after the given time.
    :param not_after: latest acceptable start time, or None
    :param line: one pipe-split course record; line[12] holds the start time
    :return: bool
    '''
    if not_after is None:
        return True
    # Robustness: parse only when the filter is active (mirrors class_before).
    return int(line[12]) <= not_after
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 13 19:07:20 2019
@author: jorgeagr
"""
import numpy as np
def perceptron(x, w):
    '''
    x: Data matrix in column form. Rows are attributes, columns are instances.
    w: Column vector, each row is weight of attribute.
    Returns the thresholded activations as a flat array: +1 for positive,
    -1 for negative, 0 on the decision boundary.
    '''
    activations = np.dot(w.T, x)
    # np.sign reproduces the original in-place thresholding exactly:
    # >0 -> 1, <0 -> -1, ==0 stays 0.
    return np.sign(activations).flatten()
def prefSum(a):
    """Return the list of prefix (running) sums of *a*.

    Bug fix: the original called functools.reduce without importing functools
    (NameError) and crashed on an empty input (a[0] -> IndexError). The stdlib
    itertools.accumulate performs the same fold in C and yields [] for [].

    >>> prefSum([1, 2, 3])
    [1, 3, 6]
    """
    from itertools import accumulate  # local import keeps the snippet self-contained
    return list(accumulate(a))
|
# Generated by Django 1.11.6 on 2017-11-14 19:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the boolean "night_mode" display preference (default off) to
    # zerver's UserProfile model.
    dependencies = [
        ("zerver", "0118_defaultstreamgroup_description"),
    ]
    operations = [
        migrations.AddField(
            model_name="userprofile",
            name="night_mode",
            field=models.BooleanField(default=False),
        ),
    ]
|
from generateplayer import Player
from .models import Team
# Build one Player per roster slot 1-5; the names suggest the argument is the
# position (PG, SG, SF, PF, C) -- the semantics live in generateplayer, TODO
# confirm.
pointguard = Player(1)
shootingguard = Player(2)
smallforward = Player(3)
powerforward = Player(4)
center = Player(5)
# Empty team object; the Team API is defined in .models and not visible here.
newTeam = Team()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-04-13 10:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Relaxes Location's country and postal_code foreign keys to be optional
    # (blank/null allowed) while keeping CASCADE deletion and the "locations"
    # reverse accessor.
    dependencies = [
        ('location', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='location',
            name='country',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='locations', to='location.Country'),
        ),
        migrations.AlterField(
            model_name='location',
            name='postal_code',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='locations', to='location.PostalCode'),
        ),
    ]
|
# Interactive removal of a name from the class list: report how many times the
# name occurs, then offer each occurrence for deletion one at a time.
print ("BAJAS")
dam2 = ["Sergio", "Xabi","Xabi", "Maria", "Alexander", "Carlos" ,"Juan" ,"Imanol", "Pedro" ,"Uxue", "Javier", "Iker", "Carlos", "Xabi", "Alejandra", "Carolina","Iñaki", "Asier","Maria"]
print (dam2)
nom = input("Nombre a eliminar: ")
nom= nom.capitalize()
print ("El nombres aparece " + str(dam2.count(nom)) + ' veces')
# NOTE(review): the occurrence count is evaluated once up front and index()
# always returns the FIRST remaining occurrence, so answering "N" makes the
# loop offer the same position again (the original author noted the printed
# position is wrong).
for i in range(0, dam2.count(nom)):
    pos = dam2.index(nom)
    resp = input(nom + ' esta en la posicion ' + str(pos) + ' --> ¿Quieres borrarlo?(S/N)')
    if resp == 'S' or resp == 's':
        dam2.pop(pos)
    elif resp =='N' or resp == 'n':
        print("No se ha borrado")
    else:
        print("Error")
print (dam2)
|
'''Exercise 16.2. Write a boolean function called is_after that takes two Time objects, t1 and t2,
and returns True if t1 follows t2 chronologically and False otherwise. Challenge: don’t use an if
statement.
'''
import time
import datetime
class Time(object):
    """Thin wrapper around datetime.datetime (24-hour clock)."""

    def __init__(self, year=2000, month=1, day=1, hour=12, minute=0, sec=0):  # 24 hrs format
        self.date = datetime.datetime(year, month, day, hour, minute, sec)

    def is_time(self):
        """Return this moment as a Unix timestamp (seconds since the epoch).

        Bug fix: the time module has no is_time(); time.mktime() is the
        function that turns a timetuple into a comparable timestamp.
        """
        return time.mktime(self.date.timetuple())
# Two sample times on the same day: 15:00 and 01:00.
t1 = Time(2013, 1, 3, 15)
t2 = Time(2013, 1, 3, 1)
def is_after(time1, time2):
    """Return True when time1 is chronologically after time2 (no if used,
    per the exercise's challenge)."""
    return time1.is_time() > time2.is_time()
print(is_after(t1, t2))
|
def euler028(diagonal):
    '''Number spiral diagonals
    Starting with the number 1 and moving to the right in a clockwise direction a
    5 by 5 spiral is formed as follows:
    21 22 23 24 25
    20  7  8  9 10
    19  6  1  2 11
    18  5  4  3 12
    17 16 15 14 13
    It can be verified that the sum of the numbers on the diagonals is 101.
    What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed in the same way?
    >>> euler028(5)
    101
    >>> euler028(1001)
    669171001
    '''
    # Python 3 fix: xrange -> range, true division -> floor division.
    # Ring r (1-based) ends at (2r+1)**2 in its top-right corner and its four
    # diagonal values are spaced 2r apart, so sum the arithmetic range ending
    # at that square; the trailing +1 accounts for the centre cell.
    return sum(
        sum(range((2 * r + 1) ** 2 - 6 * r, (2 * r + 1) ** 2 + 1, 2 * r))
        for r in range(1, (diagonal - 1) // 2 + 1)
    ) + 1
|
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
# test_dir = r'.\newDataSet\test\0\00000.jpg'
test_dir = r'.\pic\10.jpg'  # image to classify
model = load_model('newModel.h5')  # presumably a 7-class emotion model -- see x_axis below
# Load at the 48x48 input size the model expects.
img = image.load_img(test_dir, target_size=(48, 48))
x = np.expand_dims(img, axis=0)  # add the batch dimension
y = model.predict(x)  # per-class scores
plt.figure()
x_axis = ['0 anger', '1 disgust', '2 fear', '3 happy', '4 sad', '5 surprise', '6 normal']
y_axis = np.array(y).flatten()
plt.bar(x_axis, y_axis)  # bar chart of the per-emotion scores
plt.show()
print(np.array(y).flatten())
y = model.predict_classes(x)  # winning class index
print(y)
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class profile(models.Model):
    """One-to-one extension of Django's User with blood-donor details."""
    user = models.OneToOneField(User, on_delete= models.CASCADE)
    name = models.CharField(max_length =30 ,blank=False)
    # NOTE(review): one character cannot hold values like "AB+" -- confirm the
    # intended blood-group encoding.
    blood_group = models.CharField(max_length=1 , blank=False)
    email = models.EmailField(max_length=100)
    address = models.CharField(max_length=100)
    city = models.CharField(max_length = 100)
    about_me = models.TextField()
    # Signal handler: create the companion profile whenever a User is saved
    # for the first time. NOTE(review): receivers are usually declared at
    # module level, not inside the model body -- confirm this placement.
    @receiver(post_save ,sender=User)
    def update_user_profile(sender , instance , created , **kwargs):
        if created:
            profile.objects.create(user=instance)
        instance.profile.save()
class Person(models.Model):
    """Stand-alone contact record linked to a user account via `name`."""
    name = models.ForeignKey(settings.AUTH_USER_MODEL ,on_delete=models.CASCADE)
    blood_grp = models.CharField(max_length=1)
    address = models.CharField(max_length=200)
    email = models.EmailField(max_length=100)
    phone = models.CharField(max_length=12)
    #bio = models.TextField()
    def __str__(self):
        # Admin/display representation: the person's e-mail address.
        return self.email
|
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
import os.path
import numpy as np
import pytest
from intake.container import serializer
# Run a decorated test once per registered serializer / compressor implementation.
all_serializers = pytest.mark.parametrize("ser", serializer.format_registry.values())
all_compressors = pytest.mark.parametrize("comp", serializer.compression_registry.values())
@all_serializers
def test_dataframe(ser):
    """Round-trip a pandas DataFrame through encode/decode for each serializer."""
    pd = pytest.importorskip("pandas")
    pytest.importorskip("pyarrow")
    csv_filename = os.path.join(os.path.dirname(__file__), "entry1_1.csv")
    expected_df = pd.read_csv(csv_filename)
    # Check roundtrip
    df = ser.decode(ser.encode(expected_df, "dataframe"), "dataframe")
    assert expected_df.equals(df)
@all_serializers
def test_ndarray(ser):
    """Round-trip a 2-D numpy array through encode/decode for each serializer."""
    pytest.importorskip("msgpack_numpy")
    expected_array = np.arange(35).reshape((5, 7))
    # Check roundtrip
    array = ser.decode(ser.encode(expected_array, "ndarray"), "ndarray")
    np.testing.assert_array_equal(expected_array, array)
@all_serializers
def test_python(ser):
    """Round-trip plain Python containers (incl. None values) for each serializer."""
    expected = [dict(a=1, b=[1, 2], c="str"), dict(a=[1, 2], b="str", d=None)]
    actual = ser.decode(ser.encode(expected, "python"), "python")
    assert expected == actual
@all_compressors
def test_compression_roundtrip(comp):
    """compress() then decompress() must reproduce the bytes for every compressor."""
    data = b"1234\x01\x02"
    assert data == comp.decompress(comp.compress(data))
def test_none_compress():
    """NoneCompressor must pass bytes through unchanged in both directions."""
    data = b"1234\x01\x02"
    comp = serializer.NoneCompressor()
    # None should be no-op
    assert data == comp.decompress(data)
    assert data == comp.compress(data)
|
# -*- coding: utf-8 -*-
"""
LSTM网络结构与LOSS函数。
@author:chenli0830(李辰)
@source:https://github.com/happynoom/DeepTrade
"""
import tensorflow as tf
from tensorflow.contrib import rnn
import os
from tensorflow.contrib.rnn import DropoutWrapper
from tensorflow.python.ops.init_ops import glorot_uniform_initializer, orthogonal_initializer
from LSTM_LOSS_MODEL.rawdate import RawData, read_sample_data
from LSTM_LOSS_MODEL.chart import extract_feature
from LSTM_LOSS_MODEL.dataset import DataSet
import numpy
from tensorflow.contrib.layers.python.layers.layers import batch_norm
import sys
from numpy.random import seed
class SmartTrade(object):
    """LSTM network that outputs a next-day position in [0, 1], trained with a
    risk-style loss Loss = -100 * mean(position * (return - cost)).
    Built in TF1 graph style via build_graph().
    """
    def __init__(self, num_step, input_size, init_learning_rate, hidden_size, nclasses,
                 decay_step=500, decay_rate=1.0, cost=0.0002):
        """
        Store SmartTrade hyper-parameters (translated from the original Chinese).
        :param num_step: time steps per input window
        :param input_size: features per time step
        :param init_learning_rate: starting learning rate before decay
        :param hidden_size: LSTM hidden units
        :param nclasses: output width (1 = single position signal)
        :param decay_step: steps between learning-rate decays
        :param decay_rate: decay multiplier (1.0 disables decay)
        :param cost: transaction cost subtracted from returns in the loss
        """
        self.num_step = num_step
        self.input_size = input_size
        self.global_step = None
        self.init_learning_rate = init_learning_rate
        self.decay_step = decay_step
        self.decay_rate = decay_rate
        self.learning_rate = None
        self.hidden_size = hidden_size
        self.nclasses = nclasses
        self.position = None
        self.summary_op = None
        self.weights = None
        self.biases = None
        self.cost = cost
        self.loss = None
        self.avg_position = None
        self.keep_prob = None
        self.x = None
        self.y = None
        self.is_training = None

    def _create_learning_rate(self):
        '''
        create learning rate
        :return:
        '''
        with tf.variable_scope("parameter"):
            self.global_step = tf.Variable(0, trainable=False, name="global_step")
            # Bug fix: TF op names may not contain spaces ("learning rate"
            # raises ValueError at graph build time); use an underscore.
            self.learning_rate = tf.train.exponential_decay(self.init_learning_rate, self.global_step,
                                                            self.decay_step, self.decay_rate, staircase=True,
                                                            name="learning_rate")

    def _create_placeholder(self):
        # Graph inputs: feature windows, next-day change-rate targets, and the
        # training-mode / dropout knobs.
        with tf.variable_scope("input"):
            self.x = tf.placeholder(tf.float32, shape=[None, self.num_step, self.input_size], name='history_feature')
            self.y = tf.placeholder(tf.float32, shape=[None, 1], name='target')
            self.is_training = tf.placeholder(tf.bool, name='mode')
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    def _create_weight(self):
        # Output projection from the final LSTM state to `nclasses` logits.
        with tf.variable_scope("weights"):
            self.weights = {
                'out': tf.get_variable("weights", [self.hidden_size, self.nclasses],
                                       initializer=tf.random_normal_initializer(mean=0, stddev=0.01, seed=1))
            }
            self.biases = {
                'out': tf.get_variable("bias", [self.nclasses],
                                       initializer=tf.random_normal_initializer(mean=0, stddev=0.01, seed=1))
            }

    def batch_norm_layer(self, signal, scope):
        '''
        Batch-normalisation layer between activations (translated).
        :param signal: input signal
        :param scope: name scope
        :return: normalized signal
        '''
        # Bug fix: tf.constant_initializer(2,5) passed 5 as the *dtype*
        # argument (a TypeError); the intended gamma initial value is 2.5.
        return tf.cond(self.is_training,
                       lambda: batch_norm(signal, is_training=True,
                                          param_initializers={"beta": tf.constant_initializer(3.),
                                                              "gamma": tf.constant_initializer(2.5)},
                                          center=True, scale=True, activation_fn=tf.nn.relu, decay=1., scope=scope),
                       lambda: batch_norm(signal, is_training=False,
                                          param_initializers={"beta": tf.constant_initializer(3.),
                                                              "gamma": tf.constant_initializer(2.5)},
                                          center=True, scale=True, activation_fn=tf.nn.relu, decay=1.,
                                          scope=scope, reuse=True))

    def _create_loss(self):
        '''
        Risk-estimate loss (translated):
        Loss = -100. * mean(P * (R - c))
        P : self.position, output, the planned position we should hold to next day
        R : self.y, the change rate of next day
        c : cost
        :return:
        '''
        # self.x.shape = (batch_size, num_step, input_size)
        # xx.shape = (num_step, (batch_size, input_size))
        xx = tf.unstack(self.x, self.num_step, 1)
        lstm_cell = rnn.LSTMCell(self.hidden_size, forget_bias=1.0, initializer=orthogonal_initializer())
        dropout_cell = DropoutWrapper(lstm_cell, input_keep_prob=self.keep_prob,
                                      output_keep_prob=self.keep_prob, state_keep_prob=self.keep_prob)
        outputs, states = rnn.static_rnn(dropout_cell, xx, dtype=tf.float32)
        signal = tf.matmul(outputs[-1], self.weights['out']) + self.biases['out']
        scope = "activation_batch_norm"
        norm_signal = self.batch_norm_layer(signal, scope=scope)
        # relu6 / 6 squashes the signal into [0, 1] so it reads as a position.
        self.position = tf.nn.relu6(norm_signal, name="relu_limit") / 6.
        self.avg_position = tf.reduce_mean(self.position)
        self.loss = -100. * tf.reduce_mean(tf.multiply((self.y - self.cost), self.position, name='estimated_risk'))

    def _create_optimizer(self):
        '''
        RMSProp optimizer stepping global_step (translated).
        :return:
        '''
        self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate, name="optimizer").\
            minimize(self.loss, global_step=self.global_step)

    def _create_summary(self):
        # TensorBoard scalars/histograms for loss and average position.
        tf.summary.scalar("loss", self.loss)
        tf.summary.histogram("histogram loss", self.loss)
        tf.summary.scalar('average position', self.avg_position)
        tf.summary.histogram("histogram position", self.avg_position)
        self.summary_op = tf.summary.merge_all()

    def build_graph(self):
        """Assemble the full graph: lr schedule, inputs, weights, loss,
        optimizer and summaries, in dependency order."""
        self._create_learning_rate()
        self._create_placeholder()
        self._create_weight()
        self._create_loss()
        self._create_optimizer()
        self._create_summary()
def train(trade, train_set, val_set, train_steps=10000, batch_size=32, keep_prob=1.):
    """Train a built SmartTrade graph, logging summaries and checkpointing the
    best validation loss.

    :param trade: SmartTrade whose build_graph() has been called
    :param train_set: DataSet providing next_batch()
    :param val_set: DataSet used for periodic validation
    :param train_steps: number of optimisation steps
    :param batch_size: examples per step
    :param keep_prob: dropout keep probability used during training
    """
    initial_step = 1
    val_features = val_set.images
    val_labels = val_set.labels  # typo fix: was "val_labals"
    VERBOSE_STEP = 10
    VALIDATION_STEP = VERBOSE_STEP * 100  # validate every 1000 steps
    saver = tf.train.Saver()
    min_validation_loss = 100000000.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter("./graphs", sess.graph)
        for i in range(initial_step, initial_step + train_steps):
            batch_features, batch_labels = train_set.next_batch(batch_size)
            _, loss, avg_pos, summary = sess.run([trade.optimizer, trade.loss, trade.avg_position,
                                                  trade.summary_op],
                                                 feed_dict={trade.x: batch_features,
                                                            trade.y: batch_labels,
                                                            trade.is_training: True,
                                                            trade.keep_prob: keep_prob})
            writer.add_summary(summary, global_step=i)
            if i % VERBOSE_STEP == 0:
                hint = None
                if i % VALIDATION_STEP == 0:
                    val_loss, val_avg_pos = sess.run([trade.loss, trade.avg_position],
                                                     feed_dict={trade.x: val_features,
                                                                trade.y: val_labels,
                                                                trade.is_training: False,
                                                                trade.keep_prob: 1.})
                    hint = 'Average Train Loss at step {}: {:.7f} Average position {:.7f}, Validation Loss: {:.7f} Average Position: {:.7f}'.\
                        format(i, loss, avg_pos, val_loss, val_avg_pos)
                    if val_loss < min_validation_loss:
                        min_validation_loss = val_loss
                        # Bug fix: "checlkpoint" typo -- save into the same
                        # "checkpoint" directory that predict() restores from.
                        saver.save(sess, "./checkpoint/best_model", i)
                else:
                    hint = 'Average loss at step {}: {:.7f} Average position {:.7f}'.format(i, loss, avg_pos)
                print(hint)
def calculate_cumulative_return(labels, pred):
    """Compound the advised positions over the realised returns.

    Element i of the result is the cumulative return (principal minus 1)
    after following the advice in `pred` against the change rates in `labels`
    through day i. Empty input yields an empty list.
    """
    if len(labels) <= 0:
        return []
    principal = 1.
    returns = []
    for day in range(len(labels)):
        principal = principal * (1. + labels[day] * pred[day])
        returns.append(principal - 1)
    return returns
def predict(val_set, num_step=30, input_size=61, learning_rate=0.001, hidden_size=8, nclasses=1):
    """Restore the latest checkpoint and print position advice for val_set.

    NOTE(review): the hidden_size default (8) differs from main()'s value
    (14); checkpoints saved by main() only load when the caller passes the
    matching sizes, as main() does -- confirm the default is intentional.
    """
    features = val_set.images
    labels = val_set.labels
    trade = SmartTrade(num_step, input_size, learning_rate, hidden_size, nclasses)
    trade.build_graph()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Restore the latest checkpoint from ./checkpoint when one exists.
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoint/checkpoint'))
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        pred, avg_pos = sess.run([trade.position, trade.avg_position],
                                 feed_dict={trade.x: features, trade.y: labels,
                                            trade.is_training: False, trade.keep_prob: 1.})
        cr = calculate_cumulative_return(labels, pred)
        # Tab-separated report: realised change, advised position, principal,
        # cumulative return.
        print("changeRate\tpositionAdvice\tprincipal\tcumlativeReturn")
        for i in range(len(labels)):
            print(str(labels[i]) + "\t" + str(pred[i]) + "\t" + str(cr[i] + 1.) + "\t" + str(cr[i]))
def main(operation='train', code=None):
    """Entry point: train on every CSV in ./data or predict one stock.

    :param operation: 'train' or 'predict'
    :param code: stock code selecting ./data/<code>.csv when predicting
    """
    # Hyper-parameters and feature selection shared by both modes.
    num_step = 30
    input_size = 61
    train_steps = 500
    batch_size = 512
    learning_rate = 0.001
    hidden_size = 14
    nclasses = 1
    validation_size = 700  # most recent samples of each file held out for validation
    keep_prob = 0.7
    selector = ["ROCP", "OROCP", "HROCP", "LROCP", "MACD", "RSI", "VROCP", "BOLL", "MA", "VMA", "PRICE_VOLUME"]
    input_shape = [30, 61]
    if operation == 'train':
        dataset_dir = "./data"
        train_features = []
        train_labels = []
        val_features = []
        val_labels = []
        # Build windows per file; the tail of each file becomes validation data.
        for filename in os.listdir(dataset_dir):
            print("processing file: " + filename)
            filepath = os.path.join(dataset_dir,filename)
            raw_data = read_sample_data(filepath)
            moving_features, moving_labels = extract_feature(raw_data=raw_data, selector=selector,
                                                             window=input_shape[0],
                                                             with_label=True, flatten=False)
            train_features.extend(moving_features[:-validation_size])
            train_labels.extend((moving_labels[:-validation_size]))
            val_features.extend(moving_features[-validation_size:])
            val_labels.extend(moving_labels[-validation_size:])
        # Reorder to (batch, num_step, input_size) and make labels a column.
        train_features = numpy.transpose(numpy.asarray(train_features), [0, 2, 1])
        train_labels = numpy.asarray(train_labels)
        train_labels = numpy.reshape(train_labels, [train_labels.shape[0], 1])
        val_features = numpy.transpose(numpy.asarray(val_features), [0, 2, 1])
        val_labels = numpy.asarray(val_labels)
        val_labels = numpy.reshape(val_labels, [val_labels.shape[0], 1])
        train_set = DataSet(train_features, train_labels)
        val_set = DataSet(val_features, val_labels)
        trade = SmartTrade(num_step, input_size, learning_rate, hidden_size, nclasses)
        trade.build_graph()
        train(trade, train_set, val_set, train_steps, batch_size=batch_size, keep_prob=keep_prob)
    elif operation == "predict":
        predict_file_path = "./data/000001.csv"
        if code is not None:
            predict_file_path = "./data/%s.csv" %code
        print("processing file %s" %predict_file_path)
        raw_data = read_sample_data(predict_file_path)
        moving_features, moving_labels = extract_feature(raw_data=raw_data, selector=selector, window=input_shape[0],
                                                         with_label=True, flatten=False)
        moving_features = numpy.asarray(moving_features)
        moving_features = numpy.transpose(moving_features, [0, 2, 1])
        moving_labels = numpy.asarray(moving_labels)
        moving_labels = numpy.reshape(moving_labels, [moving_labels.shape[0], 1])
        # Only the held-out tail is scored, mirroring the training split.
        val_set = DataSet(moving_features[-validation_size:], moving_labels[-validation_size:])
        predict(val_set, num_step=num_step, input_size=input_size, learning_rate=learning_rate,
                hidden_size=hidden_size, nclasses=nclasses)
    else:
        print("Operation not supported.")
if __name__ == '__main__':
    # Fix randomness for reproducibility (TF graph seed + numpy seed).
    tf.set_random_seed(2)
    seed(1)
    # CLI: python <script> [train|predict] [stock_code]
    operation = "train"
    code = None
    if len(sys.argv) > 1:
        operation = sys.argv[1]
    if len(sys.argv) > 2:
        code = sys.argv[2]
    main(operation, code)
|
import os
import torch
from transformers import AdamW, get_linear_schedule_with_warmup
class BaseModel():
    """Common scaffolding for transformer sentiment models: device/checkpoint
    setup, optimizer + warmup-scheduler creation, and (de)serialisation of
    network weights. Subclasses assign self.net and override the stub hooks.
    """
    def __init__(self, opt):
        self.net=None  # concrete network; assigned by subclasses
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
        self.number_sentiments=opt.number_sentiments
        self.interpretation_model=None
    @staticmethod
    def modify_commandline_options(self,parser):
        # NOTE(review): declared @staticmethod yet takes `self`; the parser
        # binds to the SECOND argument -- confirm the intended signature.
        return parser
    def create_dataloader(opt):
        # NOTE(review): missing `self`; when called on an instance, the
        # instance binds to `opt`. Harmless only because this is a stub.
        pass
    def set_input(self, input):
        # Hook: prepare one batch for forward(); overridden by subclasses.
        pass
    def forward(self):
        # Hook: run the network on the prepared input.
        pass
    def optimize_parameters(self):
        # Hook: one optimisation step (forward, loss, backward, step).
        pass
    def setup(self, opt, dataset_size=0):
        """Create optimizer/scheduler for training, or load weights and switch
        to eval mode for inference; optionally build the interpretation model."""
        if self.isTrain:
            self.optimizer = AdamW(self.net.parameters(),
                                   lr=opt.lr,
                                   eps=opt.eps_adam)
            self.scheduler = get_linear_schedule_with_warmup(self.optimizer,
                                                             num_warmup_steps=opt.num_warmup_steps,
                                                             num_training_steps=dataset_size*opt.num_epochs)
        if not self.isTrain or opt.continue_train:
            self.load_networks(opt.load_epoch)
        #no need to create a special call for eval
        if not self.isTrain:
            self.net.eval()
        #initialize word importance ranking model
        if not opt.disable_word_importance:
            self.setup_interpretation_model()
        self.print_networks(opt.verbose)
    def setup_interpretation_model(self):
        # Hook: build the word-importance ranking model when enabled.
        pass
    def set_train(self):
        # Put the network into training mode (enables dropout etc.).
        self.net.train()
    def predict(self,dataloader):
        # Hook: run inference over a dataloader.
        pass
    def evaluate(self,dataloader_val):
        # Hook: compute validation metrics over a dataloader.
        pass
    def save_networks(self, epoch):
        """Save self.net's weights as <save_dir>/<epoch>_net_<ClassName>.pth."""
        if issubclass(self.__class__, BaseModel):
            save_filename = '%s_net_%s.pth' % (epoch, self.__class__.__name__)
            save_path = os.path.join(self.save_dir, save_filename)
            torch.save(self.net.state_dict(), save_path)
    def load_networks(self, epoch):
        """Load weights saved by save_networks() onto the current device."""
        if issubclass(self.__class__, BaseModel):
            load_filename = '%s_net_%s.pth' % (epoch, self.__class__.__name__)
            load_path = os.path.join(self.save_dir, load_filename)
            self.net.load_state_dict(torch.load(load_path, map_location=self.device))
    def print_networks(self, verbose):
        """Print the network architecture and its parameter count (verbose only)."""
        print('---------- Networks initialized -------------')
        if issubclass(self.__class__, BaseModel) and verbose:
            num_params = 0
            for param in self.net.parameters():
                num_params += param.numel()
            print(self.net)
            print('[Network %s] Total number of parameters : %.3f M' % (str(self.net.__class__), num_params / 1e6))
        print('-----------------------------------------------')
|
"""Represents an entire atomic snapshot (including descriptor/target data)."""
from os.path import join
import numpy as np
from mala.common.json_serializable import JSONSerializable
class Snapshot(JSONSerializable):
    """
    Represents a snapshot on a hard drive.
    A snapshot consists of numpy arrays for input/output data and an
    optional DFT calculation output, needed for post-processing.
    Parameters
    ----------
    input_npy_file : string
        File with saved numpy input array.
    input_npy_directory : string
        Directory containing input_npy_directory.
    output_npy_file : string
        File with saved numpy output array.
    output_npy_directory : string
        Directory containing output_npy_file.
    input_units : string
        Units of input data. See descriptor classes to see which units are
        supported.
    output_units : string
        Units of output data. See target classes to see which units are
        supported.
    calculation_output : string
        File with the output of the original snapshot calculation. This is
        only needed when testing multiple snapshots.
    snapshot_function : string
        "Function" of the snapshot in the MALA workflow.
        - te: This snapshot will be a testing snapshot.
        - tr: This snapshot will be a training snapshot.
        - va: This snapshot will be a validation snapshot.
        Replaces the old approach of MALA to have a separate list.
        Default is None.
    """
    def __init__(self, input_npy_file, input_npy_directory,
                 output_npy_file, output_npy_directory,
                 snapshot_function,
                 input_units="", output_units="",
                 calculation_output="",
                 snapshot_type="openpmd"):
        super(Snapshot, self).__init__()
        # Inputs.
        self.input_npy_file = input_npy_file
        self.input_npy_directory = input_npy_directory
        self.input_units = input_units
        # Outputs.
        self.output_npy_file = output_npy_file
        self.output_npy_directory = output_npy_directory
        self.output_units = output_units
        # Calculation output.
        self.calculation_output = calculation_output
        # Function of the snapshot.
        self.snapshot_function = snapshot_function
        # Legacy functionality: Determine whether the snapshot contains
        # numpy or openpmd files.
        self.snapshot_type = snapshot_type
        # All the dimensionalities of the snapshot.
        self.grid_dimensions = None
        self.grid_size = None
        self.input_dimension = None
        self.output_dimension = None

    @classmethod
    def from_json(cls, json_dict):
        """
        Read this object from a dictionary saved in a JSON file.
        Parameters
        ----------
        json_dict : dict
            A dictionary containing all attributes, properties, etc. as saved
            in the json file.
        Returns
        -------
        deserialized_object : JSONSerializable
            The object as read from the JSON file.
        """
        # Bug fix: snapshot_type was passed positionally, landing in the
        # input_units parameter (the 6th positional slot). The setattr loop
        # below masked the mistake; pass it by keyword instead.
        deserialized_object = cls(json_dict["input_npy_file"],
                                  json_dict["input_npy_directory"],
                                  json_dict["output_npy_file"],
                                  json_dict["output_npy_directory"],
                                  json_dict["snapshot_function"],
                                  snapshot_type=json_dict["snapshot_type"])
        # Re-apply every stored attribute verbatim (covers units, dimensions,
        # and any extra fields serialized alongside the constructor args).
        for key in json_dict:
            setattr(deserialized_object, key, json_dict[key])
        return deserialized_object
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import __future__
import sys
# HackerRank-style "counting valleys" harness: stdin is redirected to the
# sample input file so the solution below reads it as if typed.
ip = "./challenge_sample_input"
op = "./challenge_sample_output"
print("===" * 30)
print("SAMPLE INPUT:")
print("===" * 30)
print(open(ip, 'r').read())
sys.stdin = open(ip, 'r')
print("===" * 30)
print("SAMPLE OUTPUT:")
print("===" * 30)
print(open(op, 'r').read())
print("===" * 30)
print("START")
print("===" * 30)
# Step direction per character: U(p) raises the level, D(own) lowers it.
smap = {
    'U' : 1,
    'D' : -1,
}
# Bug fix: raw_input() is Python 2 only and this file already uses print()
# as a function; read with Python 3's input() (stdin is redirected above).
num_steps = int(input())
steps = list(input())
assert len(steps) == num_steps
valleys = 0
level = 0
# A valley ends each time the hiker returns to sea level from below.
for step in steps:
    prev = level
    level += smap[step]
    if prev < 0 and level == 0:
        valleys += 1
print(valleys)
|
# Print a descending then an ascending triangle of '*' of height `a`.
a = int(input("digite um numero: "))
for width in range(a, 0, -1):
    print("*" * width)
for width in range(1, a + 1):
    print("*" * width)
# -*- coding: UTF-8 -*-
import random
# 随机生成五位数的验证码 验证码由字母或数字组成
def auth_code():
    """Return a random 5-character verification code.

    Each position is either a digit (1-9) or an uppercase letter (A-Z),
    picked at random. The sequence of random calls (digit, letter, choice
    per position) matches the original exactly.
    """
    chars = []
    for _ in range(5):
        digit = str(random.randrange(1, 10))
        letter = chr(random.randrange(65, 91))
        chars.append(random.choice([digit, letter]))
    return ''.join(chars)
print(auth_code())
|
import wit
import json
def main(interval=2):
    """Record `interval` voice queries through Wit and tally "fuck_type" entities.

    NOTE(review): the access token is hard-coded below -- move it to an
    environment variable or config file before sharing this code.
    """
    i=0
    total_fucks=0
    while i<interval:
        access_token = '5OOPLQECDO32JWXIAN5TAPE7JZ7J4UHX'
        wit.init()
        response = wit.voice_query_auto(access_token)  # blocks while recording
        parse_for_fucks = json.loads(response)
        print(response)
        # Count the entities tagged "fuck_type" in the first outcome.
        total_fucks = total_fucks +len(parse_for_fucks["outcomes"][0]["entities"]["fuck_type"])
        wit.close()
        i = i + 1
    print("Fucks Given: ",total_fucks)
|
# Generated by Django 2.1.2 on 2019-05-16 07:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional "from_date" to DailyCreditCardFile, marking the start
    # of the period the file covers.
    dependencies = [
        ('report', '0042_dailycreditcardfile_grand_total'),
    ]
    operations = [
        migrations.AddField(
            model_name='dailycreditcardfile',
            name='from_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
|
# Author = 'Vincent FUNG'
# Create = '2017/09/26'
import datetime
import os
import sqlite3
import time
# try:
from .logger import Logger
# except ModuleNotFoundError:
# from http_websocket.logger import Logger
# All SQLiteBase log output goes to ~/CrashParser/log/CrashParser.log.
LOG_FILE = os.path.join(os.path.expanduser(
    '~'), 'CrashParser', 'log', 'CrashParser.log')
LOG = Logger(LOG_FILE, 'SQLiteBase')
def get_today_timestamp():
    """Return, as a string, the Unix timestamp (second precision) of the
    moment exactly one day before now.

    Returns:
        [String] -- [A timestamp.]
    """
    yesterday = datetime.datetime.today() - datetime.timedelta(1)
    # timetuple() already drops sub-second precision, so this round-trip
    # through mktime matches the original field-by-field reconstruction.
    return str(int(time.mktime(yesterday.timetuple())))
def sqlite_connect(sql_name='CrashCount.sqlite', sql_abs_path=0):
    """Open a SQLite database and hand back the connection and a cursor.

    Keyword Arguments:
        sql_name {String} -- [Sqlite file name.] (default: {CrashCount.sqlite})
        sql_abs_path {String} -- [Absolute path to the sqlite file; falsy means
            use ~/CrashParser/database/<sql_name>.] (default: {0})
    Returns:
        Normally:
            (sqlite3.Connection, sqlite3.Cursor)
        False:
            [Boolean] -- [False, when a cursor could not be created.]
    """
    if sql_abs_path:
        sql_path = sql_abs_path
    else:
        sql_path = os.path.join(
            os.path.expanduser('~'), 'CrashParser', 'database', sql_name)
    conn = sqlite3.connect(sql_path)
    if not conn:
        # Matches the original fall-through (implicit None); in practice
        # sqlite3.connect raises instead of returning a falsy value.
        return None
    cursor = conn.cursor()
    if not cursor:
        return False
    return conn, cursor
def create_base_table(conn, cursor, end=True):
    """Create the `statistics` table used for crash-frequency bookkeeping.
    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
    """
    # NOTE(review): MESSAGE_TEXT is not a standard SQLite type name; SQLite's
    # loose typing accepts it, but TEXT is presumably what was meant.
    cursor.execute('''CREATE TABLE statistics
        (FREQUENCY INT NOT NULL,
        PROJECT MESSAGE_TEXT,
        CONTENT MESSAGE_TEXT NOT NULL,
        FIRST_VERSION MESSAGE_TEXT NOT NULL,
        LAST_VERSION MESSAGE_TEXT ,
        INSERT_TIME MESSAGE_TEXT NOT NULL,
        LAST_UPDATE MESSAGE_TEXT );''')
    if end:
        cursor.close()
        conn.close()
def create_backtrack_table(conn, cursor, end=True, **kwargs):
    """Create a per-crash `backtrack_<id>` table.
    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
        **kwargs {String} -- [Table id, passed as id=<suffix>.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
    """
    # NOTE(review): the table name is built with %-interpolation; callers must
    # only pass trusted ids (DDL cannot be parameterised in sqlite3).
    cursor.execute('CREATE TABLE backtrack_%s( \
        CRASH_ID MESSAGE_TEXT, \
        PROJECT MESSAGE_TEXT,\
        REASON_ID MESSAGE_TEXT, \
        REASON MESSAGE_TEXT, \
        VERSION MESSAGE_TEXT, \
        INSERT_TIME MESSAGE_TEXT NOT NULL, \
        LAST_UPDATE MESSAGE_TEXT );' % kwargs['id'])
    if end:
        cursor.close()
        conn.close()
def create_report_table(conn, cursor, end=True):
    """Create the `report` table (crash id, project, version, call, log).
    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
    """
    cursor.execute(
        'CREATE TABLE report(CRASH_ID MESSAGE_TEXT, PROJECT MESSAGE_TEXT, VERSION MESSAGE_TEXT, CALL MESSAGE_TEXT, LOG MESSAGE_TEXT);')
    if end:
        cursor.close()
        conn.close()
def create_reasons_table(conn, cursor, end=True):
    """Create the `reasons` table tracking crash causes and their JIRA status.
    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
    """
    cursor.execute('''CREATE TABLE reasons(
        FIXED INT DEFAULT 0,
        JIRAID MESSAGE_TEXT,
        PROJECT MESSAGE_TEXT,
        FREQUENCY INT DEFAULT 1,
        REASON MESSAGE_TEXT,
        INSERT_TIME MESSAGE_TEXT NOT NULL,
        LAST_UPDATE MESSAGE_TEXT)''')
    if end:
        cursor.close()
        conn.close()
def create_unmatch_table(conn, cursor, end=True):
    """Create the `unmatch` table for crash ids that matched no known reason.
    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
    """
    cursor.execute('''CREATE TABLE unmatch(
        CRASH_ID MESSAGE_TEXT,
        INSERT_TIME MESSAGE_TEXT NOT NULL)''')
    if end:
        cursor.close()
        conn.close()
def create_tables(conn, cursor, tablename, end=True, create=True):
    """Ensure *tablename* exists, optionally creating it by naming convention.

    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
        tablename {String} -- [What name want to create.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
        create {Boolean} -- [Create the table when this value set to True.] (default: {True})
    Returns:
        [Boolean] -- [True if the table exists or was created; False otherwise.]
    """
    # Parameterized query instead of %-interpolating the table name into SQL:
    # same result, but immune to quoting problems / injection.
    exists = cursor.execute(
        "SELECT COUNT(*) FROM sqlite_master where type='table' and name=?",
        (tablename,)).fetchall()[0][0]
    if exists == 1:
        return True
    if not create:
        return False
    # Dispatch on the table-name convention; each creator manages `end` itself.
    if tablename.startswith('backtrack_'):
        create_backtrack_table(conn, cursor, end=end,
                               id=tablename.split('_')[-1])
    elif tablename == 'statistics':
        create_base_table(conn, cursor, end=end)
    elif tablename == 'report':
        create_report_table(conn, cursor, end=end)
    elif tablename == 'reasons':
        create_reasons_table(conn, cursor, end=end)
    elif tablename == 'unmatch':
        create_unmatch_table(conn, cursor, end=end)
    else:
        # Unknown name and nothing to create it with.
        return False
    return True
def insert(conn, cursor, end=True, **kwargs):
    """Insert one row into the table named by ``kwargs['table_name']``.

    The target table is created first (via create_tables) if missing; the
    column set consumed from **kwargs depends on which table is targeted.

    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
        **kwargs {String} -- [What value want to insert.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
    Returns:
        [Integer] -- [The line id just was inserted.]
    """
    # Ensure the table exists; keep the connection open (end=False) because we
    # still have the INSERT and rowid lookup to run on this cursor.
    if create_tables(
            conn=conn, cursor=cursor, tablename=kwargs['table_name'], create=True, end=False):
        if kwargs['table_name'] == 'statistics':
            _inse_cmd_format = "INSERT INTO statistics(FREQUENCY, PROJECT, CONTENT, FIRST_VERSION, LAST_VERSION, INSERT_TIME, LAST_UPDATE) values(?,?,?,?,?,?,?)"
            cursor.execute(_inse_cmd_format,
                           (kwargs['frequency'], kwargs['project'], kwargs['content'], kwargs['fv'], kwargs['lv'], get_today_timestamp(),
                            get_today_timestamp()))
        elif kwargs['table_name'].startswith('backtrack_'):
            # Per-crash backtrack tables share a schema; only the name varies.
            _inse_cmd_format_ = "INSERT INTO %s(CRASH_ID, PROJECT, VERSION, INSERT_TIME) values(?,?,?,?)" % kwargs[
                'table_name']
            cursor.execute(
                _inse_cmd_format_, (kwargs['crash_id'], kwargs['project'], kwargs['version'], get_today_timestamp()))
        elif kwargs['table_name'] == 'report':
            _inse_cmd_format = "INSERT INTO report(CRASH_ID, PROJECT, VERSION, CALL, LOG) values(?,?,?,?,?)"
            cursor.execute(_inse_cmd_format,
                           (kwargs['crash_id'], kwargs['project'], kwargs['version'], kwargs['crash_call'], kwargs['log']))
        elif kwargs['table_name'] == 'reasons':
            _inse_cmd_format = "INSERT INTO reasons(JIRAID, PROJECT, FREQUENCY, REASON, INSERT_TIME) VALUES(?,?,?,?,?)"
            cursor.execute(_inse_cmd_format,
                           (kwargs['jiraid'], kwargs['project'], kwargs['frequency'], kwargs['reason'], get_today_timestamp()))
        elif kwargs['table_name'] == 'unmatch':
            _inse_cmd_format = "INSERT INTO unmatch(CRASH_ID, INSERT_TIME) VALUES(?,?)"
            cursor.execute(_inse_cmd_format,
                           (kwargs['crash_id'], get_today_timestamp()))
    # NOTE(review): if create_tables failed, nothing was inserted and this
    # rowid is from the previous insert (or 0) — confirm callers tolerate that.
    _row_id = cursor.execute('SELECT LAST_INSERT_ROWID()').fetchall()[0][0]
    conn.commit()
    if end:
        cursor.close()
        conn.commit()
        conn.close()
    return _row_id
def update(conn, cursor, end=True, **kwargs):
    """Update rows of ``kwargs['table_name']`` matching ``kwargs['condition']``.

    Two calling styles are supported: ``columns``+``values`` (parallel lists
    of column names and new values) or a single ``reason`` shortcut. In both
    cases LAST_UPDATE is refreshed with the current timestamp.

    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
        **kwargs {String} -- [What value want to insert.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
    Returns:
        [Integer] -- [The line id just was updated.]
    """
    # The statement is assembled by string formatting; values are assumed to
    # be internally produced (not user input).
    _update_sql = 'UPDATE %s SET ' % kwargs['table_name']
    if 'columns' in kwargs.keys():
        for _index, _value in enumerate(kwargs['columns']):
            if _index >= 1:
                _update_sql += ', '
            if 'FREQUENCY' == _value:
                # FREQUENCY is numeric — no quoting; always taken from values[0].
                _update_sql += 'FREQUENCY = %s' % kwargs['values'][0]
            else:
                _update_sql += "%s = \'%s\'" % (_value,
                                                kwargs['values'][_index])
        _update_sql += ", LAST_UPDATE = \'%s\' " % get_today_timestamp() + kwargs[
            'condition']
    elif 'reason' in kwargs.keys():
        # Shortcut: update only the REASON column.
        _update_sql += "%s = \'%s\', LAST_UPDATE = \'%s\' %s" % (
            'REASON', kwargs['reason'], get_today_timestamp(), kwargs[
                'condition'])
    # NOTE(review): if neither 'columns' nor 'reason' is given the SQL is left
    # as "UPDATE <t> SET " and execute() will raise — presumably intentional.
    cursor.execute(_update_sql)
    conn.commit()
    # Re-select the rowid of the affected row so callers get a handle back.
    _row_id_update = search(conn, cursor,
                            end=False,
                            columns='rowid',
                            table_name=kwargs['table_name'],
                            condition=kwargs['condition'])
    if end:
        cursor.close()
        conn.close()
    return _row_id_update[0][0]
def search(conn, cursor, end=True, only=False, **kwargs):
    """Run a SELECT built from ``columns``/``table_name``/``condition``.

    Arguments:
        conn {Sqlite3.Connection} -- [The sqlite connection.]
        cursor {Sqlite3.Cusor} -- [The sqlite cursor.]
        **kwargs {String} -- [The search condition.]
    Keyword Arguments:
        end {Boolean} -- [Close the sqlite connection when this value set to True.] (default: {True})
        only {Boolean} -- [Search result the distinct data from sqlite when this value set to Ture.] (default: {False})
    Returns:
        Normally:
            [List] -- [The result of searching.]
        False:
            [Boolean] -- [When the sqlite3.OpeartioncalError was throw out.]
    """
    try:
        # Conditional expression replaces the dead `distinct = str()` init
        # followed by an if/else reassignment.
        distinct = 'DISTINCT ' if only else ''
        search_sql = 'SELECT %s %s FROM %s %s' % (
            distinct,
            kwargs['columns'],
            kwargs['table_name'],
            kwargs['condition'])
        result = cursor.execute(search_sql).fetchall()
        if end:
            cursor.close()
            conn.close()
        # Single exit point: the original had a duplicated `return result`
        # inside and after the `if end:` block.
        return result
    except sqlite3.OperationalError as sqlite_err:
        LOG.cri(' %-20s ]-[ SQLite search error: %s' %
                (LOG.get_function_name(), sqlite_err))
        return False
|
def division(n1, n2):  # try-except-else-finally
    """Return n1 / n2; on any exception print it and return -1."""
    quotient = -1
    try:
        quotient = n1 / n2
    except Exception as err:
        print(err)
    else:
        print('No exception detected!')
    finally:
        print('I will be executed anyways')
    return quotient
def main_exept():
    """Demo driver: print a banner, then the result of division(3, 1)."""
    for banner_line in ('Exceptions Handling M1', '\n'):
        print(banner_line)
    print(division(3, 1))
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def generateTrees(self, n: int) -> List[TreeNode]:
        """Return the roots of all structurally unique BSTs over 1..n."""
        if n == 0:
            return []
        if n == 1:
            return [TreeNode(1)]

        def build(lo, hi):
            # All BSTs whose node values are exactly lo..hi; [None] marks an
            # empty subtree so the cartesian product below stays simple.
            if lo > hi:
                return [None]
            trees = []
            for root_val in range(lo, hi + 1):
                for left_sub in build(lo, root_val - 1):
                    for right_sub in build(root_val + 1, hi):
                        root = TreeNode(root_val)
                        root.left = left_sub
                        root.right = right_sub
                        trees.append(root)
            return trees

        return build(1, n)
# res = []
# for t in self.generateTrees(n-1):
# add_tree = TreeNode(n)
# add_tree.left = t
# res.append(add_tree)
# adjoint = t
# while True:
# adjoint_now = adjoint
# if not adjoint_now.right:
# adjoint_now.right = TreeNode(n)
# res.append(adjoint_now)
# break
# else:
# a = TreeNode(n)
# a.left = adjoint_now.right
# adjoint_now.right = a
# res.append(adjoint_now)
|
# Generated by Django 3.2.6 on 2021-08-28 08:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the RequestForm table used by the
    # booking/request feature.

    dependencies = [
        ('main', '0004_auto_20210826_1448'),
    ]
    operations = [
        migrations.CreateModel(
            name='RequestForm',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
                # phone is the uniqueness key — one request per phone number
                ('phone', models.CharField(max_length=15, unique=True)),
                ('people', models.CharField(choices=[('1', '1 person'), ('2', '2 persons'), ('3', '3 persons'), ('4', '4 persons'), ('5', '5 persons'), ('6', 'More than 5 persons')], max_length=15)),
                ('package_name', models.CharField(max_length=100)),
            ],
        ),
    ]
|
from django.conf.urls import url
from . import views
# URL routes for the ssapp views; names are used for reverse() lookups.
urlpatterns = [
    # ex: /ssapp/
    url(r'^$', views.index, name='index'),
    # ex: /ssapp/family/5/
    url(r'^family/(?P<family_id>[0-9]+)/$', views.family, name='family'),
    # ex: /ssapp/person/5/
    url(r'^person/(?P<person_id>[0-9]+)/$', views.person, name='person'),
    # detail/setup take no URL parameters
    url(r'^detail/$', views.detail, name='detail'),
    url(r'^setup/$', views.setup, name='setup'),
] |
# Django settings for satchmo project.
# This is a recommended base setting for further customization
import os

DIRNAME = os.path.dirname(__file__)
DJANGO_PROJECT = 'store'
DJANGO_SETTINGS_MODULE = 'store.settings'

ADMINS = (
    ('Yanchenko Igor', 'yanchenko.igor@gmail.com'), # tuple (name, email) - important for error reports sending, if DEBUG is disabled.
)
MANAGERS = ADMINS

# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/current/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'Europe/Kiev'

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'ru'
USE_I18N = True
SITE_ID = 1
# NOTE(review): naive datetimes throughout (USE_TZ = False) — presumably
# required by this satchmo release; confirm before enabling tz support.
USE_TZ = False

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
# Image files will be stored off of this path
#
# If you are using Windows, recommend using normalize_path() here
#
# from satchmo_utils.thumbnail import normalize_path
# MEDIA_ROOT = normalize_path(os.path.join(DIRNAME, 'static/'))
MEDIA_ROOT = os.path.join(DIRNAME, 'static/')
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL="/static/"
STATIC_URL="/media/"
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
# NOTE(review): a committed SECRET_KEY should be moved to local_settings.
SECRET_KEY = 'r2d$af@=#ri5md5n1u2=r%h2u1-^86q#2p^-1%!6kn(7i5f%t8'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

# Request/response middleware, applied top-down on requests.
MIDDLEWARE_CLASSES = (
    "django.middleware.common.CommonMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.middleware.doc.XViewMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    # threaded_multihost + SSLRedirect are satchmo requirements
    "threaded_multihost.middleware.ThreadLocalMiddleware",
    "satchmo_store.shop.SSLMiddleware.SSLRedirect",
    #"satchmo_ext.recentlist.middleware.RecentProductMiddleware",
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)

#this is used to add additional config variables to each request
# NOTE: If you enable the recent_products context_processor, you MUST have the
# 'satchmo_ext.recentlist' app installed.
TEMPLATE_CONTEXT_PROCESSORS = ('satchmo_store.shop.context_processors.settings',
                               'django.contrib.auth.context_processors.auth',
                               'django.contrib.messages.context_processors.messages',
                               'store.localsite.context_processors.categories',
                               #'satchmo_ext.recentlist.context_processors.recent_products',
                               )

ROOT_URLCONF = 'store.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(DIRNAME,'templates'),
)

FIXTURE_DIRS = [
    os.path.join(DIRNAME, 'fixtures'),
]
# App order matters: satchmo_store.shop must precede django.contrib.admin,
# and the commented entries are optional satchmo features left disabled.
INSTALLED_APPS = (
    'django.contrib.sites',
    'satchmo_store.shop',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.comments',
    'django.contrib.sessions',
    'django.contrib.sitemaps',
    'django.contrib.formtools',
    'django.contrib.staticfiles',
    'django.contrib.flatpages',
    'tagging',
    'registration',
    'sorl.thumbnail',
    'keyedcache',
    'livesettings',
    'l10n',
    'satchmo_utils.thumbnail',
    'satchmo_store.contact',
    'tax',
    'tax.modules.no',
    'tax.modules.area',
    'tax.modules.percent',
    'shipping',
    #'satchmo_store.contact.supplier',
    #'shipping.modules.tiered',
    'satchmo_ext.newsletter',
    #'satchmo_ext.recentlist',
    #'testimonials', # dependency on http://www.assembla.com/spaces/django-testimonials/
    'product',
    'product.modules.configurable',
    'product.modules.custom',
    #'product.modules.downloadable',
    'product.modules.subscription',
    #'satchmo_ext.product_feeds',
    #'satchmo_ext.brand',
    'payment',
    #'payment.modules.dummy',
    'payment.modules.cod',
    #'payment.modules.purchaseorder',
    #'payment.modules.giftcertificate',
    #'satchmo_ext.wishlist',
    #'satchmo_ext.upsell',
    #'satchmo_ext.productratings',
    'satchmo_ext.satchmo_toolbar',
    'satchmo_utils',
    #'shipping.modules.tieredquantity',
    #'satchmo_ext.tieredpricing',
    #'typogrify', # dependency on http://code.google.com/p/typogrify/
    #'debug_toolbar',
    'app_plugins',
    'store',
    'store.localsite',
    #'debug_toolbar',
    'south',
    'django_extensions',
    'tinymce',
    'flatblocks',
    'mailer',
)

# Email-based login first, falling back to Django's username auth.
AUTHENTICATION_BACKENDS = (
    'satchmo_store.accounts.email-auth.EmailBackend',
    'django.contrib.auth.backends.ModelBackend',
)
#### Satchmo unique variables ####
from django.conf.urls.defaults import patterns, include, url

# Extra shop URLs are injected into satchmo's URLconf via SHOP_URLS.
SATCHMO_SETTINGS = {
    'SHOP_BASE' : '',
    'MULTISHOP' : False,
    'SHOP_URLS' : patterns('',
        url(r'^i18n/', include('l10n.urls')),
        url(r'^featured/', 'localsite.views.display_featured', name='localsite_featured'),
        url(r'^related/(?P<id>.*)/$', 'localsite.views.display_related', name='display_related'),
        url(r'^hallmap/(?P<eventdate_id>\d+)/(?P<price>\d+)/$', 'localsite.views.get_hall_map_by_price', name='get_hall_map_by_price'),
        url(r'^hallmap/(?P<eventdate_id>\d+)/$', 'localsite.views.get_hall_map', name='get_hall_map'),
        url(r'^events/$', 'localsite.views.select_event', name='select_event'),
        url(r'^tags/(?P<slug>[- \w]+)/$', view='localsite.views.tag_detail', name='event_tag_detail'),
        url(r'^event/(?P<event_id>\d+)/edit/$', 'localsite.views.edit_event', name='edit_event'),
        url(r'^section/(?P<section_id>\d+)/edit/$', 'localsite.views.place_editor', name='place_editor'),
        url(r'^flatpages/$', 'localsite.views.flatpages', name='flatpages'),
        url(r'^flatpage/(?P<flatpage_id>\d+)/$', 'localsite.views.flatpage_editor', name='flatpage_editor'),
        url(r'^flatblocks/(?P<pk>\d+)/edit/$', 'localsite.views.flatblock_edit', name='flatblocks-edit'),
        url(r'^tinymce/', include('tinymce.urls')),
        url(r'^ajax_select_city/$', 'localsite.views.ajax_select_city', name='ajax_select_city'),
        url(r'^ajax_select_ticket/$', 'localsite.views.ajax_select_ticket', name='ajax_select_ticket'),
        url(r'^ajax_select_ticket2/$', 'localsite.views.ajax_select_ticket2', name='ajax_select_ticket2'),
        url(r'^add_ticket/$', 'localsite.views.add_ticket', name='add_ticket'),
        url(r'^wizards/event/$', 'localsite.views.wizard_event', name='wizard_event_step0'),
        url(r'^wizards/event/(?P<step>.*)/$', 'localsite.views.wizard_event'),
        url(r'^$', 'localsite.views.display_recent', name='satchmo_shop_home'),
        url(r'^product/view/bestsellers/$', 'localsite.views.display_bestsellers', name='satchmo_product_best_selling'),
    )
}
SKIP_SOUTH_TESTS=True

# TinyMCE rich-text editor configuration (used by the flatpage/flatblock editors).
TINYMCE_DEFAULT_CONFIG = {
    'mode' : "textareas",
    'theme' : "advanced",
    'plugins' : "autolink,lists,spellchecker,pagebreak,style,layer,table,save,advhr,advimage,advlink,emotions,iespell,inlinepopups,insertdatetime,preview,media,searchreplace,print,contextmenu,paste,directionality,fullscreen,noneditable,visualchars,nonbreaking,xhtmlxtras,template",
    'theme_advanced_buttons1' : "save,newdocument,|,bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,styleselect,formatselect,fontselect,fontsizeselect",
    'theme_advanced_buttons2' : "cut,copy,paste,pastetext,pasteword,|,search,replace,|,bullist,numlist,|,outdent,indent,blockquote,|,undo,redo,|,link,unlink,anchor,image,cleanup,help,code,|,insertdate,inserttime,preview,|,forecolor,backcolor",
    'theme_advanced_buttons3' : "tablecontrols,|,hr,removeformat,visualaid,|,sub,sup,|,charmap,emotions,iespell,media,advhr,|,print,|,ltr,rtl,|,fullscreen",
    'theme_advanced_buttons4' : "insertlayer,moveforward,movebackward,absolute,|,styleprops,spellchecker,|,cite,abbr,acronym,del,ins,attribs,|,visualchars,nonbreaking,template,blockquote,pagebreak,|,insertfile,insertimage",
    'theme_advanced_toolbar_location' : "top",
    'theme_advanced_toolbar_align' : "left",
    'theme_advanced_statusbar_location' : "bottom",
    'theme_advanced_resizing' : True,
    'relative_urls': False,
}
TINYMCE_JS_URL = MEDIA_URL + 'js/tiny_mce/tiny_mce.js'
TINYMCE_JS_ROOT = MEDIA_ROOT + 'js/tiny_mce'

# Queue outgoing mail in the database (django-mailer) instead of sending inline.
EMAIL_BACKEND = "mailer.backend.DbBackend"

# Load the local settings
from local_settings import *
|
import graphene
from graphql import GraphQLError
from backend.likes.models import Like as LikeModel
from backend.posts.models import Post as PostModel
from backend.comments.models import Comment as CommentModel
from backend.likes.schemas.queries import LikeNode
class LikePost(graphene.Mutation):
    """
    Adds a like (by the logged-in user) to the specified post.
    """
    class Arguments:
        post_unique_identifier = graphene.String(required=True, description="Unique identifier of the post")
    ' Fields '
    like = graphene.Field(LikeNode)

    def mutate(self, info, post_unique_identifier):
        # Reject anonymous users before touching the database.
        current_user = info.context.user
        if current_user.is_anonymous:
            raise GraphQLError('Not logged in.')
        target_post = PostModel.objects.get(unique_identifier=post_unique_identifier)
        try:
            created_like = LikeModel(content_object=target_post, user=current_user)
        except Exception as exc:
            raise GraphQLError(exc)
        else:
            created_like.save()
        return LikePost(like=created_like)
class LikeComment(graphene.Mutation):
    """
    Adds a like (by the logged-in user) to the specified comment.
    """
    class Arguments:
        comment_unique_identifier = graphene.String(required=True, description="Unique identifier of the comment")
    ' Fields '
    like = graphene.Field(LikeNode)

    def mutate(self, info, comment_unique_identifier):
        # Reject anonymous users before touching the database.
        current_user = info.context.user
        if current_user.is_anonymous:
            raise GraphQLError('Not logged in.')
        target_comment = CommentModel.objects.get(unique_identifier=comment_unique_identifier)
        try:
            created_like = LikeModel(content_object=target_comment, user=current_user)
        except Exception as exc:
            raise GraphQLError(exc)
        else:
            created_like.save()
        return LikeComment(like=created_like)
class UnlikePost(graphene.Mutation):
    """
    Removes an existing like (identified by its unique id) from a post.
    """
    class Arguments:
        like_unique_identifier = graphene.String(required=True, description="Unique identifier of the like")
    ' Fields '
    successful = graphene.Boolean()

    def mutate(self, info, like_unique_identifier):
        if info.context.user.is_anonymous:
            raise GraphQLError('Not logged in.')
        try:
            existing_like = LikeModel.objects.filter(
                unique_identifier=like_unique_identifier
            ).get()
        except Exception as exc:
            raise GraphQLError(exc)
        else:
            existing_like.delete()
        return UnlikePost(successful=True)
class UnlikeComment(graphene.Mutation):
    """
    Removes an existing like (identified by its unique id) from a comment.
    """
    class Arguments:
        like_unique_identifier = graphene.String(required=True, description="Unique identifier of the like")
    ' Fields '
    successful = graphene.Boolean()

    def mutate(self, info, like_unique_identifier):
        if info.context.user.is_anonymous:
            raise GraphQLError('Not logged in.')
        try:
            existing_like = LikeModel.objects.filter(
                unique_identifier=like_unique_identifier
            ).get()
        except Exception as exc:
            raise GraphQLError(exc)
        else:
            existing_like.delete()
        return UnlikeComment(successful=True)
|
from io import BytesIO
from PIL import Image
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.urls import reverse
from .models import TravelRecord, TravelImage
from .forms import TravelRecordForm
def resize_image(image_data):
    """Helper function to resize an uploaded image to fit within 1200x1200.

    Returns an InMemoryUploadedFile wrapping the re-encoded image, keeping
    the original name and content type.
    """
    img = Image.open(image_data)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img.thumbnail([1200, 1200], Image.LANCZOS)
    buffer = BytesIO()
    img_format = image_data.content_type.split('/')[1]
    if img_format == 'apng':
        # PIL saves animated PNGs under the plain 'png' format name.
        img_format = 'png'
    img.save(fp=buffer, format=img_format)
    img_file = ContentFile(buffer.getvalue())
    # Bug fix: the original passed the bound method ``img_file.tell`` as the
    # size argument; InMemoryUploadedFile expects the byte count itself.
    return InMemoryUploadedFile(img_file, None, image_data.name,
                                image_data.content_type, img_file.size, None)
def map_view(request):
    """Render the travel map; on POST, save a new TravelRecord with photos.

    GET renders the map with all records grouped by place name; a valid POST
    creates the record plus one resized TravelImage per uploaded photo and
    redirects back to the map (post/redirect/get).
    """
    form = TravelRecordForm(request.POST or None, request.FILES or None)
    if request.method == 'POST' and form.is_valid():
        travel_record = form.save()
        # Each uploaded photo is downscaled before being attached to the record.
        for photo in request.FILES.getlist('photos'):
            photo = resize_image(photo)
            TravelImage.objects.create(photo=photo, travel=travel_record)
        return HttpResponseRedirect(reverse('map'))
    records = TravelRecord.objects.all().order_by('start_date')
    # Group records by place so the template can render one marker per place.
    records_dict = {}
    for record in records:
        if record.place_name not in records_dict:
            records_dict[record.place_name] = []
        records_dict[record.place_name].append(record)
    context = {
        "form": form,
        "records_dict": records_dict,
        "records": records,
    }
    return render(request, "map/map.html", context)
|
from keras.models import Model
from keras.regularizers import l2
from keras.optimizers import SGD, Adam
from keras.layers import *
import tensorflow as tf
import keras.backend as K
import numpy as np
def FCN(input_shape=None, weight_decay=0., batch_momentum=0.9, classes=1):
    """Build a small VGG-style fully convolutional network.

    Two conv blocks (each halving spatial size via max-pooling), two
    conv-as-FC layers with dropout, a 1x1 classifying conv, then 4x
    upsampling to restore the input resolution (2 pools of stride 2).

    # NOTE(review): batch_momentum is accepted but unused here — presumably
    # kept for signature compatibility with a batch-norm variant; confirm.
    """
    img_input = Input(shape=input_shape)
    image_size = input_shape[:2]
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='block1_conv1', kernel_regularizer=l2(weight_decay))(img_input)
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='block1_conv2', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same',
               name='block2_conv1', kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same',
               name='block2_conv2', kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # Convolutional layers transfered from fully-connected layers
    x = Conv2D(256, (7, 7), activation='relu', padding='same',
               name='fc1', kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Conv2D(256, (1, 1), activation='relu', padding='same',
               name='fc2', kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    # classifying layer: one channel per class, no activation (linear logits)
    x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='linear',
               padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)
    # Undo the two 2x poolings so output resolution matches the input.
    x = UpSampling2D(size=(4, 4))(x)
    model = Model(img_input, x)
    return model
|
import inspect
class Action_Error(RuntimeError):
    """Base error type for the action framework."""

    def __init__(self, msg):
        super().__init__(msg)
class Action_Invokation_Error(Action_Error):
    """Raised when an action cannot be invoked with the given arguments."""

    def __init__(self, msg):
        super().__init__(msg)
# Module-level registry of types that may annotate additional action arguments.
_supported_action_additional_argument_types = []


def get_all_supported_action_additional_argument_types():
    """Return the registered additional-argument types as a tuple."""
    return tuple(_supported_action_additional_argument_types)


def register_supported_action_additional_argument_type(typ):
    """Register *typ* as a legal annotation type; duplicates are ignored."""
    if not isinstance(typ, type):
        raise TypeError("Cannot register non-type object.")
    if typ not in _supported_action_additional_argument_types:
        _supported_action_additional_argument_types.append(typ)


def unregister_all_supported_action_additional_argument_types():
    """Empty the registry (mainly useful for tests)."""
    _supported_action_additional_argument_types.clear()
# interface:
# def on_<action_name>(self, context, <kwarg_1>:<kwarg_1_type>, ..., <kwarg_N>:<kwarg_N_type>)
def action(action_function):
    """Validate and mark *action_function* as a game action.

    Required interface:
        def on_<action_name>(self, context, <kwarg>:<type>, ...)

    Checks: the name starts with ``on``; the first two parameters are
    ``self`` and ``context``; every additional parameter is annotated with a
    registered supported type. On success the function is tagged (so
    ``is_game_action`` recognizes it) and returned unchanged.

    Raises:
        TypeError: if any of the structural requirements is violated.
    """
    qualname = action_function.__qualname__

    def _supported_types_text():
        # Human-readable list of registered types, e.g. "`A` or `B`".
        return " or ".join(
            "`" + t.__qualname__ + "`"
            for t in _supported_action_additional_argument_types
        )

    # check function name
    if not action_function.__name__.startswith("on"):
        raise TypeError(
            f"Function {qualname} decorated with @action must have a name "
            f"beginning with `on` (not fulfilled by `{action_function.__name__}`)."
        )
    # check required arguments
    argument_names = inspect.getfullargspec(action_function).args
    if len(argument_names) < 2:
        # (typo fix: original said "at laest")
        raise TypeError(
            f"Function {qualname} decorated with @action must have at least two arguments."
        )
    if argument_names[0] != "self":
        raise TypeError(
            f"Function {qualname} decorated with @action must have first "
            f"argument called `self` (not `{argument_names[0]}`)."
        )
    if argument_names[1] != "context":
        # (message fix: this is the *second* argument, not the first)
        raise TypeError(
            f"Function {qualname} decorated with @action must have second "
            f"argument called `context` (not `{argument_names[1]}`)."
        )
    # check optional arguments: each must carry a supported type annotation
    for additional_argument_name in argument_names[2:]:
        if additional_argument_name not in action_function.__annotations__:
            raise TypeError(
                f"Function {qualname} decorated with @action must have annotated "
                f"additional arguments only: Argument `{additional_argument_name}` "
                f"has no annotation. (In function definition: Replace "
                f"`{additional_argument_name}` with `{additional_argument_name}:<type>` "
                f"where `<type>` is {_supported_types_text()})"
            )
        annotation = action_function.__annotations__[additional_argument_name]
        if not isinstance(annotation, type):
            raise TypeError(
                f"Function {qualname} decorated with @action must have additional "
                f"arguments annotated with a type only: Argument "
                f"`{additional_argument_name}` has non-type annotation `{annotation}`."
            )
        if not issubclass(
            annotation, tuple(_supported_action_additional_argument_types)
        ):
            raise TypeError(
                f"Function {qualname} decorated with @action must have additional "
                f"arguments annotated with a supported types only: Argument "
                f"`{additional_argument_name}` has non supported annotation "
                f"`{annotation.__qualname__}` (must be {_supported_types_text()})."
            )
    # mark as action so is_game_action() can find it
    action_function._is_game_action = None
    # forwarding
    return action_function
def is_game_action(function):
    """Return True iff *function* carries the @action marker attribute."""
    marker_attribute = "_is_game_action"
    return hasattr(function, marker_attribute)
def get_additional_action_argument_names(action_function):
    """Names of the action's parameters after (self, context), in order."""
    if not is_game_action(action_function):
        raise TypeError(
            "Function " + action_function.__qualname__ + " is not a @action."
        )
    return inspect.getfullargspec(action_function).args[2:]
def get_additional_action_argument_types(action_function):
    """Annotation types of the additional arguments, in declaration order."""
    return tuple(
        action_function.__annotations__[name]
        for name in get_additional_action_argument_names(action_function)
    )
def get_additional_action_argument_dict(action_function):
    """Mapping of additional argument name -> annotated type."""
    names = get_additional_action_argument_names(action_function)
    return {name: action_function.__annotations__[name] for name in names}
def can_invoke_bound_action(bound_action_function, additional_args):
    """True iff *additional_args* matches the action's arity and types."""
    expected_names = get_additional_action_argument_names(
        bound_action_function
    )
    if len(additional_args) != len(expected_names):
        return False
    expected_types = get_additional_action_argument_types(
        bound_action_function
    )
    return all(
        isinstance(arg, expected)
        for arg, expected in zip(additional_args, expected_types)
    )
def invoke_bound_action(bound_action_function, context, additional_args):
    """Type-check *additional_args* and call the bound action with them.

    Raises Action_Error when the argument count or any argument type does
    not match the action's declared signature.
    """
    expected_names = get_additional_action_argument_names(
        bound_action_function
    )
    if len(additional_args) != len(expected_names):
        raise Action_Error(
            "Cannot invoke action: Number of given additional arguments "
            + str(len(additional_args))
            + " does not match expected value of "
            + str(len(expected_names))
            + "."
        )
    expected_types = get_additional_action_argument_types(
        bound_action_function
    )
    keyword_arguments = {}
    for position, (name, expected, arg) in enumerate(
        zip(expected_names, expected_types, additional_args)
    ):
        if not isinstance(arg, expected):
            raise Action_Error(
                "Cannot invoke action: Additional argument `"
                + name
                + "` at position "
                + str(position + 1)
                + " is not an instance of "
                + expected.__qualname__
                + "."
            )
        keyword_arguments[name] = arg
    return bound_action_function(context=context, **keyword_arguments)
def get_all_bound_action_methods(object):
    """Collect the public bound methods of *object* marked by @action."""
    # Private attributes (leading underscore) are never actions; everything
    # else is probed for callability plus the `_is_game_action` marker.
    return [
        candidate
        for attribute_name in dir(object)
        if not attribute_name.startswith("_")
        for candidate in [getattr(object, attribute_name)]
        if callable(candidate) and hasattr(candidate, "_is_game_action")
    ]
|
import sys
import csv
import utilities
import numpy
from datetime import datetime
from datetime import timedelta
date_format = "%Y-%m-%d"
start_semester = datetime.strptime('2018-09-17', date_format)
def main():
    """Entry point: per-student exercise statistics from a CSV export.

    Usage: <script> <csv_path> <student_id|'average'>. Prints a JSON dict
    with mean/median/total/passed and, when the regression slope is
    positive, forecast dates for reaching 16 and 20 points.
    """
    if len(sys.argv) != 3:
        print("Numbers of parameter are wrong")
    else:
        csv_reader = csv.DictReader(open(sys.argv[1], mode='r'))
        data = utilities.parse_csv_to_dict(csv_reader)
        student_id = sys.argv[2]
        exercise_data = points_only_for_student(utilities.merge_exercise_columns(data), student_id)
        if exercise_data is None:
            sys.stderr.write("Student with id {} doesn't exist!".format(student_id))
            exit(1)
        result = {}
        result['mean'] = numpy.mean(exercise_data)
        result["median"] = numpy.median(exercise_data)
        result["total"] = sum(exercise_data)
        result["passed"] = utilities.passed(exercise_data)
        # Accumulated points over time, for the linear-regression forecast.
        sorted_merged_date_data = utilities.merge_date_columns(data)
        points_per_day = points_only_for_student(sorted_merged_date_data, student_id)
        accumulated_points = numpy.cumsum(points_per_day)
        diff_from_start_semester = get_date_diff_from_start(sorted_merged_date_data)
        # 1D -> 2D array; (https://stackoverflow.com/questions/12575421/convert-a-1d-array-to-a-2d-array-in-numpy )
        # due to exception Array must be two-dimensional
        a = numpy.expand_dims(numpy.array(diff_from_start_semester), axis=1)
        # add zero values (interception should be 0 ) fixed slope = -0.0 https://stackoverflow.com/questions/8486294/how-to-add-an-extra-column-to-a-numpy-array?rq=1
        a = numpy.insert(a, 1, values=0, axis=1)
        b = numpy.array(accumulated_points)
        # Least-squares fit through the origin: slope = points per day.
        slope = numpy.linalg.lstsq(a, b, rcond=1)[0][0]
        result["regression slope"] = slope
        if slope > 0:
            date_to_16 = compute_date_to_point(16, slope)
            date_to_20 = compute_date_to_point(20, slope)
            result["date 16"] = date_to_16.strftime("%Y-%m-%d")
            result["date 20"] = date_to_20.strftime("%Y-%m-%d")
        utilities.print_dict_as_json(result)
# return list of all points for particular student (particular row for particular student id)
# return list of all points for particular student (particular row for particular student id)
def points_only_for_student(data, student_id):
    """Return the row of points for *student_id*.

    When student_id is the literal string 'average', return the per-column
    means instead; when a numeric id is not found, return None.
    """
    row_index = -1
    if student_id != 'average':
        # locate the student's row position
        wanted_id = float(student_id)
        for position, candidate in enumerate(data['student']):
            if candidate == wanted_id:
                row_index = position
        if row_index == -1:
            return None  # not average and student doesn't exist
    points = []
    for column_name in data:
        if column_name == 'student':
            continue
        if row_index == -1:
            points.append(numpy.mean(data[column_name]))
        else:
            points.append(data[column_name][row_index])
    return points
# https://stackoverflow.com/questions/151199/how-do-i-calculate-number-of-days-between-two-dates-using-python
# sorted_date_data is sorted dictionary with merged column by date
# sorted_date_data is sorted dictionary with merged column by date
def get_date_diff_from_start(sorted_date_data):
    """Days elapsed from start_semester for each date column, order kept."""
    unique_dates = []
    for column_name in sorted_date_data:
        if column_name == 'student':
            continue
        if column_name not in unique_dates:
            unique_dates.append(column_name)
    return [
        (datetime.strptime(day, date_format) - start_semester).days
        for day in unique_dates
    ]
# compute how many day is needed to reach the point from the start date
# https://stackoverflow.com/questions/6871016/adding-5-days-to-a-date-in-python
# compute how many day is needed to reach the point from the start date
def compute_date_to_point(point, slope):
    """Date on which the regression line (points/day = slope) reaches *point*."""
    days_needed = point / slope
    return start_semester + timedelta(days=days_needed)
# Guard the entry point so importing this module does not run the script.
if __name__ == '__main__':
    main()
|
import math
import torch
import random
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from utils import lengths2mask
class Encoder(nn.Module):
    """Recurrent sequence encoder over pretrained character embeddings.

    The RNN flavor (lstm/gru/rnn) is chosen via args.rnn_type; when
    args.brnn is set, the forward/backward output halves are summed so the
    output width stays hidden_size.
    """
    # Maps the config string to the torch RNN class.
    RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}

    def __init__(self, args, embedding):
        super(Encoder, self).__init__()
        self.hidden_size = args.hidden_size
        self.brnn = args.brnn
        self.rnn_type = args.rnn_type
        # Embedding weights come from *embedding*; optionally frozen.
        self.embed = nn.Embedding.from_pretrained(embedding, freeze=args.fixed_embed)
        self.n_layers = args.encode_layers
        self.rnn = self.RNN_TYPES[args.rnn_type](args.char_embed_size, self.hidden_size, args.encode_layers,
                                                 dropout=args.encode_dropout, bidirectional=args.brnn, batch_first=True)

    def forward(self, src, hidden=None):
        # src: (batch, len) token ids.
        # NOTE(review): the `hidden` parameter is accepted but not passed to
        # self.rnn — confirm whether that is intentional.
        embedded = self.embed(src)
        outputs, hidden = self.rnn(embedded)
        # sum bidirectional outputs
        if self.brnn:
            outputs = (outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:])
        '''
        hidden = hidden[-2, :, :] + hidden[-1, :, :]
        else:
        hidden = hidden[-1]
        '''
        # output - (batch, len, hidden)
        # hidden - (2 * n_layers, batch, hidden)
        return outputs, hidden
class Attention_1(nn.Module):
    """Dot-product attention: scores each encoder step against the decoder
    hidden state and softmax-normalizes over time."""
    def __init__(self, hidden_size):
        # BUG FIX: original called super(Attention, self).__init__(), naming a
        # *different* class, which raises TypeError when Attention_1 is
        # instantiated. Use the zero-argument form.
        super().__init__()
        self.hidden_size = hidden_size
    def forward(self, hidden, encoder_outputs, lens):
        """hidden: (B, H); encoder_outputs: (B, T, H); lens: lengths or None.
        Returns attention weights of shape (B, 1, T) summing to 1 over T."""
        score = torch.bmm(encoder_outputs, hidden.unsqueeze(-1))  # [B, T, 1]
        score = score.squeeze(-1)
        if lens is not None:
            # Mask out padded positions so they receive zero attention.
            mask = lengths2mask(lens, encoder_outputs.size(1), byte=True)
            mask = ~mask
            score = score.data.masked_fill_(mask.data, float("-inf"))
        att = F.softmax(score, dim=-1)
        return att.unsqueeze(1)
class Attention_0(nn.Module):
    """Additive (Bahdanau-style) attention variant returning ReLU-clipped,
    un-normalized energies of shape (B, 1, T)."""
    def __init__(self, hidden_size):
        # BUG FIX: original called super(Attention, self).__init__() — the
        # wrong class — raising TypeError on instantiation.
        super().__init__()
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        # Learned scoring vector, initialized uniformly in +-1/sqrt(H).
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.uniform_(-stdv, stdv)
    def forward(self, hidden, encoder_outputs, lens):
        '''
        hidden: (batch, hidden)
        encoder_outputs: (batch, len, hidden)
        '''
        timestep = encoder_outputs.size(1)
        h = hidden.repeat(timestep, 1, 1).transpose(0, 1) #[B,T,H]
        attn_energies = self.score(h, encoder_outputs)
        return F.relu(attn_energies)
    def score(self, hidden, encoder_outputs):
        # [B,T,2H]->[B,T,H]
        # NOTE(review): softmax without an explicit dim relies on torch's
        # implicit-dim behavior (deprecated); kept to preserve behavior.
        energy = F.softmax(self.attn(torch.cat([hidden, encoder_outputs], 2)))
        energy = energy.transpose(1, 2) # [B,H,T]
        v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B,1,H]
        energy = torch.bmm(v, energy) # [B,1,T]
        return energy
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention: tanh-scored energies normalized
    with softmax; returns weights of shape (B, 1, T)."""
    def __init__(self, hidden_size):
        super(Attention, self).__init__()
        self.hidden_size = hidden_size
        self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
        # Learned scoring vector, initialized uniformly in +-1/sqrt(H).
        self.v = nn.Parameter(torch.rand(hidden_size))
        stdv = 1. / math.sqrt(self.v.size(0))
        self.v.data.uniform_(-stdv, stdv)
    def forward(self, hidden, encoder_outputs, lens):
        '''
        hidden: (batch, hidden)
        encoder_outputs: (batch, len, hidden)
        Returns softmax attention weights (batch, 1, len).
        '''
        timestep = encoder_outputs.size(1)
        h = hidden.repeat(timestep, 1, 1).transpose(0, 1) #[B,T,H]
        attn_energies = self.score(h, encoder_outputs)
        '''
        if lens is not None:
            mask = lengths2mask(lens, encoder_outputs.size(1), byte=True)
            mask = ~mask
            attn_energies = attn_energies.data.masked_fill_(mask.data, float("-inf"))
        '''
        return F.softmax(attn_energies, dim=-1).unsqueeze(1)
    def score(self, hidden, encoder_outputs):
        # [B,T,2H]->[B,T,H]
        # FIX: F.tanh is deprecated (removed in newer torch); torch.tanh is
        # mathematically identical.
        energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2)))
        energy = energy.transpose(1, 2) # [B,H,T]
        v = self.v.repeat(encoder_outputs.size(0), 1).unsqueeze(1) # [B,1,H]
        energy = torch.bmm(v, energy) # [B,1,T]
        return energy.squeeze(1)
class Decoder(nn.Module):
    """Attention-based RNN decoder that produces one output step per call.

    Embeds the previous output word, attends over the encoder outputs,
    runs [embedding; context] through the RNN, and projects to vocab
    log-probabilities.
    """
    # Map config string to the matching torch RNN class.
    RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
    def __init__(self, args, embedding):
        super(Decoder, self).__init__()
        self.device = args.device
        # Pretrained word embedding; optionally frozen.
        self.embed = nn.Embedding.from_pretrained(embedding, freeze=args.fixed_embed)
        self.output_size = args.word_size  # output vocabulary size
        self.n_layers = args.decode_layers
        self.dropout = nn.Dropout(args.decode_dropout, inplace=True)
        self.attention = Attention(args.hidden_size)
        self.rnn = self.RNN_TYPES[args.rnn_type](args.hidden_size + args.word_embed_size, args.hidden_size,
                                                 args.decode_layers, dropout=args.decode_dropout, batch_first=True)
        # Projects [rnn output; attention context] to vocabulary logits.
        self.out = nn.Linear(args.hidden_size * 2, args.word_size)
    def forward(self, input, last_hidden, encoder_outputs, src_len):
        '''
        input: (batch, 1)
        last_hidden: (n_layers, batch, hidden)
        encoder_outputs: (batch, len, hidden)
        Returns (log-probs (B, word_size), new hidden state, attention weights (B, 1, T)).
        '''
        # Get the embedding of the current input word (last output word)
        embedded = self.embed(input).unsqueeze(1) # (B,1,embed)
        embedded = self.dropout(embedded)
        # Calculate attention weights and apply to encoder outputs.
        # For LSTM the state is an (h_n, c_n) pair; attend with h_n only.
        # NOTE(review): len(last_hidden) == 2 would also match a plain 2-layer
        # GRU/RNN hidden tensor — presumably decode_layers != 2 when rnn_type
        # is not 'lstm'; confirm.
        if len(last_hidden) == 2:
            h_n = last_hidden[0]
        else:
            h_n = last_hidden
        attn_weights = self.attention(h_n[-1, :, :], encoder_outputs, src_len) # [B,1,T]
        context = attn_weights.bmm(encoder_outputs) # (B,1,H)
        # Combine embedded input word and attended context, run through RNN
        rnn_input = torch.cat([embedded, context], 2) # (B,1,embed+H)
        output, hidden = self.rnn(rnn_input, last_hidden)
        output = output.squeeze(1) # (B,1,H) -> (B,H)
        context = context.squeeze(1)
        output = self.out(torch.cat([output, context], 1)) # [B,output_size]
        output = F.log_softmax(output, dim=1)
        return output, hidden, attn_weights
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper: encodes src, then decodes trg one step at a
    time with scheduled teacher forcing."""
    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
    def forward(self, src, trg, src_len, teacher_forcing_ratio=0.5):
        """Return (batch, trg_len, vocab) log-probabilities; step 0 stays zero."""
        batch_size, max_len = trg.size()
        vocab_size = self.decoder.output_size
        # NOTE(review): .cuda(...) hard-requires a GPU; decoder.device is
        # presumably a CUDA device index — confirm before running on CPU.
        outputs = Variable(torch.zeros(batch_size, max_len, vocab_size)).cuda(self.decoder.device)
        encoder_output, hidden = self.encoder(src)
        if self.encoder.rnn_type == 'lstm':
            # Keep only as many layers of (h, c) as the decoder has.
            hidden, c_n = hidden
            hidden = hidden[:self.decoder.n_layers]
            c_n = c_n[:self.decoder.n_layers]
            hidden = (hidden, c_n)
        else:
            hidden = hidden[:self.decoder.n_layers] # [n_layers,B,H]
        output = trg.data[:, 0] # 'sos'
        for t in range(1, max_len):
            output, hidden, attn_weights= self.decoder(
                output, hidden, encoder_output, src_len)
            outputs[:,t,:] = output
            # With probability teacher_forcing_ratio feed the gold token,
            # otherwise feed the model's own argmax prediction.
            is_teacher = random.random() < teacher_forcing_ratio
            top1 = output.max(1)[1]
            output = trg.data[:, t] if is_teacher else top1
        return outputs
|
from django.contrib import admin
from django.urls import path, include
from Report import views
# Route /report/ to the Report app's report view.
urlpatterns = [
    path('report/', views.report, name='report')
]
#import sys,os
#BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # __file__获取执行文件相对路径,整行为取上一级的上一级目录
#sys.path.append(BASE_DIR)
import numpy as np
import cv2
import time
import datetime
import tracktarget as tar
def trackmove(frame):
    """Detect moving regions in a BGR frame via MOG2 background subtraction.

    Draws contours, numbered bounding boxes and a total count onto `frame`
    (and shows several debug windows), then returns the list of bounding
    rectangles as (x, y, w, h) tuples. Uses module globals `fgbg` and `colour`.
    """
    frame=cv2.GaussianBlur(frame,(5,5),0)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    fgmask = fgbg.apply(gray)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))  # morphological denoising kernel
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, element)  # opening to remove noise
    ret,fgmask=cv2.threshold(fgmask, 128, 255, cv2.THRESH_BINARY)  # binarize
    dilate = cv2.dilate(fgmask, None, iterations=4)  # dilate
    cv2.imshow('dilate', dilate)
    erode = cv2.erode(dilate, None, iterations=2)  # erode
    cv2.imshow('erode', erode)
    contours, hierarchy = cv2.findContours(erode.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # find foreground contours
    cv2.drawContours(frame,contours,-1,(0,0,255),3)
    #cv2.drawContours(erode,contours,-1,(0,0,255),3)
    cv2.imshow('erode2', erode)
    count=0
    rect_array=[]
    for cont in contours:
        Area = cv2.contourArea(cont)  # contour area
        if Area < 500:  # skip small shapes (threshold is 500, despite the old comment saying 10)
            continue
        count += 1  # one more detected region
        print("{}-prospect:{}".format(count,Area),end=" ")  # print each foreground area
        rect = cv2.boundingRect(cont)  # bounding-box coordinates
        rect_array.append(rect)
        print("x:{} y:{}".format(rect[0],rect[1]))  # print coordinates
        cv2.rectangle(frame,(rect[0],rect[1]),(rect[0]+rect[2],rect[1]+rect[3]),colour[count%6],1)  # draw rectangle on the original frame
        cv2.rectangle(erode,(rect[0],rect[1]),(rect[0]+rect[2],rect[1]+rect[3]),(0xff, 0xff, 0xff), 1)  # draw rectangle on the binary foreground
        y = 10 if rect[1] < 10 else rect[1]  # keep the number label inside the image
        cv2.putText(frame, str(count), (rect[0], y), cv2.FONT_HERSHEY_COMPLEX, 0.4, (0, 255, 0), 1)  # label the region with its number
    cv2.putText(frame, "count:", (5, 20), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 0), 1)  # total count label
    cv2.putText(frame, str(count), (75, 20), cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 0), 1)
    print("----------------------------")
    cv2.imshow('frame', frame)  # show the annotated frame
    #cv2.imshow('frame2', erode)  # show foreground/background in black and white
    #out.write(frame)
    return rect_array
colour=((0, 205, 205),(154, 250, 0),(34,34,178),(211, 0, 148),(255, 118, 72),(137, 137, 139))  # rectangle colors, cycled by detection index
fgbg = cv2.createBackgroundSubtractorMOG2()  # Gaussian-mixture background subtraction model
#fourcc = cv2.VideoWriter_fourcc(*'XVID')  # output video codec (disabled)
#out = cv2.VideoWriter(datetime.datetime.now().strftime("%A_%d_%B_%Y_%I_%M_%S%p")+'.avi',fourcc, 10.0, (768,576))  # resolution must match the source video
#frame=cv2.imread("images/26.jpg")
if __name__ == '__main__' :
    # cap = cv2.VideoCapture(0)  # 0 opens the default camera; a filename opens a video file
    cap = cv2.VideoCapture("images/face2.mp4")
    while True:
        ret, frame = cap.read()  # grab the next frame
        if not ret:
            # BUG FIX: at end of video cap.read() returns (False, None); the
            # original passed None into imshow/trackmove and crashed.
            break
        cv2.imshow("live", frame)
        trackmove(frame)
        k = cv2.waitKey(30) & 0xff  # press ESC to quit
        if k == 27:
            break
    #out.release()  # release the output file (disabled)
    cap.release()
    cv2.waitKey()
    cv2.destroyAllWindows()  # BUG FIX: was misspelled 'destoryAllWindows', an AttributeError
import logging
from agents import dp, greedy
from decks.deck_factory import Deck
from env import SetteMezzoEnv, Player
# Module-level logger for game progress messages.
logger = logging.getLogger('sette-mezzo')
# Game-tree / agent parameters.
depth = 4
limit = 4
deck = Deck()
logger.info('Deck %s', deck)
# Player 0 uses the dynamic-programming agent; player 1 is the bookmaker
# standing at a fixed limit.
players = {Player(0): dp.DpAgent(),
           Player(1, limit=limit): greedy.BookmakerAgent(limit=limit)}
agents = list(players.values())
state = SetteMezzoEnv(list(players.keys()), deck, depth)
# Draw initial player cards
state.apply_action(None)
state.apply_action(None)
# Playing the game
while not state.is_terminal():
    agent = agents[state.current_player.id]
    action = agent.step(state)
    logger.info('Player %s action %s', state.current_player.id, action)
    state.apply_action(action)
returns = state.returns()
# NOTE(review): 'winned' kept as-is (runtime string); likely meant 'won'.
end = 'winned' if returns[0] > returns[1] else 'tied' if returns[0] == returns[1] else 'lost'
logger.info(f"Returns: {returns}, you {end}")
|
# Generated by Django 3.0.4 on 2020-04-02 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make every numeric counter column of CasoCovid a nullable FloatField."""

    dependencies = [
        ('covidecapi', '0005_auto_20200402_1047'),
    ]

    # One AlterField per counter column; all switch to optional floats.
    operations = [
        migrations.AlterField(
            model_name='casocovid',
            name=column,
            field=models.FloatField(blank=True, null=True),
        )
        for column in ('activos', 'confirmados', 'muertos',
                       'nuevos', 'recuperados', 'total')
    ]
|
import unittest
from tasks.JsonConverter.Encoder import to_json
from json import dumps
class TestJsonEncoder(unittest.TestCase):
    """Checks that to_json matches stdlib json.dumps for each JSON type."""

    def test_list(self):
        list_to_json = [74, True, False, None, [1, 2], {"key": 4}]
        # assertEqual reports a useful diff on failure, unlike assertTrue(a == b).
        self.assertEqual(to_json(list_to_json), dumps(list_to_json))

    def test_dict(self):
        dict_to_json = {1: "one", 2: 'two'}
        self.assertEqual(to_json(dict_to_json), dumps(dict_to_json))

    def test_bool(self):
        self.assertEqual(to_json(True), dumps(True))

    def test_none(self):
        self.assertEqual(to_json(None), dumps(None))

    def test_string(self):
        self.assertEqual(to_json("str"), dumps("str"))

    def test_integer(self):
        self.assertEqual(to_json(50), dumps(50))

    def test_float(self):
        self.assertEqual(to_json(3.4), dumps(3.4))
|
from flask_wtf import FlaskForm
from wtforms import validators, SelectField, SubmitField, TextAreaField
from wtforms.widgets import TextArea
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired
import datetime
class gradePerformanceform(FlaskForm):
    """Form for grading a student's lesson performance on six 1-10 scales."""
    # choices='' looks like a placeholder; presumably populated dynamically in
    # the view before rendering — TODO confirm against the caller.
    lesson_id = SelectField(label='Last Lesson', choices='',validators=[DataRequired()])
    # (value, label) pairs 1..10 shared by all grading fields below.
    formChoice = [(i, str(i)) for i in range(1,11)]
    technique = SelectField(label='Technique', choices=formChoice,validators=[DataRequired()], default=5)
    ukemi = SelectField(label='Ukemi', choices=formChoice,validators=[DataRequired()], default=5)
    discipline = SelectField(label='discipline', choices=formChoice,validators=[DataRequired()], default=5)
    coordination = SelectField(label='coordination', choices=formChoice,validators=[DataRequired()], default=5)
    knowledge = SelectField(label='knowledge', choices=formChoice,validators=[DataRequired()], default=5)
    spirit = SelectField(label='spirit', choices=formChoice,validators=[DataRequired()], default=5)
    submit = SubmitField('Submit')
class performanceRemarkform(FlaskForm):
    """Form for adding a dated free-text remark about a student's performance."""
    remark = TextAreaField(label='Remark', widget=TextArea(), validators=[DataRequired()])
    # BUG FIX: default was datetime.date.today() — evaluated once at import
    # time, so a long-running server kept serving a stale date. Passing the
    # callable makes WTForms evaluate it per request.
    date = DateField(label='Date of remark',
                     default=datetime.date.today, validators=[DataRequired()])
    submit = SubmitField('Submit')
|
# Example 3
class Person:
    """Demonstrates private attributes hidden behind accessor methods."""

    # Double leading underscores trigger name mangling, keeping these
    # attributes private to the class.
    __name = ""
    __address = ""

    def __init__(self, giveName, givenAddress):
        """Store the given name and address on the new instance."""
        self.__name = giveName
        self.__address = givenAddress

    def output_name(self):
        """Return the stored name."""
        return self.__name

    def output_address(self):
        """Return the stored address."""
        return self.__address

    def set_name(self, newName):
        """Replace the stored name."""
        self.__name = newName

    def set_address(self, newAddress):
        """Replace the stored address."""
        self.__address = newAddress
# Demo: name-mangled attributes are not reachable from outside the class.
p3 = Person("Abigail", "Somewhere in London")
# BUG FIX: the next line raised AttributeError and aborted the script before
# the working accessor call below could run; kept for reference only.
# print(p3.__name)  # AttributeError: 'Person' object has no attribute '__name'
print(p3.output_name())  # Abigail
|
class job:
    """A named job with a monetary value and its attached expense categories."""

    def __init__(self, name, value = 0):
        self.name = name              # human-readable job name
        self.value = value            # monetary value; defaults to 0
        self.categorie_list = []      # categorie instances belonging to this job
class categorie:
    """Tracks amounts owed and paid (each with a date) for one expense category."""

    def __init__(self, name):
        self.name = name
        self.amount = []       # amounts charged
        self.date = []         # dates matching self.amount
        self.paid = []         # amounts paid
        self.paid_date = []    # dates matching self.paid

    def add(self, amount, date):
        """Record a charged amount on the given date."""
        self.amount.append(amount)
        self.date.append(date)

    def add_paid(self, amount, date):
        """Record a payment on the given date."""
        self.paid.append(amount)
        self.paid_date.append(date)

    def total(self):
        """Return the sum of all charged amounts."""
        # sum() replaces the original manual accumulation loop.
        return sum(self.amount)

    def total_paid(self):
        """Return the sum of all payments."""
        return sum(self.paid)

    def __str__(self):
        return f"Name: {self.name} \ntotal: {self.total()} \nowe: {self.total() - self.total_paid()}"
|
from introspective_api import generics
from introspective_api.response import ApiResponse
from dynamic_widgets.editor import models as local_models
from dynamic_widgets.editor.api.dynamic_content import serializers as model_serializers
from dynamic_widgets.settings import dynamic_widgets_settings
api_endpoint = dynamic_widgets_settings.API_ROOT
class DynamicContentVersionSerializer(model_serializers.DynamicContentVersionSerializer):
    """Staff-namespaced variant of the base serializer (URLs resolve under api:staff)."""
    class Meta(model_serializers.DynamicContentVersionSerializer.Meta):
        view_namespace = "api:staff"
class DynamicContentList(generics.ListCreateAPIView):
    """
    API endpoint that represents a list of entities.

    Lists (and creates) the latest DynamicContentVersion objects, optionally
    filtered by the content_identifier URL kwarg.
    """
    model = local_models.DynamicContentVersion
    serializer_class = DynamicContentVersionSerializer
    #paginate_by = 10
    slug_url_kwarg = "content_identifier"
    slug_field = "content_identifier"
    def get_queryset(self):
        # Filter by identifier when the URL supplies one; otherwise list all.
        content_identifier = self.kwargs.get('content_identifier', None)
        if content_identifier:
            return self.model.latest_objects.filter(content_identifier=content_identifier)
        else:
            return self.model.latest_objects.all()
class OrigDynamicContentDetail(generics.RetrieveUpdateAPIView):
    """Retrieve/update a single DynamicContentVersion addressed by its orig_uuid."""
    model = local_models.DynamicContentVersion
    slug_url_kwarg = "orig_uuid"
    slug_field = "orig_uuid"
    def get_queryset(self):
        return self.model.latest_objects.all()
class DynamicContentDetail(generics.RetrieveUpdateAPIView):
    """Retrieve/update a DynamicContentVersion by uuid; PUT with
    action=release publishes the addressed version."""
    model = local_models.DynamicContentVersion
    pk_url_kwarg = "uuid"
    def get_queryset(self): # to release the most recent one
        return self.model.latest_objects.all()
    def put(self, request, *args, **kwargs):
        if request.DATA.get('action', None) == 'release':
            dyn_content = self.get_object()
            # NOTE(review): release_dyn_content is neither defined nor imported
            # in this module — this branch raises NameError as written; confirm
            # where it is meant to come from.
            if release_dyn_content(dyn_content):
                return ApiResponse({"msg": "done"})
            else:
                return ApiResponse({"msg": "error"}, status = 500)
        return ApiResponse({"msg": "what to do?"}, status = 400)
# Wire the views into the staff API root.
# Detail by version uuid.
api_endpoint.register_endpoint(
    root_name = 'staff',
    endpoint_url = "dynamic_content/(?P<uuid>[0-9\-a-zA-Z]*)",
    view = DynamicContentDetail,
    name = 'dynamiccontentversion-detail'
)
# Detail by the original version's uuid.
api_endpoint.register_endpoint(
    root_name = 'staff',
    endpoint_url = "dynamic_content/orig/(?P<orig_uuid>[0-9\-a-zA-Z]*)",
    view = OrigDynamicContentDetail,
    name = 'dynamiccontentversion-detail'
)
# Version history filtered by content identifier.
api_endpoint.register_endpoint(
    root_name = 'staff',
    endpoint_url = "dynamic_content/(?P<content_identifier>[0-9:_\-a-zA-Z]*)/versions",
    view = DynamicContentList,
    name = 'dynamiccontentversion-list-filtered'
)
# Unfiltered list / create endpoint.
api_endpoint.register_endpoint(
    root_name = 'staff',
    endpoint_url = "dynamic_content",
    view = DynamicContentList,
    name = 'dynamiccontentversion-list'
)
# Generated by Django 3.2 on 2021-04-23 16:15
from django.db import migrations
from django.contrib.auth.hashers import make_password
import random
import decimal
def populate_db(apps, schema_editor):
    """Seed the catalogue app with an admin user, a normal user and 50
    products carrying random two-decimal prices."""
    User = apps.get_model('catalogue', 'User')
    User(username="admin", password=make_password("admin"),
         email="admin@catalogue.com", is_superuser=True, is_staff=True).save()
    User(username="user", password=make_password("catalogue123"),
         email="user@catalogue.com").save()
    Product = apps.get_model('catalogue', 'Product')
    for n in range(1, 51):
        # Price in [1.55, 389.66], always with exactly two decimals.
        price = decimal.Decimal(random.randrange(155, 38967)) / 100
        Product(name=f'{n}-Product', price=f'{price}').save()
class Migration(migrations.Migration):
    """Data migration: seed initial users and demo products."""
    dependencies = [
        ('catalogue', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(populate_db),
    ]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 2 19:37:59 2018
@author: deepak
"""
from sklearn.preprocessing import MinMaxScaler
from flask import jsonify, make_response, request, current_app
import numpy as np
from flask import Flask
import keras
from keras.models import model_from_json
app = Flask(__name__)


@app.route('/direct',methods=['GET','OPTIONS'])
def index():
    """Predict the next stock value from the saved 60-step window, slide the
    window forward with the prediction, and return it as JSON."""
    try:
        window = np.load('test3.npy')
        window = window.reshape(60)
        # Rebuild the trained Keras model from its JSON architecture + weights.
        json_file = open('model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        loaded_model.load_weights("model.h5")
        k = loaded_model.predict(window.reshape(1, 60, 1))[0][0]
        # Slide the window: drop the oldest value, append the new prediction.
        new_stock = []
        for i in range(1, len(window)):
            new_stock.append(window[i])
        new_stock.append(float(k))
        new_stock = np.array(new_stock)
        np.save('test3.npy', new_stock.reshape(1, 60, 1))
        # ('sucess' typo kept: existing clients may already key on it)
        return jsonify({'result': str(k), 'results': 'sucess'})
    except Exception as e:
        # BUG FIX: 'except Exception, e' is Python-2-only syntax, and returning
        # the raw exception object is not a valid Flask response body.
        return str(e)
# Dev entry point.
if __name__ == '__main__':
    app.run(debug=True,port=8011)  # debug server on port 8011 (not for production)
import sims4.log
from contextlib import contextmanager
logger = sims4.log.Logger('Profiler')

if __profile__:
    # Profiling build: bind directly to the native profiler hooks.
    import _profiler
    begin_scope = _profiler.begin_scope
    end_scope = _profiler.end_scope
    enable_profiler = _profiler.begin
    disable_profiler = _profiler.end
    flush = _profiler.flush
else:
    # Non-profiling build: stubs that warn (or no-op) instead of profiling.
    def enable_profiler(*args, **kwargs):
        logger.error('__profile__ is not set. Did you forget to pass in the command line argument?')

    def disable_profiler(*args, **kwargs):
        pass

    def begin_scope(*args, **kwargs):
        logger.error('__profile__ is not set. Did you forget to pass in the command line argument?')

    # BUG FIX: this stub was previously declared as 'begin_scope' a second
    # time, leaving 'end_scope' undefined (NameError) when profiling is off.
    def end_scope(*args, **kwargs):
        logger.error('__profile__ is not set. Did you forget to pass in the command line argument?')
@contextmanager
def scope(name):
    """Context manager wrapping a block in a named profiler scope.

    No-op (beyond the stub warnings) when __profile__ is not set; end_scope is
    always closed via finally even if the body raises.
    """
    if __profile__:
        sims4.profiler.begin_scope(name)
    try:
        yield None
    finally:
        if __profile__:
            sims4.profiler.end_scope()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""--- Day 1: Not Quite Lisp ---
Santa was hoping for a white Christmas, but his weather machine's "snow"
function is powered by stars, and he's fresh out! To save Christmas, he needs
you to collect fifty stars by December 25th.
Collect stars by helping Santa solve puzzles. Two puzzles will be made available
on each day in the advent calendar; the second puzzle is unlocked when you
complete the first. Each puzzle grants one star. Good luck!
Here's an easy puzzle to warm you up.
Santa is trying to deliver presents in a large apartment building, but he can't
find the right floor - the directions he got are a little confusing. He starts
on the ground floor (floor 0) and then follows the instructions one character
at a time.
An opening parenthesis, (, means he should go up one floor, and a closing
parenthesis, ), means he should go down one floor.
The apartment building is very tall, and the basement is very deep; he will
never find the top or bottom floors.
For example:
(()) and ()() both result in floor 0.
((( and (()(()( both result in floor 3.
))((((( also results in floor 3.
()) and ))( both result in floor -1 (the first basement level).
))) and )())()) both result in floor -3.
To what floor do the instructions take Santa?
--- Part Two ---
Now, given the same instructions, find the position of the first character that
causes him to enter the basement (floor -1). The first character in the
instructions has position 1, the second character has position 2, and so on.
For example:
) causes him to enter the basement at character position 1.
()()) causes him to enter the basement at character position 5.
What is the position of the character that causes Santa to first enter the
basement?
"""
import collections
import sys
import click
def get_floor_number(text):
    """Returns the destination floor number: '(' goes up one, ')' goes down one."""
    return text.count('(') - text.count(')')
def get_first_negative_floor(text):
    """Return the 1-based position (counting only paren characters) at which
    the running floor first goes below zero, or None if it never does."""
    balance = 0
    position = 0
    for char in text:
        if char not in '()':
            continue  # non-instruction characters do not advance the position
        position += 1
        balance += 1 if char == '(' else -1
        if balance < 0:
            return position
    return None
def calculate_solution_1(data):
    """Part 1: the final floor the instructions lead to."""
    return get_floor_number(data)
def calculate_solution_2(data):
    """Part 2: the position of the first character that enters the basement."""
    return get_first_negative_floor(data)
@click.command()
@click.option('--source_file', default='data/01.txt',
              help='source data file for problem')
def main(source_file):
    """Simple solution to adventofcode problem 1."""
    data = ''
    with open(source_file) as source:
        data = source.read()
    print('Destination floor is {}.'.format(get_floor_number(data)))
    print('First negative floor is {}'.format(get_first_negative_floor(data)))


if __name__ == "__main__":
    # click handles argument parsing; exit with its return code.
    sys.exit(main())
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from io import StringIO
import ldif
def get_gluevalidator_summary(r):
    """Returns a dictionary with the (errors, warnings, info) counters.

    Scans the validator output for the first line containing '|', and parses
    the text after it as ';'-separated 'key=value' pairs. Returns {} if no
    such line exists.
    """
    d = {}
    for line in r.split('\n'):
        # Raw string avoids an invalid-escape-sequence warning for '\|'.
        m = re.search(r"(?<=\|).+", line)
        if m:
            d = dict([elem.strip().split('=')
                      for elem in m.group(0).split(';')])
            break
    return d
def ldifize(ldap_result):
    """Writes ldap's query result in LDIF format and returns it as a string.

    ldap_result: iterable of (dn, attrs) pairs as returned by an LDAP search.
    """
    # BUG FIX: the module does 'from io import StringIO', so the original
    # 'StringIO.StringIO()' raised AttributeError; call the class directly.
    out = StringIO()
    # One writer reused for every entry instead of rebuilding it per record.
    ldif_writer = ldif.LDIFWriter(out)
    for dn, attrs in ldap_result:
        ldif_writer.unparse(dn, attrs)
    return out.getvalue()
def validate_version(v):
    """Return *v* if it is a dotted version string (e.g. '1.2.3'), else False."""
    try:
        # Raw string avoids invalid-escape-sequence warnings for '\w' / '\.'.
        return re.search(r"^\w+(\.[\w-]+)+$", v).group(0)
    except AttributeError:
        # re.search returned None: the string did not match.
        return False
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
# NOTE(review): django.conf.urls.patterns() and string view references were
# removed in Django 1.10 — this file targets a legacy Django release.
urlpatterns = patterns(
    '',
    url(r'^$', 'play.views.webplay', name='web_play'),
    url(r'^admin/', include(admin.site.urls)),
    (r'^download/pins/', 'pins.views.download_pins'),
    url(r'^traffic/$', 'traffiq.views.report'),
    url(r'^emmob/$', 'emmob.views.report'),
    url(r'^map/$', 'traffiq.views.map'),
    url(r'^markers/$', 'traffiq.views.get_markers'),
)
# Authentication views with the common 'django.contrib.auth.views' prefix.
urlpatterns += patterns(
    'django.contrib.auth.views',
    url(r'^accounts/login/$', 'login', name='site_login'),
    url(r'^accounts/logout/$', 'logout',
        {'next_page': '/'}, name='site_logout'),
)
|
#!/usr/bin/python
"""Report authorization information."""
import datetime as dt
import argparse
import json
import wrapper
import os
_KEY = "->"
_NA = "n/a"
_DENY = "denied"
def _new_key(user, mac):
"""Create a key."""
return "{}{}{}".format(user, _KEY, mac)
def _file(day_offset, auth_info, logs):
    """Scan one day's trace log, updating auth_info in place.

    day_offset: string date suffix of the log file; also recorded in
        auth_info as the "last seen" value.
    auth_info: dict mapping 'user->mac' keys to their last-auth status.
    logs: directory containing trace.log.<date> files.
    """
    # Maps each request uuid to the 'user->mac' key parsed from its request.
    uuid_log = {}
    file_name = os.path.join(logs, "trace.log.{}".format(day_offset))
    if not os.path.exists(file_name):
        print("{} does not exist".format(file_name))
        return
    with open(file_name, 'r') as f:
        for l in f:
            parts = l.split("->")
            uuid = parts[0].split(":")[3].strip()
            data = parts[1]
            # NOTE(review): presumably accepts carry Tunnel-Type attributes
            # and "('Response', 2)" marks a response packet — confirm against
            # the freepydius log format.
            is_accept = "Tunnel-Type" in data
            is_response = "('Response', 2)" in data
            if is_accept or is_response:
                if uuid in uuid_log:
                    user = uuid_log[uuid]
                    auth_cur = auth_info[user]
                    if auth_cur != day_offset:
                        # Record the most recent day this key authenticated.
                        auth_info[user] = day_offset
                        if is_accept:
                            # '?' appears to flag an accept with tunnel
                            # attributes — TODO confirm intended meaning.
                            auth_info[user] += "?"
            else:
                # Request side: extract user and calling MAC for this uuid.
                if "User-Name" in data:
                    idx = data.index("User-Name") + 13
                    user_start = data[idx:]
                    user_start = user_start[:user_start.index(")") - 1]
                    calling = None
                    if "Calling-Station-Id" in data:
                        calling_station = data.index("Calling-Station-Id") + 22
                        calling = data[calling_station:]
                        calling = calling[:calling.index(")") - 1]
                        # Normalize the MAC: strip separators, lowercase.
                        calling = calling.replace(":",
                                                  "").replace("-",
                                                              "").lower()
                    key = _new_key(user_start, calling)
                    uuid_log[uuid] = key
                    if key not in auth_info:
                        # First sighting of an unknown key: mark it denied.
                        auth_info[key] = "{} ({})".format(_DENY,
                                                          day_offset)
def main():
    """Accept/reject reporting.

    Builds a markdown table of user/mac pairs from the network config and the
    last N days of trace logs, ordering denied keys first, then never-seen
    ('n/a') keys, then the rest. Prints the table or writes it to --output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--days", type=int, default=10)
    parser.add_argument("--config",
                        type=str,
                        default="/etc/raddb/mods-config/python/network.json")
    parser.add_argument("--output",
                        type=str,
                        default=None)
    parser.add_argument("--logs",
                        type=str,
                        default="/var/log/radius/freepydius")
    args = parser.parse_args()
    config = None
    authd = {}
    # Seed every configured user/mac pair as 'n/a' (never authenticated).
    with open(args.config) as f:
        j = json.loads(f.read())
        users = j[wrapper.USERS]
        for u in users:
            for m in users[u][wrapper.MACS]:
                k = _new_key(u, m)
                authd[k] = _NA
    # Scan the last --days log files from oldest to newest so the newest
    # observation wins.
    today = dt.date.today()
    for x in reversed(range(1, args.days + 1)):
        _file("{}".format(today - dt.timedelta(days=x)), authd, args.logs)
    lines = []
    lines.append("| user | mac | last |")
    lines.append("| --- | --- | --- |")
    # Bucket the keys: denied first, then never-seen, then everything else.
    denied = []
    cruft = []
    rest = []
    for item in sorted(authd.keys()):
        val = authd[item]
        if _NA in val:
            cruft.append(item)
        elif _DENY in val:
            denied.append(item)
        else:
            rest.append(item)
    for item in denied + cruft + rest:
        on = authd[item]
        parts = item.split(_KEY)
        if on is None:
            on = ""
        lines.append("| {} | {} | {} |".format(parts[0], parts[1], on))
    if args.output is None:
        for l in lines:
            print(l)
    else:
        with open(args.output, 'w') as f:
            f.write("\n".join(lines))


if __name__ == "__main__":
    main()
|
from PIL import Image
from core.NST import NST
from core.preprocessor import Preprocessor
##### hyperparameters #####
no_iter = 50   # number of optimization iterations per generated image
alpha = 0.2    # content-cost weight
beta = 0.8     # style-cost weight
def setup_nst() -> None:
    """Load the VGG-19 weights and configure the NST cost weights.

    FIX: the annotation previously claimed '-> NST' although the function
    returns nothing; both calls mutate NST's class-level state in place.
    """
    NST.initialize('imagenet-vgg-verydeep-19.mat')
    NST.set_cost_weights(alpha=alpha, beta=beta)
def nst_handler(c_image: Image, s_image: Image) -> Image:
    """Run neural style transfer: blend content image c_image with the style
    of s_image and return the post-processed generated image."""
    processed_c = Preprocessor.transform(c_image)
    processed_s = Preprocessor.transform(s_image)
    # no_iter is the module-level iteration budget configured above.
    g_img, _ = NST.generate(processed_c, processed_s, no_iter=no_iter)
    return Preprocessor.post_process(g_img)
import os
import sys
import glob
# Every tag tag_lines() may emit: content tags, block delimiters, and
# heading1..heading6.
VALID_TAGS = tuple('natural caption blank_line attribute source_header block_header code anchor image_link'.split() +
                   'block_start block_end code_start code_end natural_start natural_end'.split() +
                   ['heading{}'.format(i) for i in range(1, 7)])
# Tags considered human-readable text by default.
INCLUDE_TAGS = ('natural', 'caption', 'heading1', 'heading2', 'heading3', 'heading4', 'heading5')
def get_lines(file_path):
    r""" Retrieve text lines from the manuscript Chapter*.asc and Appendix*.asc files

    Args:
        file_path (str): Path to directory containing manuscript asciidoc files
            i.e.: /Users/cole-home/repos/nlpinaction/manuscript/

    Returns:
        list of lists of str, one list for each Chapter or Appendix
    """
    sections = []
    # Chapters first, then appendices — same order the original relied on.
    for pattern in ('Chapter*', 'Appendix*'):
        for name in glob.glob(os.path.join(file_path, pattern)):
            with open(name, 'r') as fin:
                sections.append(fin.readlines())
    return sections
def tag_lines(lines):
    r""" Naively tags lines from manuscript with: code, natural, heading, etc.

    Args:
        lines (list of str): raw asciidoc lines of one chapter/appendix.

    Returns:
        list of tuples [(tag, stripped_line), ...]

    NOTE(review): the doctest below was corrected — after a '[source,...]'
    header the opening '====' is tagged 'code_start' (current_block_type is
    'code' at that point), not 'block_start' as previously documented. The
    closing delimiter is tagged 'block_end' because current_block_type is reset
    to None *before* the end tag is computed, so 'code_end'/'natural_end' are
    never emitted — possibly a latent bug; confirm intended behavior.

    >>> VALID_TAGS
    ('natural',
     'caption',
     'blank_line',
     'attribute',
     'source_header',
     'block_header',
     'code',
     'anchor',
     'image_link',
     'block_start',
     'block_end',
     'code_start',
     'code_end',
     'natural_start',
     'natural_end',
     'heading1',
     'heading2',
     'heading3',
     'heading4',
     'heading5',
     'heading6')
    >>> tag_lines('|= Title| :chapter: 0|Hello|cruel world|==Heading Level 2| \t| [source,bash]|====|$ grep this|====|'.split('|'))
    [('blank_line', ''),
     ('heading1', '= Title'),
     ('attribute', ':chapter: 0'),
     ('natural', 'Hello'),
     ('natural', 'cruel world'),
     ('heading2', '==Heading Level 2'),
     ('blank_line', ''),
     ('source_header', '[source,bash]'),
     ('code_start', '===='),
     ('code', '$ grep this'),
     ('block_end', '===='),
     ('blank_line', '')]
    """
    current_block_type = None   # 'code' or 'natural' while inside a delimited block
    open_block = False
    block_terminator = None     # two-char delimiter ('==', '--') or '\n' when implicit
    block_start = 0             # index of the line that opened the current block
    tup_lines = []
    for idx, line in enumerate(lines):
        normalized_line = line.lower().strip().replace(" ", "")
        if not normalized_line:
            tag = 'blank_line'
        elif normalized_line[0] in r'/:':
            # comment ('//...') or attribute (':name: value') lines
            tag = 'attribute'
        elif normalized_line.startswith('[source'):
            current_block_type = 'code'
            block_start = idx
            open_block = True
            tag = 'source_header'
        elif normalized_line[:4] in ('[tip', '[not', '[imp', '[quo'):
            # admonition headers open natural-language blocks
            current_block_type = 'natural'
            block_start = idx
            open_block = True
            tag = 'block_header'
        elif open_block and idx == block_start + 1:
            # first line after a block header: either a delimiter or content
            if not normalized_line.startswith('--') and not normalized_line.startswith('=='):
                # no explicit delimiter: the block ends at the next blank line
                block_terminator = '\n'
                tag = current_block_type
            else:
                block_terminator = normalized_line[:2]
                tag = (current_block_type or 'block') + '_start'
        elif open_block and normalized_line[:2] == block_terminator:
            # closing delimiter: reset all block state
            current_block_type = None
            open_block = False
            block_terminator = None
            block_start = 0
            tag = (current_block_type or 'block') + '_end'
        elif open_block and current_block_type == 'code':
            tag = 'code'
        elif normalized_line.startswith('='):
            # heading level = number of '=' characters on the line
            tag = 'heading'
            tag += str(len([c for c in normalized_line if c == '=']))
        elif normalized_line.startswith('.'):
            tag = 'caption'
        elif normalized_line.startswith('image:'):
            tag = 'image_link'
        elif normalized_line.startswith('[['):
            tag = 'anchor'
        else:
            tag = 'natural'
            current_block_type = None
        tup_lines.append((tag, line.strip()))
    return tup_lines
def main(book_dir='.',
         include_tags=INCLUDE_TAGS,
         verbose=True):
    """Tag every manuscript section; when verbose, print each line whose tag
    is in include_tags. Returns the list of tagged sections."""
    sections = [tag_lines(chapter) for chapter in get_lines(book_dir)]
    if verbose:
        for tagged_lines in sections:
            for tag, text in tagged_lines:
                if tag in include_tags:
                    print(text)
    return sections
if __name__ == '__main__':
    # CLI: first arg = manuscript directory, remaining args = tags to print
    # (defaults to 'natural' text only).
    args = sys.argv[1:]
    book_dir = os.path.curdir
    if args:
        book_dir = args[0]
    include_tags = ['natural']
    if len(args) > 1:
        include_tags = list(args[1:])
    main(book_dir=book_dir, include_tags=include_tags, verbose=True)
|
import cv2
import mediapipe as mp
import time
import math
import numpy as np
class poseDetector():
    """Thin wrapper around MediaPipe Pose for detection, pixel-coordinate
    landmark extraction, and joint-angle measurement on BGR OpenCV frames."""
    def __init__(self, mode = False, upBody =False, smooth = True, detectionCon= 0.5, trackCon = 0.5):
        # Parameters mirror mp.solutions.pose.Pose's constructor arguments.
        self.mode = mode
        self.upBody = upBody
        self.smooth = smooth
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpDraw = mp.solutions.drawing_utils
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(self.mode, self.upBody, self.smooth, self.detectionCon, self.trackCon)
    def findPose(self, img, draw = True):
        """Run pose detection on img, optionally drawing the skeleton; returns img.
        Stores the raw results on self.results for findPosition()."""
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
        self.results = self.pose.process(imgRGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)
        return (img)
    def findPosition(self, img, draw = True):
        """Return [[id, x_px, y_px], ...] for the landmarks found by the most
        recent findPose() call (must be called after findPose)."""
        self.lmList = []
        if self.results.pose_landmarks:
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                h, w, c = img.shape
                # landmark coordinates are normalized [0,1]; convert to pixels
                cx, cy = int(lm.x*w), int(lm.y*h)
                self.lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
        return(self.lmList)
    def findAngle(self, img, p1, p2, p3, draw = True):
        """Return the angle in degrees at landmark p2 formed by p1-p2-p3,
        optionally annotating img; requires a prior findPosition() call."""
        #Get the landmarks
        x1, y1 = self.lmList[p1][1:]
        x2, y2 = self.lmList[p2][1:]
        x3, y3 = self.lmList[p3][1:]
        # Calculate the Angle
        angle = math.degrees(math.atan2(y3 - y2, x3 -x2) - math.atan2(y1-y2 , x1 -x2))
        # NOTE(review): 'angle += 0' is a no-op, leaving negative angles
        # unchanged — likely intended 'angle += 360'; confirm before fixing.
        if angle < 0:
            angle += 0
            angle1 = angle
        # NOTE(review): ad-hoc correction for angles above 299; intent unclear.
        if angle > 299:
            angle += -260
            angle1 = angle
        #Draw
        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (255,255,255), 3)
            cv2.line(img, (x3, y3), (x2, y2), (255,255,255), 3)
            cv2.circle(img, (x1, y1), 10, (255, 0, 0), cv2.FILLED)
            cv2.circle(img, (x1, y1), 15, (255, 0, 0), 2)
            cv2.circle(img, (x2, y2), 10, (255, 0, 0), cv2.FILLED)
            cv2.circle(img, (x2, y2), 15, (255, 0, 0), 2)
            cv2.circle(img, (x3, y3), 10, (255, 0, 0), cv2.FILLED)
            cv2.circle(img, (x3, y3), 15, (255, 0, 0), 2)
            cv2.putText(img, str(int(angle)), (x2 - 20, y2 +50), cv2.FONT_HERSHEY_PLAIN, 2.5, (0, 0, 255), 2 )
        return angle
def main():
    """Demo: run pose detection on a video file and display annotated frames with FPS."""
    cap = cv2.VideoCapture('../Pose_Estimate/1.mp4')
    pTime = 0
    detector = poseDetector()
    while True:
        success, img = cap.read()
        if not success:
            # BUG FIX: the original ignored `success` and crashed inside
            # findPose() on the None frame returned at end of stream.
            break
        img = detector.findPose(img)
        lmList = detector.findPosition(img)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (70 ,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0),3)
        cv2.imshow("Image", img)
        # Allow the user to quit with 'q' instead of looping forever.
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    # Release the capture and window resources.
    cap.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
main() |
from unittest import TestCase
from task_1 import quick_sort
""" Only look at the cases with numbers. Strings, Tuples etc. can be ignored.
Cases that have to be tested are:
- Empty list - should return a empty list
- single values - should return a list with the same single value
- unsorted values - should return a list with sorted values
- None
- List of Floats and integers"""
class Task1Test(TestCase):
    """
    Task 1: Quicksort — unit tests for quick_sort.
    """

    def test_empty_list(self):
        # Tests if a sorted empty list is returned as empty list.
        # FIX: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual([], quick_sort([]), "A empty list must return empty list")

    def test_none_list(self):
        # Tests if a list containing None returns None.
        self.assertEqual([None], quick_sort([None]), "A list containing nothing should return a list containing nothing!")

    def test_returns_sorted_list(self):
        # Test if an unsorted list is returned sorted (ascending).
        # BUG FIX: sorted() has no `reversed` keyword (TypeError), and descending
        # order was wrong anyway — compare against the ascending sort.
        self.assertEqual(sorted([6, 7, 4, 3, 2]), quick_sort([6, 7, 4, 3, 2]),
                         "Your code does not return a sorted list.")

    def test_single_value_list(self):
        # Tests if a list with one value is returned unchanged.
        # BUG FIX: the original assertion compared nothing (assertEquals(quick_sort(self))).
        self.assertEqual([5], quick_sort([5]))
if __name__ == '__main__':
    # BUG FIX: only TestCase is imported at the top of this file, so the bare
    # `unittest.main()` raised NameError; import the module locally here.
    import unittest
    unittest.main()
if __name__ == "__main__":
assert quick_sort([-1,2,-3,4,-1,4,5]) == [-3,-1,-1,2,4,4,5]
assert quick_sort([2,2,2]) == [2,2,2]
assert quick_sort([2.1, 4.3, 1.2, 3,5,2.1]) == [1.2,2.1,2.1,3.5,4.3]
|
import argparse
import re
import os
import wget
def main():
    """Parse the command line and start scanning the log for downloadable files."""
    parser = argparse.ArgumentParser(description="Script using to looking for extended files")
    parser.add_argument("--log_name", help="name files where is extended files", dest="log_name", required=True)
    parser.add_argument("--directory", help="name folder where will be downloading files", dest="directory", required=True)
    parsed = parser.parse_args()
    look_for(parsed.log_name, parsed.directory)
def look_for(log, direct):
    """Read *log* line by line, split each line on commas, and hand every field
    to give_me_url for URL extraction.

    Side effects: creates *direct* if missing and chdirs into it.
    """
    entries = []  # BUG FIX: was named `list`, shadowing the builtin
    with open(log, 'r') as f1:
        # BUG FIX: os.mkdir raised FileExistsError when the folder already
        # existed (and its None return value was pointlessly assigned).
        os.makedirs(direct, exist_ok=True)
        os.chdir(direct)
        direct = os.getcwd()
        for line in f1.readlines():
            entries.append([line])
            for i in line.split(","):
                entries[-1].append(i)
                print(i)
                give_me_url(i, direct)
def give_me_url(i, direct):
    """Extract every URL from the text fragment *i* and forward them
    (space-joined) to download()."""
    pattern = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    urls = re.findall(pattern, i)
    joined = ' '.join(str(u) for u in urls)
    print(joined)
    download(joined, direct)
def download(listToString, direct):
    """Download *listToString* into *direct*, but only when it looks like an RPM package."""
    is_rpm = ".rpm" in listToString
    if is_rpm:
        wget.download(listToString, direct)
main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeInstanceTypesRequest(RpcRequest):
    """RPC request for the ECS DescribeInstanceTypes API (version 2014-05-26).

    Generated SDK boilerplate: each instance-type filter is exposed as a
    get_/set_ pair that reads/writes a query parameter of the same name.
    NOTE: `get_InstanceTypess`/`set_InstanceTypess` (double "s") is the name
    shipped by the SDK's public API and must not be renamed.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeInstanceTypes','ecs')
        self.set_method('POST')
        # Attach endpoint resolution data when the base class supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_GPUSpec(self): # String
        return self.get_query_params().get('GPUSpec')

    def set_GPUSpec(self, GPUSpec): # String
        self.add_query_param('GPUSpec', GPUSpec)

    def get_ResourceOwnerId(self): # Long
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId): # Long
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_MaximumCpuCoreCount(self): # Integer
        return self.get_query_params().get('MaximumCpuCoreCount')

    def set_MaximumCpuCoreCount(self, MaximumCpuCoreCount): # Integer
        self.add_query_param('MaximumCpuCoreCount', MaximumCpuCoreCount)

    def get_MaximumGPUAmount(self): # Integer
        return self.get_query_params().get('MaximumGPUAmount')

    def set_MaximumGPUAmount(self, MaximumGPUAmount): # Integer
        self.add_query_param('MaximumGPUAmount', MaximumGPUAmount)

    def get_LocalStorageCategory(self): # String
        return self.get_query_params().get('LocalStorageCategory')

    def set_LocalStorageCategory(self, LocalStorageCategory): # String
        self.add_query_param('LocalStorageCategory', LocalStorageCategory)

    def get_MaximumMemorySize(self): # Float
        return self.get_query_params().get('MaximumMemorySize')

    def set_MaximumMemorySize(self, MaximumMemorySize): # Float
        self.add_query_param('MaximumMemorySize', MaximumMemorySize)

    def get_InstanceCategory(self): # String
        return self.get_query_params().get('InstanceCategory')

    def set_InstanceCategory(self, InstanceCategory): # String
        self.add_query_param('InstanceCategory', InstanceCategory)

    def get_MinimumInstancePpsTx(self): # Long
        return self.get_query_params().get('MinimumInstancePpsTx')

    def set_MinimumInstancePpsTx(self, MinimumInstancePpsTx): # Long
        self.add_query_param('MinimumInstancePpsTx', MinimumInstancePpsTx)

    def get_MinimumCpuCoreCount(self): # Integer
        return self.get_query_params().get('MinimumCpuCoreCount')

    def set_MinimumCpuCoreCount(self, MinimumCpuCoreCount): # Integer
        self.add_query_param('MinimumCpuCoreCount', MinimumCpuCoreCount)

    def get_MinimumPrimaryEniQueueNumber(self): # Integer
        return self.get_query_params().get('MinimumPrimaryEniQueueNumber')

    def set_MinimumPrimaryEniQueueNumber(self, MinimumPrimaryEniQueueNumber): # Integer
        self.add_query_param('MinimumPrimaryEniQueueNumber', MinimumPrimaryEniQueueNumber)

    def get_MinimumBaselineCredit(self): # Integer
        return self.get_query_params().get('MinimumBaselineCredit')

    def set_MinimumBaselineCredit(self, MinimumBaselineCredit): # Integer
        self.add_query_param('MinimumBaselineCredit', MinimumBaselineCredit)

    def get_MinimumSecondaryEniQueueNumber(self): # Integer
        return self.get_query_params().get('MinimumSecondaryEniQueueNumber')

    def set_MinimumSecondaryEniQueueNumber(self, MinimumSecondaryEniQueueNumber): # Integer
        self.add_query_param('MinimumSecondaryEniQueueNumber', MinimumSecondaryEniQueueNumber)

    def get_MinimumInstanceBandwidthTx(self): # Integer
        return self.get_query_params().get('MinimumInstanceBandwidthTx')

    def set_MinimumInstanceBandwidthTx(self, MinimumInstanceBandwidthTx): # Integer
        self.add_query_param('MinimumInstanceBandwidthTx', MinimumInstanceBandwidthTx)

    def get_MinimumGPUAmount(self): # Integer
        return self.get_query_params().get('MinimumGPUAmount')

    def set_MinimumGPUAmount(self, MinimumGPUAmount): # Integer
        self.add_query_param('MinimumGPUAmount', MinimumGPUAmount)

    def get_MaximumCpuSpeedFrequency(self): # Float
        return self.get_query_params().get('MaximumCpuSpeedFrequency')

    def set_MaximumCpuSpeedFrequency(self, MaximumCpuSpeedFrequency): # Float
        self.add_query_param('MaximumCpuSpeedFrequency', MaximumCpuSpeedFrequency)

    def get_CpuArchitecture(self): # String
        return self.get_query_params().get('CpuArchitecture')

    def set_CpuArchitecture(self, CpuArchitecture): # String
        self.add_query_param('CpuArchitecture', CpuArchitecture)

    def get_OwnerId(self): # Long
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId): # Long
        self.add_query_param('OwnerId', OwnerId)

    def get_MinimumMemorySize(self): # Float
        return self.get_query_params().get('MinimumMemorySize')

    def set_MinimumMemorySize(self, MinimumMemorySize): # Float
        self.add_query_param('MinimumMemorySize', MinimumMemorySize)

    def get_MinimumEniQuantity(self): # Integer
        return self.get_query_params().get('MinimumEniQuantity')

    def set_MinimumEniQuantity(self, MinimumEniQuantity): # Integer
        self.add_query_param('MinimumEniQuantity', MinimumEniQuantity)

    def get_InstanceFamilyLevel(self): # String
        return self.get_query_params().get('InstanceFamilyLevel')

    def set_InstanceFamilyLevel(self, InstanceFamilyLevel): # String
        self.add_query_param('InstanceFamilyLevel', InstanceFamilyLevel)

    def get_MinimumQueuePairNumber(self): # Integer
        return self.get_query_params().get('MinimumQueuePairNumber')

    def set_MinimumQueuePairNumber(self, MinimumQueuePairNumber): # Integer
        self.add_query_param('MinimumQueuePairNumber', MinimumQueuePairNumber)

    def get_MinimumLocalStorageAmount(self): # Integer
        return self.get_query_params().get('MinimumLocalStorageAmount')

    def set_MinimumLocalStorageAmount(self, MinimumLocalStorageAmount): # Integer
        self.add_query_param('MinimumLocalStorageAmount', MinimumLocalStorageAmount)

    def get_MaxResults(self): # Long
        return self.get_query_params().get('MaxResults')

    def set_MaxResults(self, MaxResults): # Long
        self.add_query_param('MaxResults', MaxResults)

    def get_PhysicalProcessorModel(self): # String
        return self.get_query_params().get('PhysicalProcessorModel')

    def set_PhysicalProcessorModel(self, PhysicalProcessorModel): # String
        self.add_query_param('PhysicalProcessorModel', PhysicalProcessorModel)

    def get_MaximumCpuTurboFrequency(self): # Float
        return self.get_query_params().get('MaximumCpuTurboFrequency')

    def set_MaximumCpuTurboFrequency(self, MaximumCpuTurboFrequency): # Float
        self.add_query_param('MaximumCpuTurboFrequency', MaximumCpuTurboFrequency)

    def get_InstanceTypess(self): # RepeatList
        return self.get_query_params().get('InstanceTypes')

    def set_InstanceTypess(self, InstanceTypes): # RepeatList
        # Repeated parameters are sent as InstanceTypes.1, InstanceTypes.2, ...
        for depth1 in range(len(InstanceTypes)):
            self.add_query_param('InstanceTypes.' + str(depth1 + 1), InstanceTypes[depth1])

    def get_MinimumInstancePpsRx(self): # Long
        return self.get_query_params().get('MinimumInstancePpsRx')

    def set_MinimumInstancePpsRx(self, MinimumInstancePpsRx): # Long
        self.add_query_param('MinimumInstancePpsRx', MinimumInstancePpsRx)

    def get_MinimumEniIpv6AddressQuantity(self): # Integer
        return self.get_query_params().get('MinimumEniIpv6AddressQuantity')

    def set_MinimumEniIpv6AddressQuantity(self, MinimumEniIpv6AddressQuantity): # Integer
        self.add_query_param('MinimumEniIpv6AddressQuantity', MinimumEniIpv6AddressQuantity)

    def get_MinimumEriQuantity(self): # Integer
        return self.get_query_params().get('MinimumEriQuantity')

    def set_MinimumEriQuantity(self, MinimumEriQuantity): # Integer
        self.add_query_param('MinimumEriQuantity', MinimumEriQuantity)

    def get_MinimumDiskQuantity(self): # Integer
        return self.get_query_params().get('MinimumDiskQuantity')

    def set_MinimumDiskQuantity(self, MinimumDiskQuantity): # Integer
        self.add_query_param('MinimumDiskQuantity', MinimumDiskQuantity)

    def get_MinimumCpuTurboFrequency(self): # Float
        return self.get_query_params().get('MinimumCpuTurboFrequency')

    def set_MinimumCpuTurboFrequency(self, MinimumCpuTurboFrequency): # Float
        self.add_query_param('MinimumCpuTurboFrequency', MinimumCpuTurboFrequency)

    def get_NextToken(self): # String
        return self.get_query_params().get('NextToken')

    def set_NextToken(self, NextToken): # String
        self.add_query_param('NextToken', NextToken)

    def get_MinimumInstanceBandwidthRx(self): # Integer
        return self.get_query_params().get('MinimumInstanceBandwidthRx')

    def set_MinimumInstanceBandwidthRx(self, MinimumInstanceBandwidthRx): # Integer
        self.add_query_param('MinimumInstanceBandwidthRx', MinimumInstanceBandwidthRx)

    def get_MinimumCpuSpeedFrequency(self): # Float
        return self.get_query_params().get('MinimumCpuSpeedFrequency')

    def set_MinimumCpuSpeedFrequency(self, MinimumCpuSpeedFrequency): # Float
        self.add_query_param('MinimumCpuSpeedFrequency', MinimumCpuSpeedFrequency)

    def get_NvmeSupport(self): # String
        return self.get_query_params().get('NvmeSupport')

    def set_NvmeSupport(self, NvmeSupport): # String
        self.add_query_param('NvmeSupport', NvmeSupport)

    def get_ResourceOwnerAccount(self): # String
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self): # String
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount): # String
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_MinimumInitialCredit(self): # Integer
        return self.get_query_params().get('MinimumInitialCredit')

    def set_MinimumInitialCredit(self, MinimumInitialCredit): # Integer
        self.add_query_param('MinimumInitialCredit', MinimumInitialCredit)

    def get_InstanceTypeFamily(self): # String
        return self.get_query_params().get('InstanceTypeFamily')

    def set_InstanceTypeFamily(self, InstanceTypeFamily): # String
        self.add_query_param('InstanceTypeFamily', InstanceTypeFamily)

    def get_MinimumEniPrivateIpAddressQuantity(self): # Integer
        return self.get_query_params().get('MinimumEniPrivateIpAddressQuantity')

    def set_MinimumEniPrivateIpAddressQuantity(self, MinimumEniPrivateIpAddressQuantity): # Integer
        self.add_query_param('MinimumEniPrivateIpAddressQuantity', MinimumEniPrivateIpAddressQuantity)

    def get_MinimumLocalStorageCapacity(self): # Long
        return self.get_query_params().get('MinimumLocalStorageCapacity')

    def set_MinimumLocalStorageCapacity(self, MinimumLocalStorageCapacity): # Long
        self.add_query_param('MinimumLocalStorageCapacity', MinimumLocalStorageCapacity)
|
#!/usr/bin/env python
"""
iTunes Graph Parser
Parses an iTunes library XML file and generates a JSON file
for use in the D3.js JavaScript library.
Example Track info:
{
'Album': 'Nirvana',
'Persistent ID': 'A50FE1436726815C',
'Track Number': 4,
'Location': 'file://localhost/Users/foo/Music/iTunes/iTunes%20Music/Nirvana/Nirvana/04%20Sliver.mp3',
'File Folder Count': 4,
'Album Rating Computed': True,
'Total Time': 134295,
'Sample Rate': 44100,
'Genre': 'Rock/Alternative',
'Bit Rate': 236,
'Kind': 'MPEG audio file',
'Name': 'Sliver',
'Artist': 'Nirvana',
'Date Added': datetime.datetime(2006, 10, 11, 4, 31, 38),
'Album Rating': 60,
'Rating': 40,
'Date Modified': datetime.datetime(2009, 7, 18, 4, 57, 41),
'Library Folder Count': 1,
'Year': 2002,
'Track ID': 7459,
'Size': 3972838,
'Track Type': 'File',
'Play Count': 2,
'Play Date UTC': datetime.datetime(2009, 7, 18, 5, 00, 00)
}
"""
from __future__ import division
from optparse import OptionParser
from operator import itemgetter
import os
import io
import plistlib
import json
class SetEncoder(json.JSONEncoder):
    """json.JSONEncoder that renders set objects as JSON arrays."""

    def default(self, obj):
        # Anything that is not a set is deferred to the base class,
        # which raises TypeError for unserializable objects.
        if not isinstance(obj, set):
            return json.JSONEncoder.default(self, obj)
        return list(obj)
class ITunesGraphParser:
    """Turn an iTunes library XML file into a genre/artist node-link graph.

    The result (see toJson) is a dict with 'nodes', 'links' and per-category
    maxima, intended for a D3.js force-directed graph.
    NOTE(review): this class targets Python 2 (`itervalues` below, and `print`
    statements elsewhere in this file) — it will not run unmodified on Python 3.
    """

    def __init__(self, libraryFile):
        # Path to the "iTunes Music Library.xml" plist file.
        self.libraryFile = libraryFile

    def toJson(self, rating=4, indent=None):
        """Return the graph as a JSON string, keeping tracks rated >= *rating* stars."""
        self._rating = rating * 20  # iTunes stores ratings 0-100 (20 per star)
        self._maxArtistSongs = 0
        self._maxArtistPlays = 0
        self._maxGenreSongs = 0
        self._maxGenrePlays = 0
        self._processArtists()
        self._processGenres()
        self._processNodes()
        jsonObj = {
            'nodes': self._nodes,
            'links': self._links,
            'maxArtistSongs': self._maxArtistSongs,
            'maxArtistPlays': self._maxArtistPlays,
            'maxGenreSongs': self._maxGenreSongs,
            'maxGenrePlays': self._maxGenrePlays
        }
        # SetEncoder serializes the remaining 'genres' sets as lists.
        return json.dumps(jsonObj, indent=indent, cls=SetEncoder)

    def toJsonP(self, rating=4, indent=None):
        """Return the graph wrapped in a JSON-P callback (`itgCallback`)."""
        json = self.toJson(rating, indent)
        jsonp = ';itgCallback(' + json + ');'
        return jsonp

    def _readTracks(self):
        # Parse the plist and return its 'Tracks' dict (track id -> track info).
        # NOTE(review): plistlib.readPlist was removed in Python 3.9 (use load()).
        pl = plistlib.readPlist(self.libraryFile)
        return pl['Tracks']

    def _processArtists(self):
        """Aggregate per-artist song count, play count, rating and genre set."""
        tracks = self._readTracks()
        self._artists = {}
        for k in tracks:
            track = tracks[k]
            # Filter out any non-music with ratings lower than 3 stars
            if (track['Track Type'] != 'File') or ('Artist' not in track) or ('Genre' not in track) or (
                    'Rating' not in track) or (track['Rating'] < self._rating) or (track['Artist'] == 'Various Artists'):
                continue
            akey = track['Artist']
            if akey not in self._artists:
                self._artists[akey] = {
                    'id': len(self._artists),
                    'name': akey,
                    'type': 'a', 'count': 0, 'plays': 0, 'rating': 0,
                    'genres': set()
                }
            rating = (track['Rating'] // 20)  # back to a 0-5 star scale
            plays = track['Play Count'] if 'Play Count' in track else 0
            self._artists[akey]['count'] += 1
            self._artists[akey]['rating'] += rating
            self._artists[akey]['plays'] += plays
            self._maxArtistSongs = max(self._maxArtistSongs, self._artists[akey]['count'])
            self._maxArtistPlays = max(self._maxArtistPlays, self._artists[akey]['plays'])
            # Split up the Genres
            genreParts = track['Genre'].split('/')
            self._artists[akey]['genres'] |= set(genreParts)

    def _processGenres(self):
        """Aggregate genre totals from the artists and record genre adjacencies."""
        self._genres = {}
        # NOTE(review): deleting from self._artists while iterating .keys() is
        # only safe on Python 2, where keys() returns a list snapshot.
        for akey in self._artists.keys():
            # Filter out any one-hit wonders
            if self._artists[akey]['count'] <= 2:
                del self._artists[akey]
                continue
            genreParts = self._artists[akey]['genres']
            for gkey in list(genreParts):
                # 'Mix' is treated as noise, not a real genre.
                if gkey == 'Mix':
                    genreParts.remove(gkey)
                    continue
                if gkey not in self._genres:
                    self._genres[gkey] = {
                        'id': len(self._genres),
                        'name': gkey,
                        'type': 'g', 'count': 0, 'plays': 0, 'rating': 0,
                        'adjGenres': set()
                    }
                self._genres[gkey]['count'] += self._artists[akey]['count']
                self._genres[gkey]['rating'] += self._artists[akey]['rating']
                self._genres[gkey]['plays'] += self._artists[akey]['plays']
                self._maxGenreSongs = max(self._maxGenreSongs, self._genres[gkey]['count'])
                self._maxGenrePlays = max(self._maxGenrePlays, self._genres[gkey]['plays'])
            # Add adjacencies between genre parts
            for gkey in genreParts:
                for gkey2 in genreParts:
                    if gkey != gkey2:
                        self._genres[gkey]['adjGenres'].add(gkey2)

    def _processNodes(self):
        """Build the final node list (genres first, then artists) and artist->genre links."""
        self._links = []
        # Genres are sorted by id so node index == genre id.
        self._nodes = sorted(self._genres.itervalues(), key=itemgetter('id'))
        for idx, genre in enumerate(self._nodes):
            #for gid in genre['adjGenres']:
                #self._links.append({ 'source': gid, 'target': idx })
            del genre['adjGenres']
        idx = len(self._nodes);
        for akey in self._artists.keys():
            self._nodes.append(self._artists[akey])
            # Link each artist node to every genre node it belongs to.
            for g in self._artists[akey]['genres']:
                self._links.append({ 'source': idx, 'target': self._genres[g]['id'] })
            idx += 1
#### main block ####
# Default input library and output JSON locations.
defaultLibraryFile = os.path.expanduser('~/Music/iTunes/iTunes Music Library.xml')
defaultOutputFile = os.path.dirname(os.path.realpath(__file__)) + '/js/music-data.json'
# Command-line interface definition (consumed by the __main__ block below).
parser = OptionParser(version="%prog 1.0")
parser.add_option('-f', '--file', dest='file', type='string',
                  help='iTunes Library XML file path',
                  default=defaultLibraryFile)
parser.add_option('-o', '--output', dest='output', type='string',
                  help='Output to file (default=./js/music-data.json)',
                  default=defaultOutputFile)
parser.add_option('-c', '--console', dest='console', action='store_true',
                  help='Output to console instead of file')
parser.add_option('-r', '--rating', dest='rating', type='int',
                  help='Minimum rating filter (default = 4)',
                  default=4)
parser.add_option('-p', '--jsonp', dest='jsonp', action='store_true',
                  help='Output in JSON-P format')
parser.add_option('-i', '--indent', dest='indent', type='int',
                  help='Indent level for output format (default=None)')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
                  help='Verbose output')
if __name__ == '__main__':
    (options, args) = parser.parse_args()
    itunesParser = ITunesGraphParser(options.file)
    # Emit JSON-P or plain JSON depending on the -p flag.
    if options.jsonp:
        output = itunesParser.toJsonP(options.rating, options.indent)
    else:
        output = itunesParser.toJson(options.rating, options.indent)
    # Write to stdout or to the chosen output file (Python 2 print statements).
    if options.console:
        print output
    else:
        # NOTE(review): the file is opened in binary mode while `output` is a
        # str — fine on Python 2, would raise TypeError on Python 3.
        with io.open(options.output, 'wb') as outfile:
            outfile.write(output)
        print "JSON data written to: " + options.output
|
# -*- encoding: utf-8 -*-
from setuptools import setup
from setuptools import find_packages
from os.path import join, dirname
import threebot_worker as app
def long_description():
    """Return the contents of README.md next to this file, or a placeholder
    string when the file cannot be read."""
    try:
        # BUG FIX: use a context manager so the file handle is closed
        # deterministically instead of being left to the garbage collector.
        with open(join(dirname(__file__), 'README.md')) as readme:
            return readme.read()
    except IOError:
        return "LONG_DESCRIPTION Error"
# Package metadata; invoked by pip/setuptools when building or installing.
setup(
    name="threebot-worker",
    version=app.__version__,  # single-sourced from the package itself
    description="Worker scripts for the 3bot plattform.",
    long_description=long_description(),
    author='arteria GmbH',
    author_email="admin@arteria.ch",
    maintainer_email="admin@arteria.ch",
    url="https://github.com/3bot/3bot-worker",
    packages=find_packages(),
    include_package_data=True,
    # NOTE(review): requirements.txt is read via a bare open() (handle left to
    # GC) and splitting on '\n' can yield empty entries — verify tolerated.
    install_requires=open('requirements.txt').read().split('\n'),
    scripts=['threebot_worker/threebot-worker'],
)
|
from flask_script import Manager
from flask import url_for
from fooApp.app import app
# Flask-Script command-line manager wrapping the app (e.g. `python manage.py runserver`).
# NOTE(review): Flask-Script is unmaintained; the built-in Flask CLI is the modern replacement.
manager = Manager(app)
app.config['DEBUG'] = True  # Ensure debugger will load.
if __name__ == '__main__':
    manager.run()
print("Enter an integer for X and Y to get the Harmonic and Arithmetic mean")
x = int(input("Enter x: "))
y = int(input("Enter y: "))
h = 2/(1/x + 1/y)
a = (x+y)/2
print(f"\nArithmetic mean: {a}")
print(f"Harmonic mean: {h}") |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 16 04:14:45 2020
@author: antonio
UTILS
"""
import numpy as np
from random import randrange
def dist_left(snake_head, thing, dist_wall_left, wall_right):
    """Leftward distance from the head to *thing*, wrapping through the walls
    when *thing* is not strictly to the left."""
    dx = snake_head[0] - thing[0]
    if dx > 0:
        return dx
    return dist_wall_left + wall_right - thing[0]
def dist_right(snake_head, thing, dist_wall_right, wall_left):
    """Rightward distance from the head to *thing*, wrapping through the walls
    when *thing* is not strictly to the right."""
    dx = thing[0] - snake_head[0]
    if dx > 0:
        return dx
    return dist_wall_right + wall_left + thing[0]
def dist_up(snake_head, thing, dist_wall_up, wall_down):
    """Upward distance from the head to *thing*, wrapping through the walls
    when the vertical delta is not positive."""
    dy = thing[1] - snake_head[1]
    if dy > 0:
        return dy
    return dist_wall_up + wall_down + thing[1]
def dist_down(snake_head, thing, dist_wall_down, wall_up):
    """Downward distance from the head to *thing*, wrapping through the walls
    when the vertical delta is not positive."""
    dy = snake_head[1] - thing[1]
    if dy > 0:
        return dy
    return dist_wall_down + wall_up - thing[1]
def calculate_dist(snake_pos, food, wall_left, wall_right, wall_up, wall_down):
    """Build the 12-element input vector for the network:
    [wall distances (l,r,u,d), food distances (l,r,u,d), nearest-body distances (l,r,u,d)].
    snake_pos[0] is the head; the remaining entries are body segments."""
    head = snake_pos[0]
    body = snake_pos[1:]
    d_wall_l = abs(head[0] - wall_left)
    d_wall_r = abs(head[0] - wall_right)
    d_wall_u = abs(head[1] - wall_up)
    d_wall_d = abs(head[1] - wall_down)
    food_dists = [
        dist_left(head, food, d_wall_l, wall_right),
        dist_right(head, food, d_wall_r, wall_left),
        dist_up(head, food, d_wall_u, wall_down),
        dist_down(head, food, d_wall_d, wall_up),
    ]
    body_dists = [
        min(dist_left(head, seg, d_wall_l, wall_right) for seg in body),
        min(dist_right(head, seg, d_wall_r, wall_left) for seg in body),
        min(dist_up(head, seg, d_wall_u, wall_down) for seg in body),
        min(dist_down(head, seg, d_wall_d, wall_up) for seg in body),
    ]
    return [d_wall_l, d_wall_r, d_wall_u, d_wall_d] + food_dists + body_dists
def update_snake_pos(snake, _dir):
    """Advance the snake one cell in direction *_dir* (0=left, 1=right, 2=up, 3=down).

    Returns (updated_snake, head_tail_direction), where head_tail_direction
    encodes the current head-to-neck orientation with the same 0-3 codes.
    If the requested direction equals that orientation, the snake is reversed
    first (moving "into itself" flips which end is the head).
    NOTE(review): if snake[0] and snake[1] are not axis-adjacent, or _dir is
    outside 0-3, head_tail_direction / snake_updated are never assigned and
    this raises UnboundLocalError — confirm callers guarantee valid input.
    """
    # Orientation of the head relative to the segment behind it.
    head_tail = [snake[0][0] - snake[1][0], snake[0][1] - snake[1][1]]
    if head_tail == [1,0]:
        head_tail_direction = 0
    elif head_tail == [-1,0]:
        head_tail_direction = 1
    elif head_tail == [0,-1]:
        head_tail_direction = 2
    elif head_tail == [0, 1]:
        head_tail_direction = 3
    if head_tail_direction == _dir:
        # Change the direction: reverse the body so the tail becomes the head.
        snake = list(reversed(snake))
    # Prepend the new head cell and drop the last tail cell.
    if _dir == 0: #left
        snake_head = [snake[0][0] - 1, snake[0][1]]
        snake_updated = [snake_head] + snake[0:-1]
    elif _dir == 1: # right
        snake_head = [snake[0][0] + 1, snake[0][1]]
        snake_updated = [snake_head] + snake[0:-1]
    elif _dir == 2: #up
        snake_head = [snake[0][0], snake[0][1] + 1]
        snake_updated = [snake_head] + snake[0:-1]
    elif _dir == 3: # down
        snake_head = [snake[0][0], snake[0][1] - 1]
        snake_updated = [snake_head] + snake[0:-1]
    return snake_updated, head_tail_direction
def grow_snake(snake, head_tail_direction):
    """Append one segment behind the tail, offset opposite to the snake's
    head-tail orientation (0=left grows x-1, 1=right x+1, 2=up y+1, 3=down y-1).

    Mutates *snake* in place and also returns it.
    """
    # BUG FIX: the original did `snake.append(snake[-1])`, appending the SAME
    # list object as the old tail; mutating the new segment therefore moved
    # the old tail too. Append a copy instead.
    new_tail = list(snake[-1])
    if head_tail_direction == 0:
        new_tail[0] = new_tail[0] - 1
    elif head_tail_direction == 1:
        new_tail[0] = new_tail[0] + 1
    elif head_tail_direction == 2:
        new_tail[1] = new_tail[1] + 1
    elif head_tail_direction == 3:
        new_tail[1] = new_tail[1] - 1
    snake.append(new_tail)
    return snake
def grow_fruit(snake):
    """Return a random [x, y] cell on the 20x20 board that is not occupied by *snake*."""
    while True:
        candidate = [randrange(20), randrange(20)]
        if candidate not in snake:
            return candidate
def decide_movement(model, inputs):
    """Feed the 12-element distance vector to *model* and return the index of
    the highest-scoring movement."""
    features = np.asarray(inputs).reshape(1, 12)
    return model.predict(features).argmax(-1)
|
# -*- coding: utf-8 -*-
# Monte-Carlo comparison of the "stay" vs "switch" strategies for the
# Monty Hall problem over `total` simulated games (Python 2 script).
# NOTE(review): `hoost` is drawn uniformly from all three doors, so it may
# coincide with the prize door — confirm this matches the intended model
# (classically the host always opens a non-winning door).
import random
total = 1000000
doors = [1, 2, 3]
wins = 0
# Strategy 1: keep the first random pick.
for x in range(total):
    binggo = random.choice(doors)   # door hiding the prize
    hoost = random.choice(doors)    # presumably the player's pick here
    if hoost == binggo:
        wins += 1
print "wins gailv for stay: %.5f%%" % (wins/float(total)*100)
wins_2 = 0
# Strategy 2: switch away from the first pick.
for x in range(total):
    binggo = random.choice(doors)
    hoost = random.choice(doors)
    doors_2 = doors[:]              # copy so the shared door list is not mutated
    doors_2.remove(hoost)
    swicth = random.choice(doors_2) # pick one of the two remaining doors
    if swicth == binggo:
        wins_2 += 1
print "wins gailv for swicth: %.5f%%" % (wins_2/float(total)*100)
stay_gailv = wins/float(total)
swicth_gailv = wins_2/float(total)
if stay_gailv > swicth_gailv:
    print "stay > swicth"
else:
    print "swicth > stay"
|
# -*- coding: utf-8 -*-
# Appium demo: open the Android Settings app, navigate to the "About phone"
# screen and print the entry whose text equals "5.1".
# import packages
from appium import webdriver
# desired capabilities describing the target device and app
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = '192.168.56.101:5555'    # emulator address
desired_caps['appPackage'] = 'com.android.settings'   # app under test
desired_caps['appActivity'] = '.Settings'             # entry activity
desired_caps['unicodeKeyboard'] = True
desired_caps['resetKeyboard'] = True
# create the driver (Appium server endpoint)
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
# locate the "Settings" entry via xpath (text is the Chinese UI label)
setting_bottom = driver.find_element_by_xpath("//*[contains(@text,'设置')]")
# click the settings button
setting_bottom.click()
# collect the visible text widgets
# NOTE(review): the Appium Python client exposes find_elements_by_class_name,
# not find_elements_by_class — confirm this call exists in the client version used.
text_name = driver.find_elements_by_class("android.widget.TextView")
for i in text_name:
    if i.text == "关于手机":
        # open the "About phone" screen
        text_value = driver.find_element_by_xpath("//*[contains(@text,'关于手机')]")
        text_value.click()
        text_value2 = driver.find_elements_by_class("android.widget.TextView")
        for a in text_value2:
            if a.text == "5.1":
                print(a.text)
|
#!/usr/bin/python
f = open('in.txt')
i = 0
for line in f:
i += 1
if i == 1:
continue
cur = int(line)
if cur == 0:
print 'Case #' + str(i-1) + ': INSOMNIA'
continue
ps = 'Case #' + str(i-1) + ': '
mul = 1
total = cur
found = ''
while 1:
s = str(total)
total += cur
if '0' in found:
a = 1
elif '0' in s:
found += '0'
if '1' in found:
a = 1
elif '1' in s:
found += '1'
if '2' in found:
a = 1
elif '2' in s:
found += '2'
if '3' in found:
a = 1
elif '3' in s:
found += '3'
if '4' in found:
a = 1
elif '4' in s:
found += '4'
if '5' in found:
a = 1
elif '5' in s:
found += '5'
if '6' in found:
a = 1
elif '6' in s:
found += '6'
if '7' in found:
a = 1
elif '7' in s:
found += '7'
if '8' in found:
a = 1
elif '8' in s:
found += '8'
if '9' in found:
a = 1
elif '9' in s:
found += '9'
if '0' in found and '1' in found and '2' in found and '3' in found and '4' in found and '5' in found and '6' in found and '7' in found and '8' in found and '9' in found:
ps = ps + s
print ps
break
|
from django.shortcuts import render
from django.http import Http404
from rest_framework.generics import RetrieveUpdateDestroyAPIView, ListCreateAPIView, ListAPIView, RetrieveAPIView, GenericAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from rest_framework import permissions
from core.models import Task
from core.modulos.task.serializers import TaskSerializer
from core.modulos.user.serializers import UserSerializer
from core.modulos.task.permissions import IsOwnerOrReadOnly
from django.contrib.auth.models import User
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import renderers
#API ROOT
@api_view(['GET'])
def api_root(request, format=None):
    """Browsable entry point of the API: links to the user and task list endpoints."""
    return Response({
        'users': reverse('users-list', request=request, format=format),
        'tasks': reverse('tasks-list', request=request, format=format)
    })
# class TaskList(APIView):
# def get(self, request, format=None):
# tasks = Task.objects.all()
# serializer = TaskSerializer(tasks, many=True)
# return Response(serializer.data)
# def post(self, request, format=None):
# serializer = TaskSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TaskList(ListCreateAPIView):
    """List the authenticated user's tasks (GET) and create new ones (POST)."""
    serializer_class = TaskSerializer
    permission_classes = [permissions.IsAuthenticated, IsOwnerOrReadOnly]

    def get_queryset(self):
        # Only expose tasks owned by the requesting user.
        return Task.objects.filter(owner=self.request.user)

    def perform_create(self, serializer):
        # Stamp the requesting user as the owner on creation.
        serializer.save(owner=self.request.user)
# class TaskDetail(APIView):
# """
# Retrieve, update or delete a snippet instance.
# """
# def get_object(self, pk):
# try:
# return Task.objects.get(pk=pk)
# except Task.DoesNotExist:
# raise Http404
# def get(self, request, pk, format=None):
# task = self.get_object(pk)
# serializer = TaskSerializer(task)
# return Response(serializer.data)
# def put(self, request, pk, format=None):
# task = self.get_object(pk)
# serializer = TaskSerializer(task, data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# def delete(self, request, pk, format=None):
# task = self.get_object(pk)
# task.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
class TaskDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single task owned by the requesting user."""
    serializer_class = TaskSerializer
    permission_classes = [permissions.IsAuthenticated, IsOwnerOrReadOnly]

    def get_queryset(self):
        # Restricting the queryset to the owner yields 404 for other users' tasks.
        return Task.objects.filter(owner=self.request.user)
class TaskHighlight(GenericAPIView):
    """Return a task's pre-rendered `highlighted` HTML as a raw response."""
    queryset = Task.objects.all()
    renderer_classes = [renderers.StaticHTMLRenderer]

    def get(self, request, *args, **kwargs):
        task = self.get_object()
        return Response(task.highlighted)
#USER
class UserList(ListAPIView):
    """Read-only list of all users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class UserDetail(RetrieveAPIView):
    """Read-only detail view for a single user."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
#!/usr/bin/python
#coding=utf-8
import sys, time
from sensetimebi_productstests.Sharedscript.ShareedSSH import SSH
from sensetimebi_productstests.Sharedscript.logger import Logger
if __name__ == '__main__':
    # Stress test (15 iterations): inject a "U disk upgrade" flag into
    # /etc/product_info on the target device, reboot it, then verify the
    # upgrade procedure removed the flag.
    host_ip = '10.9.40.150'
    # print(host_ip)
    ssh_name = 'root'
    ssh_pwd = 'BI_SensePassXS#'  # NOTE(review): hard-coded credentials — consider an env var
    # print(ssh_pwd)
    ssh_port = 22
    logpath = sys.path[0] + '\\log.txt'  # Windows-style path separator
    log = Logger(logpath, level='debug')  # save the script's run log
    ssh_obj = SSH(host_ip, ssh_port, ssh_name, ssh_pwd)
    ssh_obj.connects()
    for i in range(1, 16):
        log.logger.debug('----------test No.%s----------' % i)
        # Prepend the upgrade flag as the first line of /etc/product_info.
        ssh_obj.send_data("sed -i \"1i U disk upgrade\" /etc/product_info")
        ssh_obj.send_data("cat /etc/product_info")
        info = ssh_obj.get_data()
        log.logger.debug(info)
        flag = info.find('U disk upgrade')
        # print(flag)
        if flag != -1:
            log.logger.debug('flag set up success!!!')
        else:
            log.logger.debug('flag set up fail!!!')
        ssh_obj.send_data("reboot -f")
        log.logger.debug('waiting upgrade...')
        time.sleep(100)  # allow the device to reboot and run the upgrade
        # Reconnect after reboot and check that the flag was consumed.
        ssh_obj = SSH(host_ip, ssh_port, ssh_name, ssh_pwd)
        ssh_obj.connects()
        ssh_obj.send_data("cat /etc/product_info")
        info = ssh_obj.get_data()
        log.logger.debug(info)
        flag = info.find('U disk upgrade')
        if flag == -1:
            log.logger.debug('U disk upgrade success!!!')
        else:
            log.logger.debug('U disk upgrade fail!!!')
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Send a simple plain-text test mail over Gmail SMTP with STARTTLS.
mail_content = "Hello, This is a simple mail. There is only text, no attachments are there The mail is sent using Python SMTP library. Thank You"
# The mail addresses and password
# NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a secret store.
sender_address = 'dummy.upgrad@gmail.com'
sender_pass = '!Qaz2wsx'
receiver_address = 'dummy.upgrad@gmail.com'
# Setup the MIME envelope
message = MIMEMultipart()
message['From'] = sender_address
message['To'] = receiver_address
# Fixed subject: this message carries no attachment (old text claimed one).
message['Subject'] = 'A test mail sent by Python.'  # The subject line
# The body and the attachments for the mail
message.attach(MIMEText(mail_content, 'plain'))
session = None  # defined up-front so the finally block is safe if SMTP() raises
try:
    # Create SMTP session for sending the mail
    session = smtplib.SMTP('smtp.gmail.com', 587)  # use gmail with port
    session.starttls()  # enable security
    session.login(sender_address, sender_pass)  # login with mail_id and password
    text = message.as_string()
    session.sendmail(sender_address, receiver_address, text)
    print('Mail Sent')
except Exception as e:
    # Print any error messages to stdout
    print(e)
finally:
    # Fixed: the original called session.quit() even when the session was
    # never created, raising NameError and masking the real failure.
    if session is not None:
        session.quit()
from sanic import Blueprint
from apis.users.UsersController import users_bp
# Mount the users blueprint under the shared API prefix.
# NOTE(review): the prefix has no leading '/' ('api/v1/users'); Sanic
# routes usually start with '/' — confirm the resulting URLs are intended.
users_group = Blueprint.group(users_bp, url_prefix='api/v1/users')
|
import os
import pygame
import GameplayConstants
class Sounds:
    """Loads every game sound effect and keeps their volumes in sync with
    the configured effects volume."""

    def __init__(self):
        soundfolder = os.path.join(os.path.dirname(__file__), 'sounds')
        # explosions
        self.explosions = []
        filenames = ["explosion_" + str(nr) + ".wav" for nr in range(10)]
        for filename in filenames:
            self.explosions.append(pygame.mixer.Sound(os.path.join(soundfolder, filename)))
        # menu sounds
        self.soundclick = pygame.mixer.Sound(os.path.join(soundfolder, "menu", "click.wav"))
        self.soundfail = pygame.mixer.Sound(os.path.join(soundfolder, "menu", "fail.wav"))
        self.soundimplement = pygame.mixer.Sound(os.path.join(soundfolder, "menu", "implement.wav"))
        self.soundremove = pygame.mixer.Sound(os.path.join(soundfolder, "menu", "remove.wav"))
        self.soundcancel = pygame.mixer.Sound(os.path.join(soundfolder, "menu", "cancel.wav"))
        # hero sounds
        self.lasersound = pygame.mixer.Sound(os.path.join(soundfolder, 'laser.wav'))
        self.pickupsound = pygame.mixer.Sound(os.path.join(soundfolder, 'goldpickup.wav'))
        self.bangs = []
        self.bangs.append(pygame.mixer.Sound(os.path.join(soundfolder, "bang0.ogg")))
        self.bangs.append(pygame.mixer.Sound(os.path.join(soundfolder, "bang1.ogg")))
        self.bangs.append(pygame.mixer.Sound(os.path.join(soundfolder, "bang2.ogg")))
        self.shieldhitsound = pygame.mixer.Sound(os.path.join(soundfolder, "shieldhit.wav"))

    def soundchange(self):
        """Re-apply the configured effects volume to every loaded sound.

        (The original repeated one set_volume line per sound — and applied it
        to lasersound twice; the loop removes the duplication.)
        """
        volume = GameplayConstants.effectsvolume / 100
        for sound in self.explosions:
            sound.set_volume(volume)
        for sound in (self.soundclick, self.soundfail, self.soundimplement,
                      self.soundremove, self.soundcancel, self.lasersound,
                      self.shieldhitsound, self.pickupsound):
            sound.set_volume(volume)
        for sound in self.bangs:
            sound.set_volume(volume)
def playsong(name, queue):
    """Play or queue a music file from sounds/music.

    :param name: file name of the track
    :param queue: if True, append after the current track instead of
        replacing it
    """
    file = os.path.join(os.path.join(os.path.dirname(__file__), 'sounds', 'music', name))
    if queue:
        pygame.mixer.music.queue(file)
    else:
        # Fresh load replaces whatever is playing, once, at the music volume.
        # NOTE(review): volume is only (re)applied on the load path, not when
        # queueing — confirm that is intended.
        pygame.mixer.music.load(file)
        pygame.mixer.music.play(loops=0)
        pygame.mixer.music.set_volume(GameplayConstants.musicvolume / 100)
# Module-level singleton shared by the rest of the game; volumes are applied
# once at import time.
sounds = Sounds()
sounds.soundchange()
import pytest
from yandex_testing_lesson import Rectangle
# Constructor must reject non-numeric side lengths with TypeError.
def test_1():
    with pytest.raises(TypeError):
        Rectangle('1', 12)

def test_2():
    with pytest.raises(TypeError):
        Rectangle([], 12)

def test_3():
    with pytest.raises(TypeError):
        Rectangle(12, '1')

def test_4():
    with pytest.raises(TypeError):
        Rectangle(12, [])

# Negative side lengths must raise ValueError.
def test_5():
    with pytest.raises(ValueError):
        Rectangle(12, -1)

def test_6():
    with pytest.raises(ValueError):
        Rectangle(-1, 12)

# When both a type error and a value error are possible, the type check wins.
def test_7():
    with pytest.raises(TypeError):
        Rectangle(-1, '1')

def test_8():
    with pytest.raises(TypeError):
        Rectangle('1', -1)

# Happy-path geometry.
def test_9():
    assert Rectangle(5, 7).get_area() == 35

def test_10():
    assert Rectangle(5, 7).get_perimeter() == 24
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, fileinput
""" A light xml parser application which is good for transforming HunToken's xml based output into plain lines,
which is necesseraly for input data form of HunPoS (part-of-speech tagging) and HunMorph (morphological analysis).
It is embedded in MorphologicalAnalysis.sh file's shell pipeline. """
def main():
    """Flatten HunToken XML on stdin: emit the bare text of each <w> token,
    and a sentinel line after every sentence (</s>)."""
    # Parse incoming XML file
    try:
        for line in fileinput.input():
            if line.startswith("<w>"):
                # Single-argument print(...) works on both Python 2 and 3
                # (the original py2 print statements broke under py3).
                print(line.replace("<w>", "").replace("</w>", "").replace("\n", ""))
            if line.startswith("</s>"):
                print("thisistheending")
    except Exception:
        # Was a bare `except:`; keep the best-effort behavior but let
        # KeyboardInterrupt/SystemExit propagate.
        print("ERROR at xmlparser.py file")
if __name__ == '__main__':
main()
|
# -*- coding: latin-1 -*-
import heapq as hq
from graphsearch import GraphSearch
class BestFirstSearch(GraphSearch):
    """Graph search that always expands the frontier node with the lowest f()."""

    def insert_border(self, node):
        """Push *node* onto the frontier heap, keyed by f(node).

        A strictly increasing sequence number sits between the priority and
        the node: with the original (f, node) tuples, equal f values made
        heapq compare the nodes themselves, raising TypeError for node types
        that do not define ordering. Ties now pop in insertion (FIFO) order.
        """
        self._tiebreak = getattr(self, '_tiebreak', 0) + 1
        hq.heappush(self.border, (self.f(node), self._tiebreak, node))

    def remove_border(self):
        """Pop and return the frontier node with the lowest f value."""
        (_, _, node) = hq.heappop(self.border)
        return node

    def f(self, node):
        # Priority function; concrete searches must override.
        raise NotImplementedError

    def order(self, node):
        return self.f(node)
class AASearch(BestFirstSearch):
    """A* search: priority is path cost so far plus the heuristic estimate."""

    def f(self, node):
        g = node.cost                                  # cost accumulated so far
        h = self.problem.heuristics(node.state)        # estimate to the goal
        return g + h
from __future__ import division
import os, re
import string
from datetime import datetime
import logging
import logging.handlers
from collections import MutableMapping
from itertools import islice
from functools import wraps, partial
# Default directory for this module's rotating log files, next to the module.
LOGDIR = os.path.join(os.path.dirname(__file__), 'logs')
def removehandlers(logger):
    """Detach, flush, and close every handler attached to *logger*.

    Iterates over a copy of ``logger.handlers``: the original iterated the
    live list while ``removeHandler`` shrank it, silently skipping every
    other handler.
    """
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
        handler.flush()
        handler.close()
def configurelogging(logger, ch = None, fh = None, formatter = '', level = logging.DEBUG, logdir = LOGDIR, extrakeys = ()):
    """(Re)configure *logger* with a rotating file handler and a console handler.

    :param ch: optional pre-built console handler (a StreamHandler otherwise)
    :param fh: optional pre-built file handler (a RotatingFileHandler in
        *logdir*, named after the logger, otherwise)
    :param formatter: extra format fields appended to the base format string
    :param extrakeys: extra record attribute names appended as format fields
        (default changed from a shared mutable list to a tuple)
    :returns: the same *logger*, reconfigured
    """
    removehandlers(logger)
    fmtstring = "%(levelname)s|%(message)s|%(asctime)s"
    if extrakeys:
        fmtstring = fmtstring + "|" + '|'.join("%" + "(%s)s" % k for k in extrakeys)
    if formatter:
        fmtstring = fmtstring + "|" + formatter
    _formatter = logging.Formatter(fmtstring)
    if not fh:
        # Robustness: RotatingFileHandler cannot create its file in a
        # missing directory.
        if not os.path.isdir(logdir):
            os.makedirs(logdir)
        logfile = os.path.join(logdir, logger.name + '.log')
        fh = logging.handlers.RotatingFileHandler(logfile, encoding = 'utf-8')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(_formatter)
    if not ch:
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(_formatter)
    logger.setLevel(level)
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
def mergedicts(*dictionaries, **kwds):
    """Merge any number of dicts left-to-right; keyword arguments win last."""
    merged = {}
    for mapping in dictionaries:
        merged.update(mapping)
    merged.update(kwds)
    return merged
def reversedict(dictionary):
    """Invert a mapping: values become keys and keys become values."""
    return dict((value, key) for key, value in dictionary.items())
def chunker(iterable, chunksize = 675000):
    """Yield successive lists of at most *chunksize* items from *iterable*.

    Ends with a plain ``return``: the original ``raise StopIteration`` inside
    a generator becomes a RuntimeError under PEP 479 (Python 3.7+).
    """
    iterator = iter(iterable)
    while True:
        chunk = list(islice(iterator, chunksize))
        if not chunk:
            return
        yield chunk
def loadcontainer(func, container = dict):
    """Wrap *func* so its (iterable) result is poured into *container*.

    ``functools.wraps`` is applied so the wrapper keeps the wrapped
    function's name and docstring (the original wrapper hid them).
    """
    @wraps(func)
    def inner(*args, **kwds):
        return container(func(*args, **kwds))
    return inner
def attribute_generator(obj, private_attrs = False, callables_only = False, key = None):
    """Yield (name, value) pairs for the attributes of *obj*.

    Parameters:
    -----------
    obj : Can be a class, module, string or any other data structure/object with attributes.
    [private_attrs] : Flag to additionally take private attributes. Defaults to False. bool
    [callables_only] : Flag to take callable items only. Defaults to False. bool
    [key] : Get a specific attribute. key will take
        precedence over all flags.Defaults to None. str
    """
    for attrname in dir(obj):
        if key and attrname != key:
            continue
        elif not private_attrs and attrname.startswith('_'):
            continue
        try:
            value = getattr(obj, attrname)
        except AttributeError:
            # dir() can advertise names whose descriptor lookup fails.
            continue
        if callables_only and not callable(value):
            continue
        yield attrname, value
# Eager variants of attribute_generator: same arguments, results materialized
# as a list of (name, value) pairs / a {name: value} dict respectively.
attrlist = loadcontainer(attribute_generator, container = list)
attrdict = loadcontainer(attribute_generator)
def grabfunctions(obj, module, attname):
    """Attach every public callable of *module* to *obj*, each as a partial
    whose first argument is pre-bound to ``getattr(obj, attname)`` —
    effectively turning module functions into methods of *obj*."""
    for name, func in attrlist(module, callables_only = True):
        setattr(obj, name, partial(func, getattr(obj, attname)))
def textstring(func):
    """Decorator for string transforms: call *func* normally, but when the
    argument does not support string operations (AttributeError/TypeError)
    return it unchanged instead of raising."""
    @wraps(func)
    def inner(x, *args, **kwds):
        try:
            result = func(x, *args, **kwds)
        except (AttributeError, TypeError):
            return x
        return result
    return inner
def numberclean(x):
    """Strip currency/formatting characters from a numeric string.

    NOTE(review): Python 2 only — relies on the `unicode` builtin and on
    ``str.translate(None, deletechars)``, both gone in Python 3.
    """
    if isinstance(x, unicode):
        x = str(x)
    # Delete $ = ( ) , % * anywhere, then trim trailing hyphens/letters.
    return x.translate(None, r'$=(),%*')\
        .rstrip("-%s" % string.ascii_letters)
def numeric(func):
    """Decorator for number parsers (see ``integer``/``floating_point``).

    Tries ``func(x)`` as-is; on ValueError retries after ``numberclean``.
    If both fail, the original value is returned — unless ``force`` is true,
    in which case None is returned (implicit bare ``return``). TypeError
    (non-string/number input) also returns the value unchanged.
    """
    @wraps(func)
    def inner(x, force = False):
        try:
            return func(x)
        except ValueError:
            try:
                return func(numberclean(x))
            except ValueError:
                if not force:
                    return x
                return
        except TypeError:
            return x
    return inner
@textstring
def strip(x, *args):
    # Trim spaces/tabs plus any extra characters passed in *args.
    return x.strip(" \t%s" % ''.join(args))

@textstring
def to_single_space(x):
    # Collapse runs of 2+ whitespace characters into a single space.
    return re.compile(r'\s{2,}').sub(' ', x)

@textstring
def remove_non_ascii(x):
    # Drop every character outside the 7-bit ASCII range.
    return ''.join(i for i in x if ord(i)<128)
def fuzzyprep(x):
    """Remove whitespace, punctuation, and non-ascii characters
    from x in preparation for fuzzy text matching.

    NOTE(review): Python 2 only — ``str.translate(None, delchars)`` does not
    exist on Python 3 str.

    Parameters:
    -----------
    x : Item or string to parse. str
    """
    x = remove_non_ascii(x)
    if not isinstance(x, str):
        x = str(x)
    return ''.join(re.split(r'\s+', x\
        .translate(None, string.punctuation).lower()))
@numeric
def integer(x, **kwds):
    # Parse to int; '12.7'-style strings are truncated at the decimal point.
    return int((x if isinstance(x, str) else str(x)).split('.')[0])

@numeric
def floating_point(x, **kwds):
    # Parse to float; retry/fallback behavior is supplied by @numeric.
    return float(x)
class idict(MutableMapping):
    """A case-insensitive dict-like object.

    Taken from "https://github.com/requests/requests-docs-it/blob/master/requests/structures.py"
    to avoid the unecessary import. Thanks requests!

    Entries are stored as (original_key, value) pairs indexed by the
    lower-cased key, so lookups ignore case while iteration preserves the
    caller's original spelling.
    """
    def __init__(self, data = None, **kwds):
        self._store = dict()
        if not data:
            data = {}
        self.update(data, **kwds)

    @staticmethod
    def _lower(key):
        # Lower-case any string-like key; non-strings pass through as-is.
        # (Fixed: the original isinstance check referenced the py2-only
        # `unicode` builtin, raising NameError on Python 3.)
        if hasattr(key, 'lower'):
            return key.lower()
        return key

    def __repr__(self):
        return str(dict(self.items()))

    def __setitem__(self, key, value):
        # Keep the cased key so __iter__ can reproduce it.
        self._store[self._lower(key)] = (key, value)

    def __getitem__(self, key):
        return self._store[self._lower(key)][1]

    def __delitem__(self, key):
        del self._store[self._lower(key)]

    def __iter__(self):
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def __eq__(self, other):
        # Fixed: the original tested `collections.Mapping` but `collections`
        # was never imported (NameError). MutableMapping is already imported
        # at module level and plain dicts register as instances of it.
        if isinstance(other, MutableMapping):
            return dict(self.lower_items()) == dict(idict(other).lower_items())
        return NotImplemented

    def copy(self):
        # _store.values() are (cased_key, value) pairs, which update() accepts.
        return idict(self._store.values())

    def lower_items(self):
        return [(lowerkey, keyval[1]) for
                (lowerkey, keyval) in self._store.items()]
class EasyInit(object):
    """Decorator-factory for __init__ methods: after the wrapped __init__
    runs it optionally wires per-instance logging, then copies merged
    keyword arguments onto the instance as attributes."""

    def __init__(self, *args, **kwds):
        self._kwds = {}
        self.kwds = kwds

    @staticmethod
    def get_logger_name(obj):
        # Logger name defaults to the lower-cased class name.
        return obj.__class__.__name__.lower()

    @staticmethod
    def add_logging_methods(obj, logger = None, extra = {}, **kwds):
        """Attach a freshly configured logger plus info/debug/... shortcut
        methods (each pre-bound to its level and the *extra* record dict)."""
        if not logger:
            obj._logger = logging.getLogger(EasyInit.get_logger_name(obj))
        else:
            obj._logger = logger
        removehandlers(obj._logger)
        configurelogging(obj._logger, extrakeys = extra.keys())
        for level in ['info', 'debug', 'warning', 'error', 'critical']:
            # `level = level` binds each level at definition time.
            setattr(obj, level, lambda msg, level = level: getattr(obj._logger, level)(msg, extra = extra))
        return obj

    def __call__(self, func):
        @wraps(func)
        def inner(slf, *args, **kwds):
            setuplogging = kwds.pop('setuplogging', True)
            func(slf, *args, **kwds)
            if setuplogging:
                extra = {k : v for k, v in slf.__dict__.items() if v in args}
                self.add_logging_methods(slf, extra = extra)
            # Fixed: this called the undefined name `merge_dicts`; the helper
            # defined in this module is `mergedicts`.
            __ = mergedicts(kwds, self.kwds, self._kwds)
            for k, v in __.items():
                if '_logging' not in k:
                    setattr(slf, k, v)
        return inner
class GenericBase(object):
    """Base class whose __init__ is wrapped by EasyInit (logging setup plus
    keyword-argument attribute injection)."""
    @EasyInit()
    def __init__(self, setuplogging = True, *args, **kwds):
        # EasyInit may have attached a logger; start it with no handlers.
        if hasattr(self, '_logger'):
            self._logger.handlers = []
class Test(GenericBase):
    """Example subclass: stores a path and defers the rest to GenericBase."""
    def __init__(self, path, setuplogging = False, *args, **kwds):
        self.path = path
        super(Test, self).__init__(path, setuplogging = setuplogging, *args, **kwds)
|
# Print a left-aligned star triangle: row i has n1 - i stars.
# Fixed: `eval(input())` executes arbitrary user input (code injection);
# int() parses the height safely. The unused copy `n2` was dropped.
n1 = int(input())
for i in range(n1):
    for j in range(n1 - i):
        print('*', end="")
    print()
|
"""Ingest the files kindly sent to me by poker"""
from __future__ import print_function
import glob
import re
import datetime
import subprocess
import os
import pytz
from pyiem.util import noaaport_text, get_dbconn
from pyiem.nws.product import TextProduct
# Characters stripped from raw product files before parsing.
BAD_CHARS = r"[^\n\r\001\003a-zA-Z0-9:\(\)\%\.,\s\*\-\?\|/><&$=\+\@]"
DEBUG = False
# Shared AFOS database connection, reused by every process() call.
PGCONN = get_dbconn('afos')
# Map non-canonical observed sources to their canonical AFOS source ids.
XREF_SOURCE = {'KDSM': 'KDMX',
               'KOKC': 'KOUN'}
def process(order):
    """ Process this timestamp

    Extracts one DDPLUS tar.gz order archive, parses every bulletin inside,
    and (idempotently) loads the products into the half-year AFOS tables.
    """
    cursor = PGCONN.cursor()
    # Archive names start with YYMMDD; products must fall within +/- 2 days
    # of that date to be accepted.
    ts = datetime.datetime.strptime(order[:6], "%y%m%d").replace(
        tzinfo=pytz.utc)
    base = ts - datetime.timedelta(days=2)
    ceiling = ts + datetime.timedelta(days=2)
    subprocess.call("tar -xzf %s" % (order, ), shell=True)
    inserts = 0
    deletes = 0
    filesparsed = 0
    bad = 0
    for fn in glob.glob("%s[0-2][0-9].*" % (order[:6], )):
        # Strip control/odd characters the downstream parser cannot handle.
        content = re.sub(BAD_CHARS, "",
                         open(fn, 'rb').read().decode('ascii', 'ignore'))
        # Now we are getting closer, lets split by the delimter as we
        # may have multiple products in one file!
        for bulletin in content.split("\001"):
            if bulletin == '':
                continue
            try:
                bulletin = noaaport_text(bulletin)
                prod = TextProduct(bulletin, utcnow=ts,
                                   parse_segments=False)
                prod.source = XREF_SOURCE.get(prod.source, prod.source)
            except Exception as exp:
                if DEBUG:
                    print('Parsing Failure %s' % (exp, ))
                bad += 1
                continue
            if prod.valid < base or prod.valid > ceiling:
                # print('Timestamp out of bounds %s %s %s' % (base, prod.valid,
                #                                             ceiling))
                bad += 1
                continue
            # AFOS product tables are partitioned by half-year.
            table = "products_%s_%s" % (prod.valid.year,
                                        ("0712" if prod.valid.month > 6
                                         else "0106"))
            # Delete-then-insert keeps reruns of the same archive idempotent.
            cursor.execute("""
                DELETE from """ + table + """ WHERE pil = %s and
                entered = %s and source = %s and data = %s
            """, (prod.afos, prod.valid, prod.source, bulletin))
            deletes += cursor.rowcount
            cursor.execute("""INSERT into """+table+"""
                (data, pil, entered, source, wmo) values (%s,%s,%s,%s,%s)
            """, (bulletin, prod.afos, prod.valid, prod.source, prod.wmo))
            inserts += 1
        os.unlink(fn)
        filesparsed += 1
    print(("%s Files Parsed: %s Inserts: %s Deletes: %s Bad: %s"
           ) % (order, filesparsed, inserts, deletes, bad))
    cursor.close()
    PGCONN.commit()
def main():
    """ Go Main Go """
    # Work inside the scratch directory and ingest every order archive found.
    os.chdir("/mesonet/tmp/poker")
    for archive in glob.glob("??????.DDPLUS.tar.gz"):
        process(archive)
if __name__ == '__main__':
    # do something
    main()
|
from app import Application
from tkinter import *
#Driver
def main():
    """Create the Stock Tracker main window and enter the Tk event loop."""
    root = Tk()
    root.title("Stock Tracker")
    root.geometry("340x540")
    app = Application(root)  # keep a reference so the controller is not collected
    root.mainloop()

if __name__ == "__main__":
    # Guarded entry point: importing this module no longer launches the GUI.
    main()
|
from turtle import *
from random import randint
from sys import exit
# Randomly decide at import time whether the door starts open (1) or closed (0).
door = randint(0,1)

def getPosition():
    """Return the turtle's (x, y, heading) snapped onto the 10-unit grid."""
    x,y = position()
    h = heading()
    return (int(round(x,1)),int(round(y,1)),int(round(h/10))*10)

def isDoorOpened():
    # Truthy when the door is (or has been) opened.
    return door
def drawRoom():
    """Draw the 300x300 room with a 20-unit door gap on the right wall.

    When the door starts open the gap is skipped with the pen up; otherwise
    the door segment is drawn closed.
    """
    forward(300)
    left(90)
    forward(200)
    right(90)
    forward(10)
    left(90)
    if isDoorOpened():
        penup()
        forward(20)
        pendown()
    else:
        forward(20)
    # Return to the wall line and finish the remaining three walls.
    left(90)
    forward(10)
    right(90)
    forward(80)
    left(90)
    forward(300)
    left(90)
    forward(300)
    left(90)
def crash(s):
    """Write the crash reason, then terminate on the next screen click."""
    write("crash: " + s)
    exitonclick()
    exit(0)

def victory():
    """Announce the win, then terminate on the next screen click."""
    write("victory")
    exitonclick()
    exit(0)
def amIFrontDoor():
    """True when the turtle stands at (300, 210) facing east (heading 0),
    i.e. directly in front of the door."""
    return getPosition() == (300, 210, 0)
def openDoor():
    """Open the door the turtle is facing by over-drawing it in white.

    Crashes the game when the turtle is not at the door position, or when
    the door is already open. Sets the global door state on success.
    """
    global door
    if not(amIFrontDoor()):
        crash("you are not in front of the door")
    if isDoorOpened():
        crash("the door is already open!")
    # Trace the closed-door segment in white to "erase" it, then return to
    # the starting pose and restore the pen color.
    pencolor("white")
    forward(10)
    left(90)
    forward(10)
    left(180)
    forward(20)
    left(180)
    forward(10)
    left(90)
    forward(10)
    left(180)
    pencolor("black")
    door = True
def amIFrontWall():
    """True when the turtle is facing one of the four walls — and is not at
    the door.

    Fixed: the original returned ``not(amIFrontDoor) or case1 ...`` — it
    never *called* amIFrontDoor, so ``not <function>`` was always False and
    the door position itself (x==300, h==0) was reported as a wall.
    """
    x, y, h = getPosition()
    case1 = (x == 300) and (h == 0)
    case2 = (y == 300) and (h == 90)
    case3 = (x == 0) and (h == 180)
    case4 = (y == 0) and (h == 270)
    return not amIFrontDoor() and (case1 or case2 or case3 or case4)
def step(n):
    """Advance *n* grid cells (n*10 units), then check the outcome:
    leaving through the open door wins; crossing any wall crashes."""
    forward(n*10)
    x,y,h = getPosition()
    if (x > 300) and (y == 210) and (h == 0) and isDoorOpened():
        victory()
    else:
        if (x < 0) or (y < 0) or ( x > 300) or (y > 300):
            crash("you went through a wall")
def problem1():
    # Start exactly at the door position, default heading (east).
    drawRoom()
    penup()
    setposition(300,210)
    pendown()

def problem2():
    # Start somewhere on the door row (y=210), random grid x.
    drawRoom()
    penup()
    setposition(randint(0,30)*10,210)
    pendown()

def problem3():
    # Random grid position inside the room (y capped at 210), default heading.
    drawRoom()
    penup()
    setposition(randint(0,30)*10,randint(0,21)*10)
    pendown()

def problem4():
    # Random grid position AND random cardinal heading.
    # NOTE(review): y here goes up to 300, unlike problem3's cap of 210 —
    # confirm that is intended.
    drawRoom()
    penup()
    setposition(randint(0,30)*10,randint(0,30)*10)
    setheading(randint(0,3)*90)
    pendown()
|
from isis.table_view import Table_View
from isis.data_model.table import Table
from isis.dialog import Dialog
from decimal import Decimal
from pymongo import MongoClient
from dict import Dict
from PySide.QtGui import QVBoxLayout, QMenu
from PySide.QtCore import Qt
# Module-level MongoDB connection used by the splits views.
# NOTE(review): credentials are hard-coded in source; move to configuration.
d1 = MongoClient('mongodb://comercialpicazo.com', document_class=Dict)
d1.admin.authenticate('alejandro', '47exI4')
class Splits_Model(Table):
    """Read-only table model listing the ledger splits of one account, with
    a running balance computed oldest-to-newest."""

    def __init__(self):
        Table.__init__(self)
        self.columns.add('id', str)
        self.columns.add('datetime', str)
        self.columns.add('num', int)
        self.columns.add('description', str)
        # self.columns.add('transfered', str)
        # Currency-formatted columns ('c').
        self.columns.add('debit', Decimal, 'c')
        self.columns.add('credit', Decimal, 'c')
        self.columns.add('balance', Decimal, 'c')
        self._account = None
        self.with_new_empty_row = False
        self.readonly = True

        def get_data_transfered(row):
            # Describe the counter-side of a 2-split transaction, or
            # 'multiple' for transactions with more than two splits.
            if 'tx' in row:
                tx = row.tx
                if 'splits' in tx and len(tx.splits) == 2 and 'id' in row:
                    splits = tx.splits
                    if 'id' not in splits[0] or splits[0].id != row.id:
                        counter = splits[0]
                        if 'description' in counter:
                            return counter.description
                elif 'splits' in tx and len(tx.splits) > 2:
                    return 'multiple'
            return 'unknow'
        # self.columns['transfered'].getter_data = get_data_transfered

        # Positive split values display as debits, negatives as credits.
        self.columns['debit'].getter_data = lambda x: x.value if 'value' in x and x.value > 0 else None
        self.columns['credit'].getter_data = lambda x: -x.value if 'value' in x and x.value < 0 else None

    @property
    def account(self):
        return self._account

    @account.setter
    def account(self, account):
        # Point the model at *account*: reload its splits from MySQL and
        # recompute the running balance; None clears the table.
        self._account = account
        if account is not None:
            # c = d1.kristine.transaction.find({'splits.account.id': account.id},
            #     {'_id': False, 'datetime': True, 'splits': True, 'id': True})
            nature = 'debitable'
            self.account.nature = nature
            from katherine import d6_config
            import pymysql
            d6 = pymysql.connect(**d6_config)
            d6_cursor = d6.cursor(pymysql.cursors.DictCursor)
            d6_cursor.execute('select id, tx_id, datetime, description, num, value from kristine.split '
                              'where account_id = %s order by datetime desc;', (account.id,))
            splits = [Dict(split) for split in d6_cursor]
            # Rows arrive newest-first; accumulate the balance oldest-first.
            balance = Decimal()
            for split in reversed(splits):
                if 'value' in split:
                    if nature == 'debitable':
                        balance += split.value
                    else:
                        balance -= split.value
                split.balance = balance
            self.datasource = splits
            d6_cursor.close()
            d6.close()
        else:
            self.datasource = None
        # self.with_new_empty_row = account is not None
# self.with_new_empty_row = account is not None
class Splits_Table_View(Table_View):
    """Table view with single-row selection and a right-click context menu
    offering "view transaction"."""

    def __init__(self, *args, **kwargs):
        Table_View.__init__(self, *args, **kwargs)
        self.setSelectionBehavior(self.SelectRows)
        self.setSelectionMode(self.SingleSelection)

    def mousePressEvent(self, event):
        # Right click on a valid row: offer to open the full transaction.
        if event.button() == Qt.RightButton:
            index = self.indexAt(event.pos())
            if index.isValid():
                menu = QMenu(self)
                a = menu.addAction('view transaction')

                def h():
                    # Look the transaction up in Mongo and show it in a viewer.
                    row = self.model.datasource[index.row()]
                    from katherine import d1
                    tx = d1.kristine.transaction.find_one({'id': row.tx_id}, {'_id': False})
                    if tx is not None:
                        from isis.kristine.viewer_transaction import Viewer_Transaction
                        vv = Viewer_Transaction()
                        vv.transaction = tx
                        vv.show()
                a.triggered.connect(h)
                menu.popup(event.globalPos())
        # Preserve the default press handling (selection etc.).
        Table_View.mousePressEvent(self, event)
class Splits(Dialog):
    """Dialog combining an account picker with the splits table."""

    def __init__(self, *args, **kwargs):
        Dialog.__init__(self, *args, **kwargs)
        self.setWindowTitle(self.__class__.__name__)
        self.resize(800, 500)
        self._account = None
        from isis.kristine.widget_account import Widget_Account
        self.viewer_account = Widget_Account(self)
        self.tableview = Splits_Table_View(self)
        layout_main = QVBoxLayout(self)
        layout_main.addWidget(self.viewer_account)
        layout_main.addWidget(self.tableview)
        self.layout = layout_main
        self.model = Splits_Model()
        self.tableview.model = self.model
        self._account = None
        self.account = None

    @property
    def account(self):
        # The account widget is the single source of truth for the selection.
        return self.viewer_account.account

    @account.setter
    def account(self, account):
        # Keep the model and the account widget in sync.
        self.model.account = account
        self.viewer_account.account = account
if __name__ == '__main__':
    # Manual smoke test: open the dialog on a fixed account id.
    from PySide.QtGui import QApplication
    import sys
    app = QApplication(sys.argv)
    vv = Splits()
    vv.account = Dict({'id': '100-11'})
    vv.show()
    sys.exit(app.exec_())
|
#!/usr/bin/env python
# Leap-year test with the full Gregorian rule: divisible by 4 is leap,
# EXCEPT century years, which are leap only when divisible by 400.
# Fixed: the original treated every century divisible by 4 as leap
# (it reported 1900 as a leap year). print(...) works on Python 2 and 3.
year = int(input("Enter a year: "))
if year % 400 == 0:
    print('True')
elif year % 100 == 0:
    print('False')
elif year % 4 == 0:
    print('True')
else:
    print('False')
|
import turtle
t1 = turtle.Turtle()
length = float(input("Please Enter A Length for all sides: "))

def _polygon(sides):
    """Draw a regular polygon with *sides* sides of the shared `length`.

    Uses the exact exterior angle 360/sides — the original hard-coded a
    rounded 51.43 for the heptagon, leaving it slightly open.
    """
    for _ in range(sides):
        t1.left(360.0 / sides)
        t1.forward(length)

# Draw triangle → square → pentagon → hexagon → heptagon, moving forward
# between shapes exactly as before.
_polygon(3)
t1.forward(150)
_polygon(4)
t1.forward(150)
_polygon(5)
t1.forward(200)
_polygon(6)
t1.forward(250)
_polygon(7)
import datetime
def date_range(start, end, delta=None):
    """Yield dates from *start* to *end* inclusive, stepping by *delta*
    (default: one day).

    Terminates with the loop condition instead of ``raise StopIteration``:
    under PEP 479 (Python 3.7+) raising StopIteration inside a generator
    becomes a RuntimeError.
    """
    if delta is None:
        delta = datetime.timedelta(days=1)
    while start <= end:
        yield start
        start = start + delta
def main():
    """Count dates landing on the 1st of a month when stepping one week at a
    time through the 20th century, and print the total.

    NOTE(review): stepping 7 days from 1901-01-01 only visits dates sharing
    that day's weekday (a Tuesday) — confirm that is the weekday intended.
    """
    start = datetime.date(1901, 1, 1)
    end = datetime.date(2000, 12, 31)
    delta = datetime.timedelta(days=7)
    # Single-argument print(...) is valid on Python 2 and 3 (the original
    # py2 print statement is a SyntaxError on py3).
    print(sum([1 for d in date_range(start, end, delta) if d.day == 1]))
if __name__ == '__main__':
main()
|
__author__ = 'Zaheeb Shamsi'
import json
import requests
def load_json():
    """Load the EC2 schedule request from ./ec2.json (cwd-relative)."""
    with open('ec2.json') as env:
        return json.load(env)
class ScheduleEC2:
    """Thin client for the EC2 start/stop scheduling API gateway."""

    @staticmethod
    def schedule_ec2(ec2json):
        """
        :param ec2json: The json from the user.
        :return: response from the aws server.
        """
        url = "https://4xmhdj3mgj.execute-api.us-east-2.amazonaws.com/live_ec2/start-stop"
        header = {
            "Content-Type": "application/json"
        }
        res = requests.post(url, data=json.dumps(ec2json, indent=4), headers=header)
        if res.status_code != 200:
            print("There was a problem in the API Response. ")
        # Both success and error bodies are JSON — the original duplicated
        # this decode/return in both branches; decode once either way.
        resp = res.content.decode('utf-8')
        return json.loads(resp)
# Script entry: runs at import time — load the request, post it, print the
# API's reply.
ec2_json = load_json()
obj = ScheduleEC2()
out = obj.schedule_ec2(ec2_json)
print(out)
|
from scripts.statement import Statement
def test_generate_monthly_statement():
    # 4% minimum payment on a 42 balance is 1.68.
    statementMonth0 = Statement(42, 0.04)
    assert statementMonth0.getMinPmt(42, 0.04) == 1.68

def test_print_statement():
    # The printed statement echoes the balance and minimum payment.
    statementMonth0 = Statement(42, 0.04)
    assert statementMonth0.printStatement(
        42, 1.68) == "Balance: 42; Minimum Payment: 1.68"
|
import socket
import datetime
def Main():
    """Listen for UDP datagrams on the prompted port and print each payload
    with its arrival time; a payload of 'q' stops the loop."""
    listeningPort = input("Enter listening port: ")
    print("starting...")
    mySocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind on all interfaces.
    mySocket.bind(('0.0.0.0', int(listeningPort)))
    data = ""
    while data != 'q':
        # Blocking receive; datagrams are capped at 512 bytes.
        data = mySocket.recv(512).decode()
        print ("Time Recvd:"+datetime.datetime.now().strftime("%H:%M:%S.%f")+", "+data)
    mySocket.close()

if __name__ == '__main__':
    Main()
|
import re
def hash(banks):
    """Serialize a bank configuration into a string key for seen-set lookups.

    NOTE: intentionally shadows the built-in hash() within this module.
    """
    return '-'.join(map(str, banks))
def shuffle(banks):
    """One redistribution cycle: empty the fullest bank (lowest index wins
    ties) and deal its blocks one-by-one to the following banks, wrapping
    around. Mutates and returns *banks*."""
    size = len(banks)
    richest = banks.index(max(banks))
    blocks = banks[richest]
    banks[richest] = 0
    for offset in range(1, blocks + 1):
        banks[(richest + offset) % size] += 1
    return banks
def repeat(banks):
    """Redistribute until a previously-seen configuration reappears.

    Returns (number of redistribution steps taken, the repeated banks list).
    """
    seen = {hash(banks)}
    steps = 0
    while True:
        steps += 1
        banks = shuffle(banks)
        state = hash(banks)
        if state in seen:
            return steps, banks
        seen.add(state)
# Advent of Code 2017 day 6 driver: part 1 counts steps until the first
# repeated configuration; part 2 re-runs from that configuration to measure
# the loop length.
with open('input') as f:
    parts = re.split(r'\s+', f.readline().strip())
banks = [int(i) for i in parts]
one, banks = repeat(banks)
two, _ = repeat(banks)
print('Part 1:', one)
print('Part 2:', two)
from game.Event import Event
from utils.beauty_print import *
from utils.common import line, prim_opt, valid_number, clear, is_integer
from game.DataStructure import DataStructure
from game.Board import Board
from game.Logger import Logger
from entity.livingbeing.person.player.Player import Player
class PlayerIM(Player):
    """Terminal (interactive-mode) player: console menus for moving, eating,
    passing the turn, and creating/adding players."""

    def __init__(self):
        super().__init__()

    # called on set_factory() -> update_subscribers()
    # def update_subscribers(self):
    #     subscribe to building_board_print, so that when the board builds
    #     its print output, the Player fetches every player currently in the
    #     match and inserts them into the board's list of things to print

    # Starting (and maximum) energy for newly created players.
    MAX_ENERGY = 30

    def being_move(self, being_id):
        # Generic beings have no interactive move in this mode.
        pass

    def player_move(self, player_id):
        """Dispatch the player's turn to the handler for its current mode."""
        # player = self.get_players()[player_id]
        player = self.get_concrete_thing(player_id)
        # execute a function according to the mode of the player
        self.modes_func[player[self.attr_mode]](self.reference(player_id))

    # new_concrete_thing
    def create_player(self, name):
        """Create a new player record with default money/energy/dice method
        at board origin, and persist it in the data store."""
        data : DataStructure = self.get("DataStructure")
        player = self.new_concrete_thing()
        self.update_concrete(player)
        board : Board = self.get("Board")
        player[self.attr_name] = name
        player[self.attr_money] = 200
        player[self.attr_energy] = self.MAX_ENERGY
        player[self.attr_max_energy] = self.MAX_ENERGY
        player[self.attr_dice_method] = "DiceRollOrRandom"
        player[self.attr_coord] = board.rc_to_coord(0, 0)
        # self.add_attr_if_not_exists(player, self.attr_dice_method, "DiceRollOrRandom")
        data.keep_concrete_thing(player["id"], player, self.get_category())

    def update_concrete(self, player: dict):
        # print_debug("chamadooooooooooooooooooooooo",__name__,line())
        super().update_concrete(player)
        # Backfill energy attributes on records created before they existed.
        self.add_attr_if_not_exists(player, self.attr_energy, self.MAX_ENERGY)
        self.add_attr_if_not_exists(player, self.attr_max_energy, self.MAX_ENERGY)

    def move_on_board(self, params=None):
        """Interactive turn loop: keep prompting until the player rolls the
        dice, eats, passes the turn, or saves and exits to the match menu."""
        player_id = params["id"]
        player = self.get_concrete_thing(player_id)
        name = player["name"]
        # Loop while it is still this player's turn.
        while(player_id == self.get("GameManager").turn_of()):
            print_normal(f"\nEscolha uma opção")
            print_normal(f"\tENTER) Jogar dado")
            # Eating is only offered when the player carries Food.
            if("Food" in player["inventory"]):
                print_normal(f"\t{prim_opt.EAT_FOOD}) Comer")
            print_normal(f"\t{prim_opt.PASS_TURN}) Passar vez")
            # print_normal(f"\t{prim_opt.SAVE}) Salvar")
            # print_normal(f"\t{prim_opt.EXIT}) Sair")
            print_normal(f"\t{prim_opt.SAVE_EXIT}) Salvar e sair para menu da partida")
            option = input_question("\nOpção: ").upper()
            if (option == prim_opt.PASS_TURN):
                # print_normal("Passando vez... ENTER para confirmar ou outra coisa para cancelar\n")
                print_normal(f"Passando vez de {name}... \n")
                # if len(input("")) == 0:
                break
            elif (option == prim_opt.ROLL_DICE):
                self.roll_dice_to_move(player_id)
                break
            elif (option == prim_opt.EAT_FOOD):
                # eat_food returns falsy when eating killed the player.
                if (not self.eat_food(player_id)):
                    a = self.get_concrete_thing(player_id)
                    # print_debug(f"O jogador {player_id} morreu comendo",__name__,line())
                    break
                # break
            elif (option == prim_opt.SAVE_EXIT):
                print_sucess("Salvando e saindo...")
                self.get("GameManager").stop()
                self.get("GameManager").save()
                return
            else:
                print_error(f"Opção ({option}) inválida! pressione ENTER")
                input("")

    def create_players(self):
        """Loop creating new players (by name) or adding existing
        out-of-match players (by number) until the user presses ENTER."""
        clear()
        created_players = 0
        while(True):
            print_header("\tCriação ou adicionar jogadores fora da partida\n")
            print_warning("\t\tobs: Aperte ENTER para sair\n")
            players_oom = self.get("GameManager").player_oom.get_players_list()
            print_header("Fora da partida: ")
            self.print_players_list(players_oom)
            print_normal("\nNome do jogador para criá-lo")
            name = input_question(" ou o N° de alguém para adicionar na partida: ")
            # Empty input ends the creation loop.
            if (len(name) == 0):
                break
            # A number selects an existing out-of-match player to add.
            if (is_integer(name)):
                self.add_player_on_match(name)
            else:
                clear()
                self.create_player(name)
                created_players += 1
                print_sucess(f"Jogador {name} criado!\n")
        self.get("GameManager").save()
        print_sucess(f"Foram criados {created_players} jogadores")

    # def on_school_move(self, params=None):
    #     person = self.get_concrete_thing(params["id"])
    #     name = person["name"]
    #     print_warning(f"Pessoa {name} está na escola")

    def gui_output(self, text, color=bcolors.ENDC, end='\n',pause=False):
        # Print colored text; optionally wait for ENTER before continuing.
        print_header(f"{color}{text}{bcolors.ENDC}",end=end)
        if pause:
            input("")

    def gui_input(self, _id=None, function=None, question_id=None, params=None):
        # All prompting is done by earlier print calls; just read a raw line.
        return input_question("")

    # def kill_being(self, being_ref, cause=None):
    #     if not cause:
    #         cause = ''
    #     player = self.get_concrete_thing_by_ref(being_ref)
    #     self.drop_inventory(being_ref)
    #     person = self.get_concrete_thing_by_ref(being_ref)
    #     name = person["name"]
    #     categ = being_ref["category"]
    #     log : Logger = self.get("Logger")
    #     log.add(f"[{categ}] {name} morreu! {cause}", color=bcolors.FAIL)
    #     data : DataStructure = self.get("DataStructure")
    #     self.remove_player(being_ref["id"])
    #     try:
    #         super().kill_being(being_ref, cause)
    #     except:
    #         data.data["PlayerOOM"][being_ref["id"]] = player
|
from pylab import *
from core import *
import numpy
import simuPOP as sim
from simuPOP.utils import *
import sys
import time
class Model(Simulation):
    """ Class that provides facilities to run simulations in simuPOP with an island model

    :param Gen: number of generations over 1 simulation
    :param loci: number of loci
    :param alleles: number of alleles
    :param dist: distance between loci (it can be either a constant number or a list)
    :param numPop: number of populations we want to simulate
    :param sizePop: population size of each subpopulation
    """
    def __init__(self, Gen, loci, alleles, dist, numPop):
        self.loci = loci
        self.alleles = alleles
        self.dist = dist
        self.numPop = numPop
        Simulation.__init__(self, Gen, loci=self.loci, alleles=self.alleles, dist=self.dist, numPop=self.numPop)

    def reset(self):
        """ important to reset the parameters of the simulation for each run"""
        Simulation.reset(self)

    def run(self, step=2,
            sizePop=100,
            infoFields=['migrate_to', 'fitness'],
            recombination_rate = 0.00375,
            migration_rate = 0.01,
            mutation_rate = [0.00000001],
            subPopNames = ['x', 'y', 'z', 'w'],
            alleleNames = ['A', 'B'],
            s1 = 0.1,
            burnin=50,
            **kargs):
        """ This method will perform the simulation in simuPOP.
        The above parameters are the default ones but they can easily be changed.

        :param step: the step in the generations
        :param sizePop: define the population size. If you have more than one subpopulation, this is the size of the one subpopulation.
            if you want your subpopulations to have different size, you need to change a little the script.
        :param numPop: number of subpopulations
        :param infoFields: fields needed for simupop
        :param recombination_rate
        :param migration_rate
        :param mutation rate
        :param initFreq: initialize the frequencies for all loci
        :param subPopNames: names of the subpopulations
        :param alleleNames
        :param s1: value of the selection coefficient
        :param burnin:

        Fixes applied: py2-only print statements replaced with forms valid on
        both Python 2 and 3; ``range(...) + range(...)`` (a TypeError on
        Python 3) replaced with list concatenation.
        NOTE(review): the mutable default arguments are never mutated here,
        but consider tuples if this API evolves.
        """
        self.reset()
        pop = sim.Population(size=[sizePop]*self.numPop, loci=self.loci, lociPos=list(range(self.dist, (self.dist*self.loci)+1, self.dist)), subPopNames=subPopNames, infoFields=infoFields)
        simu = sim.Simulator(pop)
        print("The simulation has started")
        t1 = time.time()
        # All loci except locus 50 (the selected locus) are free to mutate.
        mutate_snps = list(range(0, 50)) + list(range(51, 101))
        # define the initialization of each loci based the beta distribution where a and b parameters are allele frequencies from noncoding human regions
        snps = [0.14, 0.11, 0.17, 0.11, 0.32, 0.33, 0.21, 0.11, 0.11, 0.28, 0.11, 0.12, 0.8, 0.66, 0.74, 0.68, 0.66, 0.77, 0.77, 0.76, 0.77, 0.74, 0.72, 0.11, 0.73, 0.72, 0.72, 0.72, 0.54, 0.17, 0.78, 0.64, 0.78, 0.2, 0.24, 0.25, 0.78, 0.66, 0.2, 0.14, 0.75, 0.16, 0.72, 0.18, 0.77, 0.42, 0.34, 0.7, 0.17, 0.14, 0.2, 0.46, 0.13, 0.26, 0.16, 0.13, 0.14, 0.24, 0.18, 0.36, 0.71, 0.27, 0.28, 0.25, 0.25, 0.3, 0.19, 0.14, 0.16, 0.3, 0.39, 0.16, 0.24, 0.32, 0.11, 0.18, 0.48, 0.31, 0.21, 0.15, 0.34, 0.71, 0.33, 0.18, 0.71, 0.13, 0.23, 0.2, 0.22, 0.23, 0.16, 0.23, 0.23, 0.22, 0.24, 0.82, 0.36, 0.37, 0.72, 0.16, 0.14]
        self.initFreq = []
        for i in range(len(snps)):
            alpha = float(4*sizePop*migration_rate*snps[i])
            bhta = float(4*sizePop*migration_rate*(1-snps[i]))
            p = numpy.random.beta(alpha, bhta)
            # Resample until the starting frequency is comfortably polymorphic.
            while (p >= 0.9 or p <= 0.1):
                p = numpy.random.beta(alpha, bhta)
            print(" SNP {snp} with alpha {alpha}, bhta {bhta} and frequency {p}".format(snp=i, alpha=alpha, bhta=bhta, p=p))
            self.initFreq.append(p)
        simu.evolve(
            initOps=[sim.InitGenotype(freq=[self.initFreq[i], 1-self.initFreq[i]], loci=i) for i in range(len(snps))],
            # initialize the sex and select the 50 loci (parents)
            preOps = [sim.InitSex(maleProp=0.5, at=[0]),
                      # initialize the genotype of locus 50 at generation 0 (in the beginning of the simulation)
                      sim.PyOperator(self.genotypeBegin, at=[0]),
                      # Wait 50 generations for the system to reach equilibrium
                      # Then, change the the genotype of locus 50 at generation 50 by inserting a single copy of allele 0 in one individual
                      sim.PyOperator(self.genotypeAfter, at=[50]),
                      # function that carries out the selection proccess
                      sim.MaSelector(loci=50, wildtype=0, fitness=[1+s1, 1+s1/2, 1], begin=50, end=-1, subPops=1)],
            # recombination
            matingScheme=sim.RandomMating(ops=[
                sim.Recombinator(rates=recombination_rate)]),
            # mutation and migration of offsprings
            postOps = [
                sim.SNPMutator(u=mutation_rate, loci=mutate_snps),
                # call function to calculate Fst and check for equilibrium state
                sim.PyOperator(self.calcFst, step=step),
                # migration
                # Here we define an island model, but this can easily be changed.
                # For more information about the migration models, please look in the documentation of SimuPOP here http://simupop.sourceforge.net/manual_svn/build/userGuide_ch7_sec3.html
                sim.Migrator(sim.utils.migrIslandRates(migration_rate, self.numPop)),
                # call function to save the allele frequencies
                sim.PyOperator(self.checkAlleles, step=step, param = subPopNames),
                # check if locus 50 is lost due to genetic drift. If yes, we terminate the simulation
                sim.Stat(alleleFreq=50, step=step, subPops=1, begin=50, end=-1),
                sim.TerminateIf('alleleFreq[50][0] == 0', step=step, begin=50, end=-1),
                # check the progress of the simulation
                sim.PyEval('"Gen: %d" % gen', step=step),
                sim.PyOutput('\n', step=step),
            ],
            gen=self.Gen
        )
        t2 = time.time()
        print("simulation took {0} seconds.".format(t2 - t1))
        # sys.stderr = old_stderr
        # debugOutput.close()
# debugOutput.close()
class MultiModel(MultiSimulation):
    """ Class that provides facilities to run simulations in simuPOP mutiple times

    >>> g = MultiModel(Gen=2000,Nruns=100)
    >>> g.run()

    :param Gen: number of generations over 1 simulation
    :param Nruns: number of runs
    """

    # the init arguments must be those of the optim_func
    def __init__(self, Gen=1000, loci=100, dist=4, alleles=2, Nruns=10, numPop=4):
        # Pin the per-run callable to Model; everything else is forwarded.
        super(MultiModel, self).__init__(Gen, loci, dist, alleles, Nruns, numPop,
                                         optim_func=Model)

    # the parameters are the same as the ones used when running one simulation
    def run(self, step=10, sizePop=100, loci=100, infoFields=None, recombination_rate=0.00001,
            migration_rate=0.05, mutation_rate=None, subPopNames=None, alleleNames=None,
            s1=0.015, burnin=100, **kargs):
        """Run the simulation Nruns times; see the single-run Model.run for parameter meaning.

        Fix: list-valued defaults were mutable default arguments shared across
        calls; they are now None sentinels resolved per call (backward
        compatible -- explicit arguments behave exactly as before).
        """
        if infoFields is None:
            infoFields = ['migrate_to', 'fitness']
        if mutation_rate is None:
            mutation_rate = [0.00000001]
        if subPopNames is None:
            subPopNames = ['x', 'y', 'w', 'z']
        if alleleNames is None:
            alleleNames = ['A', 'B']
        # NOTE(review): ``loci`` is accepted but not forwarded to the parent
        # run(), mirroring the original code -- confirm whether the parent
        # derives it from __init__ instead.
        super(MultiModel, self).run(step=step,
                                    sizePop=sizePop,
                                    infoFields=infoFields,
                                    recombination_rate=recombination_rate,
                                    migration_rate=migration_rate,
                                    mutation_rate=mutation_rate,
                                    subPopNames=subPopNames,
                                    alleleNames=alleleNames,
                                    s1=s1,
                                    burnin=burnin, **kargs)
|
import re

# Practice script for Python's ``re`` module: findall/search/groups/sub/verbose.

phoneNumber = "415-555-1011"
phonenumberRegEx = re.compile(r"\d\d\d")

# 1st way with findAll method, this will return all matches
print(phonenumberRegEx.findall(phoneNumber))

# 2nd way with search, this returns a match object on which we call the group method. This will just return the first match.
matchObject = phonenumberRegEx.search(phoneNumber)
print(matchObject.group())

# Grouping syntax
phonenumberRegEx = re.compile(r"(\d{3})-(\d{3})-(\d{4})")
matchObject2 = phonenumberRegEx.search(phoneNumber)  # Notice the use of the raw string!
print(matchObject2.group(2))

batRegEx = re.compile(r"Bat(man|mobile|copter)", re.IGNORECASE)
print(batRegEx.findall("Batman lost a batmobile"))
matchObject3 = batRegEx.search("Batman lost a batmobile")
# Can return None as object -- use identity comparison ("is not None"),
# the idiomatic form (the original compared with "!= None").
if matchObject3 is not None:
    print(matchObject3.group(1))

# Without precompiling the regex.  Raw strings added throughout: a bare
# "\d" in a normal string literal is an invalid escape sequence and warns
# (eventually errors) on modern Python.
print(re.match(r"Bat(wo)?man", "Batman"))
print(re.search(r"Bat(wo)*man", "Batman") is None)
print(re.findall(r"Bat(wo)+man", "Batwoman"))

# Non greedy (means matching the shortest string possible, with ? after curly braces):
print(re.match(r"\d{3,5}?", "1234567890"))

# .* means all but not newline chars, to add those as well use second argument re.DOTALL
dotStar = re.compile(r".*", re.DOTALL | re.IGNORECASE)
print(dotStar.search("Hello\nWorld!"))

# Find and replace using sub method
namesRegex = re.compile(r"Agent \w+")
print(namesRegex.sub("***", "Agent Bob is on duty and so is Agent Alice"))

# Replace with parts of the original group (use raw string because of \1 for group)
namesRegex2 = re.compile(r"Agent (\w)\w*")
print(namesRegex2.sub(r"\1***", "Agent Bob is on duty and so is Agent Alice"))

# Verbose lets you use multiline and comments into the regex
verboseRegex = re.compile(r'''
    \d\d\d
    -       # A comment here for example still works
    \d\d\d
    ''', re.VERBOSE)
print(verboseRegex.search("123-456"))
|
"""
Haardt, F., & Madau, P. 2012, ApJ, 746, 125
Notes
-----
"""
import os
import numpy as np
from ares.physics.Constants import h_p, c, erg_per_ev
_input = os.getenv('ARES') + '/input/hm12'
pars_ml = \
{
'a': 6.9e-3,
'b': 0.14,
'c': 2.2,
'd': 5.29,
}
pars_err = \
{
'a': 0.001,
'b': 0.21,
'c': 0.14,
'd': 0.19,
}
def _read_UVB():
    """Parse the tabulated HM12 UV background from ``$ARES/input/hm12/UVB.out``.

    Returns
    -------
    tuple
        ``(z, data)``: ``z`` is the 1-D redshift grid (first non-header row),
        ``data`` is a 2-D array whose column 0 is wavelength and whose
        remaining columns are fluxes, one column per redshift.
    """
    fn = 'UVB.out'
    skip = 20  # number of header lines to discard

    data = []
    z = None
    # Fix: use a context manager -- the original opened the file and never
    # closed it.  Also compare against ``skip`` instead of repeating the
    # magic number 20.
    with open('{0!s}/{1!s}'.format(_input, fn), 'r') as f:
        for i, line in enumerate(f):
            if i < skip:
                continue
            if i == skip:
                # The first retained row is the redshift grid.
                z = np.array(list(map(float, line.split())))
                continue
            data.append(list(map(float, line.split())))

    return z, np.array(data)
def MetaGalacticBackground():
    """Return (z, E, fluxes) shaped like an ares.simulations.MetaGalacticBackground.

    Energies are derived from the file's wavelength column via E = h*c/lambda
    (wavelength converted from Angstroms to cm), expressed in eV.
    """
    z, data = _read_UVB()

    # Transpose so each file column becomes a row: row 0 is wavelength,
    # the remaining rows are fluxes per redshift.
    table = data.T
    wavelengths = table[0]
    fluxes = table[1:]

    # Photon energy in eV; 1 Angstrom = 1e-8 cm.
    E = h_p * c / (wavelengths / 1e8) / erg_per_ev

    # Reverse both axes so the ordering matches the ares convention.
    return z[::-1], E[::-1], fluxes[::-1, ::-1]
def SFRD(z, **kwargs):
    """Best-fit star formation rate density of Haardt & Madau (2012).

    :param z: redshift (scalar or numpy array).
    :param kwargs: ignored; accepted only for call-signature compatibility.
    :return: SFRD in the units of the HM12 fit.
    """
    rising = 6.9e-3 + 0.14 * (z / 2.2)**1.5
    damping = 1. + (z / 2.7)**4.1
    return rising / damping
def _qso_sed_uv():
    """Placeholder for the quasar UV spectral energy distribution (not implemented)."""
    pass


def _qso_emissivity_uv():
    """Placeholder for the quasar UV emissivity (not implemented)."""
    pass


def _qso_sed_xray():
    """Placeholder for the quasar X-ray spectral energy distribution (not implemented)."""
    pass


def _qso_emissivity_xray():
    """Placeholder for the quasar X-ray emissivity (not implemented)."""
    pass
|
# -*- coding: utf-8 -*-
import re
import json
import scrapy
from scrapy_redis.spiders import RedisSpider
from ..items import Hospital99Item
#
class InfoSpider(RedisSpider):
    """Slave spider that parses one hospital detail page per URL popped
    from the ``hos99_spider:slave8_urls`` redis list."""

    name = 'hos99_slave'
    redis_key = 'hos99_spider:slave8_urls'

    def __init__(self, *args, **kwargs):
        domain = kwargs.pop('domain', '')
        # Fix: the original assigned to a misspelled ``allowed_domans``
        # attribute, so the optional domain filter never took effect.
        # Materialize to a list: on Python 3 ``filter`` returns a lazy
        # iterator, which scrapy would exhaust after the first use.
        self.allowed_domains = [d for d in domain.split(',') if d]
        super(InfoSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        """Extract one Hospital99Item from a hospital detail page."""
        # Build the record number NUM from the URL: the slug after 'cn/'
        # plus the numeric path segment (presumably city slug + page id --
        # TODO confirm against the URL scheme).
        a = re.findall(r'cn/(.*?)/', response.url)[0]
        b = re.findall(r'/(\d+)/', response.url)[0]
        num = a + b
        con = response.xpath("//div[@class='hpi_content clearbox']")
        item = Hospital99Item()
        item['LINK'] = response.url
        item['HOS_NAME'] = response.xpath("//div[@class='hospital_name clearbox']/h1/text()").extract()[0].strip()
        item['ALIAS'] = con.xpath("ul/li[1]/span/text()").extract()[0]
        # Optional fields default to "" when the node is missing.  The bare
        # ``except:`` clauses are narrowed to IndexError (raised by
        # ``extract()[0]`` on an empty selector) so real failures surface.
        try:
            item['ADDRESS'] = con.xpath("ul/li[5]/span/text()").extract()[0]
        except IndexError:
            item['ADDRESS'] = ""
        try:
            item['TEL'] = con.xpath("ul/li[4]/span/text()").extract()[0]
        except IndexError:
            item['TEL'] = ""
        try:
            item['RATE'] = con.xpath("ul/li[3]/span/text()").extract()[0]
        except IndexError:
            item['RATE'] = ""
        try:
            item['NATURE'] = con.xpath("ul/li[2]/text()").extract()[0]
        except IndexError:
            item['NATURE'] = ""
        item["NUM"] = num
        yield item
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
# import temporal data
# Wide-format time-series tables, one row per 'Province/State' with date
# columns starting at index 4 (presumably the JHU CSSE layout -- TODO
# confirm).  Paths are relative to the working directory.
death_df = pd.read_csv('./time_series_2019-ncov-Deaths.csv')
confirmed_df = pd.read_csv('./time_series_2019-ncov-Confirmed.csv')
recovered_df = pd.read_csv('./time_series_2019-ncov-Recovered.csv')
# temporal plot
def _plot_series(df, region):
    """Shared helper: draw the cumulative-count time series for one region.

    :param df: one of the wide dataframes loaded above; columns from index 4
        onward are dates, one row per 'Province/State' value.
    :param region: value matched against the 'Province/State' column (the
        public wrappers call this ``country`` for historical reasons).
    """
    counts = df[df['Province/State'] == region].values[0][4:]
    dates = df.columns.values[4:]

    def _fmt(date):
        # 'M/D/YY ...' -> 'M-D' (keeps month and day only)
        # assumes slash-separated date headers -- TODO confirm column format
        parts = date.split(' ')[0].split('/')
        return parts[0] + '-' + parts[1]

    points = {'date': [], 'count': []}
    for i in range(len(counts)):
        # Skip missing observations (NaN cells).
        if not math.isnan(counts[i]):
            points['count'].append(int(counts[i]))
            points['date'].append(_fmt(dates[i]))

    plot_df = pd.DataFrame(points).drop_duplicates()
    sns.relplot(x='date', y="count", data=plot_df, height=8, aspect=1.5)
    st.pyplot()


def plot_death(country='Beijing'):
    """Plot cumulative deaths over time for one province/state."""
    _plot_series(death_df, country)


def plot_confirmed(country='Beijing'):
    """Plot cumulative confirmed cases over time for one province/state."""
    _plot_series(confirmed_df, country)


def plot_recovered(country='Beijing'):
    """Plot cumulative recovered cases over time for one province/state."""
    _plot_series(recovered_df, country)
# Page title (fix: displayed typo "Vicualization").
st.title("Visualization: Map of Coronavirus's cases")

# The sidebar's content (fix: displayed typo "Segement").
add_title = st.sidebar.title('Functional Segment')
add_selectbox = st.sidebar.selectbox(
    'See what we can do...',
    ('TimeSeries Display', 'Map Distribution', 'Visual Diagnosis')
)

if add_selectbox == 'TimeSeries Display':
    add_slot = st.sidebar.empty()
    # Which series to show, and for which province/state.
    add_sliderbar = st.sidebar.selectbox('Check the needed info',
        ('Confirmed Cases','Recovered Cases','Death Cases')
    )
    add_sliderbar_c = st.sidebar.selectbox(
        'location',
        ('Anhui', 'Beijing', 'Chongqing', 'Fujian', 'Gansu', 'Guangdong',
         'Guangxi', 'Guizhou', 'Hainan', 'Hebei', 'Heilongjiang', 'Henan',
         'Hubei', 'Hunan', 'Inner Mongolia', 'Jiangsu', 'Jiangxi', 'Jilin',
         'Liaoning', 'Ningxia', 'Qinghai', 'Shaanxi', 'Shandong',
         'Shanghai', 'Shanxi', 'Sichuan', 'Tianjin', 'Tibet', 'Xinjiang',
         'Yunnan', 'Zhejiang', 'Hong Kong', 'Macau', 'Taiwan',
         'Seattle, WA', 'Chicago, IL', 'Tempe, AZ')
    )
    # Dispatch to the matching plot function defined above.
    if add_sliderbar == 'Confirmed Cases':
        plot_confirmed(add_sliderbar_c)
    elif add_sliderbar == 'Recovered Cases':
        plot_recovered(add_sliderbar_c)
    elif add_sliderbar == 'Death Cases':
        plot_death(add_sliderbar_c)
elif add_selectbox == 'Map Distribution':
    add_para_1 = st.sidebar.markdown('The data displayed is updated in 14, Feb, 2020')
else:
    add_para_2 = st.sidebar.markdown('The visual recognition model is supported by IBM Watson Studio-Visual Recognition, to whom we are expressing our thanks')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://github.com/lxyu/kindle-clippings
import collections
import msgpack
import os
# Clips inside 'My Clippings.txt' are separated by this exact line.
BOUNDARY = u"==========\r\n"
# msgpack snapshot of every clip parsed so far (cache between runs).
DATA_FILE = u"clips.msgpack"
# Per-book text files are written here.
OUTPUT_DIR = u"output"

# Side effect at import time: make sure the export directory exists.
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)
def get_sections(filename):
    """Read a clippings file and split it into per-clip sections on BOUNDARY.

    NOTE: Python 2 only -- ``f.read().decode('utf-8')`` assumes ``read()``
    returns bytes, which does not hold for text-mode files on Python 3.
    """
    with open(filename, 'r') as f:
        content = f.read().decode('utf-8')
    # Strip the BOM some Kindle exports carry at the start of the file.
    content = content.replace(u'\ufeff', u'')
    return content.split(BOUNDARY)
def get_clip(section):
    """Parse one clipping section into {'book', 'position', 'content'}.

    Returns None for anything that does not look like a three-line highlight
    (title line, location line, content line) or whose location cannot be
    parsed into an integer position.
    """
    clip = {}
    lines = [l for l in section.split(u'\r\n') if l]
    if len(lines) != 3:
        return

    clip['book'] = lines[0]
    location_line = lines[1]
    ## handle "Your Highlight Location 39-39"
    ## and "Your Highlight on Page 193 | Location 2950-2950 | Added o"
    if "page" in location_line.lower():
        lowcase = location_line.lower()
        pagenum_start = lowcase.rfind('page') + 4
        pagenum_end = location_line.rfind('-')
        position = location_line[pagenum_start:pagenum_end]
        if '|' in position:
            position = position[:position.rfind('|')-1]
    else:
        # Fixed-offset slice: digits start right after "...on Location ".
        position = location_line[29:location_line.rfind('-')]
    if not position:
        return
    try:
        clip['position'] = int(position)
    except ValueError:
        # Fix: the original dropped into ipdb here -- a debugging leftover
        # that hard-crashes when ipdb is not installed.  Treat an
        # unparsable location as "not a clip" instead.
        return
    clip['content'] = lines[2]
    return clip
def slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.

    NOTE: Python 2 only -- relies on the ``unicode`` builtin.  The result
    is truncated to 100 characters so it stays a safe filename component.
    """
    import unicodedata
    import re
    # Drop accents / non-ASCII by NFKD-decomposing, then encoding to ASCII.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
    value = unicode(re.sub('[-\s]+', '-', value))
    value = value[:100]
    return value
def export_txt(clips):
    """
    Export each book's clips to single text.

    Writes one UTF-8 encoded ``output/<book-slug>.txt`` per book; clips are
    ordered by position and separated by a "--" divider.  (Python 2 style:
    contents are encoded to bytes before writing.)
    """
    for book in clips:
        lines = []
        for pos in sorted(clips[book]):
            lines.append(clips[book][pos].encode('utf-8'))
        filename = os.path.join(OUTPUT_DIR, u"%s.txt" % slugify(book))
        with open(filename, 'w') as f:
            f.write("\n\n--\n\n".join(lines))
def load_clips():
    """
    Load previous clips from DATA_FILE

    Returns an empty dict when the cache file does not exist yet.
    """
    try:
        # Fix: open in binary mode -- msgpack payloads are binary and
        # save_clips() writes with 'wb'; text mode corrupts the data on
        # platforms that translate newlines.
        with open(DATA_FILE, 'rb') as f:
            return msgpack.unpack(f, encoding='utf-8')
    except IOError:
        return {}
def save_clips(clips):
    """
    Save new clips to DATA_FILE

    Serializes the whole mapping with msgpack and writes it in binary mode.
    """
    payload = msgpack.packb(clips, encoding='utf-8')
    with open(DATA_FILE, 'wb') as f:
        f.write(payload)
def read_clippings(from_file):
    """Parse *from_file*, merge with previously saved clips, persist and export.

    Returns the merged ``{book: {position: content}}`` mapping.  New clips
    overwrite older ones at the same (book, position).

    NOTE: Python 2 only (uses ``dict.iteritems``).
    """
    # load old clips
    clips = collections.defaultdict(dict)
    clips.update(load_clips())

    # extract clips
    sections = get_sections(from_file)
    for section in sections:
        clip = get_clip(section)
        if clip:
            clips[clip['book']][clip['position']] = clip['content']

    # remove key with empty value
    clips = {k: v for k, v in clips.iteritems() if v}

    # save/export clips
    save_clips(clips)
    export_txt(clips)
    return clips
def main():
    """Entry point: process the default 'My Clippings.txt' export."""
    # read_clippings() already saves and exports internally.  The original
    # repeated save_clips(clips)/export_txt(clips) here with an undefined
    # local name ``clips``, raising NameError on every run -- removed.
    read_clippings(u'My Clippings.txt')


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
"""reducer.py

Hadoop-streaming reducer.  Input lines are tab-separated
"citing<TAB>cited<TAB>state" records, presumably grouped by citing id by the
shuffle phase (TODO confirm).  Every record of a citing id is emitted as CSV
with the state seen on that id's first record.
"""
import sys

previous_citing_id = "-"
current_state = "-"

for line in sys.stdin:
    # Fix: strip the trailing newline before splitting -- the original left
    # "\n" attached to ``state``, so every print() also emitted a blank line.
    citing, cited, state = line.rstrip("\n").split("\t")
    if not previous_citing_id or previous_citing_id != citing:
        # First record for a new citing id: remember its state.
        previous_citing_id = citing
        current_state = state
    elif citing == previous_citing_id:
        # Subsequent records inherit the first record's state.
        state = current_state
    print(citing + "," + cited + "," + state)
|
'''NXOS Implementation for Msdp unconfigconfig triggers'''
# Genie Libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.unconfigconfig.unconfigconfig import TriggerUnconfigConfig
# import pyats
from pyats.utils.objects import Not, NotExists
# Which key to exclude for Msdp Ops comparison
msdp_exclude = ['maker', 'elapsed_time', 'discontinuity_time',
'keepalive', 'total', 'up_time', 'expire', 'remote',
'last_message_received', 'num_of_comparison', 'rpf_failure',
'total_accept_count', 'total_reject_count', 'notification']
class TriggerUnconfigConfigMsdpPeer(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s)."""

    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s).

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
                method (`str`): Method to recover the device configuration,
                                Support methods:
                                    'checkpoint': Rollback the configuration by
                                                  checkpoint (nxos),
                                                  archive file (iosxe),
                                                  load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                vrf: `str`
                peer: `str`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Msdp Ops object and store the "established" MSDP peer(s)
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP peer(s) from step 1
           with Msdp Conf object
        4. Verify the MSDP peer(s) from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    mapping = Mapping(
        # Precondition: at least one peer must be in 'established' state.
        requirements={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', 'session_state', 'established']],
            'kwargs': {'attributes': [
                'info[vrf][(.*)][peer][(.*)]']},
            'exclude': msdp_exclude}},
        # Conf-object path used to unconfigure the learned peer.
        config_info={'conf.msdp.Msdp': {
            'requirements': [
                ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                 'peer_attr', '(?P<peer>.*)']],
            'verify_conf': False,
            'kwargs': {}}},
        # Post-unconfig check: the peer key must be absent from Ops.
        verify_ops={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer', NotExists('(?P<peer>.*)')]],
            'kwargs': {'attributes': ['info[vrf][(.*)][peer][(.*)]']},
            'exclude': msdp_exclude}},
        # Act on a single (vrf, peer) pair per run.
        num_values={'vrf': 1, 'peer': 1})
class TriggerUnconfigConfigMsdpSaFilterIn(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) sa-filter in."""

    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) sa-filter in.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
                method (`str`): Method to recover the device configuration,
                                Support methods:
                                    'checkpoint': Rollback the configuration by
                                                  checkpoint (nxos),
                                                  archive file (iosxe),
                                                  load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                vrf: `str`
                peer: `str`
                sa_filter_in: `str`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Msdp Ops object and store the "established" MSDP peer(s) sa-filter in
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP peer(s) sa-filter in from step 1
           with Msdp Conf object
        4. Verify the MSDP peer(s) sa-filter in from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    mapping = Mapping(
        # Preconditions: an established peer that has an inbound sa-filter.
        requirements={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', 'session_state', 'established'],
                ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)',
                 'sa_filter', 'in', '(?P<sa_filter_in>.*)']],
            'kwargs': {'attributes': [
                'info[vrf][(.*)][peer][(.*)][session_state]',
                'info[vrf][(.*)][peer][(.*)][sa_filter]']},
            'exclude': msdp_exclude}},
        # Conf-object path used to unconfigure the inbound sa-filter.
        config_info={'conf.msdp.Msdp': {
            'requirements': [
                ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                 'peer_attr', '(?P<peer>.*)', 'sa_filter_in', '(?P<sa_filter_in>.*)']],
            'verify_conf': False,
            'kwargs': {}}},
        # Post-unconfig check: the 'in' key must be gone from sa_filter.
        verify_ops={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)',
                 'sa_filter', NotExists('in')]],  # , '(?P<sa_filter_in>.*)'
            'kwargs': {'attributes': ['info[vrf][(.*)][peer][(.*)][session_state]',
                                      'info[vrf][(.*)][peer][(.*)][sa_filter]']},
            'exclude': msdp_exclude}},
        # Act on a single (vrf, peer) pair per run.
        num_values={'vrf': 1, 'peer': 1})
class TriggerUnconfigConfigMsdpSaFilterOut(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) sa-filter out."""

    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) sa-filter out.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
                method (`str`): Method to recover the device configuration,
                                Support methods:
                                    'checkpoint': Rollback the configuration by
                                                  checkpoint (nxos),
                                                  archive file (iosxe),
                                                  load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                vrf: `str`
                peer: `str`
                sa_filter_out: `str`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Msdp Ops object and store the "established" MSDP peer(s) sa-filter out
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP peer(s) sa-filter out from step 1
           with Msdp Conf object
        4. Verify the MSDP peer(s) sa-filter out from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    mapping = Mapping(
        # Preconditions: an established peer that has an outbound sa-filter.
        requirements={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', 'session_state', 'established'],
                ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)',
                 'sa_filter', 'out', '(?P<sa_filter_out>.*)']],
            'kwargs': {'attributes': [
                'info[vrf][(.*)][peer][(.*)][session_state]',
                'info[vrf][(.*)][peer][(.*)][sa_filter]']},
            'exclude': msdp_exclude}},
        # Conf-object path used to unconfigure the outbound sa-filter.
        config_info={'conf.msdp.Msdp': {
            'requirements': [
                ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                 'peer_attr', '(?P<peer>.*)', 'sa_filter_out', '(?P<sa_filter_out>.*)']],
            'verify_conf': False,
            'kwargs': {}}},
        # Post-unconfig check: the 'out' key must be gone from sa_filter.
        verify_ops={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)',
                 'sa_filter', NotExists('out')]],  # , '(?P<sa_filter_out>.*)'
            'kwargs': {'attributes': ['info[vrf][(.*)][peer][(.*)][session_state]',
                                      'info[vrf][(.*)][peer][(.*)][sa_filter]']},
            'exclude': msdp_exclude}},
        # Act on a single (vrf, peer) pair per run.
        num_values={'vrf': 1, 'peer': 1})
class TriggerUnconfigConfigMsdpSaLimit(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) sa-limit."""

    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) sa-limit.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
                method (`str`): Method to recover the device configuration,
                                Support methods:
                                    'checkpoint': Rollback the configuration by
                                                  checkpoint (nxos),
                                                  archive file (iosxe),
                                                  load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                vrf: `str`
                peer: `str`
                sa_limit: `int`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Msdp Ops object and store the "established" MSDP peer(s) sa-limit
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP peer(s) sa-limit from step 1
           with Msdp Conf object
        4. Verify the MSDP peer(s) sa-limit from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    mapping = Mapping(
        # Preconditions: an established peer with a numeric sa-limit.
        requirements={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', 'session_state', 'established'],
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', 'sa_limit', '(?P<sa_limit>\d+)']],
            'kwargs': {'attributes': [
                'info[vrf][(.*)][peer][(.*)][session_state]',
                'info[vrf][(.*)][peer][(.*)][sa_limit]']},
            'exclude': msdp_exclude}},
        # Conf-object path used to unconfigure the sa-limit.
        config_info={'conf.msdp.Msdp': {
            'requirements': [
                ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                 'peer_attr', '(?P<peer>.*)', 'sa_limit', '(?P<sa_limit>\d+)']],
            'verify_conf': False,
            'kwargs': {}}},
        # Post-unconfig check: with no configured limit the device reports
        # the sa_limit as 'unlimited' (not a missing key).
        verify_ops={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', 'sa_limit', 'unlimited']],
            'kwargs': {'attributes': [
                'info[vrf][(.*)][peer][(.*)][session_state]',
                'info[vrf][(.*)][peer][(.*)][sa_limit]']},
            'exclude': msdp_exclude}},
        # Act on a single (vrf, peer) pair per run.
        num_values={'vrf': 1, 'peer': 1})
class TriggerUnconfigConfigMsdpMeshGroup(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned 'established' MSDP peer(s) mesh group."""

    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned 'established' MSDP peer(s) mesh group.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
                method (`str`): Method to recover the device configuration,
                                Support methods:
                                    'checkpoint': Rollback the configuration by
                                                  checkpoint (nxos),
                                                  archive file (iosxe),
                                                  load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                vrf: `str`
                peer: `str`
                mesh_group: `str`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Msdp Ops object and store the 'established' MSDP peer(s) mesh group
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP peer(s) mesh group from step 1
           with Msdp Conf object
        4. Verify the MSDP peer(s) mesh group from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    mapping = Mapping(
        # Preconditions: an established peer that belongs to a mesh group.
        requirements={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', 'session_state', 'established'],
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', 'mesh_group', '(?P<mesh_group>.*)']],
            'kwargs': {'attributes': [
                'info[vrf][(.*)][peer][(.*)][session_state]',
                'info[vrf][(.*)][peer][(.*)][mesh_group]']},
            'exclude': msdp_exclude}},
        # Conf-object path used to unconfigure the mesh-group membership.
        config_info={'conf.msdp.Msdp': {
            'requirements': [
                ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                 'peer_attr', '(?P<peer>.*)', 'mesh_group', '(?P<mesh_group>.*)']],
            'verify_conf': False,
            'kwargs': {}}},
        # Post-unconfig check: the mesh_group key must be gone for the peer.
        verify_ops={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                 '(?P<peer>.*)', NotExists('mesh_group')]],  # , '(?P<mesh_group>.*)'
            'kwargs': {'attributes': [
                'info[vrf][(.*)][peer][(.*)][session_state]',
                'info[vrf][(.*)][peer][(.*)][mesh_group]']},
            'exclude': msdp_exclude}},
        # Act on a single (vrf, peer) pair per run.
        num_values={'vrf': 1, 'peer': 1})
class TriggerUnconfigConfigMsdpOriginatorId(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP originator-id."""

    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP originator-id.

    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
                method (`str`): Method to recover the device configuration,
                                Support methods:
                                    'checkpoint': Rollback the configuration by
                                                  checkpoint (nxos),
                                                  archive file (iosxe),
                                                  load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                  in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                  in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.

                vrf: `str`
                peer: `str`
                originator_id: `str`
                originating_rp: `str`

                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)

    steps:
        1. Learn Msdp Ops object and store the MSDP originator-id
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP originator-id from step 1
           with Msdp Conf object
        4. Verify the MSDP originator-id from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """

    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    mapping = Mapping(
        # Preconditions come from BOTH the Ops object (originator_id) and the
        # Conf object (the originating_rp interface that produced it).
        requirements={'ops.msdp.msdp.Msdp': {
            'requirements': [
                ['info', 'vrf', '(?P<vrf>.*)', 'global',
                 'originator_id', '(?P<originator_id>.*)']],
            'kwargs': {'attributes': [
                'info[vrf][(.*)][global][originator_id]',
                'info[vrf][(.*)][peer][(.*)][session_state]']},
            'exclude': msdp_exclude},
        'conf.msdp.Msdp': {
            'requirements': [
                ['device_attr', '{uut}', '_vrf_attr', '(?P<vrf>.*)',
                 'originating_rp', '(?P<originating_rp>.*)']],
            'kwargs': {'attributes': [
                'msdp[vrf_attr][(.*)][originating_rp]']},
            'exclude': msdp_exclude}},
        # Conf-object path used to unconfigure the originating RP.
        config_info={'conf.msdp.Msdp': {
            'requirements': [
                ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                 'originating_rp', '(?P<originating_rp>.*)']],
            'verify_conf': False,
            'kwargs': {}}},
        # Post-unconfig check against the Conf object.
        # NOTE(review): the attribute paths below use 'info[...]' while the
        # Conf requirement above uses 'msdp[vrf_attr][...]' -- confirm these
        # are the intended paths for a conf.msdp.Msdp verification.
        verify_ops={'conf.msdp.Msdp': {
            'requirements': [
                ['device_attr', '{uut}', '_vrf_attr', '(?P<vrf>.*)',
                 NotExists('originating_rp')]],  # , '(?P<originating_rp>.*)'
            'kwargs': {'attributes': [
                'info[vrf][(.*)][originating_rp]',
                'info[vrf][(.*)][peer][(.*)][session_state]']},
            'exclude': msdp_exclude}},
        # Act on a single (vrf, peer) pair per run.
        num_values={'vrf': 1, 'peer': 1})
class TriggerUnconfigConfigMsdpKeepaliveHoldtime(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) keepalive&holdtime interval."""
    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) keepalive&holdtime interval.
    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
            method (`str`): Method to recover the device configuration,
                          Support methods:
                            'checkpoint': Rollback the configuration by
                                          checkpoint (nxos),
                                          archive file (iosxe),
                                          load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.
                vrf: `str`
                peer: `str`
                keepalive_interval: `int`
                holdtime_interval: `int`
                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)
    steps:
        1. Learn Msdp Ops object and store the MSDP peer(s)
           keepalive&holdtime interval if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP peer(s) keepalive&holdtime interval from step 1
           with Msdp Conf object
        4. Verify the MSDP peer(s) keepalive&holdtime interval from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """
    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    # 'all_keys': True -> a peer is only picked when BOTH timers were learnt
    # together; after the unconfigure, verify_ops expects the hard-coded
    # defaults below (keepalive 60 / holdtime 90) instead of the learnt values.
    mapping = Mapping(requirements={'conf.msdp.Msdp':{
                                  'requirements':[\
                                      ['device_attr', '{uut}', '_vrf_attr', '(?P<vrf>.*)',
                                       '_peer_attr', '(?P<peer>.*)', 'keepalive_interval',
                                       '(?P<keepalive_interval>.*)'],
                                      ['device_attr', '{uut}', '_vrf_attr', '(?P<vrf>.*)',
                                       '_peer_attr', '(?P<peer>.*)', 'holdtime_interval',
                                       '(?P<holdtime_interval>.*)']],
                                  'all_keys': True,
                                  'kwargs':{'attributes': [
                                      'msdp[vrf_attr][(.*)][peer_attr][(.*)]']},
                                  'exclude': msdp_exclude},
                                'ops.msdp.msdp.Msdp':{
                                  'requirements':[\
                                      ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)',
                                       'timer', 'keepalive_interval', '(?P<keepalive_interval>.*)'],
                                      ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)',
                                       'timer', 'holdtime_interval', '(?P<holdtime_interval>.*)']],
                                  'all_keys': True,
                                  'kwargs':{'attributes': [
                                      'info[vrf][(.*)][peer][(.*)][session_state]',
                                      'info[vrf][(.*)][peer][(.*)][timer]']},
                                  'exclude': msdp_exclude}},
                      config_info={'conf.msdp.Msdp':{
                                     'requirements':[
                                       ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)', 'peer_attr',
                                        '(?P<peer>.*)', 'keepalive_interval','(?P<keepalive_interval>.*)'],
                                       ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)', 'peer_attr',
                                        '(?P<peer>.*)', 'holdtime_interval','(?P<holdtime_interval>.*)']],
                                     'verify_conf': True,
                                     'kwargs':{}}},
                      verify_ops={'ops.msdp.msdp.Msdp':{
                                    'requirements':[\
                                        ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)',
                                         'timer', 'keepalive_interval', 60], # change to default value
                                        ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)',
                                         'timer', 'holdtime_interval', 90]], # change to default value
                                    'kwargs':{'attributes': [
                                        'info[vrf][(.*)][peer][(.*)][session_state]',
                                        'info[vrf][(.*)][peer][(.*)][timer]']},
                                    'exclude': msdp_exclude}},
                      num_values={'vrf': 1, 'peer':1})
class TriggerUnconfigConfigMsdpReconnectInterval(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP reconnect interval."""
    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP reconnect interval.
    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
            method (`str`): Method to recover the device configuration,
                          Support methods:
                            'checkpoint': Rollback the configuration by
                                          checkpoint (nxos),
                                          archive file (iosxe),
                                          load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.
                vrf: `str`
                peer: `str`
                connect_retry_interval: `int`
                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)
    steps:
        1. Learn Msdp Ops object and store the MSDP reconnect interval
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP reconnect interval from step 1
           with Msdp Conf object
        4. Verify the MSDP reconnect interval from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """
    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    # The reconnect interval is VRF-global in Conf ('global_connect_retry_interval')
    # but surfaces per-peer in Ops, hence 'peer': 'all' in num_values; verify_ops
    # expects the default value (10) once the custom interval is removed.
    mapping = Mapping(requirements={'conf.msdp.Msdp':{
                                  'requirements':[\
                                      ['device_attr', '{uut}', '_vrf_attr', '(?P<vrf>.*)',
                                       'global_connect_retry_interval',
                                       '(?P<connect_retry_interval>.*)']],
                                  'kwargs':{'attributes': [
                                      'msdp[vrf_attr][(.*)][global_connect_retry_interval]']},
                                  'exclude': msdp_exclude},
                                'ops.msdp.msdp.Msdp':{
                                  'requirements':[\
                                      ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)', 'timer',
                                       'connect_retry_interval', '(?P<connect_retry_interval>.*)']],
                                  'kwargs':{'attributes': [
                                      'info[vrf][(.*)][peer][(.*)][session_state]',
                                      'info[vrf][(.*)][peer][(.*)][timer]']},
                                  'exclude': msdp_exclude}},
                      config_info={'conf.msdp.Msdp':{
                                     'requirements':[
                                       ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                                        'global_connect_retry_interval', '(?P<connect_retry_interval>.*)']],
                                     'verify_conf':False,
                                     'kwargs':{}}},
                      verify_ops={'ops.msdp.msdp.Msdp':{
                                    'requirements':[\
                                        ['info', 'vrf', '(?P<vrf>.*)', 'peer', '(?P<peer>.*)', 'timer',
                                         'connect_retry_interval', 10]], # change to default value
                                    'kwargs':{'attributes': [
                                        'info[vrf][(.*)][peer][(.*)][session_state]',
                                        'info[vrf][(.*)][peer][(.*)][timer]']},
                                    'exclude': msdp_exclude}},
                      num_values={'vrf': 1, 'peer': 'all'})
class TriggerUnconfigConfigMsdpDescription(TriggerUnconfigConfig):
    """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) description."""
    __description__ = """Unconfigure and reapply the whole configurations of dynamically
    learned MSDP peer(s) description.
    trigger_datafile:
        Mandatory:
            timeout:
                max_time (`int`): Maximum wait time for the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
            method (`str`): Method to recover the device configuration,
                          Support methods:
                            'checkpoint': Rollback the configuration by
                                          checkpoint (nxos),
                                          archive file (iosxe),
                                          load the saved running-config file on disk (iosxr)
        Optional:
            tgn_timeout (`int`): Maximum wait time for all traffic threads to be
                                 restored to the reference rate,
                                 in second. Default: 60
            tgn_delay (`int`): Wait time between each poll to verify if traffic is resumed,
                               in second. Default: 10
            timeout_recovery:
                Buffer recovery timeout make sure devices are recovered at the end
                of the trigger execution. Used when previous timeouts have been exhausted.
                max_time (`int`): Maximum wait time for the last step of the trigger,
                                in second. Default: 180
                interval (`int`): Wait time between iteration when looping is needed,
                                in second. Default: 15
            static:
                The keys below are dynamically learnt by default.
                However, they can also be set to a custom value when provided in the trigger datafile.
                vrf: `str`
                peer: `str`
                description: `str`
                (e.g) interface: '(?P<interface>Ethernet1*)' (Regex supported)
                      OR
                      interface: 'Ethernet1/1/1' (Specific value)
    steps:
        1. Learn Msdp Ops object and store the MSDP peer(s) description
           if has any, otherwise, SKIP the trigger
        2. Save the current device configurations through "method" which user uses
        3. Unconfigure the learned MSDP peer(s) description from step 1
           with Msdp Conf object
        4. Verify the MSDP peer(s) description from step 3 are no longer existed
        5. Recover the device configurations to the one in step 2
        6. Learn Msdp Ops again and verify it is the same as the Ops in step 1
    """
    # Mapping of Information between Ops and Conf
    # Also permit to dictate which key to verify
    # NOTE: the datafile key documented above was fixed from
    # 'connect_retry_interval' (copy-paste from the reconnect trigger) to
    # 'description', which is the key this mapping actually learns.
    # verify_ops checks the key is gone entirely (NotExists) after unconfig.
    mapping = Mapping(requirements={'conf.msdp.Msdp':{
                                  'requirements':[\
                                      ['device_attr', '{uut}', '_vrf_attr', '(?P<vrf>.*)',
                                       '_peer_attr', '(?P<peer>.*)', 'description',
                                       '(?P<description>.*)']],
                                  'kwargs':{'attributes': [
                                      'msdp[vrf_attr][(.*)][peer_attr][(.*)]']},
                                  'exclude': msdp_exclude},
                                'ops.msdp.msdp.Msdp':{
                                  'requirements':[\
                                      ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                                       '(?P<peer>.*)', 'description', '(?P<description>.*)']],
                                  'kwargs':{'attributes': [
                                      'info[vrf][(.*)][peer][(.*)][session_state]',
                                      'info[vrf][(.*)][peer][(.*)][description]']},
                                  'exclude': msdp_exclude}},
                      config_info={'conf.msdp.Msdp':{
                                     'requirements':[
                                       ['device_attr', '{uut}', 'vrf_attr', '(?P<vrf>.*)',
                                        'peer_attr', '(?P<peer>.*)', 'description','(?P<description>.*)']],
                                     'verify_conf':False,
                                     'kwargs':{}}},
                      verify_ops={'ops.msdp.msdp.Msdp':{
                                    'requirements':[\
                                        ['info', 'vrf', '(?P<vrf>.*)', 'peer',
                                         '(?P<peer>.*)', NotExists('description')]], # , '(?P<description>.*)'
                                    'kwargs':{'attributes': [
                                        'info[vrf][(.*)][peer][(.*)][session_state]',
                                        'info[vrf][(.*)][peer][(.*)][description]']},
                                    'exclude': msdp_exclude}},
                      num_values={'vrf': 1, 'peer': 1})
|
# Project Euler 25: index of the first Fibonacci number with 1000 digits.
def count_digits(value):
    """Return the number of decimal digits in ``value`` (sign ignored).

    The previous floor-division loop never terminated for negative input
    (``-1 // 10 == -1`` forever); ``len(str(abs(...)))`` handles zero and
    negatives correctly in one step.
    """
    return len(str(abs(int(value))))

# F1 = F2 = 1; `count` always holds the index of the latest `term`.
f1 = 1
f2 = 1
digits = 0
count = 2
while digits != 1000:
    term = f1 + f2              # next Fibonacci number
    digits = count_digits(term)
    f1, f2 = f2, term           # slide the two-term window forward
    count += 1                  # `count` is now the index of `term`
print(count)
|
#------------------------------------------------------
# import
#------------------------------------------------------
import os
import argparse
import codecs
import time
import imghdr
import numpy as np
import cv2
print("opencv : ",cv2.__version__)
from model_wrapper import *
#------------------------------------------------------
# global
#------------------------------------------------------
#------------------------------------------------------
# function
#------------------------------------------------------
def process_images(model, img_path):
    """Run inference on one image, display it briefly, and save an annotated copy.

    Args:
        model: model handle as returned by ``load_model`` (model_wrapper).
        img_path: path to the input image file.

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread signals failure by returning None, not by raising.
        raise FileNotFoundError("cannot read image: %s" % img_path)
    # inference() draws the bounding boxes directly onto `img` (draw=True).
    inference(model, img, draw=True)
    # display image for 3 seconds
    cv2.imshow("image", img)
    cv2.waitKey(3000)
    cv2.destroyAllWindows()
    # write image with bbox next to the current working directory
    base_fname, _ = os.path.splitext(os.path.basename(img_path))
    out_file_path = os.path.join(os.getcwd(), base_fname + '_with_box.jpg')
    cv2.imwrite(out_file_path, img.astype(np.uint8))
    print("save : ", out_file_path)
def arg_parser():
    """Build the command-line parser for the annotation script.

    Returns:
        argparse.ArgumentParser with a single required positional
        ``img_path`` argument.
    """
    parser = argparse.ArgumentParser(description="Annotate with YOLOv3.")
    # `default=` was dropped: argparse ignores it on a required positional
    # (it only applies with nargs='?' / '*'), so it was dead code.
    parser.add_argument("img_path", type=str, help="path2your_image")
    return parser
def main(args):
    """Load the detection model once, then annotate the image named on the CLI."""
    model = load_model()
    process_images(model, args.img_path)
#------------------------------------------------------
# main
#------------------------------------------------------
if __name__ == '__main__':
    # Parse command-line arguments and hand off to main().
    cli_args = arg_parser().parse_args()
    main(cli_args)
|
import os
import csv
import re
import datetime
# from urlparse import urlparse
from urllib.parse import urlparse
from seed.data_importer.tasks import save_raw_data, map_data, match_buildings
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse, HttpResponse, HttpResponseNotFound
from django.shortcuts import render
from django.template.loader import render_to_string
from django.db.models import Q
from rest_framework.decorators import api_view
from seed.models import Cycle, PropertyView, PropertyState, Property
from seed.models.certification import GreenAssessmentPropertyAuditLog, GreenAssessmentURL
from helix.models import HELIXGreenAssessment, HELIXGreenAssessmentProperty, HelixMeasurement, HELIXPropertyMeasure
from seed.models.auditlog import (
AUDIT_USER_EXPORT,
)
from seed.data_importer.models import ImportRecord
from helix.models import HELIXOrganization as Organization
# from seed.lib.superperms.orgs.models import Organization
from seed.utils.api import api_endpoint
import helix.helix_utils as utils
from hes import hes
from label import label
# Return the green assessment front end page. This can be accessed through
# the seed side bar or at /app/assessments
# Parameters:
# orgs: list of parent organizations
@login_required
def assessment_view(request):
    """Render the green assessments page, listing every organization."""
    all_orgs = Organization.objects.all()
    context = {'org_list': all_orgs}
    return render(request, 'helix/green_assessments.html', context)
# Returns and html interface for editing an existing green assessment which is
# identified by the parameter id.
# Parameters:
# assessment: certification id
@login_required
def assessment_edit(request):
    """Render the edit form for the certification whose pk is GET['id']."""
    target_assessment = HELIXGreenAssessment.objects.get(pk=request.GET['id'])
    context = {'assessment': target_assessment}
    return render(request, 'helix/assessment_edit.html', context)
# Export the GreenAssessmentProperty information for the list of property ids provided
# Parameters:
# ids: comma separated list of views ids to retrieve
# file_name: optional parameter that can be set to have the web browser open
# a save dialog with this as the file name. When not set, raw text
# is displayed
# Example:
# GET /helix/helix-csv-export/?view_ids=11,12,13,14
@api_endpoint
@api_view(['GET', 'POST'])
def helix_csv_export(request):
    """Export RESO-style CSV rows for the requested properties.

    Request data:
        ids: list of property ids whose views should be exported.
        filename: optional; when given, the response downloads as a CSV
            attachment with that name, otherwise raw text is returned.

    Returns:
        HttpResponse with one header row, one row per exportable RESO
        certification, and one row per PV/wind measure.
    """
    # property_ids = map(lambda view_id: int(view_id), request.data.get['ids'].split(','))
    property_ids = request.data.get('ids', [])
    view_ids = PropertyView.objects.filter(property_id__in=property_ids)
    state_ids = view_ids.values_list('state_id', flat=True)
    # retrieve green assessment properties that belong to one of these ids:
    # only unexpired, not-opted-out RESO certifications owned by one of the
    # requesting user's organizations are exported
    today = datetime.datetime.today()
    organizations = Organization.objects.filter(users=request.user)
    reso_certifications = HELIXGreenAssessment.objects.filter(organization_id__in=organizations).filter(is_reso_certification=True)
    assessments = HELIXGreenAssessmentProperty.objects.filter(
        view__pk__in=view_ids).filter(Q(_expiration_date__gte=today) | Q(_expiration_date=None)).filter(opt_out=False).filter(assessment_id__in=reso_certifications)
    # num_certification = assessments.values_list('assessment_id', flat=True)
    # retrieve measures that belong to one of these ids
    matching_measures = HELIXPropertyMeasure.objects.filter(property_state__in=state_ids) # only pv can be exported
    file_name = request.data.get('filename')
    # Handle optional parameter: attachment download vs. inline text
    if (file_name is not None):
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="' + file_name + '"'
    else:
        response = HttpResponse()
    # Dump all fields of all retrieved assessments properties into csv.
    # addressmap: PropertyState attribute -> RESO column name.
    addressmap = {'custom_id_1': 'UniversalPropertyId', 'city': 'City', 'postal_code': 'PostalCode', 'state': 'State', 'latitude': 'Latitude', 'longitude': 'Longitude'}
    addressmapxd = {'StreetDirPrefix': 'StreetDirPrefix', 'StreetDirSuffix': 'StreetDirSuffix', 'StreetName': 'StreetName', 'StreetNumber': 'StreetNumber', 'StreetSuffix': 'StreetSuffix', 'UnitNumber': 'UnitNumber'}
    fieldnames = ['GreenVerificationBody', 'GreenBuildingVerificationType', 'GreenVerificationRating', 'GreenVerificationMetric', 'GreenVerificationVersion', 'GreenVerificationYear', 'GreenVerificationSource', 'GreenVerificationStatus', 'GreenVerificationURL']
    measurenames = ['PowerProductionSource', 'PowerProductionOwnership', 'Electric', 'PowerProductionAnnualStatus', 'PowerProductionSize', 'PowerProductionType', 'PowerProductionAnnual', 'PowerProductionYearInstall']
    writer = csv.writer(response)
    # Header row; measure columns are appended only when measures exist.
    arr = [value for key, value in addressmap.items()] + [value for key, value in addressmapxd.items()] + ['Unparsed Address'] + [str(f) for f in fieldnames]
    if matching_measures:
        arr += [str(m) for m in measurenames]
    writer.writerow(arr)
    for a in assessments:
        a_dict = a.to_reso_dict()
        unparsedAddress = a.view.state.address_line_1
        if a.view.state.address_line_2:
            unparsedAddress += ' ' + a.view.state.address_line_2
        # NOTE(review): addressmapxd keys are read with getattr on the state,
        # but elsewhere in this file (helix_dups_export) they live in
        # state.extra_data — these columns may always be blank; confirm.
        writer.writerow([str(getattr(a.view.state, key, '')) for key, value in addressmap.items()] +
            [str(getattr(a.view.state, key, '')) for key, value in addressmapxd.items()] + [unparsedAddress] +
            [str(a_dict.get(f, '')) for f in fieldnames])
        # log changes
        a.log(
            user=request.user,
            record_type=AUDIT_USER_EXPORT,
            name='Export log',
            description='Exported via csv')
    for measure in matching_measures:
        # Only production/capacity measurements for PV or wind are exported.
        matching_measurements = HelixMeasurement.objects.filter(
            measure_property__pk=measure.propertymeasure_ptr_id,
            measurement_type__in=['PROD', 'CAP'],
            measurement_subtype__in=['PV', 'WIND']
        )
        measurement_dict = {}
        for match in matching_measurements:
            measurement_dict.update(match.to_reso_dict())
        measurement_dict.update(measure.to_reso_dict())
        unparsedAddress = measure.property_state.address_line_1
        if measure.property_state.address_line_2:
            unparsedAddress += ' ' + measure.property_state.address_line_2
        # getattr({}, f, '') always yields '' — the certification columns are
        # intentionally left blank on measure rows.
        writer.writerow([str(getattr(measure.property_state, key, '')) for key, value in addressmap.items()] +
            [str(getattr(measure.property_state, key, '')) for key, value in addressmapxd.items()] + [unparsedAddress] +
            [str(getattr({}, f, '')) for f in fieldnames] + [measurement_dict.get(m, '') for m in measurenames])
    return response
# Export the property address information for the list of property ids provided, matching up likely duplicates
# Parameters:
# ids: comma separated list of views ids to retrieve
# file_name: optional parameter that can be set to have the web browser open
# a save dialog with this as the file name. When not set, raw text
# is displayed
# Example:
# GET /helix/helix-dups-export/?view_ids=11,12,13,14
@api_endpoint
@api_view(['GET', 'POST'])
def helix_dups_export(request):
    """Export likely duplicate property addresses as CSV.

    Request data:
        ids: list of property ids to scan for duplicates.
        filename: optional; when given, the response downloads as a CSV
            attachment, otherwise raw text is displayed.

    Returns:
        HttpResponse listing each suspected duplicate pair, preceded by a
        row naming the match category.
    """
    property_ids = request.data.get('ids', [])
    view_ids = PropertyView.objects.filter(property_id__in=property_ids)
    state_ids = view_ids.values_list('state_id', flat=True)
    # convert to list to facilitate removal later on
    states = [s for s in PropertyState.objects.filter(id__in=state_ids).only("id", "address_line_1", "normalized_address", "postal_code", "extra_data")]
    file_name = request.data.get('filename')
    # Handle optional parameter
    if (file_name is not None):
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="' + file_name + '"'
    else:
        response = HttpResponse()
    writer = csv.writer(response)
    addressmap = ['id', 'address_line_1', 'city', 'postal_code']
    writer.writerow(addressmap)
    # O(n^2) pairwise scan; `skip_states` suppresses re-reporting a record
    # already paired with an earlier one.
    remaining_states = states
    skip_states = []
    for state in states:
        if state.id in skip_states:
            continue
        # NOTE(review): the bare extra_data['StreetNumber'] etc. lookups raise
        # KeyError for records missing those keys — confirm the import always
        # populates them, or switch to .get().
        for rem_state in remaining_states:
            # likely matches, same zip code; `continue` ensures one pair is
            # reported under a single (most specific) category only
            if state.extra_data['StreetNumber'] == rem_state.extra_data['StreetNumber'] and state.extra_data['StreetName'] == rem_state.extra_data['StreetName'] and state.extra_data['UnitNumber'] == rem_state.extra_data['UnitNumber'] and state.postal_code == rem_state.postal_code and state.id != rem_state.id:
                writer.writerow(['Similar street address, same postal code'])
                writer.writerow([str(getattr(state, elem, '')) for elem in addressmap])
                writer.writerow([str(getattr(rem_state, elem, '')) for elem in addressmap])
                # remaining_states = list(filter(lambda i: i.id != rem_state.id, remaining_states))
                skip_states.append(rem_state.id)
                continue
            # likely matches, no unit number, same zip code
            if state.extra_data['StreetNumber'] == rem_state.extra_data['StreetNumber'] and state.extra_data['StreetName'] == rem_state.extra_data['StreetName'] and state.postal_code == rem_state.postal_code and state.id != rem_state.id:
                writer.writerow(['Similar address, excludes unit #, same postal code'])
                writer.writerow([str(getattr(state, elem, '')) for elem in addressmap])
                writer.writerow([str(getattr(rem_state, elem, '')) for elem in addressmap])
                skip_states.append(rem_state.id)
                continue
            # likely matches, different zip code
            if state.extra_data['StreetNumber'] == rem_state.extra_data['StreetNumber'] and state.extra_data['StreetDirPrefix'] == rem_state.extra_data['StreetDirPrefix'] and state.extra_data['StreetName'] == rem_state.extra_data['StreetName'] and state.extra_data['UnitNumber'] == rem_state.extra_data['UnitNumber'] and state.postal_code != rem_state.postal_code and state.id != rem_state.id:
                writer.writerow(['Same street address, different postal code'])
                writer.writerow([str(getattr(state, elem, '')) for elem in addressmap])
                writer.writerow([str(getattr(rem_state, elem, '')) for elem in addressmap])
                skip_states.append(rem_state.id)
                continue
    # { 'StreetSuffix': 'court', 'StreetDirSuffix': '', 'StreetNamePreDirectional': ''}
    return response
# Export List of updated properties in an xml
# format
# Parameters:
# start_date: A date in the format yyyy-mm-dd specifying the earliest
# date to export.
# end_date: A date in the same format specifying the last date to export.
# private_data: An optional parameter, not included in the official documentation.
# If equal to True, then all matching
# records are returned. If absent or equal to anything other
# than true, only records with a disclosure are returned.
# At the moment, this can be set by any user. It might be
# that case that only owners/admins should be able to retrieve
# private data.
# Example:
# http://localhost:8000/helix/helix-reso-export-list-xml/?start_date=2016-09-14&end_date=2017-07-11&private_data=True
@api_endpoint
@api_view(['GET'])
def helix_reso_export_list_xml(request):
    """Export the ids of recently updated property views as an XML list.

    GET parameters:
        start_date / end_date: 'yyyy-mm-dd' bounds applied to certification
            audit logs (created) and property records (updated).
        organization: optional organization name; defaults to all of the
            requesting user's organizations. Sub-organizations are included
            in both cases.

    Returns:
        HttpResponse with the rendered list template, or a 404 whose body is
        an XML comment when nothing matched.
    """
    content = []
    ga_pks = GreenAssessmentPropertyAuditLog.objects.none()
    property_pks = Property.objects.none()
    start_date = request.GET.get('start_date', None)
    end_date = request.GET.get('end_date', None)
    organization = request.GET.get('organization', None)
    if organization:
        organizations = Organization.objects.filter(users=request.user, name=organization)
        organizations = organizations | Organization.objects.filter(parent_org_id__in=organizations)  # add sub-organizations with same parent
    else:
        organizations = Organization.objects.filter(users=request.user)
        organizations = organizations | Organization.objects.filter(parent_org_id__in=organizations)  # add sub-organizations with same parent
    try:
        # select green assessment properties that are in the specified
        # create / update date range and associated with the correct property view
        if start_date:
            ga_pks = GreenAssessmentPropertyAuditLog.objects.filter(organization_id__in=organizations, created__gte=start_date)
            property_pks = Property.objects.filter(organization_id__in=organizations, updated__gte=start_date)
        if end_date:
            ga_pks = ga_pks & GreenAssessmentPropertyAuditLog.objects.filter(organization_id__in=organizations, created__lte=end_date)
            property_pks = property_pks & Property.objects.filter(organization_id__in=organizations, updated__lte=end_date)
        if property_pks:
            property_views = PropertyView.objects.filter(property__in=property_pks)
            content = list(property_views.values_list('id', flat=True))
        if ga_pks:
            # union of views touched via properties and via audit logs
            content = list(set(content) | set(list(ga_pks.values_list('property_view_id', flat=True))))
        if content:
            context = {
                'content': content
            }
            rendered_xml = render_to_string('reso_export_list_template.xml', context)
            return HttpResponse(rendered_xml, content_type='text/xml')
        else:
            # '-->' is the valid XML comment terminator (was '--!>').
            return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No properties found -->')
    except PropertyView.DoesNotExist:
        return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No properties found -->')
# Export GreenAssessmentProperty and Measures information for a property view in an xml
# format using RESO fields
# Parameters:
# propertyview_pk: primary key into the property view table. Determines
# which records are exported. If the key does not exist
# in the database, a response code 404 is returned.
# Example:
# http://localhost:8000/helix/helix-reso-export-xml/?property_id=11
# @login_required
@api_endpoint
@api_view(['GET'])
def helix_reso_export_xml(request):
    """Export RESO certification and measure data for one property as XML.

    The property is located from the request by utils.propertyview_find;
    unexpired, not-opted-out, non-draft RESO certifications and PV/wind
    measurements are gathered into the 'reso_export_template.xml' context.

    Returns:
        HttpResponse with the rendered XML, or a 404 XML-comment body when
        no property matches.
    """
    propertyview = utils.propertyview_find(request)
    if not propertyview:
        # NOTE(review): '--!>' is not a valid XML comment terminator ('-->').
        return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No property found --!>')
    today = datetime.datetime.today()
    if 'crsdata' in request.GET:
        # NOTE(review): `propertyview` is iterated and .first()-ed below, so it
        # looks like a queryset — setting .state on it here may not do what is
        # intended; confirm propertyview_find's return type.
        propertyview.state.jurisdiction_property_id = propertyview.state.custom_id_1
    organizations = Organization.objects.filter(users=request.user)
    # `property` shadows the builtin; kept as-is for byte-compatibility
    property = propertyview.first().state
    # strip a trailing ZIP code (last token) from the normalized address
    match = re.search(r'.*\d{5}',property.normalized_address)
    if match:
        property.normalized_address = property.normalized_address[0:property.normalized_address.rindex(' ')]
    property_info = {
        "property": property,
    }
    # for pv in propertyview:
    #    if pv.state.data_quality == 2: #exclude records with data quality errors
    #        propertyview.exclude(pv)
    #    return HttpResponse('<errors><error>Property has errors and cannot be exported</error></errors>', content_type='text/xml')
    measurement_dict = {}
    # assessments: only unexpired, exportable RESO certifications
    matching_assessments = HELIXGreenAssessmentProperty.objects.filter(
        view__in=propertyview).filter(Q(_expiration_date__gte=today) | Q(_expiration_date=None)).filter(opt_out=False).exclude(status__in=['draft','test','preliminary'])
    if matching_assessments:
        reso_certifications = HELIXGreenAssessment.objects.filter(organization_id__in=organizations).filter(is_reso_certification=True)
        property_info["assessments"] = matching_assessments.filter(assessment_id__in=reso_certifications)
        for assessment in matching_assessments.filter(assessment_id__in=reso_certifications):
            matching_measurements = HelixMeasurement.objects.filter(
                assessment_property__pk=assessment.greenassessmentproperty_ptr_id
            )
            for match in matching_measurements:
                measurement_dict.update(match.to_reso_dict())
            property_info["measurements"] = measurement_dict
    # measures: PV/wind production and capacity only
    for pv in propertyview:
        matching_measures = HELIXPropertyMeasure.objects.filter(property_state=pv.state) # only pv can be exported
        if matching_measures:
            for measure in matching_measures:
                matching_measurements = HelixMeasurement.objects.filter(
                    measure_property__pk=measure.propertymeasure_ptr_id,
                    measurement_type__in=['PROD', 'CAP'],
                    measurement_subtype__in=['PV', 'WIND']
                )
                for match in matching_measurements:
                    measurement_dict.update(match.to_reso_dict())
                measurement_dict.update(measure.to_reso_dict())
                property_info["measurements"] = measurement_dict
    context = {
        'content': property_info
    }
    # log changes
    for a in matching_assessments:
        a.log(
            user=request.user,
            record_type=AUDIT_USER_EXPORT,
            name='Export log',
            description='Exported via xml')
    rendered_xml = render_to_string('reso_export_template.xml', context)
    return HttpResponse(rendered_xml, content_type='text/xml')
@api_endpoint
@api_view(['GET'])
def helix_green_addendum(request, pk=None):
    """Generate a Green Addendum / Project Summary PDF and record its URL.

    GET parameters:
        organization_id or organization_name: one of the two is required.
        dataset_name: 'Green Addendum' (default) or 'Project Summary'.
        ga_from_ee: when present (and not None), Green Addendum fields are
            taken from the request instead of stored HELIX data.

    Args:
        request: Django request carrying the parameters above.
        pk: optional PropertyState pk; when absent the property is located
            (or created) from the request parameters.

    Returns:
        JsonResponse with the generated document URL, or a 404 when the
        organization or property cannot be found.
    """
    lab = label.Label(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    if 'organization_id' in request.GET:
        org_id = request.GET['organization_id']
        org = Organization.objects.get(pk=org_id)
    elif 'organization_name' in request.GET:
        org = Organization.objects.get(name=request.GET['organization_name'])
        org_id = org.id
    else:
        # '-->' is the valid XML comment terminator (was '--!>').
        return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No organization found -->')
    user = request.user
    assessment = HELIXGreenAssessment.objects.get(name='Green Addendum', organization_id=org_id)
    dataset_name = request.GET.get('dataset_name', 'Green Addendum')
    if pk is not None:
        property_state = PropertyState.objects.get(pk=pk)
        property_view = PropertyView.objects.get(state=property_state)
    else:
        property_view = utils.propertyview_find(request, org)
        if not property_view:
            property_view = _create_propertyview(request, org, user, dataset_name)
        if not property_view:
            return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No property found -->')
        elif pk is None:
            property_view = property_view[0]
    property_state = property_view.state
    data_dict = {
        'street': property_state.address_line_1,
        'street_2': property_state.address_line_1,
        'street_3': property_state.address_line_1,
        'city': property_state.city,
        'state': property_state.state,
        'zip': property_state.postal_code
    }
    assessment_data = {'assessment': assessment, 'view': property_view, 'date': datetime.date.today()}
    if dataset_name == "Green Addendum":
        if 'ga_from_ee' in request.GET and request.GET['ga_from_ee'] is not None: # Green Addendum from external data
            # BUGFIX: a missing comma after 'phius_2015' concatenated it with
            # 'leed_certified' into one bogus field name, silently dropping
            # both form fields.
            txtvars = ['indoor_air_plus', 'water_sense', 'energy_star', 'zerh',
                       'ngbs_bronze', 'ngbs_silver', 'ngbs_gold', 'ngbs_emerald',
                       'living_building_certified', 'petal_certification', 'phi_low_energy', 'energy_phit', 'passive_house', 'phius_2015',
                       'leed_certified', 'leed_silver', 'leed_gold', 'leed_platinum', 'green_certification_date_verified',
                       'verification_reviewed_on_site', 'verification_attached', 'green_certification_version', 'green_certification_organization_url',
                       'hers_rating', 'hers_sampling_rating', 'hers_projected_rating', 'hers_confirmed_rating', 'hers_estimated_savings', 'hers_rate',
                       'hes_score', 'hes_official', 'hes_unofficial', 'hes_estimated_savings', 'hes_rate', 'score_date_verified', 'score_version']
            floatvars = []
            boolvars = []
            intvars = []
            # energy_improvement_description, cost_of_energy_improvement
            # resnet_url, hes_url, other_score_url_check, other_score_url
            # score_reviewed_on_site, score_attached
            # solar_leased, solar_owned, solar_loan_ucc, solar_ppa
            # solar_size, solar_production, solar_production_type, solar_age
            # solar_fixed_mount, solar_tracking_mount
            # same with _2]
            source_data_dict = utils.data_dict_from_vars(request, txtvars, floatvars, intvars, boolvars)
            # the label generator expects '' rather than None for blanks
            for key in source_data_dict:
                if source_data_dict[key] is None:
                    source_data_dict[key] = ''
            data_dict.update(source_data_dict)
        else: # Green Addendum from HELIX data
            if 'Utility' in property_state.extra_data:
                data_dict['utility_name'] = property_state.extra_data['Utility']
            # retrieve green assessment properties
            assessments = HELIXGreenAssessmentProperty.objects.filter(view=property_view).filter(opt_out=False)
            for assess in assessments:
                data_dict.update(assess.to_label_dict())
                measurements = HelixMeasurement.objects.filter(assessment_property=assess)
                for measurement in measurements:
                    if assess.name == 'HERS Index Score':
                        data_dict.update(measurement.to_label_dict(0, 'hers'))
                    elif assess.name == 'Home Energy Score':
                        data_dict.update(measurement.to_label_dict(0, 'hes'))
            # retrieve measures (index distinguishes e.g. a second solar array)
            measures = HELIXPropertyMeasure.objects.filter(property_state=property_state)
            for index, meas in enumerate(measures):
                data_dict.update(meas.to_label_dict(index))
                measurements = HelixMeasurement.objects.filter(measure_property=meas)
                for measurement in measurements:
                    data_dict.update(measurement.to_label_dict(index))
        key = lab.green_addendum(data_dict, settings.AWS_BUCKET_NAME)
    elif dataset_name == "Project Summary":
        txtvars = ['address_line_1', 'city', 'state', 'postal_code',
                   'customer_name', 'customer_phone', 'customer_email',
                   'contractor_name', 'contractor_company', 'contractor_phone',
                   'coach_name', 'coach_phone',
                   'originator_name', 'originator_phone',
                   'measure_name_1', 'measure_name_2','measure_name_3', 'measure_name_4', 'measure_name_5', 'measure_name_6','measure_name_7', 'measure_name_8',
                   'notes']
        floatvars = ['mortgage', 'measure_cost_1', 'measure_cost_2', 'measure_cost_3', 'measure_cost_4',
                     'measure_cost_5', 'measure_cost_6', 'measure_cost_7', 'measure_cost_8',
                     'cost_pre', 'cost_post', 'hes_pre', 'hes_post']
        boolvars = []
        intvars = []
        source_data_dict = utils.data_dict_from_vars(request, txtvars, floatvars, intvars, boolvars)
        data_dict.update(source_data_dict)
        key = lab.energy_first_mortgage(data_dict, settings.AWS_BUCKET_NAME)
    # NOTE(review): `key` is undefined for any other dataset_name — a bad
    # value raises NameError here; consider validating dataset_name upfront.
    url = 'https://s3.amazonaws.com/' + settings.AWS_BUCKET_NAME + '/' + key
    priorAssessments = HELIXGreenAssessmentProperty.objects.filter(
        view=property_view,
        assessment=assessment)
    if (not priorAssessments.exists()):
        # If the property does not have an assessment in the database
        # for the specified assessment type create a new one.
        green_property = HELIXGreenAssessmentProperty.objects.create(**assessment_data)
        green_property.initialize_audit_logs(user=user)
        green_property.save()
    else:
        # find most recently created property and a corresponding audit log
        green_property = priorAssessments.order_by('date').last()
        old_audit_log = GreenAssessmentPropertyAuditLog.objects.filter(greenassessmentproperty=green_property).exclude(record_type=AUDIT_USER_EXPORT).order_by('created').last()
        # log changes
        green_property.log(
            changed_fields=assessment_data,
            ancestor=old_audit_log.ancestor,
            parent=old_audit_log,
            user=user)
    # attach (or refresh) the generated document URL on the assessment
    ga_url, _created = GreenAssessmentURL.objects.get_or_create(property_assessment=green_property)
    ga_url.url = url
    ga_url.description = 'Green Addendum Generated on ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    ga_url.save()
    return JsonResponse({'status': 'success', 'url': url})
@api_endpoint
@api_view(['GET'])
# Test with /helix-home-energy-score?organization_name=ClearlyEnergy&hes_id=332297
def helix_home_energy_score(request):
    """Fetch a Home Energy Score record from the external HES API.

    GET params:
        organization_name: name of an Organization holding HES partner
            credentials (hes_partner_name / hes_partner_password).
        hes_id: the HES building identifier to query.

    Returns a JsonResponse with {'status': 'success', 'data': ...} or an
    error payload when the HES service reports no such home.
    """
    org = Organization.objects.get(name=request.GET['organization_name'])
    hes_id = request.GET['hes_id']
    # instantiate HES client for external API
    hes_auth = {'user_key': settings.HES_USER_KEY,
                'user_name': org.hes_partner_name,
                'password': org.hes_partner_password,
                'client_url': settings.HES_CLIENT_URL}
    hes_client = hes.HesHelix(hes_auth['client_url'], hes_auth['user_name'], hes_auth['password'], hes_auth['user_key'])
    try:
        hes_data = hes_client.query_hes(hes_id)
    finally:
        # BUG FIX: the original only called end_session() on the success
        # path, leaking the HES session whenever the query returned an
        # error. Always close the session once the query has run.
        hes_client.end_session()
    if hes_data['status'] == 'error':
        return JsonResponse({'status': 'error', 'message': 'no existing home'})
    del hes_data['status']
    return JsonResponse({'status': 'success', 'data': hes_data})
@api_endpoint
@api_view(['GET'])
def helix_vermont_profile(request):
    """Generate a Vermont (or generic) home energy profile label.

    Finds the property identified by the request's address parameters
    (creating it through the named dataset when absent), renders the
    profile document to S3 via the label service, and attaches the
    resulting URL to the property as a certification.

    Returns a JsonResponse with the generated label URL, or a 404 when
    the property cannot be found or created.
    """
    org = Organization.objects.get(name=request.GET['organization_name'])
    user = request.user
    propertyview = utils.propertyview_find(request, org)
    dataset_name = request.GET['dataset_name']
    if not propertyview:
        propertyview = _create_propertyview(request, org, user, dataset_name)
    if not propertyview:
        return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No property found --!>')
    assessment = HELIXGreenAssessment.objects.get(name=dataset_name, organization=org)
    # Request parameters, grouped by the type they are coerced to.
    txtvars = ['street', 'city', 'state', 'zipcode', 'evt', 'leed', 'ngbs', 'heatingfuel', 'author_name', 'author_company', 'auditor', 'rating', 'low_cost_action', 'heater_type', 'water_type', 'solar_ownership', 'weatherization', 'source', 'third_party', 'bill', 'comments']
    # BUG FIX: 'cons_mmbtu_avg' was listed twice; the redundant duplicate
    # has been removed (processing it twice only re-set the same key).
    floatvars = ['cons_mmbtu', 'cons_mmbtu_avg', 'cons_mmbtu_max', 'cons_mmbtu_min', 'score', 'elec_score', 'ng_score', 'ho_score', 'propane_score', 'wood_cord_score', 'wood_pellet_score', 'solar_score',
                 'finishedsqft', 'yearbuilt', 'hers_score', 'hes_score', 'capacity',
                 'cons_elec', 'cons_ng', 'cons_ho', 'cons_propane', 'cons_wood_cord', 'cons_wood_pellet', 'cons_solar',
                 'rate_elec', 'rate_ng', 'rate_ho', 'rate_propane', 'rate_wood_cord', 'rate_wood_pellet', 'high_cost_action']
    boolvars = ['estar_wh', 'iap', 'zerh', 'phius', 'heater_estar', 'water_estar', 'water_solar', 'ac_estar', 'fridge_estar', 'washer_estar', 'dishwasher_estar', 'lighting_estar', 'has_audit', 'has_solar', 'has_storage', 'evcharger', 'has_cert', 'certified_bill', 'opt_out']
    intvars = []
    data_dict = utils.data_dict_from_vars(request, txtvars, floatvars, intvars, boolvars)
    lab = label.Label(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    # Vermont gets its state-specific template; everything else the generic one.
    if request.GET['state'] == 'VT':
        key = lab.vermont_energy_profile(data_dict, settings.AWS_BUCKET_NAME)
    else:
        key = lab.generic_energy_profile(data_dict, settings.AWS_BUCKET_NAME)
    url = 'https://s3.amazonaws.com/' + settings.AWS_BUCKET_NAME + '/' + key
    if propertyview is not None:
        utils.add_certification_label_to_property(propertyview, user, assessment, url, data_dict)
        return JsonResponse({'status': 'success', 'url': url})
    else:
        return JsonResponse({'status': 'error', 'message': 'no existing home'})
@api_endpoint
@api_view(['GET'])
def helix_massachusetts_scorecard(request, pk=None):
    """Generate a Massachusetts energy scorecard for an existing property.

    Pulls the scorecard inputs out of the PropertyState's extra_data,
    renders the scorecard to S3 via the label service, and attaches the
    URL to the property as a 'Massachusetts Scorecard' certification.

    :param pk: primary key of the PropertyState to render.
    """
    org_id = request.GET['organization_id']
    user = request.user
    property_state = PropertyState.objects.get(pk=pk)
    propertyview = PropertyView.objects.filter(state=property_state)
    if not propertyview:
        return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No property found --!>')
    assessment = HELIXGreenAssessment.objects.get(name='Massachusetts Scorecard', organization_id=org_id)
    data_dict = {
        'address_line_1': property_state.address_line_1,
        'address_line_2': property_state.address_line_2,
        'city': property_state.city,
        'state': property_state.state,
        'postal_code': property_state.postal_code,
    }
    # extra_data keys whose trailing "Name (unit)" segment becomes a
    # snake_case key in data_dict, e.g.
    # 'Metrics > Total Energy Cost Base ($/yr)' -> 'total_energy_cost_base'.
    floatvars = ['Utility Price > Fuel Oil', 'Utility Price > Electricity', 'Utility Price > Natural Gas', 'Utility Price > Wood', 'Utility Price > Pellets', 'Utility Price > Propane',
                 'Utilities > Primary Heating Fuel Type', 'Metrics > Fuel Energy Usage Base (therms/yr)',
                 'Metrics > Total Energy Cost Improved ($/yr)', 'Metrics > Total Energy Cost Base ($/yr)',
                 'Metrics > Total Energy Usage Improved (MMBtu/yr)', 'Metrics > Total Energy Usage Base (MMBtu/yr)',
                 'Metrics > Electric Energy Usage Base (kWh/yr)',
                 'Metrics > CO2 Production Improved (Tons/yr)', 'Metrics > CO2 Production Base (Tons/yr)',
                 'Building > Conditioned Area', 'Building > Year Built', 'Building > Number Of Bedrooms', 'Contractor > Name',
                 'Green Assessment Property Date', 'HES > Final > Base Score', 'HES > Final > Improved Score']
    for var in floatvars:
        if var in property_state.extra_data:
            part1 = var.split('>')[-1].lstrip()
            # Drop the parenthesised unit suffix, e.g. '(MMBtu/yr)'.
            part2 = part1.split('(')[0].rstrip()
            part3 = part2.replace(' ', '_').lower()
            data_dict[part3] = property_state.extra_data[var]
    # NOTE(review): the lookups below raise KeyError when the matching
    # extra_data entries are absent — confirm upstream always supplies
    # 'Green Assessment Property Date' and the base-usage metrics.
    data_dict['assessment_date'] = data_dict['green_assessment_property_date']
    if 'year_built' not in data_dict:
        data_dict['year_built'] = property_state.year_built
    if 'conditioned_area' not in data_dict:
        data_dict['conditioned_area'] = property_state.conditioned_floor_area
    # 0.00061 tons of CO2 per kWh of electricity. Fuel usage (therms) is
    # converted below with 0.1 MMBtu/therm and electricity with
    # 0.003412 MMBtu/kWh.
    to_co2 = {'electric': 0.00061}
    if data_dict['fuel_energy_usage_base'] is not None:
        data_dict['fuel_percentage'] = 100.0 * data_dict['fuel_energy_usage_base']*0.1 / (data_dict['fuel_energy_usage_base']*0.1 + data_dict['electric_energy_usage_base']*0.003412)
        data_dict['fuel_percentage_co2'] = 100.0 * (data_dict['co2_production_base'] - to_co2['electric'] * data_dict['electric_energy_usage_base']) / data_dict['co2_production_base']
    else:
        data_dict['fuel_percentage'] = 0.0
        data_dict['fuel_percentage_co2'] = 0.0
    data_dict['electric_percentage'] = 100.0 - data_dict['fuel_percentage']
    data_dict['electric_percentage_co2'] = 100.0 - data_dict['fuel_percentage_co2']
    lab = label.Label(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    key = lab.massachusetts_energy_scorecard(data_dict, settings.AWS_BUCKET_NAME)
    url = 'https://s3.amazonaws.com/' + settings.AWS_BUCKET_NAME + '/' + key
    # BUG FIX: removed an unreachable trailing `return None`; both branches
    # below already return.
    if propertyview is not None:
        utils.add_certification_label_to_property(propertyview, user, assessment, url, data_dict, request.GET.get('status', None), request.GET.get('reference_id', None))
        return JsonResponse({'status': 'success', 'url': url})
    else:
        return JsonResponse({'status': 'error', 'message': 'no existing home'})
# Create Massachusetts Scorecard (external service)
# Parameters:
# property attributes
# Example: http://localhost:8000/helix/massachusetts-scorecard/?address_line_1=298%20Highland%20Ave&city=Cambridge&postal_code=02139&state=MA&propane=2.3&fuel_oil=2.4&natural_gas=0.1&electricity=0.1&wood=200&pellets=0.5&conditioned_area=2000&year_built=1945&number_of_bedrooms=3&primary_heating_fuel_type=propane&name=JoeContractor&assessment_date=2019-06-07&fuel_energy_usage_base=120&total_energy_cost_base=2500&total_energy_cost_improved=1500&total_energy_usage_base=150&total_energy_usage_improved=120&electric_energy_usage_base=12000&co2_production_base=12.1&co2_production_improved=9.9&base_score=7&improved_score=9&incentive_1=5000&status=draft&organization=Snugg%20Pro&reference_id=myref124&url=https://mysnuggurl.com&organization=ClearlyEnergy
# @login_required
@api_endpoint
@api_view(['GET'])
def massachusetts_scorecard(request, pk=None):
    """Generate a Massachusetts scorecard entirely from GET parameters.

    Unlike helix_massachusetts_scorecard, all scorecard inputs arrive as
    request parameters; the property is created through the 'MA API'
    dataset when it does not already exist. When a 'url' parameter is
    supplied the label is not regenerated and that URL is attached as-is.
    """
    user = request.user
    # BUG FIX: the original bare `except:` clauses also swallowed
    # SystemExit / KeyboardInterrupt; narrowed to Exception.
    try:
        org = Organization.objects.get(users=user, name=request.GET['organization'])
    except Exception:
        return JsonResponse({'status': 'error', 'message': 'organization does not exist'})
    try:
        assessment = HELIXGreenAssessment.objects.get(name='Massachusetts Scorecard', organization=org)
    except Exception:
        return JsonResponse({'status': 'error', 'message': 'Please create certification with name: Massachusetts Scorecard'})
    # test if property exists
    propertyview = utils.propertyview_find(request, org)
    if not propertyview:
        dataset_name = 'MA API'
        propertyview = _create_propertyview(request, org, user, dataset_name)
    if not propertyview:
        return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No property found --!>')
    # Request parameters, grouped by the type they are coerced to.
    # (The dead `data_dict = None` pre-assignment was removed.)
    txtvars = ['address_line_1', 'address_line_2', 'city', 'state', 'postal_code', 'primary_heating_fuel_type', 'name', 'assessment_date']
    # NOTE(review): base_score/improved_score appear in both floatvars and
    # intvars — confirm which conversion data_dict_from_vars applies last.
    floatvars = ['fuel_oil', 'electricity', 'natural_gas', 'wood', 'pellets', 'propane',
                 'conditioned_area', 'year_built', 'number_of_bedrooms',
                 'fuel_energy_usage_base',
                 'total_energy_cost_base', 'total_energy_cost_improved',
                 'total_energy_usage_base', 'total_energy_usage_improved',
                 'electric_energy_usage_base',
                 'co2_production_base', 'co2_production_improved',
                 'base_score', 'improved_score']
    intvars = ['base_score', 'improved_score']
    boolvars = []
    data_dict = utils.data_dict_from_vars(request, txtvars, floatvars, intvars, boolvars)
    if request.GET.get('url', None):
        url = request.GET['url']
    else:
        # 0.00061 tons of CO2 per kWh of electricity; fuel usage (therms)
        # converts with 0.1 MMBtu/therm, electricity with 0.003412 MMBtu/kWh.
        to_co2 = {'electric': 0.00061}
        if data_dict['fuel_energy_usage_base'] is not None and data_dict['electric_energy_usage_base'] is not None:
            data_dict['fuel_percentage'] = 100.0 * data_dict['fuel_energy_usage_base']*0.1 / (data_dict['fuel_energy_usage_base']*0.1 + data_dict['electric_energy_usage_base']*0.003412)
            data_dict['fuel_percentage_co2'] = 100.0 * (data_dict['co2_production_base'] - to_co2['electric'] * data_dict['electric_energy_usage_base']) / data_dict['co2_production_base']
        else:
            data_dict['fuel_percentage'] = 0.0
            data_dict['fuel_percentage_co2'] = 0.0
        data_dict['electric_percentage'] = 100.0 - data_dict['fuel_percentage']
        data_dict['electric_percentage_co2'] = 100.0 - data_dict['fuel_percentage_co2']
        lab = label.Label(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        key = lab.massachusetts_energy_scorecard(data_dict, settings.AWS_BUCKET_NAME)
        url = 'https://s3.amazonaws.com/' + settings.AWS_BUCKET_NAME + '/' + key
    if propertyview is not None:
        # need to save data_dict to extra data
        utils.add_certification_label_to_property(propertyview, user, assessment, url, data_dict, request.GET.get('status', None), request.GET.get('reference_id', None), org)
        return JsonResponse({'status': 'success', 'url': url, 'property_id': propertyview.first().id})
    else:
        return JsonResponse({'status': 'error', 'message': 'no existing home'})
@api_endpoint
@api_view(['GET'])
def helix_remove_profile(request):
    """Delete the generated profile label(s) for a property.

    For every matching property view, removes the label file from the S3
    bucket and deletes the corresponding GreenAssessmentURL rows for the
    named certification.
    """
    org = Organization.objects.get(name=request.GET['organization_name'])
    # NOTE(review): `org` is looked up above but propertyview_find is called
    # with org=None — confirm whether the organization filter was intended.
    propertyview = utils.propertyview_find(request, org=None)
    if not propertyview:
        return HttpResponseNotFound('<?xml version="1.0"?>\n<!--No property found --!>')
    certification_name = request.GET['certification_name']
    assessment = HELIXGreenAssessment.objects.get(name=certification_name, organization=org)
    lab = label.Label(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    if propertyview is not None:
        for pv in propertyview:
            # consolidate with green addendum
            priorAssessments = HELIXGreenAssessmentProperty.objects.filter(
                view=pv,
                assessment=assessment)
            if priorAssessments:
                # find most recently created property and a corresponding audit log
                green_property = priorAssessments.order_by('date').last()
                ga_urls = GreenAssessmentURL.objects.filter(property_assessment=green_property)
                for ga_url in ga_urls:
                    label_link = ga_url.url
                    o = urlparse(label_link)
                    if o:
                        # Keep only the file-name portion of the URL path
                        # to use as the S3 object key.
                        link_parts = os.path.split(o.path)
                        label_link = link_parts[1]
                    lab.remove_label(label_link, settings.AWS_BUCKET_NAME)
                    ga_url.delete() # delete URL entry in DB
            else:
                # NOTE(review): this JsonResponse is constructed but never
                # returned, so the branch is a no-op — confirm whether a
                # `return` (or simply nothing) was intended here.
                JsonResponse({'status': 'success', 'message': 'no existing profile'})
        return JsonResponse({'status': 'success'})
    else:
        return JsonResponse({'status': 'error', 'message': 'no existing home'})
def _create_propertyview(request, org, user, dataset_name):
    """Create a PropertyView by importing a one-row dataset built from the
    request's address parameters, then running the save/map/match import
    pipeline on it.

    Returns the property-view queryset found after matching — or, on a
    matching error, the raw error response dict (see note below).
    """
    cycle = Cycle.objects.filter(organization=org).last() # might need to hardcode this
    dataset = ImportRecord.objects.get(name=dataset_name, super_organization=org)
    result = [{'City': request.GET['city'],
               'State': request.GET['state']}]
    # Accept either the street/zipcode or address_line_1/postal_code
    # parameter spellings used by the different callers.
    if 'street' in request.GET:
        result[0]['Address Line 1'] = request.GET['street']
    else:
        result[0]['Address Line 1'] = request.GET['address_line_1']
    if 'zipcode' in request.GET:
        result[0]['Postal Code'] = request.GET['zipcode']
    else:
        result[0]['Postal Code'] = request.GET['postal_code']
    if 'property_uid' in request.GET:
        result[0]['Custom ID 1'] = request.GET['property_uid']
    file_pk = utils.save_and_load(user, dataset, cycle, result, "profile_data.csv")
    # save data
    resp = save_raw_data(file_pk)
    save_prog_key = resp['progress_key']
    utils.wait_for_task(save_prog_key)
    # map data
    # save_column_mappings(file_id, col_mappings) #perform column mapping
    resp = map_data(file_pk)
    map_prog_key = resp['progress_key']
    utils.wait_for_task(map_prog_key)
    resp = match_buildings(file_pk)
    # resp = geocode_buildings_task(file_pk)
    if (resp['status'] == 'error'):
        # NOTE(review): this returns the error response dict, which is
        # truthy, while callers test `if not propertyview` — an error here
        # would be mistaken for a found property. Confirm whether returning
        # None was intended.
        return resp
    match_prog_key = resp['progress_key']
    utils.wait_for_task(match_prog_key)
    propertyview = utils.propertyview_find(request, org)
    return propertyview
|
import warnings
import pandas
import itertools
import bokeh.palettes
from bokeh.plotting import figure
from bokeh.io import show, curdoc
from bokeh.layouts import column, row, widgetbox
from bokeh.models.widgets import MultiSelect, TextInput, Dropdown
from bokeh.models import ColumnDataSource, CustomJS, HoverTool
from bokeh.embed import components, server_document
warnings.simplefilter(action='ignore', category=FutureWarning)
def update_graphs(attr, old, new):
    """Dropdown callback: point every figure at the newly selected
    variance column and refresh all four bar-chart data sources."""
    metric = dropdown.value
    label = metric.replace('var_', '').replace('_', ' ')
    # Pre-compute the orderings used by the axes and the data sources.
    by_name = df_scores_var.sort_values(metric, ascending=False)
    by_country = df_scores_var.groupby(['location']).mean().sort_values(metric, ascending=False)
    least_stable = df_scores_var.nlargest(10, metric)
    most_stable = df_scores_var.nsmallest(10, metric)

    dropdown.label = label
    # Re-order the categorical ranges so bars follow the new metric.
    f1.x_range.factors = list(by_country.index.values)
    f.y_range.factors = list(df_scores_var.sort_values(metric)['name'])
    f2.x_range.factors = list(least_stable['name'])
    f3.x_range.factors = list(most_stable['name'])
    # Refresh the titles with the human-readable metric name.
    f1.title.text = 'Variance {} per country from 2011 to 2018'.format(label)
    f.title.text = 'Variance {} from 2011 to 2018'.format(label)
    f2.title.text = 'Top 10 least stable universities ({})'.format(label)
    f3.title.text = 'Top 10 most stable universities ({})'.format(label)
    # Push the new columns into the plots' data sources.
    source.data = dict(x_f=by_name['name'].tolist(), top_f=by_name[metric])
    source1.data = dict(x_f1=by_country.index.values, top_f1=by_country[metric])
    source2.data = dict(x_f2=least_stable['name'].tolist(), top_f2=least_stable[metric])
    source3.data = dict(x_f3=most_stable['name'].tolist(), top_f3=most_stable[metric])
# BUG FIX: pandas.DataFrame.from_csv was deprecated in pandas 0.21 and
# removed in 0.25. pandas.read_csv with index_col=0, parse_dates=True
# reproduces from_csv's documented defaults.
def _read_indexed_csv(path):
    """Load one rankings CSV using the first column as the (parsed) index."""
    return pandas.read_csv(path, index_col=0, parse_dates=True)

# Rank tables (rows = universities, columns = years) and transposed views.
df_all = _read_indexed_csv('../Data_csv/rank_order.csv')
dfT_all = df_all.transpose()
df_citations = _read_indexed_csv('../Data_csv/citations_rank.csv')
dfT_citations = df_citations.transpose()
df_industry = _read_indexed_csv('../Data_csv/industry_income_rank.csv')
dfT_industry = df_industry.transpose()
df_international = _read_indexed_csv('../Data_csv/international_outlook_rank.csv')
dfT_international = df_international.transpose()
df_research = _read_indexed_csv('../Data_csv/research_rank.csv')
dfT_research = df_research.transpose()
df_teaching = _read_indexed_csv('../Data_csv/teaching_rank.csv')
dfT_teaching = df_teaching.transpose()
# Score tables for the same categories.
df_citations_scores = _read_indexed_csv('../Data_csv/citations.csv')
dfT_citations_scores = df_citations_scores.transpose()
df_industry_scores = _read_indexed_csv('../Data_csv/industry_income.csv')
dfT_industry_scores = df_industry_scores.transpose()
df_international_scores = _read_indexed_csv('../Data_csv/international_outlook.csv')
dfT_international_scores = df_international_scores.transpose()
df_research_scores = _read_indexed_csv('../Data_csv/research.csv')
dfT_research_scores = df_research_scores.transpose()
df_teaching_scores = _read_indexed_csv('../Data_csv/teaching.csv')
dfT_teaching_scores = df_teaching_scores.transpose()
# Per-university variance of each score/rank across the yearly columns.
df_scores_var = pandas.concat([df_all['name'], df_all['location'], df_citations_scores.var(axis=1), df_industry_scores.var(axis=1), df_international_scores.var(axis=1), df_research_scores.var(axis=1), df_teaching_scores.var(axis=1)], axis=1)
df_scores_var.columns = ['name', 'location', 'var_citations', 'var_industry_income', 'var_international_outlook', 'var_research', 'var_teaching']
df_ranks_var = pandas.concat([df_all['name'], df_all['location'], df_all.var(axis=1), df_citations.var(axis=1), df_industry.var(axis=1), df_international.var(axis=1), df_research.var(axis=1), df_teaching.var(axis=1)], axis=1)
df_ranks_var.columns = ['name', 'location', 'var_overall', 'var_citations', 'var_industry_income', 'var_international_outlook', 'var_research', 'var_teaching']
# Initial data sources, all keyed on the default metric 'var_citations'.
# (The unused `category` variable was removed.)
source = ColumnDataSource(data=dict(x_f=df_scores_var.sort_values('var_citations', ascending=False)['name'].tolist(),
                                    top_f=df_scores_var.sort_values('var_citations', ascending=False)['var_citations']))
source1 = ColumnDataSource(data=dict(x_f1=df_scores_var.groupby(['location']).mean().sort_values('var_citations', ascending=False).index.values,
                                     top_f1=df_scores_var.groupby(['location']).mean().sort_values('var_citations', ascending=False)['var_citations']))
source2 = ColumnDataSource(data=dict(x_f2=df_scores_var.nlargest(10, 'var_citations')['name'].tolist(),
                                     top_f2=df_scores_var.nlargest(10, 'var_citations')['var_citations']))
source3 = ColumnDataSource(data=dict(x_f3=df_scores_var.nsmallest(10, 'var_citations')['name'].tolist(),
                                     top_f3=df_scores_var.nsmallest(10, 'var_citations')['var_citations']))
# f: horizontal bars, every university; f1: per-country means;
# f2/f3: ten largest / ten smallest variances.
f = figure(y_range=df_scores_var.sort_values('var_citations')['name'].tolist(), title='Variance citations from 2011 to 2018', y_axis_label='variance')
f.hbar(y='x_f', left=0, right='top_f', source=source, height=0.5, fill_color="#b3de69")
f.plot_height = 2000
f1 = figure(x_range=df_scores_var.groupby(['location']).mean().sort_values('var_citations', ascending=False).index.values, title='Variance citations per country from 2011 to 2018', y_axis_label='variance')
f1.vbar(x='x_f1', top='top_f1', source=source1, bottom=0, width=0.5, fill_color="#b3de69")
f1.xaxis.major_label_orientation = 'vertical'
f2 = figure(x_range=df_scores_var.nlargest(10, 'var_citations')['name'].tolist(), title='Top 10 least stable universities (citations)', y_axis_label='variance')
f2.vbar(x='x_f2', top='top_f2', source=source2, bottom=0, width=0.5, fill_color="#b3de69")
f2.xaxis.major_label_orientation = 'vertical'
f3 = figure(x_range=df_scores_var.nsmallest(10, 'var_citations')['name'].tolist(), title='Top 10 most stable universities (citations)', y_axis_label='variance')
f3.vbar(x='x_f3', top='top_f3', source=source3, bottom=0, width=0.5, fill_color="#b3de69")
f3.xaxis.major_label_orientation = 'vertical'
# Metric selector; menu maps display labels to df_scores_var column names.
dropdown = Dropdown(label="Category", button_type="warning", menu=[('citations', 'var_citations'), ('industry income', 'var_industry_income'), ('international outlook', 'var_international_outlook'), ('research', 'var_research'), ('teaching', 'var_teaching')])
dropdown.on_change('value', update_graphs)
curdoc().add_root(row(f, column(dropdown, f1, f2, f3)))
curdoc().title = "Variance scores Universities"
script = server_document("http://localhost:5006/variance")
show(row(f, column(dropdown, f1, f2, f3)))
|
# Title: Extended Euclidean Algorithm
# Creator: Austin Akerley
# Date Created: 11/26/2019
# Last Editor: Austin Akerley
# Date Last Edited: 02/02/2020
# Associated Book Page Nuber: 16
# INPUT(s) -
# x - type: int, desc: one of the inputs for the extended euclidean algorithm, example: 12345
# y - type: int, desc: one of the inputs for the extended euclidean algorithm, example: 75232
# Formula: gcd(x,y) = ax + by
# Conditions:
# 1.) x must be smaller than y
# 2.) x and y not equal to 0
def eea(x, y):
    """Extended Euclidean algorithm.

    Computes gcd(x, y) together with Bezout coefficients a, b such that
    a*x + b*y == gcd(x, y), using the standard iterative recurrence
    (despite the header comment, x does not need to be smaller than y).

    :param x: positive int.
    :param y: positive int.
    :raises ValueError: if either argument is zero or negative.
    :return: dict {"gcd", "a", "x", "b", "y"} with the identity
             a*x + b*y == gcd; when gcd == 1, a is the modular inverse
             of x modulo y.
    """
    if x <= 0 or y <= 0:
        raise ValueError("x or y cannot be 0 or less")
    # Standard extended-Euclid state: (old_r, r) tracks remainders,
    # (old_s, s) and (old_t, t) track the Bezout coefficients of x and y.
    # (The original version kept the same values in a tangle of *_l/*_l2
    # shadow variables plus an unused r_l.)
    old_r, r = x, y
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r != 0:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
        old_t, t = t, old_t - quotient * t
    return {"gcd": old_r, "a": old_s, "x": x, "b": old_t, "y": y}  # a is mod inv of x when gcd == 1
# OUTPUT - type: dictionary or None
# {
# "gcd" - type: int, desc: gcd(x, y),
# "a" - type: int, desc: a,
# "x" - type: int, desc: x,
# "b" - type: int, desc: b,
# "y" - type: int, desc: y
# }
|
__author__ = 'Крымов Иван'
# Задание-1: Решите задачу (дублированную ниже):
# Дана ведомость расчета заработной платы (файл "data/workers").
# Рассчитайте зарплату всех работников, зная что они получат полный оклад,
# если отработают норму часов. Если же они отработали меньше нормы,
# то их ЗП уменьшается пропорционально, а за заждый час переработки они получают
# удвоенную ЗП, пропорциональную норме.
# Кол-во часов, которые были отработаны, указаны в файле "data/hours_of"
# С использованием классов.
# Реализуйте классы сотрудников так, чтобы на вход функции-конструктора
# каждый работник получал строку из файла
class Worker:
    """One employee from the payroll sheet.

    Built from a whitespace-split line of the workers file:
    name, surname, full salary, rank, norm of hours.
    """

    def __init__(self, worker):
        self.name = worker[0]
        self.surname = worker[1]
        self.salary = float(worker[2])
        self.rank = worker[3]
        self.normal_hours = float(worker[4])
        self.production = float()  # hours actually worked, filled in later
        self.payroll = float()     # computed pay, filled by payroll_accounting

    def add_production(self, production):
        """Record worked hours if the record matches this worker's name."""
        matches = production[0] == self.name and production[1] == self.surname
        if matches:
            self.production = int(production[2])

    def payroll_accounting(self):
        """Compute pay: pro-rata below the norm, double rate for overtime.

        Raises TypeError when no worked hours were recorded.
        """
        if not self.production:
            raise TypeError('Argument production is empty')
        # Positive deficit = hours short of the norm (pay reduced);
        # negative deficit = overtime (paid back at double rate).
        deficit = self.normal_hours - self.production
        factor = 1 if self.production < self.normal_hours else 2
        self.payroll = round(self.salary - factor * self.salary / self.normal_hours * deficit, 2)
# Load the payroll sheet. Forward slashes work on every platform
# (the original 'data\workers' only resolved on Windows).
with open('data/workers', 'r', encoding="utf8") as workers:
    workers_list = list()
    for line in workers:
        workers_list.append(line.split())
# Skip the header row when constructing Worker objects.
employees = [Worker(worker) for worker in workers_list[1:]]
# BUG FIX: the worked-hours records live in "data/hours_of" (per the task
# description above); the original re-opened "data\workers" here.
with open('data/hours_of', 'r', encoding="utf8") as hours_of:
    hours_list = list()
    for line in hours_of:
        hours_list.append(line.split())
# Match each hours record to its worker.
for employee in employees:
    for hours in hours_list:
        employee.add_production(hours)
for employee in employees:
    employee.payroll_accounting()
    # BUG FIX: the original passed four arguments to a three-placeholder
    # format and printed the nominal salary; the computed payroll is the
    # value meant for "реальная зарплата".
    print('{} {}: реальная зарплата - {}'.format(employee.surname, employee.name, employee.payroll))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.