text stringlengths 38 1.54M |
|---|
def binarySearch(array, key):
    """Iterative binary search.

    Args:
        array: sequence sorted in ascending order.
        key: value to locate.

    Returns:
        Index of ``key`` in ``array``, or -1 when it is not present.
        (The original fell off the end of the loop and implicitly
        returned None; an explicit -1 sentinel is the conventional
        not-found contract.)
    """
    first_element = 0
    last_element = len(array) - 1
    while first_element <= last_element:  # search space non-empty
        mid_element = (first_element + last_element) // 2
        if array[mid_element] == key:
            return mid_element
        if key < array[mid_element]:
            # key can only be in the left half
            last_element = mid_element - 1
        else:
            # key can only be in the right half
            first_element = mid_element + 1
    return -1  # not found
# Demo: locate a key in a sorted list and print its index.
numbers = [0, 1, 2, 8, 13, 17, 19, 32, 42]
target = 17
print(binarySearch(numbers, target))
|
class Person:
    """Minimal holder for a person's name and age."""

    def __init__(self, n, a):
        # Parameter names kept as-is for caller compatibility.
        self.name = n
        self.age = a

    def getName(self):
        """Return the stored name."""
        return self.name

    def getAge(self):
        """Return the stored age."""
        return self.age
class Customer(Person):
    """Person subclass that also carries a telephone number."""

    def __init__(self, nm, ag, tl):
        super().__init__(nm, ag)
        self.tel = tl

    def getName(self):
        # Method override: prefix the stored name with a customer label.
        # NOTE(review): this rewrites self.name on every call, so repeated
        # calls keep prepending the prefix — confirm that is intended.
        self.name = "顧客:" + self.name
        return self.name

    def getTel(self):
        """Return the stored telephone number."""
        return self.tel
#!/usr/bin/python2.7
# Decimal digits not yet observed.  Module-level so state is shared the way
# the original recursive implementation expected; it is reset to the full
# digit set before every return from solve().
needed = [str(x) for x in range(10)]

def solve(N, mult=1):
    """Google Code Jam "Counting Sheep": count N, 2N, 3N, ...

    Returns the multiple (as a string) at which every decimal digit 0-9
    has been seen at least once, or "INSOMNIA" for N == 0 (the only input
    whose multiples never reveal new digits).

    Rewritten iteratively: the original recursed once per multiple, which
    risks RecursionError for inputs that need many steps.  The external
    contract (signature, return values, global `needed` reset) is kept.
    """
    global needed
    while True:
        temp = N * mult
        for eachDigit in str(temp):
            if eachDigit in needed:
                needed.remove(eachDigit)
        if len(needed) == 0:
            needed = [str(x) for x in range(10)]
            return str(temp)
        if temp == N and mult != 1:
            # Only N == 0 repeats its first value forever; it never sleeps.
            needed = [str(x) for x in range(10)]
            return "INSOMNIA"
        mult += 1
# Read one integer per line from input.txt and write the Code Jam style
# answer line for each case to output.txt.
out = open('output.txt', 'w')
with open('input.txt', 'r') as f:
    cases = f.readline()  # first line is the case count; consumed positionally
    case = 1
    for eachLine in f.readlines():
        if eachLine.strip() not in ['', None]:
            out.write('Case #' + str(case) + ": ")
            # Bug fix: the original called solve() twice per case (once for a
            # discarded result, once for the written one), doubling the work.
            solution = str(solve(int(eachLine.strip()))) + "\n"
            out.write(solution)
            case += 1
out.close()
|
import numpy as np
import auto_diff as ad
from .util import NumGradCheck
class TestOpArange(NumGradCheck):
    """Checks for the arange op: forward values and gradient flow."""

    def test_forward(self):
        # (constructor args, expected values) for the one/two/three-arg forms.
        cases = [
            ((3,), np.array([0, 1, 2])),
            ((3, 7), np.array([3, 4, 5, 6])),
            ((3, 7, 2), np.array([3, 5])),
        ]
        for args, expect in cases:
            actual = ad.arange(*args).forward()
            self.assertTrue(np.allclose(expect, actual), (expect, actual))

    def test_backward(self):
        # arange output is constant; gradient should flow through x only.
        x = ad.variable(np.random.random((3,)))
        z = x * ad.arange(3)
        self.numeric_gradient_check(z, {}, [x])
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
from networking.config_ospf import ospf_port_data
from networking.config_rip import rip_port_data
from networking.config_bgp import bgp_port_data
def execute_data():
    """Collect "interface ..."/"ip address ..." config lines for the OSPF,
    RIP and BGP ports, in that order.

    Assumes each port list and its ip list have equal length, as the
    original index-based loops did.
    """
    _, ospf_ports, ospf_ips = ospf_port_data()
    _, rip_names, rip_ips = rip_port_data()
    bgp_ports, bgp_ips = bgp_port_data()
    item_data = []
    for ports, ips in ((ospf_ports, ospf_ips),
                       (rip_names, rip_ips),
                       (bgp_ports, bgp_ips)):
        for port, ip in zip(ports, ips):
            item_data.append("interface %s" % port)
            item_data.append("ip address %s" % ip)
    return item_data
def conf_zebra():
    """Write the zebra daemon config: fixed header lines, then the
    generated per-interface lines."""
    # Build everything before opening the file, as the original did, so a
    # failure in execute_data() does not truncate an existing config.
    generated = execute_data()
    header = ["hostname zebra", "password centos7", "enable password centos7"]
    with open("/usr/local/etc/zebra.conf", "w") as f:
        for line in header + generated:
            f.write(str(line) + "\n")
if __name__ == "__main__":
    # Regenerate the zebra config when run as a script.
    conf_zebra()
import random
import copy
def read_file(file_name):
    """Parse a job-shop instance file.

    Format: first line "<n_jobs> <n_machines>"; each of the next n_jobs
    lines holds n_machines (machine, time) integer pairs.

    Returns:
        (n_jobs, n_machines, operations) where operations is a flat list
        of (job_id, machine, time) tuples, job_id starting at 1.
    """
    # `with` guarantees the handle is closed even if parsing raises,
    # unlike the original open()/close() pair.
    with open(file_name, "r") as f:
        n_jobs, n_machines = [int(valor) for valor in f.readline().split()]
        operations = []
        for i in range(1, n_jobs + 1):
            line = f.readline().split()
            for j in range(0, n_machines * 2, 2):
                operations.append((i, int(line[j]), int(line[j + 1])))
    return n_jobs, n_machines, operations
def evaluate_makespan(particle, n_jobs, n_machines):
    """Return the makespan of a schedule given as an ordered list of
    (job, machine, time) operations.

    Each operation starts when both its machine and its job's previous
    operation are free; the makespan is the latest job completion time.
    """
    machine_free = [0] * n_machines   # time each machine becomes free
    job_free = [0] * n_jobs           # finish time of each job's latest op
    for job, machine, duration in particle:
        begin = max(machine_free[machine], job_free[job - 1])
        finish = begin + duration
        machine_free[machine] = finish
        job_free[job - 1] = finish
    return max(job_free)
# represents a solution to the problem
def create_particle(n_jobs,n_machines,operations):
sequences = [] # operations sequences of the jobs
start = 0
stop = n_machines
for _ in range(n_jobs):
sequences.append(operations[start:stop])
start = stop
stop += n_machines
particle = []
for _ in range(n_jobs*n_machines):
first_operations = [(sequence[0],idx) for idx,sequence in enumerate(sequences) if len(sequence)>0]
operation = random.choice(first_operations)
idx = operation[1]
sequences[idx].remove(operation[0])
particle.append(operation[0])
return particle
def generate_swarm(swarm_size, n_jobs, n_machines, operations):
    """Create `swarm_size` independent random particles."""
    return [create_particle(n_jobs, n_machines, operations)
            for _ in range(swarm_size)]
def mutation(swarm, idx_particle, pm):
    """Mutate a particle in place by relocating operations.

    Moves round(pm * len) randomly chosen operations to a new random slot
    that stays strictly between the previous and next operation of the
    same job, so per-job precedence is preserved.

    Args:
        swarm: list of particles; swarm[idx_particle] is modified in place.
        idx_particle: index of the particle to mutate.
        pm: fraction of the particle's operations to relocate.
    """
    particle_size = len(swarm[idx_particle])
    # how many mutations in particle operations
    mutation_times = round(pm * particle_size)
    for _ in range(mutation_times):
        # select randomly a operation
        idx_operation = random.randint(0, particle_size - 1)
        operation = swarm[idx_particle][idx_operation]
        job = operation[0]
        # check the left border: last same-job operation before
        # idx_operation (-1 when there is none)
        left_border = -1
        for op in range(idx_operation):
            if job == swarm[idx_particle][op][0]:
                left_border = op
        # check the right border: first same-job operation after
        # idx_operation; the for/else falls back to particle_size
        for op in range(idx_operation + 1, particle_size):
            if job == swarm[idx_particle][op][0]:
                right_border = op
                break
        else:
            right_border = particle_size
        # NOTE(review): new_index is chosen before the pop(); when
        # new_index > idx_operation the element lands one slot left of
        # new_index after the pop shifts the tail — confirm intended.
        new_index = random.randint(left_border + 1, right_border - 1)
        swarm[idx_particle].insert(new_index, swarm[idx_particle].pop(idx_operation))
# discovery particle and global best position
def find_best(swarm, particle_best, particle_makespan, n_jobs, n_machines):
    """Update each particle's personal best and return the swarm's best.

    Args:
        swarm: current particles.
        particle_best / particle_makespan: per-particle best positions and
            their makespans; both updated in place.

    Returns:
        A copy of the best particle seen this pass, or None for an empty
        swarm.

    Bug fix: the original stored references to the live particles; since
    mutation() later edits particles in place, the recorded personal and
    global bests could be silently corrupted.  Shallow copies are stored
    instead.  It also left `global_best` unbound (NameError) for an empty
    swarm; None is returned explicitly now.
    """
    min_makespan = float("inf")
    global_best = None
    for position, particle in enumerate(swarm):
        makespan = evaluate_makespan(particle, n_jobs, n_machines)
        # update particle best position
        if makespan < particle_makespan[position]:
            particle_best[position] = list(particle)
            particle_makespan[position] = makespan
        # update swarm best position
        if makespan < min_makespan:
            min_makespan = makespan
            global_best = list(particle)
    return global_best
def insert_position(operations, position):
    """Append to `position` the first operation not already placed in it.

    Args:
        operations: candidate operations, in order.
        position: partial schedule being built; extended in place.

    Returns:
        The remainder of `operations` after the consumed prefix, or []
        when nothing could be placed.

    Robustness fix: the original left `idx` unbound for an empty list
    (NameError) and, when every operation was already placed, fell out of
    the loop without appending and returned the slice past the last index
    ([]); both cases now return [] explicitly.
    """
    for idx, operation in enumerate(operations):
        if operation not in position:
            position.append(operation)
            return operations[idx + 1:]
    return []
def update_position(wc, wb, n, swarm, particle_best, global_best, idx):
    """Compose a new position of n operations, drawing each one from the
    current particle (prob wc), its personal best (prob wb) or the global
    best (remaining probability)."""
    new_position = []
    # Deep copies: insert_position consumes its source list as it goes.
    sources = {
        "current": copy.deepcopy(swarm[idx]),
        "personal": copy.deepcopy(particle_best[idx]),
        "global": copy.deepcopy(global_best),
    }
    for _ in range(n):
        u = round(random.random(), 2)
        if u <= wc:
            key = "current"
        elif u <= wc + wb:
            key = "personal"
        else:
            key = "global"
        sources[key] = insert_position(sources[key], new_position)
    return new_position
def execute(swarm, wc, wb, n_jobs, n_machines, iterations=100):
    """Run the PSO-style search over the swarm; returns the best particle
    of the final iteration.

    Args:
        swarm: list of particles; rebuilt and mutated in place each round.
        wc, wb: probabilities of drawing an operation from the current /
            personal-best particle (the remainder uses the global best).
        iterations: stopping criterion (fixed iteration count).
    """
    swarm_size = len(swarm)
    # particle mutation probability
    pm = 0.70
    # the start position for each particle is the best position
    particle_best = copy.deepcopy(swarm)
    # number of operations
    n_ops = n_jobs * n_machines
    # stopping criteria: iterations number
    for _ in range(iterations):
        # makespan of each particle best position
        # NOTE(review): recomputed from particle_best every round rather
        # than carried over from find_best — redundant work, same values.
        particle_makespan = [evaluate_makespan(p, n_jobs, n_machines) for p in particle_best]
        # global best position
        global_best = find_best(swarm, particle_best, particle_makespan, n_jobs, n_machines)
        for idx in range(swarm_size):
            new_position = update_position(wc, wb, n_ops, swarm, particle_best, global_best, idx)
            swarm[idx] = new_position
            # if particle is gonna mutate
            u = round(random.random(), 2)
            if u <= pm:
                mutation(swarm, idx, 0.4)
    return global_best
def main():
    """Run 10 independent PSO searches on the ft20 instance and print the
    best makespan found."""
    instance_path = "datasets//ft20.txt"
    n_jobs, n_machines, operations = read_file(instance_path)
    # cognitive coefficients
    wc = 0.20  # particle current position
    wb = 0.30  # particle best position
    wg = 0.50  # swarm best position (implicitly 1 - wc - wb)
    results = []
    for _ in range(10):
        swarm = generate_swarm(200, n_jobs, n_machines, operations)
        best_particle = execute(swarm, wc, wb, n_jobs, n_machines, 50)
        results.append(evaluate_makespan(best_particle, n_jobs, n_machines))
    print(min(results))
main()
|
# -*- coding: utf-8 -*-
# @Author: Yeshwanth
# @Date: 2021-01-04 18:58:12
# @Last Modified by: Yeshwanth
# @Last Modified time: 2021-01-09 12:30:53
# @Title: System Time
import time
# Print the current local time as a human-readable string,
# e.g. "Sat Jan  9 12:30:53 2021".
print(time.ctime())
from sklearn.svm import SVC
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
'''
### sklearn.svm.SVC
C = 1.0, kernel = 'rbf', degree = 3, gamma = 'auto', coef0 = 0.0, shrinking = True, probability = False, tol = 0.001, cache_size = 200, class_weight = None, verbose = False, max_iter = -1, decision_function_shape = None, random_state = None) *
参数:
- C:C-SVC的惩罚参数C,默认值是1.0
C越大,相当于惩罚松弛变量,希望松弛变量接近0,即对误分类的惩罚增大,趋向于对训练集全分对的情况,这样对训练集测试时准确率很高,但泛化能力弱。C值小,对误分类的惩罚减小,允许容错,将他们当成噪声点,泛化能力较强。
- kernel :核函数,默认是rbf,可以是‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’
– 线性:u'v
– 多项式:(gamma * u'*v + coef0)^degree
– RBF函数:exp(-gamma | u - v | ^ 2)
– sigmoid:tanh(gamma * u'*v + coef0)
- degree :多项式poly函数的维度,默认是3,选择其他核函数时会被忽略。
- gamma : ‘rbf’, ‘poly’ 和‘sigmoid’的核函数参数。默认是’auto’,则会选择1 / n_features
- coef0 :核函数的常数项。对于‘poly’和 ‘sigmoid’有用。
- probability :是否采用概率估计?.默认为False
- shrinking :是否采用shrinking heuristic方法,默认为true
- tol :停止训练的误差值大小,默认为1e - 3
- cache_size :核函数cache缓存大小,默认为200
- class_weight :类别的权重,字典形式传递。设置第几类的参数C为weight * C(C - SVC中的C)
- verbose :允许冗余输出?
- max_iter :最大迭代次数。-1为无限制。
- decision_function_shape :‘ovo’, ‘ovr’ or None, default = None3
- random_state :数据洗牌时的种子值,int值
主要调节的参数有:C、kernel、degree、gamma、coef0。
'''
def create_data():
    """Load the first 100 iris rows (classes 0 and 1), keep the two sepal
    features, and relabel class 0 as -1.

    Returns:
        (X, y): feature matrix of shape (100, 2) and label vector.
    """
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
    data = np.array(df.iloc[:100, [0, 1, -1]])
    # Vectorised relabel instead of the original per-row loop.
    data[data[:, -1] == 0, -1] = -1
    return data[:, :2], data[:, -1]
# Train a default (RBF-kernel) SVC on a 75/25 split and score it.
X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
clf = SVC()
clf.fit(X_train, y_train)
# NOTE(review): the accuracy is computed but neither printed nor stored.
clf.score(X_test, y_test)
|
import time as t
class Timer():
    """Toy stop-watch built on time.localtime().

    NOTE(review): calc() subtracts the six struct_time fields
    (year..second) element-wise with no borrow/carry handling, so any span
    crossing a minute/hour/day boundary yields negative components; and
    calling stop() before start() raises TypeError because begin is still
    the int 0.  Known limitations of this demo code.
    """
    # Initialising constructor
    def __init__(self):
        self.prompt = "未开始计时..."   # status text used by __str__/__repr__
        self.lasted = []               # per-field (yr..sec) differences
        self.begin = 0                 # struct_time from start(); 0 until then
        self.end = 0                   # struct_time from stop(); 0 until then
    # Override __str__ (for demonstration; could be omitted)
    def __str__(self):
        return self.prompt
    # Override __repr__
    def __repr__(self):
        return self.prompt
    # Start timing
    def start(self):
        self.begin = t.localtime()
        print("计时开始....")
    # Stop timing and report
    def stop(self):
        self.end = t.localtime()
        self.calc()
        print("计时结束...")
    # Compute the elapsed time
    def calc(self):
        self.lasted = []
        self.prompt = "总共运行了"
        for i in range(6):
            # element-wise field difference: year, month, day, hour, min, sec
            self.lasted.append(self.end[i] - self.begin[i])
            self.prompt += str(self.lasted[i])
# print |
from manga_py.fs import dirname, path_join, get_temp_path, rename
from manga_py.provider import Provider
from .helpers.std import Std
class MangaChanMe(Provider, Std):
    """manga-py provider for mangachan ".me" mirrors.

    NOTE(review): chapters on this site are downloadable zip archives
    rather than page lists — presumably why get_files() returns [] and
    loop_chapters() downloads and renames the archive directly.
    """

    def get_archive_name(self) -> str:
        # chapter index looks like "<volume>-<chapter>"; render vol_00X-Y
        idx = self.get_chapter_index().split('-')
        return 'vol_{:0>3}-{}'.format(*idx)

    def get_chapter_index(self) -> str:
        return str(self.chapter_id)

    def get_main_content(self):
        # Not needed for this provider.
        pass

    def get_manga_name(self) -> str:
        # manga slug from urls like https://….me/<section>/<id>-<slug>.html
        name = r'\.me/[^/]+/\d+-(.+)\.html'
        return self._get_name(name)

    def loop_chapters(self):
        # Download the chapter archive to a temp file, then move it into
        # the final archive path with the proper extension.
        arc_name = self.get_archive_name()
        arc = self._archive_type()
        path = path_join(dirname(self.get_archive_path()), arc_name + '.%s' % arc)
        url = self.chapter
        temp_path = get_temp_path('{:0>2}_{}-temp_arc.zip'.format(
            self._storage['current_chapter'],
            arc_name
        ))
        self.save_file(url, temp_path)
        rename(temp_path, path)

    def get_chapters(self):
        # Archive links live in the second column of the download table.
        selector = r'\.me/[^/]+/(\d+-.+\.html)'
        url = self._get_name(selector)
        url = '/download/{}'.format(url)
        return self.html_fromstring(url, 'table#download_table tr td + td > a')

    def get_files(self):
        # Pages come inside the chapter archive; nothing to list here.
        return []

    def get_cover(self):
        selector = r'\.me/[^/]+/(\d+-.+\.html)'
        url = self._get_name(selector)
        url = '{}/manga/{}'.format(self.domain, url)
        img = self._elements('#cover', self.http_get(url))
        if img and len(img):
            return img[0].get('src')

    def book_meta(self) -> dict:
        # todo meta
        pass
main = MangaChanMe
|
# -*- coding: utf-8 -*-
premium_cost = 150
def ground_cost(weight):
    """Ground shipping: a flat £20 charge plus a per-kg rate chosen by
    weight bracket (<=2, <=6, <=10, above)."""
    flat_charge = 20
    if weight <= 2:
        rate = 1.50
    elif weight <= 6:
        rate = 3
    elif weight <= 10:
        rate = 4
    else:
        rate = 4.75
    return rate * weight + flat_charge
def drone_cost(weight):
    """Drone shipping: per-kg rate by weight bracket, no flat charge."""
    if weight <= 2:
        rate = 4.50
    elif weight <= 6:
        rate = 9
    elif weight <= 10:
        rate = 12
    else:
        rate = 14.25
    return rate * weight
def cheapest_shipping(weight):
    """Print the cheapest of drone / ground / premium shipping for `weight`.

    Bug fix: the drone branch printed ground_cost(weight) instead of the
    drone price.  Each cost is now also computed once instead of up to
    four times.  Ties fall through to the premium option, matching the
    original strict comparisons.
    """
    drone = drone_cost(weight)
    ground = ground_cost(weight)
    if drone < ground and drone < premium_cost:
        print("The cheapest way to ship this package is drone shipping at a cost of £" + str(drone))
    elif ground < drone and ground < premium_cost:
        print("The cheapest way to ship this package is ground shipping at a cost of £" + str(ground))
    else:
        print("The cheapest way to ship this package is premium ground shipping at a cost of £" + str(premium_cost))
cheapest_shipping(4.8)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from utils.plotter import VisdomLinePlotter
import gym
class LinearAct(nn.Linear):
    """nn.Linear followed by an activation (ReLU by default), with
    Kaiming-normal weight initialisation.

    Bug fix: the activation method was misspelled ``foward``, so PyTorch
    dispatched to the inherited nn.Linear.forward and the activation was
    never applied.  Renamed to ``forward``.
    """

    def __init__(self, *args, activation=F.relu, **kwargs):
        super().__init__(*args, **kwargs)
        torch.nn.init.kaiming_normal_(self.weight, nonlinearity='relu')
        self.activation = activation

    def forward(self, x):
        x = super().forward(x)
        return self.activation(x)
class MLP(nn.Module):
    """Feed-forward stack of LinearAct layers with the given layer sizes."""

    def __init__(self, sizes: list[int]):
        super().__init__()
        # consecutive (fan_in, fan_out) pairs define each layer
        pairs = zip(sizes, sizes[1:])
        self.layers = nn.Sequential(*[LinearAct(fan_in, fan_out)
                                      for fan_in, fan_out in pairs])

    def forward(self, x):
        return self.layers(x)
class PolicyGradient:
    """Vanilla REINFORCE with reward-to-go weights and an MLP policy."""

    def __init__(self, obs_dim: int, nb_actions: int, hidden_sizes: list[int] = [32],
                 lr: float = 1e-2, batch_size: int = 4096, render=False, device=torch.device('cpu')):
        # NOTE(review): mutable default [32] is shared across calls;
        # harmless here since it is never mutated, but a tuple is safer.
        self.net = MLP([obs_dim, *hidden_sizes, nb_actions]).to(device)
        self.opt = torch.optim.Adam(self.net.parameters(), lr=lr)
        self.batch_size = batch_size
        self.device = device
        self.render = render

    def get_policy(self, obs):
        # Categorical action distribution from the network logits.
        return Categorical(logits=self.net(obs))

    def get_action(self, obs):
        # Sample one action and return it as a plain int.
        return self.get_policy(obs).sample().item()

    def get_loss(self, obs, action, weight):
        # Policy-gradient surrogate: -E[log pi(a|s) * weight].
        log_prob = self.get_policy(obs).log_prob(action)
        return -(log_prob * weight).mean()

    def reward_to_go(self, rews):
        # rtg[i] = sum of rewards from step i to the episode end (undiscounted).
        n = len(rews)
        rtg = torch.zeros(n)
        for i in range(n-1, -1, -1):
            rtg[i] = rews[i] + (rtg[i+1] if i+1 < n else 0)
        return rtg

    def learn(self, env):
        """Collect at least batch_size environment steps, then do one
        policy-gradient update.  Returns (episode returns, lengths)."""
        batch_obs = []
        batch_acts = []
        batch_weights = []   # per-step loss weights (reward-to-go)
        batch_returns = []
        batch_lengths = []
        obs = env.reset()
        done = False
        episode_rewards = []
        rendered = False     # render only the first episode of the batch
        while True:
            if self.render and not rendered:
                env.render()
            batch_obs.append(obs.copy())  # do we need a copy here?
            act = self.get_action(torch.as_tensor(obs, dtype=torch.float32).to(self.device))
            obs, rew, done, _ = env.step(act)
            batch_acts.append(act)
            episode_rewards.append(rew)
            if done:
                episode_return, episode_length = sum(episode_rewards), len(episode_rewards)
                batch_returns.append(episode_return)
                batch_lengths.append(episode_length)
                # if not rendered:
                #     env.close()
                rendered = True
                # batch_weights += [episode_return] * episode_length # duplicate return length times
                batch_weights += list(self.reward_to_go(episode_rewards))
                obs, done, episode_rewards = env.reset(), False, []
                # stop once the batch holds enough steps (checked only at
                # episode boundaries, so episodes are never truncated)
                if len(batch_obs) > self.batch_size:
                    break
        self.opt.zero_grad()
        loss = self.get_loss(obs=torch.as_tensor(batch_obs, dtype=torch.float32).to(self.device),
                             action=torch.as_tensor(batch_acts, dtype=torch.int32).to(self.device),
                             weight=torch.as_tensor(batch_weights, dtype=torch.float32).to(self.device)
                             )
        loss.backward()
        self.opt.step()
        return batch_returns, batch_lengths
def main(env_id: str = 'CartPole-v0', hidden_sizes=[32], lr=1e-2,
         epochs=100, batch_size=4096, render=True,
         repeat_test: int = 3, device=torch.device('cpu')):
    """Train PolicyGradient on a gym env, logging per-epoch returns to visdom.

    NOTE(review): repeat_test is accepted but never used.
    """
    env = gym.make(env_id)
    obs_dim = env.observation_space.shape[0]
    nb_actions = env.action_space.n
    plotter = VisdomLinePlotter()
    agent = PolicyGradient(obs_dim, nb_actions, hidden_sizes, lr=lr, batch_size=batch_size, render=render, device=device)
    for eid in range(epochs):
        returns, _ = agent.learn(env)
        print(f"> epoch: {eid+1}/{epochs}, \t mean_return: {sum(returns) / len(returns)}, \t max_return: {max(returns)}")
        plotter.plot('return', 'mean return', "Return (lr)", eid+1, sum(returns) / len(returns))
        plotter.plot('return', 'max return', "Return (lr)", eid+1, max(returns))
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # --env is an alias for --env_name
    parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--cpu', action='store_true')
    parser.add_argument('--lr', type=float, default=1e-2)
    parser.add_argument('--epochs', type=int, default=100)
    args = parser.parse_args()
    # default to CUDA unless --cpu is given
    device = torch.device('cpu') if args.cpu else torch.device('cuda')
    main(env_id=args.env_name, render=args.render, device=device, lr=args.lr, hidden_sizes=[64,64], epochs=args.epochs)
|
'''
Created on Feb 4, 2014
@author: Naved
'''
import threading
class MyThread(threading.Thread):
def __init__(self):
super(MyThread, self).__init__()
def run(self):
print "tada"
threading.Lock.acquire()
print "done"
threading.Lock.release()
# Start ten threads, then wait for them all to finish.  Python 2 code.
threadlist=[]
for e in xrange(0,10):
    a=MyThread()
    a.start()
    threadlist.append(a)
for thread in threadlist:
    thread.join()
|
"""
通过spark的api, 或者直接调用MysqlDB的api从业务数据源中获取数据
"""
from typing import Tuple, List, Union
import datetime
from pyspark import Row, RDD
from pyspark.sql import SparkSession, DataFrame
from process.spark.context import RetailerContext
class MysqlDataLoader(object):
    """Loads business-source MySQL tables into Spark DataFrames over JDBC."""

    def __init__(self, retailer_context):
        # type: (RetailerContext) -> None
        super(MysqlDataLoader, self).__init__()
        self.config = retailer_context.config
        self.spark = retailer_context.spark
        self.retailer_context = retailer_context

    def retailer_data_frame(self, database_name=None, table_name=None, sql=None):
        # type: (str, str, str) -> DataFrame
        """Load from the "retailer" source: a whole table or an SQL query."""
        return self._data_frame("retailer", database_name=database_name, table_name=table_name, sql=sql)

    def _data_frame(self, database_type, database_name=None, table_name=None,
                    sql=None, partition_column=None, lower_bound=None, upper_bound=None, num_partitions=None):
        """Build and load a JDBC DataFrame.

        Either `sql` (wrapped as a "(...) as tmp" subquery) or
        `database_name` + `table_name` must be supplied.  The partition_*
        arguments, when set, enable Spark's parallel JDBC reads and must
        be provided together.
        """
        database_config = self.config[database_type]
        jdbc_url = "jdbc:mysql://{0}:{1}".format(database_config["host"], database_config["port"])
        user_name = database_config["username"]
        password = database_config["password"]
        frame = self.spark.read.format("jdbc")\
            .option("url", jdbc_url)\
            .option("user", user_name).option("password", password)
        if partition_column is not None:
            frame = frame.option("partitionColumn", partition_column)\
                .option("lowerBound", lower_bound).option("upperBound", upper_bound)\
                .option("numPartitions", num_partitions)
        if sql is not None:
            # NOTE(review): debug print left in; consider proper logging
            print(sql)
            frame = frame.option("dbtable", "({0}) as tmp".format(sql))
        else:
            frame = frame.option("dbtable", "{0}.{1}".format(database_name, table_name))
        return frame.load()
|
from django import forms
from .models import Pet, Owner
class PetForm(forms.ModelForm):
    """ModelForm exposing the editable Pet fields."""
    class Meta:
        model = Pet
        fields = ('name', 'age', 'breed', 'owner_name')
class OwnerForm(forms.ModelForm):
    """ModelForm exposing the editable Owner fields."""
    class Meta:
        model = Owner
        fields = ('name', 'age', 'pet_name')
from collections import Counter
# Count each letter's occurrences and show the (letter, count) view.
letters = list("ABACC")
frequency = Counter(letters).items()
print(frequency)
import os, sys, types, re
# supposed to contain a class, FeatConf, to ease working with fsf files in
# python. started as a quick-and-dirty parsing class with little semantic
# knowledge of the fsf file, but now I'm using this in several places, this
# code has a bunch of uglinesses that would require a search-and-replace in the
# other scripts to fix. Which I should do. But for the time being we'll be
# incrementally adding some more intelligence to the class in inelegant ways.
class FeatEntry:
    """One "set name(key) value" entry of an FSF file plus its comment.

    Python 2 code (uses `basestring`).
    """
    # entry name is "groupname(entrykey)"
    p = re.compile(r'''([^(]+)\((.+)\)''')
    # starting version 1, field quotes will be stripped
    VERSION = 1
    STRIP_QUOTES = True

    def _trim(self, s):
        # Strip surrounding double quotes from string values only.
        if isinstance(s, basestring):
            return s.strip('"')
        else:
            return s

    def __init__(self, name, value, comment):
        # Keyword-valued entries (con_mode*, unwarp_dir) render without
        # quotes even though their values are strings.
        self._is_keyword = False
        self.name = name
        match = self.p.match(name)
        self.groupname, self.entrykey = match.groups()
        if self.groupname == 'feat_files':
            self.entrykey = int(self.entrykey)
        elif self.entrykey.startswith("con_mode"):
            self._is_keyword = True
        elif self.entrykey == "unwarp_dir":
            self._is_keyword = True
        elif '"' not in value:
            # assume numeric
            if '.' in value:
                value = float(value)
            elif value.isdigit():
                value = int(value)
        if self.STRIP_QUOTES:
            value = self._trim(value)
        self.value = value
        self.comment = comment

    def renderedvalue(self):
        # Re-add quotes around plain string values when they were stripped.
        if not self.STRIP_QUOTES:
            return self.value
        if isinstance(self.value, basestring) and not self._is_keyword:
            return '"%s"' % self.value
        else:
            return self.value

    def __str__(self):
        # Render as "# comment\nset name value" (comment lines re-prefixed).
        return \
            (self.comment and '# %s\n' % self.comment.replace('\n', '\n# ') or '') + \
            'set %s %s' % (self.name, self.renderedvalue())

    def __repr__(self):
        return str(self)
class Bunch(dict):
    """dict whose items double as attributes (b.x is b['x']).

    Bug fix: __init__ contained ``self.__dict__ == self`` — a typo'd
    comparison with no effect.  The attribute aliases below already
    provide the behaviour, so the dead line is removed rather than
    "fixed" to ``=`` (which would route through __setattr__ and store a
    spurious '__dict__' key in the dict).
    """
    def __init__(self, **kw):
        dict.__init__(self, kw)
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
class FeatConf:
    """An FSF (FEAT configuration) file parsed into FeatEntry objects.

    Entries are kept in file order (ls_entry), indexed by full name
    (dc_index), and grouped by group name into attribute Bunches
    (e.g. self.fmri, self.feat_files).  Python 2 code (types.StringType).
    """
    FEATVERSION = 5.98

    def output_order(self):
        """Render entries in FEAT's canonical key order.

        NOTE(review): emits a fixed key set plus the feat_files section;
        per-EV / per-contrast keys (see TODO below) are not covered.
        """
        out = []
        for key in """\
version
inmelodic
level
analysis
relative_yn
help_yn
featwatcher_yn
sscleanup_yn
outputdir
tr
npts
ndelete
tagfirst
multiple
inputtype
filtering_yn
brain_thresh
critical_z
noise
noisear
newdir_yn
mc
sh_yn
regunwarp_yn
dwell
te
signallossthresh
unwarp_dir
st
st_file
bet_yn
smooth
norm_yn
perfsub_yn
temphp_yn
templp_yn
melodic_yn
stats_yn
prewhiten_yn
motionevs
robust_yn
mixed_yn
evs_orig
evs_real
evs_vox
ncon_orig
ncon_real
nftests_orig
nftests_real
constcol
poststats_yn
threshmask
thresh
prob_thresh
z_thresh
zdisplay
zmin
zmax
rendertype
bgimage
tsplot_yn
reg_yn
reginitial_highres_yn
reginitial_highres_search
reginitial_highres_dof
reghighres_yn
reghighres_search
reghighres_dof
regstandard_yn
regstandard
regstandard_search
regstandard_dof
regstandard_nonlinear_yn
regstandard_nonlinear_warpres
paradigm_hp
ncopeinputs
copeinput.1
""".strip().split():
            out.append(str(self.fmri[key]))
        # feat_files section
        for feat_file_num in range(1, len(self.feat_files)+1):
            out.append(str(self.feat_files[feat_file_num]))
        # TODO
        # keys that get created per event and group etc.
        # e.g. evtitle1, groupmem.1 and stuff like that
        # these were laid out in an old modification
        # and i no longer remember the original intention
        # axh Mon Feb 18 17:55:35 EST 2013
        # ---
        # evtitle
        # shape
        # convolve
        # convolve_phase
        # tempfilt_yn
        # deriv_yn
        # custom
        # ortho
        # evg
        # groupmem
        # conpic_real
        # conname_real
        # con_real
        # conmask
        # ...
        for key in """\
confoundevs
alternative_example_func
alternative_mask
init_initial_highres
init_highres
init_standard
overwrite_yn
level2orth
con_mode_old
con_mode
conmask_zerothresh_yn
""".strip().split():
            out.append(str(self.fmri[key]))
        return "\n\n".join(out)

    def __setitem__(self, key, val):
        # Unknown keys only warn here; the lookup below then raises
        # KeyError, so no new entry is actually created.
        if key not in self.dc_index:
            print("WARNING: setting new item [%s]" % key)
        self.dc_index[key].value = val

    def __delitem__(self, key):
        """Remove an entry from both the ordered list and the index;
        returns the removed FeatEntry."""
        idx = self.index(key)
        fe = self.dc_index[key]
        del self.ls_entry[idx]
        del self.dc_index[key]
        return fe

    def __init__(self, fl_input):
        """Parse from a path, a raw fsf string, or an open file object."""
        if type(fl_input) == types.StringType:
            # a short string naming an existing file is read from disk;
            # any other string is treated as the fsf content itself
            if len(fl_input) < 255 and os.path.exists(fl_input):
                ls_line = open(fl_input).readlines()
            else:
                ls_line = fl_input.split("\n")
                ls_line[-1] += "\n"
        else:
            ls_line = fl_input.readlines()
        self.ls_entry = []
        self.dc_index = {}
        entrypattern = re.compile(r'^set\s+(\S+)\s+(.*)$')
        self.ls_groupmem = []
        commentbuf = []  # comment lines preceding the next "set"
        for line in ls_line:
            line = line.strip()
            if len(line) == 0 and len(commentbuf) == 0:
                continue
            if line.startswith('#'):
                commentbuf.append(line[2:])
            elif line.startswith('set '):
                match = entrypattern.match(line).groups()
                fe = FeatEntry(match[0], match[1], "\n".join(commentbuf))
                commentbuf = []
                self.dc_index[fe.name] = fe
                self.ls_entry.append(fe)
                # also group entries into per-group Bunch attributes
                if not hasattr(self, fe.groupname):
                    setattr(self, fe.groupname, Bunch())
                self.__dict__[fe.groupname][fe.entrykey] = fe

    def __getitem__(self, name):
        # NOTE(review): .get() returns None for unknown keys, so a miss
        # raises AttributeError (on .value) rather than KeyError.
        return self.dc_index.get(name).value

    def __str__(self):
        return "\n\n".join([str(fe) for fe in self.ls_entry])

    def find(self, matcher):
        """Return {name: value} for all entries whose name matches the regex."""
        p = re.compile(matcher)
        rtn = {}
        for k in self.dc_index.keys():
            if p.match(k):
                rtn[k] = self[k]
        return rtn

    def complain_if_exists(self, fe):
        # Guard used by append()/insert() against duplicate entry names.
        if fe.name in self.dc_index:
            raise Exception("this entry already exists")

    def append(self, fe):
        """Add a new entry at the end."""
        self.complain_if_exists(fe)
        self.ls_entry.append(fe)
        self.dc_index[fe.name] = fe

    def index(self, key):
        # Position of `key` in the ordered entry list.
        return [fe.name for fe in self.ls_entry].index(key)

    def insert(self, idx, fe):
        """Add a new entry at position idx."""
        self.complain_if_exists(fe)
        self.ls_entry.insert(idx, fe)
        self.dc_index[fe.name] = fe

    def remove_feat_input(self, p_match):
        """remove from the 4D or feat directory input list, and rebuild output structure
        this is a very dumb function and only assumes 1 group type and 1 EV type!
        make sure your fsf fits this use case!
        Arguments:
        - `p_match`: the regex to match
        """
        ls_feat_files = []
        idx, end = 0, len(self.ls_entry)
        # pass 1: drop every feat_files / evg / groupmem entry, keeping the
        # feat_files values that do NOT match p_match for re-insertion
        while idx < end:
            fe = self.ls_entry[idx]
            if any(map(lambda search: fe.name.startswith(search),
                       ["feat_files",
                        "fmri(evg",
                        "fmri(groupmem"])):
                # exclude anything that matches from the new buffer
                if fe.name.startswith("feat_files"):
                    if not re.match(p_match, fe.value):
                        ls_feat_files.append(fe.value)
                    else:
                        print("removing: " + fe.value)
                del self.dc_index[fe.name]
                del self.ls_entry[idx]
                end -= 1
            else:
                idx += 1
        # pass 2: re-insert one entry per surviving input just before each
        # fixed marker entry (confoundevs / level2orth / con_mode_old)
        idx = 0
        while idx < len(self.ls_entry):
            fe = self.ls_entry[idx]
            make_fe = None
            if fe.name == "fmri(confoundevs)":
                make_fe = lambda num, feat_file: FeatEntry("feat_files(%s)" % num, feat_file, "4D AVW data or FEAT directory (%s)" % num)
            elif fe.name == "fmri(level2orth)":
                make_fe = lambda num, feat_file: FeatEntry("fmri(evg%s.1)" % num, "1.0", "Higher-level EV value for EV 1 and input %s" % num)
            elif fe.name == "fmri(con_mode_old)":
                make_fe = lambda num, feat_file: FeatEntry("fmri(groupmem.%s)" % num, "1", "Group membership for input %s" % num)
            if make_fe:
                num_feat_file = 0
                for feat_file in ls_feat_files:
                    num_feat_file += 1
                    fenew = make_fe(num_feat_file, feat_file)
                    self.dc_index[fenew.name] = fenew
                    self.ls_entry.insert(idx, fenew)
                    idx += 1
            idx += 1
        # keep the input counts consistent with the surviving list
        self.dc_index["fmri(npts)"].value = len(ls_feat_files)
        self.dc_index["fmri(multiple)"].value = len(ls_feat_files)
if __name__ == "__main__":
    # CLI: inspect the fsf file given as the last argument.  Python 2 era.
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-c", "--contrast_list", action="store_true",
                      help = "list contrasts")
    parser.add_option("-i", "--input_list", action="store_true",
                      help = "list inputs")
    parser.add_option("-p", "--print_everything", action="store_true",
                      help = "print everything (echo... to test output)")
    (options, args) = parser.parse_args()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit()
    fsf_file = sys.argv[-1]
    if not os.path.exists(fsf_file):
        print("the file does not exist!")
        sys.exit(1)

    def sort_by_dotnumber(t1, t2):
        # cmp-style comparator ordering "name.N" keys by their number.
        # NOTE(review): passed positionally to sorted() below — Python 2
        # cmp semantics; under Python 3 this call raises TypeError.
        p = re.compile(r'\D*(\d+)\D*')
        def getnum(s):
            m = p.match(s)
            return m and int(m.group(1)) or 0
        return getnum(t1[0]) > getnum(t2[0]) and 1 or -1

    FC = FeatConf(fsf_file)
    if options.print_everything:
        print(str(FC))
    else:
        res = None
        if options.contrast_list:
            res = FC.find(r'.*conname_real.*')
        elif options.input_list:
            res = FC.find(r'.*feat_files.*')
        if res:
            maxlenk = max(map(len, res.keys()))
            for k, v in sorted(res.items(), sort_by_dotnumber):
                print(" " + k.ljust(maxlenk + 1) + ": " + v)
|
from django.contrib import admin
from djCell.apps.productos.models import TiempoGarantia,Estatus,Marca,Gama,DetallesEquipo,Equipo,TipoIcc,DetallesExpres,Expres,Secciones,MarcaAccesorio,DetallesAccesorio,EstatusAccesorio,Accesorio,NominacionFicha,EstatusFicha,Ficha,TiempoAire, HistorialPreciosEquipos,HistorialPreciosAccesorios,HistorialPreciosExpres
# Register every catalogue model with the default admin site, in the same
# order as before.
for _model in (
    TiempoGarantia, Estatus, Marca, Gama, DetallesEquipo, Equipo, TipoIcc,
    DetallesExpres, Expres, Secciones, MarcaAccesorio, DetallesAccesorio,
    EstatusAccesorio, Accesorio, NominacionFicha, EstatusFicha, Ficha,
    TiempoAire, HistorialPreciosEquipos, HistorialPreciosAccesorios,
    HistorialPreciosExpres,
):
    admin.site.register(_model)
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
# Tune-footprint comparison for two chromaticity settings of the SPS.
plt.style.use('natalia')
Qx_list1, Qy_list1 = np.loadtxt('../output/mytunes_nominalSPS_QpxQPy0.txt', delimiter=",", unpack=True)
Qx_list2, Qy_list2 = np.loadtxt('../output/mytunes_nominalSPS_QpxQPy2.txt', delimiter=",", unpack=True)
f, ax = plt.subplots(figsize=(14, 14))
ax.scatter(Qx_list2, Qy_list2, c='C1', label='Qpx=Qpy=2')
ax.scatter(Qx_list1, Qy_list1, c='C0', label='Qpx=Qpy=0')
ax.set_ylim(0.17993, 0.18007)
ax.set_xlim(0.12993, 0.13007)
# Disable the offset notation so the tiny tune range reads directly.
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
ax.yaxis.set_major_formatter(y_formatter)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
# Bug fix: the x axis was handed y_formatter (x_formatter was created but
# unused); matplotlib formatters should not be shared between axes.
ax.xaxis.set_major_formatter(x_formatter)
plt.xlabel('Qx')
plt.ylabel('Qy')
plt.grid()
plt.legend()
plt.tight_layout()
#plt.savefig('gaussian_footprint_nominalSPStunespread_chroma.png')
plt.show()
# coding=UTF-8
import sys
#import time
import pygame # Load the required library
#from gtts import gTTS
# Play the audio file named on the command line and block until it ends.
filePath = sys.argv[1]
print(filePath)
pygame.mixer.pre_init(44100, -16, 2, 1024*2)
pygame.mixer.init()
pygame.mixer.music.load(filePath)
pygame.mixer.music.set_volume(0.5)
try:
    clock = pygame.time.Clock()
    pygame.mixer.music.play()
    while pygame.mixer.music.get_busy():
        # Poll up to 1000 times per second without spinning a full core.
        clock.tick(1000)
except Exception:
    # Bug fix: the original bare ``except:`` also swallowed
    # KeyboardInterrupt / SystemExit; catch only real errors.
    print("can't play")
|
#Write a program that reads a name and an age for a person, until the name is blank. Once all names have been present the user with an option to list the entered people in alphabetical order, or in descending age order. For either choice, list each person's name followed by their age on a single line. Make sure you output the correct age for the correct person
# Read (name, age) pairs until a blank name is entered, then list them
# sorted alphabetically by name or ascending by age.  Python 2 code.
Name= raw_input("Enter name: ")
Age= int(raw_input("Enter age: "))
namelist=[]
while Name !='':
    # Age is read back as a string at the bottom of the loop; this
    # converts the previous prompt's value before storing the pair.
    age=int(Age)
    namelist=namelist + [[Name, age]]
    Name= raw_input("Enter name: ")
    Age= raw_input("Enter age: ")
question1= raw_input("Do you want to sort names or age, enter, age or name: ")
if question1 == 'name':
    namelist.sort()
if question1 == 'age':
    # put age first in each pair so the list sort orders by age
    for i in namelist:
        i.reverse()
    namelist.sort()
print namelist
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from .models import Posts
from .forms import CreatePostForm
# Create your views here.
def index(request):
    """Render the ten most recent posts."""
    latest = Posts.objects.all()[:10]
    return render(request, 'posts/index.html', {
        'title' : 'Latest Posts',
        'posts': latest,
    })
def details(request, id):
    """Render a single post looked up by primary key."""
    return render(request, 'posts/details.html',
                  {'post': Posts.objects.get(id=id)})
def create(request):
    """Show the post-creation form and save submitted posts.

    GET: render an empty form.
    POST: validate; on success save the post and redirect to the index,
    otherwise re-render the bound form so the template can display the
    validation errors.

    Bug fixed: on an invalid POST the old code fell through to the final
    ``render`` call with ``context`` never assigned, raising
    UnboundLocalError (and printed a misleading "Form is not valid" message
    on the *valid* path).
    """
    if request.method == 'POST':
        form = CreatePostForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/posts/')
        # Invalid form: log the errors and fall through so the bound form
        # (with its error messages) is re-rendered below.
        print('Form Errors: form.errors %s' % form.errors)
    else:
        form = CreatePostForm()
    context = {
        'title': 'Create a Post',
        'form': form
    }
    return render(request, 'posts/create.html', context)
def l_norm_distance(vector1, vector2, n):
    """Return the Minkowski (L-n) distance between two equal-length vectors."""
    total = sum(abs(vector1[i] - vector2[i]) ** n
                for i in range(len(vector1)))
    return total ** (1 / n)
def find_k_nearest_neighbours(train_x, value_to_be_predicted, k, n):
    """Return [index, distance] pairs for the k training samples closest to
    value_to_be_predicted under the L-n distance, nearest first (index breaks
    distance ties)."""
    scored = [[idx, l_norm_distance(sample, value_to_be_predicted, n)]
              for idx, sample in enumerate(train_x)]
    scored.sort(key=lambda pair: (pair[1], pair[0]))
    return scored[:k]
def classify_test_sample(train_x, train_y, test_sample, k, n):
    """Predict a label for test_sample by majority vote among its k nearest
    neighbours; on a tie the label seen earliest in the vote list wins."""
    k_neighbours = find_k_nearest_neighbours(train_x, test_sample, k, n)
    votes = [train_y[k_neighbours[i][0]] for i in range(k)]
    # Tally each label once instead of re-counting with list.count().
    tally = {}
    for label in votes:
        tally[label] = tally.get(label, 0) + 1
    winner = votes[0]
    top_votes = tally[winner]
    for label in votes[1:]:
        if tally[label] > top_votes:
            winner = label
            top_votes = tally[label]
    return winner
def classify_test_set(train_x, train_y, test_x, k, n):
    """Classify every sample in test_x; returns the list of predicted labels."""
    return [classify_test_sample(train_x, train_y, sample, k, n)
            for sample in test_x]
def accuracy(predicted_y, actual_y):
    """Return the fraction of predictions that match the true labels.

    Args:
        predicted_y: list of predicted labels.
        actual_y: list of true labels, same length as predicted_y.

    Returns:
        float in [0, 1]; 0.0 for empty input (the old code raised
        ZeroDivisionError on empty lists).

    Raises:
        ValueError: if the two lists differ in length (previously a silent
            mismatch or an IndexError).
    """
    if len(predicted_y) != len(actual_y):
        raise ValueError("predicted_y and actual_y must have the same length")
    if not predicted_y:
        return 0.0
    correct_predicted = sum(1 for p, a in zip(predicted_y, actual_y) if p == a)
    return correct_predicted / len(predicted_y)
def predict_k_value(train_x, train_y, split_ratio, n):
    """Pick the best k by holding out the first split_ratio fraction of the
    training data as a validation set and scoring every k, largest accuracy
    (then largest k) first."""
    print(len(train_x))
    holdout = int(split_ratio * len(train_x))
    val_x = train_x[:holdout]
    val_y = train_y[:holdout]
    fit_x = train_x[holdout:]
    fit_y = train_y[holdout:]
    scored = []
    for k in range(1, len(fit_x)):
        preds = classify_test_set(fit_x, fit_y, val_x, k, n)
        scored.append([k, accuracy(preds, val_y)])
    scored.sort(key=lambda item: (item[1], item[0]), reverse=True)
    print(scored)
    return scored[0][0]
|
#! /usr/bin/env python
# coding: utf-8
import time
from dms.utils.singleton import Singleton
from dms.objects.base import DBObject
class WebConfig(Singleton, DBObject):
    """Key/value configuration store backed by the ``web_config`` table.

    Rows whose ``allow_update`` is False are immutable, so they are cached
    in memory after the first insert/read.
    """
    def __init__(self):
        DBObject.__init__(self)
        self.t = "web_config"
        self.cache = dict()
        self.cols = ["config_key", "config_value", "allow_update",
                     "update_time", "add_time"]
    def insert_config(self, config_key, config_value, allow_update=False):
        """Insert a config row (ignoring duplicates); returns affected rows."""
        now = time.time()
        row = dict(config_key=config_key, config_value=config_value,
                   allow_update=allow_update, update_time=now,
                   add_time=now)
        affected = self.db.execute_insert(self.t, row, ignore=True)
        # Immutable keys can be cached right away.
        if affected > 0 and allow_update is False:
            self.cache[config_key] = row
        return affected
    def get_key(self, config_key):
        """Return the row dict for config_key, or None when absent."""
        cached = self.cache.get(config_key)
        if cached is not None:
            return cached
        rows = self.db.execute_select(self.t, cols=self.cols,
                                      where_value=dict(config_key=config_key))
        if not rows:
            return None
        row = rows[0]
        if row["allow_update"] is False:
            self.cache[config_key] = row
        return row
    def get_keys(self, config_keys):
        """Return {key: row-or-None} for every requested key."""
        return {key: self.get_key(key) for key in config_keys}
    def update_key(self, config_key, config_value, allow_update=None):
        """Update an updatable key's value (and optionally its allow_update
        flag); returns the number of rows changed."""
        update_value = dict(config_value=config_value,
                            update_time=time.time())
        if allow_update is not None:
            update_value["allow_update"] = allow_update
        return self.db.execute_update(
            self.t, update_value=update_value,
            where_value=dict(config_key=config_key, allow_update=True))
    def new_configs(self, configs, allow_update=False):
        """Bulk-insert a {key: value} mapping; always returns True."""
        for key, value in configs.items():
            self.insert_config(key, value, allow_update)
        return True
if __name__ == "__main__":
    # Sanity check of the Singleton base: both constructions should print
    # the same object id.
    config_man = WebConfig()
    config_man2 = WebConfig()
    print(id(config_man))
    print(id(config_man2))
|
import thread
import curses
import time
from threading import Lock
#globals
Scr = None
P = 12
Count = 0
mutex=Lock()
#functions
def init():
    """Enter curses mode and create the global screen object `Scr`."""
    global Scr
    Scr = curses.initscr()
    curses.noecho()     # don't echo typed characters
    curses.cbreak()     # deliver keys immediately, without waiting for Enter
    curses.curs_set(0)  # hide the cursor
    Scr.keypad(1)       # translate escape sequences into key constants
def finish():
    """Undo init(): restore normal terminal settings and leave curses mode."""
    curses.nocbreak()
    Scr.keypad(0)
    curses.echo()
    curses.endwin()
def atomicprint(row, column, string):
    """Write `string` at (row, column), serialized through the global lock so
    the counter thread and the main loop don't interleave screen writes."""
    with mutex:
        Scr.addstr(row, column, string)
def counter():
    """Background thread: display and increment a counter forever.

    The display column is the global `P` (the main loop moves it with the
    'l'/'r' keys) and the main loop may reset the global `Count` to 0.
    """
    global Count
    global P
    Count=0
    while True:
        atomicprint(4,P, "Count = " + str(Count))
        # NOTE(review): refresh() happens outside the lock and may race the
        # main thread's refresh calls — confirm this is acceptable.
        Scr.refresh()
        Count+=1
        time.sleep(0.0001)
#Main program
# Key loop: 'l'/'r' move the counter display, 'c' resets it, 'q' quits.
init()
atomicprint(5,5,"Press q to quit")
Scr.refresh()
thread.start_new_thread(counter, ())
BLANK = " " * 20  # wide enough to erase an old "Count = N" display
while True:
    n = Scr.getch()
    # Bug fix: key handling must stay inside this branch. Previously `c`
    # was referenced even when n >= 256 (function/arrow keys), which raised
    # NameError on the first such keypress or reused a stale value after.
    if n < 256:
        c = chr(n)
        atomicprint(6,5, "You pressed " + str(c))
        Scr.refresh()
        if c=='r':
            P=80
            atomicprint(4,12, BLANK)
            atomicprint(4,5, BLANK)
        if c=='l':
            P=5
            atomicprint(4,80, BLANK)
            atomicprint(4,12, BLANK)
        if c=='c':
            Count = 0
            atomicprint(4,80, BLANK)
            atomicprint(4,5, BLANK)
        if c=="q":
            break
finish()
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import sys
# reload(sys)
# sys.setdefaultencoding("utf-8")
"""
Simple wrapper around the Feng-Hirst parser, used as an entry point for
a Docker container.
In contrast to parse.py, this script only accepts one input file.
Since parse.py is quite chatty, it's stdout will be suppressed and stored
in a file. If the parser doesn't produce a parse, this file will
be printed to stderr.
In contrast to parser_wrapper.py, this script accepts a list of input files.
Since parse.py is quite chatty, it's stdout will be supressed and stored in a file.
If the parer doesn't return a list of parses, a json of the list will be printed to stderr
"""
from nltk.tree import ParentedTree
#from parse2 import parse_args
from parse2 import main as feng_main
import argparse
import json
class ParserException(Exception):
    """Raised when the Feng/Hirst parser fails to produce a parse."""
    pass
def get_parser_stdout(parser_stdout_filepath):
    """Return the captured parser STDOUT and reset the redirect file.

    Assumes sys.stdout currently points at parser_stdout_filepath; the file
    is closed, read back, then reopened fresh as the new sys.stdout.
    """
    sys.stdout.close()
    with open(parser_stdout_filepath) as captured:
        contents = captured.read()
    sys.stdout = open(parser_stdout_filepath, "w")
    return contents
# def get_output_filepath(args):
# """Returns the path to the output file of the parser."""
# input_filepath = args[0]
# input_filename = os.path.basename(input_filepath)
# return os.path.join("../texts/results", "{}.tree".format(input_filename))
def main(li_utterances,
         verbose=False,
         skip_parsing=False,
         global_features=True,
         logging=False,
         redirect_output=False):
    """Run the Feng/Hirst parser over a JSON-encoded list of utterances.

    Args:
        li_utterances: JSON string encoding a list of utterance strings.
        verbose, skip_parsing, global_features, logging: forwarded to the
            parser's main entry point.
        redirect_output: when True, the parser's chatty stdout is captured
            in 'parser.stdout' and the real stdout is restored afterwards.

    Returns:
        A JSON string containing one brace-bracketed parse-tree string per
        utterance (also written to stdout).

    Raises:
        AssertionError: when the parser produced no parse trees; the
            captured parser stdout is attached to the exception message.
    """
    parser_stdout_filepath = 'parser.stdout'
    utterances = json.loads(li_utterances)
    kwargs = {
        'verbose': verbose,
        'skip_parsing': skip_parsing,
        'global_features': global_features,
        'logging': logging
    }
    if redirect_output:
        old_stdout = sys.stdout
        sys.stdout = open(parser_stdout_filepath, "w", buffering=1)
    try:
        results = feng_main(utterances, **kwargs)  # list of parse trees
        assert len(results) != 0
    except AssertionError as e:
        # Bug fix: the extra message must be appended as a 1-tuple. The old
        # code did `e.args += ("...").format(...)`, i.e. tuple += str, which
        # raised a TypeError and masked the real failure.
        e.args += (("Expected parse trees as a result, but got: {0}.\n"
                    "Parser STDOUT was:\n{1}").format(
                        results, get_parser_stdout(parser_stdout_filepath)),)
        raise e
    finally:
        if redirect_output:
            sys.stdout.close()
            sys.stdout = old_stdout
    # '{}' parens keep the trees unambiguous inside the JSON payload.
    escaped_parse_trees = json.dumps([pt.pformat(parens='{}') for pt in results])
    sys.stdout.write(escaped_parse_trees)
    return escaped_parse_trees
if __name__ == "__main__":
    def _str2bool(value):
        """Parse CLI booleans explicitly: argparse's `type=bool` treats any
        non-empty string — including "False" — as True."""
        return str(value).lower() in ("1", "true", "yes", "y")

    parser = argparse.ArgumentParser( )
    parser.add_argument('--li_utterances',type=str,
        default=json.dumps( ["Shut up janice, you've always been a hater","If you're here then how can you be there too"]),
        )
    # Bug fix: these flags previously used type=bool, so e.g.
    # `--global_features False` still yielded True.
    parser.add_argument('--skip-parsing', type=_str2bool, default=False)
    parser.add_argument('--global_features', type=_str2bool, default=True)
    parser.add_argument('--logging', type=_str2bool, default=False)
    parser.add_argument('--redirect_output', type=_str2bool, default=True)
    args = parser.parse_args()
    main( **vars(args) )
|
# -*- coding: utf-8 -*-
import codecs
import sys
from pinyin import PinYin
class NameSearch:
    """Fuzzy Chinese-name lookup.

    Scores a query name against every entry of an on-disk reference
    dictionary using three additive components — character overlap (0-50),
    pinyin similarity (0-100) and tone similarity (0-50) — and returns the
    entries whose total reaches 70% of the 200-point maximum.
    """
    def __init__(self):
        # name -> "pinyin:tones" mapping loaded from the 'name_yin_tone_dict' file.
        self.name_yin_tone_dict = self.get_name_yin_tone_dict('name_yin_tone_dict')
        self.TP = 200                   # total possible score (50 + 100 + 50)
        self.score_thre = self.TP * 0.7  # acceptance threshold (>= 70%)
        self.w = 1 / self.TP            # weight normalising scores into [0, 1]
    def calculate_score(self,name, yin, tone):
        """Score `name` (with its pinyin `yin` and tone string `tone`)
        against every library entry; returns {library_name: rounded
        normalised score} for entries at or above the threshold."""
        name_dict = {}
        for key, val in self.name_yin_tone_dict.items():
            lib_name = key
            # Library value format: "<pinyin syllables>:<tone digits>".
            lib_yin = val.split(':')[0]
            lib_tone = val.split(':')[1].strip('\n')
            scores = self.fun1(name, lib_name) + self.fun2(yin, lib_yin) + self.fun3(tone, lib_tone)
            #add dict
            if scores >= self.score_thre:
                name_dict[key] = round(self.w * scores,2)
        return name_dict
    def run(self,name):
        """Convert `name` to pinyin+tones, score it against the library and
        return the matches sorted by score, best first."""
        yin, tone = self.hanzi2yintone(name)
        name_dict = self.calculate_score(name, yin, tone)
        name_dict_sort = sorted(name_dict.items(), key = lambda
            item:item[1], reverse = True)
        print(name_dict_sort)
        return name_dict_sort
    def get_name_yin_tone_dict(self,path):
        """Load the reference file: one tab-separated "name\\tpinyin:tones"
        entry per line; returns it as a dict."""
        name_yin_tone_dict = {}
        name_yin_tone_file = codecs.open(path,'r','utf-8')
        for name_yin_tone in name_yin_tone_file:
            name_yin_tone_list = name_yin_tone.split('\t')
            name = name_yin_tone_list[0]
            yin_tone = name_yin_tone_list[1].strip('\n')
            name_yin_tone_dict[name] = yin_tone
        return name_yin_tone_dict
    '''
    Define evaluation function
    function1:form 0-50
    2:yin 0-100
    3:tone 0-50
    '''
    #fun1 uses the number of characters word1 and word2 share to
    #calculate the similarity of the two words (0-50 points)
    def fun1(self, word1, word2):
        """Character-overlap score: 0 when lengths differ, otherwise
        (shared characters / len) * 50."""
        if len(word2) != len(word1):
            return 0
        num = self.words_compare(word1, word2)
        score_incre = 1 / len( word1 ) * 50.0
        score = num * score_incre
        return score
    def fun2(self,word1_yin, word2_yin):
        """Pinyin score (0-100): per-syllable comparison of the two
        space-separated pinyin strings.

        Exact syllable match earns the full per-syllable increment.
        Near-misses get partial credit: an l/n initial confusion ('ln'/'nl'
        diff) or a trailing-g (n vs ng) difference (' g'/'g ') loses 10
        points; both together lose 15.  A shared initial character adds a
        small +5 bonus even for otherwise unmatched syllables.
        """
        score = 0
        yin1_list = word1_yin.split(' ')
        yin2_list = word2_yin.split(' ')
        if len(yin1_list) != len(yin2_list):
            return 0
        score_incre = 1 / len(yin1_list) * 100
        for i in range(len(yin1_list)):
            # diff_char interleaves the mismatching characters of the two
            # (space-padded) syllables: one char from each per mismatch.
            diff_char = self.string_compare(yin1_list[i], yin2_list[i])
            if diff_char == '':
                score += score_incre
                continue
            if len(diff_char) == 2:
                #if not distinguishing 'l' vs 'n'
                #if not distinguishing a trailing 'g' (n vs ng)
                if diff_char == 'ln' or diff_char == 'nl' or diff_char == ' g' or diff_char == 'g ':
                    score += score_incre - 10
            if len(diff_char) == 4:
                if diff_char == 'ln g' or diff_char == 'nl g' or diff_char == 'lng ' or diff_char == 'nlg ':
                    score += score_incre - 15
            #initial_char_same
            if self.same_initial_char( yin1_list[i], yin2_list[i]):
                score += 5
        return score
    def fun3(self, word1_tone, word2_tone):
        """Tone score (0-50): equal per-position increments for matching
        tone digits; 0 when the tone counts differ."""
        tone1_list = word1_tone.split()
        tone2_list = word2_tone.split()
        if len(tone1_list) != len(tone2_list):
            return 0
        score_incre = 1 / len( tone1_list ) * 50
        score = 0
        for i in range(len(tone1_list)):
            if tone1_list[i] == tone2_list[i]:
                score += score_incre
        return score
    def same_initial_char(self,string1,string2):
        """True when both strings are non-empty and share their first char."""
        if len(string1) == 0 or len(string2) == 0:
            return False
        if string1[0] == string2[0]:
            return True
        return False
    def string_align(self,string1,string2):
        """Right-pad the shorter string with spaces so both have equal length."""
        l1 = len(string1)
        l2 = len(string2)
        if l1 > l2:
            for i in range(l1 - l2):
                string2 += ' '
        else:
            for i in range(l2 - l1):
                string1 += ' '
        return string1,string2
    #compare string1 and string2 position by position and return the
    #interleaved pairs of differing characters
    def string_compare(self, string1, string2):
        """Return a string holding, for each mismatching position, the char
        from string1 followed by the char from string2 (after space-padding
        to equal length); '' means the strings are identical."""
        if len(string1) != len(string2):
            string1, string2 = self.string_align(string1, string2)
        diff_char = ''
        for i in range(len(string1)):
            if string1[i] != string2[i]:
                diff_char += string1[i] + string2[i]
        return diff_char
    #compare word1 and word2
    def words_compare(self, word1, word2):
        """Count how many characters of word2 also occur in word1.

        NOTE(review): multiplicity in word1 is recorded but never decremented,
        so repeated characters in word2 each count — confirm intended.
        """
        char_num = {}
        for ele in word1:
            if ele in char_num.keys():
                char_num[ele] += 1
            else:
                char_num[ele] = 1
        num = 0
        for ele in word2:
            if ele in char_num.keys():
                num += 1
        return num
    def hanzi2yintone(self, hanzi):
        """Convert a hanzi string into space-separated pinyin and tone
        strings via the PinYin helper."""
        py = PinYin()
        py.load_word()
        yin_list,tone_list = py.hanzi2yintone(hanzi)
        yin = ' '.join(yin_list)
        tone = ' '.join(tone_list)
        return yin,tone
if __name__ == "__main__":
    # Interactive mode: score one name typed on stdin.
    ns = NameSearch()
    name = input("please input:")
    ns.run(name)
    # Batch mode (disabled): score every name listed in 'name_dict'.
    '''
    name_file = codecs.open('name_dict','r','utf-8')
    for name in name_file:
        print('name:%s' %name)
        ns.run(name.strip('\n'))
        print('--------------')
    '''
|
'''
opencv + numpy製作資料
'''
import cv2
import numpy as np
print('------------------------------------------------------------') #60個
img=np.zeros((200,200),dtype=np.uint8) #預設為0, 黑色, 2維
print("img=\n",img)
cv2.imshow("one",img)
for i in range(50, 100):
for j in range(50, 100):
img[i,j] = 255
cv2.imshow("two",img)
cv2.waitKey()
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
#-----------蓝色通道值--------------
blue=np.zeros((300,300,3),dtype=np.uint8) #預設為0, 黑色, 3維
blue[:,:,0]=255 #第0通道, 藍色
#print("blue=\n",blue)
cv2.imshow("blue",blue)
#-----------綠色通道值--------------
green=np.zeros((300,300,3),dtype=np.uint8) #預設為0, 黑色, 3維
green[:,:,1]=255 #第0通道, 綠色
#print("green=\n",green)
cv2.imshow("green",green)
#-----------紅色通道值--------------
red=np.zeros((300,300,3),dtype=np.uint8) #預設為0, 黑色, 3維
red[:,:,2]=255 #第0通道, 紅色
#print("red=\n",red)
cv2.imshow("red",red)
#-----------释放窗口--------------
cv2.waitKey()
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
img=np.zeros((300,300,3),dtype=np.uint8) #預設為0, 黑色, 3維
img[:,0:100,0]=255
img[:,100:200,1]=255
img[:,200:300,2]=255
print("img=\n",img)
cv2.imshow("img",img)
cv2.waitKey()
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
img=np.zeros((2,4,3),dtype=np.uint8) #預設為0, 黑色, 3維
print("img=\n",img)
print("读取像素点img[0,3]=",img[0,3])
print("读取像素点img[1,2,2]=",img[1,2,2])
img[0,3]=255
img[0,0]=[66,77,88]
img[1,1,1]=3
img[1,2,2]=4
img[0,2,0]=5
print("修改后img\n",img)
print("读取修改后像素点img[1,2,2]=",img[1,2,2])
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[2,4,3],dtype=np.uint8)
rst=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
print("img=\n",img)
print("rst=\n",rst)
print("像素点(1,0)直接计算得到的值=", img[1,0,0]*0.114+img[1,0,1]*0.587+img[1,0,2]*0.299)
print("像素点(1,0)使用公式cv2.cvtColor()转换值=",rst[1,0])
'''
print(img[1,0,0])
print(img[1,0,1])
print(img[1,0,2])
'''
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[2,4],dtype=np.uint8)
rst=cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
print("img=\n",img)
print("rst=\n",rst)
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[2,4,3],dtype=np.uint8)
rgb=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
bgr=cv2.cvtColor(rgb,cv2.COLOR_RGB2BGR)
print("img=\n",img)
print("rgb=\n",rgb)
print("bgr=\n",bgr)
print('------------------------------------------------------------') #60個
img1=np.random.randint(0,256,size=[3,3],dtype=np.uint8)
img2=np.random.randint(0,256,size=[3,3],dtype=np.uint8)
print("img1=\n",img1)
print("img2=\n",img2)
print("img1+img2=\n",img1+img2)
print('------------------------------------------------------------') #60個
img1=np.random.randint(0,256,size=[3,3],dtype=np.uint8)
img2=np.random.randint(0,256,size=[3,3],dtype=np.uint8)
print("img1=\n",img1)
print("img2=\n",img2)
img3=cv2.add(img1,img2)
print("cv2.add(img1,img2)=\n",img3)
print('------------------------------------------------------------') #60個
a=np.random.randint(0,255,(5,5),dtype=np.uint8)
b=np.zeros((5,5),dtype=np.uint8)
b[0:3,0:3]=255
b[4,4]=255
c=cv2.bitwise_and(a,b)
print("a=\n",a)
print("b=\n",b)
print("c=\n",c)
print('------------------------------------------------------------') #60個
img1=np.ones((4,4),dtype=np.uint8)*3
img2=np.ones((4,4),dtype=np.uint8)*5
mask=np.zeros((4,4),dtype=np.uint8)
mask[2:4,2:4]=1
img3=np.ones((4,4),dtype=np.uint8)*66
print("img1=\n",img1)
print("img2=\n",img2)
print("mask=\n",mask)
print("初始值img3=\n",img3)
img3=cv2.add(img1,img2,mask=mask)
print("求和后img3=\n",img3)
print('------------------------------------------------------------') #60個
img1=np.ones((4,4),dtype=np.uint8)*3
img2=np.ones((4,4),dtype=np.uint8)*5
print("img1=\n",img1)
print("img2=\n",img2)
img3=cv2.add(img1,img2)
print("cv2.add(img1,img2)=\n",img3)
img4=cv2.add(img1,6)
print("cv2.add(img1,6)\n",img4)
img5=cv2.add(6,img2)
print("cv2.add(6,img2)=\n",img5)
print('------------------------------------------------------------') #60個
img1=np.ones((3,4),dtype=np.uint8)*100
img2=np.ones((3,4),dtype=np.uint8)*10
gamma=3
img3=cv2.addWeighted(img1,0.6,img2,5,gamma)
print(img3)
print('------------------------------------------------------------') #60個
img=np.random.randint(10,99,size=[5,5],dtype=np.uint8)
print("img=\n",img)
print("读取像素点img.item(3,2)=",img.item(3,2))
img.itemset((3,2),255)
print("修改后img=\n",img)
print("修改后像素点img.item(3,2)=",img.item(3,2))
print('------------------------------------------------------------') #60個
print('建立一個每點顏色任意顏色之圖')
img=np.random.randint(0,256,size=[512,512],dtype=np.uint8)
cv2.imshow("demo",img)
cv2.waitKey()
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
img=np.random.randint(10,99,size=[2,4,3],dtype=np.uint8)
print("img=\n",img)
print("读取像素点img[1,2,0]=",img.item(1,2,0))
print("读取像素点img[0,2,1]=",img.item(0,2,1))
print("读取像素点img[1,0,2]=",img.item(1,0,2))
img.itemset((1,2,0),255)
img.itemset((0,2,1),255)
img.itemset((1,0,2),255)
print("修改后img=\n",img)
print("修改后像素点img[1,2,0]=",img.item(1,2,0))
print("修改后像素点img[0,2,1]=",img.item(0,2,1))
print("修改后像素点img[1,0,2]=",img.item(1,0,2))
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[256,256,3],dtype=np.uint8)
cv2.imshow("demo",img)
cv2.waitKey()
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
#=========测试下OpenCV中蓝色的HSV模式值=============
imgBlue=np.zeros([1,1,3],dtype=np.uint8)
imgBlue[0,0,0]=255
Blue=imgBlue
BlueHSV=cv2.cvtColor(Blue,cv2.COLOR_BGR2HSV)
print("Blue=\n",Blue)
print("BlueHSV=\n",BlueHSV)
#=========测试下OpenCV中绿色的HSV模式值=============
imgGreen=np.zeros([1,1,3],dtype=np.uint8)
imgGreen[0,0,1]=255
Green=imgGreen
GreenHSV=cv2.cvtColor(Green,cv2.COLOR_BGR2HSV)
print("Green=\n",Green)
print("GreenHSV=\n",GreenHSV)
#=========测试下OpenCV中红色的HSV模式值=============
imgRed=np.zeros([1,1,3],dtype=np.uint8)
imgRed[0,0,2]=255
Red=imgRed
RedHSV=cv2.cvtColor(Red,cv2.COLOR_BGR2HSV)
print("Red=\n",Red)
print("RedHSV=\n",RedHSV)
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[5,5],dtype=np.uint8)
min=100
max=200
mask = cv2.inRange(img, min, max)
print("img=\n",img)
print("mask=\n",mask)
print('------------------------------------------------------------') #60個
img=np.ones([5,5],dtype=np.uint8)*9
mask =np.zeros([5,5],dtype=np.uint8)
mask[0:3,0]=1
mask[2:5,2:4]=1
roi=cv2.bitwise_and(img,img, mask= mask)
print("img=\n",img)
print("mask=\n",mask)
print("roi=\n",roi)
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[2,3,3],dtype=np.uint8)
bgra = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
print("img=\n",img)
print("bgra=\n",bgra)
b,g,r,a=cv2.split(bgra)
print("a=\n",a)
a[:,:]=125
bgra=cv2.merge([b,g,r,a])
print("bgra=\n",bgra)
print('------------------------------------------------------------') #60個
img=np.random.randint(-256,256,size=[4,5],dtype=np.int16)
rst=cv2.convertScaleAbs(img)
print("img=\n",img)
print("rst=\n",rst)
print('------------------------------------------------------------') #60個
img=np.zeros((5,5),np.uint8)
img[1:4,1:4]=1
kernel = np.ones((3,1),np.uint8)
erosion = cv2.erode(img,kernel)
print("img=\n",img)
print("kernel=\n",kernel)
print("erosion=\n",erosion)
print('------------------------------------------------------------') #60個
img=np.zeros((5,5),np.uint8)
img[2:3,1:4]=1
kernel = np.ones((3,1),np.uint8)
dilation = cv2.dilate(img,kernel)
print("img=\n",img)
print("kernel=\n",kernel)
print("dilation\n",dilation)
print('------------------------------------------------------------') #60個
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[4,5],dtype=np.uint8)
t,rst=cv2.threshold(img,127,255,cv2.THRESH_BINARY)
print("img=\n",img)
print("t=",t)
print("rst=\n",rst)
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[4,5],dtype=np.uint8)
t,rst=cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
print("img=\n",img)
print("t=",t)
print("rst=\n",rst)
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[4,5],dtype=np.uint8)
t,rst=cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
print("img=\n",img)
print("t=",t)
print("rst=\n",rst)
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[4,5],dtype=np.uint8)
t,rst=cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
print("img=\n",img)
print("t=",t)
print("rst=\n",rst)
print('------------------------------------------------------------') #60個
img=np.random.randint(0,256,size=[4,5],dtype=np.uint8)
t,rst=cv2.threshold(img,127,255,cv2.THRESH_TOZERO)
print("img=\n",img)
print("t=",t)
print("rst=\n",rst)
print('------------------------------------------------------------') #60個
img = np.zeros((5,5),dtype=np.uint8)
img[0:6,0:6]=123
img[2:6,2:6]=126
print("img=\n",img)
t1,thd=cv2.threshold(img,127,255,cv2.THRESH_BINARY)
print("thd=\n",thd)
t2,otsu=cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
print("otsu=\n",otsu)
print('------------------------------------------------------------') #60個
n = 300
img = np.zeros((n+1,n+1,3), np.uint8)
img = cv2.line(img,(0,0),(n,n),(255,0,0),3)
img = cv2.line(img,(0,100),(n,100),(0,255,0),1)
img = cv2.line(img,(100,0),(100,n),(0,0,255),6)
winname = 'Demo19.1'
cv2.namedWindow(winname)
cv2.imshow(winname, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
n = 300
img = np.ones((n,n,3), np.uint8)*255
img = cv2.rectangle(img,(50,50),(n-100,n-50),(0,0,255),-1)
winname = 'Demo19.1'
cv2.namedWindow(winname)
cv2.imshow(winname, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
d = 400
img = np.ones((d,d,3),dtype="uint8")*255
(centerX,centerY) = (round(img.shape[1] / 2),round(img.shape[0] / 2))
#将图像的中心作为圆心,实际值为=d/2
red = (0,0,255)#设置白色变量
for r in range(5,round(d/2),12):
cv2.circle(img,(centerX,centerY),r,red,3)
#circle(载体图像,圆心,半径,颜色)
cv2.imshow("Demo19.3",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
d = 400
img = np.ones((d,d,3),dtype="uint8")*255
#生成白色背景
for i in range(0,100):
centerX = np.random.randint(0,high = d)
#生成随机圆心X,确保在画布img内
centerY = np.random.randint(0,high = d)
#生成随机圆心Y,确保在画布img内
radius = np.random.randint(5,high = d/5)
#生成随机半径,值范围:[5,d/5),最大半径是d/5
color = np.random.randint(0,high = 256,size = (3,)).tolist()
#生成随机颜色,3个[0,256)的随机数
cv2.circle(img,(centerX,centerY),radius,color,-1)
#使用上述随机数,在画布img内画圆
cv2.imshow("demo19.4",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
d = 400
img = np.ones((d,d,3),dtype="uint8")*255
#生成白色背景
center=(round(d/2),round(d/2))
#注意数值类型,center=(d/2,d/2)不可以
size=(100,200)
#轴的长度
for i in range(0,10):
angle = np.random.randint(0,361)
#偏移角度
color = np.random.randint(0,high = 256,size = (3,)).tolist()
#生成随机颜色,3个[0,256)的随机数
thickness = np.random.randint(1,9)
cv2.ellipse(img, center, size, angle, 0, 360, color,thickness)
cv2.imshow("demo19.5",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
d = 400
img = np.ones((d,d,3),dtype="uint8")*255
#生成白色背景
pts=np.array([[200,50],[300,200],[200,350],[100,200]], np.int32)
#生成各个顶点,注意数据类型为int32
pts=pts.reshape((-1,1,2))
#第1个参数为-1, 表明这一维的长度是根据后面的维度的计算出来的。
cv2.polylines(img,[pts],True,(0,255,0),8)
#调用函数polylines完成多边形绘图,注意第3个参数控制多边形封闭
# cv2.polylines(img,[pts],False,(0,255,0),8) #不闭合的的多边形
cv2.imshow("demo19.6",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
d = 400
img = np.ones((d,d,3),dtype="uint8")*255
#生成白色背景
font=cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(0,200),font, 3,(0,0,255),15)
cv2.putText(img,'OpenCV',(0,200),font, 3,(0,255,0),5)
cv2.imshow("demo19.7",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
d = 400
img = np.ones((d,d,3),dtype="uint8")*255
#生成白色背景
font=cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(0,150),font, 3,(0,0,255),15)
cv2.putText(img,'OpenCV',(0,250),font, 3,(0,255,0),15,
cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,True)
cv2.imshow("demo19.7",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
def Demo(event,x,y,flags,param):
    """Mouse callback: print (in Chinese) which mouse action occurred."""
    if event == cv2.EVENT_LBUTTONDOWN:
        print("单击了鼠标左键")      # left button clicked
    elif event==cv2.EVENT_RBUTTONDOWN :
        print("单击了鼠标右键")      # right button clicked
    elif flags==cv2.EVENT_FLAG_LBUTTON:
        print("按住左键拖动了鼠标")  # dragged with left button held down
    elif event==cv2.EVENT_MBUTTONDOWN :
        print("单击了中间键")        # middle button clicked
#创建名称为Demo的响应(回调)函数OnMouseAction
#将回调函数Demo与窗口“Demo19.9”建立连接
img = np.ones((300,300,3),np.uint8)*255
cv2.namedWindow('Demo19.9')
cv2.setMouseCallback('Demo19.9',Demo)
cv2.imshow('Demo19.9',img)
cv2.waitKey()
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
d = 400
def draw(event,x,y,flags,param):
    """Mouse callback: on a left double-click, draw a rectangle on the global
    canvas `img` with one corner at the click point and a random opposite
    corner and color (`d` is the canvas size)."""
    if event==cv2.EVENT_LBUTTONDBLCLK:
        p1x=x
        p1y=y
        p2x=np.random.randint(1,d-50)
        p2y=np.random.randint(1,d-50)
        color = np.random.randint(0,high = 256,size = (3,)).tolist()
        cv2.rectangle(img,(p1x,p1y),(p2x,p2y),color,2)
img = np.ones((d,d,3),dtype="uint8")*255
cv2.namedWindow('Demo19.10')
cv2.setMouseCallback('Demo19.10',draw)
while(1):
cv2.imshow('Demo19.10',img)
if cv2.waitKey(20)==27:
break
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
thickness=-1
mode=1
d=400
def draw_circle(event,x,y,flags,param):
    """Mouse callback: on left-click, draw a shape on the global `img`.

    The global `mode` selects the shape (1=rectangle, 2=circle, 3=line,
    4=ellipse, 5=text) and the global `thickness` picks filled (-1) versus
    outlined drawing.
    """
    if event==cv2.EVENT_LBUTTONDOWN:
        a=np.random.randint(1,d-50)       # random second coordinate
        r=np.random.randint(1,d/5)        # random circle radius
        angle = np.random.randint(0,361)  # random ellipse rotation
        color = np.random.randint(0,high = 256,size = (3,)).tolist()
        if mode==1:
            cv2.rectangle(img,(x,y),(a,a),color,thickness)
        elif mode==2:
            cv2.circle(img,(x,y),r,color,thickness)
        elif mode==3:
            cv2.line(img,(a,a),(x,y),color,3)
        elif mode==4:
            cv2.ellipse(img, (x,y), (100,150), angle, 0, 360,color,thickness)
        elif mode==5:
            cv2.putText(img,'OpenCV',(0,round(d/2)),
                cv2.FONT_HERSHEY_SIMPLEX, 2,color,5)
img=np.ones((d,d,3),np.uint8)*255
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw_circle)
while(1):
cv2.imshow('image',img)
k=cv2.waitKey(1)&0xFF
if k==ord('r'):
mode=1
elif k==ord('c'):
mode=2
elif k==ord('l'):
mode=3
elif k==ord('e'):
mode=4
elif k==ord('t'):
mode=5
elif k==ord('f'):
thickness=-1
elif k==ord('u'):
thickness=3
elif k==27:
break
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
def changeColor(x):
    """Trackbar callback: repaint the whole image with the current R/G/B
    slider positions (OpenCV stores channels in BGR order)."""
    r=cv2.getTrackbarPos('R','image')
    g=cv2.getTrackbarPos('G','image')
    b=cv2.getTrackbarPos('B','image')
    img[:]=[b,g,r]
img=np.zeros((100,700,3),np.uint8)
cv2.namedWindow('image')
cv2.createTrackbar('R','image',100,255,changeColor)
cv2.createTrackbar('G','image',0,255,changeColor)
cv2.createTrackbar('B','image',0,255,changeColor)
while(1):
cv2.imshow('image',img)
k=cv2.waitKey(1)&0xFF
if k==27:
break
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
Type=0 #阈值处理类型值
Value=0 #使用的阈值
def onType(a):
    """Trackbar callback (threshold-type slider): re-threshold the source
    image with the current Type/Value sliders and redisplay it."""
    Type= cv2.getTrackbarPos(tType, windowName)
    Value= cv2.getTrackbarPos(tValue, windowName)
    ret, dst = cv2.threshold(o, Value,255, Type)
    cv2.imshow(windowName,dst)
def onValue(a):
    """Trackbar callback (threshold-value slider): same work as onType —
    read both sliders, re-threshold, redisplay."""
    Type= cv2.getTrackbarPos(tType, windowName)
    Value= cv2.getTrackbarPos(tValue, windowName)
    ret, dst = cv2.threshold(o, Value, 255, Type)
    cv2.imshow(windowName,dst)
o = cv2.imread("lena512.bmp",0)
windowName = "Demo19.13" #窗体名
cv2.namedWindow(windowName)
cv2.imshow(windowName,o)
#创建两个滑动条
tType = "Type" #用来选取阈值处理类型的滚动条
tValue = "Value" #用来选取阈值的滚动条
cv2.createTrackbar(tType, windowName, 0, 4, onType)
cv2.createTrackbar(tValue, windowName,0, 255, onValue)
if cv2.waitKey(0) == 27:
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
d=400
global thickness
thickness=-1
def fill(x):
    """No-op trackbar callback; the 'R' slider is polled in the main loop."""
    pass
def draw(event,x,y,flags,param):
    """Mouse callback: on a left double-click, draw a random rectangle on
    the global `img`; the global `thickness` (driven by the 'R' trackbar)
    selects filled (-1) versus 2px outline."""
    if event==cv2.EVENT_LBUTTONDBLCLK:
        p1x=x
        p1y=y
        p2x=np.random.randint(1,d-50)
        p2y=np.random.randint(1,d-50)
        color = np.random.randint(0,high = 256,size = (3,)).tolist()
        cv2.rectangle(img,(p1x,p1y),(p2x,p2y),color,thickness)
img=np.ones((d,d,3),np.uint8)*255
cv2.namedWindow('image')
cv2.setMouseCallback('image',draw)
cv2.createTrackbar('R','image',0,1,fill)
while(1):
cv2.imshow('image',img)
k=cv2.waitKey(1)&0xFF
g=cv2.getTrackbarPos('R','image')
if g==0:
thickness=-1
else:
thickness=2
if k==27:
break
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
def changeColor(x):
    """Trackbar callback: toggle the whole image between black (slider 0)
    and white (slider 1)."""
    g=cv2.getTrackbarPos('R','image')
    if g==0:
        img[:]=0
    else:
        img[:]=255
img=np.zeros((100,1000,3),np.uint8)
cv2.namedWindow('image')
cv2.createTrackbar('R','image',0,1,changeColor)
while(1):
cv2.imshow('image',img)
k=cv2.waitKey(1)&0xFF
if k==27:
break
cv2.destroyAllWindows()
print('------------------------------------------------------------') #60個
print('------------------------------------------------------------') #60個
|
"""
Classes from the 'TCC' framework.
"""
try:
    from rubicon.objc import ObjCClass
except ValueError:
    # Fallback stub when the Objective-C runtime cannot be loaded.
    # NOTE(review): this catches ValueError only — presumably what rubicon
    # raises when libobjc is unavailable; an ImportError from a missing
    # rubicon install is NOT caught here. Confirm that is intended.
    def ObjCClass(name):
        return None
def _Class(name):
    """Resolve an Objective-C class by name, returning None when the class
    (or the bridge itself) is unavailable."""
    try:
        return ObjCClass(name)
    except NameError:
        # NOTE(review): presumably ObjCClass raises NameError for classes the
        # current runtime does not export — confirm against rubicon docs.
        return None
# Lazily-resolved class handles from the private TCC framework; each name
# is None when the class is absent from the current runtime.
OS_tcc_object = _Class("OS_tcc_object")
OS_tcc_events_subscription = _Class("OS_tcc_events_subscription")
OS_tcc_events_filter = _Class("OS_tcc_events_filter")
OS_tcc_runtime = _Class("OS_tcc_runtime")
OS_tcc_server = _Class("OS_tcc_server")
OS_tcc_service = _Class("OS_tcc_service")
OS_tcc_message_options = _Class("OS_tcc_message_options")
OS_tcc_authorization_record = _Class("OS_tcc_authorization_record")
OS_tcc_attributed_entity = _Class("OS_tcc_attributed_entity")
OS_tcc_credential = _Class("OS_tcc_credential")
OS_tcc_identity = _Class("OS_tcc_identity")
|
"""Quick demo of numpy.fix, which rounds each value toward zero."""
import numpy as np

toward_zero_pos = np.fix(3.643532)
print(toward_zero_pos)   # 3.0
toward_zero_neg = np.fix(-3.643532)
print(toward_zero_neg)   # -3.0
toward_zero_int = np.fix(2)
print(toward_zero_int)   # 2.0
|
#!/usr/bin/env python
#encoding: utf-8
from django.conf.urls import patterns, include, url

# URL routes for the ACRA crash-report app.
# NOTE(review): `patterns()` with string view paths was deprecated in
# Django 1.8 and removed in 1.10 — this file targets an older Django;
# confirm the project's Django version before modernizing.
urlpatterns = patterns('',
    #url(r'^$', 'acra.views.dashboard', name='dashboard'),
    url(r'dashboard/', 'acra.views.dashboard', name='dashboard'),
    url(r'timeline/', 'acra.views.timeline', name='timeline'),
    url(r'^report/', 'acra.views.index', name='submit'),
)
|
# Counts driving the decision rules below.
people = 30
cars = 10
trucks = 40

# Cars outnumber people but not trucks -> take the cars.
if trucks > cars > people:
    print("We should take the cars.")
# More people than cars -> don't take the cars.
elif people > cars:
    print("We should not take the cars.")
# Exactly as many cars as people -> undecided.
else:
    print("We can't decide.")

# Compare trucks against cars.
if cars < trucks:
    print("That's too many trucks.")
elif cars > trucks:
    print("Maybe we could take the trucks.")
else:
    print("We still can't decide.")

# Finally, people versus trucks.
if trucks < people:
    print("Alright, let's just take the trucks.")
else:
    print("Fine, let's stay home then.")
|
import threading
from gi.repository import GLib
class UidReader:
    """Reads a card UID and hands it to a callback on the GLib main loop."""

    def __init__(self, func):
        # Callback scheduled (via GLib.idle_add) with the UID as argument.
        self.func = func

    def readUid(self):
        """Block until a UID is available, then schedule the callback.

        The real card reader is stubbed out; the UID is read from stdin.
        Dispatching through GLib.idle_add keeps UI work on the main loop.
        """
        #uid = rd.readCard()
        card_uid = input()
        GLib.idle_add(self.func, card_uid)
|
#!/usr/bin/python
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna, generic_protein
class Ab_read:
    """A single antibody sequencing read with its V/J assignment and CDRs.

    Fix: the previous defaults (Vmut=[0], Jmut=[0], cdr*=Seq('')) were
    mutable objects created once at definition time and shared by every
    instance; fresh values are now built per call (backward-compatible).
    """

    def __init__(self, name='', V='', Vmut=None, J='', Jmut=None, ABtype='',
                 cdr3=None, cdr2=None, cdr1=None):
        if Vmut is None:
            Vmut = [0]
        if Jmut is None:
            Jmut = [0]
        self.name = name
        self.V = V
        self.Vmut = Vmut
        self.J = J
        self.Jmut = Jmut
        self.ABtype = ABtype
        self.cdr3 = Seq('') if cdr3 is None else cdr3
        self.cdr2 = Seq('') if cdr2 is None else cdr2
        self.cdr1 = Seq('') if cdr1 is None else cdr1
        # Somatic hypermutation count: V mutations plus J mutations, with an
        # empty/falsy list contributing nothing (same rules as before).
        if Vmut and Jmut:
            self.sh = Vmut[0] + Jmut[0]
        elif Vmut:
            self.sh = Vmut[0]
        else:
            self.sh = 0

    def __str__(self):
        return u'Read V={V}, J={J}, type = {type}'.format(
            V=self.V,
            J=self.J,
            type=self.ABtype
        )

    def __repr__(self):
        return self.__str__()
class Clone:
    """A clonotype: reads sharing V/J genes and CDR sequences.

    Fix: IDs=[] and the Seq('') defaults were mutable objects shared across
    every instance; fresh values are now created per call.
    """

    def __init__(self, V='', J='', cdr3=None, cdr2=None, cdr1=None,
                 ABtype='', num_reads=None, percent_reads=None, IDs=None,
                 Vmut=None, Jmut=None, sh=None):
        self.V = V
        self.J = J
        self.cdr3 = Seq('') if cdr3 is None else cdr3
        self.cdr2 = Seq('') if cdr2 is None else cdr2
        self.cdr1 = Seq('') if cdr1 is None else cdr1
        self.num_reads = num_reads
        self.percent_reads = percent_reads
        self.ABtype = ABtype
        self.IDs = [] if IDs is None else IDs
        self.Vmut = Vmut
        self.Jmut = Jmut
        self.sh = sh

    def __str__(self):
        return u'Clone V={V} J={J}, {num_reads} reads'.format(
            V=self.V,
            J=self.J,
            num_reads=self.num_reads,
        )

    def __repr__(self):
        return self.__str__()
class Cluster:
    """A cluster of clones sharing the same V gene.

    Fix: every list default ([]) was a single mutable object shared by all
    instances; fresh lists are now created per call.
    """

    def __init__(self, V='', Js=None, cdr3s=None, cdr2s=None, cdr1s=None,
                 ABtypes=None, num_reads=None, percent_reads=None,
                 IDs=None):
        self.V = V
        self.Js = [] if Js is None else Js
        self.ABtypes = [] if ABtypes is None else ABtypes
        self.cdr3s = [] if cdr3s is None else cdr3s
        self.cdr2s = [] if cdr2s is None else cdr2s
        self.cdr1s = [] if cdr1s is None else cdr1s
        self.num_reads = num_reads
        self.percent_reads = percent_reads
        self.IDs = [] if IDs is None else IDs

    def __str__(self):
        return u'Cluster V={V}, {num_reads} total reads'.format(
            V=self.V,
            num_reads=self.num_reads,
        )

    def __repr__(self):
        return self.__str__()
import pickle
from rummySearch.tilesearch import TileDetector, order_points
import cv2
import imutils
import argparse
import numpy as np
# Detect the individual tile images in the reference photo, preview them,
# and pickle the list for later use.
td = TileDetector()
image = cv2.imread("rummySearch/all_black_tiles.jpg")
td.set_image(image, resize_width=960)
tile_images = td.get_tile_images()
for i, tile in enumerate(tile_images):
    cv2.imshow(str(i), tile)
# NOTE(review): the file handle passed to pickle.dump is never explicitly
# closed; a `with open(...)` block would be safer.
pickle.dump(tile_images, open("rummySearch/dump.pickle", "wb"))
cv2.waitKey(0)
|
import matplotlib.pylab as plt
# Simple scatter-plot demo: star markers, size 500.
x = [1,2,3,4,5,6,7,8]
y = [5,2,4,2,1,4,5,2]
plt.scatter(x,y,label='plus',color='blue',marker='*',s=500)
#google : matplot lib marker
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.title('Intresting Graph\nCheck it Out')
plt.show()
|
import requests
from utils.config import NewConfig
class PostMeasureGra(object):
    """Posts per-step measure answers to the userMeasure endpoint."""

    def __init__(self, common, headers, accesstoken):
        self.headers = headers
        self.baseUrl = common.get('baseUrl')
        self.accesstoken = accesstoken
        # Every request authenticates through the accesstoken header.
        self.headers.update({"accesstoken": self.accesstoken})

    def post_measure_gra(self, measureID, curr_status, userAnswer):
        """Post each unanswered step and return the first non-empty 'data'.

        :param measureID: measure identifier inserted into the URL.
        :param curr_status: per-step status flags; 0 means "still to answer".
        :param userAnswer: per-step answer dicts merged into the POST body.
        :return: the first truthy 'data' payload from the server, else None.
        """
        url = "{}/userMeasure/{}/measureData".format(self.baseUrl, measureID)
        for status, answer in zip(curr_status, userAnswer):
            if status != 0:
                continue
            data = {"elapsedSec": 29, "stepType": 0}
            data.update(answer)
            response = requests.request("POST", url, headers=self.headers, json=data)
            # Parse the body once with the JSON parser; the old code eval()'d
            # the raw response text twice, which is unsafe on untrusted input
            # and breaks on JSON literals such as true/false/null.
            payload = response.json().get('data')
            if payload:
                return payload
if __name__ == '__main__':
    # Ad-hoc manual test against the 'vivox6' device config.
    cfg_info = NewConfig()
    devices = cfg_info.get_info('vivox6')
    c, h = cfg_info.get_info("vivox6")
    print(c)
    print(h)
    a = 'dad46a52-0542-4f39-a371-3b47e05af4b8'
    at = PostMeasureGra(c, h, a)
    # NOTE(review): post_measure_gra() requires (measureID, curr_status,
    # userAnswer) — this call passes none of them and raises TypeError.
    s = at.post_measure_gra()
|
from pathlib import Path
from lxml import etree as ET
from MusicXMLSynthesizer.utils import parse_notes_meta_to_list
from MusicXMLSynthesizer.Synthesizer import Synthesizer
def read_musicxml(path):
    """Read a MusicXML file and return it serialized as a UTF-8 string.

    Returns None (after printing a warning) when `path` is not a file.
    Fix: the old warning printed "Path:Invalid, Contnet:" — a typo, and the
    offending path itself was never shown.
    """
    file_path = Path(path)
    if not file_path.is_file():
        print("Invalid path: {}".format(path))
    else:
        tree = ET.parse(str(file_path))
        return ET.tostring(tree, xml_declaration=True,
                           encoding="UTF-8", doctype="""<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 3.1 Partwise//EN" "http://www.musicxml.org/dtds/partwise.dtd">""").decode("UTF-8")
def create_synthesizer(input_data_directory):
    """Build a Synthesizer from the metadata files in a directory.

    `input_data_directory` is expected to already end with a path separator,
    since the file names are appended to it directly.
    """
    xsd_path = "musicxml-3.1-dtd-xsd/schema/musicxml.xsd"

    # Load the three metadata files produced by the transcription step.
    techs_and_notes_list = parse_notes_meta_to_list(input_data_directory + "FinalNotes.txt")
    beats_list = parse_notes_meta_to_list(input_data_directory + "beats.txt")
    downbeats_list = parse_notes_meta_to_list(input_data_directory + "downbeats.txt")

    synthesizer = Synthesizer(xsd_path)
    synthesizer.save(techs_and_notes_list, downbeats_list, beats_list)
    return synthesizer
|
import time
def profiler(method):
    """Decorator that prints the wall-clock time each call takes.

    Fix: functools.wraps preserves the wrapped function's __name__ and
    docstring (previously every decorated function reported as
    'wrapper_method').
    """
    from functools import wraps

    @wraps(method)
    def wrapper_method(*arg, **kw):
        t = time.time()
        ret = method(*arg, **kw)
        print('Method ' + method.__name__ + ' took : ' +
              "{:2.5f}".format(time.time() - t) + ' sec')
        return ret
    return wrapper_method
def get_baord_sum(board):
    """Sum every unmarked (truthy, i.e. non-None) number left on a board."""
    return sum(cell for row in board for cell in row if cell)
@profiler
def part1():
    """Find the score of the FIRST bingo board to win (AoC 2021 day 4)."""
    raw = open('input.txt').read().split('\n\n')
    nums = [int(n) for n in raw[0].split(',')]
    boards = [[[int(v) for v in line.split()] for line in grid.split('\n')]
              for grid in raw[1:]]
    for num in nums:
        # Mark the drawn number on every board by replacing it with None.
        for board in boards:
            for row in board:
                if num in row:
                    row[row.index(num)] = None
        # Look for a fully-marked column or row (boards are square).
        for board in boards:
            for i in range(len(board[0])):
                column_done = all(row[i] is None for row in board)
                row_done = all(cell is None for cell in board[i])
                if column_done or row_done:
                    print("part 1 : ", get_baord_sum(board) * num)
                    return
@profiler
def part2():
    """Find the score of the LAST bingo board to win (AoC 2021 day 4).

    Fix: the original removed finished boards from `boards` while iterating
    `boards` itself, which skips the element after each removal and can miss
    a board that also won on the same draw.
    """
    content = open('input.txt').read().split('\n\n')
    nums = list(map(int, content[0].split(',')))
    boards = [[list(map(int, l.split())) for l in board.split('\n')]
              for board in content[1:]]
    score = []
    for num in nums:
        # Mark the drawn number on every board by replacing it with None.
        for board in boards:
            for l in board:
                if num in l:
                    l[l.index(num)] = None
        # Iterate a snapshot so removing finished boards skips nothing.
        for board in list(boards):
            won = False
            for i in range(len(board[0])):
                if all(l[i] is None for l in board) or \
                        all(e is None for e in board[i]):
                    won = True
                    break
            if won:
                score.append(get_baord_sum(board) * num)
                boards.remove(board)
    print("part 2 : ", score[-1])
if __name__ == "__main__":
    # Run both puzzle parts when executed as a script.
    part1()
    part2()
|
from typing import Mapping, Union
import numpy as np
from .operation import Operation
from .op_placeholder import OpPlaceholder
class OpSetitem(Operation):
    """Set an indexed slice of the first input to the second input.

    Computes x[key] = value and returns x.  (Note: the original docstring
    said "Get item", but this op performs assignment.)
    """
    def __init__(self, x: Operation, key, value: Operation, **kwargs):
        self.inputs = [x, value]
        self.key = key
        # Item assignment never changes the shape of `x`.
        self.shape = x.shape
        self.params = {
            'key': key,
        }
        super(OpSetitem, self).__init__(**kwargs)

    def _forward(self, feed_dict: Mapping[Union[str, OpPlaceholder], np.ndarray]) -> np.ndarray:
        # self.values presumably holds the evaluated inputs [x, value]
        # (populated by the Operation base class) — TODO confirm upstream.
        # NOTE(review): this writes into self.values[0] in place rather than
        # copying first; verify the framework re-evaluates inputs each pass.
        val = self.values[0]
        val[self.key] = self.values[1]
        return val

    def _backward(self, gradient: Operation) -> None:
        raise NotImplementedError('`setitem` is not differentiable')
|
from marshmallow import Schema, fields, validate
class CategorySchema(Schema):
    """Marshmallow schema for serializing/validating Category objects."""
    # Primary key is server-assigned: serialized on dump, ignored on load.
    id = fields.Int(dump_only=True)
    # Name is mandatory and limited to 1-50 characters.
    name = fields.Str(required=True, validate=validate.Length(1, 50))
|
#!/usr/bin/python3
#
# Created by Stephen Farnsworth
#
# Word tables: index 0 is the empty string so a zero component adds nothing.
text_under20 = ['', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
text_tens = ['', 'Ten', 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
text_hundred = 'Hundred'
text_powers = ['', 'Thousand', 'Million', 'Billion', 'Trillion']


def one_to_thousand(n):
    """Spell out 0..999 as concatenated English words ('' for zero)."""
    hundreds, rest = divmod(n, 100)
    words = ''
    if hundreds:
        words = text_under20[hundreds] + text_hundred
    if rest >= 20:
        tens, ones = divmod(rest, 10)
        words += text_tens[tens] + text_under20[ones]
    else:
        words += text_under20[rest]
    return words


def dollars(n):
    """Spell out a non-negative dollar amount, e.g. 1234 ->
    'OneThousandTwoHundredThirtyFourDollars'."""
    if n == 0:
        return 'ZeroDollars'
    words = 'Dollars'
    group = 0
    # Consume three digits at a time, prepending each named group.
    while n > 0:
        n, chunk = divmod(n, 1000)
        if chunk > 0:
            words = one_to_thousand(chunk) + text_powers[group] + words
        group += 1
    return words
import sys

# Read one number per line from the file named on the command line and print
# each amount spelled out in words; non-numeric/blank lines are skipped.
# Fixes: the file is now closed via `with`, and the bare `except: pass`
# (which also hid real bugs inside dollars()) is narrowed to parse errors.
with open(sys.argv[1], 'r') as test_cases:
    for test in test_cases:
        try:
            value = int(test)
        except ValueError:
            continue
        print(dollars(value))
|
#!/usr/bin/env python
#_*_ coding: utf8_*_
#TwisterxS repaso argparse
#encontrar numeros primos scripting python
import argparse
parser = argparse.ArgumentParser(description="Encontrar Numeros Primos ")
parser.add_argument('-m','--maximun',help="Limite para encontrar los primos anteriores ")
# NOTE(review): `parser` is rebound here to the parsed Namespace — main()
# below reads parser.maximun from this object, not from the ArgumentParser.
parser = parser.parse_args()
def main():
    """Print every prime below the --maximun limit, or usage hints if absent.

    Output is byte-identical to the original; the primality test now stops
    at sqrt(i) and breaks on the first divisor instead of scanning 2..i-1.
    """
    if parser.maximun:
        n = int(parser.maximun)
        if n > 0:
            for i in range(2, n):
                es_primo = True
                divisor = 2
                while divisor * divisor <= i:
                    if i % divisor == 0:
                        es_primo = False
                        break
                    divisor += 1
                if es_primo:
                    print(i, "Primo! ┬┴┬┴┤・ω・)ノº")
    else:
        print("intente de nuevo (∩╹□╹∩) ")
        print("nota!:numeros mayores a 2")
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C.
        print("salida [o_o]...")
|
# from flask_sqlalchemy import SQLAlchemy
#from pymongo import MongoClient
#from mongoengine import *
#from mv_nalign import settings
# db = SQLAlchemy()
# connect(app.config['SERVER_NAME'] )
#
#
# def reset_database():
# from rest_api_demo.database.models import Post, nalign # noqa
# db.drop_all()
# db.create_all()
# init MongoDB here |
from pprint import pprint
import boto3
from botocore.exceptions import ClientError
from rekognition_objects import (
RekognitionFace, RekognitionCelebrity, RekognitionLabel,
RekognitionModerationLabel, RekognitionText, show_bounding_boxes, show_polygons)
class RekognitionImage:
    """
    Encapsulates an Amazon Rekognition image. This class is a thin wrapper
    around parts of the Boto3 Amazon Rekognition API.

    Fix: several messages passed logger-style "%s" arguments to print(),
    which printed the raw format string and the extra values as a tuple;
    they are now proper f-strings.
    """
    def __init__(self, image, image_name, rekognition_client):
        """
        Initializes the image object.

        :param image: Data that defines the image, either the image bytes or
                      an Amazon S3 bucket and object key.
        :param image_name: The name of the image.
        :param rekognition_client: A Boto3 Rekognition client.
        """
        self.image = image
        self.image_name = image_name
        self.rekognition_client = rekognition_client

    @classmethod
    def from_file(cls, image_file_name, rekognition_client, image_name=None):
        """
        Creates a RekognitionImage object from a local file.

        :param image_file_name: The file name of the image. The file is opened
                                and its bytes are read.
        :param rekognition_client: A Boto3 Rekognition client.
        :param image_name: The name of the image. If this is not specified, the
                           file name is used as the image name.
        :return: The RekognitionImage object, initialized with image bytes from
                 the file.
        """
        with open(image_file_name, 'rb') as img_file:
            image = {'Bytes': img_file.read()}
        name = image_file_name if image_name is None else image_name
        return cls(image, name, rekognition_client)

    @classmethod
    def from_bucket(cls, s3_object, rekognition_client):
        """
        Creates a RekognitionImage object from an Amazon S3 object.

        :param s3_object: An Amazon S3 object that identifies the image. The
                          image is not retrieved until needed for a later call.
        :param rekognition_client: A Boto3 Rekognition client.
        :return: The RekognitionImage object, initialized with S3 object data.
        """
        image = {'S3Object': {'Bucket': s3_object.bucket_name, 'Name': s3_object.key}}
        return cls(image, s3_object.key, rekognition_client)

    def detect_faces(self):
        """
        Detects faces in the image.

        :return: The list of faces found in the image.
        :raises ClientError: propagated after logging when the call fails.
        """
        try:
            response = self.rekognition_client.detect_faces(
                Image=self.image, Attributes=['ALL'])
            faces = [RekognitionFace(face) for face in response['FaceDetails']]
            print(f"Detected {len(faces)} faces.")
        except ClientError:
            print(f"Couldn't detect faces in {self.image_name}.")
            raise
        else:
            return faces

    def compare_faces(self, target_image, similarity):
        """
        Compares faces in the image with the largest face in the target image.

        :param target_image: The target image to compare against.
        :param similarity: Faces in the image must have a similarity value
                           greater than this value to be included.
        :return: A tuple of (matched faces, faces below the threshold).
        :raises ClientError: propagated after logging when the call fails.
        """
        try:
            response = self.rekognition_client.compare_faces(
                SourceImage=self.image,
                TargetImage=target_image.image,
                SimilarityThreshold=similarity)
            matches = [RekognitionFace(match['Face']) for match
                       in response['FaceMatches']]
            unmatches = [RekognitionFace(face) for face in response['UnmatchedFaces']]
            print(f"Found {len(matches)} matched faces and "
                  f"{len(unmatches)} unmatched faces.")
        except ClientError:
            print(f"Couldn't match faces from {self.image_name} to "
                  f"{target_image.image_name}.")
            raise
        else:
            return matches, unmatches

    def detect_labels(self, max_labels):
        """
        Detects labels in the image. Labels are objects and people.

        :param max_labels: The maximum number of labels to return.
        :return: The list of labels detected in the image.
        :raises ClientError: propagated after logging when the call fails.
        """
        try:
            response = self.rekognition_client.detect_labels(
                Image=self.image, MaxLabels=max_labels)
            labels = [RekognitionLabel(label) for label in response['Labels']]
            print(f"Found {len(labels)} labels in {self.image_name}")
        except ClientError:
            print(f"Couldn't detect labels in {self.image_name}")
            raise
        else:
            return labels

    def detect_moderation_labels(self):
        """
        Detects moderation labels in the image. Moderation labels identify
        content that may be inappropriate for some audiences.

        :return: The list of moderation labels found in the image.
        :raises ClientError: propagated after logging when the call fails.
        """
        try:
            response = self.rekognition_client.detect_moderation_labels(
                Image=self.image)
            labels = [RekognitionModerationLabel(label)
                      for label in response['ModerationLabels']]
            print(f"Found {len(labels)} moderation labels in {self.image_name}.")
        except ClientError:
            print(f"Couldn't detect moderation labels in {self.image_name}.")
            raise
        else:
            return labels

    def detect_text(self):
        """
        Detects text in the image.

        :return: The list of text elements found in the image.
        :raises ClientError: propagated after logging when the call fails.
        """
        try:
            response = self.rekognition_client.detect_text(Image=self.image)
            texts = [RekognitionText(text) for text in response['TextDetections']]
            print(f"Found {len(texts)} texts in {self.image_name}.")
        except ClientError:
            print(f"Couldn't detect text in {self.image_name}.")
            raise
        else:
            return texts

    def recognize_celebrities(self):
        """
        Detects celebrities in the image.

        :return: A tuple of (celebrities found, detected faces that did not
                 match any known celebrity).
        :raises ClientError: propagated after logging when the call fails.
        """
        try:
            response = self.rekognition_client.recognize_celebrities(
                Image=self.image)
            celebrities = [RekognitionCelebrity(celeb)
                           for celeb in response['CelebrityFaces']]
            other_faces = [RekognitionFace(face)
                           for face in response['UnrecognizedFaces']]
            print(f"Found {len(celebrities)} celebrities and "
                  f"{len(other_faces)} other faces in {self.image_name}.")
        except ClientError:
            print(f"Couldn't detect celebrities in {self.image_name}.")
            raise
        else:
            return celebrities, other_faces
|
from hashlib import sha256
import onesignal as onesignal_sdk
from django.conf import settings
from django_rq import job
from openbook_common.utils.model_loaders import get_user_model
# Module-level OneSignal client shared by every notification job.
onesignal_client = onesignal_sdk.Client(
    app_id=settings.ONE_SIGNAL_APP_ID,
    app_auth_key=settings.ONE_SIGNAL_API_KEY
)
@job('default')
def send_notification_to_user_with_id(user_id, notification):
    """Send `notification` to every registered device of the given user.

    Targeting uses OneSignal tag filters: a sha256 of the user's uuid+id
    plus the device uuid, so each send reaches one device.  The hash and
    badge parameters are identical for every device, so they are now
    computed/set once instead of on every loop iteration.
    """
    User = get_user_model()
    user = User.objects.only('username', 'uuid', 'id').get(pk=user_id)
    notification.set_parameter('ios_badgeType', 'Increase')
    notification.set_parameter('ios_badgeCount', '1')
    user_id_contents = (str(user.uuid) + str(user.id)).encode('utf-8')
    hashed_user_id = sha256(user_id_contents).hexdigest()
    for device in user.devices.all():
        notification.set_filters([
            {"field": "tag", "key": "user_id", "relation": "=", "value": hashed_user_id},
            {"field": "tag", "key": "device_uuid", "relation": "=", "value": device.uuid},
        ])
        onesignal_client.send_notification(notification)
|
# Exercise:
# Read two or more positive integers from the user; a negative number ends
# input. Duplicate values are rejected. Then:
#   1) print the sum of the numbers
#   2) print the largest number
#   3) print the second-largest number
#   4) remove the smallest number
L = []
while True:
    x = int(input('请输入正整数: '))
    if x < 0:
        # Only allow quitting once at least two numbers have been entered.
        if len(L) >= 2:
            break
        else:
            print("您输入的数据个数太少,请继承输入")
            continue
    # Append x only if it is not already present; otherwise warn.
    if x not in L:
        L.append(x)  # append
    else:
        print(x, "已经存在,添加失败!")
print(L)
# 1) sum of the numbers
print("和是:", sum(L))
# 2) largest number
print("最大数是:", max(L))
# 3) second-largest number (sort a copy so L keeps its input order)
L2 = L.copy()
L2.sort(reverse=True)
print("第二大的数是:", L2[1])
del L2  # release the temporary sorted copy
# 4) remove the smallest number
L.remove(min(L))
print(L)  # final result
|
from general.command import Command
from general.dnaSequence import DnaSequence
class Slice(Command):
    """Command that extracts a sub-sequence from a stored DNA sequence.

    Command forms (by token count):
      4 tokens: slice @name/#id start end -> overwrite the existing entry;
      5 tokens: ... ':'                   -> store under a generated name;
      6 tokens: ... ':' name/@@           -> store under a given/derived name.
    """

    def __init__(self, command):
        super().__init__(command)

    def sub_dna(self, dna_string, index_start, index_end):
        """Return DnaSequence(dna_string[start:end]).

        NOTE(review): implicitly returns None when the bounds are not
        0 <= start < end < len(dna_string) — callers must cope with that.
        """
        new_str_dna = ''
        if 0 <= int(index_start) < int(index_end) < len(dna_string):
            for i in range(int(index_start), int(index_end)):
                new_str_dna = new_str_dna + dna_string[i]
            return DnaSequence(new_str_dna)

    def check_type_insert(self):
        """Validate the raw command shape and the referenced DNA.

        Bug fix: the length check used `and` (< 4 and > 6 can never both be
        true), so malformed commands were never rejected; it must be `or`.
        """
        if len(self.list_command) < 4 or len(self.list_command) > 6:
            raise ValueError
        dna_string = self.check_dna(self.list_command[1])

    def execute(self):
        """Execute the slice command and return its display string.

        With no ':' the sliced sequence replaces the entry addressed by
        name (@name) or id; with ':' a new entry is created, named
        str_<n> by default, or via @@ / an explicit name (6-token form).
        """
        entry_id = self.data_dna.get_id()  # renamed from `id` (shadowed the builtin)
        dna_string = self.check_dna(self.list_command[1])
        if len(self.list_command) == 4:
            new_dna = self.sub_dna(dna_string, self.list_command[2], self.list_command[3])
            if self.list_command[1][0] == '@':
                self.data_dna.change_dna_by_name(new_dna, self.list_command[1][1:], "*")
                return "[{}] {}: {}".format(self.data_dna.get_dict_name_id()[self.list_command[1][1:]], self.list_command[1][1:], new_dna)
            else:
                self.data_dna.change_dna_by_id(new_dna, self.list_command[1][1:], "*")
                return "[{}] {}: {}".format(self.list_command[1][1:], self.data_dna.get_name_by_id(self.list_command[1][1:]), new_dna)
        elif len(self.list_command) == 5:
            new_dna = self.sub_dna(dna_string, self.list_command[2], self.list_command[3])
            if self.list_command[4] == ':':
                new_name = "str_{}".format(self.data_dna.get_no_name_given())
                self.data_dna.set_no_name_given()
                self.data_dna.push_dict(entry_id, new_name, new_dna, "o")
                return "[{}] {}: {}".format(entry_id, new_name, new_dna)
            else:
                raise ValueError
        elif len(self.list_command) == 6:
            new_dna = self.sub_dna(dna_string, self.list_command[2], self.list_command[3])
            new_name = self.get_new_name('_s', 5)
            self.data_dna.push_dict(entry_id, new_name, new_dna, 'o')
            return "[{}] {}: {}".format(entry_id, new_name, new_dna)
        else:
            raise ValueError
|
import datetime
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock
from dipdup.config import (
ContractConfig,
OperationHandlerConfig,
OperationHandlerTransactionPatternConfig,
OperationIndexConfig,
OperationType,
TzktDatasourceConfig,
)
from dipdup.index import OperationIndex
from dipdup.models import OperationData
add_liquidity_operations = (
OperationData(
type='transaction',
id=76905130,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
target_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
initiator_address=None,
amount=None,
status='applied',
has_internals=True,
storage={
'admin': 'KT1Kfu13FmNbcZSjTPZLrAUbEYNZim6vtg6d',
'lpFee': '400',
'paused': False,
'token1Id': '0',
'token2Id': '0',
'systemFee': '1000',
'token1_Fee': '0',
'token2_Fee': '0',
'token1Check': False,
'token1_pool': '470000000000000000000',
'token2Check': False,
'token2_pool': '16000000',
'totalSupply': '86717933554715',
'maxSwapLimit': '40',
'token1Address': 'KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b',
'token2Address': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'lpTokenAddress': 'KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z',
},
block=None,
sender_alias=None,
nonce=None,
target_alias='PLENTY / SMAK Swap',
initiator_alias=None,
entrypoint='AddLiquidity',
parameter_json={
'recipient': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'token1_max': '470000000000000000000',
'token2_max': '16000000',
},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=None,
),
OperationData(
type='transaction',
id=76905131,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'paused': False,
'balances': 3943,
'metadata': 3944,
'lastUpdate': '1676287',
'totalSupply': '14712639179877222051752285',
'administrator': 'KT1GpTEq4p2XZ8w9p5xM7Wayyw5VR7tb3UaW',
'token_metadata': 3945,
'tokensPerBlock': '50000000000000000000',
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=0,
target_alias='PLENTY',
initiator_alias=None,
entrypoint='transfer',
parameter_json={
'to': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'from': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': '470000000000000000000',
},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=[
{
'bigmap': 3943,
'path': 'balances',
'action': 'update_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {
'balance': '1141847967508578897233841',
'approvals': {
'KT19Dskaofi6ZTkrw3Tq4pK7fUqHqCz4pTZ3': '0',
'KT1AbuUaPQmYLsB8n8FdSzBrxvrsm8ctwW1V': '0',
'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU': '0',
'KT1HUnqM6xFJa51PM2xHfLs7s6ARvXungtyq': '0',
'KT1HZkD2T4uczgYkZ6fb9gm1fymeJoRuezLz': '9060000000000000000000',
'KT1NtsnKQ1c3rYB12ZToP77XaJs8WDBvF221': '0',
'KT1PuPNtDFLR6U7e7vDuxunDoKasVT6kMSkz': '0',
'KT1UNBvCJXiwJY6tmHM7CJUVwNPew53XkSfh': '0',
'KT1VeNQa4mucRj36qAJ9rTzm4DTJKfemVaZT': '0',
'KT1X1LgNkQShpF9nRLYw3Dgdy4qp38MX617z': '0',
'KT1XVrXmWY9AdVri6KpxKo4CWxizKajmgzMt': '0',
'KT1XXAavg3tTj12W1ADvd3EEnm1pu6XTmiEF': '550000000000000000',
'KT1XutoFJ9dXvWxT7ttG86N2tSTUEpatFVTm': '0',
},
},
},
},
{
'bigmap': 3943,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprugvwjodjwqmGVVryY5uqz9fcg6BndukYj6bproCFShQ6nkuG8e',
'key': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'value': {'balance': '470000000000000000000', 'approvals': {}},
},
},
],
),
OperationData(
type='transaction',
id=76905132,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'freezer': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'balances': 1798,
'metadata': 1800,
'totalSupply': '896083333000',
'administrator': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'token_metadata': 1801,
'frozen_accounts': 1799,
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=1,
target_alias='Smartlink',
initiator_alias=None,
entrypoint='transfer',
parameter_json={'to': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', 'from': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4', 'value': '16000000'},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=[
{
'bigmap': 1798,
'path': 'balances',
'action': 'update_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {'balance': '208684', 'approvals': {'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU': '0'}},
},
},
{
'bigmap': 1798,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprugvwjodjwqmGVVryY5uqz9fcg6BndukYj6bproCFShQ6nkuG8e',
'key': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'value': {'balance': '16000000', 'approvals': {}},
},
},
],
),
OperationData(
type='transaction',
id=76905133,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'balances': 14107,
'metadata': 14108,
'totalSupply': '86717933554615',
'administrator': 'tz1ZnK6zYJrC9PfKCPryg9tPW6LrERisTGtg',
'securityCheck': True,
'token_metadata': 14109,
'exchangeAddress': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=2,
target_alias='PLENTY / SMAK LP Token',
initiator_alias=None,
entrypoint='mint',
parameter_json={'value': '86717933554615', 'address': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4'},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=[
{
'bigmap': 14107,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {'balance': '86717933554615', 'approvals': {}},
},
}
],
),
)
index_config = OperationIndexConfig(
datasource=TzktDatasourceConfig(kind='tzkt', url='https://api.tzkt.io', http=None),
kind='operation',
handlers=[
OperationHandlerConfig(
callback='on_fa12_and_fa12_add_liquidity',
pattern=[
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', typename='plenty_smak_amm'),
entrypoint='AddLiquidity',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b', typename='plenty_token'),
entrypoint='transfer',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X', typename='smak_token'),
entrypoint='transfer',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z', typename='plenty_smak_lp'),
entrypoint='mint',
optional=False,
),
],
),
],
types=[OperationType.transaction, OperationType.origination],
contracts=[ContractConfig(address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', typename='plenty_smak_amm')],
first_level=0,
last_level=0,
)
index_config.name = 'asdf'
class MatcherTest(IsolatedAsyncioTestCase):
    """Checks operation matching against the fixture data defined above."""
    async def test_match_smak_add_liquidity(self) -> None:
        # Datasource/ctx are irrelevant to matching, so pass None for both.
        index = OperationIndex(None, index_config, None)  # type: ignore
        # Stub out handler-arg preparation; only the matching itself matters.
        index._prepare_handler_args = AsyncMock()  # type: ignore
        matched_operations = await index._match_operations(add_liquidity_operations)
        index._prepare_handler_args.assert_called()
        # The 4-operation AddLiquidity pattern should match exactly once.
        self.assertEqual(len(matched_operations), 1)
|
class Queue:
    """FIFO queue backed by a list: enqueue at index 0, dequeue from the end."""

    def __init__(self):
        # Front of the queue is the END of the list; new items go to index 0.
        self.items = []

    def emptyQ(self):
        """Return True when the queue holds no items."""
        return self.items == []

    def enqueue(self, item):
        """Add an item to the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove AND return the item at the front of the queue.

        Fix: the popped value was previously discarded (the method returned
        None); returning it is backward-compatible since no caller used the
        return value. Raises IndexError on an empty queue.
        """
        return self.items.pop()

    def size(self):
        """Return the number of queued items."""
        return len(self.items)

    def printqueue(self):
        """Print each queued item on its own line (back of queue first)."""
        for item in self.items:
            print(item)

    def viewList(self):
        """Return the backing list (back of queue first)."""
        return self.items
# Interactive demo: read N items from stdin and enqueue each one.
myQ = Queue()
amtNumToPush = int(input("How many things would you like to push? ")) #gets the amount for list
#loops through to collect the users list items.
for i in range(amtNumToPush):
    toPush = input("What do you want to push for the position {} in the list? ".format(i + 1)) #gets the input
    myQ.enqueue(toPush)#pushes the input
print(myQ.viewList()) #prints the list
|
import unittest
from . import utils as TE
class TestReqifDatatypeDefinitionString(unittest.TestCase):
    """Unit tests for TReqz.reqif_datatype_definition_string."""
    def setUp(self):
        # Fresh datatype object for each test.
        self.obj = TE.TReqz.reqif_datatype_definition_string()
    def test_name(self):
        # The ReqIF element tag must match the spec.
        self.assertEqual("DATATYPE-DEFINITION-STRING", self.obj.name)
    def test_decode(self):
        # Shared helpers verify the IDENTIFIABLE attributes plus MAX-LENGTH.
        TE.utils.testDecodeIdentifiableAttributes(self, self.obj)
        TE.utils.testDecodeAttribute(self, self.obj, 'max_length', 'MAX-LENGTH')
    def test_encode(self):
        TE.utils.testEncodeIdentifiableAttributes(self, self.obj)
        TE.utils.testEncodeAttribute(self, self.obj, 'max_length', 'MAX-LENGTH')
# Generated by Django 3.1 on 2020-08-20 00:35
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: make data_cadastro default to the CALLABLE
    # datetime.datetime.now, so the timestamp is evaluated per row at save
    # time rather than once at migration time.
    dependencies = [
        ('vinhos', '0004_auto_20200819_2131'),
    ]
    operations = [
        migrations.AlterField(
            model_name='vinhos',
            name='data_cadastro',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now),
        ),
    ]
|
# DFS FOR DIRECTED GRAPH
from collections import defaultdict
class Graph:
    # Directed graph as an adjacency list: node -> list of successors.
    # (Python 2 code: note the print statements.)
    def __init__(self):
        self.graph = defaultdict(list)
    def add_edge(self, n1, n2):
        # Add a directed edge n1 -> n2.
        self.graph[n1].append(n2)
    def print_graph(self):
        # Print each node followed by its successor list.
        for n1, n2 in self.graph.items():
            print n1, n2
    def do_dfs(self, start):
        # Depth-first traversal from `start`, printing nodes in visit order.
        # NOTE(review): sizing `visited` by len(self.graph) assumes nodes are
        # labelled 0..N-1 and each appears as a key — confirm for other
        # inputs (touching a missing key in dfs_util grows the defaultdict).
        visited = [False] * len(self.graph)
        print visited
        self.dfs_util(start, visited)
    def dfs_util(self, node, visited):
        # Recursive helper: mark, print, then recurse into unvisited successors.
        visited[node] = True
        print node,
        for i in self.graph[node]:
            if not visited[i]:
                self.dfs_util(i, visited)
# Build the example graph and run DFS from a user-chosen start node.
graph = Graph()
graph.add_edge(2, 0)
graph.add_edge(2, 3)
graph.add_edge(0, 2)
graph.add_edge(0, 1)
graph.add_edge(1, 2)
graph.add_edge(3, 3)
graph.print_graph()
print "Traversing graph using dfs"
print "Enter node you wish to start"
# NOTE(review): Python 2 input() eval()s the typed text; raw_input() + int()
# would be safer.
n = input()
graph.do_dfs(n)
|
import pickle
import tensorflow as tf
import numpy as np
import csv
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import cv2
import scipy
import sys
import os
import pandas
import argparse
import json
from PIL import Image
from keras.layers import Input, Flatten, Dense, Lambda, ELU, Dropout, SpatialDropout2D
from keras.models import Model, Sequential
from keras.layers.convolutional import Convolution2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam
from keras import initializations
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from skimage import transform as trf
# Augment data region
def augment_image_brightness(image):
    """Scale the V channel of an RGB image by a random factor in [0.25, 1.25)."""
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    brightness_factor = .25 + np.random.uniform()
    hsv[:, :, 2] = hsv[:, :, 2] * brightness_factor
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def shift_image(image, steer, shift_range):
    """Randomly translate the image and correct the steering angle.

    Horizontal shift is drawn from [-shift_range/2, shift_range/2) and the
    steering angle is adjusted by 0.4 * (dx / shift_range); vertical shift
    is drawn from [-20, 20).  Output size is fixed at 320x160.
    """
    dx = shift_range * np.random.uniform() - shift_range / 2
    adjusted_steer = steer + dx / shift_range * 2 * .2
    dy = 40 * np.random.uniform() - 40 / 2
    translation = np.float32([[1, 0, dx], [0, 1, dy]])
    shifted = cv2.warpAffine(image, translation, (320, 160))
    return shifted, adjusted_steer
def augment_image_shadow(image, ):
    # Randomly darken one side of the image (a "shadow") with probability 0.5.
    # A random line is drawn between (0, top_y) and (160, bot_y) in (x, y)
    # index space and the pixels on one side of it get their HLS lightness
    # halved.  NOTE(review): the 320/160 constants assume a 160x320 frame --
    # confirm against the camera format used elsewhere in this file.
    top_y = 320*np.random.uniform()
    top_x = 0
    bot_x = 160
    bot_y = 320*np.random.uniform()
    image_hls = cv2.cvtColor(image,cv2.COLOR_RGB2HLS)
    # Start from an all-zero mask the same shape as the lightness channel.
    shadow_mask = 0*image_hls[:,:,1]
    X_m = np.mgrid[0:image.shape[0],0:image.shape[1]][0]
    Y_m = np.mgrid[0:image.shape[0],0:image.shape[1]][1]
    # Mark every pixel on one side of the random line (sign of cross product).
    shadow_mask[((X_m-top_x)*(bot_y-top_y) -(bot_x - top_x)*(Y_m-top_y) >=0)]=1
    if np.random.randint(2)==1:
        random_bright = .5
        cond1 = shadow_mask==1
        cond0 = shadow_mask==0
        # Darken either the masked or the unmasked side, chosen at random.
        if np.random.randint(2)==1:
            image_hls[:,:,1][cond1] = image_hls[:,:,1][cond1]*random_bright
        else:
            image_hls[:,:,1][cond0] = image_hls[:,:,1][cond0]*random_bright
    image = cv2.cvtColor(image_hls,cv2.COLOR_HLS2RGB)
    return image
def flip_image(image, steer):
    """With probability 0.5, mirror the image horizontally and negate the
    steering angle; otherwise return both unchanged."""
    if np.random.randint(2) == 0:
        return cv2.flip(image, 1), -steer
    return image, steer
def gen_old(data, data_size, batch_size):
    # Simple sequential batch generator (no augmentation): walks `data` in
    # order and wraps to the start once the window would pass the end, so the
    # final partial batch is skipped.
    # Column 0 of `data` is the image path relative to ./data, column 1 the
    # steering angle.
    first=0
    last=first+batch_size
    while True:
        X_batch = np.array([mpimg.imread(os.path.join('data',image)) for image in data[first:last,0]])
        #print(X_batch.shape)
        y_batch = np.array(data[first:last,1])
        first += batch_size
        last += batch_size
        if last >= data_size:
            # wrap around for the next epoch
            first = 0
            last = first + batch_size
        #print(first,last)
        yield (X_batch, y_batch)
def gen(data, data_size, batch_size):
    # Training batch generator with augmentation.  Each sample: pick a random
    # row of `data`, load its image, then shift, brightness- and
    # shadow-augment, and possibly flip it.  Samples with a small steering
    # angle (< 0.1) are kept only ~50% of the time to counter the
    # straight-driving bias of the dataset.
    X_batch = np.zeros((batch_size, 160, 320, 3))
    y_batch = np.zeros(batch_size)
    while True:
        for i in range(batch_size):
            rand_line = np.random.randint(data_size)
            keep_iter = 0
            # NOTE(review): `rand_line` stays fixed inside this loop, so a
            # rejected low-angle sample is re-augmented (not re-drawn) until
            # an acceptable variant appears -- confirm this is intended.
            while keep_iter==0:
                image_line = data[rand_line,0]
                image = mpimg.imread(os.path.join('data',image_line))
                steer = data[rand_line,1]
                #shift image
                image,steer = shift_image(image,steer,100)
                #augment image brightness
                image = augment_image_brightness(image)
                #augment image shadow
                image = augment_image_shadow(image)
                #flip image
                image,steer = flip_image(image,steer)
                if abs(steer)<0.1:
                    # keep straight-driving samples with probability ~0.5
                    prob = np.random.uniform()
                    if prob > 0.5:
                        keep_iter=1
                else:
                    keep_iter=1
            X_batch[i]=image
            y_batch[i]=steer
            #plt.imshow(image)
            #print(steer)
        yield (X_batch, y_batch)
# Preprocessing data region
def resize(image):
    # Model-internal resize to 80x160 (used by the comma.ai model).
    import tensorflow as tf #Import in here to be used by the model drive.py
    return tf.image.resize_images(image,(80, 160))

def resize_invidia(image):
    # Model-internal resize to the NVIDIA architecture's 66x200 input.
    import tensorflow as tf #Import in here to be used by the model drive.py
    return tf.image.resize_images(image,(66, 200))

def resize_blog(image):
    # Model-internal resize to 64x64 (used by the "blog" model).
    import tensorflow as tf #Import in here to be used by the model drive.py
    return tf.image.resize_images(image,(64, 64))
def normalize_greyscale(image):
    """Linearly rescale pixel values from [0, 255] to [-0.5, 0.5]."""
    lo, hi = -0.5, 0.5
    src_min, src_max = 0, 255
    return lo + (image - src_min) * (hi - lo) / (src_max - src_min)
def crop_image(image):
    """Slice columns 22..134 out of axis 2 (expects a 4-D NHWC batch)."""
    col_window = slice(22, 135)
    return image[:, :, col_window, :]
#Model definition region
def get_comma_ai_model():
    """Build and compile a comma.ai-style steering model (Keras 1.x API).

    Input: raw 160x320x3 RGB frames; output: one steering angle.  Cropping,
    resizing and normalization happen inside the model so drive-time code
    can feed raw frames.
    """
    ch, row, col = 3, 160, 320 # camera format
    model = Sequential()
    # Drop 25 px from the top and 25 px from the left edge.
    model.add(Cropping2D(cropping=((25,0),(25,0)),input_shape=(row, col, ch)))
    model.add(Lambda(resize))
    model.add(Lambda(normalize_greyscale)) # different normalization ([-0.5,0.5] range) than original comma_ai model ([-1,1] range)
    #model.add(Lambda(lambda x: x/127.5 - 1.,input_shape=(row, col, ch)))
    model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"))
    model.add(ELU())
    model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
    model.add(ELU())
    model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
    model.add(Flatten())
    model.add(Dropout(.2))
    model.add(ELU())
    model.add(Dense(512))
    model.add(Dropout(.5))
    model.add(ELU())
    model.add(Dense(1))
    model.compile(optimizer="adam", loss="mse", metrics=['mse'])
    print (model.summary())
    return model
def get_invidia_model(learning_rate):
    """Build and compile an NVIDIA-style end-to-end steering model
    (Keras 1.x API) with spatial dropout after each conv block.

    learning_rate: Adam learning rate.
    """
    ch, row, col = 3, 160, 320 # camera format
    model = Sequential()
    # Drop 30 px from the top and 25 px from the left edge.
    model.add(Cropping2D(cropping=((30,0),(25,0)),input_shape=(row, col, ch)))
    #model.add(Cropping2D(crop_image,input_shape=(row, col, ch)))
    model.add(Lambda(resize_invidia))
    #model.add(Lambda(resize_invidia,input_shape=(row, col, ch)))
    model.add(Lambda(normalize_greyscale))
    # 5 conv layers: three 5x5/stride-2, then two 3x3/stride-1.
    model.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode="valid", init= initializations.uniform))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode="valid", init= initializations.uniform))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode="valid", init= initializations.uniform))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode="valid", init= initializations.uniform))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode="valid", init= initializations.uniform))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Flatten())
    # Fully-connected head: 100 -> 50 -> 10 -> 1 steering output.
    model.add(Dense(100, init= initializations.uniform))
    model.add(ELU())
    model.add(Dropout(.5))
    model.add(Dense(50, init= initializations.uniform))
    model.add(ELU())
    #model.add(Dropout(.5))
    model.add(Dense(10, init= initializations.uniform))
    model.add(ELU())
    model.add(Dropout(.5))
    model.add(Dense(1))
    model.compile(optimizer=Adam(lr=learning_rate), loss="mse", metrics=['mse'])
    print (model.summary())
    return model
def get_blog_model(learning_rate):
    """Build and compile a VGG-like steering model on 64x64 inputs
    (Keras 1.x API): a 1x1 "color-space" conv, then three conv blocks of
    doubling width, each followed by max-pooling and spatial dropout.

    learning_rate: Adam learning rate.
    """
    ch, row, col = 3, 160, 320 # camera format
    model = Sequential()
    model.add(Cropping2D(cropping=((30,0),(25,0)),input_shape=(row, col, ch)))
    #model.add(Cropping2D(crop_image,input_shape=(row, col, ch)))
    model.add(Lambda(resize_blog))
    #model.add(Lambda(resize_invidia,input_shape=(row, col, ch)))
    model.add(Lambda(normalize_greyscale))
    # 1x1 conv lets the network learn its own color transform.
    model.add(Convolution2D(3, 1, 1))
    model.add(Convolution2D(32, 3, 3))
    model.add(Convolution2D(32, 3, 3))
    model.add(MaxPooling2D((2, 2)))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Convolution2D(64, 3, 3))
    model.add(Convolution2D(64, 3, 3))
    model.add(MaxPooling2D((2, 2)))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Convolution2D(128, 3, 3))
    model.add(Convolution2D(128, 3, 3))
    model.add(MaxPooling2D((2, 2)))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(ELU())
    model.add(Dense(64))
    model.add(ELU())
    model.add(Dense(16))
    model.add(Dense(1))
    model.compile(optimizer=Adam(lr=learning_rate), loss="mse", metrics=['mse'])
    print (model.summary())
    return model
def get_model(learning_rate):
    """Smaller variant of `get_blog_model` (half-width conv blocks, larger
    first dense layer).  Keras 1.x API; returns a compiled model.

    learning_rate: Adam learning rate.
    """
    ch, row, col = 3, 160, 320 # camera format
    model = Sequential()
    model.add(Cropping2D(cropping=((30,0),(25,0)),input_shape=(row, col, ch)))
    #model.add(Cropping2D(crop_image,input_shape=(row, col, ch)))
    model.add(Lambda(resize_blog))
    #model.add(Lambda(resize_invidia,input_shape=(row, col, ch)))
    model.add(Lambda(normalize_greyscale))
    # 1x1 conv lets the network learn its own color transform.
    model.add(Convolution2D(3, 1, 1))
    model.add(Convolution2D(16, 3, 3))
    model.add(Convolution2D(16, 3, 3))
    model.add(MaxPooling2D((2, 2)))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Convolution2D(32, 3, 3))
    model.add(Convolution2D(32, 3, 3))
    model.add(MaxPooling2D((2, 2)))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Convolution2D(64, 3, 3))
    model.add(Convolution2D(64, 3, 3))
    model.add(MaxPooling2D((2, 2)))
    model.add(ELU())
    model.add(SpatialDropout2D(.5))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(ELU())
    model.add(Dense(64))
    model.add(ELU())
    model.add(Dense(16))
    model.add(Dense(1))
    model.compile(optimizer=Adam(lr=learning_rate), loss="mse", metrics=['mse'])
    print (model.summary())
    return model
if __name__ == "__main__":
#parser.add_argument('--batch', type=int, default=75, help='Batch size.')
#parser.add_argument('--epoch', type=int, default=200, help='Number of epochs.')
batch = 200
epoch = 8
learning_rate = 0.0001
p_data = pd.read_csv(os.path.join('data','driving_log.csv'), sep=None, engine='python')
train, val = train_test_split(p_data, test_size=0.3, random_state=10)
val, test = train_test_split(val, test_size=0.3, random_state=10)
#train data
center_data = np.column_stack((np.array((train.center)),np.array((train.steering))))
left_data = np.column_stack((np.array((train.left)),np.array((train.steering+0.2)))) # add 0.2 to the steering angle
right_data = np.column_stack((np.array((train.right)),np.array((train.steering-0.2)))) # substract 0.2 to the steering angle
train_data = np.vstack((center_data,left_data,right_data))
#train_data = center_data
#validation data
val_data = np.column_stack((np.array((val.center)),np.array((val.steering))))
#test data
test_data = np.column_stack((np.array((test.center)),np.array((test.steering))))
#shuffle data: not needed anymore since in the generator I randomly pick up images from the data
#np.random.shuffle(train_data)
#np.random.shuffle(val_data)
#np.random.shuffle(test_data)
#model = get_comma_ai_model()
#model = get_invidia_model(learning_rate)
model = get_blog_model(learning_rate)
#model = get_model(learning_rate)
print('{}: {}'.format("Train samples",len(train_data)))
print('{}: {}'.format("Validation samples",len(val_data)))
print('{}: {}'.format("Test samples",len(test_data)))
model.fit_generator(
gen(train_data, len(train_data), batch),
samples_per_epoch = 25000, #30000
#samples_per_epoch = len(train_data),
nb_epoch= epoch,
validation_data = gen(val_data, len(val_data), batch),
nb_val_samples = 8000 #7500
)
#serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json.dump(model_json, json_file)
#serialize weights to HDF5
model.save_weights("model.h5")
#evalute model on test set
X_test = np.array([mpimg.imread(os.path.join('data',image)) for image in test_data[:,0]])
y_test = np.array(test_data[:,1])
metrics = model.evaluate(X_test, y_test, batch_size=batch)
for metric_i in range(len(model.metrics_names)):
metric_name = model.metrics_names[metric_i]
metric_value = metrics[metric_i]
print('{}: {}'.format(metric_name, metric_value))
#predict steering angles in test set
predictions = model.predict(X_test, batch_size = batch)
print(predictions)
print(y_test)
"""
X_train = np.array([mpimg.imread(os.path.join('data_test',image)) for image in train_data[:,0]])
y_train = np.array(train_data[:,1])
predictions = model.predict(X_train, batch_size = batch)
print(predictions)
print(y_train)
"""
|
#!/usr/bin/python
import sys, getopt, os, json
import datetime, time
def print_help_and_exit(exit_code, exit_msg=''):
    """Print the usage line (or a custom message) and terminate.

    exit_code: process exit status passed to sys.exit().
    exit_msg: printed instead of the usage line when non-empty.
    """
    message = exit_msg if exit_msg != '' else 'test.py -i <inputfile> -o <outputfile>'
    print(message)
    sys.exit(exit_code)
def process_results(directory, failed_list, broken_list, skipped_list):
    """Scan an allure-results directory and tally test outcomes.

    Result files are visited newest-first so that, when a test produced
    several result files (reruns), only its most recent outcome counts.

    Args:
        directory: path of the directory holding *.json allure result files.
        failed_list / broken_list: dicts populated with name -> failure message.
        skipped_list: dict populated with name -> '' for skipped tests.

    Returns:
        The number of tests whose latest status is "passed".
    """
    processed_list = []
    passed = 0
    directory_files = os.listdir(directory)
    # Newest first: the first occurrence of each test name wins.
    directory_files.sort(key=lambda x: os.stat(os.path.join(directory, x)).st_mtime, reverse=True)
    for filename in directory_files:
        # BUG FIX: was `open(directory + filename)`, which only worked when
        # the caller passed a trailing path separator.
        with open(os.path.join(directory, filename), "r") as read_file:
            json_file = json.load(read_file)
        name = json_file["name"]
        if name in processed_list:
            continue  # a newer result file already decided this test
        status = json_file["status"]
        if status == "passed":
            processed_list.append(name)
            passed += 1
        elif status == "failed":
            processed_list.append(name)
            failed_list[name] = json_file["statusDetails"]["message"]
        elif status == "broken":
            processed_list.append(name)
            broken_list[name] = json_file["statusDetails"]["message"]
        elif status == "skipped":
            processed_list.append(name)
            skipped_list[name] = ''
    return passed
# def check_args(argv):
# opt_args_map = {}
# required_opts = ["-i", "-r"]
# required_args = ["-i", "-r"]
#
# skip_index = []
# for idx, arg in enumerate(argv):
# if idx in skip_index:
# continue
#
# if arg in required_opts:
# required_opts.remove(arg)
#
# if arg in required_args:
# if idx+1 > len(argv):
# print_help_and_exit(2)
# opt_args_map[arg] = argv[idx+1]
# else:
# opt_args_map[arg] = True
#
# return opt_args_map
def main(argv):
    """Render an HTML report template from allure result files.

    Requires -i/--ifile (template) and -r/--rdir (results directory); writes
    the rendered template to -o/--ofile (default output.html), substituting
    ${...} placeholders with the computed statistics and any user-supplied
    --replace-strings/--replace-values pairs.
    """
    results_dir = ''
    inputfile = ''
    outputfile = 'output.html'
    replace_strings = ''
    replace_values = ''
    replace_map = {}
    pass_message = "Tests Passed"
    fail_message = "Tests Failed"
    test_success = True
    result_color = "green"  # NOTE(review): currently unused downstream
    test_total = 0
    success_rate = 0
    required = ["-i", "--ifile", "-r", "--rdir"]
    try:
        opts, args = getopt.getopt(argv, "hi:o:r:", ["ifile=", "ofile=", "rdir=", "pass-message=", "fail-message=", "replace-strings=", "replace-values="])
    except getopt.GetoptError:
        print_help_and_exit(2)
    if len(opts) == 0:
        print_help_and_exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print_help_and_exit(0)
        elif opt in ("-i", "--ifile"):
            inputfile = arg
            required.remove("-i")
            required.remove("--ifile")
        elif opt in ("-o", "--ofile"):
            outputfile = arg
        elif opt in ("-r", "--rdir"):
            results_dir = arg
            required.remove("-r")
            required.remove("--rdir")
        # BUG FIX: these used `opt in ("--pass-message")` -- a *substring*
        # test against a plain string, not tuple membership.
        elif opt == "--pass-message":
            pass_message = arg
        elif opt == "--fail-message":
            fail_message = arg
        elif opt == "--replace-strings":
            replace_strings = arg
        elif opt == "--replace-values":
            replace_values = arg
    if len(required) > 0:
        print("missing required parameters: ", required)
        sys.exit(2)
    replace_strings = replace_strings.split(',')
    replace_values = replace_values.split(',')
    if len(replace_strings) != len(replace_values):
        print_help_and_exit(2, "Error: replace-strings length does not match replace-values")
    # BUG FIX: the loop variable was named `str`, shadowing the builtin
    # (which forced the `.__str__()` calls further down).
    for idx, key_name in enumerate(replace_strings):
        replace_map[key_name] = replace_values[idx]
    print(replace_map)
    broken_list = {}
    failed_list = {}
    skipped_list = {}
    passed = process_results(results_dir, failed_list, broken_list, skipped_list)
    test_total = passed + len(broken_list) + len(failed_list) + len(skipped_list)
    success_rate = (float(passed) / float(test_total)) * 100
    print("passed:", passed)
    print("failed:", len(failed_list))
    print("broken:", len(broken_list))
    print("skipped:", len(skipped_list))
    print("test_total", test_total)
    print("success_rate", success_rate)
    if len(failed_list) > 0 or len(broken_list) > 0:
        # BUG FIX: this previously set test_success = True, so the report
        # always showed the pass message even when tests failed.
        test_success = False
        result_color = "red"
    with open(inputfile, 'r') as f_in:
        with open(outputfile, 'w') as f_out:
            for line in f_in:
                if "${test_success}" in line:
                    replaced_line = line.replace("${test_success}", pass_message if test_success else fail_message)
                elif "${total_count}" in line:
                    replaced_line = line.replace("${total_count}", str(test_total))
                elif "${passed_count}" in line:
                    replaced_line = line.replace("${passed_count}", str(passed))
                elif "${failed_count}" in line:
                    replaced_line = line.replace("${failed_count}", str(len(failed_list)))
                elif "${skipped_count}" in line:
                    replaced_line = line.replace("${skipped_count}", str(len(skipped_list)))
                elif "${broken_count}" in line:
                    replaced_line = line.replace("${broken_count}", str(len(broken_list)))
                elif "${success_rate}" in line:
                    replaced_line = line.replace("${success_rate}", "%.2f" % (success_rate))
                else:
                    # for/else: runs the else when no replace_map key matched.
                    for key, val in replace_map.items():
                        key = "${" + key + "}"
                        if key in line:
                            replaced_line = line.replace(key, val)
                            break
                    else:
                        replaced_line = line
                # BUG FIX: was f_out.write(line.replace(line, replaced_line)),
                # a pointless self-replacement.
                f_out.write(replaced_line)
if __name__ == "__main__":
main(sys.argv[1:]) |
# -*- coding: utf-8 -*-
"""
===============================================
Project Name:
Working with Python
-----------------------------------------------
Developer:
Operate:--Orion Analysis Team--
Program:--Vector Data Analysis Team--
...............................................
Author(Analyst):朱立松--Mr.Zhu--
The Chief of Teams
===============================================
"""
import pandas as pd
import numpy as np
import os

# For every CSV file in DATA_DIR, check whether each of these signal columns
# is all-zero, print the verdict and append it to a log file.
CHECK_COLUMNS = ['right_turn_signals', 'left_turn_signals', 'hand_brake', 'foot_brake']
DATA_DIR = os.path.abspath(r'E:/the_data/the_all_data/thedata')

with open('E:/the_data/panduanjieguo.txt', "a+") as log_writer:
    for name in os.listdir(DATA_DIR):
        # Full path of the current CSV file.
        info = os.path.join(DATA_DIR, name)
        data = pd.read_csv(info)
        total_rows = len(data['device_num'])
        # Refactor: the four near-identical if/else blocks collapsed into one
        # loop over the checked columns.  (Also fixes the '含有有' typo that
        # appeared in the left_turn_signals message.)
        for column in CHECK_COLUMNS:
            zero_rows = len(data[data[column].isin([0])])
            if zero_rows == total_rows:
                # every value in this column is zero
                message = '{}的{}中没有非0项\n'.format(name, column)
            else:
                # at least one non-zero value present
                message = '{}的{}中含有非0项\n'.format(name, column)
            print(message)
            log_writer.write(message)
|
from operations.indices import ClickhouseIndices
from operations.internal_transactions import ClickhouseInternalTransactions
from operations.blocks import ClickhouseBlocks
from operations.contract_transactions import ClickhouseContractTransactions
from operations.contracts import ClickhouseContracts
from operations.inputs import ClickhouseTransactionsInputs, ClickhouseEventsInputs
from operations.events import ClickhouseEvents
from operations.token_holders import ClickhouseTokenHolders
from operations.token_prices import ClickhouseTokenPrices
from operations.contract_methods import ClickhouseContractMethods
from operations.bancor_trades import ClickhouseBancorTrades
from time import sleep
import os
from utils import repeat_on_exception
def prepare_indices():
    """
    Prepare tables in database
    """
    print("Preparing indices...")
    indices = ClickhouseIndices()
    indices.prepare_indices()

def prepare_blocks():
    """
    Extract blocks with timestamps
    """
    print("Preparing blocks...")
    blocks = ClickhouseBlocks()
    blocks.create_blocks()

def prepare_contracts_view():
    """
    Prepare material view with contracts extracted from transactions table
    """
    print("Preparing contracts view...")
    contract_transactions = ClickhouseContractTransactions()
    contract_transactions.extract_contract_addresses()

def extract_traces():
    """
    Extract internal transactions
    """
    print("Extracting internal transactions...")
    internal_transactions = ClickhouseInternalTransactions()
    internal_transactions.extract_traces()

def extract_contracts_abi():
    """
    Extract ABI description from etherscan.io.
    Works only for contracts specified in config.
    """
    print("Extracting ABIs...")
    contracts = ClickhouseContracts()
    contracts.save_contracts_abi()

def extract_events():
    """
    Extract events
    """
    print("Extracting events...")
    events = ClickhouseEvents()
    events.extract_events()
def parse_transactions_inputs():
    """
    Start input parsing for transactions.
    The operation works only for contracts specified in config.
    """
    print("Parsing transactions inputs...")
    contracts = ClickhouseTransactionsInputs()
    contracts.decode_inputs()

def parse_events_inputs():
    """
    Start input parsing for events.
    The operation works only for contracts specified in config.
    """
    print("Parsing events inputs...")
    contracts = ClickhouseEventsInputs()
    contracts.decode_inputs()

def extract_token_transactions():
    """
    Prepare material view with erc20 transactions
    extracted from transactions table.
    """
    print("Preparing token transactions view...")
    contracts = ClickhouseTokenHolders()
    contracts.extract_token_transactions()

def extract_prices():
    """
    Download exchange rates.
    Will extract token capitalization, ETH, BTC and USD prices
    from cryptocompare.com and coinmarketcap.com.
    """
    print("Extracting prices...")
    prices = ClickhouseTokenPrices()
    prices.get_prices_within_interval()

def extract_tokens():
    """
    Extract ERC20 token names, symbols, total supply and etc.
    """
    print("Extracting tokens...")
    tokens = ClickhouseContractMethods()
    tokens.search_methods()

def prepare_bancor_trades():
    """
    Prepare view with bancor trades
    """
    print("Extracting trades...")
    trades = ClickhouseBancorTrades()
    trades.extract_trades()
def prepare_indices_and_views():
    """
    Prepare all indices and views in database
    """
    prepare_indices()
    prepare_contracts_view()
    extract_token_transactions()

def _fill_database():
    # Shared extraction sequence used by both synchronize entry points.
    prepare_blocks()
    extract_traces()
    extract_events()
    extract_tokens()

def synchronize():
    """
    Run partial synchronization of the database.
    Will extract only new blocks, internal transactions, events and token descriptions
    """
    prepare_indices()
    _fill_database()
    # NOTE(review): purpose of the trailing sleep is not evident from here.
    sleep(10)

def synchronize_full():
    """
    Run full synchronization of the database
    (adds ABIs, decoded inputs and prices on top of `synchronize`)
    """
    prepare_indices()
    _fill_database()
    extract_contracts_abi()
    parse_transactions_inputs()
    parse_events_inputs()
    extract_prices()
    # NOTE(review): purpose of the trailing sleep is not evident from here.
    sleep(10)

def run_tests():
    """
    Run tests (delegates to nosetests in the current directory)
    """
    os.system("nosetests --nologcapture -v .")
|
from lettuce import step, world
# Choose which browser to use
@step(r'am using (?:Z|z)ope')
def using_zope(step):
    # Point world.browser at the preconfigured zope test browser.
    world.browser = world.zope

@step(r'am using (?:C|c)hrome')
def using_chrome(step):
    # Point world.browser at the preconfigured Chrome driver.
    world.browser = world.chrome

@step(r'am using (?:F|f)irefox')
def using_firefox(step):
    # Point world.browser at the preconfigured Firefox driver.
    world.browser = world.firefox
|
from static import StaticInfo
def choose_period():
    """Prompt until the user enters a valid period ('h' lists the options).

    Returns:
        A period string guaranteed to be in StaticInfo.periods.
    """
    period = None
    while period not in StaticInfo.periods:
        period = input('give period: {h for help list}')
        if period == 'h':
            print(StaticInfo.periods)
        elif period not in StaticInfo.periods:
            print('wrong period: h to get list of periods')
        # (dead `else: continue` removed -- a valid period simply ends the loop)
    return period
def choose_interval():
    """Prompt until the user enters a valid interval ('h' lists the options).

    Returns:
        An interval string guaranteed to be in StaticInfo.intervals.
    """
    interval = None
    while interval not in StaticInfo.intervals:
        interval = input('give interval: {h for help list}')
        if interval == 'h':
            print(StaticInfo.intervals)
        elif interval not in StaticInfo.intervals:
            print('wrong interval: h to get list of intervals')
        # (dead `else: continue` removed -- a valid interval simply ends the loop)
    return interval
|
import pymysql,time
from config import Config
from dal.base_dal import mysql
class Report:
    '''Management of reports/complaints (CRUD against the `report` table).'''

    def add_report(self, imgs, reportInfo):
        # Insert a report.  `imgs` is an optional list of attachment file
        # names; `reportInfo` holds the column values
        # (title, content, informer, informerPhone, informerAddress) and is
        # extended in place with the comma-separated attachment ids.
        try:
            if imgs:  # attachments were uploaded
                img_ids = []
                for i in imgs:
                    with mysql() as cursor:
                        sql = "INSERT INTO attach(filename) VALUES (%s)"
                        # NOTE(review): executemany() for a single row --
                        # cursor.execute(sql, (i,)) looks intended; confirm.
                        cursor.executemany(sql, (i,))
                        img_ids.append(str(cursor.lastrowid))
                reportInfo.append(','.join(img_ids))
            else:
                reportInfo.append('')
            with mysql() as cursor:
                sql = "INSERT INTO report(title,content,informer,informerPhone,informerAddress,Attach) VALUES (%s, %s, %s, %s, %s, %s)"
                cursor.execute(sql, reportInfo)
            return 200, '举报已提交'
        except Exception as ex:
            # NOTE(review): the exception is silently swallowed; consider
            # logging `ex` before returning the error tuple.
            return 500, '举报提交失败'

    def list_report(self, index, rows):
        # Page through reports, newest (highest ser) first; `index` is the
        # offset and `rows` the page size.
        with mysql() as cursor:
            sql = 'SELECT * FROM report order by ser desc limit %s,%s'
            cursor.execute(sql, (index, rows))
            result = cursor.fetchall()
            sql = 'SELECT count(1) as count FROM report'
            cursor.execute(sql)
            count = cursor.fetchone()
        # total row count and the requested page
        return count, result

    def reply_report(self, ser):
        # Mark report `ser` as replied and stamp the reply time; returns the
        # affected row count.
        with mysql() as cursor:
            sql = 'update report set IsReply = True,ReplyTime = %s where ser = %s'
            curtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            return cursor.execute(sql, (curtime, ser))

    def get_report(self, ser):
        # Fetch one report row by its serial number (None if absent).
        with mysql() as cursor:
            sql = 'SELECT * FROM report where ser = %s'
            cursor.execute(sql, (ser,))
            result = cursor.fetchone()
        # the fetched row
        return result
if __name__ == '__main__':
    # Quick manual smoke test: fetch report 2, mark it replied, fetch again.
    r = Report()
    # print(r.list_report(0,2))
    print(r.get_report(2))
    print(r.reply_report(2))
    print(r.get_report(2))
#Date: 26-07-18
#Author: A.Suraj Kumar
#Roll Number: 181046037
#Assignment 10
#Python Program to Count the Number of Digits in a Number.
n=int(input("Enter your number:"))
count=0
while(n>0):
count=count+1
n=n//10
print(count)
|
import tensorflow as tf
def var_mask(var, cur_var, axis, ker_f):
    # Zero out the entries of `var` that correspond to prunable nodes of
    # `cur_var` along `axis`.
    # NOTE(review): `get_nodes_to_prune` is not defined in this module (only
    # `get_nodes_to_keep` is), so as written this raises NameError when
    # called -- verify which helper is intended.
    prune_nodes = get_nodes_to_prune(cur_var, axis, ker_f)
    keep_nodes = tf.logical_not(prune_nodes)
    prune_mask = get_prune_mask(cur_var, keep_nodes, 1 - axis)
    prune_sum = tf.multiply(cur_var, prune_mask)
    return get_var_from_sum(var, prune_sum)
def gradient_mask(gradients, variables):
    """Return gradients with entries zeroed wherever the matching (non-bias)
    variable is exactly zero, so pruned weights stay pruned during training."""
    masked = []
    for idx, grad in enumerate(gradients):
        var = variables[idx]
        if "bias" in var.name:
            masked.append(grad)
        else:
            masked.append(tf.where(tf.abs(var) == 0.0,
                                   tf.zeros_like(grad), grad))
    return masked
def get_prune_mask(var, keep_nodes, axis):
    # Broadcast the per-node `keep_nodes` boolean vector to `var`'s full
    # shape (collapsing the non-`axis` dimension to 1 first) and cast it to
    # var's dtype so it can be multiplied in directly.
    shape = var.shape.as_list()
    shape[1 - axis] = 1
    return tf.cast(
        tf.broadcast_to(tf.reshape(keep_nodes, shape), var.shape),
        var.dtype)
def get_var_from_sum(var, prune_sum):
    # Zero entries of `var` wherever `prune_sum` is zero.  For tensors of
    # rank > 2 (e.g. conv kernels), `prune_sum` is first broadcast across the
    # two leading (spatial) dimensions.
    if len(var.shape) <= 2:
        return tf.where(prune_sum == 0, tf.zeros_like(var), var)
    broad = tf.broadcast_to(
        tf.reshape(
            prune_sum,
            tf.TensorShape([1, 1]).concatenate(prune_sum.shape)),
        var.shape)
    return tf.where(broad == 0, tf.zeros_like(var), var)
def get_nodes_to_keep(var, axis_shape, axis, ker_f):
    # A node is kept when the summed saturated magnitude of its weights
    # (tanh(50*w) ~ sign-sized for non-tiny weights) exceeds a threshold.
    # NOTE(review): the constants 0.1, 50 and the cap of 10 look empirically
    # tuned -- document their origin if known.
    threshold = 0.1
    return tf.greater(
        tf.reduce_sum(tf.abs(tf.tanh(50 * var)),
                      axis=(axis if axis <= 1 else [0, 1, axis])),
        tf.minimum(10, threshold * axis_shape * ker_f))
def get_dynamic_shape(t, axis):
    # Runtime size of `t` along `axis`, computed by summing a ones-tensor
    # after dropping the other axis (works when static shape is unknown).
    return tf.reduce_sum(tf.ones_like(tf.gather(t, 0, axis=1-axis)))
|
# Does the walk described by S (moves R/L/U/D) ever revisit a point?
N = int(input())  # declared length of S; read to consume the line, otherwise unused
S = input()

moves = {"R": (1, 0), "L": (-1, 0), "U": (0, 1), "D": (0, -1)}
x = y = 0
seen = {(0, 0)}
for ch in S:
    dx, dy = moves[ch]
    x, y = x + dx, y + dy
    if (x, y) in seen:
        print("Yes")
        exit()
    seen.add((x, y))
print("No")
|
def atoi(str):
    """Parse the leading decimal digits of *str* into an int.

    Stops at the first non-digit character; returns 0 when the string is
    empty or does not start with a digit.  No sign or whitespace handling.
    """
    value = 0
    for c in str:
        if not ('0' <= c <= '9'):
            break
        value = value * 10 + (ord(c) - ord('0'))
    return value
# Quick demo: convert the digit string and show the types involved.
a = '123'
# a = [1, 2, 3]
print(type(a))
b = atoi(a)
print(b, type(b))
class Restaurant:
    """A restaurant with a name and a cuisine type."""

    def __init__(self, restaurant_name, cuisine_type):
        self.restaurant_name = restaurant_name
        self.cuisine_type = cuisine_type

    def describe_restaurant(self):
        """Print a one-line summary of the restaurant."""
        summary = f"The restaurant {self.restaurant_name} serves {self.cuisine_type} dishes"
        print(summary)

    def open_restaurant(self):
        """Announce that the restaurant is open."""
        announcement = f"{self.restaurant_name} is now open!"
        print(announcement)
class IceCreamStand(Restaurant):
    """A Restaurant specialization that carries a fixed list of flavors."""

    def __init__(self, restaurant_name, cuisine_type):
        super().__init__(restaurant_name, cuisine_type)
        self.flavors = ["Vanilla", "Chocolate", "Pistacho"]

    def diplayFlavors(self):
        """Print each available flavor on its own indented line."""
        print("These are the restaurant flavors:")
        for flavor_name in self.flavors:
            print(f"\t {flavor_name}")
iceRisto = IceCreamStand("Geloh", "iceCreams")
iceRisto.describe_restaurant()
iceRisto.diplayFlavors() |
from binary_trees.binary_search_tree import *
def height(root):
    """Number of nodes on the longest root-to-leaf path (0 for an empty tree).

    Works on any node objects exposing `.left` and `.right`.
    """
    if root is None:
        return 0
    return 1 + max(height(root.left), height(root.right))
if __name__ == "__main__":
bst = BinarySearchTree()
tree_nodes = [20, 8, 4, 12, 10, 14, 22, 25]
"""
Tree representation of above numbers
20
/ \
8 22
/ \ \
4 12 25
/ \
10 14
in-order traversal - 4, 8, 10, 12, 14, 20, 22, 25
"""
for node_data in tree_nodes:
bst.root = bst.insert(bst.root, node_data)
print(height(bst.root))
|
from django.conf.urls import url
from shop import views as application
urlpatterns = [
    url(r'^$', application.main, name='index'),
    # NOTE(review): the pattern has no trailing '$', so it also matches any
    # path starting with 'auth'; and the route name 'charges' does not match
    # the view name 'auth' -- confirm both are intentional.
    url(r'^auth', application.auth, name='charges'),
]
|
# Python 2/3 compatibility shims: expose `execfile` and `reduce` under the
# same names on both interpreters.
try:
    execfile = execfile
except NameError:
    # Python 3 removed execfile(); emulate it with compile() + exec().
    def execfile(filename, globals=None, locals=None):
        code = compile(open(filename).read(), filename, 'exec')
        exec(code, globals, locals)

try:
    reduce = reduce
except NameError:
    # Python 3 moved reduce() into functools.
    from functools import reduce
|
import code, codechef_solution
import requests, conf
from models import URL
# Log in to CodeChef, download all of the handle's solutions, then log out.
with requests.Session() as s:
    code.codechef_login(s, URL.BASE)
    # Uncomment this line, if you want to fetch the rating
    #code.get_rating(s, conf.handle)
    codechef_solution.codechef_download(s, conf.handle)
    code.codechef_logout(s, URL.BASE + URL.LOGOUT)
import datetime
from asyncio.events import AbstractEventLoop
from typing import Generator, List, Union
from ..exceptions import VoyagerException
from .base import BaseResource
__all__ = [
'FireballResource',
]
class FireballRecord(object):
    """A single fireball (bolide) event from the JPL Fireball API.

    Field values are stored as the raw strings received from the API (or
    ``None`` when a field was omitted); the typed properties convert on
    access.

    Bug fixes relative to the previous implementation:
    * ``__init__`` no longer mutates the class-level ``_FIELDS`` list, which
      corrupted every record constructed after the first.
    * ``__len__`` no longer does ``del self._FIELDS`` (an AttributeError
      under ``__slots__``) nor shares one cached length class-wide.
    * ``to_dict`` caches per instance instead of in a class-level dict keyed
      by the instance, which kept every record alive for the process.
    * ``vz`` now converts to ``float`` like ``vx``/``vy``.
    """

    __slots__ = [
        '_fc',
        '_date',
        '_lat',
        '_lon',
        '_lat_dir',
        '_lon_dir',
        '_alt',
        '_vel',
        '_energy',
        '_impact_e',
        '_vx',
        '_vy',
        '_vz',
        # per-instance state replacing the old shared class-level cache
        '_len',
        '_dict_cache',
    ]
    # Canonical field names in API order.  Treated as immutable.
    _FIELDS = [
        'date',
        'lat',
        'lon',
        'lat-dir',
        'lon-dir',
        'alt',
        'vel',
        'energy',
        'impact-e',
        'vx',
        'vy',
        'vz',
    ]

    def __init__(self, data: List[str], fields: List[str]) -> None:
        self._fc = self._FIELDS.copy()
        supplied = set()
        for field, value in zip(fields, data):
            setattr(self, f"_{field.replace('-', '_')}", value)
            supplied.add(field)
        # Fields the API did not supply default to None.
        for unset in self._FIELDS:
            if unset not in supplied:
                setattr(self, f"_{unset.replace('-', '_')}", None)
        self._len = len(supplied)
        self._dict_cache = None

    def __len__(self) -> int:
        # Number of fields the API actually supplied for this record.
        return self._len

    @property
    def date(self) -> Union[str, None]:
        return self._date

    @property
    def datetime(self) -> Union[datetime.datetime, None]:
        if not self._date:
            return None
        return datetime.datetime.strptime(self._date, "%Y-%m-%d %H:%M:%S")

    @property
    def lat(self) -> Union[float, None]:
        if not self._lat:
            return None
        return float(self._lat)

    @property
    def latitude(self) -> Union[float, None]:
        return self.lat

    @property
    def lon(self) -> Union[float, None]:
        if not self._lon:
            return None
        return float(self._lon)

    @property
    def longitude(self) -> Union[float, None]:
        return self.lon

    @property
    def lat_dir(self) -> Union[str, None]:
        return self._lat_dir

    @property
    def latitude_dir(self) -> Union[str, None]:
        return self.lat_dir

    @property
    def lon_dir(self) -> Union[str, None]:
        return self._lon_dir

    @property
    def longitude_dir(self) -> Union[str, None]:
        return self.lon_dir

    @property
    def alt(self) -> Union[float, None]:
        if not self._alt:
            return None
        return float(self._alt)

    @property
    def altitude(self) -> Union[float, None]:
        return self.alt

    @property
    def vel(self) -> Union[float, None]:
        if not self._vel:
            return None
        return float(self._vel)

    @property
    def velocity(self) -> Union[float, None]:
        return self.vel

    @property
    def energy(self) -> Union[float, None]:
        if not self._energy:
            return None
        return float(self._energy)

    @property
    def impact_e(self) -> Union[float, None]:
        if not self._impact_e:
            return None
        return float(self._impact_e)

    @property
    def impact_energy(self) -> Union[float, None]:
        return self.impact_e

    @property
    def vx(self) -> Union[float, None]:
        if not self._vx:
            return None
        return float(self._vx)

    @property
    def velocity_x(self) -> Union[float, None]:
        return self.vx

    @property
    def vy(self) -> Union[float, None]:
        if not self._vy:
            return None
        return float(self._vy)

    @property
    def velocity_y(self) -> Union[float, None]:
        return self.vy

    @property
    def vz(self) -> Union[float, None]:
        if not self._vz:
            return None
        # BUG FIX: previously returned the raw string, unlike vx/vy.
        return float(self._vz)

    @property
    def velocity_z(self) -> Union[float, None]:
        return self.vz

    def _process_dict(self) -> dict:
        # Raw string values keyed by the hyphenated API field names.
        return {field: getattr(self, f"_{field.replace('-', '_')}") for field in self._fc}

    @property
    def to_dict(self) -> dict:
        if self._dict_cache is None:
            self._dict_cache = self._process_dict()
        return self._dict_cache

    @classmethod
    def from_dict(cls, data: dict) -> "FireballRecord":
        """Build a record from a mapping of API field name -> raw value."""
        if not all((key in cls._FIELDS for key in data)):
            raise VoyagerException("Malformed input. Invalid key(s) supplied")
        return cls([value for value in data.values()], [key for key in data])
class FireballResource(BaseResource):
    """Wrapper around a fireball API response (signature, count, data rows).

    Bug fixes relative to the previous implementation:
    * Iteration works now.  The old ``__next__`` was itself a generator
      function, so ``for fb in resource`` looped forever over fresh
      generator objects; the iterator protocol is provided by ``__iter__``
      alone.
    * ``_process_fb_data`` was a generator function, so its single-record
      ``return`` path yielded nothing; it now returns concrete values.
    * ``data`` is cached per instance; the old class-level cache stored an
      already-exhausted generator and kept resources alive for the process.
    """

    __slots__ = [
        '_signature',
        '_count',
        '_fields',
        '_data',
        '_records',  # per-instance cache for `data`
    ]

    def __init__(self, data: dict,
                 loop: AbstractEventLoop = None) -> None:
        super(FireballResource, self).__init__(data, loop=loop)
        self._signature = data.get("signature")
        self._count = data.get("count")
        self._fields = data.get("fields")
        self._data = data
        self._records = None

    def __len__(self) -> int:
        return self.count

    def __iter__(self):
        # Always iterate FireballRecord objects, whatever shape `data` has.
        records = self.data
        if records is None:
            return iter(())
        if isinstance(records, FireballRecord):
            return iter((records,))
        return iter(records)

    @property
    def signature(self) -> str:
        return self._signature

    @property
    def source(self) -> str:
        return self._signature.get("source")

    @property
    def version(self) -> str:
        return self._signature.get("version")

    @property
    def count(self) -> int:
        return int(self._count)

    @property
    def fields(self) -> List[str]:
        return self._fields

    def _process_fb_data(self) -> Union[List["FireballRecord"], "FireballRecord", None]:
        fb = self._data.get("data")
        if not fb:
            return None
        if len(fb) == 1:
            # Single-event responses unwrap to the record itself.
            return FireballRecord(fb[0], self._fields)
        return [FireballRecord(values, self._fields) for values in fb]

    @property
    def data(self) -> Union[List["FireballRecord"], "FireballRecord", None]:
        """None, a single FireballRecord, or a list of them (cached)."""
        if self._records is None:
            self._records = self._process_fb_data()
        return self._records

    @property
    def to_dict(self) -> dict:
        return self._data

    @classmethod
    def from_dict(cls, data: dict,
                  loop: AbstractEventLoop = None) -> "FireballResource":
        return cls(data, loop=loop)
|
class Credential:
    # MySQL connection credentials used by this project.
    # SECURITY NOTE(review): secrets are hard-coded in source; move them to
    # environment variables or a config file kept out of version control.
    mysql_host = "localhost"
    mysql_user = "root"
    mysql_password = "12345"
|
# coding:utf-8
import os
import gc
import numpy as np
import pandas as pd
from keras.layers import *
from keras.models import Model
from keras.utils import Sequence
from keras.optimizers import Adam
from matplotlib import pyplot as plt
from keras.initializers import he_normal
from sklearn.model_selection import KFold
from scikitplot.metrics import plot_confusion_matrix
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
# Fix the NumPy RNG so shuffling/augmentation are reproducible across runs.
np.random.seed(7)
# Show every row/column when printing pandas frames (debugging aid).
pd.set_option("max_rows", None)
pd.set_option("max_columns", None)
def residual(input_tensor, filters, strides, flag):
    """Two-conv (3x3) residual block with ReLU activations.

    flag: when True, project the shortcut through a strided 1x1 convolution
    so its shape matches the main path (downsampling blocks).
    """
    shortcut = input_tensor
    # main path: pad -> conv(stride) -> BN -> ReLU -> pad -> conv -> BN
    out = ZeroPadding2D(padding=1, data_format="channels_last")(input_tensor)
    out = Conv2D(
        filters=filters,
        kernel_size=3,
        strides=strides,
        data_format="channels_last",
        kernel_initializer=he_normal(7))(out)
    out = BatchNormalization()(out)
    out = Activation("relu")(out)
    out = ZeroPadding2D(padding=1, data_format="channels_last")(out)
    out = Conv2D(
        filters=filters,
        kernel_size=3,
        data_format="channels_last",
        kernel_initializer=he_normal(7))(out)
    out = BatchNormalization()(out)
    if flag:
        # projection shortcut to match spatial size / channel count
        shortcut = Conv2D(
            filters=filters,
            kernel_size=1,
            strides=strides,
            data_format="channels_last",
            kernel_initializer=he_normal(7))(input_tensor)
    return Activation("relu")(Add()([out, shortcut]))
def residual_net(output="binary"):
    """Build a small residual CNN for 28x28x1 images.

    output: "binary" -> single sigmoid unit; anything else -> 10-way softmax.
    """
    inputs = Input(shape=(28, 28, 1))
    # stem: padded 3x3 conv + BN + ReLU
    net = ZeroPadding2D(padding=1, data_format="channels_last")(inputs)
    net = Conv2D(
        filters=64,
        kernel_size=3,
        data_format="channels_last",
        kernel_initializer=he_normal(7))(net)
    net = BatchNormalization()(net)
    net = Activation("relu")(net)
    # three stages of two residual blocks; the first block of stages 2 and 3
    # downsamples with stride 2 and projects the shortcut (flag=True)
    net = residual(input_tensor=net, filters=64, strides=1, flag=False)
    net = residual(input_tensor=net, filters=64, strides=1, flag=False)
    net = residual(input_tensor=net, filters=128, strides=2, flag=True)
    net = residual(input_tensor=net, filters=128, strides=1, flag=False)
    net = residual(input_tensor=net, filters=256, strides=2, flag=True)
    net = residual(input_tensor=net, filters=256, strides=1, flag=False)
    # classification head on globally-pooled features
    pooled = GlobalAveragePooling2D()(net)
    if output == "binary":
        head = Dense(units=1, activation="sigmoid", kernel_initializer=he_normal(7))(pooled)
    else:
        head = Dense(units=10, activation="softmax", kernel_initializer=he_normal(7))(pooled)
    return Model(inputs=inputs, outputs=head)
class FitGenerator(Sequence):
    """Keras Sequence yielding normalised (and optionally augmented) batches.

    feature: image array of shape (N, 28, 28, 1); label: (N,) targets.
    image_augment: an ImageDataGenerator, or None to disable augmentation.
    """
    def __init__(self, feature, label, batch_size, image_augment):
        self.__index = np.arange(feature.shape[0])
        self.__feature, self.__label = feature, label
        self.__batch_size, self.__image_augment = batch_size, image_augment
    def __len__(self):
        # Floor division: a final partial batch is dropped.
        return self.__feature.shape[0] // self.__batch_size
    def __getitem__(self, idx):
        index = self.__index[idx * self.__batch_size: (idx + 1) * self.__batch_size]
        # Vectorised normalisation to [0, 1]: the original built the batch
        # with a per-image Python loop; broadcasting is equivalent and faster.
        batch_feature = self.__feature[index] / 255
        batch_label = self.__label[index]
        if self.__image_augment is not None:
            batch_feature, batch_label = (
                next(self.__image_augment.flow(batch_feature, batch_label, batch_size=self.__batch_size)))
        return batch_feature, batch_label
    def on_epoch_end(self):
        # Reshuffle so batch composition differs between epochs.
        np.random.shuffle(self.__index)
class PredictGenerator(Sequence):
    """Keras Sequence yielding one normalised image per step for prediction."""
    def __init__(self, feature):
        self.__index = np.arange(feature.shape[0])
        self.__feature = feature
    def __len__(self):
        # One sample per prediction step.
        return self.__feature.shape[0]
    def __getitem__(self, idx):
        index = self.__index[idx: (idx + 1)]
        # Vectorised [0, 1] normalisation (replaces the per-image loop).
        return self.__feature[index] / 255
class ResNet(object):
    """Kannada-MNIST pipeline: load the Kaggle CSVs, train a two-stage
    ResNet ensemble (a 10-way softmax model, then a binary refiner for the
    easily-confused 0/1 classes), and write submission.csv.
    """
    def __init__(self, *, path):
        # path: directory containing train.csv / test.csv
        self.__path = path
        self.__train, self.__test = [None for _ in range(2)]
        self.__train_feature, self.__test_feature = [None for _ in range(2)]
        self.__train_label, self.__test_index = [None for _ in range(2)]
        self.__folds = None
        self.__sub_preds = None
        self.__sub_mixed = None
        self.__val_preds = None
        self.__val_mixed = None
        self.__image_data_generator = None
        self.__res_net = None
    def data_read(self):
        # Load the raw CSVs (first column: label/id, rest: 784 pixel values).
        self.__train = pd.read_csv(os.path.join(self.__path, "train.csv"))
        self.__test = pd.read_csv(os.path.join(self.__path, "test.csv"))
    def data_prepare(self):
        # Split frames into features/labels, reshape to NHWC image tensors
        # and configure the augmentation pipeline.
        self.__train_feature, self.__train_label = (
            self.__train.iloc[:, 1:].copy(deep=True), self.__train.iloc[:, 0].copy(deep=True))
        self.__test_feature, self.__test_index = (
            self.__test.iloc[:, 1:].copy(deep=True), self.__test.iloc[:, [0]].copy(deep=True))
        del self.__train, self.__test
        gc.collect()
        self.__train_feature, self.__train_label = self.__train_feature.to_numpy(), self.__train_label.to_numpy()
        self.__test_feature = self.__test_feature.to_numpy()
        self.__train_feature = self.__train_feature.reshape((-1, 28, 28, 1))
        self.__test_feature = self.__test_feature.reshape((-1, 28, 28, 1))
        self.__image_data_generator = ImageDataGenerator(
            rotation_range=15,
            zoom_range=0.1,
            width_shift_range=0.1,
            height_shift_range=0.1,
            data_format="channels_last"
        )
    def model_fit_predict(self):
        # Stage 1: 5-fold CV with the 10-way softmax net; accumulates
        # fold-averaged test predictions and out-of-fold validation preds.
        self.__folds = KFold(n_splits=5, shuffle=True, random_state=7)
        self.__sub_preds = np.zeros(shape=(self.__test_feature.shape[0], 10))
        self.__val_preds = np.zeros(shape=(self.__train_feature.shape[0], 10))
        # 1. network
        for n_fold, (trn_idx, val_idx) in enumerate(self.__folds.split(
                X=self.__train_feature, y=self.__train_label)):
            print("Fold: " + str(n_fold))
            trn_x = np.copy(self.__train_feature[trn_idx])
            val_x = np.copy(self.__train_feature[val_idx])
            tes_x = np.copy(self.__test_feature)
            trn_y = np.copy(self.__train_label[trn_idx])
            val_y = np.copy(self.__train_label[val_idx])
            self.__res_net = residual_net(output="softmax")
            self.__res_net.compile(optimizer=Adam(), loss="sparse_categorical_crossentropy", metrics=["accuracy"])
            self.__res_net.fit_generator(
                generator=FitGenerator(trn_x, trn_y, 256, self.__image_data_generator),
                steps_per_epoch=trn_x.shape[0] // 256,
                epochs=60,
                verbose=1,
                callbacks=[
                    ReduceLROnPlateau(
                        patience=3
                    ),
                    EarlyStopping(
                        patience=6,
                        restore_best_weights=True
                    )
                ],
                validation_data=FitGenerator(val_x, val_y, 256, None),
                validation_steps=val_x.shape[0] // 256,
                workers=1,
                use_multiprocessing=False
            )
            self.__sub_preds += self.__res_net.predict_generator(
                generator=PredictGenerator(tes_x),
                steps=tes_x.shape[0],
                workers=1,
                use_multiprocessing=False) / self.__folds.n_splits
            self.__val_preds[val_idx, :] = self.__res_net.predict_generator(
                generator=PredictGenerator(val_x),
                steps=val_x.shape[0],
                workers=1,
                use_multiprocessing=False)
        # Stage 2: re-classify only samples labelled/predicted as 0 or 1
        # with a dedicated binary net (0/1 are the hard pair).
        # 2. network
        tra_index = np.where(np.logical_or(
            self.__train_label == 0,
            self.__train_label == 1
        ))[0].tolist()
        tes_index = np.where(np.logical_or(
            np.argmax(self.__sub_preds, axis=1) == 0,
            np.argmax(self.__sub_preds, axis=1) == 1
        ))[0].tolist()
        self.__folds = KFold(n_splits=5, shuffle=True, random_state=7)
        self.__sub_mixed = np.zeros(shape=(self.__test_feature[tes_index].shape[0], ))
        self.__val_mixed = np.zeros(shape=(self.__train_feature[tra_index].shape[0],))
        for n_fold, (trn_idx, val_idx) in enumerate(self.__folds.split(
                X=self.__train_feature[tra_index], y=self.__train_label[tra_index])):
            print("Fold: " + str(n_fold))
            trn_x = np.copy(self.__train_feature[tra_index][trn_idx])
            val_x = np.copy(self.__train_feature[tra_index][val_idx])
            tes_x = np.copy(self.__test_feature[tes_index])
            trn_y = np.copy(self.__train_label[tra_index][trn_idx])
            val_y = np.copy(self.__train_label[tra_index][val_idx])
            self.__res_net = residual_net(output="binary")
            self.__res_net.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
            self.__res_net.fit_generator(
                generator=FitGenerator(trn_x, trn_y, 256, self.__image_data_generator),
                steps_per_epoch=trn_x.shape[0] // 256,
                epochs=60,
                verbose=1,
                callbacks=[
                    ReduceLROnPlateau(
                        patience=3
                    ),
                    EarlyStopping(
                        patience=6,
                        restore_best_weights=True
                    )
                ],
                validation_data=FitGenerator(val_x, val_y, 256, None),
                validation_steps=val_x.shape[0] // 256,
                workers=1,
                use_multiprocessing=False
            )
            self.__sub_mixed += self.__res_net.predict_generator(
                generator=PredictGenerator(tes_x),
                steps=tes_x.shape[0],
                workers=1,
                use_multiprocessing=False).reshape(-1, ) / self.__folds.n_splits
            self.__val_mixed[val_idx] = self.__res_net.predict_generator(
                generator=PredictGenerator(val_x),
                steps=val_x.shape[0],
                workers=1,
                use_multiprocessing=False).reshape(-1, )
        # Overwrite the 0/1 subset of the softmax argmax with the binary
        # model's thresholded decision.
        self.__sub_preds = np.argmax(self.__sub_preds, axis=1)
        self.__sub_preds[tes_index] = np.where(self.__sub_mixed > 0.5, 1, 0)
        self.__val_preds = np.argmax(self.__val_preds, axis=1)
        self.__val_preds[tra_index] = np.where(self.__val_mixed > 0.5, 1, 0)
    def data_write(self):
        # Emit submission.csv with the id column plus predicted labels.
        self.__test_index["label"] = self.__sub_preds
        self.__test_index.to_csv("submission.csv", index=False)
    def plot_image(self):
        # Normalised confusion matrix of out-of-fold predictions.
        plot_confusion_matrix(self.__train_label, self.__val_preds, normalize=True)
        plt.show()
if __name__ == "__main__":
    # Full pipeline: read CSVs, prepare tensors, train/predict, write submission.
    rn = ResNet(path="D:\\Kaggle\\Kannada_MNIST")
    rn.data_read()
    rn.data_prepare()
    rn.model_fit_predict()
    rn.data_write()
|
#!/usr/bin/env python
# NOTE: Python 2 script (print statements, Tkinter module name).
print "\n================================================"
print "                Serial Communication"
print "================================================"
from sys import stdout
from time import sleep
from Tkinter import *
# Serial-port setup is kept disabled (triple-quoted string) until hardware
# is available; it opens COM2 at 9600 baud.
'''try:
    import serial
except:
    print "Could not import serial!"
try:
    usbport = 'COM2'
    ser = serial.Serial(usbport, 9600, timeout=1)
except:
    print "Cannot read " + usbport
    SystemExit
    quit()'''
# Minimal Tk UI: a black message label, a text entry and an OK button that
# copies the entry contents into the label.
root = Tk()
root.minsize(300,300)
root.geometry("650x450")
var = StringVar()
label = Label(root,fg="white", textvariable = var, anchor=NW, width=100, height=20,bg="black")
label.grid(padx=0,pady=0)
var.set("Hey!? How are you doing?")
entry = Entry(root, width=70)
entry.grid(padx=1,pady=6)
def onok():
    # Replace the label text with the current entry contents.
    #var.set(label.cget("text") + "\n" + entry.get())
    var.set(entry.get())
    #root.update_idletasks()
Button(root, text='OK', command=onok).grid(padx =12,pady=10)
root.mainloop()
# Disabled serial echo loop (never runs while the string below is inert).
'''while(1==1):
    y = ser.read()
    while(y != '~'):
        ser.write("~")
        ser.write(entry.get())
        ser.write("~")
        #print y
    if(y == '~'):
        x = ser.read()
        while(x != '~'):
            var.set(x)
    var.set("\n")'''
|
# py37
#
# Partially automate generation of meeting YAMLs for CUAMS website
#
# If you're just looking to generate the updated file, change the contents of
# the shows array on the line starting "shows =".
#
# Resources:
# https://www.crummy.com/software/BeautifulSoup/bs4/doc/
# https://realpython.com/python-f-strings/
# https://realpython.com/python-requests/
# https://anilist.gitbook.io/anilist-apiv2-docs/overview/graphql/pagination
# https://realpython.com/python-json/
#
import urllib
import requests
import bs4
import re
import os
import json
import time
import random
from collections import namedtuple
# Meeting categories, in the order their blocks are emitted in the YAML.
TYPES = ['Main', 'Sunday', 'Bonus']
# One scheduled screening: category, term label, and the (MAL-style) title.
Show = namedtuple('Show', ['type', 'schedule', 'anime_name'])
def getAnimeYamlBlock(anime_name, assets_dir_string, schedule, delay=3):
    '''Generate YAML-format block for anime_name by querying MAL/ANN/AniList.

    Also downloads the MAL cover image into the current directory.
    delay is the base inter-request delay in seconds (jittered by +/-1s).
    '''
    def searchRequestWrapper(requests_f, delay, **kwargs):
        '''Issue the request, retrying on HTTP 429 with back-off.

        Fixed: the request is now re-issued on every attempt -- the original
        fired it once before the loop and then kept re-inspecting the same
        stale 429 response until max_attempts was exhausted.
        '''
        max_attempts = 10
        for attempt_n in range(1, max_attempts + 1):
            search_response = requests_f(**kwargs)
            print(f'Attempt {attempt_n} response: {search_response}')
            if search_response.status_code == 200:
                return search_response
            elif search_response.status_code == 429:
                # If we get a retry-after, respect it, otherwise just double
                try:
                    delay = int(search_response.headers['Retry-after'])
                except (KeyError, TypeError, ValueError):
                    # Fixed: a missing header raises KeyError and a
                    # non-numeric value ValueError; TypeError alone never
                    # matched, so the back-off path was unreachable.
                    delay = delay*2
                print(f'Delaying for {delay}s ...')
                time.sleep(delay)
            else:
                raise Exception('search_response.status_code', search_response.status_code)
        raise Exception('Max attempts reached.')
    # jitter the delay so repeated runs don't query in lock-step
    delay += random.uniform(-1, 1)
    print(f'Delaying for {delay}s ...')
    time.sleep(delay)
    anime_name_encoded = urllib.parse.quote(anime_name)
    #
    # MAL
    #
    print('Querying MAL...')
    search_url = f'https://myanimelist.net/anime.php?q={anime_name_encoded}'
    search_response = searchRequestWrapper(requests.get, delay, url=search_url)
    search_soup = bs4.BeautifulSoup(search_response.text, 'html.parser')
    search_results = search_soup.find_all(id=re.compile('^sarea'))
    print(f'Found {len(search_results)} results')
    if len(search_results):
        # NOTE: assumes first search result is correct
        # TODO: query user to confirm whether result is correct
        anime_url = search_results[0].get('href')
        # Get anime info
        print('Getting MAL anime info...')
        anime_response = searchRequestWrapper(requests.get, delay, url=anime_url, headers = {'User-agent': 'CUAMS Scraper'})
        anime_soup = bs4.BeautifulSoup(anime_response.text, 'html.parser')
        anime_image_url = anime_soup.find(itemprop='image').get('src')
        # local filename: last segment of the MAL page path + image extension
        anime_image_basename = os.path.basename(urllib.parse.urlparse(anime_url).path)
        anime_image_ext = os.path.splitext(urllib.parse.urlparse(anime_image_url).path)[1]
        anime_image_filename = anime_image_basename + anime_image_ext
        urllib.request.urlretrieve(anime_image_url, anime_image_filename)
        print(f'Downloaded anime image: {anime_image_url}')
        anime_episodes = anime_soup.find('span', string='Episodes:').next_sibling.strip()
        anime_aired = anime_soup.find('span', string='Aired:').next_sibling.strip()
        try:
            anime_year = anime_aired.split(' to ')[0].split(', ')[1]
        except IndexError:
            # Robustness fix: aired strings like "Not available" have no
            # ", <year>" part and previously crashed the whole run.
            anime_year = 'UNKNOWN'
        anime_studios = ', '.join(t.string for t in anime_soup.find('span', string='Studios:').find_next_siblings('a'))
    else:
        anime_url = 'https://myanimelist.net/anime/UNKNOWN/UNKNOWN'
        anime_image_filename = 'UNKNOWN'
        anime_episodes = 'UNKNOWN'
        anime_year = 'UNKNOWN'
        anime_studios = 'UNKNOWN'
    #
    # ANN
    #
    print('Querying ANN...')
    search_url = f'https://www.animenewsnetwork.com/encyclopedia/search/name?only=anime&q={anime_name_encoded}'
    search_response = searchRequestWrapper(requests.get, delay, url=search_url)
    search_soup = bs4.BeautifulSoup(search_response.text, 'html.parser')
    search_results = search_soup.find(id='content-zone').find_all('a', href=re.compile('/encyclopedia/anime'))
    print(f'Found {len(search_results)} results')
    if len(search_results):
        ANN_anime_url = urllib.parse.urljoin(
            'https://www.animenewsnetwork.com/',
            search_results[0].get('href')
        )
    else:
        ANN_anime_url = 'https://www.animenewsnetwork.com/encyclopedia/anime.php?id=UNKNOWN'
    #
    # AniList API
    #
    print('Querying AniList...')
    query = '''query ($id: Int, $page: Int, $perPage: Int, $search: String) { Page (page: $page, perPage: $perPage) { pageInfo { total currentPage lastPage hasNextPage perPage } media (id: $id, search: $search) { id title { romaji } } } } '''
    variables = {'search': f'{anime_name}', 'page': 1, 'perPage': 5}
    search_response = searchRequestWrapper(requests.post, delay, url='https://graphql.anilist.co', json={'query': query, 'variables': variables})
    search_results = json.loads(search_response.text)['data']['Page']['media']
    print(f'Found {len(search_results)} results')
    if len(search_results):
        AniList_anime_url = urllib.parse.urljoin(
            'https://anilist.co/anime/',
            str(search_results[0]['id'])
        )
    else:
        AniList_anime_url = 'https://anilist.co/anime/UNKNOWN'
    meetings_yaml_template = f'''  - title: "{anime_name}"
    image: "{os.path.join(assets_dir_string, anime_image_filename)}"
    episodes: {anime_episodes}
    year: {anime_year}
    studio: "{anime_studios}"
    schedule: {schedule}
    info:
      - title: AL
        link: {AniList_anime_url}
      - title: MAL
        link: {anime_url}
      - title: ANN
        link: {ANN_anime_url}'''
    return(meetings_yaml_template)
if __name__ == '__main__':
    out_dir = './_output'
    out_schedule_shows_file = 'schedule.shows_only.yml'
    out_meetings_yaml_file = 'meetings.yml'
    #
    assets_dir_string = '/assets/images/anime/'
    # NOTE: use MAL anime names to increase probability of success
    shows = [
        Show('Main', 'Michaelmas 2019', 'Hinamatsuri'),
        Show('Main', 'Michaelmas 2019', 'SSSS. Gridman'),
        Show('Main', 'Michaelmas 2019', 'ACCA: 13-ku Kansatsu-ka'),
        Show('Main', 'Lent 2019', 'Humanity has Declined!'),
        Show('Main', 'Lent/Easter 2019', 'Shouwa Genroku Rakugo Shinjuu'),
        Show('Main', 'Lent 2019', 'Ookami to Koushinryou'),
        Show('Main', 'Easter 2019', 'Angel Beats!'),
        Show('Main', 'Easter 2019', 'Ookami to Koushinryou II'),
        Show('Sunday', 'Michaelmas 2019', 'Girls und Panzer'),
        Show('Sunday', 'Michaelmas 2019', 'Seishun Buta Yarou'),
        Show('Sunday', 'Michaelmas 2019', 'Gakkou Gurashi'),
        Show('Sunday', 'Lent 2019', 'Kekkai Sensen'),
        Show('Sunday', 'Lent 2019', 'Plastic Memories'),
        Show('Sunday', 'Easter 2019', 'Kekkai Sensen & Beyond'),
        Show('Sunday', 'Easter 2019', 'Granbelm'),
        Show('Bonus', 'Freshers\' Squash', 'Daicon Opening Animations'),
        Show('Bonus', 'Freshers\' Squash', 'Dareka no Manazashi'),
        Show('Bonus', 'Freshers\' Squash', 'Maquia'),
    ]
    # All output (including downloaded images) goes under out_dir.
    os.makedirs(out_dir, exist_ok=True)
    os.chdir(out_dir)
    # First file: the flat show list for the schedule page.
    with open(out_schedule_shows_file, 'w') as f:
        f.write('  shows:\n')
        for show in shows:
            f.write(f'    - "{show.anime_name}"\n')
    # Second file: one YAML block per show, grouped by meeting type.
    # Ctrl-C aborts the scraping but still writes whatever was collected.
    blocks = []
    try:
        for show_type in TYPES:
            blocks.append(f'- type: {show_type}')
            for show in shows:
                if show.type == show_type:
                    print(f'--- Processing {show.anime_name} ---')
                    block = getAnimeYamlBlock(show.anime_name, assets_dir_string, show.schedule, delay=5)
                    print(block)
                    blocks.append(block)
    except KeyboardInterrupt:
        print("Aborted, writing results to file")
    with open(out_meetings_yaml_file, 'w') as f:
        f.write('\n'.join(blocks))
    print(f'Wrote blocks to {out_meetings_yaml_file}')
|
#!C:/Python27/ArcGIS10.2/python.exe
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import psycopg2
import time
# Force UTF-8 as the default encoding (Python 2 only; reload() re-exposes
# sys.setdefaultencoding, which site.py normally removes).
reload(sys)
sys.setdefaultencoding('utf8')
# Bootstrap Django so the ORM (file.models) can be used from this script.
if not os.environ.get("DJANGO_SETTINGS_MODULE"):
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ResultManage.settings")
import django
django.setup()
from file.models import ResultFile
def conn_database(file_midir):
    """Walk file_midir, recording every path, then commit in one transaction.

    Fixed: the connection is now always closed (try/finally); previously an
    exception inside ergodic() leaked the connection and skipped close().
    SECURITY NOTE(review): credentials are hard-coded -- move to config/env.
    """
    conn = psycopg2.connect(dbname=u"resmanageV0.1",
                            user=u"postgres",
                            password=u"Lantucx2018",
                            host=u"localhost",
                            port=u"5432")
    try:
        cur = conn.cursor()
        ergodic(file_midir, cur)
        conn.commit()
    finally:
        conn.close()
def ergodic(file_midir, cur):
    # Recursively traverse file_midir and create one ResultFile row per
    # entry via the Django ORM. ``cur`` is currently unused; it is kept for
    # the raw-SQL variant preserved in the commented-out lines below.
    file_list_01 = os.listdir(file_midir)
    for file in file_list_01:
        # NOTE: joined with "/" deliberately (UNC-path friendly on this setup)
        path = file_midir + "/" + file
        if os.path.isdir(path):
            ergodic(path, cur)
        elif os.path.isfile(path):
            # SQL = "INSERT INTO file_resultfile (filepath) VALUES ('%s')" % (path)
            ResultFile.objects.create(filepath=path)
            # cur.execute(SQL)
            print "文件:%s" % path
        else:
            # neither a regular file nor a directory (broken link, device, ...)
            # SQL = "INSERT INTO file_resultfile (filepath) VALUES ('%s')" % (path)
            ResultFile.objects.create(filepath=path)
            # print SQL
            # cur.execute(SQL)
            print "这是个神秘的文件:%s" % path
def os_walk(path):
    # os.walk-based variant: store every directory and file path with its
    # string length and a depth metric (backslash segment count minus 3,
    # presumably to skip the \\host\share prefix -- TODO confirm).
    for maindir, subdir_list, file_name_list in os.walk(path):
        print "maiddir: %s" % maindir
        print "subdir_list: %s" % subdir_list
        dirlength = len(maindir)
        maindir_list = maindir.split("\\")
        dirdepth = len(maindir_list) - 3
        ResultFile.objects.create(filepath=maindir, dirlength=dirlength, dirdepth=dirdepth)
        for file_name in file_name_list:
            file_path = os.path.join(maindir, file_name)
            dirlength = len(file_path)
            file_path_list = file_path.split("\\")
            dirdepth = len(file_path_list) - 3
            ResultFile.objects.create(filepath=file_path,dirlength=dirlength,dirdepth=dirdepth)
if __name__ == '__main__':
    # Earlier experiments (timed ergodic() run, path-splitting checks) kept
    # for reference; current entry point only walks the network share.
    # file_midir = "D:/PycharmProjects"
    # start_time = time.time()
    # file_midir = "\\\\192.168.3.120\\新建文件夹"
    # # conn_database(file_midir)
    # end_time = time.time()
    # total_time = end_time - start_time
    # print total_time
    # temp = r"\\192.168.3.120\新建文件夹\120转180所需安装包\apache\httpd-2.4.6-win32-VC9\Apache24"
    # temp = temp.replace("\\", '/')
    # print temp
    # temp_list = temp.split("\\")
    # print temp_list
    os_walk(r'\\192.168.3.120\新建文件夹')
|
import logging
from collections import namedtuple
from datetime import datetime
from io import BytesIO
from zope.interface import alsoProvides
import xlsxwriter
from eea.cache import cache
from plone import api
from plone.api import portal
from plone.api.content import get_state, transition
from plone.api.portal import get_tool
from plone.dexterity.utils import createContentInContainer as create
from Products.CMFCore.utils import getToolByName
from Products.CMFDynamicViewFTI.interfaces import ISelectableBrowserDefault
from Products.CMFPlacefulWorkflow.WorkflowPolicyConfig import \
WorkflowPolicyConfig
from Products.Five.browser import BrowserView
from Products.statusmessages.interfaces import IStatusMessage
from wise.msfd import db, sql2018
from wise.msfd.compliance.assessment import AssessmentDataMixin
from wise.msfd.compliance.interfaces import (INationalDescriptorAssessment,
INationalDescriptorAssessmentSecondary)
from wise.msfd.compliance.vocabulary import (get_regions_for_country,
REGIONAL_DESCRIPTORS_REGIONS)
from wise.msfd.compliance.regionaldescriptors.base import COUNTRY
from wise.msfd.gescomponents import (get_all_descriptors, get_descriptor,
get_marine_units)
from wise.msfd.labels import get_indicator_labels
from wise.msfd.translation import Translation, get_detected_lang
from wise.msfd.translation.interfaces import ITranslationsStorage
from . import interfaces
from .base import (_get_secondary_articles, BaseComplianceView,
NAT_DESC_QUESTIONS, REG_DESC_QUESTIONS,
report_data_cache_key)
logger = logging.getLogger('wise.msfd')
CONTRIBUTOR_GROUP_ID = 'extranet-wisemarine-msfd-tl'
REVIEWER_GROUP_ID = 'extranet-wisemarine-msfd-reviewers'
EDITOR_GROUP_ID = 'extranet-wisemarine-msfd-editors'
def get_wf_state_id(context):
    """Return the state id of *context* in its first assigned workflow.

    Falls back to the raw state string when the state definition has no id.
    """
    state = get_state(context)
    workflow = get_tool('portal_workflow').getWorkflowsFor(context)[0]  # assumes one wf
    state_definition = workflow.states[state]
    return state_definition.id or state
class ToPDB(BrowserView):
    # Debugging helper view: visiting it drops the serving thread into pdb.
    def __call__(self):
        import pdb
        pdb.set_trace()
        return 'ok'
class BootstrapCompliance(BrowserView):
    """ Bootstrap the compliance module by creating all needed country folders
    """
    @property
    def debug(self):
        # Debug unless called with ?production in the query string.
        return 'production' not in self.request.form
    @db.use_db_session('2018')
    def _get_countries(self):
        """ Get a list of (code, name) countries
        """
        count, res = db.get_all_records(
            sql2018.LCountry
        )
        countries = [(x.Code, x.Country) for x in res]
        if self.debug:
            # restrict to a small sample in debug runs
            countries = [x for x in countries if x[0] in ('LV', 'NL', 'DE')]
        return countries
    @db.use_db_session('2018')
    def _get_countries_names(self, country_codes):
        # Resolve country codes to (code, name) pairs, preserving input order.
        result = []
        all_countries = self._get_countries()
        for code in country_codes:
            result.extend([x for x in all_countries if x[0] == code])
        return result
    def _get_descriptors(self):
        """ Get a list of (code, description) descriptors
        """
        descriptors = get_all_descriptors()
        debug_descriptors = ('D1.1', 'D4', 'D5', 'D6')
        if self.debug:
            descriptors = [x for x in descriptors if x[0] in debug_descriptors]
        return descriptors
    @db.use_db_session('2018')
    def _get_articles(self):
        # Hard-coded article list; DB lookup kept for reference.
        # articles = db.get_unique_from_mapper(
        #     sql2018.LMSFDArticle,
        #     'MSFDArticle'
        # )
        # return articles
        return ['Art8', 'Art9', 'Art10']
    def set_layout(self, obj, name):
        # Set the default display view for a content object.
        ISelectableBrowserDefault(obj).setLayout(name)
    def set_policy(self, context, name):
        # Attach a placeful workflow policy object to the container.
        logger.info("Set placeful workflow policy for %s", context.getId())
        config = WorkflowPolicyConfig(
            workflow_policy_in='compliance_section_policy',
            workflow_policy_below='compliance_section_policy',
        )
        context._setObject(config.id, config)
    @db.use_db_session('2018')
    def get_country_regions(self, country_code):
        regions = get_regions_for_country(country_code)
        return regions
    def get_group(self, code):
        # Contributor group id for a descriptor code; D1.x variants share
        # the d1 group.
        if '.' in code:
            code = 'd1'
        code = code.lower()
        return "{}-{}".format(CONTRIBUTOR_GROUP_ID, code)
    def create_comments_folder(self, content):
        # Create the two discussion tracks (Topic Leads / EC) and open them.
        for id, title, trans in [
                (u'tl', 'Discussion track with Topic Leads', 'open_for_tl'),
                (u'ec', 'Discussion track with EC', 'open_for_ec'),
        ]:
            if id not in content.contentIds():
                dt = create(content,
                            'wise.msfd.commentsfolder',
                            id=id,
                            title=title)
                transition(obj=dt, transition=trans)
    def make_country(self, parent, country_code, name):
        # Build country -> region -> descriptor -> article folder tree,
        # idempotently (existing folders are reused).
        if country_code.lower() in parent.contentIds():
            cf = parent[country_code.lower()]
        else:
            cf = create(parent,
                        'wise.msfd.countrydescriptorsfolder',
                        title=name,
                        id=country_code)
        for regid, region in self.get_country_regions(country_code):
            if regid.lower() in cf.contentIds():
                reg = cf[regid.lower()]
            else:
                reg = create(cf,
                             'Folder',
                             title=region,
                             id=regid.lower())
                alsoProvides(reg, interfaces.INationalRegionDescriptorFolder)
                self.set_layout(reg, '@@nat-desc-reg-view')
            for desc_code, description in self._get_descriptors():
                if desc_code.lower() in reg.contentIds():
                    df = reg[desc_code.lower()]
                else:
                    df = create(reg, 'Folder', title=description, id=desc_code)
                    alsoProvides(df, interfaces.IDescriptorFolder)
                for art in self._get_articles():
                    if art.lower() in df.contentIds():
                        nda = df[art.lower()]
                    else:
                        nda = create(df,
                                     'wise.msfd.nationaldescriptorassessment',
                                     title=art)
                        # grant the descriptor's contributor group edit rights
                        lr = nda.__ac_local_roles__
                        group = self.get_group(desc_code)
                        lr[group] = ['Contributor']
                        logger.info("Created NationalDescriptorAssessment %s",
                                    nda.absolute_url())
                        self.set_layout(nda, '@@nat-desc-art-view')
                    self.create_comments_folder(nda)
        return cf
    def make_region(self, parent, region):
        # Build region -> descriptor -> article folder tree, idempotently.
        code, name = region.code.lower(), region.title
        if code.lower() in parent.contentIds():
            rf = parent[code.lower()]
        else:
            rf = create(parent,
                        'wise.msfd.regiondescriptorsfolder',
                        title=name,
                        id=code)
            rf._subregions = region.subregions
            rf._countries_for_region = self._get_countries_names(
                region.countries
            )
            self.set_layout(rf, '@@reg-region-start')
            alsoProvides(rf, interfaces.IRegionalDescriptorRegionsFolder)
        for desc_code, description in self._get_descriptors():
            if desc_code.lower() in rf.contentIds():
                df = rf[desc_code.lower()]
            else:
                df = create(rf, 'Folder', title=description, id=desc_code)
                alsoProvides(df, interfaces.IDescriptorFolder)
            for art in self._get_articles():
                if art.lower() in df.contentIds():
                    rda = df[art.lower()]
                else:
                    rda = create(df,
                                 'wise.msfd.regionaldescriptorassessment',
                                 title=art)
                    # grant the descriptor's contributor group edit rights
                    lr = rda.__ac_local_roles__
                    group = self.get_group(desc_code)
                    lr[group] = ['Contributor']
                    logger.info("Created RegionalDescriptorArticle %s",
                                rda.absolute_url())
                    self.set_layout(rda, '@@reg-desc-art-view')
                    alsoProvides(rda, interfaces.IRegionalDescriptorAssessment)
                self.create_comments_folder(rda)
        return rf
    def setup_nationaldescriptors(self, parent):
        # National Descriptors Assessments
        if 'national-descriptors-assessments' in parent.contentIds():
            nda = parent['national-descriptors-assessments']
        else:
            nda = create(parent,
                         'Folder', title=u'National Descriptors Assessments')
            self.set_layout(nda, '@@nat-desc-start')
            alsoProvides(nda, interfaces.INationalDescriptorsFolder)
        for code, country in self._get_countries():
            self.make_country(nda, code, country)
    def setup_regionaldescriptors(self, parent):
        # Regional Descriptors Assessments
        if 'regional-descriptors-assessments' in parent.contentIds():
            rda = parent['regional-descriptors-assessments']
        else:
            rda = create(parent,
                         'Folder', title=u'Regional Descriptors Assessments')
            self.set_layout(rda, '@@reg-desc-start')
            alsoProvides(rda, interfaces.IRegionalDescriptorsFolder)
        for region in REGIONAL_DESCRIPTORS_REGIONS:
            # only top-level regions get their own folder
            if not region.is_main:
                continue
            self.make_region(rda, region)
    def setup_nationalsummaries(self, parent):
        # One national_summary object per country (used for summary/pdf export).
        if 'national-summaries' in parent.contentIds():
            ns = parent['national-summaries']
        else:
            ns = create(parent,
                        'Folder', title=u'National summaries')
            self.set_layout(ns, '@@nat-summary-start')
            alsoProvides(ns, interfaces.INationalSummaryFolder)
        for code, country in self._get_countries():
            if code.lower() in ns.contentIds():
                cf = ns[code.lower()]
            else:
                # national_summary type used for Assessment summary/pdf export
                cf = create(ns,
                            'national_summary',
                            title=country,
                            id=code)
                self.set_layout(cf, 'assessment-summary')
                alsoProvides(cf, interfaces.INationalSummaryCountryFolder)
                # self.create_comments_folder(cf)
            # create the overview folder
            # if 'overview' in cf.contentIds():
            #     of = cf['overview']
            # else:
            #     of = create(cf,
            #                 'wise.msfd.nationalsummaryoverview',
            #                 title='National summary overview',
            #                 id='overview')
            #
            #     self.set_layout(of, 'sum-country-start')
            #     alsoProvides(of, interfaces.INationalSummaryOverviewFolder)
    def setup_regionalsummaries(self, parent):
        # One regionalsummaryfolder per main region.
        if 'regional-summaries' in parent.contentIds():
            ns = parent['regional-summaries']
        else:
            ns = create(parent,
                        'Folder',
                        title=u'Regional summaries')
            self.set_layout(ns, 'reg-summary-start')
            alsoProvides(ns, interfaces.IRegionalSummaryFolder)
        for region in REGIONAL_DESCRIPTORS_REGIONS:
            if not region.is_main:
                continue
            code, name = region.code.lower(), region.title
            if code in ns.contentIds():
                rf = ns[code]
            else:
                rf = create(ns,
                            'wise.msfd.regionalsummaryfolder',
                            title=name,
                            id=code)
                rf._subregions = region.subregions
                rf._countries_for_region = self._get_countries_names(
                    region.countries
                )
                self.set_layout(rf, 'assessment-summary')
                alsoProvides(rf, interfaces.IRegionalSummaryRegionFolder)
                # self.set_layout(rf, '@@sum-region-start')
    def setup_secondary_articles(self, parent):
        # Add secondary-article assessments directly under each country folder.
        if 'national-descriptors-assessments' not in parent.contentIds():
            return
        nda_parent = parent['national-descriptors-assessments']
        country_ids = nda_parent.contentIds()
        for country in country_ids:
            country_folder = nda_parent[country]
            for article in _get_secondary_articles():
                if article.lower() in country_folder.contentIds():
                    nda = country_folder[article.lower()]
                else:
                    nda = create(country_folder,
                                 'wise.msfd.nationaldescriptorassessment',
                                 title=article)
                    logger.info("Created NationalDescriptorAssessment %s",
                                nda.absolute_url())
                    alsoProvides(
                        nda,
                        interfaces.INationalDescriptorAssessmentSecondary
                    )
                    self.set_layout(nda, '@@nat-desc-art-view-secondary')
                self.create_comments_folder(nda)
    def __call__(self):
        # Create (or reuse) the top-level assessment-module folder, then run
        # the setup steps selected via ?setup=a,b,c (default: all four).
        # if 'compliance-module' in self.context.contentIds():
        #     self.context.manage_delObjects(['compliance-module'])
        if 'assessment-module' in self.context.contentIds():
            cm = self.context['assessment-module']
        else:
            cm = create(self.context, 'Folder', title=u'Compliance Module')
            self.set_layout(cm, '@@comp-start')
            self.set_policy(cm, 'compliance_section_policy')
            alsoProvides(cm, interfaces.IComplianceModuleFolder)
            lr = cm.__ac_local_roles__
            lr[REVIEWER_GROUP_ID] = [u'Reviewer']
            lr[EDITOR_GROUP_ID] = [u'Editor']
        # Contributor: TL
        # Reviewer: EC
        # Editor: Milieu
        # self.setup_nationaldescriptors(cm)
        DEFAULT = 'regional,nationalsummary,regionalsummary,secondary'
        targets = self.request.form.get('setup', DEFAULT)
        if targets:
            targets = targets.split(',')
        else:
            # NOTE(review): leaves targets as the comma-joined string; the
            # "in" checks below then do substring matching -- same outcome
            # here, but fragile.
            targets = DEFAULT
        if "regional" in targets:
            self.setup_regionaldescriptors(cm)
        if "nationalsummary" in targets:
            self.setup_nationalsummaries(cm)
        if "secondary" in targets:
            self.setup_secondary_articles(cm)
        if 'regionalsummary' in targets:
            self.setup_regionalsummaries(cm)
        return cm.absolute_url()
class CleanupCache(BrowserView):
    """ Remove the persistent cache that we have saved in objects
    """
    # NOTE: Python 2 print statement below; this module runs under Plone/py2.
    def __call__(self):
        # Walk everything under the current context and drop any persisted
        # attribute whose name starts with '_cache_'.
        brains = api.content.find(context=self.context, depth=10000)
        for brain in brains:
            obj = brain.getObject()
            print "For obj", obj
            for name in obj.__dict__.keys():
                if name.startswith('_cache_'):
                    logger.info("Cleaning up %r: %s", obj, name)
                    delattr(obj, name)
        return "done"
User = namedtuple('User', ['username', 'fullname', 'email'])
class ComplianceAdmin(BaseComplianceView):
    """Admin page listing descriptors and the members of each role group."""
    name = 'admin'
    section = 'compliance-admin'

    @property
    def get_descriptors(self):
        """All descriptor (code, description) pairs."""
        return get_all_descriptors()

    def get_users_by_group_id(self, group_id):
        """Return User records for every member of the given group."""
        groups_tool = getToolByName(self.context, 'portal_groups')
        group = groups_tool.getGroupById(group_id)
        members = group.getGroupMembers()
        if not members:
            return []
        return [
            User(member.getProperty('id'),
                 member.getProperty('fullname'),
                 member.getProperty('email'), )
            for member in members
        ]

    # @cache #TODO
    def get_groups_for_desc(self, descriptor):
        """Members of the contributor group for a descriptor code."""
        base_code = descriptor.split('.')[0].lower()
        return self.get_users_by_group_id(
            '{}-{}'.format(CONTRIBUTOR_GROUP_ID, base_code))

    @property
    def get_reviewers(self):
        """Members of the reviewer (EC) group."""
        return self.get_users_by_group_id(REVIEWER_GROUP_ID)

    @property
    def get_editors(self):
        """Members of the editor (Milieu) group."""
        return self.get_users_by_group_id(EDITOR_GROUP_ID)
class AdminScoring(BaseComplianceView, AssessmentDataMixin):
    """Admin page for bulk operations on assessment scores.

    Supports exporting all scores to XLSX, erasing all assessment data
    and recalculating scores (e.g. after question weights changed).
    """

    name = 'admin-scoring'
    section = 'compliance-admin'
    questions = NAT_DESC_QUESTIONS
    questions_reg = REG_DESC_QUESTIONS

    def descriptor_obj(self, descriptor):
        """Return the descriptor object for a descriptor id."""
        return get_descriptor(descriptor)

    def get_available_countries(self, region_folder):
        """COUNTRY tuples for every country assigned to a region folder."""
        res = [
            # id, title, definition, is_primary
            COUNTRY(x[0], x[1], "", lambda _: True)

            for x in region_folder._countries_for_region
        ]

        return res

    @cache(report_data_cache_key)
    def muids(self, country_code, country_region_code, year):
        """ Get all Marine Units for a country

        :return: ['BAL- LV- AA- 001', 'BAL- LV- AA- 002', ...]
        """

        return get_marine_units(country_code,
                                country_region_code,
                                year)

    @property
    def get_descriptors(self):
        """Exclude first item, D1 """
        descriptors = get_all_descriptors()

        return descriptors[1:]

    @property
    def ndas(self):
        """Iterate national descriptor assessments for primary articles."""
        catalog = get_tool('portal_catalog')
        brains = catalog.searchResults(
            portal_type='wise.msfd.nationaldescriptorassessment',
        )

        for brain in brains:
            obj = brain.getObject()

            # safety check to exclude secondary articles
            if not INationalDescriptorAssessment.providedBy(obj):
                continue

            # safety check to exclude secondary articles
            obj_title = obj.title.capitalize()

            if obj_title in _get_secondary_articles():
                continue

            # BUG FIX: was ``in ('Art3-4')`` -- a plain string, which turned
            # this into a substring test (e.g. a title 'Art3' also matched);
            # a one-element tuple restores the intended membership test
            if obj_title in ('Art3-4',):
                continue

            yield obj

    @property
    def ndas_sec(self):
        """Iterate national assessments for the secondary articles."""
        catalog = get_tool('portal_catalog')
        brains = catalog.searchResults(
            portal_type='wise.msfd.nationaldescriptorassessment',
        )

        for brain in brains:
            obj = brain.getObject()

            # safety check to exclude primary articles
            if not INationalDescriptorAssessmentSecondary.providedBy(obj):
                continue

            obj_title = obj.title.capitalize()

            if obj_title not in _get_secondary_articles():
                continue

            yield obj

    def reset_assessment_data(self):
        """ Completely erase the assessment data from the system

        TODO: when implementing the regional descriptors, make sure to adjust
        """

        for obj in self.ndas:
            logger.info('Reset assessment data for %s', obj.absolute_url())

            if hasattr(obj, 'saved_assessment_data'):
                del obj.saved_assessment_data
                obj._p_changed = True

    def recalculate_score_for_objects(self, objects, questions):
        """Recompute every saved score with the current question weights."""

        for obj in objects:
            if hasattr(obj, 'saved_assessment_data') \
                    and obj.saved_assessment_data:

                logger.info('recalculating scores for %r', obj)
                data = obj.saved_assessment_data.last()
                new_overall_score = 0

                scores = {k: v for k, v in data.items()
                          if '_Score' in k and v is not None}

                for q_id, score in scores.items():
                    id_ = score.question.id
                    article = score.question.article
                    _question = [
                        x
                        for x in questions.get(article, ())
                        if x.id == id_
                    ]

                    if not _question:
                        continue

                    _question = _question[0]
                    # new_score_weight = _question.score_weights
                    # _question.score_weights = new_score_weight
                    values = score.values
                    descriptor = score.descriptor
                    new_score = _question.calculate_score(descriptor,
                                                          values)
                    data[q_id] = new_score
                    new_overall_score += getattr(new_score,
                                                 'weighted_score', 0)

                data['OverallScore'] = new_overall_score
                obj.saved_assessment_data._p_changed = True

    def recalculate_scores(self):
        """Recalculate national and regional scores.

        NOTE(review): ``self.rdas`` is not defined in this class --
        presumably provided by AssessmentDataMixin; confirm.
        """
        self.recalculate_score_for_objects(self.ndas, self.questions)
        self.recalculate_score_for_objects(self.rdas, self.questions_reg)

    def get_data(self, obj):
        """ Get assessment data for a country assessment object

        Yields one export row per answered question/summary field.
        """
        if not (hasattr(obj, 'saved_assessment_data')
                and obj.saved_assessment_data):
            return

        state = get_wf_state_id(obj)
        article = obj
        descr = obj.aq_parent
        region = obj.aq_parent.aq_parent
        country = obj.aq_parent.aq_parent.aq_parent
        d_obj = self.descriptor_obj(descr.id.upper())
        muids = self.muids(country.id.upper(), region.id.upper(), '2018')

        data = obj.saved_assessment_data.last()

        for k, val in data.items():
            if not val:
                continue

            if '_Score' in k:
                last_change_name = "{}_{}_Last_update".format(article.title,
                                                              val.question.id)
                last_change = data.get(last_change_name, '')
                last_change = last_change and last_change.isoformat() or ''

                for i, v in enumerate(val.values):
                    options = ([o.title
                                for o in val.question.get_assessed_elements(
                                    d_obj, muids=muids)] or ['All criteria'])

                    # TODO IndexError: list index out of range
                    # investigate this
                    # Possible cause of error: D9C2 was removed and some old
                    # questions have answered it
                    try:
                        option = options[i]
                    except IndexError:
                        continue

                    # BUG FIX: an unconditional assignment of an
                    # 'ERROR with options: ...' string here used to clobber
                    # the real option on every successful lookup; removed to
                    # match get_data_sec/get_data_rda, which simply skip the
                    # row on IndexError

                    answer = val.question.answers[v]

                    yield (country.title, region.title, d_obj.id,
                           article.title, val.question.id, option, answer,
                           val.question.scores[v], state, last_change)

            elif '_Summary' in k:
                article_id, question_id, _ = k.split('_')
                last_change_name = "{}_{}_Last_update".format(article_id,
                                                              question_id)
                last_change = data.get(last_change_name, '')
                last_change = last_change and last_change.isoformat() or ''

                yield (country.title, region.title, d_obj.id, article_id,
                       question_id, 'Summary', val, ' ', state, last_change)

            elif '_assessment_summary' in k:
                article_id, _, __ = k.split('_')
                last_change_name = "{}_assess_summary_last_upd".format(
                    article_id
                )
                last_change = data.get(last_change_name, '')
                last_change = last_change and last_change.isoformat() or ''

                yield (country.title, region.title, d_obj.id, article_id,
                       ' ', 'Assessment Summary', val, '', state, last_change)

            elif '_recommendations' in k:
                article_id, _ = k.split('_')
                last_change_name = "{}_assess_summary_last_upd".format(
                    article_id
                )
                last_change = data.get(last_change_name, '')
                last_change = last_change and last_change.isoformat() or ''

                yield (country.title, region.title, d_obj.id, article_id,
                       ' ', 'Recommendations', val, '', state, last_change)

            elif '_progress' in k:
                article_id, _ = k.split('_')
                last_change_name = "{}_assess_summary_last_upd".format(
                    article_id
                )
                last_change = data.get(last_change_name, '')
                last_change = last_change and last_change.isoformat() or ''

                yield (country.title, region.title, d_obj.id, article_id,
                       ' ', 'Progress', val, '', state, last_change)

    def get_data_sec(self, obj):
        """ Get assessment data for a country assessment object

        Secondary-article variant: no descriptor/region columns.
        """
        if not (hasattr(obj, 'saved_assessment_data')
                and obj.saved_assessment_data):
            return

        state = get_wf_state_id(obj)
        article = obj
        country = obj.aq_parent
        data = obj.saved_assessment_data.last()
        d_obj = 'Not linked'
        muids = []

        for k, val in data.items():
            if not val:
                continue

            if '_Score' in k:
                for i, v in enumerate(val.values):
                    options = ([o.title
                                for o in val.question.get_assessed_elements(
                                    d_obj, muids=muids)] or ['All criteria'])

                    # TODO IndexError: list index out of range
                    # investigate this
                    # Possible cause of error: D9C2 was removed and some old
                    # questions have answered it
                    try:
                        option = options[i]
                    except IndexError:
                        continue

                    answer = val.question.answers[v]

                    yield (country.title, article.title, val.question.id,
                           option, answer, val.question.scores[v], state)

            elif '_Summary' in k:
                article_id, question_id, _ = k.split('_')

                yield (country.title, article_id, question_id,
                       'Summary', val, ' ', state)

            elif '_assessment_summary' in k:
                article_id, _, __ = k.split('_')

                yield (country.title, article_id, ' ',
                       'Assessment Summary', val, '', state)

            elif '_recommendations' in k:
                article_id, _ = k.split('_')

                yield (country.title, article_id, ' ',
                       'Recommendations', val, '', state)

            elif '_progress' in k:
                article_id, _ = k.split('_')

                yield (country.title, article_id, ' ',
                       'Progress', val, '', state)

    def get_data_rda(self, obj):
        """ Get assessment data for a regional descriptor assessment
        """
        if not (hasattr(obj, 'saved_assessment_data')
                and obj.saved_assessment_data):
            return

        state = get_wf_state_id(obj)
        article = obj
        descr = obj.aq_parent
        region = obj.aq_parent.aq_parent
        d_obj = self.descriptor_obj(descr.id.upper())
        data = obj.saved_assessment_data.last()

        for k, val in data.items():
            if not val:
                continue

            if '_Score' in k:
                for i, v in enumerate(val.values):
                    options = (
                        [o.title for o in self.get_available_countries(region)]
                        or ['All criteria']
                    )

                    # TODO IndexError: list index out of range
                    # investigate this
                    # Possible cause of error: D9C2 was removed and some old
                    # questions have answered it
                    try:
                        option = options[i]
                    except IndexError:
                        continue

                    answer = val.question.answers[v]

                    yield (region.title, d_obj.id,
                           article.title, val.question.id, option, answer,
                           val.question.scores[v], state)

            elif '_Summary' in k:
                article_id, question_id, _ = k.split('_')

                yield (region.title, d_obj.id,
                       article_id, question_id, 'Summary', val, ' ', state)

            elif '_assessment_summary' in k:
                article_id, _, __ = k.split('_')

                yield (region.title, d_obj.id,
                       article_id, ' ', 'Assessment Summary', val, '', state)

            elif '_recommendations' in k:
                article_id, _ = k.split('_')

                yield (region.title, d_obj.id,
                       article_id, ' ', 'Recommendations', val, '', state)

            elif '_progress' in k:
                article_id, _ = k.split('_')

                yield (region.title, d_obj.id,
                       article_id, ' ', 'Progress', val, '', state)

    def data_to_xls(self, all_data):
        """Build an in-memory XLSX workbook: one sheet per data set.

        :param all_data: iterable of (sheetname, labels, rows-of-rows)
        :return: BytesIO positioned at the start of the file
        """
        out = BytesIO()
        workbook = xlsxwriter.Workbook(out, {'in_memory': True})

        for sheetname, labels, data in all_data:
            worksheet = workbook.add_worksheet(sheetname)

            # header row
            for i, label in enumerate(labels):
                worksheet.write(0, i, label)

            x = 0

            for objdata in data:
                for row in objdata:
                    x += 1

                    for iv, value in enumerate(row):
                        worksheet.write(x, iv, value)

        workbook.close()
        out.seek(0)

        return out

    def export_scores(self, context):
        """Stream all assessment scores as an XLSX download."""
        # National descriptors data
        nda_labels = ('Country', 'Region', 'Descriptor', 'Article', 'Question',
                      'Option', 'Answer', 'Score', 'State', 'Last change')
        nda_xlsdata = (self.get_data(nda) for nda in self.ndas)

        # Regional descriptors data
        rda_labels = ('Region', 'Descriptor', 'Article', 'Question',
                      'Option', 'Answer', 'Score', 'State')
        rda_xlsdata = (self.get_data_rda(rda) for rda in self.rdas)

        # Secondary Articles 3 & 4, 7
        sec_labels = ('Country', 'Article', 'Question',
                      'Option', 'Answer', 'Score', 'State')
        sec_xlsdata = (self.get_data_sec(sec) for sec in self.ndas_sec)

        all_data = [
            ('National descriptors', nda_labels, nda_xlsdata),
            ('Regional descriptors', rda_labels, rda_xlsdata),
            ('Articles 3 & 4, 7', sec_labels, sec_xlsdata)
        ]

        xlsio = self.data_to_xls(all_data)
        sh = self.request.response.setHeader
        sh('Content-Type', 'application/vnd.openxmlformats-officedocument.'
           'spreadsheetml.sheet')
        fname = "-".join(['Assessment_Scores',
                          str(datetime.now().replace(microsecond=0))])
        sh('Content-Disposition',
           'attachment; filename=%s.xlsx' % fname)

        return xlsio.read()

    def __call__(self):
        """Dispatch on the submitted form button, then render the page."""
        msgs = IStatusMessage(self.request)

        if 'export-scores' in self.request.form:
            return self.export_scores(self.context)

        if 'reset-assessments' in self.request.form:
            self.reset_assessment_data()
            msgs.add('Assessments reseted successfully!', type='warning')
            logger.info('Reset score finished!')

        if 'recalculate-scores' in self.request.form:
            self.recalculate_scores()
            msgs.add('Scores recalculated successfully!', type='info')
            logger.info('Recalculating score finished!')

        return self.index()
class SetupAssessmentWorkflowStates(BaseComplianceView):
    """Move untouched 'in_work' assessments back to the 'not_started' state."""

    @property
    def ndas(self):
        """Iterate over all national descriptor assessment objects."""
        catalog = get_tool('portal_catalog')
        brains = catalog.searchResults(
            portal_type='wise.msfd.nationaldescriptorassessment',
        )

        for brain in brains:
            yield brain.getObject()

    def __call__(self):
        changed = 0
        not_changed = 0

        logger.info("Changing workflow states to not_started...")

        for nda in self.ndas:
            state = get_wf_state_id(nda)

            if hasattr(nda, 'saved_assessment_data'):
                # real assessment data exists: leave the state alone
                if nda.saved_assessment_data.last():
                    not_changed += 1
                    continue

            if state == 'in_work':
                changed += 1
                logger.info("State changing for {}".format(repr(nda)))
                transition(obj=nda, to_state='not_started')

        logger.info("States changed: {}, Not changed: {}".format(
            changed, not_changed)
        )

        return "Done"
class TranslateIndicators(BrowserView):
    """Seed the translation storage with indicator labels that look
    non-English and are not yet stored.
    """

    def __call__(self):
        labels = get_indicator_labels().values()
        site = portal.get()
        storage = ITranslationsStorage(site)
        count = 0

        for label in labels:
            lang = get_detected_lang(label)

            # skip undetected or already-English labels
            if (not lang) or (lang == 'en'):
                continue

            lang = lang.upper()
            langstore = storage.get(lang, None)

            if langstore is None:
                continue

            if label not in langstore:
                langstore[label] = u''
                logger.info('Added %r to translation store for lang %s',
                            label, lang)
                # BUG FIX: was ``count = +1`` (unary plus), which pinned the
                # counter at 1 no matter how many labels were added
                count += 1

        return "Added %s labels" % count
class MigrateTranslationStorage(BrowserView):
    """One-off migration: wrap stored translation values in Translation
    objects, marking non-'?'-prefixed texts as approved.
    """

    def __call__(self):
        site = portal.get()
        storage = ITranslationsStorage(site)
        count = 0

        for langstore in storage.values():
            for original, translated in langstore.items():
                # BUG FIX: was ``count = +1`` (unary plus), which left the
                # counter stuck at 1 instead of counting migrated strings
                count += 1

                # unwrap an already-wrapped value to its raw text
                if hasattr(translated, 'text'):
                    translated = translated.text

                translated = Translation(translated, 'original')

                # '?'-prefixed texts are presumably still pending review --
                # TODO confirm the convention
                if not translated.text.startswith('?'):
                    translated.approved = True

                langstore[original] = translated

        return "Migrated {} strings".format(count)
|
#!/usr/bin/python3
import socket
import struct
import os
import sys
import json
import time
import signal
import argparse
# Authored by Alex Ionita taionita@uwaterloo.ca
""" ___
,-"" `.
,' _ e )`-._
/ ,' `-._<.===-'
/ /
/ ;
_ / ;
(`._ _.-"" ""--..__,' |
<_ `-"" \
<`- :
(__ <__. ;
`-. '-.__. _.' /
\ `-.__,-' _,'
`._ , /__,-'
""._\__,'< <____
| | `----.`.
| | \ `.
; |___ \-``
\ --<
`.`.<
`-'
oh, hello there
"""
# first epoch is the initial time given for all vrouters to converge (first epoch includes init phase and forwarding phase)
# every subsequent epoch, each `CONVERGENCE_EPOCH_TIME` seconds long, corresponds to one network failure scenario
CONVERGENCE_EPOCH_TIME = 10 # length of an epoch in seconds
class MessageType:
    # Wire-protocol message type codes (the first int32 of every message)
    INIT = 1         # vrouter -> NFE: announces the router's id
    INIT_REPLY = 4   # NFE -> vrouter: the router's neighbour list
    HEARTBEAT = 2    # vrouter -> vrouter, relayed by the NFE
    LSA = 3          # vrouter -> vrouter link state advertisement, relayed
    TERMINATE = 5    # NFE -> vrouter: emulation is over
class LSAMessage:
    """Parsed Link State Advertisement message.

    Wire format -- vrouter to vrouter, all fields big-endian int32:
        type (0x3), sender_id, destination_id, advertising router id,
        sequence number, number of links, then per link:
        router_neighbour, link_cost.
    """

    class Link:
        """One advertised link: advertising router -> neighbour, with a cost."""

        def __init__(self, advertising_id, neighbour_id, cost):
            self.advertising_id = advertising_id
            self.neighbour_id = neighbour_id
            self.cost = cost

        def __eq__(self, other):
            return self.advertising_id == other.advertising_id and \
                   self.neighbour_id == other.neighbour_id and \
                   self.cost == other.cost

    def __init__(self, buffer):
        """Parse an LSA from raw bytes; raises Exception on malformed input."""
        len_metadata = (6 * 4)  # the first 6 fields (4 bytes each) must exist
        if len(buffer) < len_metadata:
            raise Exception("LSA message expected to be at least 24 bytes long (this accounts for the first 6 fields), this one is {} bytes".format(len(buffer)))

        data = struct.unpack("!iiiiii", buffer[:len_metadata])
        self.sender_id = data[1]
        self.destination_id = data[2]
        self.advertising_id = data[3]
        self.sequence = data[4]
        self.links = []

        number_of_links = data[5]  # todo: we're not checking if number_of_links is too great; what's too great look like?
        if number_of_links < 0:
            raise Exception("LSA message's number_of_links field is negative {}".format(number_of_links))

        payload_len = len(buffer) - len_metadata
        expected_payload_len = number_of_links * 2 * 4  # 2 fields per link, 4 bytes per field
        if expected_payload_len != payload_len:
            raise Exception("LSA message indicated {} links are described ,which would require {} bytes to describe but {} bytes "
                            "have been found describing the links".format(number_of_links, expected_payload_len, payload_len))

        data_links = struct.unpack("!" + "ii" * number_of_links, buffer[len_metadata:])
        for i in range(number_of_links):
            index_neighbour_id = (i * 2)
            index_cost = (i * 2) + 1
            self.links.append(LSAMessage.Link(self.advertising_id, data_links[index_neighbour_id], data_links[index_cost]))

    def __str__(self):
        # BUG FIX: the metadata placeholders were never filled in -- the
        # format string itself was returned, so log lines contained literal
        # "{}" markers instead of the actual field values
        msg = "type:{},sender_id:{},destination_id:{},advertising_id:{},sequence_number:{}".format(
            'lsa', self.sender_id, self.destination_id,
            self.advertising_id, self.sequence)
        for link in self.links:
            msg += ",neighbour_id:{},cost:{}".format(link.neighbour_id, link.cost)
        return msg
class HeartbeatMessage:
    """Parsed heartbeat message.

    Wire format -- vrouter to vrouter, big-endian int32 each:
        type (0x2), sender_id, destination_id.
    """

    def __init__(self, buffer):
        actual_len = len(buffer)
        if actual_len != 12:
            raise Exception("Heartbeat message expected to be 12 bytes long in total, this one is {} bytes".format(actual_len))

        msg_type, sender, destination = struct.unpack("!iii", buffer)
        if msg_type != MessageType.HEARTBEAT:
            raise Exception("NFE misidentified Heartbeat message")

        self.sender_id = sender
        self.destination_id = destination

    def __str__(self):
        return "type:{},sender_id:{},destination_id:{}".format('heartbeat', self.sender_id, self.destination_id)
class VirtualRouter:
    """Associates a UDP (ip, port) client address with a virtual router id."""

    def __init__(self, address, router_id):
        self.router_id = router_id
        self.address = address
class Neighbour:
    """A neighbouring router as seen from one router: id plus link cost."""

    def __init__(self, router_id, link_cost):
        self.link_cost = link_cost
        self.id = router_id

    def __eq__(self, other):
        return (self.id, self.link_cost) == (other.id, other.link_cost)
class Router:
    """A router in the topology: an id plus its list of neighbours."""

    def __init__(self, id):
        self.id = id
        self.neighbours = []

    def add_neighbour(self, other_router_id, link_cost):
        """Register a link from this router to another router."""
        self.neighbours.append(Neighbour(other_router_id, link_cost))

    def __str__(self):
        return "Router #{}".format(self.id)

    def __eq__(self, other):
        # equal iff same id and same neighbours, order-insensitively
        if self.id != other.id:
            return False

        mine = sorted(self.neighbours, key=lambda n: n.id)
        theirs = sorted(other.neighbours, key=lambda n: n.id)
        return mine == theirs
class Failures:
    """Parsed link-failure schedule: one epoch per line of the failures file."""

    # Failures file looks like this
    # (1 - 2),(2 - 3)
    # (1 - 2)
    # (2 - 3)
    # ()
    def __init__(self, failures_lines, topology):
        """Parse the failure lines, validating router ids against *topology*."""
        self.current_epoch = 0
        # one entry per epoch: a list of (id, id) pairs, or None for "()"
        self.epochs = []
        self.current_disconnects = None

        if not failures_lines:
            failures_lines = []

        for line in failures_lines:
            line = line.strip()

            if not line:
                continue

            # in case of classic comments
            if line[0] == '#':
                continue

            # drop any trailing inline comment
            line = line.split("#")[0]

            epoch_disconnected_pairs = []

            # "()" (possibly with spaces) means: nothing fails this epoch
            if self._no_disconnected_links(line):
                self.epochs.append(None)
                continue

            line = line.split(",")
            router_pairs = [change.strip() for change in line]  # clean up

            for pair in router_pairs:
                # "(1 - 2)" -> router1=1, router2=2
                router1 = int(pair.split("-")[0].strip().strip("()").strip())
                router2 = int(pair.split("-")[1].strip().strip("()").strip())

                if router1 not in [r.id for r in topology.routers]:
                    raise Exception("failures file contained an invalid router: \"{}\"".format(router1))

                if router2 not in [r.id for r in topology.routers]:
                    raise Exception("failures file contained an invalid router: \"{}\"".format(router2))

                epoch_disconnected_pairs.append((router1, router2))

            self.epochs.append(epoch_disconnected_pairs)

    def next_epoch(self):
        """Advance to the next failure scenario.

        Raises StopIteration once every epoch has been consumed.
        """
        try:
            self.current_disconnects = self.epochs[self.current_epoch]
        except IndexError:
            raise StopIteration
        self.current_epoch += 1

    def endpoints_disconnected(self, sender_id, destination_id):
        """True if the link between the two routers is down this epoch
        (direction-insensitive)."""
        if self.current_disconnects:
            return (sender_id, destination_id) in self.current_disconnects or\
                   (destination_id, sender_id) in self.current_disconnects

        return False

    @staticmethod
    def _no_disconnected_links(line):
        # checks for a line in the failures file that looks like this
        # ()
        # or some variation, like this
        # (    )
        return line.strip()[0] == '(' and line.strip().split("(")[1].strip()[0] == ')' \
               and line.strip().split("(")[1].strip()[-1] == ')'
class Topology:
    """In-memory representation of the network topology read from JSON."""

    def __init__(self, topology_description):
        """Build and validate the topology from the parsed JSON dict."""
        self.routers = []
        self.router_pairs = []
        self.parse_topology_description(topology_description)
        self.validate_no_self_connection()
        self.validate_only_1_link()

    # why json.load() doesn't have a flag for this, I cannot fathom
    @staticmethod
    def dup_key_verify(ordered_pairs):
        """``object_pairs_hook`` for json.load rejecting duplicate keys."""
        # IDIOM FIX: the accumulator used to be named ``dict``, shadowing
        # the builtin
        result = {}
        for key, val in ordered_pairs:
            if key in result:
                raise Exception("JSON contains duplicate link id {}".format(key))
            else:
                result[key] = val
        return result

    def parse_topology_description(self, topology_description):
        """Create routers and links from the JSON 'links' list."""
        # JSON part we care about looks like this:
        # {
        #     "links":
        #     [
        #         [["1", "2"], "3"],   # router 1 and 2 are connected by a link of cost 3
        #         [["1", "4"], "2"],
        #         [["1", "6"], "10"]
        #     ]
        # }
        if len(topology_description['links']) == 0:
            raise Exception("The topology file seems to have no routers connected to each other; emulator needs at least one link between two routers")

        for connection in (topology_description['links']):
            router1_id, router2_id = connection[0]
            router1_id = int(router1_id)
            router2_id = int(router2_id)
            link_cost = int(connection[1])
            # links are bidirectional: record the neighbour on both routers
            self.add_router_connection(router1_id, router2_id, link_cost)
            self.add_router_connection(router2_id, router1_id, link_cost)
            self.router_pairs.append([router1_id, router2_id])  # for later validation

    def validate_no_self_connection(self):
        """Raise if any router lists itself as a neighbour."""
        for r in self.routers:
            for neighbour in r.neighbours:
                if r.id == neighbour.id:
                    raise Exception("Router {} connects to itself".format(r.id))

    def validate_only_1_link(self):
        # validate only 1 link between two routers
        # sorting the pairs so that [2,1] becomes [1,2], such that comparison is simpler
        # it's awkward but sort() is in-place
        [pair.sort() for pair in self.router_pairs]

        # it's n^2 but given the expected size, good enough
        for index1, pair1 in enumerate(self.router_pairs):
            for index2, pair2 in enumerate(self.router_pairs):
                if index1 != index2 and pair1 == pair2:
                    raise Exception("There is more than 1 link between router ids {}".format(pair1))

    def get_router_by_id(self, id):
        """Return the Router with the given id; raises if it is missing."""
        for router in self.routers:
            if router.id == id:
                return router

        raise Exception("Emulator has messed something while validating if the topology is connected, I'm sorry. Try another topology")

    def add_router_connection(self, router_id, other_router_id, link_cost):
        """Record a one-directional neighbour entry, creating the router
        on first sight."""
        router = None

        # does this router already exist?
        for r in self.routers:
            if r.id == router_id:
                router = r
                break
        else:
            # no? let's create it and add it to our collection
            router = Router(router_id)
            self.routers.append(router)

        router.add_neighbour(other_router_id, link_cost)
def main():
    """Entry point: parse CLI arguments, then run the emulator loop."""
    topology, failures, is_log_msg = parse_args()
    listen_loop(topology, failures, is_log_msg)
def parse_args():
    """Parse CLI arguments and load the input files.

    :return: (Topology, Failures, is_log_msg) where is_log_msg comes from
        the NFE_LOG_MSG environment variable
    Exits with -1 on a bad topology file, -2 on a bad failures file.
    """
    parser = argparse.ArgumentParser(description='NFE')
    # split args into required/optional groups for nicer --help output
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    required.add_argument("-t", "--topology-file", help="path to the JSON topology file", required=True)
    optional.add_argument("-f", "--failures-file", help="(optional) path to the topology failures file", required=False)
    args = parser.parse_args()

    is_log_msg = os.getenv('NFE_LOG_MSG')  # we want to log messages passing through the NFE

    try:
        with open(args.topology_file) as fd:
            # dup_key_verify rejects duplicate link ids while parsing
            topology = Topology(json.load(fd, object_pairs_hook=Topology.dup_key_verify))
    except Exception as e:
        print("Either couldn't open \"{}\" or couldn't parse JSON to construct desired topology: {}".format(args.topology_file, str(e)))
        sys.exit(-1)

    if args.failures_file:
        try:
            with open(args.failures_file) as fd:
                failures = Failures(fd.readlines(), topology)
        except Exception as e:
            print("Either couldn't open \"{}\" or couldn't process failures file: {}".format(args.failures_file, str(e)))
            sys.exit(-2)
    else:
        # no failures file: a Failures object with no epochs
        failures = Failures(None, topology)

    return topology, failures, is_log_msg
def log_init_messages(router_id):
    """Append one received init router id to the nfe.init_messages file."""
    with open("nfe.init_messages", 'a') as fd:
        fd.write(str(router_id) + "\n")
def init_phase(sock, topology, is_log_msg):
    """Wait for an Init message from every router in the topology, then
    send each one an Init-reply describing its neighbours.

    :return: list of VirtualRouter clients (udp address <-> router id)
    """
    # Init -- vrouter to NFE
    #    int32    type (0x1)
    #    int32    router_id
    # Init-reply -- NFE to vrouter
    #    int32    type (0x4)
    #    int32    number of links
    #    int32    (1) neighbour router id
    #    int32    (1) link_cost
    #    int32    (n) neighbour router id
    #    int32    (n) link_cost
    clients = []
    expected_clients = len(topology.routers)

    # Waiting for init messages; noting down which router id goes with which udp client
    print("Emulator is waiting for init messages from {} virtual routers".format(expected_clients))

    # creating file to log init messages received
    if is_log_msg:
        with open("nfe.init_messages", 'w') as fd:
            fd.write("")

    while len(clients) != expected_clients:
        buffer, address = sock.recvfrom(4096)
        if len(buffer) < 4:
            # BUG FIX: the format string has two placeholders but was handed
            # three arguments starting with ``address``, so the udp address
            # was printed where the byte count belongs; pass the right two
            print(
                "UDP message is only {} byte(s) long. The emulator expects at least 4 bytes as that's the size of the message type. Byte(s) received: {}".format(
                    len(buffer), ' '.join('0x{:02x}'.format(byte) for byte in buffer)))
            continue
        message_type_buffer = buffer[:4]
        message_type = struct.unpack("!i", message_type_buffer)[0]
        if message_type != MessageType.INIT:
            print("UDP message has a message type that doesn't correspond to the Init message type. Message type received: {} ({})".format(message_type, ' '.join(
                '0x{:02x}'.format(byte) for byte in message_type_buffer)))
            continue
        if len(buffer) != 8:
            print("Message type is 'Init'(0x01) but it is {} bytes long, and it's expected to actually be 8 bytes".format(len(buffer)))
            continue
        router_id = struct.unpack("!i", buffer[4:8])[0]
        if router_id not in [r.id for r in topology.routers]:
            print("Received Init from virtual router id {} but that router id is not in the topology, ignoring".format(router_id))
            continue
        if router_id in [c.router_id for c in clients]:
            print("Received Init from virtual router id {} but that router id has already been received, ignoring".format(router_id))
            continue
        print("Received Init from virtual router id {} correctly, from udp client (ip, port) {})".format(router_id, address))
        if is_log_msg:
            log_init_messages(router_id)
        clients.append(VirtualRouter(address, router_id))

    # Sending the clients their info
    print("Emulator sending neighbour info to virtual routers")
    for client in clients:
        router = topology.get_router_by_id(client.router_id)
        data = struct.pack("!i", MessageType.INIT_REPLY)  # message type, 0x4
        data += struct.pack("!i", len(router.neighbours))  # nbr neighbours
        for neighbour in router.neighbours:
            data += struct.pack("!i", neighbour.id)  # neighbour_id
            data += struct.pack("!i", neighbour.link_cost)  # link_cost
        print("Sending data to virtual router {}".format(client.router_id))
        sock.sendto(data, client.address)

    return clients
def termination_phase(sock, vrouter_clients):
    """Broadcast a Terminate message to every virtual router, then close."""
    terminate = struct.pack("!i", MessageType.TERMINATE)

    for client in vrouter_clients:
        sock.sendto(terminate, client.address)

    sock.close()
def log_message(timestamp_sec, message, is_connected):
    """Append one forwarded (or dropped) message record to nfe.log."""
    record = "[{}],timestamp_sec:{},".format(is_connected, timestamp_sec) + str(message) + "\n"
    with open("nfe.log", 'a') as fd:
        fd.write(record)
def forwarding_phase(sock, topology, failures, vrouter_clients, is_log_msg):
    """Main loop: relay heartbeat/LSA messages between virtual routers,
    honouring the failure schedule (one scenario per CONVERGENCE_EPOCH_TIME
    seconds). Returns after sending Terminate once all epochs are consumed.
    """
    # Forwarding phase
    if is_log_msg:
        with open("nfe.log", 'w') as fd:
            fd.write("")

    print("Emulator forwarding traffic between virtual routers")

    time_global = time.time()  # reference point for log timestamps
    time_t0 = int(time.time())  # start of the current epoch

    while True:
        # epoch rollover: apply the next failure scenario or terminate
        if time.time() - time_t0 >= CONVERGENCE_EPOCH_TIME:
            time_t0 = int(time.time())
            try:
                failures.next_epoch()
                print("\nConnecting/disconnecting links in the topology")
            except StopIteration:
                print("\nDone, sending termination message")
                termination_phase(sock, vrouter_clients)
                return

        # never block past the end of the current epoch (min 1s timeout)
        time_to_next_epoch = (CONVERGENCE_EPOCH_TIME + time_t0) - time.time()
        if time_to_next_epoch < 1:
            time_to_next_epoch = 1
        sock.settimeout(time_to_next_epoch)
        try:
            buffer, address = sock.recvfrom(4096)
        except socket.timeout:
            continue

        # let's find the topology vrouter corresponding to the udp source (ip, port)
        for client in vrouter_clients:
            if client.address == address:
                router = topology.get_router_by_id(client.router_id)
                break
        else:
            print("Received data from virtual router (ip, port) {} but that virtual router did not send an init message during the init phase, ignoring".format(
                address))
            continue

        # Termination Message -- NFE to vrouter
        #    int32   type (0x5)
        if len(buffer) < 4:
            print("Virtual Router {} has sent a udp message with less than 4 bytes; 4 bytes which is the minimum to specify a message type".format(router.id))
            continue

        # let's just unpack 4 bytes, message type
        message_type = struct.unpack("!i", buffer[:4])[0]

        # we unpack a second time for the rest
        if message_type == MessageType.HEARTBEAT:
            try:
                message = HeartbeatMessage(buffer)
            except Exception as e:
                print("Virtual Router {} - message type is {} (Heartbeat) but the rest of the message could not be parsed, ignoring: {}".format(router.id, message_type, str(e)))
                continue
        elif message_type == MessageType.LSA:
            try:
                message = LSAMessage(buffer)
            except Exception as e:
                print("Virtual Router {} - message type is {} (LSA) but the rest of the message could not be parsed, ignoring: {}".format(router.id, message_type, str(e)))
                continue
        else:
            print("Virtual Router {} - message type is {} but that that's not the expected message type, ignoring".format(router.id, message_type))
            continue

        # does the sender id in the LSA match the UDP client we've exchanged Init messages with?
        if message.sender_id != router.id:
            print(
                "Virtual Router {} - message cannot be forwarded: message indicates the sender id is {} but the UDP source of the message corresponds to"
                " sender id {} with which the emulator spoke during the init phase. Ignoring message".format(router.id, message.sender_id, router.id))
            continue

        # the virtual router whose LSA/heartbeat message we just received wants that LSA/heartbeat forwarded to some neighbouring router
        # let's make sure the destination of the message is an actual neighbour
        if message.destination_id not in [neighbour.id for neighbour in router.neighbours]:
            print(
                "Virtual Router {} - message cannot be forwarded, the router destination id {} is invalid: it's not a neighbour. Ignoring message".format(router.id,
                                                                                                                                                          message.destination_id))
            continue

        # let's find that neighbour's UDP address in clients
        for client in vrouter_clients:
            if message.destination_id == client.router_id:
                neighbour_address = client.address
                break
        else:
            raise Exception("The emulator couldn't find an address it should have been able to find, sorry. This is a bug. Try another topology")

        # let's see if failures in the topology make this forwarding impossible
        if failures.endpoints_disconnected(message.sender_id, message.destination_id):
            print("x", end='', flush=True)  # show the user forwarding activity would have happened if not for disconnection
            if is_log_msg:
                log_message(time.time() - time_global, message, "disconnected")
            continue

        if is_log_msg:
            log_message(time.time() - time_global, message, "connected")
        sock.sendto(buffer, neighbour_address)
        print(".", end='', flush=True)  # show the user forwarding activity is happening
def listen_loop(topology, failures, is_log_msg):
    """Bind a UDP socket on an OS-chosen port, publish the port number to
    a file named 'port', then run the init and forwarding phases."""
    # Bind to port, write it to file
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('', 0))  # port 0: let the OS pick a free port
    port_used = sock.getsockname()[1]

    # vrouters discover the emulator's port by reading this file
    with open("port", 'w') as fd:
        fd.write(str(port_used))

    print("Emulator is listening on port {} (and has also written this port number to a file named 'port')".format(str(port_used)))

    vrouter_clients = init_phase(sock, topology, is_log_msg)
    forwarding_phase(sock, topology, failures, vrouter_clients, is_log_msg)
# Run the emulator only when executed as a script (not on import)
if __name__ == '__main__':
    main()
|
# Generated by Django 2.2 on 2020-08-21 02:42
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration creating the Candidat table.
    # NOTE(review): several field definitions below look suspect -- they
    # should be verified against the current model before this schema ships.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Candidat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom', models.CharField(default='', max_length=30)),
                ('prenom', models.CharField(default='', max_length=30)),
                ('email', models.EmailField(default='', max_length=200, unique=True)),
                ('date_naiss', models.DateField()),
                ('tel', models.CharField(default='', max_length=8, unique=True)),
                # NOTE(review): verbose_name is normally a human-readable
                # string; this tuple looks like it was meant to be ``choices``
                # -- confirm against the model definition
                ('disp', models.FloatField(verbose_name=(0, 1, 2, 3, 4, 5))),
                ('exp', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
                ('cv', models.FileField(upload_to='')),
                ('msg', models.CharField(max_length=300)),
                # NOTE(review): max_length=2 is smaller than the stored
                # values (the default 'New Candidate' is 13 chars), and the
                # choices pairs appear reversed -- Django expects
                # (db_value, human_label); saving will fail length
                # validation. Verify the model
                ('cand_status', models.CharField(choices=[('New Candidate', 'new'), ('In Progress', 'progress'), ('Accepted', 'accepted'), ('Refused', 'refused')], default='New Candidate', max_length=2)),
                # NOTE(review): 'Yes' (3 chars) also exceeds max_length=2
                ('email_status', models.CharField(choices=[('Yes', 'yes'), ('No', 'no')], default='No', max_length=2)),
            ],
        ),
    ]
|
import pygame
from os import getcwd, path
from random import shuffle
class Button:
    """One minesweeper cell.

    Knows its grid position, whether it hides a bomb, and which sprite to
    show.  Relies on module globals set up by main(): x, y (board size),
    xButtons/yButtons (board pixel origin), buttonLst (flat row-major list
    of all cells), cooldown/tick (flag-toggle rate limit) and defeat.
    """

    # Sprite filenames indexed by cell state.
    sprites = ["Button_Sprite0.png",   # Standby (unrevealed)
               "Button_Sprite1.png",   # Neutral (revealed, 0 neighbours)
               "Button_Sprite2.png",   # Flag
               "Button_Sprite3.png",   # Bomb (revealed at game end)
               "Button_Sprite4.png",   # Exploded
               "Button_Sprite5.png",   # 1 adjacent bomb
               "Button_Sprite6.png",
               "Button_Sprite7.png",
               "Button_Sprite8.png",
               "Button_Sprite9.png",
               "Button_Sprite10.png",
               "Button_Sprite11.png",
               "Button_Sprite12.png"]  # 8 adjacent bombs

    # Class-level defaults; shadowed by instance attributes once toggled.
    isActive = True
    isFlag = False

    def __init__(self, x, y, i, bomb):
        global xButtons, yButtons
        self.swapSprite(0)
        self.imageBorder = self.imageSprite.get_rect()
        self.x = x                      # grid column
        self.y = y                      # grid row
        self.index = i                  # flat row-major index into buttonLst
        self.xDraw = xButtons + x*16    # pixel position (16x16 sprites)
        self.yDraw = yButtons + y*16
        self.isBomb = bomb
        # Coordinates of the 8 neighbours; used to reject flat indices that
        # wrapped around a board edge.
        self.adjCoord = [(self.x+1, self.y),
                         (self.x-1, self.y),
                         (self.x, self.y+1),
                         (self.x+1, self.y+1),
                         (self.x-1, self.y+1),
                         (self.x, self.y-1),
                         (self.x+1, self.y-1),
                         (self.x-1, self.y-1)]

    def __repr__(self):
        return f"Button object at({self.x}, {self.y})"

    def __str__(self):
        return f"(({self.x}, {self.y}), {self.index}, bomb = {self.isBomb})"

    def _neighbourIndices(self):
        """Candidate flat indices of the 8 surrounding cells.

        BUG FIX / IDIOM: these used to be produced by eval()-ing strings
        such as "self.index+x"; plain arithmetic is safer and faster.
        Results may be negative or past the end of buttonLst; callers
        filter them with adjCoord plus an IndexError guard.
        """
        global x
        i = self.index
        return [i+1, i-1, i-x, i-x+1, i-x-1, i+x, i+x+1, i+x-1]

    def swapSprite(self, index):
        """Swap this button's sprite to sprites[index]."""
        imagePath = path.join(getcwd(), "Assets", self.sprites[index])
        self.imageSprite = pygame.image.load(imagePath)

    def draw(self, screen):
        """Draw the button at its predefined pixel location."""
        screen.blit(self.imageSprite, (self.xDraw, self.yDraw))

    def count(self):
        """Count bombs in the 8 adjacent squares.

        When the count is zero, reveal the neighbours recursively
        (classic minesweeper flood fill).
        """
        count = 0
        for tmp in self._neighbourIndices():
            try:
                # adjCoord membership rejects indices that wrapped around an
                # edge (negative indices silently index from the list's end).
                if ((buttonLst[tmp].x, buttonLst[tmp].y) in self.adjCoord):
                    if buttonLst[tmp].isBomb:
                        count += 1
            except IndexError:
                continue
        if count == 0:
            self.swapSprite(1)
            for tmp in self._neighbourIndices():
                try:
                    if ((buttonLst[tmp].x, buttonLst[tmp].y) in self.adjCoord):
                        buttonLst[tmp].clicked("Click")
                except IndexError:
                    continue
        return count

    def clicked(self, event):
        """Handle a click on this button.

        A "Click" event is a left click (reveal); a "Flag" event is a
        right click (toggle flag, rate-limited via the global cooldown).
        """
        global cooldown, tick
        if (event == "Flag"):
            if (cooldown <= 0):
                cooldown = tick//5   # block further toggles for ~1/5 second
                if (self.isActive and (not self.isFlag)):
                    self.swapSprite(2)
                    self.isActive = False
                    self.isFlag = True
                elif ((not self.isActive) and self.isFlag):
                    self.swapSprite(0)
                    self.isActive = True
                    self.isFlag = False
        elif (event == "Click"):
            if self.isActive:
                self.isActive = False
                if self.isBomb:
                    self.swapSprite(4)
                    global defeat
                    defeat = True
                else:
                    count = self.count()
                    if (count > 0):
                        self.swapSprite(count+4)
def main():
    """Run minesweeper: build a 16x16 board with 64 bombs, then loop at
    60 fps handling input, win/defeat detection and drawing."""
    pygame.init()
    # Coordinates & Variables
    global x, y
    x = 16
    y = 16
    global xButtons, yButtons
    xButtons = 150
    yButtons = 50
    xScreen = x*16 + xButtons + yButtons
    yScreen = y*16 + yButtons + yButtons
    global cooldown, tick
    count = 0       # frames since the last one-second tick
    time = 900      # seconds left on the countdown clock
    tick = 60       # target frames per second
    cooldown = 0    # frames until the next flag toggle is allowed
    code = ""       # cheat-code input buffer
    # Screens & clock
    screen = pygame.display.set_mode((xScreen, yScreen))
    pygame.display.set_caption("Minesweeper")
    clock = pygame.time.Clock()
    font = pygame.font.Font(None, 36)
    # Buttons & Bombs: one quarter of the cells hide bombs
    bombCnt = (x*y)//4
    bombLst = [False for i in range(x*y)]
    bombLst[:bombCnt] = [True for i in range(bombCnt)]
    shuffle(bombLst)
    global buttonLst
    buttonLst = [Button(i%x, i//x, i, bombLst[i]) for i in range(x*y)]
    # Game loop
    global game, defeat
    game = True
    defeat = False
    win = False
    cheat = False
    while game:
        # Begin step & exit code
        keys = pygame.key.get_pressed()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game = False
        # Cheat code input recognization ("HESOYAM" flags every bomb)
        if keys[pygame.K_h] and code == "":
            code = "H"
        elif keys[pygame.K_e] and code == "H":
            code = "HE"
        elif keys[pygame.K_s] and code == "HE":
            code = "HES"
        elif keys[pygame.K_o] and code == "HES":
            code = "HESO"
        elif keys[pygame.K_y] and code == "HESO":
            code = "HESOY"
        elif keys[pygame.K_a] and code == "HESOY":
            code = "HESOYA"
        elif keys[pygame.K_m] and code == "HESOYA":
            code = "HESOYAM"
            cheat = True
        # Step code
        mouse = pygame.mouse.get_pos()
        # Mouseclick check (only inside the 256x256 board area)
        if ((xButtons < mouse[0] < xButtons+256) and
                (yButtons < mouse[1] < yButtons+256)):
            click = pygame.mouse.get_pressed()
            pos = (((mouse[0]-xButtons)//16) + ((mouse[1]-yButtons)//16)*x)
            if (click[2] == 1):
                buttonLst[pos].clicked("Flag")
            elif (click[0] == 1):
                buttonLst[pos].clicked("Click")
        # Defeated or cheating check: reveal (or flag) every bomb
        if defeat or cheat:
            for i in range(x*y):
                if (buttonLst[i].isBomb and buttonLst[i].isActive):
                    buttonLst[i].swapSprite(3)
                    buttonLst[i].isActive = False
                    if cheat:
                        buttonLst[i].isFlag = True
                    if defeat:
                        buttonLst[i].isActive = False
        # win check: every non-bomb revealed and every bomb flagged
        win = True
        for i in range(x*y):
            if ((not buttonLst[i].isBomb) and
                    (buttonLst[i].isActive or buttonLst[i].isFlag)):
                win = False
                break
            elif (buttonLst[i].isBomb and (not buttonLst[i].isFlag)):
                win = False
                break
        # Draw code
        # Screen draw
        screen.fill((127, 127, 127))
        pygame.draw.rect(screen, (192, 192, 192),
                         (4, 4, xScreen-8, yScreen-8))
        pygame.draw.rect(screen, (225, 225, 225),
                         (xButtons-4, yButtons-4, 256+8, 256+8))
        # Time draw
        text = font.render("Time:", 1, (0, 0, 0))
        screen.blit(text, (40, 50))
        pygame.draw.rect(screen, (0, 0, 0),
                         (45, 90, 55, 35))
        text = font.render(f"{time}", 1, (255, 255, 255))
        screen.blit(text, (50, 95))
        # Cheat code draw
        text = font.render(f"{code}", 1, (255, 0, 255))
        screen.blit(text, (150, 15))
        # Button draw
        for i in range(x*y):
            buttonLst[i].draw(screen)
        # Conditional message draw
        if defeat:
            text = font.render("Game", 1, (255, 0, 0))
            screen.blit(text, (40, 150))
            text = font.render("Over", 1, (255, 0, 0))
            screen.blit(text, (40, 185))
        if win:
            text = font.render("You", 1, (0, 200, 0))
            screen.blit(text, (40, 150))
            text = font.render("Win!", 1, (0, 200, 0))
            screen.blit(text, (40, 185))
        pygame.display.flip()
        # End step code
        # Count is increased by 1 per frame
        count += 1
        # Reduce cooldown by 1 per frame
        if (cooldown > 0):
            cooldown -= 1
        # Reduce time by 1 per second
        if ((not defeat) and (not win)):
            if (count >= tick):
                time -= 1
                count = 0
            # If time reaches zero, player is defeated
            if (time <= 0):
                defeat = True
        clock.tick(tick)
    pygame.quit()
if __name__ == "__main__":
main()
|
from itertools import combinations
# For each of N input strings, grow a suffix one character at a time (from
# the right) until every suffix is unique across all strings, then print
# the minimal suffix length that disambiguates them.
N = int(input())
students = []
numbers = []
same = False
length = 0
for i in range(N):
    students.append(input())
    numbers.append('/')  # '/' is a sentinel terminator for each suffix
# NOTE(review): assumes N >= 1 and that all strings share the same length.
length = len(students[0])
for index in range(length):
    # prepend one more character (taken from the right end) to every suffix
    for i, stu in enumerate(students):
        numbers[i] = str(stu[length-index-1]) + str(numbers[i])
    # check every pair of suffixes for a duplicate
    for c in combinations(numbers, 2):
        if(c[0] == c[1]):
            same = True
            break;
        else:
            same = False
    if not same:
        print(index+1)
        break;
|
from django.contrib.gis import admin
from models import UserStop, BaseStop, Agency, Source, StopAttribute, SourceAttribute
import reversion
class StopAttributeInline(admin.TabularInline):
    # Tabular inline so per-stop attributes can be edited on the stop page.
    model = StopAttribute
class SourceAttributeInline(admin.TabularInline):
    # Tabular inline for a stop's source attributes.
    model = SourceAttribute
class StopAdmin(admin.OSMGeoAdmin, reversion.VersionAdmin):
    """Map-enabled (OpenStreetMap widget), revision-tracked admin for stops."""
    inlines = [
        StopAttributeInline,
        SourceAttributeInline
    ]
    list_display = ('__unicode__', 'stop_type')
    list_filter = ('stop_type',)
    search_fields = ['common_name','common_city']
class UserStopAdmin(StopAdmin):
    """Admin for user-submitted stops; adds a bulk 'merge' action."""
    list_filter = ()
    list_display = ('__unicode__', 'has_parent')
    actions = ['merge_stops']

    def has_parent(self, obj):
        """True when the stop has already been merged under a BaseStop."""
        return (obj.parent is not None)
    has_parent.short_description = "Has parent?"

    def merge_stops(self, request, queryset):
        """Admin action: group all selected stops under one new BaseStop.

        The logical parent takes its name and city from the first selected
        stop; every selected stop is then re-parented to it.
        """
        # BUG FIX: the city was previously taken from queryset[1].common_name
        # (the *name* of the second stop) -- wrong field, wrong row, and an
        # IndexError when only one stop was selected.
        first = queryset[0]
        base_stop = BaseStop(common_name=first.common_name, common_city=first.common_city)
        base_stop.save()
        for obj in queryset:
            obj.parent = base_stop
            obj.save()
        self.message_user(request, "%s added as logical parent stop" % base_stop)
    merge_stops.short_description = "Add stops to same logical stop"
class AgencyAdmin(admin.ModelAdmin):
    # Plain admin; registered mainly for django-reversion tracking.
    model = Agency
class SourceAdmin(admin.ModelAdmin):
    # Plain admin; registered mainly for django-reversion tracking.
    model = Source
'''
Please note, registering these models has the side-effect of registering them for
django-reversion and keeping track of revisions. Please think twice before removing
'''
admin.site.register(BaseStop, StopAdmin)
admin.site.register(UserStop, UserStopAdmin)
admin.site.register(Agency, AgencyAdmin)
admin.site.register(Source, SourceAdmin) |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from product.views import ProductViewset, ListPublishedProductView
# DRF router generates the full CRUD route set for ProductViewset under
# /products/ (list, create, retrieve, update, destroy).
router = DefaultRouter()
router.register('products', ProductViewset, basename='product_crud')
urlpatterns = [
    # Read-only endpoint listing only the published products.
    path('published-products/', ListPublishedProductView.as_view(), name='list_published_products'),
    # Router-generated product CRUD endpoints mounted at the root.
    path('', include(router.urls)),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render, redirect, get_object_or_404
from shop.models import Product
from django.contrib.auth.decorators import login_required
from .models import Wishlist, WishlistItem
@login_required
def wishlist_add(request, product_id,):
    """Add *product_id* to the current user's wishlist (created on demand)."""
    # NOTE(review): update_or_create with no defaults acts like get_or_create
    # here; get_or_create would state the intent more clearly.
    wishlist, created = Wishlist.objects.update_or_create(user=request.user)
    product = get_object_or_404(Product, id=product_id)
    item, itemCreated = WishlistItem.objects.update_or_create(wishlist=wishlist, product=product)
    # NOTE(review): the item is already bound via wishlist=wishlist above, so
    # this add()/save()/save() trio is presumably redundant -- confirm against
    # the Wishlist.items relation (M2M vs reverse FK) before simplifying.
    wishlist.items.add(item)
    item.save()
    wishlist.save()
    return redirect('wishlist:wishlist_detail')
@login_required
def wishlist_remove(request, product_id):
    """Delete every wishlist entry for *product_id* from the user's wishlist."""
    # NOTE(review): get_or_create would express the intent better here.
    wishlist, created = Wishlist.objects.update_or_create(user=request.user)
    product = get_object_or_404(Product, id=product_id)
    # filter().delete() is a no-op when the product is not on the wishlist.
    wishlistItems = WishlistItem.objects.filter(wishlist=wishlist, product=product)
    wishlistItems.delete()
    return redirect('wishlist:wishlist_detail')
@login_required
def wishlist_detail(request):
    """Render the current user's wishlist, creating it on first visit."""
    wishlist, created = Wishlist.objects.update_or_create(user=request.user)
    return render(request, 'wishlist/wishlist_detail.html', {'wishlist': wishlist})
|
from django.contrib import admin
from web.models import Place,Activity,Activity_Checkout,User_Account
admin.site.register(Place)
admin.site.register(Activity) |
#DP and bottom up approach
def Fibonacci_sequence(n):
    """Return the n-th Fibonacci number (1-indexed: F(1) = F(2) = 1).

    Bottom-up dynamic programming: each value is computed once from the two
    previously stored values, so this runs in O(n) time and handles big n
    exactly thanks to Python's arbitrary-precision integers.

    Raises ValueError when n < 1.
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    # BUG FIX: the original assigned `result = 1` for n <= 2 but never
    # returned it, then crashed with IndexError on bottom_up[2] when n == 1
    # (the table only had indices 0..1 in that case).
    if n <= 2:
        return 1
    # bottom_up[i] stores F(i); index 0 is unused padding.
    bottom_up = [None] * (n+1)
    bottom_up[1] = 1
    bottom_up[2] = 1
    for i in range(3, n+1):
        bottom_up[i] = bottom_up[i-1] + bottom_up[i-2]
    return bottom_up[n]
assert Fibonacci_sequence(35) == 9227465
assert Fibonacci_sequence(100) == 354224848179261915075
|
import argparse
def argument():
    """Parse command-line options: mask file, input dir and output dir."""
    parser = argparse.ArgumentParser(description = '''
    Needs a profiler.py, already executed.
    Produces 3 png files, containing timeseries for some statistics, for each wmo.
    ''', formatter_class=argparse.RawTextHelpFormatter)
    # Optional mask file; defaults to the operational MED mesh mask.
    parser.add_argument('--maskfile', '-m', type=str, required=False,
                        default="/pico/home/usera07ogs/a07ogs00/OPA/V2C/etc/static-data/MED1672_cut/MASK/meshmask.nc",
                        help=''' Path of maskfile''')
    # Mandatory I/O directories.
    parser.add_argument('--inputdir', '-i', type=str, default=None, required=True, help="")
    parser.add_argument('--outdir', '-o', type=str, default=None, required=True, help="")
    return parser.parse_args()
args = argument()
import numpy as np
from commons.mask import Mask
from instruments import lovbio_float as bio_float
from instruments.matchup_manager import Matchup_Manager
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from commons.utils import addsep
from profiler_floatsat import ALL_PROFILES,TL,RUN
from SingleFloat_vs_Model_Stat_Timeseries_IOnc import ncreader
import basins.V2 as OGS
from datetime import datetime
def fig_setup(wmo,Lon,Lat):
    """Build the 5-axes figure for one float.

    ax0: full-Mediterranean trajectory map; ax1: zoomed trajectory;
    ax2-ax4: full-width time-series panels limited to the run's time range.
    wmo is the float identifier (string); Lon/Lat are the profile positions.
    Returns (fig, [ax0, ax1, ax2, ax3, ax4]).
    """
    from layer_integral import coastline
    fig = plt.figure()
    # 4x3 grid: row 0 holds the two maps, rows 1-3 are full-width panels.
    ax0 = plt.subplot2grid((4, 3), (0, 0), colspan=2)
    ax1 = plt.subplot2grid((4, 3), (0, 2))
    ax2 = plt.subplot2grid((4, 3), (1, 0), colspan=3)
    ax3 = plt.subplot2grid((4, 3), (2, 0), colspan=3)
    ax4 = plt.subplot2grid((4, 3), (3, 0), colspan=3)
    axs = [ax0, ax1, ax2, ax3, ax4]
    for ax in [ax2, ax3, ax4]:
        ax.xaxis.grid(True)
        #ax.set_xlim([datetime(2015,1,1),datetime(2017,1,1)])
        # Clamp the time axis to the model run's window (TL comes from
        # profiler_floatsat at module level).
        ax.set_xlim(TL.Timelist[0],TL.Timelist[-1])
    fig.set_size_inches(10,15)
    fig.set_dpi(150)
    c_lon,c_lat=coastline.get()
#    list_float_track=bio_float.filter_by_wmo(Profilelist_1,wmo_list[j])
    ax0.plot(c_lon,c_lat,'k')
    ax0.plot(Lon,Lat,'r.')        # whole track in red
    ax0.plot(Lon[0],Lat[0],'b.')  # first profile highlighted in blue
    ax0.set_title("TRAJECTORY of FLOAT " + wmo , color = 'r', fontsize = 18)
#    ind_max_sup=plotmat[0,:].argmax()
#    print Lon[ind_max_sup],Lat[ind_max_sup]
#    ax0.plot(Lon[ind_max_sup],Lat[ind_max_sup],'g.')
#    ax0.plot(Lon[0],Lat[0],'bx')
    ax0.set_xlim([-10,36])
    #ax0.set_ylabel("LAT",color = 'k', fontsize = 15)
    #ax0.set_xlabel("LON",color = 'k', fontsize = 15)
    # Zoom window: track bounding box padded by extent/2 degrees per side.
    extent=4
    ax1.plot(c_lon,c_lat,'k')
    ax1.plot(Lon,Lat,'ro')
    ax1.plot(Lon[0],Lat[0],'bo')
    ax1.set_xlim([Lon.min() -extent/2, Lon.max() +extent/2])
    ax1.set_ylim([Lat.min() -extent/2, Lat.max() +extent/2])
    return fig, axs
TheMask=Mask(args.maskfile)
INDIR = addsep(args.inputdir)
OUTDIR = addsep(args.outdir)
VARLIST = ['P_l']#,'N3n','O2o']
VARLIST_NAME = ['Chlorophyll']#,'Nitrate','Oxygen']
nVar = len(VARLIST)
METRICS = ['Int_0-200','Corr','DCM','z_01','Nit_1']
nStat = len(METRICS)
#M = Matchup_Manager(ALL_PROFILES,TL,BASEDIR)
MED_PROFILES = bio_float.FloatSelector(None,TL.timeinterval,OGS.med)
wmo_list=bio_float.get_wmo_list(MED_PROFILES)
izmax = TheMask.getDepthIndex(200) # Max Index for depth 200m
for wmo in wmo_list:
#for wmo in ['6901512']:
INPUT_FILE = INDIR + wmo + ".nc"
print INPUT_FILE
A = ncreader(INPUT_FILE)
wmo_track_list = bio_float.filter_by_wmo(ALL_PROFILES,wmo)
nP = len(wmo_track_list)
Lon = np.zeros((nP,), np.float64)
Lat = np.zeros((nP,), np.float64)
for ip, p in enumerate(wmo_track_list):
Lon[ip] = p.lon
Lat[ip] = p.lat
times = [p.time for p in wmo_track_list]
for ivar, var in enumerate(VARLIST):
OUTFILE = OUTDIR + var + "_" + wmo + ".png"
print OUTFILE
fig, axes = fig_setup(wmo,Lon,Lat)
fig.suptitle(VARLIST_NAME[ivar],fontsize=36,color='b')
model, ref =A.plotdata(var,'Int_0-200')
surf_model, surf_ref = A.plotdata(var,'SurfVal')
if (~np.isnan(model).all() == True) or (~np.isnan(ref).all() == True):
axes[2].plot(times, ref,'r',label='REF INTEG')
axes[2].plot(times,model,'b',label='MOD INTEG')
axes[2].plot(times, surf_ref,'--r',label='REF SURF')
axes[2].plot(times, surf_model,'--b',label='MOD SURF')
if (var == "P_l"):
axes[2].set_ylabel('Chlorophyll \n $[mg{\ } m^{-3}]$',fontsize=15)
axes[4].set_ylim(40,200)
if (var == "O2o"):
axes[2].set_ylabel('Oxygen 0-200m \n $[mmol{\ } m^{-3}]$',fontsize=15)
if (var == "N3n"):
axes[2].set_ylabel('Nitrate 0-200m \n $[mmol{\ } m^{-3}]$',fontsize=15)
#axes[2].set_ylabel('INTEG 0-200m \n $[mmol{\ } m^{-3}]$',fontsize=15)
axes[2].set_ylim(0,8)
axes[4].set_ylim(0,200)
legend = axes[2].legend(loc='upper left', shadow=True, fontsize=12)
model_corr, ref_corr =A.plotdata(var,'Corr')
times_r = times
for icr , cr in enumerate(ref_corr):
if (cr <= 0):
ref_corr[icr] = np.nan
#times_r.remove(times[icr])
axes[3].plot(times,ref_corr,'k')
#axes[3].plot(times_r,ref_corr[ref_corr>0],'k')
axes[3].set_ylabel('CORR',fontsize=15)
axes[2].set_xticklabels([])
axes[2].set_title(RUN,fontsize=18)
axes[3].set_ylim(0,1)
if (var == "P_l"):
model_dcm, ref_dcm =A.plotdata(var,'DCM')
model_mld, ref_mld =A.plotdata(var,'z_01')
#if (model_mld > 150):
#model_mld = np.nan
if (~np.isnan(model_dcm).all() == True) or (~np.isnan(ref_dcm).all() == True):
axes[3].set_xticklabels([])
axes[4].plot(times, ref_dcm,'r',label='DCM REF')
axes[4].plot(times,model_dcm,'b',label='DCM MOD')
axes[4].plot(times, ref_mld,'--r',label='MLB REF')
axes[4].plot(times,model_mld,'--b',label='MLB MOD')
#axes[4].plot(times,np.ones_like(times)* np.nanmean(ref_dcm),'r',linewidth=3)
#axes[4].plot(times,np.ones_like(times)* np.nanmean(model_dcm),'b',linewidth=3) #marker='.')
axes[4].invert_yaxis()
axes[4].set_ylabel('DCM/MLB $[m]$',fontsize=15)
#axes[4].xaxis_date()
#axes[4].xaxis.set_major_locator(mdates.MonthLocator())
axes[4].xaxis.set_major_formatter(mdates.DateFormatter("%d%b%y"))
xlabels = axes[4].get_xticklabels()
plt.setp(xlabels, rotation=30)
else:
axes[3].xaxis.set_major_formatter(mdates.DateFormatter("%d%b%y"))
xlabels = axes[3].get_xticklabels()
plt.setp(xlabels, rotation=30)
# if (~np.isnan(model_mld).all() == True) or (~np.isnan(ref_mld).all() == True):
# axes_4b = axes[4].twinx()
# axes_4b.plot(times, ref_mld,'--r',label='REF')
# axes_4b.plot(times,model_mld,'--b',label='MOD')
# axes_4b.invert_yaxis()
# axes_4b.set_ylabel('MLD $[m]$ - -',fontsize=15)
legend = axes[4].legend(loc='lower left', shadow=True, fontsize=12)
if (var == "N3n"):
model_nit, ref_nit =A.plotdata(var,'Nit_1')
if (~np.isnan(ref_nit).all() == True) or (~np.isnan(model_nit).all() == True):
axes[4].plot(times, ref_nit,'r',label='REF')
axes[4].plot(times,model_nit,'b',label='MOD')
axes[4].invert_yaxis()
axes[4].set_ylabel('NITRICL $[m]$',fontsize=15)
else:
axes[4].plot(times, np.ones_like(times))
axes[4].xaxis.set_major_formatter(mdates.DateFormatter("%d-%m-%Y"))
xlabels = axes[4].get_xticklabels()
plt.setp(xlabels, rotation=30)
legend = axes[4].legend(loc='upper left', shadow=True, fontsize=12)
if (var == "O2o"):
axes[4].plot(times, np.ones_like(times))
axes[4].xaxis.set_major_formatter(mdates.DateFormatter("%d-%m-%Y"))
xlabels = axes[4].get_xticklabels()
plt.setp(xlabels, rotation=30)
fig.savefig(OUTFILE)
plt.close(fig)
# import sys
# sys.exit()
|
# Imports
from flask import Flask, render_template, redirect, url_for
from flask_pymongo import PyMongo
import pymongo
from flask import Flask, jsonify
import io
import json
from bson import ObjectId
from flask.json import JSONEncoder
app = Flask(__name__)
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["sp500"]
mycol = mydb["stock"]
@app.route("/")
def test():
# mycol.update({}, {'$unset': {'dividendYield':"NaN"}}, multi=True)
test = mycol.find()
print(test)
inventory = list(test)
# inventory.updateMany({}, { $unset : { description : 1} })
# inventory.updateMany({}, { unset : { dividendYield : 1} })
# db.collection.updateMany(
# <filter>,
# <update>,
# {
# upsert: <boolean>,
# writeConcern: <document>,
# collation: <document>,
# arrayFilters: [ <filterdocument1>, ... ],
# hint: <document|string> // Available starting in MongoDB 4.2.1
# }
# )
# print("====================")
# print("====================")
# print("====================")
# print("====================")
# print("====================")
# # print(inventory)
# print("====================")
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
return json.JSONEncoder.default(self, o)
# print("====================")
inventory = JSONEncoder().encode(inventory)
# print(inventory)
# print(type(inventory))
# print("====================")
x = json.loads(inventory)
# print(x)
# print(type(x))
# print(x[0])
# print(type(x[0]))
with open('stock.json', 'w') as json_file:
json.dump(x, json_file)
return jsonify(x)
def test_connection():
    """Smoke test: invoke the index route once inside an app context."""
    with app.app_context():
        # Exercise the route handler directly (no HTTP round trip).
        test()
        print("----------------")
        print("Successful!")
        print("----------------")
if __name__ == "__main__":
app.run(debug=True)
test_connection()
|
import sys
import pdb
import random
import numpy as np
import perm2
import unittest
from wreath import wreath_yor, get_mat, WreathCycSn, cyclic_irreps, wreath_rep
from utils import load_irrep
from coset_utils import young_subgroup_perm, coset_reps
from cube_irrep import Cube2Irrep
sys.path.append('./cube')
from str_cube import *
class TestWreath(unittest.TestCase):
    """Checks that the wreath-product representations act like group
    homomorphisms: rep(g) @ rep(h) == rep(g*h), identity maps to identity."""

    def test_wreath(self):
        """Homomorphism and identity properties of wreath_yor matrices."""
        alpha = (0, 3, 2)
        _parts = ((), (2,1), (1,1))
        ydict = wreath_yor(alpha, _parts)
        ks = list(ydict.keys())
        # rep(g) @ rep(h) must equal rep(g*h) for random g, h.
        g = random.choice(ks)
        h = random.choice(ks)
        gh = perm2.Perm2.from_tup(g) * perm2.Perm2.from_tup(h)
        gh_mat = np.matmul(get_mat(g, ydict), get_mat(h, ydict))
        self.assertTrue(np.allclose(gh_mat, get_mat(gh, ydict)))
        # The identity permutation must act trivially.
        eye = perm2.Perm2.eye(len(g))
        eye_g_mat = np.matmul(get_mat(eye, ydict), get_mat(g, ydict))
        eye_mat = get_mat(eye * perm2.Perm2.from_tup(g), ydict)
        self.assertTrue(np.allclose(eye_mat, eye_g_mat))

    def test_wreath_full(self):
        """Same homomorphism property for full wreath reps of cube states."""
        # BUG FIX: time.time() is used below but `time` was never imported at
        # module level (it only worked if str_cube's star-import leaked it).
        import time
        o1, p1 = get_wreath('YYRMRMWWRWRYWMYMGGGGBBBB') # 14
        o2, p2 = get_wreath('YYBWGYRWMRBWMRMGYBRBGGMW') # 3
        o3, p3 = get_wreath('GGBWMGBBMRYGMRYRYBYRWWWM') # 4
        w1 = WreathCycSn.from_tup(o1, p1, order=3)
        w2 = WreathCycSn.from_tup(o2, p2, order=3)
        w3 = WreathCycSn.from_tup(o3, p3, order=3)
        prod12 = w1 * w2
        prod13 = w1 * w3
        o12 = prod12.cyc.cyc
        o13 = prod13.cyc.cyc
        perm12 = prod12.perm.tup_rep
        perm13 = prod13.perm.tup_rep
        # Load a precomputed irrep from disk (skips the test host without it).
        alpha = (2, 3, 3)
        parts = ((1,1), (1,1,1), (2,1))
        cos_reps = coset_reps(perm2.sn(8), young_subgroup_perm(alpha))
        cyc_irrep_func = cyclic_irreps(alpha)
        start = time.time()
        print('Loading {} | {}'.format(alpha, parts))
        yor_dict = load_irrep('/local/hopan/cube/', alpha, parts)
        if yor_dict is None:
            exit()
        print('Done loading | {:.2f}s'.format(time.time() - start))
        wreath1 = wreath_rep(o1, p1, yor_dict, cos_reps, cyc_irrep_func, alpha)
        wreath2 = wreath_rep(o2, p2, yor_dict, cos_reps, cyc_irrep_func, alpha)
        wreath3 = wreath_rep(o3, p3, yor_dict, cos_reps, cyc_irrep_func, alpha)
        w12 = np.matmul(wreath1, wreath2)
        w13 = np.matmul(wreath1, wreath3)
        wd12 = wreath_rep(o12, perm12, yor_dict, cos_reps, cyc_irrep_func, alpha)
        wd13 = wreath_rep(o13, perm13, yor_dict, cos_reps, cyc_irrep_func, alpha)
        self.assertTrue(np.allclose(w12, wd12))
        self.assertTrue(np.allclose(w13, wd13))

    def test(self):
        """The identity group element must map to the identity matrix."""
        alpha = (1, 2, 5)
        parts = ((1,), (1, 1), (3, 2))
        cirrep = Cube2Irrep(alpha, parts, numpy=True)
        otup = (0,) * 8
        ptup = tuple(i for i in range(1, len(otup)+1))
        grep = cirrep.tup_to_irrep_np(otup, ptup)
        self.assertTrue(np.allclose(grep, np.eye(*grep.shape)))

    '''
    def test_cube_wreath(self):
        c = init_2cube()
        for f in ['r', 'l', 'f', 'b', 'u', 'd']:
            cube_str = rotate(c, f)
            o1, p1 = get_wreath(cube_str)
            o2, pinv = get_wreath(rotate(c, 'i' + f))
            c1 = CyclicGroup(o1, 3)
            c2 = CyclicGroup(o2, 3)
            p1 = perm2.Perm2.from_tup(p1)
            p2 = perm2.Perm2.from_tup(pinv)
            w = WreathCycSn(c1, p1)
            winv = WreathCycSn(c2, p2)
            prod = w * winv
            print('Face: {} | prod should be identity wreath: {}'.format(f, prod))
            print('===============')
    '''
if __name__ == '__main__':
unittest.main()
|
from tensorboardX import SummaryWriter
import os
from Experiment import *
# One-off script: train a model on Pendulum-v0 and log the training plus the
# evaluation runs with tensorboardX.
Pendulum = ExperimentClass('Pendulum-v0')
# os.chdir("debug")
os.chdir("trainModel_runs")  # SummaryWriter run logs land under this directory
# --- earlier hyper-parameter grid search (on LunarLander), kept for reference ---
# actor_neuron_parameters = [25,35,45]
# critic_neuron_parameters = [4,5,6]
# min_reward = -100
# for actor_neuron in actor_neuron_parameters:
#     for critic_neuron in critic_neuron_parameters:
#         w = SummaryWriter()
#         model = Lunar.trainModel(actor_neuron,critic_neuron,w)
#         average_reward,std = Lunar.averageModelRuns(model,w)
#         w.close()
#         print("Actor Hidden Units: {} Critic Hidden Units: {}".format(actor_neuron,critic_neuron))
#         print("Mean rewards: {}, Standard Deviation: {}".format(average_reward,std))
#         if average_reward > min_reward:
#             best_model = model
#             min_reward = average_reward
w = SummaryWriter()
# 36 actor / 6 critic hidden units -- presumably picked from the grid search above.
target_actor_model = Pendulum.trainModel(36,6,w)
mean, std = Pendulum.averageModelRuns(target_actor_model,w)
w.close()
|
"""Callback registry"""
from jwst.lib.signal_slot import Signal
__all__ = ['CallbackRegistry']
class CallbackRegistry():
    """Registry mapping event names to `Signal` objects holding callbacks."""

    def __init__(self):
        # event name -> Signal holding that event's callbacks
        self.registry = dict()

    def add(self, event, callback):
        """Register `callback` for `event`, creating the signal on demand."""
        if event not in self.registry:
            self.registry[event] = Signal()
        self.registry[event].connect(callback)

    def reduce(self, event, *args):
        """Chain `event`'s callbacks over `args` and return the final value.

        Parameters
        ----------
        args : [arg[,...]]
            The args to filter

        Returns
        -------
        The reduced result, or `None` when nothing can be determined (for
        instance when no callbacks were registered).

        Notes
        -----
        Each callback receives the output of the previous one, so when the
        data has more than one object each callback should return a tuple
        that can be splatted into the next call.  No ordering is guaranteed
        by contract; currently callbacks are stored in a list, so they run
        in registration order.
        """
        return self.registry[event].reduce(*args)

    def add_decorator(self, event):
        """Return a decorator that registers the wrapped function for `event`.

        Parameters
        ----------
        event : str
            The name of event to attach the function to.
        """
        def decorator(func):
            self.add(event, func)
            return func
        return decorator

    # Allow `@registry(event)` as shorthand for `@registry.add_decorator(event)`.
    __call__ = add_decorator
|
################################################
#Try to blend the image
# Make image as background and draw lines on it
#
#################################################
from PIL import Image
import os
def image_blend(path):
    """Blend every PNG/JPEG image in *path* toward white (70% white layer).

    Each result is written next to the original as '<name>_blend.png'.
    Previously-generated '*_blend' outputs are skipped so the function is
    idempotent across runs.
    """
    valid_exts = {'.png', '.jpg', '.jpeg'}
    for filename in os.listdir(path):
        stem, ext = os.path.splitext(filename)
        # BUG FIX: the original substring test ('png' in filename) matched
        # names like 'png_notes.txt', and truncating at the *first* '.'
        # mangled dotted names such as 'a.b.png' (output became 'a_blend.png').
        if ext.lower() not in valid_exts:
            continue
        if stem.endswith('_blend'):
            continue  # don't re-blend our own output
        im1 = Image.open(os.path.join(path, filename))
        im2 = im1.convert('RGBA')       # uniform mode so blend() accepts it
        im3 = Image.new('RGBA', im1.size, (255, 255, 255))
        alpha = 0.7                     # 0.0 -> original, 1.0 -> pure white
        out = Image.blend(im2, im3, alpha)
        newfilename = stem + '_blend' + '.png'
        out.save(os.path.join(path, newfilename), "png")
def main():
    # One-off run over a hard-coded local directory.
    image_blend('/Users/yanchunyang/Documents/highschools/scripts/snow/')
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
1、执行SQL
复制代码
# !/usr/bin/env pytho
# -*- coding:utf-8 -*-
import pymysql
# 创建连接
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1', charset='utf8')
# 创建游标
cursor = conn.cursor()
# 执行SQL,并返回收影响行数
effect_row = cursor.execute("select * from tb7")
# 执行SQL,并返回受影响行数
# effect_row = cursor.execute("update tb7 set pass = '123' where nid = %s", (11,))
# 执行SQL,并返回受影响行数,执行多次
# effect_row = cursor.executemany("insert into tb7(user,pass,licnese)values(%s,%s,%s)", [("u1","u1pass","11111"),("u2","u2pass","22222")])
# 提交,不然无法保存新建或者修改的数据
conn.commit()
# 关闭游标
cursor.close()
# 关闭连接
conn.close()
复制代码
注意:存在中文的时候,连接需要添加charset = 'utf8',否则中文显示乱码。
2、获取查询数据
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
cursor = conn.cursor()
cursor.execute("select * from tb7")
# 获取剩余结果的第一行数据
row_1 = cursor.fetchone()
print
row_1
# 获取剩余结果前n行数据
# row_2 = cursor.fetchmany(3)
# 获取剩余结果所有数据
# row_3 = cursor.fetchall()
conn.commit()
cursor.close()
conn.close()
复制代码
3、获取新创建数据自增ID
可以获取到最新自增的ID,也就是最后插入的一条数据ID
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
cursor = conn.cursor()
effect_row = cursor.executemany("insert into tb7(user,pass,licnese)values(%s,%s,%s)",
[("u3", "u3pass", "11113"), ("u4", "u4pass", "22224")])
conn.commit()
cursor.close()
conn.close()
# 获取自增id
new_id = cursor.lastrowid
print
new_id
复制代码
4、移动游标
操作都是靠游标,那对游标的控制也是必须的
注:在fetch数据时按照顺序进行,可以使用cursor.scroll(num, mode)
来移动游标位置,如:
cursor.scroll(1, mode='relative') # 相对当前位置移动
cursor.scroll(2, mode='absolute') # 相对绝对位置移动
5、fetch数据类型
关于默认获取的数据是元组类型,如果想要获得字典类型的数据,即:
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
# 游标设置为字典类型
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
cursor.execute("select * from tb7")
row_1 = cursor.fetchone()
print
row_1 # {u'licnese': 213, u'user': '123', u'nid': 10, u'pass': '213'}
conn.commit()
cursor.close()
conn.close()
复制代码
6、调用存储过程
a、调用无参存储过程
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
# 游标设置为字典类型
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
# 无参数存储过程
cursor.callproc('p2') # 等价于cursor.execute("call p2()")
row_1 = cursor.fetchone()
print
row_1
conn.commit()
cursor.close()
conn.close()
复制代码
b、调用有参存储过程
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
cursor.callproc('p1', args=(1, 22, 3, 4))
# 获取执行完存储的参数,参数@开头
cursor.execute("select @p1,@_p1_1,@_p1_2,@_p1_3") # {u'@_p1_1': 22, u'@p1': None, u'@_p1_2': 103, u'@_p1_3': 24}
row_1 = cursor.fetchone()
print
row_1
conn.commit()
cursor.close()
conn.close()
复制代码
三、关于pymysql防注入
1、字符串拼接查询,造成注入
正常查询语句:
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
cursor = conn.cursor()
user = "u1"
passwd = "u1pass"
# 正常构造语句的情况
sql = "select user,pass from tb7 where user='%s' and pass='%s'" % (user, passwd)
# sql=select user,pass from tb7 where user='u1' and pass='u1pass'
row_count = cursor.execute(sql)
row_1 = cursor.fetchone()
print
row_count, row_1
conn.commit()
cursor.close()
conn.close()
复制代码
构造注入语句:
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
cursor = conn.cursor()
user = "u1' or '1'-- "
passwd = "u1pass"
sql = "select user,pass from tb7 where user='%s' and pass='%s'" % (user, passwd)
# 拼接语句被构造成下面这样,永真条件,此时就注入成功了。因此要避免这种情况需使用pymysql提供的参数化查询。
# select user,pass from tb7 where user='u1' or '1'-- ' and pass='u1pass'
row_count = cursor.execute(sql)
row_1 = cursor.fetchone()
print
row_count, row_1
conn.commit()
cursor.close()
conn.close()
复制代码
2、避免注入,使用pymysql提供的参数化语句
正常参数化查询
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
cursor = conn.cursor()
user = "u1"
passwd = "u1pass"
# 执行参数化查询
row_count = cursor.execute("select user,pass from tb7 where user=%s and pass=%s", (user, passwd))
row_1 = cursor.fetchone()
print
row_count, row_1
conn.commit()
cursor.close()
conn.close()
复制代码
构造注入,参数化查询注入失败。
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
cursor = conn.cursor()
user = "u1' or '1'-- "
passwd = "u1pass"
# 执行参数化查询
row_count = cursor.execute("select user,pass from tb7 where user=%s and pass=%s", (user, passwd))
# 内部执行参数化生成的SQL语句,对特殊字符进行了加\转义,避免注入语句生成。
# sql=cursor.mogrify("select user,pass from tb7 where user=%s and pass=%s",(user,passwd))
# print sql
# select user,pass from tb7 where user='u1\' or \'1\'-- ' and pass='u1pass'被转义的语句。
row_1 = cursor.fetchone()
print
row_count, row_1
conn.commit()
cursor.close()
conn.close()
复制代码
结论:excute执行SQL语句的时候,必须使用参数化的方式,否则必然产生SQL注入漏洞。
3、使用存mysql储过程动态执行SQL防注入
使用MYSQL存储过程自动提供防注入,动态传入SQL到存储过程执行语句。
复制代码
delimiter \\
DROP PROCEDURE IF EXISTS proc_sql \\
CREATE PROCEDURE proc_sql(
    in nid1 INT,
    in nid2 INT,
    in callsql VARCHAR(255)
)
BEGIN
    set @nid1 = nid1;
    set @nid2 = nid2;
    set @callsql = callsql;
    PREPARE myprod FROM @callsql;
    -- 例如 PREPARE prod FROM 'select * from tb2 where nid>? and nid<?'; 传入的值为字符串, ? 为占位符
    -- 用 @nid1 和 @nid2 填充占位符
    EXECUTE myprod USING @nid1, @nid2;
    DEALLOCATE prepare myprod;
END \\
delimiter ;
set @nid1 = 12;
set @nid2 = 15;
set @callsql = 'select * from tb7 where nid>? and nid<?';
CALL proc_sql(@nid1, @nid2, @callsql);
pymsql中调用
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1')
cursor = conn.cursor()
mysql = "select * from tb7 where nid>? and nid<?"
cursor.callproc('proc_sql', args=(11, 15, mysql))
rows = cursor.fetchall()
print
rows # ((12, 'u1', 'u1pass', 11111), (13, 'u2', 'u2pass', 22222), (14, 'u3', 'u3pass', 11113))
conn.commit()
cursor.close()
conn.close()
复制代码
四、使用with简化连接过程
每次都连接关闭很麻烦,使用上下文管理,简化连接过程
复制代码
# ! /usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = "TKQ"
import pymysql
import contextlib
# 定义上下文管理器,连接后自动关闭连接
@contextlib.contextmanager
def mysql(host='127.0.0.1', port=3306, user='root', passwd='', db='tkq1', charset='utf8'):
    """Context manager yielding a DictCursor on a fresh connection.

    On exit (normal or via exception) the transaction is committed and both
    the cursor and the connection are closed.
    """
    connection = pymysql.connect(host=host, port=port, user=user,
                                 passwd=passwd, db=db, charset=charset)
    dict_cursor = connection.cursor(cursor=pymysql.cursors.DictCursor)
    try:
        yield dict_cursor
    finally:
        # Tear-down mirrors the manual connect/commit/close sequence.
        connection.commit()
        dict_cursor.close()
        connection.close()
# Execute SQL inside the managed connection.
with mysql() as cursor:
    print(cursor)
    row_count = cursor.execute("select * from tb7")
    row_1 = cursor.fetchone()
    # BUG FIX: the original left a bare `print` plus a stray tuple expression.
    print(row_count, row_1)
__author__ = 'Jakub Wojtanek. Kwojtanek@gmail.com'
import re
def L2():
    """Read l2.txt, print only its ASCII letters, and return them.

    Returns the filtered string as well as printing it, so callers can use
    the result programmatically (backward compatible: the original returned
    None, which nothing relied on).
    """
    # Use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open('l2.txt', 'r') as handle:
        text = handle.read()
    letters = ''.join(re.findall("[A-Za-z]", text))
    print(letters)
    return letters
# BUG FIX: the original called undefined lowercase l2(); the function is L2.
L2()
import requests
import json
from pathlib import Path
# Gets your currentt ipv4 / ipv6 address
class _ipify():
    """Thin client for the ipify public-address API (api64.ipify.org)."""

    apiAddress = "https://api64.ipify.org"

    def __init__(self, ca=None, requestTimeout=15):
        """
        ca: None to use default TLS verification, a bool to force it
            on/off, or a path (str) to a CA bundle file.
        requestTimeout: per-request timeout in seconds.
        """
        self.requestTimeout = requestTimeout
        # BUG FIX: always initialise self.ca. The original only assigned it
        # for None/str/bool values, so any other type left the attribute
        # unset and apiCall() raised AttributeError.
        self.ca = None
        if ca is not None:
            if isinstance(ca, bool):
                self.ca = ca
            elif isinstance(ca, str):
                self.ca = str(Path(ca))

    def apiCall(self, endpoint, methord="GET", data=None):
        """Perform one HTTP request against the API.

        Returns (status_code, parsed_json) on success, or (0, message) on
        connection failure / unsupported method. (`methord` spelling is kept
        for backward compatibility with existing callers.)
        """
        kwargs = {}
        kwargs["timeout"] = self.requestTimeout
        if self.ca is not None:
            kwargs["verify"] = self.ca
        try:
            url = "{0}/{1}".format(self.apiAddress, endpoint)
            if methord == "GET":
                response = requests.get(url, **kwargs)
            else:
                # BUG FIX: the original fell through with `response` unbound
                # for any non-GET method; fail explicitly instead.
                return 0, "Unsupported method - {0}".format(methord)
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
            return 0, "Connection Timeout - {0}".format(e)
        return response.status_code, json.loads(response.text)

    def getMyIPAddress(self):
        """Return the caller's public IP as a string, or None on failure."""
        statusCode, response = self.apiCall("?format=json")
        if statusCode == 200:
            return response["ip"]
        return None
# Gets IP Geo Infomation
class _geoipify():
    """Client for the ipify IP-geolocation API."""

    apiAddress = "https://geo.ipify.org/api/v1"

    def __init__(self, apiToken, ca=None, requestTimeout=15):
        """
        apiToken: ipify API key appended to every request URL.
        ca: None to use default TLS verification, a bool to force it
            on/off, or a path (str) to a CA bundle file.
        requestTimeout: per-request timeout in seconds.
        """
        self.apiToken = apiToken
        self.requestTimeout = requestTimeout
        # BUG FIX: always initialise self.ca. The original left the
        # attribute unset for non-None/str/bool values, so apiCall()
        # raised AttributeError.
        self.ca = None
        if ca is not None:
            if isinstance(ca, bool):
                self.ca = ca
            elif isinstance(ca, str):
                self.ca = str(Path(ca))

    def apiCall(self, endpoint, methord="GET", data=None):
        """Perform one HTTP request against the API.

        Returns (status_code, parsed_json) on success, or (0, message) on
        connection failure / unsupported method. (`methord` spelling is kept
        for backward compatibility with existing callers.)
        """
        kwargs = {}
        kwargs["timeout"] = self.requestTimeout
        if self.ca is not None:
            kwargs["verify"] = self.ca
        try:
            url = "{0}?apiKey={1}&{2}".format(self.apiAddress, self.apiToken, endpoint)
            if methord == "GET":
                response = requests.get(url, **kwargs)
            else:
                # BUG FIX: the original fell through with `response` unbound
                # for any non-GET method; fail explicitly instead.
                return 0, "Unsupported method - {0}".format(methord)
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
            return 0, "Connection Timeout - {0}".format(e)
        return response.status_code, json.loads(response.text)

    def geoIPLookup(self, ip):
        """Look up geolocation info for *ip*; returns (status, response)."""
        statusCode, response = self.apiCall("ipAddress={0}".format(ip))
        return statusCode, response
# Detects proxy, vpn / tor addresses from ip
class _proxyipify():
    """Client for the ipify VPN/proxy/Tor detection API."""

    apiAddress = "https://vpn-proxy-detection.ipify.org/api/v1"

    def __init__(self, apiToken, ca=None, requestTimeout=15):
        """
        apiToken: ipify API key appended to every request URL.
        ca: None to use default TLS verification, a bool to force it
            on/off, or a path (str) to a CA bundle file.
        requestTimeout: per-request timeout in seconds.
        """
        self.apiToken = apiToken
        self.requestTimeout = requestTimeout
        # BUG FIX: always initialise self.ca. The original left the
        # attribute unset for non-None/str/bool values, so apiCall()
        # raised AttributeError.
        self.ca = None
        if ca is not None:
            if isinstance(ca, bool):
                self.ca = ca
            elif isinstance(ca, str):
                self.ca = str(Path(ca))

    def apiCall(self, endpoint, methord="GET", data=None):
        """Perform one HTTP request against the API.

        Returns (status_code, parsed_json) on success, or (0, message) on
        connection failure / unsupported method. (`methord` spelling is kept
        for backward compatibility with existing callers.)
        """
        kwargs = {}
        kwargs["timeout"] = self.requestTimeout
        if self.ca is not None:
            kwargs["verify"] = self.ca
        try:
            url = "{0}?apiKey={1}&{2}".format(self.apiAddress, self.apiToken, endpoint)
            if methord == "GET":
                response = requests.get(url, **kwargs)
            else:
                # BUG FIX: the original fell through with `response` unbound
                # for any non-GET method; fail explicitly instead.
                return 0, "Unsupported method - {0}".format(methord)
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
            return 0, "Connection Timeout - {0}".format(e)
        return response.status_code, json.loads(response.text)

    def proxyDetect(self, ip):
        """Detect whether *ip* is a proxy/VPN/Tor exit; returns (status, response)."""
        statusCode, response = self.apiCall("ipAddress={0}".format(ip))
        return statusCode, response
|
"""Program that outputs one of at least four random, good fortunes."""
__author__ = "730249177"
from random import randint


def pick_fortune(choice):
    """Map a roll in 1..100 to one of four fortunes.

    BUG FIX: the original tested `choice < 50` first, so its nested
    `choice <= 30` and `choice <= 10` branches were unreachable and only
    two of the four fortunes could ever be printed. Checking the narrowest
    range first makes all four reachable.
    """
    if choice <= 10:
        return "Life is about to be better than it has ever been for you."
    elif choice <= 30:
        return "The specific thing that you have wished for will come true."
    elif choice < 50:
        return "An old love will come back to you in the coming days."
    else:
        return "A fresh start will put you on your way."


print("Your fortune cookie says...")
choice = randint(1, 100)
print(pick_fortune(choice))
print("Now, go spread positive vibes!")
import numpy
import labrad
cxn = labrad.connect()
dv = cxn.data_vault
import matplotlib
from matplotlib import pyplot

# Number of timetag traces recorded per dataset.
totalTraces = 200
# Alternative dataset selections (other ion numbers), kept for reference:
#title = '2012 June 21 delay time 5 ions' ; datasets = ['2012Jun21_{0:=04}_{1:=02}'.format(x/100, x % 100) for x in [192851,193305,193518,193826, 194101,194327,194601,194925,200802, 201806]] #heating time'
#title = '2012 June 21 delay time 10 ions' ; datasets = ['2012Jun21_{0:=04}_{1:=02}'.format(x/100, x % 100) for x in [204347,204755, 205035, 205356, 205735, 210012, 210252, 210519, 210742, 211338]]
#title = '2012 June 21 delay time 15 ions' ; datasets = ['2012Jun21_{0:=04}_{1:=02}'.format(x/100, x % 100) for x in [213349, 213806, 214046, 214446, 214913, 215238, 215610, 215848, 223117, 223533, 223852, 224508]]
#title = '2012 June 21 delay time 19 ions' ; datasets = ['2012Jun21_{0:=04}_{1:=02}'.format(x/100, x % 100) for x in [231600, 232030, 232357, 232707, 233111, 233359, 234121]]
# Active selection: dataset names are built as HHMM_SS strings (Python 2
# integer division splits the timestamp).
title = '2012 June 21 delay time 2 ions'; datasets = ['2012Jun22_{0:=04}_{1:=02}'.format(x/100, x % 100) for x in [1335, 1743, 2050, 2314, 3047, 2550, 3417, 3946, 2824, 5047]]

def arangeByParameter(datasets, parameter):
    # Return the dataset names sorted by the value of the given
    # data-vault parameter (read from dataset 1 of each directory).
    parList = []
    for dataset in datasets:
        dv.cd(['','Experiments','LatentHeat_no729_autocrystal',dataset])
        dv.open(1)
        par = float(dv.get_parameter(parameter))
        parList.append(par)
    together = zip(datasets, parList)
    s = sorted(together, key = lambda x: x[1])
    # Python 2: zip() returns a list, so the transposed pairs are indexable.
    s = list(zip(*s)[0])
    return s

# Process datasets in order of increasing readout delay.
datasets = arangeByParameter(datasets, 'readout_delay')
refSigs = []
detectedCounts = [] #list of counts detected during readout
figure = pyplot.figure()
figure.clf()
pyplot.suptitle('Histogram of readouts')
# Give each dataset its own colour from a wide colormap.
colormap = pyplot.cm.gist_ncar
ax = pyplot.axes()
ax.set_color_cycle([colormap(i) for i in numpy.linspace(0, 0.9, len(datasets))])
for datasetName in datasets:
    print 'Getting timetags...{}'.format(datasetName)
    dv.cd(['','Experiments','LatentHeat_no729_autocrystal',datasetName])
    datasetCounts = []
    dv.open(1)
    # Experiment timing parameters for this dataset.
    initial_cooling = dv.get_parameter('initial_cooling')
    heat_delay = dv.get_parameter('heat_delay')
    axial_heat = dv.get_parameter('axial_heat')
    readout_delay = dv.get_parameter('readout_delay')
    readout_time = dv.get_parameter('readout_time')
    xtal_record = dv.get_parameter('xtal_record')
    #
    # readout range
    heatStart = (initial_cooling + heat_delay ) # / 10.0**6 #in seconds
    heatEnd = (initial_cooling + heat_delay +axial_heat )
    # NOTE(review): axial_heat is added twice in the start-of-readout sum --
    # confirm this matches the pulse sequence and is not a copy/paste slip.
    startReadout = (axial_heat + initial_cooling + heat_delay + axial_heat + readout_delay )
    stopReadout = startReadout + readout_time
    print datasetName, heatStart, heatEnd, startReadout, stopReadout
    print 'Heating time :', heatEnd - heatStart
    print 'Delay time :', readout_delay
    dv.cd(['','Experiments','LatentHeat_no729_autocrystal',datasetName,'timetags'])
    for dataset in range(1,totalTraces+1):
        dv.open(int(dataset))
        timetags = dv.get().asarray[:,0]
        # Count photon timetags falling inside the readout window; the
        # elementwise boolean product implements an AND of both bounds.
        countsReadout = numpy.count_nonzero((startReadout <= timetags) * (timetags <= stopReadout))
        countsReadout = countsReadout / float(readout_time) #now in counts/secz
        datasetCounts.append(countsReadout)
        detectedCounts.append(countsReadout)
    # Per-dataset histogram of readout count rates.
    pyplot.hist(datasetCounts, 60, histtype = 'step', label = 'heating = {0} ms, delay = {1} ms'.format(1000 * axial_heat, 1000 * readout_delay))
    print 'Done'
# Combined histogram over every dataset.
pyplot.hist(detectedCounts, 60, histtype = 'step', label = 'Total', color = 'black')
pyplot.xlabel('Counts / Sec')
pyplot.title(title)
pyplot.legend()
pyplot.show()
# -*- coding: utf-8 -*-
"""
A file with helper functions to create and Excel file with the associated
analysis previously performed.
Created on Fri Jul 12 12:03:48 2019Created on Mon Jul 15 10:01:37 2019
@author: sdtaylor
"""
# imports
import pandas as pd
import xlsxwriter
import itertools
import os
from datetime import datetime
from django.conf import settings
def write_xlsx(combined_api_data):
    """
    Write each entry of *combined_api_data* to its own sheet of an Excel file.

    Parameters
    ----------
    combined_api_data : dict
        Keys are sheet (tab) names; values are DataFrames written as the
        data of each tab.
    """
    # Destination inside the media folder (leading slash stripped).
    # Desktop variant kept for reference:
    # new_file_path = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop', "stock_data.xlsx")
    output_path = settings.MEDIA_URL[1:] + 'stock_data.xlsx'
    writer = pd.ExcelWriter(output_path, engine='xlsxwriter', datetime_format='dd/mm/yy')

    # Dump every DataFrame to its sheet, recording names and row counts
    # for the formatting pass.
    sheet_names = []
    rows_per_sheet = []
    for sheet_name, frame in combined_api_data.items():
        frame.to_excel(writer, sheet_name=sheet_name)
        sheet_names.append(sheet_name)
        rows_per_sheet.append(len(frame))

    # Apply formats, then persist the workbook to disk.
    format_xlsx(writer, sheet_names, rows_per_sheet)
    writer.save()
def format_xlsx(writer, sheets_xlsx, num_rows):
    """
    Apply column widths, number formats and conditional formatting to every
    worksheet written to *writer*.

    Parameters
    ----------
    writer : pandas.ExcelWriter
        Open xlsxwriter-backed writer whose sheets will be formatted.
    sheets_xlsx : list of str
        Sheet names, in the order they were written.
    num_rows : list of int
        Row count of each sheet, parallel to *sheets_xlsx*.
    """
    # get xlsxwriter workbook / worksheet objects for formatting
    workbook = writer.book
    worksheets = [writer.sheets[name] for name in sheets_xlsx]

    # reusable cell formats
    bolded_text = workbook.add_format({'bold': True})
    whole_nums = workbook.add_format({'num_format': '#,##0.00'})
    currencies = workbook.add_format({'num_format': '$#,##0.00'})
    percentages = workbook.add_format({'num_format': '0.00%'})
    fx = workbook.add_format({'num_format': '#0.0000000'})

    # (columns, width, format-or-None) -- one entry per report column
    column_specs = [
        ('B:B', 30, bolded_text), ('D:D', 9, whole_nums),
        ('E:E', 15, currencies), ('F:F', 15, currencies),
        ('G:G', 15, currencies), ('H:H', 15, currencies),
        ('I:I', 15, percentages), ('J:J', 20, currencies),
        ('L:L', 12, currencies), ('M:M', 12, currencies),
        ('N:N', 12, percentages), ('O:O', 12, None),
        ('P:P', 12, currencies), ('Q:Q', 12, currencies),
        ('R:R', 15, None), ('S:S', 15, None),
        ('T:T', 25, None), ('U:U', 25, None),
        ('V:V', 25, None), ('W:W', 15, currencies),
        ('X:X', 15, currencies), ('Y:Y', 15, None),
        ('Z:Z', 15, None), ('AA:AA', 15, None),
        ('AB:AB', 18, percentages), ('AC:AC', 18, None),
        ('AD:AD', 18, None), ('AE:AE', 18, None),
        ('AF:AF', 18, fx), ('AG:AG', 18, fx),
    ]
    for sheet in worksheets:
        for cols, width, cell_format in column_specs:
            if cell_format is None:
                sheet.set_column(cols, width)
            else:
                sheet.set_column(cols, width, cell_format)

    def _arrow_icons():
        # Fresh dict per call so xlsxwriter never sees a shared spec object.
        return {'type': 'icon_set',
                'icon_style': '3_arrows',
                'icons': [{'criteria': '>=', 'type': 'number', 'value': 0.00001},
                          {'criteria': '>=', 'type': 'number', 'value': -0.00001}]}

    # conditionally format worksheets
    # BUG FIX: the original initialised `counter = 0` but never incremented
    # it, so every sheet was formatted using the FIRST sheet's row count.
    # Pair each worksheet with its own row count instead.
    for sheet, rows in zip(worksheets, num_rows):
        last_row = str(1 + rows)
        # colour code gain/loss
        sheet.conditional_format('G2:G' + last_row, {'type': '3_color_scale'})
        # add arrow icons to the 24hr change columns
        sheet.conditional_format('H2:H' + last_row, _arrow_icons())
        sheet.conditional_format('I2:I' + last_row, _arrow_icons())
# Progress note emitted at import time, after the formatting helpers above
# are defined.
# format Excel
print("Formatting Excel ...")
def add_charts():
    """
    Placeholder for adding charts to the Excel document.

    Currently it only reports progress; chart creation is not implemented.
    """
    print("Adding charts to Excel ...")
# Adjacency list of the graph: index i holds the neighbours of vertex i.
ajd_list = [
    [2, 1], [3, 0], [3, 0], [9, 8, 2, 1],
    [5], [7, 6, 4], [7, 5], [6, 5], [3], [3]
]
N = len(ajd_list)
# Per-vertex visited flags used by dfs().
visited = [False] * N
def dfs(v):
    """Depth-first search from vertex v, printing vertices in visit order."""
    visited[v] = True
    print(v, ' ', end='')
    for neighbour in ajd_list[v]:
        if visited[neighbour]:
            continue
        # Recurse into each still-unvisited neighbour of v.
        dfs(neighbour)
if __name__ == '__main__':
    # Start a DFS from every still-unvisited vertex so that all connected
    # components are covered. (The printed header means "DFS visit order".)
    print('DFS 방문순서')
    for i in range(N):
        if not visited[i]:
            dfs(i)
|
import cwiid
import time
from phue import Bridge
import requests
#Check config for later use
f = open("./config.py")
lines = f.readlines()
f.close()
#Get Hue bridge ip from the site below
r = requests.get('https://www.meethue.com/api/nupnp')
x = r.json()
for item in x:
    # Last bridge listed wins if several are returned.
    bridge_ip = dict(item)['internalipaddress']
print 'Hue Bridge IP: ' + bridge_ip
b = Bridge(bridge_ip)
wiimote_connected = False
#Attempt to connect wiimote until successful
while wiimote_connected == False:
    print "press 1 + 2 now"
    try:
        # attempt to connect wii remote
        wm = cwiid.Wiimote()
        print "wiimote found"
        # set buttons to report when pressed
        wm.rpt_mode = cwiid.RPT_BTN
        wm.led = 1
        wiimote_connected = True
    except (RuntimeError, NameError):
        print "failed to find wiimote, retrying"
#dictionary for program use
# NOTE(review): this assignment shadows the builtin `dict` from here on;
# the builtin is only used above this point.
dict = {'start' : 0, 'end' : 0, 'room1' : [], 'room2' : [], 'bright' : 0, 'group_state' : True, 'room_name': '', 'timer' : 0, 'repeat_cycle' : True}
#START DEFINING ROOMS from config
# Scan config.py for the literal markers 'START' and 'END' and remember
# their line numbers; room definitions sit between them.
linenum = 0
for txt_line in lines:
    try:
        txt = eval(lines[linenum])
        if txt == 'START':
            dict['start'] = linenum
        if txt =='END':
            dict['end'] = linenum
    except SyntaxError:
        pass
    linenum += 1
# Evaluate each line between the markers; element [1] of each entry is
# the list of light numbers belonging to that room.
eval_line_txt = dict['start']
room_num = 0
while eval_line_txt != dict['end'] - 1:
    eval_line_txt += 1
    evaled_lines = eval(lines[eval_line_txt])
    room_num += 1
    room_num_txt = 'room' + str(room_num)
    dict[room_num_txt] = evaled_lines[1]
b.create_group('room1', dict['room1'])
b.create_group('room2', dict['room2'])
#END DEFINING ROOMS
def check_bat(wm):
    #ensure battery is not to low; disconnect the remote when it is
    battery_stat = wm.state['battery']
    if battery_stat <= 10:
        print 'remote disconnected due to low battery'
        wm.close()
        dict['timer'] = 0
        dict['repeat_cycle'] = False

def rumble(wm):
    #make wiimote rumble briefly as haptic feedback
    wm.rumble = True
    time.sleep(.1)
    wm.rumble = False

def change_lights(wm):
    #change state of hue light based upon the light thats on on the wii remote
    try:
        light_number = wm.state['led']
        led_state = b.get_light(light_number, 'on')
        if led_state == True:
            b.set_light(light_number, 'on', False)
            dict['bright'] = 0
        elif led_state == False:
            b.set_light(light_number, 'on', True)
            dict['bright'] = 254
    except TypeError:
        #if no lights with a set number are found this is pulled
        print 'no attached light'
        rumble(wm)
        rumble(wm)

def led_increase(wm):
    #change wii remote leds, wrapping back to 0 past the 4-LED bitmask max
    led_state = wm.state['led']
    if led_state >= 16:
        wm.led = 0
    else:
        wm.led = led_state + 1

def check_leds(wm):
    #check that the leds are not above the ones that are on the wiiremote
    # (clamp the LED bitmask into the 1..15 range)
    if wm.state['led'] >= 16:
        wm.led = 15
    if wm.state['led'] <= 1:
        wm.led = 1

def checkset_bright(wm):
    #change hue light brightness, clamping to the 0..254 Hue range
    if dict['bright'] >= 250:
        dict['bright'] = 254
    if dict['bright'] <= 25:
        dict['bright'] = 0
    light_number = wm.state['led']
    b.set_light(light_number, 'bri', dict['bright'])
def change_group_light():
    #change hue lights that are assigned to a room (toggles the whole set)
    light_set = dict[dict['room_name']]
    #light_set = dict[indict]
    if dict['group_state'] == True:
        b.set_light(light_set, 'on', False)
        dict['group_state'] = False
    elif dict['group_state'] == False:
        b.set_light(light_set, 'on', True)
        dict['group_state'] = True

def check_light_state(wm):
    #check state of the hue lights and then give feedback on wiimote, flash twice for on and once for off
    led_num = wm.state['led']
    led_state = b.get_light(led_num, 'on')
    if led_state == True:
        wm.led = 15
        time.sleep(1)
        wm.led = 0
        time.sleep(1)
        wm.led = 15
        time.sleep(1)
        wm.led = 0
        time.sleep(1)
        wm.led = 1
    if led_state == False:
        wm.led = 15
        time.sleep(1)
        wm.led = 0
        time.sleep(1)
        wm.led = 1
    # restore the LED selection that was active before the flashing
    wm.led = led_num
def read_btns(wm):
    #read wii remote buttons and dispatch the mapped action for each
    #UP
    if (wm.state['buttons'] & cwiid.BTN_RIGHT):
        wm.led = wm.state['led'] + 1
        rumble(wm)
        check_leds(wm)
    #DOWN
    if (wm.state['buttons'] & cwiid.BTN_LEFT):
        wm.led = wm.state['led'] - 1
        rumble(wm)
        check_leds(wm)
    #A
    if (wm.state['buttons'] & cwiid.BTN_A):
        change_lights(wm)
        rumble(wm)
    #B
    if (wm.state['buttons'] & cwiid.BTN_B):
        check_light_state(wm)
    #LEFT
    if (wm.state['buttons'] & cwiid.BTN_DOWN):
        dict['bright'] = dict['bright'] - 50
        checkset_bright(wm)
        rumble(wm)
    #RIGHT
    if (wm.state['buttons'] & cwiid.BTN_UP):
        dict['bright'] = dict['bright'] + 50
        checkset_bright(wm)
        rumble(wm)
    #ONE
    if (wm.state['buttons'] & cwiid.BTN_1):
        dict['room_name'] = 'room1'
        change_group_light()
        rumble(wm)
    #TWO
    if (wm.state['buttons'] & cwiid.BTN_2):
        dict['room_name'] = 'room2'
        change_group_light()
        rumble(wm)
    # NOTE(review): battery check placed at function level (runs every poll);
    # the flattened source is ambiguous about whether it belonged inside the
    # TWO-button branch -- confirm against the original script.
    check_bat(wm)
    #HOME
    if (wm.state['buttons'] & cwiid.BTN_HOME):
        print 'remote diconnected manually'
        rumble(wm)
        wm.close()
        dict['timer'] = 0
        dict['repeat_cycle'] = False
# Main polling loop: while connected, read buttons and count inactivity;
# when disconnected, keep trying to re-pair a remote.
while True:
    #a timer that is used to disconnect the wiimote
    if dict['repeat_cycle'] == True:
        read_btns(wm)
        dict['timer'] += 1
        #Thats about an hour of inactivity till it disconnects
        if dict['timer'] >= 10000000:
            dict['timer'] = 0
            wm.close()
            print "remote disconnected due to inactivity"
            dict['repeat_cycle'] = False
    #if the wii remote disconnects this starts and continues to look for a new wiiremote and trys to attach it
    if dict['repeat_cycle'] == False:
        print 'looking for remote'
        try:
            # attempt to connect wii remote
            wm = cwiid.Wiimote()
            print "wiimote found"
            time.sleep(2)
            # set buttons to report when pressed
            wm.rpt_mode = cwiid.RPT_BTN
            wm.led = 1
            dict['repeat_cycle'] = True
        except (RuntimeError, ValueError):
            print "failed to find wiimote"
            time.sleep(.1)
class Home:
    """Plain home -- the base component of the decorator pricing chain."""

    def get_cost(self):
        """Base construction cost of a bare home."""
        return 200000


class _CostAddon(Home):
    """Shared decorator machinery: wrap a home-like object, add a surcharge."""

    # Amount this upgrade adds on top of the wrapped object's cost.
    EXTRA_COST = 0

    def __init__(self, wrapper):
        self.wrapper = wrapper

    def get_cost(self):
        """Cost of the wrapped object plus this upgrade's surcharge."""
        return self.EXTRA_COST + self.wrapper.get_cost()


class Plaster(_CostAddon):
    """Plastering upgrade."""
    EXTRA_COST = 10000


class Painting(_CostAddon):
    """Painting upgrade."""
    EXTRA_COST = 5000


class Marble_Flooring(_CostAddon):
    """Marble-flooring upgrade."""
    EXTRA_COST = 50000


class AirConditioner(_CostAddon):
    """Air-conditioning upgrade."""
    EXTRA_COST = 15000


# Stack every upgrade on a bare home and price the decorated result.
home = Home()
total_cost = Plaster(Painting(Marble_Flooring(AirConditioner(home))))
print(total_cost.get_cost())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.