text stringlengths 38 1.54M |
|---|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from flask import g
from flask_restful import Api, Resource
from flask_restful import abort, reqparse, fields, marshal_with, marshal
from models import PersonalUser, EnterpriseUser, UserAuthInfo
from models import UserSocialInfo, UserIMConfig, UserPushConfig
from models import VerificationCode
from models import db, auth, basic_auth, token_auth
api = Api()
class StringFormat(object):
    """Input validators used as ``type=`` callables for reqparse arguments.

    Each validator returns the value unchanged when it passes; on failure it
    raises ValueError, which reqparse turns into a 400 response.  The real
    format checks are still TODO, so every value is currently accepted.
    """

    @staticmethod
    def email(string: str):
        """Validate an e-mail address (format check not implemented yet)."""
        is_valid = True  # TODO: implement e-mail format validation
        if not is_valid:
            raise ValueError('{} 未通过 E-mail 校验.'.format(string))
        return string

    @staticmethod
    def phone(string: str):
        """Validate a phone number (format check not implemented yet)."""
        is_valid = True  # TODO: implement phone format validation
        if not is_valid:
            raise ValueError('{} 未通过 Phone 校验.'.format(string))
        return string

    @staticmethod
    def mobile(string):
        """Validate a mobile number (format check not implemented yet)."""
        is_valid = True  # TODO: implement mobile format validation
        if not is_valid:
            raise ValueError('{} 未通过 Mobile 校验.'.format(string))
        return string
class JSONFields(object):
    # marshal() field maps shared by the Resource classes below.
    # fields.Url builds the resource URI from a named endpoint; those
    # endpoints are registered with api.add_resource at the bottom of
    # this module.

    # Public social profile of a user.
    social_fields = {
        'nick_name': fields.String,
        'experience': fields.Integer,
        'uri': fields.Url('api.user_social_info', absolute=False),
    }
    # Instant-messaging configuration.
    im_config_fields = {
        'uri': fields.Url('api.user_im_config', absolute=False),
        'token': fields.String,
        'chat_room_limit': fields.String,
        'chat_group_limit': fields.String
    }
    # Push-notification configuration.
    push_config_fields = {
        'uri': fields.Url('api.user_push_config', absolute=False),
        'not_allow_ad': fields.Integer
    }
    # Core user representation.
    user_fields = {
        'uuid': fields.String,
        'type': fields.String(default='personal'),
        'uri': fields.Url('api.user')
    }
    # Auth-token representation.
    token_fields = {
        'id': fields.String(default=''),
        'token': fields.String(default=''),
        'uri': fields.Url(endpoint='api.token')
    }
class User(Resource):
    """GET a single user: auth info joined with its social profile."""
    parser = reqparse.RequestParser()
    # NOTE(review): `fields` and `headers` are class-level mutable dicts, so
    # the mutations in __init__ and get() are shared across all instances
    # and requests — confirm this aliasing is intended.
    fields = JSONFields.user_fields
    status = 200
    headers = {}

    def __init__(self):
        # Embed the related sub-resources in the marshalled output.
        self.fields['social_info'] = fields.Nested(JSONFields.social_fields)
        self.fields['im_config'] = fields.Nested(JSONFields.im_config_fields)
        self.fields['push_config'] = fields.Nested(JSONFields.push_config_fields)

    def get(self, uuid):
        """Return the user identified by `uuid`; 404 when unknown."""
        user = UserAuthInfo.query.outerjoin(UserSocialInfo, UserAuthInfo.uuid == UserSocialInfo.uuid) \
            .filter(UserAuthInfo.uuid == str(uuid)).first_or_404()
        # Expose the last-modify tag so clients can make conditional requests.
        self.headers['Etag'] = user.last_modify_tag
        return marshal(user, self.fields), self.status, self.headers
class Users(Resource):
    """Collection resource: list users (GET) and register a user (POST)."""
    parser = reqparse.RequestParser()
    fields = JSONFields.user_fields
    status = 200
    headers = {}

    def get(self):
        """Return every user joined with its social profile."""
        users = UserAuthInfo.query.outerjoin(UserSocialInfo, UserAuthInfo.id == UserSocialInfo.id).all()
        return marshal(users, self.fields), self.status, self.headers

    def post(self):
        """Create a user plus its social/IM/push sub-records.

        At least one of login_name / mobile / email must be supplied.
        Returns the marshalled auth record, 201 on success, 400 otherwise.
        """
        # Bug fix: uuid1/uuid4 were referenced below but never imported
        # anywhere in this module; import them locally so the handler
        # is self-contained.
        from uuid import uuid1, uuid4

        self.parser.add_argument('login_name', default='', location='form')
        self.parser.add_argument('password', required=True, location='form')
        self.parser.add_argument('nick_name', default='', location='form')
        self.parser.add_argument('mobile', default='', type=StringFormat.mobile, location='form')
        self.parser.add_argument('type', default='personal', choices=('personal', 'enterprise'),
                                 type=str, location='form')
        self.parser.add_argument('email', default='', type=StringFormat.email, location='form')
        self.parser.add_argument('address', default='', type=str, location='form')
        # Bug fix: enterprise accounts read args['enterprise_name'] below,
        # but the argument was never declared, raising KeyError.
        self.parser.add_argument('enterprise_name', default='', type=str, location='form')
        args = self.parser.parse_args()

        if ''.join((args['login_name'], args['mobile'], args['email'])) == '':
            abort(400, message='{}'.format('login name mobile email 至少有一个合法'))
        user_auth = UserAuthInfo(uuid=str(uuid1()), password=args['password'],
                                 login_name=args['login_name'], mobile=args['mobile'],
                                 type=args['type'], email=args['email'], last_modify_tag=str(uuid4()))
        if user_auth.is_exist:
            abort(400, message="{} 已经存在".format('login_name or mobile or email'))
        if args['nick_name'] == '':
            if args['login_name'] != '':
                args['nick_name'] = args['login_name']
            else:
                # NOTE(review): assumes user_auth.id is already populated
                # before the record is saved — confirm against the model.
                args['nick_name'] = ''.join(('djt', '_', user_auth.id[:8]))
        user_social = UserSocialInfo(uuid=user_auth.uuid, type=user_auth.type, nick_name=args['nick_name'])
        user_im_config = UserIMConfig(uuid=user_auth.uuid, type=user_auth.type)
        user_push_config = UserPushConfig(uuid=user_auth.uuid, type=user_auth.type)
        if args['type'] == 'personal':
            user = PersonalUser(uuid=user_auth.uuid, address=args['address'])
        elif args['type'] == 'enterprise':
            user = EnterpriseUser(uuid=user_auth.uuid, enterprise_name=args['enterprise_name'], address=args['address'])
        else:
            user = None
        if user:
            user.auth_info = user_auth
            user.social_info = user_social
            user.im_config = user_im_config
            user.push_config = user_push_config
            self.status = 201
        else:
            abort(400)
        user.save()
        return marshal(user_auth, self.fields), self.status, self.headers
class UsersSocialInfo(Resource):
    """Read/update a user's social profile with ETag concurrency control."""
    parser = reqparse.RequestParser()
    fields = JSONFields.social_fields
    status = 200
    headers = {}

    def __init__(self):
        # Expose id/type in addition to the shared social fields.
        self.fields['id'] = fields.String
        self.fields['type'] = fields.String

    def get(self, id):
        """Return the profile; 304 when the client's If-Match tag matches."""
        self.parser.add_argument('If-Match', location='headers')
        args = self.parser.parse_args()
        user = UserSocialInfo.query.filter(UserSocialInfo.id == str(id)).first_or_404()
        if user.if_match_tag(args['If-Match']):
            # Client already holds the current version.
            self.status = 304
        else:
            self.status = 200
        self.headers['Etag'] = user.last_modify_tag
        return marshal(user, self.fields), self.status, self.headers

    def put(self, id):
        """Update the nickname; requires a matching If-Match tag (else 412)."""
        self.parser.add_argument('nick_name', location='form')
        self.parser.add_argument('If-Match', location='headers')
        args = self.parser.parse_args()
        user = UserSocialInfo.query.filter(UserSocialInfo.id == str(id)).first_or_404()
        headers = {}
        if user.if_match_tag(args['If-Match']):
            # Keep the old nickname when none was supplied.
            user.nick_name = args.get('nick_name') if args.get('nick_name') else user.nick_name
            user.update_info()
            headers['Etag'] = user.last_modify_tag
        else:
            abort(412)
        return marshal(user, self.fields), 201, headers
class UsersIMConfig(Resource):
    """Per-user instant-messaging configuration (token auth required)."""
    parser = reqparse.RequestParser()
    fields = JSONFields.im_config_fields
    status = 200
    headers = {}

    def __init__(self):
        # Expose id/type in addition to the shared IM fields.
        self.fields['id'] = fields.String
        self.fields['type'] = fields.String

    @token_auth.login_required
    def get(self, id):
        """Return the IM config; 304 when the client's If-Match tag matches."""
        self.parser.add_argument('If-Match', location='headers')
        # Bug fix: `args` was referenced below without ever calling
        # parse_args(), raising NameError on every request.
        args = self.parser.parse_args()
        user = UserIMConfig.query.filter(UserIMConfig.uuid == str(id)).first_or_404()
        if user.if_match_tag(args['If-Match']):
            self.status = 304
        else:
            self.status = 200
        self.headers['Etag'] = user.last_modify_tag
        return marshal(user, self.fields), self.status, self.headers

    @token_auth.login_required
    def put(self, id):
        # Not implemented yet.
        pass
class UsersPushConfig(Resource):
    """Read/update a user's push-notification config (token auth required)."""
    parser = reqparse.RequestParser()
    fields = JSONFields.push_config_fields
    status = 200
    headers = {}

    def __init__(self):
        # Expose id/type in addition to the shared push fields.
        self.fields['id'] = fields.String
        self.fields['type'] = fields.String

    @token_auth.login_required
    def get(self, id):
        """Return the push config for the given user uuid; 404 when absent."""
        user = UserPushConfig.query.filter(UserPushConfig.uuid == str(id)).first_or_404()
        self.headers['Etag'] = user.last_modify_tag
        return marshal(user, self.fields), self.status, self.headers

    @token_auth.login_required
    def put(self, id):
        # Not implemented yet.
        pass
class Tokens(Resource):
    """Issue (POST, basic auth) and refresh (PUT, token auth) API tokens."""
    parser = reqparse.RequestParser()
    fields = JSONFields.token_fields
    status = 200
    headers = {}

    @basic_auth.login_required
    def post(self):
        """Log the authenticated user in and return its token payload."""
        user = g.user  # populated by the basic-auth verify callback
        user.update_last_login_datetime(device_id='')
        return marshal(user, self.fields), self.status, self.headers

    @token_auth.login_required
    def put(self, id):
        """Refresh the token for the authenticated user; 403 on mismatch."""
        if g.user_id == str(id):
            user = UserAuthInfo.query.filter(UserAuthInfo.uuid == g.user_id).first_or_404()
            self.status = 201
        else:
            # abort() raises, so `user` is always bound when we reach return.
            abort(403, message='用户不匹配.')
        return marshal(user, self.fields), self.status, self.headers
class VerificationCodes(Resource):
    """
    Uses hash(mobile number + jwt secret) as the OTP secret to generate a
    verification code.  (mobile number + code) is signed and the signature
    is returned to the client as a token.  Verification recomputes the
    signature from the submitted mobile number + code and compares it with
    the signature embedded in the token.
    """
    parser = reqparse.RequestParser()
    fields = {'token': fields.String}
    status = 200
    headers = {}

    def post(self):
        """Generate a verification code; return the signed token with 202."""
        self.parser.add_argument('mobile', default='', type=StringFormat.mobile, location='form')
        self.parser.add_argument('email', default='', type=StringFormat.email, location='form')
        args = self.parser.parse_args()
        # NOTE(review): this rejects requests supplying BOTH mobile and
        # email, but accepts requests with neither — confirm whether the
        # intended guard is `not (mobile or email)`.
        if args.get('mobile') and args.get('email'):
            abort(400, message='')
        sms = VerificationCode(args.get('mobile'))
        self.status = 202
        return marshal(sms, self.fields), self.status, self.headers
# Route registration.
# Bug fix: the URL converter's variable name must match the handler's
# parameter name — Flask passes view args as keyword arguments.  User.get()
# takes `uuid`, but the sub-resource handlers and Tokens.put() take `id`,
# so their routes previously raised TypeError at dispatch time.
api.add_resource(Users, '/users', methods=['GET', 'POST'])
api.add_resource(User, '/users/<uuid:uuid>', endpoint='api.user', methods=['GET', ])
api.add_resource(UsersSocialInfo, '/users/<uuid:id>/social-info', endpoint='api.user_social_info',
                 methods=['GET', 'PUT'])
api.add_resource(UsersIMConfig, '/users/<uuid:id>/im-config', endpoint='api.user_im_config',
                 methods=['GET', 'PUT'])
api.add_resource(UsersPushConfig, '/users/<uuid:id>/push-config', endpoint='api.user_push_config',
                 methods=['GET', 'PUT'])
api.add_resource(Tokens, '/tokens', methods=['POST', ])
api.add_resource(Tokens, '/users/<uuid:id>/token', endpoint='api.token', methods=['GET', 'PUT'])
api.add_resource(VerificationCodes, '/verification-codes', methods=['POST', ])
|
import torch
import torchvision
import torch.utils.data as data
import os
from os.path import join
import argparse
import logging
from tqdm import tqdm
#user import
from data_generator.DataLoader_Pretrain_Alexnet import CACD
from model.faceAlexnet import AgeClassify
from utils.io import check_dir,Img_to_zero_center
#step1: define arguments
parser = argparse.ArgumentParser(description='pretrain age classifier')
# Optimizer
parser.add_argument('--learning_rate', '--lr', type=float, help='learning rate', default=1e-4)
parser.add_argument('--batch_size', '--bs', type=int, help='batch size', default=512)
parser.add_argument('--max_epoches', type=int, help='Number of epoches to run', default=200)
parser.add_argument('--val_interval', type=int, help='Number of steps to validate', default=20000)
parser.add_argument('--save_interval', type=int, help='Number of batches to save model', default=20000)
# Model
# Data and IO
parser.add_argument('--cuda_device', type=str, help='which device to use', default='0')
parser.add_argument('--checkpoint', type=str, help='logs and checkpoints directory', default='./checkpoint/pretrain_alexnet')
parser.add_argument('--saved_model_folder', type=str,
                    help='the path of folder which stores the parameters file',
                    default='./checkpoint/pretrain_alexnet/saved_parameters/')
args = parser.parse_args()
# Restrict training to the selected GPU(s) before CUDA initialises.
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_device
# Make sure the output directories exist.
check_dir(args.checkpoint)
check_dir(args.saved_model_folder)
#step2: define logging output
# Log to both a file under the checkpoint directory and stdout.
logger = logging.getLogger("Age classifer")
file_handler = logging.FileHandler(join(args.checkpoint, 'log.txt'), "w")
stdout_handler = logging.StreamHandler()
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
stdout_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.setLevel(logging.INFO)
def main():
    """Pre-train the AlexNet age classifier on the CACD dataset."""
    logger.info("Start to train:\n arguments: %s" % str(args))
    #step3: define transform
    transforms = torchvision.transforms.Compose([
        torchvision.transforms.Resize((227, 227)),  # AlexNet input size
        torchvision.transforms.ToTensor(),
        Img_to_zero_center()  # project-specific zero-centering of pixels
    ])
    #step4: define train/test dataloader
    train_dataset = CACD("train", transforms, None)
    test_dataset = CACD("test", transforms, None)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        shuffle=True
    )
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=args.batch_size,
        shuffle=True
    )
    #step5: define model, optim
    model = AgeClassify()
    optim = model.optim
    for epoch in range(args.max_epoches):
        for train_idx, (img, label) in enumerate(train_loader):
            img = img.cuda()
            label = label.cuda()
            # train
            optim.zero_grad()
            # NOTE(review): AgeClassify.train() appears to run the forward
            # pass and store the loss on the model — confirm against the
            # model implementation.
            model.train(img, label)
            loss = model.loss
            loss.backward()
            optim.step()
            format_str = ('step %d/%d, cls_loss = %.3f')
            logger.info(format_str % (train_idx, len(train_loader), loss))
            # save the parameters at the end of each save interval
            # NOTE(review): train_idx*batch_size hits a multiple of
            # save_interval (20000) only when they divide evenly; with the
            # default batch size 512 this fires only at train_idx == 0 —
            # confirm the intended schedule.
            if train_idx*args.batch_size % args.save_interval == 0:
                model.save_model(dir=args.saved_model_folder,
                                 filename='epoch_%d_iter_%d.pth' % (epoch, train_idx))
                logger.info('checkpoint has been created!')
            # val step
            if train_idx % args.val_interval == 0:
                train_correct = 0
                train_total = 0
                with torch.no_grad():
                    for val_img, val_label in tqdm(test_loader):
                        val_img = val_img.cuda()
                        val_label = val_label.cuda()
                        output = model.val(val_img)
                        # Count exact label matches over the test set.
                        train_correct += (output == val_label).sum()
                        train_total += val_img.size()[0]
                logger.info('validate has been finished!')
                format_str = ('val_acc = %.3f')
                logger.info(format_str % (train_correct.cpu().numpy()/train_total))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
FOR PROJECT WRITE-UP ONLY
Generating a few illustrative plots of activation
functions and what image elements they can produce.
Created on Tue Sep 22 20:58:33 2020
@author: riversdale
"""
import numpy as np
import matplotlib.pyplot as plt
from src.imaging import ImageCreator
from src.genome import Genome
from src import funcs
settings = {'output_funcs': [{'func': 'sigmoid', 'prob': 1}]}
fig, axs = plt.subplots(7, 2, figsize=[5, 9.3])
# Each figure row pairs an activation-function curve (left column) with
# the image a tiny CPPN genome produces using it (right column).
# NOTE(review): Genome(4, 1) gives input nodes 0..3 and output node 4;
# the connection paths below suggest 0=x, 1=y, 2=r, 3=bias — confirm
# against src.genome.

# (a) sigmoid of the x coordinates
x = np.linspace(-1, 1, 128)
y = funcs.sigmoid(x * 5)
axs[0,0].plot(x,y)
axs[0,0].set_xlim([-1,1])
axs[0,0].set_ylim([0,1])
axs[0,0].set_title('y = sigmoid(5x)')
axs[0,0].set_ylabel('(a)', y=1, fontweight='bold', labelpad=10, rotation=0)
G = Genome(4, 1, init_conns=False, settings=settings)
G._create_conn_gene(path=(0,4), wgt=5)
C = ImageCreator(colour_channels=1)
C.bias_vec = [1]
img = C.create_image(G)
img.show(axs[0,1])
axs[0,1].set_title('x_coords -> Sigmoid')

# (b) abs of the x coordinates
x = np.linspace(-1, 1, 128)
y = funcs.absz(x)
axs[1,0].plot(x,y)
axs[1,0].set_xlim([-1,1])
axs[1,0].set_ylim([0,1])
axs[1,0].set_title('y = abs(x)')
axs[1,0].set_ylabel('(b)', y=1, fontweight='bold', labelpad=10, rotation=0)
G = Genome(4, 1, init_conns=False, settings=settings)
# Hidden node 5 applies abs; output still goes through the sigmoid.
G._node_genes.append({
    'id': 5,
    'layer': 'hidden',
    'agg_func': 'sum',
    'act_func': 'abs',
    'act_args': {}
})
G._create_conn_gene(path=(0,5), wgt=1)
G._create_conn_gene(path=(3,4), wgt=-2) #bias
G._create_conn_gene(path=(5,4), wgt=3)
C = ImageCreator(colour_channels=1)
C.bias_vec = [1]
img = C.create_image(G)
img.show(axs[1,1])
axs[1,1].set_title('x_coords -> Abs')

# (c) rounding of the radial coordinates
x = np.linspace(-1, 1, 128)
y = funcs.round1dp(x / 2) * 2
axs[2,0].plot(x,y)
axs[2,0].set_xlim([-1,1])
axs[2,0].set_ylim([-1,1])
axs[2,0].set_title('y = round(x)')
axs[2,0].set_ylabel('(c)', y=1, fontweight='bold', labelpad=10, rotation=0)
G = Genome(4, 1, init_conns=False, settings=settings)
G._node_genes.append({
    'id': 5,
    'layer': 'hidden',
    'agg_func': 'sum',
    'act_func': 'round',
    'act_args': {}
})
G._create_conn_gene(path=(2,5), wgt=0.5)
G._create_conn_gene(path=(3,5), wgt=-0.3) #bias
G._create_conn_gene(path=(5,4), wgt=3)
C = ImageCreator(colour_channels=1)
C.bias_vec = [1]
img = C.create_image(G)
img.show(axs[2,1])
axs[2,1].set_title('r_coords -> Round')
# (d) sine of the y coordinates
x = np.linspace(-1, 1, 128)
y = funcs.sinz(10 * x)
axs[3,0].plot(x,y)
axs[3,0].set_xlim([-1,1])
axs[3,0].set_ylim([-1,1])
axs[3,0].set_title('y = sin(10x)')
axs[3,0].set_ylabel('(d)', y=1, fontweight='bold', labelpad=10, rotation=0)
G = Genome(4, 1, init_conns=False, settings=settings)
# Hidden node 5 applies sin to the y input.
G._node_genes.append({
    'id': 5,
    'layer': 'hidden',
    'agg_func': 'sum',
    'act_func': 'sin',
    'act_args': {}
})
G._create_conn_gene(path=(1,5), wgt=10)
G._create_conn_gene(path=(3,5), wgt=1) #bias
G._create_conn_gene(path=(5,4), wgt=1.5)
C = ImageCreator(colour_channels=1)
C.bias_vec = [1]
img = C.create_image(G)
img.show(axs[3,1])
# Bug fix: this panel uses the sine activation, but the title said
# "Round" (copy-paste from the previous section).
axs[3,1].set_title('y_coords -> Sin')
# (e) ReLU of the x coordinates
x = np.linspace(-1, 1, 128)
y = funcs.relu(x)
axs[4,0].plot(x,y)
axs[4,0].set_xlim([-1,1])
axs[4,0].set_ylim([0,1])
axs[4,0].set_title('y = relu(x)')
axs[4,0].set_ylabel('(e)', y=1, fontweight='bold', labelpad=10, rotation=0)
G = Genome(4, 1, init_conns=False, settings=settings)
G._node_genes.append({
    'id': 5,
    'layer': 'hidden',
    'agg_func': 'sum',
    'act_func': 'relu',
    'act_args': {}
})
G._create_conn_gene(path=(0,5), wgt=3)
G._create_conn_gene(path=(3,4), wgt=-2) #bias
G._create_conn_gene(path=(5,4), wgt=1)
C = ImageCreator(colour_channels=1)
C.bias_vec = [1]
img = C.create_image(G)
img.show(axs[4,1])
axs[4,1].set_title('x_coords -> ReLU')

# (f) thresholded modulo of x + y coordinates
x = np.linspace(-1, 1, 128)
y = funcs.modz(x * 2, thresh=0.1)
axs[5,0].plot(x,y)
axs[5,0].set_xlim([-1,1])
axs[5,0].set_ylim([0,1])
axs[5,0].set_title('y = mod(2x)<0.1')
axs[5,0].set_ylabel('(f)', y=1, fontweight='bold', labelpad=10, rotation=0)
G = Genome(4, 1, init_conns=False, settings=settings)
G._node_genes.append({
    'id': 5,
    'layer': 'hidden',
    'agg_func': 'sum',
    'act_func': 'mod',
    'act_args': {'thresh':0.1}
})
G._create_conn_gene(path=(0,5), wgt=2)
G._create_conn_gene(path=(1,5), wgt=2)
G._create_conn_gene(path=(3,4), wgt=-1) #bias
G._create_conn_gene(path=(5,4), wgt=7)
C = ImageCreator(colour_channels=1)
C.bias_vec = [1]
img = C.create_image(G)
img.show(axs[5,1])
axs[5,1].set_title('x_coords, y_coords -> ThreshMod')

# (g) point activation on the x coordinates
x = np.linspace(-1, 1, 128)
y = funcs.point(x, p=-0.5)
axs[6,0].plot(x,y)
axs[6,0].set_xlim([-1,1])
axs[6,0].set_ylim([0,1])
axs[6,0].set_title('y = abs(x+0.5)<0.05')
axs[6,0].set_ylabel('(g)', y=1, fontweight='bold', labelpad=10, rotation=0)
G = Genome(4, 1, init_conns=False, settings=settings)
G._node_genes.append({
    'id': 5,
    'layer': 'hidden',
    'agg_func': 'sum',
    'act_func': 'point',
    'act_args': {'p': -0.5}
})
#G._create_conn_gene(path=(2,5), wgt=1)
G._create_conn_gene(path=(0,5), wgt=1)
G._create_conn_gene(path=(3,4), wgt=-1) #bias
G._create_conn_gene(path=(5,4), wgt=7)
C = ImageCreator(colour_channels=1)
C.bias_vec = [1]
img = C.create_image(G)
img.show(axs[6,1])
axs[6,1].set_title('x_coords -> Point')

fig.tight_layout()
plt.show()
# # trying to get floating square off centre!!
# G = Genome(4, 1, init_conns=False, settings=settings)
# G._node_genes.append({
# 'id': 5,
# 'layer': 'hidden',
# 'agg_func': 'min',
# 'act_func': 'nofunc',
# 'act_args': {}
# })
# G._node_genes.append({
# 'id': 6,
# 'layer': 'hidden',
# 'agg_func': 'max',
# 'act_func': 'nofunc',
# 'act_args': {}
# })
# G._node_genes.append({
# 'id': 7,
# 'layer': 'hidden',
# 'agg_func': 'sum',
# 'act_func': 'nofunc',
# 'act_args': {}
# })
# G._node_genes.append({
# 'id': 8,
# 'layer': 'hidden',
# 'agg_func': 'sum',
# 'act_func': 'nofunc',
# 'act_args': {}
# })
# # min of x & y
# G._create_conn_gene(path=(0,5), wgt=1)
# G._create_conn_gene(path=(1,5), wgt=1)
# # is greater than 0.5
# G._create_conn_gene(path=(5,7), wgt=1)
# G._create_conn_gene(path=(3,7), wgt=-0.5)
# # max of x & y
# G._create_conn_gene(path=(0,6), wgt=1)
# G._create_conn_gene(path=(1,6), wgt=1)
# # is less than 0.5
# G._create_conn_gene(path=(6,8), wgt=1)
# G._create_conn_gene(path=(3,8), wgt=0.5)
# # output
# G._create_conn_gene(path=(7,4), wgt=1)
# G._create_conn_gene(path=(8,4), wgt=1)
# # image
# C = ImageCreator(colour_channels=1)
# C.bias_vec = [1]
# img = C.create_image(G)
# img.show()
|
import timeit
setup = '''
import numpy as np
# from numba import jit
# @jit
def bubblesort(X):
N = len(X)
for end in range(N, 1, -1):
for i in range(end - 1):
cur = X[i]
if cur > X[i + 1]:
tmp = X[i]
X[i] = X[i + 1]
X[i + 1] = tmp
original = np.arange(0.0, 10.0, 0.01, dtype='f4')
shuffled = original.copy()
np.random.shuffle(shuffled)
sorted = shuffled.copy()
# bubblesort(sorted)
# print(np.array_equal(sorted, original))
# sorted[:] = shuffled[:];
'''
print(timeit.timeit(setup=setup, stmt="bubblesort(sorted)", number=1))
|
# -*- coding: utf-8 -*-
class Node(object):
    """A binary-tree node holding a value and optional child links."""

    def __init__(self, data, left=None, right=None):
        # Store the payload and both children in one assignment.
        self.data, self.left, self.right = data, left, right
tree = Node(1, Node(3, Node(7, Node(0)), Node(6)), Node(2, Node(5), Node(4)))
def lookup(root):
    """
    Level-order (breadth-first) traversal: print each level's values
    as a list, one level per line.
    """
    level = [root]
    while level:
        print([node.data for node in level])
        next_level = []
        for node in level:
            for child in (node.left, node.right):
                if child:
                    next_level.append(child)
        level = next_level
def deep(root):
    """Pre-order depth-first traversal, printing each node's data.

    Bug fix: `print root.data` is Python 2 syntax and a SyntaxError
    under Python 3; converted to a print() call.
    """
    if not root:
        return
    print(root.data)
    deep(root.left)
    deep(root.right)
def maxDepth(root):
    """Return the height of the tree rooted at `root` (0 for an empty tree)."""
    if not root:
        return 0
    left_depth = maxDepth(root.left)
    right_depth = maxDepth(root.right)
    return 1 + max(left_depth, right_depth)
if __name__ == "__main__":
    # lookup(tree)
    # deep(tree)
    # Bug fix: `print maxDepth(tree)` was a Python 2 print statement
    # (SyntaxError under Python 3); converted to a print() call.
    print(maxDepth(tree))
|
from kmk.keys import KC
from kb import KMKKeyboard
from kmk.hid import HIDModes
from kmk.modules.layers import Layers
from kmk.modules.modtap import ModTap
keyboard = KMKKeyboard()
modtap = ModTap()
layers_ext = Layers()
# Enable the layers and mod-tap features on this board.
keyboard.modules = [layers_ext, modtap]

# Cleaner key names
_______ = KC.TRNS  # transparent: fall through to the layer below
# Dual-role keys: tap for the letter, hold for the modifier (MT) or a
# momentary layer switch (LT).
CTL_Z = KC.MT(KC.Z, KC.LCTRL)
ALT_X = KC.MT(KC.X, KC.LALT)
LT3_C = KC.LT(3, KC.C)
LT4_V = KC.LT(4, KC.V)
LT2_B = KC.LT(2, KC.B)
LT1_N = KC.LT(1, KC.N)
LT5_M = KC.LT(5, KC.M)
ALT_SPC = KC.MT(KC.SPC, KC.RALT)
CTL_BS = KC.MT(KC.BSPC, KC.RCTRL)
SFT_ESC = KC.MT(KC.ESC, KC.RSFT)

# 3x10 keymap with six layers.
keyboard.keymap = [
    [
        # Layer 0: base QWERTY; bottom row holds the dual-role keys
        KC.Q, KC.W, KC.E, KC.R, KC.T, KC.Y, KC.U, KC.I, KC.O, KC.P,
        KC.A, KC.S, KC.D, KC.F, KC.G, KC.H, KC.J, KC.K, KC.L, KC.ENT,
        CTL_Z, ALT_X, LT3_C, LT4_V, LT2_B, LT1_N, LT5_M, ALT_SPC, CTL_BS, SFT_ESC,
    ],
    [
        # Layer 1: digits and F1-F10
        KC.N1, KC.N2, KC.N3, KC.N4, KC.N5, KC.N6, KC.N7, KC.N8, KC.N9, KC.N0,
        KC.F1, KC.F2, KC.F3, KC.F4, KC.F5, KC.F6, KC.F7, KC.F8, KC.F9, KC.F10,
        _______, _______, _______, _______, KC.DEL, _______, _______, _______, _______, _______,
    ],
    [
        # Layer 2: shifted number-row symbols
        KC.EXLM, KC.AT, KC.HASH, KC.DLR, KC.PERC, KC.CIRC, KC.AMPR, KC.ASTR, KC.LPRN, KC.RPRN,
        KC.F11, KC.F12, _______, _______, _______, _______, _______, _______, _______, KC.GRV,
        _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,
    ],
    [
        # Layer 3: punctuation and arrow keys
        _______, _______, _______, _______, _______, KC.MINS, KC.EQL, KC.LBRC, KC.RBRC, KC.BSLS,
        KC.TAB, _______, _______, _______, _______, KC.COMM, KC.DOT, KC.SLSH, KC.SCLN, KC.QUOT,
        _______, _______, _______, _______, _______, _______, KC.LEFT, KC.DOWN, KC.UP, KC.RGHT,
    ],
    [
        # Layer 4: shifted punctuation and page navigation
        _______, _______, _______, _______, _______, KC.UNDS, KC.PLUS, KC.LCBR, KC.RCBR, KC.PIPE,
        KC.TAB, _______, _______, _______, _______, KC.LABK, KC.RABK, KC.QUES, KC.COLN, KC.DQUO,
        _______, _______, _______, _______, _______, _______, KC.HOME, KC.PGDN, KC.PGUP, KC.END,
    ],
    [
        # Layer 5: Japanese input keys (muhenkan/henkan)
        _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,
        _______, _______, _______, _______, _______, _______, _______, _______, _______, _______,
        _______, _______, _______, _______, _______, KC.MHEN, _______, KC.HENK, _______, _______,
    ],
]

if __name__ == '__main__':
    keyboard.go(hid_type=HIDModes.USB)  # Wired USB enable
from InBedManagementTreeView import InBedManagementTreeView
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from Gui.ItemModels.BoundaryTypeInBedItemModel import *
class BoundaryTypeInBedItemView(InBedManagementTreeView):
    """Tree view for boundary-type items within a bed.

    Thin subclass: all behaviour comes from InBedManagementTreeView; this
    class only plugs in the BoundaryTypeInBedItemModel.
    """

    def __init__(self, parent):
        InBedManagementTreeView.__init__(self, parent)
        # Attach the item model this view is specialised for.
        self.configureModel(BoundaryTypeInBedItemModel(self))
|
from django.apps import AppConfig
class IndividualworkappConfig(AppConfig):
    """Django application configuration for the `individualworkapp` app."""
    name = 'individualworkapp'
|
# Print the numbers 1..10, one per line.
for x in range(10):
    print(x+1)
# Blank separator line.
print()

# Echo each letter of a user-supplied word on its own line.
word = input('Введите любое слово: ')
for letter in word:
    print(letter)
# Blank separator line.
print()
# Per-class score lists; print each class average, then the overall mean.
rating = [{'shool_class': '4a', 'scores': [2, 3, 3, 5, 4]},
          {'shool_class': '4b', 'scores': [2, 4, 5, 5, 4]},
          {'shool_class': '4v', 'scores': [2, 2, 3, 5, 3]}]
# Bug fix: the accumulator was never initialised — `a = 0` was dead code
# while `sum_scores += ...` raised NameError on the first iteration.
sum_scores = 0
for result in rating:
    class_avg = sum(result['scores']) / len(result['scores'])
    print('Средний балл {} класса: {}'
          .format(result['shool_class'], class_avg))
    sum_scores += class_avg
# Mean of the per-class averages.
print(sum_scores/len(rating))
# -*- coding: utf-8 -*-
from zope.interface import Interface
# Zope marker interfaces: they declare no methods and exist only so the
# corresponding content types can be registered/looked up by interface.
class IDoormat(Interface):
    """Marker interface for .Doormat.Doormat"""


class IDoormatColumn(Interface):
    """Marker interface for .DoormatColumn.DoormatColumn"""


class IDoormatSection(Interface):
    """Marker interface for .DoormatSection.DoormatSection"""


class IDoormatReference(Interface):
    """Marker interface for .DoormatReference.DoormatReference"""


class IDoormatMixin(Interface):
    """Marker interface for .DoormatMixin.DoormatMixin"""


class IDoormatCollection(Interface):
    """Marker interface for .DoormatCollection.DoormatCollection"""
|
"""
1.设置日志的收集级别
2.可以将日志输出到文件和控制台
3.提供以下这些方法:
info()
debug()
error()
warning()
critical()
额外扩展:单例模式
"""
import logging
from logging import Logger
from day15.myconf import Myconf
class MyLogger(Logger):
    """Logger configured from conf.ini.

    Always logs to the console; when a file path is configured in the
    [log] section, also logs to that file (UTF-8).
    """

    def __init__(self):
        conf = Myconf("conf.ini")
        file = conf.get("log", "file")
        # 1. Logger name and collection level both come from conf.ini.
        super().__init__(conf.get("log", "name"), conf.get("log", "level"))
        # 2. Route records to the console and (optionally) a file.
        # Shared record format for every handler.
        fmt_str = "%(asctime)s %(name)s %(levelname)s %(filename)s [%(lineno)d] %(message)s"
        formatter = logging.Formatter(fmt_str)
        # Console handler.
        handle1 = logging.StreamHandler()
        handle1.setFormatter(formatter)
        self.addHandler(handle1)
        if file:
            # Bug fix: the literal string "file" was passed instead of the
            # configured path variable, so every log file was named "file".
            handle2 = logging.FileHandler(file, encoding="utf-8")
            handle2.setFormatter(formatter)
            self.addHandler(handle2)
logger = MyLogger()
|
class student:
    """A student with a name and a numeric grade.

    Instances order by grade (via __lt__), so lists of students can be
    sorted directly; repr() shows only the name.
    """

    def __init__(self, name, grade):
        self.name = name
        self.grade = grade

    def __repr__(self):
        return self.name

    def __lt__(self, other):
        # Ordering is driven entirely by the grade.
        return self.grade < other.grade
# Build a roster and print it from highest grade to lowest
# (sort uses student.__lt__; reverse=True gives descending order).
AList = [student("Mary", 80), student("Jack", 66), student("Eric", 93), student("June", 86), student("Jupiter", 79)]
AList.sort(reverse=True)
print(AList)
from django.urls import path
from webapp import views
urlpatterns = [
    path('', views.hello_world, name='hello_world'),
    # Both the slashless and slashed forms map to the same view.
    # NOTE(review): the two routes share the name 'savedata', so reverse()
    # resolves to only one of them — confirm this is intended.
    path('savedata', views.savedata, name='savedata'),
    path('savedata/', views.savedata, name='savedata'),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# this imports verbs for loading by lib/verbs/verbs.py
# pylint: disable=unused-import
# pylint doesn't know where this file is imported from
# pylint: disable=import-error
# "verbs" (lowcase) is standard among all files
# pylint: disable=invalid-name
"""
[load_packages.py]
External package loader.
"""
import os # for os.path.expanduser()
import sys # for importing from ~/.ergo/packages
import importlib # for programatic importing
from lib.lib import verbs
# External packages live in ~/.ergo/packages; make them importable.
packages_path = os.path.join(os.path.expanduser("~"), ".ergo", "packages")
sys.path.append(packages_path)
try:
    for module in os.listdir(packages_path):
        try:
            # Skip compiled .pyc artifacts; import everything else by
            # stripping the ".py" suffix.
            # NOTE(review): directories and non-.py files also pass this
            # check and fall into the ImportError path below — confirm
            # that is acceptable.
            if module[-3:] != "pyc":
                loaded_module = importlib.import_module(module[:-3])
                # Each package contributes verb handlers to the shared dict.
                verbs.update(loaded_module.verbs)
        except ImportError:
            # Best effort: one broken package must not stop the rest.
            pass
except OSError:
    print("[ergo: ConfigError]: No directory ~/.ergo/packages. Please run ergo_setup.")
|
# -*- coding: utf-8 -*-
from hmonitor.autofix.scripts import AutoFixBase
class JustShowEventInfo(AutoFixBase):
    """Auto-fix script used for testing: do_fix always fails on purpose."""

    def do_fix(self, trigger_name, hostname, executor, event, *args, **kwargs):
        # Deliberately raise so the framework's error path can be exercised.
        raise Exception("ERROR TEST")

    def get_author(self):
        return "Qin TianHuan"

    def get_version(self):
        return "1"

    def get_description(self):
        # "Test script" — kept in Chinese, it is user-facing metadata.
        return u"测试用脚本"

    def get_create_date(self):
        return "2015-06-30 09:00:00"
# Prompt until the user enters a whole number of seconds, then print it
# formatted as HH:MM:SS.
flag = True
while flag:
    try:
        sec = int(input('Input time in seconds:' ))
        flag = False
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit, making the prompt loop impossible to break out of.
    # Only a failed int() conversion should trigger a retry.
    except ValueError:
        print('Entered time is not a number!!!')
print(f'{sec//3600:02}:{(sec//60)%60:02}:{sec%60:02}')
import sys
sys.stdin = open('주사위 던지기2.txt')
def myprint(q):
    """Check the current selection of `q` rolls against the target sum.

    Reads the last `q` values from the global buffer T (in reverse fill
    order) and appends the combination to the global `result` when its
    sum equals the global target `m`.  The loop breaks early once the
    running sum exceeds `m`.
    """
    tmp = 0
    t = []
    while q != 0:
        q -= 1
        t.append(T[q])
        tmp += T[q]
        if tmp > m:
            # Already over the target; no point summing further.
            break
    if tmp == m:
        result.append(t)
def PI(n, r, q):
    """Recursively enumerate dice-roll selections via the swap trick.

    Chooses `r` more values from the first `n` slots of the global `arr`,
    recording each choice in the global buffer T; when r reaches 0 the
    selection of length `q` is evaluated by myprint().
    NOTE(review): `n` is passed unchanged into the recursion, so values
    may repeat across positions (rolls with repetition) — confirm that
    matches the intended dice semantics.
    """
    if r == 0:
        myprint(q)
    else:
        for i in range(n-1, -1, -1):
            # Swap candidate i into the last slot, record it, recurse,
            # then swap back so arr is restored for the next candidate.
            arr[i], arr[n-1] = arr[n-1], arr[i]
            T[r-1] = arr[n-1]
            PI(n, r-1, q)
            arr[i], arr[n-1] = arr[n-1], arr[i]
# n dice and target sum m, read from the redirected stdin file above.
n, m = map(int,input().split())
arr = [1,2,3,4,5,6]   # die faces
T = [0] * n           # buffer for the current selection
result = []           # all combinations summing to m
PI(6, n, n)
# Print the qualifying combinations in sorted order.
result.sort()
for i in range(len(result)):
    print(' '.join(map(str, result[i])))
import unittest
import parseGEDCOM
class us07Test(unittest.TestCase):
    """US07: individuals must be less than 150 years old (GEDCOM check)."""

    def testUS07(self):
        # checkUS07() returns the accumulated error report as one string;
        # the expected text pins both the at-death and still-living cases.
        self.assertEqual(parseGEDCOM.checkUS07(),
                         "ERROR: INDIVIDUAL: US07: @I20@: More than 150 years old at death - Birth 1800-07-28: Death 1980-10-27\n"+
                         "ERROR: INDIVIDUAL: US07: @I21@: More than 150 years old - Birth date 1760-05-10\n")


if __name__ == "__main__":
    unittest.main()
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param head: The first node of the linked list.
    @return: nothing
    """

    def reorderList(self, head):
        # Reorder L0->L1->...->Ln into L0->Ln->L1->Ln-1->... in place.
        # basic steps:
        # step 1: split linked list into two halves
        # step 2: reverse the second half linked list
        # step 3: create new linked list using those two halves
        if head:
            head1, head2 = self.half(head)
            head2 = self.reverse(head2)
            result = ListNode(0)   # dummy head for the merged list
            pointer = result
            # Interleave one node from each half at a time.
            while head1 and head2:
                pointer.next = head1
                head1 = head1.next
                pointer = pointer.next
                pointer.next = head2
                head2 = head2.next
                pointer = pointer.next
            # The first half can be at most one node longer than the second.
            if head1:
                pointer.next = head1
            result = result.next
            return result

    def half(self, head):
        """Split the list after its middle node; return (front, back)."""
        # use slow and quick pointer
        dummy = ListNode(0)
        dummy.next = head
        slow = dummy
        fast = dummy
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        head1 = dummy
        head2 = slow.next
        slow.next = None   # terminate the first half
        head1 = head1.next
        return head1, head2

    def reverse(self, head):
        """Reverse the list in place (head insertion); return the new head."""
        if head:
            dummy = ListNode(0)
            dummy.next = head
            h2 = head.next
            head.next = None
            while h2:
                # Move h2's current head node to the front of dummy's list.
                tmp = h2
                h2 = h2.next
                tmp.next = dummy.next
                dummy.next = tmp
            return dummy.next
import urllib
import requests
import pandas as pd
import sys
# scrape movie posters and corresponding meta data
def scrape_meta_posters(mids):
    '''
    Scrape posters and metadata from IMDB using OMDB API
    Input: IMDB ids
    Output: meta data list; posters are saved under imgs/<id>.png
    '''
    # Py3 fix: urllib.urlretrieve was Python 2 API removed in Python 3;
    # the legacy function now lives in urllib.request.
    from urllib.request import urlretrieve

    # NOTE(review): `api_key` is not defined anywhere in this module — it
    # must be provided (e.g. set as a module global) before calling;
    # confirm where it is expected to come from.
    meta_info = []
    for id_ in mids:
        url_meta = "http://www.omdbapi.com/?apikey={}&i={}".format(api_key, id_)
        response = requests.get(url_meta, stream=True)
        if response.status_code != 200:
            continue
        content = response.json()
        # Skip entries without a usable poster URL.
        if 'Poster' not in content or content['Poster'] == 'N/A':
            continue
        url_poster = "http://img.omdbapi.com/?apikey={}&i={}".format(api_key, id_)
        urlretrieve(url_poster, "imgs/{}.png".format(id_))
        # Reuse the already-parsed payload instead of parsing the body twice.
        meta_info.append(content)
    return meta_info
if __name__ == '__main__':
    # Usage: python <script> <start_index> <end_index>
    n1, n2 = int(sys.argv[1]), int(sys.argv[2])
    # Slice of IMDB ids to process in this run.
    ids = pd.read_csv('movie_ids.csv')['ids'].values[n1:n2]
    df = pd.DataFrame(scrape_meta_posters(ids))
    df.to_csv('metas/{}_to_{}.csv'.format(n1, n2), encoding='utf-8')
|
"""
Project Euler's Problem 014
Sebuah barisan iteratif berikut didefinisikan untuk himpunan bilangan bulat positif dengan aturan:
n → n/2 (n ∈ bilangan genap)
n → 3n + 1 (n ∈ bilangan ganjil)
Menggunakan aturan di atas, dimulai dari 13, maka kita akan mendapatkan barisan:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
Dapat terlihat bahwa barisan ini (yang dimulai dari 13 dan berakhir di 1) memiliki 10 suku.
Meskipun belum ada bukti matematisnya, diperkirakan bahwa apapun bilangan awalnya, barisan
seperti ini akan selalu berakhir di 1 (Masalah Collatz).
Bilangan awal manakah yang besarnya lebih kecil daripada satu juta yang akan menghasilkan barisan terpanjang?
Catatan : besar suku berikutnya (setelah bilangan awal) dalam barisan boleh melebihi satu juta.
"""
from time import time
import psutil
t0 = time()
# Candidate starting numbers: odd values from 999999 down to 500001.
# NOTE(review): this assumes the longest Collatz chain below 1e6 starts
# from an odd number above 500000 (any n <= 500000 is beaten by 2n, which
# has a chain one longer) — confirm the odd-only restriction is intended.
daftar_bilangan = [x for x in range(999999, 500000, -2)]
init = [0, 0]  # [best chain length, its starting number]
for i in daftar_bilangan:
    temp = i
    suku = [i]
    while i != 1:
        if i % 2 == 0:
            # Bug fix: `/=` yields floats in Python 3, slowing the loop and
            # risking inexact comparisons; Collatz needs integer division.
            i //= 2
            suku.append(i)
        else:
            i = 3*i + 1
            suku.append(i)
    if len(suku) > init[0]:
        init = [len(suku), temp]
print(init[1], "creates", init[0])
print("\nfinished in", time()-t0, "seconds")
print("takes", psutil.Process().memory_info().vms / 1024 ** 2, "MB of memory")
import requests
import brotli
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup
# Shared session so cookies persist between the two requests below.
sess = requests.Session()
# All traffic is routed through this HTTP proxy.
http_proxy = "http://163.172.110.14:1457"
proxyDict = {
    "http": http_proxy,
    "https": http_proxy
}
def brotli_decompress_utf8(html):
    """Decompress a brotli-encoded response body and decode it as UTF-8,
    silently dropping undecodable bytes."""
    r = brotli.decompress(html)
    decoded = r.decode('utf-8', 'ignore')
    return str(decoded)
def step_one():
    """Fetch the identify page, extract the `lsd` CSRF token, then run
    the account search (step_two) with it and return the result."""
    headers = {
        "Host": "mbasic.facebook.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "DNT": "1",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1"
    }
    r = sess.get("https://mbasic.facebook.com/login/identify/", headers=headers, proxies=proxyDict, verify=False)
    m = r.content
    # The body arrives brotli-compressed ("br" was accepted above);
    # decompress it manually before parsing.
    m = brotli_decompress_utf8(m)
    soup = BeautifulSoup(m, "html.parser")
    # The hidden <input name="lsd"> carries the CSRF token the form needs.
    face = soup.find(attrs={"name": "lsd"})['value']
    print(face)
    lsd = face
    result = step_two(lsd)
    return result
def step_two(lsd):
    """POST the account-recovery search for a hard-coded e-mail address and
    classify the address from markers in the response HTML.

    Args:
        lsd: Anti-CSRF token scraped by step_one.

    Returns:
        "Available" when the search-error marker is present, "Not Available"
        when a profile picture or reset link is found, "" otherwise.
    """
    # NOTE(review): hard-coded target address — should be a parameter.
    email="mustafasarikaya1@hotmail.com"
    headers = {
        "Host": "mbasic.facebook.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Accept-Encoding": "gzip, deflate, br",
        "Referer": "https://mbasic.facebook.com/login/identify/",
        "Content-Type": "application/x-www-form-urlencoded",
        "DNT": "1",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1"
    }
    params = {
        "ctx": "recover",
        "search_attempts": "1"
    }
    data = {
        "lsd":lsd,
        "email":email,
        "did_submit": "Search"
    }
    r = sess.post("https://mbasic.facebook.com/login/identify/",headers=headers,params=params,data=data,proxies=proxyDict,verify=False)
    m= r.content
    m = brotli_decompress_utf8(m)
    #print(r.text)
    result = ""
    # Search-error marker => no account for this address; a profile pic or a
    # reset action link => an account exists.
    if "login_identify_search_error_msg" in m:
        result = "Available"
    elif 'pic.php'in m or 'reset_action' in m:
        result = "Not Available"
    print(m)
    return result
def start():
    """Run the availability check and print its outcome."""
    print(step_one())
start()  # kick off the scan immediately when the file is executed
import asyncpg
import pandas as pd
from liualgotrader.common import config
from liualgotrader.common.tlog import tlog
async def create_db_connection(dsn: str = None) -> None:
    """Create the shared asyncpg pool and store it on `config.db_conn_pool`.

    Args:
        dsn: Optional connection string; falls back to `config.dsn` when falsy.
    """
    config.db_conn_pool = await asyncpg.create_pool(
        dsn=dsn or config.dsn,
        min_size=2,
        max_size=40,
    )
    tlog("db connection pool initialized")
async def fetch_as_dataframe(query: str, *args) -> pd.DataFrame:
    """Run *query* with *args* on the shared pool and return a DataFrame.

    Lazily creates the connection pool on first use; returns an empty
    DataFrame when the query yields no rows.
    """
    try:
        config.db_conn_pool
    except (NameError, AttributeError):
        await create_db_connection()
    async with config.db_conn_pool.acquire() as con:
        prepared = await con.prepare(query)
        column_names = [attr.name for attr in prepared.get_attributes()]
        rows = await prepared.fetch(*args)
    if rows and len(rows) > 0:
        return pd.DataFrame(data=rows, columns=column_names)
    return pd.DataFrame()
|
import os
from typing import Union, List, Callable, Optional, Tuple
import hydra
import torch
from kornia.losses import BinaryFocalLossWithLogits
import yaml
# Read the model config at import time to decide which DeformConv2d implementation
# (torchvision vs mmcv) this module uses.
# NOTE(review): hard-coded absolute path — breaks on any other machine; consider
# resolving relative to the package root instead.
with open(os.path.join(
        '/home/rishabh/Thesis/TrajectoryPredictionMastersThesis/src/position_maps/',
        'config/model/model.yaml'), 'r') as f:
    model_config = yaml.safe_load(f)
if model_config['use_torch_deform_conv']:
    from torchvision.ops import DeformConv2d
else:
    from mmcv.ops import DeformConv2d
from mmdet.core import multi_apply
from mmdet.models import HourglassNet
from omegaconf import DictConfig
# from pl_bolts.models.vision import UNet # has some matplotlib issue
from pytorch_lightning import LightningModule
from torch import nn
import torch.nn.functional as F
from torch.nn import MSELoss, Module
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader, Dataset
from average_image.utils import SDDMeta
from baselinev2.stochastic.model_modules import BaselineGenerator
from log import get_logger
from hourglass import PoseNet
logger = get_logger(__name__)
def post_process_multi_apply(x):
    """Regroup mmdet multi_apply output and stack each group into one tensor.

    `x` is a sequence of per-input tuples; transposing with zip(*x) collects the
    i-th element of every tuple, and each collection is stacked along dim 0.
    """
    return [torch.stack(group) for group in zip(*x)]
class UNet(nn.Module):
    """
    U-Net (Ronneberger et al., https://arxiv.org/abs/1505.04597) with an optional
    stack of extra down-scaling blocks placed before the encoder proper.

    Args:
        config: Experiment configuration; `config.root` must point at the SDD data dir.
        num_classes: Number of output channels/classes.
        desired_output_shape: If given, the decoder output is resized to this (H, W).
        input_channels: Channels of the input image (default 3).
        num_layers: Depth of each side of the U (default 5).
        num_additional_double_conv_layers: Extra Down blocks before the U (default 0).
        features_start: Channel count after the first DoubleConv (default 64).
        bilinear: Use bilinear upsampling instead of transposed convolutions.
    """

    def __init__(
            self,
            config: DictConfig,
            num_classes: int,
            desired_output_shape: Tuple[int, int] = None,
            input_channels: int = 3,
            num_layers: int = 5,
            num_additional_double_conv_layers: int = 0,
            features_start: int = 64,
            bilinear: bool = False
    ):
        if num_layers < 1:
            raise ValueError(f'num_layers = {num_layers}, expected: num_layers > 0')
        super().__init__()
        self.num_layers = num_layers
        self.num_additional_double_conv_layers = num_additional_double_conv_layers
        # Consulted in forward() to resize the decoder output when set.
        self.desired_output_shape = desired_output_shape
        self.config = config
        self.sdd_meta = SDDMeta(self.config.root + 'H_SDD.txt')
        blocks = [DoubleConv(input_channels, features_start)]
        channels = features_start
        # Optional pre-encoder down-scaling stages.
        for _ in range(self.num_additional_double_conv_layers):
            blocks.append(Down(channels, channels * 2))
            channels *= 2
        # Contracting path.
        for _ in range(num_layers - 1):
            blocks.append(Down(channels, channels * 2))
            channels *= 2
        # Expanding path.
        for _ in range(num_layers - 1):
            blocks.append(Up(channels, channels // 2, bilinear))
            channels //= 2
        # 1x1 projection onto the requested number of classes.
        blocks.append(nn.Conv2d(channels, num_classes, kernel_size=1))
        self.layers = nn.ModuleList(blocks)

    def forward(self, x):
        feats = [self.layers[0](x)]
        extra_end = self.num_additional_double_conv_layers + 1
        down_end = self.num_layers + self.num_additional_double_conv_layers
        # Extra down-scaling stages.
        for layer in self.layers[1:extra_end]:
            feats.append(layer(feats[-1]))
        # Contracting path; keep every activation for the skip connections.
        for layer in self.layers[extra_end:down_end]:
            feats.append(layer(feats[-1]))
        # Expanding path with skip connections (in reverse encoder order).
        for i, layer in enumerate(self.layers[down_end:-1]):
            feats[-1] = layer(feats[-1], feats[-2 - i])
        up_scaled = feats[-1]
        if self.desired_output_shape is not None:
            up_scaled = F.interpolate(up_scaled, size=self.desired_output_shape)
        return self.layers[-1](up_scaled)
class DoubleConv(nn.Module):
    """Two successive (3x3 Conv2d -> BatchNorm -> ReLU) stages."""

    def __init__(self, in_ch: int, out_ch: int):
        super().__init__()
        stages = []
        # First stage maps in_ch -> out_ch, second keeps out_ch.
        for c_in, c_out in ((in_ch, out_ch), (out_ch, out_ch)):
            stages += [
                nn.Conv2d(c_in, c_out, kernel_size=3, padding=1),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        return self.net(x)
class Down(nn.Module):
    """Halve the spatial resolution with a 2x2 max-pool, then apply DoubleConv."""

    def __init__(self, in_ch: int, out_ch: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.MaxPool2d(kernel_size=2, stride=2),
            DoubleConv(in_ch, out_ch),
        )

    def forward(self, x):
        return self.net(x)
class Destination(nn.Module):
    """Placeholder module — currently defines no layers or forward logic."""

    def __init__(self):
        super(Destination, self).__init__()
class Up(nn.Module):
    """
    Doubles spatial resolution (bilinear upsample or transposed conv), pads to the
    skip connection's size, concatenates both, then applies a DoubleConv.
    """

    def __init__(self, in_ch: int, out_ch: int, bilinear: bool = False):
        super().__init__()
        if bilinear:
            self.upsample = nn.Sequential(
                nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True),
                nn.Conv2d(in_ch, in_ch // 2, kernel_size=1),
            )
        else:
            self.upsample = nn.ConvTranspose2d(in_ch, in_ch // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_ch, out_ch)

    def forward(self, x1, x2):
        upsampled = self.upsample(x1)
        # Pad the upsampled map so it matches the skip feature's spatial size.
        pad_h = x2.shape[2] - upsampled.shape[2]
        pad_w = x2.shape[3] - upsampled.shape[3]
        upsampled = F.pad(upsampled, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        # Channel-wise concatenation: skip features first.
        merged = torch.cat([x2, upsampled], dim=1)
        return self.conv(merged)
class PositionMapUNetBase(LightningModule):
    """Lightning wrapper around the UNet: wires datasets, loss, optimizer/scheduler.

    Subclasses implement `_one_step` to pick the right targets out of the batch.
    """
    def __init__(self, config: 'DictConfig', train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None, loss_function: 'nn.Module' = None,
                 collate_fn: Optional[Callable] = None):
        super(PositionMapUNetBase, self).__init__()
        self.config = config
        # fixme: update u_net to network for next trainings
        self.u_net = UNet(num_classes=self.config.unet.num_classes,
                          input_channels=self.config.unet.input_channels,
                          num_layers=self.config.unet.num_layers,
                          num_additional_double_conv_layers=self.config.unet.num_additional_double_conv_layers,
                          features_start=self.config.unet.features_start,
                          bilinear=self.config.unet.bilinear,
                          desired_output_shape=desired_output_shape,
                          config=self.config)
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.loss_function = loss_function
        self.collate_fn = collate_fn
        self.desired_output_shape = desired_output_shape
        self.save_hyperparameters(self.config)
        self.init_weights()
    def forward(self, x):
        return self.u_net(x)
    def _one_step(self, batch):
        # Bug fix: the original *returned* the NotImplementedError class instead of
        # raising, silently handing the exception class to the logger as a "loss".
        raise NotImplementedError
    def training_step(self, batch, batch_idx):
        loss = self._one_step(batch)
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, batch, batch_idx):
        loss = self._one_step(batch)
        self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def configure_optimizers(self):
        """Adam + ReduceLROnPlateau; all hyper-parameters come from the config."""
        opt = torch.optim.Adam(self.parameters(), lr=self.config.lr, weight_decay=self.config.weight_decay,
                               amsgrad=self.config.amsgrad)
        schedulers = [
            {
                'scheduler': ReduceLROnPlateau(opt,
                                               patience=self.config.patience,
                                               verbose=self.config.verbose,
                                               factor=self.config.factor,
                                               min_lr=self.config.min_lr),
                'monitor': self.config.monitor,
                'interval': self.config.interval,
                'frequency': self.config.frequency
            }]
        return [opt], schedulers
    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            dataset=self.train_dataset, batch_size=self.config.batch_size,
            shuffle=False, num_workers=self.config.num_workers,
            collate_fn=self.collate_fn, pin_memory=self.config.pin_memory,
            drop_last=self.config.drop_last)
    def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
        return DataLoader(
            dataset=self.val_dataset, batch_size=self.config.batch_size * self.config.val_batch_size_factor,
            shuffle=False, num_workers=self.config.num_workers,
            collate_fn=self.collate_fn, pin_memory=self.config.pin_memory,
            drop_last=self.config.drop_last)
    def init_weights(self):
        """Kaiming-initialise all (transposed) conv layers."""
        def init_kaiming(m):
            if type(m) in [nn.Conv2d, nn.ConvTranspose2d]:
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_in')
                m.bias.data.fill_(0.01)
        def init_xavier(m):
            # Bug fix: the original compared `type(m) ==` against a *list*, which
            # is always False, so Xavier init could never fire; also use the
            # in-place, non-deprecated `xavier_uniform_`.
            if type(m) in [nn.Conv2d, nn.ConvTranspose2d]:
                torch.nn.init.xavier_uniform_(m.weight)
                m.bias.data.fill_(0.01)
        self.apply(init_kaiming)
class PositionMapUNetHeatmapRegression(PositionMapUNetBase):
    """UNet trained by direct regression against the heat masks (MSE by default)."""
    def __init__(self, config: 'DictConfig', train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None, loss_function: 'nn.Module' = MSELoss(),
                 collate_fn: Optional[Callable] = None):
        super(PositionMapUNetHeatmapRegression, self).__init__(
            config=config, train_dataset=train_dataset, val_dataset=val_dataset, loss_function=loss_function,
            collate_fn=collate_fn, desired_output_shape=desired_output_shape)
    def _one_step(self, batch):
        frames, heat_masks, _, _, _, _ = batch
        predictions = self(frames)
        return self.loss_function(predictions, heat_masks)
class PositionMapUNetPositionMapSegmentation(PositionMapUNetBase):
    """UNet trained as a segmenter against the position map (binary focal loss)."""
    def __init__(self, config: 'DictConfig', train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None,
                 loss_function: 'nn.Module' = BinaryFocalLossWithLogits(alpha=0.8, reduction='mean'),
                 collate_fn: Optional[Callable] = None):
        super(PositionMapUNetPositionMapSegmentation, self).__init__(
            config=config, train_dataset=train_dataset, val_dataset=val_dataset, loss_function=loss_function,
            collate_fn=collate_fn, desired_output_shape=desired_output_shape)
    def _one_step(self, batch):
        frames, _, position_map, _, _, _ = batch
        predictions = self(frames)
        # Targets must be integer class indices with the channel dim removed.
        targets = position_map.long().squeeze(dim=1)
        return self.loss_function(predictions, targets)
class PositionMapUNetClassMapSegmentation(PositionMapUNetBase):
    """UNet trained as a segmenter against the class maps (binary focal loss)."""
    def __init__(self, config: 'DictConfig', train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None,
                 loss_function: 'nn.Module' = BinaryFocalLossWithLogits(alpha=0.8, reduction='mean'),
                 collate_fn: Optional[Callable] = None):
        super(PositionMapUNetClassMapSegmentation, self).__init__(
            config=config, train_dataset=train_dataset, val_dataset=val_dataset, loss_function=loss_function,
            collate_fn=collate_fn, desired_output_shape=desired_output_shape)
    def _one_step(self, batch):
        frames, _, _, _, class_maps, _ = batch
        predictions = self(frames)
        # Targets must be integer class indices with the channel dim removed.
        targets = class_maps.long().squeeze(dim=1)
        return self.loss_function(predictions, targets)
class PositionMapUNetHeatmapSegmentation(PositionMapUNetBase):
    """UNet trained against the heat masks with a binary focal loss (logits in, soft targets)."""
    def __init__(self, config: 'DictConfig', train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None,
                 loss_function: 'nn.Module' = BinaryFocalLossWithLogits(alpha=0.8, reduction='mean'),
                 collate_fn: Optional[Callable] = None):
        super(PositionMapUNetHeatmapSegmentation, self).__init__(
            config=config, train_dataset=train_dataset, val_dataset=val_dataset, loss_function=loss_function,
            collate_fn=collate_fn, desired_output_shape=desired_output_shape)
    def _one_step(self, batch):
        frames, heat_masks, _, _, _, _ = batch
        predictions = self(frames)
        return self.loss_function(predictions, heat_masks)
class PositionMapStackedHourGlass(PositionMapUNetBase):
    """Stacked-hourglass variant: swaps the base class's UNet for a PoseNet and
    trains it against the ground-truth heat masks via PoseNet's own loss."""
    def __init__(self, config: 'DictConfig', train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None,
                 loss_function: 'nn.Module' = BinaryFocalLossWithLogits(alpha=0.8, reduction='mean'),
                 collate_fn: Optional[Callable] = None):
        super(PositionMapStackedHourGlass, self).__init__(
            config=config, train_dataset=train_dataset, val_dataset=val_dataset, loss_function=loss_function,
            collate_fn=collate_fn, desired_output_shape=desired_output_shape)
        self.network = PoseNet(num_stack=self.config.stacked_hourglass.num_stacks,
                               input_channels=self.config.stacked_hourglass.input_channels,
                               num_classes=self.config.stacked_hourglass.num_classes,
                               loss_fn=self.loss_function,
                               bn=self.config.stacked_hourglass.batch_norm,
                               increase=self.config.stacked_hourglass.increase)
        # Alias so the base-class forward() (which calls self.u_net) routes through PoseNet.
        self.u_net = self.network
    def _one_step(self, batch):
        # Batch layout: (frames, heat_masks, position_map, distribution_map, class_maps, meta).
        frames, heat_masks, _, _, _, _ = batch
        out = self(frames)
        # PoseNet computes a loss per hourglass stack; average across stacks.
        loss = self.network.calc_loss(combined_hm_preds=out, heatmaps=heat_masks)
        return loss.mean()
class TrajectoryModel(LightningModule):
    """Wraps the stochastic BaselineGenerator for trajectory prediction; all
    generator hyper-parameters come from `config.trajectory_baseline.generator`."""
    def __init__(self, config: 'DictConfig'):
        super(TrajectoryModel, self).__init__()
        self.config = config
        net_params = self.config.trajectory_baseline.generator
        self.net = BaselineGenerator(embedding_dim_scalars=net_params.embedding_dim_scalars,
                                     encoder_h_g_scalar=net_params.encoder_h_g_scalar,
                                     decoder_h_g_scalar=net_params.decoder_h_g_scalar,
                                     pred_len=net_params.pred_len,
                                     noise_scalar=net_params.noise_scalar,
                                     mlp_vec=net_params.mlp_vec,
                                     mlp_scalar=net_params.mlp_scalar,
                                     POV=net_params.POV,
                                     noise_type=net_params.noise_type,
                                     social_attention=net_params.social_attention,
                                     social_dim_scalar=net_params.social_dim_scalar)
    def forward(self, x):
        # input batch["in_dxdy", "in_xy"]
        return self.net(x)  # {"out_xy": out_xy, "out_dxdy": out_dxdy}
class PositionMapWithTrajectories(PositionMapUNetBase):
    """Joint model pairing a pre-built position-map network with a trajectory
    network. The training step is still a work in progress (see `_one_step`)."""
    def __init__(self, config: 'DictConfig', position_map_model: 'Module', trajectory_model: 'Module',
                 train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None, loss_function: 'nn.Module' = None,
                 collate_fn: Optional[Callable] = None):
        super(PositionMapWithTrajectories, self).__init__(
            config=config, train_dataset=train_dataset, val_dataset=val_dataset, loss_function=loss_function,
            collate_fn=collate_fn, desired_output_shape=desired_output_shape)
        self.position_map_model = position_map_model
        self.trajectory_model = trajectory_model
        self.config = config
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.desired_output_shape = desired_output_shape
        self.loss_function = loss_function
        self.collate_fn = collate_fn
        # Flag for one-time setup on the first training iteration.
        self.first_iter = True
    def _one_step(self, batch):
        # NOTE(review): placeholder — a loss is computed but the method *returns*
        # the NotImplementedError class (not raised), so training cannot work yet.
        frames, heat_masks, position_map, distribution_map, class_maps, meta = batch
        out = self(frames)
        loss = self.loss_function(out, heat_masks)
        return NotImplementedError  # loss
    def freeze_position_map_model(self):
        self.position_map_model.freeze()
    def unfreeze_position_map_model(self):
        self.position_map_model.unfreeze()
    def freeze_trajectory_model(self):
        self.trajectory_model.freeze()
    def unfreeze_trajectory_model(self):
        self.trajectory_model.unfreeze()
class DoubleDeformableConv(nn.Module):
    """
    [ DeformableConv2d => BatchNorm (optional) => ReLU ] x 2

    The first stage can optionally be a plain Conv2d (`use_conv_deform_conv`);
    the second is always a DeformConv2d driven by externally supplied offsets.
    """
    def __init__(self, config: DictConfig, in_ch: int, out_ch: int, last_layer: bool = False,
                 use_conv_deform_conv=False,
                 deform_groups: int = 1):
        super().__init__()
        self.config = config
        self.first_layer = nn.Conv2d(in_ch, out_ch, kernel_size=self.config.hourglass.deform.kernel,
                                     padding=self.config.hourglass.deform.padding) \
            if use_conv_deform_conv else DeformConv2d(in_ch, out_ch, kernel_size=self.config.hourglass.deform.kernel,
                                                      padding=self.config.hourglass.deform.padding)
        self.post_first_layer = nn.Sequential(
            nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True),
        )
        # torchvision's and mmcv's DeformConv2d spell the group kwarg differently,
        # hence the branch on which implementation was imported at module load.
        if self.config.use_torch_deform_conv:
            self.net = DeformConv2d(out_ch, out_ch, kernel_size=self.config.hourglass.deform.kernel,
                                    padding=self.config.hourglass.deform.padding, groups=deform_groups)
        else:
            self.net = DeformConv2d(out_ch, out_ch, kernel_size=self.config.hourglass.deform.kernel,
                                    padding=self.config.hourglass.deform.padding, deform_groups=deform_groups)
        # No trailing BN+ReLU on a head's final layer.
        self.post_net = nn.Sequential() if last_layer else nn.Sequential(nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True))
        self.use_conv_deform_conv = use_conv_deform_conv
    def forward(self, x, offsets):
        out = self.first_layer(x) if self.use_conv_deform_conv else self.first_layer(x, offsets)
        out = self.post_first_layer(out)
        out = self.net(out, offsets)
        return self.post_net(out)
class DeformableConvUp(nn.Module):
    """
    Upsampling (by either bilinear interpolation or transpose convolutions)
    followed by a DoubleDeformableConv; the deformable offsets are predicted
    from the *pre-upsample* input by a dedicated transposed convolution.
    """
    def __init__(self, config: DictConfig, in_ch: int, out_ch: int, bilinear: bool = False, last_layer: bool = False,
                 use_conv_deform_conv=False):
        super().__init__()
        self.config = config
        self.upsample = None
        kernel_size = self.config.hourglass.upsample_params.kernel_bilinear \
            if bilinear else self.config.hourglass.upsample_params.kernel
        if bilinear:
            self.upsample = nn.Sequential(
                nn.Upsample(scale_factor=self.config.hourglass.upsample_params.factor,
                            mode=self.config.hourglass.upsample_params.mode,
                            align_corners=self.config.hourglass.upsample_params.align_corners),
                nn.Conv2d(in_ch, in_ch // 2, kernel_size=kernel_size),
            )
        else:
            self.upsample = nn.ConvTranspose2d(in_ch, in_ch // 2,
                                               kernel_size=kernel_size,
                                               stride=self.config.hourglass.upsample_params.stride)
        self.conv = DoubleDeformableConv(config, in_ch // 2, out_ch, last_layer=last_layer,
                                         use_conv_deform_conv=use_conv_deform_conv,
                                         deform_groups=self.config.hourglass.deform.groups)
        # One (dy, dx) pair per kernel position per deform group.
        offset_out_channel = self.config.hourglass.deform.groups * 2 \
                             * self.config.hourglass.deform.kernel \
                             * self.config.hourglass.deform.kernel
        self.conv_offset = nn.ConvTranspose2d(in_ch, offset_out_channel,
                                              kernel_size=self.config.hourglass.upsample_params.offset_kernel,
                                              stride=self.config.hourglass.upsample_params.offset_stride,
                                              padding=self.config.hourglass.upsample_params.offset_padding,
                                              bias=False)
    def forward(self, x_in):
        x = self.upsample(x_in)
        # Offsets come from the raw input, not the upsampled features.
        offsets = self.conv_offset(x_in)
        return self.conv(x, offsets)
class DoubleConv2d(nn.Module):
    """
    [ Conv2d => BatchNorm (optional) => ReLU ] x 2, with kernel/padding taken from
    `config.hourglass.deform`; the trailing BN+ReLU is dropped for a head's last layer.
    """
    def __init__(self, config: 'DictConfig', in_ch: int, out_ch: int, last_layer: bool = False):
        super().__init__()
        self.config = config
        kernel = self.config.hourglass.deform.kernel
        pad = self.config.hourglass.deform.padding
        self.first_layer = nn.Conv2d(in_ch, out_ch, kernel_size=kernel, padding=pad)
        self.post_first_layer = nn.Sequential(nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True))
        self.net = nn.Conv2d(out_ch, out_ch, kernel_size=kernel, padding=pad)
        if last_layer:
            self.post_net = nn.Sequential()
        else:
            self.post_net = nn.Sequential(nn.BatchNorm2d(out_ch), nn.ReLU(inplace=True))
    def forward(self, x):
        stage = self.first_layer(x)
        stage = self.post_first_layer(stage)
        stage = self.net(stage)
        return self.post_net(stage)
class ConvUp(nn.Module):
    """
    Upsampling (bilinear or transposed convolution, per `config.hourglass.upsample_params`)
    followed by a DoubleConv2d block.
    """
    def __init__(self, config: 'DictConfig', in_ch: int, out_ch: int, bilinear: bool = False, last_layer: bool = False):
        super().__init__()
        self.config = config
        params = self.config.hourglass.upsample_params
        kernel_size = params.kernel_bilinear if bilinear else params.kernel
        if bilinear:
            self.upsample = nn.Sequential(
                nn.Upsample(scale_factor=params.factor, mode=params.mode,
                            align_corners=params.align_corners),
                nn.Conv2d(in_ch, in_ch // 2, kernel_size=kernel_size),
            )
        else:
            self.upsample = nn.ConvTranspose2d(in_ch, in_ch // 2, kernel_size=kernel_size,
                                               stride=params.stride)
        self.conv = DoubleConv2d(config, in_ch // 2, out_ch, last_layer=last_layer)
    def forward(self, x_in):
        return self.conv(self.upsample(x_in))
class HourGlassNetwork(LightningModule):
    """Thin Lightning wrapper around mmdet's HourglassNet backbone, fully
    parameterised by `config.hourglass`."""
    def __init__(self, config: DictConfig):
        super(HourGlassNetwork, self).__init__()
        self.config = config
        self.hour_glass = HourglassNet(downsample_times=self.config.hourglass.downsample_times,
                                       num_stacks=self.config.hourglass.num_stacks,
                                       stage_channels=self.config.hourglass.stage_channels,
                                       stage_blocks=self.config.hourglass.stage_blocks,
                                       feat_channel=self.config.hourglass.feat_channel,
                                       norm_cfg=dict(type=self.config.hourglass.norm_cfg_type,
                                                     requires_grad=self.config.hourglass.norm_cfg_requires_grad),
                                       pretrained=self.config.hourglass.pretrained,
                                       init_cfg=self.config.hourglass.init_cfg)
        self.hour_glass.init_weights()
    def forward(self, x):
        return self.hour_glass(x)
class PositionMapHead(LightningModule):
    """Decoder head: a chain of up-scaling blocks (plain or deformable) applied
    independently to each hourglass stage output via mmdet's multi_apply."""
    def __init__(self, config: DictConfig):
        super(PositionMapHead, self).__init__()
        self.config = config
        layers = []
        feats = self.config.hourglass.feat_channel
        for idx in range(self.config.hourglass.head.num_layers):
            if idx == self.config.hourglass.head.num_layers - 1:
                # NOTE(review): `enable_last_layer_activation` is passed as the
                # blocks' `last_layer` flag, which *skips* the trailing BN+ReLU
                # when true — the name suggests the opposite; verify intent.
                if self.config.hourglass.use_deformable_conv:
                    layers.append(
                        DeformableConvUp(self.config, feats, feats // 2, self.config.hourglass.head.bilinear,
                                         self.config.hourglass.head.enable_last_layer_activation,
                                         use_conv_deform_conv=self.config.hourglass.head.use_conv_deform_conv))
                else:
                    layers.append(
                        ConvUp(self.config, feats, feats // 2, self.config.hourglass.head.bilinear,
                               self.config.hourglass.head.enable_last_layer_activation))
            else:
                if self.config.hourglass.use_deformable_conv:
                    layers.append(
                        DeformableConvUp(self.config, feats, feats // 2, self.config.hourglass.head.bilinear,
                                         use_conv_deform_conv=self.config.hourglass.head.use_conv_deform_conv))
                else:
                    layers.append(ConvUp(self.config, feats, feats // 2, self.config.hourglass.head.bilinear))
            # Channel count halves at every up-scaling stage.
            feats //= 2
        self.module = nn.Sequential(*layers)
        # self.module.init_weights()
    def forward(self, x):
        # `x` is a sequence of per-stage feature maps; process each independently.
        return multi_apply(self.forward_single, x)
    def forward_single(self, x):
        return self.module(x)
class HourGlassPositionMapNetwork(LightningModule):
    """End-to-end position-map model: hourglass backbone -> up-scaling head ->
    final (optionally deformable) conv per stage, trained with a per-stage
    heatmap loss summed over stages."""
    def __init__(self, config: 'DictConfig', backbone: 'nn.Module', head: 'nn.Module',
                 train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None, loss_function: 'nn.Module' = None,
                 collate_fn: Optional[Callable] = None):
        super(HourGlassPositionMapNetwork, self).__init__()
        self.config = config
        self.backbone = backbone
        self.head = head
        last_conv_type = DeformConv2d if self.config.hourglass.use_deformable_conv else nn.Conv2d
        # The head halves channels `head.num_layers` times, hence the divisor.
        self.last_conv = last_conv_type(
            in_channels=self.config.hourglass.feat_channel // (2 ** self.config.hourglass.head.num_layers),
            out_channels=self.config.hourglass.last_conv.out_channels,
            kernel_size=self.config.hourglass.last_conv.kernel,
            stride=self.config.hourglass.last_conv.stride,
            padding=self.config.hourglass.last_conv.padding)
        # One (dy, dx) pair per kernel position per deform group.
        offset_out_channel = self.config.hourglass.deform.groups * 2 \
                             * self.config.hourglass.last_conv.kernel \
                             * self.config.hourglass.last_conv.kernel
        self.conv_offset = nn.Conv2d(
            self.config.hourglass.feat_channel // (2 ** self.config.hourglass.head.num_layers),
            offset_out_channel,
            kernel_size=self.config.hourglass.last_conv.offset_kernel,
            stride=self.config.hourglass.last_conv.offset_stride,
            padding=self.config.hourglass.last_conv.offset_padding,
            bias=False)
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.loss_function = loss_function
        self.collate_fn = collate_fn
        self.desired_output_shape = desired_output_shape
        self.save_hyperparameters(self.config)
    @classmethod
    def from_config(cls, config: DictConfig, train_dataset: Dataset = None, val_dataset: Dataset = None,
                    desired_output_shape: Tuple[int, int] = None, loss_function: nn.Module = None,
                    collate_fn: Optional[Callable] = None):
        # Convenience constructor: builds the backbone and head from `config`.
        return HourGlassPositionMapNetwork(
            config=config,
            backbone=HourGlassNetwork(config=config),
            head=PositionMapHead(config=config),
            train_dataset=train_dataset,
            val_dataset=val_dataset,
            desired_output_shape=desired_output_shape,
            loss_function=loss_function,
            collate_fn=collate_fn)
    def forward(self, x):
        out = self.backbone(x)
        out = self.head(out)
        # Stack the per-stage multi_apply outputs into per-level tensors.
        out = post_process_multi_apply(out)
        if self.desired_output_shape is not None:
            out = [F.interpolate(o, size=self.desired_output_shape) for o in out]
        return self.forward_last(out)
    def forward_last(self, x):
        return multi_apply(self.forward_last_single, x)
    def forward_last_single(self, x):
        if self.config.hourglass.use_deformable_conv:
            offset = self.conv_offset(x)
            return self.last_conv(x, offset)
        else:
            return self.last_conv(x)
    def calculate_loss(self, predictions, heatmaps):
        # These focal-style losses expect probabilities, not logits.
        if self.loss_function._get_name() in ['GaussianFocalLoss', 'CenterNetFocalLoss']:
            predictions = [p.sigmoid() for p in predictions]
        # Every stage prediction is compared against the same ground truth.
        combined_loss = [self.loss_function(pred, heatmaps) for pred in predictions]
        combined_loss = torch.stack(combined_loss, dim=0)
        return combined_loss
    def _one_step(self, batch):
        frames, heat_masks, _, _, _, meta = batch
        out = self(frames)
        # NOTE(review): forward() already ran post_process_multi_apply once; this
        # second call restacks forward_last's multi_apply output — confirm the
        # double application is intended.
        out = post_process_multi_apply(out)
        loss = self.calculate_loss(out, heat_masks)
        return loss.sum()
    def training_step(self, batch, batch_idx):
        loss = self._one_step(batch)
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def validation_step(self, batch, batch_idx):
        loss = self._one_step(batch)
        self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return loss
    def configure_optimizers(self):
        # Adam + ReduceLROnPlateau, hyper-parameters from `config`.
        opt = torch.optim.Adam(self.parameters(), lr=self.config.lr, weight_decay=self.config.weight_decay,
                               amsgrad=self.config.amsgrad)
        schedulers = [
            {
                'scheduler': ReduceLROnPlateau(opt,
                                               patience=self.config.patience,
                                               verbose=self.config.verbose,
                                               factor=self.config.factor,
                                               min_lr=self.config.min_lr),
                'monitor': self.config.monitor,
                'interval': self.config.interval,
                'frequency': self.config.frequency
            }]
        return [opt], schedulers
    def train_dataloader(self) -> DataLoader:
        return DataLoader(
            dataset=self.train_dataset, batch_size=self.config.batch_size,
            shuffle=False, num_workers=self.config.num_workers,
            collate_fn=self.collate_fn, pin_memory=self.config.pin_memory,
            drop_last=self.config.drop_last)
    def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
        return DataLoader(
            dataset=self.val_dataset, batch_size=self.config.batch_size * self.config.val_batch_size_factor,
            shuffle=False, num_workers=self.config.num_workers,
            collate_fn=self.collate_fn, pin_memory=self.config.pin_memory,
            drop_last=self.config.drop_last)
class HourGlassPositionMapNetworkDDP(HourGlassPositionMapNetwork):
    """DDP variant: logs with `sync_dist=True` and feeds both dataloaders through
    a non-shuffling DistributedSampler."""
    def __init__(self, config: 'DictConfig', backbone: 'nn.Module', head: 'nn.Module',
                 train_dataset: 'Dataset', val_dataset: 'Dataset',
                 desired_output_shape: Tuple[int, int] = None, loss_function: 'nn.Module' = None,
                 collate_fn: Optional[Callable] = None):
        super(HourGlassPositionMapNetworkDDP, self).__init__(
            config=config, backbone=backbone, head=head, train_dataset=train_dataset, val_dataset=val_dataset,
            desired_output_shape=desired_output_shape, loss_function=loss_function, collate_fn=collate_fn
        )
    def training_step(self, batch, batch_idx):
        step_loss = self._one_step(batch)
        self.log('train_loss', step_loss,
                 on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
        return step_loss
    def validation_step(self, batch, batch_idx):
        step_loss = self._one_step(batch)
        self.log('val_loss', step_loss,
                 on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
        return step_loss
    def train_dataloader(self) -> DataLoader:
        sampler = torch.utils.data.distributed.DistributedSampler(self.train_dataset, shuffle=False)
        return DataLoader(
            dataset=self.train_dataset, batch_size=self.config.batch_size,
            shuffle=False, num_workers=self.config.num_workers,
            collate_fn=self.collate_fn, pin_memory=self.config.pin_memory,
            drop_last=self.config.drop_last, sampler=sampler)
    def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
        sampler = torch.utils.data.distributed.DistributedSampler(self.val_dataset, shuffle=False)
        return DataLoader(
            dataset=self.val_dataset, batch_size=self.config.batch_size * self.config.val_batch_size_factor,
            shuffle=False, num_workers=self.config.num_workers,
            collate_fn=self.collate_fn, pin_memory=self.config.pin_memory,
            drop_last=self.config.drop_last, sampler=sampler)
@hydra.main(config_path="config", config_name="config")
def verify_nets(cfg):
    """Smoke test: build the hourglass position-map network from the hydra config
    and push a random batch through it to check the plumbing end to end."""
    model = HourGlassPositionMapNetwork.from_config(config=cfg, desired_output_shape=(720 // 3, 360 // 3))
    inp = torch.randn((2, 3, 720 // 2, 360 // 2))
    o = model(inp)
    o = post_process_multi_apply(o)
    print()
if __name__ == '__main__':
    verify_nets()
|
def tally_marks(read_line=input):
    """Read integer marks until a negative value is entered.

    Args:
        read_line: Callable used to obtain each line (injectable for testing).

    Returns:
        (total, count): the sum of all non-negative marks and how many there were.
    """
    total = 0
    count = 0
    mark = int(read_line("Enter the marks of students or a negative value to quit: "))
    while mark >= 0:
        total += mark
        count += 1
        mark = int(read_line(""))
    return total, count


def main():
    total, count = tally_marks()
    print("The total number of students is: {}".format(count))
    print("The total marks of students is : {}".format(total))
    # Bug fix: the original divided unconditionally and crashed with
    # ZeroDivisionError when the very first input was negative.
    if count:
        print("The average mark of students is: {:.2f}".format(total / count))
    else:
        print("No marks were entered; the average is undefined.")


if __name__ == "__main__":
    main()
|
import sys


def describe(label, value):
    """Return the "type and shallow size" description line for *value*.

    Note: sys.getsizeof is shallow — it does not include referenced objects.
    """
    return "a var {} é do tipo {} e tem {} bytes".format(label, type(value), sys.getsizeof(value))


def main():
    nome = "Bruno Wayne"
    idade = 30
    peso = 92.3
    # Renamed from `list` so the builtin is not shadowed.
    niveis = ["youngling", "padawan", "knight", "master"]
    categorias = ("youngling", "padawan", "knight", "master")
    print(describe("nome", nome))
    print(describe("idade", idade))
    print(describe("peso", peso))
    print(describe("list", niveis))
    # Bug fix: the original printed type(list) here instead of type(categorias),
    # so the tuple was reported as a list.
    print(describe("categorias", categorias))


if __name__ == "__main__":
    main()
# Generated by Django 3.1.7 on 2021-03-04 06:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: sets verbose_name "date added" on the `date_added`
    fields of Committee and Membership."""
    dependencies = [
        ("committees", "0008_auto_20210112_1221"),
    ]
    operations = [
        migrations.AlterField(
            model_name="committee",
            name="date_added",
            field=models.DateTimeField(auto_now_add=True, verbose_name="date added"),
        ),
        migrations.AlterField(
            model_name="membership",
            name="date_added",
            field=models.DateTimeField(auto_now_add=True, verbose_name="date added"),
        ),
    ]
|
# -*- coding: utf-8 -*-
import socket # Nécessaire pour ouvrir une connexion
import PoolAdresse #On fait appel aux fonctions présentes dans PoolAdresse.py
import csv #Nécessaire pour manipuler facilement les fichiers CSV
import smtplib
import datetime
import requests
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import os
def Connexion(ip, port, timeout, message):
    """Open a TCP connection to ip:port, send *message*, and return the reply.

    On any socket failure (timeout, refusal, DNS error) the problem is reported
    on stdout and a human-readable "Connexion impossible ..." string is
    returned instead of raising.
    """
    try:
        # `with` guarantees the socket is closed even when send/recv fails —
        # the original leaked the descriptor on any error after socket().
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as connexion_avec_serveur:
            # Abandon the attempt if the server does not answer within `timeout`.
            connexion_avec_serveur.settimeout(timeout)
            connexion_avec_serveur.connect((ip, port))
            connexion_avec_serveur.send(message.encode())
            msg_recu = connexion_avec_serveur.recv(1024).decode()
        print("\n***************************************************")
        print(msg_recu)
        print("***************************************************\n")
        resultatConnexion = msg_recu
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
    # programming errors; OSError covers all socket-level failures.
    except OSError:
        print("Impossible de joindre le serveur "+ip)
        resultatConnexion = "Connexion impossible pour le serveur "+ ip + ':' + str(port)
    return resultatConnexion
def ScanAuto(port, timeout):
    """Scan every host returned by PoolAdresse.PoolAPing() and log each result."""
    for ip in PoolAdresse.PoolAPing():
        Ecrire_Historique("Scan automatique : " + Connexion(ip, port, timeout, "GiveAllInfo"))
def ScanIp(port, timeout, ip):
    """Scan a single IP address and append the outcome to the log file."""
    resultat = Connexion(ip, port, timeout, "GiveAllInfo")
    Ecrire_Historique("Scan d'@IP : " + resultat)
def ScanCSV(fichier, port, timeout):
    """Scan every IP address listed in the given CSV file and log each result."""
    for adresse in Lire_CSV(fichier):
        Ecrire_Historique("Scan CSV : " + Connexion(adresse, port, timeout, "GiveAllInfo"))
def Lire_CSV(fichier):
    """Return the list of valid IPv4 addresses found in a ';'-separated CSV file.

    Every field of every row is tested with est_IP(); non-address fields are
    silently skipped.

    Fix: the file handle used to leak if csv parsing raised mid-way; a `with`
    block now guarantees it is closed.
    """
    adresse_valables = []
    with open(fichier, 'r') as f:
        contenu = csv.reader(f, delimiter=';')
        for ligne in contenu:
            for champ in ligne:
                if est_IP(champ):
                    adresse_valables.append(champ)
    return adresse_valables
def est_IP(adresse):
    """Return True if `adresse` is a well-formed dotted-quad IPv4 address.

    Fix: non-numeric fields (e.g. "a.b.c.d", coming from arbitrary CSV cells
    in Lire_CSV) used to raise ValueError from int(); they now return False.
    """
    champs = adresse.split('.')
    if len(champs) != 4:
        return False  # an IPv4 address has exactly 4 dot-separated fields
    for nombre in champs:
        if not nombre.isdigit():
            return False  # non-numeric (or negative) field
        if int(nombre) > 255:
            return False  # each field must fit in one byte
    return True
def Ecrire_Historique(resultat):
    """Append a timestamped line to the connexions.log file.

    Fix: use a context manager so the handle is closed even if write() fails.
    """
    heure = datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')
    with open("connexions.log", "a") as mon_fichier:
        mon_fichier.write(heure + ' ' + resultat + '\n')
def EnvoieEmail(Email):
    """E-mail the connexions.log scan report (as an attachment) to `Email`.

    Sends through Gmail's SMTP server with STARTTLS.

    Fixes: the attachment file handle is now closed (with-block) and the SMTP
    session is shut down even when login/sending fails.
    """
    # SECURITY NOTE(review): credentials are hard-coded in source; move them
    # to environment variables or a configuration file.
    email = 'scanipofficiel@gmail.com'
    password = 'Scaniprt'
    send_to_email = Email
    subject = 'Resulat du scan - ScanIP'
    message = '''Bonjour,
    Vous trouverez ci-joint les résultats du scan que vous avez réalisez.
    Cordialement, Le logiciel lui-meme.'''
    file_location = './connexions.log'
    msg = MIMEMultipart()
    msg['From'] = email
    msg['To'] = send_to_email
    msg['Subject'] = subject
    msg.attach(MIMEText(message, 'plain'))
    # Build the base64-encoded attachment part from the log file.
    filename = os.path.basename(file_location)
    part = MIMEBase('application', 'octet-stream')
    with open(file_location, "rb") as attachment:
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(part)
    # Send through Gmail; always terminate the SMTP session.
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()
        server.login(email, password)
        text = msg.as_string()
        server.sendmail(email, send_to_email, text)
    finally:
        server.quit()
#Connexion('127.0.0.1',12800,1,'GiveAllInfo')
|
# -*- coding: utf-8 -*-
import theano
# Force double precision for all Theano computations.
theano.config.floatX= 'float64'
#out_sc_x is the Parsey McParseface parsing result of original sentence
#out_sc_y is the Parsey McParseface parsing result of summary
###############################################################################
# NOTE(review): this script uses Python 2 print statements (and integer
# division further down), so it must be run under Python 2.
# Parse the CoNLL-style output: blank lines separate sentences; each non-blank
# line is one token row, split into its whitespace-separated fields.
f=open("./out_sc_x.txt",mode="r")
temp=[]
sc_parsed_x=[]
for lines in f.readlines():
    if len(lines.split())!=0:
        temp.append(lines.split())
    else:
        sc_parsed_x.append(temp)
        temp=[]
# Same parsing for the summary-side file.
# NOTE(review): `f` is rebound without being closed; consider `with open(...)`.
f=open("./out_sc_y.txt",mode="r")
temp=[]
sc_parsed_y=[]
for lines in f.readlines():
    if len(lines.split())!=0:
        temp.append(lines.split())
    else:
        sc_parsed_y.append(temp)
        temp=[]
# Field 1 of each token row is the word form; build word-only sentences.
sc_parsed_x1=[]
sc_parsed_y1=[]
for s in sc_parsed_x:
    temp=[]
    for w in s:
        temp.append(w[1])
    sc_parsed_x1.append(temp)
for s in sc_parsed_y:
    temp=[]
    for w in s:
        temp.append(w[1])
    sc_parsed_y1.append(temp)
# Context-window size used by win() below.
win_nb=5
def win(f, posi, win_nb):
    """Distance-weighted sum of the values around position `posi` of list `f`.

    Looks at up to win_nb//2 neighbours on each side of `posi` (excluding
    `posi` itself), weighting them 1.0, 1.1, ... Used below as a "context
    score" over the 0/1 mark vector f_mark.

    Fix: use floor division (//) so slice bounds and the range() argument are
    ints under Python 3 too; on Python 2 behaviour is unchanged (int/int
    already floored).
    """
    half = win_nb // 2
    left = f[max(0, posi - half):posi]
    right = f[posi + 1:min(len(f), posi + half + 1)]
    weights = [1 + i * 0.1 for i in range(half)]
    left_sum = 0
    for i in range(len(left)):
        left_sum += left[i] * weights[i]
    # Weight the right context from the far side inward, as the original did.
    right.reverse()
    right_sum = 0
    for i in range(len(right)):
        right_sum += right[i] * weights[i]
    right.reverse()
    return left_sum + right_sum
import collections as col
# For every (source sentence, summary) pair decide which source tokens are
# "kept" (f_mark[i] == 1), i.e. appear in the summary.
label_all=[]
for father, son in zip(sc_parsed_x1, sc_parsed_y1):
    f_len=len(father)
    s_len=len(son)
    f_mark=[0]*f_len
    s_mark=[0]*s_len
    f_col=col.Counter(father)
    #s_col=col.Counter(son)
    # word -> set of positions; only filled for words that repeat in `father`.
    temp_father=dict()
    temp_son=dict()
    for i in range(f_len):
        temp_father[father[i]]=set()
    for i in range(s_len):
        temp_son[son[i]]=set()
    # Unambiguous case: a summary word occurring exactly once in the source is
    # marked directly; repeated words are only collected here and resolved below.
    for s_id in range(s_len):
        for f_id in range(f_len):
            if son[s_id]==father[f_id]:
                if f_col[son[s_id]]==1:
                    f_mark[f_id]=1
                    s_mark[s_id]=1
                else:
                    if f_col[son[s_id]]>=2:
                        temp_father[son[s_id]].add(f_id)
                        temp_son[son[s_id]].add(s_id)
    # Drop words that ended up with no ambiguous occurrences.
    # NOTE(review): deleting from a dict while iterating .keys() only works on
    # Python 2 (keys() returns a list there); Python 3 raises RuntimeError.
    for ele in temp_father.keys():
        if temp_father[ele]==set():
            del temp_father[ele]
    for ele in temp_son.keys():
        if temp_son[ele]==set():
            del temp_son[ele]
    # Resolve repeated words: equal occurrence counts -> mark all source
    # positions; otherwise keep the source positions whose neighbourhood
    # (win() over f_mark) already contains the most marked tokens.
    for word in temp_son.keys():
        if len(temp_son[word])==len(temp_father[word]):
            for posi in temp_father[word]:
                f_mark[posi]=1
        else:
            if len(temp_son[word]) < len(temp_father[word]):
                num_max=len(temp_son[word])
                max_context=0
                queue=[]
                for posi in temp_father[word]:
                    queue.append((win(f_mark, posi, win_nb), posi))
                queue=sorted(queue, reverse=True)
                for k in range(num_max):
                    f_mark[queue[k][1]]=1
    label_all.append([temp_father,temp_son, f_mark,s_mark])
# Per-sentence 0/1 keep-labels (element -2 of each record is f_mark).
label=[]
for s in label_all:
    label.append(s[-2])
###############################################################################
#train_x: sc_parsed_x1[i]
#train_y: label[i]
print 'Lemmatize...'
change=set()
from nltk.stem import WordNetLemmatizer
lem = WordNetLemmatizer()
# Lemmatize every source token, recording (i, j, before, after) for each change.
for i in range(len(sc_parsed_x1)):
    for j in range(len(sc_parsed_x1[i])):
        try:
            temp_a=sc_parsed_x1[i][j]
            sc_parsed_x1[i][j]=lem.lemmatize(sc_parsed_x1[i][j])
            temp_b=sc_parsed_x1[i][j]
            if temp_a!=temp_b:
                change.add((i,j,temp_a,temp_b))
        except:
            # Lemmatizer fails on non-ASCII bytes; fall back to decoding the
            # token (Python 2 str -> unicode).
            sc_parsed_x1[i][j]=sc_parsed_x1[i][j].decode("utf-8")
# (i, j) -> (word before, word after) for every lemmatization change.
change_dict={}
for d in change:
    change_dict[d[:2]]=d[2:]
###############################################################################
print "train word2vec using training dataset"+'\n'
import numpy as np
np.random.seed(1337) # for reproducibility
import multiprocessing
from gensim.models.word2vec import Word2Vec
from gensim.corpora.dictionary import Dictionary
# set parameters:
emb_dim = 50
n_exposures = 0
window_size = 7
cpu_count = multiprocessing.cpu_count()
#build word2vec(skip-gram) model for dundee corpus
model = Word2Vec(size=emb_dim,
                 min_count=n_exposures,
                 window=window_size,
                 workers=cpu_count,
                 iter=10,
                 sg=1)
model.build_vocab(sc_parsed_x1)
model.train(sc_parsed_x1)
gensim_dict = Dictionary()
gensim_dict.doc2bow(model.vocab.keys(),
                    allow_update=True)
# word -> index (index 0 is reserved for padding), word -> vector, index -> word.
index_dict = {v: k+1 for k, v in gensim_dict.items()}
word_vectors = {word: model[word] for word in index_dict.keys()}
index2word = {index_dict[num]: num for num in index_dict.keys()}
print('Setting up Arrays for Keras Embedding Layer...')
n_symbols = len(index_dict) + 1 # adding 1 to account for 0th index
embedding_weights = np.zeros((n_symbols, emb_dim))
for word, index in index_dict.items():
    embedding_weights[index,:] = word_vectors[word]
###############################################################################
# Map each training sentence to its sequence of word indices.
train_x2 = []
for i in range(len(sc_parsed_x1)):
    temp=[]
    for j in range(len(sc_parsed_x1[i])):
        temp.append(index_dict[sc_parsed_x1[i][j]])
    train_x2.append(temp)
print "theano RNN"
import theano
import theano.tensor as tt
from collections import OrderedDict
def contextwin(l, win):
    """Return, for every position of `l`, the length-`win` window centred on
    that position, zero-padded at both ends.

    `win` must be an odd integer >= 1; the result has one window per element.
    """
    assert (win % 2) == 1
    assert win >= 1
    words = list(l)
    pad = win // 2 * [0]
    padded = pad + words + pad
    windows = [padded[start:(start + win)] for start in range(len(words))]
    assert len(windows) == len(words)
    return windows
nb_voca=30000
win_size = 1
rnn_hdim = 150
rnn_output_dim = 2
# Word-index inputs with a context window of 1 (i.e. just the token itself).
x_data = []
for d in train_x2:
    x_data.append(np.array(contextwin(d, win_size),dtype=np.int64))
# One-hot targets: [0,1] = token kept in summary, [1,0] = dropped.
y_data=[]
for i in range(10000):
    temp=[]
    for j in range(len(label[i])):
        if label[i][j]==1:
            temp.append([0,1])
        else:
            temp.append([1,0])
    y_data.append(np.array(temp,dtype=np.int64))
# Channel 1: trainable word embeddings initialised from word2vec.
embedding = theano.shared(embedding_weights.astype(np.float64))
y = tt.lmatrix('y_label')
idxs = tt.lmatrix('idxs')
x = embedding[idxs].reshape((idxs.shape[0], emb_dim))
###############################################################################
#the second channal: dependency embeddings
#train_x: sc_parsed_x[i]
#train_y: label[i]
dep=[]
dep_dim=50
# Field 7 of each CoNLL token row is the dependency label.
for i in range(len(sc_parsed_x)):
    temp=[]
    for j in range(len(sc_parsed_x[i])):
        temp.append(sc_parsed_x[i][j][7])
    dep.append(temp)
# Enumerate the distinct dependency labels and index them.
dep_all=set()
dep_num=0
for s in dep:
    for d in s:
        dep_all.add(d)
dep_all=list(dep_all)
dep_num=len(dep_all)
dep2id=dict()
for i in range(dep_num):
    dep2id[dep_all[i]]=i
dep_idxs=[]
for i in range(len(dep)):
    temp=[]
    for j in range(len(dep[i])):
        temp.append([dep2id[dep[i][j]]])
    dep_idxs.append(np.array(temp))
# Randomly initialised, trainable dependency-label embeddings.
emb_dep=np.random.uniform(-0.1, 0.1, (dep_num, dep_dim)).astype(np.float64)
embedding_dep = theano.shared(emb_dep.astype(np.float64))
dep_input = tt.lmatrix('dep_input')
x_dep = embedding_dep[dep_input].reshape((dep_input.shape[0], dep_dim))
###############################################################################
#the third channal: pos embeddings
#train_x: sc_parsed_x[i]
#train_y: label[i]
pos=[]
pos_dim=50
# Field 4 of each CoNLL token row is the POS tag.
for i in range(len(sc_parsed_x)):
    temp=[]
    for j in range(len(sc_parsed_x[i])):
        temp.append(sc_parsed_x[i][j][4])
    pos.append(temp)
pos_all=set()
pos_num=0
for s in pos:
    for d in s:
        pos_all.add(d)
pos_all=list(pos_all)
pos_num=len(pos_all)
pos2id=dict()
for i in range(pos_num):
    pos2id[pos_all[i]]=i
pos_idxs=[]
for i in range(len(pos)):
    temp=[]
    for j in range(len(pos[i])):
        temp.append([pos2id[pos[i][j]]])
    pos_idxs.append(np.array(temp))
# Randomly initialised, trainable POS-tag embeddings.
emb_pos=np.random.uniform(-0.1, 0.1, (pos_num, pos_dim)).astype(np.float64)
embedding_pos = theano.shared(emb_pos.astype(np.float64))
pos_input = tt.lmatrix('pos_input')
x_pos = embedding_pos[pos_input].reshape((pos_input.shape[0], pos_dim))
###############################################################################
# Per-token input = [word ; dependency ; POS] embedding concatenation.
cat_inputs = tt.concatenate([x, x_dep,x_pos], axis=1)
ini_dim=emb_dim+dep_dim+pos_dim
# RNN weights, scaled ~1/sqrt(fan-in): 1/2 = layer-1 forward/backward,
# 3/4 = layer-2 forward/backward (used by the scans defined below).
wx1 = theano.shared(np.random.normal(0, 1/np.sqrt(ini_dim), (ini_dim, rnn_hdim)))
wh1 = theano.shared(np.random.normal(0, 1/np.sqrt(rnn_hdim), (rnn_hdim, rnn_hdim)))
h0_1 = theano.shared(np.zeros(rnn_hdim, ))
bh1 = theano.shared(np.random.normal(0, 1, (rnn_hdim, )))
wx2 = theano.shared(np.random.normal(0, 1/np.sqrt(ini_dim), (ini_dim, rnn_hdim)))
wh2 = theano.shared(np.random.normal(0, 1/np.sqrt(rnn_hdim), (rnn_hdim, rnn_hdim)))
h0_2 = theano.shared(np.zeros(rnn_hdim, ))
bh2 = theano.shared(np.random.normal(0, 1, (rnn_hdim, )))
wx3 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim), (2*rnn_hdim, rnn_hdim)))
wh3 = theano.shared(np.random.normal(0, 1/np.sqrt(rnn_hdim), (rnn_hdim, rnn_hdim)))
h0_3 = theano.shared(np.zeros(rnn_hdim, ))
bh3 = theano.shared(np.random.normal(0, 1, (rnn_hdim, )))
wx4 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim), (2*rnn_hdim, rnn_hdim)))
wh4 = theano.shared(np.random.normal(0, 1/np.sqrt(rnn_hdim), (rnn_hdim, rnn_hdim)))
h0_4 = theano.shared(np.zeros(rnn_hdim, ))
bh4 = theano.shared(np.random.normal(0, 1, (rnn_hdim, )))
def recurrence1(x_t, h_tm1):
    # Layer-1 forward step: plain tanh RNN cell (the commented term was a
    # multiplicative variant that was abandoned).
    h_t = tt.tanh(#tt.dot(x_t, wx1) * tt.dot(h_tm1, wh1) +
                  tt.dot(x_t, wx1) + tt.dot(h_tm1, wh1) + bh1)
    return h_t
def recurrence2(x_t, h_tm1):
    # Layer-1 backward step.
    h_t = tt.tanh(#tt.dot(x_t, wx2) * tt.dot(h_tm1, wh2) +
                  tt.dot(x_t, wx2) + tt.dot(h_tm1, wh2) + bh2)
    return h_t
def recurrence3(x_t, h_tm1):
    # Layer-2 forward step (input: layer-1 forward+backward concatenation).
    h_t = tt.tanh(#tt.dot(x_t, wx3) * tt.dot(h_tm1, wh3) +
                  tt.dot(x_t, wx3) + tt.dot(h_tm1, wh3) + bh3)
    return h_t
def recurrence4(x_t, h_tm1):
    # Layer-2 backward step.
    h_t = tt.tanh(#tt.dot(x_t, wx4) * tt.dot(h_tm1, wh4) +
                  tt.dot(x_t, wx4) + tt.dot(h_tm1, wh4) + bh4)
    return h_t
# Layer 1: scan forward and backward over the per-token inputs.
h1, _ = theano.scan(fn=recurrence1,
                    sequences=cat_inputs,#
                    outputs_info=[h0_1],
                    n_steps=cat_inputs.shape[0])
h2, _ = theano.scan(fn=recurrence2,
                    sequences=cat_inputs,#
                    outputs_info=[h0_2],
                    n_steps=cat_inputs.shape[0],#
                    go_backwards=True)
# The backward pass is re-reversed so both directions align per token.
hidden12 = tt.concatenate([h1, h2[::-1]], axis=1)
# Layer 2: the same bidirectional scheme on top of layer 1's output.
h3, _ = theano.scan(fn=recurrence3,
                    sequences=hidden12,
                    outputs_info=[h0_3],
                    n_steps=hidden12.shape[0])
h4, _ = theano.scan(fn=recurrence4,
                    sequences=hidden12,
                    outputs_info=[h0_4],
                    n_steps=hidden12.shape[0],
                    go_backwards=True)
hidden34 = tt.concatenate([h3, h4[::-1]], axis=1)
###############################################################################
# Gated combination of the RNN state with the three input channels (word /
# dependency / POS embeddings): per-channel reset gates (wr*) and
# softmax-normalised mixing weights (wz*), plus a tanh candidate (w_cadi).
gated_dim=emb_dim
wr1 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+emb_dim), (2*rnn_hdim+emb_dim, gated_dim)))
br1 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
wr2 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+dep_dim), (2*rnn_hdim+dep_dim, gated_dim)))
br2 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
wr3 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+pos_dim), (2*rnn_hdim+pos_dim, gated_dim)))
br3 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
#wr4 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+dad_dep_dim), (2*rnn_hdim+dad_dep_dim, gated_dim)))
#br4 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
w_cadi = theano.shared(np.random.normal(0, 1/np.sqrt(ini_dim), (ini_dim, gated_dim)))
b_cadi = theano.shared(np.random.normal(0, 1, (gated_dim, )))
wz1 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+emb_dim), (2*rnn_hdim+emb_dim, gated_dim)))
bz1 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
wz2 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+dep_dim), (2*rnn_hdim+dep_dim, gated_dim)))
bz2 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
wz3 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+pos_dim), (2*rnn_hdim+pos_dim, gated_dim)))
bz3 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
#wz4 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+dad_dep_dim), (2*rnn_hdim+dad_dep_dim, gated_dim)))
#bz4 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
wz5 = theano.shared(np.random.normal(0, 1/np.sqrt(2*rnn_hdim+gated_dim), (2*rnn_hdim+ gated_dim, gated_dim)))
bz5 = theano.shared(np.random.normal(0, 1, (gated_dim, )))
def gate_nn(hidden, x, x_dep ,x_pos):#, x_dad_dep
    # Per-channel reset gates conditioned on the RNN state.
    r1=tt.nnet.sigmoid(tt.dot(tt.concatenate([hidden, x]), wr1) + br1)
    r2=tt.nnet.sigmoid(tt.dot(tt.concatenate([hidden, x_dep]), wr2) + br2)
    r3=tt.nnet.sigmoid(tt.dot(tt.concatenate([hidden, x_pos]), wr3) + br3)
    #r4=tt.nnet.sigmoid(tt.dot(tt.concatenate([hidden, x_dad_dep]), wr4) + br4)
    # Candidate vector built from the reset-gated channels.
    w_= tt.tanh(tt.dot(tt.concatenate([x*r1, x_dep*r2, x_pos*r3]), w_cadi) + b_cadi)#, x_dad_dep*r4
    # Mixing weights, softmax-normalised across the four contributions.
    z1=tt.exp(tt.dot(tt.concatenate([hidden, x]), wz1) + bz1)
    z2=tt.exp(tt.dot(tt.concatenate([hidden, x_dep]), wz2) + bz2)
    z3=tt.exp(tt.dot(tt.concatenate([hidden, x_pos]), wz3) + bz3)
    #z4=tt.exp(tt.dot(tt.concatenate([hidden, x_dad_dep]), wz4) + bz4)
    z5=tt.exp(tt.dot(tt.concatenate([hidden, w_]), wz5) + bz5)
    z_sum=z1+z2+z3+z5#+z4
    z1=z1/z_sum
    z2=z2/z_sum
    z3=z3/z_sum
    #z4=z4/z_sum
    z5=z5/z_sum
    # Convex combination of the three channels and the candidate.
    hz = z1*x + z2*x_dep + z3*x_pos + z5*w_ #z4*x_dad_dep +
    h_cat = tt.concatenate([hz])
    return h_cat #z4*x_dad_dep +
# Apply the gate at every token position.
hidden_gate, _=theano.scan(fn=gate_nn,
                           sequences=[hidden34, x, x_dep ,x_pos],
                           n_steps=x.shape[0])
###############################################################################
# Final per-token softmax over {drop, keep} on top of the gated representation.
cat_dim=gated_dim
w = theano.shared(np.random.normal(0, 1/np.sqrt(cat_dim),
                                   (cat_dim, rnn_output_dim)))
b = theano.shared(np.zeros(rnn_output_dim, ))
softmax = tt.nnet.softmax(tt.dot(hidden_gate, w)+b)
# Train / validation / test split.
# Fix: the "dad_dep" fourth channel was removed above (all its weights are
# commented out), but these dataset lines still referenced the never-defined
# dad_dep_idxs / dad_idxs and raised NameError -- they are now commented out
# like the rest of that channel. They were also never used afterwards.
#dad_dep_x_train_set = dad_dep_idxs[2000:]
#dad_x_train_set = dad_idxs[2000:]
dep_x_train_set = dep_idxs[2000:]
pos_x_train_set = pos_idxs[2000:]
x_train_set = x_data[2000:]
y_train_set = y_data[2000:]
#dad_dep_x_val_set = dad_dep_idxs[1000:2000]
#dad_x_val_set = dad_idxs[1000:2000]
dep_x_val_set = dep_idxs[1000:2000]
pos_x_val_set = pos_idxs[1000:2000]
x_val_set = x_data[1000:2000]
y_val_set = label[1000:2000]
#dad_dep_x_test=dad_dep_idxs[:1000]
#dad_x_test=dad_idxs[:1000]
dep_x_test=dep_idxs[:1000]
pos_x_test=pos_idxs[:1000]
x_test=x_data[:1000]
y_test=label[:1000]
# All trainable parameters, updated by plain SGD below.
params = [embedding, wx1, wh1, h0_1, bh1,
          wx2, wh2, h0_2, bh2,
          wx3, wh3, h0_3, bh3,
          wx4, wh4, h0_4, bh4,
          wr1, br1,
          wr2, br2,
          wr3, br3,
          #wr4, br4,
          w_cadi, b_cadi,
          wz1, bz1,
          wz2, bz2,
          wz3, bz3,
          #wz4, bz4,
          wz5, bz5,
          embedding_dep,
          embedding_pos,
          w, b]
###############################################################################
# Cross-entropy loss with L2 regularisation over every parameter.
l2_loss=0
for param in params:
    l2_loss+=(param**2).sum()
lamda=theano.shared(np.array(1e-04))
loss1 = tt.mean(tt.nnet.categorical_crossentropy(softmax, y)) + lamda/float(2) * l2_loss
y_pred = tt.argmax(softmax, axis=1)
gradients = tt.grad(loss1, params)
lr=tt.dscalar('lr')
# Vanilla SGD update rule.
updates = OrderedDict(( p, p-lr*g )
                      for p, g in zip( params , gradients))
train = theano.function(inputs = [idxs, dep_input, pos_input, y, lr],
                        outputs =loss1,
                        updates = updates)
#allow_input_downcast=True)
pred = theano.function(inputs=[idxs, dep_input, pos_input],
                       outputs=y_pred)
#allow_input_downcast=True)
def prediction(x_set, dep_x_set, pos_x_set, y_set):
    """Token-level accuracy of `pred` over a dataset.

    XOR-ing the 0/1 gold labels with the predicted class indices counts the
    mismatching tokens; accuracy = matching tokens / total tokens.
    """
    mismatched = 0
    total = 0
    for i in range(len(x_set)):
        predicted = pred(x_set[i], dep_x_set[i], pos_x_set[i])
        mismatched += sum(y_set[i] ^ predicted)
        total += len(x_set[i])
    return (total - mismatched) / float(total)
print 'iteration starts...'
# Plain-SGD training loop over epochs; evaluates periodically and tracks the
# best validation accuracy (Python 2: print statements, zip() returns a list).
nb_epoch = 30
n = 0
learning_rate=0.007
val_acc1=[]
test_acc1=[]
best=0
import time
while(n<nb_epoch):
    #learning_rate=learning_rate-n*0.0005
    n+=1
    val_acc=[]
    test_acc=[]
    train_data=zip(x_train_set, dep_x_train_set, pos_x_train_set, y_train_set)
    t0 = time.time()
    for i in range(len(train_data)):
        cost=train(train_data[i][0], train_data[i][1], train_data[i][2],train_data[i][3],learning_rate)
        # Periodic progress report + validation/test evaluation.
        if i!=0 and i%7999==0:
            print "epoch:", n
            print i
            print "emb_dim=", emb_dim
            print "rnn_hdim=", rnn_hdim
            print "learning_rate:",learning_rate
            print "cost:",cost
            val_value = prediction(x_val_set, dep_x_val_set , pos_x_val_set, y_val_set)
            test_value = prediction(x_test,dep_x_test , pos_x_test , y_test)
            # Keep the test accuracy observed at the best validation accuracy.
            if val_value>best:
                best=val_value
                best_test=test_value
            val_acc.append(val_value)
            test_acc.append(test_value)
            print "val_accuracy:", val_value
            print "test_accuracy:", test_value
            print "best till now:",best
            print "best_test under best val:",best_test
            print '\n'
    t1 = time.time()
    print "the hours this epoch takes is:",(t1-t0)/float(3600)
    val_acc1.append(val_acc)
    test_acc1.append(test_acc)
|
#Escriba un programa en Python donde el usuario introduce un número n
#y el programa imprime los primeros n números triangulares, junto con
#su índice. Los números triangulares se originan de la suma de los números
#naturales desde 1 hasta n.Ejemplo: Si se piden los primeros 3 números
#triangulares, la salida es: 1 - 1 2 - 3 3 - 6
# Read n; non-numeric input prints a warning and falls back to 0.
try:
    numero = int(input("Introduzca un numero: "))
except ValueError:
    print("debe ser un numero Natural")
    numero = 0
if numero < 0:
    print("Este numero no es natural")
else:
    # Print the first n triangular numbers with their index, e.g. for n=3:
    # "1 - 1", "2 - 3", "3 - 6" (as specified in the problem statement above).
    # Fix: the original printed `numero` on every line instead of the running
    # index, so the output never matched the documented form.
    result = 0
    for index in range(1, numero + 1):
        result += index
        print(str(index) + " - " + str(result))
|
# Postorder Traversal
# Problem Description
# Given a binary tree, return the Postorder traversal of its nodes values.
# NOTE: Using recursion is not allowed.
# Problem Constraints
# 1 <= number of nodes <= 10^5
# Input Format
# First and only argument is root node of the binary tree, A.
# Output Format
# Return an integer array denoting the Postorder traversal of the given binary tree.
# Example Input
# Input 1:
# 1
# \
# 2
# /
# 3
# Input 2:
# 1
# / \
# 6 2
# /
# 3
# Example Output
# Output 1:
# [3, 2, 1]
# Output 2:
# [6, 3, 2, 1]
# Example Explanation
# Explanation 1:
# The Preoder Traversal of the given tree is [3, 2, 1].
# Explanation 2:
# The Preoder Traversal of the given tree is [6, 3, 2, 1].
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param A : root node of tree
    # @return a list of integers
    def postorderTraversal(self, A):
        """Iterative postorder traversal (recursion not allowed).

        Visit nodes in root/right-before-left order using a plain list as a
        LIFO stack, then reverse the visit sequence: the result is the
        postorder left, right, root ordering.
        """
        pending = [A]
        visited = []
        while pending:
            node = pending.pop()
            visited.append(node.val)
            if node.left:
                pending.append(node.left)
            if node.right:
                pending.append(node.right)
        visited.reverse()
        return visited
# 20 choose 3 with reps
# NOTE(review): the loops actually enumerate 1..15, i.e. "15 choose 3 with
# repetition" (680 triples), and `counter` starts at 10 rather than 0 --
# confirm both are intended.
counter = 10
for first in range(1, 16):
    for second in range(first, 16):
        for third in range(second, 16):
            print(first, second, third)
            counter += 1
print(counter)
|
import os,sys
import ROOT
from array import array
import argparse
from datetime import datetime
import pandas as pd
import numpy as np
import pdb
sys.path.append(os.getcwd())
print(os.getcwd())
# Make the ZCounting CMSSW package importable.
os.sys.path.append(os.path.expandvars('$CMSSW_BASE/src/ZCounting/'))
from ZUtils.python.utils import to_RootTime
# Run ROOT headless and tweak the global plotting style.
ROOT.gROOT.SetBatch(True)
ROOT.gStyle.SetCanvasPreferGL(1)
ROOT.gStyle.SetTitleX(.3)
# Two per-lumisection Z-rate CSVs: rates1 = nominator, rates2 = denominator.
parser = argparse.ArgumentParser()
parser.add_argument("--rates1", required=True, type=str, help="Nominator csv file with z rates perLS")
parser.add_argument("--rates2", required=True, type=str, help="Denominator second csv file with z rates perLS")
parser.add_argument("-s","--saveDir", default='./', type=str, help="give output dir")
args = parser.parse_args()
outDir = args.saveDir
if not os.path.isdir(outDir):
    os.mkdir(outDir)
def make_ratio(dataNom, dataDenom, run_range_Nom=None, run_range_Denom=None,
               lumiUnc=0.013, name="", lumi_name='lumiRec'):
    """Plot nominator/denominator ratios of luminosity and of delivered-Z
    counts per category (BB, BE, EE, total), plus a lower pad with the same
    ratios normalised to the luminosity ratio; saves <outDir>/ratio*.png.

    dataNom, dataDenom: per-lumisection dataframes with zDel*/zYield* columns
        (the zDel/zDel_mc totals are added by the caller below).
    run_range_Nom, run_range_Denom: optional (first_run, last_run) filters.
    lumiUnc: relative luminosity uncertainty drawn on the lumi point.
    name: headline text for the plot.
    lumi_name: column holding the recorded luminosity.
    """
    # Optionally restrict each dataset to its run range.
    if run_range_Nom:
        dataNom = dataNom.query("run >= {0} & run <= {1}".format(*run_range_Nom))
    if run_range_Denom:
        dataDenom = dataDenom.query("run >= {0} & run <= {1}".format(*run_range_Denom))
    # Ratios of integrated luminosity and of pileup-corrected (_mc) Z counts.
    rLumi = dataNom[lumi_name].sum() / dataDenom[lumi_name].sum()
    rZ_BB = dataNom['zDelBB_mc'].sum() / dataDenom['zDelBB_mc'].sum()
    rZ_BE = dataNom['zDelBE_mc'].sum() / dataDenom['zDelBE_mc'].sum()
    rZ_EE = dataNom['zDelEE_mc'].sum() / dataDenom['zDelEE_mc'].sum()
    rZ_tot = dataNom['zDel_mc'].sum() / dataDenom['zDel_mc'].sum()
    # uncertainty on pileup correction: assume 100% uncertainty
    # (difference between corrected and uncorrected ratios).
    rZ_BB_err_PU = abs(rZ_BB - dataNom['zDelBB'].sum() / dataDenom['zDelBB'].sum())
    rZ_BE_err_PU = abs(rZ_BE - dataNom['zDelBE'].sum() / dataDenom['zDelBE'].sum())
    rZ_EE_err_PU = abs(rZ_EE - dataNom['zDelEE'].sum() / dataDenom['zDelEE'].sum())
    rZ_tot_err_PU = abs(rZ_tot - dataNom['zDel'].sum() / dataDenom['zDel'].sum())
    # Statistical uncertainties from the denominator yields.
    rZ_BB_err = rZ_BB * 1. / np.sqrt(dataDenom['zYieldBB'].sum())
    rZ_BE_err = rZ_BE * 1. / np.sqrt(dataDenom['zYieldBE'].sum())
    rZ_EE_err = rZ_EE * 1. / np.sqrt(dataDenom['zYieldEE'].sum())
    rZ_tot_err = rZ_tot * 1. / (dataDenom['zDelBB_mc'].sum() + dataDenom['zDelBE_mc'].sum() + dataDenom['zDelEE_mc'].sum()) * np.sqrt(dataDenom['zYieldBB'].sum() + dataDenom['zYieldBE'].sum() + dataDenom['zYieldEE'].sum())
    points = np.array([rLumi, rZ_BB, rZ_BE, rZ_EE, rZ_tot])
    points_err = np.array([rLumi*lumiUnc, rZ_BB_err, rZ_BE_err, rZ_EE_err, rZ_tot_err])
    points_err2 = np.array([0., rZ_BB_err_PU, rZ_BE_err_PU, rZ_EE_err_PU, rZ_tot_err_PU])
    ########## Plot ##########
    xmin = 0.9
    xmax = 1.5
    # One single-point graph per quantity: `graphs` carries the total error,
    # `graphs2` only the pileup-correction component (drawn in red on top).
    graphs = []
    graphs2 = []
    for i, (ipoint, ierr, ierr2, nam, ptr) in enumerate(
        zip(points, points_err, points_err2,
            ['Lumi','Z BB','Z BE','Z EE','Z total'],
            [20, 21, 22, 23, 34],
        )):
        graph=ROOT.TGraphErrors(1, np.array([0.1*i+1]), np.array([ipoint]), np.array([0.]), np.array(np.sqrt(np.array(ierr)**2 + np.array(ierr2)**2)))
        graph2=ROOT.TGraphErrors(1, np.array([0.1*i+1]), np.array([ipoint]), np.array([0.]), np.array(ierr2))
        graph.SetName(nam)
        graph.SetTitle(nam)
        graph.SetMarkerStyle(ptr)
        graph2.SetMarkerStyle(ptr)
        graph.SetMarkerColor(i+1)
        graph2.SetMarkerColor(i+1)
        graph.SetFillStyle(1001)
        graph2.SetFillStyle(1001)
        graph.SetMarkerSize(1.5)
        graph2.SetMarkerSize(1.5)
        graph.SetLineColor(1)
        graph2.SetLineColor(2)
        graphs.append(graph)
        graphs2.append(graph2)
    # Upper pad: absolute ratios.
    c2=ROOT.TCanvas("c2","c2",500,600)
    pad1 = ROOT.TPad("pad1", "pad1", 0., 0.4, 1, 1.0)
    pad1.SetBottomMargin(0.)
    c2.SetTicks()
    pad1.SetLeftMargin(0.2)
    pad1.SetRightMargin(0.01)
    pad1.SetTopMargin(0.1)
    pad1.SetTickx()
    pad1.SetTicky()
    pad1.Draw()
    pad1.cd()
    textsize = 24./(pad1.GetWh()*pad1.GetAbsHNDC())
    latex = ROOT.TLatex()
    latex.SetNDC()
    latex.SetTextAlign(11)
    latex.SetTextFont(42)
    latex.SetTextSize(textsize)
    ymin = min(points-points_err)*0.999
    ymax = ymin + 1.25 * (max(points) - ymin)
    graphs[0].GetYaxis().SetRangeUser(ymin, ymax)
    graphs[0].GetXaxis().SetRangeUser(xmin, xmax)
    graphs[0].GetXaxis().SetLabelSize(0)
    graphs[0].GetYaxis().SetTitle("Ratio")
    graphs[0].GetYaxis().SetTitleOffset(1.4)
    graphs[0].GetYaxis().SetTitleSize(textsize)
    graphs[0].GetYaxis().SetLabelSize(textsize)
    graphs[0].Draw("AP")
    legend=ROOT.TLegend(0.75,0.55,0.98,0.85)
    latex.SetTextSize(textsize)
    latex.SetTextFont(42)
    latex.DrawLatex(0.2, 0.91, name)
    latex.SetTextAlign(11)
    latex.DrawLatex(0.35, 0.81, "Preliminary")
    latex.SetTextAlign(11)
    latex.SetTextFont(62)
    latex.DrawLatex(0.23, 0.81, 'CMS')
    for graph in graphs:
        graph.Draw("P same")
        legend.AddEntry(graph,"","pe")
    for graph in graphs2:
        graph.Draw("E same")
    legend.SetTextFont(42)
    legend.SetTextSize(textsize)
    legend.Draw("same")
    graphs[0].SetTitle("")
    graphs[0].Draw("same")
    ### ratio ###
    # Lower pad: everything normalised to the luminosity ratio (first point).
    points_err /= points[0]
    points_err2 /= points[0]
    points /= points[0]
    rgraphs = []
    rgraphs2 = []
    for i, (ipoint, ierr, ierr2, nam, ptr) in enumerate(
        zip(points, points_err, points_err2,
            ['Lumi','Z BB','Z BE','Z EE','Z total'],
            [20, 21, 22, 23, 34],
        )):
        graph=ROOT.TGraphErrors(1, np.array([0.1*i+1]), np.array([ipoint]), np.array([0.]), np.array(np.sqrt(np.array(ierr)**2 + np.array(ierr2)**2)))
        graph2=ROOT.TGraphErrors(1, np.array([0.1*i+1]), np.array([ipoint]), np.array([0.]), np.array(ierr2))
        graph.SetName(nam)
        graph.SetTitle(nam)
        graph.SetMarkerStyle(ptr)
        graph2.SetMarkerStyle(ptr)
        graph.SetMarkerColor(i+1)
        graph2.SetMarkerColor(i+1)
        graph.SetFillStyle(1001)
        graph2.SetFillStyle(1001)
        graph.SetMarkerSize(1.5)
        graph2.SetMarkerSize(1.5)
        graph.SetLineColor(1)
        graph2.SetLineColor(2)
        rgraphs.append(graph)
        rgraphs2.append(graph2)
    c2.cd()
    pad2 = ROOT.TPad("pad2", "pad2", 0, 0.05, 1, 0.4)
    pad2.SetLeftMargin(0.2)
    pad2.SetRightMargin(0.01)
    pad2.SetTopMargin(0.0)
    pad2.SetBottomMargin(0.001)
    pad2.SetTickx()
    pad2.SetTicky()
    pad2.Draw("ALPF")
    pad2.cd()
    textsize = 24./(pad2.GetWh()*pad2.GetAbsHNDC())
    ymin = min(points-points_err)*0.999
    ymax = ymin + 1.15 * (max(points) - ymin)
    rgraphs[0].GetYaxis().SetRangeUser(ymin, ymax)
    rgraphs[0].GetXaxis().SetRangeUser(xmin, xmax)
    rgraphs[0].GetXaxis().SetLabelSize(0)
    rgraphs[0].GetYaxis().SetTitle("Ratio / Lumi")
    rgraphs[0].GetYaxis().SetTitleOffset(.75)
    rgraphs[0].GetYaxis().SetTitleSize(textsize)
    rgraphs[0].GetYaxis().SetLabelSize(textsize)
    rgraphs[0].GetYaxis().SetNdivisions(405)
    rgraphs[0].Draw("AP")
    # Reference line at ratio == 1.
    line1 = ROOT.TLine(xmin, 1., xmax, 1)
    line1.SetLineStyle(7)
    line1.Draw("same")
    for graph in rgraphs:
        graph.Draw("P same")
    for graph in rgraphs2:
        graph.Draw("E same")
    rgraphs[0].SetTitle("")
    rgraphs[0].Draw("same")
    # Output file name encodes the selected run ranges.
    outstring = 'ratio'
    if run_range_Nom:
        outstring += "_run{0}to{1}".format(*run_range_Nom)
    if run_range_Denom:
        outstring += "_run{0}to{1}".format(*run_range_Denom)
    c2.SaveAs(outDir+"/"+outstring+".png")
    c2.Close()
########## Data Acquisition ##########
# --- z luminosity
dataNom = pd.read_csv(str(args.rates1), sep=',',low_memory=False)#, skiprows=[1,2,3,4,5])
# --- get Z low PU
dataDenom = pd.read_csv(str(args.rates2), sep=',',low_memory=False)#, skiprows=[1,2,3,4,5])
# Total delivered-Z columns = sum of the BB/BE/EE category columns
# (`_mc` suffix = pileup-corrected counts).
dataNom['zDel_mc'] = dataNom['zDelBB_mc'] + dataNom['zDelBE_mc'] + dataNom['zDelEE_mc']
dataNom['zDel'] = dataNom['zDelBB'] + dataNom['zDelBE'] + dataNom['zDelEE']
dataDenom['zDel_mc'] = dataDenom['zDelBB_mc'] + dataDenom['zDelBE_mc'] + dataDenom['zDelEE_mc']
dataDenom['zDel'] = dataDenom['zDelBB'] + dataDenom['zDelBE'] + dataDenom['zDelEE']
# pdb.set_trace()
# sort out lumi section withou any counts
dataNom = dataNom.query('zDel_mc != 0')
dataDenom = dataDenom.query('zDel_mc != 0')
# Earlier comparisons kept for reference (note they use a `run_range` keyword
# that the current make_ratio signature does not accept).
# make_ratio(dataDenom, dataNom, run_range=(297046,299329), name="2017 B/H")
# make_ratio(dataDenom, dataNom, run_range=(299368,302029), name="2017 C/H")
# make_ratio(dataDenom, dataNom, run_range=(302030,303434), name="2017 D/H")
# make_ratio(dataDenom, dataNom, run_range=(303434,304797), name="2017 E/H")
# make_ratio(dataDenom, dataNom, run_range=(305040,306462), name="2017 F/H")
#make_ratio(dataDenom, dataDenom, run_range_Denom=(317080,319310), run_range_Nom=(315252,316995), name="2018 A / 2018 B", lumiUnc=0.)
# Current comparison: 2018 ABC vs 2017 B-F with combined lumi uncertainty.
make_ratio(dataNom, dataDenom,
    run_range_Nom=(315252,320065),
    run_range_Denom=(297046,306462),
    name="2018 ABC / 2017 B-F",
    lumiUnc=np.sqrt(0.022**2 + 0.015**2),
    lumi_name='recorded(/pb)')
# make_ratio(dataNom, dataDenom,
#     name="2018 ABC / 2017 H",
#     run_range_Nom=(315252,320065),
#     lumiUnc=np.sqrt(0.015**2 + 0.015**2),
#     lumi_name='recorded(/pb)')
# make_ratio(dataNom, dataDenom,
#     run_range_Nom=(297046,306462),
#     name="2017 B-F / 2017 H",
#     lumiUnc=0.013,
#     lumi_name='recorded(/pb)')
# make_ratio(dataNom, dataDenom,
#     run_range_Nom=(303434,306462),
#     run_range_Denom=(303434,306462),
#     name="2017 EF(0.1) / 2017 EF(0.2)",
#     lumiUnc=0.013,
#     lumi_name='recorded(/pb)')
|
import gzip
import cv2
import _pickle
import tensorflow as tf
import numpy as np
# Translate a list of labels into an array of 0's and one 1.
# i.e.: 4 -> [0,0,0,0,1,0,0,0,0,0]
def one_hot(x, n):
    """
    Encode integer class labels as one-hot rows.

    :param x: label or sequence/array of labels (ints in [0, n))
    :param n: number of bits (classes) per row
    :return: float array of shape (num_labels, n) with a single 1 per row
    """
    # Generalized: np.asarray accepts lists, tuples, arrays and bare scalars
    # alike (the original only converted `list` and crashed on a plain int,
    # which has no .flatten()). Existing list/array callers are unaffected.
    x = np.asarray(x).flatten()
    o_h = np.zeros((len(x), n))
    o_h[np.arange(len(x)), x] = 1
    return o_h
# Load the pickled MNIST splits (Python-2 pickle, hence latin1 encoding).
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = _pickle.load(f, encoding='latin1')
f.close()
train_x, train_y = train_set
# ---------------- Visualizing some element of the MNIST dataset --------------
import matplotlib.cm as cm
import matplotlib.pyplot as plt
#lt.show() # Let's see a sample
#print(train_x[57])
#print (train_y[57])
# TODO: the neural net!!
# One-hot encode the training labels (10 digit classes).
y_data = one_hot(train_y , 10)
# Graph definition: a 784-20-10 sigmoid/softmax MLP.
# NOTE(review): tf.placeholder / tf.Session / initialize_all_variables are
# TF1 APIs; this script will not run on TF2 without tf.compat.v1.
x = tf.placeholder("float", [None, 784]) # samples
y_ = tf.placeholder("float", [None, 10]) # labels
W1 = tf.Variable(np.float32(np.random.rand(784, 20)) * 0.1)
b1 = tf.Variable(np.float32(np.random.rand(20)) * 0.1)
W2 = tf.Variable(np.float32(np.random.rand(20, 10)) * 0.1)
b2 = tf.Variable(np.float32(np.random.rand(10)) * 0.1)
h = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
# h = tf.matmul(x, W1) + b1 # Try this!
y = tf.nn.softmax(tf.matmul(h, W2) + b2)
# Sum-of-squared-errors loss, plain gradient descent.
loss = tf.reduce_sum(tf.square(y_ - y))
train = tf.train.GradientDescentOptimizer(0.01).minimize(loss) # learning rate: 0.01
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
print ("----------------------")
print (" Start training... ")
print ("----------------------")
batch_size = 20
valid_x, valid_y = valid_set
error = 0
perror = 0
epoch = 0
epochs = []
errors = []
# Train in mini-batches until the validation error improves by less than
# 0.01% between consecutive epochs.
while abs(perror - error) >= perror * 0.0001:
    for jj in range(len(train_x) // batch_size):
        batch_xs = train_x[jj * batch_size: jj * batch_size + batch_size]
        batch_ys = y_data[jj * batch_size: jj * batch_size + batch_size]
        sess.run(train, feed_dict={x: batch_xs, y_: batch_ys})
    perror = error
    error= sess.run(loss, feed_dict={x: valid_x, y_: one_hot(valid_y,10)})
    errors.append(error)
    epochs.append(epoch)
    print("Epoch #:", epoch, "Error: ", error)
    epoch += 1
# Plot the validation-error curve.
plt.plot(epochs, errors)
plt.show()
print ("----------------------")
print ("   Test   ")
print ("----------------------")
# Report every misclassified test digit and the final accuracy.
test_x, test_y = test_set
result = sess.run(y, feed_dict={x: test_x})
mistakes = 0
for b, r in zip(test_y , result):
    if b != np.argmax(r):
        mistakes += 1
        print ( b, "-->", np.argmax(r) )
print ("accuracy percentage:", 100 - (mistakes * 100 / len(test_y)), "%")
print ("----------------------------------------------------------------------------------")
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# date :2018/1/
# discriptions :
# vision :
# copyright :All copyright reserved by FMSH company
__author__ = 'zuodengbo'
def TAC_enc(keys, datas):
    # Intended (from the structure): derive a TAC by XOR-ing the data with
    # the first key half and then with the second key half.
    # NOTE(review): this function cannot run as written --
    #   * on Python 3, len(keys) / 2 is a float and breaks the slices
    #     (needs // for integer division);
    #   * ord() requires a single character, but first_half_key (16 chars)
    #     and datas (48 chars) are multi-character strings, so ord() raises
    #     TypeError on both Python 2 and 3;
    #   * tac_1 would be an int, so ord(tac_1) also fails.
    # The intended per-character / numeric XOR scheme must be confirmed with
    # the author before a fix can be written.
    assert len(keys) == 32
    assert len(datas) == 48
    length = len(keys) / 2
    first_half_key = keys[:length]
    second_half_key = keys[length:]
    tac_1 = ord(first_half_key) ^ ord(datas)
    tac_2 = ord(tac_1) ^ ord(second_half_key)
    return tac_2
if __name__ == '__main__':
    # Smoke test with a 32-char key and a data string.
    # NOTE(review): TAC_enc as written raises TypeError (ord() is applied to
    # multi-character strings), so this call cannot currently succeed; also
    # verify that `data` really is 48 characters, as the assert requires.
    key = '11223344556677889900112233445566'
    data = '12165464688448486764846846454864634123554554468'
    TAC_enc(key,data)
|
class UCDay_Hourly:
    """Flat hourly view of a UCDay: exposes its date plus a mapping from
    'hr0'..'hr23' to the corresponding entry of ucday.hours."""
    def __init__(self, ucday):
        self.date = ucday.date
        self.hourly_map = {'hr' + str(hour): ucday.hours[hour] for hour in range(24)}
|
import openpyxl as xl
import csv
import numpy as np
## flags {
# Feature toggles for selecting top-N keyword scores.
# NOTE(review): only the definitions are visible here; confirm where each
# flag is consumed (TOP3 is False while TOP2/TOP1 are enabled).
TOP3 = False
TOP2 = True
TOP1 = True
## flags }
def main(dargs):
    """Refresh a keyword spreadsheet from a CSV of per-student keyword lists.

    Reads the template workbook, clears the previous keyword range, matches
    CSV rows to student IDs in column C, writes each student's keywords,
    highlights the TOP-3 keyword counts (yellow) and non-submitters (red),
    flattens all keywords into one column, and saves the result.

    Args:
        dargs: configuration dict (file names, Excel ranges, row/column
            offsets) -- see the ``__main__`` block for the expected keys.
    """
    input_excel_name = dargs["INPUT_EXCEL_NAME"]
    output_excel_name = dargs["OUTPUT_EXCEL_NAME"]
    input_csv_name = dargs["INPUT_CSV_NAME"]
    ## READ Excel file
    wb = xl.load_workbook(input_excel_name)
    # ws = wb.copy_worksheet(wb["Sheet1"])
    # ws.title = "Sheet2"
    ws = wb["Sheet1"]
    # wb = xl.load_workbook(input_excel_name)
    # ws = wb.worksheets[0]
    # wb.copy_worksheet(ws)
    ## DELETE previous keywords
    keyword = ws[dargs["KEYWORD_RANGE"]]
    for i in range(len(keyword)):
        for j in range(len(keyword[i])):
            keyword[i][j].value=""
    ## ADD auto filter
    ws.auto_filter.ref = dargs["AUTO_FILTER_RANGE"]
    ## COPY color of auto filter
    # NOTE(review): "B10:DI10"/"A10" are hard-coded here, unlike the other
    # ranges which come from dargs -- confirm whether they should too.
    target = ws["B10:DI10"]
    for i in range(len(target)):
        for j in range(len(target[i])):
            target[i][j]._style = ws["A10"]._style
    ## Extract student numbers (debug code, kept for reference)
    # target = ws["C10:C89"]
    # for i in range(len(target)):
    #     for j in range(len(target[i])):
    #         print(target[i][j].value)
    # exit()
    ## READ keyword from csv file and WRITE
    keyword_num = np.array([], dtype="int8")
    row = dargs["KEYWORD_START_ROW_NUM"]
    col = dargs["KEYWORD_START_COLUMN_NUM"]
    with open(input_csv_name) as f:
        reader = csv.reader(f)
        for r, data in enumerate(reader):
            print('input ID:', data[0])
            ### match this CSV row against the student ID in sheet column C
            student_ID = ws.cell(row+r, 3).value
            #print('ID:', student_ID)
            # Advance down the sheet past students with no CSV row, marking
            # each as a non-submitter (None score); abort when the sheet runs
            # out of IDs.
            while data[0] != str(student_ID):
                keyword_num = np.append(keyword_num, None)
                ws.cell(row+r, 9).value = ""
                row = row + 1
                student_ID = ws.cell(row+r, 3).value
                if student_ID is None:
                    print('ERROR : Not match ID')
                    exit()
            # NOTE(review): len(data) counts the ID cell too, so scores are
            # (#keywords + 1); consistent within this script, but confirm.
            keyword_num = np.append(keyword_num, len(data))
            # write the keywords (data[1:]) starting at the keyword column
            for index in range(len(data)-1):
                # ws.cell(row=row+r, column=col+index).value = data[index]
                ws.cell(row+r, col+index).value = data[index+1]
    # keyword_num = np.append(keyword_num, None)
    # print(keyword_num)
    # print(keyword_num[3])
    ## SORT #keywords and select TOP3 score
    # REMOVE None
    filtered_list = [e for e in keyword_num if e is not None]
    if len(filtered_list) < 3:
        print("can\'t select TOP3. too few kinds of #keywords")
        # sentinels larger than any real count so nothing is highlighted
        first = 10000
        second = 10000
        third = 10000
    else:
        # SORT distinct scores; take the three largest
        first = sorted(set(filtered_list))[-1]
        second = sorted(set(filtered_list))[-2]
        third = sorted(set(filtered_list))[-3]
    print(first, second, third)
    ## DRAW TOP3 and NOT submit persons {
    ##----------------------------------##
    ## TOP3 -> fill Yellow ##
    ## NOT submit -> red ##
    ##----------------------------------##
    yellow_fill = xl.styles.PatternFill(patternType='solid', fgColor='ffff00', bgColor='d7d7d7')
    red_char = xl.styles.fonts.Font(color='FF0000')
    row_offset = dargs["KEYWORD_START_ROW_NUM"]
    for i, score in enumerate(keyword_num):
        if score is None: # NOT submit
            start = "A" + str(row_offset+i)
            end = dargs["KEYWORD_END_COLUMN"] + str(row_offset+i)
            cell = ws[start:end]
            # NOTE(review): the inner loops reuse the name `i`, shadowing the
            # outer enumerate index; iteration still proceeds correctly
            # because enumerate is an iterator, but renaming would be safer.
            for i in range(len(cell)):
                for j in range(len(cell[i])):
                    cell[i][j].font = red_char
        else:
            if score < third: # NOT TOP3
                pass
            else:
                start = "A" + str(row_offset+i)
                end = dargs["KEYWORD_END_COLUMN"] + str(row_offset+i)
                cell = ws[start:end]
                # NOTE(review): all three branches apply the same fill --
                # confirm whether 1st/2nd/3rd were meant to differ.
                if score < second: # 3rd
                    for i in range(len(cell)):
                        for j in range(len(cell[i])):
                            cell[i][j].fill = yellow_fill
                elif score < first: # 2nd
                    for i in range(len(cell)):
                        for j in range(len(cell[i])):
                            cell[i][j].fill = yellow_fill
                else: # 1st
                    for i in range(len(cell)):
                        for j in range(len(cell[i])):
                            cell[i][j].fill = yellow_fill
    ## DRAW TOP3 and NOT submit persons }
    ## multi columns to one column {
    row = dargs["KEYWORD_START_ROW_NUM"]
    col = dargs["KEYWORD_START_COLUMN_NUM"]
    count = 0
    for i in range(104):
        for j in range(89):
            # stop at the first empty cell of this row
            if ws.cell(row+i, col+j).value == "":
                break
            #ws.cell(100+i*104+j, 3).value = ws.cell(row+i, col+j).value
            ws.cell(100+count, 3).value = ws.cell(row+i, col+j).value
            count += 1
    ## multi columns to one column }
    ## COUNT KEYWORD { ## must CHANGE read mode
    # for i in range(count):
    #     target = "=COUNTIF(J11:DI89, C" + str(100+i) + ")"
    #     ws.cell(100+i, 4).value = target
    ## COUNT KEYWORD }
    ## WRITE Excel file
    wb.save(output_excel_name)
if __name__ == '__main__':
    # Configuration for main(): file names plus spreadsheet geometry.
    # Excel ranges are 1-indexed; column "J" == 10, data rows are 11-89.
    dargs = {
        "INPUT_EXCEL_NAME" : "template.xlsx",
        "OUTPUT_EXCEL_NAME" : "output.xlsx",
        #"INPUT_CSV_NAME" : "result.csv",
        "INPUT_CSV_NAME" : "id.csv",
        # "AUTO_FILTER_RANGE" : "A10:DI81",
        # "KEYWORD_RANGE" : "J11:DI81",
        "AUTO_FILTER_RANGE" : "A10:DI89",
        "KEYWORD_RANGE" : "J11:DI89",
        "KEYWORD_START_ROW_NUM" : 11,
        "KEYWORD_START_COLUMN_NUM" : 10,
        "KEYWORD_START_COLUMN" : "J",
        "KEYWORD_END_COLUMN" : "DI",
        "KEYWORD_START_ROW" : 11,
        "KEYWORD_END_ROW" : 89,
        # NOTE(review): ONE_COLUMN_RANGE is not read by main() -- confirm.
        "ONE_COLUMN_RANGE" : "C100:C9356",
    }
    main(dargs)
|
"""
This code represents a set of functions to implement the toy problem for the Xor gate, it uses an Artificial Neural Network
in which the parameters are set by a Genetic Algorithm, in this case, the weights are being set. This must be used with
the interface, as it does not print any result on the screen and works just as a set of functions.
Author: Ithallo Junior Alves Guimaraes
Major: Electronics Engineering
Universidade de Brasilia
November/2015
"""
# Libraries
import numpy as np
import random as rd
import math as mt
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import TanhLayer
# General things and parameters
# Creating the Artificial Neural Network (ANN); just an example, according to the problem
#net = buildNetwork(2, 3, 1, bias=True, hiddenclass=TanhLayer)
# Creating data set for the Xor. [[inputA, inputB, Output], ...]
#dataSet = [[0,0,0],[0,1,1],[1,0,1],[1,1,0]]
# Function to generate the population, netp(the list, array of weights, an example), population size, limit1 limit2 for random. It returns a array with the population
def pop(netp, pop_size, mu, sigma):
    """Generate an initial GA population of weight vectors.

    Each individual has the same length as the reference weight vector
    ``netp``; every gene is drawn independently from a Gaussian N(mu, sigma).

    Fix: the original used ``xrange``, which does not exist in Python 3
    (NameError); ``range`` behaves identically for iteration.

    Args:
        netp: reference weight vector (only its length is used).
        pop_size: number of individuals to generate.
        mu: mean of the Gaussian used for each gene.
        sigma: standard deviation of the Gaussian.

    Returns:
        numpy.ndarray of shape (pop_size, len(netp)).
    """
    population = [[rd.gauss(mu, sigma) for _ in range(len(netp))]
                  for _ in range(pop_size)]
    return np.array(population)
# This function ranks the population by fitness (here, the mean squared error of the ANN over the data set) -- the closer to 0 the better. It returns the errors, already sorted, each paired with the individual's position, as [[error, index], ...]. Inputs: the population, the data set to compare against, and the generated ANN.
def ranking(Population, dataSet, net):
    """Rank a GA population by fitness (mean squared error of the ANN).

    For each individual, loads its weights into ``net``, evaluates the mean
    squared error over ``dataSet`` (rows of [inputA, inputB, target]), and
    returns ``[[error, index], ...]`` sorted ascending (lower error = fitter).

    Fixes: ``xrange`` is Python-2-only (replaced with ``enumerate``); the
    unused local ``minimal`` was removed.

    Args:
        Population: sequence of weight vectors.
        dataSet: iterable of [inputA, inputB, target] rows.
        net: ANN exposing ``_setParameters(weights)`` and ``activate(inputs)``.

    Returns:
        list[list]: [mean_squared_error, population_index] pairs, sorted by
        error.
    """
    error = []
    for i, individual in enumerate(Population):
        net._setParameters(individual)
        midError = 0
        for x in dataSet:
            midError += pow((net.activate([x[0], x[1]])[0] - x[2]), 2)
        error.append([midError / len(dataSet), i])
    # Sort by the error component, keeping the original index alongside.
    return sorted(error, key=lambda item: item[0])
"""
To do this part, my idea is to define how much (%) of the past population is going to have offspring, it receives also the percent of the
population is going to mutate too. It should be remembered that some not fitted should have offspring too in order to keep the genetic diversity
"""
# Crossover, mutation, breeding
def breedND(Population,RANK, mutate, crossover, mu, sigma):
    """Crossover/mutation step (variant without an explicit dying rate).

    The best-ranked individuals mate with random partners; the offspring
    replace the worst individuals, then a fraction of genes is re-drawn.

    Args:
        Population: numpy array of individuals (weight vectors).
        RANK: output of ranking() -- [[error, index], ...] sorted ascending.
        mutate: fraction of the population to mutate.
        crossover: fraction of the population that reproduces.
        mu, sigma: Gaussian parameters for mutated genes.

    Returns:
        numpy.ndarray: the new population.
    """
    # number of individuals selected for reproduction
    numM = int (crossover * len(Population))
    Population = Population.tolist()
    # The fittest mate with random partners; offspring replace the worst ones.
    children = []
    mate1 = []
    mate2 = []
    for i in xrange(0, numM):
        mate1 = Population[RANK[i][1]]
        mate2 = rd.choice(Population)
        # generating the child
        # NOTE(review): rd.choice returns a *reference* to an existing list,
        # so the in-place gene writes below also mutate that individual
        # inside Population -- confirm this aliasing is intended.
        children.insert(i, rd.choice([mate1, mate2]))
        for a in xrange(0, rd.randint(1, (len(mate1) - 1))):
            # NOTE(review): this is a value comparison; once a swapped gene
            # makes the child unequal to mate1 the copy direction flips
            # mid-crossover -- confirm intended.
            if children[i] == mate1:
                children[i][a] = mate2[a]
            else:
                children[i][a] = mate1[a]
    # reordering: rebuild the population sorted by rank (best first)
    PopMid = []
    for h in xrange(0, len(RANK)):
        PopMid.append(Population[RANK[h][1]])
    # drop the len(children) worst individuals to make room for offspring
    for dl in range(((len(RANK)- len(children))), len(RANK)):
        PopMid.pop(len(PopMid)-1)
    Population = PopMid + children
    # Mutating: re-draw one random gene of muNum randomly chosen individuals
    muNum = int(mutate * len(Population))
    for fi in xrange(0, muNum):
        varl = Population.index(rd.choice(Population))
        varl1 = rd.randint(0, len(Population[varl])-1)
        Population[varl][varl1] = rd.gauss(mu, sigma)
    return np.array(Population)
def breed(Population,RANK, mutate, crossover, mu, sigma, DyingRAte):
    """Crossover/mutation step with an explicit dying rate.

    Like breedND(), but the number of removed (worst) individuals is
    controlled by ``DyingRAte`` instead of the number of children.

    Args:
        Population: numpy array of individuals (weight vectors).
        RANK: output of ranking() -- [[error, index], ...] sorted ascending.
        mutate: fraction of the population to mutate.
        crossover: fraction of the population that reproduces.
        mu, sigma: Gaussian parameters for mutated genes.
        DyingRAte: fraction of the (worst) population removed each generation.

    Returns:
        numpy.ndarray: the new population.
    """
    # number of individuals selected for reproduction
    numM = int(crossover * len(Population))
    Population = Population.tolist()
    # The fittest mate with random partners; offspring replace the worst ones.
    children = []
    mate1 = []
    mate2 = []
    for i in xrange(0, numM):
        mate1 = Population[RANK[i][1]]
        mate2 = rd.choice(Population)
        # NOTE(review): mate2 is a reference into Population; copying genes
        # into children[i] below mutates that same individual -- confirm.
        children.insert(i, mate2)
        for a in xrange(0, rd.randint(1, (len(mate1) - 1))):
            children[i][a] = mate1[a]
    # reordering: rebuild the population sorted by rank (best first)
    PopMid = []
    for h in xrange(0, len(RANK)):
        PopMid.append(Population[RANK[h][1]])
    # remove the DyingRAte fraction of worst individuals
    for dl in range((len(RANK)- int(DyingRAte*len(RANK))), len(RANK)):
        PopMid.pop(len(PopMid)-1)
    Population = PopMid + children
    # Mutating
    muNum = int(mutate * len(Population))
    #print muNum
    for fi in xrange(0, muNum):
        # protecting the past best fit from mutating
        # NOTE(review): nothing here actually skips the best individual --
        # confirm whether that protection was meant to be implemented.
        varl = Population.index(rd.choice(Population))
        varl1 = rd.randint(0, len(Population[varl])-1)
        Population[varl][varl1] = rd.gauss(mu, sigma)
    return np.array(Population)
|
# Capture mouse clicks and draw a dot on the image
import numpy as np
import argparse
import cv2
# Image to annotate with click markers.
# NOTE(review): backslashes in a non-raw string -- '\i' and '\p' survive as
# literal text today, but a raw string r'..\image4\parking.jpg' is safer.
file_name = '..\image4\parking.jpg'
# Captured (x, y) click positions, filled in by the mouse callback.
points = []
def onMouseClick (event, x, y, flags, param):
    """Mouse handler: on left-button release, record the point and draw it."""
    global points  # module-level list of captured (x, y) positions
    if event != cv2.EVENT_LBUTTONUP:
        return
    points.append((x, y))
    cv2.circle(img, (x, y), 20, (0, 0, 255), -1)
    # The event handler may repaint the window directly.
    cv2.imshow("On the Dot", img)
# Wire up the window and callback, show the image, then block on a key press.
cv2.namedWindow("On the Dot")
cv2.setMouseCallback("On the Dot", onMouseClick)
img = cv2.imread(file_name)
cv2.imshow("On the Dot", img)
# Note: there is no infinite loop
cv2.waitKey(0)
# Fix: Python-2-only print statements replaced with print() calls, which
# behave identically on Python 2 and 3 for a single argument.
print('{0} points were captured'.format(len(points)))
print(points)
cv2.destroyAllWindows()
|
#!C:\Users\Vaibhavi Raut\AppData\Local\Programs\Python\Python37
'''
WAP to accept a string from user and convert it to lowercase and uppercase.
'''
# Read a word and report its upper/lower-case conversion.
word = input("Enter a word: ")
print(type(word))
if word.islower():
    print("UpperCase is ",word.upper())
elif word.isupper():
    print("LowerCase is ",word.lower())
else:
    # Mixed case (or no cased characters): offer swapcase instead.
    print("Mix of both. Can't convert!")
    print("In that case: ",word.swapcase())
'''
Comments!
'''
|
import re
def remove_comments(filename):
    """Strip C/C++-style comments and line continuations from *filename*.

    Writes the result to ``<filename>_without_comments``.

    Fix: the regex patterns are now raw strings; the originals relied on
    invalid escape sequences such as ``"\\*"`` written as ``"\*"``, which
    emit DeprecationWarnings and are slated to become syntax errors.

    Args:
        filename: path of the source file to clean (read in full).
    """
    with open(filename, 'r') as f:
        data = f.read()
    # remove all streamed comments (/* COMMENT */), across lines
    data = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", data)
    # remove all single-line comments (// COMMENT\n); the newline is consumed
    data = re.sub(re.compile(r"//.*?\n"), "", data)
    # remove line continuations: a literal backslash followed by a newline
    data = re.sub(re.compile(r"\\\n"), "", data)
    with open(filename + "_without_comments", 'w') as f:
        f.write(data)
def clean_file(filename):
    """Rewrite *filename* into a simplified, parseable C translation unit.

    Produces ``<filename>_clean``: typedef stubs for project-specific types
    are prepended, preprocessor/comment lines are skipped, attribute-like
    macros are deleted, and iteration macros are replaced with plain C
    constructs so a downstream parser can handle the file.
    """
    remove_comments(filename)
    # NOTE(review): plain open() without with/try -- the handles leak if an
    # exception occurs before the closes at the end; confirm acceptable.
    f_read = open(filename + "_without_comments", 'r')
    f_write = open(filename + '_clean', 'w')
    # Project-specific type names a C parser would otherwise reject; each
    # becomes a `typedef int <name>;` stub at the top of the output.
    to_define = ['vmm_spinlock_t', 'u64', 'u16', 'bool', 'arch_regs_t',
                 'vmm_rwlock_t', 'resource_size_t', 'loff_t', 'int_T',
                 'real_T',
                 'irq_flags_t', 'u32', 'pthread_t', 'vmm_scheduler_ctrl',
                 'virtual_addr_t', 'u8', 'virtual_size_t', 'physical_addr_t',
                 'physical_size_t', 'atomic_t', 'vmm_iommu_fault_handler_t',
                 'dma_addr_t', 'size_t', 'off_t', 'vmm_dr_release_t',
                 'vmm_dr_match_t', 'vmm_clocksource_init_t', 's64', 'va_list',
                 'vmm_host_irq_handler_t', 'vmm_host_irq_function_t',
                 'vmm_host_irq_init_t', 'Elf_Ehdr', 'Elf_Shdr', 'Elf_Sym', 's16',
                 'vmm_clockchip_init_t', 'pthread_spinlock_t',
                 'ExtU_brake_acc_nodiv_ctrl_T',
                 'B_brake_acc_nodiv_ctrl_T']
    new_file_lines = ['typedef int {};'.format(t) for t in to_define]
    # Lines whose first (or first two) non-blank characters match these are
    # dropped entirely (preprocessor directives, comment continuations).
    skip_lines_start_with_char = ['#', '/']
    skip_lines_start_with_two_char = ['*/', '*\n', '*\t']
    skip_lines_with = ['DEFINE_PER_CPU', 'asm']
    # Regex patterns deleted from every remaining line (decorator macros,
    # attributes, variadic macro invocations).
    delete_words = ['__initdata','__cpuinit', '__noreturn', '__init',
                    '__exit', '__notrace', '__weak', '__read_mostly',
                    '__attribute\(.*\)', '__extension__', '__inline',
                    'VMM_DEVTREE_PATH_SEPARATOR_STRING,',
                    'VMM_DEVTREE_PATH_SEPARATOR_STRING',
                    'struct vmm_semaphore_resource,',
                    'VMM_EXPORT_SYMBOL\(.*\);', '__attribute__\(.*\)',
                    'VMM_DECLARE_MODULE\(.*\);',
                    'vmm_early_param\(.*\);',
                    'DECLARE_COMPLETION\(.*\);',
                    'MODULE_AUTHOR,',
                    'MODULE_LICENSE,',
                    'MODULE_IPRIORITY,',
                    'MODULE_INIT,',
                    'MODULE_EXIT\);',
                    'the new constraints */',
                    'VMM_DECLARE_MODULE\(MODULE_DESC,',
                    'unsigned long addr_merge,', 'PRIPADDR', 'PRISIZE', 'PRIx64',
                    'struct vmm_region,', 'struct vmm_timer_event,',
                    'struct vmm_device,', 'struct vmm_work,', 'struct vmm_module,',
                    'struct vmm_vcpu_resource,', 'struct vmm_vcpu,',
                    'struct vmm_guest_request,', 'struct host_mhash_entry,', 'struct vmm_surface,',
                    'struct vmm_devtree_attr,', 'struct vmm_devtree_node,', 'struct vmm_vkeyboard_led_handler,',
                    'struct vmm_netport_xfer,', 'struct vmm_schedalgo_rq_entry,', 'struct blockpart_work,',
                    'struct vmm_blockdev,', 'struct blockrq_nop_work,']
    # Regex -> replacement pairs rewriting iteration macros into plain C.
    replace_words = {'for_each_present_cpu':'while',
                     'for_each_online_cpu':'while',
                     'for_each_cpu\(.*\)':'while(1)',
                     'rbtree_postorder_for_each_entry_safe\(.*\)':'while(1)',
                     'vmm_devtree_for_each_child\(.*\)':'while(1)',
                     'list_for_each_entry\(.*\)':'if(1)',
                     'vmm_chardev_doread\(.*':'vmm_chardev_doread(',
                     'vmm_chardev_dowrite\(.*':'vmm_chardev_dowrite(',
                     'container_of\(.*\)':'1',
                     'va_arg\(.*\)':'va_arg(1)',
                     'align\(.*\)':'1',
                     'sizeof\(int\)':'1',
                     'list_for_each_entry_safe_reverse\(':'while(',
                     'list_for_each_entry_safe\(':'while(',
                     'vmm_devtree_for_each_attr\(.*\)':'while(1)',
                     'list_for_each_entry_reverse\(':'while(',
                     'list_for_each_safe\(':'while(',
                     'ether_srcmac\(.*\)':'ether_srcmac()',
                     'ether_dstmac\(.*\)':'ether_dstmac()',
                     'memcpy\(.*\)':'memcpy()',
                     'DECLARE_KEYMAP_FILE\(.*\);':''}
    delete_suffix_start_with = ['/*']
    for i, line in enumerate(f_read):  # i is unused; kept for parity
        sline = line.lstrip(' \t')
        if sline[0] in skip_lines_start_with_char:
            continue
        if sline[:2] in skip_lines_start_with_two_char:
            continue
        if any([w in sline for w in skip_lines_with]):
            continue
        for w in delete_words:
            line = re.sub(w, '', line)
        for k, v in replace_words.items():
            line = re.sub(k, v, line)
        # drop a trailing "/* ..." that remove_comments could not close
        for w in delete_suffix_start_with:
            pos = line.find(w)
            if pos != -1:
                line = line[:pos] + '\n'
        new_file_lines.append(line)
    f_write.write(''.join(new_file_lines))
    f_write.close()
    f_read.close()
|
import setuptools
# Read the package version and long description from their source files.
with open('VERSION', 'r') as verfile:
    version = verfile.read()
with open("README.md", 'r') as fh:
    long_description = fh.read()
setuptools.setup(
    name="gqla",
    version=version,
    author="Alexey Kuzin",
    author_email="alenstoir@yandex.ru",
    description="A module used to generate querry statements and perform data fetching via GraphQL",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Alenstoir/GQLA",
    license='MIT License',
    # Fix: `requires` is obsolete distutils metadata that installs nothing;
    # `install_requires` (a list) declares a real runtime dependency.
    install_requires=['aiohttp'],
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.5',
)
|
# Generated by Django 3.0 on 2020-09-12 01:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Redefine the ``product_type`` choices/default on ``CoffeePod``."""

    # Must run after the migration that created the CoffeePod model.
    dependencies = [
        ('coffee', '0002_coffeepod'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coffeepod',
            name='product_type',
            field=models.CharField(choices=[('COFFEE_POD_LARGE', 'COFFEE POD LARGE'), ('COFFEE_POD_SMALL', 'COFFEE POD SMALL'), ('ESPRESSO_POD', 'ESPRESSO POD')], default='COFFEE_POD_LARGE', max_length=250),
        ),
    ]
|
import tensorflow as tf
import numpy as np
# Hidden size of each GRU layer (rows of the attention matrix U).
num_units = 256
# num_units = 200
# Width of the attention projection space.
attention_len = 512
def matU(shape=[num_units,attention_len], stddev=0.1, mean=0):
    """Create (or fetch, via AUTO_REUSE) the attention projection matrix U.

    Initialised from a truncated normal; lives in the 'attention' scope so
    repeated calls return the same variable.
    """
    initial = tf.truncated_normal(shape=shape, mean=mean, stddev=stddev)
    with tf.variable_scope('attention', reuse=tf.AUTO_REUSE):
        return tf.get_variable('matU',initializer=initial)
def queryW(shape=[attention_len], value=0.1):
    """Create (or fetch, via AUTO_REUSE) the attention bias vector W."""
    initial = tf.constant(value=value, shape=shape)
    with tf.variable_scope('attention', reuse=tf.AUTO_REUSE):
        return tf.get_variable('queryW',initializer=initial)
    # return tf.Variable(initial)
def queryV(shape=[attention_len,1], value=0.1):
    """Create (or fetch, via AUTO_REUSE) the attention scoring vector V."""
    initial = tf.constant(value=value, shape=shape)
    with tf.variable_scope('attention', reuse=tf.AUTO_REUSE):
        return tf.get_variable('queryV',initializer=initial)
    # return tf.Variable(initial)
def encode(embedingPlaceholder):
    """Run a 2-layer GRU encoder over the embedding sequence (batch of 1).

    Args:
        embedingPlaceholder: tensor reshaped to (1, seq_len, 200)
            # assumes 200-dim embeddings -- TODO confirm against caller
    Returns:
        (seq_len, num_units) tensor of per-timestep encoder outputs.
    """
    input_embed = tf.reshape(embedingPlaceholder,[1,-1,200])
    with tf.variable_scope('encode'):
        cell = tf.contrib.rnn.GRUCell(num_units=num_units)
        cell2 = tf.contrib.rnn.GRUCell(num_units=num_units)
        encoder = tf.contrib.rnn.MultiRNNCell([cell,cell2])
        encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(encoder, input_embed, dtype=tf.float32)
        # shape=(1, ?, 256) -> drop the batch dimension
        encoder_outputs = tf.reshape(encoder_outputs,[-1,num_units])
    return encoder_outputs
def getScores(encoder_outputs):
    """Compute softmax attention weights over the encoder time steps.

    score_t = softmax over t of V^T . tanh(U . h_t + W)

    Args:
        encoder_outputs: (seq_len, num_units) tensor from encode().
    Returns:
        (seq_len,) tensor of attention weights summing to 1.
    """
    with tf.variable_scope('attention'):
        U = matU()
        W = queryW()
        V = queryV()
        # activation = tf.tanh(tf.matmul(embedingPlaceholder, U) + W)
        activation = tf.tanh(tf.matmul(encoder_outputs, U) + W)
        value = tf.matmul(activation,V) # shape=(?, 1)
        flatValue = tf.reshape(value,[-1]) # shape=(?)
        scores = tf.nn.softmax(flatValue)
    return scores
# def demoRun():
# scores = attentionModel()
# sess = tf.Session()
# sess.run(tf.global_variables_initializer()) # 每次不写就会报错
# value = sess.run(scores,feed_dict={'embeding:0':np.random.random((10,200))})
# print(value)
""" [0.09600932 0.10093873 0.04859696] """
# demoRun()
# embedingPlaceholder = tf.placeholder(tf.float32, shape=[None, 200], name='embeding')
def getArticleRepresentation(embedingPlaceholder):  # attention-weighted average
    """Encode the article and reduce it to one attention-weighted vector.

    Returns the sum over time steps of encoder outputs weighted by their
    attention scores (a weighted average since the scores sum to 1).
    """
    encoder_outputs = encode(embedingPlaceholder)
    scores = getScores(encoder_outputs)
    reshapedScores = tf.reshape(scores,[-1,1])
    Cs = encoder_outputs*reshapedScores
    C = tf.reduce_sum(Cs, 0)
    # print(C) # shape=(200,)
    return C
|
# draw circles on a canvas
import numpy as np
import cv2
# Build a black canvas and draw concentric circles around its center.
canvas = np.zeros((300, 300, 3), dtype = "uint8") # (y,x,3)
# Fix: use integer division -- shape[n]/2 yields a float on Python 3 and
# cv2.circle requires integer center coordinates.
cx, cy = canvas.shape[1] // 2, canvas.shape[0] // 2
blue = [250, 0, 0]  # BGR channel order
for rad in range(0, 150, 20):
    cv2.circle(canvas, (cx, cy), rad, blue)
cv2.imshow("My Art", canvas) # reuse the same window
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import glob
import random
import numpy as np
# Class labels; list index == numeric label used by the recogniser.
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"] #Emotion list
#fishface = cv2.face.FisherFaceRecognizer_create() #Initialize fisher face classifier
# NOTE(review): createLBPHFaceRecognizer is the OpenCV 2.4 API; newer OpenCV
# exposes cv2.face.LBPHFaceRecognizer_create -- confirm the cv2 version used.
fishface = cv2.createLBPHFaceRecognizer()
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
# NOTE(review): `data` is never used in the visible code -- confirm leftover.
data = {}
def decodeEmotion(pred):
    """Map a numeric classifier label (0-7) to its emotion name.

    Any other value yields a fallback error string.
    """
    label_names = {
        0: "neutral",
        1: "anger",
        2: "contempt",
        3: "disgust",
        4: "fear",
        5: "happy",
        6: "sadness",
        7: "surprise",
    }
    return label_names.get(pred, "oh well, something went wrong...")
def get_files(emotion):
    """Collect the image paths for *emotion* and split them 80/20, shuffled.

    Returns:
        (training, prediction): first ~80% and last ~20% of the shuffled
        file list for ``dataset\\<emotion>\\``.
    """
    files = glob.glob("dataset\\%s\\*" %emotion)
    random.shuffle(files)
    n = len(files)
    train_part = files[:int(n * 0.8)]   # first 80% of the file list
    test_part = files[-int(n * 0.2):]   # last 20% of the file list
    return train_part, test_part
def make_sets():
    """Build grayscale training/prediction sets across all emotions.

    Returns:
        (training_data, training_labels, prediction_data, prediction_labels)
        where labels are indices into the module-level ``emotions`` list.
    """
    training_data = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    for emotion in emotions:
        training, prediction = get_files(emotion)
        # Append data to training and prediction lists, generating labels 0-7
        for item in training:
            image = cv2.imread(item) #open image
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #convert to grayscale
            training_data.append(gray) #append image array to training data list
            training_labels.append(emotions.index(emotion))
        for item in prediction: #repeat above process for prediction set
            image = cv2.imread(item)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            prediction_data.append(gray)
            prediction_labels.append(emotions.index(emotion))
    return training_data, training_labels, prediction_data, prediction_labels
# Build the data sets and train the recogniser once at start-up.
training_data, training_labels, prediction_data, prediction_labels = make_sets()
print("training fisher face classifier")
print("size of training set is:", len(training_labels), "images")
fishface.train(training_data, np.asarray(training_labels))
print("Training done")
# grab the reference to the webcam
vs = cv2.VideoCapture(0)
# keep looping
while True:
    # grab the current frame
    ret, frame = vs.read()
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break
    faces = faceCascade.detectMultiScale(frame)
    for (x, y, w, h) in faces:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert image to grayscale
        face = gray[y:y + h, x:x + w] # Cut the frame to size
        # resize to the fixed input size the recogniser was trained on
        face_two = cv2.resize(face, (350, 350))
        pred, conf = fishface.predict(face_two)
        emotion = decodeEmotion(pred)
        cv2.putText(frame, emotion, (x, y), cv2.FONT_HERSHEY_COMPLEX, 1.0, (255, 255, 255),
                    lineType=cv2.LINE_AA)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # show the frame to our screen
    cv2.imshow("Video", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break
# close all windows
# NOTE(review): vs.release() is never called, so the capture device is not
# freed before exit -- confirm whether it should be released here.
cv2.destroyAllWindows()
# -*- coding:utf-8 -*-
# -------------------------------
# ProjectName : autoDemo
# Author : zhangjk
# CreateTime : 2020/10/3 16:08
# FileName : wjx
# Description :
# --------------------------------
import turtle
import time
# Draw a filled five-pointed star, pause, then write a caption.
turtle.pensize(5)
turtle.pencolor("yellow")
turtle.fillcolor("red")
turtle.begin_fill()
for _ in range(5):
    turtle.forward(200)
    turtle.right(144)  # 144-degree turns trace a pentagram
turtle.end_fill()
time.sleep(2)
turtle.penup()
turtle.goto(-150,-120)
turtle.color("violet")
turtle.write("Done", font=('Arial', 40, 'normal'))
# Keep the window open until the user closes it.
turtle.mainloop()
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy
import astropy.units as u
def deltaMag(p, Rp, d, Phi):
    """Calculate delta magnitudes for a set of planets.

    Based on each planet's albedo, radius, and position with respect to the
    host star.

    Args:
        p (ndarray):
            Planet albedo
        Rp (astropy Quantity array):
            Planet radius in units of km
        d (astropy Quantity array):
            Planet-star distance in units of AU
        Phi (ndarray):
            Planet phase function

    Returns:
        ~numpy.ndarray:
            Planet delta magnitudes
    """
    # dimensionless radius-to-distance ratio
    size_ratio = (Rp / d).decompose()
    flux_ratio = p * size_ratio ** 2 * Phi
    return -2.5 * np.log10(flux_ratio).value
def betaStar_Lambert():
    """Compute the Lambert phase function deltaMag-maximizing phase angle.

    Finds the root of d/dbeta [ sin^2(beta) * Phi_L(beta) ] near 63 degrees.

    Fix: the module only does ``import scipy``, which does not reliably
    expose ``scipy.optimize``; the subpackage is now imported explicitly.

    Returns:
        float:
            Value of beta^* in radians (approximately 1.10473).
    """
    from scipy.optimize import fsolve  # explicit subpackage import

    betastarexpr = (
        lambda beta: -(np.pi - beta) * np.sin(beta) ** 3 / np.pi
        + 2
        * ((np.pi - beta) * np.cos(beta) + np.sin(beta))
        * np.sin(beta)
        * np.cos(beta)
        / np.pi
    )
    # initial guess of 63 degrees, converted to radians
    betastar = fsolve(betastarexpr, 63 * np.pi / 180)[0]
    return betastar
def min_deltaMag_Lambert(Completeness, s=None):
    """Calculate the minimum deltaMag at given separation(s) assuming a
    Lambert phase function.

    Args:
        Completeness (BrownCompleteness):
            BrownCompleteness object
        s (float or ~numpy.ndarray, optional):
            Projected separations (in AU) to compute minimum delta mag at.
            If None (default) then uses Completeness.xnew

    Returns:
        ~numpy.ndarray:
            Minimum deltaMag values
    """
    # this is the output of betaStar_Lambert (no need to recompute every time)
    betastar = 1.1047288186445432
    # if no s input supplied, use the full array of separations from Completeness
    if s is None:
        s = Completeness.xnew
    # allocate output
    dmagmin = np.zeros(s.size)
    # identify breakpoints: outside [bp1, bp2) the optimal phase angle
    # beta* is unreachable, so the phase angle is fixed by the orbit radius
    Ppop = Completeness.PlanetPopulation
    bp1 = Ppop.rrange.min().to(u.AU).value * np.sin(betastar)
    bp2 = Ppop.rrange.max().to(u.AU).value * np.sin(betastar)
    # region 1: s < bp1 -- phase angle set by the minimum orbital radius
    dmagmin[s < bp1] = -2.5 * np.log10(
        Ppop.prange.max()
        * ((Ppop.Rprange.max() / Ppop.rrange.min()).decompose().value) ** 2
        * Completeness.PlanetPhysicalModel.calc_Phi(
            (np.arcsin(s[s < bp1] / Ppop.rrange.min().value)) * u.rad
        )
    )
    # region 2: bp1 <= s < bp2 -- optimum phase angle beta* is attainable
    inds = (s >= bp1) & (s < bp2)
    dmagmin[inds] = -2.5 * np.log10(
        Ppop.prange.max()
        * ((Ppop.Rprange.max().to(u.AU).value / s[inds])) ** 2
        * Completeness.PlanetPhysicalModel.calc_Phi(betastar * u.rad)
        * np.sin(betastar) ** 2
    )
    # region 3: s >= bp2 -- phase angle set by the maximum orbital radius
    dmagmin[s >= bp2] = -2.5 * np.log10(
        Ppop.prange.max()
        * ((Ppop.Rprange.max() / Ppop.rrange.max()).decompose().value) ** 2
        * Completeness.PlanetPhysicalModel.calc_Phi(
            (np.arcsin(s[s >= bp2] / Ppop.rrange.max().value)) * u.rad
        )
    )
    return dmagmin
def max_deltaMag_Lambert(Completeness, s=None):
    """Calculate the maximum deltaMag at given separation(s) assuming a
    Lambert phase function.

    Fix: the docstring promised that ``s=None`` falls back to
    ``Completeness.xnew`` (as min_deltaMag_Lambert does), but the default
    was never applied and ``s=None`` would crash; it is handled now.

    Args:
        Completeness (BrownCompleteness):
            BrownCompleteness object
        s (float or ~numpy.ndarray, optional):
            Projected separations (in AU) to compute maximum delta mag at.
            If None (default) then uses Completeness.xnew

    Returns:
        ~numpy.ndarray:
            Maximum deltaMag values
    """
    # if no s input supplied, use the full array of separations (consistent
    # with min_deltaMag_Lambert)
    if s is None:
        s = Completeness.xnew
    Ppop = Completeness.PlanetPopulation
    # faintest configuration: lowest albedo, smallest planet, largest orbit,
    # far-side phase angle (pi - arcsin)
    dmagmax = -2.5 * np.log10(
        Ppop.prange.min()
        * ((Ppop.Rprange.min() / Ppop.rrange.max()).decompose().value) ** 2
        * Completeness.PlanetPhysicalModel.calc_Phi(
            (np.pi - np.arcsin(s / Ppop.rrange.max().value)) * u.rad
        )
    )
    return dmagmax
|
# model form : model, form(parent)
from django.contrib.auth.models import User
from django import forms
class SignUpForm(forms.ModelForm):
    """Sign-up form for the built-in User model with password confirmation."""

    # Additional fields: fields declared here take precedence over (override)
    # model-derived fields of the same name.
    password = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Repeat Password', widget=forms.PasswordInput)

    class Meta:
        # The model-level password field has lower priority (it still works
        # if omitted from `fields`, but its position moves to the end).
        model = User
        # The order of `fields` is the order the fields render in the HTML.
        fields = ['first_name', 'last_name', 'username', 'password', 'password2']
        # fields = '__all__'

    # clean_<fieldname> hooks are invoked by form validation (is_valid()).
    def clean_password2(self):
        # cleaned_data holds every field cleaned before password2
        # (fields are cleaned in declaration order).
        cd = self.cleaned_data
        if cd['password'] != cd['password2']:
            raise forms.ValidationError('비밀번호가 일치하지 않습니다.')
        # Convention: return the cleaned value of this field.
        return cd['password2']
|
#!/usr/bin/python
from api.utils.test_base import BaseTestCase
class TestEmployees(BaseTestCase):
    """Smoke tests for the employees API (placeholder)."""

    def setUp(self):
        super(TestEmployees, self).setUp()

    def test_get_employees(self):
        # Placeholder assertion -- replace with a real request/response check.
        self.assertEqual(True, True)
|
import numpy as np
import cv2
from glob import glob
# Play back a video file frame by frame.
input_path = './data/video/mica-cam-output.mp4'
video_cap = cv2.VideoCapture(input_path)
while(video_cap.isOpened()):
    ret, frame = video_cap.read()
    if not ret:
        # end of stream (or read failure) -- stop playback
        break
    cv2.imshow('img', frame)
    # NOTE(review): waitKey() with no argument blocks until a key press, so
    # playback advances one frame per key press -- confirm that is intended.
    cv2.waitKey()
video_cap.release()
cv2.destroyAllWindows()
|
import scipy.io
import numpy as np
class KittiDatasetReader:
    """
    Base class for KITTI data readers.

    Provides the calibration-file parsing shared by concrete readers.
    NOTE(review): depends on attributes expected from subclasses --
    ``calib_root_dir``, ``TRANSLATION_ROTATION_MATRIX``,
    ``TRANSLATION_ROTATION_SHAPE``, ``ROTATION_RECT``,
    ``ROTATION_RECT_SHAPE``, ``P_RECT`` and ``P_RECT_SHAPE`` -- confirm
    against the concrete reader classes.
    """

    @staticmethod
    def get_file_content(file_path):
        """Return all lines of the text file at *file_path*."""
        with open(file_path, 'r') as f:
            content = f.readlines()
        return content

    @staticmethod
    def get_matrix_from_file_content(content, matrix_id):
        """
        Extract a matrix from calibration-file lines.

        :param
            content: list of lines read from the text file that has the matrix
            matrix_id: id of the matrix to retrieve (the first
                space-separated token of the line, e.g. "Tr_velo_cam")
        :returns flat numpy array of the matrix values, or None if not found

        example:
            to get the translation_rotation matrix from content:
            get_matrix_from_file_content(content, "Tr_velo_cam")
        """
        for line in content:
            line_split = line.split(' ')
            if line_split[0] == matrix_id:
                l = ' '.join(line_split[1:])
                return np.fromstring(l, sep=' ')
        return None

    def create_calib_file_path(self, sequence_number: str) -> str:
        """
        Build the path of a sequence's calibration file.

        Args:
            sequence_number: sequence number to create the path of
        returns:
            created path (under ``self.calib_root_dir``, which a subclass
            must provide as a pathlib-like object)
        """
        return str (self.calib_root_dir / "{num}.txt".format(num=sequence_number))

    def read_calib_file_content(self, sequence_number: str) -> list:
        """
        Read the content of a sequence's calibration file.

        Args:
            sequence_number: sequence number to read the calibration file of
        """
        return self.get_file_content(self.create_calib_file_path(sequence_number))

    def get_translation_rotation_Velo_matrix(self, seq_number, return_homogeneous=True):
        """
        Get the velodyne translation/rotation matrix from the calibration file.

        :param
            seq_number: sequence number to read the calib file of
            return_homogeneous: return the matrix in homogeneous space (4x4)
        :return:
            the matrix if found
        """
        # read calibration file content
        content = self.read_calib_file_content(seq_number)
        # get translation rotation matrix from content
        tr_velo_matrix = self.get_matrix_from_file_content(content, self.TRANSLATION_ROTATION_MATRIX)
        # reshape matrix to the expected shape
        # NOTE(review): raises AttributeError if the matrix id is missing
        # (get_matrix_from_file_content returns None) -- confirm inputs.
        tr_velo_matrix = tr_velo_matrix.reshape(self.TRANSLATION_ROTATION_SHAPE)
        if return_homogeneous:
            # embed into a 4x4 identity to move to homogeneous coordinates
            result = np.eye(4)
            result[:self.TRANSLATION_ROTATION_SHAPE[0], :self.TRANSLATION_ROTATION_SHAPE[1]] = tr_velo_matrix
        else:
            result = tr_velo_matrix
        return result

    def get_rectified_cam0_coord(self, seq_number, return_homogeneous=True):
        """
        Get the rectifying rotation matrix from the calibration file.

        R_rect_xx is the 3x3 rectifying rotation matrix that makes the image
        planes co-planar; cam0 is used because it is the KITTI reference
        frame.

        :param
            seq_number: sequence number to read the calib file of
            return_homogeneous: return the matrix in homogeneous space (4x4)
        :return:
            the matrix if found
        """
        # read calibration file content
        content = self.read_calib_file_content(seq_number)
        # get matrix from content
        matrix = self.get_matrix_from_file_content(content, self.ROTATION_RECT).reshape(self.ROTATION_RECT_SHAPE)
        if return_homogeneous:
            # embed into a 4x4 identity to move to homogeneous coordinates
            R_rect_00_matrix = np.identity(4)
            R_rect_00_matrix[:self.ROTATION_RECT_SHAPE[0], :self.ROTATION_RECT_SHAPE[1]] = matrix
        else:
            R_rect_00_matrix = matrix
        return R_rect_00_matrix

    def get_projection_rect(self, seq_number, mode='02'):
        """
        Get the post-rectification projection matrix from the calibration file.

        P_rect_xx is the 3x4 projection matrix after rectification.

        :param
            seq_number: sequence number to read the calib file of
            mode: which camera's matrix to return; default '02' (color cam 2)
        :return:
            the matrix if found
        """
        # read calibration file content
        content = self.read_calib_file_content(seq_number)
        matrix_id = self.P_RECT.format(cam=int(mode))
        # get matrix from content
        matrix = self.get_matrix_from_file_content(content, matrix_id).reshape(self.P_RECT_SHAPE)
        P_rect_matrix = matrix
        return P_rect_matrix
from typing import List
from itertools import permutations
def gen_primes(limit=10000):
    """Return all primes strictly below *limit* (sieve of Eratosthenes).

    Improvement: the original repeatedly called ``min()`` on a shrinking set
    (O(n) per extracted prime, ~O(n * pi(n)) total); a flat boolean sieve is
    O(n log log n).

    Args:
        limit: exclusive upper bound for the primes; defaults to 10000.

    Returns:
        list[int]: primes < limit, in increasing order.
    """
    if limit <= 2:
        return []
    # is_prime[n] == 1 while n is still a candidate
    is_prime = bytearray([1]) * limit
    is_prime[0:2] = b'\x00\x00'
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            # knock out all multiples of p starting at p*p
            count = len(range(p * p, limit, p))
            is_prime[p * p::p] = bytes(count)
    return [n for n in range(2, limit) if is_prime[n]]
def find_prime_permutations(primes, n):
    """Return the distinct digit-permutations of *n* found in *primes*.

    Results are in ascending digit-tuple order; *primes* should support fast
    membership tests (e.g. a set).
    """
    distinct = sorted(set(permutations(str(n))))
    values = (int("".join(digit_tuple)) for digit_tuple in distinct)
    return [value for value in values if value in primes]
def gen_pairs(l: List[int]):
    """Return all unordered pairs of distinct elements of *l*.

    Each pair is a (smaller, larger) tuple; the result is deduplicated and
    sorted.
    """
    unordered = {
        (min(a, b), max(a, b))
        for a in l
        for b in l
        if a != b
    }
    return sorted(unordered)
def has_duplicates(l):
    """Return True when *l* contains at least one repeated element."""
    return len(l) != len(set(l))
def find_duplicates(l):
    """Return, sorted, one entry per *extra* occurrence of each element.

    An element seen k times contributes k-1 entries, e.g. [1, 1, 1] -> [1, 1].
    """
    seen = set()
    extras = []
    for element in l:
        if element in seen:
            extras.append(element)
        else:
            seen.add(element)
    return sorted(extras)
# Search for arithmetic progressions of 4-digit prime digit-permutations
# (Project Euler #49 style) and collect the concatenated members.
four_digit_primes = [prime for prime in gen_primes() if prime >= 1000]
primes_set = set(four_digit_primes)
results = set()
for prime in four_digit_primes:
    prime_permutations = sorted(find_prime_permutations(primes_set, prime))
    prime_pairs = gen_pairs(prime_permutations)
    prime_diffs = [b - a for a, b in prime_pairs]
    if has_duplicates(prime_diffs):
        # each repeated difference is a candidate common difference d
        for difference in find_duplicates(prime_diffs):
            diff_pairs = [pair for pair in prime_pairs if pair[1] - pair[0] == difference]
            for i in range(len(diff_pairs) - 1):
                # two adjacent pairs sharing an endpoint form a 3-term
                # arithmetic progression a, a+d, a+2d
                if diff_pairs[i][1] == diff_pairs[i+1][0]:
                    get_str = lambda i: f'{diff_pairs[i][0]}{diff_pairs[i][1]}'
                    # the shared middle term appears twice in this string
                    concatenated_num = get_str(i) + get_str(i+1)
                    results.add(concatenated_num)
print(results)
|
#_*_encoding:cp936_*_
import random
l = ['项','万','福','侠','心','海','康','宁','冲','元','云','飞','风','峰','贵','国','雪','夏','霞']
def choice(lists, num):
    """Print *num* randomly chosen entries from *lists* (with replacement).

    Nothing is printed when ``num`` exceeds ``len(lists)``.

    Fixes: the original compared ``num`` against the length of the *global*
    list ``l`` instead of the ``lists`` argument, and used a Python-2-only
    print statement (``print(x)`` with one argument behaves identically on
    Python 2 and 3).

    Args:
        lists: sequence to draw from.
        num: how many entries to print.
    """
    i = 1
    while i <= num and num <= len(lists):
        print(random.choice(lists))
        i = i + 1
# Fix: on Python 3, input() returns a string, which would make the
# `i <= num` comparison in choice() raise TypeError; convert explicitly.
# (Still works on Python 2, where input() evaluates the typed expression.)
n = int(input('请输入要选择的人数:'))
choice(l,n)
|
#!/usr/bin/env python
"""
Provides useful physical constants (SI units).
"""
from math import pi

__author__ = "Sean Hooten"
__license__ = "BSD-2-Clause"
__version__ = "0.2"
__maintainer__ = "Sean Hooten"
__status__ = "development"

# Planck constant [J*s]
h = 6.62607e-34
# Reduced Planck constant, h / 2*pi [J*s]
hbar = h / (2*pi)
# Speed of light in vacuum [m/s]
c = 299792458.0
# Elementary charge [C]
q = 1.60218e-19
# Vacuum permittivity [F/m]
eps0 = 8.85419e-12
# Electron rest mass [kg]
m0 = 9.10938e-31
# Boltzmann constant [J/K]
k = 1.38065e-23
|
from enum import Enum
class SIZE(Enum):
    """Discrete size levels used as module configuration values."""
    # The integer values define their order in a tensor
    SMALL = 0
    MEDIUM = 1
    LARGE = 2
def main(state, event):
    """Print the configured size entries from `state`; `event` is unused."""
    del event  # unused
    print("size :", state["size"].name)
    print("sizerequired:", state["sizerequired"].name)
    joined = " ".join(entry.name for entry in state["sizelist"])
    print("sizelist:", str(joined))
def register(mf):
    """Register default config values and the 'main' event with `mf`.

    `mf` is the hosting module framework (defined elsewhere); it must
    provide register_defaults() and register_event().
    """
    mf.register_defaults({
        # NOTE(review): bare SIZE / [SIZE] entries look like type-only
        # placeholders (value supplied by the user) -- confirm framework semantics.
        "sizerequired": SIZE,
        "size": SIZE.SMALL,
        "sizelist": [SIZE],
    })
    mf.register_event('main', main, unique=False)
|
# Scrape Wikipedia's machine-readable list of common misspellings into a
# {misspelling: correction} dict and pickle it to mistakes.pkl.
# NOTE(review): mixes the Python 3 `urllib.request` API with the Python 2-era
# `BeautifulSoup` (BS3) import; under bs4 the import would have to be
# `from bs4 import BeautifulSoup` -- confirm the intended runtime.
from urllib.request import urlopen, Request
from BeautifulSoup import BeautifulSoup
import pickle

mistakes = {}
# Earlier per-letter scraping approach, kept commented out for reference:
##for i in string.ascii_uppercase:
##    url = "http://en.wikipedia.org/wiki/Wikipedia:Lists_of_common_misspellings/"+i
##    req = Request(url, headers={'User-Agent':"Magic Browser"})
##    print url
##    page = BeautifulSoup(urlopen(req))
##    for j in page.findAll("li"):
##        if "plainlinks" in repr(j):
##            k = j.text.split("(")
##            mistakes[k[0].strip()] = k[1].strip(") ")

url = "http://en.wikipedia.org/wiki/Wikipedia:Lists_of_common_misspellings/For_machines"
# A custom User-Agent; the default urllib agent is often rejected.
req = Request(url, headers={'User-Agent':"Magic Browser"})
page = BeautifulSoup(urlopen(req))
for i in page.find("pre").text.splitlines():
    # Each line is of the form "misspelling->correction".
    j = i.split("->")
    mistakes[j[0]] = j[1]
out = open("mistakes.pkl","wb")
pickle.dump(mistakes, out)
out.close()
|
import pandas as pd
from tkinter import filedialog

# Ask the user to pick the Excel workbook to inspect.
load_features_file = filedialog.askopenfilename()
# read_excel already returns a DataFrame; the original additionally wrapped
# it in pd.DataFrame(df), a redundant copy that has been removed.
df = pd.read_excel(load_features_file)
total_cols = len(df.columns)
print(total_cols)
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import re
from setuptools import setup, find_packages
def requirements(filename):
    """Read a pip requirements file, dropping comments and blank lines."""
    comment = re.compile(r'\s*#.*')
    entries = []
    with open(filename) as handle:
        for raw_line in handle.read().splitlines():
            entry = comment.sub('', raw_line).strip()
            if entry:
                entries.append(entry)
    return entries
# Package definition for the Contrail Kubernetes network manager.
setup(
    name='kube_manager',
    version='0.1dev',
    packages=find_packages(),
    # Ship static assets alongside the Python code.
    package_data={'': ['*.html', '*.css', '*.xml', '*.yml']},

    # metadata
    author="OpenContrail",
    author_email="dev@lists.opencontrail.org",
    license="Apache Software License",
    url="http://www.opencontrail.org/",
    long_description="Kubernetes Network Manager",

    test_suite='kube_manager.tests',

    # Runtime and test dependencies come from the pip requirements files.
    install_requires=requirements('requirements.txt'),
    tests_require=requirements('test-requirements.txt'),

    entry_points={
        # Please update sandesh/common/vns.sandesh on process name change
        'console_scripts': [
            'contrail-kube-manager = kube_manager.kube_manager:main',
        ],
    },
)
|
class Solution:
    def getPermutation(self, n, k):
        """Return the k-th (1-indexed) lexicographic permutation of 1..n.

        :type n: int
        :type k: int
        :rtype: str
        """
        if n == 1:
            return "1"
        digits = list(range(1, n + 1))
        # Step forward k-1 times from the identity permutation.
        for _ in range(k - 1):
            self.permutation_sequence(digits, n)
        return ''.join(map(str, digits))

    def permutation_sequence(self, nums, n):
        """Advance `nums` in place to its next lexicographic permutation.

        Leaves `nums` unchanged when it is already the last permutation.
        """
        pivot = next(
            (i for i in range(n - 2, -1, -1) if nums[i] < nums[i + 1]),
            -1,
        )
        if pivot == -1:
            return
        # Swap the pivot with the smallest larger element to its right.
        for j in range(n - 1, -1, -1):
            if nums[j] > nums[pivot]:
                nums[j], nums[pivot] = nums[pivot], nums[j]
                break
        # Restore ascending order in the suffix.
        self.quick_sort(nums, pivot + 1, n - 1)

    def quick_sort(self, nums, start, end):
        """Sort nums[start:end+1] in place (ascending)."""
        if start >= end:
            return
        nums[start:end + 1] = sorted(nums[start:end + 1])
if __name__ == '__main__':
    # Demo: print the 9th permutation of the digits 1..4 ("2314").
    solver = Solution()
    print(solver.getPermutation(4, 9))
|
import requests
import os, sys
import json
from multiprocessing.dummy import Pool as ThreadPool
from datetime import datetime
import logging
def worker(i):
    """Download habr article `i` and dump selected fields to files\\<i>.json.

    Returns 1 if the file already exists, 503 when the server throttles,
    2 on a request error, otherwise None (success or unsuccessful payload).
    """
    currentFile = "files\\{}.json".format(i)
    if os.path.isfile(currentFile):
        logging.info("{} - File exists".format(i))
        return 1
    url = "https://m.habr.com/kek/v1/articles/{}/?fl=ru%2Cen&hl=ru".format(i)
    try:
        r = requests.get(url)
        if r.status_code == 503:
            logging.critical("503 Error")
            return 503
    except requests.RequestException:
        # Fixes from the original: narrow the bare `except`, open the error
        # log for append (it was opened read-only, so .write() crashed), and
        # write a string instead of the raw int.
        with open("req_errors.txt", "a") as file:
            file.write("{}\n".format(i))
        return 2
    data = json.loads(r.text)
    if data['success']:
        article = data['data']['article']
        # `article_id` renamed from `id` to avoid shadowing the builtin.
        article_id = article['id']
        is_tutorial = article['is_tutorial']
        time_published = article['time_published']
        comments_count = article['comments_count']
        lang = article['lang']
        tags_string = article['tags_string']
        title = article['title']
        content = article['text_html']
        reading_count = article['reading_count']
        author = article['author']['login']
        score = article['voting']['score']
        data = (article_id, is_tutorial, time_published, title, content, comments_count, lang, tags_string, reading_count, author, score)
        with open(currentFile, "w") as write_file:
            json.dump(data, write_file)
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print("Необходимы параметры min и max. Использование: asyc.py 1 100")
        sys.exit(1)
    # Renamed from `min`/`max` to stop shadowing the builtins.
    id_from = int(sys.argv[1])
    id_to = int(sys.argv[2])
    # More than 3 threads gets the IP temporarily banned by habr.
    pool = ThreadPool(3)
    # Start the clock and fan the IDs out to the worker threads.
    start_time = datetime.now()
    results = pool.map(worker, range(id_from, id_to))
    # Once every thread has finished, print the elapsed time.
    pool.close()
    pool.join()
    print(datetime.now() - start_time)
import pandas as pd
import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier, GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
from tqdm.notebook import tqdm
import scipy.optimize as op
def LineageCounts(df_data, dict_lineages, t_ranges):
    '''
    Function to generate a timeline dataframe of
    counts by lineage aliases.

    df_data : pandas.DataFrame
        2-columns Dataframe with the raw occurences,
        1st column (Day) are integers counting from
        the 26/04/2020, 2nd column (paper_lineage)
        are the labels of lineages
    dict_lineages : dict
        Dictionary with the lineage alias as keys
        and a list of the conforming sublineages as
        values
    t_ranges : dict
        Dictionary with the lineage alias as keys
        and time ranges to consider as values

    Returns : pandas.DataFrame
        Count of the occurences of the given lineage aliases,
        with the day number as indices and the lineages
        aliases as columns
    '''
    day_column, lineage_column = df_data.columns
    # Total occurrences per day across every lineage -> the 'All' column.
    df_counts = (df_data
                 .groupby(day_column)
                 .count()
                 )
    columns_name = {lineage_column: 'All'}
    df_counts.rename(columns=columns_name,
                     inplace=True)
    for i, (key, value) in enumerate(dict_lineages.items()):
        # Rows belonging to any sublineage of this alias.
        mask = (df_data[lineage_column]
                .isin(value)
                )
        df_grouped = (df_data[mask]
                      .groupby(day_column)
                      )
        Z = df_grouped.count()
        # Restrict to the configured time window for this alias.
        t0, t1 = t_ranges[key]
        Z = Z.loc[t0:t1, :]
        columns_name = {lineage_column: key}
        Z.rename(columns=columns_name,
                 inplace=True)
        # Trim to the first/last day that actually has data ...
        query = f'`{key}`.notna()'
        indices = (Z
                   .query(query, engine='python')
                   .index
                   )
        x_min, x_max = indices[[0, -1]]
        Z = Z.loc[x_min:x_max+1, :]
        # ... then fill the missing days in between with zero counts.
        Z = Z.reindex(np.arange(x_min, x_max+1))
        Z.fillna(0, inplace=True)
        df_counts = df_counts.join(Z, how='outer')
    return df_counts
def LineageProbabilities(df_data, dict_lineages, t_ranges):
    '''
    Function to generate a timeline dataframe of
    probabilities by lineages aliases.

    df_data : pandas.DataFrame
        2-columns Dataframe with the raw occurences,
        1st column (Day) are integers counting from
        the 26/04/2020, 2nd column (paper_lineage)
        are the labels of lineages
    dict_lineages : dict
        Dictionary with the lineage alias as keys
        and a list of the conforming sublineages as
        values
    t_ranges : dict
        Dictionary with the lineage alias as keys
        and time ranges to consider as values

    Returns : pandas.DataFrame
        Probability of occurences of the given lineage
        aliases, with the day number as indices and the
        lineages aliases as columns (named 'P_<alias>')
    '''
    df_probabilities = pd.DataFrame()
    df = LineageCounts(df_data, dict_lineages, t_ranges)
    # First column is the 'All' total; each remaining column becomes a
    # daily share of that total.
    for lineage in df.columns[1:]:
        eval_ = f'`{lineage}` / All'
        p_lineage = df.eval(eval_)
        p_lineage.name = f'P_{lineage}'
        df_probabilities = (df_probabilities
                            .join(p_lineage, how='outer')
                            )
    return df_probabilities
def LineageOccurences(df_data, dict_lineages):
    '''
    Function to generate a dictionary of the separated
    occurrences of the lineage aliases.

    df_data : pandas.DataFrame
        2-columns Dataframe with the raw occurences,
        1st column (Day) are integers counting from
        the 26/04/2020, 2nd column (paper_lineage)
        are the labels of lineages
    dict_lineages : dict
        Dictionary with the lineage alias as keys
        and a list of the conforming sublineages as
        values

    Returns : dict
        Occurences with the lineage alias as key and a
        2-columns pandas.DataFrame, 1st column (Day)
        the day number, 2nd column (lineage alias) vector
        of ones or zeros for if is the strain in question
        or not respectively
    '''
    day_column, lineage_column = df_data.columns
    occurences = {}
    for lineage in dict_lineages.keys():
        lineage_list = dict_lineages[lineage]
        # pandas query resolves @lineage_list from this local scope.
        query0 = f'{lineage_column} in @lineage_list'
        # First/last day on which this alias was observed.
        x_min, x_max = (df_data
                        .query(query0)[day_column]
                        .agg([min, max])
                        .T
                        )
        query1 = f'@x_min <= {day_column} <= @x_max'
        df_lineage = df_data.query(query1)
        # Replace the label column with a boolean membership flag.
        # NOTE(review): eval on a .query() slice may trigger pandas'
        # SettingWithCopy warning -- confirm this is intended.
        eval_ = f'{lineage_column} = {query0}'
        df_lineage.eval(eval_, inplace=True)
        df_lineage.reset_index(drop=True, inplace=True)
        occurences[lineage] = df_lineage
    return occurences
def myop(obj_func, initial_theta, bounds):
    '''
    Optimisation wrapper: bounded L-BFGS-B minimisation of `obj_func`,
    which must return (value, gradient). Returns (theta_opt, func_min).
    '''
    result = op.minimize(
        obj_func,
        initial_theta,
        method="L-BFGS-B",
        jac=True,
        bounds=bounds,
        options={'maxfun': 100000, 'maxiter': 100000},
    )
    return result.x, result.fun
def myboot(y):
    '''
    Draw one multinomial bootstrap resample of the count vector `y`
    (same total, probabilities proportional to the original counts).
    '''
    total = np.sum(y)
    return np.random.multinomial(int(total), y / total)
def GPRPreprocess(y):
    '''
    Log-transform counts (log(y+1)) and shape them as a column vector,
    ready for the sklearn Gaussian-process estimators.
    '''
    transformed = np.log(y + 1)
    return np.atleast_2d(transformed).T
def GPFitting(occurences, df_counts, classify, nboot=200):
    '''
    Function to produce fitting by either ocurrences
    or counts of lineage alias.

    occurences : dict
        Segregation of occurences with the lineage
        alias as key and a 2-columns pandas.DataFrame
        of similar characteristics as df_data for
        a single lineage alias
    df_counts : pandas.DataFrame
        Probability of occurences of the given lineage
        aliases, with the day number as indices and the
        lineages aliases as columns
    classify: array
        Boolean values for whether to use classification for each lineage
    nboot: integer
        Number of bootstrap samples to take

    Return : dict
        Collection of fitting data with the following keys and values:
        'Pi_store': The probability of the occurence of the lineage alias,
        'Pi_boot': The bootstrapping of the probability,
        'r_store': The estimated growing rate,
        'r_boot': The bootstrapping of the growing rate.
    '''
    Pi_store = pd.DataFrame()
    Pi_boot = {}
    r_store = pd.DataFrame()
    r_boot = {}
    for i, lineage in enumerate(df_counts.columns[1:]):
        # Per-occurrence day (X) and membership flag (y) arrays.
        X, y = (occurences[lineage]
                .T
                .values
                )
        X = X.reshape(-1, 1)
        m = len(y)
        print(f'Loaded data for {lineage}')
        columns = ['All', lineage]
        df = df_counts[columns].dropna()
        X0 = (df
              .index
              .to_numpy()
              )
        X0min, X0max = X0[[0,-1]]
        # Dense daily grid over the observed range, used for prediction.
        X1 = np.atleast_2d(np.arange(X0min, X0max+1)).T
        if classify[i]:
            print('Running Gaussian process classification.')
            kernel = 1.0 * RBF(1.0)
            gpc = GaussianProcessClassifier(kernel=kernel,
                                            copy_X_train=False,
                                            n_jobs=-2,
                                            )
            gpc.fit(X, y)
            Pi = gpc.predict_proba(X1.reshape(-1, 1))[:, 1]
            # Growth rate taken as the day-to-day log-odds increment.
            dr = np.diff(np.log(Pi/(1.-Pi)))
            print('Main fit done. Bootstrap progress:')
            pb = np.zeros((nboot,len(X1)))
            rb = np.zeros((nboot,len(X1)-1))
            for j in tqdm(range(nboot)):
                # Resample the occurrences with replacement.
                i_boot = np.random.randint(0, m, m)
                y_boot = y[i_boot]
                X_boot = X[i_boot].reshape(-1, 1)
                # Reuse the kernel optimised on the full data; skip re-tuning.
                kernel = gpc.kernel_
                gpc_boot = GaussianProcessClassifier(kernel=kernel,
                                                     optimizer=None,
                                                     copy_X_train=False,
                                                     n_jobs=-2,
                                                     )
                try:
                    gpc_boot.fit(X_boot, y_boot)
                    pb[j,:] = (gpc_boot
                               .predict_proba(X1
                                              .reshape(-1, 1)
                                              )[:, 1]
                               )
                    rb[j,:] = np.diff(np.log(pb[j,:] /
                                             (1.-pb[j,:]))
                                      )
                except:
                    # NOTE(review): bare except, and `j -= 1` does NOT retry
                    # under a for loop -- the failed sample is left as zeros.
                    print(f'Failed on bootstrap {j:.0f}')
                    j -= 1
        else:
            print('Running Gaussian Process Regression.')
            # Two GPs on log-counts: "everything else" (yy1) and this lineage (yy2).
            yy1 = df.eval(f'All - `{lineage}`')
            yy2 = df[lineage]
            X0 = np.atleast_2d(X0).T
            y1 = GPRPreprocess(yy1)
            y2 = GPRPreprocess(yy2)
            kernel1 = (1.0 *
                       RBF(length_scale=10.) +
                       WhiteKernel(noise_level=1)
                       )
            gpr1 = GaussianProcessRegressor(kernel=kernel1,
                                            alpha=0.0,
                                            n_restarts_optimizer=10,
                                            optimizer=myop
                                            )
            gpr1.fit(X0,y1)
            y_mean1 = gpr1.predict(X1, return_std=False)
            kernel2 = (1.0 *
                       RBF(length_scale=10.) +
                       WhiteKernel(noise_level=1.0)
                       )
            gpr2 = GaussianProcessRegressor(kernel=kernel2,
                                            alpha=0.0,
                                            n_restarts_optimizer=10,
                                            optimizer=myop
                                            )
            gpr2.fit(X0,y2)
            y_mean2 = gpr2.predict(X1, return_std=False)
            # Keep the optimised kernels for the bootstrap refits below.
            k1store = gpr1.kernel_
            k2store = gpr2.kernel_
            mu1 = y_mean1.reshape(-1)
            mu2 = y_mean2.reshape(-1)
            # Invert the log(y+1) transform and form the lineage proportion.
            Pi = ((np.exp(mu2)-1) /
                  (np.exp(mu1)+np.exp(mu2) -
                   2
                   )
                  )
            dmu1 = np.diff(mu1)
            dmu2 = np.diff(mu2)
            # Relative growth rate: difference of log-count increments.
            dr = (dmu2) - (dmu1)
            print('Main fit done. Bootstrap progress:')
            pb = np.zeros((nboot,len(X1)))
            rb = np.zeros((nboot,len(X1)-1))
            for j in tqdm(range(nboot)):
                # Multinomial bootstrap of the daily count vectors.
                y1 = GPRPreprocess(myboot(yy1))
                y2 = GPRPreprocess(myboot(yy2))
                gpr1 = GaussianProcessRegressor(kernel=k1store,
                                                alpha=0.0,
                                                n_restarts_optimizer=1
                                                )
                gpr1.fit(X0, y1)
                y_mean1 = gpr1.predict(X1, return_std=False)
                gpr2 = GaussianProcessRegressor(kernel=k2store,
                                                alpha=0.0,
                                                n_restarts_optimizer=1
                                                )
                gpr2.fit(X0, y2)
                y_mean2 = gpr2.predict(X1, return_std=False)
                mu1 = y_mean1.reshape(-1)
                mu2 = y_mean2.reshape(-1)
                pb[j,:] = ((np.exp(mu2)-1) /
                           (np.exp(mu1) +
                            np.exp(mu2) - 2
                            )
                           )
                dmu1 = np.diff(mu1)
                dmu2 = np.diff(mu2)
                r = (dmu2) - (dmu1)
                rb[j,:] = r
        # Collect this lineage's results into the shared output frames.
        X1 = X1.reshape(-1)
        df_Pi = pd.DataFrame(index=X1,
                             data=Pi,
                             columns=[lineage]
                             )
        Pi_store = Pi_store.join(df_Pi, how='outer')
        dr = pd.DataFrame(index=X1[1:],
                          data=dr,
                          columns=[lineage]
                          )
        r_store = r_store.join(dr, how='outer')
        pb = pd.DataFrame(data=pb.T, index=X1)
        rb = pd.DataFrame(data=rb.T, index=X1[1:])
        Pi_boot[lineage] = pb
        r_boot[lineage] = rb
    return {'Pi_store':Pi_store,
            'Pi_boot':Pi_boot,
            'r_store':r_store,
            'r_boot':r_boot
            }
def StringToDate(s):
    """Parse a date string beginning with '202' into a Timestamp; NaN otherwise."""
    return pd.to_datetime(s) if s.startswith('202') else np.nan
def TimeDelta(date):
    """Whole days elapsed between `date` and the reference day 26 Apr 2020."""
    origin = pd.to_datetime('26/04/2020', dayfirst=True)
    return (date - origin).days
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Utilities for testing the logging system (metrics, common logs)."""
import fcntl
import os
class Fifo:
    """Facility for creating and working with named pipes (FIFOs)."""

    path = None  # filesystem path of the FIFO
    fifo = None  # opened read-end file object

    def __init__(self, path, blocking=False):
        """Create a new named pipe.

        Raises FileExistsError if `path` already exists. By default the
        read end is opened O_NONBLOCK so reads never stall waiting for a
        writer.
        """
        if os.path.exists(path):
            raise FileExistsError("Named pipe {} already exists.".format(path))
        os.mkfifo(path)
        if not blocking:
            fd = os.open(path, os.O_NONBLOCK)
            self.fifo = os.fdopen(fd, "r")
        else:
            self.fifo = open(path, "r", encoding="utf-8")
        self.path = path

    def sequential_reader(self, max_lines):
        """Return up to `max_lines` lines from a non blocking fifo.

        :return: A list containing the read lines.
        """
        return self.fifo.readlines()[:max_lines]

    @property
    def flags(self):
        """Return flags of the opened fifo.

        :return An integer with flags of the opened file.
        """
        fd = self.fifo.fileno()
        return fcntl.fcntl(fd, fcntl.F_GETFL)

    @flags.setter
    def flags(self, flags):
        """Set new flags for the opened fifo."""
        fd = self.fifo.fileno()
        fcntl.fcntl(fd, fcntl.F_SETFL, flags)

    def __del__(self):
        """Destructor cleaning up the FIFO from where it was created."""
        if self.path:
            try:
                os.remove(self.path)
            except OSError:
                # Best effort: the pipe may already be gone.
                pass
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base64 import b64encode
from .base import logger, Base
#
# Abstract
#
class AbstractKey(Base):
    """Shared behaviour for key proxies.

    A key is described either by raw `key` material (base64 string) or by a
    requested `size`. NOTE(review): `_invoke`, `update` and `id` come from
    `Base` (defined elsewhere) -- presumably a bridge to the Java classes
    named in JAVA_CLASS; confirm against `.base`.
    """
    def __init__(self, size=None, key=None, **kwargs):
        self.size = size
        self.key = key
        super(AbstractKey, self).__init__(size=size, key=key, **kwargs)
        # Freshly bound remote objects are synced and packed immediately
        # unless the caller opts out with skip_update=True.
        if self.id is not None and not kwargs.get('skip_update', False):
            self.update()
            self.pack()

    def _instantiate_data(self):
        """Payload used to create the remote object: binary key material if
        present, otherwise the requested size; logs and returns None if
        neither is available."""
        if self.key is not None:
            return self.to_binary()
        elif self.size is not None:
            return self.size
        else:
            logger.exception('Can\'t find .instantiate_data() data for %s', self)
            return None

    def to_binary(self):
        # Wire format for binary payloads.
        return {'__type': 'binary', 'base64': self.key}

    def pack(self):
        """Fetch the packed (base64) key from the remote side and cache it."""
        rsp = self._invoke('pack')
        self.key = rsp['base64']
        return self.key
#
# Real
#
class KeyAddress(Base):
    """Proxy for com.icodici.crypto.KeyAddress.

    Holds two representations: `address` (string form) and `uaddress`
    (packed/base64 form); whichever is missing is filled in lazily.
    """
    JAVA_CLASS = 'com.icodici.crypto.KeyAddress'
    # __slots__ = ('id', 'address', 'uaddress')

    def __init__(self, address=None, uaddress=None, **kwargs):
        self.address = address
        self.uaddress = uaddress
        super(KeyAddress, self).__init__(address=address, uaddress=uaddress, **kwargs)
        if self.address is None:
            self.to_string()
        if self.uaddress is None:
            self.update()

    def _instantiate_data(self):
        """Payload used to create the remote object: the string address if
        set, otherwise the packed binary form."""
        if self.address:
            return self.address
        elif self.uaddress:
            return self.to_binary()

    def to_binary(self):
        # Wire format for the packed address.
        return {'__type': 'binary', 'base64': self.uaddress}

    @staticmethod
    def from_get(data):
        """Map a remote `get` response onto constructor kwargs."""
        return {'uaddress': data['uaddress']['base64']}

    def is_matching_key_address(self, key_address):
        """True if `key_address` is a bound KeyAddress matching this one."""
        if key_address is None or self.JAVA_CLASS != key_address.JAVA_CLASS or key_address.id is None:
            return False
        rsp = self._invoke('isMatchingKeyAddress', key_address.remote)
        return rsp

    def get_packed(self):
        """Return the packed (base64) address, fetching and caching it once."""
        if self.uaddress:
            return self.uaddress
        self.uaddress = self._invoke('getPacked')['base64']
        return self.uaddress

    @property
    def is_long(self):
        # Delegated entirely to the remote object.
        return self._invoke('isLong')

    def to_string(self):
        """Fetch and cache the string form of the address."""
        self.address = self._invoke('toString')
        return self.address
class PrivateKey(AbstractKey):
    """Proxy for com.icodici.crypto.PrivateKey."""
    JAVA_CLASS = 'com.icodici.crypto.PrivateKey'
    # __slots__ = ('id', 'size', 'packed', 'key')

    def __init__(self, size=None, key=None, **kwargs):
        # Accept raw bytes and normalise to the base64 text form.
        if isinstance(key, bytes):
            key = b64encode(key).decode()
        super(PrivateKey, self).__init__(size=size, key=key, **kwargs)
        self._public_key = None

    @staticmethod
    def from_get(data):
        """Map a remote `get` response onto constructor kwargs."""
        return {'packed': data['packed']}

    def get_public_key(self):
        """Fetch the corresponding PublicKey from the remote side."""
        rsp = self._invoke('getPublicKey')
        return PublicKey.get(rsp['id'])

    @property
    def public_key(self):
        """
        :rtype: PublicKey
        """
        return self.get_public_key()
class PublicKey(AbstractKey):
    """Proxy for com.icodici.crypto.PublicKey."""
    JAVA_CLASS = 'com.icodici.crypto.PublicKey'
    # __slots__ = ('id', 'size', 'key')

    def __init__(self, **kwargs):
        # Address caches; populated lazily by the address accessors.
        self._short_address, self._long_address = None, None
        super(PublicKey, self).__init__(**kwargs)

    @staticmethod
    def from_get(data):
        """Map a remote `get` response onto constructor kwargs."""
        return {'key': data['packed']['base64']}

    def _get_address(self, short=True):
        """Fetch either the short or the long KeyAddress for this key.

        :rtype: KeyAddress
        """
        rsp = self._invoke('getShortAddress' if short else 'getLongAddress')
        return KeyAddress.get(rsp['id'])

    @property
    def short_address(self):
        """
        :rtype: KeyAddress
        """
        return self.get_short_address()

    def get_short_address(self):
        """
        :rtype: KeyAddress
        """
        return self._get_address(short=True)

    @property
    def long_address(self):
        """
        :rtype: KeyAddress
        """
        return self.get_long_address()

    def get_long_address(self):
        """
        :rtype: KeyAddress
        """
        return self._get_address(short=False)
|
from flask import jsonify, Response, url_for
from estimator import db
from database.models import Group, User
from flask import Blueprint

# Blueprint holding the REST v1 group endpoints; registered elsewhere.
api = Blueprint('api', __name__)
@api.route('/rest/v1/group/<groupname>', methods = ['POST'])
def create_group(groupname):
    """Create a group named `groupname` owned by the 'default' user.

    Returns 400 with an error body when the group already exists,
    otherwise 201 with a Location header pointing at the new resource.
    """
    user = User.query.filter_by(nickname='default').first()
    group = Group.query.filter_by(name=groupname).first()
    if group:
        body = { "message" : "Group already exists" }
        return jsonify(body), 400
    print("Creating new group {}".format(groupname))
    group = Group(groupname, user)
    db.session.add(group)
    db.session.commit()
    # Dead `body = {}` / `code` locals from the original removed; the
    # response values are returned directly.
    return jsonify({}), 201, {'location': url_for('api.query_group', groupname=groupname)}
@api.route('/rest/v1/group/<groupname>', methods = ['GET'])
def query_group(groupname):
    """Return the group's name as JSON, or a 404 payload when missing."""
    group = Group.query.filter_by(name=groupname).first()
    if group is None:
        return jsonify({ "message" : "Group not found"}), 404, {'mimetype': 'application/json'}
    return jsonify({ "groupname" : group.name })
|
#!/usr/bin/env python3.7
import click
import tempfile
import subprocess
import tqdm
import json
import inspect
import array
import math
import os
import functools
def helper_for(other):
    """Decorator: assert the wrapped function is only called from `other`."""
    def decorator(f):
        @functools.wraps(f)
        def helper(*args, **kwargs):
            expected = other.__name__
            actual = inspect.stack()[1][3]
            assert actual == expected, f"{f.__name__} is helper function for {expected}, but was called by {actual}"
            return f(*args, **kwargs)
        return helper
    return decorator
def tempfile_noise(tmpdir):
    """Location of the sox noise-profile scratch file inside `tmpdir`."""
    filename = "noise.prof"
    return os.path.join(tmpdir, filename)
def tempfile_audio(tmpdir):
    """Location of the intermediate processed-audio file inside `tmpdir`."""
    filename = "audio.mp4"
    return os.path.join(tmpdir, filename)
@click.group(chain=True)
@click.option(
    '--speed',
    type=click.FloatRange(1/4, 4),
    default=1,
    help="Multiply playback speed of the video",
    show_default=True)
@click.option(
    '--pitch-shift',
    type=click.FloatRange(-1, 1),
    default=0,
    help="Change the audio pitch by a factor",
    show_default=True)
@click.option(
    '--stack',
    type=click.Choice(["horizontal", "vertical"]),
    default="horizontal",
    help="How to arrange multiple videos",
    show_default=True)
@click.option(
    '--default-filters/--no-default-filters',
    default=True,
    help="Use default highpass/lowpass filters",
    show_default=True)
@click.option(
    '--default-loudnorm/--no-default-loudnorm',
    default=True,
    help="Use default loudness normalization",
    show_default=True)
@click.option(
    '--silence-threshold',
    type=click.FloatRange(-100, 0),
    default=-40,
    help="Amplitude (in dB) that separates background noise from speech",
    show_default=True)
@click.option(
    '--silence-minimum-duration',
    type=click.FloatRange(0.01, 100),
    default=0.2,
    help="Silence shorter than this will not be cut",
    show_default=True)
@click.option(
    '--noiseremove-factor',
    type=click.FloatRange(0, 1),
    default=0.21,
    help="Strength of noise filtering",
    show_default=True)
@click.option(
    '--tmpdir',
    default=".",
    help="Folder for temporary files",
    show_default=True)
def cli(**kwargs):
    """Chained command group; all work happens in the result callback (`main`)."""
    pass
@cli.command('input')
@click.argument('file')
@click.option(
    '-vf',
    default="",
    help="additional video filters")
@click.option(
    '-af',
    default="",
    help="additional audio filters")
@click.option(
    '--place',
    type=int,
    nargs=2,
    help="Manually set Y, X positions for video")
@click.option(
    '--this-audio',
    is_flag=True,
    help="If multiple input files contain audio, use this file's")
def addfile(file, vf, af, place, this_audio):
    """Collect one input file spec; consumed later by the result callback."""
    return "in", (file, vf, af, place, this_audio)
@cli.command('output')
@click.argument('file')
@click.option(
    '-vf',
    default="",
    help="additional video filters")
@click.option(
    '-af',
    default="",
    help="additional audio filters")
def outfile(file, vf, af):
    """Collect the output file spec; consumed later by the result callback."""
    return "out", (file, vf, af)
# NOTE(review): resultcallback() was renamed result_callback() in click 8 --
# confirm the pinned click version.
@cli.resultcallback()
def main(inputlist, **kwargs):
    """Run the full pipeline over the tagged tuples collected by the subcommands."""
    with tempfile.TemporaryDirectory(prefix="lecture-enhance-", dir=kwargs["tmpdir"]) as tmpdir:
        # NOTE(review): `duration` is computed but never used here.
        duration, (w, h), videos, audio, output = preprocessOptions(inputlist, **kwargs)
        bitmap, longest_silence = findSilence(audio, **kwargs)
        analyzeNoise(audio, longest_silence, tmpdir)
        processAudio(audio, output, bitmap, tmpdir, kwargs["noiseremove_factor"])
        processVideo(videos, output, bitmap, tmpdir, w, h)
        print("all done!")
@helper_for(main)
def preprocessOptions(inputlist, **kwargs):
    """Validate the collected input/output specs and assemble the plan.

    Returns (duration, (w, h), videos, audio, output) where `videos` carry
    placement info, `audio` names the single audio source, and `output`
    carries the merged filter chains.
    """
    videos = []
    audio = None
    output = None
    duration = 0
    used_this_audio_flag = False
    for t, i in inputlist:
        if t == "in":
            file, vf, af, place, this_audio = i
            d, (a_count, v_count), (w, h) = ffprobe(file)
            duration = max(duration, d)
            if a_count > 1 or v_count > 1:
                raise click.ClickException(
                    "multiple channels in one file are not supported: "
                    + f"\"{file}\" has {a_count} audio and {v_count} video channels")
            if a_count == 1:
                if this_audio and used_this_audio_flag:
                    raise click.ClickException(f"--this-audio was used more than once")
                # First audio wins unless another file claims --this-audio.
                if audio is None or this_audio:
                    audio = {"file": file, "af": af}
                else:
                    print(f"Warning: audio in \"{file}\" ignored")
            if v_count == 1:
                # Custom filters may change the frame size; re-measure.
                if vf != "":
                    w, h = getWH(file, vf)
                videos.append({"file": file, "vf": vf, "place": place, "w": w, "h": h})
            if a_count == 0 and v_count == 0:
                raise click.ClickException(f"\"{file}\" has no audio or video")
        elif t == "out":
            file, vf, af = i
            if output is not None:
                raise click.ClickException("only one output may be specified")
            output = {"file": file, "vf": vf, "af": af}
    # User-provided output filters run before the defaults.
    af, vf = defaultFilters(**kwargs)
    if len(output["vf"]) > 0:
        vf = [output["vf"]] + vf
    if len(output["af"]) > 0:
        af = [output["af"]] + af
    output["vf"] = ",".join(vf)
    output["af"] = ",".join(af)
    videos, (w, h) = placeVideos(videos, kwargs["stack"] == "vertical")
    return duration, (w, h), videos, audio, output
@helper_for(preprocessOptions)
def ffprobe(file):
    """Probe `file` with ffprobe.

    Returns (duration, (audio_stream_count, video_stream_count), (w, h));
    w/h come from the last video stream seen, 0 when there is none.
    """
    cmd = [
        "ffprobe",
        "-hide_banner",
        "-show_streams",
        "-print_format", "json",
        "-i", file
    ]
    res = subprocess.run(cmd, capture_output=True, check=True)
    res = json.loads(res.stdout)
    duration = 0
    a_count = 0
    v_count = 0
    w, h = 0, 0
    for s in res["streams"]:
        if "duration" in s:
            duration = float(s["duration"])
        if s["codec_type"] == "audio":
            a_count += 1
        elif s["codec_type"] == "video":
            v_count += 1
            w, h = s["width"], s["height"]
    return duration, (a_count, v_count), (w, h)
@helper_for(preprocessOptions)
def getWH(file, vf):
    """Measure the frame size of `file` after applying filters `vf`.

    Renders a single frame through `vf,showinfo` and parses the " s:WxH"
    field that showinfo prints on stderr. Raises ClickException if the
    dimensions cannot be determined.
    """
    cmd = [
        "ffmpeg",
        "-hide_banner",
        "-nostdin",
        "-i", file,
        "-frames:v", "1",
        "-vf", f"{vf},showinfo",
        "-f", "null", "-"
    ]
    res = subprocess.run(cmd, capture_output=True, check=True)
    for line in res.stderr.decode("utf-8").split("\n"):
        if "_showinfo_" in line and " s:" in line:
            dims = line.split(" s:")[-1].split(" ")[0]
            w, h = dims.split("x")
            return int(w), int(h)
    raise click.ClickException(
        f"could not determine video dimensions for file \"{file}\" and filters \"{vf}\"")
@helper_for(preprocessOptions)
def defaultFilters(**kwargs):
    """Build the default (af, vf) filter lists from the CLI options.

    Pitch shift is done by resampling, then atempo corrects the tempo back
    (combined with --speed); atempo only accepts factors in [0.5, 2], so
    larger factors are decomposed into a chain.
    """
    af, vf = [], []
    pitch = kwargs["pitch_shift"]
    speed = kwargs["speed"]
    samples = int(44100 * (1 - pitch))
    tempo_total = speed * (1 - pitch)
    tempo = []
    # Half a sample tolerance: skip filters that would be no-ops.
    epsilon = 0.5/44100
    while tempo_total > 2:
        tempo.append(2)
        tempo_total *= 0.5
    while tempo_total < 0.5:
        tempo.append(0.5)
        tempo_total *= 2
    tempo.append(tempo_total)
    if samples != 44100:
        af.append(f"aresample={samples}")
    for t in tempo:
        if abs(t-1) > epsilon:
            af.append(f"atempo={t}")
    if samples != 44100:
        af.append("asetrate=44100")
    if kwargs["default_filters"]:
        # Band-pass roughly matching the speech range.
        af.append("lowpass=f=1700,highpass=f=100")
    if kwargs["default_loudnorm"]:
        af.append("loudnorm=I=-23.0:TP=-2.0:LRA=7.0:print_format=summary")
    if abs(speed-1) > epsilon:
        vf.append(f"setpts=PTS/{speed}")
    return af, vf
@helper_for(preprocessOptions)
def placeVideos(videos, vertical):
    """Assign (y, x) positions to videos without an explicit --place.

    Auto-placed videos are stacked top-to-bottom (vertical, centered
    horizontally) or left-to-right (horizontal, top-aligned). Returns the
    placed list and the (w, h) of the combined canvas.
    """
    placed = []
    if vertical:
        # First pass: widest auto-placed video defines the column width.
        w = 0
        for v in videos:
            if len(v["place"]) == 0:
                w = max(w, v["w"])
        h = 0
        for v in videos:
            if len(v["place"]) == 0:
                x = (w - v["w"])//2
                v["place"] = (h, x)
                h += v["h"]
            placed.append(v)
    else:
        w = 0
        for v in videos:
            if len(v["place"]) == 0:
                v["place"] = (0, w)
                w += v["w"]
            placed.append(v)
    # Canvas must cover every video, including manually placed ones.
    w, h = 0, 0
    for v in placed:
        nw, nh = v["place"][1]+v["w"], v["place"][0]+v["h"]
        w, h = max(w, nw), max(h, nh)
    return placed, (w, h)
@helper_for(main)
def findSilence(audio, **kwargs):
    """Locate silent stretches with ffmpeg's silencedetect filter.

    Returns (bitmap, longest_silence): `bitmap` marks silent frames at a
    fixed 12 fps grid (1 = silent), `longest_silence` is the (start, end)
    of the longest pause, later used for noise profiling.
    """
    print("detecting silence...")
    af = audio["af"]
    if len(af) > 0:
        af += ","
    silence_threshold = kwargs["silence_threshold"]
    minimum_duration = kwargs["silence_minimum_duration"]
    cmd = [
        "ffmpeg",
        "-hide_banner",
        "-nostdin",
        "-i", audio["file"],
        "-vn",
        "-af", f"{af}silencedetect=n={silence_threshold}dB:d={minimum_duration}",
        "-f", "null", "-"
    ]
    res = subprocess.run(cmd, capture_output=True, check=True)
    silences = []
    longest_silence = (0, 0)
    start, end = None, None
    # silencedetect reports start/end pairs on stderr.
    for line in res.stderr.split(b"\n"):
        if b"silencedetect" not in line:
            continue
        if b"silence_start:" in line:
            start, end = float(line.split(b" ")[-1]), None
        if b"silence_end:" in line:
            end = float(line.split(b" | ")[0].split(b" ")[-1])
            if start is not None:
                silences.append((start, end))
                if (end - start) > (longest_silence[1] - longest_silence[0]):
                    longest_silence = (start, end)
    assert len(silences) > 0
    # Rasterize the silence intervals onto a 12 fps frame bitmap.
    fps = 12
    N = int(math.ceil(silences[-1][1]) * fps) + 1
    bitmap = array.array('B', (0 for _ in range(N)))
    for start, end in silences:
        start, end = int(math.ceil(start * fps)), int(math.floor(end * fps))
        for i in range(start, end+1):
            bitmap[i] = 1
    print(f"found {len(silences)} pauses totaling {format_time(sum(bitmap) / fps)}")
    print(f"longest period of silence: {format_time(longest_silence[1] - longest_silence[0])}")
    return bitmap, longest_silence
@helper_for(findSilence)
def format_time(t):
    """Render a duration in seconds as a human-friendly string."""
    for unit in ("seconds", "minutes"):
        if t < 60:
            return f"{t:.1f} {unit}"
        t /= 60
    return f"{t:.1f} hours"
@helper_for(main)
def analyzeNoise(audio, longest_silence, tmpdir):
    """Build a sox noise profile from the longest silent stretch of the audio.

    ffmpeg decodes that span to raw mono float samples; sox consumes them
    and writes the profile later used by `noisered` in processAudio.
    """
    print("analyzing noise...")
    start, end = longest_silence
    af = audio["af"]
    if len(af) == 0:
        # Pass-through filter so the -af argument is never empty.
        af = "afifo"
    null, pipe = subprocess.DEVNULL, subprocess.PIPE
    with subprocess.Popen([
        "ffmpeg",
        "-y", "-nostdin",
        "-ss", str(start),
        "-i", audio["file"],
        "-vn",
        "-to", str(end),
        "-af", af,
        "-ar", "44100", "-ac", "1", "-f", "f32le",
        "-fflags", "+bitexact", "-flags:a", "+bitexact", "-"
    ], stdin=null, stdout=pipe, stderr=null) as ffmpeg, subprocess.Popen([
        "sox",
        "-L", "-t", "raw", "-b", "32", "-e", "floating-point",
        "-c", "1", "-r", "44100", "-",
        "-n", "noiseprof", tempfile_noise(tmpdir)
    ], stdin=pipe, stdout=null, stderr=null) as sox:
        # Pump ffmpeg's raw samples into sox by hand.
        while True:
            data = ffmpeg.stdout.read(1024)
            if not data:
                break
            sox.stdin.write(data)
        sox.stdin.close()
        assert ffmpeg.wait() == 0
        assert sox.wait() == 0
@helper_for(main)
def processAudio(audio, output, bitmap, tmpdir, noiseremove_factor):
    """Denoise the audio and cut the silent frames, writing tmp audio.mp4.

    Pipeline: ffmpeg (decode + input filters) -> sox noisered (using the
    profile from analyzeNoise) -> ffmpeg (output filters + AAC encode).
    Chunks corresponding to silent bitmap frames are dropped.
    """
    print("processing audio...")
    af1 = audio["af"]
    if len(af1) == 0:
        # Pass-through filter so the -af argument is never empty.
        af1 = "afifo"
    af2 = output["af"]
    if len(af2) == 0:
        af2 = "afifo"
    null, pipe = subprocess.DEVNULL, subprocess.PIPE
    progress = tqdm.tqdm(total=len(bitmap), unit="f")
    with subprocess.Popen([
        "ffmpeg",
        "-y", "-nostdin",
        "-ss", "0",
        "-i", audio["file"],
        "-vn",
        "-af", af1,
        "-ar", "44100", "-ac", "1", "-f", "f32le",
        "-fflags", "+bitexact", "-flags:a", "+bitexact", "-"
    ], stdin=null, stdout=pipe, stderr=null) as ffmpeg1:
        with subprocess.Popen([
            "sox",
            "-L", "-t", "raw", "-b", "32", "-e", "floating-point",
            "-c", "1", "-r", "44100", "-",
            "-L", "-t", "raw", "-b", "32", "-e", "floating-point",
            "-c", "1", "-r", "44100", "-",
            "noisered",
            tempfile_noise(tmpdir),
            str(noiseremove_factor)
        ], stdin=ffmpeg1.stdout, stdout=pipe, stderr=null) as sox, subprocess.Popen([
            "ffmpeg",
            "-y", "-nostdin",
            "-ar", "44100", "-ac", "1", "-f", "f32le",
            "-i", "-",
            "-af", af2,
            "-c:a", "aac", "-ar", "44100", "-ac", "1", "-b:a", "64k",
            "-f", "mp4",
            tempfile_audio(tmpdir)
        ], stdin=pipe, stdout=null, stderr=null) as ffmpeg2:
            # One bitmap entry covers 1/12 s of mono f32 samples (4 bytes each).
            size = 4 * 44100 // 12
            i = 0
            while True:
                data = sox.stdout.read(size)
                if not data:
                    break
                # Forward only the chunks that are not marked silent.
                if i >= len(bitmap) or bitmap[i] == 0:
                    ffmpeg2.stdin.write(data)
                i += 1
                progress.update()
            ffmpeg2.stdin.close()
            assert ffmpeg1.wait() == 0
            assert sox.wait() == 0
            assert ffmpeg2.wait() == 0
    progress.close()
@helper_for(main)
def processVideo(videos, output, bitmap, tmpdir, w, h):
    """Composite the videos, drop silent frames, and mux with the tmp audio.

    ffmpeg #1 overlays every input on a black w x h canvas at 12 fps and
    emits raw frames; frames marked silent in `bitmap` are dropped; ffmpeg
    #2 applies the output filters, encodes H.264 and copies in the audio
    produced by processAudio.
    """
    print("processing video...")
    ovf = output["vf"]
    if len(ovf) == 0:
        # Pass-through filter so the -vf argument is never empty.
        ovf = "fifo"
    ffmpeg1cmd = [
        "ffmpeg",
        "-y", "-nostdin"
    ]
    duration = (len(bitmap) + 1) / 12
    # Bottom layer: black background sized to the combined canvas.
    chain = [f"color=c=black,fps=fps=12,trim=duration={duration},scale={w}x{h},setsar=sar=1[bg0]"]
    for i, v in enumerate(videos):
        ffmpeg1cmd += [
            "-ss", "0", "-i", v["file"]
        ]
        vf = v["vf"]
        if len(vf) == 0:
            vf = "fifo"
        # Per-input filters first, then overlay onto the running composite.
        chain.insert(0, f"[{i}:v]{vf}[v{i}]")
        y, x = v["place"]
        chain.append(f"[bg{i}][v{i}]overlay=x={x}:y={y}[bg{i+1}]")
    ffmpeg1cmd += [
        "-filter_complex", ";".join(chain),
        "-an", "-map", f"[bg{len(videos)}]",
        "-r", "12",
        "-s", "{}x{}".format(w, h),
        "-pix_fmt", "yuv420p16le", "-f", "rawvideo", "-"
    ]
    ffmpeg2cmd = [
        "ffmpeg",
        "-y",
        "-f", "rawvideo", "-pix_fmt", "yuv420p16le",
        "-video_size", "{}x{}".format(w, h),
        "-framerate", "12",
        "-i", "-",
        "-f", "mp4",
        "-i", tempfile_audio(tmpdir),
        "-vf", ovf,
        "-c:v", "libx264",
        "-pix_fmt", "yuv420p",
        "-profile:v", "baseline",
        "-crf", "23",
        "-acodec", "copy",
        "-f", "mp4",
        "-movflags", "faststart",
        output["file"]
    ]
    null, pipe = subprocess.DEVNULL, subprocess.PIPE
    progress = tqdm.tqdm(total=len(bitmap), unit="f")
    with subprocess.Popen(ffmpeg1cmd, stdin=null, stdout=pipe, stderr=null) as ffmpeg1:
        with subprocess.Popen(ffmpeg2cmd, stdin=pipe, stdout=null, stderr=null) as ffmpeg2:
            # One yuv420p16le frame: w*h*1.5 samples at 2 bytes = w*h*3 bytes.
            size = w * h * 3
            i = 0
            while True:
                data = ffmpeg1.stdout.read(size)
                if not data:
                    break
                # Forward only the frames that are not marked silent.
                if i >= len(bitmap) or bitmap[i] == 0:
                    ffmpeg2.stdin.write(data)
                i += 1
                progress.update()
            ffmpeg2.stdin.close()
            assert ffmpeg1.wait() == 0
            assert ffmpeg2.wait() == 0
    progress.close()
# Script entry point: dispatch to the click command group.
if __name__ == "__main__":
    cli()
|
import sys
import os
num_walks = [10,30,100,300]
dimension = [32,64,128,256]
walk_len = [5,15,50,100]
window = [5,10,20]
iteration = [1,5,10,50,100]
p_list = [0.1,0.5,1,2,10]
q_list = [0.1,0.5,1,2,10]
input_file = "loc-brightkite_edges.txt"
for n_walk in num_walks:
dim = 128
w_len = 80
win = 10
ite = 1
p = 1
q = 1
output_file = "node2vec_" + str(n_walk) + "_" + str(dim) + "_" + str(w_len) + "_" + str(win)+ "_" + str(ite)+ "_" + str(p)+ "_" + str(q) + ".embeddings"
command = "time python2 -W ignore ../../node2vec/src/main.py --input " + input_file + " --output "+ output_file + " --dimensions " + str(dim) + " --walk-length " + str(w_len) + " --num-walks " + str(n_walk) + " --window-size " + str(win) + " --iter " + str(ite) + " --workers 8 --p " + str(p) + " --q " + str(q)
if os.path.exists(output_file):
continue
os.system(command)
print(output_file + " ---- DONE")
for dim in dimension:
n_walk = 10
w_len = 80
win = 10
ite = 1
p = 1
q = 1
output_file = "node2vec_" + str(n_walk) + "_" + str(dim) + "_" + str(w_len) + "_" + str(win)+ "_" + str(ite)+ "_" + str(p)+ "_" + str(q) + ".embeddings"
command = "time python2 -W ignore ../../node2vec/src/main.py --input " + input_file + " --output "+ output_file + " --dimensions " + str(dim) + " --walk-length " + str(w_len) + " --num-walks " + str(n_walk) + " --window-size " + str(win) + " --iter " + str(ite) + " --workers 8 --p " + str(p) + " --q " + str(q)
if os.path.exists(output_file):
continue
os.system(command)
print(output_file + " ---- DONE")
for w_len in walk_len:
n_walk = 10
dim = 128
win = 10
ite = 1
p = 1
q = 1
output_file = "node2vec_" + str(n_walk) + "_" + str(dim) + "_" + str(w_len) + "_" + str(win)+ "_" + str(ite)+ "_" + str(p)+ "_" + str(q) + ".embeddings"
command = "time python2 -W ignore ../../node2vec/src/main.py --input " + input_file + " --output "+ output_file + " --dimensions " + str(dim) + " --walk-length " + str(w_len) + " --num-walks " + str(n_walk) + " --window-size " + str(win) + " --iter " + str(ite) + " --workers 8 --p " + str(p) + " --q " + str(q)
if os.path.exists(output_file):
continue
os.system(command)
print(output_file + " ---- DONE")
for win in window:
n_walk = 10
dim = 128
w_len = 80
ite = 1
p = 1
q = 1
output_file = "node2vec_" + str(n_walk) + "_" + str(dim) + "_" + str(w_len) + "_" + str(win)+ "_" + str(ite)+ "_" + str(p)+ "_" + str(q) + ".embeddings"
command = "time python2 -W ignore ../../node2vec/src/main.py --input " + input_file + " --output "+ output_file + " --dimensions " + str(dim) + " --walk-length " + str(w_len) + " --num-walks " + str(n_walk) + " --window-size " + str(win) + " --iter " + str(ite) + " --workers 8 --p " + str(p) + " --q " + str(q)
if os.path.exists(output_file):
continue
os.system(command)
print(output_file + " ---- DONE")
for ite in iteration:
n_walk = 10
dim = 128
w_len = 80
win = 10
p = 1
q = 1
output_file = "node2vec_" + str(n_walk) + "_" + str(dim) + "_" + str(w_len) + "_" + str(win)+ "_" + str(ite)+ "_" + str(p)+ "_" + str(q) + ".embeddings"
command = "time python2 -W ignore ../../node2vec/src/main.py --input " + input_file + " --output "+ output_file + " --dimensions " + str(dim) + " --walk-length " + str(w_len) + " --num-walks " + str(n_walk) + " --window-size " + str(win) + " --iter " + str(ite) + " --workers 8 --p " + str(p) + " --q " + str(q)
if os.path.exists(output_file):
continue
os.system(command)
print(output_file + " ---- DONE")
for p in p_list:
n_walk = 10
dim = 128
w_len = 80
win = 10
ite = 1
q = 1
output_file = "node2vec_" + str(n_walk) + "_" + str(dim) + "_" + str(w_len) + "_" + str(win)+ "_" + str(ite)+ "_" + str(p)+ "_" + str(q) + ".embeddings"
command = "time python2 -W ignore ../../node2vec/src/main.py --input " + input_file + " --output "+ output_file + " --dimensions " + str(dim) + " --walk-length " + str(w_len) + " --num-walks " + str(n_walk) + " --window-size " + str(win) + " --iter " + str(ite) + " --workers 8 --p " + str(p) + " --q " + str(q)
if os.path.exists(output_file):
continue
os.system(command)
print(output_file + " ---- DONE")
for q in q_list:
n_walk = 10
dim = 128
w_len = 80
win = 10
ite = 1
p = 1
output_file = "node2vec_" + str(n_walk) + "_" + str(dim) + "_" + str(w_len) + "_" + str(win)+ "_" + str(ite)+ "_" + str(p)+ "_" + str(q) + ".embeddings"
command = "time python2 -W ignore ../../node2vec/src/main.py --input " + input_file + " --output "+ output_file + " --dimensions " + str(dim) + " --walk-length " + str(w_len) + " --num-walks " + str(n_walk) + " --window-size " + str(win) + " --iter " + str(ite) + " --workers 8 --p " + str(p) + " --q " + str(q)
if os.path.exists(output_file):
continue
os.system(command)
print(output_file + " ---- DONE")
|
from os import name
from flask_admin import model
from pymongo import MongoClient
from bson.objectid import ObjectId
import flask_admin as admin
from wtforms import form, fields
from flask_admin.form import Select2Widget
from flask_admin.contrib.pymongo import ModelView, filters, view
from bson.json_util import dumps
import json
from bson import json_util
import html
from flask import Flask, render_template, jsonify, request, session, url_for, redirect, flash
# from flask_pymongo import PyMongo
from wtforms.fields.simple import FileField
from werkzeug.security import generate_password_hash, check_password_hash
#도도 임포트
import datetime
#회원가입 비밀번호 암호화를 위해 werkzeug import
# Create application
app = Flask(__name__)
# Login feature setup (original note: unclear how this differs from the
# SECRET_KEY below).
# author 김진회
#app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'filesystem'  # server-side session storage backend
#########################################################
# Flask app / MongoDB connection.
# Local-test DB connection (pymongo):
conn = MongoClient()
# Server-side DB connection (pymongo):
#conn = MongoClient('mongodb://test:test@localhost', 27017)
db = conn.bdd
## Unused alternatives kept from earlier experiments:
# Local-test DB connection for flask_pymongo:
#app.config["MONGO_URI"] = "mongodb://localhost:27017/bdd"
# Server-side DB connection for flask_pymongo:
#app.config["MONGO_URI"] = "mongodb://test:test@localhost:27017/bdd"
#app.config['SECRET_KEY'] = 'psswrd'
#mongo = PyMongo(app)
# NOTE(review): hard-coded secrets should come from configuration/environment.
app.secret_key = 'supersupersecret'
#########################################################
# Create dummy secret key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create models
# User admin
# author 배성현
class menu_form(form.Form):
    """Admin form for a menu item: image, name, price, category, visibility."""
    img = fields.StringField('사진')
    menu = fields.StringField('메뉴')
    price = fields.StringField('가격')
    category = fields.SelectField('카테고리', choices= [('김치', '김치'),
        ('기본반찬/나물', '기본반찬/나물'), ('국/탕/찌개', '국/탕/찌개'), ('조림/구이', '조림/구이'), ('튀김/전', '튀김/전'), ('도시락', '도시락'), ('제사/명절','제사/명절')])
    hide = fields.SelectField('숨김/보임 (1/0)', choices= [('0','보임'), ('1','숨김')] )

# The attribute name (e.g. `key = fields.StringField('label')`) is the DB key
# the value is stored under; the label is what the input box displays.
# User admin
# author 배성현
class order_form(form.Form):
    """Admin form for an order: fulfilment state plus shipping details."""
    state = fields.SelectField ('주문 상태', choices= [('입금확인중','입금확인중'),('결제완료', '결제완료'),('상품준비중','상품준비중'),('배송중','배송중'),('배송완료','배송완료')])
    deliverynum = fields.StringField('송장번호')
    deliverycompany = fields.SelectField ('택배 회사', choices= [('kr.chunilps','천일택배'),('kr.cjlogistics', 'CJ대한통운'),('kr.cupost','CU 편의점택배'),('kr.cvsnet','GS Postbox 택배'),('kr.cway','CWAY (Woori Express)'),('kr.daesin','대신택배'),('kr.epost','우체국 택배'),('kr.hanips','한의사랑택배'),('kr.hanjin','한진택배'),('kr.hdexp','합동택배'),('kr.homepick','홈픽'),('kr.honamlogis','한서호남택배'),('kr.ilyanglogis','일양로지스'),('kr.kdexp','경동택배'),('kr.kunyoung','건영택배'),('kr.logen','로젠택배'),('kr.lotte','롯데택배')])

class origin_form(form.Form):
    """Admin form for one ingredient's country-of-origin entry."""
    name = fields.StringField ('재료명')
    origin = fields.StringField ('원산지')

class user_form(form.Form):
    """Admin form for a user account (password change only)."""
    pw = fields.StringField ('비밀번호 변경')
    # TODO(review): original note — undecided whether password change
    # should actually be exposed here.
# User admin
# author 배성현
class menu_view(ModelView):
    """Admin list view for menu items."""
    column_list = ('img', 'menu', 'price','category','hide')  # DB keys to display
    form = menu_form

# User admin
# author 배성현
class order_view(ModelView):
    """Admin list view for orders; every column is sortable."""
    column_list = ('name', 'menu', 'phone', 'address','postcode', 'price', 'state' , 'date','postmsg' , 'today', 'deliverycompany', 'deliverynum')
    column_sortable_list = ('name', 'menu', 'phone', 'address','postcode', 'price', 'state' , 'date','postmsg' , 'today' ,'deliverycompany', 'deliverynum')
    can_edit = True
    form = order_form

# User admin
# author 배성현
class origin_view(ModelView):
    """Admin list view for country-of-origin entries."""
    column_list = ('name', 'origin')
    form = origin_form

class user_view(ModelView):
    """Admin list view for user accounts."""
    column_list = ('userid', 'pw', 'phone', 'postcode', 'address', 'extraAddress')
    form = user_form
# User admin
# author 배성현
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that serialises BSON ObjectId values as strings."""

    def default(self, o):
        # ObjectId is not natively JSON-serialisable; emit its hex string.
        if isinstance(o, ObjectId):
            return str(o)
        # Everything else defers to the stock encoder (raises TypeError).
        return super().default(o)
# return model
# Flask views
# author 김진회
# 세션에 logged_in값이 true면 로그인 상태
@app.route('/')
def main():
    """Landing page; shows the logged-in user id when one is present."""
    return render_template('index.html', userid=session.get('logged_in'))

@app.route('/header.html')
def header():
    """Shared page-header fragment, aware of the login state."""
    return render_template('header.html', userid=session.get('logged_in'))

@app.route('/footer.html')
def footer():
    """Shared page-footer fragment."""
    return render_template('footer.html')
# 로그인기능 및 페이지 구현
# author 김진회
# modifier 이민훈 2021.08.05
# session["logged_in"] = True 를 넣어주면 로그인 성공한 이후의 상황이 됨.
# 로그인 페이지 별도 개설로 인해, 링크 및 render_template 페이지 변경
@app.route('/login_main', methods=['GET', 'POST'])
def member_login():
    """Render the login page (GET) or authenticate the user (POST).

    On success the user id is stored in session["logged_in"]; on any
    failure a flash message explains why and the login page is re-shown.
    """
    if request.method == 'GET':
        return render_template('login_page.html')
    userid = request.form.get("userid", type=str)
    pw = request.form.get("userPW", type=str)
    # form.get returns None when the field is missing entirely, so test
    # falsiness rather than equality with "" (the original let None through).
    if not userid:
        flash("아이디를 입력하세요")
        return render_template('login_page.html')
    if not pw:
        flash("비밀번호를 입력하세요")
        return render_template('login_page.html')
    users = db.users
    id_check = users.find_one({"userid": userid})
    if id_check is None:
        flash("아이디가 존재하지 않습니다.")
        return render_template('login_page.html')
    if check_password_hash(id_check["pw"], pw):
        session["logged_in"] = userid
        return render_template('index.html', userid=userid)
    flash("비밀번호가 틀렸습니다.")
    return render_template('login_page.html')
## 로그아웃
@app.route("/logout", methods=["GET"])
def logout():
    # Dropping the session marker logs the user out; back to the main page.
    session.pop('logged_in',None)
    return redirect('/')
## 회원가입
@app.route("/join", methods=["GET", "POST"])
def member_join():
    """Render the sign-up form (GET) or create a new account (POST).

    Passwords are stored hashed; duplicate ids are rejected.
    """
    if request.method == "POST":
        userid = request.form.get("userid", type=str)
        pw = request.form.get("userPW", type=str)
        name = request.form.get("name", type=str)
        phone = request.form.get("phone1", type=str)+"-"+request.form.get("phone2", type=str)+"-"+request.form.get("phone3", type=str)
        postcode = request.form.get("zipcode", type=str)
        addr = request.form.get("addr", type=str)
        extraAddr = request.form.get("addr_remain", type=str)
        # form.get returns None for a missing field; falsiness covers both
        # None and "" (the original only caught "").
        if not userid:
            flash("ID를 입력해주세요")
            return render_template("join.html")
        elif not pw:
            flash("패스워드를 입력해주세요")
            return render_template("join.html")
        users = db.users
        # count_documents replaces Cursor.count(), removed in PyMongo 4.
        check_cnt = users.count_documents({"userid": userid})
        if check_cnt > 0:
            flash("이미 존재하는 아이디입니다.")
            return render_template("join.html")
        to_db = {
            "userid": userid,
            "pw": generate_password_hash(pw),
            "name": name,
            "phone": phone,
            "postcode": postcode,
            "address": addr,
            "extraAddress": extraAddr,
            "orderlisttest":[]
        }
        users.insert_one(to_db)
        # Debug: dump the five most recent sign-ups to the console.
        last_signup = users.find().sort("_id", -1).limit(5)
        for doc in last_signup:
            print(doc)
        flash("가입이 완료되었습니다. 감사합니다!")
        return render_template("index.html")
    else:
        return render_template("join.html")
## 회원가입 아이디 중복체크
@app.route("/join/checkid", methods=["POST"])
def join_id_check():
    """AJAX duplicate-id check used by the sign-up form."""
    userid = request.form['userid']
    # count_documents replaces Cursor.count(), removed in PyMongo 4.
    check_cnt = db.users.count_documents({"userid": userid})
    if check_cnt > 0:
        msg = "이미 존재하는 아이디입니다."
    else:
        msg = "이용 가능한 아이디입니다."
    return jsonify({'msg':msg})
## 회원정보변경
@app.route("/modify", methods=["GET", "POST"])
def member_modify():
    """Render the profile-edit form (GET) or update the account (POST).

    The record to update is identified by the session's login id, not by
    anything the client posts.
    """
    if request.method == "POST":
        update_id = session.get('logged_in')
        pw = request.form.get("userPW", type=str)
        name = request.form.get("name", type=str)
        phone = request.form.get("phone1", type=str)+"-"+request.form.get("phone2", type=str)+"-"+request.form.get("phone3", type=str)
        postcode = request.form.get("zipcode", type=str)
        addr = request.form.get("addr", type=str)
        extraAddr = request.form.get("addr_remain", type=str)
        # form.get returns None for a missing field; falsiness covers both
        # None and "". (The unused `userid` form read was dropped.)
        if not pw:
            flash("패스워드를 입력해주세요")
            return render_template("modify.html")
        users = db.users
        to_db = {
            "pw": generate_password_hash(pw),
            "name": name,
            "phone": phone,
            "postcode": postcode,
            "address": addr,
            "extraAddress": extraAddr,
        }
        users.update_one({'userid': update_id}, {'$set': to_db})
        # Debug: dump the five most recent records to the console.
        last_signup = users.find().sort("_id", -1).limit(5)
        for doc in last_signup:
            print(doc)
        flash("변경이 완료되었습니다.")
        return render_template("index.html", userid = update_id)
    else:
        return render_template("modify.html")
# @app.route('/join', methods=['GET', 'POST'])
# def join():
# return render_template('join.html')
# 일단 보류
# @app.route('/habit_s', methods=['GET'])
# def show_habit():
# orders = list(db.user.find({}, {'_id': False}))
# return jsonify({'all_order': orders})
## 주문페이지
@app.route('/order')
def order():
    """Order form; pre-filled with profile data for logged-in users."""
    userid = session.get('logged_in')
    if userid is None:
        return render_template('order.html')
    profile = db.users.find_one({"userid": userid})
    phone_parts = profile['phone'].split('-')
    print(profile['address'], profile['extraAddress'])
    return render_template(
        'order.html',
        name=profile['name'],
        phone1=phone_parts[0],
        phone2=phone_parts[1],
        phone3=phone_parts[2],
        postcode=profile['postcode'],
        address=profile['address'],
        exaddress=profile['extraAddress'],
    )
##주문조회
@app.route('/orderlist')
def orderlist():
    """Order-history page for the current visitor."""
    return render_template('orderlist.html', userid=session.get('logged_in'))
##주문조회 삭제
@app.route('/orderlist/dele',methods=['POST'])
def dele_orderlist():
    """Delete one order by its ObjectId, but only while its state is still
    '입금확인중' (awaiting payment); paid orders cannot be removed.

    Logged-in users additionally have the order id removed from their
    account's `orderlisttest` array.
    """
    id = request.form['id']
    msg = ''
    deleid = list(db.order.find({'_id':ObjectId(id)}))
    #print(deleid[0]['state'])
    userid = session.get('logged_in')
    if userid is not None:
        if deleid[0]['state'] == '입금확인중':
            # Read-modify-write of the user's order-id list.
            # NOTE(review): not atomic — concurrent requests could lose
            # updates; a $pull update would avoid that.
            test = list(db.users.find({'userid':userid}))[0]['orderlisttest']
            print(test)
            test.remove(ObjectId(id))
            print(id)
            print(test)
            db.users.update_one({'userid':userid},{'$set':{'orderlisttest':test}})
            db.order.delete_one({'_id':ObjectId(id)})
            msg = '삭제완료!'
        else:
            msg = '결제가 완료되어 삭제가 불가능합니다. 전화 문의부탁드립니다.'
    else :
        # Guests: only the order document itself needs deleting.
        if deleid[0]['state'] == '입금확인중':
            db.order.delete_one({'_id':ObjectId(id)})
            msg = '삭제완료!'
        else:
            msg = '결제가 완료되어 삭제가 불가능합니다. 전화 문의부탁드립니다.'
    return jsonify({'msg':msg})
## 주문조회 찾기
@app.route('/orderlist/find', methods=['POST'])
def find_orderlist():
    """Look up orders: by the session user when logged in, else by phone."""
    userid = session.get('logged_in')
    if userid is None:
        # Guests search by the phone number they ordered with.
        phone = request.form['phone']
        matches = list(db.order.find({'phone': phone}))
        return jsonify({'orderlist': dumps(matches), 'msg': '조회완료!'})
    order_ids = list(db.users.find({'userid': userid}))[0]['orderlisttest']
    found = [list(db.order.find({'_id': ObjectId(oid)}))[0] for oid in order_ids]
    return jsonify({'orderlist': dumps(found), 'msg': '조회완료!'})
@app.route('/manager')
def manager():
    # Admin landing page.
    return render_template('manager_main.html')

@app.route('/details')
def details():
    # Country-of-origin details page.
    return render_template('details.html')
@app.route('/details/get', methods=['GET'])
def details_get():
    """Return every country-of-origin document as JSON (without _id)."""
    originList = list(db.origin.find({}, {'_id':False}))
    return jsonify({'originList': originList})
    # (Removed an unreachable `raise TypeError` that followed the return.)
@app.route('/maps')
def kakaomaps():
    # Map page (Kakao maps, per the function name).
    return render_template('maps.html')
@app.route('/mypage/do', methods=['GET'])
def post_test():
    """Return every menu document, BSON-serialised, as JSON."""
    test = list(db.menu.find({},{'_id': False}))
    #test = [doc for doc in db.user.find({},{'_id': False})]
    return jsonify({'data': dumps(test)})
    # (Removed an unreachable `raise TypeError` that followed the return.)
@app.route('/bluenight/check', methods=['POST'])
def admin_pass():
    """Check the admin pass-phrase sent from the client."""
    something = request.form['pass']
    # SECURITY(review): the admin pass-phrase is hard-coded in source and
    # compared in plain text — move it to configuration and store a hash.
    correct = "cha"
    if(something == correct):
        return jsonify({'chk':'true'})
    else:
        return jsonify({'chk':'false','msg':'틀렸습니다'})
@app.route('/order/do', methods=['POST'])
def ordersave():
    """Persist a new order and, for logged-in users, attach its id to the
    account's order history.

    All free-text fields are HTML-escaped before storage.
    """
    name_receive = html.escape(request.form['name'])
    addr_receive = html.escape(request.form['addr'])
    code_receive = html.escape(request.form['code'])
    phone_receive = html.escape(request.form['phone'])
    # Each entry is a [menu name, price, quantity] triple from the cart.
    orderlist_receive = request.form.getlist('orderlist[]')
    date_receive = html.escape(request.form['date'])
    postmsg_receive = html.escape(request.form['ero'])
    pricefinal_receive = html.escape(request.form['price_final'])
    doc = {
        'name':name_receive,
        'address':addr_receive,
        'postcode':code_receive,
        'phone':phone_receive,
        'menu':orderlist_receive,
        'date':date_receive,
        'postmsg':postmsg_receive,
        'price':pricefinal_receive,
        'state': '입금확인중',
        'today': datetime.datetime.now(),
        'deliverycompany': '입력대기중.',
        'deliverynum': '입력해주세요~'
    }
    # insert_one returns the new document's id directly; the original
    # re-queried the collection for the document it had just inserted.
    order_id = db.order.insert_one(doc).inserted_id
    print(order_id)
    userid = session.get('logged_in')
    if userid is not None:
        # $push appends atomically instead of the original read-modify-write
        # of the whole orderlisttest array. (Unused `ab = []` removed.)
        db.users.update_one({'userid': userid}, {'$push': {'orderlisttest': order_id}})
    return jsonify({'msg': name_receive+'님의 주문이 완료되었습니다. 계좌입금 부탁드립니다!'})
if __name__ == '__main__':
    # TODO: change the admin URL.
    # NOTE(review): this rebinds the imported flask_admin module name
    # `admin` to an Admin instance.
    admin = admin.Admin(app, name='맘스키친', url='/bluenight')
    # Add views
    admin.add_view(menu_view(db.menu, '상품관리', url='/Product_management'))
    admin.add_view(order_view(db.order, '주문내역', url='/Order_details'))
    admin.add_view(origin_view(db.origin, '원산지표기', url='/Country_of_origin'))
    admin.add_view(user_view(db.users, '회원 정보', url='/moms_users'))
    # Start app
    app.run('0.0.0.0', port=5000, debug=True)
|
import cv2

# Play ../data/seniorita.mp4 frame by frame as greyscale; press 'q' to stop.
cap = cv2.VideoCapture('../data/seniorita.mp4')

while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        # Either the stream ended or a frame could not be decoded.
        print("Video finished")
        break
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', grey)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
from django.test import TestCase
from rest_framework.response import Response
from django.contrib.auth import get_user_model
from rest_framework import request, status
from rest_framework.test import APIClient
User = get_user_model()
# Create your tests here.
class JwtAuthTest(TestCase):
    """End-to-end tests for the JWT cookie authentication endpoints."""

    def setUp(self) -> None:
        self.client = APIClient()
        self.user = User.objects.create_user(
            email='test_user@email.com', password='password'
        )

    def test_signup(self) -> None:
        """Registering sets both the access ('jwt') and refresh cookies."""
        response: Response = self.client.post(
            '/api/v1/auth/register/', {
                'first_name': 'John',
                'last_name': 'Test',
                'email': 'new_test@email.com',
                'password': 'password'
            }, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertIn('jwt', response.client.cookies)
        self.assertIn('refresh', response.client.cookies)

    def test_login(self) -> None:
        """Logging in sets auth cookies and returns the user's public id."""
        response: Response = self.client.post(
            '/api/v1/auth/login/', {
                'email': 'test_user@email.com',
                'password': 'password'
            }, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn('jwt', response.client.cookies)
        self.assertIn('refresh', response.client.cookies)
        self.assertEqual(self.user.slug, response.data.get('user').get('id'))

    def test_logout(self) -> None:
        """Logging out clears both auth cookies."""
        self.client.post(
            '/api/v1/auth/login/', {
                'email': 'test_user@email.com',
                'password': 'password'
            }, format='json')
        # BUG FIX: the URL was missing its leading slash ('api/v1/...'), so
        # the request 404'd and the assertions below passed vacuously.
        response: Response = self.client.post('/api/v1/auth/logout/')
        self.assertNotIn('jwt', response.cookies)
        self.assertNotIn('refresh', response.cookies)
|
#import win32com.client
import os
import skimage
import skimage.viewer
import sys
import cv2
import csv
from matplotlib import pyplot as plt
import numpy as np
import glob
editFiles = glob.glob("D:/FOOTFALL_IMAGES/CMT_*.jpeg")
for i, fname in enumerate(editFiles):
name=fname[19:30]
img = cv2.imread(editFiles[i],0)
print(i)
plt.imshow(img)
#plt.show()
ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
height, width = thresh5.shape[:2]
print(thresh5.shape)
# Let's get the starting pixel coordiantes (top left of cropped top)
start_row, start_col = int(0), int(0)
# Let's get the ending pixel coordinates (bottom right of cropped top)
end_row, end_col = int(height * .5), int(width)
#cropped_top = image[start_row:end_row , start_col:end_col]
cropped_top = thresh5[69:107 , start_col:end_col]
average1 = cropped_top.mean(axis=0).mean(axis=0)
print(start_row, end_row)
print(start_col, end_col)
plt.imshow(cropped_top,cmap='gray')
#plt.show()
#plt.waitKey(0)
#plt.destroyAllWindows()
# Let's get the starting pixel coordiantes (top left of cropped bottom)
start_row, start_col = int(height * .5), int(0)
# Let's get the ending pixel coordinates (bottom right of cropped bottom)
end_row, end_col = int(height), int(width)
#cropped_bot = image[start_row:end_row , start_col:end_col]
cropped_bot = thresh5[106:147 , start_col:end_col]
average2 = cropped_bot.mean(axis=0).mean(axis=0)
print(start_row, end_row)
print(start_col, end_col)
plt.imshow(cropped_bot,cmap='gray')
#plt.show()
#plt.waitKey(0)
#plt.destroyAllWindows()
print(cropped_top.size)
print(cropped_bot.size)
print((average1))
print((average2))
from skimage.util import img_as_float
image1 = np.sum(img_as_float(cropped_top))
image2 = np.sum(img_as_float(cropped_bot))
print(("left side",image1))
print(("right side",image2))
print((image1-image2))
if(image1 > image2):
s="Left"
else:
s="Right"
print(s)
def to_str(var):
return str(list(np.reshape(np.asarray(var), (1, np.size(var)))[0]))[1:-1]
object=[name, to_str(image1), to_str(image2),to_str(image1-image2), s]
#f = open('classifier.csv', 'w')
print(object)
header=['Participant','Rigt Side','Left Side','Difference','Classification']
with open('classifier.csv', 'a') as f:
wr = csv.writer(f, dialect='excel')
wr.writerow(object)
|
# ----------------------------------------------------------------------
# NWQBench: Northwest Quantum Proxy Application Suite
# ----------------------------------------------------------------------
# Ang Li, Samuel Stein, James Ang.
# Pacific Northwest National Laboratory(PNNL), U.S.
# BSD Lincese.
# Created 05/21/2021.
# ----------------------------------------------------------------------
import numpy as np
from qiskit import QuantumCircuit
from qiskit import execute, Aer
from qiskit_nwqsim_provider import NWQSimProvider
import sys
import math
# Circuit size comes from the command line.
n_qubits = int(sys.argv[1])

def cu1(qc, l, a, b):
    """Apply a controlled-U1(l) phase between qubits a (control) and b
    (target), decomposed into single-qubit u1 rotations and two CNOTs."""
    qc.u1(l/2, a)
    qc.cx(a, b)
    qc.u1(-l/2, b)
    qc.cx(a, b)
    qc.u1(l/2, b)

def qft(qc, n):
    """Append an n-qubit quantum Fourier transform to qc (no final swaps)."""
    for j in range(n):
        for k in range(j):
            # Controlled phase rotation of pi / 2^(j-k).
            cu1(qc, math.pi/float(2**(j-k)), j, k)
        qc.h(j)
# Build the QFT circuit and measure every qubit.
qc = QuantumCircuit(n_qubits, n_qubits)
qft(qc,n_qubits)
qc.measure_all()
#qasm_file = open("qft_n" + str(n_qubits) + ".qasm","w")
#qasm_file.write(qc.qasm())
#qasm_file.close()
# Reference run on the Qiskit Aer QASM simulator.
simulator = Aer.get_backend('qasm_simulator')
job = execute(qc,simulator,shots=10)
result = job.result()
counts = result.get_counts(qc)
print (counts)
# Same circuit on the NWQSim GPU backend for comparison.
nwqsim = NWQSimProvider('DMSimSimulator')
dmsim = nwqsim.backends['dmsim_gpu']
job = execute(qc,dmsim,shots=10)
result = job.result()
counts = result.get_counts(qc)
print (counts)
|
import sys
from maraboupy import Marabou, MarabouUtils, MarabouCore
import numpy as np
from eval_network import evaluateNetwork
from tensorflow.python.saved_model import tag_constants
## SD QUERY : the situation is great and we were expected bitrate to be HD, but actual bitrate is SD
def create_network(filename,k):
    """Load the TensorFlow protobuf in *filename* into Marabou.

    Returns (network, input_op_names, output_op_name). The 41 Placeholder
    ops feed the actor network whose output op is fully_connected_7/BiasAdd.
    `k` is currently unused here (kept for interface symmetry with k_test).
    """
    # TODO check again the input op
    input_op_names = ["Placeholder", "Placeholder_1", "Placeholder_2", "Placeholder_3", "Placeholder_4", "Placeholder_5", "Placeholder_6", "Placeholder_7", "Placeholder_8", "Placeholder_9", "Placeholder_10", "Placeholder_11", "Placeholder_12", "Placeholder_13", "Placeholder_14", "Placeholder_15", "Placeholder_16", "Placeholder_17", "Placeholder_18", "Placeholder_19", "Placeholder_20", "Placeholder_21", "Placeholder_22", "Placeholder_23", "Placeholder_24", "Placeholder_25", "Placeholder_26", "Placeholder_27", "Placeholder_28", "Placeholder_29", "Placeholder_30", "Placeholder_31", "Placeholder_32", "Placeholder_33", "Placeholder_34", "Placeholder_35", "Placeholder_36", "Placeholder_37", "Placeholder_38", "Placeholder_39", "Placeholder_40"]
    output_op_name = "actor_agent_30/fully_connected_7/BiasAdd"
    network = Marabou.read_tf(filename,inputName=input_op_names,outputName=output_op_name)
    return network, input_op_names, output_op_name
# Network inputs:
# TODO
def k_test(filename, k):
    """Build the query for the network in *filename* and solve it with Marabou.

    Every output is left effectively unconstrained in [-1e6, 1e6].
    Returns 'SAT' when Marabou found a satisfying assignment, else 'UNSAT'.
    """
    network, input_op_names, output_op_name = create_network(filename, k)
    inputVars = network.inputVars
    outputVars = network.outputVars
    print("inputVars:", inputVars)
    print("outputVars:", outputVars)
    # Effectively unconstrained output bounds.
    for j in range(len(outputVars)):
        network.setLowerBound(outputVars[j], -1e6)
        network.setUpperBound(outputVars[j], 1e6)
    # (An optional output-comparison constraint — e.g. requiring the last
    # chunk's chosen action score to dominate another's — can be added here
    # via MarabouUtils.Equation before solving.)
    print("\nMarabou results:\n")
    # Call into the C++ Marabou solver.
    vals, stats = network.solve(verbose=True)
    print(vals)
    # BUG FIX: the original returned `result` without ever assigning it
    # (NameError); compute it once and reuse it for the log line.
    result = 'SAT' if len(list(vals.items())) != 0 else 'UNSAT'
    print('marabou solve run result: {} '.format(result))
    return result
def main():
    """Entry point: parse <pb_filename> and <k> from argv and run k_test."""
    if len(sys.argv) != 3:  # was `not in [3]`, an odd spelling of !=
        print("usage:", sys.argv[0], "<pb_filename> [k] ")
        # Usage errors should exit non-zero (the original exited 0).
        sys.exit(1)
    filename = sys.argv[1]
    k = int(sys.argv[2])
    k_test(filename, k)

if __name__ == "__main__":
    main()
|
from heapq import heappop, heappush
class Solution:
    def shortestDistance(self, maze, start, destination):
        """LeetCode 505 "The Maze II": Dijkstra over the positions where a
        rolling ball can stop; returns the shortest roll distance from
        start to destination, or -1 when the ball can never stop there."""
        start, destination = tuple(start), tuple(destination)
        rows, cols = len(maze), len(maze[0])
        best = {start: 0}
        heap = [(0,) + start]
        while heap:
            dist, row, col = heappop(heap)
            if row == destination[0] and col == destination[1]:
                return dist
            if best.get((row, col), float('inf')) < dist:
                continue  # stale heap entry; a shorter path was found already
            for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                # Roll until the ball hits a wall or the maze boundary.
                nr, nc, steps = row, col, 0
                while 0 <= nr + dr < rows and 0 <= nc + dc < cols and maze[nr + dr][nc + dc] != 1:
                    nr += dr
                    nc += dc
                    steps += 1
                if dist + steps < best.get((nr, nc), float('inf')):
                    best[(nr, nc)] = dist + steps
                    heappush(heap, (dist + steps, nr, nc))
        return -1
# Example 2 of LeetCode 505: the ball cannot stop at (3, 2), so this prints -1.
print(Solution().shortestDistance(
    [[0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [1, 1, 0, 1, 1], [0, 0, 0, 0, 0]],
    [0, 4],
    [3, 2]))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-03-15 21:19:23
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
class Solution(object):
    def generateAbbreviations(self, word):
        """
        Return every generalized abbreviation of *word* (LeetCode 320).

        :type word: str
        :rtype: List[str]

        Builds the result iteratively: for each character, every existing
        abbreviation either grows its trailing run-length count or has the
        character appended literally, doubling the list each step.
        """
        if not word:
            return []
        abb_list = [""]
        for ch in word:
            n = len(abb_list)
            for i in range(n):
                abb = abb_list[i]
                # BUG FIX: strip the whole trailing digit run (it may span
                # several characters, e.g. "w12") before incrementing; the
                # original only looked at the last character, producing
                # wrong results for counts >= 10.
                j = len(abb)
                while j > 0 and abb[j - 1].isdigit():
                    j -= 1
                if j < len(abb):
                    abb_list.append(abb[:j] + str(int(abb[j:]) + 1))
                else:
                    abb_list.append(abb + '1')
            # The first n entries take the literal character.
            # (Debug print of the whole list removed.)
            for i in range(n):
                abb_list[i] += ch
        return abb_list
# Demo: print all 16 generalized abbreviations of "word".
solver = Solution()
print(solver.generateAbbreviations("word"))
#!/usr/bin/env python3
import os
import pprint
import fnmatch
from PhysicsTools.HeppyCore.utils.dataset import createDataset
if __name__ == '__main__':
    # CLI: print information (files, sizes, optional integrity report)
    # about one dataset.
    import sys
    from optparse import OptionParser
    import pprint
    parser = OptionParser()
    parser.usage = "%prog [options] <dataset>\nPrints information on a sample."
    parser.add_option("-w", "--wildcard", dest="wildcard", default='tree*root',help='A UNIX style wilfcard for root file printout')
    parser.add_option("-u", "--user", dest="user", default=os.environ.get('USER', None),help='user owning the dataset.\nInstead of the username, give "LOCAL" to read datasets in a standard unix filesystem, and "CMS" to read official CMS datasets present at CERN.')
    parser.add_option("-b", "--basedir", dest="basedir", default=os.environ.get('CMGLOCALBASEDIR',None),help='in case -u LOCAL is specified, this option allows to specify the local base directory containing the dataset. default is CMGLOCALBASEDIR')
    parser.add_option("-a", "--abspath", dest="abspath",
                      action = 'store_true',
                      default=False,
                      help='print absolute path')
    parser.add_option("-n", "--noinfo", dest="noinfo",
                      action = 'store_true',
                      default=False,
                      help='do not print additional info (file size and status)')
    parser.add_option("-r", "--report", dest="report",
                      action = 'store_true',
                      default=False,
                      help='Print edmIntegrityCheck report')
    parser.add_option("-c", "--readcache", dest="readcache",
                      action = 'store_true',
                      default=False,
                      help='Read from the cache.')
    parser.add_option("--min-run", dest="min_run", default=-1, type=int, help='When querying DBS, require runs >= than this run')
    parser.add_option("--max-run", dest="max_run", default=-1, type=int, help='When querying DBS, require runs <= than this run')
    (options,args) = parser.parse_args()
    # Exactly one positional argument: the dataset name.
    if len(args)!=1:
        parser.print_help()
        sys.exit(1)
    user = options.user
    name = args[0]
    info = not options.noinfo
    run_range = (options.min_run,options.max_run)
    # Resolve and print the dataset: files, sizes, and (optionally) the
    # edmIntegrityCheck report.
    data = createDataset(user, name,
                         fnmatch.translate( options.wildcard ),
                         options.readcache,
                         options.basedir,
                         run_range=run_range)
    data.printInfo()
    data.printFiles(abspath = options.abspath,
                    info = info)
    pprint.pprint( data.filesAndSizes )
    if options.report:
        pprint.pprint( data.report )
|
# -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class ProductProduct(models.Model):
    _inherit = 'product.product'

    @api.model
    def get_products_by_pricelist(self, product_ids = None, pricelist_ids = None,):
        """
        Calculate the price of the given products under each pricelist.

        Both arguments accept None (meaning "all"), a single id, or a list
        of ids.
        @return: dictionary of dictionaries {pricelist_id: {product_id: price}}
        """
        pricelist_obj = self.env['product.pricelist']
        product_price_res = {}
        # Default to every pricelist; normalise a bare id to a list.
        if not pricelist_ids:
            pricelist_ids = pricelist_obj.sudo().search([]).sudo().ids
        elif isinstance( pricelist_ids, int ):
            pricelist_ids = [pricelist_ids]
        # Default to all POS-sellable products; normalise a bare id.
        if not product_ids:
            product_ids = self.sudo().search([( 'available_in_pos', '=', True ), ( 'sale_ok', '=', True )]).sudo().ids
        elif isinstance( product_ids, int ):
            product_ids = [product_ids]
        for pricelist_id in pricelist_ids:
            product_price_res.update( {pricelist_id: {}} )
            for product_id in product_ids:
                # Unit price for quantity 1.0 under this pricelist.
                price = pricelist_obj.price_get(product_id, 1.0)[pricelist_id]
                product_price_res[pricelist_id].update( {product_id: price} )
        return product_price_res
class PosOrder(models.Model):
    _inherit = 'pos.order'

    @api.model
    def _order_fields(self, ui_order):
        """Propagate the POS UI's selected pricelist onto the created order."""
        order_fields = super(PosOrder, self)._order_fields(ui_order)
        order_fields['pricelist_id'] = ui_order.get( 'pricelist_id', False )
        return order_fields
|
import os
import sys
import django
# Configure Django before importing app modules that touch the ORM.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "AthleteAPI.settings")
django.setup()

from Athlete.importAthleteCSV import *

if len(sys.argv) >= 3:
    pathNOC = sys.argv[1]
    pathAthlete = sys.argv[2]
    # Import each CSV only after it passes validation.
    # (Redundant `pass` statements removed.)
    if validateNOCCSV(pathNOC):
        importNOCCSV(pathNOC)
    if validateAthleteCSV(pathAthlete):
        importAthleteCSV(pathAthlete)
else:
    # The original silently did nothing when arguments were missing.
    print("usage:", sys.argv[0], "<noc_csv> <athlete_csv>", file=sys.stderr)
# 编写一个 SQL 查询,来删除 Person 表中所有重复的电子邮箱,重复的邮箱里只保留 Id 最小 的那个。
#
# +----+------------------+
# | Id | Email |
# +----+------------------+
# | 1 | john@example.com |
# | 2 | bob@example.com |
# | 3 | john@example.com |
# +----+------------------+
# Id 是这个表的主键。
#
#
# 例如,在运行你的查询语句之后,上面的 Person 表应返回以下几行:
#
# +----+------------------+
# | Id | Email |
# +----+------------------+
# | 1 | john@example.com |
# | 2 | bob@example.com |
# +----+------------------+
#
#
#
#
# 提示:
#
#
# 执行 SQL 之后,输出是整个 Person 表。
# 使用 delete 语句。
#
#
# There is no code of Python type for this problem |
"""
You are given two non-empty linked lists representing two non-negative integers.
The digits are stored in reverse order and each of their nodes contains a single digit.
Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Example:
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
Explanation: 342 + 465 = 807.
Input: (2 -> 4) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 5
Explanation: 42 + 465 = 507
"""
class ListNode:
    """Singly linked list node holding one decimal digit."""
    def __init__(self, val=0, next=None):
        self.val = val    # digit value stored at this node
        self.next = next  # following node, or None at the tail
def convert_linked_list_to_int(linked_list):
    """Collapse a reversed-digit linked list into the integer it encodes.

    Digits are stored least-significant first, e.g. 4 -> 6 -> 2 encodes
    264 (4*10**0 + 6*10**1 + 2*10**2).
    """
    total = 0
    place = 1  # current power of ten
    node = linked_list
    while node:
        total += place * node.val
        place *= 10
        node = node.next
    return total
def create_linked_list_repr(val_int):
    """Convert a non-negative integer into a reversed-digit linked list.

    Example: 264 -> (4 -> 6 -> 2), least-significant digit first.

    Bug fix: the original stored each digit as a one-character *string*
    (``ListNode(val_int_str[-1])``), so the produced list did not
    round-trip through convert_linked_list_to_int; digits are now ints.
    """
    digits = str(val_int)
    head = ListNode(int(digits[-1]))
    prev = head
    # Remaining digits, from second-least significant upward.
    for ch in digits[:-1][::-1]:
        prev.next = ListNode(int(ch))
        prev = prev.next
    return head
def add_numbers(ll1, ll2):
    """Add two numbers given as reversed-digit linked lists.

    Strategy: decode both lists to plain ints, add them, and re-encode
    the sum as a reversed-digit linked list.
    """
    total = convert_linked_list_to_int(ll1) + convert_linked_list_to_int(ll2)
    return create_linked_list_repr(total)
###### A diff approach #######
def add_nums(l1, l2):
    """Digit-by-digit addition of two reversed-digit linked lists.

    Walks both lists in lockstep with a running carry, never converting
    to an integer first, so arbitrarily long numbers are handled.
    """
    sentinel = ListNode(0)  # dummy head; real result starts at sentinel.next
    tail = sentinel
    carry = 0
    a, b = l1, l2
    while a or b:
        digit_a = a.val if a else 0
        digit_b = b.val if b else 0
        carry, digit = divmod(digit_a + digit_b + carry, 10)
        tail.next = ListNode(digit)
        tail = tail.next
        if a:
            a = a.next
        if b:
            b = b.next
    if carry > 0:
        tail.next = ListNode(carry)
    return sentinel.next
# Ad-hoc manual fixtures: build (2 -> 4 -> 3) and (5 -> 6 -> 4) by hand.
# NOTE: `four` is rebound below to a fresh node for the second list; the
# first list keeps its reference to the original node.
two=ListNode(2)
four=ListNode(4)
three=ListNode(3)
two.next=four
four.next=three
five=ListNode(5)
six=ListNode(6)
four=ListNode(4)
five.next=six
six.next=four
# add_numbers(two, five)
# 2 -> 4 -> 3 + 5 -> 6 -> 4
# (6 -> 4 -> 1) + (4 -> 6 -> 2)
# 146 + 264 = 410
# six_=ListNode(6)
# four=ListNode(4)
# one=ListNode(1)
# six_.next=four
# four.next=one
# four=ListNode(4)
# six=ListNode(6)
# two=ListNode(2)
# four.next=six
# six.next=two
# add_numbers(six_, four)
# 6->4->8 + 4->6->2->1
# Current experiment: 846 + 1264; the result of add_numbers is discarded
# (nothing is printed or asserted here).
six__=ListNode(6)
four=ListNode(4)
eight=ListNode(8)
six__.next=four
four.next=eight
four_=ListNode(4)
six=ListNode(6)
two=ListNode(2)
one=ListNode(1)
four_.next=six
six.next=two
two.next=one
add_numbers(six__, four_)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MID-2017 POPULATION ESTIMATES: Net migration by age and gender
# +
from gssutils import *
# Scrape the NISRA publication page and pick out the net-migration table.
if is_interactive():
    scraper = Scraper('https://www.nisra.gov.uk/publications/'\
        '2017-mid-year-population-estimates-northern-ireland-new-format-tables')
# NOTE(review): `scraper` is only bound inside the is_interactive() branch;
# presumably the non-interactive pipeline injects it -- confirm.
dist = scraper.distribution(
    title='Northern Ireland - Net migration by sex and single year of age (2001-2017)',
    mediaType=Excel
)
flat = dist.as_pandas(sheet_name='Flat')
flat
# -
# Build the tidy-data frame column by column from the flat sheet.
tidy = pd.DataFrame()
tidy["Value"] = flat["NETMIG"]
tidy['Mid Year'] = flat["year"]
tidy['Age'] = flat["age"]
tidy['Area'] = flat["area_code"]
tidy['Sex'] = flat["gender"]
tidy['Population Change Component'] = "Total Net"
tidy['Measure Type'] = "Count"
tidy['Unit'] = "People"
tidy
tidy['Mid Year'].unique()
# Express each mid-year as an ISO interval: YYYY-06-30 plus one year.
tidy['Mid Year'] = tidy['Mid Year'].map(lambda x: str(x)[0:4] + '-06-30T00:00:00/P1Y')
tidy['Mid Year'].unique()
tidy.dtypes
tidy['Age'] = 'year/' + tidy['Age'].map(str)
# Map verbose sex labels to single-letter codes; unknown labels pass through.
tidy['Sex'] = tidy['Sex'].map(
    lambda x: {
        'All persons' : 'T',
        'Females' : 'F',
        'Males': 'M'
    }.get(x, x))
# Fix the output column order, then sanity-check the frame.
tidy = tidy[['Mid Year','Area','Age','Sex','Population Change Component','Measure Type','Value','Unit']]
tidy.head()
tidy.tail()
tidy['Value'] = tidy['Value'].astype(int)
tidy.count()
tidy.dtypes
tidy['Age'].unique()
|
#A Denoising Sparse Autoencoder class using THEANO
#Class also has the encoder, decoder and getUpdate function to use for training
#The decoding weights are NOT transposed versions of the encoding weights
from theano.tensor.shared_randomstreams import RandomStreams
from theano import tensor as T
import numpy as np
import os
import theano
theano.config.floatX='float32'
class AE(object):
def __init__(
self,
numpy_rndgen,
theano_rndgen,
input=None,
n_in=30*30,
n_hidden=100,
Wenc=None,
Wdec=None,
benc=None,
bdec=None
):
self.n_in=n_in
self.n_hidden=n_hidden
#Theano random num generator --> symbolic random numbers:
if not theano_rndgen:
theano_rndgen=RandomStreams(numpy_rndgen.randint(2**30))
self.theano_rndgen=theano_rndgen
#Initialise the Wenc and Wdec with small random vars:
if Wenc is None:
initial_Wenc = np.asarray(
numpy_rndgen.uniform(
low=-0.01, high=0.01, size=(n_in,n_hidden)
),
dtype=theano.config.floatX
)
Wenc=theano.shared(value=initial_Wenc, name='Wenc', borrow=True)
if Wdec is None:
initial_Wdec = np.asarray(
numpy_rndgen.uniform(
low=-0.01, high=0.01, size=(n_hidden,n_in)
),
dtype=theano.config.floatX
)
Wdec=theano.shared(value=initial_Wdec, name='Wdec', borrow=True)
#Init the benc and bdec with zeros:
if benc is None:
benc=theano.shared(
value=np.zeros(
n_hidden,
dtype=theano.config.floatX
),
name='benc',
borrow=True
)
if bdec is None:
bdec=theano.shared(
value=np.zeros(
n_in,
dtype=theano.config.floatX
),
name='bdec',
borrow=True
)
#set all the values:
self.Wenc=Wenc
print 'Wenc:',self.Wenc.dtype
self.Wdec=Wdec
print 'Wdec:',self.Wdec.dtype
self.benc=benc
print 'benc:',self.benc.dtype
self.bdec=bdec
print 'bdec:',self.bdec.dtype
print 'Wenc:', self.Wenc.shape.eval(), 'Wdec:', self.Wdec.shape.eval()
print 'benc:', self.benc.shape.eval(), 'bdec:', self.bdec.shape.eval()
if input is None:
self.x=T.dmatrix(name='input')
else:
self.x=input
print 'input:', input.dtype
#Parmas
self.params=[self.Wenc, self.benc, self.Wdec, self.bdec]
def encode(self,input_x):
print('encoding...')
return T.nnet.sigmoid(T.dot(input_x,self.Wenc) + self.benc)
def decode(self,encoded):
print('decoding...')
return T.nnet.sigmoid(T.dot(encoded,self.Wdec) + self.bdec)
def addNoise(self,input, noiseLevel):
return self.theano_rndgen.binomial(size=input.shape, n=1, p=1-noiseLevel,dtype=theano.config.floatX)*input
def getUpdate(self, noiseLevel, learnRate, roh, beta):
#Add some corruption...
if(noiseLevel>0):
noiseySample=self.addNoise(self.x, noiseLevel)
y=self.encode(noiseySample)
# mean activation
roh_hat=T.mean(y, axis=0) #Mean activation across samples in a batch (roh_hat should have dim 1,100 )
z=self.decode(y)
#Sparisity parameter
sparsity=((roh * T.log(roh / roh_hat)) + ((1 - roh) * T.log((1 - roh) / (1 - roh_hat)))).sum()
L = - T.sum(((self.x * T.log(z)) + ((1 - self.x) * T.log(1 - z))), axis=1) #Log likely hood over the samples (N.B. L is a vector)
E=((self.x-z)**2).sum()
cost= T.mean(L) + beta * sparsity
grads= T.grad(cost,self.params)
updates=[(param, param - learnRate*grad) for param, grad in zip(self.params, grads)]
print updates[1][1].dtype
return (cost, updates, roh_hat, T.mean(E))
|
# Copyright (c) 2016 John Gateley
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
class Record:
    """
    The Record class.
    domain_name: the name of the domain this record belongs to
    domain_id: the Linode ID of the domain, only filled in via the API, not for records in YAML configuration file
    resource_id: the Linode ID of the record, only filled in via the API
    record_type: "A", "AAAA", "CNAME", "MX", or "TXT"
    name: the host of the record
    target: the target of the record
    priority: the priority of the record, only used for MX
    ttl_seconds: the time to live seconds. 0 indicates default
    """
    def __init__(self, domain_name, domain_id, resource_id, record_type, name, target, priority, ttl_seconds):
        self.domain_name = domain_name
        self.domain_id = domain_id
        self.resource_id = resource_id
        self.record_type = record_type
        self.name = name
        self.target = target
        self.priority = priority
        # A TTL of 0 means "use the provider default"; store None for it.
        self.ttl_seconds = None
        if ttl_seconds != 0:
            self.ttl_seconds = ttl_seconds

    @staticmethod
    def from_json(json, domain_name):
        """
        Create a record object from the JSON returned from the Linode API.

        Bug fix: this was a plain function in the class body; calling it on
        an instance would have bound the instance as `json`.  It is now a
        proper @staticmethod.  (The parameter name `json` shadows the
        stdlib module but is kept for interface compatibility.)

        :param json: JSON returned from Linode
        :param domain_name: Name of the domain/zone
        :return: a new Record
        """
        return Record(domain_name, json['DOMAINID'], json['RESOURCEID'], json['TYPE'], json['NAME'], json['TARGET'],
                      json['PRIORITY'], json['TTL_SEC'])
|
from sqlAlchemy.models.utils.base_db import db
from sqlAlchemy.models.utils.database_session import Persistance
from sqlAlchemy.models.schema.movies import Movies
from sqlalchemy import text
class MovieDbOperations:
    """CRUD helpers for the Movies table.

    Security fix: delete/update previously interpolated user input
    directly into ``text()`` SQL fragments (an injection vector, and
    broken for string values since nothing was quoted).  They now use
    bound parameters.  Dead commented-out variants were removed.
    """

    @staticmethod
    def create(dict_args):
        """Insert a new Movies row built from dict_args."""
        movies_model_ins = Movies(dict_args)
        with Persistance().session as session:
            session.add(movies_model_ins)

    @staticmethod
    def delete(moviename):
        """Delete every row whose movie_name equals moviename."""
        with Persistance().session as session:
            session.query(Movies).filter(
                text("movies.movie_name = :name").bindparams(name=moviename)
            ).delete(synchronize_session=False)

    @staticmethod
    def update(dict_args):
        """Update the row identified by dict_args['moviename'] with dict_args."""
        with Persistance().session as session:
            session.query(Movies).filter(
                text("movies.movie_name = :name").bindparams(name=dict_args.get('moviename'))
            ).update(dict_args, synchronize_session=False)

    def select(self, condition, all_row):
        """Return matching rows; only the first row when all_row is falsy.

        NOTE(review): `condition` is still raw SQL text supplied by the
        caller -- it must never contain untrusted input.
        """
        if all_row:
            return self._select_all(condition)
        return self._select_one(condition)

    def _select_one(self, condition):
        """Return the first matching row as a one-element list, or []."""
        with Persistance().session as session:
            query = session.query(Movies)
            if condition:
                query = query.filter(text(condition))
            result = query.first()
            return [result.to_obj()] if result else []

    def _select_all(self, condition):
        """Return every matching row converted via to_obj()."""
        with Persistance().session as session:
            query = session.query(Movies)
            if condition:
                query = query.filter(text(condition))
            rows = query.all()
            return [each_row.to_obj() for each_row in rows if each_row]
|
# -*- coding:utf-8 -*-
class Solution:
    def FirstNotRepeatingChar(self, s):
        """Return the index of the first character occurring exactly once
        in s, or -1 when there is none (or s is empty/None).
        """
        # `not s` covers both None and the empty string (replaces the
        # yoda-style `None == s or 0 == len(s)` checks).
        if not s:
            return -1
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        # Second pass preserves first-occurrence order without tracking
        # a separate (char, index) list.
        for i, ch in enumerate(s):
            if counts[ch] == 1:
                return i
        return -1
def test():
    """Smoke-check FirstNotRepeatingChar on a few sample strings."""
    solver = Solution()
    for sample in ('asldjaifas', 'asekhwenas', 'asnkabjmd'):
        idx = solver.FirstNotRepeatingChar(sample)
        print(sample, idx, sample[idx])
# Run the smoke test only when executed as a script, not on import.
if __name__ == '__main__':
    test()
"""Write GNSS position results
Description:
------------
"""
# Standard library imports
from collections import namedtuple
# External library imports
import numpy as np
# Midgard imports
from midgard.data import position
from midgard.dev import plugins
from midgard.writers._writers import get_field, get_header
# Where imports
import where
from where.lib import config
from where.lib import util
WriterField = namedtuple(
"WriterField", ["name", "field", "attrs", "dtype", "format", "width", "header", "unit", "description"]
)
WriterField.__new__.__defaults__ = (None,) * len(WriterField._fields)
WriterField.__doc__ = """A convenience class for defining a output field for the writer
Args:
name (str): Unique field name
field (str): Dataset field name
attrs (Tuple[str]): Field attributes
dtype (Numpy dtype): Type of field
format (str): Format string
width (int): Width of header information
header (str): Header information
unit (str): Unit of field
description (str): Description of field
"""
# Define fields to plot
#
# # PGM: where 0.21.2/midgard 0.3.0 RUN_BY: NMA DATE: 20190604 135301 UTC
# #
# # EPOCH MJD WEEK GPSSEC ECEF [X] ECEF [Y] ECEF [Z] LATITUDE
# # YYYY/MM/DD hh:mm:ss second m m m deg
# # ___________________________________________________________________________________________________________
# 2018/02/01 00:00:00 58150.000000 1986 345600.000 3275756.9411 321112.5306 5445048.1920 59.01771380 ...
# 2018/02/01 00:05:00 58150.003472 1986 345900.000 3275756.9004 321112.5296 5445048.4119 59.01771513 ...
# 2018/02/01 00:10:00 58150.006944 1986 346200.000 3275757.1458 321112.6499 5445047.0430 59.01770683 ...
# ----+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----
#
# Ordered output columns; np.savetxt writes them left to right using each
# entry's format/width, and get_header builds the file header from them.
FIELDS = (
    # --- epoch/time columns ---
    WriterField(
        "date",
        "date",
        (),
        object,
        "%21s",
        19,
        "DATE",
        "YYYY/MM/DD hh:mm:ss",
        "Date in format year, month, day and hour, minute and second",
    ),
    WriterField("mjd", "time", ("gps", "mjd"), float, "%14.6f", 14, "MJD", "", "Modified Julian Day"),
    WriterField("gpsweek", "time", ("gps", "gps_ws", "week"), int, "%5d", 5, "WEEK", "", "GPS week"),
    WriterField(
        "gpssec", "time", ("gps", "gps_ws", "seconds"), float, "%11.3f", 11, "GPSSEC", "second", "GPS seconds"
    ),
    # --- cartesian station position (ECEF) ---
    WriterField(
        "x",
        "site_pos",
        ("trs", "x"),
        float,
        "%13.4f",
        13,
        "ECEF [X]",
        "meter",
        "X-coordinate of station position given in Earth-Centered Earth-Fixed cartesian coordinate system",
    ),
    WriterField(
        "y",
        "site_pos",
        ("trs", "y"),
        float,
        "%13.4f",
        13,
        "ECEF [Y]",
        "meter",
        "Y-coordinate of station position given in Earth-Centered Earth-Fixed cartesian coordinate system",
    ),
    WriterField(
        "z",
        "site_pos",
        ("trs", "z"),
        float,
        "%13.4f",
        13,
        "ECEF [Z]",
        "meter",
        "Z-coordinate of station position given in Earth-Centered Earth-Fixed cartesian coordinate system",
    ),
    # --- coordinate standard deviations (added by gnss_position) ---
    WriterField(
        "sigma_x",
        "site_pos_sigma_x",
        (),
        float,
        "%11.4f",
        11,
        "SIGMA_X",
        "meter",
        "Standard deviation of station position X-coordinate",
    ),
    WriterField(
        "sigma_y",
        "site_pos_sigma_y",
        (),
        float,
        "%11.4f",
        11,
        "SIGMA_Y",
        "meter",
        "Standard deviation of station position Y-coordinate",
    ),
    WriterField(
        "sigma_z",
        "site_pos_sigma_z",
        (),
        float,
        "%11.4f",
        11,
        "SIGMA_Z",
        "meter",
        "Standard deviation of station position Z-coordinate",
    ),
    # --- ellipsoidal coordinates ---
    WriterField(
        "lat",
        "site_pos",
        ("llh", "lat"),
        float,
        "%13.8f",
        13,
        "LATITUDE",
        "degree",
        "Latitude coordinate of station position given in ellipsiodal reference frame",
    ),
    WriterField(
        "lon",
        "site_pos",
        ("llh", "lon"),
        float,
        "%13.8f",
        13,
        "LONGITUDE",
        "degree",
        "Longitude coordinate of station position given in ellipsiodal reference frame",
    ),
    WriterField(
        "h",
        "site_pos",
        ("llh", "height"),
        float,
        "%11.4f",
        11,
        "HEIGHT",
        "meter",
        "Height coordinate of station position given in ellipsiodal reference frame",
    ),
    # --- topocentric offsets vs. reference position ---
    WriterField(
        "east",
        "site_pos_vs_ref_east",
        (),
        float,
        "%11.4f",
        11,
        "EAST",
        "meter",
        "East coordinate of difference between station position and reference position (e.g."
        "apriori station coordinate) given in topocentric coordinate system",
    ),
    WriterField(
        "north",
        "site_pos_vs_ref_north",
        (),
        float,
        "%11.4f",
        11,
        "NORTH",
        "meter",
        "North coordinate of difference between station position and reference position (e.g."
        "apriori station coordinate) given in topocentric coordinate system",
    ),
    WriterField(
        "up",
        "site_pos_vs_ref_up",
        (),
        float,
        "%11.4f",
        11,
        "UP",
        "meter",
        "Up coordinate of difference between station position and reference position (e.g."
        "apriori station coordinate) given in topocentric coordinate system",
    ),
    # --- horizontal/vertical position errors ---
    WriterField(
        "hpe",
        "hpe",
        (),
        float,
        "%11.4f",
        11,
        "HPE",
        "meter",
        "Horizontal Position Error of station position vs. reference position",
    ),
    WriterField(
        "vpe",
        "vpe",
        (),
        float,
        "%11.4f",
        11,
        "VPE",
        "meter",
        "Vertical Position Error of station position vs. reference position",
    ),
    # --- position covariance matrix (upper triangle) ---
    WriterField(
        "c_xx",
        "estimate_cov_site_pos_xx",
        (),
        float,
        "%13.8f",
        13,
        "COV_XX",
        "meter**2",
        "Variance of station position X-coordinate",
    ),
    WriterField(
        "c_xy",
        "estimate_cov_site_pos_xy",
        (),
        float,
        "%13.8f",
        13,
        "COV_XY",
        "meter**2",
        "XY-covariance of station position",
    ),
    WriterField(
        "c_xz",
        "estimate_cov_site_pos_xz",
        (),
        float,
        "%13.8f",
        13,
        "COV_XZ",
        "meter**2",
        "XZ-covariance of station position",
    ),
    WriterField(
        "c_yy",
        "estimate_cov_site_pos_yy",
        (),
        float,
        "%13.8f",
        13,
        "COV_YY",
        "meter**2",
        "Variance of station position Y-coordinate",
    ),
    WriterField(
        "c_yz",
        "estimate_cov_site_pos_yz",
        (),
        float,
        "%13.8f",
        13,
        "COV_YZ",
        "meter**2",
        "YZ-covariance of station position",
    ),
    WriterField(
        "c_zz",
        "estimate_cov_site_pos_zz",
        (),
        float,
        "%13.8f",
        13,
        "COV_ZZ",
        "meter**2",
        "Variance of station position Z-coordinate",
    ),
)
@plugins.register
def gnss_position(dset: "Dataset") -> None:
    """Write GNSS position results to the configured output file.

    Derives ENU offsets, HPE/VPE and coordinate sigmas from the dataset,
    assembles one row per epoch (or a single row for a whole-period
    solution) and writes them with np.savetxt.

    Args:
        dset:   A dataset containing the data.
    """
    file_path = config.files.path("output_position", file_vars={**dset.vars, **dset.analysis})

    # Add date field to dataset
    if "date" not in dset.fields:
        dset.add_text("date", val=[d.strftime("%Y/%m/%d %H:%M:%S") for d in dset.time.datetime], write_level="detail")

    # Add ENU position to dataset: difference against the reference
    # (apriori) position stored in the dataset metadata.
    ref_pos = position.Position(
        val=np.array([dset.meta["pos_x"], dset.meta["pos_y"], dset.meta["pos_z"]]), system="trs"
    )
    enu = (dset.site_pos.trs.pos - ref_pos).enu
    dset.add_float("site_pos_vs_ref_east", val=enu.east, unit="meter", write_level="detail")
    dset.add_float("site_pos_vs_ref_north", val=enu.north, unit="meter", write_level="detail")
    dset.add_float("site_pos_vs_ref_up", val=enu.up, unit="meter", write_level="detail")

    # Add HPE and VPE to dataset
    dset.add_float("hpe", val=np.sqrt(enu.east ** 2 + enu.north ** 2), unit="meter", write_level="operational")
    dset.add_float("vpe", val=np.absolute(enu.up), unit="meter", write_level="operational")

    # Add standard deviation of site position coordinates (square roots
    # of the diagonal covariance entries).
    dset.add_float(
        "site_pos_sigma_x",
        val=np.sqrt(dset.estimate_cov_site_pos_xx),
        unit="meter",
        write_level="detail",
    )
    dset.add_float(
        "site_pos_sigma_y",
        val=np.sqrt(dset.estimate_cov_site_pos_yy),
        unit="meter",
        write_level="detail",
    )
    dset.add_float(
        "site_pos_sigma_z",
        val=np.sqrt(dset.estimate_cov_site_pos_zz),
        unit="meter",
        write_level="detail",
    )

    # Put together fields in an array as specified by the 'dtype' tuple list
    if config.tech.estimate_epochwise.bool:  # Epochwise estimation or over whole time period
        output_list = list()
        for epoch in dset.unique("time"):
            idx = dset.filter(time=epoch)
            # Append current epoch position solution to final output solution
            output_list.append(tuple([get_field(dset, f.field, f.attrs, f.unit)[idx][0] for f in FIELDS]))
    else:
        # Get position solution for first observation
        idx = np.squeeze(np.array(np.nonzero(dset.time.gps.mjd)) == 0)  # first observation -> TODO: Better solution?
        # Bug fix: get_field was called with a stray extra `idx` argument
        # here (unlike the epochwise branch above), which raised TypeError.
        output_list = [tuple([get_field(dset, f.field, f.attrs, f.unit)[idx][0] for f in FIELDS])]
    output_array = np.array(output_list, dtype=[(f.name, f.dtype) for f in FIELDS])

    # Write to disk
    header = get_header(
        FIELDS,
        pgm_version=f"where {where.__version__}",
        run_by=util.get_user_info()["inst_abbreviation"] if "inst_abbreviation" in util.get_user_info() else "",
        summary="GNSS position results",
    )
    np.savetxt(
        file_path,
        output_array,
        fmt=tuple(f.format for f in FIELDS),
        header=header,
        delimiter="",
        encoding="utf8",
    )
|
#! /usr/bin/env python
# usage: python convert_indicators_to_cvs_single <indicator_type> <input_file> <output_file>
#indicator type can be: "GO", "EC" (without quotation marks)
# converts the indicators to a .cvs file, on a network by network basis
import os
import sys
import csv
import networkx as nx
#from ..edgeList import readLeda
# Command-line arguments: indicator type ("GO", "EC", "FUNCTIONAL",
# "FUNCTIONAL_SIMPLE" or "METABOLIC_COMPOUND"), input file, output CSV path.
TYPE=sys.argv[1]
INPUT=sys.argv[2]
OUTPUT=sys.argv[3]
# Read the network
def readLeda(gwFile):
    """Parse a LEDA .gw graph file into an undirected networkx graph.

    Small state machine:
      mode 0/1 -- reading node lines of the form ``|{node_name}|``
      mode 2   -- reading edge lines whose first two whitespace-separated
                  tokens are 1-based node indices
    """
    network = nx.Graph()
    fRead = open(gwFile, 'r')
    mode = 0
    nodeIndexes = {}  # 1-based node index -> node name (edges use indices)
    nodeCount = 1
    for line in fRead:
        if (mode == 0 or mode == 1) and line.startswith('|'):
            # Node line: strip the surrounding |{ }| decoration.
            nodeName = line.rstrip().strip('|').lstrip('{').rstrip('}')
            network.add_node(nodeName)
            nodeIndexes[nodeCount] = nodeName
            nodeCount += 1
            if mode == 0:
                mode = 1
        elif mode == 1 and not line.startswith('|'):
            # First non-node line after the node section: switch to edges.
            mode = 2
        elif mode == 2 and line.strip().endswith('|'):
            splitted = line.strip().split(' ')
            network.add_edge(nodeIndexes[int(splitted[0])], nodeIndexes[int(splitted[1])])
    fRead.close()
    return network
# resulting file is too big .. at least 111MB
# GO mode: emit a protein x GO-term membership matrix (one 0/1 column
# per GO term).  NOTE(review): 'wb' with str writes is Python 2 code.
if(TYPE == "GO"):
    go_dict = {}  # protein -> list of its GO terms
    go_list = []  # every GO term seen (deduplicated below)
    with open(INPUT) as f:
        for line in f:
            words = line.split("\t")
            protein = words[0]
            go_term = words[1].split("\n")[0]
            go_list += [go_term]
            #print words[1]
            if(protein not in go_dict ):
                go_dict[protein] = [go_term]
            else:
                go_dict[protein] += [go_term]
            #print go_dict[protein]
    #print go_list
    #remove duplicated from list:
    go_list = list(set(go_list))
    with open(OUTPUT, 'wb') as out:
        #writer = csv.writer(out)
        #writer.writerows(['ID'] + go_list)
        out.write("ID")
        for go_term in go_list:
            out.write("," + go_term)
        out.write("\n")
        # One row per protein: 1 when it has the GO term, else 0.
        for protein in go_dict.keys():
            out.write(protein)
            for go_term in go_list:
                if(go_term in go_dict[protein]):
                    out.write(",1")
                else:
                    out.write(",0")
            out.write("\n")
# EC mode: one indicator column per top-level EC class (EC1..EC6).
if(TYPE == "EC"):
    net = readLeda(INPUT)
    with open(OUTPUT, 'wb') as out:
        out.write("ID,EC1,EC2,EC3,EC4,EC5,EC6\n")
        for node in net.nodes():
            out.write(node)
            # Node names appear to be "<prefix>:<EC class>.<...>"; the
            # leading class digit is extracted -- TODO confirm the format.
            ec_nr = int(node.split(".")[0].split(":")[1])
            for i in range(1,7):
                if(i == ec_nr):
                    out.write(",1")
                else:
                    out.write(",0")
            out.write("\n")
# 14 functional properties for yeast + additional logic for node labels
# Rows are numeric node IDs (looked up via ppi.dic) rather than protein codes.
if(TYPE == "FUNCTIONAL"):
    protein_to_function_dict = {} # maps protein codes to function (string -> string)
    with open(INPUT) as f:
        for line in f:
            words = line.split("\t")
            protein = words[0]
            # Function label = everything after the first tab, minus the
            # trailing newline character.
            function = (" ".join(words[1:]))[:-1]
            protein_to_function_dict[protein] = function
    print "nr of functions=", set(protein_to_function_dict.values())
    # Auxiliary dictionary translating protein codes to node numbers.
    aux_file = "indicators/yeast_ppi/ppi.dic"
    protein_to_id_dict = {} # maps protein codes to node numbers (string -> int)
    with open(aux_file) as g:
        for line in g:
            words = line.split("\t")
            ID = int(words[0][2:])
            protein = words[1][:-1]
            protein_to_id_dict[protein] = ID
    #print protein_to_function_dict
    #print protein_to_id_dict
    with open(OUTPUT, 'wb') as out:
        #writer = csv.writer(out)
        #writer.writerows(['ID'] + go_list)
        out.write("ID")
        functions = list(set(protein_to_function_dict.values()))
        functions = [f for f in functions if f != '']
        # having a comma in the names will mess up the CSV file, which contains comma separated terms
        assert ',' not in " ".join(functions)
        for func in functions:
            out.write("," + func)
        out.write("\n")
        print functions
        print len(protein_to_function_dict.keys())
        print len(protein_to_id_dict.keys())
        # Only proteins with a known node ID are written out.
        for protein in protein_to_function_dict.keys():
            if(protein in protein_to_id_dict.keys()):
                out.write(str(protein_to_id_dict[protein]))
                for func in functions:
                    #print func + " --- " + protein + " --- " + protein_to_function_dict[protein]
                    if(func in protein_to_function_dict[protein]):
                        out.write(",1")
                    else:
                        out.write(",0")
                out.write("\n")
# same as before but with no extra logic for node labels
# (rows keep the protein code instead of a numeric node ID).
if(TYPE == "FUNCTIONAL_SIMPLE"):
    protein_to_function_dict = {} # maps protein codes to function (string -> string)
    print "Reading file: ", INPUT
    with open(INPUT) as f:
        for line in f:
            words = line.split("\t")
            protein = words[0]
            # Function label = everything after the first tab, minus the
            # trailing newline character.
            function = (" ".join(words[1:]))[:-1]
            protein_to_function_dict[protein] = function
    print "nr of functions=", set(protein_to_function_dict.values())
    with open(OUTPUT, 'wb') as out:
        #writer = csv.writer(out)
        #writer.writerows(['ID'] + go_list)
        out.write("ID")
        functions = list(set(protein_to_function_dict.values()))
        functions = [f for f in functions if f != '']
        # having a comma in the names will mess up the CSV file, which contains comma separated terms
        assert ',' not in " ".join(functions)
        for func in functions:
            out.write("," + func)
        out.write("\n")
        print functions
        print len(protein_to_function_dict.keys())
        for protein in protein_to_function_dict.keys():
            out.write(protein)
            for func in functions:
                #print func + " --- " + protein + " --- " + protein_to_function_dict[protein]
                if(func in protein_to_function_dict[protein]):
                    out.write(",1")
                else:
                    out.write(",0")
            out.write("\n")
if(TYPE== "METABOLIC_COMPOUND"):
cpd_to_ecs = {} # maps each compounds to a list of ECs (the ECs will only store the first number)
with open(INPUT) as f:
for line in f:
words = line.split(" ")
cpd1 = words[0]
cpd2 = words[1]
assert "\n" not in cpd1 or "\n" not in cdp2
ecs = words[2:]
ecs = [int(ec[3]) for ec in ecs]
if(cpd1 in cpd_to_ecs.keys()):
cpd_to_ecs[cpd1] += ecs
else:
cpd_to_ecs[cpd1] = ecs
if(cpd2 in cpd_to_ecs.keys()):
cpd_to_ecs[cpd2] += ecs
else:
cpd_to_ecs[cpd2] = ecs
# remove the duplicate ECs in the lists
for key in cpd_to_ecs.keys():
cpd_to_ecs[key] = list(set(cpd_to_ecs[key]))
with open(OUTPUT, "w") as f:
f.write("ID")
for i in range(1,7):
f.write(",EC"+str(i))
f.write("\n")
for key in cpd_to_ecs.keys():
f.write(key)
for i in range(1,7):
if(i in cpd_to_ecs[key]):
f.write(",1")
else:
f.write(",0")
f.write("\n")
print cpd_to_ecs.keys()[:10]
print cpd_to_ecs.values()[:10]
|
"""
Copyright (c) 2021 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mbed_host_tests import BaseHostTest
class ExampleHostTest(BaseHostTest):
    """Host-side counterpart of the greentea-client example test.

    Waits for a greeting key from the device and answers it.
    """

    def setup(self):
        """Wire the 'device_greetings' key to its handler."""
        self.register_callback('device_greetings', self._callback_device_greetings)

    def _callback_device_greetings(self, key, value, timestamp):
        """Log the device's greeting and send one back over the kv channel."""
        self.log("Message received from the device: " + value)
        self.send_kv('host_greetings', "Hello from the host!")
|
import torch
import torch.nn as nn
import cv2
import numpy as np
# class stepPool(nn.Module):
# # def __init__(self,):
# # super(stepPool, self).__init__()
# # self.set_pool = nn.AvgPool2d(kernel_size=)
# Load and display the image before any tensor conversion.
img = cv2.imread('cat.jpg')
cv2.imshow('1', img)
cv2.waitKey()

# Convert to a float tensor shaped (batch, channels, height, width).
# Bug fix: the original converted the tensor back into an int32 ndarray
# (np.int32(...)) before calling .unsqueeze(0), which raised
# AttributeError because ndarrays have no unsqueeze method.
img = torch.Tensor(img)
img = img.unsqueeze(0)
print(img.size())
img = img.permute(0, 3, 1, 2)
print(img.size())

if 1 == 2:  # disabled experiment: box blur via 3x3 average pooling
    step_pool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)
    img = step_pool(img)
    img = torch.floor(img)

newImg = img[0].permute(1, 2, 0).numpy()
# cv2.imshow('1',newImg)
# cv2.waitKey()
print(img.size())
#indexing assistance
# Column indices into the property lists below.
name = 0
colour = 1
price = 2
rent = 3
onehouse = 4
twohouse = 5
threehouse = 6
fourhouse = 7
hotel = 8
buildprice = 9
owner = 10
# Board squares in order.  Non-property squares are single-element lists;
# all money values are stored as strings and `owner` starts empty.
#library
a = ['Go']
b = ["Old Kent Rd", "brown", "60", "2", "10", "30", "90", "160", "250", "50", '']
c = ['Community Chest']
d = ['Whitechapel Rd', 'brown', '60', '4', '20', '60', '180', '320', '450', '50', '']
e = ['Income Tax']
f = ['King\'s Cross Stn', 'railway', '200', '25', '25', '50', '100', '200', '', '', '']
g = ['The Angel, Islington', 'light blue', '100', '6', '30', '90', '270', '400', '550', '50', '']
h = ['Chance']
i = ['Euston Rd', 'light blue', '100', '6', '30', '90', '270', '400', '550', '50', '']
j = ['Pentonville Rd', 'light blue', '120', '8', '40', '100', '300', '450', '600', '50', '']
k = ['Jail']
l = ['Pall Mall', 'pink', '140', '10', '50', '150', '450', '625', '750', '100', '']
m = ['Electric Company', 'utility', '150', '']
n = ['Whitehall', 'pink', '140', '10', '50', '150', '450', '625', '750', '100', '']
o = ['Northumberland Ave', 'pink', '160', '12', '60', '180', '500', '700', '900', '100', '']
p = ['Marylebone Stn', 'railway', '200', '25', '25', '50', '100', '200', '', '' ,'']
q = ['Bow St', 'orange', '180', '14', '70', '200', '550', '750', '950', '100', '']
r = ['Community Chest']
s = ['Marlborough St', 'orange', '180', '14', '70', '200', '550', '750', '950', '100', '']
t = ['Vine St', 'orange', '200', '16', '80', '220', '600', '800', '1000', '100', '']
u = ['Free Parking']
v = ['The Strand', 'red', '220', '18', '90', '250', '700', '875', '1050', '150', '']
w = ['Chance']
x = ['Fleet St', 'red', '220', '18', '90', '250', '700', '875', '1050', '150', '']
y = ['Trafalgar Sq', 'red', '240', '20', '100', '300', '750', '925', '1100', '150', '']
z = ['Fenchurch St Stn', 'railway', '200', '25', '25', '50', '100', '200', '', '', '']
aa = ['Leicester Sq', 'yellow', '260', '22', '110', '330', '800', '975', '1150', '150', '']
ab = ['Coventry St', 'yellow', '260', '22', '110', '330', '800', '975', '1150', '150', '']
ac = ['Water Works', 'utility', '150', '']
# NOTE(review): Piccadilly's rent ('22') and build price ('140') match
# neither the other yellows nor the standard UK board -- confirm whether
# these values are intentional.
ad = ['Piccadilly', 'yellow', '280', '22', '120', '360', '850', '1025', '1200', '140', '']
ae = ['Go To Jail']
af = ['Regent St', 'green', '300', '26', '130', '390', '900', '1100', '1275', '150', '']
ag = ['Oxford St', 'green', '300', '26', '130', '390', '900', '1100', '1275', '150', '']
ah = ['Community Chest']
ai = ['Bond St', 'green', '320', '28', '150', '450', '1000', '1200', '1400', '160', '']
aj = ['Liverpool St Stn', 'railway', '200', '25', '25', '50', '100', '200', '', '', '']
ak = ['Chance']
al = ['Park Ln', 'dark blue', '350', '35', '175', '500', '1100', '1300', '1500', '200', '']
am = ['Super Tax']
an = ['Mayfair', 'dark blue', '400', '50', '200', '600', '1400', '1700', '2000', '200', '']
#List of properties
boardpos = [a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, aa, ab, ac, ad, ae, af, ag, ah, ai, aj, ak, al, am, an]
# -*- coding=utf-8 -*-
# Created Time: 2015年06月26日 星期五 15时03分54秒
# File Name: __init__.py
|
#1 - Decision Tree on Guiding Question 1
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import scale
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from datetime import datetime
startTime = datetime.now()

# Predict (standardized) age from capital gains and one-hot-encoded tax
# filer status, with 10-fold cross-validation.
fields = ["age", "capital gains", "tax filer status"]
data = pd.read_csv('D:\\WPI\\DataSets\\census-income.csv', skipinitialspace=True, usecols=fields)
df = pd.DataFrame(data)

target = pd.DataFrame(scale(df['age']))
features = df.drop(['age'], axis=1)

# One-hot encode the categorical column, then standardize everything.
# (The unused OneHotEncoder instance was removed; get_dummies does the work.)
cat_values_enc = pd.get_dummies(features[['tax filer status']])
features.drop(['tax filer status'], axis=1, inplace=True)
categorical_variable_encoded = pd.concat([features, cat_values_enc], axis=1, sort=False)
categorical_variable_encoded = pd.DataFrame(scale(categorical_variable_encoded))

kf = KFold(n_splits=10)
meanList = []       # per-fold mean absolute error
coeff = []          # per-fold R^2
meanSquareList = [] # per-fold root mean squared error
for train_index, test_index in kf.split(categorical_variable_encoded):
    print("TRAIN:", train_index, "TEST:", test_index)
    X_train, X_test = categorical_variable_encoded.iloc[train_index], categorical_variable_encoded.iloc[test_index]
    y_train, y_test = target.iloc[train_index], target.iloc[test_index]
    regressionTree = DecisionTreeRegressor(random_state=np.random, min_weight_fraction_leaf=0.05)
    regressionTree.fit(X_train, y_train)
    y_pred = regressionTree.predict(X_test)
    mean = mean_absolute_error(y_test, y_pred)
    meanSquare = mean_squared_error(y_test, y_pred)
    rScore = r2_score(y_test, y_pred)
    print("Mean for each iteration: ", mean)
    print("Coefficient for each iteration: ", rScore)
    meanList.append(mean)
    coeff.append(rScore)
    meanSquareList.append(np.sqrt(meanSquare))

# Average the per-fold metrics (replaces manual accumulation loops that
# divided by a hard-coded fold count of 10).
print("Average Mean: ", sum(meanList) / len(meanList))
print("Average Coefficient: ", sum(coeff) / len(coeff))
print("Average Root Mean Square Error: ", sum(meanSquareList) / len(meanSquareList))
print(regressionTree.get_depth())
print(datetime.now() - startTime)
#2 - Linear Regression on Guiding Question 1
from sklearn import preprocessing
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from matplotlib.colors import ListedColormap
# NOTE(review): `scaler` and ListedColormap are unused in this section.
scaler = StandardScaler()
fields = ["marital status","sex"]
predicter=["age"]
# Features and (standardized) target are read in two separate passes over the CSV.
data=pd.read_csv('census-income.csv',skipinitialspace=True,usecols=fields)
target=pd.DataFrame(preprocessing.scale(pd.read_csv('census-income.csv',skipinitialspace=True,usecols=predicter)))
df = pd.DataFrame(data, columns=fields)
# Dummy-encode the categoricals, then standardize.
res=pd.DataFrame(preprocessing.scale(pd.get_dummies(df)))
from sklearn.linear_model import LinearRegression
# 10-fold cross-validation of ordinary least squares.
kf = KFold(n_splits=10)
meanList = []          # per-fold mean absolute error
coeff = []             # per-fold R^2 (r2_score imported earlier in the file)
meanSquareList = []    # per-fold RMSE
for train_index, test_index in kf.split(res):
    print("TRAIN:", train_index, "TEST:", test_index)
    x_train, x_test = res.iloc[train_index], res.iloc[test_index]
    y_train, y_test = target.iloc[train_index], target.iloc[test_index]
    reg=LinearRegression()
    reg.fit(x_train,y_train)
    y_pred= reg.predict(x_test)
    mean = mean_absolute_error(y_test,y_pred)
    meanSquare = mean_squared_error(y_test, y_pred)
    rScore = r2_score(y_test, y_pred)
    print("Mean for each iteration: ", mean)
    print("Coefficient for each iteration: ",rScore)
    meanList.append(mean)
    coeff.append(rScore)
    meanSquareList.append(np.sqrt(meanSquare))
# Average the per-fold metrics; 10 matches n_splits above.
total=0
totalRScore=0
totalMeanSquare=0
for i in range(len(meanList)):
    total+= meanList[i]
    totalRScore+=coeff[i]
    totalMeanSquare+=meanSquareList[i]
print("Average Mean: ", total/10)
print("Average Coefficient: ",totalRScore/10)
print("Average Root Mean Square Error: ",totalMeanSquare/10)
#3 - ZeroR on Guiding Question 2
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
import pandas as pd
import statistics
from sklearn.metrics import r2_score
from sklearn import preprocessing
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from datetime import datetime
fields = ["age"]
startTime = datetime.now()
data=pd.read_csv('census-income.csv',skipinitialspace=True, usecols=fields)
df = pd.DataFrame(preprocessing.scale(data))
meanList = []          # per-fold mean absolute error
coeff = []             # per-fold R^2
meanSquareList = []    # per-fold RMSE
kf = KFold(n_splits=10)
# ZeroR baseline: predict a constant for every test row.
for train_index, test_index in kf.split(df):
    target1=[]
    print("TRAIN:", train_index, "TEST:", test_index)
    y_train, y_test = df.iloc[train_index], df.iloc[test_index]
    # NOTE(review): predicts df.mean() — the mean of the WHOLE dataset, not
    # of the training fold (mild leakage); each appended element is a Series.
    for i in range(0,y_test.shape[0]):
        target1.append(df.mean())
    mean = mean_absolute_error(y_test,target1)
    meanSquare = mean_squared_error(y_test, target1)
    rScore = r2_score(y_test, target1)
    print("Mean for each iteration: ", mean)
    print("Coefficient for each iteration: ",rScore)
    meanList.append(mean)
    coeff.append(rScore)
    # np comes from an earlier import in this file.
    meanSquareList.append(np.sqrt(meanSquare))
# Average the per-fold metrics; 10 matches n_splits above.
total=0
totalRScore=0
totalMeanSquare=0
for i in range(len(meanList)):
    total+= meanList[i]
    totalRScore+=coeff[i]
    totalMeanSquare+=meanSquareList[i]
print("Average Mean: ", total/10)
print("Average Coefficient: ",totalRScore/10)
print("Average Root Mean Square Error: ",totalMeanSquare/10)
print (datetime.now() - startTime)
#4 - Decision Tree on Guiding Question 2
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import scale
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from datetime import datetime
startTime = datetime.now()
fields = ["age","marital status","income","education"]
data=pd.read_csv('D:\\WPI\\DataSets\\census-income.csv',skipinitialspace=True,usecols=fields)
df = pd.DataFrame(data)
# Target is raw (unscaled) age here, unlike sections #1/#6.
target = df['age']
df = df.drop(['age'], axis=1)
final_val = df
# Ordinal-encode education, dummy-encode the remaining categoricals.
labelEncode = final_val[['education']]
le = LabelEncoder()
labelEncod_enc = pd.DataFrame(le.fit_transform(labelEncode))
final_val.drop(['education'],axis=1,inplace=True)
final_val=pd.concat([final_val, labelEncod_enc], axis=1,sort=False)
cat_values = final_val[['marital status','income']]
# NOTE(review): `ohe` is unused — pd.get_dummies does the encoding below.
ohe = OneHotEncoder(drop='first')
cat_values_enc=pd.get_dummies(cat_values)
final_val.drop(['marital status','income'],axis=1,inplace=True)
categorical_variable_encoded=pd.concat([final_val,cat_values_enc],axis=1,sort=False)
# 10-fold cross-validation of the regression tree (max_features=2 per split).
kf = KFold(n_splits=10)
meanList = []          # per-fold mean absolute error
coeff = []             # per-fold R^2
meanSquareList = []    # per-fold RMSE
for train_index, test_index in kf.split(categorical_variable_encoded):
    print("TRAIN:", train_index, "TEST:", test_index)
    X_train, X_test = categorical_variable_encoded.iloc[train_index], categorical_variable_encoded.iloc[test_index]
    y_train, y_test = target.iloc[train_index], target.iloc[test_index]
    regressionTree = DecisionTreeRegressor(random_state=0, max_features=2)
    regressionTree.fit(X_train, y_train)
    y_pred = regressionTree.predict(X_test)
    mean = mean_absolute_error(y_test,y_pred)
    meanSquare = mean_squared_error(y_test, y_pred)
    rScore = r2_score(y_test, y_pred)
    print("Mean for each iteration: ", mean)
    print("Coefficient for each iteration: ",rScore)
    meanList.append(mean)
    coeff.append(rScore)
    meanSquareList.append(np.sqrt(meanSquare))
# Average the per-fold metrics; 10 matches n_splits above.
total=0
totalRScore=0
totalMeanSquare=0
for i in range(len(meanList)):
    total+= meanList[i]
    totalRScore+=coeff[i]
    totalMeanSquare+=meanSquareList[i]
print("Average Mean: ", total/10)
print("Average Coefficient: ",totalRScore/10)
print("Average Root Mean Square Error: ",totalMeanSquare/10)
# Depth of the tree from the last fold only.
print(regressionTree.get_depth())
print ("Time taken to build the model: ",datetime.now() - startTime)
#5 - Model Tree on Guiding Question 2
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from datetime import datetime
def split_data(j_feature, threshold, X, y):
    """Partition (X, y) on a single feature threshold.

    Rows with X[:, j_feature] <= threshold go left, the rest go right.

    Args:
        j_feature: column index to split on.
        threshold: split value (inclusive on the left side).
        X: 2-D array-like of samples.
        y: 1-D array-like of targets, aligned with the rows of X.

    Returns:
        ((X_left, y_left), (X_right, y_right)) as numpy arrays.
    """
    X = np.asarray(X)
    # Convert y as well: the original indexed a pandas Series with `y[idx]`,
    # which performs LABEL-based lookup — wrong rows (or KeyError) when y
    # arrives with a shuffled index, e.g. from train_test_split.
    y = np.asarray(y)
    mask = X[:, j_feature] <= threshold
    idx_left = np.where(mask)[0]
    idx_right = np.where(~mask)[0]
    assert len(idx_left) + len(idx_right) == len(X)
    return (X[idx_left], y[idx_left]), (X[idx_right], y[idx_right])
# --- Fit a small tree and pull out its internal structure for the model tree ---
startTime = datetime.now()
fields = ["age","education","income","marital status"]
data=pd.read_csv('D:\\WPI\\DataSets\\census-income.csv',skipinitialspace=True,usecols=fields)
df = pd.DataFrame(data)
target = df['age']
df = df.drop(['age'], axis=1)
final_val = df
# Ordinal-encode education, one-hot the remaining categoricals.
labelEncode = final_val[['education']]
le = LabelEncoder()
labelEncod_enc = pd.DataFrame(le.fit_transform(labelEncode))
final_val.drop(['education'],axis=1,inplace=True)
final_val=pd.concat([final_val, labelEncod_enc], axis=1,sort=False)
cat_values = final_val[['marital status','income']]
ohe = OneHotEncoder(drop='first')
cat_values_enc=pd.DataFrame(ohe.fit_transform(cat_values).toarray())
final_val.drop(['marital status','income'],axis=1,inplace=True)
categorical_variable_encoded=pd.concat([final_val,cat_values_enc],axis=1,sort=False)
meanList = []
coeff = []
# Single 70/30 hold-out split; the tree is capped at 5 leaves.
X_train, X_test, y_train, y_test = train_test_split(categorical_variable_encoded, target, test_size=0.3, random_state=0)
regressionTree = DecisionTreeRegressor(random_state=0,min_samples_split=2,max_leaf_nodes=5)
regressionTree.fit(X_train, y_train)
# Raw arrays describing the fitted tree (sklearn's Tree object).
n_nodes = regressionTree.tree_.node_count
children_left = regressionTree.tree_.children_left
print(children_left)
children_right = regressionTree.tree_.children_right
print(children_right)
feature = regressionTree.tree_.feature
threshold = regressionTree.tree_.threshold
leaves = np.arange(0,regressionTree.tree_.node_count)
thistree = [regressionTree.tree_.feature.tolist()]
thistree.append(regressionTree.tree_.threshold.tolist())
thistree.append(regressionTree.tree_.children_left.tolist())
thistree.append(regressionTree.tree_.children_right.tolist())
leaf_observations = np.zeros((n_nodes,len(leaves)),dtype=bool)
print(n_nodes," ",children_left," ",children_right," ",feature," ",threshold)
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
leafNodes = np.zeros(shape=n_nodes, dtype=np.int64)
# DFS over (node_id, parent_depth) to compute each node's depth and mark leaves.
stack = [(0, -1)]
X_left=[]
y_left=[]
X_right=[]
y_right = []
while len(stack) > 0:
    node_id, parent_depth = stack.pop()
    print(node_id," ",parent_depth)
    node_depth[node_id] = parent_depth + 1
    print("left and right: ",node_id,": ",children_left[node_id]," ",children_right[node_id])
    # Internal nodes have distinct children; leaves have both set to -1.
    if (children_left[node_id] != children_right[node_id]):
        stack.append((children_left[node_id], parent_depth + 1))
        stack.append((children_right[node_id], parent_depth + 1))
    else:
        is_leaves[node_id] = True
print(is_leaves)
def ModelTree():
    """Evaluate a linear model at each internal node of the fitted tree and
    return the best (lowest) weighted MAE split and its RMSE.

    Uses the module-level globals set up above: X_train, y_train, n_nodes,
    is_leaves, feature, threshold, and split_data().

    Fixes over the original:
      * a fresh LinearRegression is fitted per side — previously one
        estimator was refit for the right side before the left-side
        predictions were computed, so both predictions came from the
        right-side model;
      * returns the tracked best loss instead of the loss of whichever
        split happened to be evaluated last;
      * divides by the sample count (X_train.shape[0]) rather than the
        shape tuple, which only worked through accidental broadcasting.

    Returns:
        (best_loss, best_rmse): floats; (inf, 0) if every node is a leaf.
    """
    n_samples = X_train.shape[0]
    best_loss = np.inf
    best_rmse = 0
    for i in range(n_nodes):
        if is_leaves[i]:
            continue
        (X_left, y_left), (X_right, y_right) = split_data(feature[i], threshold[i], X_train, y_train)
        n_left, n_right = len(X_left), len(X_right)
        # Coerce to float frames and zero-fill NaNs, as the original did.
        X_left = pd.DataFrame(np.array(X_left, dtype=np.float64)).fillna(0)
        y_left = pd.DataFrame(np.array(y_left, dtype=np.float64)).fillna(0)
        X_right = pd.DataFrame(np.array(X_right, dtype=np.float64)).fillna(0)
        y_right = pd.DataFrame(np.array(y_right, dtype=np.float64)).fillna(0)
        # One independent model per side of the split.
        left_model = LinearRegression().fit(X_left, y_left)
        right_model = LinearRegression().fit(X_right, y_right)
        y_pred_left = left_model.predict(X_left)
        y_pred_right = right_model.predict(X_right)
        loss_left = mean_absolute_error(y_left, y_pred_left)
        loss_right = mean_absolute_error(y_right, y_pred_right)
        mse_left = mean_squared_error(y_left, y_pred_left)
        mse_right = mean_squared_error(y_right, y_pred_right)
        # Sample-weighted averages of the per-side losses.
        split_loss = (n_left * loss_left + n_right * loss_right) / n_samples
        split_rmse = np.sqrt((n_left * mse_left + n_right * mse_right) / n_samples)
        if split_loss < best_loss:
            best_loss = split_loss
            best_rmse = split_rmse
            print(left_model)
            model = [left_model, right_model]
            print(model)
    return best_loss, best_rmse
# Evaluate the model tree and report losses plus timing.
modelTree, meanSquare = ModelTree();
print(modelTree," ",meanSquare)
print ("Time taken to build the model: ",datetime.now() - startTime)
# Decision path of the test set through the fitted tree; slice out the node
# sequence visited by sample 0.
node_indicator = regressionTree.decision_path(X_test)
leave_id = regressionTree.apply(X_test)
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
                                    node_indicator.indptr[sample_id + 1]]
# Pretty-print the tree, indenting each node by its depth.
for i in range(n_nodes):
    if is_leaves[i]:
        print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
    else:
        print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
              "node %s."
              % (node_depth[i] * "\t",
                 i,
                 children_left[i],
                 feature[i],
                 threshold[i],
                 children_right[i],
                 ))
print()
#6 - Decision Tree on Guiding Question 3
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import scale
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from datetime import datetime
startTime = datetime.now()
fields = ["age","marital status","income","capital losses"]
data=pd.read_csv('D:\\WPI\\DataSets\\census-income.csv',skipinitialspace=True,usecols=fields)
df = pd.DataFrame(data)
# Restrict to the low-income bracket only.
df=df.loc[df["income"].isin(["-50000"])]
print(df)
target = df['age']
df = df.drop(['age'], axis=1)
final_val = df
print(final_val)
# Standardized age is the target.
target=scale(target)
target = pd.DataFrame(target)
cat_values = final_val[['marital status','income']]
cat_values_enc=pd.get_dummies(cat_values)
final_val.drop(['marital status','income'],axis=1,inplace=True)
# Re-attach the dummies and standardize all features.
categorical_variable_encoded=pd.concat([final_val,cat_values_enc],axis=1,sort=False)
categorical_variable_encoded=scale(categorical_variable_encoded)
categorical_variable_encoded = pd.DataFrame(categorical_variable_encoded)
print(categorical_variable_encoded)
# 10-fold cross-validation of the regression tree (>=10 samples per leaf).
kf = KFold(n_splits=10)
meanList = []          # per-fold mean absolute error
coeff = []             # per-fold R^2
meanSquareList = []    # per-fold RMSE
for train_index, test_index in kf.split(categorical_variable_encoded):
    print("TRAIN:", train_index, "TEST:", test_index)
    X_train, X_test = categorical_variable_encoded.iloc[train_index], categorical_variable_encoded.iloc[test_index]
    y_train, y_test = target.iloc[train_index], target.iloc[test_index]
    regressionTree = DecisionTreeRegressor(random_state=1, min_samples_leaf=10)
    regressionTree.fit(X_train, y_train)
    y_pred = regressionTree.predict(X_test)
    mean = mean_absolute_error(y_test,y_pred)
    meanSquare = mean_squared_error(y_test, y_pred)
    rScore = r2_score(y_test, y_pred)
    print("Mean for each iteration: ", mean)
    print("Coefficient for each iteration: ",rScore)
    meanList.append(mean)
    coeff.append(rScore)
    meanSquareList.append(np.sqrt(meanSquare))
# Average the per-fold metrics; 10 matches n_splits above.
total=0
totalRScore=0
totalMeanSquare=0
for i in range(len(meanList)):
    total+= meanList[i]
    totalRScore+=coeff[i]
    totalMeanSquare+=meanSquareList[i]
print("Average Mean: ", total/10)
print("Average Coefficient: ",totalRScore/10)
print("Average Root Mean Square Error: ",totalMeanSquare/10)
# Depth of the tree from the last fold only.
print(regressionTree.get_depth())
print (datetime.now() - startTime)
|
def main():
    """Read a vector A from stdin and print it alongside its reverse B.

    Elements are kept as the raw strings the user typed.
    """
    quantidade = int(input("Informe a quantidade de elementos do vetor: "))
    vetor_a = []
    for indice in range(quantidade):
        vetor_a.append(input(f"Qual o valor do {indice} elemento: "))
    vetor_b = list(reversed(vetor_a))
    print(f"Vetor A = {vetor_a}")
    print(f"Vetor B = {vetor_b}")
main()
# Generated by Django 2.2.7 on 2019-11-29 22:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make `unique_squirrel_id` the primary key of the `squirrel` model.

    Auto-generated migration; applies after 0004_auto_20191129_2200.
    """

    dependencies = [
        ('sightings', '0004_auto_20191129_2200'),
    ]
    operations = [
        migrations.AlterField(
            model_name='squirrel',
            name='unique_squirrel_id',
            # serialize=False is Django's standard flag for primary-key fields.
            field=models.CharField(help_text='Squirrel ID', max_length=100, primary_key=True, serialize=False),
        ),
    ]
|
import tensorflow as tf
from utils import normal_initializer, zero_initializer
from layers import ConvLayer, ConvPoolLayer, DeconvLayer
import numpy as np
# TF1-style flag registry; the hyperparameters read below (height, width,
# num_channel, code_size, learning_rate) are defined elsewhere in the project.
flags = tf.app.flags
FLAGS = flags.FLAGS
class AutoEncoder(object):
    """Convolutional autoencoder that maps rotated input images to a dense
    code and decodes that code back into images; the loss compares the
    reconstruction against the original (un-rotated) images.

    NOTE(review): built for TF1 graph mode (tf.placeholder / tf.train) and
    the project-local ConvLayer / ConvPoolLayer / DeconvLayer wrappers.
    """
    def __init__(self):
        """Build the graph: placeholders, network, loss, and train op."""
        # placeholder for storing rotated input images
        self.input_rotated_images = tf.placeholder(dtype=tf.float32,
                                                   shape=(None, FLAGS.height, FLAGS.width, FLAGS.num_channel))
        # placeholder for storing original images without rotation
        self.input_original_images = tf.placeholder(dtype=tf.float32,
                                                    shape=(None, FLAGS.height, FLAGS.width, FLAGS.num_channel))
        # self.output_images: images predicted by model
        # self.code_layer: latent code produced in the middle of network
        # self.reconstruct: images reconstructed by model
        self.code_layer, self.reconstruct, self.output_images = self.build()
        self.loss = self._loss()
        self.opt = self.optimization()
    def optimization(self):
        """Return an Adam train op minimizing the reconstruction loss."""
        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
        return optimizer.minimize(self.loss)
    def encoder(self, inputs):
        """Encode images into a dense code of size FLAGS.code_size.

        Returns:
            (dense, last_conv_dims): the code tensor and the pre-flatten
            conv output dims, which the decoder needs to un-flatten.
        """
        # convolutional layer
        conv1 = ConvLayer(input_filters=tf.cast(inputs.shape[3], tf.int32), output_filters=8, act=tf.nn.relu,
                          kernel_size=3, kernel_stride=1, kernel_padding="SAME")
        conv1_act = conv1.__call__(inputs)
        print(conv1_act.shape)
        # convolutional and pooling layer
        conv_pool1 = ConvPoolLayer(input_filters=8, output_filters=8, act=tf.nn.relu,
                                   kernel_size=3, kernel_stride=1, kernel_padding="SAME",
                                   pool_size=3, pool_stride=2, pool_padding="SAME")
        conv_pool1_act = conv_pool1.__call__(conv1_act)
        print(conv_pool1_act.shape)
        # convolutional layer
        conv2 = ConvLayer(input_filters=8, output_filters=16, act=tf.nn.relu,
                          kernel_size=3, kernel_stride=1, kernel_padding="SAME")
        conv2_act = conv2.__call__(conv_pool1_act)
        print(conv2_act.shape)
        # convolutional and pooling layer
        conv_pool2 = ConvPoolLayer(input_filters=16, output_filters=16, act=tf.nn.relu,
                                   kernel_size=3, kernel_stride=1, kernel_padding="SAME",
                                   pool_size=3, pool_stride=2, pool_padding="SAME")
        conv_pool2_act = conv_pool2.__call__(conv2_act)
        print(conv_pool2_act.shape)
        conv3 = ConvLayer(input_filters=16, output_filters=32, act=tf.nn.relu,
                          kernel_size=3, kernel_stride=1, kernel_padding="SAME")
        conv3_act = conv3.__call__(conv_pool2_act)
        print(conv3_act.shape)
        conv_pool3 = ConvPoolLayer(input_filters=32, output_filters=32, act=tf.nn.relu,
                                   kernel_size=3, kernel_stride=1, kernel_padding="SAME",
                                   pool_size=3, pool_stride=2, pool_padding="SAME")
        conv_pool3_act = conv_pool3.__call__(conv3_act)
        print(conv_pool3_act.shape)
        last_conv_dims = conv_pool3_act.shape[1:]
        # make output of pooling flatten
        flatten = tf.reshape(conv_pool3_act, [-1,last_conv_dims[0]*last_conv_dims[1]*last_conv_dims[2]])
        print(flatten.shape)
        weights_encoder = normal_initializer((tf.cast(flatten.shape[1], tf.int32), FLAGS.code_size))
        bias_encoder = zero_initializer((FLAGS.code_size))
        # apply fully connected layer
        dense = tf.matmul(flatten, weights_encoder) + bias_encoder
        print(dense.shape)
        return dense, last_conv_dims
    def decoder(self, inputs, last_conv_dims):
        """Decode a dense code back to image-shaped logits.

        Args:
            inputs: code tensor produced by `encoder`.
            last_conv_dims: conv output dims returned by `encoder`, used to
                reshape the dense activation before the deconvolutions.
        """
        # apply fully connected layer
        weights_decoder = normal_initializer((FLAGS.code_size,
                                              tf.cast(last_conv_dims[0]*last_conv_dims[1]*last_conv_dims[2], tf.int32)))
        bias_decoder = zero_initializer((tf.cast(last_conv_dims[0]*last_conv_dims[1]*last_conv_dims[2], tf.int32)))
        decode_layer = tf.nn.relu(tf.matmul(inputs, weights_decoder) + bias_decoder)
        print(decode_layer.shape)
        # reshape to send as input to transposed convolutional layer
        deconv_input = tf.reshape(decode_layer, (-1,last_conv_dims[0],last_conv_dims[1],last_conv_dims[2]))
        print(deconv_input.shape)
        # transpose convolutional layer
        deconv1 = DeconvLayer (input_filters=tf.cast(deconv_input.shape[3], tf.int32), output_filters=16, act=tf.nn.relu,
                               kernel_size=3, kernel_stride=2, kernel_padding="SAME")
        deconv1_act = deconv1.__call__(deconv_input)
        print(deconv1_act.shape)
        # transpose convolutional layer
        deconv2 = DeconvLayer (input_filters=16, output_filters=8, act=tf.nn.relu,
                               kernel_size=3, kernel_stride=2, kernel_padding="SAME")
        deconv2_act = deconv2.__call__(deconv1_act)
        print(deconv2_act.shape)
        # transpose convolutional layer; no activation — raw logits for the
        # sigmoid cross-entropy loss / final sigmoid in build().
        deconv3 = DeconvLayer (input_filters=8, output_filters=1, act=None,
                               kernel_size=3, kernel_stride=2, kernel_padding="SAME")
        deconv3_act = deconv3.__call__(deconv2_act)
        print(deconv3_act.shape)
        return deconv3_act
    def _loss(self):
        """Mean sigmoid cross-entropy between reconstruction logits and the
        original images (assumes original pixel values lie in [0, 1] —
        TODO confirm against the data pipeline).
        """
        flatten_output = tf.reshape(self.reconstruct,
                                    [-1,self.reconstruct.shape[1]*self.reconstruct.shape[2]*self.reconstruct.shape[3]])
        flatten_input = tf.reshape(self.input_original_images,[-1,
                                   self.input_original_images.shape[1]*
                                   self.input_original_images.shape[2]*
                                   self.input_original_images.shape[3]])
        mean_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=flatten_output, labels=flatten_input))
        return mean_loss
    def build(self):
        """Wire encoder and decoder; return (code, logits, sigmoid images)."""
        # evaluate encoding of images by self.encoder
        code_layer, last_conv_dims = self.encoder(self.input_rotated_images)
        # evaluate reconstructed images by self.decoder
        reconstruct = self.decoder(code_layer, last_conv_dims)
        # apply tf.nn.sigmoid to change pixel range to [0, 1]
        output_images = tf.nn.sigmoid(reconstruct)
        return code_layer, reconstruct, output_images
|
# -*- coding: utf8 - *-
"""Exceptions for tmuxp.
tmuxp.exc
~~~~~~~~~
:copyright: Copyright 2013 Tony Narlock.
:license: BSD, see LICENSE for details
"""
class TmuxSessionExists(Exception):
    """Raised when a session by this name already exists in the server.

    (Original docstring said "does not exist", contradicting the class name.)
    """
    pass
class ConfigError(Exception):
    """Raised when a tmuxp configuration dict cannot be parsed."""
class EmptyConfigException(ConfigError):
    """Raised when the tmuxp configuration is empty."""
|
# -*- coding: utf-8 -*-
import json
from cgbeacon2.constants import (
BUILD_MISMATCH,
INVALID_COORDINATES,
NO_MANDATORY_PARAMS,
NO_POSITION_PARAMS,
NO_SECONDARY_PARAMS,
UNKNOWN_DATASETS,
)
# Default request headers: send and accept JSON.
HEADERS = {"Content-type": "application/json", "Accept": "application/json"}
# Query string carrying the three mandatory beacon params (assembly,
# chromosome, reference bases) — extended per-test with extra params.
BASE_ARGS = "query?assemblyId=GRCh37&referenceName=1&referenceBases=TA"
# Versioned API prefix shared by every endpoint URL in these tests.
API_V1 = "/apiv1.0/"
################## TESTS FOR HANDLING WRONG REQUESTS ################
def test_post_empty_query(mock_app):
    """An empty POST to the query endpoint should be rejected with a 400."""
    # GIVEN a POST request carrying no data
    response = mock_app.test_client().post(API_V1 + "query?", headers=HEADERS)
    # THEN the server answers with a client error
    assert response.status_code == 400
def test_query_get_request_missing_mandatory_params(mock_app):
    """A GET query lacking the mandatory params (referenceName,
    referenceBases, assemblyId) should return a structured beacon error.
    """
    # WHEN a request missing one or more required params reaches the server
    response = mock_app.test_client().get(API_V1 + "query?", headers=HEADERS)
    # THEN the response is a 400 carrying the expected error payload
    assert response.status_code == 400
    payload = json.loads(response.data)
    assert payload["error"] == NO_MANDATORY_PARAMS
    assert payload["exists"] is None
    assert payload["beaconId"]
    assert payload["apiVersion"] == "v1.0.1"
def test_query_get_request_unknown_datasets(mock_app):
    """A query naming a dataset absent from the database should fail."""
    # GIVEN a database containing no datasets
    database = mock_app.db
    assert database["dataset"].find_one() is None
    # WHEN the request asks for a specific dataset ID
    dataset_param = "datasetIds=foo"
    response = mock_app.test_client().get(
        API_V1 + BASE_ARGS + "&" + dataset_param, headers=HEADERS
    )
    # THEN the expected type of error is returned
    assert response.status_code == 400
    payload = json.loads(response.data)
    assert payload["error"] == UNKNOWN_DATASETS
def test_query_get_request_build_mismatch(mock_app, public_dataset):
    """Querying with GRCh37 against a dataset stored as GRCh38 should fail."""
    # GIVEN a dataset with genome build GRCh38 in the database
    database = mock_app.db
    public_dataset["assembly_id"] = "GRCh38"
    database["dataset"].insert_one(public_dataset)
    # WHEN a GRCh37 request targets that GRCh38 dataset
    response = mock_app.test_client().get(
        API_V1 + BASE_ARGS + "&" + f"datasetIds={public_dataset['_id']}",
        headers=HEADERS,
    )
    # THEN the build-mismatch error is returned
    assert response.status_code == 400
    payload = json.loads(response.data)
    assert payload["error"] == BUILD_MISMATCH
def test_query_get_request_missing_secondary_params(mock_app):
    """A query without the secondary params (alternateBases, variantType)
    should return the dedicated error.
    """
    # WHEN a request missing alternateBases and variantType reaches the server
    response = mock_app.test_client().get(API_V1 + BASE_ARGS, headers=HEADERS)
    # THEN the response is a 400 with the secondary-params error
    assert response.status_code == 400
    payload = json.loads(response.data)
    assert payload["error"] == NO_SECONDARY_PARAMS
def test_query_get_request_non_numerical_sv_coordinates(mock_app):
    """A structural-variant query with a non-numerical start should fail."""
    sv_params = "start=FOO&end=70600&variantType=DUP"
    # WHEN a request has a non-numerical start or stop position
    response = mock_app.test_client().get(
        API_V1 + BASE_ARGS + "&" + sv_params, headers=HEADERS
    )
    payload = json.loads(response.data)
    # THEN the invalid-coordinates error is returned
    assert response.status_code == 400
    assert payload["error"] == INVALID_COORDINATES
def test_query_get_request_missing_positions_params(mock_app):
    """A query with neither a start position nor any of the four range
    coordinates (startMin, startMax, endMin, endMax) should fail.
    """
    # WHEN the request provides only a secondary param, no position at all
    response = mock_app.test_client().get(
        API_V1 + BASE_ARGS + "&" + "alternateBases=T", headers=HEADERS
    )
    payload = json.loads(response.data)
    # THEN the missing-position error is returned
    assert response.status_code == 400
    assert payload["error"] == NO_POSITION_PARAMS
def test_query_get_request_non_numerical_range_coordinates(mock_app):
    """A query whose range coordinates are not all integers should fail."""
    range_coords = "&variantType=DUP&startMin=2&startMax=3&endMin=6&endMax=FOO"
    # WHEN a request for range coordinates doesn't contain integers
    # (note: range_coords starts with '&', matching the original URL exactly)
    response = mock_app.test_client().get(
        API_V1 + BASE_ARGS + "&" + range_coords, headers=HEADERS
    )
    payload = json.loads(response.data)
    # THEN the invalid-coordinates error is returned
    assert response.status_code == 400
    assert payload["error"] == INVALID_COORDINATES
|
from django.contrib import admin
from game.models import Territory, Lobby, Session, TerritorySession, UserProfile
# Expose the game models through Django's default admin site.
# Registration order matches the original module.
for _model in (Territory, Lobby, Session, TerritorySession, UserProfile):
    admin.site.register(_model)
|
# -*- coding:utf-8 -*-
__author__ = 'gusevsergey'
from pytils import dt
from coffin import template
register = template.Library()
@register.filter()
def date_inflected(d, date_format):
    """Template filter: format date `d` with pytils, inflecting the Russian
    month name for use after a day number.

    NOTE(review): `unicode` exists only on Python 2 — this module targets
    Python 2 (pytils + coffin); under Python 3 this line raises NameError.
    """
    return dt.ru_strftime(unicode(date_format), d, inflected=True)
|
# Minimize Cost
# Read the first line ("n k") and bail out on out-of-range values.
# `raise SystemExit` replaces the original `sys.exit()`, which raised
# NameError here because `sys` is never imported in this script.
a = [int(x) for x in input().split()]
# NOTE(review): 10e5 == 1_000_000; if the intended bound is 10^5 this should
# be 1e5 — kept as written to preserve behavior.
if a[0] < 1 or a[1] < 1 or a[0] > 10e5 or a[1] > 10e5:
    raise SystemExit
# Read the array and bail out if any element is out of range.
b = [int(x) for x in input().split()]
for value in b:
    if value > 10e9 or value < -10e9:
        raise SystemExit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.