text stringlengths 38 1.54M |
|---|
# -*- coding:UTF-8 -*-
"""
Instagram批量关注
https://www.instagram.com/
@author: hikaru
email: hikaru870806@hotmail.com
如有问题或建议请联系
"""
import time
from common import *
from project.instagram import instagram
# Whether to send follow requests to private accounts as well.
IS_FOLLOW_PRIVATE_ACCOUNT = False


# Fetch an account's home page and scrape its follow state.
def get_account_index_page(account_name):
    """Return {"is_follow", "is_private", "account_id"} for account_name.

    Raises crawler.CrawlerException when the page cannot be fetched or the
    account id cannot be scraped out of the HTML.
    """
    account_index_url = "https://www.instagram.com/%s" % account_name
    account_index_response = net.http_request(account_index_url, method="GET", cookies_list=instagram.COOKIE_INFO)
    result = {
        "is_follow": False,  # already followed by the logged-in viewer
        "is_private": False,  # account is private
        "account_id": None,  # numeric account id scraped from the page
    }
    if account_index_response.status == net.HTTP_RETURN_CODE_SUCCEED:
        # Scrape the numeric account id out of the embedded page data.
        account_id = tool.find_sub_string(account_index_response.data, '"profilePage_', '"')
        if not crawler.is_integer(account_id):
            raise crawler.CrawlerException("页面截取账号id失败\n%s" % account_index_response.data)
        result["account_id"] = account_id
        # Whether the viewer already follows this account.
        result["is_follow"] = tool.find_sub_string(account_index_response.data, '"followed_by_viewer": ', ",") == "true"
        # Whether the account is private.
        result["is_private"] = tool.find_sub_string(account_index_response.data, '"is_private": ', ",") == "true"
    elif account_index_response.status == 404:
        raise crawler.CrawlerException("账号不存在")
    else:
        raise crawler.CrawlerException(crawler.request_failre(account_index_response.status))
    return result
# Follow the specified account through Instagram's friendship web API.
def follow_account(account_name, account_id):
    """POST a follow request for account_id.

    Returns True when the account is now followed (or a follow request was
    sent to a private account), False for any other API result string.
    Exits the whole process on rate limiting or unexpected responses.
    """
    follow_api_url = "https://www.instagram.com/web/friendships/%s/follow/" % account_id
    # The web API requires the csrftoken from the cookie jar in a header.
    header_list = {"Referer": "https://www.instagram.com/", "x-csrftoken": instagram.COOKIE_INFO["csrftoken"], "X-Instagram-AJAX": 1}
    follow_response = net.http_request(follow_api_url, method="POST", header_list=header_list, cookies_list=instagram.COOKIE_INFO, json_decode=True)
    if follow_response.status == net.HTTP_RETURN_CODE_SUCCEED:
        follow_result = crawler.get_json_value(follow_response.json_data, "result", default_value="", type_check=str)
        if follow_result == "following":
            output.print_msg("关注%s成功" % account_name)
            return True
        elif follow_result == "requested":
            # Private account: a follow request was sent instead.
            output.print_msg("私密账号%s,已发送关注请求" % account_name)
            return True
        elif not follow_result:
            # Unexpected payload: log it and stop the crawl.
            output.print_msg("关注%s失败,返回内容不匹配\n%s" % (account_name, follow_response.json_data))
            tool.process_exit()
        else:
            return False
    elif follow_response.status == 403 and follow_response.data == "Please wait a few minutes before you try again.":
        # Rate limited by Instagram: abort rather than keep hammering.
        output.print_msg(crawler.CrawlerException("关注%s失败,连续关注太多等待一会儿继续尝试" % account_name))
        tool.process_exit()
    else:
        output.print_msg(crawler.CrawlerException("关注%s失败,请求返回结果:%s" % (account_name, crawler.request_failre(follow_response.status))))
        tool.process_exit()
def main():
    """Walk the configured account list and follow every account in it."""
    # Initialize the crawler object (loads cookies and the account list).
    instagram_obj = instagram.Instagram()
    count = 0
    for account_name in sorted(instagram_obj.account_list.keys()):
        try:
            account_index_response = get_account_index_page(account_name)
        except crawler.CrawlerException as e:
            log.error(account_name + " 首页解析失败,原因:%s" % e.message)
            continue
        if account_index_response["is_follow"]:
            # Already following: nothing to do.
            output.print_msg("%s已经关注,跳过" % account_name)
        elif account_index_response["is_private"] and not IS_FOLLOW_PRIVATE_ACCOUNT:
            # Private account and follow requests are disabled: skip.
            output.print_msg("%s是私密账号,跳过" % account_name)
        else:
            if follow_account(account_name, account_index_response["account_id"]):
                count += 1
            # Small delay between follow requests to reduce rate limiting.
            time.sleep(0.1)
    output.print_msg("关注完成,成功关注了%s个账号" % count)


if __name__ == "__main__":
    main()
|
from setuptools import setup, find_packages
from codecs import open
import os
here = os.path.abspath(os.path.dirname(__file__))


def read_text(fname):
    """Return the content of *fname* (relative to this file), or "" if missing.

    Bug fix: the existence check previously tested the bare name against the
    current working directory while open() used os.path.join(here, fname),
    so running setup.py from another directory mis-detected the file.
    Both operations now use the same absolute path.
    """
    path = os.path.join(here, fname)
    if os.path.isfile(path):
        with open(path) as f:
            return f.read()
    print("warning: file {} does not exist".format(fname))
    return ""
# Package metadata for jsonkv; see setuptools docs for field semantics.
setup(
    name="jsonkv",  # Required
    version="1.1.10",  # Required
    description="Use JSON file as KV store easily",
    long_description=read_text("README.md"),  # Optional; shown on PyPI
    long_description_content_type="text/markdown",  # Optional (see note above)
    url="https://github.com/weaming/jsonkv",  # Optional
    author="weaming",  # Optional
    author_email="garden.yuen@gmail.com",  # Optional
    packages=find_packages(),
    # install_requires=[
    #     l
    #     for l in read_text("requirements.txt").split("\n")
    #     if l.strip() and not l.strip().startswith("#")
    # ],
    classifiers=[  # Optional
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        "Development Status :: 4 - Beta",
        # Indicate who your project is intended for
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Build Tools",
        # Pick your license as you wish
        "License :: OSI Approved :: MIT License",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
    ],
    keywords="json kv store",  # Optional
)
|
# Generated by Django 3.0.7 on 2020-06-30 00:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Menu model with a FK to Restaurant."""

    initial = True

    dependencies = [
        # Restaurant must exist before the Menu FK can be created.
        ('restaurants', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
                ('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
                ('active', models.BooleanField(default=True, verbose_name='ativo')),
                ('name', models.CharField(max_length=100, verbose_name='Nome')),
                ('description', models.TextField(blank=True, verbose_name='Descrição')),
                ('slug', models.SlugField(blank=True, help_text='Preenchido automaticamente, não editar', max_length=255, null=True, verbose_name='slug')),
                ('restaurant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restaurants.Restaurant')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
from Inference import Inference
def main():
    '''Run one prompt/answer cycle of the Inference engine.'''
    Inference.promptQuestion()
    Inference.answerQuestion()


# Guard the entry point so importing this module does not trigger the
# interactive prompt (previously main() ran unconditionally on import).
if __name__ == '__main__':
    main()
|
import tweepy
import psycopg2
import psycopg2.extensions
from psycopg2.extensions import AsIs
import psycopg2.extras
import pickle
import string
import pandas as pd
import json
# Load a previously pickled Twitter API search response.
# NOTE(review): pickle.load executes arbitrary code from the file; only load
# pickles you created yourself.
data = pickle.load( open( "twitter_data_Trump_SINCE.p", "rb" ))
data_dict = (data["statuses"])
# Connecting to database
try:
    conn = psycopg2.connect("dbname='wrangleDB' user='postgres' host='localhost' password='password'")
    print "postgresql database wrangleDB has been opened and a connection exists"
except:
    # NOTE(review): bare except hides the real error, and execution continues
    # even though `conn` was never bound -- conn.cursor() below would then
    # raise NameError. Consider re-raising or exiting here.
    print "I am unable to connect to the database"
# Open a cursor to perform database operations
cur = conn.cursor()
cur.execute("CREATE TABLE testJDUMP(Id INTEGER PRIMARY KEY, tweet TEXT)")
# Insert Data into Table: each status stored as a JSON string keyed by a
# sequential integer id (parameterized query, so quoting is handled safely).
query = "INSERT INTO testJDUMP (Id, tweet) VALUES (%s, %s)"
serial_count = 0
for t in data_dict:
    t = json.dumps(t)
    data_tuple = (serial_count, t)
    cur.execute(query, data_tuple)
    serial_count += 1
conn.commit()
|
#!/usr/bin/env python3
import socket
import postgres
import csv
import os
import time
import sys
import collections
import multiprocessing as mp
from datetime import datetime
from psycopg2 import OperationalError
from psycopg2.extras import execute_values
def write_to_csv(wr_buff, can_bus):
    """Append the buffered CAN frames to the per-bus CSV log file."""
    # Rows are (timestamp, can_interface, can_id, can_data) tuples; the file
    # is opened in append mode so the log grows across calls.
    with open(f'/data/log/{can_bus}.csv', mode='a') as csv_file:
        writer = csv.writer(csv_file, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerows(wr_buff)
    print(f'{can_bus}: Wrote successfully to CSV')
def write_to_db(db, wr_buff, can_bus):
    """Bulk-insert buffered CAN frames into the `can` table.

    wr_buff: iterable of (time, can_interface, can_id, can_data) tuples.
    """
    try:
        with db.get_cursor() as cursor:
            # execute_values expands every row into one multi-row INSERT.
            execute_values(cursor,
                           "INSERT INTO can(time, can_interface, can_id, can_data) \
                VALUES %s;", wr_buff)
    except(Exception, SyntaxError):
        # NOTE(review): Exception already covers SyntaxError, and the broad
        # catch discards the underlying DB error -- consider logging it.
        print(f'{can_bus}: An error occured while inserting data to database')
    else:
        print(f'{can_bus}: Wrote successfully to DB')
def db_init():
    """Connect to Postgres and prepare the schema; return the DB handle.

    Ensures the timescaledb extension, the `can` table and its hypertable
    exist. Connection parameters come from the db_* environment variables.
    Raises psycopg2.OperationalError while the DB is still starting up.
    """
    connection_url = ('postgresql://' + os.environ['db_user'] + ':' +
                      os.environ['db_password'] + '@postgres:' +
                      os.environ['db_port'] + '/' + os.environ['db_database'])
    print('Initializing Postgres Object...')
    db = postgres.Postgres(url=connection_url)
    print('Ensuring timescaledb ext. is enabled')
    db.run("CREATE EXTENSION IF NOT EXISTS timescaledb;")
    print("Ensuring tables are setup properly")
    db.run("""
        CREATE TABLE IF NOT EXISTS can (
        time timestamptz NOT NULL,
        can_interface text NOT NULL,
        can_id text NOT NULL,
        can_data text NOT NULL);""")
    print("Ensuring can data table is a timescaledb hypertable")
    # migrate_data covers the case where the table already contains rows.
    db.run("""
        SELECT create_hypertable('can', 'time', if_not_exists => TRUE,
        migrate_data => TRUE);""")
    print("Finished setting up tables")
    return db
# This detection function was taken from can_watchdog. Author: Aaron Neustedter
def detect_can_interfaces():
can_interfaces = []
print('Gathering all can interfaces')
sysclass = '/mnt/host/sys/class/net/'
# Iterate through all links listed in /sys/class/net
for network in os.listdir(sysclass):
# This file defines the type of the network
path = sysclass + network + '/type'
print(f'Checking network {network}, type at path {path}')
# Sometimes things are not setup like we expect. Live and let live
if not os.path.isfile(path):
print(f'{network} does not have a type file. Skipping')
continue
# Open the file and read it
with open(path) as typefile:
networktype = typefile.read().strip()
# 280 is the type for CAN. 'Documentation' here:
# https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/if_arp.h#L56
if networktype.isdigit() and int(networktype) == 280:
print('\t', network, ' appears to be a CAN link')
can_interfaces.append(network)
if len(can_interfaces) <= 0:
print('FATAL: No CAN interfaces found')
sys.exit(-1)
print(len(can_interfaces), ' found: ', can_interfaces)
return can_interfaces
def log_can(can_interface):
    """Stream frames from one CAN bus via socketcand and batch-log them.

    Connects to the socketcand daemon (host_ip/host_port globals), opens
    *can_interface* in rawmode, then loops forever parsing frames and
    flushing 1000-frame batches to the DB and/or CSV depending on the
    logtodb/logtocsv globals. Intended to run as its own process.
    """
    print(f'Logging {can_interface}')
    frame = ''
    rx_buff = []  # frames accumulated since the last flush
    wr_buff = []  # snapshot handed to the writer processes
    socket_connected = False
    # Python deque to store the last 3 received elements from the socket
    buff = collections.deque(maxlen=3)
    # Initialize socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Connect to socketcand, retrying every 10 s until it accepts.
    while(socket_connected is False):
        try:
            s.connect((host_ip, int(host_port)))
        except(ConnectionRefusedError):
            print('Could not connect to socketcand. Connection Refused. \
Retrying...')
            time.sleep(10)
            socket_connected = False
        else:
            print('Successfully connected to socketcand at',
                  f'{host_ip}: {host_port}')
            socket_connected = True
        sys.stdout.flush()
    # Receive socketcand's response. After each command, socketcand replies
    # < ok > if the command was successful. Each reply must be received before
    # sending new commands, else, socketcand won't receive new commands. For
    # more details about socketcand's protocol, please refer to:
    # https://github.com/linux-can/socketcand/blob/master/doc/protocol.md
    s.recv(32)
    # Connect to exposed CAN interface and receive socketcand's respone.
    s.sendall(b'< open ' + can_interface.encode('utf-8') + b' >')
    s.recv(32)
    # Set socket to 'rawmode' to receive every frame on the bus.
    s.sendall(b'< rawmode >')
    s.recv(32)
    # Receive data in a 54-byte long socket buffer. Data may come split and
    # incomplete after each iteration, so data received from the socket
    # buffer is copied to a circular buffer (buff). This circular buffer
    # stores up to three messages to ensure a complete frame can be obtained.
    # After filling this buffer, information is converted to a list and stored
    # in a "frame buffer". This buffer contains data received from the last
    # three iterations. After filling the frame_buffer, a complete frame is
    # obtained by concatenating the second and third elements of the frame buffer.
    # Then the resulting bytes element is encoded to a UTF-8 string and its data
    # obtained using string manipulation. New frames always start with "<", so
    # the string is split after each occurence of this character. Afterwards,
    # the second element of the resulting list will contain the full data.
    # Finally, some characters are stripped to clean up the received frame
    # and then split the resulting string to get the timestamp, CAN ID
    # and CAN frame.
    while(True):
        sys.stdout.flush()
        # Buffer to store raw bytes received from the socket.
        socket_buff = s.recv(54)
        buff.append(socket_buff)
        # List representation of buff
        frame_buff = list(buff)
        if(len(frame_buff) > 2):
            # Decoded and assembled version of frame_buff in string format
            frame = frame_buff[1] + frame_buff[2]
            frame = frame.decode("utf-8").split("<")
            frame = frame[1].strip('>').split(' ')
            try:
                (timestamp, can_bus, can_id, can_data) = (frame[3], can_interface,
                                                          frame[2], frame[4])
            except(IndexError):
                # Partial/garbled frame: drop it and keep streaming.
                print(f'Error logging CAN frame at {can_interface}. Skipping...')
            else:
                timestamp = datetime.fromtimestamp(float(timestamp)).isoformat()
                rx_buff.append((timestamp, can_bus, can_id, can_data))
            # When the receive buffer reaches 1000 entries, copy data from receive
            # buffer to write buffer, then write to database from write buffer and
            # clear receive buffer to continue receiving data
            if(len(rx_buff) >= 1000):
                wr_buff.clear()
                wr_buff = rx_buff.copy()
                rx_buff.clear()
                if(logtodb):
                    p_db = mp.Process(target=write_to_db, args=(db, wr_buff,
                                                                can_bus,))
                    p_db.start()
                if(logtocsv):
                    p_csv = mp.Process(target=write_to_csv, args=(wr_buff,
                                                                  can_bus,))
                    p_csv.start()
# Get host info using environment variables
host_ip = os.environ['socketcand_ip']
host_port = os.environ['socketcand_port']
host_interfaces = os.environ['can_interface']
logging = os.environ['log']
# Split host_interfaces string into a list of strings.
host_interfaces = host_interfaces.split(',')
# Initialize variables
can_interfaces = []
logtodb = False
logtocsv = False
socket_connected = False
db_started = False
# Check log selection from env. variables ('db', 'csv', or both).
if (logging.find('db') != -1):
    logtodb = True
if (logging.find('csv') != -1):
    logtocsv = True
# Detect available CAN interfaces
avail_interfaces = detect_can_interfaces()
print("Detected interfaces: " + str(avail_interfaces))
# Check selected CAN interfaces in env variable are available.
for i in host_interfaces:
    if i in avail_interfaces:
        can_interfaces.append(i)
    else:
        print(f'Interface {i} is not valid or is not currently available')
# Initialize postgres database if database logging is enabled. The database
# sometimes is not ready to accept connections. In that case, report the issue
# and wait 10 seconds to try again. Keep trying until a successful connection
# can be made.
if (logtodb):
    while(db_started is False):
        try:
            db = db_init()
        except(OperationalError):
            print('Error: Database system has not been started up',
                  'or is starting up. Waiting...')
            time.sleep(10)
            db_started = False
        else:
            db_started = True
# Spawn one logging process per selected CAN bus.
for can_bus in can_interfaces:
    print('Creating process for', can_bus)
    mp.Process(target=log_can, args=(can_bus,)).start()
|
from rest_framework.serializers import ModelSerializer
from manager.models import Comment, LikeCommentUser, Book
class CommentSerializer(ModelSerializer):
    """Serialize every field of the Comment model."""
    class Meta:
        model = Comment
        fields = "__all__"
class LikeCommentUserSerializer(ModelSerializer):
    """Serialize every field of the LikeCommentUser model."""
    class Meta:
        model = LikeCommentUser
        fields = '__all__'
class BookSerializer(ModelSerializer):
    """Expose only the title and text of a Book."""
    class Meta:
        model = Book
        fields = ['title', 'text']
|
from sklearn import preprocessing
from sklearn.neural_network import MLPClassifier
from model_nnlm import *
import Write_xls
import pickle
import data_build
# Prepare the training data x and y.
with open('./data/label_transfer_dict.pkl', 'rb') as f:
    dict = pickle.load(f)  # NOTE(review): shadows the builtin `dict`
print('ceshi', dict['肺经蕴热'])
# y_keras = np_utils.to_categorical(y,num_classes=40)
# print('keras',y_keras)
# print('标签个数',le.classes_,)
# print('标准化',le.transform(["肺经蕴热"]))
# print(y)
clf = MLPClassifier()  # NOTE(review): created but never used in this script
x = []
# One MultiLabelBinarizer per label position; fit() pins each alphabet.
# Strings are iterated character-wise, so each character becomes a class.
mlb1 = preprocessing.MultiLabelBinarizer()
mlb2 = preprocessing.MultiLabelBinarizer()
mlb3 = preprocessing.MultiLabelBinarizer()
mlb4 = preprocessing.MultiLabelBinarizer()
mlb5 = preprocessing.MultiLabelBinarizer()
mlb1.fit(['心肝脾肺肾胆胃'])
mlb2.fit(['气','血','湿','痰','泛','水','瘀'])
mlb3.fit(['阴阳表里虚实寒热'])
mlb4.fit(['卫','气','血'])
mlb5.fit(['上','中','下'])
def x_to_vector(x):
    """Encode five label strings as one concatenated binary vector.

    x: sequence of five label strings; position i is encoded with the
    module-level binarizer mlb{i+1}. NaN entries (e.g. from pandas) are
    treated as the empty string. Returns a 1-D numpy array.

    Refactor: the original repeated the same four-line stanza five times;
    the loop below is behavior-identical for five-element inputs.
    """
    parts = []
    for label, mlb in zip(x, (mlb1, mlb2, mlb3, mlb4, mlb5)):
        if label != label:  # NaN is the only value unequal to itself
            label = ''
        encoded = mlb.transform([label])
        parts.append(np.reshape(encoded, encoded.size))
    return np.concatenate(parts)
# Save the segmented syndrome labels (columns 3..7) into a dictionary.
dict_qingxi = {}
for k in dict.keys():
    x_train_temp = []
    for j in range(3, 8):
        # value != value is only true for NaN; substitute the empty string.
        if dict[k][j] != dict[k][j]:
            x_train_temp.append('')
        else:
            x_train_temp.append(dict[k][j])
    dict_qingxi[k] = x_train_temp
str_dir = './y_str1'
with open(str_dir, 'rb') as f:
    # Five consecutive pickle.load calls read five objects from one file.
    y_str1 = pickle.load(f)
    y_str2 = pickle.load(f)
    y_str3 = pickle.load(f)
    y_str4 = pickle.load(f)
    y_str5 = pickle.load(f)
# Re-assemble the five parallel label lists into one list per sample.
y_bianzheng = []
for i in range(len(y_str1)):
    y_bianzheng_temp = []
    y_bianzheng_temp.append(y_str1[i])
    y_bianzheng_temp.append(y_str2[i])
    y_bianzheng_temp.append(y_str3[i])
    y_bianzheng_temp.append(y_str4[i])
    y_bianzheng_temp.append(y_str5[i])
    y_bianzheng.append(y_bianzheng_temp)
# print('bianzheng',y_bianzheng)
save_Test_label_dir = './Test_Label'
with open(save_Test_label_dir, 'rb') as f:
    y_label = pickle.load(f)
text, labels = data_build.data_build_label('./data/bingli_exp_result/test')
Not_match_list = []
Not_match_text = []
Not_match_label = []
# Compare each sample's five label sets against the dictionary entry for
# its class; record the first mismatching sample details for the report.
for i in range(len(y_label)):
    Not_match_list_temp = []
    Leibie = y_label[i]  # class name of sample i
    for j in range(5):
        str_temp = set(y_bianzheng[i][j])
        if (str_temp != set(dict_qingxi[Leibie][j])):
            Not_match_list_temp.append(Leibie)
            Not_match_list_temp.append(y_bianzheng[i])
            Not_match_list_temp.append(dict_qingxi[Leibie])
            Not_match_list.append(Not_match_list_temp)
            Not_match_text.append(text[i])
            Not_match_label.append(labels[i])
            break
Write_xls.list_to_xls4(Not_match_list, Not_match_text, Not_match_label, "不匹配结果_1.xls")
|
import numpy as np

# Load the label arrays, word arrays and inputs for all three splits.
ytrainn = np.load('data/ytrainn.npy')
ytestn = np.load('data/ytestn.npy')
ydevn = np.load('data/ydevn.npy')
trainwords = np.load('data/trainwords.npy')
testwords = np.load('data/testwords.npy')
devwords = np.load('data/devwords.npy')
Xtrain = np.load('data/Xtrain.npy')
Xtest = np.load('data/Xtest.npy')
Xdev = np.load('data/Xdev.npy')
# Predictions of one experiment run.
filepath = 'experiments/cnn_noembed/crf/08'
testfile = filepath + '/predict_test.npy'
devfile = filepath + '/predict_dev.npy'
trainfile = filepath + '/predict_train.npy'
test = np.load(testfile)
dev = np.load(devfile)
train = np.load(trainfile)
def pretty_print(data, t, p, text):
    """Print input, target and prediction (plus words when available) per sample."""
    for idx in range(t.shape[0]):
        print('==============================')
        print('data', data[idx])
        print('target ', t[idx])
        print('predicted', p[idx])
        try:
            print('text: ', text[idx])
        except Exception:
            # word list may be shorter than the label arrays; skip silently
            pass
        print()
if __name__ == '__main__':
    import sys
    # Redirect every subsequent print into the run's evaluation text file.
    sys.stdout = open(filepath + '/eval_text.txt', 'w+')
    print('train dataset ==========================================')
    pretty_print(Xtrain, ytrainn, train, trainwords)
    print('test dataset ===========================================')
    pretty_print(Xtest, ytestn, test, testwords)
    print('dev dataset ===========================================')
    pretty_print(Xdev, ydevn, dev, devwords)
|
# A Sudoku solver with backtracking.
# Main Sudoku solving class.
class Sudoku_Solver:
    """Solve a 9x9 Sudoku grid in place via recursive backtracking."""

    def __init__(self, sudoku):
        # Bug fix: the original pre-filled self.grid with a malformed
        # comprehension ([[0 for x in range(9) for y in range(9)]] -- a single
        # row of 81 zeros) and immediately overwrote it; the dead statement
        # is removed.
        self.grid = sudoku  # 9x9 list of lists; 0 marks an empty cell
        self.curr = [0, 0]  # [row, col] of the most recently found empty cell

    # Find the next empty cell; store its position in self.curr.
    def EmptyFinder(self):
        for row in range(9):
            for col in range(9):
                if (self.grid[row][col] == 0):
                    self.curr = [row, col]
                    return True
        return False

    # True if num already appears in the given row.
    def inRow(self, row, num):
        for j in range(9):
            if (self.grid[row][j] == num):
                return True
        return False

    # True if num already appears in the given column.
    def inCol(self, col, num):
        for i in range(9):
            if self.grid[i][col] == num:
                return True
        return False

    # True if num already appears in the 3x3 box containing (row, col).
    def inBox(self, row, col, num):
        r = row - row % 3
        c = col - col % 3
        for i in range(3):
            for j in range(3):
                if (self.grid[i + r][j + c] == num):
                    return True
        return False

    # Checks if it is safe to put a given number in a given cell.
    def isSafe(self, row, col, num):
        return not self.inRow(row, num) and not self.inCol(col, num) and not self.inBox(row, col, num)

    def solveSudoku(self):
        """Fill every empty cell in place; return True iff a solution exists."""
        self.curr = [0, 0]
        if (not self.EmptyFinder()):  # no empty cell left: solved
            return True
        row = self.curr[0]
        col = self.curr[1]
        for num in range(1, 10):
            if self.isSafe(row, col, num):
                self.grid[row][col] = num
                if self.solveSudoku():  # continue forward with this number
                    return True
                # Reset the cell and backtrack when the branch dead-ends.
                self.grid[row][col] = 0
        return False

    def printSudoku(self):
        for i in range(9):
            print(self.grid[i])
# Driver Code
if __name__ == "__main__":
    # Bug fix: the original first assigned a malformed comprehension
    # ([[0 for x in range(9) for y in range(9)]] -- one row of 81 zeros)
    # that was immediately overwritten; the dead statement is removed.
    sudoku = [[3, 0, 6, 5, 0, 8, 4, 0, 0],
              [5, 2, 0, 0, 0, 0, 0, 0, 0],
              [0, 8, 7, 0, 0, 0, 0, 3, 1],
              [0, 0, 3, 0, 1, 0, 0, 8, 0],
              [9, 0, 0, 8, 6, 3, 0, 0, 5],
              [0, 5, 0, 0, 9, 0, 6, 0, 0],
              [1, 3, 0, 0, 0, 0, 2, 5, 0],
              [0, 0, 0, 0, 0, 0, 0, 7, 4],
              [0, 0, 5, 2, 0, 6, 3, 0, 0]]
    MySudoku = Sudoku_Solver(sudoku)
    if (MySudoku.solveSudoku()):
        MySudoku.printSudoku()
    else:
        print("No Solution found")
|
#aklından bir sayı tut oyunu
#aklımdan tuttuğum sayıyı bilgisayar tahmin ediyor.
import random
# "Guess my number" game: the computer guesses the player's number by
# bisection-style narrowing of the range after each answer.
enKucukDeger = 1    # lower bound of the current search range
enBuyukDeger = 100  # upper bound of the current search range
tahminSayisi = 1    # number of guesses made so far
cevap = "h"
while cevap != "e":
    print("ek-{} , eb-{}".format(enKucukDeger, enBuyukDeger))
    # Pick a random guess inside the current range.
    bilgisayarinTahminEttigiSayi = random.randint(enKucukDeger, enBuyukDeger)
    cevap = input("{} senin tuttuğun sayı mı? [e]vet / daha [b]üyük olmalı / [k]üçük olmalı: ".format(bilgisayarinTahminEttigiSayi))
    if cevap == "e":
        print("Oley!! {} tahminde bildim".format(tahminSayisi))
    elif cevap == "b":
        # Answer "bigger": raise the lower bound past the guess.
        enKucukDeger = bilgisayarinTahminEttigiSayi + 1
    else:
        # Any other answer is treated as "smaller": lower the upper bound.
        enBuyukDeger = bilgisayarinTahminEttigiSayi - 1
    tahminSayisi += 1
|
from django.db import models
# Create your models here.
# Inheriting models.Model maps each field to a column of the table.
class Grades(models.Model):
    """A class/grade record."""
    gname = models.CharField(max_length=20)  # grade name
    gdate = models.DateTimeField()
    ggirlnum = models.IntegerField()
    gboynum = models.IntegerField()
    isDelete = models.BooleanField(default=False)  # soft-delete flag
class Students(models.Model):
    """A student record, linked to its grade/class."""
    sname = models.CharField(max_length=20)  # student name
    sgender = models.BooleanField(default=True)
    sage = models.IntegerField()
    scontent = models.CharField(max_length=20)
    isDelete = models.BooleanField(default=False)  # soft-delete flag
    # Foreign key: each student belongs to a Grades row; deleting the
    # grade cascades to its students.
    sgrade = models.ForeignKey('Grades', on_delete=models.CASCADE)
class Teacher_1(models.Model):
    """Denormalized teacher/term record (ids and names stored as plain text)."""
    term = models.CharField(max_length=30)
    cla_id = models.CharField(max_length=20)
    cla_Name = models.CharField(max_length=20)
    gra_Name = models.CharField(max_length=20)
    sub_id = models.CharField(max_length=30)
    sub_Name = models.CharField(max_length=20)
    bas_id = models.CharField(max_length=20)
    bas_Name = models.CharField(max_length=20)
|
class Solution(object):
    def genLR(self, l, r, rStr, rLst):
        """Grow rStr into every balanced string; l/r = '(' and ')' left to place."""
        # l > r means more ')' than '(' has been placed -- prune the branch.
        if l > r:
            return
        if l == 0 and r == 0:
            rLst.append(rStr)
            return
        if l > 0:
            self.genLR(l - 1, r, rStr + '(', rLst)
        if r > 0:
            self.genLR(l, r - 1, rStr + ')', rLst)

    def generateParenthesis(self, n):
        """
        :type n: int
        :rtype: List[str]
        """
        combos = []
        self.genLR(n, n, "", combos)
        return combos
|
from playsound import playsound
# Play an mp3 file through the playsound library.
playsound('C:/Users/HIMA/Music/life_goes_on.mp3')  # specify the path of the song
print('playing sound using playsound')
# 1. Write a function make_change that accepts two argument:
#    A. total_charge = amount of money owed
#    B. payment = amount of money paid
# 2. Return a 2-dimensional tuple whose values represent bills and coins
#    (singles, fives, tens, twentys, fifties, hundreds)
#    (pennies, nickles, dimes, quarters)
# First convert dollar amount to bills
# Second convert cents to coins

# Denominations, smallest first; result tuples are parallel to these lists.
bills = [1, 5, 10, 20, 50, 100]
coins = [1, 5, 10, 25]


def convert_dollars(dollars):
    """Greedily break *dollars* into bills.

    Returns a tuple of counts ordered (singles, fives, tens, twentys,
    fifties, hundreds), parallel to the ``bills`` list.
    """
    counts = []
    # Walk denominations largest-first so the greedy choice works.
    for denom in reversed(bills):
        qty = 0
        while dollars >= denom:
            dollars -= denom
            qty += 1
        counts.append(qty)
    counts.reverse()
    return tuple(counts)
def convert_coins(cents):
    """Greedily break *cents* into coins.

    Returns a tuple of counts ordered (pennies, nickles, dimes, quarters),
    parallel to the module-level ``coins`` list.
    """
    counts = []
    # Largest denomination first; the greedy choice works for US coins.
    for denom in reversed(coins):
        qty = 0
        while cents >= denom:
            cents -= denom
            qty += 1
        counts.append(qty)
    counts.reverse()
    return tuple(counts)
def dollar_split(total):
    """Return the whole-dollar portion of *total*.

    Bug fix: the original used round(total, 0), which rounded e.g. 5.75 up
    to 6 dollars; the dollar part must be truncated instead.
    """
    return int(total)
def cent_split(total):
    """Return the cents portion of *total* as an int in [0, 99].

    Bug fix: the original computed int(round(frac, 2) * 100), where float
    error made e.g. 5.29 yield 28 cents (0.29 * 100 == 28.999...). Rounding
    once, after the multiply, fixes the truncation.
    """
    cents = int(round((total - int(total)) * 100))
    print(cents)  # kept: the script traces intermediate values to stdout
    return cents
def make_change(total_charge, payment):
    """Return the change owed as ((singles, fives, tens, twentys, fifties,
    hundreds), (pennies, nickles, dimes, quarters))."""
    change = payment - total_charge
    print(change)  # debug trace
    dollars = dollar_split(change)
    print(dollars)  # debug trace
    cents = cent_split(change)
    print(cents)  # debug trace
    total = (convert_dollars(dollars), convert_coins(cents))
    return total
# Interactive driver: read the bill and payment, then show the change split.
user_bill = float(input("Give me the bill total you want to split up. "))
user_paid = float(input("Give me the amount that you paid. "))
change = make_change(user_bill, user_paid)
print(change)
def find_dollars(dollars_tuple):
    """Dollar value of a bill-count tuple (parallel to the ``bills`` list)."""
    total = 0
    for count, denom in zip(dollars_tuple, bills):
        total += count * denom
    return total
def find_coins(coins_tuple):
    """Dollar value of a coin-count tuple (parallel to the ``coins`` list)."""
    cents = 0
    for count, denom in zip(coins_tuple, coins):
        cents += count * denom
    # Counts are in cents; convert to dollars.
    return cents / 100
def value_of_change(dollar_coin_tuple):
    """Recompute the total dollar value of a (bills, coins) count-tuple pair."""
    total = 0
    # NOTE(review): this unpacking shadows the global `coins` list locally;
    # find_coins still reads the module-level list, so behavior is unaffected.
    dollars, coins = dollar_coin_tuple
    print(dollars)  # debug trace
    print(coins)
    print(find_dollars(dollars))
    print(find_coins(coins))
    total += find_dollars(dollars) + find_coins(coins)
    return total


# Sanity check: the change tuple should round-trip back to its dollar value.
print(value_of_change(change))
|
# Programa que lista todas las imagenes
import json
# Load the SPARQL-style JSON results and print each image's label.
with open("imagenes.json") as data_file:
    data = json.load(data_file)
print " "
print "La lista de imagenes es la siguiente: "
print " "
# Each binding carries the label under rdfs_label.value.
for a in data["results"]["bindings"]:
    print a["rdfs_label"]["value"]
    print " "
|
from flask import Flask, Blueprint, request, json
from views import WalletViews
from decorators import api_login_required, check_wallet_amount_status
from App.Response import Response
# Blueprint collecting all wallet-related routes.
wallet = Blueprint('wallet', __name__, template_folder='templates')
'''
Get Wallet balance
'''
@wallet.route('/wallet', methods=['GET'])
@api_login_required
def wallet_data():
    # NOTE(review): the wallet payload is fetched but never returned, so this
    # view responds with None. Presumably it should return a
    # Response.respondWith* wrapper like the other views -- confirm intent.
    response = WalletViews().get_wallet(request)
    pass
'''
route for a transaction request
'''
@wallet.route('/wallet/<wallet_id>/transactions', methods=['GET', 'POST'])
@api_login_required
@check_wallet_amount_status
def wallet_transactions(wallet_id):
    # GET: list the wallet's transactions; ?type=passbook returns the flat
    # collection, otherwise the paginated one.
    if request.method == 'GET':
        response = WalletViews().fetch_all_wallet_transaction(wallet_id, request)
        if 'type' in request.args and request.args['type'] == 'passbook':
            return Response.respondWithCollection(response, hint='Transactions')
        return Response.respondWithPaginatedCollection(response, hint='Transactions')
    # POST: create a new transaction request from the JSON body.
    response = WalletViews().request_transaction(wallet_id, request.json)
    return Response.respondWithItem(response, statusCode=201)
'''
route for a cancellation of transaction
'''
@wallet.route('/wallet/<wallet_id>/transactions/<transaction_id>', methods=['DELETE'])
@api_login_required
def transactions_actions(wallet_id, transaction_id):
    # Cancel the given transaction and return the updated item.
    response = WalletViews().cancel_transaction(wallet_id, transaction_id)
    return Response.respondWithItem(response)
|
# coding=utf-8
import json
import re
from string import Template
from meitData import shopData
import connectdb
# Shop list: parse the embedded JSON payload and persist every shop.
def main():
    res = json.loads(shopData)
    if res.get("data"):
        shopList = res.get("data").get('shopList')
        get_connect(shopList)
# Map each Meituan shop dict to our schema and insert/update final_shop.
def get_connect(shopList):
    for shop in shopList:
        info = {}
        restaurant = shop
        info['sid'] = restaurant.get('mtWmPoiId')  # shop id
        info['name'] = restaurant.get('shopName')  # shop name
        info['image_path'] = restaurant.get('picUrl')  # picture URL
        info['address'] = restaurant.get('address')  # address
        info['float_delivery_fee'] = float(re.findall(r"\d+\.?\d*",restaurant.get('shippingFeeTip'))[0])  # delivery fee
        info['order_lead_time'] = int(re.sub("\D", "", restaurant.get('deliveryTimeTip')))  # delivery time (minutes text)
        # Distance: values labelled 'km' are converted to meters.
        distance = float(re.findall(r"\d+\.?\d*",restaurant.get('distance'))[0])
        isKm = restaurant.get('distance').find('km')
        if isKm == -1:
            info['distance'] = distance
        else:
            info['distance'] = distance * 1000
        info['float_minimum_order_amount'] = int(re.sub("\D", "", restaurant.get('shippingFeeTip')))  # minimum order amount
        info['rating'] = restaurant.get('wmPoiScore') / 10  # rating
        info['recent_order_num'] = int(re.sub("\D", "", restaurant.get('monthSalesTip')))  # monthly sales
        info['mt_sale_cut'] = restaurant.get('discounts2')[0].get('info')  # promotion text
        # info['opening_hours'] = restaurant.get('shipping_time')
        # print(info)
        # SQL operations.
        # NOTE(review): field values are spliced directly into the SQL text
        # (Template / %-formatting) -- SQL injection and quoting risk if any
        # field contains a quote; prefer parameterized queries if connectdb
        # supports them.
        if isExist(info['name']):
            sql = Template("update final_shop set mt_delivery_fee=${mt_delivery_fee},mt_lead_time=${mt_lead_time},mt_order_amount=${mt_order_amount},mt_rating=${mt_rating},mt_recent_order_num=${mt_recent_order_num},mt_sale_cut='${mt_sale_cut}' where name='${name}'")
            sql = sql.substitute(mt_delivery_fee=info['float_delivery_fee'],mt_lead_time=info['order_lead_time'],mt_order_amount=info['float_minimum_order_amount'],mt_rating=info['rating'],mt_recent_order_num=info['recent_order_num'],mt_sale_cut=info['mt_sale_cut'],name=info['name'])
            sta = connectdb.exe_update(cur,sql)
            if sta == 1:
                print('更新成功')
            else:
                print('更新失败')
        else:
            sta = connectdb.exe_update(cur,"insert into final_shop(mt_sid, name, image_path, address, mt_delivery_fee, mt_lead_time, distance, mt_order_amount, mt_rating, mt_recent_order_num,mt_sale_cut) values('%s','%s','%s','%s','%f','%f','%f','%f','%f','%f','%s')" % (info['sid'], info['name'], info['image_path'], info['address'], info['float_delivery_fee'], info['order_lead_time'], info['distance'], info['float_minimum_order_amount'], info['rating'], info['recent_order_num'],info['mt_sale_cut']))
            if sta == 1:
                print('插入成功')
            else:
                print('插入失败')
# Whether a shop with this name already exists in final_shop.
def isExist(name):
    # NOTE(review): the name is concatenated straight into the SQL string --
    # SQL injection risk; prefer a parameterized query if connectdb allows it.
    sta = connectdb.exe_query(cur, "select elm_sid from final_shop where name = '"+name+"'")
    if len(sta) > 0:
        return True
    return False
# Open the DB connection, import the shops, then commit and close.
conn, cur = connectdb.conn_db()
main()
connectdb.exe_commit(cur)  # NB: commit is required or the writes never reach the database
connectdb.conn_close(conn, cur)
|
def gen(n, C, r):
if n == 0:
return [ 1 if i == r else 0 for i in range(3) ], C[r]
A1, s1 = gen(n - 1, C, r)
A2, s2 = gen(n - 1, C, (r + 1) % 3)
return [ A1[i] + A2[i] for i in range(3) ], min(s1, s2) + max(s1, s2)
def check(n, N, C, r):
    """Return the lineup for start index r when its counts match N, else None."""
    counts, lineup = gen(n, C, r)
    if counts[0] == N[0] and counts[1] == N[1] and counts[2] == N[2]:
        return lineup
    return None
def solve(n, N, C):
    """Try all three possible starting indices; return the lexicographically
    smallest matching lineup, or 'IMPOSSIBLE' when none matches N."""
    ans = None
    for i in range(3):
        here = check(n, N, C, i)
        # Keep the first hit, then any strictly smaller one (string compare).
        if ans is None or (here and here < ans):
            ans = here
    if ans:
        return ans
    return 'IMPOSSIBLE'
# Read the number of cases, then one line of four integers per case.
t = int(input().strip())
for i in range(t):
    C = input().strip().split()
    # The last three counts are reordered to align with the ['S', 'P', 'R']
    # move list -- presumably the input gives them as R, P, S; confirm
    # against the problem statement.
    print('Case #{}: {}'.format(i + 1, solve(int(C[0]), [int(C[3]), int(C[2]), int(C[1])], ['S', 'P', 'R'])))
|
from .base_page import BasePage
from .locators import BasketPageLocators
class BasketPage(BasePage):
    """Page-object checks for the basket page contents."""

    # The basket contains no products.
    def not_product_in_basket(self):
        assert not self.is_element_present(*BasketPageLocators.BASKET_BUTTON_BLOCK), "Product in basket"

    # The basket contains a product.
    # Bug fix: this previously asserted the element was ABSENT, which both
    # duplicated not_product_in_basket and contradicted its own failure
    # message ("Basket is clear!").
    def product_in_basket(self):
        assert self.is_element_present(*BasketPageLocators.BASKET_BUTTON_BLOCK), "Basket is clear!"

    # The "basket is empty" message is shown.
    def text_basket_is_clear(self):
        assert self.is_element_present(*BasketPageLocators.BASKET_CLEAR), "Basket is not clear!"

    # The "basket is empty" message is not shown.
    def not_text_basket_is_clear(self):
        assert not self.is_element_present(*BasketPageLocators.BASKET_CLEAR), "Basket is clear!"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 7 10:01:05 2021
@author: kutalmisince
"""
import numpy as np
import matplotlib.pyplot as plt
class Superpixel:
    """Bayesian superpixel segmentation.

    An image is first tiled into regular superpixels (square, hexagonal or
    iterative-square tiling) and the tiling is then refined iteratively:
    boundary pixels are reassigned to the 4-neighbouring superpixel with the
    lowest combined spectral + spatial cost, while an 8-neighbourhood
    connectivity look-up table (LUT_JC) prevents any superpixel from being
    split. Per-superpixel statistics are maintained incrementally via sums
    and squared sums so each refinement step avoids a full recomputation.
    """
    def __init__(self, compactness = 8.0, tiling = 'iSQUARE', exp_area = 256.0, num_req_sps = 0, spectral_cost = 'Bayesian', spatial_cost = 'Bayesian', statistics_update_rate = 3):
        '''
        compactness: weight of spatial distance, can be any floating number
        tiling: initial tiling {'SQUARE', 'HEX', 'iSQUARE'}
        exp_area: required average area of SPs (not used if num_req_sps is set)
        num_req_sps: number of required SPs
        spectral_cost: spectral cost function {'L2', 'Bayesian'}
        spatial_cost: spatial cost function {'L2', 'Bayesian'}
        statistics_update_rate: retained as an attribute; not referenced elsewhere in this class
        '''
        self.compactness = float(compactness)
        self.tiling = tiling
        self.exp_area = float(exp_area)
        self.num_req_sps = num_req_sps
        self.spectral_cost = spectral_cost
        self.spatial_cost = spatial_cost
        # hidden hyper-parameters
        self.measurement_precision = 1.0 # average SP variance is bounded with measurement precision for numeric stability
        self.var_min = 0.5 # variance lower bound = average variance x var_min
        self.var_max = 2.0 # variance upper bound = average variance x var_max
        self.cov_reg_weight = 0.2 # covariance is regularized with (1-lambda) * cov + lambda * diag(exp_area / 12)
        # neighbors defining connectedness from ls bit to ms bit (big endian);
        # index 0 is the centre pixel itself
        self.neighbor_x = [0, 1, 0, -1, 0, 1, -1, -1, 1]
        self.neighbor_y = [0, 0, -1, 0, 1, -1, -1, 1, 1]
        # "just connected" look-up table: indexed by the 8-bit same-label
        # neighbourhood pattern; True means the centre pixel may change label
        # without disconnecting its superpixel
        self.LUT_JC = np.array([0,1,1,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,1,0], dtype=bool)
        self.statistics_update_rate = statistics_update_rate
    def extract_superpixels(self, img_proc, img_disp = None, main_channel = 0):
        """Segment img_proc into superpixels.

        img_proc: image to segment (H x W or H x W x C); copied to float64
        img_disp: optional display image — when given, intermediate tilings
                  are plotted with matplotlib
        main_channel: channel used for the iSQUARE intensity-driven tiling
        Results are left on the instance (img_label, bbox, num_sps, ...).
        """
        # get inputs, set process and display image
        self.img_proc = img_proc.copy().astype(np.float64)
        self.img_disp = img_disp
        self.main_channel = main_channel
        # get the size of the image
        self.height = self.img_proc.shape[0]
        self.width = self.img_proc.shape[1]
        self.channels = 1 if self.img_proc.ndim == 2 else self.img_proc.shape[2]
        # set grid (coordinate) image
        self.img_grid = np.zeros((self.height, self.width, 2))
        self.img_grid[:,:,0], self.img_grid[:,:,1] = np.meshgrid(np.arange(0, self.width), np.arange(0, self.height))
        # initiate label image
        self.img_label = np.zeros((self.height, self.width), dtype=np.uint32)
        # set average SP area or number of required SPs
        if self.num_req_sps > 0:
            self.exp_area = self.height * self.width / float(self.num_req_sps)
        else:
            self.num_req_sps = np.round(self.height * self.width / self.exp_area)
        # set covariance regularization term
        self.cov_reg = np.eye(2) * self.exp_area / 12.0
        # compactness = 8 generates visually pleasing results for Lab color space & 16x16 SP size with no spatial or spectral normalization
        # if both cost functions are unnormalized, then both will be divided to default variance/covariance (multiplies given compactness by 0.93 for 16x16SP size)
        # if both cost functions are bayesian, then no problem (these values are not used)
        # if one cost function is bayesian and the other is unnormalized then we need these default values!
        self.var_default = 4.0
        self.cov_default = self.exp_area / 12.0
        # perform initial tiling
        self.initial_tiling()
        # refine grid
        self.refine_grid()
        # set final bboxes
        self.update_bbox()
    def initial_tiling(self):
        """Dispatch to the tiling routine selected by self.tiling and
        optionally display the result."""
        # uncertainty of bbox
        self.bbox_uncertainty = 0
        # perform initial tiling
        if self.tiling == 'iSQUARE':
            self.isquare_tiling()
        elif self.tiling == 'HEX':
            self.honeycomb_tiling()
        else:
            self.square_tiling()
        # if display image is specified, display initial tiling
        if self.img_disp is not None:
            plt.figure(dpi=300)
            plt.axis('off')
            plt.imshow(self.draw_boundaries(self.img_disp))
            plt.title('inital tiling')
            plt.show()
    def square_tiling(self):
        """Tile the image with an even grid of squares of ~exp_area pixels,
        filling img_label, bbox, num_sps and label_grid."""
        # edge length of the tiling square
        self.edge_length = np.sqrt(self.exp_area)
        # number of SPs on horizontal and vertical axes
        num_h = np.rint(self.width / self.edge_length).astype(int)
        num_v = np.rint(self.height / self.edge_length).astype(int)
        # initiate number of SPs and bbox
        self.num_sps = num_h * num_v
        self.bbox = np.zeros((self.num_sps, 4), dtype=np.int32)
        # set column and row start indexes for SPs
        cst = np.rint(np.linspace(0, self.width, num_h + 1)).astype(int)
        rst = np.rint(np.linspace(0, self.height, num_v + 1)).astype(int)
        self.num_sps = 0
        self.label_grid = np.zeros((num_v, num_h))
        # set label image and bounding box
        for j in range(num_v):
            for i in range(num_h):
                self.label_grid[j,i] = self.num_sps
                self.img_label[rst[j] : rst[j + 1], cst[i] : cst[i + 1]] = self.num_sps
                self.bbox[self.num_sps, :] = [cst[i], rst[j], cst[i + 1], rst[j + 1]]
                self.num_sps += 1
    def honeycomb_tiling(self):
        """Tile the image with a hexagonal (honeycomb) pattern of ~exp_area
        pixels per cell by nearest-centre assignment."""
        # edge length of the tiling hexagon
        self.edge_length = np.sqrt(self.exp_area * 4 / (6 * np.sqrt(3)))
        # number of SPs on horizontal and vertical axes
        num_h = np.rint(self.width / (1.5 * self.edge_length)).astype(int)
        num_v = np.rint(self.height / (np.sqrt(3)/2 * self.edge_length)).astype(int)
        # spacing between initial SP centers
        horizontal_spacing = (self.width / num_h).astype(float)
        vertical_spacing = (self.height / num_v).astype(float)
        # centers
        x = horizontal_spacing / 2 + np.arange(num_h) * horizontal_spacing
        y = vertical_spacing / 2 + np.arange(num_v) * vertical_spacing
        # row&column start&end indexes of sp grid
        cst = np.floor(x - horizontal_spacing).astype(int); cst[0:2] = 0
        rst = np.floor(y - vertical_spacing).astype(int); rst[0:2] = 0
        cnd = np.ceil(x + horizontal_spacing).astype(int); cnd[-2:] = self.width
        rnd = np.ceil(y + vertical_spacing).astype(int); rnd[-2:] = self.height
        # initiate number of SPs, bbox and pixel to SP distance for each pixel
        self.num_sps = 0
        self.bbox = np.zeros((num_h * num_v, 4), dtype=np.int32)
        d_min = np.full(self.img_label.shape, np.inf)
        # set label image
        for j in range(num_v):
            for i in range(num_h):
                # for even columns even rows, for odd columns odd rows will be set
                if not np.logical_xor(i % 2 == 0, j % 2 == 0):
                    continue
                # image patch
                L = self.img_label[rst[j] : rnd[j], cst[i] : cnd[i]]
                X = self.img_grid[rst[j] : rnd[j], cst[i] : cnd[i], :]
                D = d_min[rst[j] : rnd[j], cst[i] : cnd[i]]
                # pixel to sp distance
                d = (X[:,:,0] - x[i]) ** 2 + (X[:,:,1] - y[j]) ** 2
                # replace label image and min distance if current sp distance is smaller than previously set
                mask = d < D
                L[mask] = self.num_sps
                D[mask] = d[mask]
                self.bbox[self.num_sps, :] = [cst[i], rst[j], cnd[i], rnd[j]]
                self.num_sps += 1
        self.bbox = self.bbox[:self.num_sps, :]
    def isquare_tiling(self):
        """Iterative-square tiling: start from square tiling, then derive
        labels by 4 levels of content-aware downsampling (16x16 cells)."""
        # first perform square tiling to initiate bounding boxes
        self.square_tiling()
        # check the edge length
        # NOTE(review): the comment said "integer power of 2" but the check
        # requires exactly 16 — confirm which is intended
        if self.edge_length != 16:
            print('Edge length must be 16 for iSQUARE tiling. Tiling is set to SQAURE!')
            return
        I0 = np.concatenate((np.expand_dims(self.img_proc[:, :, self.main_channel], 2), self.img_grid), axis=2)
        A0 = np.ones((I0.shape[0], I0.shape[1]), dtype=float)
        I1, A1, indR0, L1 = self.isq_downsample(I0, A0, spatial_reg=True)
        I2, A2, indR1, L2 = self.isq_downsample(I1, A1, spatial_reg=True)
        I3, A3, indR2, L3 = self.isq_downsample(I2, A2, spatial_reg=True)
        I4, A4, indR3, L4 = self.isq_downsample(I3, A3, spatial_reg=True)
        # propagate the coarsest labels back to full resolution level by level
        L3 = L4.flatten()[indR3.astype(int)]
        L2 = L3.flatten()[indR2.astype(int)]
        L1 = L2.flatten()[indR1.astype(int)]
        L0 = L1.flatten()[indR0.astype(int)]
        self.img_label = L0.astype(int)
        self.bbox_uncertainty = 16
    def isq_downsample(self, inp_img, inp_area, spatial_reg = True):
        """Merge each 2x2 neighbourhood toward the seed layout with the lowest
        sum of squared merge errors, halving the resolution.

        inp_img: H x W x C image whose last two channels are x/y coordinates
        inp_area: per-pixel accumulated area from previous levels
        Returns (downsampled image, area, per-pixel index into the coarse
        level, coarse label grid).
        """
        # get & check image size
        h = inp_img.shape[0]
        w = inp_img.shape[1]
        ch = inp_img.shape[2]
        if w % 2 or h % 2:
            print('Error: image must have even number of rows and columns!')
            return
        # check and apply spatial regularization
        if spatial_reg:
            G = inp_img[:,:,-2:].copy()
            G[:, 0::2, 0] = (G[:, 0::2, 0] + G[:, 1::2, 0]) / 2
            G[:, 1::2, 0] = G[:, 0::2, 0]
            G[0::2, :, 1] = (G[0::2, :, 1] + G[1::2, :, 1]) / 2
            G[1::2, :, 1] = G[0::2, :, 1]
        else:
            G = inp_img[:,:,-2:]
        # difference images
        diff_L = np.full([h+2, w+2], np.inf); # difference with left (right can be obtained with 1px horizontal shift)
        diff_T = np.full((h+2, w+2), np.inf); # difference with top (bottom can be obtained with 1px vertical shift)
        diff_LT = np.full((h+2, w+2), np.inf); # difference with left-top (right-bottom can be obtained with [1, 1]px shift)
        diff_RT = np.full((h+2, w+2), np.inf); # difference with right-top (left-bottom can be obtained with [-1, 1]px shift)
        # set difference images, all have same index with the input image
        diff_L[1:-1,2:-1] = np.sum((inp_img[:, 1:, :-2] - inp_img[:, :-1, :-2]) ** 2, axis=2) + np.sum((inp_img[:, 1:, -2:] - G[:, :-1, :]) ** 2, axis=2)
        diff_T[2:-1,1:-1] = np.sum((inp_img[1:, :, :-2] - inp_img[:-1, :, :-2]) ** 2, axis=2) + np.sum((inp_img[1:, :, -2:] - G[:-1, :, :]) ** 2, axis=2)
        diff_LT[2:-1,2:-1] = np.sum((inp_img[1:, 1:, :-2] - inp_img[:-1, :-1, :-2]) ** 2, axis=2) + np.sum((inp_img[1:, 1:, -2:] - G[:-1, :-1, :]) ** 2, axis=2)
        diff_RT[2:-1,1:-2] = np.sum((inp_img[1:, :-1, :-2] - inp_img[:-1, 1:, :-2]) ** 2, axis=2) + np.sum((inp_img[1:, :-1, -2:] - G[:-1, 1:, :]) ** 2, axis=2)
        # error image to store 4 neighbor differences
        img_err = np.zeros([h // 2, w // 2, 4])
        # horizontal/vertical neighbor flags
        ind_h = np.zeros([h+2, w+2], dtype=bool)
        ind_v = np.zeros([h+2, w+2], dtype=bool)
        ind_d = np.zeros([h+2, w+2], dtype=np.uint8)
        # set 1px padded area
        A = np.ones([h+2, w+2]); A[1:-1, 1:-1] = inp_area
        # alternative seed indexes on difference images (left-top, right-top, left-bottom, right-bottom) for downsampling
        L = [1, 2, 1, 2]
        T = [1, 1, 2, 2]
        # check start indexes on difference image for corresponding seed indexes
        X = [2, 1, 2, 1]
        Y = [2, 2, 1, 1]
        # initiate sum of squared errors for different seed indexes
        sse = np.zeros(4)
        # get sse for alternative seed indexes
        for i in range(4):
            # let values and area for pixels i and j are given as:
            # area[i] = N, value[i] = I
            # area[j] = M, value[j] = J
            # then when we merge i and j, mean = (N * I + M * J) / (M + N)
            # sse before merge:
            # sse[i] = sum(i^2) - N * I^2
            # sse[j] = sum(j^2) - M * J^2
            # sse after merge:
            # sse[i + j] = sum(i^2) + sum(j^2) - (N * I + M * J)^2 / (M + N)
            # sse increment = N * I^2 + M * J^2 - (N * I + M * J)^2 / (M + N)
            # sse increment = N * M / (N + M) * (I - J)^2
            # horizontal downsampling
            W = A[T[i]:-1:2, X[i]:-1:2] # area of pixels to be merged to left or right
            WL = (W * A[T[i]:-1:2, X[i]-1:-2:2]) / (W + A[T[i]:-1:2, X[i]-1:-2:2]) # corresponding left/right weights
            WR = (W * A[T[i]:-1:2, X[i]+1: :2]) / (W + A[T[i]:-1:2, X[i]+1: :2])
            img_err[:,:,0] = WL * diff_L[T[i]:-1:2, X[i]:-1:2] # SSE for merging with left seed
            img_err[:,:,1] = WR * diff_L[T[i]:-1:2, X[i]+1::2] # SSE for merging with right seed
            ind_h[T[i]:-1:2, X[i]:-1:2] = img_err[:,:,1] < img_err[:,:,0] # select left/right seed: 0 means left, 1 means right
            sse[i] = np.sum(img_err[:,:,0][~ind_h[T[i]:-1:2, X[i]:-1:2]]) + np.sum(img_err[:,:,1][ind_h[T[i]:-1:2, X[i]:-1:2]]) # initialize sse
            # vertical downsampling
            W = A[Y[i]:-1:2, L[i]:-1:2] # area of pixels to be merged to top or bottom
            WL = (W * A[Y[i]-1:-2:2, L[i]:-1:2]) / (W + A[Y[i]-1:-2:2, L[i]:-1:2]) # corresponding top/bottom weights
            WR = (W * A[Y[i]+1::2, L[i]:-1:2]) / (W + A[Y[i]+1::2, L[i]:-1:2])
            img_err[:,:,0] = WL * diff_T[Y[i]:-1:2, L[i]:-1:2] # SSE for merging with top seed
            img_err[:,:,1] = WR * diff_T[Y[i]+1::2, L[i]:-1:2] # SSE for merging with bottom seed
            ind_v[Y[i]:-1:2, L[i]:-1:2] = img_err[:,:,1] < img_err[:,:,0] # select top/bottom seed: 0 means top, 1 means bottom
            sse[i] += np.sum(img_err[:,:,0][~ind_v[Y[i]:-1:2, L[i]:-1:2]]) + np.sum(img_err[:,:,1][ind_v[Y[i]:-1:2, L[i]:-1:2]]) # update sse
            # diagonal downsampling
            W = A[Y[i]:-1:2, X[i]:-1:2]
            WTL = (W * A[Y[i]-1:-2:2, X[i]-1:-2:2]) / (W + A[Y[i]-1:-2:2, X[i]-1:-2:2]) # top-left
            WTR = (W * A[Y[i]-1:-2:2, X[i]+1::2]) / (W + A[Y[i]-1:-2:2, X[i]+1::2]) # top-right
            WBR = (W * A[Y[i]+1::2, X[i]+1::2]) / (W + A[Y[i]+1::2, X[i]+1::2]) # bottom-right
            WBL = (W * A[Y[i]+1::2, X[i]-1:-2:2]) / (W + A[Y[i]+1::2, X[i]-1:-2:2]) # bottom-left
            # to merge with LT seed either top neighbor should merge to left or left neighbor merge with top, similar for other neighbors
            WTL[~np.logical_or(~ind_v[Y[i]:-1:2, X[i]-1:-2:2], ~ind_h[Y[i]-1:-2:2, X[i]:-1:2])] = np.inf
            WTR[~np.logical_or(~ind_v[Y[i]:-1:2, X[i]+1: :2], ind_h[Y[i]-1:-2:2, X[i]:-1:2])] = np.inf
            WBL[~np.logical_or( ind_v[Y[i]:-1:2, X[i]-1:-2:2], ~ind_h[Y[i]+1: :2, X[i]:-1:2])] = np.inf
            WBR[~np.logical_or( ind_v[Y[i]:-1:2, X[i]+1: :2], ind_h[Y[i]+1: :2, X[i]:-1:2])] = np.inf
            img_err[:,:,0] = WTL * diff_LT[Y[i]:-1:2, X[i]:-1:2] # SSE for merging with top-left seed
            img_err[:,:,1] = WTR * diff_RT[Y[i]:-1:2, X[i]:-1:2] # SSE for merging with top-right seed
            img_err[:,:,2] = WBR * diff_LT[Y[i]+1::2, X[i]+1::2] # SSE for merging with bottom-right seed
            img_err[:,:,3] = WBL * diff_RT[Y[i]+1::2, X[i]-1:-2:2] # SSE for merging with bottom-left seed
            ind_d[Y[i]:-1:2, X[i]:-1:2] = np.argmin(img_err, axis=2)
            for n in range(4): sse[i] += np.sum(img_err[:,:,n][ind_d[Y[i]:-1:2, X[i]:-1:2] == n])
        # select the minimum error seed
        i = np.argmin(sse)
        # prepare input image for downsampling by weighting with input area
        inp_weighted = np.zeros([h+2, w+2, ch])
        inp_weighted[1:-1, 1:-1, :] = inp_img * np.expand_dims(inp_area, 2)
        # set area of image boundary to zero so they do not contribute to downsampled image
        A[[0, -1], :] = 0
        A[:, [0, -1]] = 0
        # initiate output with seed
        out_img = inp_weighted[T[i]:-1:2, L[i]:-1:2, :].copy()
        out_area = A[T[i]:-1:2, L[i]:-1:2].copy()
        # initiate output index
        out_ind = np.zeros([h+2, w+2], dtype=np.uint32)
        out_ind[T[i]:-1:2, L[i]:-1:2] = np.arange(0, h//2 * w//2).reshape([h//2, w//2])
        out_label = np.arange(0, h//2 * w//2).reshape([h//2, w//2])
        # neighbors, indexes to be checked and required values to append
        neighbor_x = np.array([1, 0, -1, 0, 1, -1, -1, 1])
        neighbor_y = np.array([0, -1, 0, 1, -1, -1, 1, 1])
        ind_list = [ind_h, ind_v, ind_h, ind_v, ind_d, ind_d, ind_d, ind_d]
        req_val = [0, 1, 1, 0, 3, 2, 1, 0]
        # add neighbors
        for x, y, n in zip(neighbor_x + L[i], neighbor_y + T[i], np.arange(8)):
            mask = ind_list[n][y:y+h:2, x:x+w:2] == req_val[n]
            out_img[mask, :] += inp_weighted[y:y+h:2, x:x+w:2, :][mask]
            out_area[mask] += A[y:y+h:2, x:x+w:2][mask]
            out_ind[y:y+h:2, x:x+w:2][mask] = out_ind[T[i]:-1:2, L[i]:-1:2][mask]
        return out_img / np.expand_dims(out_area, 2), out_area, out_ind[1:-1, 1:-1], out_label
    def refine_grid(self):
        """Iteratively reassign boundary pixels to cheaper neighbouring
        superpixels; displays progress when img_disp is set."""
        # set maximum number of iterations
        '''
        if self.tiling == 'iSQUARE':
            self.max_iterations = np.maximum(np.ceil(self.edge_length * 0.4).astype(int), 4)
        elif self.tiling == 'HEX':
            self.max_iterations = np.ceil(self.edge_length).astype(int)
        else:
            self.max_iterations = np.ceil(self.edge_length * 0.8).astype(int)
        '''
        self.max_iterations = np.ceil(self.edge_length).astype(int)
        # set image boundaries as sp = num_sps which does not exist! so they won't affect connectedness
        self.update_image_boundaries(value = self.num_sps)
        # initiate SP distributions
        self.initiate_sp_distributions()
        # set cost functions (the string attribute is replaced by the bound method)
        if self.spectral_cost == 'Bayesian':
            self.spectral_cost = self.spectral_bayesian
        else:
            self.spectral_cost = self.spectral_L2
        if self.spatial_cost == 'Bayesian':
            self.SpatialCost = self.spatial_bayesian
        else:
            self.SpatialCost = self.spatial_L2
        # refine label image
        for iteration in range(self.max_iterations):
            #print('iteration: ' + str(iteration))
            for i in np.arange(1, 4): # step by 3 pixels in each axis to preserve connectivity
                for j in np.arange(1, 4): # do not start from 0 as it has not 8 neighbors
                    self.refine_grid_iteration(i, j)
            self.update_image_boundaries()
            if self.img_disp is not None:
                plt.figure(dpi=300)
                plt.axis('off')
                plt.imshow(self.draw_boundaries(self.img_disp))
                plt.title('iter: ' + str(iteration))
                plt.show()
    def refine_grid_iteration(self, l, t):
        """One refinement pass over the pixel lattice offset (l, t) with
        stride 3: reassign each connectivity-safe pixel to the cheapest of
        its 4-neighbours' labels and update SP statistics incrementally."""
        # right and bottom boundaries, do not come to image boundaries as they have not 8 neighbors
        b = self.height - 1
        r = self.width - 1
        # apply connectedness control and find the pixels can be updated
        B = np.zeros((np.ceil((b - t)/3).astype(int), np.ceil((r - l)/3).astype(int)), dtype=np.uint8)
        for n in np.arange(1, 9): #np.arange(8, 0, -1):
            B = np.left_shift(B, 1) + (self.img_label[t:b:3, l:r:3] == self.img_label[t+self.neighbor_y[n]:b+self.neighbor_y[n]:3, l+self.neighbor_x[n]:r+self.neighbor_x[n]:3]).astype(np.uint8)
        B = self.LUT_JC[B]
        # get the intensity values and coordinates of the pixels to be checked
        I = self.img_proc[t:b:3, l:r:3, :][B, :]
        X = self.img_grid[t:b:3, l:r:3, :][B, :]
        # get the current labels and initiate pixel to sp distance
        labels_updated = self.img_label[t:b:3, l:r:3][B]
        labels_original = labels_updated.copy()
        d_min = np.full(len(labels_updated), np.inf)
        # handle NaN's: if a pixel value or one of its candidate labels is NaN, then spectral distance for that pixel is not taken into account
        nan_mask = np.isnan(I)
        for n in np.arange(1,5): # check NaN candidate labels
            # get neighbor label
            L = self.img_label[t+self.neighbor_y[n]:b+self.neighbor_y[n]:3, l+self.neighbor_x[n]:r+self.neighbor_x[n]:3][B]
            nan_mask = np.logical_or(nan_mask, np.isnan(self.mean[L, :]))
        I[nan_mask] = np.nan # set intensity values to NaN, as spectral distance is computed via nansum NaN channels won't be taken into account
        # check neighbor sp distances
        for n in np.arange(1,5):
            # get neighbor label
            L = self.img_label[t+self.neighbor_y[n]:b+self.neighbor_y[n]:3, l+self.neighbor_x[n]:r+self.neighbor_x[n]:3][B]
            # get distance to neighbor label
            d = self.spectral_cost(I, L) + self.compactness * self.SpatialCost(X, L)
            # pixels to be updated
            update = d < d_min
            # perform update
            labels_updated[update] = L[update]
            d_min[update] = d[update]
        # update label image
        self.img_label[t:b:3, l:r:3][B] = labels_updated
        self.bbox_uncertainty += 1
        self.update_sp_distributions(labels_original, labels_updated, self.img_proc[t:b:3, l:r:3, :][B, :], X)
    def update_image_boundaries(self, value = None):
        """Copy the adjacent interior labels onto the 1px image border (and
        update statistics), or set the border to a fixed label when value is
        given."""
        # NOTE(review): `value == None` works here (value is None or an int)
        # but the idiomatic test is `value is None`
        if value == None:
            labels_original = self.img_label.copy()
            self.img_label[0, :] = self.img_label[1, :]
            self.img_label[:, 0] = self.img_label[:, 1]
            self.img_label[-1, :] = self.img_label[-2, :]
            self.img_label[:, -1] = self.img_label[:, -2]
            self.bbox_uncertainty += 1
            self.update_sp_distributions(labels_original, self.img_label, self.img_proc, self.img_grid)
        else:
            self.img_label[0, :] = value
            self.img_label[:, 0] = value
            self.img_label[-1, :] = value
            self.img_label[:, -1] = value
    def update_sp_distributions_original(self):
        """Recompute all SP statistics from scratch (non-incremental
        reference version of update_sp_statistics + initiate)."""
        # spectral distribution is expressed as mean and variance, spatial distribution is expressed as center and covariance
        self.mean = np.ones((self.num_sps+1, self.channels))
        self.var = np.ones((self.num_sps+1, self.channels))
        self.center = np.ones((self.num_sps+1, 2))
        self.cov = np.ones((self.num_sps+1, 2, 2))
        self.mean[self.num_sps, :] = np.inf
        self.center[self.num_sps, :] = np.inf
        # find sp distributions
        for n in range(self.num_sps):
            # extend current bbox
            l = np.maximum(self.bbox[n, 0] - self.bbox_uncertainty, 0)
            t = np.maximum(self.bbox[n, 1] - self.bbox_uncertainty, 0)
            r = np.minimum(self.bbox[n, 2] + self.bbox_uncertainty, self.width)
            b = np.minimum(self.bbox[n, 3] + self.bbox_uncertainty, self.height)
            # get mask for SP n
            M = self.img_label[t:b, l:r] == n
            # pixels of SP n
            I = self.img_proc[t:b, l:r, :][M, :]
            X = self.img_grid[t:b, l:r, :][M, :]
            # set new bbox
            r = np.max(X[:, 0]) + 1
            l = np.min(X[:, 0])
            b = np.max(X[:, 1]) + 1
            t = np.min(X[:, 1])
            self.bbox[n, :] = np.array([l,t,r,b])
            # find spatial and spectral mean and covariance
            self.mean[n, :] = np.nanmean(I, 0)
            self.var[n, :] = np.nanvar(I, 0)
            self.center[n, :] = np.nanmean(X, 0)
            self.cov[n, :, :] = np.cov(X.transpose())
        self.bbox_uncertainty = 0
        # bound variance INDEPENDENT CHANNELS
        '''
        var_avg = np.nanmean(self.var, 0)
        var_avg = np.maximum(var_avg, self.measurement_precision)
        var_limited = np.minimum(np.maximum(self.var, self.var_max * var_avg), self.var_min * var_avg)
        '''
        # bound variances, for each SP all channels are normalized with the same variance
        var_avg = np.nanmean(self.var)
        var_avg = np.maximum(var_avg, self.measurement_precision)
        # NOTE(review): maximum(..., var_max*avg) followed by minimum(..., var_min*avg)
        # pins the result to var_min*avg whenever var_min < var_max — the clamp
        # bounds look swapped (expected minimum(maximum(x, lo), hi)); confirm
        var_limited = np.minimum(np.maximum(np.sum(self.var, 1, keepdims=True), self.var_max * var_avg), self.var_min * var_avg)
        # get variance inverse
        self.var_inv = 1 / var_limited
        self.var_log = np.log(var_limited)
        # regularize spatial covariance and get inverse
        covLimited = (1 - self.cov_reg_weight) * self.cov + self.cov_reg_weight * self.cov_reg
        covDet = covLimited[:, 0, 0] * covLimited[:, 1, 1] - covLimited[:, 1, 0] * covLimited[:, 0, 1]
        # NOTE(review): the determinant comes from covLimited but the adjugate
        # entries come from the unregularized self.cov — mixed sources; confirm intent
        self.covInv = np.vstack((self.cov[:, 1, 1]/covDet, -self.cov[:, 1, 0]/covDet, self.cov[:, 0, 0]/covDet)).transpose()
        self.covLog = np.log(covDet)
    def update_bbox(self):
        """Shrink each SP's bounding box back to the tight box around its
        pixels after refinement moved boundaries by up to bbox_uncertainty."""
        if self.bbox_uncertainty == 0:
            return
        # find sp distributions
        for n in range(self.num_sps):
            # extend current bbox
            l = np.maximum(self.bbox[n, 0] - self.bbox_uncertainty, 0)
            t = np.maximum(self.bbox[n, 1] - self.bbox_uncertainty, 0)
            r = np.minimum(self.bbox[n, 2] + self.bbox_uncertainty, self.width)
            b = np.minimum(self.bbox[n, 3] + self.bbox_uncertainty, self.height)
            # get mask for SP n
            M = self.img_label[t:b, l:r] == n
            # pixels of SP n
            X = self.img_grid[t:b, l:r, :][M, :]
            # set new bbox
            r = np.max(X[:, 0]) + 1
            l = np.min(X[:, 0])
            b = np.max(X[:, 1]) + 1
            t = np.min(X[:, 1])
            self.bbox[n, :] = np.array([l,t,r,b])
    def initiate_sp_distributions(self):
        """Initialize the running sums (sum_I, sum_I2, sum_X, sum_X2, area,
        num_valid_pixels) per SP plus one virtual boundary SP, then derive
        the statistics."""
        # first find sum and squared sums (will be employed in incremental update)
        self.sum_I = np.zeros((self.num_sps+1, self.channels))
        self.sum_I2 = np.zeros((self.num_sps+1, self.channels))
        self.sum_X = np.zeros((self.num_sps+1, 2))
        self.sum_X2 = np.zeros((self.num_sps+1, 2, 2))
        self.area = np.zeros((self.num_sps+1, 1))
        self.num_valid_pixels = np.zeros((self.num_sps+1, self.channels))
        # find sp distributions
        for n in range(self.num_sps):
            # extend current bbox
            l = np.maximum(self.bbox[n, 0] - self.bbox_uncertainty, 0)
            t = np.maximum(self.bbox[n, 1] - self.bbox_uncertainty, 0)
            r = np.minimum(self.bbox[n, 2] + self.bbox_uncertainty, self.width)
            b = np.minimum(self.bbox[n, 3] + self.bbox_uncertainty, self.height)
            # get mask for SP n
            M = self.img_label[t:b, l:r] == n
            # pixels of SP n
            I = self.img_proc[t:b, l:r, :][M, :]
            X = self.img_grid[t:b, l:r, :][M, :]
            # set new bbox
            r = np.max(X[:, 0]) + 1
            l = np.min(X[:, 0])
            b = np.max(X[:, 1]) + 1
            t = np.min(X[:, 1])
            self.bbox[n, :] = np.array([l,t,r,b])
            # find spatial and spectral sum, squared sum and area (exclude nan's)
            self.sum_I[n, :] = np.nansum(I, 0)
            self.sum_I2[n, :] = np.nansum(I ** 2, 0)
            self.num_valid_pixels[n] = np.sum(~np.isnan(I), 0)
            self.sum_X[n, :] = np.sum(X, 0)
            self.sum_X2[n, :, :] = np.expand_dims(np.matmul(X.transpose(), X), 0)
            self.area[n] = X.shape[0]
        self.bbox_uncertainty = 0
        # set statistics of virtual sp (avoids division by zero in the ratios)
        self.num_valid_pixels[-1, :] = 1
        self.area[-1] = 1
        # update statistics
        self.update_sp_statistics()
    def sp_statistics_check(self):
        """Debug helper: recompute SP statistics by brute force and print a
        flag for every statistic that drifted from the incremental values.
        NOTE(review): uses np.cov(..., bias=True) while
        update_sp_distributions_original uses the default estimator — confirm
        which convention the incremental update matches."""
        mean = np.zeros((self.num_sps+1, self.channels))
        var = np.zeros((self.num_sps+1, self.channels))
        center = np.zeros((self.num_sps+1, 2))
        cov = np.zeros((self.num_sps+1, 2, 2))
        area = np.zeros((self.num_sps+1, 1))
        for n in range(self.num_sps):
            # get mask for SP n
            M = self.img_label == n
            # pixels of SP n
            I = self.img_proc[M, :]
            X = self.img_grid[M, :]
            mean[n, :] = np.nanmean(I, 0)
            var[n, :] = np.nanvar(I, 0)
            center[n, :] = np.nanmean(X, 0)
            cov[n, :, :] = np.cov(X.transpose(), bias=True)
            area[n, 0] = np.sum(M)
        mean_err = np.sum(abs(mean[:-1, :] - self.mean[:-1, :]) > 0.0001)
        center_err = np.sum(abs(center[:-1, :] - self.center[:-1, :]) > 0.0001)
        var_err = np.sum(np.abs(var[:-1, :] - self.var[:-1, :]) > 0.0001)
        cov_err = np.sum(np.abs(cov[:-1, :, :] - self.cov[:-1, :, :]) > 0.0001)
        if mean_err:
            print('mean_err')
        if center_err:
            print('center_err')
        if var_err:
            print('var_err')
        if cov_err:
            print('cov_err')
    def update_sp_statistics(self):
        """Derive mean/var/center/cov (and their bounded inverses/logs used
        by the Bayesian costs) from the running sums."""
        # spectral distribution is expressed as mean and variance, spatial distribution is expressed as center and covariance
        self.mean = self.sum_I / self.num_valid_pixels
        self.var = self.sum_I2 / self.num_valid_pixels - self.mean ** 2
        self.center = self.sum_X / self.area
        self.cov = self.sum_X2 / np.expand_dims(self.area, 2) - np.array([[self.center[:, 0]**2, self.center[:, 0] * self.center[:, 1]], [self.center[:, 0] * self.center[:, 1], self.center[:, 1]**2]]).transpose([2,0,1])
        self.mean[self.num_sps, :] = np.inf
        self.center[self.num_sps, :] = np.inf
        # self.sp_statistics_check()
        # bound variance INDEPENDENT CHANNELS
        '''
        var_avg = np.nanmean(self.var, 0)
        var_avg = np.maximum(var_avg, self.measurement_precision)
        var_limited = np.minimum(np.maximum(self.var, self.var_max * var_avg), self.var_min * var_avg)
        '''
        # bound variances, for each SP all channels are normalized with the same variance
        var_avg = np.nanmean(self.var)
        var_avg = np.maximum(var_avg, self.measurement_precision)
        # NOTE(review): same swapped-looking clamp as in
        # update_sp_distributions_original — result is pinned to var_min*avg; confirm
        var_limited = np.minimum(np.maximum(np.sum(self.var, 1, keepdims=True), self.var_max * var_avg), self.var_min * var_avg)
        # get variance inverse
        self.var_inv = 1 / var_limited
        self.var_log = np.log(var_limited)
        # regularize spatial covariance and get inverse
        covLimited = (1 - self.cov_reg_weight) * self.cov + self.cov_reg_weight * self.cov_reg
        covDet = covLimited[:, 0, 0] * covLimited[:, 1, 1] - covLimited[:, 1, 0] * covLimited[:, 0, 1]
        # NOTE(review): determinant from covLimited, adjugate entries from the
        # unregularized self.cov — mixed sources; confirm intent
        self.covInv = np.vstack((self.cov[:, 1, 1]/covDet, -self.cov[:, 1, 0]/covDet, self.cov[:, 0, 0]/covDet)).transpose()
        self.covLog = np.log(covDet)
    def update_sp_distributions(self, labels_prev, labels_curr, I, X):
        """Incrementally move the running sums of the pixels whose label
        changed from labels_prev to labels_curr, then refresh statistics.

        A sentinel label -1 is prepended so the cumulative-sum trick has a
        defined starting segment even when no pixel changed.
        """
        # select updated pixels
        mask = labels_prev != labels_curr
        labels_prev = np.hstack((-1, labels_prev[mask]))
        labels_curr = np.hstack((-1, labels_curr[mask]))
        I = np.vstack((np.zeros((1, self.channels)), I[mask]))
        X = np.vstack((np.zeros((1, 2)), X[mask]))
        # sort labels, so you can use cumsum and label difference to find updates
        ind = np.argsort(labels_prev)
        # get cumsum of label sorted I and X
        sum_I = np.nancumsum(I[ind, :], axis=0)
        sum_I2 = np.nancumsum(I[ind, :] ** 2, axis=0)
        num_valid_pixels = np.nancumsum(~np.isnan(I[ind, :]), axis=0)
        sum_X = np.cumsum(X[ind, :], axis=0)
        sum_X2 = np.column_stack((np.cumsum(X[ind, 0] ** 2, 0), \
                                  np.cumsum(X[ind, 0] * X[ind, 1], 0), \
                                  np.cumsum(X[ind, 0] * X[ind, 1], 0), \
                                  np.cumsum(X[ind, 1] ** 2, 0))).reshape((-1,2,2))
        labels_prev = labels_prev[ind]
        ind_d = np.nonzero(labels_prev != np.append(labels_prev[1:], -1))[0]
        ind_c = ind_d[1:]
        ind_p = ind_d[:-1]
        labels = labels_prev[ind_c]
        # subtract the departing pixels from their previous superpixels
        self.sum_I[labels, :] -= sum_I[ind_c, :] - sum_I[ind_p, :]
        self.sum_I2[labels, :] -= sum_I2[ind_c, :] - sum_I2[ind_p, :]
        self.num_valid_pixels[labels, :] -= num_valid_pixels[ind_c, :] - num_valid_pixels[ind_p, :]
        self.sum_X[labels, :] -= sum_X[ind_c, :] - sum_X[ind_p, :]
        self.sum_X2[labels, :, :] -= sum_X2[ind_c, :, :] - sum_X2[ind_p, :, :]
        self.area[labels] -= np.expand_dims(ind_c - ind_p, 1)
        # sort labels, so you can use cumsum and label difference to find updates
        ind = np.argsort(labels_curr)
        # get cumsum of label sorted I and X
        sum_I = np.nancumsum(I[ind, :], axis=0)
        sum_I2 = np.nancumsum(I[ind, :] ** 2, axis=0)
        num_valid_pixels = np.nancumsum(~np.isnan(I[ind, :]), axis=0)
        sum_X = np.cumsum(X[ind, :], axis=0)
        sum_X2 = np.column_stack((np.cumsum(X[ind, 0] ** 2, 0), \
                                  np.cumsum(X[ind, 0] * X[ind, 1], 0), \
                                  np.cumsum(X[ind, 0] * X[ind, 1], 0), \
                                  np.cumsum(X[ind, 1] ** 2, 0))).reshape((-1,2,2))
        labels_curr = labels_curr[ind]
        ind_d = np.nonzero(labels_curr != np.append(labels_curr[1:], -1))[0]
        ind_c = ind_d[1:]
        ind_p = ind_d[:-1]
        labels = labels_curr[ind_c]
        # add the arriving pixels to their new superpixels
        self.sum_I[labels, :] += sum_I[ind_c, :] - sum_I[ind_p, :]
        self.sum_I2[labels, :] += sum_I2[ind_c, :] - sum_I2[ind_p, :]
        self.num_valid_pixels[labels, :] += num_valid_pixels[ind_c, :] - num_valid_pixels[ind_p, :]
        self.sum_X[labels, :] += sum_X[ind_c, :] - sum_X[ind_p, :]
        self.sum_X2[labels, :, :] += sum_X2[ind_c, :, :] - sum_X2[ind_p, :, :]
        self.area[labels] += np.expand_dims(ind_c - ind_p, 1)
        self.update_sp_statistics()
    def spectral_L2(self, I, L):
        """Unnormalized spectral distance of pixels I to superpixels L."""
        return np.nansum((I - self.mean[L, :]) ** 2, 1) / self.var_default
    def spectral_bayesian(self, I, L):
        """Variance-normalized spectral distance (with log-variance term)."""
        return np.nansum((I - self.mean[L, :]) ** 2 * self.var_inv[L, :] + self.var_log[L, :], 1)
    def spatial_L2(self, X, L):
        """Unnormalized spatial distance of coordinates X to SP centers L."""
        return np.sum((X - self.center[L, :]) ** 2, 1) / self.cov_default
    def spatial_bayesian(self, X, L):
        """Covariance-normalized (Mahalanobis-style) spatial distance with
        log-determinant term."""
        dx = X[:, 0] - self.center[L, 0]
        dy = X[:, 1] - self.center[L, 1]
        X = np.vstack((dx ** 2, dx * dy, dy ** 2)).transpose()
        return np.sum(X * self.covInv[L, :], axis=1) + self.covLog[L]
    def fill_mean_image(self):
        """Return an image where every pixel carries its superpixel's mean."""
        img_out = np.zeros(self.img_proc.shape)
        for n in np.arange(self.num_sps):
            b = self.bbox[n, :]
            mask = self.img_label[b[1]:b[3], b[0]:b[2]] == n
            img_out[b[1]:b[3], b[0]:b[2], :][mask, :] = self.mean[n, :]
        return img_out
    def draw_boundaries(self, I, color = [0, 0, 0]):
        """Return a copy of I with superpixel boundary pixels set to color."""
        # get label image
        L = self.img_label
        # initiate boundary image
        B = np.zeros((self.height, self.width), dtype=bool)
        # add right edge
        B[:, 0:-1] = np.logical_or(B[:, 0:-1], np.not_equal(L[:,0:-1], L[:,1:]));
        # add right-bottom edge
        B[0:-1, 0:-1] = np.logical_or(B[0:-1, 0:-1], np.not_equal(L[0:-1, 0:-1], L[1:,1:]));
        # add bottom edge
        B[0:-1, :] = np.logical_or(B[0:-1, :], np.not_equal(L[0:-1, :], L[1:,:]));
        # prepare output image
        J = I.copy()
        if J.ndim == 2:
            J = np.expand_dims(J, 2)
        for ch in range(J.shape[2]): J[B, ch] = color[ch]
        return J
    def fill_plane_fitted_superpixel(self, pcloud, pt_indices, K):
        """Fit a plane (via SVD) to each superpixel's 3D points and render a
        depth image from the fitted planes.

        pcloud: 3 x N point cloud; pt_indices: per-pixel index into pcloud
        (NaN where no point); K: 3x3 camera intrinsics.
        Superpixels with fewer than 10 valid points or negative fitted depth
        are skipped. Contains debug dumps (see the hard-coded SP id below).
        """
        img_out = np.zeros((self.img_proc.shape[0], self.img_proc.shape[1]))
        K_inv = np.linalg.inv(K)
        filename_counter = 0;
        filename_base = "plane_points_"
        filename_base_depths = "depths_"
        for n in np.arange(self.num_sps):
            b = self.bbox[n, :]
            mask = self.img_label[b[1]:b[3], b[0]:b[2]] == n
            if np.sum ( ~np.isnan( pt_indices[b[1]:b[3], b[0]:b[2]][mask]) ) < 10 :
                continue
            mask_not_nans = (mask * ~np.isnan(pt_indices[b[1]:b[3], b[0]:b[2]]))
            pts_on_plane = pcloud[ :, pt_indices[b[1]:b[3], b[0]:b[2]][mask_not_nans].astype(np.uint32 ) ]
            center = np.expand_dims(np.mean(pts_on_plane, 1), 1)
            normalised_pts = pts_on_plane - center
            # plane normal = left-singular vector of the smallest singular value
            U, S, Vh = np.linalg.svd(normalised_pts)
            normal = np.expand_dims(U[:,2], 1)
            # dists = np.abs(np.dot(normal.T, normalised_pts))
            # avg_dist = np.sum(dists) / normalised_pts.shape[1]
            # print("max err = " + str(np.max(dists)))
            # print("avg err = " + str(avg_dist))
            # plane equation Ax + By + Cz + D = 0
            A = normal[0][0]
            B = normal[1][0]
            C = normal[2][0]
            D = -1 * np.dot(normal.T, center)[0][0]
            # print("Center = " + str(center))
            assert center[2] > 0
            # print("Normal = " + str(normal))
            # print(D)
            shape = ( b[3]-b[1], b[2]-b[0] )
            objp = np.ones((shape[0] * shape[1], 3), np.uint32)
            objp[:, :2] = np.mgrid[ b[0]:b[2], b[1]:b[3]].T.reshape(-1, 2)
            objp = objp.T
            # back-project the bbox pixels to normalized rays and intersect with the plane
            worldP = np.dot(K_inv, objp)
            worldP = worldP / worldP[2,:]
            assert np.min(worldP[2,:]) > 0
            depths = (-1 * D) / (A * worldP[0, :] + B * worldP[1, :] + C)
            depths = depths.reshape( (shape[0], shape[1]) )
            # plt.figure(0)
            # plt.imshow(depths)
            # return
            if(np.min(depths) < 0):
                # print("negative")
                # np.savetxt(filename_base + str(filename_counter) + ".txt", pts_on_plane.T)
                filename_counter += 1
                # if(np.max(depths) > 0):
                #     print("different sign mix - max\n")
                #     img_out[b[1]:b[3], b[0]:b[2]][mask] = -1e5
                print("Negative depth at SP no:" + str(n))
                continue
            img_out[b[1]:b[3], b[0]:b[2]][mask] = depths[mask] * 1e3
            # print("Depths " + str(depths[mask].shape))
            # print("img_out part" + str(img_out[b[1]:b[3], b[0]:b[2]][mask].shape))
            # print(np.min(depths[mask]))
            # print(np.max(depths[mask]))
            # print("\n####################################\n")
            # if(np.max(depths) > 4):
            #     img_out[b[1]:b[3], b[0]:b[2]][mask] = -1e5
            #     np.savetxt(filename_base + "absurd_" + str(n) + ".txt", pts_on_plane.T)
            #     continue
            # NOTE(review): hard-coded debug dump for one specific superpixel id
            if(n == 1669):
                print(normal)
                print(D)
                np.savetxt(filename_base + str(n) + ".txt", pts_on_plane.T)
                # np.savetxt("point_indices" + str(n) + ".txt", pt_indices[b[1]:b[3], b[0]:b[2]][mask_not_nans].astype(np.uint32 ) )
                np.savetxt(filename_base_depths + str(n) + ".txt", (depths.reshape( shape[0] * shape[1] ) * worldP).T )
            # if (n % 50 == 0):
            #     np.savetxt(filename_base + "positive_" + str(n) + ".txt", pts_on_plane.T)
        print(str(filename_counter) + " " + str(self.num_sps))
        median = np.median(img_out)
        img_out = self.draw_boundaries(img_out, color=[18 * median - 1, 18 * median - 1, 18 * median - 1])[:,:,0]
        return img_out
|
# # declare a list with numbers 1 to 5 and add 6 at the end of list
# num_list = [1, 2, 3, 4, 5]
# print(num_list)
# num_list.append(6)
# print(num_list)
# 2 Create a tuple with values 1 - 5
# num_tuple = {1, 2, 3, 4, 5}
# num_list = list(num_tuple)
# print(num_list[:3])
# # You cannot append this
#
# # 3 declare a dictionary of a shopping list
# shopping_list = {
# 'fruits': 5.00,
# 'eggs' : 2.50,
# 'veg' : 8.99
# }
# print(type(shopping_list))
#
# print(shopping_list['eggs'])
#
# # 4 replace a value in a dictionary
# shopping_list['fruits'] = 6
#
# print(shopping_list)
#
#
# # 5 declare a method that adds two given arguments
# def add(num1, num2):
# return num1 + num2
#
# print(add(3, 5))
# 6 Create a class called person with name and age
class Person:
    """A person identified by a name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age
# Demonstrate the Person class.
demo_person = Person('Dono', 14)
print(demo_person.name)
print(demo_person.age)
# 7
class Student(Person):
    """A Person enrolled on a course, identified by a student ID."""

    def __init__(self, name, age, studentID, course):
        super().__init__(name, age)
        self.studentID = studentID
        self.course = course
# Demonstrate the Student subclass.
demo_student = Student('Dono', 18, 1, 'DevOps')
print(f'{demo_student.name}, {demo_student.age}, {demo_student.studentID}')
# 8 create a dictionary with 4 items and prices and get total cost
# Question 8: items mapped to prices (reused as the base list for q10 below).
# NOTE(review): cheese at 700 looks like a typo for 7.00 — confirm.
q8_dict = {'eggs': 2.58, 'paint': 4.99, 'pork': 3.49, 'cheese': 700}
# total_cost = sum(q8_dict.values())
# print(total_cost)
# 9 create function to do it
def total(prices):
    """Return the sum of all values in the *prices* mapping.

    (Parameter renamed from ``dict``, which shadowed the builtin; the
    positional call sites are unaffected.)
    """
    return sum(prices.values())
print(total(q8_dict))
# 10 have a shopping list and add kiwis to it
# Copy instead of aliasing: the original `q10_dict = q8_dict` bound both
# names to the same dict, so adding kiwis silently mutated q8_dict too.
q10_dict = dict(q8_dict)
q10_dict['kiwis'] = 3.49
print(q10_dict)
# 11
q10_list = list(q10_dict.keys())
for item in q10_list:
    if item == 'pork':
        break
|
# Embedded file name: .\Demo4.py
import random
def do_turn(game):
    """Send each of our pirates one random step toward an island.

    Islands are assigned round-robin; the step taken is a uniformly random
    choice among the directions leading to the assigned island.
    """
    if not game.islands():
        return
    targets = game.islands()
    for idx, pirate in enumerate(game.my_pirates()):
        options = game.get_directions(pirate, targets[idx % len(targets)])
        random.shuffle(options)
        game.set_sail(pirate, options[0])
class Stack(list):
    """A LIFO stack backed by ``list``.

    ``push`` adds to the top (the list tail), ``peek`` returns the top
    without removing it, and the inherited ``list.pop`` removes it.
    """

    def push(self, v):
        """Push *v* onto the top of the stack."""
        self.append(v)

    def peek(self):
        """Return the top item without removing it (IndexError if empty)."""
        return self[-1]

    def __iter__(self):
        """Yield items bottom-to-top.

        Implemented as a generator so every iteration gets its own cursor.
        The original stored the cursor on the instance (``self.current``),
        so nested or interleaved iterations corrupted each other.
        """
        index = 0
        while index < len(self):
            yield self[index]
            index += 1
# Exercise the Stack: push four values, inspect, then pop three off.
st = Stack()
for value in (10, 20, 30, 40):
    st.push(value)
print('Size of stack is:', len(st))
print('First element is:', st[0])
print('The top of the stack is:', st.peek())
for _ in range(3):
    print(st.pop())
print('Size of stack is:', len(st))
from api.middlewares.application import ApplicationManager
from api.controllers import login
# Obtain the shared application object and register the login endpoint.
manager = ApplicationManager()
app = manager.get_app()
app.add_url_rule('/login', 'login', login.login, methods=['POST'])
# Generated by Django 3.2.2 on 2021-05-13 16:56
from django.db import migrations
class Migration(migrations.Migration):
    """Set the admin plural display name for ActionPlanParticipants."""

    dependencies = [
        ('constituent_operations', '0013_actionplanparticipants'),
    ]

    operations = [
        # AlterModelOptions only changes Django metadata; no schema SQL results.
        migrations.AlterModelOptions(
            name='actionplanparticipants',
            options={'verbose_name_plural': 'Action Plan Participants'},
        ),
    ]
|
# coding: utf-8
import time
import logging
import datetime
import sys
import os
import getopt
from config import Config
from lib.nframe import PublicLib
from zookeeper import Zookeeper
from flow import Flow
from zk_redo import ZkRedo
from lib.receive_signal import ReceiveSignal
# Install signal handlers so the main loop can exit cleanly (EXIT_FLAG below).
ReceiveSignal.receive_signal()
pl = PublicLib()
config_file = ''
# Parse the command-line arguments supplied by the user.
try:
    # Note: '-c' is declared without a ':' so its value arrives as a
    # positional argument (args[0]) rather than as the option's value.
    # NOTE(review): running with no options raises IndexError on opts[0],
    # which getopt.GetoptError does not catch — confirm intended.
    opts, args = getopt.getopt(sys.argv[1:], 'c')
    opt = opts[0][0]
    if opt == '-c':
        config_file = args[0]
    else:
        print('-c: configfile')
        sys.exit()
except getopt.GetoptError as e:
    print(e)
    sys.exit()
if not os.path.isfile(config_file):
    print("config file :%s not exsit" % config_file)
    sys.exit()
# Build the config instance and pull out the settings this process needs.
# The process id is the leading "<id>_" prefix of the config file name.
process_id = str(os.path.basename(config_file).split("_")[0])
config = Config(config_file)
cfg = config.get_config()
match_expr = cfg["rule"]["input_rule_exp"].strip()
log_path = cfg["common"]["logpath"].strip()
if log_path == "":
    print("log path is null! please check the config file,exit")
    sys.exit()
if not os.path.exists(log_path):
    logging.info("logpath:%s not exist, please check the config file,exit" % log_path)
    sys.exit()
filename_header = cfg["rule"]["filenameheader"].strip()
merge_interval = cfg["common"]["mergeinterval"].strip()
if merge_interval == "":
    print("merge interval is null, please check the config file,exit")
    sys.exit()
merge_interval = int(merge_interval)
process_path = cfg["zookeeper"]["processpath"].strip()
zk_filenamepool = cfg["zookeeper"]["filenamepool"].strip()
if process_path == "":
    print("process path is null, please check the config file,exit")
    sys.exit()
if zk_filenamepool == "":
    print("zk filenamepool is null, please check the config file,exit")
    sys.exit()
# Highest merge slot index in a day (slots are merge_interval seconds wide).
# NOTE(review): on Python 3 true division makes this a float — confirm the
# Zookeeper wrapper accepts that (86400 // merge_interval - 1 may be intended).
MAX_MERGE_FILE_SEQUENCE = 86400 / merge_interval - 1
zk_host_list = cfg["zookeeper"]["zklist"].strip()
if zk_host_list == "":
    print("zk host list is null! please check the config file")
    sys.exit()
# Create the zookeeper instance and connect.
zoo = Zookeeper(zk_host_list, MAX_MERGE_FILE_SEQUENCE)
zoo.connect()
# work_node = zoo.get_node(process_path)
work_node = "process_" + process_id
# process_id = ''.join(work_node.split('_')[1:])
pl.set_log(log_path, process_id)
# ------------------------------------
line_limit = cfg["common"]["line_limit"].strip()
input_path = cfg["common"]["inputdir"].strip()
output_path = cfg["common"]["destdir"].strip()
batch_size = cfg["common"]["batchsize"].strip()
bak_path = cfg["common"]["bakpath"].strip()
filename_part = cfg["rule"]["filenamepart"].strip()
# ------------------------------------
if line_limit == "":
    # NOTE(review): the fallback is an int while a configured value stays a
    # string — confirm Flow handles both representations.
    line_limit = 2000000
if input_path == "":
    logging.error("input path is null! please check the config file,exit")
    sys.exit()
if bak_path == "":
    logging.error("bak path is null! please check the config file,exit")
    sys.exit()
if output_path == "":
    logging.error("output path is null! please check the config file,exit")
    sys.exit()
if not os.path.exists(input_path):
    logging.error("input_path:%s not exist, please check the config file,exit" % input_path)
    sys.exit()
if not os.path.exists(bak_path):
    logging.error("bak_path:%s not exist, please check the config file,exit" % bak_path)
    sys.exit()
if not os.path.exists(output_path):
    logging.error("output_path:%s not exist, please check the config file,exit" % output_path)
    sys.exit()
# ------------------------------------
# The per-process redo node records in-flight work for crash recovery.
redo_node = process_path + "/" + work_node + "/" + "redo"
redo_node_flag = zoo.check_exists(redo_node)
my_flow = Flow(process_id, line_limit, input_path, output_path, batch_size, bak_path, filename_header,
               zoo, redo_node)
recover = 0  # NOTE(review): appears unused below — confirm before removing.
if redo_node_flag is not None:
    # A redo node exists: a previous run left unfinished work; replay it.
    redo_info, stat = zoo.get_node_value(redo_node)
    redo_info = bytes.decode(redo_info)
    if redo_info is not None:
        zk_redo = ZkRedo(redo_info, process_id, input_path, output_path, bak_path)
        filename_pool_str = zk_redo.do_task()
        # NOTE(review): the pool string written by the main loop is
        # "date,zk_seq,prov", so these names are swapped (prov holds the
        # sequence and vice versa); the positional order passed to work()
        # below nevertheless matches the main loop's call — confirm.
        file_date, prov, zk_seq = filename_pool_str.split(",")
        my_flow.work(file_date, zk_seq, prov, filename_part)
while 1:
    redo_info = []
    current_time = datetime.datetime.now().strftime('%Y%m%d-%H-%M-%S')
    merge_date, hh, mi, ss = current_time.split('-')
    # Compute the current system sequence (slot index within the day).
    sequence = (int(hh) * 3600 + int(mi) * 60 + int(ss)) / merge_interval - 1
    sequence = '%03d' % int(sequence)
    sys_sequence = merge_date + str(sequence)
    logging.info('get system sequence:%s' % sys_sequence)
    # Returns 0 / 1 sentinels, or a "date.seq.prov" string when work is due.
    filename_seq = zoo.zk_get_merge_fn(process_path, work_node, sys_sequence, zk_filenamepool)
    if filename_seq == 0:
        # zk_seq > cur_seq: the merge time point has not arrived yet.
        time.sleep(10)
        continue
    if filename_seq == 1:
        logging.info("get filename_pool failed, try again")
        continue
    file_date, zk_seq, prov = filename_seq.split(".")
    filename_pool = ",".join([file_date, zk_seq, prov])
    redo_info.append("filenamepool:" + filename_pool)
    # zoo.create_node(redo_node)
    # zoo.set_node_value(redo_node, ";".join(redo_info).encode("utf-8"))
    logging.info("match expr:%s" % (match_expr + prov))
    my_flow.get_file(match_expr + prov)
    my_flow.work(file_date, prov, zk_seq, filename_part)
    # Honour a pending shutdown request between batches.
    if ReceiveSignal.EXIT_FLAG:
        sys.exit()
|
#! /usr/bin/env python
#coding=utf-8
'''
______________________________________________
_______________#########_______________________
______________############_____________________
______________#############____________________
_____________##__###########___________________
____________###__######_#####__________________
____________###_#######___####_________________
___________###__##########_####________________
__________####__###########_####_______________
________#####___###########__#####_____________
_______######___###_########___#####___________
_______#####___###___########___######_________
______######___###__###########___######_______
_____######___####_##############__######______
____#######__#####################_#######_____
____#######__##############################____
___#######__######_#################_#######___
___#######__######_######_#########___######___
___#######____##__######___######_____######___
___#######________######____#####_____#####____
____######________#####_____#####_____####_____
_____#####________####______#####_____###______
______#####______;###________###______#________
________##_______####________####______________
Handlers: index
author: K
mail: 13620459@qq.com
'''
import getopt, os, sys, shutil, pdb
import time
import urllib2, base64
import commands, re, string
import tarfile
#print os.getpid()
# Deployment constants for the game-server management tool.
_GameName = 'yxgj'
_GameclientDir="/data/gameclient/"
_UpgradeDir="/data/Upgrade/"          # downloaded packages and db work dirs live here
_InitPort=8600                        # base port; server N listens on _InitPort + N
_ServerDir="/data/gameserver/%s/" % (_GameName)
_DownloadUrl="http://www.xxx.com/"    # package host, HTTP basic auth (see DownLoad_File)
# NOTE(review): the _RSYNC* constants appear unused in this file — confirm.
_RSYNC0="rsync -rlptvzP"
_RSYNC1="rsync -rlptvzP --delete"
_RSYNC2="--backup --backup-dir=$_BACKUP1/$(date +%F-%H%M%S)"
_RSYNC3="--exclude=*.pid --exclude=*.status"
_RSYNC4="--exclude=motif --exclude=upload --exclude=userfiles"
_FileName = 'U_cdkey.tar.gz'
_ProjLock="/tmp/game.lock"
def Server(_Action, _Sid, _Version, _Host, _Cid):
    """Install, update, clean or control game-server instances.

    _Action -- 'install', 'update', 'cache' (wipe fdb/online data) or
               'start'/'stop'/'status'
    _Sid    -- iterable of server ids
    _Version, _Cid -- version and channel id used to name the package
    _Host   -- database host written into the generated config.xml
    Returns True on success, otherwise an error string / exception value.
    """
    if _Action in ['install', 'update']:
        # Fetch the versioned server package once for all sids.
        _TarFile = "%s_server_%d_%d.tar.gz" % (_GameName, _Version, _Cid)
        _DownCheck = DownLoad_File(_UpgradeDir,_TarFile)
        if _DownCheck != True:
            return _DownCheck
    for _Sid_val in _Sid:
        _Sid_val = int(_Sid_val)
        _ServerPort = _InitPort + _Sid_val
        _GameDir = "%sS%d" % (_ServerDir,_Sid_val)
        if _Action == 'install':
            if os.path.exists(_GameDir):
                return "Error the ServerDir S%d aleady install" % (_Sid_val)
            ## mkdir gameserver dir
            try:
                os.makedirs(_GameDir)
            except OSError:
                return "mkdir %s access denied" % (_GameDir)
            ## unzip & install gameserver file
            _ZipCheck = Tar_File('unzip', _UpgradeDir + _TarFile, _GameDir)
            if _ZipCheck != True:
                return _ZipCheck
            ##create password for db
            _GameName_md5 = md5(_GameName)
            _DbPassword = md5("%s%dxxxxx####wokao####%s" % (_GameName_md5,_Sid_val,_Sid_val))
            _DbUserName = "%s_s%d" % (_GameName,_Sid_val)
            # Write the initial gameserver configuration file.
            _Config_xml = '''<?xml version="1.0" encoding="UTF-8"?>
<root>
<ID>%d</ID>
<ServerPort>%d</ServerPort>
<DatabaseUserName>%s</DatabaseUserName>
<DatabasePassword>%s</DatabasePassword>
<DatabaseIP>%s</DatabaseIP>
<DatabasePort>3306</DatabasePort>
<DatabaseName>%s</DatabaseName>
<Log>33</Log>
<FDB_UNIT>10240</FDB_UNIT>
<PlatformId>%d</PlatformId>
</root>''' % (_Sid_val, _ServerPort, _DbUserName, _DbPassword, _Host, _DbUserName, _Cid)
            f = file(_GameDir + "/config.xml", 'w') # open for 'w'riting
            f.write(_Config_xml) # write text to file
            f.close() # close the file
        elif _Action == 'update':
            if os.path.exists(_GameDir) == False:
                return "Error the ServerDir S%d not install" % (_Sid_val)
            ##check server status , stop it
            _ServerExec_a, _ServerExec_b = commands.getstatusoutput('cd %s && /bin/bash start.sh status' % (_GameDir))
            if _ServerExec_a == 0:
                #stop server first
                _ServerSh_a, _ServerSh_b = commands.getstatusoutput('cd %s && /bin/bash start.sh stop' % (_GameDir))
                if _ServerSh_a != 0:
                    return _ServerSh_b
            _ZipCheck = Tar_File('unzip', _UpgradeDir + _TarFile, _GameDir)
            if _ZipCheck != True:
                return _ZipCheck
        elif _Action == 'cache':
            ##check server status , gamedir
            if os.path.exists(_GameDir) == False:
                return "Error the ServerDir S%d not install" % (_Sid_val)
            _PidFile = _GameDir + "/PIDDIR/Server.pid"
            if Check_Pid(_PidFile) == True:
                return "Error the Server S%d is running, stop it first" % (_Sid_val)
            try:
                shutil.rmtree(_GameDir + '/fdb')
                shutil.rmtree(_GameDir + '/online')
            except OSError as e:
                return e
        elif _Action in ['start', 'stop', 'status']:
            if os.path.exists(_GameDir) == False:
                return "Error the ServerDir S%d not install" % (_Sid_val)
            _ServerExec_a, _ServerExec_b = commands.getstatusoutput('cd %s && /bin/bash start.sh %s' % (_GameDir, _Action))
            if _ServerExec_a != 0:
                return _ServerExec_b
    return True
def Db(_Action, _Sid, _Version, _Cid):
    """Install or update the per-server MySQL databases.

    'install' creates the database/user and loads the init + system SQL;
    'update' backs up the live db, loads the new schema into a scratch
    <db>_update database, syncs the table set, and rebuilds any table whose
    definition changed while copying over the columns both versions share.
    Returns True on success, otherwise an error string / command output.
    """
    ## check mysql system login
    _Login_a, _Login_b = commands.getstatusoutput('mysql -e "show databases" > /dev/null 2>&1')
    if _Login_a != 0:
        return "Error, can not login mysql"
    ##creatre db & backup dir
    if os.path.exists(_UpgradeDir + "db") == False:
        os.makedirs(_UpgradeDir + "db")
    if os.path.exists(_UpgradeDir + "backup") == False:
        os.makedirs(_UpgradeDir + "backup")
    _Table_TempDirNew = _UpgradeDir + "db/.table_new"
    _Table_TempDirOld = _UpgradeDir + "db/.table_old"
    if os.path.exists(_Table_TempDirNew) == False:
        os.makedirs(_Table_TempDirNew)
    if os.path.exists(_Table_TempDirOld) == False:
        os.makedirs(_Table_TempDirOld)
    # Clear table dumps left over from a previous run.
    removeFileInFirstDir(_Table_TempDirNew)
    removeFileInFirstDir(_Table_TempDirOld)
    _TarFile = "%s_db_%d_%d.tar.gz" % (_GameName, _Version, _Cid)
    _DownCheck = DownLoad_File(_UpgradeDir,_TarFile)
    _DBfile1 = 'Game_init.sql'
    _DBfile2 = 'Game_sys.sql'
    if _DownCheck != True:
        return _DownCheck
    _DbFileDir = _UpgradeDir + "db"
    _ModifyTable = _DbFileDir + "/mtable.sql"
    ## unzip & install gameserver file
    _ZipCheck = Tar_File('unzip', _UpgradeDir + _TarFile, _DbFileDir)
    if _ZipCheck != True:
        return _ZipCheck
    for _Sid_val in _Sid:
        _Sid_val = int(_Sid_val)
        _DbName = "%s_s%d" % (_GameName,_Sid_val)
        ## get db list
        _DBList_a,_DBList_b = commands.getstatusoutput('mysql -e "show databases" | sed "1d"')
        _Db_search = re.search( r'%s' % (_DbName), _DBList_b, re.M)
        if _Action == 'install':
            if _Db_search:
                return "Error , database %s was aleady install" % (_DbName)
            ##create password for db
            _GameName_md5 = md5(_GameName)
            _DbPassword = md5("%s%dzhaoyi###yunji###%s" % (_GameName_md5,_Sid_val,_Sid_val))
            _DbCommand = "create database if not exists %s default character set utf8;grant all privileges on %s.* to '%s'@'%%' identified by '%s';flush privileges;" % (_DbName, _DbName, _DbName, _DbPassword)
            _DbExec_a, _DbExec_b = commands.getstatusoutput('mysql -e "%s"' % (_DbCommand))
            if _DbExec_a != 0:
                return _DbExec_b
            ##load sql file
            _DbInit_a, _DbInit_b = commands.getstatusoutput("mysql %s < %s/%s" % (_DbName, _DbFileDir, _DBfile1))
            _DbSYS_a, _DbSYS_b = commands.getstatusoutput("mysql %s < %s/%s" % (_DbName, _DbFileDir, _DBfile2))
            if _DbInit_a != 0 or _DbSYS_a != 0:
                return _DbInit_b, _DbSYS_b
        if _Action == 'update':
            if not _Db_search:
                return "Error , database %s was not install" % (_DbName)
            ## backup db
            _Backup_a, _Backup_b = commands.getstatusoutput('mysqldump %s --default-character-set=utf8 > %s/backup/backup_%s.sql' % (_DbName, _UpgradeDir, _DbName))
            if _Backup_a != 0:
                return _Backup_b
            # Load the new schema into a scratch <db>_update database.
            _DbCommand = "drop database IF EXISTS %s_update;create database IF NOT EXISTS %s_update;" % (_DbName, _DbName)
            _DbExec_a, _DbExec_b = commands.getstatusoutput('mysql -e "%s"' % (_DbCommand))
            if _DbExec_a != 0:
                return _DbExec_b
            _DbUpdate_a, _DbUpdate_b = commands.getstatusoutput("mysql %s_update < %s/%s" % (_DbName, _DbFileDir, _DBfile1))
            if _DbUpdate_a != 0:
                return _DbUpdate_b
            # Compare data tables only: A_/S_/T_ prefixed tables are excluded.
            _NewTableList = commands.getoutput("mysql -Ne 'use %s_update;show tables;' | sed '1d' | egrep -v '^A_*|^S_*|^T_*'" % (_DbName)).split('\n')
            _OldTableList = commands.getoutput("mysql -Ne 'use %s;show tables;' | sed '1d' | egrep -v '^A_*|^S_*|^T_*'" % (_DbName)).split('\n')
            if _NewTableList == '' or _OldTableList == '':
                return "Error , _NewTableList or _OldTableList is null, error"
            ## one , table update
            CheckTable = Check_Table(_NewTableList, _OldTableList, _DbName, _ModifyTable)
            if CheckTable != True:
                return CheckTable
            CreateNew = Create_TableFile(_NewTableList, _DbName + "_update", _Table_TempDirNew)
            CreateOld = Create_TableFile(_OldTableList, _DbName , _Table_TempDirOld)
            if CreateNew != True or CreateOld != True:
                return CreateNew,CreateOld
            for _N in _NewTableList:
                # A differing md5 of the dumped definition means the table changed.
                _NewMd5 = commands.getoutput("md5sum %s/%s_table | awk '{print $1}'" % (_Table_TempDirNew, _N))
                _OldMd5 = commands.getoutput("md5sum %s/%s_table | awk '{print $1}'" % (_Table_TempDirOld, _N))
                if _NewMd5 != _OldMd5:
                    _ChangeTable_a,_ChangeTable_b = commands.getstatusoutput("mysql -e 'use %s;rename table %s to %s_old'" % (_DbName, _N, _N))
                    if _ChangeTable_a != 0:
                        return _ChangeTable_b
                    _MkTable_a,_MkTable_b = commands.getstatusoutput("mysql %s < %s/%s_table" % (_DbName, _Table_TempDirNew, _N))
                    if _MkTable_a != 0:
                        return _MkTable_b
                    # Copy data for only the columns present in both versions.
                    _OldTable_field = []
                    for _OF in open("%s/%s_field" % (_Table_TempDirOld, _N)):
                        _OldTable_field.append(_OF.strip('\n'))
                    _NewTable_field = []
                    for _NF in open("%s/%s_field" % (_Table_TempDirNew, _N)):
                        if _NF.strip('\n') in _OldTable_field:
                            _NewTable_field.append('`' + _NF.strip('\n') + '`')
                    _Ins_field = ','.join(_NewTable_field)
                    _InsTable_a,_InsTable_b = commands.getstatusoutput("mysql -e 'use %s;INSERT INTO %s (%s) select %s FROM %s_old'" % (_DbName, _N, _Ins_field, _Ins_field, _N))
                    if _InsTable_a != 0:
                        return _InsTable_b
                    _DropTable = commands.getoutput("mysql -e 'use %s;drop table %s_old;'" % (_DbName, _N))
            ##load syssql file
            _DbSys_a, _DbSys_b = commands.getstatusoutput("mysql %s < %s/%s" % (_DbName, _DbFileDir, _DBfile2))
            if _DbSys_a != 0:
                return _DbSys_b
    return True
def Check_Table(_NewTableList, _OldTableList, _DbName, _ModifyTable):
    """Sync the table *set*: create tables new to the schema, drop removed ones.

    Returns True on success, otherwise the failing command's output.
    """
    ### check newtable ,do add
    for _N in _NewTableList:
        if _N not in _OldTableList:
            _DbDump_a, _DbDump_b = commands.getstatusoutput("mysqldump -d %s_update %s > %s" % (_DbName, _N, _ModifyTable))
            if _DbDump_a != 0:
                return _DbDump_b
            _DbUpdate_a, _DbUpdate_b = commands.getstatusoutput("mysql %s < %s" % (_DbName, _ModifyTable))
            if _DbUpdate_a != 0:
                return _DbUpdate_b
    ### check oldtable ,do del
    for _Old in _OldTableList:
        if _Old not in _NewTableList:
            _DbDel_a, _DbDel_b = commands.getstatusoutput("mysql -e 'use %s ;DROP table %s;'" % (_DbName, _Old))
            if _DbDel_a != 0:
                return _DbDel_b
    return True
def Create_TableFile(_TableList, _DbName, _Table_Dir):
    """Dump each table's definition (<name>_table) and column list (<name>_field)
    into _Table_Dir for later md5/column comparison.

    Returns True on success, otherwise the failing command's output.
    """
    for _N in _TableList:
        _DbDump_a, _DbDump_b = commands.getstatusoutput("mysqldump -d --compact %s %s > %s/%s_table" % (_DbName, _N, _Table_Dir, _N))
        if _DbDump_a != 0:
            return _DbDump_b
        _DbExec_a, _DbExec_b = commands.getstatusoutput("mysql -Ne 'use %s;SHOW FIELDS FROM %s;' | awk '{print $1}' > %s/%s_field" % (_DbName, _N, _Table_Dir, _N))
        if _DbExec_a != 0:
            return _DbExec_b
    return True
def Useage():
    """Print command-line usage for this deployment tool.

    Converted the Python 2 ``print`` statement to the function form: with a
    single argument the output is identical on Python 2, and the code also
    runs on Python 3.
    """
    print('''
    -p Proj Proj name | yxgj & qnzm"
    -t Type Type | server & db & clent
    -a Action Action | install & update
    -s Sid Server sid | 1,2,3,4
    -c Cid Clecs id | 1 & 2 & 3
    -v Version svn version | 8664
    -h Host server db host | 127.0.0.1
    --test test mode |
    --delcache delete fdb |
    --help |
    ''')
def Report(_Rstatus,_Rmessage):
    """Print a status and message pair to stdout (Python 2 print statement)."""
    print _Rstatus,_Rmessage
def Check_Pid(_PidFile):
    """Return True if the pid recorded in _PidFile belongs to a running process.

    Returns False when the pid file cannot be opened or `ps -p` fails.
    """
    try:
        _pf = file(_PidFile)  # Python 2 file() builtin
    except IOError as e:
        return False
    else:
        _pid = int(_pf.read())
        _pf.close()
        # `ps -p` exits 0 only when the process exists.
        a,b = commands.getstatusoutput("ps -p %d" % (_pid))
        if a == 0:
            return True
        else:
            return False
def Tar_File(_TarAct,_TarFile,_TarPath):
    """Unpack ('unzip') or create ('zip') the gzipped tarball _TarFile.

    For 'zip', every file found under _TarPath is added flat (arcname is the
    bare file name). Returns True on success, otherwise the exception object
    (IOError) or exception type (anything else) that occurred.
    """
    if _TarAct == 'unzip':
        try:
            tar = tarfile.open(_TarFile)
        except IOError as e:
            return e
        except:
            # Deliberate best-effort: report any other failure type to the caller.
            return sys.exc_info()[0]
        else:
            # SECURITY: extractall() trusts member paths; only use with archives
            # from the authenticated update server (see DownLoad_File).
            tar.extractall(path=_TarPath)
            tar.close()
            return True
    elif _TarAct == 'zip':
        try:
            tar = tarfile.open(_TarFile,'w:gz')
        except IOError as e:
            return e
        except:
            return sys.exc_info()[0]
        else:
            # Loop variables renamed: the originals shadowed the builtins
            # `dir` and `file`.
            for root, dirnames, filenames in os.walk(_TarPath):
                for fname in filenames:
                    fullpath = os.path.join(root,fname)
                    tar.add(fullpath,arcname=fname)
            tar.close()
            return True
def DownLoad_File(_FilePath,_FileName):
    '''
    Download an update package from _DownloadUrl and save it under _FilePath.
    The download server performs a simple check, so the request carries HTTP
    basic-auth credentials. Returns True on success, otherwise the exception
    object (HTTP/URL errors) or exception type (anything else).
    '''
    request = urllib2.Request(_DownloadUrl + _FileName)
    # Build the Basic auth header by hand (newline stripped from base64 output).
    base64string = base64.encodestring('%s:%s' % ('xxx', 'xxxxxxxxx')).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    try:
        result = urllib2.urlopen(request, timeout=5)
    except urllib2.HTTPError as e:
        return e
    except urllib2.URLError as e:
        return e
    except:
        # Deliberate best-effort: surface any other failure type to the caller.
        return sys.exc_info()[0]
    else:
        data = result.read()
        with open(_FilePath + _FileName, "wb") as code:
            code.write(data)
        return True
def md5(data):
    """Return the hex MD5 digest of *data*.

    Accepts str or bytes; text is UTF-8 encoded first, so the helper also
    works on Python 3. (Parameter renamed from ``str``, which shadowed the
    builtin.)
    """
    import hashlib
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    m = hashlib.md5()
    m.update(data)
    return m.hexdigest()
def removeFileInFirstDir(targetDir):
    """Delete every regular file directly inside targetDir (non-recursive).

    Subdirectories and their contents are left untouched. (Loop variable
    renamed from ``file``, which shadowed the builtin.)
    """
    for entry in os.listdir(targetDir):
        targetFile = os.path.join(targetDir, entry)
        if os.path.isfile(targetFile):
            os.remove(targetFile)
def main():
    """Parse CLI options and dispatch to Server() / Db().

    NOTE(review): _Type, _Action, _Sid, _Version, _Host and _Cid are only
    bound when the matching flag is supplied; a missing flag raises
    NameError at the dispatch below instead of a usage error — confirm.
    """
    if len(sys.argv) == 1:
        Useage()
        sys.exit()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "p:t:a:s:c:v:h:i:", ["test", "help", "delcache"])
        for op, value in opts:
            if op == "-p":
                _Proj = value
            elif op == "-t":
                _Type = value
            elif op == "-a":
                _Action = value
            elif op == "-s":
                _Sid = value.split(',')
            elif op == "-c":
                _Cid = int(value)
            elif op == "-v":
                _Version = int(value)
            elif op == "-h":
                _Host = value
            elif op == "-i":
                _Id = value
            elif op == "--test":
                _TestMode = 1
            elif op == "--help":
                Useage()
                sys.exit()
    except getopt.GetoptError:
        Useage()
        sys.exit()
    if _Type == 'server':
        _MainFun = Server(_Action, _Sid, _Version, _Host, _Cid)
        print _MainFun
    elif _Type == 'db':
        _MainFun = Db(_Action, _Sid, _Version, _Cid)
        print _MainFun
    elif _Type == 'res':
        # 'res' is accepted but not implemented.
        pass
if __name__ == '__main__':
    main()
#print opts
#DownLoad_File(_FileName)
|
from celery import Celery
from django.core.mail import send_mail
from django.conf import settings
import time
# Celery app for asynchronous tasks; broker is local Redis DB 7.
app = Celery('celery_task.tasks', broker='redis://127.0.0.1:6379/7')
@app.task
def send_register_active_email(to_email, username, token):
    """Send the account-activation email for *username* to *to_email*.

    The activation link embeds *token*. Fixed user-facing typos in the
    subject and HTML body ("Welcom" -> "Welcome", "active" -> "activate").
    """
    subject = 'Welcome to Daily Fresh'
    msg = ''
    sender = settings.EMAIL_FROM
    receivers = [to_email]
    html_msg = "<h1>%s, welcome to daily fresh</h1>please click the link to activate your account<br/><a href='http://127.0.0.1:8000/user/active/%s'>http://127.0.0.1:8000/user/active/%s</a>" % (username, token, token)
    send_mail(subject, msg, sender, receivers, html_message=html_msg)
    # NOTE(review): this sleep blocks the worker 5 s per task — confirm it
    # is still needed (it appears to simulate slow delivery).
    time.sleep(5)
|
# Generated by Django 2.2.7 on 2021-09-24 13:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add UserInfo.info FK to CororateInfo.Information; adjust Role.title_id."""

    dependencies = [
        ('CororateInfo', '0001_initial'),
        ('User', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='userinfo',
            name='info',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='CororateInfo.Information'),
        ),
        migrations.AlterField(
            model_name='role',
            name='title_id',
            # NOTE(review): max_length is ignored on IntegerField (Django
            # flags this in system checks) — confirm whether CharField was
            # intended before touching the model.
            field=models.IntegerField(default=10, max_length=32, verbose_name='代表角色的值'),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def main(args):
    """Read a number from stdin and classify it as prime ('pierwsze' /
    'Pierwsze'), composite ('Zlozona'), or neither ('Ani pierwsze ani
    zlozone').

    *args* follows the script-entry convention but is not used; the number
    comes from input().
    """
    n = int(input("Podaj liczbe: "))
    i = 2
    # Trial division: advance i while it does not divide n.
    # NOTE(review): a negative n falls through to "pierwsze" via i*i >= n,
    # and the capitalisation of the printed labels is inconsistent between
    # branches — confirm both are intended.
    while n % i > 0:
        if n == 1:
            print("Ani pierwsze ani zlozone")
            break
        # No divisor found up to sqrt(n) -> n is prime.
        if i * i >= n:
            print("pierwsze")
            break
        i += 1
    # Reached with n % i == 0 only when i divides n (or the first loop never ran).
    while(n % i == 0):
        if n == 0:
            print("Ani pierwsze ani zlozone")
            break
        elif n == 2:
            print("Pierwsze")
            break
        print("Zlozona")
        break
    return 0
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
|
from djangobench.utils import run_benchmark
def setup():
    """Import the benchmark model lazily and expose it as a module global."""
    global Book
    from query_order_by.models import Book
def benchmark():
    """Evaluate a simple Model.objects.order_by() queryset.

    The original declared ``global Book`` here, but reading a module global
    needs no declaration — it is only required in setup(), which assigns it.
    """
    list(Book.objects.order_by('id'))
# Register the benchmark with djangobench's harness.
run_benchmark(
    benchmark,
    setup=setup,
    meta={
        'description': 'A simple Model.objects.order_by() call.',
    }
)
|
import MegaPracticaDos
def test_PedirTotales_NmayoraX():
    """For input [8, 2] the outcome flag (N < X) must hold."""
    N, X = MegaPracticaDos.PedirTotales([8, 2])
    outcome = not (N >= X)
    assert outcome is True
def test_PedirTotales_NmenorX():
    """For input [2, 8] the outcome flag (N < X) must NOT hold."""
    N, X = MegaPracticaDos.PedirTotales([2, 8])
    outcome = not (N >= X)
    assert outcome is False
def test_PedirTotales_StringNumero():
    """PedirTotales(["Tres", 2]): adding the results must not raise.

    Fixed: in Python 3 ``except Exception as e`` unbinds ``e`` when the
    handler exits, so the original's final ``assert e == None`` raised
    NameError whenever an exception was actually caught. The caught
    exception is now stored in a separate variable.
    """
    N, X = MegaPracticaDos.PedirTotales(["Tres", 2])
    caught = None
    try:
        print(N + X)
    except Exception as exc:
        caught = exc
        print("Solo se aceptan numeros")
    assert caught is None
def test_PedirTotales_NumeroString():
    """PedirTotales([2, "Uno"]): adding the results must not raise.

    Fixed the same Python 3 except-scoping bug as test_PedirTotales_StringNumero:
    the caught exception is stored in a separate variable so the final assert
    cannot raise NameError.
    """
    N, X = MegaPracticaDos.PedirTotales([2, "Uno"])
    caught = None
    try:
        print(N + X)
    except Exception as exc:
        caught = exc
        print("Solo se aceptan numeros")
    assert caught is None
def test_PedirTotales_String():
    """PedirTotales(["Tres", "Uno"]): int(N + X) is expected to raise a
    string-concatenation TypeError.

    Fixed: the original compared the exception *object* to a message string
    (never equal) and, worse, read ``e`` after the handler where Python 3
    has already unbound it (NameError). The message is now captured with
    str() into a separate variable.
    """
    N, X = MegaPracticaDos.PedirTotales(["Tres", "Uno"])
    caught = None
    try:
        int(N + X)
    except Exception as exc:
        caught = str(exc)
        print("Solo se aceptan numeros")
    assert caught == 'can only concatenate str (not "int") to str'
def test_PedirTotales_Int():
    """For [8, 2] the two returned totals sum to 10."""
    N, X = MegaPracticaDos.PedirTotales([8, 2])
    assert N + X == 10
def test_Validar1():
    """validar1 rejects this roster (expected result: False)."""
    roster = [["asd", 3], ["psd", 1], ["papa", 3]]
    assert MegaPracticaDos.validar1(roster) == False
def test_Validar5():
    """validar5 rejects this roster (expected result: False)."""
    roster = [["asd", 3], ["psd", 6], ["papa", 3]]
    assert MegaPracticaDos.validar5(roster) == False
##
# This module defines an employee class hierarchy for payroll processing.
#
## An employee has a name and a mechanism for computing weekly pay.
#
class Employee:
    """Base employee: holds a name; the base weekly pay is zero."""

    def __init__(self, name):
        """Create an employee with the given name."""
        self._name = name

    def getName(self):
        """Return this employee's name."""
        return self._name

    def weeklyPay(self, hoursWorked):
        """Return the pay for *hoursWorked* hours; base employees earn 0.0."""
        return 0.0
## An hourly employee is paid for every hour worked.
#
class HourlyEmployee(Employee):
    """An employee paid per hour, with a 50% premium on overtime hours."""

    def __init__(self, name, wage):
        """Create an hourly employee with the given name and hourly wage."""
        super().__init__(name)
        self._hourlyWage = wage

    def weeklyPay(self, hoursWorked):
        """Return wage * hours plus half-wage for every hour beyond 40."""
        overtime = max(hoursWorked - 40, 0)
        return hoursWorked * self._hourlyWage + 0.5 * overtime * self._hourlyWage
## A salaried employee is paid the same amount independent of the hours worked.
#
#
class SalariedEmployee(Employee):
    """An employee paid a fixed share of an annual salary, independent of hours."""

    def __init__(self, name, salary):
        """Create a salaried employee with the given name and annual salary."""
        super().__init__(name)
        self._annualSalary = salary

    def weeklyPay(self, hoursWorked):
        """Return one 52nd of the annual salary; *hoursWorked* is ignored."""
        weeks_per_year = 52
        return self._annualSalary / weeks_per_year
## A manager is a salaried employee who also receives a bonus.
#
class Manager(SalariedEmployee):
    """A salaried employee who additionally receives a fixed weekly bonus."""

    def __init__(self, name, salary, bonus):
        """Create a manager with a name, annual salary, and weekly bonus."""
        super().__init__(name, salary)
        self._weeklyBonus = bonus

    def weeklyPay(self, hoursWorked):
        """Return the salaried weekly pay plus the weekly bonus."""
        return super().weeklyPay(hoursWorked) + self._weeklyBonus
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-11-30 22:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Applicant.description a nullable 500-character CharField."""

    dependencies = [
        ('staff', '0006_auto_20171128_0109'),
    ]

    operations = [
        migrations.AlterField(
            model_name='applicant',
            name='description',
            field=models.CharField(max_length=500, null=True),
        ),
    ]
|
import random
class NPC:
    """Placeholder base for non-player characters; defines the Power hook."""

    def __init__(self):
        pass

    def Power(self):
        """Attack power hook; the base NPC returns nothing."""
        pass
class Person:
    """The player character: fixed 100 HP and no attack power."""

    def __init__(self, Monster):
        # Monster is accepted but unused; kept for call compatibility.
        self.name = "Person"
        self.HP = 100

    def Power(self):
        """A person has no attack power."""
        return 0
class Zombie:
    """Zombie: 50-100 HP at creation; each attack rolls 0-10 power."""

    def __init__(self):
        self.name = "Zombie"
        self.HP = random.randint(50, 100)

    def Power(self):
        """Return a fresh uniform random power in [0, 10]."""
        return random.randint(0, 10)
class Vampire:
    """Vampire: 100-200 HP at creation; each attack rolls 10-20 power."""

    def __init__(self):
        self.name = "Vampire"
        self.HP = random.randint(100, 200)

    def Power(self):
        """Return a fresh uniform random power in [10, 20]."""
        return random.randint(10, 20)
class Ghoul:
    """Ghoul: 40-80 HP at creation; each attack rolls 15-30 power."""

    def __init__(self):
        self.name = "Ghoul"
        self.HP = random.randint(40, 80)

    def Power(self):
        """Return a fresh uniform random power in [15, 30]."""
        return random.randint(15, 30)
class Werewolves:
    """Werewolf: fixed 200 HP; each attack rolls 0-40 power."""

    def __init__(self):
        self.name = "Werewolf"
        self.HP = 200

    def Power(self):
        """Return a fresh uniform random power in [0, 40]."""
        return random.randint(0, 40)
|
'''
@author: Saurab Dulal
Date: Nov 13, 2017
Developed in Linux OS
Requirement = python 3.x +
Problem Description: This is a dynamic programming solution to the cloth cutting problem - please see the problem
description in README.md file
'''
import time
import sys
'''Using Dynamic Programming - orientation less computation'''
def clothCuttingDynamicProgramming(length, breadth, data):
    """Return the maximum profit from guillotine-cutting a cloth.

    length, breadth -- DP table dimensions (problem size + 1, so a piece of
        size n indexes cell n of the 0-based table).
    data -- list of {'x': w, 'y': h, 'w': value} pieces, usable in either
        orientation.
    """
    cuttingMatrix = [[0 for x in range(0, breadth)] for y in range(0, length)]
    # Seed the table with the piece values (both orientations).
    # Fixed off-by-one: the original tested `<=`, letting a piece whose
    # dimension equals the table size index one past the end (IndexError).
    # Such pieces cannot fit and are now skipped.
    for i in data:
        if (i['x'] < length and i['y'] < breadth):
            cuttingMatrix[i['x']][i['y']] = i['w']
        if(i['y'] < length and i['x'] < breadth):
            cuttingMatrix[i['y']][i['x']] = i['w']
    # Fill each cell with the best value over all horizontal/vertical cuts.
    for lenX in range(0, length):
        for lenY in range(0, breadth):
            cut = 0
            # A cut at k mirrors one at lenX - k, so only half are scanned.
            for k in range(0, int(lenX / 2) + 1):
                if (cut < (cuttingMatrix[k][lenY] + cuttingMatrix[lenX - k][lenY])):
                    cut = (cuttingMatrix[k][lenY] + cuttingMatrix[lenX - k][lenY])
            for k in range(0, int(lenY / 2) + 1):
                if (cut < (cuttingMatrix[lenX][k] + cuttingMatrix[lenX][lenY - k])):
                    cut = (cuttingMatrix[lenX][k] + cuttingMatrix[lenX][lenY - k])
            cuttingMatrix[lenX][lenY] = cut
    return cuttingMatrix[length - 1][breadth - 1]
''' Construction of sample data'''
def make_data_set(input): # [(),()]
sample_data = []
for i in input:
sample_data.append({'x':i[0],'y':i[1],'w':i[2]})
return sample_data
def read_data_from_file(filename):
    """Read a cloth-cutting problem instance from *filename*.

    Expected layout: line 1 holds "length breadth", line 2 a piece count
    (unused), and each later line an "x y w" piece triple.

    Returns (data, length + 1, breadth + 1) — the +1 offsets make index n
    valid in the 0-based DP table — or None if the file cannot be read.
    """
    try:
        with open(filename, 'r') as f:
            lines = f.readlines()
    except Exception as e:
        print(e)
        return None
    dimensions = lines[0].split()
    triples = []
    for row in lines[2:]:
        triples.append(tuple(int(tok) for tok in row.split()))
    data_read = make_data_set(triples)
    '''+1 provides the offset to include the 0th position'''
    return data_read, int(dimensions[0]) + 1, int(dimensions[1]) + 1
'''Some sample data'''
def sample_data(n):
    """Return (data, length+1, breadth+1) for built-in sample n in 1..4, else False.

    Dimensions are stored already offset by +1 so that the DP table accepts
    the full cloth size as a valid index.
    """
    samples = {
        # 20x30
        1: ([(3, 4, 10), (4, 5, 9), (12, 23, 100), (3, 3, 2)], 21, 31),
        # 40x70
        2: ([(21, 22, 582), (31, 13, 403), (9, 35, 315), (9, 24, 216), (30, 7, 210), (11, 13, 143),
             (10, 14, 140), (14, 8, 110), (12, 8, 94), (13, 7, 90)], 41, 71),
        # 10x15
        3: ([(8, 4, 66), (3, 7, 35), (8, 2, 24), (3, 4, 17), (3, 3, 11), (3, 2, 8), (2, 1, 2)], 11, 16),
        # 40x70
        4: ([(31, 43, 500), (30, 41, 480), (29, 39, 460), (28, 38, 440), (27, 37, 420), (26, 36, 410),
             (25, 35, 400), (24, 34, 380), (33, 23, 360), (22, 32, 340), (31, 21, 320), (29, 18, 300),
             (17, 27, 280), (15, 24, 240), (16, 25, 260),
             (15, 24, 240), (23, 14, 220), (21, 12, 180), (19, 11, 160), (9, 17, 140)], 41, 71),
    }
    if n in samples:
        raw, table_length, table_breadth = samples[n]
        return make_data_set(raw), table_length, table_breadth
    return False
if __name__ == '__main__':
    # Guard the argv access: the original indexed sys.argv[1] directly and
    # crashed with IndexError when the script was run without an argument.
    if len(sys.argv) > 1 and sys.argv[1]:
        try:
            data, length, breadth = read_data_from_file(sys.argv[1])
            print(data, length, breadth)
            start_time = time.time()
            print("The maximum profit using Dynamic programming: " + str(clothCuttingDynamicProgramming(length, breadth, data)))
            diff_time = time.time() - start_time
            print('and the total time for execution of program :' + str(diff_time) + 'seconds')
        except Exception as e:
            print(e)
    else:
        print("File not found")
    '''To use the sample data, please uncomment the code below'''
    # n = 3
    # if sample_data(n)!=False:
    #     data, length, breadth = sample_data(n)
    #     start_time = time.time()
    #     print("The maximum profit using Dynamic programming: " +str(clothCuttingDynamicProgramming(length, breadth, data))) # since size will be from 0-n, so it will consider n, but the list will be of n-1 size
    #     diff_time = time.time() - start_time
    #     print ('and the total time for execution of program :' +str(diff_time) + 'seconds')
    #
    # else:
    #     print("sample " + str(n) +' not found' )
    #
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RANSACRegressor
from sklearn.model_selection import train_test_split
import scipy as sp
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# Load the Boston housing dataset (whitespace-separated, no header row).
df = pd.read_csv('https://raw.githubusercontent.com/rasbt/'
                 'python-machine-learning-book-2nd-edition'
                 '/master/code/ch10/housing.data.txt',
                 header=None,
                 sep=r'\s+')  # raw string: '\s' is an invalid escape in a plain literal
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
              'NOX', 'RM', 'AGE', 'DIS', 'RAD',
              'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
print(df.head())
# Scatter-plot matrix over a subset of columns to eyeball pairwise relations.
cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']
sns.pairplot(df[cols], height=2.5)  # 'size' was renamed to 'height' in seaborn 0.9
plt.tight_layout()
# plt.savefig('images/10_03.png', dpi=300)
plt.show()
# -------------------heat map-------------------------------------
# Pearson correlation matrix of the same columns, annotated per cell.
cm = np.corrcoef(df[cols].values.T)
#sns.set(font_scale=1.5)
hm = sns.heatmap(cm,
                 cbar=True,
                 annot=True,
                 square=True,
                 fmt='.2f',
                 annot_kws={'size': 15},
                 yticklabels=cols,
                 xticklabels=cols)
plt.tight_layout()
# plt.savefig('images/10_04.png', dpi=300)
plt.show()
# -------------------------------------------------------------------
|
# coding=utf-8
import math
from nose.tools import assert_almost_equal
from Rotation_Matrix.calc_pose import calculate_translation
# calculations:
# mag = sqrt(100^2 + 150^2- 2*100*150×cos(30))
# theta = asin(150 × sin(30) ÷ mag)
def test_translation_behind_right():
    """+30 deg, 150, 100 -> theta ~ -68.26 deg, mag ~ 80.7.

    Expected values follow the law-of-cosines/asin formulas in the
    header comment above.
    """
    (theta, mag) = calculate_translation(math.radians(30), 150, 100)
    assert_almost_equal(theta, -68.26, places=1)
    assert_almost_equal(mag, 80.7, places=1)
def test_translation_behind_left():
    """Mirror of the behind-right case: negated angle flips theta's sign."""
    (theta, mag) = calculate_translation(math.radians(-30), 150, 100)
    assert_almost_equal(theta, 68.26, places=1)
    assert_almost_equal(mag, 80.7, places=1)
def test_translation_ahead():
    """Zero angle with 200 > 150: expect a straight-back (-180 deg) move of 50."""
    (theta, mag) = calculate_translation(math.radians(0), 150, 200)
    assert_almost_equal(theta, -180)
    assert_almost_equal(mag, 50, places=1)
def test_translation_ahead_right():
    """Small +5 deg offset while ahead: theta ~ -155.5 deg, mag ~ 52.2."""
    (theta, mag) = calculate_translation(math.radians(5), 150, 200)
    assert_almost_equal(theta, -155.5, places=1)
    assert_almost_equal(mag, 52.2, places=1)
def test_translation_ahead_left():
    """Mirror of the ahead-right case: negated angle flips theta's sign."""
    (theta, mag) = calculate_translation(math.radians(-5), 150, 200)
    assert_almost_equal(theta, 155.5, places=1)
    assert_almost_equal(mag, 52.2, places=1)
|
"""
Plot of radial density for Hookium k=1/4.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erf
import matplotlib
from matplotlib import rc
matplotlib.rcParams.update({'font.size': 22})
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.preamble'] = r'\usepackage{libertine}'
def radial_dens(r):
    """Radial probability density 2*pi*r^2*rho(r) of Hookium (k=1/4).

    Accepts a scalar or numpy array r > 0 (erf/reciprocal are applied
    elementwise) and returns the same shape.
    """
    gaussian = np.exp(-0.5*np.square(r))
    # Normalization constant of the analytical density.
    norm = 2.0/(np.power(np.pi, 1.5)*(8.0+5.0*np.pi**0.5))
    # Bracketed correction term involving the error function.
    bracket = np.sqrt(np.pi/2)*(7./4.+1./4.*np.square(r)+(r+np.reciprocal(r))*erf(r/np.sqrt(2))) + gaussian
    density = norm*gaussian*bracket
    # Multiply by the spherical-shell factor 2*pi*r^2.
    return density*np.square(r)*2*np.pi
# analytical intracule for k=1/4
# r starts just above 0 (1e-12) to avoid division by zero in np.reciprocal.
r = np.linspace(1e-12, 7, 1000)
uan = radial_dens(r)
# For each sample size 10^i: load the VMC configurations, reduce each to a
# radial distance, and overlay their histogram on the analytical curve.
for i in [3, 4, 5]:
    u = np.load("../data/vmcConfigurations/vmc-1g-1e{}.npy".format(i), allow_pickle=True)
    u = np.linalg.norm(u, axis=-1)  # per-configuration radial distance
    fig, ax = plt.subplots(figsize=[7, 7])
    ax.plot(r, uan, '--', color='blue', linewidth=3, label='Analytical')
    ax.hist(u, bins=20*i, histtype='bar', color='cornflowerblue', alpha=0.6, density=True, align='left', label='VMC')
    # NOTE(review): legend is drawn before the second hist, so the step
    # histogram's label never appears in it - confirm this is intended.
    plt.legend()
    ax.hist(u, bins=20*i, histtype='step', color='black', density=True, align='left', label='VMC, $N={}$'.format(len(u)))
    ax.set_xlabel('$r$')
    ax.set_ylabel(r'$2\pi r^2 \rho(r)$')
    plt.savefig("../plots/rdens1e{}-1g2j.png".format(i), bbox_inches='tight')
import sys
# Make the app package importable from its absolute location, then expose
# the Flask app under the name "application" that mod_wsgi expects.
sys.path.insert(0, "/home/yluo/learn/flaskbyex/waitercaller")
from waitercaller import app as application
|
from django.conf.urls import include, url
# NOTE(review): the wildcard import below re-imports the same module and can
# shadow the explicit names above; prefer explicit imports.
from django.conf.urls import *
from dealerfunnel.funnel.view.user import *
# URL routes for the user section. Each route binds a method of a *fresh*
# user() instance created once at import time (6 instances in total).
urlpatterns = [
    url(r'^$',user().landing,name='user_landing'),
    url(r'^create/$',user().createuser,name='create_user'),
    url(r'^edituser/$',user().editusermodal,name='user_edituser'),
    url(r'^updateuser/$',user().updateuser,name='user_updateuser'),
    url(r'^create/modal/$',user().create_modal,name='user_create_modal'),
    url(r'^deleteuser/$',user().deleteuser,name='user_delete'),
]
|
# Write the words to new.txt in sorted order, then echo the file contents.
# Fixes vs. the original: context managers guarantee the handles are closed,
# the local no longer shadows the builtin name `file`, and the redundant
# first write (which was immediately overwritten by the sorted version) is
# dropped - the final file content is unchanged.
listd = ["Zero" ,"Sqeezed " ,"Lemonade ", "Grandma ", " Gameplay ", "Mechanics ", "Walkers ", "Extreme ", "Produced "]
listd.sort()
with open("new.txt", "w") as out_file:
    out_file.writelines(listd)
with open("new.txt", "r") as in_file:
    print(in_file.read())
|
class Solution:
    def findMinStep(self, board: str, hand: str) -> int:
        """Zuma game: minimum balls from `hand` needed to clear `board`, -1 if impossible.

        Depth-first search over runs of equal colors: for each run, if the
        hand holds enough balls to grow it to 3, spend them, remove the run
        and recurse on the collapsed board.
        """
        def dfs(row: str, hand_count: collections.Counter) -> int:
            # Empty board: cleared without spending anything further.
            if not row:
                return 0
            best = 6  # hand has at most 5 balls, so 6 acts as "infinity"
            start = 0
            while start < len(row):
                # Find the run [start, end) of equal-colored balls.
                end = start + 1
                while end < len(row) and row[end] == row[start]:
                    end += 1
                missing = 3 - (end - start)
                if hand_count[row[start]] >= missing:
                    missing = max(missing, 0)  # run of 3+ needs nothing extra
                    hand_count[row[start]] -= missing
                    rest = dfs(row[:start] + row[end:], hand_count)
                    if rest >= 0:
                        best = min(best, missing + rest)
                    hand_count[row[start]] += missing  # backtrack
                start = end
            return best if best < 6 else -1
        return dfs(board, collections.Counter(hand))
|
import os
import random
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
BASE_URL = 'https://www.linkedin.com/'
def start_driver(headless=False, maximized=True):
    """Create and return a Chrome webdriver using the bundled chromedriver.

    The --start-maximized and --headless flags are appended (in that order)
    when the corresponding argument is truthy.
    """
    chrome_options = webdriver.ChromeOptions()
    for flag, enabled in (("--start-maximized", maximized), ("--headless", headless)):
        if enabled:
            chrome_options.add_argument(flag)
    return webdriver.Chrome(executable_path=os.path.abspath('../chromedriver_72'),
                            chrome_options=chrome_options)
def login(driver, email, password):
    """Log into LinkedIn with the given credentials.

    Tries the classic home-page login form first; if that fails, falls back
    to the newer landing page ("Welcome to your professional community")
    with its separate username/password form.

    Returns True when the notifications icon appears (login succeeded),
    False otherwise.
    """
    xpath_email = '//*[@id="login-email"]'
    xpath_password = '//*[@id="login-password"]'
    try:
        driver.get(BASE_URL)
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.XPATH, xpath_email)))
        driver.find_element_by_xpath(xpath_email).send_keys(email)
        time.sleep(2)  # small human-like pause between fields
        driver.find_element_by_xpath(xpath_password).send_keys(password)
        # NOTE(review): WebDriverWait without .until() does not actually
        # wait - presumably intended as a random pause; confirm.
        WebDriverWait(driver, random.randint(5, 10))
        driver.find_element_by_xpath('//*[@id="login-submit"]').click()
        WebDriverWait(driver, 100).until(
            EC.presence_of_element_located((By.XPATH, '//*[@id="notifications-tab-icon"]')))
        return True
    # FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are not swallowed while keeping the original fallback behavior.
    except Exception:
        if 'Welcome to your professional community' in driver.page_source:
            try:
                driver.find_element_by_xpath('//*[text()="Sign in"]').click()
            except NoSuchElementException:
                return False
            # The fallback form uses different element ids.
            xpath_email = '//*[@id="username"]'
            xpath_password = '//*[@id="password"]'
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.XPATH, xpath_email)))
            driver.find_element_by_xpath(xpath_email).send_keys(email)
            time.sleep(2)
            driver.find_element_by_xpath(xpath_password).send_keys(password)
            WebDriverWait(driver, random.randint(5, 10))
            driver.find_element_by_xpath('//*[text()="Sign in"]').click()
            WebDriverWait(driver, 100).until(
                EC.presence_of_element_located((By.XPATH, '//*[@id="notifications-tab-icon"]')))
            return True
        else:
            return False
if __name__ == '__main__':
    # Smoke run: 'email'/'passwd' are placeholders - replace with real
    # credentials before using.
    driver = start_driver()
    login(driver, 'email', 'passwd')
    driver.quit()
|
import PySimpleGUI as sg
sg.theme('LightBlue6') # akna värvilahenduse muutmine
def arvuta(bruto, sots, pens, tooandja, tootaja, tulumaks, brutoo, neto, maksud, tooandjamaks, tulum, tmv, aasta):
    """Estonian salary calculator.

    Accumulates employee deductions in `neto`, total taxes in `maksud` and
    employer-side taxes in `tooandjamaks` for a gross monthly salary
    `bruto`; the boolean flags select which taxes apply.

    Returns [neto, maksud, tooandjamaks, tulum, tmv].
    """
    bruto = float(bruto)
    # Employer social tax (33%).
    if sots:
        tooandjamaks += bruto * 0.33
        maksud += bruto * 0.33
    # Funded pension contribution (2%), withheld from the employee.
    if pens:
        neto = neto - (bruto * 0.02)
        maksud += bruto * 0.02
    # Unemployment insurance: employer 0.8%.
    if tooandja:
        tooandjamaks += bruto * 0.008
        maksud += bruto * 0.008
    # Unemployment insurance: employee 1.6%, withheld.
    if tootaja:
        neto = neto - (bruto * 0.016)
        maksud += bruto * 0.016
    # Progressive income tax on the annualized salary.
    if tulumaks:
        aasta = bruto * 12
        # Basic exemption (tmv) phases out between 14400 and 25200 EUR/year.
        if aasta <= 14400:
            tmv = 500
        elif aasta <= 25200:
            tmv = 6000 - 6000 / 10800 * (aasta - 14400)
        else:
            tmv = 0
        # Annual income tax per bracket (below 6000/year no tax is set).
        if aasta > 25200:
            tulum = (aasta - 6000) * 0.2 + (25200-14400) * 0.311111 + (aasta - 25200) * 0.2
        elif aasta > 14400:
            tulum = (aasta - 6000) * 0.2 + (aasta - tmv - 14400) * 0.311111
        elif aasta > 6000:
            tulum = (aasta - tmv - 6000) * 0.2
        elif aasta == 6000:
            tulum = 0
        # Back to a monthly figure, then final net = gross - deductions - tax.
        tulum = round(tulum / 12, 2)
        neto = bruto - neto - tulum
        maksud += tulum
    return [neto, maksud, tooandjamaks, tulum, tmv]
# Accumulator values passed into arvuta() on every click.
brutoo = 0.0
neto = 0.0
maksud = 0.0
tooandjamaks = 0.0
tulum = 0.0
tmv = 0.0
aasta = 0.0
# Window layout (labels are Estonian UI strings and kept verbatim).
# NOTE(review): several rows reuse the key 'tekstisilt'; PySimpleGUI keys
# should be unique per window - confirm whether these elements matter.
layout = [
    [sg.Text('Palgakalkulaator'), sg.Text(size=(16,1), key='tekstisilt')],
    [sg.Text('Vali maksud: '), sg.Text(size=(12,1), key='tekstisilt')],
    [sg.Checkbox('Sotsiaalmaks', default=True, key = 'sotsmaks'), sg.Checkbox('Kogumispension', default=True, key ='pens')],
    [sg.Text('Töötuskindlustusmaksed:'), sg.Text(size=(12,1), key='tekstisilt')],
    [sg.Checkbox('Tööandja', default=True, key = 'tooandja'), sg.Checkbox('Töötaja', default=True, key = 'tootaja')],
    [sg.Checkbox('Astmeline tulumaks', default=True, key = 'tulumaks')],
    [sg.Text('Sisesta bruto palk: '), sg.Text(size=(16,1), key='tekstisilt'),
    sg.InputText('EUR', size = (9,1), do_not_clear = True, key = 'bruto')],
    [sg.Button('Kalkuleeri', key = 'button'), sg.Exit('Välju')]
    ]
window = sg.Window('Palgakalkulaator', layout)
# Event loop: recalculate on every "Kalkuleeri" click until the window closes.
while True:
    event, values = window.read()
    if event == sg.WIN_CLOSED or event == 'Välju':
        break
    if event == 'button':
        # FIX: renamed from 'list' to avoid shadowing the builtin.
        result = arvuta(values['bruto'], values['sotsmaks'], values['pens'], values['tooandja'], values['tootaja'], values['tulumaks'], brutoo, neto, maksud, tooandjamaks, tulum, tmv, aasta)
        print(result)
        print(event, values)
window.close()
import win32com.client
import os
class Macros():
    """Run VBA macros from a macro-enabled Excel workbook via COM automation.

    NOTE(review): the attribute name 'ExcelMarcoFilePath' misspells 'Macro';
    kept as-is because it is part of the public attribute surface.
    """
    def __init__(self, MacroContainingExcelFilePath, VBAModule):
        self.ExcelMarcoFilePath = MacroContainingExcelFilePath
        excel_file = os.path.basename(MacroContainingExcelFilePath)
        # Macros are invoked as "<workbook>!<module>.<macro name>".
        self.Macro_Prefix = excel_file + "!" + VBAModule + "."
        self.xl = win32com.client.Dispatch("Excel.Application")
        self.xl.Visible = False  # keep Excel hidden while macros run
    def BuildPrettyTableWorkbook(self, *args):
        """Run the ConvertCSVToPrettyTables macro and quit Excel.

        args[0]: the comma separated csv file string
        args[1]: file path/name (with extension) to save the resulting
            Pretty Table workbook
        """
        Macro_Name = self.Macro_Prefix + "ConvertCSVToPrettyTables"
        self.xl.Workbooks.Open(Filename=self.ExcelMarcoFilePath)
        self.xl.Application.Run(Macro_Name, args[0], args[1])
        self.xl.Quit()
|
import psycopg2
# SECURITY NOTE(review): database credentials are hard-coded in this DSN;
# they should come from environment variables or a config file instead.
url = "dbname='IReporter' user='postgres' host='localhost' port=5433 password='Boywonder47'"
class database_setup(object):
    """Create/drop the Users and Posts tables for the IReporter database."""
    def __init__(self):
        # One connection + cursor per instance; commits happen explicitly.
        self.conn = psycopg2.connect(url)
        self.cursor = self.conn.cursor()
    def destroy_tables(self):
        """Drop both tables (CASCADE removes dependent objects) and commit."""
        self.cursor.execute("""DROP TABLE IF EXISTS Users CASCADE;""")
        self.cursor.execute("""DROP TABLE IF EXISTS Posts CASCADE;""")
        self.conn.commit()
    def create_tables(self):
        """Create the Users and Posts tables if they do not exist, then commit."""
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS Users (
            id SERIAL PRIMARY KEY,
            firstname VARCHAR(25) NOT NULL,
            lastname VARCHAR(25) NOT NULL,
            othernames VARCHAR(25),
            email VARCHAR(25) NOT NULL,
            phoneNumber VARCHAR(50) NOT NULL,
            username VARCHAR(25) NOT NULL,
            register VARCHAR(25),
            isAdmin VARCHAR(25),
            password VARCHAR(255) NOT NULL
            );""")
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS Posts (
            id SERIAL,
            createdOn timestamp ,
            createdBy SERIAL,
            post_type VARCHAR(25) NOT NULL,
            location VARCHAR(50) NOT NULL,
            status VARCHAR(25) NOT NULL,
            photo VARCHAR(25),
            video VARCHAR(25),
            comments VARCHAR(250) NOT NULL
            );""")
        self.conn.commit()
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
# Application developed by: Carlos Andrés Ordóñez P.
# Country: Ecuador
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from openerp.osv import osv
import pdb
class rol_general_pdf(report_sxw.rml_parse):
    """RML parser for the general payroll report on hr.payslip.run records.

    Builds a table of salary-rule totals per employee, grouped by
    department, consumed by the rol_general_pdf.mako template.

    Changes vs. the original: deprecated dict.has_key() replaced with the
    `in` operator (works on both Python 2 and 3), unused local removed,
    comments translated to English.
    """
    def __init__(self, cr, uid, name, context):
        super(rol_general_pdf, self).__init__(cr, uid, name, context=context)
        # Expose helpers to the mako template.
        self.localcontext.update({
            'time': time,
            'cr':cr,
            'uid': uid,
            'generate_dict': self.generate_dict,
        })
    def generate_dict(self, obj):
        """Build and return the list of report rows for payslip-run `obj`.

        Rows: per department a title row, a header row, one row per
        employee with the amount of each salary rule, a department TOTAL
        row, and a final grand-total section.
        """
        # diccionario maps: department id -> employee name -> {
        #     'cedula': ..., 'puesto de trabajo': ..., salary_rule_id: total,
        #     'dias laborados': ... }
        diccionario = {}
        departamentos = []  # [id, name] pairs, with duplicates (cleaned below)
        rubros = []         # [id, sequence, name] of salary rules, idem
        if obj:
            registro = obj
            for rol_individual in registro.slip_ids:
                for rubro in rol_individual.line_ids:
                    # rubro carries the salary rule's (id, sequence, name)
                    rubros.append([rubro.salary_rule_id.id,rubro.salary_rule_id.sequence,rubro.salary_rule_id.name])
                    # departamentos carries the (id, name) pair
                    departamentos.append([rol_individual.department_id.id,rol_individual.department_id.name])
                    if rol_individual.department_id.id in diccionario:
                        if rol_individual.employee_id.name_related in diccionario[rol_individual.department_id.id]:
                            if rubro.salary_rule_id.id in diccionario[rol_individual.department_id.id][rol_individual.employee_id.name_related]:
                                diccionario[rol_individual.department_id.id][rol_individual.employee_id.name_related][rubro.salary_rule_id.id] += rubro.total
                            else:
                                diccionario[rol_individual.department_id.id][rol_individual.employee_id.name_related][rubro.salary_rule_id.id] = rubro.total
                        else:
                            diccionario[rol_individual.department_id.id][rol_individual.employee_id.name_related] = {'cedula': rol_individual.employee_id.name, 'puesto de trabajo': rol_individual.job_id.name, rubro.salary_rule_id.id: rubro.total}
                    else:
                        diccionario[rol_individual.department_id.id] = {rol_individual.employee_id.name_related: {'cedula': rol_individual.employee_id.name, 'puesto de trabajo': rol_individual.job_id.name, rubro.salary_rule_id.id: rubro.total}}
                    # (Re)count worked days for this employee: normal work,
                    # vacation, sickness and maternity leave all count.
                    diccionario[rol_individual.department_id.id][rol_individual.employee_id.name_related]['dias laborados'] = 0
                    for asistencia in rol_individual.worked_days_line_ids:
                        if asistencia.code=='WORK100' or asistencia.code=='VAC' or asistencia.code=='ENF' or asistencia.code=='MAT':
                            diccionario[rol_individual.department_id.id][rol_individual.employee_id.name_related]['dias laborados']+=asistencia.number_of_days
        # De-duplicate while preserving first-seen order.
        departamentos_clean = []
        for key in departamentos:
            if key not in departamentos_clean:
                departamentos_clean.append(key)
        rubros_clean = []
        for key in rubros:
            if key not in rubros_clean:
                rubros_clean.append(key)
        departamentos_clean.sort(key=lambda x: x[0])  # by department id
        rubros_clean.sort(key=lambda x: x[1])         # by salary-rule sequence
        # writer accumulates the rows handed to the template.
        writer = []
        cabecera = ['CEDULA','EMPLEADO','DIAS LAB.']
        pie = {}    # per-department totals per salary rule
        for rubro in rubros_clean:
            cabecera.append(rubro[2])
        total = {}  # grand totals per salary rule
        for departamento in departamentos_clean:
            # Department title row, then the column header row.
            linea_departamento = ['' for i in cabecera]
            linea_departamento[0] = 'DEPARTAMENTO'
            linea_departamento[1] = ' <br/> <br/>' + str(departamento[1])
            linea_departamento[2] = ''
            writer.append(linea_departamento)
            pie = {}
            writer.append(cabecera)
            for empleado in diccionario[departamento[0]].keys():
                linea = [diccionario[departamento[0]][empleado]['cedula'], empleado, diccionario[departamento[0]][empleado]['dias laborados']]
                for rubro in rubros_clean:
                    if rubro[0] not in pie:
                        pie.update({rubro[0]:0.00})
                    if rubro[0] not in total:
                        total.update({rubro[0]:0.00})
                    if rubro[0] in diccionario[departamento[0]][empleado]:
                        linea.append(diccionario[departamento[0]][empleado][rubro[0]])
                        pie[rubro[0]] = pie[rubro[0]] + diccionario[departamento[0]][empleado][rubro[0]]
                        total[rubro[0]] = total[rubro[0]] + diccionario[departamento[0]][empleado][rubro[0]]
                    else:
                        linea.append(0.00)
                writer.append(linea)
            # Department TOTAL row plus a spacer.
            linea = ['TOTAL','','']
            for rubro in rubros_clean:
                linea.append(pie[rubro[0]])
            writer.append(linea)
            writer.append([' <br/> '])
        # Grand total section: repeated header (first 3 cells blanked) + totals.
        linea = ['TOTAL','','']
        cabecera_final = [i for i in cabecera]
        cabecera_final[0] = cabecera_final[1] = cabecera_final[2] = ''
        writer.append(cabecera_final)
        for rubro in rubros_clean:
            linea.append(total[rubro[0]])
        writer.append(linea)
        return writer
# Register the parser: renders 'hr.payslip.run' records through the
# rol_general_pdf.mako template under report name 'report.rol_general_pdf'.
report_sxw.report_sxw('report.rol_general_pdf',
    'hr.payslip.run',
    'addons/gad_payroll/report/rol_general_pdf.mako',
    parser=rol_general_pdf,
    header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import Session
from time import process_time
# Fail fast if this TF build has no CUDA support.
assert (tf.test.is_built_with_cuda())
tf.keras.backend.clear_session()
# Cap GPU memory usage at 70% of the device for this session.
config = ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.7
tf.compat.v1.keras.backend.set_session(Session(config=config))
# tf.config.optimizer.set_jit(False)
def model_fn(x, y, z):
    """Toy computation compiled via XLA: reduce_sum(x + y*z)."""
    return tf.reduce_sum(x + y * z)
# def create_and_run_graph():
with tf.compat.v1.Session() as sess:
    x = tf.compat.v1.placeholder(tf.float32, name='x')
    y = tf.compat.v1.placeholder(tf.float32, name='y')
    z = tf.compat.v1.placeholder(tf.float32, name='z')
    result = tf.xla.experimental.compile(computation=model_fn, inputs=(x, y, z))[0]
    # `result` is a normal Tensor (albeit one that is computed by an XLA
    # compiled executable) and can be used like any other Tensor.
    result = tf.add(result, result)
    # BUG FIX: the original passed feed_dict={ ... }, which is a *set*
    # literal containing Ellipsis and fails at runtime with a TypeError;
    # feed concrete values for every placeholder instead.
    sess.run(result, feed_dict={x: 1.0, y: 2.0, z: 3.0})
# create_and_run_graph()
import argparse
import json
import os
import random
import time
import tqdm
from pytok import PyTok
from pytok import exceptions
def main(args):
    """Collect TikTok comments for every downloaded video that lacks them.

    Scans data/videos/*/video_data.json for video metadata, then fetches up
    to 1000 comments per video with PyTok, retrying the whole pass after a
    30-minute backoff whenever the scraper times out.
    """
    this_dir_path = os.path.dirname(os.path.abspath(__file__))
    data_dir_path = os.path.join(this_dir_path, 'data')
    videos_dir_path = os.path.join(data_dir_path, 'videos')
    video_paths = [os.path.join(videos_dir_path, file_name) for file_name in os.listdir(videos_dir_path)]
    # Load metadata for every video directory that has a video_data.json.
    videos = []
    for video_path in video_paths:
        file_path = os.path.join(video_path, 'video_data.json')
        if not os.path.exists(file_path):
            continue
        with open(file_path, 'r') as f:
            video_data = json.load(f)
        videos.append(video_data)
    delay = 0
    backoff_delay = 1800  # 30-minute pause after a timeout
    finished = False
    while not finished:
        # Shuffle so retries do not always hammer the same first videos.
        random.shuffle(videos)
        try:
            with PyTok(chrome_version=args.chrome_version, request_delay=delay, headless=True) as api:
                for video in tqdm.tqdm(videos):
                    comment_dir_path = os.path.join(videos_dir_path, video['id'])
                    if not os.path.exists(comment_dir_path):
                        os.mkdir(comment_dir_path)
                    comment_file_path = os.path.join(comment_dir_path, f"video_comments.json")
                    # Skip videos whose comments were already fetched.
                    if os.path.exists(comment_file_path):
                        continue
                    try:
                        comments = []
                        for comment in api.video(id=video['id'], username=video['author']['uniqueId']).comments(count=1000):
                            comments.append(comment)
                        with open(comment_file_path, 'w') as f:
                            json.dump(comments, f)
                    except exceptions.NotAvailableException:
                        # Video removed/private: leave it without comments.
                        continue
            finished = True
        except exceptions.TimeoutException as e:
            # Back off and restart the pass; already-saved videos are skipped.
            time.sleep(backoff_delay)
        except Exception:
            raise
if __name__ == '__main__':
    # --chrome-version must match the locally installed Chrome major version.
    parser = argparse.ArgumentParser()
    parser.add_argument('--chrome-version', type=int, default=104)
    args = parser.parse_args()
    main(args)
# Complete the breakingRecords function in the editor below.
# It must return an integer array containing the numbers of times she broke her records.
# Index 0 is for breaking most points records, and index 1 is for breaking least points records.
# https://www.hackerrank.com/challenges/breaking-best-and-worst-records/problem
def breakingRecords(scores):
    """Return [most-points record breaks, least-points record breaks].

    BUG FIX: the original printed the counters and returned None, but the
    problem statement above requires returning the integer array; the
    function now returns [maxScoreCounter, minScoreCounter].
    """
    max_score = min_score = scores[0]
    max_count, min_count = 0, 0
    # The first game sets both records without counting as a break.
    for score in scores[1:]:
        if score > max_score:
            max_score = score
            max_count += 1
        elif score < min_score:  # a score cannot break both records at once
            min_score = score
            min_count += 1
    return [max_count, min_count]
|
import numpy as np
from scipy.ndimage import affine_transform
# Functions to convert points to homogeneous coordinates and back.
# (PEP 8: named functions instead of lambda assignments.)
def pad(x):
    """Append a column of ones to the (N, D) point array -> (N, D+1)."""
    return np.hstack([x, np.ones((x.shape[0], 1))])
def unpad(x):
    """Drop the last (homogeneous) column -> (N, D)."""
    return x[:, :-1]
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
                 keypoints_color='k', matches_color=None, only_matches=False):
    """Plot matched features.
    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Matches and image are drawn in this ax.
    image1 : (N, M [, 3]) array
        First grayscale or color image.
    image2 : (N, M [, 3]) array
        Second grayscale or color image.
    keypoints1 : (K1, 2) array
        First keypoint coordinates as ``(row, col)``.
    keypoints2 : (K2, 2) array
        Second keypoint coordinates as ``(row, col)``.
    matches : (Q, 2) array
        Indices of corresponding matches in first and second set of
        descriptors, where ``matches[:, 0]`` denote the indices in the first
        and ``matches[:, 1]`` the indices in the second set of descriptors.
    keypoints_color : matplotlib color, optional
        Color for keypoint locations.
    matches_color : matplotlib color, optional
        Color for lines which connect keypoint matches. By default the
        color is chosen randomly.
    only_matches : bool, optional
        Whether to only plot matches and not plot the keypoint locations.
    """
    # NOTE(review): astype returns a *new* array, so these two calls have
    # no effect - confirm whether float conversion was intended here.
    image1.astype(np.float32)
    image2.astype(np.float32)
    # Zero-pad the smaller image along each axis so both have equal shape.
    new_shape1 = list(image1.shape)
    new_shape2 = list(image2.shape)
    if image1.shape[0] < image2.shape[0]:
        new_shape1[0] = image2.shape[0]
    elif image1.shape[0] > image2.shape[0]:
        new_shape2[0] = image1.shape[0]
    if image1.shape[1] < image2.shape[1]:
        new_shape1[1] = image2.shape[1]
    elif image1.shape[1] > image2.shape[1]:
        new_shape2[1] = image1.shape[1]
    if new_shape1 != image1.shape:
        new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
        new_image1[:image1.shape[0], :image1.shape[1]] = image1
        image1 = new_image1
    if new_shape2 != image2.shape:
        new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
        new_image2[:image2.shape[0], :image2.shape[1]] = image2
        image2 = new_image2
    # Draw the two images side by side; keypoints of image2 are shifted
    # right by image1's width (offset[1]).
    image = np.concatenate([image1, image2], axis=1)
    offset = image1.shape
    if not only_matches:
        ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
                   facecolors='none', edgecolors=keypoints_color)
        ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
                   facecolors='none', edgecolors=keypoints_color)
    ax.imshow(image, interpolation='nearest', cmap='gray')
    ax.axis((0, 2 * offset[1], offset[0], 0))
    # One line per match, random color unless matches_color is given.
    for i in range(matches.shape[0]):
        idx1 = matches[i, 0]
        idx2 = matches[i, 1]
        if matches_color is None:
            color = np.random.rand(3)
        else:
            color = matches_color
        ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
                (keypoints1[idx1, 0], keypoints2[idx2, 0]),
                '-', color=color)
def get_output_space(img_ref, imgs, transforms):
    """Compute the frame that contains the reference and all warped images.

    Args:
        img_ref: reference (grayscale, 2D) image
        imgs: images to be transformed
        transforms: list of affine transformation matrices; transforms[i]
            maps points in imgs[i] to points in img_ref

    Returns:
        (output_shape, offset): integer output shape and the minimum-corner
        offset of the combined bounding box.
    """
    assert len(imgs) == len(transforms)

    def image_corners(shape):
        # The four corners of an image in (row, col) coordinates.
        rows, cols = shape
        return np.array([[0, 0], [rows, 0], [0, cols], [rows, cols]])

    all_corners = [image_corners(img_ref.shape)]
    for img, H in zip(imgs, transforms):
        # Apply the affine part plus translation to this image's corners.
        warped = image_corners(img.shape).dot(H[:2, :2]) + H[2, :2]
        all_corners.append(warped)
    # The output frame spans from the minimum to the maximum corner.
    stacked = np.vstack(all_corners)
    corner_min = stacked.min(axis=0)
    corner_max = stacked.max(axis=0)
    # Round up so every warped pixel fits, and convert to int for indexing.
    output_shape = np.ceil(corner_max - corner_min).astype(int)
    return output_shape, corner_min
def warp_image(img, H, output_shape, offset):
    """Warp `img` into the output frame given by `output_shape`/`offset`.

    scipy's affine_transform maps an *output* pixel index o to the input
    position matrix.dot(o) + offset, so the inverse transform is passed.
    Pixels that fall outside the input are filled with -1.
    """
    Hinv_T = np.linalg.inv(H).T
    matrix = Hinv_T[:2, :2]
    shift = Hinv_T[:2, 2] + offset
    return affine_transform(img.astype(np.float32),
                            matrix,
                            shift,
                            output_shape,
                            cval=-1)
|
import datetime
from functools import cached_property
from typing import Optional, cast
from models_library.basic_types import (
BootModeEnum,
BuildTargetEnum,
LogLevel,
VersionTag,
)
from models_library.docker import DockerLabelKey
from pydantic import Field, PositiveInt, parse_obj_as, validator
from settings_library.base import BaseCustomSettings
from settings_library.rabbit import RabbitSettings
from settings_library.utils_logging import MixinLoggingSettings
from types_aiobotocore_ec2.literals import InstanceTypeType
from .._meta import API_VERSION, API_VTAG, APP_NAME
class EC2Settings(BaseCustomSettings):
    """Credentials and endpoint configuration for the AWS EC2 API."""
    EC2_ACCESS_KEY_ID: str
    # Custom endpoint (e.g. a local emulator); leave None for standard AWS.
    EC2_ENDPOINT: Optional[str] = Field(
        default=None, description="do not define if using standard AWS"
    )
    EC2_REGION_NAME: str = "us-east-1"
    EC2_SECRET_ACCESS_KEY: str
class EC2InstancesSettings(BaseCustomSettings):
    """Parameters used when the autoscaler launches new EC2 instances."""
    EC2_INSTANCES_ALLOWED_TYPES: list[str] = Field(
        ...,
        min_items=1,
        unique_items=True,
        description="Defines which EC2 instances are considered as candidates for new EC2 instance",
    )
    EC2_INSTANCES_AMI_ID: str = Field(
        ...,
        min_length=1,
        description="Defines the AMI (Amazon Machine Image) ID used to start a new EC2 instance",
    )
    EC2_INSTANCES_MAX_INSTANCES: int = Field(
        10,
        description="Defines the maximum number of instances the autoscaling app may create",
    )
    EC2_INSTANCES_SECURITY_GROUP_IDS: list[str] = Field(
        ...,
        min_items=1,
        description="A security group acts as a virtual firewall for your EC2 instances to control incoming and outgoing traffic"
        " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html), "
        " this is required to start a new EC2 instance",
    )
    EC2_INSTANCES_SUBNET_ID: str = Field(
        ...,
        min_length=1,
        description="A subnet is a range of IP addresses in your VPC "
        " (https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html), "
        "this is required to start a new EC2 instance",
    )
    EC2_INSTANCES_KEY_NAME: str = Field(
        ...,
        min_length=1,
        description="SSH key filename (without ext) to access the instance through SSH"
        " (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html),"
        "this is required to start a new EC2 instance",
    )
    # NOTE(review): method name misspells 'instance'; renaming would be safe
    # for validators but is left unchanged here (documentation-only edit).
    @validator("EC2_INSTANCES_ALLOWED_TYPES")
    @classmethod
    def check_valid_intance_names(cls, value):
        """Validate that every entry is a known EC2 instance type literal."""
        # NOTE: needed because of a flaw in BaseCustomSettings
        # issubclass raises TypeError if used on Aliases
        parse_obj_as(tuple[InstanceTypeType, ...], value)
        return value
class NodesMonitoringSettings(BaseCustomSettings):
    """Docker label filters controlling which nodes/services are monitored."""
    NODES_MONITORING_NODE_LABELS: list[DockerLabelKey] = Field(
        default_factory=list,
        description="autoscaling will only monitor nodes with the given labels (if empty all nodes will be monitored), these labels will be added to the new created nodes by default",
    )
    NODES_MONITORING_SERVICE_LABELS: list[DockerLabelKey] = Field(
        default_factory=list,
        description="autoscaling will only monitor services with the given labels (if empty all services will be monitored)",
    )
    NODES_MONITORING_NEW_NODES_LABELS: list[DockerLabelKey] = Field(
        default=["io.simcore.autoscaled-node"],
        description="autoscaling will add these labels to any new node it creates (additional to the ones in NODES_MONITORING_NODE_LABELS",
    )
class ApplicationSettings(BaseCustomSettings, MixinLoggingSettings):
    """Top-level settings of the autoscaling service.

    Groups static code metadata, image build-time variables (SC_*) and the
    runtime flags plus nested EC2/monitoring/RabbitMQ sub-settings, which
    are auto-populated from environment variables.
    """
    # CODE STATICS ---------------------------------------------------------
    API_VERSION: str = API_VERSION
    APP_NAME: str = APP_NAME
    API_VTAG: VersionTag = API_VTAG
    # IMAGE BUILDTIME ------------------------------------------------------
    # @Makefile
    SC_BUILD_DATE: Optional[str] = None
    SC_BUILD_TARGET: Optional[BuildTargetEnum] = None
    SC_VCS_REF: Optional[str] = None
    SC_VCS_URL: Optional[str] = None
    # @Dockerfile
    SC_BOOT_MODE: Optional[BootModeEnum] = None
    SC_BOOT_TARGET: Optional[BuildTargetEnum] = None
    SC_HEALTHCHECK_TIMEOUT: Optional[PositiveInt] = Field(
        None,
        description="If a single run of the check takes longer than timeout seconds "
        "then the check is considered to have failed."
        "It takes retries consecutive failures of the health check for the container to be considered unhealthy.",
    )
    SC_USER_ID: Optional[int] = None
    SC_USER_NAME: Optional[str] = None
    # RUNTIME  -----------------------------------------------------------
    AUTOSCALING_DEBUG: bool = Field(
        False, description="Debug mode", env=["AUTOSCALING_DEBUG", "DEBUG"]
    )
    AUTOSCALING_LOGLEVEL: LogLevel = Field(
        LogLevel.INFO, env=["AUTOSCALING_LOGLEVEL", "LOG_LEVEL", "LOGLEVEL"]
    )
    # Nested settings auto-filled from their own env vars; None disables them.
    AUTOSCALING_EC2_ACCESS: Optional[EC2Settings] = Field(auto_default_from_env=True)
    AUTOSCALING_EC2_INSTANCES: Optional[EC2InstancesSettings] = Field(
        auto_default_from_env=True
    )
    AUTOSCALING_NODES_MONITORING: Optional[NodesMonitoringSettings] = Field(
        auto_default_from_env=True
    )
    AUTOSCALING_POLL_INTERVAL: datetime.timedelta = Field(
        default=datetime.timedelta(seconds=10),
        description="interval between each resource check (default to seconds, or see https://pydantic-docs.helpmanual.io/usage/types/#datetime-types for string formating)",
    )
    AUTOSCALING_RABBITMQ: Optional[RabbitSettings] = Field(auto_default_from_env=True)
    @cached_property
    def LOG_LEVEL(self):
        # Uppercase property name kept for backward compatibility with callers.
        return self.AUTOSCALING_LOGLEVEL
    @validator("AUTOSCALING_LOGLEVEL")
    @classmethod
    def valid_log_level(cls, value: str) -> str:
        """Validate/normalize the configured log level via MixinLoggingSettings."""
        # NOTE: mypy is not happy without the cast
        return cast(str, cls.validate_log_level(value))
|
# Python 2 script: endless console "spinner" animation (Ctrl+C to stop).
print "this will run forever if you don't \n use ctrl+c"
while True:
    # '\r' plus the trailing comma rewrites the same console line each frame.
    for i in ["/","-","|","\\","|"]:
        print "%s\r" % i,
|
#!/usr/bin/env python
import os
import sys
import json
import time
import urllib2
import imghdr
import traceback
from ConfigParser import SafeConfigParser
import pynotify
DEFAULT_CONFIG_FILE = "~/.ttrss-notify.cfg"
class TTRSS(object):
    """Poll a Tiny Tiny RSS server and raise desktop notifications (Python 2).

    HTTP auth, TT-RSS credentials, the feed/category to watch and
    notification options are read from an INI config file.  Usable as a
    context manager: the TT-RSS session is logged out on exit.
    """
    def __init__(self, config_file):
        # parse configuration
        parser = SafeConfigParser()
        parser.read(config_file)
        self.initial_timeout = parser.getint('base', 'initial_timeout')
        self.interval = parser.getint('base', 'interval')
        self.baseurl = parser.get('web', 'baseurl')
        web_auth_method = parser.get('web', 'auth_method')
        web_realm = parser.get('web', 'realm')
        web_user = parser.get('web', 'username')
        web_password = parser.get('web', 'password')
        ttrss_user = parser.get('ttrss', 'username')
        ttrss_password = parser.get('ttrss', 'password')
        self.ttrss_feed_id = parser.getint('ttrss', 'feed_id')
        self.ttrss_is_cat = parser.getboolean('ttrss', 'is_cat')
        self.notify_timeout = parser.getint('notify', 'timeout')
        image = parser.get('notify', 'image')
        try:
            # see if imghdr can find a valid image
            imghdr.what(image)
            self.image = image
        except:
            # invalid/missing icon path: fall back to no notification icon
            self.image = None
        self.apiurl = self.baseurl + '/api/'
        # install http auth handler / opener
        pwm = urllib2.HTTPPasswordMgr()
        pwm.add_password(web_realm, self.baseurl, web_user, web_password)
        # NOTE(review): if auth_method is neither digest nor basic, `handler`
        # is unbound below and this raises NameError - confirm intended.
        if web_auth_method.lower() == "digest":
            handler = urllib2.HTTPDigestAuthHandler
        elif web_auth_method.lower() == "basic":
            handler = urllib2.HTTPBasicAuthHandler
        handler = handler(pwm)
        opener = urllib2.build_opener(handler)
        urllib2.install_opener(opener)
        # login to tiny rss
        self.session_id = ""
        self.login(ttrss_user, ttrss_password)
        pynotify.init("tinyrss")
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.logout()
    def _request(self, call):
        """POST a JSON API call (with session id added) and decode the reply."""
        call['sid'] = self.session_id
        response = urllib2.urlopen(self.apiurl, json.dumps(call))
        return json.load(response)
    def login(self, username="", password=""):
        """Open a TT-RSS API session and remember its session id."""
        req = {'op': 'login', 'user': username, 'password': password}
        res = self._request(req)
        self.session_id = res['content']['session_id']
    def logout(self):
        """Close the TT-RSS API session."""
        req = {'op': 'logout'}
        self._request(req)
    def getUnreadCount(self):
        """Return the total number of unread articles."""
        req = {'op': 'getUnread'}
        res = self._request(req)
        return int(res['content']['unread'])
    def getHeadlines(self, feed_id, is_cat):
        """Return the unread headlines for a feed (or category)."""
        req = {'op': 'getHeadlines', 'feed_id': feed_id,
               'is_cat': is_cat, 'view_mode': "unread"}
        res = self._request(req)
        return res['content']
    def getCategories(self):
        """Return all categories keyed by their integer id."""
        req = {'op': 'getCategories'}
        res = self._request(req)
        return dict([(int(item['id']), item) for item in res['content']])
    def getFeeds(self):
        """Return all feeds (cat_id -4 means every feed) keyed by integer id."""
        req = {'op': 'getFeeds', 'cat_id': -4}
        res = self._request(req)
        return dict([(int(item['id']), item) for item in res['content']])
    def runOnce(self):
        """Check the watched feed/category once; notify on unread items."""
        # check feed
        headlines = None
        if self.ttrss_is_cat:
            categories = self.getCategories()
        else:
            categories = self.getFeeds()
        category = categories[self.ttrss_feed_id]
        if category['unread']:
            headlines = self.getHeadlines(self.ttrss_feed_id,
                                          self.ttrss_is_cat)
        # notify if any unread messages
        if headlines:
            summary = "TTRSS: %i unread in %s" % (category['unread'],
                                                  category['title'])
            body = "• "
            body += "\n• ".join([h['title'] for h in headlines])
            body += "\n<a href='%s/#f=%i&c=%i'>open TTRSS</a>" % \
                (self.baseurl, self.ttrss_feed_id, self.ttrss_is_cat)
            self.notify(summary, body, self.notify_timeout)
    def notify(self, summary, body, timeout):
        """Show a libnotify popup with the configured icon and timeout."""
        noti = pynotify.Notification(summary, body, self.image)
        noti.set_timeout(timeout)
        noti.show()
def main():
    """Entry point: read the config path from argv[1] (falling back to the
    default location), then poll the TTRSS server forever."""
    # read config at default location or specified on command line
    # (fix: the old bare "except: pass" around sys.argv[1] also swallowed
    # unrelated errors)
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        filename = os.path.expanduser(DEFAULT_CONFIG_FILE)
    with TTRSS(filename) as ttrss:
        time.sleep(ttrss.initial_timeout)
        while True:
            try:
                ttrss.runOnce()
            except Exception:
                # surface the failure as a notification instead of dying
                exc_info = sys.exc_info()
                info = "".join(traceback.format_exception(*exc_info))
                ttrss.notify("TTRSS: Caught exception", info, 0)
            time.sleep(ttrss.interval)


if __name__ == "__main__":
    main()
|
# Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
from .math_geometry import *
from .prob_stat import *
from .bbox_transform import *
from .mask_transform import *
from .math_algebra import *
from .math_conversion import *
from .pts_transform import *
from .bbox_3d_transform import *
|
import math
r,n=raw_input().split()
rad=float(r)
onts=int(n)
print round(onts*math.sqrt(2*rad*rad-2*rad*rad*math.cos(2*3.14/onts)),1)
|
class Behavior(object):
    """Engagement counters attached to a post: number of comments and views."""

    def __init__(self, comments, views):
        super(Behavior, self).__init__()
        # store both counters on the instance
        self.comments = comments
        self.views = views
class Tag(object):
    """A parsed markup tag: its name and its attribute list."""

    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class. NOTE: the list is shared until __init__
    # shadows it with an instance attribute.
    tag = 0
    attrs = []

    def __init__(self, tag, attrs):
        self.tag = tag
        self.attrs = attrs
        # (fix: removed a dead trailing "pass" statement)
class Info(object):
    """Metadata record for a scraped page/post (title, date, site, link,
    children text, and its engagement counters)."""

    # class-level string defaults kept for backward compatibility
    title = ""
    date = ""
    site = ""
    link = ""
    children = ""
    behavior = Behavior(0, 0)

    def __init__(self):
        # Fix: the class attribute above is one shared Behavior object; give
        # each Info its own copy so mutating one instance's counters does not
        # silently change every other instance.
        self.behavior = Behavior(0, 0)
|
#Author:karim shoair (D4Vinci)
#Extract the best stargazers for any github repo
import mechanicalsoup as ms
from tqdm import tqdm
import readline
# one shared browser session reused by every fetch helper below
browser = ms.StatefulBrowser()
url = input("Repository link : ")+"/stargazers"
# GitHub renders this sentence on the page after the last stargazers page
check_str = "This repository has no more stargazers."
# ANSI colour escapes (green / white / blue) and the reset sequence
G,W,B = '\033[92m','\x1b[37m','\033[94m'
end = '\033[0m'
def grab_users(grab):
    """Extract git-awards profile URLs from one stargazers page.

    grab is an opened page whose .soup exposes findAll(); spans without an
    inner <a> are skipped. Returns a list of profile URL strings.
    """
    tags = grab.soup.findAll("span",{"class":"css-truncate css-truncate-target"})
    profiles = []
    for span in tags:
        # fix: the original walked span.a.attrs twice and hid failures behind
        # a bare "except:"; test for the missing anchor explicitly instead
        anchor = span.a
        if anchor is None:
            continue
        profiles.append("http://git-awards.com/users" + anchor.attrs['href'])
    return profiles
def loop_over_pages(link):
    """Walk the paginated stargazers listing (pages 1..999) and collect every
    profile URL until GitHub's end-of-list marker appears."""
    collected = []
    end_marker = "This repository has no more stargazers."
    page_no = 1
    while page_no < 1000:
        page = browser.open(link + "?page=" + str(page_no))
        if end_marker in page.content.decode():
            break
        collected.extend(grab_users(page))
        page_no += 1
    return collected
# crawl every stargazers page and report how many profiles were found
print("[+] Grabing users...")
stargazers = loop_over_pages(url)
print("[+] Found "+str(len(stargazers))+" stargazers!" )
print("[+] Now searching who's have more than 400 stars at total...\n")
def grab_stars_total(profiles):
    """Visit each git-awards profile and keep those with more than 400 total
    stars. Returns {username: star_count}."""
    famous_people = {}
    with tqdm(total=len(profiles)) as bar:
        for person in profiles:
            bar.update(1)
            page = browser.open(person)
            try:
                stars = int(page.soup.findAll("tbody")[0].findAll("td")[-1].text)
            except (IndexError, ValueError, AttributeError):
                # fix: narrowed from a bare "except:"; skip profiles without a
                # stats table or with a non-numeric last cell
                continue
            if stars > 400:
                famous_people[person.split("/")[-1]] = stars
    return famous_people
# print every stargazer whose git-awards total exceeds 400 stars, colourized
famous = grab_stars_total(stargazers)
print("[+] Found "+B+str(len(famous))+end+" famous stargazers and they are :" )
for user in famous.keys():
    print(G+"http://github.com/"+user+W+" | With stars => "+B+str(famous[user]))
print(end+"\n")
|
# Generated by Django 2.0.4 on 2018-05-08 01:05
from django.db import migrations
import internal.fields
class Migration(migrations.Migration):
    """Auto-generated: switch StuInfo.stu_id to the project's custom IdField
    (6 chars, nullable, unique)."""

    dependencies = [
        ('internal', '0012_auto_20180508_0859'),
    ]
    operations = [
        migrations.AlterField(
            model_name='stuinfo',
            name='stu_id',
            field=internal.fields.IdField(max_length=6, null=True, unique=True, verbose_name='学号'),
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import xlsxwriter
from sklearn.cluster import KMeans
from sklearn.manifold import MDS
filename = "/Users/Shatalov/Downloads/European Jobs_data.csv"
df = pd.read_table(filename, sep=";")
# columns 1..9 are the numeric features (column 0 presumably holds the
# country/row label — TODO confirm against the CSV)
data = df.iloc[:, 1:10].values
# Elbow method: plot within-cluster sum of squares for k = 1..14
wcss = []
for n_clusters in range(1, 15):
    kmeans = KMeans(n_clusters=n_clusters)
    kmeans.fit(data)
    wcss.append(kmeans.inertia_)
plt.figure(figsize=(10, 5))
plt.plot(range(1, 15), wcss)
plt.title('Elbow Graph')
plt.xlabel('Number of cluster (k)')
plt.ylabel('WCSS')
plt.show()
# final clustering with k = 4, labels stored on the frame
kmeans = KMeans(n_clusters=4)
k = kmeans.fit_predict(data)
df['label'] = k
print(df)
# project the feature space to 2D with metric MDS for plotting
cmd = MDS(n_components=2)
trans = cmd.fit_transform(data)
print(trans.shape)
plt.scatter(trans[k == 0, 0], trans[k == 0, 1], s=10, c='red', label='Cluster 1')
plt.scatter(trans[k == 1, 0], trans[k == 1, 1], s=10, c='blue', label='Cluster 2')
plt.scatter(trans[k == 2, 0], trans[k == 2, 1], s=10, c='green', label='Cluster 3')
# fix: Cluster 4 reused 'green' and was indistinguishable from Cluster 3
plt.scatter(trans[k == 3, 0], trans[k == 3, 1], s=10, c='purple', label='Cluster 4')
plt.show()
# NOTE(review): this writer is created but never written to or closed —
# presumably df.to_excel(writer) + writer.close() was intended; confirm
writer = pd.ExcelWriter('123.xlsx', engine='xlsxwriter')
|
# ref[1]: https://stackoverflow.com/questions/19695214/python-screenshot-of-inactive-window-printwindow-win32gui
# ref[2]: http://pythonstudy.xyz/python/article/406-%ED%8C%8C%EC%9D%B4%EC%8D%AC-%EC%9D%B4%EB%AF%B8%EC%A7%80-%EC%B2%98%EB%A6%AC-Pillow
# ref[3]: https://pythonpath.wordpress.com/2012/09/17/pil-image-to-cv2-image/
# ref[4]: https://theailearner.com/2018/10/15/creating-video-from-images-using-opencv-python/
# required modules
# pypiwin32 (구 win32api): pip install pypiwin32
# win32gui: 위 패키지 설치 후 Python\Python38-32\Scripts\pywin32_postinstall.py 실행 (필자는 관리자권한 powershell에서 cmd켜고 실행함)
# Image (구 PIL): pip install image
import cv2, win32gui, win32ui
from ctypes import windll
import numpy as np
from PIL import Image
# locate the BlueStacks window by title
hwnd = win32gui.FindWindow(None,'BlueStacks')
# to capture only the client area, use GetClientRect instead:
# left, top, right, bot = win32gui.GetClientRect(hwnd)
left, top, right, bot = win32gui.GetWindowRect(hwnd)
w = right - left
h = bot - top
# grab the window's device context (lets us read pixels even when occluded)
# https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getdc
hwndDC = win32gui.GetWindowDC(hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
# bitmap that PrintWindow will render the window into
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
saveDC.SelectObject(saveBitMap)
arr = []
# capture 200 frames from the (possibly background) window
for k in range(200):
    #result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 1)
    result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 0)
    # print(result)
    bmpinfo = saveBitMap.GetInfo()
    bmpstr = saveBitMap.GetBitmapBits(True)
    # wrap the raw BGRX bitmap bytes in a PIL image
    im = Image.frombuffer(
        'RGB',
        (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
        bmpstr, 'raw', 'BGRX', 0, 1)
    # keep the frame only when PrintWindow reported success
    if result == 1:
        # im.show()
        # im.save('알리샤test.png')
        arr.append(im)
# release all GDI resources in reverse order of acquisition
win32gui.DeleteObject(saveBitMap.GetHandle())
saveDC.DeleteDC()
mfcDC.DeleteDC()
win32gui.ReleaseDC(hwnd, hwndDC)
# encode the captured frames into a 30 fps DIVX AVI
out = cv2.VideoWriter('aa.avi',cv2.VideoWriter_fourcc(*'DIVX'),30, (w,h))
for element in arr:
    # PIL gives RGB; OpenCV expects BGR
    element = cv2.cvtColor(np.asarray(element),cv2.COLOR_RGB2BGR)
    out.write(element)
out.release() |
from typing import Any, Callable, Dict, Optional
import pytest
from tartiflette import Directive, Resolver, create_engine
from tartiflette.schema.registry import SchemaRegistry
@pytest.mark.asyncio
async def test_tartiflette_deprecated_execution_directive():
    """@deprecated must be registered with an implementation and must not
    change execution: all three fields still resolve to 42."""
    schema = """
    type Query {
        fieldNormal: Int
        fieldDeprecatedDefault: Int @deprecated
        fieldDeprecatedCustom: Int @deprecated(reason: "Unused anymore")
    }
    """

    @Resolver(
        "Query.fieldNormal",
        schema_name="test_tartiflette_deprecated_execution_directive",
    )
    async def func_field_resolver4(parent, arguments, request_ctx, info):
        return 42

    @Resolver(
        "Query.fieldDeprecatedDefault",
        schema_name="test_tartiflette_deprecated_execution_directive",
    )
    async def func_field_resolver5(parent, arguments, request_ctx, info):
        return 42

    @Resolver(
        "Query.fieldDeprecatedCustom",
        schema_name="test_tartiflette_deprecated_execution_directive",
    )
    async def func_field_resolver6(parent, arguments, request_ctx, info):
        return 42

    ttftt = await create_engine(
        schema, schema_name="test_tartiflette_deprecated_execution_directive"
    )

    # the built-in directive must be known to the schema and implemented
    assert (
        SchemaRegistry.find_schema(
            "test_tartiflette_deprecated_execution_directive"
        ).find_directive("deprecated")
        is not None
    )
    assert (
        SchemaRegistry.find_schema(
            "test_tartiflette_deprecated_execution_directive"
        )
        .find_directive("deprecated")
        .implementation
        is not None
    )

    result = await ttftt.execute(
        """
        query Test{
            fieldNormal
            fieldDeprecatedDefault
            fieldDeprecatedCustom
        }
        """,
        operation_name="Test",
    )

    assert {
        "data": {
            "fieldNormal": 42,
            "fieldDeprecatedDefault": 42,
            "fieldDeprecatedCustom": 42,
        }
    } == result
@pytest.mark.asyncio
async def test_tartiflette_deprecated_introspection_directive():
    """Introspection must expose deprecation state: the default reason is
    "No longer supported", a custom reason is passed through verbatim."""
    schema = """
    type Query {
        fieldNormal: Int
        fieldDeprecatedDefault: Int @deprecated
        fieldDeprecatedCustom: Int @deprecated(reason: "Unused anymore")
    }
    """

    @Resolver(
        "Query.fieldNormal",
        schema_name="test_tartiflette_deprecated_introspection_directive",
    )
    async def func_field_resolver4(parent, arguments, request_ctx, info):
        return 42

    @Resolver(
        "Query.fieldDeprecatedDefault",
        schema_name="test_tartiflette_deprecated_introspection_directive",
    )
    async def func_field_resolver5(parent, arguments, request_ctx, info):
        return 42

    @Resolver(
        "Query.fieldDeprecatedCustom",
        schema_name="test_tartiflette_deprecated_introspection_directive",
    )
    async def func_field_resolver6(parent, arguments, request_ctx, info):
        return 42

    ttftt = await create_engine(
        schema,
        schema_name="test_tartiflette_deprecated_introspection_directive",
    )

    # the built-in directive must be known to the schema and implemented
    assert (
        SchemaRegistry.find_schema(
            "test_tartiflette_deprecated_introspection_directive"
        ).find_directive("deprecated")
        is not None
    )
    assert (
        SchemaRegistry.find_schema(
            "test_tartiflette_deprecated_introspection_directive"
        )
        .find_directive("deprecated")
        .implementation
        is not None
    )

    result = await ttftt.execute(
        """
        query Test{
            __type(name: "Query") {
                fields(includeDeprecated: true) {
                    name
                    isDeprecated
                    deprecationReason
                }
            }
        }
        """,
        operation_name="Test",
    )

    assert {
        "data": {
            "__type": {
                "fields": [
                    {
                        "name": "fieldNormal",
                        "isDeprecated": False,
                        "deprecationReason": None,
                    },
                    {
                        "name": "fieldDeprecatedDefault",
                        "isDeprecated": True,
                        "deprecationReason": "No longer supported",
                    },
                    {
                        "name": "fieldDeprecatedCustom",
                        "isDeprecated": True,
                        "deprecationReason": "Unused anymore",
                    },
                ]
            }
        }
    } == result
@pytest.mark.asyncio
async def test_tartiflette_directive_declaration():
    """Custom SDL-declared directives wrap field execution: @lol adds 1 and
    @lol2(value: N) adds N on top of each resolver's base value of 42."""
    schema_sdl = """
    directive @lol on FIELD_DEFINITION
    directive @lol2( value: Int ) on FIELD_DEFINITION

    type Query {
        fieldLoled1: Int @lol
        fieldLoled2: Int @lol @deprecated @lol2(value:2)
        fieldLoled3: Int @deprecated @lol @lol2(value:6)
    }
    """

    # Execute directive
    @Directive("lol2", schema_name="test_tartiflette_directive_declaration")
    class Loled2:
        @staticmethod
        async def on_field_execution(
            directive_args: Dict[str, Any],
            next_resolver: Callable,
            parent: Optional[Any],
            args: Dict[str, Any],
            ctx: Optional[Any],
            info: "ResolveInfo",
        ):
            # add the directive's "value" argument to the resolved field
            return (await next_resolver(parent, args, ctx, info)) + int(
                directive_args["value"]
            )

    @Resolver(
        "Query.fieldLoled1",
        schema_name="test_tartiflette_directive_declaration",
    )
    async def func_field_resolver4(_parent, _arguments, _request_ctx, _info):
        return 42

    @Resolver(
        "Query.fieldLoled2",
        schema_name="test_tartiflette_directive_declaration",
    )
    async def func_field_resolver5(_parent, _arguments, _request_ctx, _info):
        return 42

    @Resolver(
        "Query.fieldLoled3",
        schema_name="test_tartiflette_directive_declaration",
    )
    async def func_field_resolver6(_parent, _arguments, _request_ctx, _info):
        return 42

    @Directive("lol", schema_name="test_tartiflette_directive_declaration")
    class Loled:
        @staticmethod
        async def on_field_execution(
            directive_args: Dict[str, Any],
            next_resolver: Callable,
            parent: Optional[Any],
            args: Dict[str, Any],
            ctx: Optional[Any],
            info: "ResolveInfo",
        ):
            # unconditionally add 1 to the resolved field
            return (await next_resolver(parent, args, ctx, info)) + 1

    ttftt = await create_engine(
        schema_sdl, schema_name="test_tartiflette_directive_declaration"
    )
    assert (
        SchemaRegistry.find_schema(
            "test_tartiflette_directive_declaration"
        ).find_directive("lol")
        is not None
    )
    assert (
        SchemaRegistry.find_schema("test_tartiflette_directive_declaration")
        .find_directive("lol")
        .implementation
        is not None
    )

    result = await ttftt.execute(
        """
        query Test{
            fieldLoled1
            fieldLoled2
            fieldLoled3
        }
        """,
        operation_name="Test",
    )

    # 42+1 = 43; 42+1+2 = 45; 42+1+6 = 49
    assert {
        "data": {"fieldLoled1": 43, "fieldLoled2": 45, "fieldLoled3": 49}
    } == result
@pytest.mark.asyncio
async def test_tartiflette_non_introspectable_execution_directive():
    """@nonIntrospectable hides a field from introspection: only fieldNormal
    appears in the __type(name: "Query") result."""
    schema = """
    type Query {
        fieldNormal: Int
        fieldHiddendToIntrospactable: Int @nonIntrospectable
    }
    """

    @Resolver(
        "Query.fieldNormal",
        schema_name="test_tartiflette_non_introspectable_execution_directive",
    )
    async def func_field_resolver4(parent, arguments, request_ctx, info):
        return 42

    @Resolver(
        "Query.fieldHiddendToIntrospactable",
        schema_name="test_tartiflette_non_introspectable_execution_directive",
    )
    async def func_field_resolver5(parent, arguments, request_ctx, info):
        return 42

    ttftt = await create_engine(
        schema,
        schema_name="test_tartiflette_non_introspectable_execution_directive",
    )
    # the built-in directive must be known to the schema and implemented
    assert (
        SchemaRegistry.find_schema(
            "test_tartiflette_non_introspectable_execution_directive"
        ).find_directive("nonIntrospectable")
        is not None
    )
    assert (
        SchemaRegistry.find_schema(
            "test_tartiflette_non_introspectable_execution_directive"
        )
        .find_directive("nonIntrospectable")
        .implementation
        is not None
    )

    result = await ttftt.execute(
        """
        query Test{
            __type(name: "Query") {
                fields {
                    name
                    isDeprecated
                    deprecationReason
                }
            }
        }
        """,
        operation_name="Test",
    )

    assert {
        "data": {
            "__type": {
                "fields": [
                    {
                        "name": "fieldNormal",
                        "isDeprecated": False,
                        "deprecationReason": None,
                    }
                ]
            }
        }
    } == result
|
from assertpy import assert_that
from django.test import TestCase
from model_bakery import baker
from self_date.models import image_path
class SelfDateProfileImagePathTestCase(TestCase):
    """Unit test for self_date.models.image_path."""

    def test_image_path(self):
        # Given: one SelfDateProfile instance
        self_date_profile = baker.make('self_date.SelfDateProfile')
        file_name = 'image_name.png'
        # When: the image_path helper is called
        path = image_path(self_date_profile, file_name)
        # Then: it returns the storage path the image will be saved under
        extension = file_name.split('.')[1]
        assert_that(path).is_equal_to(f'profiles/{self_date_profile.profile.user.email}/image.{extension}')
|
import argparse
# CLI: a file of delimiter-separated hex addresses, the name of the generator
# function each consecutive pair is checked against, and the delimiter
# (note: the argument is spelled "delimeter" and args.delimeter relies on it)
parser = argparse.ArgumentParser(description='Input arguments for generating input addresses to Character RNN')
parser.add_argument('inputFile', nargs='?', type=str)
parser.add_argument('functionName', nargs='?', default='increment', type=str)
parser.add_argument('delimeter', nargs='?', default=';', type=str)
args = parser.parse_args()
def increment(addr):
    """Return the address immediately following *addr*."""
    return 1 + addr
def skipOne(addr):
    """Return the address two past *addr* (skips one address)."""
    return 2 + addr
functionDict = {
'increment': increment,
'skipOne' : skipOne
}
funcToUse = functionDict[args.functionName]
with open(args.inputFile, 'r') as f:
addrs = [int(addr, 16) for addr in f.read().split(args.delimeter)]
numCorrect = 0
numTotal = len(addrs)-1
for i in xrange(numTotal):
if addrs[i+1] == funcToUse(addrs[i]):
numCorrect += 1
print "Testing {} with function {}: {} correct out of {} total ({} percent)".format(args.inputFile, args.functionName, numCorrect, numTotal, float(numCorrect)/numTotal)
|
from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
from .file_utils import ModelOutput
@dataclass
class BaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
from django.conf.urls import url
from django.urls import path
from . import views
urlpatterns = [
path('', views.post_list, name='post_list'),
# 정규표현식 url(r'^$', views.post_list, name='post_list'),
#url(r'^post/1/$', views.post_detail, name='post_detail'),
path('post/<int:pk>/', views.post_detail, name='post_detail'),
# 정규표현식 url(r'^post/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
# 정규표현식 사용 [첫번째자리][두번째자리] = 0123456789 = \d
# + : 숫자가 1번 이상 반복될 것이다.
path('post/new/', views.post_new, name='post_new'),
# url(r'^post/new/$', views.post_new, name='post_new'),
path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
#url(r'^post/(?P<pk>\d+)/edit/$', views.post_edit, name='post_edit'),
] |
'''
Created on 5 Aug 2018
@author: Ken
'''
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import include, url
from karaokeapp.modules.room.room_info.views.room_info_views import listRoomInfo, addRoomInfo, getRoom
urlpatterns = [
url(r'^listRoomInfo/$', listRoomInfo, name='karaokeapp.listRoomInfo'),
url(r'^addRoomInfo/$', addRoomInfo, name='karaokeapp.addRoomInfo'),
url(r'^getRoomByStatus/$', getRoom, name='karaokeapp.getRoomByStatus'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
# Problem 1
def multiply():
    """Read two positive integers from stdin and print their product,
    computed by repeated addition (Problem 1)."""
    num1 = int(raw_input('Type a number'))
    while num1 < 1:
        num1 = int(raw_input('your number is negative, enter a positive'))
    num2 = int(raw_input('Type a number'))
    while num2 < 1:
        num2 = int(raw_input('your number is negative, enter a positive'))
    # add num1 to the accumulator num2 times
    product = 0
    for _ in range(num2):
        product += num1
    print(product)
    print("The product is equal to %d" % product)
# multiply()
#
# Problem 2
def divide():
    """Read two positive integers from stdin and print their integer
    quotient, computed by repeated subtraction (Problem 2)."""
    dividend = int(raw_input('Type a number '))
    while dividend < 1:
        dividend = int(raw_input('your number is negative, enter a positive'))
    divisor = int(raw_input('Type a number '))
    while divisor < 1:
        divisor = int(raw_input('your number is negative, enter a positive'))
    # subtract the divisor until the remainder is smaller than it
    quotient = 0
    while dividend >= divisor:
        dividend -= divisor
        quotient += 1
    print(quotient)
    print("The quotient is equal to %d" % quotient)


divide()
# Problem 3
def pow():
    """Read two positive integers a and b from stdin and print a**b, computed
    with repeated addition only (Problem 3).

    NOTE: shadows the builtin pow(); name kept for interface compatibility.
    """
    a = int(raw_input('Type a number'))
    while a < 1:
        a = abs(int(raw_input('your number is negative, enter a positive')))
    b = int(raw_input('Type a number'))
    while b < 1:
        b = abs(int(raw_input('your number is negative, enter a positive')))
    if b == 0:
        # unreachable in practice (the loop above forces b >= 1); kept as-is
        return 1
    answer = a
    increment = a
    for i in range(1, b):
        # multiply the running answer by a via (a-1) additions of increment
        for j in range(1, a):
            answer += increment
        # fix: the next round must add the *current* power; the original
        # "increment += answer" gave wrong results for b > 2 (e.g. 2**3 -> 10)
        increment = answer
    print('pow %d' % answer)


pow()
|
# -*- coding: utf-8 -*-
"""
Created on 2020/1/30 10:01
@author: dct
"""
import requests
from lxml import etree
def getNewsURLList(baseURL):
    """Yield (label, title, url, publish_time) for every entry on a
    chinanews.com scroll-news listing page."""
    response = requests.get(baseURL)
    response.encoding = 'utf-8'
    tree = etree.HTML(response.text)
    entries = tree.xpath('//div[@id = "content_right"]/div[@class = "content_list"]/ul/li[div]')
    for entry in entries:
        url = entry.xpath('div[@class = "dd_bt"]/a/@href')[0]
        # some entries carry only a site-relative path — prepend the host
        if not url.startswith('http'):
            url = 'http://www.chinanews.com' + url
        label = entry.xpath('div/a/text()')[0]
        title = entry.xpath('div[@class = "dd_bt"]/a/text()')[0]
        ptime = entry.xpath('div[@class = "dd_time"]/text()')[0]
        yield label, title, url, ptime
def getNewsContent(urllist):
    """For each (label, title, url, ptime) item, fetch the article page and
    yield the tuple extended with the joined paragraph text."""
    for label, title, url, ptime in urllist:
        response = requests.get(url)
        response.encoding = 'utf-8'
        tree = etree.HTML(response.text)
        paragraphs = tree.xpath('//div[@class="left_zw"]/p/text()')
        yield label, title, url, ptime, '\r\n'.join(paragraphs)
if __name__ == '__main__':
    # build the scroll-news URL for one date (year, month, day)
    urltemplate = 'http://www.chinanews.com/scroll-news/{0}/{1}{2}/news.shtml'
    testurl = urltemplate.format('2020','01','30')
    print(testurl)
    urllist = getNewsURLList(testurl)
    newscontents = getNewsContent(urllist)
    # fix: use a context manager so the file is closed even when a fetch
    # raises mid-loop (the original left an open handle on error)
    with open('news.txt', 'w', encoding="utf-8") as f:
        for label, title, url, ptime, news in newscontents:
            # one record per article, fields separated by CRLF, preceded by
            # a ~~~ divider line
            for field in (u'~' * 100, label, title, url, ptime, news):
                f.write(field + u'\r\n')
# Worked example: build the KMP "next" (failure) table for a sample array.
array = ['a','b','c','a','b','d']
#next = 0 0 0 1 2 0
#index = 0 1 2 3 4 5
next = [0] * len(array)
#### initial positions of t (matched-prefix length) and i (table cursor)
i = 1
t = 0
while i < len(array):
    if array[i] == array[t]:
        next[i] = t + 1
        i += 1
        t += 1
    elif t>0: # trickiest step: fall t back to position next[t-1]
        t = next[t-1]
    else:#t == 0
        next[i] = 0
        i += 1
print(next)
##############
'''
Three cases in total when building the table:
1. array[i] == array[t]: set next[i] = t + 1 and advance both cursors.
2. array[i] != array[t] and t > 0: fall t back to next[t-1] and retry.
3. array[i] != array[t] and t == 0: set next[i] = 0 and advance i.
'''
def prefix_table(pattern):
    """Build the KMP prefix (failure) table for *pattern*.

    table[i] is the length of the longest proper prefix of pattern[:i+1]
    that is also a suffix of it. Accepts any indexable sequence (str, list).
    """
    # fix: renamed the local from "next", which shadowed the builtin
    table = [0] * len(pattern)
    i = 1  # position whose table entry is being computed
    t = 0  # length of the currently matched prefix
    while i < len(pattern):
        if pattern[i] == pattern[t]:
            table[i] = t + 1
            i += 1
            t += 1
        elif t > 0:
            # mismatch: fall back to the next-shorter candidate prefix
            t = table[t - 1]
        else:  # t == 0: no prefix matches at this position
            table[i] = 0
            i += 1
    return table
def move_prefix_table(prefix, n):
    """Shift the first n entries of *prefix* one slot to the right in place
    (KMP search convention): slot 0 becomes -1, the last original entry of
    the shifted region is discarded. Returns the same list."""
    prefix[1:n] = prefix[0:n - 1]
    prefix[0] = -1
    return prefix
def kmp_search(text, pattern):
    """Print the start index of the first occurrence of *pattern* in *text*
    using Knuth-Morris-Pratt; prints nothing when the pattern is absent."""
    table = prefix_table(pattern)
    table = move_prefix_table(table, len(table))
    m = len(text)
    n = len(pattern)
    i = 0  # cursor into text
    j = 0  # cursor into pattern
    while i < m:
        if j == n - 1 and text[i] == pattern[j]:
            print("Found at ", (i - j))
            # fix: removed a dead "j = next[j]" that followed this break —
            # it was unreachable
            break
        if text[i] == pattern[j]:
            i += 1
            j += 1
        else:
            # mismatch: fall back via the shifted prefix table
            j = table[j]
            if j == -1:
                i += 1
                j += 1
# demo inputs: search for the sample pattern inside the sample text
pattern = ['A','B','A','B','C','A','B','A','A']
text = ['A','B','A','B','A','B','C','A','B','A','A','B','A','B','A','B','A','B']
kmp_search(text, pattern) |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: create the rsvp app's tables (Camp, Invitation, Stipend,
        Ignite, Roommate, Session, PlusOne, SparkProfile) and the
        Invitation (user, camp) unique constraint."""
        # Adding model 'Camp'
        db.create_table('rsvp_camp', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('theme', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('start_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('end_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('logistics', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('hotel', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('hotel_link', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('hotel_code', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('hotel_deadline', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('venue', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('venue_address', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
            ('ignite', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('stipends', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('spreadsheet_url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('mailchimp_list', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
        ))
        db.send_create_signal('rsvp', ['Camp'])
        # Adding model 'Invitation'
        db.create_table('rsvp_invitation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('status', self.gf('django.db.models.fields.CharField')(default='Q', max_length=1)),
            ('type', self.gf('django.db.models.fields.CharField')(default='G', max_length=1)),
            ('plus_one', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('inviter', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsvp.Invitation'], null=True, blank=True)),
            ('expires', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('camp', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsvp.Camp'])),
            ('rand_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=8)),
            ('dietary', self.gf('django.db.models.fields.CharField')(default='None', max_length=140, blank=True)),
            ('arrival_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('departure_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('hotel_booked', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('rsvp', ['Invitation'])
        # Adding unique constraint on 'Invitation', fields ['user', 'camp']
        db.create_unique('rsvp_invitation', ['user_id', 'camp_id'])
        # Adding model 'Stipend'
        db.create_table('rsvp_stipend', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('invitation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsvp.Invitation'], unique=True)),
            ('cost_estimate', self.gf('django.db.models.fields.IntegerField')(max_length=140, null=True, blank=True)),
            ('employer_subsidized', self.gf('django.db.models.fields.CharField')(default='U', max_length=1)),
            ('employer_percentage', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('invitee_percentage', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('details', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('rsvp', ['Stipend'])
        # Adding model 'Ignite'
        db.create_table('rsvp_ignite', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('invitation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsvp.Invitation'], unique=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=140)),
            ('experience', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('description', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('rsvp', ['Ignite'])
        # Adding model 'Roommate'
        db.create_table('rsvp_roommate', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('invitation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsvp.Invitation'], unique=True)),
            ('sex', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('roommate', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('more', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
        ))
        db.send_create_signal('rsvp', ['Roommate'])
        # Adding model 'Session'
        db.create_table('rsvp_session', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('invitation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsvp.Invitation'], unique=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=140)),
            ('description', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('rsvp', ['Session'])
        # Adding model 'PlusOne'
        db.create_table('rsvp_plusone', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('invitation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsvp.Invitation'], unique=True)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('employer', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
            ('job_title', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
            ('reason', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('rsvp', ['PlusOne'])
        # Adding model 'SparkProfile'
        db.create_table('rsvp_sparkprofile', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
            ('bio', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
            ('employer', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
            ('twitter', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('job_title', self.gf('django.db.models.fields.CharField')(max_length=140, blank=True)),
            ('phone', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('poc', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('woman', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('journo', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('rsvp', ['SparkProfile'])
def backwards(self, orm):
    """Reverse the migration: drop the unique constraint, then every table.

    The constraint must be removed before 'rsvp_invitation' itself is
    dropped, so the order below is significant.
    """
    # Removing unique constraint on 'Invitation', fields ['user', 'camp']
    db.delete_unique('rsvp_invitation', ['user_id', 'camp_id'])
    # Deleting model 'Camp'
    db.delete_table('rsvp_camp')
    # Deleting model 'Invitation'
    db.delete_table('rsvp_invitation')
    # Deleting model 'Stipend'
    db.delete_table('rsvp_stipend')
    # Deleting model 'Ignite'
    db.delete_table('rsvp_ignite')
    # Deleting model 'Roommate'
    db.delete_table('rsvp_roommate')
    # Deleting model 'Session'
    db.delete_table('rsvp_session')
    # Deleting model 'PlusOne'
    db.delete_table('rsvp_plusone')
    # Deleting model 'SparkProfile'
    db.delete_table('rsvp_sparkprofile')
# Frozen ORM state: South uses this snapshot (not the live models.py) to
# reconstruct model definitions at migration time. Do not edit by hand.
models = {
    'auth.group': {
        'Meta': {'object_name': 'Group'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    'auth.permission': {
        'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
    },
    'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'rsvp.camp': {
        'Meta': {'object_name': 'Camp'},
        'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
        'hotel': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'hotel_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'hotel_deadline': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'hotel_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'ignite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'logistics': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'mailchimp_list': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
        'spreadsheet_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
        'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
        'stipends': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'theme': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
        'venue': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'venue_address': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
    },
    'rsvp.ignite': {
        'Meta': {'object_name': 'Ignite'},
        'description': ('django.db.models.fields.TextField', [], {}),
        'experience': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invitation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rsvp.Invitation']", 'unique': 'True'}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '140'})
    },
    'rsvp.invitation': {
        'Meta': {'unique_together': "(('user', 'camp'),)", 'object_name': 'Invitation'},
        'arrival_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
        'camp': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rsvp.Camp']"}),
        'departure_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
        'dietary': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '140', 'blank': 'True'}),
        'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'hotel_booked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'inviter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rsvp.Invitation']", 'null': 'True', 'blank': 'True'}),
        'plus_one': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'rand_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'}),
        'status': ('django.db.models.fields.CharField', [], {'default': "'Q'", 'max_length': '1'}),
        'type': ('django.db.models.fields.CharField', [], {'default': "'G'", 'max_length': '1'}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
    },
    'rsvp.plusone': {
        'Meta': {'object_name': 'PlusOne'},
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
        'employer': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invitation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rsvp.Invitation']", 'unique': 'True'}),
        'job_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
        'reason': ('django.db.models.fields.TextField', [], {})
    },
    'rsvp.roommate': {
        'Meta': {'object_name': 'Roommate'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invitation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rsvp.Invitation']", 'unique': 'True'}),
        'more': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
        'roommate': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
        'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'})
    },
    'rsvp.session': {
        'Meta': {'object_name': 'Session'},
        'description': ('django.db.models.fields.TextField', [], {}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invitation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rsvp.Invitation']", 'unique': 'True'}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '140'})
    },
    'rsvp.sparkprofile': {
        'Meta': {'object_name': 'SparkProfile'},
        'bio': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'employer': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'job_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
        'journo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'poc': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'twitter': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
        'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
        'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
        'woman': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
    },
    'rsvp.stipend': {
        'Meta': {'object_name': 'Stipend'},
        'cost_estimate': ('django.db.models.fields.IntegerField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
        'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
        'employer_percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
        'employer_subsidized': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'invitation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rsvp.Invitation']", 'unique': 'True'}),
        'invitee_percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
    }
}

# Apps whose frozen models are complete in this migration.
complete_apps = ['rsvp']
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET, require_POST
from .models import Post, Comment, Category, UserFavorite
from notice.models import Notice
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.generic import ListView
from .forms import EmailPostForm, PostForm
from django.core.mail import send_mail
from taggit.models import Tag
from django.db.models import Count
from uuslug import slugify
from datetime import datetime
from django.contrib import messages
from django.contrib.auth.models import User
def post_share(request, post_id):
    """Share a published post by email.

    GET renders an empty EmailPostForm; POST validates it and sends the
    recommendation email. ``sent`` tells the template whether mail went out.
    """
    # Fetch the post by id, 404 unless it is published.
    post = get_object_or_404(Post, id=post_id, status='published')
    sent = False
    if request.method == 'POST':
        # Form was submitted.
        form = EmailPostForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            post_url = request.build_absolute_uri(post.get_absolute_url())
            # BUG FIX: the subject started with the literal "()" instead of a
            # "{}" placeholder, so the sender's name was silently dropped.
            subject = '{} ({}) recommends you reading "{}"'.format(cd['name'], cd['email'], post.title)
            message = 'Read "{}" at {}\n\n{}\'s comments:{}'.format(post.title, post_url, cd['name'], cd['comments'])
            send_mail(subject, message, 'liu1xufeng@gmail.com', [cd['to']])
            sent = True
    else:
        form = EmailPostForm()
    return render(request, 'blog/static/share.html', {'post': post, "form": form, 'sent': sent})
class PostListView(ListView):
    """Class-based alternative to post_list: paginated published posts."""
    queryset = Post.published.all()          # only published posts
    context_object_name = 'posts'            # template variable name
    paginate_by = 3                          # posts per page
    template_name = 'blog/static/index.html'
def post_list(request, tag_slug=None, category_slug=None):
    """List published posts, optionally filtered by tag or category slug.

    Also supplies notices, all categories, and the first recommended post
    to the index template. Pagination is 3 posts per page.
    """
    object_list = Post.published.all()
    tag = None
    notices = Notice.objects.all().order_by('-publish')
    if tag_slug:
        tag = get_object_or_404(Tag, slug=tag_slug)
        object_list = object_list.filter(tags__in=[tag])
    if category_slug:
        # BUG FIX: the category slug was looked up on the Tag model,
        # so category filtering could never match a Category.
        category = get_object_or_404(Category, slug=category_slug)
        object_list = object_list.filter(category__in=[category])
    categories = Category.objects.all()
    # First recommended post, or [] when there is none (both falsy in templates).
    recommend_post_list = object_list.filter(is_recommend=1)
    if recommend_post_list:
        recommend_post = recommend_post_list[0]
    else:
        recommend_post = []
    paginator = Paginator(object_list, 3)  # 3 posts per page
    page = request.GET.get('page', 1)      # current page number, default 1
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # Non-integer page parameter: fall back to the first page.
        posts = paginator.page(1)
    except EmptyPage:
        # Page out of range: fall back to the last page.
        posts = paginator.page(paginator.num_pages)
    return render(request, 'blog/static/index.html', {'page': page, 'posts': posts, 'tag': tag, "categories": categories, 'notices': notices, 'recommend_post': recommend_post})
def post_detail(request, year, month, day, post):
    """Render one published post; on AJAX POST, create a comment instead.

    The post is addressed by slug plus its publish date. Also computes
    posts sharing tags with this one and whether the current user has
    favorited it.
    """
    post = get_object_or_404(Post, slug=post,
                             status='published',
                             publish__year=year,
                             publish__month=month,
                             publish__day=day)
    comments = post.comments.filter(active=True)
    post.increase_views()  # bump the view counter by 1
    # AJAX submissions create a comment for the current user and return early.
    if request.is_ajax():
        body = request.POST.get('body')
        print(body)
        new_comment = Comment.objects.create(name=request.user, body=body, post=post)
        new_comment.save()
        return HttpResponse('评论成功')
    # else:
    #     return HttpResponse("还未登录")
    # paginator = Paginator(comments, 5)
    # page = request.GET.get('page')
    # try:
    #     comments = paginator.page(page)
    # except PageNotAnInteger:
    #     comments = paginator.page(1)
    # except EmptyPage:
    #     comments = paginator.page(paginator.num_pages)
    tags = post.tags.all()
    # Similar posts: share at least one tag, ranked by overlap then recency.
    post_tags_ids = post.tags.values_list('id', flat=True)
    similar_tags = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
    similar_posts = similar_tags.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
    # has_fav: has the logged-in user favorited this post (fav_type=2)?
    has_fav = False
    if request.user.is_authenticated:
        if UserFavorite.objects.filter(user=request.user, post=post.id, fav_type=2):
            has_fav = True
    context = {'post': post, 'comments': comments, 'similar_posts': similar_posts, 'tags': tags, 'has_fav': has_fav}
    return render(request, 'blog/static/detail.html', context)
def tag_list(request):
    """Render every tag that is attached to at least one post."""
    tags_in_use = (
        Tag.objects
        .annotate(num_posts=Count('post'))
        .filter(num_posts__gt=0)
    )
    context = {'tag_list': tags_in_use}
    return render(request, 'blog/static/tags.html', context)
def links(request):
    """Static page: friendly links."""
    template_name = 'blog/static/links.html'
    return render(request, template_name)
def readers(request):
    """Static page: readers."""
    template_name = 'blog/static/readers.html'
    return render(request, template_name)
def search(request):
    """Case-insensitive title search over published posts (POST only)."""
    posts = ''
    msg = ''
    if request.method == 'POST':
        query = request.POST.get('q')
        posts = Post.published.filter(title__icontains=query)
        msg = '为您搜索到以下文章:' if posts else '没有搜索到符合条件的文章!'
    context = {'msg': msg, 'posts': posts}
    return render(request, 'blog/static/search.html', context)
# class AddCommentView(View):
# def post(self, request):
# comment_form = CommentForm(request.POST)
# if comment_form.is_valid():
# comment_form.save()
# return HttpResponse('{"status": "success"}', content_type='application/json')
# else:
# return HttpResponse('{"status": "fail"}', content_type='application/json')
# @receiver(post_save)
# def callback(sender, **kwargs):
# messages.success(sender, "文章发表成功")
@csrf_exempt
def add_post(request):
    """Create a post from a PostForm.

    Rejects a second post whose slugified title already exists today,
    then validates and saves the form. GET renders the empty form with
    category/tag choices.
    """
    if request.method == 'POST':
        add_post_form = PostForm(request.POST, request.FILES)
        title = request.POST.get('title')
        slug = slugify(title)
        # Fixed "is_exsit" typo; .exists() avoids fetching the matching rows.
        duplicate_today = Post.objects.filter(slug=slug, created__date=datetime.now().date()).exists()
        if duplicate_today:
            return HttpResponse('今日已有重复标题的文章了,请返回修改')
        if add_post_form.is_valid():
            add_post_form.save()
            messages.info(request, '文章发表成功')
            return redirect('myaccount:my_post')
        else:
            return HttpResponse('表单内容有误,请重新填写,请返回修改')
    else:
        add_post_form = PostForm()
        categories = Category.objects.all()
        tags = Tag.objects.all()
        context = {'add_post_form': add_post_form, 'categories': categories, 'tags': tags}
        return render(request, 'blog/static/add_post.html', context)
def update_post(request, id):
    """Update an existing post from a validated PostForm.

    POST applies the cleaned form data to the post; GET renders the edit
    form with category/tag choices and the post's current tags as a
    comma-separated string.
    """
    post = Post.objects.get(id=id)
    if request.method == 'POST':
        update_post_form = PostForm(request.POST, request.FILES)
        if update_post_form.is_valid():
            cd = update_post_form.cleaned_data
            post.title = cd['title']
            post.body = cd['body']
            post.category = cd['category']
            post.tags = cd['tags']
            post.status = cd['status']
            post.post_img = cd['post_img']
            post.save()
            return HttpResponse('文章更新成功')
        else:
            return HttpResponse('表单内容有误,请重新填写')
    else:
        update_post_form = PostForm()
        categories = Category.objects.all()
        tags = Tag.objects.all()
        # Tag names joined with a trailing comma, as the template expects.
        str_tags = ''.join('%s,' % tag for tag in post.tags.all())
        context = {'post': post, 'update_post_form': update_post_form, 'categories': categories, 'tags': tags, 'str_tags': str_tags}
        return render(request, 'blog/static/update_post.html', context)
@login_required(login_url='/account/login')
@require_POST
@csrf_exempt
def delete_post(request):
    """Delete the post named by POST['post_id']; "1" on success, "2" on failure."""
    post_id = request.POST['post_id']
    try:
        post = Post.objects.get(id=post_id)
        post.delete()
        return HttpResponse("1")
    except (Post.DoesNotExist, ValueError):
        # Narrowed from a bare "except": unknown or malformed id keeps the
        # original "2" failure contract; anything else now surfaces.
        return HttpResponse("2")
# 收藏的函数
class AddFavView(View):
    """Toggle the current user's favorite on a post (AJAX, JSON replies)."""

    def post(self, request):
        # ids default to 0 so the lookups below never raise on missing data
        post_id = request.POST.get('post_id', 0)
        # what kind of object is favorited
        fav_type = request.POST.get('fav_type', 0)
        # Even anonymous visitors carry a user object, so check auth explicitly;
        # the ajax caller handles the redirect to the login page.
        if not request.user.is_authenticated:
            return HttpResponse('{"fav_status":"fail", "fav_msg":"用户未登录"}', content_type='application/json')
        exist_records = UserFavorite.objects.filter(user=request.user, post=post_id, fav_type=fav_type)
        if exist_records:
            # Already favorited: this click removes the favorite.
            exist_records.delete()
            Post.objects.get(id=post_id).change_fav_nums(add=-1)
            # BUG FIX: the two success messages were swapped — removing a
            # favorite must report "取消收藏", not "添加收藏".
            return HttpResponse('{"fav_status":"success", "fav_msg":"取消收藏"}', content_type='application/json')
        else:
            user_fav = UserFavorite()
            # Only record the favorite when both ids were actually provided.
            if int(post_id) > 0 and int(fav_type) > 0:
                user_fav.post_id = post_id
                user_fav.fav_type = fav_type
                user_fav.user = request.user
                user_fav.save()
                Post.objects.get(id=post_id).change_fav_nums(add=1)
                return HttpResponse('{"fav_status":"success", "fav_msg":"添加收藏"}', content_type='application/json')
            else:
                return HttpResponse('{"fav_status":"fail", "fav_msg":"收藏出错"}', content_type='application/json')
# Write a function to check if a linked list is a palindrome
from utils import LinkedList

# Fixture: [r, a, c, e, c, a, r] — a palindrome (odd length)
a_palindrome = LinkedList()
a_palindrome.add_to_tail("r")
a_palindrome.add_to_tail("a")
a_palindrome.add_to_tail("c")
a_palindrome.add_to_tail("e")
a_palindrome.add_to_tail("c")
a_palindrome.add_to_tail("a")
a_palindrome.add_to_tail("r")

# Fixture: [c, o, a, c, h] — not a palindrome
not_a_palindrome = LinkedList()
not_a_palindrome.add_to_tail("c")
not_a_palindrome.add_to_tail("o")
not_a_palindrome.add_to_tail("a")
not_a_palindrome.add_to_tail("c")
not_a_palindrome.add_to_tail("h")
def palindrome_check(ll):
    """Return True iff the linked list reads the same forwards and backwards.

    Single pass with fast/slow pointers: the slow pointer pushes the first
    half onto a stack, then the stack is compared against the second half.
    O(n) time, O(n/2) extra space. Removed the stray debug output
    (ll.print_list()) the original left in.
    """
    slow = fast = ll.head
    first_half = []
    while fast and fast.next:
        first_half.append(slow.value)
        slow = slow.next
        fast = fast.next.next
    if fast:
        # Odd number of nodes: skip the middle element.
        slow = slow.next
    while slow:
        if first_half.pop() != slow.value:
            return False
        slow = slow.next
    return True
# Expected output: True, then False
print(palindrome_check(a_palindrome))
print(palindrome_check(not_a_palindrome))
|
from django.db import models
from django.utils import timezone
# Create your models here.
class Interesados (models.Model):
    """Contact/prospect record captured from an interest form."""
    id = models.AutoField(primary_key=True)
    nombres = models.CharField(max_length=50)
    apellidoPaterno = models.CharField(max_length=50)
    # NOTE(review): plain CharField — EmailField would validate, but changing
    # it is a schema migration; confirm before switching.
    email = models.CharField(max_length=50)
    telefono = models.CharField(max_length=10)
    extension = models.CharField(max_length=10)
    celular = models.CharField(max_length=10)
    # Timestamp set by the caller (no auto_now_add here).
    insertDate = models.DateTimeField()

    def __str__(self):
        return (self.nombres + " " + self.apellidoPaterno)

    def interesadosHoy(self):
        # True when the record was inserted today (current timezone's date).
        return self.insertDate.date() == timezone.now().date()
#!/usr/bin/python3
# Build script for the _exb_ffi CFFI extension: bridges the exb web server's
# C request API to Python request handlers.
from cffi import FFI
ffibuilder = FFI()
# Opaque server-side structs; only pointers to them cross the FFI boundary.
ffibuilder.cdef('struct exb;')
ffibuilder.cdef('struct exb_server;')
ffibuilder.cdef('struct exb_request_state;')
#ffibuilder.cdef('struct exb_request_state *exb_py_get_request();')
# C response functions callable from Python.
ffibuilder.cdef('int exb_response_set_header_c(struct exb_request_state *rqstate, char *name, char *value);')
ffibuilder.cdef('int exb_response_append_body_cstr(struct exb_request_state *rqstate, char *text);')
ffibuilder.cdef('int exb_response_end(struct exb_request_state *rqstate);')
# Python callback invoked from C once per request.
ffibuilder.cdef('extern "Python" void py_handle_request(struct exb_request_state *);')
# The C shim exports exb_py_handle_request, which forwards into the
# CFFI-generated py_handle_request trampoline above.
ffibuilder.set_source('_exb_ffi', '''
#include "exb_py_ffi.h"
static void py_handle_request(struct exb_request_state *rqstate);
CFFI_DLLEXPORT void exb_py_handle_request(struct exb_request_state *rqstate)
{
py_handle_request(rqstate);
}
''', extra_compile_args=["-I../src", "-I../../../"], library_dirs=[], libraries=[])
if __name__ == '__main__':
    ffibuilder.compile(verbose=True)
|
#! /usr/bin/env python3
# Prompt for a file name and echo its contents to stdout.
name = input("Enter the file name: ")
# "with" guarantees the file is closed even if reading or printing raises.
with open(name) as f:
    print(f.read())
|
"""
What is this script:
--------------------
create a custom generator on top of existing keras data augmentation functionalities
such as random cropping and PCA whitening (details see `random_crop_n_pca_augment.py`)
and correct generator indices (details see `labels_corrector.py`)
"""
import numpy as np
import pandas as pd
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import preprocess_input
from labels_corrector import wnids_to_network_indices, indices_rematch
from random_crop_n_pca_augment import crop_and_pca_generator
def create_good_generator(ImageGen,
                          directory,
                          batch_size=256,
                          seed=42,
                          shuffle=True,
                          class_mode='sparse',
                          classes=None,  # a list of wordnet ids
                          subset=None,   # specify training or validation set when needed
                          target_size=(256, 256),
                          AlextNetAug=True):
    """
    usage:
    ------
    given a generator with pre-defined data augmentations and preprocessing,
    this function will swap the labels that are inferred by keras by the
    classes (wordnet ids) you pass in to the true indices that match VGG's
    output layer. And if AlextNetAug=True, extra data augmentations mentioned
    in both the Alexnet and VGG papers are applied.

    return:
    -------
    - a generator which can be used in fitting
    - steps, required when evaluating

    Example:
    --------
    You pass in classes=['n142', 'n99', 'n200']; they are sorted as
    ['n99', 'n142', 'n200']; keras auto-labels them [0, 1, 2]; the
    index-correct generator relabels them with the real network indices
    (which are ascending for sorted wnids); extra Alexnet augmentation is
    applied if requested.
    """
    # Sort the wordnet ids so keras labels class 0 as the smallest wnid;
    # the network indices computed below are then in the same order.
    if classes is None:
        sorted_classes = None
    else:
        sorted_classes = sorted(classes)
    # BUG FIX: the original passed the *unsorted* `classes` here while the
    # network indices below were computed from `sorted_classes`, so labels
    # could be matched to the wrong wnid when the caller's list was unsorted.
    bad_generator = ImageGen.flow_from_directory(directory=directory,
                                                 batch_size=batch_size,
                                                 seed=seed,
                                                 shuffle=shuffle,
                                                 class_mode=class_mode,
                                                 classes=sorted_classes,
                                                 subset=subset,
                                                 target_size=target_size
                                                 )
    # number of steps through the dataset, required when evaluating
    steps = bad_generator.n // bad_generator.batch_size
    if classes is None:
        # With all 1000 categories keras labels in wnid order, which already
        # matches the network indices — no rematch needed.
        index_correct_generator = bad_generator
    else:
        # network_indices are ascending, in sync with the sorted wnids
        network_indices = wnids_to_network_indices(sorted_classes)
        index_correct_generator = indices_rematch(bad_generator, network_indices)
    if AlextNetAug:
        # random cropping + PCA whitening
        good_generator = crop_and_pca_generator(index_correct_generator, crop_length=224)
    else:
        good_generator = index_correct_generator
    return good_generator, steps
# Module is import-only; the usage example below is kept for reference.
if __name__ == '__main__':
    """
    e.g. create a training generator
    imagenet_train = '/mnt/fast-data17/datasets/ILSVRC/2012/clsloc/train/'
    ImageGen = ImageDataGenerator(fill_mode='nearest',
                                  horizontal_flip=True,
                                  rescale=None,
                                  preprocessing_function=preprocess_input,
                                  data_format="channels_last",
                                  validation_split=0.1
                                  )
    df_classes = pd.read_csv('groupings-csv/felidae_Imagenet.csv', usecols=['wnid'])
    classes = sorted([i for i in df_classes['wnid']])
    good_generator, steps = create_good_generator(ImageGen, imagenet_train, classes=classes)
    """
    pass
|
#!/usr/bin/env python
# Packaging script for Octo.
# BUG FIX: distutils.core.setup silently ignores install_requires, so the
# paramiko dependency was never declared. Prefer setuptools, falling back
# to distutils only when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

CLASSIFIERS = [
    'Intended Audience :: Developers',
    'License :: OSI Approved :: Apache Software License',
]

long_desc = 'coming soon.'

setup(name='Octo',
      version='0.2',
      description='uPortal Log reader',
      long_description=long_desc,
      author='Toben Archer',
      author_email='sandslash+Octo@gmail.com',
      maintainer='Toben Archer',
      maintainer_email='sandslash+Octo@gmail.com',
      url='https://github.com/Narcolapser/Octo',
      packages=[''],
      install_requires=['paramiko'],
      license='Apache 2.0',
      classifiers=CLASSIFIERS
      )
|
import tkinter as tk
class Calculator(tk.Frame):
    """A minimal four-function calculator built on tkinter.

    State model:
      curr      -- pending left operand text (set when an operator is pressed)
      operation -- pending operator symbol
      firstNum  -- last operator pressed; sentinel used to chain operations
      result    -- True right after "=" (or an error) so the next digit
                   starts a fresh entry
    """

    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.master.title('Simple Calculator')
        self.master.resizable(0, 0)
        self.entry = tk.Entry(self, width=30, borderwidth=5)
        # Digit buttons 0-9; "0" is rebuilt double-width below.
        # (Removed the redundant `self.buttons = []` that was immediately
        # overwritten in the original.)
        self.buttons = [tk.Button(self, text=str(i), width=10, height=5, command=lambda i=i: self.button_click(i)) for i in range(10)]
        self.buttons[0] = tk.Button(self, text="0", width=20, height=5, command=lambda: self.button_click(0))
        self.button_plus = tk.Button(self, text="+", width=10, height=5, command=lambda: self.button_op("+"))
        self.button_minus = tk.Button(self, text="-", width=10, height=5, command=lambda: self.button_op("-"))
        self.button_div = tk.Button(self, text="÷", width=10, height=5, command=lambda: self.button_op("/"))
        self.button_multi = tk.Button(self, text="X", width=10, height=5, command=lambda: self.button_op("*"))
        self.button_equal = tk.Button(self, text="=", width=10, height=5, command=lambda: self.button_eval(self.operation))
        self.button_clear = tk.Button(self, text="C", width=10, height=5, command=self.button_clear)
        self.button_minsym = tk.Button(self, text="+/-", width=10, height=5, command=self.button_negative)
        self.button_percent = tk.Button(self, text="%", width=10, height=5, command=self.button_percentage)
        self.button_decimal = tk.Button(self, text=".", width=10, height=5, command=self.button_point)
        self.create_widgets()
        self.grid()
        self.curr = ""
        self.result = False
        self.operation = ""
        self.firstNum = ""

    def create_widgets(self):
        """Lay out the entry plus the 6x4 button grid."""
        self.entry.grid(row=0, column=0, columnspan=4, padx=10, pady=10)
        # Digits 1-9 fill rows 4..2 top-down, three per row.
        j = 5
        for i in range(1, 10):
            if i % 3 == 1:
                j -= 1
            self.buttons[i].grid(row=j, column=(i - 1) % 3)
        self.buttons[0].grid(row=5, column=0, columnspan=2)
        self.button_clear.grid(row=1, column=0)
        self.button_plus.grid(row=4, column=3)
        self.button_equal.grid(row=5, column=3)
        self.button_multi.grid(row=2, column=3)
        self.button_minus.grid(row=3, column=3)
        self.button_div.grid(row=1, column=3)
        self.button_minsym.grid(row=1, column=1)
        self.button_percent.grid(row=1, column=2)
        self.button_decimal.grid(row=5, column=2)

    def button_percentage(self):
        """Replace the displayed value with value/100."""
        current = self.entry.get()
        if current != "Error":
            self.entry.delete(0, 'end')
            self.entry.insert(0, str(float(current) / 100))

    def button_negative(self):
        """Toggle the sign of the displayed value."""
        current = self.entry.get()
        if current != "Error" and current != "0":
            self.entry.delete(-1, 'end')
            if "-" in current:
                self.entry.insert(-1, current[1:])
            else:
                self.entry.insert(-1, "-" + current)

    def button_point(self):
        """Append a decimal point, at most one per number."""
        current = self.entry.get()
        if "." not in current and current != "Error":
            self.entry.delete(0, "end")
            self.entry.insert(0, current + ".")

    def button_click(self, number):
        """Append a digit; start fresh after "=" or after an operator."""
        if self.result:
            self.entry.delete(0, 'end')
            self.result = False
        if self.firstNum:
            self.entry.delete(0, 'end')
            self.firstNum = ""
        current = self.entry.get()
        self.entry.delete(0, 'end')
        self.entry.insert(0, str(current) + str(number))

    def button_eval(self, op):
        """Evaluate `curr op current`; show 'Error' on any failure."""
        try:
            current = self.entry.get()
            self.entry.delete(0, 'end')
            # eval on operands the calculator itself produced (digits/"."/"-")
            self.entry.insert(0, eval(self.curr + op + current))
            self.curr = ""
            self.result = True
        except Exception:
            # e.g. division by zero or incomplete expression
            self.entry.delete(0, 'end')
            self.entry.insert(0, 'Error')
            self.curr = ""
            self.result = True

    def button_op(self, op):
        """Record an operator; evaluate eagerly when chaining operations."""
        # (Removed the unused `selfBool` local from the original.)
        self.operation = op
        if self.curr and self.firstNum != op:
            self.firstNum = op
            self.button_eval(op)
            self.curr = self.entry.get()
        else:
            self.curr = self.entry.get()
            self.firstNum = op

    def button_clear(self):
        """Reset the display and all pending state."""
        self.entry.delete(0, 'end')
        self.curr = ""
        self.result = False
        # BUG FIX: the original assigned `self.firtNum` (typo), so clearing
        # never reset the pending-operator sentinel.
        self.firstNum = ""
# Launch the calculator when run as a script.
if __name__ == '__main__':
    root = tk.Tk()
    calculator = Calculator(master=root)
    calculator.mainloop()
|
import ml
def test_month_length():
    """Exercise ml.month_length for every month, the leap flag, and bad input."""
    expected = {
        "January": 31, "February": 28, "March": 31, "April": 30,
        "May": 31, "June": 30, "July": 31, "August": 31,
        "September": 30, "October": 31, "November": 30, "December": 31,
    }
    for month, days in expected.items():
        assert ml.month_length(month) == days, "failed on %s" % month
    assert ml.month_length("February", leap_year=True) == 29, "failed on February, leap_year"
    # Invalid month names return None (identity check instead of "== None").
    assert ml.month_length("Month") is None, "failed on invalid input"
def namescore(name):
    """Alphabetical value of *name*: A=1 ... Z=26, summed over its letters."""
    total = 0
    for letter in name:
        total += ord(letter) - 64
    return total
# Project Euler 22: sum of (1-based position in sorted order) * name score.
names = open('problem022.txt','r')
names = names.read().split(',')
# Strip the trailing character (closing quote/newline) from the last entry.
names[-1] = names[-1][:-1]
names.sort()
print sum(namescore(names[i]) * (i+1) for i in xrange(len(names)))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
def emp_list(request):
    # Static page: employee list.
    return render(request, 'emp_list.html')
def house_list(request):
    # Static page: house list.
    return render(request, 'house_list.html')
def house_type_list(request):
    # Static page: house type list.
    return render(request, 'house_type_list.html')
def dept_list(request):
    # Static page: department list.
    return render(request, 'dept_list.html')
def notice_list(request):
    # Static page: notice list.
    return render(request, 'notice_list.html')
# Demonstrate printing a dict's keys in sorted order.
# Fixed: the original named its locals "dict" and "list", shadowing the
# builtins of the same names.
fruits = {}
fruits['1'] = 'apple'
fruits['3'] = 'banana'
fruits['2'] = 'cherry'
keys = fruits.keys()
#sorted by key
print("sorted by key:", sorted(keys))
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <rahulha@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Make the salt source tree importable when running from the tests directory.
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import systemd
# Globals: give the module empty dunder dicts so the tests can patch them.
systemd.__salt__ = {}
systemd.__context__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SystemdTestCase(TestCase):
    '''
    Test case for salt.modules.systemd
    '''
    def test_systemctl_reload(self):
        '''
        Test to Reloads systemctl
        '''
        # First call returns retcode 1 (failure -> False), second 0 (True).
        mock = MagicMock(side_effect=[1, 0])
        with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
            self.assertFalse(systemd.systemctl_reload())
            self.assertTrue(systemd.systemctl_reload())
    def test_get_enabled(self):
        '''
        Test to return a list of all enabled services
        '''
        # Stub: 'd' and 'e' are SysV-managed services.
        def sysv(name):
            if name in ['d', 'e']:
                return True
            return False
        # Stub: only 'e' is a disabled SysV service.
        def sysve(name):
            if name in ['e']:
                return True
            return False
        mock = MagicMock(return_value={"a": "enabled", "b": "enabled",
                                       "c": "disabled"})
        lmock = MagicMock(return_value={"d": "disabled",
                                        "a": "disabled",
                                        "b": "disabled",
                                        "e": "disabled"})
        # Expect unit-file-enabled 'a'/'b' plus SysV-enabled 'd'.
        with patch.object(systemd, "_sysv_is_disabled", sysve):
            with patch.object(systemd, "_service_is_sysv", sysv):
                with patch.object(systemd, '_get_all_unit_files', mock):
                    with patch.object(systemd, '_get_all_units', lmock):
                        self.assertListEqual(
                            systemd.get_enabled(), ["a", "b", "d"])
    def test_get_disabled(self):
        '''
        Test to return a list of all disabled services
        '''
        mock = MagicMock(return_value={"a": "enabled", "b": "enabled",
                                       "c": "disabled"})
        with patch.object(systemd, '_get_all_unit_files', mock):
            mock = MagicMock(return_value={})
            with patch.object(systemd, '_get_all_legacy_init_scripts', mock):
                self.assertListEqual(systemd.get_disabled(), ["c"])
    def test_get_all(self):
        '''
        Test to return a list of all available services
        '''
        mock = MagicMock(return_value={"a": "enabled", "b": "enabled",
                                       "c": "disabled"})
        with patch.object(systemd, '_get_all_units', mock):
            mock = MagicMock(return_value={"a1": "enabled", "b1": "disabled",
                                           "c1": "enabled"})
            with patch.object(systemd, '_get_all_unit_files', mock):
                mock = MagicMock(return_value={})
                with patch.object(systemd,
                                  '_get_all_legacy_init_scripts', mock):
                    # Union of units and unit files, sorted.
                    self.assertListEqual(systemd.get_all(),
                                         ['a', 'a1', 'b', 'b1', 'c', 'c1'])
    def test_available(self):
        '''
        Test to check that the given service is available
        '''
        # Three calls: canonical name present, template name present,
        # then a name absent from get_all -> False.
        mock = MagicMock(side_effect=["a", "@", "c"])
        with patch.object(systemd, '_canonical_template_unit_name', mock):
            mock = MagicMock(side_effect=[{"a": "z", "b": "z"},
                                          {"@": "z", "b": "z"},
                                          {"a": "z", "b": "z"}])
            with patch.object(systemd, 'get_all', mock):
                self.assertTrue(systemd.available("sshd"))
                self.assertTrue(systemd.available("sshd"))
                self.assertFalse(systemd.available("sshd"))
    def test_missing(self):
        '''
        Test to the inverse of service.available.
        '''
        mock = MagicMock(return_value=True)
        with patch.object(systemd, 'available', mock):
            self.assertFalse(systemd.missing("sshd"))
    def test_unmask(self):
        '''
        Test to unmask the specified service with systemd
        '''
        # retcode 0 (via the False-returning mock) means success -> True.
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    self.assertTrue(systemd.unmask("sshd"))
    def test_start(self):
        '''
        Test to start the specified service with systemd
        '''
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    self.assertTrue(systemd.start("sshd"))
    def test_stop(self):
        '''
        Test to stop the specified service with systemd
        '''
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    self.assertTrue(systemd.stop("sshd"))
    def test_restart(self):
        '''
        Test to restart the specified service with systemd
        '''
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    self.assertTrue(systemd.restart("sshd"))
    def test_reload_(self):
        '''
        Test to Reload the specified service with systemd
        '''
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    self.assertTrue(systemd.reload_("sshd"))
    def test_force_reload(self):
        '''
        Test to force-reload the specified service with systemd
        '''
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    self.assertTrue(systemd.force_reload("sshd"))
    def test_status(self):
        '''
        Test to return the status for a service via systemd
        '''
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    self.assertTrue(systemd.status("sshd"))
    def test_enable(self):
        '''
        Test to enable the named service to start when the system boots
        '''
        exe = MagicMock(return_value='foo')
        tmock = MagicMock(return_value=True)
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    # Non-SysV path first, then the SysV path.
                    with patch.object(systemd, "_service_is_sysv", mock):
                        self.assertTrue(systemd.enable("sshd"))
                    with patch.object(systemd, "_get_service_exec", exe):
                        with patch.object(systemd, "_service_is_sysv", tmock):
                            self.assertTrue(systemd.enable("sshd"))
    def test_disable(self):
        '''
        Test to disable the named service to not
        start when the system boots
        '''
        exe = MagicMock(return_value='foo')
        tmock = MagicMock(return_value=True)
        mock = MagicMock(return_value=False)
        with patch.object(systemd, '_untracked_custom_unit_found', mock):
            with patch.object(systemd, '_unit_file_changed', mock):
                with patch.dict(systemd.__salt__, {'cmd.retcode': mock}):
                    # Non-SysV path first, then the SysV path.
                    with patch.object(systemd, "_service_is_sysv", mock):
                        self.assertTrue(systemd.disable("sshd"))
                    with patch.object(systemd, "_get_service_exec", exe):
                        with patch.object(systemd, "_service_is_sysv", tmock):
                            self.assertTrue(systemd.disable("sshd"))
    def test_enabled(self):
        '''
        Test to return if the named service is enabled to start on boot
        '''
        mock = MagicMock(return_value=True)
        with patch.object(systemd, '_enabled', mock):
            self.assertTrue(systemd.enabled("sshd"))
    def test_disabled(self):
        '''
        Test to Return if the named service is disabled to start on boot
        '''
        mock = MagicMock(return_value=True)
        with patch.object(systemd, '_enabled', mock):
            self.assertFalse(systemd.disabled("sshd"))
    def test_show(self):
        '''
        Test to show properties of one or more units/jobs or the manager
        '''
        # show() splits on the FIRST '=' only, hence the odd expected dict.
        mock = MagicMock(return_value="a = b , c = d")
        with patch.dict(systemd.__salt__, {'cmd.run': mock}):
            self.assertDictEqual(systemd.show("sshd"), {'a ': ' b , c = d'})
    def test_execs(self):
        '''
        Test to return a list of all files specified as ``ExecStart``
        for all services
        '''
        mock = MagicMock(return_value=["a", "b"])
        with patch.object(systemd, 'get_all', mock):
            mock = MagicMock(return_value={"ExecStart": {"path": "c"}})
            with patch.object(systemd, 'show', mock):
                self.assertDictEqual(systemd.execs(), {'a': 'c', 'b': 'c'})
if __name__ == '__main__':
    # Allow running this test module directly via Salt's integration runner.
    from integration import run_tests
    run_tests(SystemdTestCase, needs_daemon=False)
|
import base64
import functools
import json
def requirebegin(func):
    """Decorator for FlexCAN methods: raise if ``begin()`` was not called.

    Checks ``self._begin`` before delegating to the wrapped method.
    ``functools.wraps`` preserves the wrapped method's name/docstring
    (the original wrapper discarded them).
    """
    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        if not self._begin:
            raise Exception('CAN bus not started')
        return func(self, *args, **kwargs)
    return inner
class CAN_message(object):
    """A single CAN frame (length, arbitration id, payload), JSON-serializable."""
    # Class-level defaults; note ``len``/``id`` shadow the builtins of the
    # same name inside this class's namespace.
    len = 0
    id = 0x0
    buf = ''
    def __str__(self):
        # Serialize as [len, id, base64(payload)].
        # NOTE(review): Python 2 assumed (see __unicode__ below) -- under
        # Python 3, base64.b64encode returns ``bytes``, which json.dumps
        # cannot serialize.  Confirm target interpreter before porting.
        return json.dumps([self.len, int(self.id), base64.b64encode(self.buf)])
    def __unicode__(self):
        # Python 2 unicode protocol hook; ``unicode`` does not exist on py3.
        return unicode(str(self))
    def set_message(self, data):
        # Inverse of __str__: parse the JSON triple and decode the payload,
        # truncating it to the declared length.
        self.len, self.id, self.buf = json.loads(data)
        self.buf = base64.b64decode(self.buf)[:self.len]
    def __eq__(self, other):
        # Frames compare equal when their serialized forms match.
        # NOTE(review): __eq__ without __hash__ makes instances unhashable
        # on py3 / identity-hashed on py2 -- confirm intended use.
        return str(self) == str(other)
class FlexCAN(object):
    """Software stand-in for the Teensy FlexCAN interface.

    Frames are exchanged through a listener queue registered on a shared
    bus object; ``begin()`` must be called before any traffic.
    """
    _id = None
    _canbus = None
    _queue = None
    _begin = False
    def __init__(self, baudrate, bus=None):
        # ``baudrate`` is accepted for API compatibility only.
        if bus:
            self.set_bus(bus)
    def set_bus(self, bus):
        """Attach to *bus* and register a listener queue on it."""
        self._canbus = bus
        self._queue, self._queue_id = bus.add_listener_queue()
    def begin(self):
        """Mark the bus as started; required before available/read/write."""
        self._begin = True
    @requirebegin
    def available(self):
        """Return the number of frames waiting in the listener queue."""
        return len(self._queue)
    @requirebegin
    def read(self, msg):
        """Pop the oldest queued frame into *msg*; return True on success."""
        if not self._queue:
            return False
        msg.set_message(self._queue.pop(0))
        return True
    @requirebegin
    def write(self, msg):
        """Serialize *msg* and send it on the attached bus."""
        self._canbus.send(str(msg), self._queue_id)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generate JSON dataset for errmodel directly from yay.tsv and yay.summary
Prepare examples with
- multiple error lines (1-5)
- (no error lines.) instead, during training, predict code for err lines as well as randomly chosen other lines (which are already correct)
"""
import shutil, re, random
from collections import defaultdict, Counter
import csv, os, sys, math, time
import argparse
import heapq
import subprocess
from enum import Enum
import itertools
import traceback
import json
import numpy as np
sys.path.append("../utils")
from code_process import tokenize_code, TEXT_TOKENIZER
from code_process import parse_error, filter_error_message, fix_strings, fix_strings_one_whitespace, remove_braces_gold
from compilation import err, pass_test, compile_and_run_tests_all
## for parallel
from joblib import Parallel, delayed
import multiprocessing as mp
# Global arguments
ARGS = None
# indices in the respective tsv_files
class _inp():
    """Column indices for rows of the ``.tsv`` input file."""
    text = 0      # natural-language pseudocode text
    code = 1      # gold code line
    hitid = 2     # crowdsourcing HIT id
    workerid = 3  # annotator id
    probid = 4    # problem id
    subid = 5     # submission id
    line = 6      # line number within the program (0 starts a new program)
    indent = 7    # indentation level of the line
class _pred():
    """Column indices for rows of the ``.summary`` predictions file."""
    text = 1        # pseudocode text
    gold_score = 2  # model score of the gold line
    pred_score = 3  # model score of the best prediction
    gold = 4        # gold code line
    pred_best = 5   # first ranked prediction; rank r is at pred_best + r
def prepare_code_with_substitution(inp_stmt, pred_stmt, sub_lines):
    """Stitch a complete C++ program from the gold lines, substituting the
    lines listed in *sub_lines*, and inserting braces inferred from the
    tsv indent column.

    sub_lines: {idx: codeline_str, ...} to be used for substitution.
    """
    code_header = "#include <bits/stdc++.h>\n#include <string>\nusing namespace std;\n\n" #CHANGE
    curr_j = 0
    curr_ind, prev_line = 0, " "
    code = code_header
    # generate code with everything else gold except the i-th line
    for inp_j, pred_j in zip(inp_stmt, pred_stmt):
        # find the number of tabs to insert
        tmp_ind = int(inp_j[_inp.indent])
        curr_line = remove_braces_gold(inp_j[_inp.code]).strip()
        _prev_line = prev_line.replace(" ", "")
        # handle case like
        # cout << " YES "
        # << " \n " ;
        if (len(curr_line) >= 2 and curr_line[:2]=="<<"):
            tmp_ind = curr_ind
        # handle "std:", "pause:", "momo:", "start:", "label:", etc.
        if (2<= len(curr_line) <=12 and re.match(r'^\w+:;?$', curr_line) is not None): #^ means start, $ means end
            tmp_ind = tmp_ind + 1
        # handle
        # 10,
        # 11
        if _prev_line.endswith(",") and curr_line != "};":
            tmp_ind = curr_ind
        indent = '\t' * tmp_ind
        # if tabs are decreasing then add } if not closing already
        if tmp_ind < curr_ind:
            if not (inp_j[_inp.code].replace(" ", "") in ["}", "};"]): ##CHANGE
                indent += "} "
                if curr_ind - tmp_ind > 1:
                    indent += (curr_ind - tmp_ind - 1) * "} "
        # if tabs are increasing then add { if not open already
        elif tmp_ind > curr_ind:
            if not prev_line or prev_line[-1] != "{":
                indent += "{ "
                if tmp_ind - curr_ind > 1:
                    indent += (tmp_ind - curr_ind - 1) * "{ "
        curr_ind = tmp_ind
        # pick the line of code
        ## handle a case like
        # if (i==10)
        # else { ... }
        if _prev_line.startswith("if(") and _prev_line.endswith(")") and curr_line.startswith("else"):
            code += ("\t" *curr_ind + ";\n")
        elif _prev_line.startswith("elseif(") and _prev_line.endswith(")") and curr_line.startswith("else"):
            code += ("\t" *curr_ind + ";\n")
        elif _prev_line =="else" and curr_line=="}":
            code += ("\t" *curr_ind + "{\n")
        elif _prev_line =="do" and curr_line.startswith("while"):
            code += ("\t" *curr_ind + "{}\n")
        # Emit the gold line unless this index is being substituted.
        if pred_j[_pred.text] == 'DUMMY' or curr_j not in sub_lines:
            code += indent + curr_line + "\n"
            prev_line = curr_line
        else:
            code += indent + fix_strings(sub_lines[curr_j]) + "\n"
            prev_line = sub_lines[curr_j].strip()
        curr_j += 1
    return code
def detailed_oracle_with_test_custom(inp_stmt, pred_stmt, probid, subid):
    """For each predicted line (top-2 ranks), substitute it into the gold
    program, compile/run the tests, and record the outcome.

    Returns a list of result dicts, or None when the gold program itself
    fails (nothing useful can be generated from it).
    """
    unique_id = probid + "-" + subid
    _return_ = [] #return this
    curr_i, prob_list_i = 0, 0
    code = prepare_code_with_substitution(inp_stmt, pred_stmt, {}) #gold code
    passed, error, error_message = compile_and_run_tests_all(ARGS, code, probid, subid, None)
    if error != err.no_err: #gold program has error
        print ("gold program has error. bye")
        print (error_message)
        return None
    else:
        print ("gold program passed!")
        # return [] ## Temporary
    for curr_i, (inp_i, pred_i) in enumerate(zip(inp_stmt, pred_stmt)):
        if pred_i[_pred.text] == 'DUMMY':
            continue
        # iterate over the i-th line predictions
        for rank in range(2): #range(ARGS.num_preds):
            sub_lines = {curr_i: pred_i[_pred.pred_best + rank]}
            code = prepare_code_with_substitution(inp_stmt, pred_stmt, sub_lines)
            passed, error, error_message = compile_and_run_tests_all(ARGS, code, probid, subid, None)
            # Only compiler errors get their message filtered/normalized.
            if passed == pass_test.none and error == err.compile_err:
                error_message = filter_error_message(error_message, unique_id)
            _obj_ = {
                "rank": rank+1,
                "wrong_lines_idx": [curr_i],
                "wrong_lines_code": [pred_i[_pred.pred_best + rank]],
                "passed": passed,
                "error": error,
                "error_message": error_message,
            }
            _return_.append(_obj_)
            prob_list_i += 1
    return _return_
def findsubsets(s, n):
    """Return all n-element subsets of *s* as tuples, e.g. s = {1, 2, 3}, n = 2."""
    subsets = itertools.combinations(s, n)
    return list(subsets)
def filter_and_expand_to_multi_errs(detailed_oracle_out, inp_stmt, pred_stmt, probid, subid):
    """Keep the error-producing single-line substitutions and expand them
    into multi-line error examples (2, 3 and 4 simultaneous wrong lines).

    Candidate tuples are drawn from up to 6 single-line errors (all rank-1
    entries, topped up with shuffled rank-2 entries); tuples whose line
    indices are not pairwise distinct are skipped.  Each candidate program
    is recompiled to record its own error message.
    """
    unique_id = probid + "-" + subid
    # first filter to just get err lines
    filtered_rank1 = []
    filtered_rank2 = []
    for oracle in detailed_oracle_out:
        if oracle["error"] in [1,2,3]: #error indicator
            if oracle["rank"] == 1:
                filtered_rank1.append(oracle)
            elif oracle["rank"] == 2:
                filtered_rank2.append(oracle)
    for_prep_multi = filtered_rank1[:]
    idxs_from_rank2 = list(range(len(filtered_rank2)))
    random.shuffle(idxs_from_rank2)
    # Top up with rank-2 errors until we have at most 6 seeds.
    while len(for_prep_multi) < 6 and len(idxs_from_rank2) > 0:
        _idx = idxs_from_rank2.pop(0)
        for_prep_multi.append(filtered_rank2[_idx])
    ## Get tuples first, then trim, then get feedback
    num_wrong = len(for_prep_multi) # we are only considering top1 pred
    _return_ = []
    for i in range(num_wrong):
        for j in range(num_wrong):
            if i < j:
                wrong_idx_i = for_prep_multi[i]["wrong_lines_idx"][0]
                wrong_code_i = for_prep_multi[i]["wrong_lines_code"][0]
                wrong_idx_j = for_prep_multi[j]["wrong_lines_idx"][0]
                wrong_code_j = for_prep_multi[j]["wrong_lines_code"][0]
                # Both substitutions must target distinct lines.
                if wrong_idx_i == wrong_idx_j:
                    continue
                sub_lines = {wrong_idx_i: wrong_code_i, wrong_idx_j: wrong_code_j}
                code = prepare_code_with_substitution(inp_stmt, pred_stmt, sub_lines)
                passed, error, error_message = compile_and_run_tests_all(ARGS, code, probid, subid, None)
                if passed == pass_test.none and error == err.compile_err:
                    error_message = filter_error_message(error_message, unique_id)
                _obj_ = {
                    "rank": None,
                    "wrong_lines_idx": [wrong_idx_i, wrong_idx_j],
                    "wrong_lines_code": [wrong_code_i, wrong_code_j],
                    "passed": passed,
                    "error": error,
                    "error_message": error_message,
                }
                _return_.append(_obj_)
                for k in range(num_wrong):
                    if j < k:
                        wrong_idx_k = for_prep_multi[k]["wrong_lines_idx"][0]
                        wrong_code_k = for_prep_multi[k]["wrong_lines_code"][0]
                        # Product of pairwise differences is 0 iff any two
                        # indices coincide.
                        if (wrong_idx_i - wrong_idx_j) * (wrong_idx_j - wrong_idx_k) * (wrong_idx_k - wrong_idx_i) == 0:
                            continue
                        sub_lines = {wrong_idx_i: wrong_code_i, wrong_idx_j: wrong_code_j, wrong_idx_k: wrong_code_k}
                        code = prepare_code_with_substitution(inp_stmt, pred_stmt, sub_lines)
                        passed, error, error_message = compile_and_run_tests_all(ARGS, code, probid, subid, None)
                        if passed == pass_test.none and error == err.compile_err:
                            error_message = filter_error_message(error_message, unique_id)
                        _obj_ = {
                            "rank": None,
                            "wrong_lines_idx": [wrong_idx_i, wrong_idx_j, wrong_idx_k],
                            "wrong_lines_code": [wrong_code_i, wrong_code_j, wrong_code_k],
                            "passed": passed,
                            "error": error,
                            "error_message": error_message,
                        }
                        _return_.append(_obj_)
                        for l in range(num_wrong):
                            if k < l:
                                wrong_idx_l = for_prep_multi[l]["wrong_lines_idx"][0]
                                wrong_code_l = for_prep_multi[l]["wrong_lines_code"][0]
                                # All six pairwise differences must be non-zero.
                                if (wrong_idx_i - wrong_idx_j) * (wrong_idx_j - wrong_idx_k) * (wrong_idx_k - wrong_idx_l) * (wrong_idx_l - wrong_idx_i) * (wrong_idx_i - wrong_idx_k) * (wrong_idx_j - wrong_idx_l) == 0:
                                    continue
                                sub_lines = {wrong_idx_i: wrong_code_i, wrong_idx_j: wrong_code_j, wrong_idx_k: wrong_code_k, wrong_idx_l: wrong_code_l}
                                code = prepare_code_with_substitution(inp_stmt, pred_stmt, sub_lines)
                                passed, error, error_message = compile_and_run_tests_all(ARGS, code, probid, subid, None)
                                if passed == pass_test.none and error == err.compile_err:
                                    error_message = filter_error_message(error_message, unique_id)
                                _obj_ = {
                                    "rank": None,
                                    "wrong_lines_idx": [wrong_idx_i, wrong_idx_j, wrong_idx_k, wrong_idx_l],
                                    "wrong_lines_code": [wrong_code_i, wrong_code_j, wrong_code_k, wrong_code_l],
                                    "passed": passed,
                                    "error": error,
                                    "error_message": error_message,
                                }
                                _return_.append(_obj_)
    return (filtered_rank1 + filtered_rank2 + _return_)
def get_err_data_one_json(probno): #for one json file
    """Extract the rows of one problem (*probno*) from the tsv/summary
    pair, run the substitution oracle, and dump one JSON file with the
    collected compiler errors.
    """
    folder = ARGS.folder
    count = 0
    inp_stmt, pred_stmt = [], []
    lines = [] #for dump to json
    # the following look extracts the input/pred lines for the probno specified
    # and passes it further for stitching
    with open(folder + '.tsv','r') as tsvin, open(folder + '.summary','r') as predin:
        head_t = tsvin.readline().rstrip('\n').split('\t')
        head_s = predin.readline().rstrip('\n').split('\t')
        head_s.pop()
        for _ in range(ARGS.num_preds):
            head_s.append('pred_{}'.format(_ + 1))
        for _ in range(ARGS.num_preds):
            head_s.append('score_{}'.format(_ + 1))
        probid, subid, hitid, workerid = None, None, None, None
        while True:
            inp = tsvin.readline()
            if not inp:
                # Special handling for last line
                assert count == probno, \
                    'num problems = {} but probno = {}'.format(count, probno)
                break
            inp = inp.split('\t')
            pred = predin.readline().rstrip('\n').split("\t")
            s = dict(zip(head_s, pred))
            # line == 0 marks the first row of a new program.
            if int(inp[_inp.line].strip()) == 0:
                if count == probno:
                    break
                count += 1
                probid, subid = inp[_inp.probid].strip(), inp[_inp.subid].strip()
                hitid = inp[_inp.hitid].strip()
                workerid = inp[_inp.workerid].strip()
            if count == probno:
                inp_stmt.append(inp)
                pred_stmt.append(pred)
                line = {
                    'line': len(lines),
                    'text': s['text'],
                    'code': s['gold'],
                    'indent': int(inp[_inp.indent]),
                }
                lines.append(line)
    # generate a unique id for this program
    unique_id = "{:04d}-{}-{}".format(probno, probid, subid)
    unique_id_dir = os.path.join("/".join(folder.split("/")[:-1]), unique_id)
    cwd = os.getcwd()
    os.system("mkdir -p %s" %(unique_id_dir))
    os.chdir(unique_id_dir) #change dir to run detailed-oracle
    detailed_oracle_out = detailed_oracle_with_test_custom(inp_stmt, pred_stmt, probid, subid)
    if detailed_oracle_out is None: #gold program failed
        detailed_oracle_out = []
    # #### Temporary ####
    #     os.chdir(cwd)
    #     with open(ARGS.out_prefix_compiler + '/{}.txt'.format(unique_id), 'w') as fout: pass
    # else:
    #     os.chdir(cwd)
    #     with open(ARGS.out_prefix_testcase + '/{}.txt'.format(unique_id), 'w') as fout: pass
    # ##################
    if detailed_oracle_out == []: #gold program failed
        return
    expanded_detailed_oracle_out = filter_and_expand_to_multi_errs(detailed_oracle_out, inp_stmt, pred_stmt, probid, subid)
    os.chdir(cwd) #change dir back
    # os.system("pwd")
    ## now dump to json
    meta = {
        'index': probno,
        'hitid': hitid,
        'workerid': workerid,
        'probid': probid,
        'subid': subid,
    }
    errors_compiler = []
    for oracle in expanded_detailed_oracle_out:
        if str(oracle["passed"]) + str(oracle["error"]) == "01": #compiler err
            error_line, error_msg = parse_error(oracle["error_message"], line_offset=LINE_OFFSET)
            if error_line is None:
                continue
            errors_compiler.append({
                'mod_line': oracle["wrong_lines_idx"],
                'mod_code': oracle["wrong_lines_code"],
                'err_line': error_line,
                'err_msg': error_msg,
            })
    with open(ARGS.out_prefix_compiler + '/{}.json'.format(unique_id), 'w') as fout: #CHANGE to /
        json.dump({
            'meta': meta,
            'lines': lines,
            'errors': errors_compiler,
        }, fout, ensure_ascii=False, indent=2)
# tsv: text code hitid workerid probid subid line indent
# summary: index text gold_score pred_score gold pred_1 ... pred_30 prob_1 ... prob_30
# The actual code has 4 lines of preamble (#include<..> + using namespace std)
# plus a blank line, so compiler line numbers are shifted by 5 relative to
# the statement index.
LINE_OFFSET = 5
def main():
    """Parse command-line arguments and generate the error-model JSON for
    the single problem index given on the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('--prog-dir', default='../raw_data/spoc_data/spoc/testcases',
                        help='Path the codeforces-data repository, which contains test cases')
    parser.add_argument('--max-heap', type=int, default=999999,
                        help='Suicide when heap is bigger than this')
    parser.add_argument('-t', '--timeout', type=int, default=2,
                        help='Timeout for execution (in seconds)')
    parser.add_argument('-T', '--gcc-timeout', type=int, default=60,
                        help='Timeout for compilation (in seconds)')
    parser.add_argument('-c', '--compile-budget', type=int, default=999999,
                        help='Number of maximum g++ calls')
    parser.add_argument('--num-preds', type=int, default=30)
    parser.add_argument('folder')
    parser.add_argument('probno', type=int)
    parser.add_argument('out_prefix_compiler',
                        help='prefix for the output JSON files')
    # Parse argv once and publish the namespace globally (the original
    # called parse_args() twice, leaving an unused local ``args``).
    global ARGS
    ARGS = parser.parse_args()
    get_err_data_one_json(ARGS.probno)
if __name__ == '__main__':
    main()
|
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
# Pakai app dari package yang diatasnya
from aplikasi.form import app
# Import model
from aplikasi.model.anggota import tambah
# Decorator declaring that:
# + For URL: /form
# + Run this method
# + But only for HTTP POST
@app.route("/form", methods=["POST"])
def form_anggota():
    """Handle the member-registration HTML form (HTTP POST)."""
    # Fetch the parameters from the submitted form
    nama = request.form.get("txtNama")
    umur = request.form.get("txtUmur")
    # NOTE(review): raises (HTTP 500) when txtUmur is missing or not
    # numeric -- confirm whether a 400 response is wanted instead.
    umur = int(umur)
    # Call the business logic to handle the request,
    # in this case adding a new member
    tambah(nama, umur)
    # Build the redirect URL, sending two parameters: nama and umur.
    # Note:
    # + The first argument is the name of the view method to run;
    #   Flask looks up the URL registered for that method via .route.
    # + The remaining keyword arguments are the parameters to send,
    #   here two of them, named: nama and umur.
    url = url_for("tampilkan_halaman_utama", nama=nama, umur=umur)
    # Redirect to the URL just built
    return redirect(url)
# Decorator declaring that:
# + For URL: /api/form
# + Run this method
# + But only for HTTP POST
@app.route("/api/form", methods=["POST"])
def api_form():
    """JSON API endpoint for adding a member.

    Expects a JSON body with ``nama`` and ``umur``; returns 400 on
    malformed input and 200 on success.
    """
    # Parameters arrive as JSON.  get_json() yields None when the body is
    # missing or not JSON, in which case ``peserta.keys()`` would crash
    # with a 500 -- guard against that first.
    peserta = request.get_json()
    if peserta is None:
        return "Salah data!", 400
    # Check the required properties are present (membership test on the
    # dict itself instead of .keys()).
    if "nama" not in peserta:
        # Property nama missing: abort with an error message and HTTP 400
        # to signal the command failed.
        return "Salah data!", 400
    if "umur" not in peserta:
        # Property umur missing: same treatment.
        return "Salah data!", 400
    # Note: extra properties in the payload are deliberately ignored.
    # Call the business logic to handle the request,
    # in this case adding a new member
    tambah(peserta["nama"], peserta["umur"])
    # Return the result with HTTP 200, which means OK (= success)
    return "Done.", 200
|
"""DGM"""
from typing import (
List,
Dict
)
from re import sub
from .dataset import Dataset
class Dimension():
    """A single metric dimension: a name/value pair."""
    name: str
    value: str
    def __init__(self, name: str, value: str) -> None:
        self.name = name
        self.value = value
    def api_structure(self) -> dict:
        """Return in boto3 API structure."""
        structure = {'Name': self.name, 'Value': self.value}
        return structure
class Widget():
    """
    Declare dashboard name for the metric to be available
    Declare dashboard_category for the use-case specific dashboard to be grouped in
    """
    dashboard_name: str
    dashboard_category: str
    def __init__(self, dashboard_name: str, dashboard_category: str = None) -> None:
        self.dashboard_name = dashboard_name
        self.dashboard_category = dashboard_category
class Metadata():
    """Arbitrary name/value annotation attached to a metric."""
    name: str
    value: str
    def __init__(self, name: str, value: str) -> None:
        self.name = name
        self.value = value
class Metric():
    """A CloudWatch metric definition.

    On construction the metric derives its ``period`` from ``frequency``
    (unless a period is given explicitly) and registers itself with
    *metric_set*.
    """
    namespace: str
    name: str
    frequency: str
    statistic: str
    period: int
    metadata: List[Metadata]
    dimensions: List[Dimension]
    dashboard: Widget
    # Supported frequency identifiers (see frequency_to_period).
    DAY = 'day'
    HOUR = 'hour'
    MINUTE = 'minute'
    def __init__(
            self,
            namespace: str,
            name: str,
            frequency: str,
            statistic: str,
            dashboard: Widget,
            metric_set,
            sla_set=None,
            period: int = None,
            metadata: List[Metadata] = None,
            dimensions: List[Dimension] = None
    ) -> None:
        self.namespace = namespace
        self.name = name
        self.frequency = frequency
        # An explicit period wins; otherwise derive it from the frequency.
        self.period = period if period is not None else self.frequency_to_period(frequency)
        self.statistic = statistic
        self.metadata = metadata
        self.dimensions = dimensions
        self.metric_set = metric_set
        self.sla_set = sla_set
        self.dashboard = dashboard
        # Self-register so the owning set can enumerate all metrics.
        self.metric_set.add(self)
    @staticmethod
    def frequency_to_period(frequency: str) -> int:
        """Convert a frequency string to a period in seconds.

        Raises:
            ValueError: if *frequency* is not DAY/HOUR/MINUTE.  (The
                original fell through to an UnboundLocalError here.)
        """
        periods = {
            Metric.DAY: 86400,
            Metric.HOUR: 3600,
            Metric.MINUTE: 60,
        }
        if frequency not in periods:
            raise ValueError('unknown frequency: {!r}'.format(frequency))
        return periods[frequency]
    def api_structure(self) -> dict:
        """Return in boto3 API structure."""
        dimensions = []
        if self.dimensions:
            for dimension in self.dimensions:
                dimensions.append(dimension.api_structure())
        return {
            'Namespace': self.namespace,
            'MetricName': self.name,
            'Dimensions': dimensions
        }
    def widget_title(self) -> str:
        """Generate title for the CloudWatch Widgets."""
        metric_id = self.name + ' per ' + self.frequency + '-'
        if self.dimensions:
            for dimension in self.dimensions:
                # Bucket dimensions are noise in titles; skip them.
                if str(dimension.name).endswith('Bucket'):
                    continue
                metric_id += dimension.value
        return metric_id.replace('/', '').lower()
    def alarm_unique_id(self) -> str:
        """Generate short ID for AlarmName creation."""
        metric_id = self.namespace + '-' + self.name + '-' + self.frequency + '-'
        if self.dimensions:
            for dimension in self.dimensions:
                if str(dimension.name).endswith('Bucket'):
                    continue
                metric_id += dimension.name + '-' + dimension.value + '-'
        return metric_id.replace('/', '').lower()
    def unique_id(self) -> str:
        """Generate short ID (lowercase, non-word characters stripped)."""
        metric_id = self.namespace + self.name + self.frequency
        if self.dimensions:
            for dimension in self.dimensions:
                if str(dimension.name).endswith('Bucket'):
                    continue
                metric_id += dimension.name + dimension.value
        return sub(r'\W+', '', metric_id).lower()
class DataSetMetric(Metric):
    """A Metric tied to the Dataset that produces it."""
    dataset: Dataset
    def __init__(
        self,
        dataset: Dataset,
        *args,
        **kwargs
    ) -> None:
        # All remaining arguments are forwarded to Metric.__init__.
        super().__init__(*args, **kwargs)
        self.dataset = dataset
class BusinessMetric(DataSetMetric):
    """A DataSetMetric computed by a query over reference datasets."""
    query: str
    reference_datasets: List[Dataset]
    def __init__(
        self,
        query: str,
        reference_datasets: List[Dataset],
        *args,
        **kwargs
    ) -> None:
        # All remaining arguments are forwarded to DataSetMetric.__init__.
        super().__init__(*args, **kwargs)
        self.reference_datasets = reference_datasets
        self.query = query
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 21:52:33 2020
@author: steve
"""
"""
1296. Divide Array in Sets of K Consecutive Numbers
https://leetcode.com/problems/divide-array-in-sets-of-k-consecutive-numbers/
"""
# I think the easiest way for me is to sort it first
"""
Runtime: 6788 ms, faster than 5.02% of Python3 online submissions for Divide Array in Sets of K Consecutive Numbers.
Memory Usage: 25.8 MB, less than 96.45% of Python3 online submissions for Divide Array in Sets of K Consecutive Numbers.
"""
def isPossibleDivide(nums, k):
    """Return True if *nums* can be split into groups of k consecutive numbers.

    Simple quadratic approach: repeatedly take the smallest remaining
    value and remove the run of k consecutive values starting there.
    """
    if len(nums) % k != 0:
        return False
    nums = sorted(nums)  # O(n log n)
    while nums:  # O(k * n) list removals overall
        check_num = nums[0]
        for i in range(check_num, check_num + k):
            try:
                nums.remove(i)
            except ValueError:  # narrowed from a bare ``except``
                return False
    return True
"""
Alright, let's do hands of straight style
Runtime: 448 ms, faster than 79.20% of Python3 online submissions for Divide Array in Sets of K Consecutive Numbers.
Memory Usage: 28.5 MB, less than 77.21% of Python3 online submissions for Divide Array in Sets of K Consecutive Numbers.
"""
def isPossibleDivide(nums, k):
    """Return True if *nums* can be split into groups of k consecutive numbers.

    Greedy "hand of straights" approach: for each distinct value in
    ascending order, consume all remaining copies of it as the start of
    count[value] runs [value, value + k).  O(n log n).
    (An unused ``import collections`` was removed.)
    """
    from collections import Counter
    count = Counter(nums)
    for start in sorted(count):
        if count[start] >= 1:
            # Iterate the run backward so count[start] keeps its value
            # until it is decremented last.
            for i in reversed(range(start, start + k)):
                count[i] -= count[start]
                if count[i] < 0:
                    return False
    return True
# Quick sanity check: the 12 numbers split into
# [1,2,3], [2,3,4], [3,4,5], [9,10,11] -> True.
nums = [3,2,1,2,3,4,3,4,5,9,10,11]
k = 3
print(isPossibleDivide(nums,k))
# contours: continuous lines or curves that bound an object
import cv2
import numpy as np
# load the image
image = cv2.imread('../images/sudoku.jpg')
# convert it to grayscale
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
cv2.imshow("original image", image)
cv2.waitKey(0)
# find the canny edges (30/200 are the hysteresis thresholds)
edged = cv2.Canny(image,30,200)
cv2.imshow("canny edged", edged)
cv2.waitKey(0)
# finding contours -- work on a copy, since findContours may modify its input
copied = edged.copy()
# NOTE(review): assumes an OpenCV version where findContours returns two
# values (2.x / 4.x); OpenCV 3.x returns three -- confirm installed version.
contours, hierarchy = cv2.findContours(copied,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.imshow("canny edge after contouring", copied)
cv2.waitKey(0)
print(" NUMBER OF contour found "+ str(len(contours)))
# draw all contours (-1 means all) in green with thickness 3
cv2.drawContours(image, contours, -1, (0,255,0), 3)
cv2.imshow("contours", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
import unittest
from bond import *
class TestBondMethods(unittest.TestCase):
    """Unit tests for the ``Bond`` class from the ``bond`` module."""
    def test_correct_form(self):
        # A well-formed bond exposes its constructor arguments via getters.
        b = Bond("C1", "corporate", 3, 1.3)
        self.assertEqual(b.get_name(), "C1")
        self.assertEqual(b.get_type(), "corporate")
        self.assertEqual(b.get_term(), 3)
        self.assertEqual(b.get_yield(), 1.3)
    def test_invalid_type(self):
        # Unknown bond types are rejected.
        with self.assertRaises(BondTypeError):
            b = Bond("G1", "prison", 3, 1.3)
    def test_invalid_term(self):
        # Negative terms are rejected.
        with self.assertRaises(InvalidTermError):
            b = Bond("G1", "government", -3, 1.3)
    def test_invalid_yield(self):
        # Negative yields are rejected.
        with self.assertRaises(InvalidYieldError):
            b = Bond("G1", "government", 3, -1.3)
    def test_term_difference(self):
        b1 = Bond("C1", "corporate", 3, 1.3)
        g1 = Bond("G1", "government", 4, 1.8)
        self.assertEqual(b1.term_difference(g1), 1)
    def test_yield_spread(self):
        b1 = Bond("C1", "corporate", 3, 1.3)
        g1 = Bond("G1", "government", 4, 1.8)
        self.assertEqual(b1.yield_spread(g1), 0.5)
if __name__ == '__main__':
    unittest.main()
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime, timedelta
def r_style_interval(from_tuple, end_tuple, frequency):
    """
    create time interval using R-style double-tuple notation
    """
    start_year, start_seg = from_tuple
    last_year, last_seg = end_tuple
    total = (last_year - start_year + 1) * frequency
    # Evenly spaced grid over the whole-year span, then trim the partial
    # segments at both ends.
    grid = np.linspace(start_year, last_year + 1, num=total, endpoint=False)
    return grid[(start_seg - 1):total - (frequency - last_seg)]
data_folder = "../data/"  # all .npy fixtures are loaded relative to this path
"""
R dataset: Average Yearly Temperatures in New Haven
"""
# 60 annual observations, 1912-1971
nhtemp = np.array([49.9, 52.3, 49.4, 51.1, 49.4, 47.9, 49.8, 50.9, 49.3, 51.9,
                   50.8, 49.6, 49.3, 50.6, 48.4, 50.7, 50.9, 50.6, 51.5, 52.8,
                   51.8, 51.1, 49.8, 50.2, 50.4, 51.6, 51.8, 50.9, 48.8, 51.7,
                   51.0, 50.6, 51.7, 51.5, 52.1, 51.3, 51.0, 54.0, 51.4, 52.7,
                   53.1, 54.6, 52.0, 52.0, 50.9, 52.6, 50.2, 52.6, 51.6, 51.9,
                   50.5, 50.9, 51.7, 51.4, 51.7, 50.8, 51.9, 51.8, 51.9, 53.0])
nhtemp_dates = np.arange("1912", "1972", dtype="datetime64[Y]")
"""
R dataset: Flow of the River Nile with one breakpoint: the annual flows drop
in 1898 because the first Ashwan dam was built
"""
# 100 annual observations, 1871-1970
nile = np.array([1120, 1160, 963, 1210, 1160, 1160, 813, 1230, 1370, 1140,
                 995, 935, 1110, 994, 1020, 960, 1180, 799, 958, 1140,
                 1100, 1210, 1150, 1250, 1260, 1220, 1030, 1100, 774, 840,
                 874, 694, 940, 833, 701, 916, 692, 1020, 1050, 969,
                 831, 726, 456, 824, 702, 1120, 1100, 832, 764, 821,
                 768, 845, 864, 862, 698, 845, 744, 796, 1040, 759,
                 781, 865, 845, 944, 984, 897, 822, 1010, 771, 676,
                 649, 846, 812, 742, 801, 1040, 860, 874, 848, 890,
                 744, 749, 838, 1050, 918, 986, 797, 923, 975, 815,
                 1020, 906, 901, 1170, 912, 746, 919, 718, 714, 740])
# nile_dates = np.arange("1871", "1971", dtype="datetime64[Y]")
# plain float years are used instead of datetime64 for this series
nile_dates = np.arange(1871, 1971).astype(float)
"""
A multivariate monthly time series from 1959(1) to 2001(2) with variables
from the strucchange package.
by: Achim Zeileis
"""
us_inc_exp = np.load(data_folder + "USIncExp.npy")
"""
R dataset: Time series giving the monthly totals of car drivers in Great
Britain killed or seriously injured Jan 1969 to Dec 1984. Compulsory wearing of
seat belts was introduced on 31 Jan 1983.
"""
# 192 monthly observations, Jan 1969 - Dec 1984
uk_driver_deaths = np.array([1687, 1508, 1507, 1385, 1632, 1511, 1559, 1630,
                             1579, 1653, 2152, 2148, 1752, 1765, 1717, 1558,
                             1575, 1520, 1805, 1800, 1719, 2008, 2242, 2478,
                             2030, 1655, 1693, 1623, 1805, 1746, 1795, 1926,
                             1619, 1992, 2233, 2192, 2080, 1768, 1835, 1569,
                             1976, 1853, 1965, 1689, 1778, 1976, 2397, 2654,
                             2097, 1963, 1677, 1941, 2003, 1813, 2012, 1912,
                             2084, 2080, 2118, 2150, 1608, 1503, 1548, 1382,
                             1731, 1798, 1779, 1887, 2004, 2077, 2092, 2051,
                             1577, 1356, 1652, 1382, 1519, 1421, 1442, 1543,
                             1656, 1561, 1905, 2199, 1473, 1655, 1407, 1395,
                             1530, 1309, 1526, 1327, 1627, 1748, 1958, 2274,
                             1648, 1401, 1411, 1403, 1394, 1520, 1528, 1643,
                             1515, 1685, 2000, 2215, 1956, 1462, 1563, 1459,
                             1446, 1622, 1657, 1638, 1643, 1683, 2050, 2262,
                             1813, 1445, 1762, 1461, 1556, 1431, 1427, 1554,
                             1645, 1653, 2016, 2207, 1665, 1361, 1506, 1360,
                             1453, 1522, 1460, 1552, 1548, 1827, 1737, 1941,
                             1474, 1458, 1542, 1404, 1522, 1385, 1641, 1510,
                             1681, 1938, 1868, 1726, 1456, 1445, 1456, 1365,
                             1487, 1558, 1488, 1684, 1594, 1850, 1998, 2079,
                             1494, 1057, 1218, 1168, 1236, 1076, 1174, 1139,
                             1427, 1487, 1483, 1513, 1357, 1165, 1282, 1110,
                             1297, 1185, 1222, 1284, 1444, 1575, 1737, 1763])
uk_driver_deaths_dates = np.arange("1969-01", "1985-01", dtype="datetime64[M]")
"""
NDVI time series, simulated by extracting key characteristics from MODIS 16-day
NDVI time series.
"""
ndvi = np.load(data_folder + "ndvi.npy")
ndvi_freq = 24  # 16-day composites: 24 observations per year
# column vector of fractional-year timestamps matching ndvi
ndvi_dates = r_style_interval((1982, 1), (2011, 24), ndvi_freq).reshape(ndvi.shape[0], 1)
"""
SIMTS dataset
"""
simts_freq = 23
simts = np.load(data_folder + "simts.npy")
# collapse the component axis to a single series per timestamp
simts_sum = np.sum(simts, axis=2).reshape(simts.shape[1])
simts_dates = r_style_interval((2000, 4), (2008, 18), simts_freq).reshape(simts.shape[1], 1)
"""
harvest dataset
"""
harvest_freq = 23
harvest = np.load(data_folder + "harvest.npy")
harvest_dates = r_style_interval((2000, 4), (2008, 18), harvest_freq).reshape(harvest.shape[0], 1)
# """
# Test with breakpoints in both seasonal and trend
# """
# _both_dates = r_style_interval((1990, 1), (1999, 24), ndvi_freq)
# _both_n = _both_dates.shape[0]
# both_freq = 24
# _both_x = np.arange(_both_n)
# _both_harm = (np.sin(_both_x * 0.5))
# _both_harm[150:] *= 3
# _both_trend = 0.02 * _both_x
# # _both_trend[100:] += 5
# both_dates = _both_dates.reshape(_both_n, 1)
# # both = _both_trend + _both_harm
# both = _both_harm
if __name__ == "__main__":
    # quick smoke check of the loaded datasets
    print(ndvi)
    print(simts_sum.shape)
    print(simts_dates.shape)
    # plt.plot(both_dates, both)
    # plt.show()
|
import numpy as np
import copy
import random
import time
## Device: a compute node with a fixed number of concurrent slots. ##
class S():
    """A compute node; ``p`` is total parallel slots, ``t`` the free ones."""
    def __init__(self, p, t):
        # p: concurrency capacity; t: currently available slots
        self.p, self.t = p, t
## Sub-task: one layer of a task pipeline. ##
class T():
    """One layer of a task; tracks its remaining work, timing and placement."""
    def __init__(self, time, d, i, j):
        # static description: remaining work, transferred data volume,
        # owning task index and layer index
        self.time, self.d, self.i, self.j = time, d, i, j
        # scheduling state filled in during simulation
        self.endtime = None   # completion timestamp
        self.turn = None
        self.ori = None       # arrival / ready timestamp
## Input task queue. ##
class Q(list):
    """A ``list`` subclass used as a task queue.

    The original called ``list.__init__([])``, which initialises a throwaway
    temporary list instead of ``self``; ``super().__init__()`` initialises
    this instance (a no-op for an empty queue, but now correct).
    """
    def __init__(self):
        super().__init__()
## Swim lane: one execution slot on a device. ##
class Pool(list):
    """One execution lane of a device.

    ``evaluate()`` constructs lanes as ``Pool()`` while ``evaluate415`` /
    ``evaluate511`` use ``Pool(device, lane)``; the original mandatory
    parameters made the no-argument form raise TypeError, so both forms are
    now supported via defaulted parameters (backward compatible).
    """
    def __init__(self, deviceId=None, num=None):
        super().__init__()
        self.diviceId = deviceId  # (sic) misspelled name kept for compatibility
        self.num = num            # lane index on the device
        self.endtime = 0          # timestamp when this lane becomes free
        self.task = [0]
## Evaluation function: time-sliced simulation of queued tasks over devices. ##
def evaluate(s, q1, t1, v, rtt):
    """Simulate the per-device queues ``q1`` of sub-tasks ``t1`` on devices
    ``s`` in 500 ms time slices and return ``[order, mean_latency]`` where
    ``order`` records per-device admission order.

    NOTE(review): relies on ``getx`` and ``num``, which are not defined in
    this chunk (presumably elsewhere in the module), and calls ``timedelay``
    with three arguments although it is declared with two -- confirm.
    """
    # show_p(q1,0)
    etime = 0                 # simulated clock (ms)
    q = copy.deepcopy(q1)     # operate on copies so the caller's state survives
    t = copy.deepcopy(t1)
    size = len(s)
    pool = [0]*size           # per-device set of currently running sub-tasks
    for i in range(size):
        pool[i] = Pool()
    order = [0]*size          # per-device admission order (returned)
    for i in range(size):
        order[i] = Pool()
    timeslice = 500           # the clock advances in 500 ms steps
    while 1:
        etime = etime +timeslice
        # admission phase: move ready sub-tasks from each queue into its pool
        for y in range(0, size):
            dlist = []
            for k in range(0, len(q[y])):
                i = fpi(q[y][k])
                j = fpj(q[y][k])
                if s[y].t > 0:
                    # ready = first layer that has arrived, or parent finished
                    if (j == 0 and t[i][j].ori <= etime) or (t[i][j - 1].endtime is not None and t[i][j].ori <= etime):
                        pool[y].append(q[y][k])
                        dlist.append(q[y][k])
                        order[y].append(q[y][k])
                        s[y].t = s[y].t - 1
                if s[y].t == 0:
                    break
            for i in range(len(dlist)):
                q[y].remove(dlist[i])
            dlist.clear()
            dlist = None
        # execution phase: burn one time slice of work on every pooled sub-task
        for y in range(0, size):
            # s[y].t = 0
            bb = 0
            for k in range(0, len(pool[y])):
                b1 = bb
                bb = bb + 1
                if b1 < len(pool[y]):
                    i = fpi(pool[y][b1])
                    j = fpj(pool[y][b1])
                    if j == 0 or (t[i][j - 1].endtime is not None ):
                        if t[i][j].time == t1[i][j].time:
                            # first slice: only charge the time since arrival
                            if t[i][j].ori >= etime - timeslice:
                                d = t[i][j].time - (etime - t[i][j].ori)
                            else:
                                d = t[i][j].time - timeslice
                        else:
                            d = t[i][j].time -timeslice
                        if d > 0:
                            t[i][j].time = d
                        else:
                            t[i][j].time = 0
                        if t[i][j].time == 0:
                            # finished inside this slice; d <= 0 backdates the end
                            t[i][j].endtime = etime + d
                            # print("t[",i,"]","[",j,"]",t[i][j].ori, ':', t[i][j].endtime)
                            if j < len(t[i]) - 1:
                                # print(i)
                                # print(j)
                                # hand the next layer its arrival time incl. transfer delay
                                xx = getx(size, q, t[i][j + 1], i, j + 1)
                                t[i][j + 1].ori = t[i][j].endtime + timedelay(t[i][j + 1], v[xx][y], rtt[xx][y])
                            del pool[y][b1]
                            bb = bb - 1
                            s[y].t = s[y].t + 1
        # stop once every queue and every pool is drained
        if num(q, size) == 0 and num(pool, size) == 0:
            #print(1)
            break
    # restore every device's free-slot count for the next evaluation
    for i in range(0, num_node):
        s[i].t = s[i].p
    total = 0
    for i in range(0, num_task):
        total = total + t[i][num_layer-1].endtime - t[i][0].ori
        # print(t[i][1].endtime)
    # print(total / 15)
    return [order, total/num_task]
def evaluate415(s, t1, v ,gene1):
    """Evaluate a gene (one node assignment per task layer): mean task latency.

    Algorithm (translated from the original notes):
    - initialise every lane of every device with endtime = 0
    - for each sub-task t[i][j], combine the parent's endtime with the
      earliest-free lane's endtime on the assigned device
    - update t[i][j]'s end time and that lane's endtime
    - return the average (endtime - arrival) over all tasks

    Uses module globals ``num_task``, ``num_layer``, ``Time`` and helpers
    ``fpj``/``timedelay``.
    """
    t = copy.deepcopy(t1)
    genee = copy.deepcopy(gene1)
    size = len(s)
    # pool[device][lane]: one Pool per concurrency slot of each device
    pool = [0] * size
    for i in range(size):
        pool[i] = [0] * s[i].p
    for i in range(size):
        for j in range(s[i].p):
            pool[i][j] = Pool(i,j)
    for i in range(num_task):
        for j in range(num_layer):
            num_t = i * num_layer + j    # flat gene index of (task i, layer j)
            execution_time = Time[fpj(t[i][j])][genee[num_t]]
            father_end_time = 0
            if j == 0:
                father_end_time = t[i][0].ori
            if j != 0:
                father_end_time = t[i][j-1].endtime
                # add transfer delay when consecutive layers run on different nodes
                if genee[num_t] != genee[num_t-1]:
                    father_end_time = father_end_time + timedelay(t[i][j],v[j][genee[num_t]])
            execution_pool = 0
            wait = 1  # wait=1 means the task must wait after arriving
            wast_time = 100000000
            wait_time = 100000000
            # pick a lane: prefer the free lane wasting the least idle time,
            # otherwise the lane that frees up soonest
            for k in range(s[genee[num_t]].p):
                if pool[genee[num_t]][k].endtime <= father_end_time:
                    wait = 0
                    if father_end_time - pool[genee[num_t]][k].endtime < wast_time:
                        wast_time = father_end_time - pool[genee[num_t]][k].endtime
                        execution_pool = k
                if wait == 1:
                    if pool[genee[num_t]][k].endtime - father_end_time < wait_time:
                        wait_time = pool[genee[num_t]][k].endtime - father_end_time
                        execution_pool = k
            if wait == 0:
                t[i][j].endtime = father_end_time + execution_time
                pool[genee[num_t]][execution_pool].endtime = t[i][j].endtime
            if wait == 1:
                t[i][j].endtime = pool[genee[num_t]][execution_pool].endtime + execution_time
                pool[genee[num_t]][execution_pool].endtime = t[i][j].endtime
    # loop finished: every sub-task's end time is known
    total = 0
    for i in range(0, num_task):
        total = total + t[i][num_layer - 1].endtime - t[i][0].ori
    return total/num_task
def evaluate511(s, t1, v ,gene1):
    """Like :func:`evaluate415` but returns ``[ct, totalcost]``: the number of
    tasks exceeding the global ``DeadLine`` and the total execution cost
    (execution time weighted by ``cost_node`` of the chosen node).

    Same lane-selection scheduling as evaluate415; see its docstring for the
    algorithm (translated from the original notes).
    """
    global DeadLine
    t = copy.deepcopy(t1)
    genee = copy.deepcopy(gene1)
    totalcost = 0
    size = len(s)
    # pool[device][lane]: one Pool per concurrency slot of each device
    pool = [0] * size
    for i in range(size):
        pool[i] = [0] * s[i].p
    for i in range(size):
        for j in range(s[i].p):
            pool[i][j] = Pool(i,j)
    for i in range(num_task):
        for j in range(num_layer):
            num_t = i * num_layer + j    # flat gene index of (task i, layer j)
            execution_time = Time[fpj(t[i][j])][genee[num_t]]
            # accumulate monetary cost of running this layer on the chosen node
            totalcost = totalcost + execution_time * cost_node[genee[num_t]]
            father_end_time = 0
            if j == 0:
                father_end_time = t[i][0].ori
            if j != 0:
                father_end_time = t[i][j-1].endtime
                # add transfer delay when consecutive layers run on different nodes
                if genee[num_t] != genee[num_t-1]:
                    father_end_time = father_end_time + timedelay(t[i][j],v[j][genee[num_t]])
            execution_pool = 0
            wait = 1  # wait=1 means the task must wait after arriving
            wast_time = 100000000
            wait_time = 100000000
            # pick a lane: prefer the free lane wasting the least idle time,
            # otherwise the lane that frees up soonest
            for k in range(s[genee[num_t]].p):
                if pool[genee[num_t]][k].endtime <= father_end_time:
                    wait = 0
                    if father_end_time - pool[genee[num_t]][k].endtime < wast_time:
                        wast_time = father_end_time - pool[genee[num_t]][k].endtime
                        execution_pool = k
                if wait == 1:
                    if pool[genee[num_t]][k].endtime - father_end_time < wait_time:
                        wait_time = pool[genee[num_t]][k].endtime - father_end_time
                        execution_pool = k
            if wait == 0:
                t[i][j].endtime = father_end_time + execution_time
                pool[genee[num_t]][execution_pool].endtime = t[i][j].endtime
            if wait == 1:
                t[i][j].endtime = pool[genee[num_t]][execution_pool].endtime + execution_time
                pool[genee[num_t]][execution_pool].endtime = t[i][j].endtime
    # loop finished: every sub-task's end time is known
    ct = 0
    total = 0  # total latency; computed but not returned
    for i in range(0, num_task):
        if t[i][num_layer - 1].endtime - t[i][0].ori > DeadLine:
            ct = ct + 1
        total = total + t[i][num_layer - 1].endtime - t[i][0].ori
    return [ct,totalcost]
## Get the index of the task T that this sub-task belongs to. ##
def fpi(task):
    """Return ``task.i``, or None when no task is given."""
    return task.i if task is not None else None
## Get the layer index of this sub-task within its task T. ##
def fpj(task):
    """Return ``task.j``, or None when no task is given."""
    return task.j if task is not None else None
## Data transmission time. ##
def timedelay(t, v):
    """Transfer delay in milliseconds: data volume divided by link rate.

    NOTE(review): ``evaluate()`` calls this with a third ``rtt`` argument,
    which would raise TypeError -- confirm which signature is intended.
    """
    milliseconds = (t.d / v) * 1000
    return milliseconds
num_node = 7 # total number of nodes: mobile + edge + cloud
num_task = 12 # total number of tasks
num_layer = 7 # layers per task
num_mobile = 4 # number of mobile devices
task_node = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 0] # which mobile device each task starts on
d = [1.2, 0.3, 0.8, 0.2, 0.4, 0.1, 0.05] # data volume transferred into each task layer
cost_node = [0, 0, 0, 0, 1.47, 1.47, 1]  # per-time-unit cost of each node (mobiles are free)
s = [0] * num_node # set of all nodes
s[0] = S(1, 1) # concurrency: (total slots, free slots)
s[1] = S(1, 1)
s[2] = S(1, 1)
s[3] = S(1, 1)
s[4] = S(4, 4)
s[5] = S(4, 4)
s[6] = S(8, 8)
# t[i][j] = layer j of task i, unit work and per-layer data volume d[j]
t = []
for i in range(num_task):
    t.append([0] * num_layer)
for i in range(0, num_task):
    for j in range(0, num_layer):
        t[i][j] = T(1, d[j], i, j)
start = [0] * num_mobile
for j in range(0, num_mobile):
    start[j] = Q()
start[0] = [0, 4, 8, 11] # tasks generated on each mobile device
start[1] = [1, 5, 9]
start[2] = [2, 6, 10]
start[3] = [3, 7]
delay = 2500 # task arrival window (ms)
# spread each device's task arrival times evenly across the window
for j in range(0, num_mobile):
    c = delay/len(start[j])
    ss = 0
    for i in start[j]:
        t[i][0].ori = ss*c
        ss = ss + 1
# execution time of each layer on each node
Time = np.array([[1032, 1032, 1032, 1032, 130, 130, 69],
                 [121, 121, 121, 121, 16, 16, 8],
                 [1584, 1584, 1584, 1584, 189, 189, 92],
                 [251, 251, 251, 251, 31, 31, 15],
                 [2313, 2313, 2313, 2313, 297, 297, 152],
                 [235, 235, 235, 235, 28, 28, 14],
                 [5425, 5425, 5425, 5425, 677, 677, 330]])
Time = Time / 4
# transmission rate between resource nodes
v = np.array([[100000, 0.001, 0.001, 0.001, 0.001, 10, 0.5],
              [0.001, 100000, 0.001, 0.001, 10, 10, 0.5],
              [0.001, 0.001, 100000, 0.001, 10, 10, 0.5],
              [0.001, 0.001, 0.001, 100000, 10, 0.001, 0.5],
              [0.001, 10, 10, 10, 100000, 0.001, 0.5],
              [10, 10, 10, 0.001, 0.001, 100000, 0.5],
              [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 100000]])
# per-node task queues plus one flat queue of every sub-task
q = [0] * num_node
for j in range(0, num_node):
    q[j] = Q()
a = Q()
for i in range(0, num_task):
    for j in range(0, num_layer):
        a.append(t[i][j])
# population size
pop_size = 100
end_size = num_node
task_size = num_task*num_layer
# nodes each task may connect to  ################################ remember to update
maps = {0: [0, 5, 6],
        1: [1, 4, 5, 6],
        2: [2, 4, 5, 6],
        3: [3, 4, 6],
        4: [0, 5, 6],
        5: [1, 4, 5, 6],
        6: [2, 4, 5, 6],
        7: [3, 4, 6],
        8: [0, 5, 6],
        9: [1, 4, 5, 6],
        10: [2, 4, 5, 6],
        11: [0, 5, 6]}
# Particle: one candidate node assignment for every task layer
class Chromosome():
    """A candidate solution; ``gene[n]`` is the node assigned to flat slot n."""
    def __init__(self):
        # pick a random allowed node for every (task, layer) slot;
        # random.choice consumes the RNG exactly like randint-indexing did
        self.gene = [random.choice(maps[slot // num_layer])
                     for slot in range(task_size)]
        self.time = 0   # fitness: cost / latency, filled by the evaluator
        self.cTask = 0  # number of deadline-missing tasks
def Ran():
    """Random search: repeatedly sample chromosomes and keep the best one
    that misses no deadlines.

    Mutates module globals ``bst`` (best gene) and ``best_time`` (its cost);
    prints the running best after every outer iteration.
    """
    global bst
    global best_time
    for i in range(1000):
        for j in range(100):
            a = Chromosome()  # local; shadows the module-level queue `a`
            for k in range(0, num_task):
                # layer 0 is pinned to the task's originating mobile device
                a.gene[k * num_layer] = task_node[k]
                for p in range(1, num_layer):
                    # enforce non-decreasing node ids along each task
                    # NOTE(review): hard-codes 7 -- assumes num_layer == 7
                    if a.gene[p+(k*7)] < a.gene[p+(k*7)-1]:
                        a.gene[p+(k*7)] = a.gene[p+(k*7)-1]
            [a.cTask, a.time] = evaluate511(s, t, v, a.gene)
            # keep the cheapest gene that violates no deadline (cTask == 0)
            if a.time < best_time and a.cTask == 0:
                best_time = a.time
                bst = copy.deepcopy(a.gene)
        print(bst)
        print(best_time)
        print("----------------------------")
# global best found by Ran()
bst = None
best_time = 10000000000
best_order = None
generation = 1
population = [0] * pop_size
DeadLine = 1000  # per-task latency budget used by evaluate511
# reference gene: first 6 layers on the local mobile device, last layer on the cloud (node 6)
g = [0, 0, 0, 0, 0, 0, 6,
     1, 1, 1, 1, 1, 1, 6,
     2, 2, 2, 2, 2, 2, 6,
     3, 3, 3, 3, 3, 3, 6,
     0, 0, 0, 0, 0, 0, 6,
     1, 1, 1, 1, 1, 1, 6,
     2, 2, 2, 2, 2, 2, 6,
     3, 3, 3, 3, 3, 3, 6,
     0, 0, 0, 0, 0, 0, 6,
     1, 1, 1, 1, 1, 1, 6,
     2, 2, 2, 2, 2, 2, 6,
     0, 0, 0, 0, 0, 0, 6]
# cloud-only gene: everything after layer 0 runs on the cloud (node 6)
g2 = [0, 6, 6, 6, 6, 6, 6,
      1, 6, 6, 6, 6, 6, 6,
      2, 6, 6, 6, 6, 6, 6,
      3, 6, 6, 6, 6, 6, 6,
      0, 6, 6, 6, 6, 6, 6,
      1, 6, 6, 6, 6, 6, 6,
      2, 6, 6, 6, 6, 6, 6,
      3, 6, 6, 6, 6, 6, 6,
      0, 6, 6, 6, 6, 6, 6,
      1, 6, 6, 6, 6, 6, 6,
      2, 6, 6, 6, 6, 6, 6,
      0, 6, 6, 6, 6, 6, 6]
# edge-only gene: everything after layer 0 runs on edge nodes 4/5
g3 = [0, 5, 5, 5, 5, 5, 5,
      1, 5, 5, 5, 5, 5, 5,
      2, 5, 5, 5, 5, 5, 5,
      3, 4, 4, 4, 4, 4, 4,
      0, 5, 5, 5, 5, 5, 5,
      1, 5, 5, 5, 5, 5, 5,
      2, 5, 5, 5, 5, 5, 5,
      3, 4, 4, 4, 4, 4, 4,
      0, 5, 5, 5, 5, 5, 5,
      1, 5, 5, 5, 5, 5, 5,
      2, 5, 5, 5, 5, 5, 5,
      0, 5, 5, 5, 5, 5, 5]
# time the random search, then compare against the hand-built reference genes
ts = time.time()
Ran()
te = time.time()
print('time cost', te - ts, 's')
# print(evaluate511(s,t,v,rtt,g))
print("云 :",evaluate511(s,t,v,g2))
print("边缘 :",evaluate511(s,t,v,g3))
print("本地-云 :",evaluate511(s,t,v,g))
import numpy as np
# 1. Training data
x_train = np.array([1,2,3,4,5,6,7,8,9,10]) # 10 rows, 1 column
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_test = np.array([11,12,13,14,15,16,17,18,19,20])
y_test = np.array([11,12,13,14,15,16,17,18,19,20])
x3 = np.array([101, 102, 103, 104, 105, 106]) # 6 rows, 1 column
x4 = np.array(range(30, 50))
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
# 2. Model definition (layers and node counts)
model.add(Dense(7, input_dim=1, activation="relu")) # input_dim=1 >> input with one column; relu (fully connected layer)
# model.add(Dense(5, input_shape=(1, ), activation="relu")) # input_shape=(1, ) >> input of shape (1, n)
model.add(Dense(13))
model.add(Dense(8))
model.add(Dense(3))
model.add(Dense(1))
# model.summary() # nodes + bias >> 1+1 * 5 = 10, 5+1 * 3 = 18, 3+1 * 4 = 16 ....
# 3. Training
model.compile(loss="mse", optimizer="adam", metrics=["accuracy"])
# run training (feed x,y through the model n samples at a time, repeated n times)
# model.fit(x, y, epochs=20, batch_size=3) # epochs >> repeat the fit n times
# batch_size >> slice the data n samples at a time
model.fit(x_train, y_train, epochs=100, batch_size=1)
# 4. Evaluation / prediction
loss, acc = model.evaluate(x_test, y_test, batch_size=1)
print("acc : ", acc)
# predict y (x >> training data, x2 >> new data predicted with the trained weights)
# acc (for classification, via rounding) vs predict (even 100% acc does not give exact predictions)
y_predict = model.predict(x_test)
print(y_predict)
# RMSE 구하기 (오차비교)
# x_test값 + y_predict >> 제곱한 값들의 평균에 루트 // 낮을수록 정확
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
    """Root-mean-squared error between observed and predicted values."""
    mse = mean_squared_error(y_test, y_predict)
    return np.sqrt(mse)
# report the final error on the held-out test set (lower is better)
print("RMSE : ", RMSE(y_test, y_predict))
import secrets
from flask import request, render_template, redirect, url_for, flash, session
from shoppinglist import app
from shoppinglist.models.dashboard import Dashboard
dashboard = Dashboard()  # in-memory user registry shared by all routes
# NOTE(review): the key is regenerated on every restart, which invalidates all
# existing sessions -- confirm this is acceptable for deployment
app.secret_key = secrets.token_hex(32)
@app.route("/")
@app.route("/signup", methods=['GET', 'POST'])
def signup():
    """Register a new user; renders the signup form on GET or on any failure."""
    if request.method != "POST":
        # plain page view
        return render_template("signup.html", error=None)
    form = request.form
    fields = [form.get(f) for f in ("name", "email", "password", "confirm_password")]
    name, email, password, confirm_password = fields
    if not all(fields):
        return render_template("signup.html", error="all fields required! check to see if all boxes have been filled")
    if password != confirm_password:
        return render_template("signup.html", error="password mismatch: try again")
    if not dashboard.signup(name, email, password):
        return render_template("signup.html", error=f"user with {email} already exists, log in please")
    flash(f"user with email {email} has been registered!")
    return redirect(url_for('login'))
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate a user and start a session."""
    if session.get("logged in"):  # if a user is already logged in
        return redirect(url_for("home"))
    if request.method != "POST":
        return render_template("login.html", error=None)
    email = request.form.get("email")
    password = request.form.get("password")
    if not (email and password):
        return render_template("login.html", error="missing fields: both email and password are required")
    if not dashboard.registry:
        # nobody has registered yet, so no credentials can match
        return render_template('login.html',
                               error='unknown email address: you need to first signup before you can log in')
    if not dashboard.login(email, password):
        return render_template("login.html", error="password incorrect! please try again")
    flash(f"Login successful for {email}")
    session["logged in"] = True
    session["email"] = email
    return redirect(url_for("home"))
@app.route("/view/lists", methods=['GET', 'POST'])
def home():
    """ displays the shopping lists that the user has...
    Displays them in a table with links to edit and delete the lists. """
    if not session.get("logged in"):
        return redirect(url_for("login"))
    current_user = dashboard.registry[session["email"]]
    return render_template("home.html", user=current_user, shoppinglists=current_user.shoppinglists)
@app.route("/add/list/", methods=['POST'])
def add_list():
    """Create a new shopping list for the logged-in user."""
    if not session.get("logged in"):
        return redirect(url_for("login"))
    current_user = dashboard.registry[session["email"]]
    list_name = request.form.get("name")
    notify_date = request.form.get("notify_date")
    new_id = secrets.token_urlsafe(10)
    if not (list_name and notify_date):
        flash("unable to create list: please enter a valid list name")
    elif current_user.create_shoppinglist(new_id, list_name, notify_date):
        flash(f"List with name '{list_name.title()}' has been created")
    else:
        flash(
            f"A shopping list with its name as '{list_name}' already exists")
    return redirect(url_for('home'))
@app.route("/edit/list/<list_id>", methods=['GET', 'POST'])
def edit_list(list_id):
    """Show the edit form (GET) or apply changes to a shopping list (POST)."""
    if not session.get("logged in"):
        return redirect(url_for("login"))
    user = dashboard.registry[session["email"]]
    shoppinglist = user.get_shoppinglist(list_id)
    if not shoppinglist:
        flash("shoppinglist not found!")
        return redirect(url_for('home'))
    if request.method == 'POST':
        name = request.form.get("name")
        notify_date = request.form.get("notify_date")
        # only hit the model when something actually changed
        if shoppinglist.name != name.title() or shoppinglist.notify_date != notify_date:
            if user.edit_shoppinglist(shoppinglist.id, name, notify_date):
                flash("List edited successfully")
            else:
                flash(
                    f"a shopping list with that name ('{name.title()}') already exists")
            return redirect(url_for('home'))
        flash('no changes have been made to the list!')
        return redirect(url_for('home'))
    return render_template('edit_list.html', user=user, shoppinglist=shoppinglist)
@app.route("/delete/list/<list_id>", methods=['GET', 'POST'])
def delete_list(list_id):
    """Confirm (GET) and perform (POST) deletion of a shopping list."""
    if not session.get("logged in"):
        return redirect(url_for("login"))
    current_user = dashboard.registry[session["email"]]
    target = current_user.get_shoppinglist(list_id)
    if not target:
        flash("shoppinglist not found!")
        return redirect(url_for('home'))
    if request.method != 'POST':
        # GET: show the confirmation page
        return render_template('delete_list.html', user=current_user, shoppinglist=target)
    if current_user.delete_shoppinglist(target.id):
        flash("List has been successfully deleted")
    return redirect(url_for("home"))
@app.route("/list/items/<list_id>", methods=["GET", "POST"])
def items(list_id):
    """Show the items of one shopping list."""
    if not session.get("logged in"):
        return redirect(url_for("login"))
    current_user = dashboard.registry[session["email"]]
    target = current_user.get_shoppinglist(list_id)
    # regardless of the request method
    if target:
        return render_template("items.html", user=current_user, shoppinglist=target)
    # unknown list id: don't leave room for an error, fall back to the overview
    return redirect(url_for("home"))
@app.route("/add/list/items/<list_id>", methods=['POST'])
def add_item(list_id):
    """Add a new item to a shopping list."""
    if not session.get("logged in"):
        return redirect(url_for("login"))
    current_user = dashboard.registry[session["email"]]
    target = current_user.get_shoppinglist(list_id)
    if not target:
        return redirect(url_for("home", user=current_user, shoppinglists=current_user.shoppinglists))
    name = request.form.get("name")
    price = request.form.get("price")
    quantity = request.form.get("quantity")
    if name and price and quantity:
        new_item_id = secrets.token_urlsafe(10)
        if not target.add_item(new_item_id, name, price, quantity):
            flash(f"item with name '{name}' already exists!")
    # success, duplicate and missing-field paths all land on the items view
    return redirect(url_for("items", list_id=target.id))
@app.route("/edit/list/items/<list_id>/<item_id>", methods=['GET', 'POST'])
def edit_item(list_id, item_id):
    """Show the edit form (GET) or apply changes to one list item (POST)."""
    if not session.get("logged in"):
        return redirect(url_for("login"))
    user = dashboard.registry[session["email"]]
    shoppinglist = user.get_shoppinglist(list_id)
    if not shoppinglist:
        flash("shopping list not found!")
        return redirect(url_for("home", user=user, shoppinglists=user.shoppinglists))
    item = shoppinglist.get_item(item_id)
    if shoppinglist and not item:
        # flash("item does not exist on this shopping list")
        return redirect(url_for('items', list_id=shoppinglist.id))
    if request.method == 'POST':
        name = request.form.get("name")
        price = request.form.get("price")
        quantity = request.form.get("quantity")
        # only hit the model when something actually changed
        # NOTE(review): price/quantity come in as form strings -- confirm the
        # stored item fields are strings too, or this comparison always differs
        if item.name != name.title() or item.quantity != quantity or item.price != price:
            if shoppinglist.edit_item(item_id, name, price, quantity):
                flash("Item edit successful")
            else:
                flash(
                    f"failed to edit Item: an item with name '{name}' already exists")
            return redirect(url_for('items', list_id=shoppinglist.id))
        flash("no changes were made to the item")
        return redirect(url_for('items', list_id=shoppinglist.id))
    return render_template('edit_item.html', user=user, shoppinglist=shoppinglist, item=item)
@app.route("/delete/list/items/<list_id>/<item_id>", methods=['GET', 'POST'])
def delete_item(list_id, item_id):
    """Confirm (GET) and perform (POST) deletion of one list item."""
    if not session.get("logged in"):
        return redirect(url_for("login"))
    user = dashboard.registry[session["email"]]
    shoppinglist = user.get_shoppinglist(list_id)
    if not shoppinglist:
        flash("shopping list not found!")
        return redirect(url_for("home", user=user, shoppinglists=user.shoppinglists))
    item = shoppinglist.get_item(item_id)
    if shoppinglist and not item:
        # flash("item does not exist on this shopping list")
        return redirect(url_for('items', list_id=shoppinglist.id))
    if request.method == 'POST':
        if shoppinglist.remove_item(item.id):
            flash("Item deleted successfully")
        return redirect(url_for('items', list_id=shoppinglist.id))
    return render_template("delete_item.html", user=user, shoppinglist=shoppinglist, item=item)
@app.route("/logout")
def logout():
    """End the current session and return to the login page."""
    if session.get("logged in"):
        session.pop("email")
        session["logged in"] = False
        dashboard.logout()
    return redirect(url_for('login'))
@app.errorhandler(404)
def not_found(_):
    """Render the custom 404 page, personalised when a user is logged in.

    Fixes two defects of the original handler: direct registry indexing could
    raise KeyError for a stale session email (turning the 404 into a 500),
    and the response carried the default 200 status instead of 404.
    """
    user = None
    if session.get("logged in"):
        # .get() tolerates a session email that is no longer registered
        user = dashboard.registry.get(session["email"])
    return render_template('404.html', user=user), 404
|
from __future__ import print_function
from robot_skills import api, base, ears, ebutton, head, lights, perception, robot, sound_source_localisation, speech, \
topological_planner, torso, world_model_ed
from robot_skills.arm import arms, gripper, handover_detector
from robot_skills.simulation import is_sim_mode, SimEButton
class Amigo(robot.Robot):
    """
    Amigo robot: wires up all body parts, arms, HRI devices and planners on
    top of the generic :class:`robot.Robot` base.
    """
    def __init__(self, connection_timeout=robot.DEFAULT_CONNECTION_TIMEOUT):
        """
        Constructor

        :param connection_timeout: timeout to wait for ROS connections
        :type connection_timeout: Optional[float]
        """
        super(Amigo, self).__init__(robot_name="amigo", connection_timeout=connection_timeout)

        # mobility and body
        self.add_body_part('base', base.Base(self.robot_name, self.tf_buffer))
        self.add_body_part('torso', torso.Torso(self.robot_name, self.tf_buffer, self.get_joint_states))

        # construct left arm
        left_arm = arms.Arm(self.robot_name, self.tf_buffer, self.get_joint_states, "arm_left")
        # NOTE: 'ParrallelGripper' spelling comes from the robot_skills API
        left_arm.add_part('gripper', gripper.ParrallelGripper(self.robot_name, self.tf_buffer, 'gripper_left'))
        left_arm.add_part('handover_detector',
                          handover_detector.HandoverDetector(self.robot_name, self.tf_buffer, 'handoverdetector_left'))
        self.add_arm_part('leftArm', left_arm)

        # construct right arm
        right_arm = arms.Arm(self.robot_name, self.tf_buffer, self.get_joint_states, "arm_right")
        right_arm.add_part('gripper', gripper.ParrallelGripper(self.robot_name, self.tf_buffer, 'gripper_right'))
        right_arm.add_part('handover_detector',
                           handover_detector.HandoverDetector(self.robot_name, self.tf_buffer, 'handoverdetector_right'))
        self.add_arm_part('rightArm', right_arm)

        # sensing
        self.add_body_part('head', head.Head(self.robot_name, self.tf_buffer))
        self.add_body_part('perception', perception.Perception(self.robot_name, self.tf_buffer))
        self.add_body_part('ssl', sound_source_localisation.SSL(self.robot_name, self.tf_buffer))

        # Human Robot Interaction
        self.add_body_part('lights', lights.TueLights(self.robot_name, self.tf_buffer))
        # speech/hmi/ears switch the light colour while talking or listening
        self.add_body_part('speech', speech.Speech(self.robot_name, self.tf_buffer,
                                                   lambda: self.lights.set_color_rgba_msg(lights.SPEAKING),
                                                   lambda: self.lights.set_color_rgba_msg(lights.RESET)))
        self.add_body_part('hmi', api.Api(self.robot_name, self.tf_buffer,
                                          lambda: self.lights.set_color_rgba_msg(lights.LISTENING),
                                          lambda: self.lights.set_color_rgba_msg(lights.RESET)))
        self.add_body_part('ears', ears.Ears(self.robot_name, self.tf_buffer,
                                             lambda: self.lights.set_color_rgba_msg(lights.LISTENING),
                                             lambda: self.lights.set_color_rgba_msg(lights.RESET)))

        # use the simulated emergency button when running in simulation
        ebutton_class = SimEButton if is_sim_mode() else ebutton.EButton
        self.add_body_part('ebutton', ebutton_class(self.robot_name, self.tf_buffer))

        # Reasoning/world modeling
        self.add_body_part('ed', world_model_ed.ED(self.robot_name, self.tf_buffer))

        # Action planning
        self.add_body_part(
            'topological_planner', topological_planner.TopologicalPlanner(self.robot_name, self.tf_buffer)
        )

        self.configure()

    def move_to_pregrasp_pose(self, arm, grasp_target):
        """
        This poses the robot for an inspect.

        :param arm: PublicArm with an available joint_trajectory 'prepare_grasp' to use for grasping the target
        :param grasp_target: kdl.Frame with the pose of the entity to be grasped.
        :return: boolean, false if something went wrong.
        """
        # fire-and-forget: timeout=0 does not wait for the trajectory to finish
        # NOTE(review): grasp_target is unused here -- confirm whether intended
        arm.send_joint_trajectory('prepare_grasp', timeout=0)
        return True
|
# -*- coding: utf-8 -*-
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.contrib.auth.decorators import permission_required
# #index
from models import IndexHead, IndexDash, IndexHopper, IndexCity, IndexAcrepay
from serializers import IndexHeadSerializer, IndexDashSerializer, IndexHopperSerializer, IndexCitySerializer, IndexAcrepaySerializer
#userInfo
from models import UserAge, UserAgeAll, UserSex, UserSexAll, UserIncrease, UserRest
from serializers import UserAgeSerializer, UserAgeAllSerializer, UserSexSerializer, UserSexAllSerializer, UserIncreaseSerializer, UserRestSerializer
#flow
from models import FlowLoanMoney, FlowLoanMoneyNO, FlowLoanMoneySum, FlowDelayRate, FlowDelayRateNO, FlowLoanFund, FlowPaidMoney, FlowC2CFund
from serializers import FlowLoanMoneySerializer, FlowLoanMoneyNOSerializer, FlowLoanMoneySumSerializer, FlowDelayRateSerializer, FlowDelayRateNOSerializer, FlowLoanFundSerializer, FlowPaidMoneySerializer, FlowC2CFundSerializer
#collect
from models import CollectRate, CollectNum, CollectDis
from serializers import CollectRateSerializer, CollectNumSerializer, CollectDisSerializer
#market
from models import MarketNum
from serializers import MarketNumSerializer
#aeya
from models import AeyePassRate, AeyeGetRate, AeyeDelayRate, AeyeDelayRateNO
from serializers import AeyePassRateSerializer, AeyeGetRateSerializer, AeyeDelayRateSerializer, AeyeDelayRateNOSerializer
#model dict
# Maps a lowercase table key (as sent by the client in the 'para' payload)
# to its Django model class and DRF serializer.
tableModel = {
    'indexhead': {
        'models': IndexHead,
        'serializers': IndexHeadSerializer,
    },
    'indexdash': {
        'models': IndexDash,
        'serializers': IndexDashSerializer,
    },
    'indexhopper': {
        'models': IndexHopper,
        'serializers': IndexHopperSerializer,
    },
    'indexcity': {
        'models': IndexCity,
        'serializers': IndexCitySerializer,
    },
    'indexacrepay': {
        'models': IndexAcrepay,
        'serializers': IndexAcrepaySerializer,
    },
    'userage': {
        'models': UserAge,
        'serializers': UserAgeSerializer,
    },
    'userageall': {
        'models': UserAgeAll,
        'serializers': UserAgeAllSerializer,
    },
    'usersex': {
        'models': UserSex,
        'serializers': UserSexSerializer,
    },
    'usersexall': {
        'models': UserSexAll,
        'serializers': UserSexAllSerializer,
    },
    'userincrease': {
        'models': UserIncrease,
        'serializers': UserIncreaseSerializer,
    },
    'userrest': {
        'models': UserRest,
        'serializers': UserRestSerializer,
    },
    'flowloanmoney': {
        'models': FlowLoanMoney,
        'serializers': FlowLoanMoneySerializer,
    },
    'flowloanmoneyno': {
        'models': FlowLoanMoneyNO,
        'serializers': FlowLoanMoneyNOSerializer,
    },
    'flowloanmoneysum': {
        'models': FlowLoanMoneySum,
        'serializers': FlowLoanMoneySumSerializer,
    },
    'flowdelayrate': {
        'models': FlowDelayRate,
        'serializers': FlowDelayRateSerializer,
    },
    'flowdelayrateno': {
        'models': FlowDelayRateNO,
        'serializers': FlowDelayRateNOSerializer,
    },
    'flowloanfund': {
        'models': FlowLoanFund,
        'serializers': FlowLoanFundSerializer,
    },
    'flowpaidmoney': {
        'models': FlowPaidMoney,
        'serializers': FlowPaidMoneySerializer,
    },
    'flowc2c': {
        'models': FlowC2CFund,
        'serializers': FlowC2CFundSerializer,
    },
    'collectrate': {
        'models': CollectRate,
        'serializers': CollectRateSerializer,
    },
    'collectnum': {
        'models': CollectNum,
        'serializers': CollectNumSerializer,
    },
    'collectdis': {
        'models': CollectDis,
        'serializers': CollectDisSerializer,
    },
    'marketnum': {
        'models': MarketNum,
        'serializers': MarketNumSerializer,
    },
    'aeyepassrate': {
        'models': AeyePassRate,
        'serializers': AeyePassRateSerializer,
    },
    'aeyegetrate': {
        'models': AeyeGetRate,
        'serializers': AeyeGetRateSerializer,
    },
    'aeyedelayrate': {
        'models': AeyeDelayRate,
        'serializers': AeyeDelayRateSerializer,
    },
    'aeyedelayrateno': {
        'models': AeyeDelayRateNO,
        'serializers': AeyeDelayRateNOSerializer,
    },
}
import datetime
from django.db.models import Max
#@permission_required('part_admin.dayapi')
@api_view(['POST'])
def indexhead_item(request):
    """Generic data endpoint: POST a 'para' payload (a dict literal) naming a
    table from ``tableModel`` and either explicit filters ('para' list of
    {key, value, way}) or a 'content' of 'item' (latest day) / 'list' (all
    rows); returns the serialized queryset.

    Security fixes vs. the original: the payload is parsed with
    ``ast.literal_eval`` instead of ``eval`` (which executed arbitrary client
    code), and filters are built as keyword arguments instead of eval-ing a
    string with interpolated client values.  The fallthrough now returns an
    explicit 400 instead of referencing an unbound ``serializer``.
    """
    import ast
    if request.method == 'POST':
        raw = request.POST.get('para', None)
        if raw:
            # literal_eval only parses Python literals; it cannot run code
            paralist = ast.literal_eval(raw)
            tables = paralist.get('table', None)
            content = paralist.get('content', None)
            if tables and content:
                objectModel = tableModel[tables]['models']
                objectSerializer = tableModel[tables]['serializers']
                para = paralist.get('para', [])
                if para:
                    temp = objectModel.objects.all()
                    for xkey in para:
                        key = xkey.get('key', '')
                        value = xkey.get('value', '')
                        way = xkey.get('way', '')
                        way = '__' + way if way else ''
                        # build the filter kwarg dynamically instead of eval()
                        temp = temp.filter(**{key + way: value})
                    serializer = objectSerializer(temp, many=True)
                    return Response(serializer.data)
                else:
                    if content == 'item':
                        # rows of the most recent createDate in the table
                        #yesterday = str(datetime.datetime.now() - datetime.timedelta(days=1))[:10]
                        # NOTE(review): .values()[0] relies on Python 2 dict
                        # semantics, matching the rest of this module
                        yesterday = str(objectModel.objects.all().aggregate(Max('createDate')).values()[0])[:10]
                        temp = objectModel.objects.filter(createDate=yesterday)
                        serializer = objectSerializer(temp, many=True)
                        return Response(serializer.data)
                    elif content == 'list':
                        temp = objectModel.objects.all()
                        serializer = objectSerializer(temp, many=True)
                        return Response(serializer.data)
    return Response({'detail': 'bad request'}, status=status.HTTP_400_BAD_REQUEST)
from scipy import signal, fft
from scipy.io import wavfile
import numpy as np
GROUND_TRUTH_FILE = "music_speech.mf"  # tab-separated: <wav path>\t<class label>
RESULT_FILE = "results.arff"           # WEKA ARFF output
BUFFER_LEN = 1024                      # analysis frame length in samples
HOP_SIZE = 512                         # frame hop (50% overlap)
L = 0.85 # used for SRO
PRECISION = "%.6f"                     # numeric formatting for ARFF rows
# ARFF header: one mean and one std-dev attribute per feature, plus the class
HEADER = "@RELATION music_speech\n" \
         "@ATTRIBUTE RMS_MEAN NUMERIC\n" \
         "@ATTRIBUTE ZCR_MEAN NUMERIC\n" \
         "@ATTRIBUTE SC_MEAN NUMERIC\n" \
         "@ATTRIBUTE SRO_MEAN NUMERIC\n" \
         "@ATTRIBUTE SFM_MEAN NUMERIC\n" \
         "@ATTRIBUTE RMS_STD NUMERIC\n" \
         "@ATTRIBUTE ZCR_STD NUMERIC\n" \
         "@ATTRIBUTE SC_STD NUMERIC\n" \
         "@ATTRIBUTE SRO_STD NUMERIC\n" \
         "@ATTRIBUTE SFM_STD NUMERIC\n" \
         "@ATTRIBUTE class {music,speech}\n" \
         "@DATA\n"
def main():
    """Extract time- and frequency-domain features for every wav file listed
    in GROUND_TRUTH_FILE and write one ARFF row per file to RESULT_FILE.

    Fix vs. the original: both file handles are managed with ``with`` so they
    are closed even when an exception occurs (the original leaked them).
    """
    with open(GROUND_TRUTH_FILE, "r") as ground_truth, \
            open(RESULT_FILE, "w") as result:
        # Write header to output file
        result.write(HEADER)
        # Main loop to perform wav calculations
        for line in ground_truth:
            line_arr = line.split("\t")
            wav_file_name = "music_speech/" + line_arr[0].strip()
            wav_file_type = line_arr[1].strip()
            # Split up wav file into overlapping buffers
            buffers = get_buffers_from_wav(wav_file_name)
            # Time-domain features: RMS energy and zero-crossing rate
            rms_arr = calc_rms_arr(buffers)
            rms_mean = np.mean(rms_arr)
            rms_std = np.std(rms_arr)
            zcr_arr = calc_zcr_arr(buffers)
            zcr_mean = np.mean(zcr_arr)
            zcr_std = np.std(zcr_arr)
            # Convert from time domain to frequency domain
            windows = get_windows_from_buffers(buffers)
            # Frequency-domain features: centroid, roll-off, flatness
            sc_arr = calc_sc_arr(windows)
            sc_mean = np.mean(sc_arr)
            sc_std = np.std(sc_arr)
            sro_arr = calc_sro_arr(windows)
            sro_mean = np.mean(sro_arr)
            sro_std = np.std(sro_arr)
            sfm_arr = calc_sfm_arr(windows)
            sfm_mean = np.mean(sfm_arr)
            sfm_std = np.std(sfm_arr)
            result.write(PRECISION % rms_mean + "," +
                         PRECISION % zcr_mean + "," +
                         PRECISION % sc_mean + "," +
                         PRECISION % sro_mean + "," +
                         PRECISION % sfm_mean + "," +
                         PRECISION % rms_std + "," +
                         PRECISION % zcr_std + "," +
                         PRECISION % sc_std + "," +
                         PRECISION % sro_std + "," +
                         PRECISION % sfm_std + "," +
                         wav_file_type + "\n")
# Function to calculate buffers
def get_buffers_from_wav(wav_file_name):
    """Read a wav file and slice it into overlapping analysis buffers.

    Samples are normalised from 16-bit integer range to floats in [-1, 1)
    (assumes 16-bit PCM input — TODO confirm; other sample widths would be
    mis-scaled). Buffers are BUFFER_LEN samples long, advancing by HOP_SIZE;
    any trailing partial buffer is discarded, exactly as the original
    num_buffers arithmetic did.
    """
    _, file_data = wavfile.read(wav_file_name)  # sample rate is not needed
    data = file_data / 32768.0  # convert to floating-point samples
    buffers = []
    # Step directly over every full-buffer start position; equivalent to the
    # original int(len/HOP_SIZE - 1) loop with its full-length check
    for start in range(0, len(data) - BUFFER_LEN + 1, HOP_SIZE):
        buffers.append(data[start:start + BUFFER_LEN])
    return buffers
# Convert buffers from time domain to frequency domain
def get_windows_from_buffers(buffers):
    """Hamming-window each buffer and transform it to the frequency domain.

    Returns a list of complex spectra truncated to the first
    len(buf) // 2 + 1 bins (real input gives a symmetric spectrum, so the
    second half is redundant).
    """
    windows = []
    for buf in buffers:
        # signal.hamming was removed in SciPy 1.13 — the supported spelling
        # is signal.windows.hamming (identical symmetric window)
        windowed = buf * signal.windows.hamming(len(buf))
        # scipy.fft is a module in modern SciPy, not a callable: fft(win)
        # raises TypeError. Call the module's fft function explicitly.
        spectrum = fft.fft(windowed)
        # Only keep the non-negative-frequency half of the spectrum
        windows.append(spectrum[:len(spectrum) // 2 + 1])
    return windows
# Calculation of time domain features
# Root mean squared
def calc_rms_arr(buffers):
    """Return a list holding the RMS of every buffer, in order."""
    return [calc_rms(chunk) for chunk in buffers]
def calc_rms(buffer):
    """Root mean square of the samples in *buffer*."""
    return np.sqrt(np.mean(np.square(buffer)))
# Zero crossings rate
def calc_zcr_arr(buffers):
    """Return a list holding the zero-crossing rate of every buffer."""
    return [calc_zcr(chunk) for chunk in buffers]
def calc_zcr(buffer):
    """Zero-crossing rate: fraction of adjacent sample pairs whose sign
    flips. Pairs involving an exactly-zero sample (sign 0) produce a diff
    of 1, not 2, and so are not counted — same as the original.
    """
    sign_jumps = np.abs(np.diff(np.sign(buffer)))
    return np.count_nonzero(sign_jumps == 2) / (len(buffer) - 1)
# Calculation of frequency domain features
# Spectral centroid
def calc_sc_arr(windows):
    """Return a list holding the spectral centroid of every window."""
    return [calc_sc(spectrum) for spectrum in windows]
def calc_sc(buffer):
    """Spectral centroid: the magnitude-weighted mean bin index.

    Vectorised replacement for the original per-bin Python loop — one
    NumPy weighted sum instead of len(buffer) interpreted iterations.
    NOTE(review): an all-zero spectrum still divides by zero, as the
    original did.
    """
    magnitudes = np.abs(buffer)
    return np.sum(np.arange(len(buffer)) * magnitudes) / np.sum(magnitudes)
# Spectral roll-off
def calc_sro_arr(windows):
    """Return a list holding the spectral roll-off bin of every window."""
    return [calc_sro(spectrum) for spectrum in windows]
def calc_sro(buffer):
    """Spectral roll-off: the smallest bin index at which the cumulative
    magnitude reaches fraction L of the total magnitude.

    The original `while 1` loop had an unreachable `return -1` and could
    raise IndexError if floating-point accumulation never quite reached
    L * total; a cumulative sum with an explicit fallback fixes both.
    """
    magnitudes = np.abs(buffer)
    threshold = L * np.sum(magnitudes)
    hits = np.nonzero(np.cumsum(magnitudes) >= threshold)[0]
    # Fallback to the last bin if rounding keeps the cumsum below threshold
    return int(hits[0]) if len(hits) else len(buffer) - 1
# Spectral flatness measure
def calc_sfm_arr(windows):
    """Return a list holding the spectral flatness of every window."""
    return [calc_sfm(spectrum) for spectrum in windows]
def calc_sfm(buffer):
    """Spectral flatness: geometric mean over arithmetic mean of the
    magnitude spectrum (1.0 for a perfectly flat spectrum).
    Zero-magnitude bins drive log() to -inf and the result to 0,
    exactly as in the original formulation.
    """
    magnitudes = np.abs(buffer)
    geometric_mean = np.exp(np.mean(np.log(magnitudes)))
    arithmetic_mean = np.mean(magnitudes)
    return geometric_mean / arithmetic_mean
# Guard the entry point so importing this module does not trigger the run
if __name__ == "__main__":
    main()