text stringlengths 38 1.54M |
|---|
import asyncio, discord
from discord.ext import commands
from config import token
from service.crawling import Crawling
from service.statistics import Statistics
from service.utils import Util
my_token = token
game = discord.Game("도움말 : !도움말")
bot = commands.Bot(command_prefix="!", status=discord.Status.online, activity=game)
@bot.event
async def on_ready():
    # Fired once the gateway connection is established and the bot is usable.
    print("봇 시작")
@bot.command()
async def 거래내역(ctx, *param):
    # "!거래내역 <a> <b>" — replies with a transaction-history embed.
    # NOTE(review): assumes exactly two positional args; fewer raises
    # IndexError, which the generic handler below swallows.
    await ctx.send("잠시만 기다려주세요")
    try:
        await ctx.send(embed=Statistics.my_transaction_embed(param[0], param[1]))
    except Exception as e:
        print(e)
        await ctx.send("오류가 발생하였습니다.")
@bot.command()
async def 이상치(ctx):
    # "!이상치" — replies with an embed of larger-than-usual expenditures.
    await ctx.send("잠시만 기다려주세요")
    try:
        await ctx.send(embed=Statistics.find_larger_than_usual_expenditure())
    except Exception as e:
        print(e)
        await ctx.send("오류가 발생하였습니다.")
@bot.command()
async def 잔액통계(ctx):
    # "!잔액통계" — replies with a balance-history graph as a file attachment.
    await ctx.send("잠시만 기다려주세요")
    try:
        await ctx.send(file=Statistics.get_balance_graph())
    except Exception as e:
        print(e)
        await ctx.send("오류가 발생하였습니다.")
bot.run(my_token) |
""" The same appearence as in Jupyter Notebook,
except bold font for the header and interleaved backround for rows.
"""
from IPython.display import display
import numpy as np
import pandas as pd
# Demo frame mixing a broadcast scalar, Timestamp, Series, ndarray and
# Categorical column (mirrors the pandas "10 minutes to pandas" example).
df = pd.DataFrame({ 'A' : 1.,
                    'B' : pd.Timestamp('20130102'),
                    'C' : pd.Series(1, index=list(range(4)), dtype='float32'),
                    'D' : np.array([3] * 4, dtype='int32'),
                    'E' : pd.Categorical(["test", "train", "test", "train"]),
                    'F' : 'foo' })
display(df)  # rich HTML table rendering inside Jupyter
|
# Generated by Django 3.2 on 2021-04-12 11:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: photo categories and the photos that reference them."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Categories',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='ViewPhoto',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('images', models.ImageField(upload_to='')),
                ('description', models.TextField()),
                # FK survives category deletion: set to NULL instead of cascading.
                ('Categories', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='store.categories')),
            ],
        ),
    ]
|
#import sys, os, socket
#ipdefine contains the list of IP addresses we want to check
import subprocess
import ipdefine
#Let's see if the device is on the network right now
#for device, ipadd in ipdefine.ip_address.iteritems():
# try:
# socket.gethostbyaddr(ipadd)
# # If previous line doesn't throw exception, IP address is being used; let's perform some action
# print device+" on network"
# except socket.herror:
# # socket.gethostbyaddr() throws error, so IP is not being used at present; device not on network
# print device+" not on network"
# Python 2 script: ping each configured device once ("-n 1" is the
# Windows ping count flag) and report its reachability.
for device, ipadd in ipdefine.ip_address.iteritems():
    # NOTE(review): stdout=False is passed where a file/descriptor is
    # expected (False == fd 0, i.e. stdin); presumably meant to suppress
    # output — confirm and use a devnull file object instead.
    res = subprocess.call(['ping','-n','1',ipadd],stdout=False)
    if res == 0:
        print device+" is on network."
    elif res == 2:
        print device+" is not responding."
    else:
        print "Ping of "+device+" failed."
# -*- coding: utf-8 -*-
from PIL import Image
import os
image_directory = '/Users/vdtang/Documents/test'
grayscale_directory = '/Users/vdtang/Documents/test_grayscale'
# Ensure the output directory exists on first run.
if not os.path.isdir(grayscale_directory):
    os.makedirs(grayscale_directory)
counter = 0
for file in os.listdir(image_directory):
    # Only ".jpeg" files are converted; ".jpg" is silently skipped.
    if file.lower().endswith(".jpeg"):
        # LA (luminance+alpha) then RGB yields a 3-channel grayscale image
        # that can still be saved as JPEG.
        img = Image.open(image_directory + os.sep + file).convert('LA').convert('RGB')
        img.save(grayscale_directory + os.sep + file)
        print(counter)  # progress: index of the file just converted
        counter+=1
|
from sys import argv, stdout, displayhook, path
# Python 2 exercise: unpack the script name plus three CLI arguments.
script, first, second, third = argv
#from sys import *
#script = stdout,
#first = displayhook,
#second = path,
#third = argv
print "The script is called: ", script
print "Your first variable is: ", first
print "Your second variable is: ", second
# NOTE(review): the next line is a syntax error even in Python 2 — an
# assignment cannot appear inside a print statement.
print "Your third variable is: ", third = raw_input()
|
#! /usr/bin/python
from __future__ import print_function
import os, sys, re
#### REAL DEMO VERSION
TAB = '\t'
HOSTS = '/etc/hosts'; HOSTS = './hosts'
MDN = 'md.ddn.com'; MDN = 'homerj'
IME = 'ime.' + MDN; IME = 'homerj'
TOP = '/var/named/'; TOP = './'
#TOP = '/etc/named'
#TOP = '/etc/dhcp'
SOA = TOP + 'soa/'; SOA = TOP
PRI = TOP + 'pri/'; PRI = TOP
TMP = TOP + 'tmp/'; TMP = TOP
soa = {}
pri = {}
hdr = """$TTL 1D
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; This Zone is Generated in the Style of RFC2137
;;;; except that it uses the Whole Address rather than
;;;; just the Last Octet. Reverse Zones do something like
;;;;
;;;; $GENERATE 0-255 $.Z.Y.X CNAME X.Y.Z.$.md.ddn.com.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
"""
os.chdir(TOP)
#### zones = os.listdir(SOA)
#### for z in zones:
#### pri[z] = file(TMP + z, 'w')
#### #### soa[z] = file(SOA + z, 'w')
####
#### # [ print(z) for z in zones ]
####
#### sys.stdout = pri[ZONE]
#@ mdn = file(PRI + MDN, 'w'); print(hdr, file=mdn)
ime = file(PRI + IME, 'w'); print(hdr, file=ime)
def AAA_rr(data, out):
    # Emit one A record line; data is a (hostname, address) pair.
    global aaa  # module-level record counter (reported at end of run)
    aaa += 1
    print("%-23s A %s" % data, file=out)
def PTR_rr(data, out):
    # Emit one PTR record line; data is an (address, hostname) pair.
    global ptr
    ptr += 1
    print("%-23s PTR %s" % data, file=out)
def CNM_rr(data, out):
    # Emit one CNAME record line; data is an (alias, target) pair.
    global cnm
    cnm += 1
    print("%-23s CNAME %s" % data, file=out)
def TXT_rr(data, out):
    # Emit one TXT record line; data is an (owner, text) pair.
    global txt
    txt += 1
    print("%-23s TXT %s" % data, file=out)
aaa = ptr = cnm = txt = 0
for line in file(HOSTS).readlines():
(line, junk, junk) = line.partition('#')
fields = re.split(r'\s+', line.strip())
addr = fields.pop(0) if fields else None
host = fields.pop(0) if fields else None
rest = fields
if addr.startswith('127.'): continue
if not addr.startswith('1'): continue
if not addr: continue
if not host: continue
if 1: out = ime
else:
if addr.startswith('10.52.'):
if host.startswith('ime-'):
AAA_rr((host, addr), mdn)
host = re.sub('^ime-','', host)
out = ime
else: out = mdn
print(file=out)
PTR_rr((addr, host), out)
AAA_rr((host, addr), out)
for name in rest:
if name.startswith('#'): break
if name.find('.') != -1: continue
if re.match("^aabbccddeeff$", name): continue
if re.match("^[0-9a-fA-F]{12}$", name): # mac
CNM_rr((name, host), out)
TXT_rr((addr, name), out)
TXT_rr((host, name), out)
else: AAA_rr((name, addr), out) # alias
print(";;;; END ;;;;", file=out)
print("%d A, %d PTR, %d CNAME, %d TXT" % (aaa,ptr,cnm,txt), file=sys.stderr)
sys.stdout.close()
# change this for 2016
#@ os.system('sed -i s/^15......../$(date +%y%m%d%H%M)/ ' + SOA + MDN)
####os.system('sed -i "s/^1[56]......../$(date +%y%m%d%H%M)/" ' + SOA + IME)
|
def dateexam(d, m, y):
    """Validate a (day, month, year) date.

    Returns True for a calendar-valid date no later than 2021-10-09 (the
    hard-coded "today" of the original exercise), False otherwise.

    Fixes over the original: years divisible by 400 (e.g. 2000) are now
    accepted as leap years, and every rejection path returns False
    explicitly instead of falling through and returning None.
    """
    days_31 = (1, 3, 5, 7, 8, 10, 12)
    days_30 = (4, 6, 9, 11)
    # Basic range checks (d < 32 here; exact month lengths below).
    if not (0 < y <= 2021 and 0 < m < 13 and 0 < d < 32):
        return False
    # Reject dates after the hard-coded current date 2021-10-09.
    if not (y < 2021 or m < 10 or (m == 10 and d <= 9)):
        return False
    if m in days_31:
        return True          # d is already known to be <= 31
    if m in days_30:
        return d <= 30
    # February: leap years are divisible by 4, except centuries unless
    # also divisible by 400 (Gregorian rule).
    leap = (y % 4 == 0 and y % 100 != 0) or (y % 400 == 0)
    return d <= (29 if leap else 28)
help(dateexam)
print(dateexam(19, 5, 2003))
|
# -*- coding: utf-8 -*-
"""
computes the lag of the amdf function
Args:
x: audio signal
iBlockLength: block length in samples
iHopLength: hop length in samples
f_s: sample rate of audio data (unused)
Returns:
f frequency
t time stamp for the frequency value
"""
import numpy as np
import math
def PitchTimeAmdf(x, iBlockLength, iHopLength, f_s):
    """Block-wise fundamental-frequency estimation via the AMDF.

    Args:
        x: 1-D audio signal (numpy array).
        iBlockLength: analysis block length in samples.
        iHopLength: hop between consecutive blocks in samples.
        f_s: sample rate in Hz.

    Returns:
        (f, t): per-block frequency estimates in Hz and block time stamps
        in seconds (block centers).
    """
    # initialize: pitch search range 50..2000 Hz
    f_max = 2000
    f_min = 50
    iNumOfBlocks = math.ceil (x.size/iHopLength)
    # compute time stamps (center of each block)
    t = (np.arange(0,iNumOfBlocks) * iHopLength + (iBlockLength/2))/f_s
    # allocate memory
    f = np.zeros(iNumOfBlocks)
    # lag bounds corresponding to the frequency search range
    eta_min = int(round (f_s/f_max))-1
    eta_max = int(round (f_s/f_min))-1
    for n in range(0,iNumOfBlocks):
        i_start = n*iHopLength
        i_stop = np.min([x.size-1, i_start + iBlockLength - 1])
        # all-zero blocks keep f[n] == 0 (treated as "no pitch")
        if not x[np.arange(i_start, i_stop+1)].sum():
            continue
        else:
            x_tmp = x[np.arange(i_start,i_stop+1)]
            afCorr = computeAmdf(x_tmp, eta_max)
            # lag of the AMDF minimum inside the allowed lag range
            f[n] = np.argmin(afCorr[np.arange(eta_min+1, afCorr.size)])+1
            # convert lag back to a frequency in Hz
            f[n] = f_s / (f[n] + eta_min +1)
    return (f,t)
def computeAmdf(x, eta_max):
    """Average magnitude difference function of x for lags 0..eta_max.

    Entries beyond the computed lag range keep their initial value of 1.
    Returns 0 for an empty input (mirrors the original behavior).
    """
    num_samples = x.shape[0]
    if num_samples <= 0:
        return 0
    amdf = np.ones(num_samples)
    for lag in range(min(num_samples, eta_max + 1)):
        diff = x[: num_samples - 1 - lag] - x[lag + 1 :]
        amdf[lag] = np.abs(diff).sum() / num_samples
    return amdf
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
def plotDataset(dataset):
    """Plot every column of the DataFrame in its own stacked subplot."""
    values = dataset.values
    figure, axes = plt.subplots(nrows=len(dataset.columns), ncols=1)
    for i in range(len(dataset.columns)):
        axes[i].plot(values[:, i])
        # Column name rendered inside the plot at the right edge.
        axes[i].set_title(dataset.columns[i], y=0.5, loc='right')
    plt.show()
def plotviolinffpppp(dataset):
    """Side-by-side violin plots of wind speed (ff) and pressure (pppp)."""
    sns.set(style="whitegrid")
    ax1 = plt.subplot(121)
    sns.violinplot(data=dataset[['ff']], ax=ax1)
    ax1.set_ylabel('m/s')
    ax3 = plt.subplot(122)
    sns.violinplot(data=dataset[['pppp']], ax=ax3)
    ax3.set_ylabel('hPa')
    # NOTE(review): unlike the sibling helpers, no plt.show() here —
    # the caller is expected to display or save the figure.
def plotViolinPols(dataset):
    """2x2 grid of violin plots: wind, pressure, pollutants, temperatures."""
    sns.set(style="whitegrid")
    f, axes = plt.subplots(2, 2)
    sns.violinplot(data=dataset[['ff']], ax=axes[0,0])
    axes[0, 0].set_ylabel('m/s')
    sns.violinplot(data=dataset[['pppp']], ax=axes[0,1])
    axes[0, 1].set_ylabel('hPa')
    sns.violinplot(data=dataset[['no2','pm10']], ax=axes[1,0])
    axes[1, 0].set_ylabel(r'$\mu g/m^{3}$')
    sns.violinplot(data=dataset[['ttt','td']], ax=axes[1,1])
    axes[1, 1].set_ylabel(r'$^\circ$C')
    plt.show()
def plotBar(dataset):
    """Bar chart of wind-direction (dd) value frequencies.

    Keys of the frequency table are numeric dd values; they are relabeled
    as integer strings for the x axis.
    """
    # Series.value_counts replaces the deprecated top-level
    # pandas.value_counts (removed in recent pandas releases).
    fq = dataset.dd.value_counts().to_dict()
    new = dict()
    for i in fq:
        new[str(int(i))] = fq[i]
    names = list(new.keys())
    values = list(new.values())
    plt.bar(names, values)
    plt.xlabel('dd')
    plt.show()
def plotAutocorrelation(dataset, numLags):
    """Stacked autocorrelation plots for the NO2 and PM10 columns."""
    cols = ['no2','pm10']
    pols = [r'NO$_{2}$',r'PM$_{10}$']
    font = {'size': 12}
    matplotlib.rc('font', **font)
    f, axes = plt.subplots(2, 1)
    plt.subplots_adjust(hspace=0.25)
    for i in range(2):
        # NOTE(review): these label calls apply to the current (last) axes,
        # not to axes[i] — confirm the intended target.
        plt.xlabel('lag')
        plt.ylabel('r')
        plot_acf(dataset[cols[i]], lags=numLags, ax = axes[i], title=pols[i])
    plt.show()
def plotPartialCorrelation(dataset, col , numlags):
    """Partial autocorrelation plot for a single DataFrame column."""
    series = dataset[col]
    plot_pacf(series, lags=numlags)
    plt.show()
def pearsonCorrelationMatrix(dataset):
    """Show the correlation matrix of all columns except wind direction (dd)."""
    plt.matshow(dataset[list(set(dataset.columns) - {'dd'})].corr())
    plt.show()
#outlier pm10: 38535:500, 33373:350
dataset = pd.read_csv("cleaned_trnavskeknn.csv", sep=',')
#dataset['dtvalue'] = pd.to_datetime(dataset['dtvalue'])
#dataset.set_index('dtvalue', inplace=True)
#cols = dataset.columns.tolist()
#cols = [cols[-1]] + cols[:-1]
#dataset = dataset[cols]
|
# Exercise 3
# Take a list, say for example this one:
# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
# and write a program that prints out all the elements of the list that are less than 5.
# Extras:
# Instead of printing the elements one by one, make a new list that has all the elements less than 5 from this
# list in it and print out this new list.
# Ask the user for a number and return a list that contains only elements from the original list a that are
# smaller than that number given by the user.
import myFunctions
import random
numberList = myFunctions.random_number_list()
print(numberList)
lowerThan5List = []
lowerThanInputList = []
inputNumber = myFunctions.get_int("Enter a number: ")
def printLowerThan5(nList: list):
    """Print every element below 5 and fill the module-level result lists.

    Side effects: appends elements < 5 to lowerThan5List and elements
    < inputNumber to lowerThanInputList.
    """
    print("Numbers lower than 5: ")
    for value in nList:
        if value < 5:
            print(value)
            lowerThan5List.append(value)
        if value < inputNumber:
            lowerThanInputList.append(value)
printLowerThan5(numberList)
print("List of numbers lower than 5: " + str(lowerThan5List))
print("List of numbers lower than input: " + str(lowerThanInputList)) |
#!/usr/bin/python
import socket
from hashlib import md5
import socket
import sys
import threading
def modify(message):
    """Hook for tampering with an intercepted request.

    Currently a pass-through: the request is returned unchanged. Insert
    custom rewriting logic between the marker comments.
    """
    print("\033[91m[!]\033[00m Start modification of request...")
    #### START IMPLEMENTING YOUR LOGIC HERE
    #### END
    response = message
    print("\033[91m[!]\033[00m Modified request:", response)
    return response
def sendTCPMessage(ip, port, message):
    """Send message over a fresh TCP connection and return the first reply.

    Returns up to 4096 bytes of response. The socket is always closed,
    including on error (the original leaked it), and sendall() is used so
    the whole message is transmitted even if a single send() would have
    written only part of the buffer.
    """
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect((ip, port))
        client.sendall(message)
        return client.recv(4096)
    finally:
        client.close()
def handle_client_connection(client_socket, ip, dst_port):
    """Proxy one request: client -> modify() -> upstream server -> client.

    Reads a single request (up to 1024 bytes), forwards the (optionally
    modified) request to ip:dst_port, relays the reply back and closes
    the client socket.
    """
    request = client_socket.recv(1024)
    print("\033[92m[+] Receive from client:\033[00m", request)
    modified_request = modify(request)
    response = sendTCPMessage(ip, dst_port, modified_request)
    print("\033[91m[+] Response from server:\033[00m", response)
    client_socket.send(response)
    client_socket.close()
def main(argv):
    """Run a TCP intercepting proxy.

    argv: [src_port, ip, dst_port]. When exactly three arguments are not
    supplied, a usage hint is printed and the built-in defaults are used.
    """
    src_port = 6665
    ip = '0.0.0.0'
    dst_port = 9999
    if(argv != [] and len(argv) == 3):
        src_port = int(argv[0])
        ip = argv[1]
        dst_port = int(argv[2])
    else:
        print("Help: fixed-proxy <src_port> <ip> <dst_port>")
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((ip, src_port))
    server.listen(5) # max backlog of connections
    print('\033[92m[+] Listening to client on '+ip+":"+str(src_port)+"\033[00m")
    print('\033[91m[+] Sending intercepted packets to server on'+ip+':'+str(dst_port)+"\033[00m")
    while True:
        try:
            # One thread per accepted client; each forwards through
            # handle_client_connection to the upstream server.
            client_sock, address = server.accept()
            print()
            print("\033[96m[+] Accepted connection from "+address[0]+":"+str(address[1])+"\033[00m")
            client_handler = threading.Thread(
                target=handle_client_connection,
                args=(client_sock,ip,dst_port)
            )
            client_handler.start()
        except KeyboardInterrupt:
            break
|
import matplotlib.pyplot as plt
import __builtin__ as base
from numpy import *
from numpy.random import rand, randn, permutation, randint
class agent(object):
    """Epsilon-greedy tabular agent acting in a gridworld environment."""

    def __init__(self, environment):
        assert(isinstance(environment, gridworld))
        self.environment = environment
        self.__init_Qmap()
        self.epsilon = 0.1   # exploration rate
        self.alpha = 0.1     # learning rate
        self.counter = 0     # steps taken in the current epoch

    def __init_Qmap(self):
        # One Q entry per (row, col, action).
        # NOTE(review): 0.01*zeros(...) is still all zeros — possibly
        # 0.01*ones or small random values were intended; confirm.
        env = self.environment
        self.__Qmap = 0.01*zeros(env.dim() + (len(env.actionSet),))

    def initEpoch(self):
        """Reset the environment and the step counter for a new epoch."""
        self.environment.initEpoch()
        self.counter = 0

    def Q(self):
        """Return a defensive copy of the Q table."""
        return self.__Qmap.copy()

    def actions(self):
        # list() so the result is indexable and supports .index() under
        # Python 3, where dict.keys() returns a non-subscriptable view
        # (the original failed there).
        return list(self.environment.actionSet.keys())

    def takeAction(self):
        """Choose an action epsilon-greedily, apply it, return (reward, state, action)."""
        self.counter += 1
        env = self.environment
        state = env.state()
        actions = self.actions()
        if rand() < self.epsilon:
            action = actions[randint(len(actions))]   # explore
        else:
            action = actions[argmax(self.__Qmap[state])]  # exploit
        return (env.transit(action), state, action)

    def updateQ(self, rwd, state, action):
        """Move Q(state, action) toward the observed reward by alpha."""
        index = state + (self.actions().index(action),)
        q = self.__Qmap
        q[index] += self.alpha * (rwd - q[index])

    def train(self):
        pass
class gridworld(object):
    """10x10 grid with border walls, an inner wall segment and one goal.

    Cell codes: 0 floor, 1 wall, 2 goal. The value 7 marks the agent
    position only in the copy returned by field().
    """

    def __init__(self, N = 10):
        assert(N==10)  # the layout below is hard-coded for a 10x10 grid
        self.__init_field(N)
        self.actionSet = {'up': lambda x: (x[0]-1, x[1]),
                          'down': lambda x: (x[0]+1, x[1]),
                          'left': lambda x: (x[0], x[1]-1),
                          'right': lambda x: (x[0], x[1]+1)}
        self.__state = None

    def field_(self):
        # Raw field without the agent marker.
        return self.__field

    def state(self):
        # Current agent position as a (row, col) tuple, or None before
        # initEpoch(). (The original defined this method twice; the
        # duplicate has been removed.)
        return self.__state

    def field(self):
        """Copy of the field with the agent position marked as 7."""
        tmp = self.__field.copy()
        if self.__state is not None:   # identity test, not equality
            tmp[self.__state] = 7
        return tmp

    def dim(self):
        return shape(self.__field)

    def N(self):
        return shape(self.__field)[0]

    def __init_field(self, N):
        self.__field = zeros((N,N), dtype=int)
        self.__field[0,:] = 1      # border walls
        self.__field[-1,:] = 1
        self.__field[:,0] = 1
        self.__field[:,-1] = 1
        self.__field[1:5][:,3] = 1  # inner wall: rows 1-4, column 3
        self.__field[3,6] = 2       # goal cell

    def isAvailable(self, place):
        """True when place is not a wall."""
        return (self.__field[place] != 1)

    def status(self):
        """'Floor' / 'Wall' / 'Goal' for the current agent cell."""
        return {0:'Floor', 1:'Wall', 2:'Goal'}.get(self.__field[self.__state])

    def availablePlaces(self):
        """All floor cells (excludes walls and the goal)."""
        dim = self.dim()
        return [(r,c)
                for r in range(dim[0])
                for c in range(dim[1])
                if self.__field[r,c] == 0]

    def initEpoch(self):
        """Place the agent on a uniformly random floor cell."""
        candidate = self.availablePlaces()
        self.__state = candidate[randint(len(candidate))]
        return self

    def transit(self, action):
        """Apply action; return reward: 100 goal, -1 floor, -10 blocked."""
        tmp = self.actionSet.get(action)(self.__state)
        if self.isAvailable(tmp):
            self.__state = tmp
            # BUG FIX: the goal cell is stored as 2 (7 only marks the agent
            # in field() copies), so the goal reward was unreachable before.
            if self.__field[tmp] == 2:
                return 100
            else:
                return -1
        else:
            return -10
def test():
    """Smoke test: build a world and agent, take one step, print state."""
    gw = gridworld()
    a = agent(gw)
    gw.initEpoch()
    print(gw.field())
    print(gw.state())
    #print(gw.transit('up'))
    print(a.takeAction())
    print(gw.state())
    print(gw.field())
    print(gw.availablePlaces())
def main():
    """Train the agent for 20 epochs, then visualize the learned Q table."""
    gw = gridworld()
    a = agent(gw)
    for epoch in range(20):
        a.initEpoch()
        while True:
            rwd, stat, act = a.takeAction()
            a.updateQ(rwd, stat, act)
            if gw.status() == 'Goal':
                break
            if mod(a.counter, 10)==0:
                print(gw.state())
                print(gw.field())
    print('Finished')
    print(a.counter)
    print(gw.state())
    print(gw.field())
    # One heat map per action. BUG FIX: subplot indices are 1-based —
    # plt.subplot(2, 2, 0) raises in modern matplotlib — hence i + 1.
    # list() makes the action names indexable under Python 3 as well.
    Q = transpose(a.Q(), (2,0,1))
    action_names = list(a.actions())
    for i in range(4):
        plt.subplot(2, 2, i + 1)
        plt.imshow(Q[i], interpolation='nearest')
        plt.title(action_names[i])
        plt.colorbar()
    plt.show()
    # Greedy-policy direction field: (right-left, up-down) components.
    plt.quiver(Q[0]-Q[1], Q[3]-Q[2])
    plt.show()
|
import numpy as np
#0.15582822
if __name__ == "__main__":
x, y = 1, 2
p, q = 3, 4
r = 1
for %R %x in (*.jpg) do (
for /f "tokens=1-3 delims=. " %%F in ("%%A") do (
set /a a=%%G
set zeros=
if !a! LSS 1000 set zeros=0
if !a! LSS 100 set zeros=00
if !a! LSS 10 set zeros=000
set "name=%%F !zeros!!a!.%%H"
echo ren "%%A" "!name!"
)
)
|
from django.contrib.sitemaps import Sitemap
from dish.files.models import (
Model_1,
Model_2,
)
class CuisinesSitemap |
import gensim, logging, os
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import stat
import fileinput
import time
import random
import sys, traceback
import subprocess
from subprocess import Popen, PIPE
import cPickle
import re
import gzip
class MySentences(object):
    """Iterate sentences (token lists) from every *txt* file in a directory.

    Python 2 code (print statements). Suitable as a streaming corpus for
    gensim Word2Vec: each __iter__ pass re-reads the files from disk.
    """
    def __init__(self, dirname):
        self.dirname = dirname
    def __iter__(self):
        for fname in os.listdir(self.dirname):
            # Only files whose name contains "txt" are treated as corpus text.
            if "txt" not in fname:
                continue
            print fname
            for line in open(os.path.join(self.dirname, fname)):
                line = line.split()
                line = [x.strip() for x in line]
                yield line
class MySentencesBigram(object):
    """Like MySentences, but passes each sentence through a Phraser.

    Python 2 code. The phraser rewrites collocations, e.g. "los angeles"
    becomes the single token "los_angeles".
    """
    def __init__(self, dirname,aBigram):
        self.dirname = dirname
        self.bigram = aBigram ## need to train data to get a bigram (or borrow it somewhere else)
    def __iter__(self):
        for fname in os.listdir(self.dirname):
            if "txt" not in fname:
                continue
            print fname
            for line in open(os.path.join(self.dirname, fname)):
                line = line.split()
                line = [x.strip() for x in line]
                line = self.bigram[line] ## convert "los angeles" to "los_angeles"
                yield line
def submitJobs (path2TextFiles , file2savePath, modelName2save, doBigram, bigramPath, minCountOfWord, dimensionOfVec):
    """Train a Word2Vec model over the text files in a directory.

    Args:
        path2TextFiles: directory of corpus *txt* files.
        file2savePath: output directory (created if missing).
        modelName2save: file name for the saved model.
        doBigram: 1 to load bigramPath and stream phrased sentences.
        bigramPath: saved gensim Phraser (used only when doBigram == 1).
        minCountOfWord: min_count passed to Word2Vec.
        dimensionOfVec: embedding size passed to Word2Vec.
    """
    if not os.path.exists(file2savePath):
        os.mkdir(file2savePath)
    print ("begin\n")
    if doBigram == 1:
        print ("now loading a bigram\n")
        bigram = gensim.models.phrases.Phraser.load(bigramPath)
        print ("now loading sentences\n")
        sentences = MySentencesBigram(path2TextFiles,bigram)
    else:
        print ("now loading sentences\n")
        sentences = MySentences(path2TextFiles)
    print ('now running model\n')
    model = gensim.models.Word2Vec(sentences,min_count=minCountOfWord,size=dimensionOfVec,max_vocab_size=120000000,workers=8,window=5)
    print ('finished running, now save file\n')
    model.save(os.path.join(file2savePath,modelName2save))
    print ('finished saving file\n')
### -------------------------------------------------------
### -------------------------------------------------------
# CLI entry: text_dir save_dir model_name do_bigram bigram_path min_count dim
# sys.argv[1]..sys.argv[7] are consumed below, so 8 entries (including the
# script name) are required — the original `< 7` check still allowed an
# IndexError when exactly 7 entries were present.
if len(sys.argv) < 8:
    print("Usage: \n")
    sys.exit(1)
else:
    submitJobs ( sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), sys.argv[5] , int(sys.argv[6]), int(sys.argv[7]) )
|
class Solution:
    def shortestPalindrome(self, s):
        """Return the shortest palindrome formed by prepending chars to s.

        Builds the KMP failure table of s + '*' + reversed(s); its final
        entry is the length of the longest palindromic prefix of s, so the
        reversed remainder is prepended.
        """
        combined = s + "*" + s[::-1]
        failure = [0]
        for pos in range(1, len(combined)):
            k = failure[pos - 1]
            while k > 0 and combined[k] != combined[pos]:
                k = failure[k - 1]
            if combined[k] == combined[pos]:
                k += 1
            failure.append(k)
        return s[failure[-1]:][::-1] + s
if __name__ == "__main__":
solution = Solution()
print(solution.shortestPalindrome("aacecaaa"))
|
"""Evan, please check in the latest version, so that I can start from it"""
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import random
import collections
import time
start_time = time.time()
def elapsed(sec):
    """Format a duration given in seconds as "N sec", "N min" or "N hr"."""
    minute = 60
    hour = 60 * 60
    if sec < minute:
        return "{} sec".format(sec)
    if sec < hour:
        return "{} min".format(sec / minute)
    return "{} hr".format(sec / hour)
# Target log path
logs_path = 'tmp'
writer = tf.summary.FileWriter(logs_path)
# Text file containing words for training
training_file = '..\outputFiles\WordCountBytecodeHex.txt'
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import time
import pycurl
import pymysql
import subprocess
from multiprocessing import Pool
# 导入配置文件
from config1 import *
# 记录日志
def write_log(e):
    """Append an exception/message to error.log with a local timestamp.

    The with-statement already guarantees the file is closed, so the
    original's nested try/finally + explicit f.close() was redundant and
    has been removed (comments translated from Chinese).
    """
    # time the problem occurred
    err_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    error_info = "[%s] %s\r\n" % (err_time, e)
    with open('error.log','a+') as f:
        f.write(error_info)
# 建立数据库连接
def db_connect():
    """Open a MySQL connection; on failure, log, wait RETRY seconds, retry.

    Returns:
        (conn, cursor) for the database configured in config1.

    NOTE(review): retries recurse without bound — persistent outages will
    eventually hit the recursion limit.
    """
    try:
        # connection parameters are star-imported from config1
        conn = pymysql.connect(host=HOST, port=PORT, user=USER, passwd=PASSWORD, db=DB, charset='utf8')
        cursor = conn.cursor()
        return conn,cursor
    except Exception as e:
        print("建立数据库连接出现异常")
        write_log(e)
        time.sleep(RETRY)
        return db_connect()
# 关闭数据库连接
def db_close(cursor,conn):
    """Close cursor and connection; on failure, log, wait RETRY, retry.

    NOTE(review): like db_connect, this retries via unbounded recursion.
    """
    try:
        cursor.close()
        conn.close()
    except Exception as e:
        print("关闭数据库连接出现异常")
        write_log(e)
        time.sleep(RETRY)
        return db_close(cursor,conn)
# 获取异常事件类型id
def event_type(e):
    """Map an error message to an event-type id.

    Scans all webmoni_event_type rows and returns the id (column 0) of the
    first row whose keyword (column 1) occurs in the message; unmatched
    messages are appended to event.log and 99 ("Unknown error") returned.
    """
    conn, cursor = db_connect()
    cursor.execute('select * from webmoni_event_type where (id != 0)')
    event_types = cursor.fetchall()
    db_close(cursor,conn)
    for event_type in event_types:
        if event_type[1] in e:
            return event_type[0]
    # for/else: runs only when no keyword matched
    else:
        fo = open("event.log", "a+")
        err_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        fo.write("[%s] %s\r\n" % (err_time, e))
        fo.close()
        return 99
# 域名检测
def checkDomain(url_t,check_number):
# 获得url
url = 'https://' + url_t[1]
#url = 'https://' + 'ju999.net'
# 创建Curl对象
c = pycurl.Curl()
# 定义请求的URL
c.setopt(pycurl.URL, url)
# 设置证书
# c.setopt(pycurl.SSL_VERIFYPEER, 1)
# c.setopt(pycurl.SSL_VERIFYHOST, 2)
# c.setopt(pycurl.CAINFO, certifi.where())
# 忽略证书
c.setopt(pycurl.SSL_VERIFYPEER, 0)
c.setopt(pycurl.SSL_VERIFYHOST, 0)
# 模拟IE11浏览器
c.setopt(pycurl.USERAGENT,
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko")
# 模拟Safari浏览器
#c.setopt(pycurl.USERAGENT,
# "Mozilla/5.0(Macintosh;intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML,like Gecko) Chrome/52.0.2743.116 Safari/537.36")
# 定义请求连接的等待时间
c.setopt(pycurl.CONNECTTIMEOUT, TIME_OUT)
# 定义请求超时时间
c.setopt(pycurl.TIMEOUT, TIME_OUT)
# 屏蔽下载进度条
c.setopt(pycurl.NOPROGRESS, 1)
# 完成交互后强制断开连接,不重用
c.setopt(pycurl.FORBID_REUSE, 1)
# 指定HTTP重定向的最大次数为10
c.setopt(pycurl.MAXREDIRS, 5)
# 设置保存DNS信息的时间
c.setopt(pycurl.DNS_CACHE_TIMEOUT, 1)
# 创建一个文件,以wb方式打开,用来存储返回的HTTP头部和页面内容
indexFile = open(os.path.dirname(os.path.realpath(__file__)) + "content.txt", "wb")
# 将返回的HTTP头部定向到indexFile文件对象
c.setopt(pycurl.WRITEHEADER, indexFile)
# 将返回的HTML内容定向到indexFile对象
c.setopt(pycurl.WRITEDATA, indexFile)
try:
# 提交请求
c.perform()
# 获取DNS解析时间
NAMELOOKUP_TIME = c.getinfo(pycurl.NAMELOOKUP_TIME) * 1000
# 获取建立连接时间
CONNECT_TIME = c.getinfo(pycurl.CONNECT_TIME) * 1000
# 获取从建立连接到准备传输所消耗的时间
PRETRANSFER_TIME = c.getinfo(pycurl.PRETRANSFER_TIME) * 1000
# 获取从建立连接到传输开始消耗的时间
STARTTRANSFER_TIME = c.getinfo(pycurl.STARTTRANSFER_TIME) * 1000
# 获取传输的总时间
TOTAL_TIME = c.getinfo(pycurl.TOTAL_TIME) * 1000
# 获取HTTP状态码
HTTP_CODE = c.getinfo(pycurl.HTTP_CODE)
# 获取下载数据包大小
SIZE_DOWNLOAD = c.getinfo(pycurl.SIZE_DOWNLOAD)
# 获取HTTP头部大小
HEADER_SIZE = c.getinfo(pycurl.HEADER_SIZE)
# 获取平均下载速度
SPEED_DOWNLOAD = c.getinfo(pycurl.SPEED_DOWNLOAD)
# 关闭打开的文件
indexFile.close()
# 关闭Curl对象
c.close()
"""
# 打印看看
print("%s %s \r\n"
"HTTP状态码:%d \r\n"
"DNS解析时间:%d ms\r\n"
"建立连接时间:%d ms\r\n"
"准备传输时间:%d ms\r\n"
"传输开始时间:%d ms\r\n"
"传输结束总时间:%d ms\r\n"
"请求数据大小:%d betys\r\n"
"HTTP头部大小:%d betys\r\n"
"平均下载速度:%d betys/s\r\n" % (url_t[0],url,
HTTP_CODE,
NAMELOOKUP_TIME,
CONNECT_TIME,
PRETRANSFER_TIME,
STARTTRANSFER_TIME,
TOTAL_TIME,
SIZE_DOWNLOAD,
HEADER_SIZE,
SPEED_DOWNLOAD))
"""
# 返回检测结果元组给data
return (HTTP_CODE,
round(NAMELOOKUP_TIME),
round(CONNECT_TIME),
round(PRETRANSFER_TIME),
round(STARTTRANSFER_TIME),
round(TOTAL_TIME),
round(SIZE_DOWNLOAD),
round(HEADER_SIZE),
round(SPEED_DOWNLOAD),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
NODE, # NODE引用自配置文件config
url_t[0])
# 检测出现异常时,做如下操作
except Exception as e:
# 先重新请求几次
if check_number <= CHECK_NUM:
# print(url_t[0], url, "第", check_number, "次检测出现异常:", e)
check_number += 1
return checkDomain(url_t, check_number)
# 重试超过设定次数,则记录到数据库和文件,并且发送邮件
else:
# 获取数据库和游标
conn, cursor = db_connect()
# 引用event_type()获取事件类型id
e = str(e)
event_type_id = event_type(e)
# 事件时间
now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# 获取url_id
url_id = url_t[0]
# print("\r\n节点", NODE, ",站点", url_id, url, ":连续", check_number - 1, "次检测均异常,保存异常结果到事件日志,并且发送邮件警告!\r\n")
content = "%s %s" % (url_t[1], e)
try:
# 分别捕获异常
try: # 检测异常时,发送邮件
if (url_t[5] == 0) and (url_t[4] == 0) and (url_t[3] != 0) and (content is not None):
subprocess.getstatusoutput("echo '%s' | mail -s '站点异常:%s' 1574956497@qq.com" % (content, str(now_time)))
#subprocess.getstatusoutput("echo '%s' | mail -s '站点异常:%s' epay7777@gmail.com" % (content, str(now_time)))
print("\r\n节点", NODE, ",站点", url_id, url, ":连续", check_number - 1,"次检测均异常,发送邮件警告!\r\n")
except Exception as e:
print("检测异常时,发送邮件失败:%s" % e)
# 记录异常日志
write_log(e)
try: # 检测异常时,更新webmoni_domainname表中url的状态status_id
if event_type_id != url_t[3]:
cursor.execute(
'UPDATE webmoni_domainname SET status_id = %d where id = %d' % (event_type_id, url_id))
conn.commit()
print("\r\n节点", NODE, ",站点", url_id, url, ":连续", check_number - 1,"次检测均异常,更新域名状态!\r\n")
else:
print("\r\n节点", NODE, ",站点", url_id, url, ":连续", check_number - 1, "次检测均异常,更新域名状态!\r\n")
except Exception as e:
print("检测异常时,更新webmoni_domainname表中url的状态status_id失败:%s" % e)
# 记录异常日志
write_log(e)
try: # 检测异常时,保存异常事件信息到日志表webmoni_event_log
err_info = "'%d','%d','%d','%s'" % (event_type_id, NODE, url_id, now_time)
cursor.execute('insert into webmoni_event_log(event_type_id,node_id,url_id,datetime) value(%s)' % (err_info))
conn.commit()
print("\r\n节点", NODE, ",站点", url_id, url, ":连续", check_number - 1, "次检测均异常,记录事件日志!\r\n")
except Exception as e:
print("检测异常时,保存异常事件信息到日志表webmoni_event_log失败:%s" % e)
# 记录异常日志
write_log(e)
try: # 检测异常时,继续保存空结果到表webmoni_monitordata
err_url = "'%s','%d','%d'" % (now_time, NODE, url_id)
cursor.execute('insert into webmoni_monitordata(datetime,node_id,url_id) value(%s)' % (err_url))
conn.commit()
print("\r\n节点", NODE, ",站点", url_id, url, ":连续", check_number - 1, "次检测均异常,记录空结果!\r\n")
# 检测异常时,返回False
return False
except Exception as e:
print("检测异常时,继续保存空结果到表webmoni_monitordata失败:%s" % e)
# 记录异常日志
write_log(e)
# 检测异常打印看看
# 检测失败时,一定要返回False
finally:
# 关闭数据库连接
db_close(cursor, conn)
# 关闭打开的文件
indexFile.close()
# 关闭Curl对象
c.close()
return False
# 保存检测结果data到数据库
def resultSave(url_t,data):
    """Insert one successful check result and reset the domain's status.

    Args:
        url_t: row from webmoni_domainname (id, name, ..., status_id at
               index 3 — presumed from usage; confirm against the schema).
        data: 12-tuple of timings/sizes produced by checkDomain().
    """
    conn, cursor = db_connect()
    try:
        # insert the measurement row (parameterized query)
        cursor.execute("insert into webmoni_monitordata(http_code,"
                       "namelookup_time,"
                       "connect_time,"
                       "pretransfer_time,"
                       "starttransfer_time,"
                       "total_time,"
                       "size_download,"
                       "header_size,"
                       "speed_download,"
                       "datetime,"
                       "node_id,"
                       "url_id) value(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", data)
        conn.commit()
        print("^保存成功:",data[-1],data,"\r\n")
        # domain was previously flagged abnormal — mark it healthy (0) again
        if (url_t[3] != 0):
            cursor.execute('UPDATE webmoni_domainname SET status_id = %d where id = %d' % (0, url_t[0]))
            print('*'*20,'I am here')
            conn.commit()
    except Exception as e:
        print("保存检测结果时失败")
        write_log(e)
        exit(e)
    finally:
        db_close(cursor,conn)
def main(url_t):
    """Check one domain and persist the result when the check succeeded."""
    first_attempt = 1
    data = checkDomain(url_t, first_attempt)
    if data is not False:
        resultSave(url_t, data)
# 开始
if __name__ == '__main__':
# 这里开始轮询
while True:
# 连接数据库
conn, cursor = db_connect()
# 查询出所有要检测的域名
cursor.execute('select * from webmoni_domainname where (check_id = 0) ORDER BY id ASC')
domains = cursor.fetchall()
print(domains)
# 关闭数据库连接
db_close(cursor,conn)
# 打印配置信息
print("=" * 100, "\r\n设定轮询间隔时间:%ss" % INTERVAL)
print("设定数据库操作失败时重试间隔时间:%ss" % RETRY)
print("设定检测站点异常时重试次数:%s 次" % CHECK_NUM)
print("设定请求超时时间:%ss" % TIME_OUT)
print("设定线程/进程数:%s" % THREAD_NUM)
print("设定检测节点ID: %s\r\n" % NODE, "-" * 98)
print("此次轮询共有%s个域名待检测" % (len(domains)))
# 设定轮询间隔时间
time_remaining = INTERVAL - time.time() % INTERVAL
now_time = (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
time2go = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + time_remaining))
print("现在是:[%s],[%s]开始执行,等待 %s second..." % (now_time, time2go, time_remaining))
# 整点开始
time.sleep(time_remaining)
print("[", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), "] It is Time to Go Go Go…\r\n","-"*98)
star = time.time()
# 创建进程池,进程数=THREAD_NUM,进程调用函数main,参数url_t
try:
pool = Pool(THREAD_NUM)
for url_t in domains:
pool.apply_async(func=main, args=(url_t,))
# 终止创建子进程
pool.close()
# 等待所有子进程结束
pool.join()
except Exception as e:
print(e)
stop = time.time()
print(stop-star)
|
from data import data_manager
def get_shows(page):
    """Return one 15-row page of shows, best-rated first.

    page is the zero-based page index; it is coerced to int before being
    used to compute the OFFSET.
    """
    offset = 15 * int(page)
    query = f'''SELECT * FROM shows ORDER BY rating DESC LIMIT 15 OFFSET {offset};
        '''
    return data_manager.execute_select(query)
# def get_pagination():
# data = data_manager.execute_select(
# f'''SELECT COUNT (DISTINCT id) FROM shows;
# ''')
# return data
def tv_show(show_id):
    """Return the show row with the given id.

    show_id is coerced to int before being interpolated into the query, so
    a malicious value cannot inject SQL (the original interpolated the raw
    value into the f-string).
    """
    data = data_manager.execute_select(
        f'''SELECT * FROM shows WHERE id = {int(show_id)};''')
    return data
# 1. Create a Serializer class. An instance takes a complex object (list, dict, etc.) and provides loads and dumps methods that work like json.loads and json.dumps.
# 2. dumps takes one parameter — content_type (either json or pickle) — and returns a string in the requested format (json or pickle).
# 3. loads takes two parameters — data and content_type — and returns a new Serializer object wrapping the complex object.
import json
import pickle
class Serializer:
    """Wrap an arbitrary object and (de)serialize it as JSON or pickle.

    Per the exercise spec above: dumps() RETURNS the serialized form
    (str for json, bytes for pickle) and loads() RETURNS a new Serializer
    holding the deserialized object — the original only printed them and
    returned None. The prints are kept so the demo output is unchanged.
    """

    def __init__(self, obj=None):
        self.obj = obj

    def dumps(self, content_type=None):
        """Serialize self.obj; returns None for an unknown content_type."""
        if content_type == 'json':
            data = json.dumps(self.obj)
        elif content_type == 'pickle':
            data = pickle.dumps(self.obj)
        else:
            return None
        print(data)
        return data

    def loads(self, data=None, content_type=None):
        """Deserialize data into a new Serializer; None for unknown types."""
        if content_type == 'json':
            obj = json.loads(data)
        elif content_type == 'pickle':
            # NOTE: pickle.loads on untrusted input can execute arbitrary code.
            obj = pickle.loads(data)
        else:
            return None
        print(obj)
        return Serializer(obj)
d = Serializer()
c = Serializer(obj={1:2})
print('-pickle example-')
c.dumps(content_type='pickle')
d.loads(data=b'\x80\x03}q\x00K\x01K\x02s.', content_type='pickle')
print('-json example-')
c.dumps(content_type='json')
d.loads(data='{"1": 2}', content_type='json')
# d.loads(data='[1,2]', content_type='json')
|
import torch
from torch import nn
M = {}
def setup(opt, checkpoint):
print('=> Creating model from file: models/' .. opt.netType .. '.lua')
model = require('models/' .. opt.netType)(opt)
if checkpoint:
modelPath = paths.concat(opt.resume, checkpoint.modelFile)
assert(paths.filep(modelPath), 'Saved model not found: ' .. modelPath)
print('=> Resuming model from ' .. modelPath)
model0 = torch.load(modelPath):type(opt.tensorType)
M.copyModel(model, model0)
elif opt.retrain ~= 'none':
assert(paths.filep(opt.retrain), 'File not found: ' .. opt.retrain)
print('Loading model from file: ' .. opt.retrain)
model0 = torch.load(opt.retrain).type(opt.tensorType)
M.copyModel(model, model0)
if torch.type(model) == 'nn.DataParallelTable':
model = model.get(1)
if opt.optnet or opt.optMemory == 1:
optnet = require 'optnet'
imsize = opt.dataset == 'imagenet' and 224 or 32
sampleInput = torch.zeros(4,3,imsize,imsize):type(opt.tensorType)
optnet.optimizeMemory(model, sampleInput, {inplace = false, mode = 'training'})
if opt.shareGradInput or opt.optMemory >= 2:
M.shareGradInput(model, opt)
M.sharePrevOutput(model, opt)
if opt.optMemory == 3:
M.sharePrevOutput(model, opt)
if opt.optMemory == 4:
M.shareBNOutput(model, opt)
if opt.resetClassifier and not checkpoint:
print(' => Replacing classifier with ' .. opt.nClasses .. '-way classifier')
orig = model:get(#model.modules)
assert(torch.type(orig) == 'nn.Linear',
'expected last layer to be fully connected')
linear = nn.Linear(orig.weight:size(2), opt.nClasses)
linear.bias:zero()
model.remove(#model.modules)
model.add(linear:type(opt.tensorType))
if opt.nn == 'fastest':
nn.fastest = true
nn.benchmark = true
elif opt.nn == 'deterministic':
model.apply(function(m)
if m.setMode:
m.setMode(1, 1, 1))
if opt.nGPU > 1:
gpus = torch.range(1, opt.nGPU).totable()
fastest, benchmark = nn.fastest, nn.benchmark
dpt = nn.DataParallelTable(1, true, true)
:add(model, gpus)
:threads(function()
nn = require 'nn'
require 'models/GunnLayer'
nn.fastest, nn.benchmark = fastest, benchmark)
dpt.gradInput = nil
model = dpt.type(opt.tensorType)
criterion = nn.CrossEntropyCriterion().type(opt.tensorType)
return model, criterion
-- NOTE(review): the rest of this chunk is Torch7/Lua code (it uses `..`
-- concatenation, `~=`, `ipairs`, `nil`, `require`) that was mechanically
-- transliterated into Python-like syntax with its indentation stripped; it is
-- not runnable in either language.  Comments describe the apparent intent of
-- the original (fb.resnet.torch-style model utilities) -- confirm upstream.
-- Apparent intent: share gradInput storages between modules of the same type
-- so the backward pass reuses memory (opt.shareGradInput optimisation).
M.shareGradInput(model, opt)
def sharingKey(m):
key = torch.type(m)
if m.__shareGradInputKey:
key = key .. ':' .. m.__shareGradInputKey
return key
sharingKey()
cache = {}
model.apply(
def(m):
moduleType = torch.type(m)
if torch.isTensor(m.gradInput) and moduleType ~= 'nn.ConcatTable' and moduleType ~= 'nn.Concat':
key = sharingKey(m)
if cache[key] == nil:
cache[key] = torch[opt.tensorType.match('torch.(%a+)').gsub('Tensor','Storage')](1)
m.gradInput = torch[opt.tensorType.match('torch.(%a+)')](cache[key], 1, 0)
m())
-- Concat/ConcatTable modules alternate between two shared storages (i % 2)
-- so neighbouring containers do not clobber each other's gradients.
for i, m in ipairs(model.findModules('nn.ConcatTable')):
if cache[i % 2] == nil:
cache[i % 2] = torch[opt.tensorType.match('torch.(%a+)').gsub('Tensor','Storage')](1)
m.gradInput = torch[opt.tensorType.match('torch.(%a+)')](cache[i % 2], 1, 0)
for i, m in ipairs(model.findModules('nn.Concat')):
if cache[i % 2] == nil:
cache[i % 2] = torch[opt.tensorType.match('torch.(%a+)').gsub('Tensor','Storage')](1)
m.gradInput = torch[opt.tensorType.match('torch.(%a+)')](cache[i % 2], 1, 0)
print(cache)
-- Apparent intent: share the input_c buffer of every DenseConnectLayerCustom
-- module to reduce activation memory (opt.optMemory >= 2 path).
def M.sharePrevOutput(model, opt):
buffer = nil
model.apply(
def(m):
moduleType = torch.type(m)
if moduleType == 'nn.DenseConnectLayerCustom':
if buffer == nil:
buffer = torch[opt.tensorType.match('torch.(%a+)').gsub('Tensor','Storage')](1)
m.input_c = torch[opt.tensorType.match('torch.(%a+)')](buffer, 1, 0)
m())
-- Apparent intent: share the first sub-module's output buffer of each
-- DenseConnectLayerCustom (the opt.optMemory == 4 path above).
def M.shareBNOutput(model, opt):
buffer = nil
model.apply(function(m)
moduleType = torch.type(m)
if moduleType == 'nn.DenseConnectLayerCustom':
if buffer == nil:
buffer = torch[opt.tensorType:match('torch.(%a+)'):gsub('Tensor','Storage')](1)
m.net1.get(1).output = torch[opt.tensorType.match('torch.(%a+)')](buffer, 1, 0))
-- Apparent intent: copy weights and batch-norm running statistics from a
-- loaded model `s` into `t`; asserts both configurations match.
def M.copyModel(t, s):
wt, ws = t.parameters(), s.parameters()
assert(#wt==#ws, 'Model configurations does not match the resumed model!')
for l = 1:
wt[l].copy(ws[l])
bn_t, bn_s = {}, {}
for i, m in ipairs(s:findModules('nn.SpatialBatchNormalization')):
bn_s[i] = m
for i, m in ipairs(t:findModules('nn.SpatialBatchNormalization')):
bn_t[i] = m
assert(#bn_t==#bn_s, 'Model configurations does not match the resumed model!')
for i = 1:
bn_t[i].running_mean.copy(bn_s[i].running_mean)
bn_t[i].running_var.copy(bn_s[i].running_var)
return M
|
# Minimal Dash demo: renders two hard-coded traces in a single Graph.
import dash
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash()
# Plotly 'data' list: one line trace and one bar trace.
df = [{'x':[1,2,3,4,5], 'y':[1,4,9,16,25] , 'type':'line', 'name':'boats'},
      {'x':[5,4,3,2,1], 'y':[9,8,7,6,5] , 'type':'bar', 'name':'cars'},
     ]
''' dict = [{'x': ['abhi','lovee','ishu','mouli'],
            'y': [19,21,14,13],
            'type':'bar', 'name':'details',
            }]'''
app.layout = html.Div(children=[
    html.H1('IPL Visualizations'),
    dcc.Graph(id='example',
              figure={
                  'data':df
                  ,
                  'layout': {
                      'title': 'ipl simple plotting'
                  }
              })
])
if __name__ == '__main__':
    # debug=True enables hot reload; not for production use.
    app.run_server(debug=True)
|
# Demo: the difference between str.encode() (plain UTF-8 bytes) and
# pickle.dumps() (a pickle byte stream).
import pickle
s = 'Hola mundo'
print(s)
print(type(s))
# UTF-8 encoding of the text -- bytes, but NOT a pickle stream.
se = s.encode()
print(se)
print(type(se))
# A real pickle stream (starts with the \x80 protocol opcode).
sp = pickle.dumps(s)
print(sp)
print(type(sp))
ss2 = pickle.loads(se)
print(ss2)
print(type(ss2))
# (Translated from Spanish) Nothing past loads(se) prints: the stream starts
# with 'H' instead of the \x80 opcode, so pickle raises UnpicklingError here.
|
def ispalindrome(n):
    """Return True if the decimal representation of n reads the same backwards."""
    digits = str(n)
    return digits == digits[::-1]
def sominv(n):
    """Return n plus the number formed by reversing n's decimal digits."""
    reversed_number = int(str(n)[::-1])
    return n + reversed_number
def islychrel(n):
    """Return True if n never reaches a palindrome within 50 reverse-and-add
    iterations (the Project Euler 55 Lychrel criterion).

    Fix: the original computed sominv(n) twice per iteration (once for the
    palindrome test, once to advance n); advance first, test once.
    """
    for _ in range(1, 51):
        n = sominv(n)
        if ispalindrome(n):
            return False
    return True
def solve(n):
    """Count the Lychrel candidates among 0..n inclusive."""
    return sum(1 for candidate in range(n + 1) if islychrel(candidate))
|
from collections import Counter
import itertools
import re
import numpy as np
class Moon(object):
    """A moon with integer 3-D position and velocity vectors (AoC 2019 day 12)."""
    def __init__(self, line):
        # Grab every (possibly negative) integer from the input line.
        coords = [int(token) for token in re.findall(r'[-\d]+', line)]
        self.pos = np.array(coords)
        self.vel = np.zeros(len(coords), dtype=self.pos.dtype)
    def __repr__(self):
        return '%s, %s' % (self.pos, self.vel)
    def apply_gravity(self, other):
        # Each axis pulls this moon's velocity one step toward the other moon.
        for axis in range(len(self.pos)):
            delta = other.pos[axis] - self.pos[axis]
            if delta > 0:
                self.vel[axis] += 1
            elif delta < 0:
                self.vel[axis] -= 1
    def apply_velocity(self):
        self.pos += self.vel
    def potential_energy(self):
        return np.abs(self.pos).sum()
    def kinetic_energy(self):
        return np.abs(self.vel).sum()
    def total_energy(self):
        return self.potential_energy() * self.kinetic_energy()
def step(moons):
    """Advance one tick: apply gravity for every ordered pair, then move."""
    for mover, attractor in itertools.permutations(moons, 2):
        mover.apply_gravity(attractor)
    for moon in moons:
        moon.apply_velocity()
def main():
    """AoC 2019 day 12: part 1 prints total energy after 1000 ticks; part 2
    estimates the per-axis cycle lengths and combines them with an LCM."""
    moons = []
    for line in open('input.txt', 'r'):
        moons.append(Moon(line))
    # Part 1: simulate 1000 ticks.
    for _ in range(1000):
        step(moons)
    print(sum(m.total_energy() for m in moons))
    # Part 2: the axes evolve independently, so track each axis' full state
    # and record the longest interval between repeats.
    histories = [dict() for _ in range(3)]
    max_intervals = [0] * 3
    for i in range(300000):
        step(moons)
        for j in range(3):
            key = tuple(m.pos[j] for m in moons) + tuple(m.vel[j] for m in moons)
            last_i = histories[j].get(key, None)
            # Bug fix: 'if last_i:' dropped any interval anchored at index 0
            # because 0 is falsy; compare against None explicitly.
            if last_i is not None:
                interval = i - last_i
                if interval > max_intervals[j]:
                    max_intervals[j] = interval
            histories[j][key] = i
        # (dead 'i += 1' removed: the for-loop reassigns i every iteration)
    print(np.lcm.reduce(max_intervals))
if __name__ == '__main__':
    main()
|
# coding = utf-8
# @Time : 2021/7/15 16:59
# @Author : pengjiangli
# @File : Pytest04.py
# @Software: PyCharm
# @contact: 292617625@qq.com
import pytest
'''
如果一个方法或者一个class用例想要同时调用多个fixture,可以使用@pytest.mark.usefixture()进行叠加。注意叠加顺序,先执行的放底层,后执行的放上层。
'''
@pytest.fixture()
def test1():
    """Function-scoped fixture #1; only logs that it ran (message in Chinese: 'start executing function1')."""
    print('\n开始执行function1')
@pytest.fixture()
def test2():
    """Function-scoped fixture #2; only logs that it ran (message in Chinese: 'start executing function2')."""
    print('\n开始执行function2')
@pytest.mark.usefixtures('test1')
@pytest.mark.usefixtures('test2')
def test_a():
    """Requests both fixtures via stacked usefixtures marks (see the module
    note above on stacking order). Message: 'test case a executed'."""
    print('---用例a执行---')
@pytest.mark.usefixtures('test2')
@pytest.mark.usefixtures('test1')
class TestCase:
    """Class-level usefixtures: both fixtures apply to every test method
    (stacking order per the module note above)."""
    def test_b(self):
        # Message: 'test case b executed'.
        print('---用例b执行---')
    def test_c(self):
        # Message: 'test case c executed'.
        print('---用例c执行---')
if __name__=="__main__":
    # -s disables output capture so the fixture prints stay visible.
    pytest.main(["-s","Pytest04.py"])
|
from airflow.models import DAG
from datetime import datetime
from airflow.operators import BashOperator
from airflow.operators import PythonOperator
dag =DAG(
    dag_id='testdag',
    schedule_interval=None,  # manual triggers only
    # NOTE(review): a dynamic start_date (datetime.now()) is generally
    # discouraged in Airflow -- confirm this is intentional for this test DAG.
    start_date=datetime.now()
)
def check_stat(**kwargs):
    """Print and return the execution date of the triggering DAG run.

    The returned value is pushed to XCom by the PythonOperator below.
    """
    dag_run = kwargs['dag_run']
    print(dag_run.execution_date)
    return dag_run.execution_date
t1=PythonOperator(
    task_id='python_exec_date',
    python_callable=check_stat,
    provide_context=True,  # Airflow 1.x style: context passed via **kwargs
    xcom_push=True,  # check_stat's return value goes to XCom
    dag=dag
)
# Reads t1's XCom value inside a templated bash command.
t2 = BashOperator(
    task_id='bash_run_id',
    bash_command="task_state_test=$(airflow task_state testdag python_exec_date '{{ ti.xcom_pull(task_ids='python_exec_date') }}')",
    dag=dag)
t1.set_downstream(t2)
|
"""
author: Dr. Mohammed Zia
https://www.linkedin.com/in/zia33
Problem Statement:
You are given N elements and your task is to Implement a Stack in which you can get minimum element in O(1) time.
more: https://practice.geeksforgeeks.org/problems/get-minimum-element-from-stack/1
"""
class Stack:
    """
    LIFO stack that reports its minimum element in O(1) time.

    A parallel stack (self._mins) keeps the running minimum: its top is
    always min(self.arr).  This replaces the original getMin, which copied
    and sorted the whole stack (O(n log n)) -- the problem statement asks
    for O(1).  self.arr keeps its original name and contents.
    """
    def __init__(self):
        self.arr = []
        self._mins = []  # _mins[-1] == min(self.arr) whenever arr is non-empty
        return
    def push(self, item):
        self.arr.append(item)
        # Record the minimum as of this push.
        self._mins.append(item if not self._mins else min(item, self._mins[-1]))
    def pop(self):
        # Same interface as before: prints the popped item, returns None.
        item = self.arr[-1]
        del self.arr[-1]
        self._mins.pop()
        print(item)
        return
    def getMin(self):
        # O(1): the running minimum sits on top of the helper stack.
        print(self._mins[-1])
        return
if __name__ == '__main__':
    # Demo: push 2, 3 -> pop prints 3 -> getMin prints 2 -> push 1.
    stack = Stack()
    stack.push(2)
    stack.push(3)
    stack.pop()
    stack.getMin()
    stack.push(1)
stack.getMin() |
from rest_framework.views import APIView
from rest_framework.response import Response
from cloudengine.core.models import CloudApp
# View for creating new apps
class AppView(APIView):
    """Create a CloudApp with the given name and return its key."""
    def post(self, request, name):
        app = CloudApp(name=name)
        app.save()
        # NOTE(review): `key` is presumably generated on save -- confirm in
        # the CloudApp model before relying on it here.
        return Response({"id": app.key})
class AppListView(APIView):
    """List every CloudApp as a dict of its name and key."""
    def get(self, request):
        app_props = ['name', 'key']
        app_list = [
            {prop: getattr(app, prop) for prop in app_props}
            for app in CloudApp.objects.all()
        ]
        return Response({'result': app_list})
|
x = 10
# Count down from 10 to 1 and print the multiples of 5 (prints 10, then 5).
# Fixes two defects in the original: the loop condition was inverted
# (x <= 1 never held with x starting at 10, so the body never ran), and
# `continue` executed before the decrement, which would have spun forever
# on the first multiple of 5 had the loop been entered.
while x >= 1:
    if x % 5 == 0:
        print(x)
    x -= 1
# final copy
# Thank you :)
|
#!python3
"""
This script updates the alexa rankings and sort the csv file.
Usage:
python3 update.py
"""
import csv
import sys
import os
import itertools
import math
import alexa
from download_favicons import download_favicons
# CSV of tracked sites, relative to this script's directory.
sites_path = os.path.join(os.path.dirname(__file__), "..", "_data", "sites.csv")
# When env var update_blank_only == "true", only rows without a rank are refreshed.
update_blank_only = os.environ.get("update_blank_only", "false") == "true"
def add_commas_to_rank(number):
    """Format a rank with thousands separators ('1,234'); falsy input -> None."""
    if not number:
        return None
    if isinstance(number, str):
        number = remove_commas(number)
    return "{:,}".format(number)
def round_rank(rank: int):
    """Round a rank to one significant figure (1234 -> 1000, 87 -> 90)."""
    magnitude = int(math.log10(abs(rank)))
    return round(rank, -magnitude)
def remove_commas(string_number):
    """Parse a comma-grouped string like '1,234,567' into an int."""
    digits = "".join(string_number.split(","))
    return int(digits)
def update_alexa(links):
    """Refresh each link's Alexa rank in place and return the list.

    Rows that already have a rank are skipped when update_blank_only is set.
    Lookups go through the alexa module (network); a failed lookup keeps the
    previously stored rank.
    """
    for link in links:
        # Normalize whatever is stored (int or comma string) to a comma string.
        link['rank'] = add_commas_to_rank(link['rank'])
        if link['rank'] and update_blank_only:
            continue
        print("Updating {}.. ".format(link['netloc']), end="")
        sys.stdout.flush()
        rank = alexa.get_rank(link['netloc'])
        if rank:
            # Store the rank rounded to one significant figure, comma-formatted.
            link['rank'] = add_commas_to_rank(round_rank(rank))
            print(link['rank'])
    return links
def get_groups(links):
    """Yield (section, links) pairs; each section's links sorted by numeric rank.

    Sorts `links` in place by section first, as groupby needs adjacent keys.
    """
    links.sort(key=lambda link: link['section'])
    grouped = itertools.groupby(links, key=lambda link: link['section'])
    for section, members in grouped:
        ranked = sorted(members, key=lambda link: remove_commas(link['rank']))
        yield section, ranked
def get_groups_in_order(links):
    """Yield all links, section by section, in the site's fixed display order."""
    rank_sorted_groups = dict(get_groups(links))
    for section in ("Clients", "Tutoring", "Other", "Agency", "Jobs"):
        yield from rank_sorted_groups[section]
def sort(links):
    # Regroup links by section (fixed order) and by rank within each section.
    # NOTE: shadows the builtin name `sort`; kept because __main__ calls it.
    return list(get_groups_in_order(links))
def main(func):
    """Read sites.csv into row dicts, apply func, and write the result back.

    A falsy return from func leaves the file untouched (used by
    download_favicons, which works purely by side effect).
    """
    with open(sites_path, 'r') as csvfile:
        links = list(csv.DictReader(csvfile))
    links = func(links)
    if not links:
        return
    with open(sites_path, 'w') as csvfile:
        # Column order is taken from the first row's keys.
        fieldnames = list(links[0].keys())
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(links)
if __name__ == "__main__":
    # Pipeline: refresh ranks, re-sort the csv, then fetch favicons.
    main(update_alexa)
    main(sort)
    main(download_favicons)
|
import unittest, prob13
class Test_testprob13(unittest.TestCase):
    """Regression driver for prob13's self-quotient image filter.

    The original repeated the same four-line sequence for each sigma;
    this parameterizes it with a loop (same calls, same order, same files).
    """
    def test_make_self_quotient(self):
        # One pass per Gaussian sigma; each writes its own output image.
        for sd in (1, 5, 10, 20):
            obj = prob13.Prob13()
            obj.convert_grey()
            obj.make_self_quotient(sd)
            obj.save('sd_%d.jpg' % sd)
if __name__ == '__main__':
    # Run the prob13 regression suite.
    unittest.main()
|
#!/usr/bin/env python
import dataset
import csv
from slugify import slugify
from collections import OrderedDict
from summarize import METRO_PARISHES, METRO_FIPS
INPUT_FILES = (
('decennial-2000-bg', 'data/decennial-2000-bg/DEC_00_SF1_P004_with_ann.csv'),
('acs-2013-bg', 'data/acs-2013-bg/ACS_13_5YR_B03002_with_ann.csv'),
)
FIPS_CROSSWALK_FILE = 'data/fips-crosswalk/st22_la_cou.txt'
ESTIMATES_2000_FILE = 'data/populations-estimates/2000-2010/CO-EST00INT-SEXRACEHISP.csv.txt'
ESTIMATES_2010_FILE = 'data/populations-estimates/PEP_2014_PEPSR6H/PEP_2014_PEPSR6H_with_ann.csv'
POSTGRES_URL = 'postgresql:///nola_demographics'
db = dataset.connect(POSTGRES_URL)
def import_data(db, product, filename):
    """
    Load one census CSV into the census_data table, tagging rows with product.
    """
    with open(filename) as f:
        all_rows = list(csv.reader(f))
    header = all_rows.pop(0)
    columns = [slugify(name, separator='_') for name in header]
    table = db['census_data']
    data = []
    for row in all_rows:
        record = OrderedDict(zip(columns, row))
        record['product'] = product
        data.append(record)
    table.insert_many(data)
def import_fips():
    """Load the Louisiana parish FIPS crosswalk file into the fips table."""
    table = db['fips']
    columns = ['state', 'state_fp', 'county_fp', 'county_name', 'class_fp']
    with open(FIPS_CROSSWALK_FILE) as f:
        records = [OrderedDict(zip(columns, row)) for row in csv.reader(f)]
    table.insert_many(records)
def _write_2000_population_estimate(race, row):
    # Upsert one race's 2000-2010 yearly estimates for a parish.
    # (Python 2 script: print statement syntax.)
    table = db['population_estimates']
    print 'processing %s (%s)' % (row['CTYNAME'], race)
    # One upsert per year; rows are keyed on (year, county), so successive
    # calls for different races fill columns of the same row.
    for year in range(2000, 2011):
        estimate_key = 'POPESTIMATE%s' % year
        data = {
            'county': row['CTYNAME'],
            'year': year,
        }
        data[race] = row[estimate_key]
        table.upsert(data, ['year', 'county'])
def import_2000_population_estimates():
    """Upsert 2000-2010 intercensal estimates for Louisiana metro parishes.

    The race column is selected by the (ORIGIN, RACE) code pair; only
    both-sexes rows (SEX == '0') are written.
    """
    race_for_codes = {
        ('2', '0'): 'hispanic',
        ('1', '1'): 'white',
        ('1', '2'): 'black',
        ('1', '3'): 'american_indian',
        ('1', '4'): 'asian',
        ('1', '5'): 'native_hawaiian',
        ('1', '6'): 'two_or_more',
        ('0', '0'): 'total',
    }
    with open(ESTIMATES_2000_FILE) as f:
        rows = list(csv.DictReader(f))
    for row in rows:
        if (row['STNAME'] == 'Louisiana'
                and row['CTYNAME'] in METRO_PARISHES
                and row['SEX'] == '0'):
            race = race_for_codes.get((row['ORIGIN'], row['RACE']))
            if race is not None:
                _write_2000_population_estimate(race, row)
def import_2010_population_estimates():
    """Upsert post-2010 population estimates (est7 vintage) for metro parishes.

    Each input row carries one Hisp.id slice; upserts keyed on (year, county)
    merge the race columns into a single table row.
    """
    table = db['population_estimates']
    with open(ESTIMATES_2010_FILE) as f:
        rows = list(csv.DictReader(f))
    for row in rows:
        county_name, state = row['GEO.display-label'].split(', ')
        est_type = row['Year.id'][:4]
        est_year = row['Year.id'][4:]
        data = {
            'county': county_name,
            'year': est_year,
        }
        if (county_name in METRO_PARISHES
                and est_type == 'est7'
                and est_year != '2010'
                and row['Sex.id'] == 'totsex'):
            if row['Hisp.id'] == 'hisp':
                data['hispanic'] = row['totpop']
            if row['Hisp.id'] == 'nhisp':
                data['white'] = row['wa']
                data['black'] = row['ba']
                data['asian'] = row['aa']
                data['american_indian'] = row['ia']
                data['native_hawaiian'] = row['na']
                data['two_or_more'] = row['tom']
            if row['Hisp.id'] == 'tothisp':
                data['total'] = row['totpop']
            # Bug fix: the upsert previously sat OUTSIDE this filter, so a
            # near-empty {county, year} record was written for every county
            # in the file, metro or not.  Write only filtered rows.
            table.upsert(data, ['year', 'county'])
if __name__ == '__main__':
    # Full import pipeline: estimates, crosswalk, then the raw census files.
    # (Python 2 script: print statement syntax.)
    import_2000_population_estimates()
    import_2010_population_estimates()
    print 'import fips crosswalk'
    import_fips()
    for product, filename in INPUT_FILES:
        print 'processing %s' % product
        import_data(db, product, filename)
|
__author__ = 'siva'
from PIL import Image as PILImage
import FileHandle
import datetime
import zbarlight
import cv2
def QRReader(file_path = './out.png'):
    """Print the QR codes found in an image file plus the decode latency."""
    started = datetime.datetime.utcnow()
    with open(file_path, 'rb') as image_file:
        image = PILImage.open(image_file)
        image.load()  # force-read the pixel data while the file is open
    codes = zbarlight.scan_codes('qrcode', image)
    print('QR codes: %s' % codes)
    print(datetime.datetime.utcnow() - started)
def DecodeQRImage(CVImage):
    """Decode QR codes from an OpenCV frame (numpy array).

    Returns the list of decoded codes, or None if the frame is None or no
    code is found (zbarlight returns None for no match).
    """
    #start = datetime.datetime.utcnow()
    codes = None
    # Bug fix: 'not CVImage == None' triggers numpy's element-wise comparison
    # and raises "truth value of an array is ambiguous"; use an identity test.
    if CVImage is not None:
        image = PILImage.fromarray(CVImage)
        codes = zbarlight.scan_codes('qrcode', image)
        #print('QR codes: %s' % codes)
    #print(datetime.datetime.utcnow()-start)
    return codes
def readQRFromCamera():
    # Live loop: grab frames from the default camera, decode any QR code,
    # overlay the decoded text, and append it to a CSV.  ESC exits.
    # (Python 2 script: print statement syntax.)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cam = cv2.VideoCapture(0)
    if cam.isOpened(): # try to get the first frame
        fps = cam.get(cv2.cv.CV_CAP_PROP_FPS)
        rval, frame = cam.read()
    else:
        rval = False
    #FileHandle.writetoFile("./results/QRdata.csv","FrameNumber,Time(Seconds)")
    while rval:
        qrData = DecodeQRImage(frame)
        if qrData is None :
            print "No QR Code identified"
        else:
            print('QR codes: %s, Type %s' %(qrData,type(qrData)))
            if not qrData[0] == '' :
                qrvals = qrData[0].split(",")
                #cv2.putText(frame,'Video Frame identified # : {0} , Current video Duration # : {1} , playing : {2}'.format(qrvals[0],qrvals[0],qrvals[0]),(10,500), font, 0.5,(255,0,0),2)
                # Overlay the decoded payload on the frame and log it.
                cv2.putText(frame,'Video Data # : {0} '.format(qrData[0]),(100,850), font, 2,(0,0,255),2)
                FileHandle.writetoFile("./results/QRdata.csv","{0}\n".format(qrData[0]))
        cv2.imshow("VIPER Video Test Automation", frame)
        # read the next frame
        rval, frame = cam.read()
        key = cv2.waitKey(20)
        if key == 27: # exit on ESC
            break
    cv2.destroyAllWindows()
|
from scipy.ndimage import interpolation
import os
import random
import tensorflow as tf
import numpy as np
import nibabel as nib
import copy
import pprint
import logging
from random import shuffle
import glob
import gc
try:
import medpy.io
medpy_found = True
except ImportError:
medpy_found = False
pass
try:
import cv2
cv2_found = True
except ImportError:
cv2_found = False
pass
# Keys of the per-group data dictionaries: network input, target and ROI sides.
NN_IN = 'network_in'
NN_OUT = 'network_out'
NN_ROI = 'network_roi'
# Cross-validation split names.
CV_TRAIN = 'train_cv'
CV_TEST = 'test_cv'
def list_nohidden_directories(path):
    """Return the sorted names of non-hidden directories directly under path.

    On Windows, 'hidden' means the HIDDEN or SYSTEM file attribute; on POSIX
    it means a dot-prefixed name.  Bug fix: the POSIX branch previously
    tested startswith('.') on the full joined path, which essentially never
    matched, so hidden directories leaked through; test the basename instead.
    """
    if os.name == 'nt':
        import win32api, win32con
    def file_is_hidden(p):
        if os.name == 'nt':
            attribute = win32api.GetFileAttributes(p)
            return attribute & (win32con.FILE_ATTRIBUTE_HIDDEN | win32con.FILE_ATTRIBUTE_SYSTEM)
        else:
            return os.path.basename(p).startswith('.')  # linux-osx convention
    file_list = [f for f in sorted(os.listdir(path))
                 if (not file_is_hidden(os.path.join(path, f))
                     and os.path.isdir(os.path.join(path, f)))]
    return file_list
def list_nohidden_files(path):
    """Recursively list files under path whose own name is not hidden.

    Bug fix: the POSIX branch previously tested startswith('.') on the full
    path (always prefixed by parent directories), so it never matched; test
    the basename instead.  NOTE: files inside hidden directories are still
    returned if their own names are not hidden, matching the (intended)
    original per-entry check.
    """
    if os.name == 'nt':
        import win32api, win32con
    def file_is_hidden(p):
        if os.name == 'nt':
            attribute = win32api.GetFileAttributes(p)
            return attribute & (win32con.FILE_ATTRIBUTE_HIDDEN | win32con.FILE_ATTRIBUTE_SYSTEM)
        else:
            return os.path.basename(p).startswith('.')  # linux-osx convention
    # glob already yields full paths, so no extra join is needed.
    file_list = [f for f in glob.glob(path + "/**/*", recursive=True)
                 if (not file_is_hidden(f) and not os.path.isdir(f))]
    return file_list
class Data_class():
    """Random 3-D cube sampler intended as a tf.data source.

    group_subject_list entries appear to be dicts with 'index_list'
    (three parallel coordinate arrays of candidate cube centres) and
    'subject_data_dict' (NN_IN / NN_OUT 4-D arrays) -- presumed from the
    usage below; confirm against the caller.
    """
    def __init__(self, group_subject_list, output_dtype='float'):
        self.group_subject_list = group_subject_list
        # 'int' selects order-0 (nearest) interpolation for labels below.
        self.output_dtype = output_dtype
    def reading_fun(self, subject_index):
        """tf.data map function: wraps generate_cube in tf.py_function."""
        tensor_flair, tensor_lesions = tf.py_function(func=self.generate_cube, inp=[subject_index],
                                                      Tout=[tf.float32, tf.float32])
        return tensor_flair, tensor_lesions
    def generate_cube(self, subject_index):
        """Pick a random centre for the subject and cut matching in/out cubes."""
        dict_subject = self.group_subject_list[subject_index]
        index = self.random_index_from_list(dict_subject['index_list'])
        cube_input = self.get_cube_from_index(dict_subject['subject_data_dict'][NN_IN], index)
        cube_output = self.get_cube_from_index(dict_subject['subject_data_dict'][NN_OUT], index)
        cube_input, cube_output = self.data_augmentation(cube_input, cube_output, self.output_dtype)
        return cube_input, cube_output
    @staticmethod
    def random_index_from_list(index_list):
        # index_list holds three parallel coordinate arrays; pick one position.
        random_index = random.randint(0, len(index_list[0]) - 1)
        return (index_list[0][random_index], index_list[1][random_index], index_list[2][random_index])
    @staticmethod
    def get_cube_from_index(data, index, cube_shape=(32, 32, 32)):
        # Cube centred on `index`; the trailing channel axis is kept whole.
        return data[int(index[0] - cube_shape[0] / 2):int(index[0] + cube_shape[0] / 2),
               int(index[1] - cube_shape[1] / 2):int(index[1] + cube_shape[1] / 2),
               int(index[2] - cube_shape[2] / 2):int(index[2] + cube_shape[2] / 2), :]
    @staticmethod
    def data_augmentation(input, output, output_dtype):
        """Rotate input and output by the same random in-plane angle.

        order=0 (nearest-neighbour) keeps integer label values intact.
        """
        angle = random.randint(0, 180)
        input = interpolation.rotate(input, angle, axes=(1, 0), reshape=False, mode='nearest')
        if output_dtype == 'int':
            output = interpolation.rotate(output, angle, reshape=False, mode='nearest', order=0)
        else:
            output = interpolation.rotate(output, angle, reshape=False, mode='nearest')
        return input, output
class DataOperator:
    """Read-only facade over the nested {group: {side: {channel: {subject: path}}}} dict."""
    def __init__(self, dictionary):
        self._dictionary = dictionary
    def __str__(self):
        return pprint.pformat(self._dictionary)
    def __copy__(self):
        # "Copy" is deliberately deep so operators never share mutable state.
        return DataOperator(copy.deepcopy(self._dictionary))
    def get_network_sides(self, group):
        return list(self._dictionary[group].keys())
    def get_list_subjects(self, group):
        # we should have the same subject in every channel so just read channel 1
        return list(self._dictionary[group][NN_IN][1].keys())
    def get_list_channels(self, group, network_side):
        return list(self._dictionary[group][network_side].keys())
    def get_list_groups(self):
        return list(self._dictionary.keys())
    def get_subject_path(self, group, network_side, channel, subject):
        return self._dictionary[group][network_side][channel][subject]
    def get_groups_dictionary(self, groups):
        """Deep-copy the sub-dictionary for one group name or a list/tuple of names.

        Fix: tuples are now accepted, matching what get_data already allows.
        """
        if isinstance(groups, str):
            groups_list = [groups]
        elif isinstance(groups, (list, tuple)):
            groups_list = groups
        else:
            raise ValueError(' Error: "groups" must be a string or a list.')
        new_dict = {group: dict(self._dictionary[group]) for group in groups_list}
        return copy.deepcopy(new_dict)
    def get_dictionary(self):
        return copy.deepcopy(self._dictionary)
    def get_data(self, groups='all'):
        """Return a DataOperator restricted to groups ('all' = deep copy of everything)."""
        if not isinstance(groups, (str, list, tuple)):
            raise ValueError(' get_data must recieve a string or a list')
        # Fix: '==' instead of 'is' -- identity comparison with a str literal
        # is implementation-dependent (SyntaxWarning on CPython >= 3.8).
        if groups == 'all':
            return self.__copy__()
        else:
            return DataOperator(self.get_groups_dictionary(groups))
class TfrecordCreator:
    """Serialize 2-D slices of 3-D medical volumes into one .tfrecord per group.

    Typical use: wrap a DataOperator, optionally configure resizing
    (resize_slices), z-score correction (calc_z_correction / set_z_correction),
    augmentation and dtypes, then call run().

    Fixes vs. the original: every `is`/`is not` comparison against a string
    or int literal is replaced with `==` -- literal identity is
    implementation-dependent (SyntaxWarning on CPython >= 3.8).
    """
    def __init__(self, dictionary):
        assert (isinstance(dictionary, DataOperator))
        self.dictionary = dictionary
        self.dst_path = ''
        self._shapes_tfrecord = dict()
        self.z_correction = False
        self.data_augmentation = False
        self.slice_dim = 2
        self.means_z = None
        self.stds_z = None
        self._used_roi_z_correction = False
        self.shuffle = False
        self.standar_maxmin = False
        self.network_side_label_check = None
        self.slices_ba = 0  # extra slices before/after the central one
        self._NN_IN_DTYPE = 'float'
        self._NN_OUT_DTYPE = 'float'
        self._NN_ROI_DTYPE = 'int'
        self.dtypes_dict = {
            NN_IN: self._NN_IN_DTYPE,
            NN_OUT: self._NN_OUT_DTYPE,
            NN_ROI: self._NN_ROI_DTYPE
        }
        self._DX = 5
        self._DY = 5
        self._ANGLE = 30
        self._read_function = None
        self._Z_CORR = False
    def _print_info(self, group):
        """Dump the creation settings to <dst_path>/<group>.log via logging."""
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        logger = logging.getLogger()
        logger.addHandler(logging.FileHandler(os.path.join(self.dst_path, group + '.log'), 'w'))
        # Deliberate local shadow: every print below goes through the logger.
        print = logger.info
        print('#######TFRECORD CREATION INFO#######################')
        print('-Dictionary of data: ')
        print(self.dictionary)
        print('-Group: %s' % group)
        print('-Shape tfrecord: %s' % self._shapes_tfrecord)
        print('-Z correction: %r' % self.z_correction)
        print('-Data augmentation: %r' % self.data_augmentation)
        print('-Slice dimension: %d' % self.slice_dim)
        print('-Means for Z correction: %s' % self.means_z)
        print('-Stds for Z correction: %s' % self.stds_z)
        print('-Use ROI in z correction: %r' % self._used_roi_z_correction)
        print('-Shuffle: %r' % self.shuffle)
        print('-NN_IN_DTYPE: %s' % self._NN_IN_DTYPE)
        print('-NN_OUT_DTYPE: %s' % self._NN_OUT_DTYPE)
        print('-NN_ROI_DTYPE: %s' % self._NN_ROI_DTYPE)
        print('DX for data aug: %d' % self._DX)
        print('DY for data aug: %d' % self._DY)
        print('ANGLE for data aug: %d' % self._ANGLE)
        print('Read function used: %s' % self._read_function)
        print('###################################################')
        logger.handlers.pop()
    @staticmethod
    def _check_valid_output_slice(network_vols3d_subject, slice_id, network_side):
        """Return True if slice `slice_id` of `network_side` has any nonzero label."""
        slice_mask = network_vols3d_subject[network_side][:, :, slice_id, :].astype(
            np.float32)
        if np.sum(slice_mask) > 0:
            contains_labels = True
        else:
            contains_labels = False
        return contains_labels
    @staticmethod
    def _read_nii(subject_path):
        """Load a NIfTI volume, preferring medpy when it is installed."""
        if medpy_found:
            vol, _ = medpy.io.load(subject_path)
        else:
            img = nib.load(subject_path)
            vol = np.squeeze(img.get_data())
        return vol
    def resize_slices(self, new_size, group='all_groups', network_side=None):
        """Register a target (H, W) per network side; None = all sides of the group(s)."""
        # Fix: '==' instead of 'is' for the string-literal comparison.
        if group == 'all_groups':
            groups_to_do = self.dictionary.get_list_groups()
        else:
            groups_to_do = [group]
        if network_side is None:
            for group in groups_to_do:
                for network_side in self.dictionary.get_network_sides(group):
                    self._set_size_side(group, network_side, new_size)
        else:
            for group in groups_to_do:
                self._set_size_side(group, network_side, new_size)
    def _set_size_side(self, group, network_side, new_size):
        """Validate and store the target (H, W) for one network side."""
        if not isinstance(new_size, tuple):
            raise ValueError('Error: "new_shape" must be a tuple')
        if len(new_size) != 2:
            raise ValueError('Error: "new_shape" must have two values')
        if network_side not in self.dictionary.get_network_sides(group):
            raise ValueError('Error: %s is not a network side')
        self._shapes_tfrecord[network_side] = new_size
    def set_read_function(self, new_read_function):
        """Override the volume reader (default is _read_nii)."""
        self._read_function = new_read_function
    def _read_data(self, subject_path):
        """Read one volume and apply axis-rolling, min-max scaling and padding."""
        if self._read_function is not None:
            vol = self._read_function(subject_path)
        else:
            vol = self._read_nii(subject_path)
        # Always perform a np.rollaxis, we want the slicing position last
        if self.slice_dim != 2:
            vol = np.rollaxis(vol, self.slice_dim, 3)
        if self.standar_maxmin:
            vol = (vol - vol.min()) / (vol.max() - vol.min())
        if self.slices_ba != 0:
            # Edge-pad along the slice axis so border slabs stay full-size.
            vol = np.pad(vol, ((0, 0), (0, 0), (self.slices_ba, self.slices_ba)), 'edge')
        return vol
    @staticmethod
    def _resize_slice(slice_image, newsize, inter):
        """Resize H, W with cv2; 'float' -> cubic, 'int' (labels) -> nearest."""
        if cv2_found:
            # Fix: '==' instead of 'is' for the literal comparisons.
            if inter == 'float':
                inter = cv2.INTER_CUBIC
            elif inter == 'int':
                inter = cv2.INTER_NEAREST
            slice_image = cv2.resize(slice_image, newsize,
                                     interpolation=inter)
            if slice_image.ndim == 2:
                # cv2 drops a singleton channel axis; restore it.
                slice_image = np.expand_dims(slice_image, axis=-1)
        else:
            raise ValueError(
                ' CV2 is not installed and is needed for resize slices, to install it use "sudo pip install opencv-python"')
        return slice_image
    def calc_z_correction(self, group, use_roi=False):
        # apriori the z_corr is only for network_in
        # INPUTS:
        # group: string that contains the group name that is going to be used to calculate the values for the z correction
        # use_roi: Boolean used to whether use a ROI to calculate the correction or not. If ROI channels != In channels
        # just the first roi channel is used
        # OUTPUT: Means and stds for z correction in a list in channel order
        means_per_channel = []
        stds_per_channel = []
        channel_list = self.dictionary.get_list_channels(group, NN_IN)
        subject_list = self.dictionary.get_list_subjects(group)
        for channel in channel_list:
            vol_list_flatten = []
            print('channel %d' % channel)
            for subject in subject_list:
                vol_subject = self._read_data(self.dictionary.get_subject_path(group, NN_IN, channel, subject))
                if use_roi:
                    self._used_roi_z_correction = True
                    # Use the matching ROI channel when counts line up,
                    # otherwise fall back to the first ROI channel.
                    if len(self.dictionary.get_list_channels(group, NN_IN)) == len(
                            self.dictionary.get_list_channels(group, NN_ROI)):
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, channel, subject))
                    else:
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, 1, subject))
                    vol_list_flatten.append(np.extract(roi_subject, vol_subject))
                    print(np.mean(np.extract(roi_subject, vol_subject)))
                else:
                    vol_list_flatten.append(vol_subject.flatten())
            data_for_scale = np.concatenate(vol_list_flatten)
            means_per_channel.append(np.mean(data_for_scale))
            stds_per_channel.append(np.std(data_for_scale))
        self.means_z = means_per_channel
        self.stds_z = stds_per_channel
        self._Z_CORR = True
        print(means_per_channel)
        print(stds_per_channel)
        return means_per_channel, stds_per_channel
    def set_z_correction(self, means_z, stds_z):
        """Manually supply per-channel means/stds instead of calc_z_correction."""
        self.means_z = means_z
        self.stds_z = stds_z
        self._Z_CORR = True
    def _create_tfrecord_writer(self, group):
        return tf.python_io.TFRecordWriter(os.path.join(self.dst_path, group + '.tfrecord'))
    def _get_subject_data(self, group, subject):
        """Read all channels of every side into {side: 4-D array (H, W, slices, channels)}."""
        network_vols3d_subject = {}
        for network_side in self.dictionary.get_network_sides(group):
            if self.dictionary.get_list_channels(group, network_side):
                vol_channels_list = []
                for channel in self.dictionary.get_list_channels(group, network_side):
                    vol = self._read_data(self.dictionary.get_subject_path(group, network_side, channel, subject))
                    vol = np.expand_dims(vol, axis=-1)
                    vol_channels_list.append(vol)
                network_vols3d_subject[network_side] = np.concatenate(vol_channels_list,
                                                                     axis=vol_channels_list[0].ndim - 1)
        return network_vols3d_subject
    def _convert_to_serial_and_write(self, group, data_list, tfrecord_writer):
        """Serialize each sample dict to a tf.train.Example and write it out."""
        def _int64_feature(value):
            return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
        def _bytes_feature(value):
            return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
        num_examples = len(data_list)
        sample_indices = list(range(num_examples))
        if self.shuffle:
            print('Doing shuffle!')
            random.shuffle(sample_indices)
        # Shape features are suffixed per side: rows_in, cols_out, channels_roi, ...
        side_words = ['in', 'out', 'roi']
        for index in sample_indices:
            sample_features = dict()
            for network_side, shape_side_word in zip(self.dictionary.get_network_sides(group), side_words):
                sample_features[network_side] = _bytes_feature(data_list[index][network_side].tostring())
                rows, cols, channels = data_list[index][network_side].shape
                sample_features['rows_' + shape_side_word] = _int64_feature(rows)
                sample_features['cols_' + shape_side_word] = _int64_feature(cols)
                sample_features['channels_' + shape_side_word] = _int64_feature(channels)
            example = tf.train.Example(features=tf.train.Features(feature=sample_features))
            tfrecord_writer.write(example.SerializeToString())
    def set_data_augmentation(self, dx=5, dy=5, angle=360):
        """Set the maximum shift (pixels) and rotation (degrees) for augmentation."""
        self._DX = dx
        self._DY = dy
        self._ANGLE = angle
    def set_dtypes(self, dtype_in='float', dtype_out='float', dtype_roi='int'):
        """Set the interpolation dtypes ('float' or 'int') for in/out/roi sides."""
        if dtype_in != 'float' and dtype_in != 'int' or dtype_out != 'float' and dtype_out != 'int' or dtype_roi != 'float' and dtype_roi != 'int':
            raise ValueError(' Bad dtype founded.')
        self._NN_IN_DTYPE = dtype_in
        self._NN_OUT_DTYPE = dtype_out
        self._NN_ROI_DTYPE = dtype_roi
        for network_side, dtype in zip(self.dictionary.get_network_sides(self.dictionary.get_list_groups()[0]),
                                       [self._NN_IN_DTYPE, self._NN_OUT_DTYPE, self._NN_ROI_DTYPE]):
            self.dtypes_dict[network_side] = dtype
    def _do_data_augmentation(self, group, slice_data_dict):
        """Apply one random shift+rotation, identical across all network sides."""
        dx = random.randrange(0, self._DX)
        dy = random.randrange(0, self._DY)
        angle = random.randrange(0, self._ANGLE)
        for network_side in self.dictionary.get_network_sides(group):
            dtype = self.dtypes_dict[network_side]
            if slice_data_dict[network_side].ndim > 3:
                slice_data_dict[network_side] = np.squeeze(slice_data_dict[network_side])
            if slice_data_dict[network_side].ndim < 3:
                slice_data_dict[network_side] = np.expand_dims(slice_data_dict[network_side], axis=-1)
            # Fix: '==' instead of 'is' for the literal comparisons.
            if dtype == 'float':
                slice_data_dict[network_side] = interpolation.shift(slice_data_dict[network_side], [dx, dy, 0],
                                                                    mode='nearest')
                slice_data_dict[network_side] = interpolation.rotate(slice_data_dict[network_side], angle,
                                                                     reshape=False,
                                                                     mode='nearest')
            elif dtype == 'int':
                # order=0 (nearest) keeps label values intact.
                slice_data_dict[network_side] = interpolation.shift(slice_data_dict[network_side], [dx, dy, 0],
                                                                    mode='nearest', order=0)
                slice_data_dict[network_side] = interpolation.rotate(slice_data_dict[network_side], angle,
                                                                     reshape=False,
                                                                     mode='nearest', order=0)
            else:
                raise ValueError('Error: "dtype" in %s not recognized, %s' % (network_side, dtype))
        return slice_data_dict
    def _list_slices_subject(self, group, subject):
        """Build the list of per-slice sample dicts (side -> float32 slab) for one subject."""
        network_vols3d_subject = self._get_subject_data(group, subject)
        _, _, slices, _ = network_vols3d_subject[NN_IN].shape
        tfrecord_slice_list = []
        # NOTE (translated from the original Spanish comment): the upper bound
        # subtracts an extra 20 slices as a hard-coded offset for pelvis MRI
        # acquisitions -- confirm before reusing on other anatomies.
        for slice_id in range(self.slices_ba, slices - self.slices_ba - 20):
            if self.network_side_label_check:
                if not self._check_valid_output_slice(network_vols3d_subject, slice_id,
                                                      self.network_side_label_check):
                    continue
            slice_data_dict = dict()
            for network_side in self.dictionary.get_network_sides(group):
                # Slab of 2*slices_ba+1 slices centred on slice_id.
                slab = network_vols3d_subject[network_side][
                       :, :, (slice_id - self.slices_ba):(slice_id + self.slices_ba + 1), :]
                if network_side in list(self._shapes_tfrecord.keys()) and \
                        slab.shape[0:2] != self._shapes_tfrecord[network_side]:
                    slice_data_dict[network_side] = self._resize_slice(
                        slab, self._shapes_tfrecord[network_side],
                        self.dtypes_dict[network_side]).astype(np.float32)
                else:
                    slice_data_dict[network_side] = slab.astype(np.float32)
            if self.z_correction:
                if self._Z_CORR:
                    slice_data_dict[NN_IN] = ((slice_data_dict[NN_IN] - self.means_z) / self.stds_z).astype(
                        np.float32)
                else:
                    raise ValueError(
                        'Error: The calculation of the Z correction input parameters must be done before creating the tfrecord, \
                or they must be sat manually in the object')
            if self.data_augmentation:
                slice_data_dict = self._do_data_augmentation(group, slice_data_dict)
            tfrecord_slice_list.append(slice_data_dict)
        return tfrecord_slice_list
    def run(self, subjects_buffer_size=1):
        """Create one tfrecord per group, buffering `subjects_buffer_size` subjects at a time."""
        if not isinstance(subjects_buffer_size, int):
            raise ValueError(' Error: "subjects_buffer_size" must be an integer.')
        groups_to_tfrecord = self.dictionary.get_list_groups()
        for group in groups_to_tfrecord:
            print('group %s' % group)
            writer = self._create_tfrecord_writer(group)
            subjects = self.dictionary.get_list_subjects(group)
            shuffle(subjects)
            for subject_id in range(0, len(subjects), subjects_buffer_size):
                subjects_buffer = subjects[subject_id:subject_id + subjects_buffer_size]
                list_slices_buffer = []
                for subject in subjects_buffer:
                    print('subject %s' % subject)
                    list_slices_buffer = list_slices_buffer + self._list_slices_subject(group, subject)
                self._convert_to_serial_and_write(group, list_slices_buffer, writer)
            self._print_info(group)
class TfrecordCreator_patch3d:
    """Serializes 3D patches ("cubes") sampled from subject volumes into per-group TFRecord files.

    Cubes are sampled on a regular grid whose stride is configured per group
    (``group_stride_dict``), optionally kept only when a chosen network side
    contains labels, optionally z-score corrected and augmented, and finally
    written as ``tf.train.Example`` records.
    """

    def __init__(self, dictionary):
        """Args:
            dictionary: DataOperator describing groups, network sides,
                channels and subject file paths.
        """
        assert (isinstance(dictionary, DataOperator))
        self.dictionary = dictionary
        self.dst_path = ''  # output directory for .tfrecord and .log files
        self._shapes_tfrecord = dict()  # network side -> expected cube shape
        self.set_patch_shape((32, 32, 32), group='all_groups', network_side=None)
        self.z_correction = False
        self.data_augmentation = False
        self.means_z = None
        self.stds_z = None
        self._used_roi_z_correction = False
        self.shuffle = False
        self.standar_maxmin = False  # min-max normalize every volume on read
        self.network_side_label_check = None  # side used to discard label-free cubes
        self.group_stride_dict = {}  # group -> grid sampling stride (voxels)
        self._NN_IN_DTYPE = 'float'
        self._NN_OUT_DTYPE = 'float'
        self._NN_ROI_DTYPE = 'int'
        self.dtypes_dict = {
            NN_IN: self._NN_IN_DTYPE,
            NN_OUT: self._NN_OUT_DTYPE,
            NN_ROI: self._NN_ROI_DTYPE
        }
        self._DX = 5
        self._DY = 5
        self._ANGLE = 30
        self._read_function = None  # optional user-supplied volume reader
        self._Z_CORR = False  # True once z-correction parameters are available

    def _print_info(self, group):
        """Dump the creator configuration to <dst_path>/<group>.log via the root logger."""
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        logger = logging.getLogger()
        logger.addHandler(logging.FileHandler(os.path.join(self.dst_path, group + '.log'), 'w'))
        log = logger.info  # FIX: do not shadow the print builtin
        log('#######TFRECORD CREATION INFO#######################')
        log('-Dictionary of data: ')
        log(self.dictionary)
        log('-Group: %s' % group)
        log('-Shape tfrecord: %s' % self._shapes_tfrecord)
        log('-Z correction: %r' % self.z_correction)
        log('-Data augmentation: %r' % self.data_augmentation)
        log('-Means for Z correction: %s' % self.means_z)
        log('-Stds for Z correction: %s' % self.stds_z)
        log('-Use ROI in z correction: %r' % self._used_roi_z_correction)
        log('-Shuffle: %r' % self.shuffle)
        log('-NN_IN_DTYPE: %s' % self._NN_IN_DTYPE)
        log('-NN_OUT_DTYPE: %s' % self._NN_OUT_DTYPE)
        log('-NN_ROI_DTYPE: %s' % self._NN_ROI_DTYPE)
        log('DX for data aug: %d' % self._DX)
        log('DY for data aug: %d' % self._DY)
        log('ANGLE for data aug: %d' % self._ANGLE)
        log('Read function used: %s' % self._read_function)
        log('###################################################')
        logger.handlers.pop()  # detach the per-group file handler

    @staticmethod
    def _check_valid_output_cube3d(network_vols3d_subject, cube_id, network_side, patch_shape):
        """Return True when the cube at ``cube_id`` of ``network_side`` contains any labels (non-zero sum)."""
        x_i, y_i, z_i = cube_id
        cube_mask = network_vols3d_subject[network_side][(x_i):(x_i + patch_shape[0]),
                                                         (y_i):(y_i + patch_shape[1]),
                                                         (z_i):(z_i + patch_shape[2])].astype(np.float32)
        return bool(np.sum(cube_mask) > 0)

    @staticmethod
    def _read_nii(subject_path):
        """Load a NIfTI volume; prefers medpy, falls back to nibabel (singleton dims squeezed)."""
        if medpy_found:
            vol, _ = medpy.io.load(subject_path)
        else:
            img = nib.load(subject_path)
            # NOTE(review): get_data() is deprecated in recent nibabel
            # (get_fdata() is the replacement but changes the dtype) — kept as-is.
            vol = np.squeeze(img.get_data())
        return vol

    def set_patch_shape(self, new_size, group='all_groups', network_side=None):
        """Register the 3D patch shape for one or all groups/network sides.

        Args:
            new_size: (x, y, z) tuple.
            group: group name, or 'all_groups' for every group in the dictionary.
            network_side: a single side, or None to apply to every side.
        """
        # BUG FIX: was "group is 'all_groups'" — identity of string literals
        # depends on CPython interning; use equality.
        if group == 'all_groups':
            groups_to_do = self.dictionary.get_list_groups()
        else:
            groups_to_do = [group]
        if network_side is None:
            for group in groups_to_do:
                for network_side in self.dictionary.get_network_sides(group):
                    self._set_size_side(group, network_side, new_size)
        else:
            for group in groups_to_do:
                self._set_size_side(group, network_side, new_size)

    def _set_size_side(self, group, network_side, new_size):
        """Validate and store the target cube shape for one network side.

        Raises:
            ValueError: if ``new_size`` is not a 3-tuple or the side is unknown.
        """
        if not isinstance(new_size, tuple):
            raise ValueError('Error: "new_shape" must be a tuple')
        if len(new_size) != 3:
            raise ValueError('Error: "new_shape" must have three values')
        if network_side not in self.dictionary.get_network_sides(group):
            # BUG FIX: the %s placeholder was never filled in the original.
            raise ValueError('Error: %s is not a network side' % network_side)
        self._shapes_tfrecord[network_side] = new_size

    def set_read_function(self, new_read_function):
        """Install a custom volume reader ``f(path) -> ndarray`` used instead of the NIfTI loader."""
        self._read_function = new_read_function

    def _read_data(self, subject_path):
        """Read one volume via the custom reader (if set) and optionally min-max normalize it."""
        if self._read_function is not None:
            vol = self._read_function(subject_path)
        else:
            vol = self._read_nii(subject_path)
        if self.standar_maxmin:
            vol = (vol - vol.min()) / (vol.max() - vol.min())
        return vol

    @staticmethod
    def _resize_slice(slice_image, newsize, inter):
        """Resize one 2D slice with OpenCV ('float' -> cubic, 'int' -> nearest).

        Raises:
            ValueError: if OpenCV is not installed.
        """
        if not cv2_found:
            raise ValueError(
                ' CV2 is not installed and is needed for resize slices, to install it use "sudo pip install opencv-python"')
        # BUG FIX: comparisons used 'is' against literals; use equality.
        if inter == 'float':
            inter = cv2.INTER_CUBIC
        elif inter == 'int':
            inter = cv2.INTER_NEAREST
        slice_image = cv2.resize(slice_image, newsize, interpolation=inter)
        if slice_image.ndim == 2:
            # cv2.resize drops a trailing singleton channel axis; restore it.
            slice_image = np.expand_dims(slice_image, axis=-1)
        return slice_image

    def calc_z_correction(self, group, use_roi=False):
        """Compute per-channel mean/std over a group for z-score correction.

        Args:
            group: group whose NN_IN volumes are used for the statistics.
            use_roi: restrict statistics to ROI voxels. If the number of ROI
                channels differs from the number of input channels, ROI
                channel 1 is used for every input channel.

        Returns:
            (means, stds) per-channel lists, in channel order. Also stored on
            the object; sets ``_Z_CORR``.
        """
        means_per_channel = []
        stds_per_channel = []
        channel_list = self.dictionary.get_list_channels(group, NN_IN)
        subject_list = self.dictionary.get_list_subjects(group)
        for channel in channel_list:
            vol_list_flatten = []
            print('channel %d' % channel)
            for subject in subject_list:
                vol_subject = self._read_data(self.dictionary.get_subject_path(group, NN_IN, channel, subject))
                if use_roi:
                    self._used_roi_z_correction = True
                    if len(self.dictionary.get_list_channels(group, NN_IN)) == len(
                            self.dictionary.get_list_channels(group, NN_ROI)):
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, channel, subject))
                    else:
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, 1, subject))
                    vol_list_flatten.append(np.extract(roi_subject, vol_subject))
                    print(np.mean(np.extract(roi_subject, vol_subject)))
                else:
                    vol_list_flatten.append(vol_subject.flatten())
            data_for_scale = np.concatenate(vol_list_flatten)
            means_per_channel.append(np.mean(data_for_scale))
            stds_per_channel.append(np.std(data_for_scale))
        self.means_z = means_per_channel
        self.stds_z = stds_per_channel
        self._Z_CORR = True
        print(means_per_channel)
        print(stds_per_channel)
        return means_per_channel, stds_per_channel

    def set_z_correction(self, means_z, stds_z):
        """Manually set the per-channel z-correction parameters (marks them as available)."""
        self.means_z = means_z
        self.stds_z = stds_z
        self._Z_CORR = True

    def _create_tfrecord_writer(self, group):
        """Open a TFRecordWriter at <dst_path>/<group>.tfrecord."""
        return tf.python_io.TFRecordWriter(os.path.join(self.dst_path, group + '.tfrecord'))

    def _get_subject_data(self, group, subject):
        """Load every network side of ``subject`` as an (x, y, z, channels) array keyed by side."""
        network_vols3d_subject = {}
        for network_side in self.dictionary.get_network_sides(group):
            if self.dictionary.get_list_channels(group, network_side):
                vol_channels_list = []
                for channel in self.dictionary.get_list_channels(group, network_side):
                    vol = self._read_data(self.dictionary.get_subject_path(group, network_side, channel, subject))
                    vol = np.expand_dims(vol, axis=-1)
                    vol_channels_list.append(vol)
                network_vols3d_subject[network_side] = np.concatenate(vol_channels_list,
                                                                      axis=vol_channels_list[0].ndim - 1)
        return network_vols3d_subject

    def _convert_to_serial_and_write(self, group, data_list, tfrecord_writer):
        """Serialize every cube dict in ``data_list`` as a tf.train.Example (optionally shuffled)."""
        def _int64_feature(value):
            return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

        def _bytes_feature(value):
            return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

        num_examples = len(data_list)
        sample_indices = list(range(num_examples))
        if self.shuffle:
            print('Doing shuffle!')
            random.shuffle(sample_indices)
        side_words = ['in', 'out', 'roi']
        for index in sample_indices:
            sample_features = dict()
            for network_side, shape_side_word in zip(self.dictionary.get_network_sides(group), side_words):
                sample_features[network_side] = _bytes_feature(data_list[index][network_side].tostring())
                rows, cols, depth, channels = data_list[index][network_side].shape
                sample_features['rows_' + shape_side_word] = _int64_feature(rows)
                sample_features['cols_' + shape_side_word] = _int64_feature(cols)
                sample_features['depth_' + shape_side_word] = _int64_feature(depth)
                sample_features['channels_' + shape_side_word] = _int64_feature(channels)
            example = tf.train.Example(features=tf.train.Features(feature=sample_features))
            tfrecord_writer.write(example.SerializeToString())

    def set_data_augmentation(self, dx=5, dy=5, angle=360):
        """Configure augmentation parameters (shift in x/y, max rotation angle in degrees)."""
        self._DX = dx
        self._DY = dy
        self._ANGLE = angle

    def set_dtypes(self, dtype_in='float', dtype_out='float', dtype_roi='int'):
        """Set per-side interpolation dtypes; each must be 'float' or 'int'."""
        valid = ('float', 'int')
        if dtype_in not in valid or dtype_out not in valid or dtype_roi not in valid:
            raise ValueError(' Bad dtype found.')
        self._NN_IN_DTYPE = dtype_in
        self._NN_OUT_DTYPE = dtype_out
        self._NN_ROI_DTYPE = dtype_roi
        for network_side, dtype in zip(self.dictionary.get_network_sides(self.dictionary.get_list_groups()[0]),
                                       [self._NN_IN_DTYPE, self._NN_OUT_DTYPE, self._NN_ROI_DTYPE]):
            self.dtypes_dict[network_side] = dtype

    def _do_data_augmentation(self, group, slice_data_dict):
        """Rotate every network side of one sample by the same random angle.

        'float' sides use spline interpolation; 'int' sides use order-0
        (nearest) so label values are preserved.
        """
        angle = random.randrange(0, self._ANGLE)
        for network_side in self.dictionary.get_network_sides(group):
            dtype = self.dtypes_dict[network_side]
            # BUG FIX: dtype comparisons used 'is' against string literals.
            if dtype == 'float':
                slice_data_dict[network_side] = interpolation.rotate(slice_data_dict[network_side], angle,
                                                                     reshape=False,
                                                                     mode='nearest')
            elif dtype == 'int':
                slice_data_dict[network_side] = interpolation.rotate(slice_data_dict[network_side], angle,
                                                                     reshape=False,
                                                                     mode='nearest', order=0)
            else:
                raise ValueError('Error: "dtype" in %s not recognized, %s' % (network_side, dtype))
        return slice_data_dict

    def _list_cubes_subject(self, group, subject):
        """Sample grid-strided 3D cubes from one subject.

        Returns a list of {network_side: cube} dicts. Positions where any
        side's cube would be clipped by the volume border are dropped, as are
        positions whose label side is empty when ``network_side_label_check``
        is set.
        """
        network_vols3d_subject = self._get_subject_data(group, subject)
        x, y, z, _ = network_vols3d_subject[NN_IN].shape
        tfrecord_cube_list = []
        stride = int(self.group_stride_dict[group])
        for x_i in range(0, x, stride):
            for y_i in range(0, y, stride):
                for z_i in range(0, z, stride):
                    cube_id = (x_i, y_i, z_i)
                    if self.network_side_label_check:
                        if not self._check_valid_output_cube3d(network_vols3d_subject, cube_id,
                                                               self.network_side_label_check,
                                                               self._shapes_tfrecord[self.network_side_label_check]):
                            continue
                    cube_data_dict = dict()
                    for network_side in self.dictionary.get_network_sides(group):
                        x_patch, y_patch, z_patch = self._shapes_tfrecord[network_side]
                        cube = network_vols3d_subject[network_side][(x_i):(x_i + x_patch),
                                                                    (y_i):(y_i + y_patch),
                                                                    (z_i):(z_i + z_patch)]
                        if network_side in list(self._shapes_tfrecord.keys()) and \
                                cube.shape[0:3] != self._shapes_tfrecord[network_side]:
                            # Cube clipped by the volume border: skip this position.
                            break
                        else:
                            cube_data_dict[network_side] = cube.astype(np.float32)
                    # BUG FIX: was "len(...) is len(...)" — int identity is an
                    # implementation detail; compare with equality.
                    if len(cube_data_dict) == len(self.dictionary.get_network_sides(group)):
                        if self.z_correction:
                            if self._Z_CORR:
                                cube_data_dict[NN_IN] = ((cube_data_dict[NN_IN] - self.means_z) / self.stds_z).astype(
                                    np.float32)
                            else:
                                raise ValueError(
                                    'Error: The calculation of the Z correction input parameters must be done '
                                    'before creating the tfrecord, or they must be set manually in the object')
                        if self.data_augmentation:
                            cube_data_dict = self._do_data_augmentation(group, cube_data_dict)
                        tfrecord_cube_list.append(cube_data_dict)
        return tfrecord_cube_list

    def run(self, subjects_buffer_size=1):
        """Create one TFRecord file per group from buffered subject cubes.

        Args:
            subjects_buffer_size: number of subjects whose cubes are kept in
                memory before being serialized and written.

        Raises:
            ValueError: if ``subjects_buffer_size`` is not an int.
        """
        if not isinstance(subjects_buffer_size, int):
            raise ValueError(' Error: "subjects_buffer_size" must be an integer.')
        groups_to_tfrecord = self.dictionary.get_list_groups()
        for group in groups_to_tfrecord:
            print('group %s' % group)
            writer = self._create_tfrecord_writer(group)
            subjects = self.dictionary.get_list_subjects(group)
            shuffle(subjects)
            for subject_id in range(0, len(subjects), subjects_buffer_size):
                subjects_buffer = subjects[subject_id:subject_id + subjects_buffer_size]
                list_slices_buffer = []
                gc.collect()  # free the previous buffer before loading the next one
                for subject in subjects_buffer:
                    print('subject %s' % subject)
                    list_slices_buffer = list_slices_buffer + self._list_cubes_subject(group, subject)
                self._convert_to_serial_and_write(group, list_slices_buffer, writer)
                list_slices_buffer = None
            self._print_info(group)
class Dataset_patch3d:
    """Builds an in-memory dataset of full subject volumes grouped by data group.

    Unlike the TFRecord creators, ``run`` returns a dict of per-group lists
    holding each subject's side volumes plus the indices of ROI voxels.
    """

    def __init__(self, dictionary):
        """Args:
            dictionary: DataOperator describing groups, network sides,
                channels and subject file paths.
        """
        assert (isinstance(dictionary, DataOperator))
        self.dictionary = dictionary
        self.dst_path = ''  # output directory used only for logging
        self._shapes_tfrecord = dict()
        self.z_correction = False
        self.data_augmentation = False
        self.means_z = None
        self.stds_z = None
        self._used_roi_z_correction = False
        self.shuffle = False
        self.standar_maxmin = False  # min-max normalize every volume on read
        self.network_side_label_check = None
        self.group_stride_dict = None
        self._NN_IN_DTYPE = 'float'
        self._NN_OUT_DTYPE = 'float'
        self._NN_ROI_DTYPE = 'int'
        self.padding = None  # symmetric zero-padding (voxels) applied on all three axes
        self.dtypes_dict = {
            NN_IN: self._NN_IN_DTYPE,
            NN_OUT: self._NN_OUT_DTYPE,
            NN_ROI: self._NN_ROI_DTYPE
        }
        self._DX = 5
        self._DY = 5
        self._ANGLE = 30
        self._read_function = None  # optional user-supplied volume reader
        self._Z_CORR = False

    def _print_info(self, group):
        """Dump the creator configuration to <dst_path>/<group>.log via the root logger."""
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        logger = logging.getLogger()
        logger.addHandler(logging.FileHandler(os.path.join(self.dst_path, group + '.log'), 'w'))
        log = logger.info  # FIX: do not shadow the print builtin
        log('#######TFRECORD CREATION INFO#######################')
        log('-Dictionary of data: ')
        log(self.dictionary)
        log('-Group: %s' % group)
        log('-Shape tfrecord: %s' % self._shapes_tfrecord)
        log('-Z correction: %r' % self.z_correction)
        log('-Data augmentation: %r' % self.data_augmentation)
        log('-Means for Z correction: %s' % self.means_z)
        log('-Stds for Z correction: %s' % self.stds_z)
        log('-Use ROI in z correction: %r' % self._used_roi_z_correction)
        log('-Shuffle: %r' % self.shuffle)
        log('-NN_IN_DTYPE: %s' % self._NN_IN_DTYPE)
        log('-NN_OUT_DTYPE: %s' % self._NN_OUT_DTYPE)
        log('-NN_ROI_DTYPE: %s' % self._NN_ROI_DTYPE)
        log('DX for data aug: %d' % self._DX)
        log('DY for data aug: %d' % self._DY)
        log('ANGLE for data aug: %d' % self._ANGLE)
        log('Read function used: %s' % self._read_function)
        log('###################################################')
        logger.handlers.pop()  # detach the per-group file handler

    @staticmethod
    def _read_nii(subject_path):
        """Load a NIfTI volume; prefers medpy, falls back to nibabel (singleton dims squeezed)."""
        if medpy_found:
            vol, _ = medpy.io.load(subject_path)
        else:
            img = nib.load(subject_path)
            # NOTE(review): get_data() is deprecated in recent nibabel
            # (get_fdata() is the replacement but changes the dtype) — kept as-is.
            vol = np.squeeze(img.get_data())
        return vol

    def set_read_function(self, new_read_function):
        """Install a custom volume reader ``f(path) -> ndarray`` used instead of the NIfTI loader."""
        self._read_function = new_read_function

    def _read_data(self, subject_path):
        """Read one volume via the custom reader (if set) and optionally min-max normalize it."""
        if self._read_function is not None:
            vol = self._read_function(subject_path)
        else:
            vol = self._read_nii(subject_path)
        if self.standar_maxmin:
            vol = (vol - vol.min()) / (vol.max() - vol.min())
        return vol

    def _get_subject_data(self, group, subject):
        """Load every network side of ``subject`` as an (x, y, z, channels) array keyed by side.

        When ``padding`` is set, each channel volume is zero-padded
        symmetrically on all three spatial axes before stacking.
        """
        network_vols3d_subject = {}
        for network_side in self.dictionary.get_network_sides(group):
            if self.dictionary.get_list_channels(group, network_side):
                vol_channels_list = []
                for channel in self.dictionary.get_list_channels(group, network_side):
                    vol = self._read_data(self.dictionary.get_subject_path(group, network_side, channel, subject))
                    if self.padding is not None:
                        vol = np.pad(vol, (
                            (self.padding, self.padding), (self.padding, self.padding),
                            (self.padding, self.padding)),
                                     'constant', constant_values=0)
                    vol = np.expand_dims(vol, axis=-1)
                    vol_channels_list.append(vol)
                network_vols3d_subject[network_side] = np.concatenate(vol_channels_list,
                                                                      axis=-1)
        return network_vols3d_subject

    def set_dtypes(self, dtype_in='float', dtype_out='float', dtype_roi='int'):
        """Set per-side dtypes; each must be 'float' or 'int'."""
        valid = ('float', 'int')
        if dtype_in not in valid or dtype_out not in valid or dtype_roi not in valid:
            raise ValueError(' Bad dtype found.')
        self._NN_IN_DTYPE = dtype_in
        self._NN_OUT_DTYPE = dtype_out
        self._NN_ROI_DTYPE = dtype_roi
        for network_side, dtype in zip(self.dictionary.get_network_sides(self.dictionary.get_list_groups()[0]),
                                       [self._NN_IN_DTYPE, self._NN_OUT_DTYPE, self._NN_ROI_DTYPE]):
            self.dtypes_dict[network_side] = dtype

    def run(self):
        """Load every subject of every group into memory.

        Returns:
            dict: group -> list of ``{'subject_data_dict': {side: volume},
            'index_list': np.where(...)}`` where ``index_list`` holds the
            coordinates of voxels equal to 1 in the first ROI channel.
        """
        groups_to_tfrecord = self.dictionary.get_list_groups()
        dictionary_dataset = {}
        for group in groups_to_tfrecord:
            print('group %s' % group)
            subjects = self.dictionary.get_list_subjects(group)
            shuffle(subjects)
            group_subject_list = []
            for subject_id in subjects:
                dictionary_subject = {}
                subject_data = self._get_subject_data(group, subject_id)
                dictionary_subject['subject_data_dict'] = subject_data
                dictionary_subject['index_list'] = np.where(subject_data[NN_ROI][:, :, :, 0] == 1)
                group_subject_list.append(dictionary_subject)
            dictionary_dataset[group] = group_subject_list
        return dictionary_dataset
class ProductionCreator:
    """Yields per-slice samples for inference ("production") and rebuilds NIfTI volumes from results.

    Slices are extracted along ``slice_dim`` with ``slices_ba`` context
    slices before/after, optionally resized to the registered shapes and
    z-score corrected; ``get_next_slice`` streams them subject by subject.
    """

    def __init__(self, dictionary):
        """Args:
            dictionary: DataOperator describing groups, network sides,
                channels and subject file paths.
        """
        assert (isinstance(dictionary, DataOperator))
        self.dictionary = dictionary
        self.dst_path = ''  # output directory for logs and reconstructed NIfTIs
        self._shapes_tfrecord = dict()  # network side -> expected slice shape
        self.z_correction = False
        self.slice_dim = 2  # axis along which slices are extracted
        self.means_z = None
        self.stds_z = None
        self._used_roi_z_correction = False
        self.network_side_label_check = None  # side used to discard label-free slices
        self.slices_ba = 0  # number of context slices before/after each slice
        self._NN_IN_DTYPE = 'float'
        self._NN_OUT_DTYPE = 'float'
        self._NN_ROI_DTYPE = 'int'
        self.dtypes_dict = {
            NN_IN: self._NN_IN_DTYPE,
            NN_OUT: self._NN_OUT_DTYPE,
            NN_ROI: self._NN_ROI_DTYPE
        }
        self._read_function = None  # optional user-supplied volume reader
        self._Z_CORR = False
        self.standar_maxmin = False  # min-max normalize every volume on read

    def _print_info(self, group):
        """Dump the creator configuration to <dst_path>/<group>.log via the root logger."""
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        logger = logging.getLogger()
        logger.addHandler(logging.FileHandler(os.path.join(self.dst_path, group + '.log'), 'w'))
        log = logger.info  # FIX: do not shadow the print builtin
        log('#######PRODUCTION CREATION INFO#######################')
        log('-Dictionary of data: ')
        log(self.dictionary)
        log('-Group: %s' % group)
        log('-Shape tfrecord: %s' % self._shapes_tfrecord)
        log('-Z correction: %r' % self.z_correction)
        log('-Slice dimension: %d' % self.slice_dim)
        log('-Means for Z correction: %s' % self.means_z)
        log('-Stds for Z correction: %s' % self.stds_z)
        log('-Use ROI in z correction: %r' % self._used_roi_z_correction)
        log('-NN_IN_DTYPE: %s' % self._NN_IN_DTYPE)
        log('-NN_OUT_DTYPE: %s' % self._NN_OUT_DTYPE)
        log('-NN_ROI_DTYPE: %s' % self._NN_ROI_DTYPE)
        log('Read function used: %s' % self._read_function)
        log('###################################################')
        logger.handlers.pop()  # detach the per-group file handler

    @staticmethod
    def _check_valid_output_slice(network_vols3d_subject, slice_id, network_side):
        """Return True when slice ``slice_id`` of ``network_side`` contains any labels (non-zero sum)."""
        slice_mask = network_vols3d_subject[network_side][:, :, slice_id, :].astype(np.float32)
        return bool(np.sum(slice_mask) > 0)

    @staticmethod
    def _read_nii(subject_path):
        """Load a NIfTI volume; prefers medpy, falls back to nibabel (singleton dims squeezed)."""
        if medpy_found:
            vol, _ = medpy.io.load(subject_path)
        else:
            img = nib.load(subject_path)
            # NOTE(review): get_data() is deprecated in recent nibabel
            # (get_fdata() is the replacement but changes the dtype) — kept as-is.
            vol = np.squeeze(img.get_data())
        return vol

    def resize_slices(self, new_size, group='all_groups', network_side=None):
        """Register a target (rows, cols) slice shape for one or all groups/sides."""
        # BUG FIX: was "group is 'all_groups'" — identity of string literals
        # depends on CPython interning; use equality.
        if group == 'all_groups':
            groups_to_do = self.dictionary.get_list_groups()
        else:
            groups_to_do = [group]
        if network_side is None:
            for group in groups_to_do:
                for network_side in self.dictionary.get_network_sides(group):
                    self._set_size_side(group, network_side, new_size)
        else:
            for group in groups_to_do:
                self._set_size_side(group, network_side, new_size)

    def _set_size_side(self, group, network_side, new_size):
        """Validate and store the target slice shape for one network side.

        Raises:
            ValueError: if ``new_size`` is not a 2-tuple or the side is unknown.
        """
        if not isinstance(new_size, tuple):
            raise ValueError('Error: "new_shape" must be a tuple')
        if len(new_size) != 2:
            raise ValueError('Error: "new_shape" must have two values')
        if network_side not in self.dictionary.get_network_sides(group):
            # BUG FIX: the %s placeholder was never filled in the original.
            raise ValueError('Error: %s is not a network side' % network_side)
        self._shapes_tfrecord[network_side] = new_size

    def set_read_function(self, new_read_function):
        """Install a custom volume reader ``f(path) -> ndarray`` used instead of the NIfTI loader."""
        self._read_function = new_read_function

    def _read_data(self, subject_path):
        """Read one volume, roll the slicing axis last, normalize and edge-pad context slices."""
        if self._read_function is not None:
            vol = self._read_function(subject_path)
        else:
            vol = self._read_nii(subject_path)
        # Always perform a np.rollaxis, we want the slicing position last
        if self.slice_dim != 2:
            vol = np.rollaxis(vol, self.slice_dim, 3)
        if self.standar_maxmin:
            vol = (vol - vol.min()) / (vol.max() - vol.min())
        if self.slices_ba != 0:
            # Edge-pad so the first/last slices still get full context windows.
            vol = np.pad(vol, ((0, 0), (0, 0), (self.slices_ba, self.slices_ba)), 'edge')
        return vol

    @staticmethod
    def _resize_slice(slice_image, newsize, inter):
        """Resize one 2D slice with OpenCV ('float' -> cubic, 'int' -> nearest).

        Raises:
            ValueError: if OpenCV is not installed.
        """
        if not cv2_found:
            raise ValueError(
                ' CV2 is not installed and is needed for resize slices, to install it use "sudo pip install opencv-python"')
        # BUG FIX: comparisons used 'is' against literals; use equality.
        if inter == 'float':
            inter = cv2.INTER_CUBIC
        elif inter == 'int':
            inter = cv2.INTER_NEAREST
        slice_image = cv2.resize(slice_image, newsize, interpolation=inter)
        if slice_image.ndim == 2:
            # cv2.resize drops a trailing singleton channel axis; restore it.
            slice_image = np.expand_dims(slice_image, axis=-1)
        return slice_image

    def calc_z_correction(self, group, use_roi=False):
        """Compute per-channel mean/std over a group for z-score correction.

        Args:
            group: group whose NN_IN volumes are used for the statistics.
            use_roi: restrict statistics to ROI voxels. If the number of ROI
                channels differs from the number of input channels, ROI
                channel 1 is used for every input channel.

        Returns:
            (means, stds) per-channel lists, in channel order. Also stored on
            the object; sets ``_Z_CORR``.
        """
        means_per_channel = []
        stds_per_channel = []
        channel_list = self.dictionary.get_list_channels(group, NN_IN)
        subject_list = self.dictionary.get_list_subjects(group)
        for channel in channel_list:
            vol_list_flatten = []
            for subject in subject_list:
                vol_subject = self._read_data(self.dictionary.get_subject_path(group, NN_IN, channel, subject))
                if use_roi:
                    self._used_roi_z_correction = True
                    if len(self.dictionary.get_list_channels(group, NN_IN)) == len(
                            self.dictionary.get_list_channels(group, NN_ROI)):
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, channel, subject))
                    else:
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, 1, subject))
                    vol_list_flatten.append(np.extract(roi_subject, vol_subject))
                else:
                    vol_list_flatten.append(vol_subject.flatten())
            data_for_scale = np.concatenate(vol_list_flatten)
            means_per_channel.append(np.mean(data_for_scale))
            stds_per_channel.append(np.std(data_for_scale))
        self.means_z = means_per_channel
        self.stds_z = stds_per_channel
        self._Z_CORR = True
        print(means_per_channel)
        print(stds_per_channel)
        return means_per_channel, stds_per_channel

    def set_z_correction(self, means_z, stds_z):
        """Manually set the per-channel z-correction parameters (marks them as available)."""
        self.means_z = means_z
        self.stds_z = stds_z
        self._Z_CORR = True

    def _get_subject_data(self, group, subject):
        """Load every network side of ``subject`` as a (rows, cols, slices, channels) array keyed by side."""
        network_vols3d_subject = {}
        for network_side in self.dictionary.get_network_sides(group):
            if self.dictionary.get_list_channels(group, network_side):
                vol_channels_list = []
                for channel in self.dictionary.get_list_channels(group, network_side):
                    vol = self._read_data(self.dictionary.get_subject_path(group, network_side, channel, subject))
                    vol = np.expand_dims(vol, axis=-1)
                    vol_channels_list.append(vol)
                network_vols3d_subject[network_side] = np.concatenate(vol_channels_list,
                                                                      axis=vol_channels_list[0].ndim - 1)
        return network_vols3d_subject

    def set_dtypes(self, dtype_in='float', dtype_out='float', dtype_roi='int'):
        """Set per-side interpolation dtypes; each must be 'float' or 'int'."""
        valid = ('float', 'int')
        if dtype_in not in valid or dtype_out not in valid or dtype_roi not in valid:
            raise ValueError(' Bad dtype found.')
        self._NN_IN_DTYPE = dtype_in
        self._NN_OUT_DTYPE = dtype_out
        self._NN_ROI_DTYPE = dtype_roi
        for network_side, dtype in zip(self.dictionary.get_network_sides(self.dictionary.get_list_groups()[0]),
                                       [self._NN_IN_DTYPE, self._NN_OUT_DTYPE, self._NN_ROI_DTYPE]):
            self.dtypes_dict[network_side] = dtype

    def _list_slices_subject(self, group, subject):
        """Extract every slice window (with ``slices_ba`` context slices) of one subject.

        Windows are resized to the registered shape when needed, squeezed back
        to (rows, cols, channels), and optionally z-score corrected.
        """
        network_vols3d_subject = self._get_subject_data(group, subject)
        _, _, slices, _ = network_vols3d_subject[NN_IN].shape
        tfrecord_slice_list = []
        for slice_id in range(self.slices_ba, slices - self.slices_ba):
            if self.network_side_label_check:
                # Keep only slices whose label side contains annotations.
                if not self._check_valid_output_slice(network_vols3d_subject, slice_id,
                                                      self.network_side_label_check):
                    continue
            slice_data_dict = dict()
            for network_side in self.dictionary.get_network_sides(group):
                window = network_vols3d_subject[network_side][:, :,
                                                              (slice_id - self.slices_ba):(
                                                                      slice_id + self.slices_ba + 1), :]
                if network_side in list(self._shapes_tfrecord.keys()) and \
                        window.shape[0:2] != self._shapes_tfrecord[network_side]:
                    slice_data_dict[network_side] = self._resize_slice(
                        window, self._shapes_tfrecord[network_side],
                        self.dtypes_dict[network_side]).astype(np.float32)
                else:
                    slice_data_dict[network_side] = window.astype(np.float32)
                if slice_data_dict[network_side].ndim > 3:
                    # Drop the singleton depth axis left by the window slicing.
                    slice_data_dict[network_side] = np.squeeze(slice_data_dict[network_side])
                if slice_data_dict[network_side].ndim == 2:
                    # Squeezing a single-channel slice also drops the channel axis; restore it.
                    slice_data_dict[network_side] = np.expand_dims(slice_data_dict[network_side], axis=-1)
            if self.z_correction:
                if self._Z_CORR:
                    slice_data_dict[NN_IN] = ((slice_data_dict[NN_IN] - self.means_z) / self.stds_z).astype(
                        np.float32)
                else:
                    raise ValueError(
                        'Error: The calculation of the Z correction input parameters must be done '
                        'before creating the tfrecord, or they must be set manually in the object')
            tfrecord_slice_list.append(slice_data_dict)
        return tfrecord_slice_list

    def get_next_slice(self, group, subject, use_roi=True):
        """Generator yielding (in, out[, roi]) slice tuples for one subject."""
        print('group %s' % group)
        print('subject %s' % subject)
        for slices in self._list_slices_subject(group, subject):
            if use_roi:
                yield (slices[NN_IN], slices[NN_OUT], slices[NN_ROI])
            else:
                yield (slices[NN_IN], slices[NN_OUT])

    def gen_nifti_from_list(self, slice_list, slice_gt_list, subject):
        """Stack predicted and ground-truth slices and save them as NIfTI files.

        Saves <dst_path>/<subject>.nii.gz and <dst_path>/<subject>_gt.nii.gz
        with an identity affine; the slice axis is rolled back to position 2.
        """
        array_slices = np.asarray(slice_list)
        array_slices = np.rollaxis(array_slices, 0, 3)
        img = nib.Nifti1Image(array_slices, np.eye(4))
        save_path = os.path.join(self.dst_path, subject + '.nii.gz')
        nib.save(img, save_path)
        slice_gt_list = np.asarray(slice_gt_list)
        slice_gt_list = np.rollaxis(slice_gt_list, 0, 3)
        img = nib.Nifti1Image(slice_gt_list, np.eye(4))
        save_path = os.path.join(self.dst_path, subject + '_gt.nii.gz')
        nib.save(img, save_path)
class TfrecordCreator3D:
    def __init__(self, dictionary):
        """Initialize the 2.5D (slice-based) TFRecord creator.

        Args:
            dictionary: DataOperator describing groups, network sides,
                channels and subject file paths.
        """
        assert (isinstance(dictionary, DataOperator))
        self.dictionary = dictionary
        self.dst_path = ''  # output directory for .tfrecord and .log files
        self._shapes_tfrecord = dict()  # network side -> target (rows, cols) slice shape
        self.z_correction = False  # apply z-score correction to NN_IN when True
        self.data_augmentation = False
        self.slice_dim = 2  # axis along which slices are extracted
        self.means_z = None  # per-channel means for z correction
        self.stds_z = None  # per-channel stds for z correction
        self._used_roi_z_correction = False
        self.shuffle = False  # shuffle sample order before writing
        self.network_side_label_check = None  # side used to discard label-free slices
        self._NN_IN_DTYPE = 'float'
        self._NN_OUT_DTYPE = 'float'
        self._NN_ROI_DTYPE = 'int'
        self.dtypes_dict = {
            NN_IN: self._NN_IN_DTYPE,
            NN_OUT: self._NN_OUT_DTYPE,
            NN_ROI: self._NN_ROI_DTYPE
        }
        self._DX = 5  # data-augmentation x shift
        self._DY = 5  # data-augmentation y shift
        self._ANGLE = 360  # data-augmentation max rotation angle (degrees)
        self._read_function = None  # optional user-supplied volume reader
        self._Z_CORR = False  # True once z-correction parameters are available
def _print_info(self, group):
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
logger.addHandler(logging.FileHandler(os.path.join(self.dst_path, group + '.log'), 'w'))
print = logger.info
print('#######TFRECORD CREATION INFO#######################')
print('-Dictionary of data: ')
print(self.dictionary)
print('-Group: %s' % group)
print('-Shape tfrecord: %s' % self._shapes_tfrecord)
print('-Z correction: %r' % self.z_correction)
print('-Data augmentation: %r' % self.data_augmentation)
print('-Slice dimension: %d' % self.slice_dim)
print('-Means for Z correction: %s' % self.means_z)
print('-Stds for Z correction: %s' % self.stds_z)
print('-Use ROI in z correction: %r' % self._used_roi_z_correction)
print('-Shuffle: %r' % self.shuffle)
print('-NN_IN_DTYPE: %s' % self._NN_IN_DTYPE)
print('-NN_OUT_DTYPE: %s' % self._NN_OUT_DTYPE)
print('-NN_ROI_DTYPE: %s' % self._NN_ROI_DTYPE)
print('DX for data aug: %d' % self._DX)
print('DY for data aug: %d' % self._DY)
print('ANGLE for data aug: %d' % self._ANGLE)
print('Read function used: %s' % self._read_function)
print('###################################################')
logger.handlers.pop()
@staticmethod
def _check_valid_output_slice(network_vols3d_subject, slice_id, network_side):
slice_mask = network_vols3d_subject[network_side][:, :, slice_id, :].astype(
np.float32)
if np.sum(slice_mask) > 0:
contains_labels = True
else:
contains_labels = False
return contains_labels
@staticmethod
def _read_nii(subject_path):
if medpy_found:
vol, _ = medpy.io.load(subject_path)
else:
img = nib.load(subject_path)
vol = np.squeeze(img.get_data())
return vol
def resize_slices(self, new_size, group='all_groups', network_side=None):
if group is 'all_groups':
groups_to_do = self.dictionary.get_list_groups()
else:
groups_to_do = [group]
if network_side is None:
for group in groups_to_do:
for network_side in self.dictionary.get_network_sides(group):
self._set_size_side(group, network_side, new_size)
else:
for group in groups_to_do:
self._set_size_side(group, network_side, new_size)
def _set_size_side(self, group, network_side, new_size):
if not isinstance(new_size, tuple):
raise ValueError('Error: "new_shape" must be a tuple')
if len(new_size) != 2:
raise ValueError('Error: "new_shape" must have two values')
if network_side not in self.dictionary.get_network_sides(group):
raise ValueError('Error: %s is not a network side')
self._shapes_tfrecord[network_side] = new_size
    def set_read_function(self, new_read_function):
        """Install a custom volume reader ``f(path) -> ndarray`` used instead of the NIfTI loader."""
        self._read_function = new_read_function
def _read_data(self, subject_path):
if self._read_function is not None:
vol = self._read_function(subject_path)
else:
vol = self._read_nii(subject_path)
# Always perform a np.rollaxis, we want the slicing position last
if self.slice_dim != 2:
vol = np.rollaxis(vol, self.slice_dim, 3)
return vol
@staticmethod
def _resize_slice(slice_image, newsize, inter):
if cv2_found:
if inter is 'float':
inter = cv2.INTER_CUBIC
elif inter is 'int':
inter = cv2.INTER_NEAREST
slice_image = cv2.resize(slice_image, newsize,
interpolation=inter)
if slice_image.ndim is 2:
slice_image = np.expand_dims(slice_image, axis=-1)
else:
raise ValueError(
' CV2 is not installed and is needed for resize slices, to install it use "sudo pip install opencv-python"')
return slice_image
def calc_z_correction(self, group, use_roi=False):
# apriori the z_corr is only for network_in
# INPUTS:
# group: string that contains the group name that is going to be used to calculate the values for the z correction
# use_roi: Boolean used to whether use a ROI to calculate the correction or not. If ROI channels != In channels
# just the first roi channel is used
# OUTPUT:Means and stds for z correction in a list in channel order
means_per_channel = []
stds_per_channel = []
channel_list = self.dictionary.get_list_channels(group, NN_IN)
subject_list = self.dictionary.get_list_subjects(group)
for channel in channel_list:
vol_list_flatten = []
for subject in subject_list:
vol_subject = self._read_data(self.dictionary.get_subject_path(group, NN_IN, channel, subject))
if use_roi:
self._used_roi_z_correction = True
if len(self.dictionary.get_list_channels(group, NN_IN)) == len(
self.dictionary.get_list_channels(group, NN_ROI)):
roi_subject = self._read_data(
self.dictionary.get_subject_path(group, NN_ROI, channel, subject))
else:
roi_subject = self._read_data(
self.dictionary.get_subject_path(group, NN_ROI, 1, subject))
vol_list_flatten.append(np.extract(roi_subject, vol_subject))
else:
vol_list_flatten.append(vol_subject.flatten())
data_for_scale = np.concatenate(vol_list_flatten)
means_per_channel.append(np.mean(data_for_scale))
stds_per_channel.append(np.std(data_for_scale))
self.means_z = means_per_channel
self.stds_z = stds_per_channel
self._Z_CORR = True
return means_per_channel, stds_per_channel
    def set_z_correction(self, means_z, stds_z):
        """Manually set the per-channel z-correction parameters (marks them as available)."""
        self.means_z = means_z
        self.stds_z = stds_z
        self._Z_CORR = True
    def _create_tfrecord_writer(self, group):
        """Open a TFRecordWriter at <dst_path>/<group>.tfrecord."""
        return tf.python_io.TFRecordWriter(os.path.join(self.dst_path, group + '.tfrecord'))
def _get_subject_data(self, group, subject):
network_vols3d_subject = {}
for network_side in self.dictionary.get_network_sides(group):
if self.dictionary.get_list_channels(group, network_side):
vol_channels_list = []
for channel in self.dictionary.get_list_channels(group, network_side):
vol = self._read_data(self.dictionary.get_subject_path(group, network_side, channel, subject))
vol = np.expand_dims(vol, axis=-1)
vol_channels_list.append(vol)
network_vols3d_subject[network_side] = np.concatenate(vol_channels_list,
axis=vol_channels_list[0].ndim - 1)
return network_vols3d_subject
def _convert_to_serial_and_write(self, group, data_list, vols_list, slice_id_list, tfrecord_writer):
    """Serialize every buffered sample into a tf.train.Example and write it.

    data_list: list of dicts, one per sample, mapping network side -> slice
        array of shape (rows, cols, channels).
    vols_list: list of 4-D volume arrays (one per sample); shape[0] is stored
        as the 'deep' feature.
    slice_id_list: slice indices matching data_list by position.
    Samples are written in shuffled order when self.shuffle is set.
    """
    def _int64_feature(value):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    num_examples = len(data_list)
    sample_indices = list(range(num_examples))
    if self.shuffle:
        random.shuffle(sample_indices)
    # Shape features are suffixed 'in'/'out'/'roi' by position, mirroring the
    # order returned by get_network_sides.
    side_words = ['in', 'out', 'roi']
    for index in sample_indices:
        sample_features = dict()
        for network_side, shape_side_word in zip(self.dictionary.get_network_sides(group), side_words):
            # .tobytes() replaces the deprecated .tostring() (removed in
            # NumPy 2.0); both return the identical raw byte string.
            sample_features[network_side] = _bytes_feature(data_list[index][network_side].tobytes())
            rows, cols, channels = data_list[index][network_side].shape
            sample_features['rows_' + shape_side_word] = _int64_feature(rows)
            sample_features['cols_' + shape_side_word] = _int64_feature(cols)
            sample_features['channels_' + shape_side_word] = _int64_feature(channels)
        sample_features['vols'] = _bytes_feature(vols_list[index].tobytes())
        sample_features['deep'] = _int64_feature(vols_list[index].shape[0])
        sample_features['slice_id'] = _int64_feature(slice_id_list[index])
        example = tf.train.Example(features=tf.train.Features(feature=sample_features))
        tfrecord_writer.write(example.SerializeToString())
def _convert_to_serial_and_write_single(self, group, data, vol, slice_id, tfrecord_writer):
    """Serialize ONE sample into a tf.train.Example and write it.

    Single-sample counterpart of _convert_to_serial_and_write: *data* maps
    network side -> slice array (rows, cols, channels), *vol* is the 4-D
    volume whose shape[0] becomes the 'deep' feature.
    """
    def _int64_feature(value):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    # Shape features are suffixed 'in'/'out'/'roi' by position, mirroring the
    # order returned by get_network_sides.
    side_words = ['in', 'out', 'roi']
    sample_features = dict()
    for network_side, shape_side_word in zip(self.dictionary.get_network_sides(group), side_words):
        # .tobytes() replaces the deprecated .tostring() (removed in NumPy 2.0);
        # both return the identical raw byte string.
        sample_features[network_side] = _bytes_feature(data[network_side].tobytes())
        rows, cols, channels = data[network_side].shape
        sample_features['rows_' + shape_side_word] = _int64_feature(rows)
        sample_features['cols_' + shape_side_word] = _int64_feature(cols)
        sample_features['channels_' + shape_side_word] = _int64_feature(channels)
    sample_features['vols'] = _bytes_feature(vol.tobytes())
    sample_features['deep'] = _int64_feature(vol.shape[0])
    sample_features['slice_id'] = _int64_feature(slice_id)
    example = tf.train.Example(features=tf.train.Features(feature=sample_features))
    tfrecord_writer.write(example.SerializeToString())
def set_data_augmentation(self, dx=5, dy=5, angle=360):
    """Configure augmentation limits: max shift in pixels, max rotation in degrees."""
    self._DX, self._DY, self._ANGLE = dx, dy, angle
def set_dtypes(self, dtype_in='float', dtype_out='float', dtype_roi='int'):
    """Set the per-side dtypes ('float' or 'int') and refresh dtypes_dict.

    Raises ValueError when any of the three values is not 'float' or 'int'.
    """
    requested = (dtype_in, dtype_out, dtype_roi)
    if any(dtype not in ('float', 'int') for dtype in requested):
        raise ValueError(' Bad dtype founded.')
    self._NN_IN_DTYPE = dtype_in
    self._NN_OUT_DTYPE = dtype_out
    self._NN_ROI_DTYPE = dtype_roi
    first_group = self.dictionary.get_list_groups()[0]
    for network_side, dtype in zip(self.dictionary.get_network_sides(first_group), requested):
        self.dtypes_dict[network_side] = dtype
def _do_data_augmentation3d(self, group, slice_data_dict, vols_data):
# dx = random.randrange(0, self._DX)
# dy = random.randrange(0, self._DY)
angle = random.randrange(0, self._ANGLE)
# vols_data = interpolation.shift(vols_data, [0,dx, dy, 0],
# mode='constant', cval=0)
vols_data = interpolation.rotate(vols_data, angle, axes=(1, 2),
reshape=False,
mode='constant', cval=0)
for network_side in self.dictionary.get_network_sides(group):
dtype = self.dtypes_dict[network_side]
if dtype is 'float':
# slice_data_dict[network_side] = interpolation.shift(slice_data_dict[network_side], [dx, dy, 0],
# mode='constant', cval=0)
slice_data_dict[network_side] = interpolation.rotate(slice_data_dict[network_side], angle,
reshape=False,
mode='constant', cval=0)
elif dtype is 'int':
# slice_data_dict[network_side] = interpolation.shift(slice_data_dict[network_side], [dx, dy, 0],
# mode='constant', cval=0, order=0)
slice_data_dict[network_side] = interpolation.rotate(slice_data_dict[network_side], angle,
reshape=False,
mode='constant', cval=0, order=0)
else:
raise ValueError('Error: "dtype" in %s not recognized, %s' % (network_side, dtype))
return slice_data_dict, vols_data
@staticmethod
def _resize_3d(vol_input):
    """Resize every axial slice of a (x, y, slices, channels) volume to 128x128.

    Returns an array of shape (slices, 128, 128, channels): note the slice
    axis moves to the front because the resized slices are stacked along
    axis 0.  Interpolation is cubic ('float' mode).
    """
    def resize_slice(slice_image, newsize, inter):
        # '==' replaces the original "is 'float'": identity comparison of str
        # literals relies on CPython interning and is a SyntaxWarning on 3.8+.
        if inter == 'float':
            inter = cv2.INTER_CUBIC
        elif inter == 'int':
            inter = cv2.INTER_NEAREST
        return cv2.resize(slice_image, newsize, interpolation=inter)

    resized_slices = []
    for slice_id in range(vol_input.shape[2]):
        # local renamed from 'slice' to avoid shadowing the builtin
        axial_slice = vol_input[:, :, slice_id, :]
        resized_slices.append(resize_slice(axial_slice, (128, 128), 'float'))
    return np.asarray(resized_slices)
def _list_slices_subject(self, group, subject):
    """Build per-slice training samples for one subject.

    Returns (slice_dicts, vols, slice_ids): for each axial slice that passes
    the optional label check, a dict of per-side 2-D slices (resized /
    z-corrected as configured), the z-corrected + resized full volume with a
    one-hot slice-mask channel appended, and the slice index.

    NOTE(review): network_vols3d_subject_zcorr_resize is only assigned when
    self._Z_CORR is set, and network_vols3d_subject_zcorr_aug only inside the
    data_augmentation branch — with either flag off the later references
    raise NameError, so this path apparently assumes both are enabled;
    confirm against callers.
    """
    network_vols3d_subject = self._get_subject_data(group, subject)
    # NN_IN volume layout is (x, y, slices, channels); slice axis is 2.
    _, _, slices, _ = network_vols3d_subject[NN_IN].shape
    tfrecord_slice_list = []
    tfrecord_vols_list = []
    tfrecord_id_list = []
    if self._Z_CORR:
        network_vols3d_subject_zcorr = ((network_vols3d_subject[NN_IN] - self.means_z) / self.stds_z).astype(
            np.float32)
        network_vols3d_subject_zcorr_resize = self._resize_3d(network_vols3d_subject_zcorr)
    for slice_id in range(0, slices):
        # Optionally skip slices whose label side has no positive voxels.
        if self.network_side_label_check:
            if not self._check_valid_output_slice(network_vols3d_subject, slice_id, self.network_side_label_check):
                continue
        slice_data_dict = dict()
        for network_side in self.dictionary.get_network_sides(group):
            # Resize the 2-D slice when a target shape was registered for this
            # side and the current slice differs from it.
            if network_side in list(self._shapes_tfrecord.keys()) and \
                    network_vols3d_subject[network_side][:, :, slice_id, :].shape[0:2] != self._shapes_tfrecord[
                        network_side]:
                slice_data_dict[network_side] = self._resize_slice(
                    network_vols3d_subject[network_side][:, :, slice_id, :]
                    , self._shapes_tfrecord[network_side], self.dtypes_dict[network_side]).astype(np.float32)
            else:
                slice_data_dict[network_side] = network_vols3d_subject[network_side][:, :, slice_id, :].astype(
                    np.float32)
        if self.z_correction:
            if self._Z_CORR:
                slice_data_dict[NN_IN] = ((slice_data_dict[NN_IN] - self.means_z) / self.stds_z).astype(
                    np.float32)
            else:
                raise ValueError(
                    'Error: The calculation of the Z correction input parameters must be done before creating the tfrecord, \
                    or they must be sat manually in the object')
        if self.data_augmentation:
            slice_data_dict, network_vols3d_subject_zcorr_aug = self._do_data_augmentation3d(group, slice_data_dict,
                                                                                             network_vols3d_subject_zcorr_resize)
        # Append a one-hot mask channel marking the current slice in the volume.
        mask_slice = np.zeros(network_vols3d_subject_zcorr_aug[:, :, :, 0].shape, dtype=np.float32)
        mask_slice[slice_id, :, :] = 1
        mask_slice = np.expand_dims(mask_slice, axis=-1)
        network_vols3d_subject_zcorr_aug = np.concatenate((network_vols3d_subject_zcorr_aug, mask_slice),
                                                          axis=3)
        tfrecord_id_list.append(slice_id)
        tfrecord_slice_list.append(slice_data_dict)
        tfrecord_vols_list.append(network_vols3d_subject_zcorr_aug)
    return tfrecord_slice_list, tfrecord_vols_list, tfrecord_id_list
def _list_slices_subject_yield(self, group, subject):
    """Generator variant of _list_slices_subject: yields one sample at a time.

    Yields (z-corrected volume with slice-mask channel, per-side slice dict,
    slice index) for each axial slice that passes the optional label check,
    in shuffled order when self.shuffle is set.

    NOTE(review): network_vols3d_subject_zcorr_resize is only assigned when
    self._Z_CORR is set, and network_vols3d_subject_zcorr_aug only inside the
    data_augmentation branch — with either flag off the later references
    raise NameError, so this path apparently assumes both are enabled;
    confirm against callers (run() uses this generator).
    """
    network_vols3d_subject = self._get_subject_data(group, subject)
    # NN_IN volume layout is (x, y, slices, channels); slice axis is 2.
    _, _, slices, _ = network_vols3d_subject[NN_IN].shape
    if self._Z_CORR:
        network_vols3d_subject_zcorr = ((network_vols3d_subject[NN_IN] - self.means_z) / self.stds_z).astype(
            np.float32)
        network_vols3d_subject_zcorr_resize = self._resize_3d(network_vols3d_subject_zcorr)
    sample_indices = list(range(slices))
    if self.shuffle:
        random.shuffle(sample_indices)
    for slice_id in sample_indices:
        # Optionally skip slices whose label side has no positive voxels.
        if self.network_side_label_check:
            if not self._check_valid_output_slice(network_vols3d_subject, slice_id, self.network_side_label_check):
                continue
        slice_data_dict = dict()
        for network_side in self.dictionary.get_network_sides(group):
            # Resize the 2-D slice when a target shape was registered for this
            # side and the current slice differs from it.
            if network_side in list(self._shapes_tfrecord.keys()) and \
                    network_vols3d_subject[network_side][:, :, slice_id, :].shape[0:2] != self._shapes_tfrecord[
                        network_side]:
                slice_data_dict[network_side] = self._resize_slice(
                    network_vols3d_subject[network_side][:, :, slice_id, :]
                    , self._shapes_tfrecord[network_side], self.dtypes_dict[network_side]).astype(np.float32)
            else:
                slice_data_dict[network_side] = network_vols3d_subject[network_side][:, :, slice_id, :].astype(
                    np.float32)
        if self.z_correction:
            if self._Z_CORR:
                slice_data_dict[NN_IN] = ((slice_data_dict[NN_IN] - self.means_z) / self.stds_z).astype(
                    np.float32)
            else:
                raise ValueError(
                    'Error: The calculation of the Z correction input parameters must be done before creating the tfrecord, \
                    or they must be sat manually in the object')
        if self.data_augmentation:
            slice_data_dict, network_vols3d_subject_zcorr_aug = self._do_data_augmentation3d(group, slice_data_dict,
                                                                                             network_vols3d_subject_zcorr_resize)
        # Append a one-hot mask channel marking the current slice in the volume.
        mask_slice = np.zeros(network_vols3d_subject_zcorr_aug[:, :, :, 0].shape, dtype=np.float32)
        mask_slice[slice_id, :, :] = 1
        mask_slice = np.expand_dims(mask_slice, axis=-1)
        network_vols3d_subject_zcorr_aug = np.concatenate((network_vols3d_subject_zcorr_aug, mask_slice),
                                                          axis=3)
        yield network_vols3d_subject_zcorr_aug, slice_data_dict, slice_id
def run(self, subjects_buffer_size=1):
    """Write one TFRecord file per group, one serialized example per slice.

    subjects_buffer_size is validated but not otherwise used by the
    streaming (generator) path; it is kept for interface compatibility.
    """
    if not isinstance(subjects_buffer_size, int):
        raise ValueError(' Error: "subjects_buffer_size" must be an integer.')
    for group in self.dictionary.get_list_groups():
        print('group %s' % group)
        writer = self._create_tfrecord_writer(group)
        # Iterate subjects directly instead of by index; stream slices through
        # the generator to keep memory bounded.
        for subject in self.dictionary.get_list_subjects(group):
            print('subject %s' % subject)
            for vol, slice_data, slice_id in self._list_slices_subject_yield(group, subject):
                self._convert_to_serial_and_write_single(group, slice_data, vol, slice_id, writer)
        # Fix: the writer was never closed, so the last buffered records could
        # be lost; close it before moving to the next group.
        writer.close()
        self._print_info(group)
class ProductionCreator_patch3d:
    """Serve a subject's volumes as overlapping 3-D patches for inference.

    Each volume is zero-padded by one patch length per side and scanned with
    a cube of ``patch_shape`` every ``stride_cube`` voxels.  Helper methods
    stitch predicted patches back into a full volume and save it as NIfTI.
    """

    def __init__(self, dictionary):
        assert (isinstance(dictionary, DataOperator))
        self.dictionary = dictionary
        self.dst_path = ''  # destination folder for generated NIfTI files / logs
        self._shapes_tfrecord = dict()  # optional per-side 2-D resize targets
        self.z_correction = False  # z-normalise NN_IN patches when True
        self.patch_shape = (32, 32, 32)
        self.stride_cube = 16
        self.means_z = None
        self.stds_z = None
        self._used_roi_z_correction = False
        self.network_side_label_check = None
        self.check_valid_mask = False
        # Central stride-sized window used to decide whether a patch contains
        # ROI voxels.  NOTE(review): derived once from the defaults above —
        # reassigning patch_shape / stride_cube later does NOT refresh it.
        self.valid_in_mask = (
            (int(self.patch_shape[0] / 2 - self.stride_cube / 2), int(self.patch_shape[0] / 2 + self.stride_cube / 2)),
            (int(self.patch_shape[1] / 2 - self.stride_cube / 2), int(self.patch_shape[1] / 2 + self.stride_cube / 2)),
            (int(self.patch_shape[2] / 2 - self.stride_cube / 2), int(self.patch_shape[2] / 2 + self.stride_cube / 2)))
        self._NN_IN_DTYPE = 'float'
        self._NN_OUT_DTYPE = 'float'
        self._NN_ROI_DTYPE = 'int'
        self.dtypes_dict = {
            NN_IN: self._NN_IN_DTYPE,
            NN_OUT: self._NN_OUT_DTYPE,
            NN_ROI: self._NN_ROI_DTYPE
        }
        self._read_function = None
        self._Z_CORR = False
        self.standar_maxmin = False  # min-max scale each volume on read when True

    def _print_info(self, group):
        """Dump this object's configuration to <dst_path>/<group>.log (and stdout)."""
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        logger = logging.getLogger()
        logger.addHandler(logging.FileHandler(os.path.join(self.dst_path, group + '.log'), 'w'))
        # Shadow the builtin locally so every line below is also logged to file.
        print = logger.info
        print('#######PRODUCTION CREATION INFO#######################')
        print('-Dictionary of data: ')
        print(self.dictionary)
        print('-Group: %s' % group)
        print('-Shape tfrecord: %s' % self._shapes_tfrecord)
        print('-Z correction: %r' % self.z_correction)
        print('-Patch shape: %s' % self.patch_shape)
        print('-Means for Z correction: %s' % self.means_z)
        print('-Stds for Z correction: %s' % self.stds_z)
        print('-Use ROI in z correction: %r' % self._used_roi_z_correction)
        print('-NN_IN_DTYPE: %s' % self._NN_IN_DTYPE)
        print('-NN_OUT_DTYPE: %s' % self._NN_OUT_DTYPE)
        print('-NN_ROI_DTYPE: %s' % self._NN_ROI_DTYPE)
        print('Read function used: %s' % self._read_function)
        print('###################################################')
        logger.handlers.pop()

    @staticmethod
    def _check_valid_output_slice(network_vols3d_subject, slice_id, network_side):
        """Return True when the given slice of *network_side* has any positive voxels."""
        slice_mask = network_vols3d_subject[network_side][:, :, slice_id, :].astype(
            np.float32)
        if np.sum(slice_mask) > 0:
            contains_labels = True
        else:
            contains_labels = False
        return contains_labels

    @staticmethod
    def _read_nii(subject_path):
        """Load a NIfTI volume as a squeezed numpy array (medpy preferred)."""
        if medpy_found:
            vol, _ = medpy.io.load(subject_path)
        else:
            img = nib.load(subject_path)
            # np.asanyarray(img.dataobj) is nibabel's documented replacement
            # for the removed img.get_data(); it preserves the on-disk dtype.
            vol = np.squeeze(np.asanyarray(img.dataobj))
        return vol

    @staticmethod
    def _read_nii_dic(subject_path):
        """Return (header, affine) of a NIfTI file.

        Always loads through nibabel: the previous medpy branch bound the
        image *array* to ``img`` and then read ``img.affine`` — an
        AttributeError — and a medpy header is not a valid nibabel header for
        Nifti1Image anyway.
        """
        img = nib.load(subject_path)
        return img.header, img.affine

    def resize_slices(self, new_size, group='all_groups', network_side=None):
        """Register a 2-D resize target for one side/group or for all of them."""
        # '==' instead of "is": identity comparison of string literals is
        # interning-dependent and a SyntaxWarning on Python 3.8+.
        if group == 'all_groups':
            groups_to_do = self.dictionary.get_list_groups()
        else:
            groups_to_do = [group]
        if network_side is None:
            for group in groups_to_do:
                for network_side in self.dictionary.get_network_sides(group):
                    self._set_size_side(group, network_side, new_size)
        else:
            for group in groups_to_do:
                self._set_size_side(group, network_side, new_size)

    def _set_size_side(self, group, network_side, new_size):
        """Validate and store the (rows, cols) resize target for one side."""
        if not isinstance(new_size, tuple):
            raise ValueError('Error: "new_shape" must be a tuple')
        if len(new_size) != 2:
            raise ValueError('Error: "new_shape" must have two values')
        if network_side not in self.dictionary.get_network_sides(group):
            # Fix: the %s placeholder was never filled in the original.
            raise ValueError('Error: %s is not a network side' % network_side)
        self._shapes_tfrecord[network_side] = new_size

    def set_read_function(self, new_read_function):
        """Override the default NIfTI reader with a custom callable(path) -> array."""
        self._read_function = new_read_function

    def _read_data(self, subject_path):
        """Read a volume via the custom reader (if set) or _read_nii; optionally min-max scale."""
        if self._read_function is not None:
            vol = self._read_function(subject_path)
        else:
            vol = self._read_nii(subject_path)
        if self.standar_maxmin:
            vol = (vol - vol.min()) / (vol.max() - vol.min())
        return vol

    def calc_z_correction(self, group, use_roi=False):
        """Compute per-channel mean/std of the NN_IN data for z correction.

        group: group name whose subjects are scanned.
        use_roi: when True only voxels inside the ROI volumes are used; if the
            ROI side has fewer channels than NN_IN, the first ROI channel is
            used for every input channel.
        Returns (means, stds) as lists in channel order and stores them on
        the object (setting the internal _Z_CORR flag).
        """
        means_per_channel = []
        stds_per_channel = []
        channel_list = self.dictionary.get_list_channels(group, NN_IN)
        subject_list = self.dictionary.get_list_subjects(group)
        for channel in channel_list:
            vol_list_flatten = []
            for subject in subject_list:
                vol_subject = self._read_data(self.dictionary.get_subject_path(group, NN_IN, channel, subject))
                if use_roi:
                    self._used_roi_z_correction = True
                    if len(self.dictionary.get_list_channels(group, NN_IN)) == len(
                            self.dictionary.get_list_channels(group, NN_ROI)):
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, channel, subject))
                    else:
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, 1, subject))
                    vol_list_flatten.append(np.extract(roi_subject, vol_subject))
                else:
                    vol_list_flatten.append(vol_subject.flatten())
            data_for_scale = np.concatenate(vol_list_flatten)
            means_per_channel.append(np.mean(data_for_scale))
            stds_per_channel.append(np.std(data_for_scale))
        self.means_z = means_per_channel
        self.stds_z = stds_per_channel
        self._Z_CORR = True
        print(means_per_channel)
        print(stds_per_channel)
        return means_per_channel, stds_per_channel

    def set_z_correction(self, means_z, stds_z):
        """Install externally computed z-normalisation parameters."""
        self.means_z = means_z
        self.stds_z = stds_z
        self._Z_CORR = True

    def _get_subject_data(self, group, subject):
        """Load every channel of every network side for one subject.

        Returns a dict mapping network side -> 4-D array with per-channel
        volumes stacked along a trailing channel axis; sides with no
        channels are omitted.
        """
        network_vols3d_subject = {}
        for network_side in self.dictionary.get_network_sides(group):
            if self.dictionary.get_list_channels(group, network_side):
                vol_channels_list = []
                for channel in self.dictionary.get_list_channels(group, network_side):
                    vol = self._read_data(self.dictionary.get_subject_path(group, network_side, channel, subject))
                    vol = np.expand_dims(vol, axis=-1)
                    vol_channels_list.append(vol)
                network_vols3d_subject[network_side] = np.concatenate(vol_channels_list,
                                                                      axis=vol_channels_list[0].ndim - 1)
        return network_vols3d_subject

    def set_dtypes(self, dtype_in='float', dtype_out='float', dtype_roi='int'):
        """Set the per-side dtypes ('float' or 'int') and refresh dtypes_dict."""
        requested = (dtype_in, dtype_out, dtype_roi)
        if any(dtype not in ('float', 'int') for dtype in requested):
            raise ValueError(' Bad dtype founded.')
        self._NN_IN_DTYPE = dtype_in
        self._NN_OUT_DTYPE = dtype_out
        self._NN_ROI_DTYPE = dtype_roi
        for network_side, dtype in zip(self.dictionary.get_network_sides(self.dictionary.get_list_groups()[0]),
                                       requested):
            self.dtypes_dict[network_side] = dtype

    def get_next_slice(self, group, subject, use_roi=True):
        """Yield the patches of one subject as flat tuples.

        With use_roi=True each item is (in, out, roi, corner, padded_shape);
        otherwise (in, out, corner, padded_shape).
        """
        print('group %s' % group)
        print('subject %s' % subject)
        for cubes_dict, index_tupple, shape_tupple in self._list_cubes_subject(group, subject):
            if use_roi:
                yield (cubes_dict[NN_IN], cubes_dict[NN_OUT], cubes_dict[NN_ROI], index_tupple, shape_tupple)
            else:
                yield (cubes_dict[NN_IN], cubes_dict[NN_OUT], index_tupple, shape_tupple)

    def _list_cubes_subject(self, group, subject):
        """Yield (cube_dict, (x, y, z) corner, padded volume shape) per patch.

        Volumes are zero-padded by one patch length on every side so border
        voxels get full coverage; corners index the padded volume.
        """
        network_vols3d_subject = self._get_subject_data(group, subject)
        for network_side in self.dictionary.get_network_sides(group):
            network_vols3d_subject[network_side] = np.pad(network_vols3d_subject[network_side], (
                (self.patch_shape[0], self.patch_shape[0]), (self.patch_shape[1], self.patch_shape[1]),
                (self.patch_shape[2], self.patch_shape[2]), (0, 0)),
                'constant', constant_values=((0, 0), (0, 0), (0, 0), (0, 0)))
        x, y, z, _ = network_vols3d_subject[NN_IN].shape
        for x_i in range(0, x, int(self.stride_cube)):
            for y_i in range(0, y, int(self.stride_cube)):
                for z_i in range(0, z, int(self.stride_cube)):
                    cube_data_dict = dict()
                    for network_side in self.dictionary.get_network_sides(group):
                        x_patch, y_patch, z_patch = self.patch_shape
                        # Fix: the original compared a *list* of the shape
                        # against the patch_shape *tuple*, which is never equal
                        # in Python, so with the default tuple every patch was
                        # skipped; compare tuple-to-tuple instead (this also
                        # works if a caller set patch_shape to a list).
                        if network_vols3d_subject[network_side][(x_i):(x_i + x_patch), (y_i):(y_i + y_patch),
                                                                (z_i):(z_i + z_patch)].shape[0:3] != tuple(self.patch_shape):
                            continue
                        else:
                            cube_data_dict[network_side] = network_vols3d_subject[network_side][
                                (x_i):(x_i + x_patch),
                                (y_i):(y_i + y_patch), (z_i):(z_i + z_patch)].astype(
                                np.float32)
                    # '==' replaces the original 'is' on two ints: identity
                    # only coincides with equality for small interned values.
                    if len(cube_data_dict) == len(self.dictionary.get_network_sides(group)):
                        if self.check_valid_mask:
                            # Skip patches whose central window holds no ROI voxels.
                            if np.sum(cube_data_dict[NN_ROI][self.valid_in_mask[0][0]:self.valid_in_mask[0][1]
                                      , self.valid_in_mask[1][0]:self.valid_in_mask[1][1]
                                      , self.valid_in_mask[2][0]:self.valid_in_mask[2][1]]) == 0:
                                continue
                        if self.z_correction:
                            if self._Z_CORR:
                                cube_data_dict[NN_IN] = ((cube_data_dict[NN_IN] - self.means_z) / self.stds_z).astype(
                                    np.float32)
                            else:
                                raise ValueError(
                                    'Error: The calculation of the Z correction input parameters must be done before creating the tfrecord, \
                                    or they must be sat manually in the object')
                        yield (cube_data_dict, (x_i, y_i, z_i), (x, y, z))

    def fill_cube(self, cube, index, cube_out):
        """Write the first channel of *cube* into *cube_out* at corner *index*.

        Silently skips (and still returns cube_out) when the target window is
        clipped by the volume border.
        """
        if cube_out[index[0]:(index[0] + self.patch_shape[0]), index[1]:(index[1] + self.patch_shape[1]),
           index[2]:(index[2] + self.patch_shape[2])].shape == cube[:, :, :, 0].shape:
            cube_out[index[0]:(index[0] + self.patch_shape[0]), index[1]:(index[1] + self.patch_shape[1]),
                     index[2]:(index[2] + self.patch_shape[2])] = cube[:, :, :, 0]
        return cube_out

    def create_nifti_from_cube(self, cube, subject, group=None, meta_datos=False):
        """Save *cube* as <dst_path>/<subject>_pseudo.nii.gz.

        With meta_datos=True the affine and header are copied from the
        subject's first NN_OUT volume; otherwise an identity affine is used.
        """
        if meta_datos:
            dic, affine = self._read_nii_dic(self.dictionary.get_subject_path(group, NN_OUT, 1, subject))
            img = nib.Nifti1Image(cube, affine, dic)
        else:
            img = nib.Nifti1Image(cube, np.eye(4))
        save_path = os.path.join(self.dst_path, subject + '_pseudo.nii.gz')
        nib.save(img, save_path)

    def fill_center(self, cube, index, cube_out):
        """Copy only the central voxel of *cube* into *cube_out* at corner *index*."""
        if cube_out[index[0]:(index[0] + self.patch_shape[0]), index[1]:(index[1] + self.patch_shape[1]),
           index[2]:(index[2] + self.patch_shape[2])].shape == cube[:, :, :, 0].shape:
            cube_out[int(index[0] + self.patch_shape[0] / 2), int(index[1] + self.patch_shape[1] / 2), int(
                index[2] + self.patch_shape[2] / 2)] \
                = cube[int(self.patch_shape[0] / 2), int(self.patch_shape[1] / 2), int(self.patch_shape[2] / 2), 0]
        return cube_out

    def fill_custom_cube(self, cube, index, cube_out, custom_shape=4):
        """Copy a centred custom_shape^3 sub-cube of *cube* into *cube_out* at *index*."""
        offset = list()
        offset.append(int(self.patch_shape[0] / 2 - custom_shape / 2))
        offset.append(int(self.patch_shape[1] / 2 - custom_shape / 2))
        offset.append(int(self.patch_shape[2] / 2 - custom_shape / 2))
        if cube_out[index[0]:(index[0] + self.patch_shape[0]), index[1]:(index[1] + self.patch_shape[1]),
           index[2]:(index[2] + self.patch_shape[2])].shape == cube[:, :, :, 0].shape:
            cube_out[(index[0] + offset[0]):(index[0] + offset[0] + custom_shape),
                     (index[1] + offset[1]):(index[1] + offset[1] + custom_shape),
                     (index[2] + offset[2]):(index[2] + offset[2] + custom_shape)] = \
                cube[offset[0]:(custom_shape + offset[0]), offset[1]:(custom_shape + offset[1]),
                     offset[2]:(custom_shape + offset[2]), 0]
        return cube_out

    def correct_background(self, cube, group, subject):
        """Set voxels outside the subject's first ROI channel to -1024 (air in HU)."""
        # renamed local: the original rebound the 'subject' parameter
        subject_data = self._get_subject_data(group, subject)
        # builtin int replaces np.int, which was removed in NumPy 1.24.
        vol_mask_int = np.asarray(subject_data[NN_ROI][:, :, :, 0], int)
        cube[vol_mask_int == 0] = -1024
        return cube
class Dataset:
    """Discover subjects/files on disk and expose them through a DataOperator.

    Expected layout: experiment_path/<group>/<subject>/<files>.  Files are
    assigned to network sides and channels by keyword matching on the file
    name; every keyword must match exactly one file per subject.
    """

    def __init__(self, experiment_path, key_words_in, key_words_out, key_words_roi=None, group=None):
        self._raw_dictionary = self._read_experimentdict_from_folder(experiment_path, key_words_in, key_words_out,
                                                                     key_words_roi, group)
        self._dict_experiment = DataOperator(self._raw_dictionary)
        self._cross_validation = False
        self._cv_group = None
        self._cv_fold_size = None

    def __str__(self):
        return self._dict_experiment.__str__()

    @staticmethod
    def _create_dict_from_folder(path_subjects, key_words_in, key_words_out, key_words_roi):
        """Build the {side: {channel: {subject: file}}} dict for one group folder.

        path_subjects: root folder that contains the subject folders.
        key_words_*: keyword lists identifying input / output / roi files.
        Raises ValueError when a keyword matches several files or none at all.
        """
        subjects = list_nohidden_directories(path_subjects)
        channels = dict()
        channels[NN_IN] = len(key_words_in)
        channels[NN_OUT] = len(key_words_out)
        record_dict = dict()
        record_dict[NN_IN] = {}
        record_dict[NN_OUT] = {}
        key_words = dict()
        key_words[NN_IN] = key_words_in
        key_words[NN_OUT] = key_words_out
        if key_words_roi:
            record_dict[NN_ROI] = {}
            channels[NN_ROI] = len(key_words_roi)
            key_words[NN_ROI] = key_words_roi
        for subject in subjects:
            files = list_nohidden_files(os.path.join(path_subjects, subject))
            # Track keys: every keyword must point to one file and only one.
            track_keys = dict()
            track_keys[NN_IN] = key_words_in.copy()
            track_keys[NN_OUT] = key_words_out.copy()
            # Fix: key_words_roi defaults to None and None.copy() raised an
            # AttributeError whenever no ROI keywords were supplied.
            track_keys[NN_ROI] = key_words_roi.copy() if key_words_roi else []
            for file in files:
                for network_side in record_dict.keys():
                    for key_word, channel in zip(key_words[network_side], range(1, channels[network_side] + 1)):
                        if key_word in file.split('/')[-1]:
                            if key_word in track_keys[network_side]:
                                track_keys[network_side].remove(key_word)
                            else:
                                raise ValueError(
                                    'ERROR: Key word "%s" was used in various files, each key must point an unique file.' % key_word)
                            record_dict[network_side].setdefault(channel, {}).update({os.path.basename(subject): file})
            error_keys = []
            for network_side in record_dict.keys():
                error_keys += track_keys[network_side]
            if error_keys:
                for key in error_keys:
                    print('ERROR: Key word "%s" was NOT used.' % key)
                raise ValueError(' ERROR: Unused keywords.')
        return record_dict

    def _read_experimentdict_from_folder(self, path_experiment, key_words_in, key_words_out, key_words_roi=None,
                                         group=None):
        """Build the nested {group: side_dict} dictionary for the experiment.

        group: None scans every (non-hidden) folder under path_experiment;
        a string or list restricts the scan to those group folders.
        """
        if group is None:
            folders_experiment = list_nohidden_directories(path_experiment)
        elif isinstance(group, list):
            folders_experiment = group
        else:
            folders_experiment = [group]
        experiment_dict = dict()
        for folder_experiment in folders_experiment:
            folder_dictionary = self._create_dict_from_folder(os.path.join(path_experiment, folder_experiment),
                                                              key_words_in,
                                                              key_words_out, key_words_roi)
            experiment_dict[folder_experiment] = folder_dictionary
        return experiment_dict

    def cv_data_iterator(self, group, fold_size, separable=None):
        """Return the cross-validation iterator over *group* with folds of *fold_size* ids."""
        self._cross_validation = True
        self._cv_group = group
        self._cv_fold_size = fold_size
        return self._iterator_crossvalidation(self._cv_group, self._cv_fold_size, separable)

    def get_dict_subjects(self, list, separable):
        """Group subject names by the id prefix before the *separable* token.

        NOTE: the parameter name 'list' shadows the builtin; it is kept
        unchanged for backward compatibility with keyword callers.
        """
        dict_subjects = {}
        for subject in list:
            id = subject.split(separable)[0]
            dict_subjects.setdefault(id, []).append(subject)
        return dict_subjects

    def _iterator_crossvalidation(self, group, fold_size, separable):
        """Yield one DataOperator per CV fold, rotating fold_size ids into the test set.

        NOTE(review): dict_cv[CV_TRAIN] and dict_cv[CV_TEST] are both taken
        from get_groups_dictionary; if that method hands back the same
        underlying dict objects, the per-subject deletions below mutate
        shared state across train/test and across folds — confirm it
        returns independent copies.
        """
        import collections
        subjects_dict = self.get_dict_subjects(self._dict_experiment.get_list_subjects(group), separable)
        subjects_rotate_list = collections.deque(list(subjects_dict.keys()))
        # int()/float() replace np.int/np.float, removed in NumPy 1.24;
        # the arithmetic is unchanged.
        n_folds = int(np.ceil(float(len(subjects_rotate_list)) / fold_size))
        for _ in range(n_folds):
            dict_cv = dict()
            dict_cv[CV_TRAIN] = self._dict_experiment.get_groups_dictionary(group)[group]
            dict_cv[CV_TEST] = self._dict_experiment.get_groups_dictionary(group)[group]
            subjects_train = list(subjects_rotate_list)[fold_size::]
            subjects_test = list(subjects_rotate_list)[0:fold_size]
            subjects_rotate_list.rotate(-fold_size)
            for network_side in self._dict_experiment.get_network_sides(group):
                for channel in self._dict_experiment.get_list_channels(group, network_side):
                    for subject_name in subjects_rotate_list:
                        if subject_name not in subjects_train:
                            for subject_repeat in subjects_dict[subject_name]:
                                del (dict_cv[CV_TRAIN][network_side][channel][subject_repeat])
                        if subject_name not in subjects_test:
                            for subject_repeat in subjects_dict[subject_name]:
                                del (dict_cv[CV_TEST][network_side][channel][subject_repeat])
            yield (DataOperator(dict_cv))

    def get_groups_keys(self):
        """Return the list of group names."""
        return self._dict_experiment.get_list_groups()

    def get_subjects_keys(self, group):
        """Return the list of subject names in *group*."""
        return self._dict_experiment.get_list_subjects(group)

    def get_data_from_groups(self, groups='all'):
        """Return the data for the requested groups (default: all of them)."""
        return self._dict_experiment.get_data(groups=groups)
class Experiment:
def __init__(self, experiment_path, folder_records='records', folder_logs='logs_tb', folder_models='models',
folder_session_details='session_details', folder_production='production'):
self.logger = None
self.print = None
self._data_set = None
self.experiment_path = experiment_path
self._continue_session = True
self._folder_records = folder_records
self._folder_logs = folder_logs
self._folder_models = folder_models
self._folder_session_details = folder_session_details
self._folder_production = folder_production
self._train_name = None
self._test_name = None
self.channels_input = None
self.channels_output = None
self._session_name = None
self.dataset_info = None
self.tfrecord_info = None
self.train_info = None
self._clean_up_session = False
self._generate_folders()
def _get_channels_info(self):
channels = dict()
for network_side in self._data_set.get_network_sides(self._train_name):
channels[network_side] = len(self._data_set.get_list_channels(self._train_name, network_side))
return channels
def get_data(self):
if self._test_name:
return self._data_set.get_data([self._train_name, self._test_name])
else:
return self._data_set.get_data(self._train_name)
def _generate_folders(self):
if not os.path.exists(self.experiment_path):
os.makedirs(self.experiment_path)
if not os.path.exists(os.path.join(self.experiment_path, self._folder_records)):
os.makedirs(os.path.join(self.experiment_path, self._folder_records))
if not os.path.exists(os.path.join(self.experiment_path, self._folder_logs)):
os.makedirs(os.path.join(self.experiment_path, self._folder_logs))
if not os.path.exists(os.path.join(self.experiment_path, self._folder_session_details)):
os.makedirs(os.path.join(self.experiment_path, self._folder_session_details))
if not os.path.exists(os.path.join(self.experiment_path, self._folder_models)):
os.makedirs(os.path.join(self.experiment_path, self._folder_models))
if not os.path.exists(os.path.join(self.experiment_path, self._folder_production)):
os.makedirs(os.path.join(self.experiment_path, self._folder_production))
def get_records_path(self):
return os.path.join(self.experiment_path, self._folder_records)
def get_log_session_path(self):
return os.path.join(self.experiment_path, self._folder_logs, self._session_name)
def get_logs_experiment(self):
return os.path.join(self.experiment_path, self._folder_logs)
def get_production_path(self):
return os.path.join(self.experiment_path, self._folder_production)
def get_models_session_path(self):
return os.path.join(self.experiment_path, self._folder_models, self._session_name)
def get_details_session_path(self):
return os.path.join(self.experiment_path, self._folder_session_details, self._session_name)
def get_production_session_path(self):
return os.path.join(self.experiment_path, self._folder_production, self._session_name)
def get_record_train_name(self):
return self._train_name + '.tfrecord'
def get_record_test_name(self):
return self._test_name + '.tfrecord'
def _generate_folders_session(self):
if self._clean_up_session and not self._continue_session:
print('Session cleaned up!')
self._delete_session()
if not os.path.exists(self.get_log_session_path()):
os.makedirs(self.get_log_session_path())
elif not self._continue_session and not self._clean_up_session:
print(' session name %s already used.' % self._session_name)
print(' ¿Are you retraining?, please confirm flags "session_continue" and "clean_up" ')
raise ValueError('Set session_continue flag and clean_up flag')
if not os.path.exists(self.get_models_session_path()):
os.makedirs(self.get_models_session_path())
elif not self._continue_session and not self._clean_up_session:
print(' session name %s already used.' % self._session_name)
print(' ¿Are you retraining?, please confirm flags "session_continue" and "clean_up" ')
raise ValueError('Set session_continue flag and clean_up flag')
if not os.path.exists(self.get_details_session_path()):
os.makedirs(self.get_details_session_path())
elif not self._continue_session and not self._clean_up_session:
print(' session name %s already used.' % self._session_name)
print(' ¿Are you retraining?, please confirm flags "session_continue" and "clean_up" ')
raise ValueError('Set session_continue flag and clean_up flag')
if not os.path.exists(self.get_production_session_path()):
os.makedirs(self.get_production_session_path())
elif not self._continue_session and not self._clean_up_session:
print(' session name %s already used.' % self._session_name)
print(' ¿Are you retraining?, please confirm flags "session_continue" and "clean_up" ')
raise ValueError('Set session_continue flag and clean_up flag')
def set_session(self, session_name, session_data, train_name, test_name=None, continue_session=True,
clean_up=False):
assert isinstance(session_data, DataOperator)
self._data_set = session_data
self._session_name = session_name
self._train_name = train_name
if test_name:
self._test_name = test_name
self._continue_session = continue_session
self._clean_up_session = clean_up
self._generate_folders_session()
logging.basicConfig(level=logging.INFO, format='%(message)s')
self.logger = logging.getLogger()
channels_info = self._get_channels_info()
self.channels_input = channels_info[NN_IN]
self.channels_output = channels_info[NN_OUT]
self.print_info()
def set_record_session(self, session_data, train_name, test_name=None):
assert isinstance(session_data, DataOperator)
self._data_set = session_data
self._train_name = train_name
if test_name:
self._test_name = test_name
def print_info(self):
print = self.open_logger()
print('############################################################')
print('-Experiment path: %s' % self.experiment_path)
print('-Session name: %s' % self._session_name)
print('-dataset used:')
print(self._data_set)
self.close_logger()
def open_logger(self):
self.logger.addHandler(
logging.FileHandler(os.path.join(self.get_details_session_path(), self._session_name + '.log'), 'a'))
return self.logger.info
def close_logger(self):
self.logger.handlers.pop()
def _delete_session(self):
import shutil
if os.path.exists(os.path.join(self.get_models_session_path())):
shutil.rmtree(os.path.join(self.get_models_session_path(), ''))
if os.path.exists(os.path.join(self.get_log_session_path())):
shutil.rmtree(os.path.join(self.get_log_session_path(), ''))
if os.path.exists(os.path.join(self.get_details_session_path())):
shutil.rmtree(os.path.join(self.get_details_session_path(), ''))
    def get_continue_flag(self):
        """Return whether this session was configured to continue (resume) training."""
        return self._continue_session
class InputDictCreator:
    """Builds per-slice input dictionaries from a DataOperator dataset.

    Reads 3-D volumes (NIfTI by default), optionally resizes slices and
    applies a per-channel z-score correction, and produces, per subject, a
    list of {network_side: 2-D slice array} dicts suitable for tfrecords.
    """

    def __init__(self, dictionary):
        assert (isinstance(dictionary, DataOperator))
        self.dictionary = dictionary
        # network_side -> (height, width) target slice size, set via resize_slices().
        self._shapes_tfrecord = dict()
        # When True, _list_slices_subject applies (x - mean) / std per channel.
        self.z_correction = False
        # Axis along which volumes are sliced; rolled to last position on read.
        self.slice_dim = 2
        self.means_z = None
        self.stds_z = None
        # Optional network side whose slices are screened for labels.
        self.network_side_label_check = None
        self._NN_IN_DTYPE = 'float'
        self._NN_OUT_DTYPE = 'float'
        self._NN_ROI_DTYPE = 'int'
        self.dtypes_dict = {
            NN_IN: self._NN_IN_DTYPE,
            NN_OUT: self._NN_OUT_DTYPE,
            NN_ROI: self._NN_ROI_DTYPE
        }
        # Optional user-supplied reader; falls back to _read_nii.
        self._read_function = None
        # Set once z-correction parameters have been computed or supplied.
        self._Z_CORR = False

    @staticmethod
    def _read_nii(subject_path):
        """Load a volume with medpy when available, otherwise nibabel."""
        if medpy_found:
            vol, _ = medpy.io.load(subject_path)
        else:
            img = nib.load(subject_path)
            # np.asanyarray(img.dataobj) is the nibabel-recommended replacement
            # for the removed img.get_data() and preserves the on-disk dtype.
            vol = np.squeeze(np.asanyarray(img.dataobj))
        return vol

    def resize_slices(self, new_size, group='all_groups', network_side=None):
        """Register *new_size* (height, width) as the target slice size.

        Applies to every group by default, or to a single group, and to every
        network side of those groups unless *network_side* is given.
        """
        # Fixed: was `group is 'all_groups'` — identity comparison on string
        # literals only works by interning accident; use equality.
        if group == 'all_groups':
            groups_to_do = self.dictionary.get_list_groups()
        else:
            groups_to_do = [group]
        if network_side is None:
            for group in groups_to_do:
                for network_side in self.dictionary.get_network_sides(group):
                    self._set_size_side(group, network_side, new_size)
        else:
            for group in groups_to_do:
                self._set_size_side(group, network_side, new_size)

    def _set_size_side(self, group, network_side, new_size):
        """Validate *new_size* and store it for *network_side*."""
        if not isinstance(new_size, tuple):
            raise ValueError('Error: "new_shape" must be a tuple')
        if len(new_size) != 2:
            raise ValueError('Error: "new_shape" must have two values')
        if network_side not in self.dictionary.get_network_sides(group):
            # Fixed: the %s placeholder was never filled in.
            raise ValueError('Error: %s is not a network side' % network_side)
        self._shapes_tfrecord[network_side] = new_size

    def set_read_function(self, new_read_function):
        """Install a custom volume reader: callable(path) -> ndarray."""
        self._read_function = new_read_function

    def _read_data(self, subject_path):
        """Read a volume and roll the slicing axis to the last position."""
        if self._read_function is not None:
            vol = self._read_function(subject_path)
        else:
            vol = self._read_nii(subject_path)
        # Always perform a np.rollaxis, we want the slicing position last
        if self.slice_dim != 2:
            vol = np.rollaxis(vol, self.slice_dim, 3)
        return vol

    @staticmethod
    def _resize_slice(slice_image, newsize, inter):
        """Resize a 2-D slice with cv2: cubic for float data, nearest for int."""
        if cv2_found:
            # Fixed: was `inter is 'float'` / `is 'int'` (string identity).
            if inter == 'float':
                inter = cv2.INTER_CUBIC
            elif inter == 'int':
                inter = cv2.INTER_NEAREST
            slice_image = cv2.resize(slice_image, newsize,
                                     interpolation=inter)
            # Fixed: was `ndim is 2`; cv2 drops a singleton channel axis, restore it.
            if slice_image.ndim == 2:
                slice_image = np.expand_dims(slice_image, axis=-1)
        else:
            raise ValueError(
                ' CV2 is not installed and is needed for resize slices, to install it use "sudo pip install opencv-python"')
        return slice_image

    def calc_z_correction(self, group, use_roi=False):
        """Compute per-channel mean/std of NN_IN data for z-score correction.

        Args:
            group: group name whose data is used for the statistics.
            use_roi: restrict statistics to the ROI mask.  If the number of
                ROI channels differs from the number of input channels, only
                the first ROI channel is used.

        Returns:
            (means, stds) lists in channel order; also stored on the object.
        """
        means_per_channel = []
        stds_per_channel = []
        channel_list = self.dictionary.get_list_channels(group, NN_IN)
        subject_list = self.dictionary.get_list_subjects(group)
        for channel in channel_list:
            vol_list_flatten = []
            for subject in subject_list:
                vol_subject = self._read_data(self.dictionary.get_subject_path(group, NN_IN, channel, subject))
                if use_roi:
                    if len(self.dictionary.get_list_channels(group, NN_IN)) == len(
                            self.dictionary.get_list_channels(group, NN_ROI)):
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, channel, subject))
                    else:
                        roi_subject = self._read_data(
                            self.dictionary.get_subject_path(group, NN_ROI, 1, subject))
                    # Keep only voxels inside the ROI mask.
                    vol_list_flatten.append(np.extract(roi_subject, vol_subject))
                else:
                    vol_list_flatten.append(vol_subject.flatten())
            data_for_scale = np.concatenate(vol_list_flatten)
            means_per_channel.append(np.mean(data_for_scale))
            stds_per_channel.append(np.std(data_for_scale))
        self.means_z = means_per_channel
        self.stds_z = stds_per_channel
        self._Z_CORR = True
        return means_per_channel, stds_per_channel

    def set_z_correction(self, means_z, stds_z):
        """Manually supply precomputed z-correction means/stds (channel order)."""
        self.means_z = means_z
        self.stds_z = stds_z
        self._Z_CORR = True

    def _get_subject_data(self, group, subject):
        """Return {network_side: 4-D array (h, w, slices, channels)} for *subject*."""
        network_vols3d_subject = {}
        for network_side in self.dictionary.get_network_sides(group):
            if self.dictionary.get_list_channels(group, network_side):
                vol_channels_list = []
                for channel in self.dictionary.get_list_channels(group, network_side):
                    vol = self._read_data(self.dictionary.get_subject_path(group, network_side, channel, subject))
                    vol = np.expand_dims(vol, axis=-1)
                    vol_channels_list.append(vol)
                # Stack channels along the last axis.
                network_vols3d_subject[network_side] = np.concatenate(vol_channels_list,
                                                                     axis=vol_channels_list[0].ndim - 1)
        return network_vols3d_subject

    def set_dtypes(self, dtype_in='float', dtype_out='float', dtype_roi='int'):
        """Set the interpolation dtype ('float' or 'int') for each network side."""
        if dtype_in != 'float' and dtype_in != 'int' or dtype_out != 'float' and dtype_out != 'int' or dtype_roi != 'float' and dtype_roi != 'int':
            # Fixed message typo ("founded" -> "found").
            raise ValueError(' Bad dtype found.')
        self._NN_IN_DTYPE = dtype_in
        self._NN_OUT_DTYPE = dtype_out
        self._NN_ROI_DTYPE = dtype_roi
        # Map the sides of the first group, in order, to the (in, out, roi) dtypes.
        for network_side, dtype in zip(self.dictionary.get_network_sides(self.dictionary.get_list_groups()[0]),
                                       [self._NN_IN_DTYPE, self._NN_OUT_DTYPE, self._NN_ROI_DTYPE]):
            self.dtypes_dict[network_side] = dtype

    @staticmethod
    def _check_valid_output_slice(network_vols3d_subject, slice_id, network_side):
        """Return True iff slice *slice_id* of *network_side* has any nonzero label."""
        slice_mask = network_vols3d_subject[network_side][:, :, slice_id, :].astype(
            np.float32)
        return bool(np.sum(slice_mask) > 0)

    def _list_slices_subject(self, group, subject):
        """Return a list of per-slice {network_side: 2-D array} dicts for *subject*."""
        network_vols3d_subject = self._get_subject_data(group, subject)
        _, _, slices, _ = network_vols3d_subject[NN_IN].shape
        tfrecord_slice_list = []
        for slice_id in range(0, slices):
            if self.network_side_label_check:
                # NOTE(review): this skips slices that DO contain labels; if the
                # intent was to keep only labelled slices, the condition should
                # be negated — behavior preserved here, confirm before changing.
                if self._check_valid_output_slice(network_vols3d_subject, slice_id, self.network_side_label_check):
                    continue
            slice_data_dict = dict()
            for network_side in self.dictionary.get_network_sides(group):
                # Resize only when a target shape was registered and differs.
                if network_side in list(self._shapes_tfrecord.keys()) and \
                        network_vols3d_subject[network_side][:, :, slice_id, :].shape[0:2] != self._shapes_tfrecord[
                            network_side]:
                    slice_data_dict[network_side] = self._resize_slice(
                        network_vols3d_subject[network_side][:, :, slice_id, :]
                        , self._shapes_tfrecord[network_side], self.dtypes_dict[network_side]).astype(np.float32)
                else:
                    slice_data_dict[network_side] = network_vols3d_subject[network_side][:, :, slice_id, :].astype(
                        np.float32)
            if self.z_correction:
                if self._Z_CORR:
                    # Per-channel z-score normalization of the input side.
                    slice_data_dict[NN_IN] = ((slice_data_dict[NN_IN] - self.means_z) / self.stds_z).astype(
                        np.float32)
                else:
                    raise ValueError(
                        'Error: The calculation of the Z correction input parameters must be done before creating the tfrecord, \
                        or they must be sat manually in the object')
            tfrecord_slice_list.append(slice_data_dict)
        return tfrecord_slice_list

    def create_dict_group(self, group):
        """Build {subject: [slice dicts]} for every subject in *group*."""
        print('group %s' % group)
        subjects = self.dictionary.get_list_subjects(group)
        group_dictionary_iter = dict()
        for subject in subjects:
            group_dictionary_iter[subject] = []
            print('subject %s' % subject)
            group_dictionary_iter[subject] = self._list_slices_subject(group, subject)
        return group_dictionary_iter
|
from django.contrib.auth.views import LoginView, LogoutView
from django.contrib.auth.mixins import LoginRequiredMixin
from cinema.permission import LoginSuperUserRequiredMixin
from django.views.generic import ListView, CreateView, UpdateView
from django.contrib import messages
from django.shortcuts import redirect
from django.utils import timezone
from datetime import timedelta
from cinema.models import Hall, Movies, Sessions, Purchase
from cinema.forms import RegisterForm, SessionCreateForm, HallsCreateForm, MoviesCreateForm, \
PurchaseForm, SortForm
class Login(LoginView):
    """Sign-in view; redirects to the landing page on success."""
    template_name = 'login.html'
    def get_success_url(self):
        return '/'
class Logout(LogoutView):
    """Sign-out view; sends the user back to the landing page."""
    next_page = '/'
class Register(CreateView):
    """New-user registration; redirects to the landing page on success."""
    form_class = RegisterForm
    template_name = 'register.html'
    def get_success_url(self):
        return '/'
class SessionsListView(ListView):
    """Paginated session list, filterable by day and sortable by price/time."""
    model = Sessions
    template_name = 'session_list.html'
    extra_context = {'purchase_form': PurchaseForm, }
    paginate_by = 3

    def get_queryset(self):
        """Limit to today/tomorrow when requested, else all not-yet-finished sessions."""
        now = timezone.now()
        next_day = timezone.now() + timedelta(days=1)
        base_qs = super().get_queryset()
        day_choice = self.request.GET.get('session_form')
        if day_choice == 'Today':
            return base_qs.filter(date_start_show__lte=now, date_end_show__gt=now)
        if day_choice == 'Tomorrow':
            return base_qs.filter(date_start_show__lte=next_day, date_end_show__gt=next_day)
        return base_qs.filter(date_end_show__gte=now)

    def get_ordering(self):
        """Map the 'sort_form' GET parameter onto a model ordering."""
        sort_map = {
            'PriceLH': ['price'],
            'PriceHL': ['-price'],
            'Time': ['start_session_time'],
        }
        choice = self.request.GET.get('sort_form')
        if choice in sort_map:
            self.ordering = sort_map[choice]
        return self.ordering

    def get_context_data(self, **kwargs):
        """Expose the sort form to the template alongside the default context."""
        context = super().get_context_data(**kwargs)
        context['sort'] = SortForm
        return context
class SessionCreateView(LoginSuperUserRequiredMixin, CreateView):
    """Superuser-only session creation; seeds free_seats from the chosen hall's size."""
    model = Sessions
    form_class = SessionCreateForm
    success_url = '/create_sessions/'
    template_name = 'create_sessions.html'

    def form_valid(self, form):
        new_session = form.save(commit=False)
        # A fresh session starts with every seat of the selected hall available.
        selected_hall = Hall.objects.get(id=self.request.POST['hall_name'])
        new_session.free_seats = selected_hall.size
        new_session.save()
        return super().form_valid(form=form)
class HallsCreateView(LoginSuperUserRequiredMixin, CreateView):
    """Superuser-only form for creating halls."""
    model = Hall
    form_class = HallsCreateForm
    success_url="/"
    template_name = 'create_halls.html'
class MoviesCreateView(LoginSuperUserRequiredMixin, CreateView):
    """Superuser-only form for adding movies."""
    login_url = "login/"
    form_class = MoviesCreateForm
    template_name = 'create_movies.html'
    extra_context = {'add_form': MoviesCreateForm()}
    success_url='/movies/list_of_movies'
class MoviesListView(LoginRequiredMixin, ListView):
    """Authenticated-only listing of all movies."""
    model = Movies
    login_url = "login/"
    template_name = 'movies_list.html'
class ProductPurchaseView(LoginRequiredMixin, CreateView):
    """Ticket purchase: decrements the session's free seats and charges the user."""
    model = Purchase
    form_class = PurchaseForm
    template_name = 'session_list.html'
    paginate_by = 5
    success_url='/'
    def form_valid(self, form):
        purchase = form.save(commit=False)
        quantity = int(form.data['quantity'])
        user = self.request.user
        # NOTE(review): "сonsumer" appears to start with a Cyrillic 'с';
        # presumably it matches the model field spelling — confirm.
        purchase.сonsumer = user
        session = Sessions.objects.get(id=self.request.POST['session'])
        purchase.session = session
        # Requested seat count from the bound form instance; treat falsy as 0.
        total_quantity = purchase.quantity
        if not total_quantity:
            total_quantity = 0
        free_seats = session.free_seats - total_quantity
        if free_seats < 0:
            # Not enough seats left: abort without saving the purchase.
            messages.error(self.request, f'Dont enough free seats!')
            return redirect(f"/")
        session.free_seats = free_seats
        session.save()
        # Charge the user for the tickets at the session price.
        user.spent += quantity * session.price
        user.save()
        purchase.save()
        # NOTE(review): super().form_valid() calls form.save() again, saving the
        # purchase a second time — harmless as an update, but worth confirming.
        return super().form_valid(form=form)
class ProductPurchaseListView(LoginRequiredMixin, ListView):
    """The authenticated user's own ticket purchases."""
    login_url = "login/"
    model = Purchase
    template_name = 'purchases_list.html'
    def get_queryset(self):
        # Only purchases made by the requesting user (reverse FK accessor).
        return self.request.user.consumer_purchase.all()
class UpdateProductView(LoginSuperUserRequiredMixin, UpdateView):
    """Superuser-only editing of an existing session."""
    template_name = 'update_sessions.html'
    model = Sessions
    form_class = SessionCreateForm
    success_url = '/'
class UpdateHallsView(LoginSuperUserRequiredMixin, UpdateView):
    """Superuser-only editing of an existing hall."""
    template_name = 'update_halls.html'
    model = Hall
    form_class = HallsCreateForm
    success_url = '/'
|
class PublicKey:
    """Parses a public-key text blob like "[a, b, c]" into a list of tokens."""

    def __init__(self, pkey):
        # `key` holds the comma-separated tokens with brackets/whitespace removed.
        self.key = self.get_keytext(pkey)

    def get_keytext(self, keytext):
        """Return the cleaned token list for *keytext*."""
        return self.split_file(keytext)

    @staticmethod
    def split_file(keytext):
        """Strip '[', ']' and all whitespace, then split on commas.

        A single str.translate() pass replaces the original chain of six
        .replace() calls (same characters removed, one C-level pass).
        """
        return keytext.translate(str.maketrans('', '', '[]\t \n\r')).split(',')
|
# Importing necessary libraries
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix
# The following represents a class wrapping a Gradient Boosting classifier
#
from analysis.analysis_interface import AnalysisInterface
from response import Response
# Definition of the Gradient Boosting classification class with its functions
"""
This class defines the blueprint of the AI method - Gradient Boosting
"""
class GradientBoostingClassification(AnalysisInterface):
    """Wraps sklearn's GradientBoostingClassifier behind the AnalysisInterface.

    Holds the train/test split, builds and fits the classifier, and produces
    scores, predictions and diagnostic plots (confusion matrix and feature
    importances saved as PNG files under plots/).
    """

    def __init__(self, X_train, X_test, y_train, y_test):
        """Store the train/test split; the classifier is built lazily by build_model()."""
        self.__X_train = X_train
        self.__X_test = X_test
        self.__y_train = y_train
        self.__y_test = y_test
        self.__classifier = None
        self.__predictionValues = None
        # Per-feature importances, populated by build_model().
        self.importances = list()

    def __del__(self):
        print('AI Model has been deleted')

    def build_model(self, learning_rate, n_estimators, subsample, criterion, min_samples_split, max_depth,
                    max_features, init, random_state, max_leaf_nodes):
        """Create and fit a GradientBoostingClassifier with the given hyperparameters.

        Returns:
            Response.success(classifier) when fitting succeeds,
            Response.failure('Error building model') otherwise.
        """
        try:
            self.__classifier = GradientBoostingClassifier(learning_rate=learning_rate, n_estimators=n_estimators,
                                                           subsample=subsample, criterion=criterion,
                                                           min_samples_split=min_samples_split, max_depth=max_depth,
                                                           max_features=max_features, init=init,
                                                           random_state=random_state, max_leaf_nodes=max_leaf_nodes)
            self.__classifier.fit(X=self.__X_train, y=self.__y_train, sample_weight=None)
            # Feature importances of the fitted ensemble.
            self.importances = self.__classifier.feature_importances_
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to ordinary exceptions.
            return Response.failure('Error building model')
        return Response.success(self.__classifier)

    def get_train_score(self):
        """Return the mean accuracy on the training split."""
        score = self.__classifier.score(self.__X_train, self.__y_train)
        return score

    def get_test_score(self):
        """Return the mean accuracy on the test split."""
        score = self.__classifier.score(self.__X_test, self.__y_test)
        return score

    def predict_y_from_x_test(self):
        """Return the model's predictions for the stored test inputs."""
        y_predict = self.__classifier.predict(self.__X_test)
        return y_predict

    def predict_y(self, X_Data):
        """Return predictions for arbitrary input data *X_Data*."""
        y_predict = self.__classifier.predict(X_Data)
        return y_predict

    def get_model_params(self):
        """Return the fitted classifier's hyperparameters (deep copy of nested params)."""
        return self.__classifier.get_params(deep=True)

    def get_confusion_plot(self, Ydata):
        """Save a confusion-matrix heatmap for the test split; return the file name.

        NOTE(review): the Ydata argument is immediately overwritten with the
        model's own test-set predictions, so the parameter is effectively
        unused — preserved as-is because callers may rely on the signature.
        """
        Ydata = self.predict_y_from_x_test()
        y = np.array(Ydata)
        label = np.unique(y)
        confuseMatrix = confusion_matrix(self.__y_test, Ydata)
        plt.figure(figsize=(10, 10))
        sns.heatmap(confuseMatrix, annot=True, linewidths=0.5, xticklabels=label, yticklabels=label)
        plt.xlabel('Predicted')
        plt.ylabel('Truth')
        plt.title('RFC Confusion Matrix')
        # Randomized + timestamped name avoids clobbering earlier plots.
        figure_name = 'plots/figure_{}_{}.png'.format(random.randint(100, 999), time.time() * 1000)
        plt.savefig(figure_name)
        plt.close()
        return figure_name

    def get_visual_plot(self):
        """Save a bar plot of the model's feature importances; return the file name."""
        # Set up plot parameters
        plt.figure(figsize=(20, 20))
        width = 0.2
        importances = self.importances
        xValues = list(range(len(importances)))
        plt.bar(xValues, importances, width, color='r')
        # Label bars with the test-set column names.
        plt.xticks(xValues, self.__X_test.columns)
        plt.xlabel('Features', fontsize=16)
        plt.ylabel('Importance', fontsize=16)
        plt.title('Features Of Importance', fontsize=20)
        figure_name = 'plots/figure_{}_{}.png'.format(random.randint(100, 999), time.time() * 1000)
        plt.savefig(figure_name)
        plt.close()
        return figure_name
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 15 19:55:55 2019
@author: Yudy Andrea Fierro
"""
# EXERCISE 8: among 1..100, collect the first 15 multiples of 3, and then
# every multiple of 4 that appears once those 15 have been gathered.
mul_3 = []
mul_4 = []
for number in range(1, 101):
    # First 15 multiples of 3 only.
    if number % 3 == 0 and len(mul_3) < 15:
        mul_3.append(number)
    # Multiples of 4, but only after the 15 multiples of 3 are complete.
    if number % 4 == 0 and len(mul_3) == 15:
        mul_4.append(number)
print('Primeros 15 multiplos de 3 de los primeros 100 números enteros '
      'positivos:',mul_3, end = '\n\n')
print('Multiplos de 4 a partir de los primeros 15 multiplos de 3 en los '
      'primeros 100 números enteros positivos',mul_4)
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import keras

odd_nums = np.array([1,3,5,7,9,11,13])

# Build sliding-window training samples: each input is `window_size`
# consecutive values and the target is the value that follows the window.
series = np.array([1,3,5,7,9,11,13])
window_size = 2
X = []
y = series[window_size:]
# Fixed off-by-one: the original iterated len(series) - window_size - 1 times,
# producing one fewer input window than there are targets in y.
for index in range(len(series) - window_size):
    window = series[index : index + window_size]  # renamed from `input` (shadowed builtin)
    X.append(window)
import re, string, timeit

# Benchmark fixtures for the punctuation-stripping comparisons below.
# NOTE: Python 2 code — string.maketrans was removed in Python 3 (use
# str.maketrans there).
s = "string. With. Punctuation"
exclude = set(string.punctuation)
table = string.maketrans("","")
regex = re.compile('[%s]' % re.escape(string.punctuation))
def test_set(s):
    """Strip punctuation by filtering characters through the precomputed set."""
    kept = (ch for ch in s if ch not in exclude)
    return ''.join(kept)
def test_re(s): # From Vinko's solution, with fix.
    """Strip punctuation with the precompiled character-class regex."""
    return regex.sub('', s)
def test_trans(s):
    """Strip punctuation via str.translate (Python 2 two-argument form)."""
    return s.translate(table, string.punctuation)
def test_repl(s): # From S.Lott's solution
for c in string.punctuation:
s=s.replace(c,"")
return s
# Time each implementation over 1,000,000 runs (Python 2 print statements).
print "sets :",timeit.Timer('f(s)', 'from __main__ import s,test_set as f').timeit(1000000)
print "regex :",timeit.Timer('f(s)', 'from __main__ import s,test_re as f').timeit(1000000)
print "translate :",timeit.Timer('f(s)', 'from __main__ import s,test_trans as f').timeit(1000000)
print "replace :",timeit.Timer('f(s)', 'from __main__ import s,test_repl as f').timeit(1000000)
|
import glob
import shutil
import re
from typing import List, Union

# Working list of report files after duplicate resolution (filled at the bottom).
fileList_working = []
def get_files(spec = 0):
    """Return report file paths, excluding temporary files (names containing '$').

    spec: 0 to glob the network share for .doc/.docx reports, or an explicit
    list of paths to filter instead.
    """
    if spec == 0:
        candidates = sorted(glob.glob('R:/groups/seeley_pathology/NP Reports/Finalized/*.doc*'))
    else:
        candidates = spec
    return [name for name in candidates if '$' not in name]
"""
print('\nResults of get_files:')
for filename in get_files():
print(filename)
print(len(get_files()))
"""
def get_pNums(spec=0):
    """Return sorted unique P-numbers parsed from report filenames.

    The P-number is the first five characters of the fourth '_'-separated
    filename field.  spec: 0 (use get_files()) or an explicit file list.
    NOTE(review): passing spec='duplicates' makes fileList the string itself
    and iterates its characters — that branch looks broken; get_dup_pNums
    is the working duplicate report.  Behavior preserved here.
    """
    fileList = get_files() if spec == 0 else spec
    pNum_list = []
    dup_pNums = []
    for filename in fileList:
        pNum = filename.split('_')[3][:5]
        if pNum in pNum_list:
            dup_pNums.append(pNum)
        else:
            pNum_list.append(pNum)
    if spec == 'duplicates':
        return dup_pNums
    return sorted(pNum_list)
"""
print('\nResults of get_pNums:')
print(get_pNums())
print(len(get_pNums()))
"""
def get_dup_pNums(spec = 0):
    """Return P-numbers that occur more than once (one entry per extra occurrence).

    spec: 0 (use get_files()) or an explicit file list.
    """
    fileList = get_files() if spec == 0 else spec
    seen = []
    duplicates = []
    for filename in fileList:
        pNum = filename.split('_')[3][:5]
        # Append to `duplicates` on repeat sightings, otherwise record as seen.
        (duplicates if pNum in seen else seen).append(pNum)
    return duplicates
"""
print('\nResults of get_dup_pNums:')
for pNum in sorted(get_dup_pNums()):
print(pNum)
print(len(get_dup_pNums()))
"""
def remove_dup_pNums(spec = 0):
    """Interactively prune the file list so each P-number keeps exactly one file.

    For every duplicated P-number, the user is asked via input() which file to
    keep; the others are removed from the list, which is returned.
    spec: 0 (use get_files()) or an explicit file list.
    """
    if spec == 0:
        fileList = get_files()
    else:
        fileList = spec
    #global fileList_working
    #if not fileList_working:
    #    fileList = get_files()
    #else:
    #fileList = fileList_working
    dup_pNums = get_dup_pNums(spec)
    if not dup_pNums:
        print('\nWorking file list (no duplicate P numbers)')
    else:
        for pNum in dup_pNums:
            dup_pNum_files = []
            for filename in fileList:
                if pNum in filename:
                    dup_pNum_files.append(filename) # I think the problem is here.
            i = 1
            pNum_choice_numbers = [i]
            print('Which of these files would you like to keep in the queue?')
            if len(dup_pNum_files) > 1:
                for filename in dup_pNum_files:
                    # NOTE(review): prints the literal "(unknown)" rather than
                    # the candidate filename — presumably meant f'{i}: {filename}';
                    # confirm before changing.
                    print(f'{i}: (unknown)')
                    i = i+1
                    pNum_choice_numbers.append(i)
                usr_choice = int(input('Please type the number of file you would like to keep (on the left):'))
                for filename in dup_pNum_files:
                    # Drop every candidate except the 1-based choice.
                    if dup_pNum_files.index(filename) != (usr_choice - 1):
                        fileList.remove(filename)
    fileList_working = fileList
    return fileList_working
# Resolve duplicates twice: once from the globbed list, then again over the
# result (interactive — prompts the user for each duplicated P-number).
fileList_working_0 = remove_dup_pNums()
fileList_working = remove_dup_pNums(fileList_working_0)
print('\nResults of remove_dup_pNums (fileList_working_0):')
for filename in fileList_working_0:
    print(filename)
print(len(fileList_working_0))
"""
print('\nResults of remove_dup_pNums (fileList_working):')
for filename in fileList_working:
    print(filename)
print(len(fileList_working))
## Okay, this seems chill...
## Let's try and actually get in the files...
"""
#print('Result of get_pNums:')
#get_pNums()
#get_dup_pNums()
#print('Result of remove_dup_pNums:')
#remove_dup_pNums()
#print('Result of ')
#get_files()
|
import tensorflow_datasets as tfds
import tensorflow as tf
import time
import numpy as np
from model import Transformer
from modules import create_look_ahead_mask, create_padding_mask

# Load the TED talks Portuguese->English translation dataset.
examples, metadata = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True,
                               as_supervised=True)
train_examples, val_examples = examples['train'], examples['validation']
# Build ~8k-token subword tokenizers for each language from the training corpus.
tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    (en.numpy() for pt, en in train_examples), target_vocab_size=2**13)
tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
    (pt.numpy() for pt, en in train_examples), target_vocab_size=2**13)
# sample_string = 'Transformer is awesome.'
# tokenized_string = tokenizer_en.encode(sample_string)
# print('Tokenized string is {}'.format(tokenized_string))
# original_string = tokenizer_en.decode(tokenized_string)
# print('The original string: {}'.format(original_string))
# assert original_string == sample_string
# for ts in tokenized_string:
#   print('{} ----> {}'.format(ts, tokenizer_en.decode([ts])))

# Shuffle-buffer and batch sizes for the tf.data pipeline below.
BUFFER_SIZE = 1000
BATCH_SIZE = 64
def encode(lang1, lang2):
    """Tokenize a (pt, en) sentence pair, wrapping each in start/end tokens.

    The start token id is vocab_size and the end token id is vocab_size + 1
    for the corresponding tokenizer.
    """
    pt_ids = [tokenizer_pt.vocab_size] + tokenizer_pt.encode(
        lang1.numpy()) + [tokenizer_pt.vocab_size + 1]
    en_ids = [tokenizer_en.vocab_size] + tokenizer_en.encode(
        lang2.numpy()) + [tokenizer_en.vocab_size + 1]
    return pt_ids, en_ids
def tf_encode(pt, en):
    """Graph-compatible wrapper around encode() via tf.py_function."""
    pt_ids, en_ids = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
    # py_function loses shape info; restore rank-1 shapes for the pipeline.
    pt_ids.set_shape([None])
    en_ids.set_shape([None])
    return pt_ids, en_ids
# Longest token sequence kept in either language.
MAX_LENGTH = 40
def filter_max_length(x, y, max_length=MAX_LENGTH):
    """Keep only pairs where both sentences fit within max_length tokens."""
    return tf.logical_and(tf.size(x) <= max_length,
                          tf.size(y) <= max_length)
# Tokenize, drop over-length pairs, cache, shuffle and bucket into padded batches.
train_dataset = train_examples.map(tf_encode)
train_dataset = train_dataset.filter(filter_max_length)
# cache the dataset to memory to get a speedup while reading from it.
train_dataset = train_dataset.cache()
train_dataset = train_dataset.shuffle(BUFFER_SIZE).padded_batch(
    BATCH_SIZE, padded_shapes=([None], [None]))
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
val_dataset = val_examples.map(tf_encode)
val_dataset = val_dataset.filter(filter_max_length).padded_batch(
    BATCH_SIZE, padded_shapes=([None], [None]))

# Model hyperparameters (small Transformer).
num_layers = 4
d_model = 128
d_ff = 512
num_heads = 8
# +2 accounts for the start/end tokens added in encode().
input_vocab_size = tokenizer_pt.vocab_size + 2
target_vocab_size = tokenizer_en.vocab_size + 2
dropout_rate = 0.1
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """The warmup learning-rate schedule from "Attention Is All You Need".

    lr(step) = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
    """

    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        # Stored as float32 so rsqrt works in __call__.
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        decay = tf.math.rsqrt(step)
        warmup = step * (self.warmup_steps ** -1.5)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay, warmup)
learning_rate = CustomSchedule(d_model)
# Adam with the beta/epsilon values recommended in the Transformer paper.
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
                                     epsilon=1e-9)
# reduction='none' keeps per-token losses so padding can be masked in loss_function.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')
def loss_function(real, pred):
    """Masked sparse cross-entropy: ignore padding (id 0), average over real tokens."""
    per_token_loss = loss_object(real, pred)
    pad_mask = tf.cast(tf.math.logical_not(tf.math.equal(real, 0)),
                       dtype=per_token_loss.dtype)
    masked_loss = per_token_loss * pad_mask
    return tf.reduce_sum(masked_loss) / tf.reduce_sum(pad_mask)
def create_masks(inp, tar):
    """Build the encoder padding, combined decoder, and decoder padding masks."""
    enc_padding_mask = create_padding_mask(inp)
    # The decoder's second attention block attends over encoder output, so it
    # reuses the input padding mask.
    dec_padding_mask = create_padding_mask(inp)
    # First decoder block: mask both future positions and target padding.
    combined_mask = tf.maximum(create_padding_mask(tar),
                               create_look_ahead_mask(tf.shape(tar)[1]))
    return enc_padding_mask, combined_mask, dec_padding_mask
# Running metrics, reset at the start of each epoch.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
    name='train_accuracy')
transformer = Transformer(num_layers, d_model, num_heads, d_ff,
                          input_vocab_size, target_vocab_size,
                          pos_enc_input=input_vocab_size,
                          pos_enc_target=target_vocab_size,
                          rate=dropout_rate)
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(transformer=transformer,
                           optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)
    print('Latest checkpoint restored!!')
EPOCHS = 20
# Fixed (batch, seq) int64 signature avoids retracing train_step for every
# new sequence length.
train_step_signature = [
    tf.TensorSpec(shape=(None, None), dtype=tf.int64),
    tf.TensorSpec(shape=(None, None), dtype=tf.int64),
]
@tf.function(input_signature=train_step_signature)
def train_step(inp, tar):
    """One graph-compiled optimizer step: teacher-forced forward pass + backprop."""
    # Decoder input is the target shifted right; the loss compares against the
    # target shifted left.
    tar_inp = tar[:, :-1]
    tar_real = tar[:, 1:]
    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
        inp, tar_inp)
    with tf.GradientTape() as tape:
        predictions, _ = transformer(inp, tar_inp,
                                     True,
                                     enc_padding_mask,
                                     combined_mask,
                                     dec_padding_mask)
        loss = loss_function(tar_real, predictions)
    gradients = tape.gradient(loss, transformer.trainable_variables)
    optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
    # Accumulate running metrics (reset once per epoch in the training loop).
    train_loss(loss)
    train_accuracy(tar_real, predictions)
for epoch in range(EPOCHS):
    start = time.time()
    # Fresh metrics each epoch.
    train_loss.reset_states()
    train_accuracy.reset_states()
    # inp -> portuguese, tar -> english
    for (batch, (inp, tar)) in enumerate(train_dataset):
        train_step(inp, tar)
        if batch % 50 == 0:
            print('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(
                epoch + 1, batch, train_loss.result(), train_accuracy.result()))
    # Checkpoint every 5 epochs.
    if (epoch + 1) % 5 == 0:
        ckpt_save_path = ckpt_manager.save()
        print('Saving checkpoint for epoch {} at {}'.format(epoch+1,
                                                            ckpt_save_path))
    print('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1,
                                                        train_loss.result(),
                                                        train_accuracy.result()))
    print('Time taken for 1 epoch: {} secs\n'.format(time.time() - start))
|
import numpy as np


class Dataset(object):
    """In-memory test-image set loaded from a binary file.

    Loads 6500 grayscale 28x28x1 images from ./images/test_image.bin and
    serves them in sequential batches.
    """

    def __init__(self, dtype='uint8', is_row_iamge=False):
        '''Image dataset.

        Args:
            dtype: 'uint8' keeps pixel values in [0, 255]; anything else
                converts to float32 in [0., 1.].
            is_row_iamge: if True, flatten each 28x28x1 image to a 784-vector.
                (The original misspelled parameter name is kept so existing
                keyword callers still work.)
        '''
        images = np.fromfile('./images/test_image.bin', dtype=np.uint8).reshape(-1, 28, 28, 1)
        print(images.shape)
        if dtype == 'uint8':
            self.images = images
        else:
            images = images.astype(np.float32) / 255.
            self.images = images
        if is_row_iamge:
            self.images = images.reshape([-1, 784])
        self.num_of_images = 6500
        self.offset = 0
        print('共6500张图片')  # original runtime output kept ("6500 images in total")

    def next_batch(self, batch_size=50):
        """Return the next batch of images, or False once the set is exhausted.

        The final batch may be smaller than batch_size, so a batch_size that
        divides 6500 evenly is recommended.
        """
        if (self.offset + batch_size) <= self.num_of_images:
            self.offset += batch_size
            return self.images[self.offset - batch_size : self.offset]
        elif self.offset < self.num_of_images:
            # Short final batch: return the remainder and mark the set consumed.
            # (The original never advanced offset here, so repeated calls
            # returned the same tail forever.)
            tail = self.images[self.offset:]
            self.offset = self.num_of_images
            return tail
        else:
            # Fixed: the original evaluated the bare expression `False` and fell
            # through, returning None; the documented contract is to return False.
            return False
if __name__ == '__main__':
    # Smoke test: load the dataset and print the shape of one default batch.
    images = Dataset()
    b_img = images.next_batch()
    print(b_img.shape)
|
# !/usr/bin/python3
from tkinter import *

# Minimal Tk window: a label on the left and a single-line text entry on the right.
top = Tk()
E1 = Entry(top, bd=5)  # bd=5: 5-pixel border around the entry box
E1.pack(side=RIGHT)
L1 = Label(top, text="User Name")
L1.pack(side=LEFT)
top.mainloop()  # blocks until the window is closed
|
import sys
import MainWinHorLayout
from PyQt5.QtWidgets import QApplication, QMainWindow

if __name__ == '__main__':
    # Create the QApplication instance.
    app = QApplication(sys.argv)
    # Create the top-level window.
    mainWindow = QMainWindow()
    # Fixed: UI_MainWindow must be instantiated — the original assigned the
    # class itself, so setupUi() was called unbound and raised a TypeError.
    ui = MainWinHorLayout.UI_MainWindow()
    ui.setupUi(mainWindow)
    mainWindow.show()
    # Enter the event loop; wrap in exit() so shutdown status is propagated.
    sys.exit(app.exec_())
import subprocess


def command(command):
    """Run *command* in a shell and return its output as bytes.

    On failure (non-zero exit status or unlaunchable command) returns an error
    message string instead of raising — note the bytes/str return-type split
    is preserved from the original for caller compatibility.

    NOTE(review): shell=True executes the string via the shell — never pass
    untrusted input to this function.
    """
    command = command.rstrip()
    try:
        stdout = subprocess.check_output(
            command,
            stderr=subprocess.STDOUT,
            shell=True
        )
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to ordinary exceptions.
        stdout = f"Can't execute: {command}"
    return stdout
|
'''
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from pydaos.raw import DaosApiError
import avocado
from data_mover_test_base import DataMoverTestBase
class DmvrSerialSmall(DataMoverTestBase):
    # pylint: disable=too-many-ancestors
    """Object Data Mover validation for serializing/deserializing
    generic containers at the object level.
    Test Class Description:
        Tests the following cases:
        Serializing a small container with daos-serialize.
        Deserializing a small container with daos-deserialize.
    :avocado: recursive
    """
    def setUp(self):
        """Set up each test case."""
        # Start the servers and agents
        super().setUp()
        # Get the dataset parameters
        self.num_objs = self.params.get(
            "num_objs", "/run/dataset/*")
        self.num_dkeys = self.params.get(
            "num_dkeys", "/run/dataset/*")
        self.num_akeys_single = self.params.get(
            "num_akeys_single", "/run/dataset/*")
        self.num_akeys_array = self.params.get(
            "num_akeys_array", "/run/dataset/*")
        self.akey_sizes = self.params.get(
            "akey_sizes", "/run/dataset/*")
        self.akey_extents = self.params.get(
            "akey_extents", "/run/dataset/*")
    def run_dm_serial_small(self, tool):
        """
        Test Description:
            Tests serializing/deserializing a small container.
        Use Cases:
            Create pool1.
            Create cont1 in pool1.
            Create a small dataset in cont1.
            Serialize cont1 to an HDF5 file(s).
            Create pool2.
            Deserialize the HDF5 file(s) to a new cont2 in pool2.
        """
        # Set the tool to use
        self.set_tool(tool)
        # Create pool1
        pool1 = self.create_pool()
        pool1.connect(2)
        # Create cont1
        cont1 = self.get_container(pool1)
        # Create dataset in cont1
        obj_list = self.dataset_gen(
            cont1,
            self.num_objs, self.num_dkeys, self.num_akeys_single,
            self.num_akeys_array, self.akey_sizes, self.akey_extents)
        # Create pool2
        pool2 = self.create_pool()
        pool2.connect(2)
        # Serialize/Deserialize cont1 to a new cont2 in pool2
        result = self.run_datamover(
            self.test_id + " (cont1->HDF5->cont2)",
            "DAOS_UUID", None, pool1, cont1,
            "DAOS_UUID", None, pool2, None)
        # Get the destination cont2 uuid
        cont2_label = self.parse_create_cont_label(result.stdout_text)
        # Verify data in cont2
        # NOTE(review): uses get_cont() here but get_container() above —
        # presumably both resolve a container handle; confirm they are the
        # intended pair.
        cont2 = self.get_cont(pool2, cont2_label)
        self.dataset_verify(
            obj_list, cont2,
            self.num_objs, self.num_dkeys, self.num_akeys_single,
            self.num_akeys_array, self.akey_sizes, self.akey_extents)
        # Must destroy before closing pools
        cont1.destroy()
        cont2.destroy()
        pool1.disconnect()
        pool2.disconnect()
    @avocado.fail_on(DaosApiError)
    def test_dm_serial_small_dserialize(self):
        """
        Test Description:
            DAOS-6875: Verify serializing a small container.
        :avocado: tags=all,pr
        :avocado: tags=vm
        :avocado: tags=datamover,mfu,mfu_serialize,mfu_deserialize,hdf5
        :avocado: tags=DmvrSerialSmall,test_dm_serial_small_dserialize
        """
        self.run_dm_serial_small("DSERIAL")
|
import pandas as pd

# Sample frame: five columns c0..c4, three rows indexed r0..r2.
dict_data = {
    'c0': [1, 2, 3],
    'c1': [4, 5, 6],
    'c2': [7, 8, 9],
    'c3': [10, 11, 12],
    'c4': [13, 14, 15]
}

# Earlier variants of this example demonstrated df.reset_index() (integer
# re-indexing) and df.sort_index(ascending=False) (descending index sort);
# only the sort_values demo below is on the executed path.

# sort_values demo
print("# 딕셔너리를 데이터프레임으로 변환. 인덱스를 [r0, r1, r2]로 지정")
df = pd.DataFrame(dict_data, index=['r0', 'r1', 'r2'])
print(df, end='\n\n')

print("# c1열을 기준으로 내림차순 정렬")
# Sort rows by column 'c1' in descending order.
ndf = df.sort_values(by='c1', ascending=False)
print(ndf)
#/usr/bin/env python
###############################################################################
# Copyright one_formula.py script - 2020 Marc Rosanes Siscart
# Copyright of the formula_of_one discovered - May 2014 Marc Rosanes Siscart
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# GNU General Public License on: <https://www.gnu.org/licenses/>.
###############################################################################
import sys
import math
# Formula of One by Marc Rosanes Siscart:
# summation[(-1)**i * (n-i)**n / (i! * (n-i)!)] = 1
#
# Note: Summation can be done 'between 0 and n-1', or 'between 0 and n';
# it is indiferent.
def formula_one(n):
    """Evaluate Marc Rosanes Siscart's "formula of one" for code length `n`.

    Computes sum_{i=0}^{n-1} (-1)**i * (n-i)**n / (i! * (n-i)!), which equals
    1 for every positive integer n (summing to n instead of n-1 adds a zero
    term, per the note above).

    Args:
        n: positive integer, or anything int() accepts (e.g. a CLI string).

    Returns:
        float: the summation value (~1.0 up to floating-point rounding).
    """
    n = int(n)
    result = 0.0
    for i in range(n):
        # (-1.0)**i keeps the arithmetic in floats for a float result.
        term = (-1.0) ** i * (n - i) ** n / (math.factorial(i) * math.factorial(n - i))
        result += term
    return result
if __name__ == "__main__":
    # CLI entry point: first argument is the code length n.
    try:
        result = formula_one(sys.argv[1])
    except (IndexError, ValueError) as err:
        # IndexError: no argument given; ValueError: not an integer.
        # Chain the cause instead of silently discarding it (the original
        # bare `except:` also swallowed genuine computation errors).
        raise Exception("Code length (int) must be given as input argument") from err
    else:
        print("result= " + str(result))
|
import time
import hashlib
class Transaction:
    """A single transfer of `amount` from `sender` to `receiver`."""

    def __init__(self, sender, receiver, amount, timestamp=None):
        self.sender = sender
        self.receiver = receiver
        self.amount = amount
        # Default to the current wall-clock time when none is supplied.
        self.time = timestamp if timestamp else time.time()

    def get(self) -> dict:
        """Return the transaction's fields as a plain dict."""
        return {
            'sender': self.sender,
            'receiver': self.receiver,
            'amount': self.amount,
            'time': self.time,
        }

    def __repr__(self):
        fields = (self.sender, self.receiver, self.amount, self.time)
        return "sender: {}, receiver: {}, amount: {}$, time: {}\n".format(*fields)
class Wallet:
    """A user wallet identified by a SHA-256 key derived from name + time.

    Bug fixes vs. the original:
      * __init__ referenced an undefined name `timestamp` (its parameter was
        called `time`, which also shadowed the `time` module and broke
        `time.time()`).
      * get_public_key() was called without `self.`.
      * sha256 input used the non-existent `str.tencode` method.
      * get() listed 'public_key' twice; the second entry (the balance)
        silently overwrote the real public key and dropped 'amount'.
    """

    def __init__(self, username, amount, timestamp=None):
        self.username = username
        self.amount = amount
        # Fall back to the current wall-clock time when none is supplied.
        self.time = timestamp or time.time()
        self.public_key = self.get_public_key()

    def get_public_key(self):
        """Derive a deterministic hex key from the username and creation time."""
        block_string = "{}{}".format(self.username, self.time)
        return hashlib.sha256(block_string.encode()).hexdigest()

    def get(self) -> dict:
        """Return the wallet's public fields as a plain dict."""
        return {
            'username': self.username,
            'public_key': self.public_key,
            'amount': self.amount,
            'time': self.time,
        }

    def __repr__(self):
        return "username: {}, created: {}".format(self.username, self.time)
|
import time
books_avail=[{'name':'Mahabharata','author':'Maha author'},
{'name':'Ramayana','author':'Rama author'},
{'name':'Secret Seven','author':'Enid Blyton'}]
class Library:
    """A member's view of the library: their lended books plus the shared shelf.

    Bug fixes vs. the original:
      * One canonical lend-log filename. The original wrote "Lend_book.txt"
        but read "lend_Book.txt" and "Lend_Book.txt" -- three distinct files
        on a case-sensitive filesystem.
      * lend_book gave up after inspecting only the FIRST shelf entry (the
        fallback branch ran inside the loop and broke immediately); now the
        whole shelf is scanned before consulting the lend log.
      * donate_book/return_book appended {title: author} dicts, breaking the
        {'name': ..., 'author': ...} schema lend_book relies on.
      * Log files are opened with `with` so handles always close.
    """

    LEND_LOG = "Lend_Book.txt"
    RETURN_LOG = "Return_Book.txt"

    def __init__(self, alistbook, alibname):
        self.listbook = alistbook   # books currently lended to this member
        self.libname = alibname     # member / library display name

    def disp_book(self):
        """Interactive menu: show the shelf, this member's books, or a log file."""
        disp_in = input("\nWhat do you want to display ? a) Available books b) Books lended to you c) Return File d) Lend File e) Exit ")
        if disp_in == "a":
            print("\nDisplaying Available Books .. ")
            print(books_avail)
        elif disp_in == "b":
            print("\nBooks Lended to you are: ")
            print(self.listbook)
        elif disp_in == "c":
            with open(self.RETURN_LOG, "rt") as f:
                for line in f:
                    print(line)
        elif disp_in == "d":
            with open(self.LEND_LOG, "rt") as f:
                for line in f:
                    print(line)
        elif disp_in == "e":
            return None
        else:
            print("Input is incorrect ! Please check your input and try again.")

    def lend_book(self):
        """Issue a book by name: move it from the shared shelf to this member and log it."""
        print("Choose among these books...\n")
        print(books_avail)
        try:
            book_name = input("\nEnter name of a book you want to issue or 'e' to Exit : ")
            if book_name == 'e':
                return 0
            # Scan the WHOLE shelf for the requested title.
            for i in range(len(books_avail)):
                if books_avail[i]['name'] == book_name:
                    lend_bk = books_avail[i].copy()
                    self.listbook.append(lend_bk)
                    del books_avail[i]
                    with open(self.LEND_LOG, "a") as f:
                        f.write(f"{lend_bk} : lended to {self.libname} : {time.ctime()}\n")
                    print("Book Lended !")
                    return
            # Not on the shelf: see whether it appears in the lend log.
            if len(book_name) > 2 and book_name != 'name' and book_name != 'author':
                with open(self.LEND_LOG, "rt") as f2:
                    for lines in f2:
                        if lines.find(book_name) != -1:
                            print(f"\nBook is not available, its lended to {self.libname}")
                            return
                print("Book is not found")
            else:
                print("Check your input !")
        except Exception as e:
            # Missing log file, malformed shelf entry, etc.
            print("Book is not found")

    def donate_book(self):
        """Add a new book to the shared shelf."""
        book_nm = input("\nEnter Book name to donate or 'e' to Exit : ")
        if book_nm != 'e':
            book_au = input("Enter Book author name: ")
            # Store the same schema as every other shelf entry.
            books_avail.append({'name': book_nm, 'author': book_au})
            print("Thank you")
        else:
            return 0

    def return_book(self):
        """Return one of this member's books to the shelf and log the return."""
        book_nm = input("\nEnter Book name to return or 'e' to Exit : ")
        if book_nm != 'e':
            book_au = input("Enter Book author name: ")
            for i in range(len(self.listbook)):
                if self.listbook[i]['name'] == book_nm:
                    return_bk = self.listbook[i].copy()
                    # Same schema fix as donate_book.
                    books_avail.append({'name': book_nm, 'author': book_au})
                    del self.listbook[i]
                    with open(self.RETURN_LOG, "a") as f1:
                        f1.write(f"{return_bk} : returned by {self.libname} : {time.ctime()}\n")
                    break
            print("Thank you for returning")
        else:
            return 0
if __name__ == '__main__':
    print("**** Welcome To Library Management System ****")
    print("\n Hello Prakriti, Your Library name : Prakriti_Lib")
    p_book_names = [{'name': 'maze', 'author': 'maze author'}]
    prakriti = Library(p_book_names, 'Prakriti_Lib')
    # Map menu numbers to handlers; 5 (exit) and unknown choices are handled
    # separately below.
    menu = {
        1: prakriti.disp_book,
        2: prakriti.lend_book,
        3: prakriti.return_book,
        4: prakriti.donate_book,
    }
    while True:
        try:
            choice = int(input("\n * What are you upto ? 1) Display Books 2) Lend Book 3) Return Book 4) Donate Book : 5) Exit : "))
            if choice == 5:
                print(" Bye - Bye")
                exit()
            elif choice in menu:
                menu[choice]()
            else:
                print("Input is incorrect ! Please check your input and try again.")
        except Exception as e:
            # Non-integer input (or a handler failure) falls through here.
            print("Input is incorrect ! Please check your input and try again.")
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 16:15:38 2021
@author: gtxnn
"""
string = "themartian"
dictionary = ['the', 'martian', ]


def max_match(sentence, dictionary):
    """Greedy longest-prefix segmentation of `sentence` into dictionary words.

    Known words are emitted followed by a space; characters that start no
    dictionary word are passed through one at a time with no space.
    """
    if not sentence:
        return ""
    # Try the longest prefix first, shrinking one character at a time.
    for end in range(len(sentence), -1, -1):
        prefix, rest = sentence[:end], sentence[end:]
        if prefix in dictionary:
            return prefix + " " + max_match(rest, dictionary)
    # No dictionary word starts here: emit one character and recurse.
    return sentence[0] + max_match(sentence[1:], dictionary)


print(max_match(string, dictionary))
# EXT ADV
import numpy as np
from scipy.optimize import minimize
import numdifftools as nd
"""X=[v1,v2,x11,x21,f11,f21,x12,x22,f12,f22,r11,r12,r21,r22,r01,r02]"""
a1=[0.0,0.3,0.7]
a2=[0.0,0.6,0.4]
rgov=0.0
def U_1(X,sign=1.0):
return sign*(((a1[0]+rgov)*X[0]**0.5+(a1[1]-rgov/2.0)*X[2]**0.5+(a1[2]-rgov/2.0)*X[3]**0.5)**2-X[4]**2-X[5]**2)
def U_2(X,sign=1.0):
return sign*(((a2[0]+rgov)*X[1]**0.5+(a2[1]-rgov/2.0)*X[6]**0.5+(a2[2]-rgov/2.0)*X[7]**0.5)**2-X[8]**2-X[9]**2)
exp=0.5
h=4.0 # Lower the probablity of falling sick at the social optimum
def tech_0(X):
return ((X[14]+h)**exp+(X[15]+h)**exp)/(exp)
def tech_1(X):
return (0.4*X[10]**exp+0.6*X[11]**exp)/(exp)
def tech_2(X):
return (0.7*X[12]**exp+0.3*X[13]**exp)/(exp)
g=0.5
rgov=0.3
pgov=1.0
def EU_1(X,ext=1.0,sign=1.0):
p=0.5/((X[0]+ext)**(1/g)+1)
return sign*((1-p)*(U_1(X)+rgov)+p*(U_1(X)-pgov))
def EU_2(X,ext=1.0,sign=1.0):
p=0.5/((X[1]+ext)**(1/g)+1)
return sign*((1-p)*(U_2(X)+rgov)+p*(U_2(X)-pgov))
def EWelf(X,sign=1.0):
return (EU_1(X,X[1],sign)+EU_2(X,X[0],sign))
X0=np.full(16,1,dtype=float)
Welfmax=minimize(EWelf,X0,args=(-1.0),method='SLSQP',constraints=[{'type':'eq','fun':lambda X:[tech_1(X)-X[2]-X[6],tech_2(X)-X[3]-X[7],tech_0(X)-X[0]-X[1]]},
{'type':'eq','fun':lambda X:[X[4]+X[8]-X[10]-X[12]-X[14],X[5]+X[9]-X[11]-X[13]-X[15]]},
{'type':'ineq','fun':lambda X:EU_1(X,X[1])},
{'type':'ineq','fun':lambda X:EU_1(X,X[1])}])
print Welfmax
print tech_0(Welfmax['x'])
X_max=Welfmax['x']
# Decentralised problem: each consumer maximises only their own expected
# utility, taking the social-optimum allocation X_max as the resource
# envelope. (Python 2 script: print statements.)
EU_1_max=minimize(EU_1,X0,args=(Welfmax['x'][1],-1.0),method='SLSQP',constraints=[{'type':'ineq','fun':lambda X:tech_0(X_max)-X[0]},
                                                            {'type':'ineq','fun':lambda X:tech_1(X_max)-X[2]},
                                                            {'type':'ineq','fun':lambda X:tech_2(X_max)-X[3]},
                                                            {'type':'ineq','fun':lambda X:X_max[0]+sum(X_max[2:4])-X[0]-X[2]-X[3]},
                                                            {'type':'eq','fun':lambda X:[X[i] for i in range(4,16)]},
                                                            {'type':'ineq','fun':lambda X:[X[i] for i in range(2,4)]},
                                                            {'type':'ineq','fun':lambda X:X[0]},
                                                            {'type':'eq','fun':lambda X:X[1]}])
EU_1_X=EU_1_max['x']
print EU_1_X
# Consumer 2's problem, symmetric to consumer 1's.
# NOTE(review): consumer 1 pins indices 4..15 to zero while consumer 2 pins
# only 8..14 (range(8,15)) -- confirm index 15 is intentionally left free.
EU_2_max= minimize(EU_2,X0,args=(Welfmax['x'][0],-1.0),method='SLSQP',constraints=[{'type':'ineq','fun':lambda X:tech_0(X_max)-X[1]},
                                                            {'type':'ineq','fun':lambda X:tech_1(X_max)-X[6]},
                                                            {'type':'ineq','fun':lambda X:tech_2(X_max)-X[7]},
                                                            {'type':'ineq','fun':lambda X:X_max[1]+sum(X_max[6:8])-X[1]-X[6]-X[7]},
                                                            {'type':'eq','fun':lambda X:[X[i] for i in range(2,6)]},
                                                            {'type':'eq','fun':lambda X:[X[i] for i in range(8,15)]},
                                                            {'type':'ineq','fun':lambda X:[X[i] for i in range(6,8)]},
                                                            {'type':'ineq','fun':lambda X:X[1]},
                                                            {'type':'eq','fun':lambda X:X[0]}])
EU_2_X=EU_2_max['x']
print EU_2_X
# Merge the two individual solutions into one allocation vector so it can be
# compared with the social optimum; effort/factor entries come from X_max.
tot=np.append(EU_1_X+EU_2_X,X_max[10:])
tot[4:6]=X_max[4:6]
tot[8:10]=X_max[8:10]
print "RESULTS"
print 'SOCIAL OPTIMUM : \n','Total Welfare : ',-Welfmax['fun'],'\n','Total consumption of vaccination : ',sum(X_max[0:2]),'(%s)'%(X_max[0:2]),'\n','Probability of getting sick :',0.5/((X_max[0]+X_max[1])**(1/g)+1),'\n','Consumer 1 Consumption :',np.append(X_max[0],X_max[2:6]),'\n','Consumer 1 Utility : ',EU_1(X_max,X_max[1]),'\n','Consumer 2 Consumption : ',np.append(X_max[1],X_max[6:10]),'\n','Consumer 2 Utility : ',EU_2(X_max,X_max[0]),'\n \n'
print 'INDIVIDUAL MAX : \n','Total Welfare : ',EWelf(tot),'\n','Total consumption of vaccination',EU_1_max['x'][0]+EU_2_max['x'][1],'(%s)'%(np.append(EU_1_X[0],EU_2_X[1])),'\n','Probability of getting sick :',0.5/((EU_1_X[0]**g+EU_2_X[1]**g)**(1/g)+1),'\n','Consumer 1 Consumption :',np.append(EU_1_X[0],EU_1_X[2:4]),'\n','Consumer 1 Utility : ',EU_1(tot,tot[1]),'\n','Consumer 2 Consumption : ',np.append(EU_2_X[1],EU_2_X[6:8]),'\n','Consumer 2 Utility : ',EU_2(tot,tot[0])
"""
g=0.5
def p_sick_square(X,c_level):
return (0.5/((X[0]+X[1])**(1/g)+1)-c_level)**2
X0=np.full(16,5,dtype=float)
print minimize(p_sick_square,X0,args=(0.05),method='SLSQP',constraints=[{'type':'ineq','fun':lambda X:X},
{'type':'eq','fun':lambda X: EWelf(X)+EWelf(tot)}])
"""
"""
EU_1_max= minimize(EU_1,X0,args=(Welfmax['x'][1],-1.0),method='SLSQP',constraints=[{'type':'eq','fun':lambda X:-EU_2(X,X[0])+EU_2(Welfmax['x'],Welfmax['x'][0])},
{'type':'eq','fun':lambda X:[tech_1(X)-X[2]-X[6],tech_2(X)-X[3]-X[7],tech_0(X)-X[0]-X[1]]},
{'type':'eq','fun':lambda X:[X[4]+X[8]-X[10]-X[12]-X[14],X[5]+X[9]-X[11]-X[13]-X[15]]},
{'type':'ineq','fun':lambda X:X}])
EU_1_X=EU_1_max['x']
print EU_1_X
EU_2_max= minimize(EU_2,X0,args=(Welfmax['x'][0],-1.0),method='SLSQP',constraints=[{'type':'eq','fun':lambda X:-EU_1(X,X[1])+EU_1(Welfmax['x'],Welfmax['x'][1])},
{'type':'eq','fun':lambda X:[tech_1(X)-X[2]-X[6],tech_2(X)-X[3]-X[7],tech_0(X)-X[0]-X[1]]},
{'type':'eq','fun':lambda X:[X[4]+X[8]-X[10]-X[12]-X[14],X[5]+X[9]-X[11]-X[13]-X[15]]},
{'type':'ineq','fun':lambda X:X}])
EU_2_X=EU_2_max['x']
print EU_2_X
tot=np.append(EU_1_X+EU_2_X,X_max[10:])
tot[4:6]=X_max[4:6]
tot[8:10]=X_max[8:10]
print "RESULTS"
print 'SOCIAL OPTIMUM : \n','Total Welfare : ',-Welfmax['fun'],'\n','Total consumption of vaccination : ',sum(X_max[0:2]),'(%s)'%(X_max[0:2]),'\n','Probability of getting sick :',0.5/((X_max[0]**g+X_max[1]**g)**(1/g)+1),'\n','Consumer 1 Consumption :',np.append(X_max[0],X_max[2:6]),'\n','Consumer 1 Utility : ',EU_1(X_max,X_max[1]),'\n','Consumer 2 Consumption : ',np.append(X_max[1],X_max[6:10]),'\n','Consumer 2 Utility : ',EU_2(X_max,X_max[0]),'\n \n'
print 'INDIVIDUAL MAX : \n','Total Welfare : ',EWelf(tot),'\n','Total consumption of vaccination',EU_1_max['x'][0]+EU_2_max['x'][1],'(%s)'%(np.append(EU_1_X[0],EU_2_X[1])),'\n','Probability of getting sick :',0.5/((EU_1_X[0]**g+EU_2_X[1]**g)**(1/g)+1),'\n','Consumer 1 Consumption :',np.append(EU_1_X[0],EU_1_X[2:4]),'\n','Consumer 1 Utility : ',EU_1(tot,tot[1]),'\n','Consumer 2 Consumption : ',np.append(EU_2_X[1],EU_2_X[6:8]),'\n','Consumer 2 Utility : ',EU_2(tot,tot[0])
"""
"""
# EXT ADV LOG
import numpy as np
from scipy.optimize import minimize
import numdifftools as nd
from math import exp
a1=[0.2,0.1,0.7]
a2=[0.4,0.2,0.4]
rg=0.0 # Pointless Here
def U_1(X,sign=1.0):
return sign*(((a1[0]+rg)*X[0]**0.5+(a1[1]-rg/2)*X[2]**0.5+(a1[2]-rg/2)*X[3]**0.5)**2-X[4]**2-X[5]**2)
def U_2(X,sign=1.0):
return sign*(((a2[0]+rg)*X[1]**0.5+(a2[1]-rg/2)*X[6]**0.5+(a2[2]-rg/2)*X[7]**0.5)**2-X[8]**2-X[9]**2)
e=0.5
h=0.0 # Lower the probablity of falling sick at the social optimum
def tech_0(X):
return ((X[14]+h)**e+(X[15]+h)**e)/(e)
def tech_1(X):
return (X[10]**e+X[11]**e)/(e)
def tech_2(X):
return (X[12]**e+X[13]**e)/(e)
g=0.5
rgov=0.0
pgov=0.5
def EU_1(X,ext=1.0,sign=1.0):
p=exp(-(X[0]+ext)**4)
return sign*((1-p)*(U_1(X)+rgov)+p*(U_1(X)-pgov))
def EU_2(X,ext=1.0,sign=1.0):
p=exp(-(X[1]+ext)**4)
return sign*((1-p)*(U_2(X)+rgov)+p*(U_2(X)-pgov))
def EWelf(X,sign=1.0):
return (EU_1(X,X[1],sign)+EU_2(X,X[0],sign))
X0=np.full(16,1,dtype=float)
Welfmax=minimize(EWelf,X0,args=(-1.0),method='SLSQP',constraints=[{'type':'eq','fun':lambda X:[tech_1(X)-X[2]-X[6],tech_2(X)-X[3]-X[7],tech_0(X)-X[0]-X[1]]},
{'type':'eq','fun':lambda X:[X[4]+X[8]-X[10]-X[12]-X[14],X[5]+X[9]-X[11]-X[13]-X[15]]},
{'type':'ineq','fun':lambda X:EU_1(X,X[1])},
{'type':'ineq','fun':lambda X:EU_1(X,X[1])}])
print Welfmax
print tech_0(Welfmax['x'])
X_max=Welfmax['x']
EU_1_max=minimize(EU_1,X0,args=(X_max[1],-1.0),method='SLSQP',constraints=[{'type':'ineq','fun':lambda X:tech_0(X_max)-X[0]},
{'type':'ineq','fun':lambda X:tech_1(X_max)-X[2]},
{'type':'ineq','fun':lambda X:tech_2(X_max)-X[3]},
{'type':'ineq','fun':lambda X:X_max[0]+sum(X_max[2:4])-X[0]-X[2]-X[3]},
{'type':'eq','fun':lambda X:[X[i] for i in range(4,16)]},
{'type':'ineq','fun':lambda X:[X[i] for i in range(2,4)]},
{'type':'ineq','fun':lambda X:X[0]},
{'type':'eq','fun':lambda X:X[1]}])
EU_1_X=EU_1_max['x']
print EU_1_X
EU_2_max= minimize(EU_2,X0,args=(X_max[0],-1.0),method='SLSQP',constraints=[{'type':'ineq','fun':lambda X:tech_0(X_max)-X[1]},
{'type':'ineq','fun':lambda X:tech_1(X_max)-X[6]},
{'type':'ineq','fun':lambda X:tech_2(X_max)-X[7]},
{'type':'ineq','fun':lambda X:X_max[1]+sum(X_max[6:8])-X[1]-X[6]-X[7]},
{'type':'eq','fun':lambda X:[X[i] for i in range(2,6)]},
{'type':'eq','fun':lambda X:[X[i] for i in range(8,16)]},
{'type':'ineq','fun':lambda X:[X[i] for i in range(6,8)]},
{'type':'ineq','fun':lambda X:X[1]},
{'type':'eq','fun':lambda X:X[0]}])
EU_2_X=EU_2_max['x']
print EU_2_X
tot=np.append(EU_1_X+EU_2_X,X_max[10:])
tot[4:6]=X_max[4:6]
tot[8:10]=X_max[8:10]
print "RESULTS"
print 'SOCIAL OPTIMUM : \n','Total Welfare : ',-Welfmax['fun'],'\n','Total consumption of vaccination : ',sum(X_max[0:2]),'(%s)'%(X_max[0:2]),'\n','Probability of getting sick :',exp((-(X_max[0]+X_max[1])**4)),'\n','Consumer 1 Consumption :',np.append(X_max[0],X_max[2:6]),'\n','Consumer 1 Utility : ',EU_1(X_max,X_max[1]),'\n','Consumer 2 Consumption : ',np.append(X_max[1],X_max[6:10]),'\n','Consumer 2 Utility : ',EU_2(X_max,X_max[0]),'\n \n'
print 'INDIVIDUAL MAX : \n','Total Welfare : ',EWelf(tot),'\n','Total consumption of vaccination',EU_1_max['x'][0]+EU_2_max['x'][1],'(%s)'%(np.append(EU_1_X[0],EU_2_X[1])),'\n','Probability of getting sick :',exp((-(EU_1_X[0]+EU_2_X[1])**4)),'\n','Consumer 1 Consumption :',np.append(EU_1_X[0],EU_1_X[2:4]),'\n','Consumer 1 Utility : ',EU_1(tot,tot[1]),'\n','Consumer 2 Consumption : ',np.append(EU_2_X[1],EU_2_X[6:8]),'\n','Consumer 2 Utility : ',EU_2(tot,tot[0])"""
|
# Sets store unique elements only.
my_set = set()
my_set.update((1, 2))
print(my_set)

mylist = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]
# Converting a list to a set drops the duplicates in one step.
my_set = set(mylist)
print(my_set)

# bool is a type of its own (the expression's value is simply discarded).
type(False)
|
def try_to_change_string_reference(the_string):
    """Rebind the *local* name `the_string`; the caller's variable is untouched.

    Python passes object references by value: assigning to a parameter only
    repoints the function-local name, it never changes the caller's binding.
    """
    print('got', the_string)
    the_string = 'In a kingdom by the sea'
    print('set to', the_string)

outer_string = 'It was many and many a year ago'

print('before, outer_string =', outer_string)
try_to_change_string_reference(outer_string)
# Unchanged: the function only rebound its own local name.
print('after, outer_string =', outer_string)
import sys

# Require at least one input ROOT file on the command line.
if len(sys.argv) < 2:
    msg = '\n'
    msg += "Usage 1: %s $INPUT_ROOT_FILE(s)\n" % sys.argv[0]
    msg += '\n'
    sys.stderr.write(msg)
    sys.exit(1)

from larlite import larlite as fmwk

# Create ana_processor instance
my_proc = fmwk.ana_processor()

# Set input root file (Python 2 script: xrange / print statements)
for x in xrange(len(sys.argv)-1):
    my_proc.add_input_file(sys.argv[x+1])

# Specify IO mode
my_proc.set_io_mode(fmwk.storage_manager.kREAD)

my_proc.set_ana_output_file('BT_backgrounds_uncertainty.root')

# Select which topology to analyse; only the two names below are recognised,
# anything else leaves both flags False.
name = 'twoshower'
useTwoShower = False
useOneShower = False
if name =='twoshower':
    useTwoShower = True
elif name =='singleshower':
    useOneShower = True

# Configure the background truth-matching analysis unit.
ana = fmwk.BackgroundTruthMatchBT()
ana.UseMCSample(True)
ana.GetPi0Info(useTwoShower)
ana.GetSingleShowerInfo(useOneShower)
ana.SetBeamWindow(3.2,4.8)
# Gain, recombination factor, and an effectively infinite electron lifetime.
ana.SetGainRecombLifetime(196.97874008458268,0.572,1e20)
ana.GetUncertaintyInfo(True)
ana.SetEWProducer("genieeventweight")
# Normalisation constants; the single-shower sample has its own values.
nominal = 69
nominal_xsec = 2.56e-38
if name == 'singleshower':
    nominal = 241.8212
    nominal_xsec = 2.64e-38
ana.SetNominalXsec(nominal_xsec)
ana.SetNominal(nominal)

my_proc.add_process(ana)

print
print "Finished configuring ana_processor. Start event loop!"
print

# Let's run it.
my_proc.run()

# done!
print
print "Finished running ana_processor event loop!"
print

sys.exit(0)
|
from django.urls import path
from . import views

# URL routes for the boards app; `name=` values are used for reverse() lookups.
urlpatterns = [
    path('', views.home, name='home'),
    path('<int:board_id>',views.board_topics, name='board_topics'),
    path('<int:board_id>/new', views.new_topic, name='new_topic'),
]
|
"""
Problem 461 - Hamming Distance
Given two integers x and y, calculate the Hamming distance
"""
class Solution:
    def hammingDistance(self, x: int, y: int) -> int:
        """Count the bit positions at which x and y differ."""
        diff = x ^ y  # 1-bits mark the differing positions
        count = 0
        # Kernighan's trick: each step clears the lowest set bit.
        while diff:
            diff &= diff - 1
            count += 1
        return count


if __name__ == "__main__":
    print(Solution().hammingDistance(1, 4))  # Should return 2
|
from pymongo import MongoClient
from tkinter import *
import random
from PIL import Image, ImageTk
import base64
import io
import time
from bson.objectid import ObjectId
import threading
from DataBase import *
client = MongoClient('mongodb+srv://admin:admin@cluster0.o6hxp.mongodb.net/retryWrites=true&w=majority')
db = client.shop
people = db.people
def get_list_client():
    """Return every person document currently flagged as being in the shop."""
    return list(people.find({"InShop" : 1}))
def person_to_text(p):
    """Render a person document as "key:value" lines, skipping binary fields."""
    excluded = ["Image"]  # base64 image data is not printable
    parts = []
    for key, value in p.items():
        if key not in excluded:
            parts.append("{}:{}\n".format(key, value))
    return "".join(parts)
def show_client(seconds):
    """Display every in-shop client (photo + details) in a Tk grid for `seconds` s.

    Bug fix: the original chained `Label(...).grid(...)`, which returns None,
    and then called `.config()` on that None -- an AttributeError for any
    client flagged as a thief. The Label is now kept and gridded separately.
    """
    root = Tk()
    root.geometry("+20+20")
    ps = get_list_client()
    r = 0
    c = 0
    max_c = 5  # clients per row
    for i in range(len(ps)):
        # NOTE(review): Image.ANTIALIAS is deprecated in newer Pillow -- confirm
        # the installed version still provides it.
        image = ImageTk.PhotoImage(string_to_image(ps[i]['Image']).resize((200, 120), Image.ANTIALIAS))
        label_img = Label(root, image=image)
        label_img.photo = image  # keep a reference so Tk does not drop the image
        label_img.grid(row=r, column=c)
        label_text = Label(root, text=person_to_text(ps[i]))
        label_text.grid(row=r + 1, column=c)
        if ps[i]['voleur'] != 0:  # assumes every document has 'voleur' -- TODO confirm
            label_text.config(bg="red")
        if c == max_c - 1:
            c = 0
            r = r + 2
        else:
            c = c + 1
    # Auto-close the window after `seconds`, then return from mainloop.
    root.after(seconds * 1000, lambda: root.destroy())
    root.mainloop()
def repeat():
    """Show the client board forever, one `seconds`-long window at a time.

    show_client() blocks for `seconds` (its window destroys itself via
    after()), so a plain loop reproduces the original cadence. The original
    compared an ever-growing elapsed time against a fixed threshold that was
    never reset -- dead timing logic that only busy-waited; it is removed.
    """
    seconds = 10
    while True:
        show_client(seconds)


repeat()
from FPI_old import FPI
import pandas as pd

# Train/evaluate the FPI classifier on the bundled test data.
data = pd.read_csv("test/data/trainData.csv", sep=";")
labels = pd.read_csv("test/data/trainDataLabels.csv", sep=";")

fpi = FPI(data, 0.3)
fpiFit = FPI.fit(fpi)
fpiPredict = FPI.predict(data, fpiFit, 50)

results = fpiPredict.join(labels)

# Bug fix: accuracy_score / confusion_matrix were used without ever being
# imported (NameError at runtime). Both are computed with pandas instead,
# which this script already depends on.
accuracy = (results["Class"] == results["Predicted"]).mean()
print(accuracy)
print(results['Predicted'].value_counts())
print(results['Class'].value_counts())

# Confusion matrix: rows = actual class, columns = predicted class.
cm = pd.crosstab(results['Class'], results['Predicted'])
print(cm)

exp_series = pd.Series(results['Class'])
pred_series = pd.Series(results['Predicted'])
cross = pd.crosstab(exp_series, pred_series, rownames=['Actual'], colnames=['Predicted'], margins=True)
print(cross)
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import unittest
import random
import heapq
from MinHeap import MinHeap
class TestMinHeap(unittest.TestCase):
    """Exercise MinHeap against fixed data and the stdlib heapq as an oracle."""

    def test_min1(self):
        # The smallest of a fixed push sequence must surface at the top.
        heap1 = MinHeap()
        for value in [5, 1, 2, 3, 4]:
            heap1.push(value)
        self.assertEqual(1, heap1.min())

    def test_min(self):
        # Cross-check the full pop order against heapq on a random sample.
        heap = MinHeap()
        keys = random.sample(range(10000), 5000)
        smallest = min(keys)
        for key in keys:
            heap.push(key)
        self.assertEqual(smallest, heap.min())
        heapq.heapify(keys)
        while keys:
            self.assertFalse(heap.is_empty())
            self.assertEqual(heapq.heappop(keys), heap.min())
            heap.pop()
        self.assertTrue(heap.is_empty())
|
from flask import Flask, render_template
import os

app = Flask(__name__)


@app.route('/')
def index():
    img_path = os.path.join(app.root_path, 'static/img/cat.jpg')
    # Cache-busting: pass the image's mtime to the template so its URL changes
    # whenever the file changes and browsers refetch it (mirrored in the HTML).
    mtime = int(os.stat(img_path).st_mtime)
    return render_template('05.index.htm', mtime=mtime)


if __name__ == '__main__':
    app.run(debug=True)
"""
#serialize
pickle.dump(vectorizer, open(path+"vectorizer.p","wb"))
pickle.dump(data, open(path+"data.p","wb"))
pickle.dump(experiences, open(path+"experience_index.p","wb"))
pickle.dump(substance_index, open(path+"substance_index.p","wb"))
pickle.dump(tag_index, open(path+"tag_index.p","wb"))
pickle.dump(substance_count, open(path+"substance_count.p","wb"))
pickle.dump(tag_count, open(path+"tag_count.p","wb"))
#unserialize
if True:
import os, re, pickle, bs4, nltk, numpy as np
from sklearn.feature_extraction.text import CountVectorizer
path = 'C:/Users/Glenn/Documents/GitHub/mining-erowid/'
vectorizer = pickle.load(open(path+"vectorizer.p","rb"))
data = pickle.load(open(path+"data.p","rb"))
experiences = pickle.load(open(path+"experience_index.p","rb"))
substance_index = pickle.load(open(path+"substance_index.p","rb"))
tag_index = pickle.load(open(path+"tag_index.p","rb"))
substance_count = pickle.load(open(path+"substance_count.p","rb"))
tag_count = pickle.load(open(path+"tag_count.p","rb"))
vocab = np.array(vectorizer.get_feature_names())
with open(path+"files/stopwords.txt") as file:
substops = set([line.replace("\n","") for line in file.readlines()])
"""
#Let's view this as a fresh start.
#The first thing is to import the text into a generally usable format.
####Import everything we might need
import os, re, pickle, bs4, nltk, numpy as np
from sklearn.feature_extraction.text import CountVectorizer
###Set path
#path = ""
path = 'C:/Users/Glenn/Documents/GitHub/mining-erowid/'
##Data-cleaning helper functions
def is_number(s):
    """True when `s` parses as a float."""
    try:
        float(s)
    except ValueError:
        return False
    return True

def is_okay_word(s):
    """Reject noise tokens: single characters, numbers below 1900 (years are
    kept), and dosage-like strings such as '10mg', '2x' or '3oz'."""
    import re
    if len(s) == 1:
        return False
    if is_number(s) and float(s) < 1900:
        return False
    if re.match('\d+[mM]?[gGlLxX]', s):
        return False
    if re.match('\d+[oO][zZ]', s):
        return False
    return True
###Import the experience vault into data structures
# Parallel per-report structures: substances, tags, and the xml filenames.
substance_index, tag_index, experiences = [], [], os.listdir(path+'xml')
def parse_vault(path):
    """Yield one cleaned, space-joined token string per experience report.

    Side effects: appends each report's substances and tags to the
    module-level substance_index / tag_index, keeping all three structures
    aligned by position. Python 2 code (uses `unicode`).
    """
    ###Prepare data cleaning tools
    tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
    tagger = nltk.tag.UnigramTagger(nltk.corpus.brown.tagged_sents())
    lemmatizer = nltk.WordNetLemmatizer()
    from nltk.corpus import wordnet
    german = nltk.corpus.stopwords.words('german')  # NOTE(review): unused in this function
    for n, experience in enumerate(experiences):
        #if n>10:
            #break
        with open(path+"xml/"+experience) as f:
            soup = bs4.BeautifulSoup(f)
            words = []
            tokens = tokenizer.tokenize(soup.bodytext.contents[0])
            pos = tagger.tag(tokens)
            # `pos` gets rebound to a wordnet constant inside the loop; the
            # loop iterator already holds the tagged list, so this is safe.
            for token in pos:
                # Map Brown-corpus tags onto WordNet POS for the lemmatizer.
                # NOTE(review): 'RV' is presumably meant to be the adverb tag
                # ('RB' in Brown) -- confirm.
                if token[1] == 'NN':
                    pos = wordnet.NOUN
                elif token[1] == 'JJ':
                    pos = wordnet.ADJ
                elif token[1] == 'VB':
                    pos = wordnet.VERB
                elif token[1] == 'RV':
                    pos = wordnet.ADV
                else:
                    pos = wordnet.NOUN
                lemma = lemmatizer.lemmatize(token[0], pos)
                if is_okay_word(lemma):
                    words.append(lemma)
            substances = [unicode(substance.contents[0]) for substance in soup.find_all("substance")]
            tags = [unicode(tag.contents[0]) for tag in soup.find_all("tag")]
            substance_index.append(substances)
            tag_index.append(tags)
            yield " ".join(words)
        if n%1000==0:
            print("Finished " + str(n) + " files out of " + str(len(experiences)))
##create sklearn data structures
# Bag-of-words counts over every report, with English stopwords removed.
vectorizer = CountVectorizer(stop_words=nltk.corpus.stopwords.words('english'))
#vectorizer = CountVectorizer(stop_words=nltk.corpus.stopwords.words('english'), min_df=25)
data = vectorizer.fit_transform(parse_vault(path))
vocab = np.array(vectorizer.get_feature_names())

# Corpus-wide frequency of every tag and substance.
from collections import Counter
tag_count, substance_count = Counter(), Counter()
for row in tag_index:
    for tag in row:
        tag_count[tag]+=1
for row in substance_index:
    for substance in row:
        substance_count[substance]+=1

###Prepare custom stopwords
with open(path+"files/stopwords.txt") as file:
    substops = set([line.replace("\n","") for line in file.readlines()])
# Disabled: one-off rewrite of the stopword file, kept for reference.
#with open(path+"files/stopwords.txt","w") as file:
#    for word in sorted(substops):
#        file.write(str(word))
#        file.write("\n")
# file.write("\n")
def word_chisq(key, n=10, stops=True):
    """Top-`n` vocabulary terms most associated (chi-squared) with `key`.

    `key` may be a tag or a substance; each report is labelled by whether it
    carries the key. For substance keys, custom stopwords are skipped unless
    `stops` is disabled. Returns (chisq, word, p-value) triples.
    """
    from sklearn.feature_selection import chi2
    if key in tag_count.keys():
        labels = [(key in row) for row in tag_index]
    elif key in substance_count.keys():
        labels = [(key in row) for row in substance_index]
    chisq, p = chi2(data, labels)
    best_first = np.argsort(chisq)[::-1]
    picked = []
    for idx in best_first:
        skip = key in substance_count.keys() and vocab[idx] in substops and stops == True
        if not skip:
            picked.append((chisq[idx], vocab[idx], p[idx]))
    return picked[0:n]
def bool_chisq(key, n=10, stops=True):
    """Like word_chisq, but on word presence/absence (`data > 0`) instead of
    raw counts, so repeated use within one report doesn't dominate."""
    from sklearn.feature_selection import chi2
    if key in tag_count.keys():
        labels = [(key in row) for row in tag_index]
    elif key in substance_count.keys():
        labels = [(key in row) for row in substance_index]
    chisq, p = chi2(data > 0, labels)
    best_first = np.argsort(chisq)[::-1]
    picked = []
    for idx in best_first:
        skip = key in substance_count.keys() and vocab[idx] in substops and stops == True
        if not skip:
            picked.append((chisq[idx], vocab[idx], p[idx]))
    return picked[0:n]
# Python 2 trick: input() evaluates what the user types, so bare y/n/x
# answers resolve to these strings.
y,n,x = "y","n","x"
def create_file(key, binary=False, filter=True,nwords=50):
    """Interactively build a word-cloud-style text file for `key`.

    Words are ranked by chi-squared association with the tag/substance `key`
    (binary=True uses presence/absence counts). With filter=True the user
    accepts (y), rejects (n) or stops (x) each candidate; accepted words are
    written chisq-proportionally many times to output/<key>[_bin][_nof].txt.
    """
    from sklearn.feature_selection import chi2
    if key in tag_count.keys():
        labels = [(key in row) for row in tag_index]
    elif key in substance_count.keys():
        labels = [(key in row) for row in substance_index]
    if binary==False:
        chisq, p = chi2(data, labels)
    else:
        chisq, p = chi2(data>0, labels)
    ranking = np.argsort(chisq)[::-1]
    values = []
    for rank in ranking:
        values.append((chisq[rank],vocab[rank],p[rank]))
    # Encode the options in the output filename.
    filename = key
    if binary==True:
        filename+="_bin"
    if filter==False:
        filename+="_nof"
    filename+=".txt"
    print "Building " + filename + ":"
    with open(path+"output/" + filename,"w") as file:
        j = 0
        for value in values:
            # NOTE(review): j is always an int here, so the j!=None guard is dead.
            if j!=None and j>nwords:
                return
            if filter==True:
                response = input("Use " + str(value) + "? (" + str(j) + " words so far) (y/n/x)")
            else:
                response = "y"
            if response == "y":
                # Repeat the word proportionally to its chi-squared score.
                if binary==True:
                    r = int(value[0]/10)
                else:
                    r = int(value[0]/100)
                for i in range(r):
                    file.write(value[1])
                    file.write(" ")
                print "Wrote " + str(value[1]) + " " + str(r) + " times."
                j+=1
            elif response == "x":
                print "Finished " + filename + "."
                return
            else:
                continue
#Subset the data by tag and/or substance
def dataslice(lst):
    """Restrict the corpus to reports carrying any item of `lst`.

    `lst` may be None (whole corpus), one tag/substance string, or a list of
    them. Returns (matrix rows, tag rows, substance rows, filenames), all
    aligned by position.
    """
    if lst == None:
        return data, tag_index, substance_index, experiences
    if type(lst) == str:
        lst = [lst]
    keep_rows = []
    keep_tags = []
    keep_substances = []
    keep_files = []
    for idx, exp in enumerate(experiences):
        for item in lst:
            if item in tag_index[idx] or item in substance_index[idx]:
                keep_rows.append(idx)
                keep_tags.append(tag_index[idx])
                keep_substances.append(substance_index[idx])
                keep_files.append(experiences[idx])
                break  # one hit is enough; avoid duplicating the report
    return data[keep_rows], keep_tags, keep_substances, keep_files
#This is sort of a bad setup; instead I should pass a reference to the data
def slice_chisq(d, tindex, sindex, key, n=10, stops=True):
    """word_chisq over a pre-sliced matrix `d` with matching tag/substance rows.

    NaN chi-squared scores (constant columns within the slice) are skipped.
    """
    from sklearn.feature_selection import chi2
    if key in tag_count.keys():
        labels = [(key in row) for row in tindex]
    elif key in substance_count.keys():
        labels = [(key in row) for row in sindex]
    chisq, p = chi2(d, labels)
    best_first = np.argsort(chisq)[::-1]
    picked = []
    for idx in best_first:
        if key in substance_count.keys() and vocab[idx] in substops and stops == True:
            continue
        elif not np.isnan(chisq[idx]):
            picked.append((chisq[idx], vocab[idx], p[idx]))
    return picked[0:n]
def word_index(word):
    """Position of `word` in the vocabulary array (IndexError if absent)."""
    return np.flatnonzero(vocab == word)[0]
def examples(word, lst=None, sort=True, n=5):
    """Filenames of up to `n` reports containing `word`, most occurrences first.

    `lst` optionally restricts the corpus (see dataslice). With sort=False
    the matching reports are shuffled instead of ranked.
    """
    import random  # bug fix: `random` was used below but never imported
    sub, foo, bar, subexps = dataslice(lst)
    wdata = sub[:, vocab == word].toarray()
    ranking = np.argsort(wdata[:, 0])[::-1]
    exps = []
    for rank in ranking:
        if wdata[rank, 0] > 0:
            exps.append(subexps[rank])
    if sort == False:
        # Random sort key == shuffle.
        exps = sorted(exps, key=lambda *args: random.random())
    return exps[0:n]
def readfile(exp):
    """Print the raw XML of one experience report (Python 2 print statement)."""
    with open(path+"xml/"+exp) as f:
        print f.read()
def read_examples(word, lst=None, sort=True, n=5):
    """Interactively page through reports containing `word`, highlighting it.

    Python 2 function (print statements, raw_input). Pre-existing asterisks
    are stripped so ***word*** markers are unambiguous; the capitalised form
    is highlighted too.
    """
    files = examples(word, lst, sort, n)
    print "Word occurs in " + str(len(files)) + " files."
    for name in files:
        # Wait for the user before showing each report.
        dummy = raw_input("*************Next report?**************")
        with open(path+"xml/"+name) as f:
            txt = f.read()
            txt = txt.replace("*","")
            txt = txt.replace(word,"***"+word+"***")
            txt = txt.replace(word.capitalize(),"***"+word.capitalize()+"***")
            print txt
class MyException(Exception):
    """Custom exception carrying an offending count and the required minimum."""

    def __init__(self, num, atleast):
        super().__init__()
        self.num = num          # the value that was actually supplied
        self.atleast = atleast  # the minimum that was required
from organization.models import Organization
def user_context(request):
    """Context processor: stash the orgs administered by the current user.

    Returns an empty context dict; the useful work is the side effect of
    attaching the `ownedorgs` queryset to `request.user`.
    """
    owned = Organization.objects.filter(administrator=request.user.id)
    request.user.ownedorgs = owned
    return {}
# #!
# # 畳み込みニューラルネットワーク
# # (https://www.tensorflow.org/tutorials/images/cnn)
from tensorflow.keras import datasets, layers, models

# gpu = tf.config.experimental.list_physical_devices('GPU')[0]
# tf.config.experimental.set_memory_growth(gpu, True)

# ## Download and prepare the MNIST dataset
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
test_images = test_images.reshape((10000, 28, 28, 1))

# Normalise pixel values to the 0-1 range.
train_images, test_images = train_images / 255.0, test_images / 255.0

# ## Build the convolutional base
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation="relu"))

# ## Add Dense layers on top (softmax over the 10 digit classes)
model.add(layers.Flatten())
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(10, activation="softmax"))

model.summary()

# ## Compile and train the model
model.compile(
    optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
model.fit(train_images, train_labels, epochs=5)

# ## Evaluate the model on the held-out test set
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
test_acc
|
import numpy as np
import theano
import theano.tensor as T
import lasagne
def make_network(settings):
    """Build a fully-connected Q-network.

    Input shape is (batch, input_length, phi_length); one rectified dense
    layer per entry in settings['layers'], then an output layer with one
    unit per action.
    """
    network = lasagne.layers.InputLayer(
        shape=(None, settings['input_length'], settings['phi_length']),
    )
    for width in settings['layers']:
        network = lasagne.layers.DenseLayer(
            network,
            num_units=width,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotNormal(gain='relu'),
            b=lasagne.init.Constant(0.1),
        )
    return lasagne.layers.DenseLayer(
        network,
        num_units=settings['num_actions'],
        W=lasagne.init.HeUniform(),
        b=lasagne.init.Constant(0.1),
    )
class Model(object):
    """Deep Q-learning model: an online Q-network plus a periodically-synced
    target network, with pre-compiled Theano train/predict functions.

    Minibatch data is staged in Theano shared variables, so the compiled
    functions take no explicit arguments (they read inputs via `givens`).
    """
    def __init__(self, settings):
        self.settings = settings
        # Symbolic placeholders for one minibatch.
        observation = T.tensor3('observation')
        next_observation = T.tensor3('next_observation')
        rewards = T.col('rewards')
        actions = T.icol('actions')
        terminals = T.icol('terminals')
        # Shared staging buffers bound to the placeholders through `givens`.
        # NOTE(review): allocated as (batch, phi_length, input_length) while
        # make_network's InputLayer is (None, input_length, phi_length) and
        # predict() fills (batch, input_length, phi_length) -- confirm the
        # intended axis order.
        self.observation = theano.shared(np.zeros(
            (settings['batch_size'],
             settings['phi_length'],
             settings['input_length'],
             ),
            dtype=theano.config.floatX))
        self.next_observation = theano.shared(np.zeros(
            (settings['batch_size'],
             settings['phi_length'],
             settings['input_length'],
             ),
            dtype=theano.config.floatX))
        # Column vectors, broadcastable along axis 1 so they combine with the
        # (batch, num_actions) score matrix.
        self.rewards = theano.shared(np.zeros(
            (settings['batch_size'], 1), dtype=theano.config.floatX),
            broadcastable=(False, True))
        self.actions = theano.shared(np.zeros(
            (settings['batch_size'], 1), dtype='int32'),
            broadcastable=(False, True))
        self.terminals = theano.shared(np.zeros(
            (settings['batch_size'], 1), dtype='int32'),
            broadcastable=(False, True))
        self.q_network = make_network(settings)
        self.target_network = make_network(settings)
        scores = lasagne.layers.get_output(self.q_network, observation)
        # disconnected_grad: gradients never flow into the target network.
        target_scores = theano.gradient.disconnected_grad(
            lasagne.layers.get_output(self.target_network, next_observation)
        )
        # Bellman target r + discount * max_a' Q_target(s', a'); the future
        # term is zeroed on terminal transitions.
        future_rewards = settings['discount'] * T.max(target_scores, axis=1, keepdims=True)
        future_rewards = future_rewards * (T.ones_like(terminals) - terminals)
        target = rewards + future_rewards
        # TD error of the Q-value for the action actually taken in each row.
        diff = target - scores[T.arange(settings['batch_size']), actions.reshape((-1,))].reshape((-1, 1))
        loss = T.mean(diff ** 2)
        # L2 weight decay on the online network only.
        loss += lasagne.regularization.regularize_network_params(self.q_network, lasagne.regularization.l2) * settings['l2_regularisation']
        params = lasagne.layers.helper.get_all_params(self.q_network)
        #updates = lasagne.updates.rmsprop(loss, params, settings['rms_learning_rate'],
        #                                  settings['rms_decay'], settings['rms_epsilon'])
        updates = lasagne.updates.sgd(loss, params, settings['sgd_learning_rate'])
        self._train = theano.function([], [loss, scores], updates=updates,
                                      givens = {observation: self.observation,
                                                next_observation: self.next_observation,
                                                rewards: self.rewards,
                                                actions: self.actions,
                                                terminals: self.terminals
                                                })
        self._predict = theano.function([], scores, givens={observation: self.observation})
        # Counts train() calls; schedules target-network synchronisation.
        self.counter = 0

    def update_target_network(self):
        """Copy the online network's weights into the target network."""
        lasagne.layers.helper.set_all_param_values(
            self.target_network,
            lasagne.layers.helper.get_all_param_values(
                self.q_network
            )
        )

    def predict(self, observation):
        """Q-values for one observation (placed in row 0 of a zeroed batch)."""
        observations = np.zeros(
            (self.settings['batch_size'], self.settings['input_length'], self.settings['phi_length']),
            dtype=theano.config.floatX)
        observations[0, ...] = observation
        self.observation.set_value(observations)
        return self._predict()[0]

    def act(self, observation, epsilon):
        """Epsilon-greedy action: random with probability `epsilon`, else argmax Q."""
        if np.random.rand() < epsilon:
            return np.random.randint(0, self.settings['num_actions'])
        predictions = self.predict(observation)
        return np.argmax(predictions)

    def train(self, observation, next_observation, actions, rewards, terminals):
        """One SGD step on a minibatch; returns sqrt of the regularised MSE loss.

        Also syncs the target network every `target_update_frequency` calls
        (including the very first one, since counter starts at 0).
        """
        self.observation.set_value(observation)
        self.next_observation.set_value(next_observation)
        self.actions.set_value(actions)
        self.rewards.set_value(rewards)
        self.terminals.set_value(terminals)
        if self.counter % self.settings['target_update_frequency'] == 0:
            self.update_target_network()
        self.counter += 1
        loss, _ = self._train()
        return np.sqrt(loss)
|
import machine
import time
from machine import Timer
from machine import Pin
# Valve state codes (see Valve.status):
# 0 - closed
# 1 - open
# 2 - closing (moving towards closed)
# 3 - opening (moving towards open)
# 4 - error
# Servo PWM duty values for the two end positions (50 Hz PWM).
PWM_CLOSE = 40
PWM_OPEN = 115
# ADC reading above this threshold is treated as a stall/overcurrent alarm.
ADC_ALARM = 700
# Settling delay (ms) applied in the end-switch handler before stopping.
TIME_END = 500
class Valve:
    """Servo-driven valve controller for MicroPython boards.

    Wires up two end switches, a manual push button, a power-control pin
    and a PWM servo output. State codes: 0 closed, 1 open, 2 closing,
    3 opening, 4 error.
    """

    def __init__(self, p_close, p_open, p_button, p_control, p_out):
        self.pwm = None
        self.tmr = None
        # End switches and the manual button are active inputs, pulled up.
        self.p_close = machine.Pin(p_close, machine.Pin.IN, machine.Pin.PULL_UP)
        self.p_open = machine.Pin(p_open, machine.Pin.IN, machine.Pin.PULL_UP)
        self.p_button = machine.Pin(p_button, machine.Pin.IN, machine.Pin.PULL_UP)
        # Power control for the servo; p_out is the PWM pin number.
        self.p_control = machine.Pin(p_control, machine.Pin.OUT)
        self.p_out = p_out
        # ADC channel 0 monitors the servo current for stall detection.
        self.p_adc = machine.ADC(0)
        self.status = 0
        if self.p_close.value():
            self.status = 0
        if self.p_open.value():
            self.status = 1
        # NOTE(review): this marks an error when exactly one end switch
        # reads high -- confirm the intended switch polarity.
        if self.p_open.value() ^ self.p_close.value():
            self.status = 4
        # Register bound-method references. The original code *called*
        # the handlers here (``handler=self._endswitch()``), so no
        # callback was ever installed for the end switches.
        self.p_button.irq(trigger=Pin.IRQ_FALLING, handler=self._button)
        self.p_close.irq(trigger=Pin.IRQ_FALLING, handler=self._endswitch)
        self.p_open.irq(trigger=Pin.IRQ_FALLING, handler=self._endswitch)

    def close(self):
        """Start driving the valve towards the closed position (state 2)."""
        if self.status == 1:
            self.pwm = machine.PWM(machine.Pin(self.p_out), freq=50)
            self.pwm.duty(PWM_CLOSE)
            time.sleep_ms(100)
            self.p_control.on()
            self.status = 2
            # Watchdog: flag an error if movement does not finish in time.
            # (The original passed the *result* of the callback, not the
            # callback itself.)
            self.tmr = Timer(-1)
            self.tmr.init(period=1500, mode=Timer.ONE_SHOT,
                          callback=self._timer_valve_control)

    def open(self):
        """Start driving the valve towards the open position (state 3)."""
        if self.status == 0:
            self.pwm = machine.PWM(machine.Pin(self.p_out), freq=50)
            self.pwm.duty(PWM_OPEN)
            time.sleep_ms(100)
            self.p_control.on()
            self.status = 3
            self.tmr = Timer(-1)
            self.tmr.init(period=1500, mode=Timer.ONE_SHOT,
                          callback=self._timer_valve_control)

    def get_status(self):
        """Return ``(adc_reading, status)``, refreshing status first."""
        if self.status == 2 or self.status == 3:
            # Overcurrent while moving -> cut power and flag an error.
            if self.p_adc.read() > ADC_ALARM:
                self._reset_out()
                self.status = 4
        if self.status == 2 and not self.p_close.value() and self.p_open.value():
            self.status = 0
        if self.status == 3 and self.p_close.value() and not self.p_open.value():
            self.status = 1
        return self.p_adc.read(), self.status

    def reset_error(self):
        """Leave the error state by forcing a close cycle."""
        self.status = 1
        self.close()

    def _timer_valve_control(self, timer=None):
        # Movement watchdog. Accepts the Timer instance that MicroPython
        # passes to the callback; defaulted so direct calls still work.
        if (self.status == 2 or self.status == 3) and (self.p_open.value() ^ self.p_close.value()):
            self._reset_out()
            self.status = 4

    def _reset_out(self):
        # Cut servo power and release the PWM channel and watchdog timer.
        self.p_control.off()
        if self.pwm is not None:
            self.pwm.deinit()
        if self.tmr is not None:
            self.tmr.deinit()

    def _button(self, pin=None):
        # IRQ handler for the manual button. MicroPython invokes IRQ
        # handlers with the triggering Pin; the original took no argument
        # and would have raised TypeError when the interrupt fired.
        if self.status == 4:
            self.reset_error()
        if self.status == 0:
            self.open()
        if self.status == 1:
            self.close()
        time.sleep_us(100)

    def _endswitch(self, pin=None):
        # IRQ handler for either end switch (same TypeError fix as above).
        time.sleep_ms(TIME_END)
        self._reset_out()
        if self.status == 2 and not self.p_close.value():
            self.status = 0
            return
        if self.status == 3 and not self.p_open.value():
            self.status = 1
            return
        # Reached an end switch in an unexpected state.
        self.status = 4
|
# -*- coding: utf-8 -*-
import os
class CapsNetParam(object):
    """A container for the hyperparameters of Efficient-CapsNet.

    Groups the input specification, the four feature-map convolutions,
    the depthwise PrimaryCap convolution and the DigitCap dimensions.
    ``dconv_filter`` is derived as num_primary_caps * dim_primary_caps.
    """
    # __slots__ doubles as the canonical, ordered list of attributes.
    __slots__ = [
        "input_width",
        "input_height",
        "input_channel",
        "conv1_filter",
        "conv1_kernel",
        "conv1_stride",
        "conv2_filter",
        "conv2_kernel",
        "conv2_stride",
        "conv3_filter",
        "conv3_kernel",
        "conv3_stride",
        "conv4_filter",
        "conv4_kernel",
        "conv4_stride",
        "dconv_filter",
        "dconv_kernel",
        "dconv_stride",
        "num_primary_caps",
        "dim_primary_caps",
        "num_digit_caps",
        "dim_digit_caps",
    ]

    def __init__(self,
                 input_width: int = 28,
                 input_height: int = 28,
                 input_channel: int = 1,
                 conv1_filter: int = 32,
                 conv1_kernel: int = 5,
                 conv1_stride: int = 1,
                 conv2_filter: int = 64,
                 conv2_kernel: int = 3,
                 conv2_stride: int = 1,
                 conv3_filter: int = 64,
                 conv3_kernel: int = 3,
                 conv3_stride: int = 1,
                 conv4_filter: int = 128,
                 conv4_kernel: int = 3,
                 conv4_stride: int = 2,
                 dconv_kernel: int = 9,
                 dconv_stride: int = 1,
                 num_primary_caps: int = 16,
                 dim_primary_caps: int = 8,
                 num_digit_caps: int = 10,
                 dim_digit_caps: int = 16,
                 *args,
                 **kwargs) -> None:
        # Input Specification
        self.input_width = input_width
        self.input_height = input_height
        self.input_channel = input_channel
        # FeatureMap Layer
        self.conv1_filter = conv1_filter
        self.conv1_kernel = conv1_kernel
        self.conv1_stride = conv1_stride
        self.conv2_filter = conv2_filter
        self.conv2_kernel = conv2_kernel
        self.conv2_stride = conv2_stride
        self.conv3_filter = conv3_filter
        self.conv3_kernel = conv3_kernel
        self.conv3_stride = conv3_stride
        self.conv4_filter = conv4_filter
        self.conv4_kernel = conv4_kernel
        self.conv4_stride = conv4_stride
        # PrimaryCap Layer: the depthwise conv emits one filter per
        # capsule dimension, hence the derived filter count.
        self.dconv_filter = num_primary_caps * dim_primary_caps
        self.dconv_kernel = dconv_kernel
        self.dconv_stride = dconv_stride
        self.num_primary_caps = num_primary_caps
        self.dim_primary_caps = dim_primary_caps
        # DigitCap Layer
        self.num_digit_caps = num_digit_caps
        self.dim_digit_caps = dim_digit_caps

    def get_config(self) -> dict:
        """Return all hyperparameters as an ordered name->value dict."""
        # __slots__ already lists every attribute in declaration order,
        # so build the mapping from it instead of repeating each name.
        return {name: getattr(self, name) for name in self.__slots__}

    def save_config(self, path: str) -> None:
        """Saves configuration.

        Collects attributes as pairs of name and value and saves them to
        a UTF-8 encoded file, one ``name=value`` pair per line.

        Args:
            path (str): A filepath to write configuration. If any file already
                exists, its contents will be overwritten.

        Raises:
            TypeError: If `path` is not string.
            ValueError: If `path` is empty.
        """
        if not isinstance(path, str):
            raise TypeError()
        elif len(path) == 0:
            raise ValueError()
        else:
            with open(path, 'w', encoding='utf8') as f:
                for k, v in self.get_config().items():
                    # write(), not writelines(): the original passed a
                    # single string to writelines(), which only works by
                    # accident (a string iterates character-wise).
                    f.write(f"{k}={v}\n")
def load_config(path: str) -> CapsNetParam:
    """Loads configuration.

    Reads the file at ``path`` and builds a `CapsNetParam` from its
    ``name=value`` lines (values are parsed as integers).

    Args:
        path (str): A filepath to read configuration.

    Returns:
        A `CapsNetParam` instance.

    Raises:
        TypeError: If `path` is not string.
        ValueError: If `path` is empty.
        FileNotFoundError: If file of `path` not exists.
    """
    if not isinstance(path, str):
        raise TypeError()
    elif len(path) == 0:
        raise ValueError()
    elif not os.path.isfile(path):
        raise FileNotFoundError()
    params = {}
    with open(path, 'r', encoding="utf8") as f:
        for line in f:
            name, value = line.strip().split('=')
            params[name] = int(value)
    return CapsNetParam(**params)
def make_param(image_width: int = 28,
               image_height: int = 28,
               image_channel: int = 1,
               conv1_filter: int = 32,
               conv1_kernel: int = 5,
               conv1_stride: int = 1,
               conv2_filter: int = 64,
               conv2_kernel: int = 3,
               conv2_stride: int = 1,
               conv3_filter: int = 64,
               conv3_kernel: int = 3,
               conv3_stride: int = 1,
               conv4_filter: int = 128,
               conv4_kernel: int = 3,
               conv4_stride: int = 2,
               dconv_kernel: int = 9,
               dconv_stride: int = 1,
               num_primary_caps: int = 16,
               dim_primary_caps: int = 8,
               num_digit_caps: int = 10,
               dim_digit_caps: int = 16) -> CapsNetParam:
    """Build a CapsNetParam, mapping the image_* names onto input_*."""
    # Keyword arguments make the mapping explicit instead of relying on
    # positional order matching CapsNetParam.__init__.
    return CapsNetParam(
        input_width=image_width,
        input_height=image_height,
        input_channel=image_channel,
        conv1_filter=conv1_filter,
        conv1_kernel=conv1_kernel,
        conv1_stride=conv1_stride,
        conv2_filter=conv2_filter,
        conv2_kernel=conv2_kernel,
        conv2_stride=conv2_stride,
        conv3_filter=conv3_filter,
        conv3_kernel=conv3_kernel,
        conv3_stride=conv3_stride,
        conv4_filter=conv4_filter,
        conv4_kernel=conv4_kernel,
        conv4_stride=conv4_stride,
        dconv_kernel=dconv_kernel,
        dconv_stride=dconv_stride,
        num_primary_caps=num_primary_caps,
        dim_primary_caps=dim_primary_caps,
        num_digit_caps=num_digit_caps,
        dim_digit_caps=dim_digit_caps,
    )
|
# Registry of messages already printed; maps message -> 1.
warned = {}


def warn_reset():
    """Forget all previously issued warnings."""
    global warned
    warned = {}


def warn(msg):
    """Print ``msg`` once; repeats of the same message are suppressed."""
    if msg in warned:
        return
    print("Warning:", msg)
    warned[msg] = 1
|
"""
This file contains code that was run once or a few times,
mainly to get an intuitive sense of the scope of the
problem or to create matrices that only need to be
created once.
"""
import numpy as np
OpeningMovesSaveFileName = "openingMoves.npz"
"""
How many possible hands are there of length cardsLeft
where the smallest card has value depth?
84,166,160 for cardsLeft=18
In other words, there are 84M possible hands to be dealt
at the start. This is not the number of possible
opening game states as that has the additional
factor of the cards in the opponents hand
"""
countHandsCache = {}
def countHands(cardsLeft, depth=0):
if cardsLeft == 0:
return 1
if depth == 15 and cardsLeft != 0:
return 0
if (cardsLeft, depth) in countHandsCache:
return countHandsCache[(cardsLeft, depth)]
if depth == 13 or depth == 14:
val = countHands(cardsLeft-1,depth+1) + countHands(cardsLeft,depth+1)
else:
val = sum([countHands(cardsLeft-i, depth+1) for i in range(min(5, cardsLeft+1))])
countHandsCache[(cardsLeft, depth)] = val
return val
"""
Extension of countHands: How many possible opening states are there?
The number of 18 card hands is the space of possible opening observations
But, the state space includes the cards the opponent has as well
So, how many possible ways are there to distribute 18 cards to each of two players
given that the suit doesn't matter?
RE: depth is the value of the smallest card allowed.
0 -> 3
...
7 -> 10
8 -> J
...
11 -> A
12 -> 2
13 -> JB (Joker black)
14 -> JR (Joker red)
Results: 151,632,049,354,500 possible opening states
"""
countStatesCache = {}
def countStates(cardsLeftA, cardsLeftB, depth=0):
# If there are no cards left to distribute, there's only one way to do it!
if cardsLeftA == 0:
if cardsLeftB == 0:
return 1
else:
return countHands(cardsLeftB, depth)
if cardsLeftB == 0:
return countHands(cardsLeftA, depth)
# If the depth has maxed out (i.e. the min card allowed to distribute > Joker red)
# there's no way to do it. It's invalid
if depth == 15 and (cardsLeftA != 0 or cardsLeftB != 0):
return 0
if (cardsLeftA, cardsLeftB, depth) in countStatesCache:
return countStatesCache[(cardsLeftA, cardsLeftB, depth)]
if (cardsLeftB, cardsLeftA, depth) in countStatesCache:
return countStatesCache[(cardsLeftB, cardsLeftA, depth)]
num = 0
if depth < 13:
for i in range(min(5, cardsLeftA+1)):
# give i cards of value "depth" to A
for j in range(min(5-i, cardsLeftB+1)):
# give j cards of value "depth" to B
num += countStates(cardsLeftA-i, cardsLeftB-j, depth+1)
else:
num += countStates(cardsLeftA, cardsLeftB, depth+1) + \
countStates(cardsLeftA-1, cardsLeftB, depth+1) + \
countStates(cardsLeftA, cardsLeftB-1, depth+1)
countStatesCache[(cardsLeftA, cardsLeftB, depth)] = num
return num
"""
How many possible chains of length cardsLeft are there?
9021 for cardsLeft=18
Given this surprising result, I thought that the best way
find all possible opening moves is to literally make
an array that contains all of them and see which rows
of that matrix a player's hand is greater than.
"""
def countChainsHelper(chain, cardsLeft):
if cardsLeft==0:
return (14-len(chain))*(len(chain)>1)
if cardsLeft<0:
return 0
return sum([countChainsHelper(chain+[i], cardsLeft-i) for i in range(2,5)])
def countChains(cardsLeft):
s = 0
for c in range(2, cardsLeft+1):
s += countChainsHelper([2], c-2)
s += countChainsHelper([3], c-3)
s += countChainsHelper([4], c-4)
return s
# 2D array of all possible opening moves, one 15-long card-count row per
# move; should be of size (9075, 15).
def createOpeningMatrix(maxChainLength=9):
    """Build the matrix of every legal opening move.

    Rows cover all singles, doubles, triples, bombs and chains of
    doubles/triples/bombs spanning 2..maxChainLength consecutive ranks,
    limited to at most 18 cards (the size of a dealt hand).
    """
    from itertools import product  # local import keeps module deps unchanged

    singles = np.eye(15).astype(np.int64)
    # Jokers (indices 13, 14) exist once each, so only ranks 0..12 can
    # form doubles, triples or bombs.
    doubles = 2 * singles[:13]
    triples = 3 * singles[:13]
    bombs = 4 * singles[:13]
    # Collect every row and concatenate once at the end; the original
    # re-copied the whole matrix on every append (quadratic).
    rows = [singles, doubles, triples, bombs]
    for chainLength in range(2, maxChainLength + 1):
        # Enumerate per-rank copy counts in the same order as the
        # original np.unravel_index loop (last index varies fastest).
        for counts in product((2, 3, 4), repeat=chainLength):
            # Skip chains that would need more than 18 cards.
            if sum(counts) > 18:
                continue
            base_chain = np.zeros((1, 15), dtype=np.int64)
            for rank, copies in enumerate(counts):
                base_chain[0, rank] = copies
            # Slide the chain to every start rank that keeps it below
            # the jokers (ranks 13, 14 cannot be part of a chain).
            rows.extend(np.roll(base_chain, start)
                        for start in range(14 - chainLength))
    return np.concatenate(rows, axis=0)
def saveOpeningMatrix(OpM, fname=OpeningMovesSaveFileName):
    """Save the opening-move matrix to ``fname`` with np.save.

    Uses a context manager so the file is closed even if the save
    fails (the original leaked the open handle).
    """
    with open(fname, 'wb+') as f:
        np.save(f, OpM)
|
"""
@Time : 2021/8/6 下午12:37
@Author : lan
@Mail : lanzy.nice@gmail.com
@Desc : 将控制台输出到文本框内
"""
import sys
import time
from PyQt5.QtCore import QObject, pyqtSignal, QEventLoop, QTimer
from PyQt5.QtWidgets import QMainWindow, QPushButton, QApplication, QTextEdit
from PyQt5.QtGui import QTextCursor
class Stream(QObject):
    """Redirects console output to a text widget via the newText signal.

    Instances are assigned to sys.stdout, so they must provide the
    file-like interface that print() and the interpreter rely on.
    """
    newText = pyqtSignal(str)

    def write(self, text):
        # Forward each written chunk to whoever is connected (the GUI).
        self.newText.emit(str(text))

    def flush(self):
        # sys.stdout replacements need flush(): print(..., flush=True)
        # and interpreter shutdown call it, and the original raised
        # AttributeError there. Emission is immediate, so it is a no-op.
        pass
class GenMast(QMainWindow):
    """Main application window.

    Redirects sys.stdout into the embedded read-only QTextEdit so that
    everything print()ed appears inside the GUI.
    """
    def __init__(self):
        super().__init__()
        self.initUI()
        # Custom output stream: from here on, print() output is
        # delivered to onUpdateText through the newText signal.
        sys.stdout = Stream(newText=self.onUpdateText)
    def onUpdateText(self, text):
        """Write console output to text widget."""
        # Append at the end and keep the newest output visible.
        cursor = self.process.textCursor()
        cursor.movePosition(QTextCursor.End)
        cursor.insertText(text)
        self.process.setTextCursor(cursor)
        self.process.ensureCursorVisible()
    def closeEvent(self, event):
        """Shuts down application on close."""
        # Return stdout to defaults.
        sys.stdout = sys.__stdout__
        super().closeEvent(event)
    def initUI(self):
        """Creates UI window on launch."""
        # Button for generating the master list.
        btnGenMast = QPushButton('Run', self)
        btnGenMast.move(450, 50)
        btnGenMast.resize(100, 200)
        btnGenMast.clicked.connect(self.genMastClicked)
        # Create the text output widget (read-only console mirror).
        self.process = QTextEdit(self, readOnly=True)
        self.process.ensureCursorVisible()
        self.process.setLineWrapColumnOrWidth(500)
        self.process.setLineWrapMode(QTextEdit.FixedPixelWidth)
        self.process.setFixedWidth(400)
        self.process.setFixedHeight(200)
        self.process.move(30, 50)
        # Set window size and title, then show the window.
        self.setGeometry(300, 300, 600, 300)
        self.setWindowTitle('Generate Master')
        self.show()
    def printhello(self):
        # Demo helper; the output lands in the text widget via Stream.
        print('hello')
    def genMastClicked(self):
        """Runs the main function."""
        print('Running...')
        self.printhello()
        # Local event loop + single-shot timer: waits 2 s without
        # freezing the GUI the way time.sleep() would.
        loop = QEventLoop()
        QTimer.singleShot(2000, loop.quit)
        loop.exec_()
        print('Done.')
if __name__ == '__main__':
    # Run the application and exit with Qt's return code.
    app = QApplication(sys.argv)
    app.aboutToQuit.connect(app.deleteLater)
    window = GenMast()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
'''
Salt module to manage unix mounts and the fstab file
'''
# Import python libs
import os
import re
import logging
# Import salt libs
import salt.utils
from salt.modules import state_std
from salt._compat import string_types
from salt.utils import which as _which
from salt.exceptions import CommandNotFoundError, CommandExecutionError
# Set up logger
log = logging.getLogger(__name__)
def _list_mounts():
    '''
    Map mount point -> device from the output of ``mount -l``.
    '''
    ret = {}
    for line in __salt__['cmd.run_stdout']('mount -l').split('\n'):
        comps = re.sub(r"\s+", " ", line).split()
        # Skip blank or malformed lines; the original indexed comps[2]
        # unconditionally and raised IndexError on an empty trailing line.
        if len(comps) < 3:
            continue
        ret[comps[2]] = comps[0]
    return ret
def _active_mountinfo(ret):
    '''
    Fill ``ret`` with mount data parsed from /proc/self/mountinfo.
    '''
    _list = _list_mounts()
    filename = '/proc/self/mountinfo'
    if not os.access(filename, os.R_OK):
        msg = 'File not readable {0}'
        raise CommandExecutionError(msg.format(filename))
    with salt.utils.fopen(filename) as ifile:
        for line in ifile:
            comps = line.split()
            device = comps[2].split(':')
            # After the per-mount options comes a variable number of
            # optional tags (shared:N, master:N, ...) terminated by a
            # lone '-'. Locate that separator instead of assuming fixed
            # positions; the original misparsed any mount that carried
            # propagation tags.
            sep = comps.index('-')
            ret[comps[4]] = {'mountid': comps[0],
                             'parentid': comps[1],
                             'major': device[0],
                             'minor': device[1],
                             'root': comps[3],
                             'opts': comps[5].split(','),
                             'fstype': comps[sep + 1],
                             'device': comps[sep + 2],
                             'alt_device': _list.get(comps[4], None),
                             'superopts': comps[sep + 3].split(',')}
    return ret
def _active_mounts(ret):
    '''
    Fill ``ret`` with mount data parsed from /proc/self/mounts.
    '''
    alt_devices = _list_mounts()
    mounts_file = '/proc/self/mounts'
    if not os.access(mounts_file, os.R_OK):
        raise CommandExecutionError(
            'File not readable {0}'.format(mounts_file))
    with salt.utils.fopen(mounts_file) as ifile:
        for line in ifile:
            # Fields: device, mount point, fstype, options, dump, pass.
            fields = line.split()
            ret[fields[1]] = {'device': fields[0],
                              'alt_device': alt_devices.get(fields[1], None),
                              'fstype': fields[2],
                              'opts': fields[3].split(',')}
    return ret
def _active_mounts_freebsd(ret):
    '''
    Fill ``ret`` with mount data from ``mount -p`` (FreeBSD fstab format).
    '''
    for entry in __salt__['cmd.run_stdout']('mount -p').split('\n'):
        fields = re.sub(r"\s+", " ", entry).split()
        ret[fields[1]] = {'device': fields[0],
                          'fstype': fields[2],
                          'opts': fields[3].split(',')}
    return ret
def active():
    '''
    List the active mounts.

    CLI Example:

    .. code-block:: bash

        salt '*' mount.active
    '''
    ret = {}
    # The original tested ``in ('FreeBSD')`` -- that is a plain string,
    # so the check was substring containment; use a real tuple.
    if __grains__['os'] in ('FreeBSD',):
        _active_mounts_freebsd(ret)
    else:
        # Prefer the richer mountinfo data, fall back to /proc/self/mounts.
        try:
            _active_mountinfo(ret)
        except CommandExecutionError:
            _active_mounts(ret)
    return ret
def fstab(config='/etc/fstab'):
    '''
    List the contents of the fstab

    CLI Example:

    .. code-block:: bash

        salt '*' mount.fstab
    '''
    ret = {}
    if not os.path.isfile(config):
        return ret
    with salt.utils.fopen(config) as ifile:
        for line in ifile:
            # Skip comments and blank lines.
            if line.startswith('#') or not line.strip():
                continue
            comps = line.split()
            if len(comps) != 6:
                # Not a full six-field fstab entry.
                continue
            device, name, fstype, opts, dump, pass_num = comps
            ret[name] = {'device': device,
                         'fstype': fstype,
                         'opts': opts.split(','),
                         'dump': dump,
                         'pass': pass_num}
    return ret
def rm_fstab(name, config='/etc/fstab', **kwargs):
    '''
    Remove the mount point from the fstab

    CLI Example:

    .. code-block:: bash

        salt '*' mount.rm_fstab /mnt/foo
    '''
    contents = fstab(config)
    if name not in contents:
        return True
    # The entry is present; rewrite the file without it, keeping every
    # comment, blank line and malformed entry verbatim.
    lines = []
    try:
        with salt.utils.fopen(config, 'r') as ifile:
            for line in ifile:
                if line.startswith('#'):
                    # Commented
                    lines.append(line)
                    continue
                if not line.strip():
                    # Blank line
                    lines.append(line)
                    continue
                comps = line.split()
                if len(comps) != 6:
                    # Invalid entry
                    lines.append(line)
                    continue
                # (the original split the line a second time here)
                if comps[1] == name:
                    continue
                lines.append(line)
    except (IOError, OSError) as exc:
        msg = "Couldn't read from {0}: {1}"
        raise CommandExecutionError(msg.format(config, str(exc)))
    try:
        with salt.utils.fopen(config, 'w+') as ofile:
            ofile.writelines(lines)
    except (IOError, OSError) as exc:
        msg = "Couldn't write to {0}: {1}"
        raise CommandExecutionError(msg.format(config, str(exc)))
    return True
def set_fstab(
        name,
        device,
        fstype,
        opts='defaults',
        dump=0,
        pass_num=0,
        config='/etc/fstab',
        test=False,
        **kwargs):
    '''
    Verify that this mount is represented in the fstab, change the mount
    to match the data passed, or add the mount if it is not present.

    Returns 'present' (entry already correct), 'change' (entry updated)
    or 'new' (entry appended). With ``test=True`` the file is not
    written, but the return value still reports what would happen.

    CLI Example:

    .. code-block:: bash

        salt '*' mount.set_fstab /mnt/foo /dev/sdz1 ext4
    '''
    # Fix the opts type if it is a list
    if isinstance(opts, list):
        opts = ','.join(opts)
    lines = []
    change = False
    present = False
    if not os.path.isfile(config):
        raise CommandExecutionError('Bad config file "{0}"'.format(config))
    try:
        with salt.utils.fopen(config, 'r') as ifile:
            for line in ifile:
                if line.startswith('#'):
                    # Commented
                    lines.append(line)
                    continue
                if not line.strip():
                    # Blank line
                    lines.append(line)
                    continue
                comps = line.split()
                if len(comps) != 6:
                    # Invalid entry
                    lines.append(line)
                    continue
                if comps[1] == name:
                    # check to see if there are changes
                    # and fix them if there are any
                    present = True
                    if comps[0] != device:
                        change = True
                        comps[0] = device
                    if comps[2] != fstype:
                        change = True
                        comps[2] = fstype
                    if comps[3] != opts:
                        change = True
                        comps[3] = opts
                    if comps[4] != str(dump):
                        change = True
                        comps[4] = str(dump)
                    if comps[5] != str(pass_num):
                        change = True
                        comps[5] = str(pass_num)
                    if change:
                        log.debug(
                            'fstab entry for mount point {0} needs to be '
                            'updated'.format(name)
                        )
                        # Replace the stale line with a freshly formatted one.
                        newline = (
                            '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(
                                device, name, fstype, opts, dump, pass_num
                            )
                        )
                        lines.append(newline)
                else:
                    # Unrelated entry, keep verbatim.
                    lines.append(line)
    except (IOError, OSError) as exc:
        msg = 'Couldn\'t read from {0}: {1}'
        raise CommandExecutionError(msg.format(config, str(exc)))
    if change:
        # An existing entry was rewritten above; commit unless in test mode.
        if not salt.utils.test_mode(test=test, **kwargs):
            try:
                with salt.utils.fopen(config, 'w+') as ofile:
                    # The line was changed, commit it!
                    ofile.writelines(lines)
            except (IOError, OSError):
                msg = 'File not writable {0}'
                raise CommandExecutionError(msg.format(config))
        return 'change'
    if not change:
        if present:
            # The right entry is already here
            return 'present'
        else:
            if not salt.utils.test_mode(test=test, **kwargs):
                # The entry is new, add it to the end of the fstab
                newline = '{0}\t\t{1}\t{2}\t{3}\t{4} {5}\n'.format(
                    device,
                    name,
                    fstype,
                    opts,
                    dump,
                    pass_num)
                lines.append(newline)
                try:
                    with salt.utils.fopen(config, 'w+') as ofile:
                        # The line was changed, commit it!
                        ofile.writelines(lines)
                except (IOError, OSError):
                    raise CommandExecutionError(
                        'File not writable {0}'.format(
                            config
                        )
                    )
            return 'new'
def mount(name, device, mkmnt=False, fstype='', opts='defaults', **kwargs):
    '''
    Mount a device

    CLI Example:

    .. code-block:: bash

        salt '*' mount.mount /mnt/foo /dev/sdz1 True
    '''
    if isinstance(opts, string_types):
        opts = opts.split(',')
    # Create the mount point on request.
    if not os.path.exists(name) and mkmnt:
        os.makedirs(name)
    mount_args = '-o {0}'.format(','.join(opts))
    if fstype:
        mount_args += ' -t {0}'.format(fstype)
    out = __salt__['cmd.run_stdall'](
        'mount {0} {1} {2} '.format(mount_args, device, name))
    state_std(kwargs, out)
    if out['retcode'] != 0:
        # Surface the command output on failure.
        return out['stdout']
    return True
def remount(name, device, mkmnt=False, fstype='', opts='defaults', **kwargs):
    '''
    Attempt to remount a device, if the device is not already mounted, mount
    is called

    CLI Example:

    .. code-block:: bash

        salt '*' mount.remount /mnt/foo /dev/sdz1 True
    '''
    if isinstance(opts, string_types):
        opts = opts.split(',')
    mnts = active()
    if name in mnts:
        # The mount point is mounted, attempt to remount it with the given data
        if 'remount' not in opts:
            opts.append('remount')
        lopts = ','.join(opts)
        args = '-o {0}'.format(lopts)
        if fstype:
            args += ' -t {0}'.format(fstype)
        cmd = 'mount {0} {1} {2} '.format(args, device, name)
        out = __salt__['cmd.run_stdall'](cmd)
        state_std(kwargs, out)
        if out['retcode']:
            return out['stderr']
        return True
    # Mount a filesystem that isn't already mounted. Forward kwargs so
    # state_std output handling is not lost (the original dropped them).
    return mount(name, device, mkmnt, fstype, opts, **kwargs)
def umount(name, **kwargs):
    '''
    Attempt to unmount a device by specifying the directory it is mounted on

    CLI Example:

    .. code-block:: bash

        salt '*' mount.umount /mnt/foo
    '''
    if name not in active():
        # Nothing mounted there; report it instead of running umount.
        return "{0} does not have anything mounted".format(name)
    out = __salt__['cmd.run_stdall']('umount {0}'.format(name))
    state_std(kwargs, out)
    if out['retcode']:
        return out['stderr']
    return True
def is_fuse_exec(cmd):
    '''
    Returns true if the command passed is a fuse mountable application.

    CLI Example:

    .. code-block:: bash

        salt '*' mount.is_fuse_exec sshfs
    '''
    cmd_path = _which(cmd)
    if not cmd_path:
        # No point in running ldd on a command that doesn't exist
        return False
    if not _which('ldd'):
        raise CommandNotFoundError('ldd')
    # A fuse-mountable binary links against libfuse.
    return 'libfuse' in __salt__['cmd.run']('ldd {0}'.format(cmd_path))
def swaps():
    '''
    Return a dict containing information on active swap

    CLI Example:

    .. code-block:: bash

        salt '*' mount.swaps
    '''
    ret = {}
    with salt.utils.fopen('/proc/swaps') as fp_:
        for line in fp_:
            # The first line of /proc/swaps is the column header.
            if line.startswith('Filename'):
                continue
            fields = line.split()
            ret[fields[0]] = {
                'type': fields[1],
                'size': fields[2],
                'used': fields[3],
                'priority': fields[4]}
    return ret
def swapon(name, priority=None, **kwargs):
    '''
    Activate a swap disk

    CLI Example:

    .. code-block:: bash

        salt '*' mount.swapon /root/swapfile
    '''
    ret = {}
    active_swaps = swaps()
    if name in active_swaps:
        # Already active: report its stats without re-running swapon.
        return {'stats': active_swaps[name], 'new': False}
    cmd = 'swapon {0}'.format(name)
    if priority:
        cmd += ' -p {0}'.format(priority)
    result = __salt__['cmd.run_stdall'](cmd)
    state_std(kwargs, result)
    # Re-read /proc/swaps to confirm activation succeeded.
    active_swaps = swaps()
    if name in active_swaps:
        ret['stats'] = active_swaps[name]
        ret['new'] = True
    return ret
def swapoff(name, **kwargs):
    '''
    Deactivate a named swap mount

    CLI Example:

    .. code-block:: bash

        salt '*' mount.swapoff /root/swapfile
    '''
    if name not in swaps():
        # Nothing to do; preserve the historical None return value.
        return None
    result = __salt__['cmd.run_stdall']('swapoff {0}'.format(name))
    state_std(kwargs, result)
    # Success means the entry has disappeared from /proc/swaps.
    return name not in swaps()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from todo.models import Item
from todo.serializers import ItemSerializer
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from todo.models import Item
from todo.serializers import ItemSerializer
from todo.models import Item
from todo.serializers import ItemSerializer
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
from rest_framework import permissions
from todo.permissions import IsOwnerOrReadOnly
# Create your views here.
from django.contrib.auth.models import User
from todo.serializers import UserSerializer
from rest_framework.decorators import detail_route
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import renderers
from rest_framework.response import Response
from rest_framework import viewsets
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """
    This viewset automatically provides `list` and `detail` actions.

    Read-only by design: only GET endpoints are exposed for users.
    """
    # All users are visible; serialization delegated to UserSerializer.
    queryset = User.objects.all()
    serializer_class = UserSerializer
@api_view(['GET'])
def api_root(request, format=None):
    """API entry point: hyperlinks to the user and todo collections."""
    # `reverse` builds fully-qualified URLs so clients can navigate the API.
    return Response({
        'users': reverse('user-list', request=request, format=format),
        'todo': reverse('item-list', request=request, format=format)
    })
''' class ItemHighlight(generics.GenericAPIView):
queryset = Item.objects.all()
renderer_classes = (renderers.StaticHTMLRenderer,)
def get(self, request, *args, **kwargs):
todo = self.get_object()
return Response(todo.highlighted)
class ItemList(mixins.ListModelMixin,
mixins.CreateModelMixin,
generics.GenericAPIView):
"""
List all items, or create a new item.
"""
queryset = Item.objects.all()
serializer_class = ItemSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class ItemDetail(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
generics.GenericAPIView):
"""
Retrieve, update or delete an item instance.
"""
queryset = Item.objects.all()
serializer_class = ItemSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly,)
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs) '''
class ItemViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.

    Additionally we also provide an extra `highlight` action.
    """
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
    # Anonymous users get read-only access; only the owner may modify.
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly,)
    # NOTE(review): `detail_route` is deprecated in newer DRF releases in
    # favour of `@action(detail=True)` -- confirm the pinned DRF version.
    @detail_route(renderer_classes=[renderers.StaticHTMLRenderer])
    def highlight(self, request, *args, **kwargs):
        # Serve the item's pre-rendered highlighted HTML.
        todo = self.get_object()
        return Response(todo.highlighted)
    def perform_create(self, serializer):
        # Stamp the authenticated user as the owner on creation.
        serializer.save(owner=self.request.user)
|
class ListNode:
    """A singly linked list node."""

    def __init__(self, val=0, next=None):
        # Payload and reference to the following node (None = list end).
        self.val, self.next = val, next
class Solution:
    """Reverse a singly linked list."""

    def __init__(self):
        # New head of the most recently reversed list; kept for
        # compatibility with the original recursive implementation.
        self.ans = None

    def reverseList(self, head: "ListNode") -> "ListNode":
        """Return the head of the reversed list.

        Iterative pointer re-linking instead of the original recursion,
        which raised RecursionError on lists longer than the interpreter
        recursion limit (~1000 nodes).
        """
        if head is None:
            return None
        prev = None
        while head is not None:
            # Reverse one link and advance.
            head.next, prev, head = prev, head, head.next
        self.ans = prev
        return prev

    def reverseAnswer(self, head):
        """Original recursive helper, kept for backward compatibility.

        Splices ``head`` onto the already-reversed tail, records the new
        list head in ``self.ans`` and returns ``head``.
        """
        if head.next is None:
            self.ans = head
            return head
        tail = self.reverseAnswer(head.next)
        head.next = None
        tail.next = head
        return head
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
import datetime
import os
import sys
# Timestamp helper: returns the moment it was called.
def tstamp():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")
# Open the log file.
# NOTE(review): `log` is opened but never written to -- confirm whether
# logging output was intended here before removing it.
log = open(sys.argv[2], 'w')
filePath = sys.argv[1]
print("Setting file path: {0}".format(filePath))

# Recursively collect every file under filePath.
listOfFiles = list()
for (dirpath, dirnames, filenames) in os.walk(filePath):
    listOfFiles += [os.path.join(dirpath, file) for file in filenames]

# Use the first "analyse_log" file found as the input log.
for i in listOfFiles:
    if "analyse_log" in i:
        filePath = i
        break

# Collect the lines flagged MATCHED, closing the file when done
# (the original leaked the handle and had a no-op `filePath.strip`).
matchedEventIds = []
with open(filePath, "r") as logFile:
    for line in logFile:
        if "MATCHED" in line:
            matchedEventIds.append(line)

# Event ID is assumed to start at column 18 of a MATCHED line
# -- TODO confirm against the analyse_log line format.
matchedEventsIdsInt = []
for line in matchedEventIds:
    print(line[18:])
    matchedEventsIdsInt.append(int(line[18:]))

# Frequency count per event ID. The original seeded each count with
# `get(i, 1) + 1`, so every bar was inflated by one.
uniqueIds = dict()
for eventId in matchedEventsIdsInt:
    uniqueIds[eventId] = uniqueIds.get(eventId, 0) + 1

# Horizontal bar chart: one bar per unique event ID.
plt.barh(range(len(uniqueIds)), uniqueIds.values(), align='center')
plt.yticks(np.arange(len(uniqueIds)), uniqueIds)
plt.xlabel("Event Count")
plt.ylabel("Event ID Codes")
plt.title("Event IDs")
plt.show()
|
import pytest
from pg8000.dbapi import convert_paramstyle
# "(id %% 2) = 0",
@pytest.mark.parametrize(
    "query,statement",
    [
        [
            'SELECT ?, ?, "field_?" FROM t '
            "WHERE a='say ''what?''' AND b=? AND c=E'?\\'test\\'?'",
            'SELECT $1, $2, "field_?" FROM t WHERE '
            "a='say ''what?''' AND b=$3 AND c=E'?\\'test\\'?'",
        ],
        [
            "SELECT ?, ?, * FROM t WHERE a=? AND b='are you ''sure?'",
            "SELECT $1, $2, * FROM t WHERE a=$3 AND b='are you ''sure?'",
        ],
    ],
)
def test_qmark(query, statement):
    # qmark placeholders become $n; '?' inside literals is untouched.
    params = (1, 2, 3)
    result = convert_paramstyle("qmark", query, params)
    assert result == (statement, params)
@pytest.mark.parametrize(
    "query,expected",
    [
        [
            "SELECT sum(x)::decimal(5, 2) :2, :1, * FROM t WHERE a=:3",
            "SELECT sum(x)::decimal(5, 2) $2, $1, * FROM t WHERE a=$3",
        ],
    ],
)
def test_numeric(query, expected):
    # :n placeholders are rewritten to $n; casts (::) are left alone.
    params = (1, 2, 3)
    assert convert_paramstyle("numeric", query, params) == (expected, params)
@pytest.mark.parametrize(
    "query",
    [
        "make_interval(days := 10)",
    ],
)
def test_numeric_unchanged(query):
    # Named-argument syntax `:=` must not be mistaken for a placeholder.
    params = (1, 2, 3)
    assert convert_paramstyle("numeric", query, params) == (query, params)
def test_named():
    # Named placeholders map to $n in first-use order; repeats reuse
    # the same positional slot.
    params = {
        "f_2": 1,
        "f1": 2,
    }
    converted = convert_paramstyle(
        "named", "SELECT sum(x)::decimal(5, 2) :f_2, :f1 FROM t WHERE a=:f_2", params
    )
    assert converted == (
        "SELECT sum(x)::decimal(5, 2) $1, $2 FROM t WHERE a=$1",
        (1, 2),
    )
@pytest.mark.parametrize(
    "query,expected",
    [
        (
            "SELECT %s, %s, \"f1_%%\", E'txt_%%' "
            "FROM t WHERE a=%s AND b='75%%' AND c = '%' -- Comment with %",
            "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$3 AND "
            "b='75%%' AND c = '%' -- Comment with %",
        ),
        (
            "SELECT -- Comment\n%s FROM t",
            "SELECT -- Comment\n$1 FROM t",
        ),
    ],
)
def test_format_changed(query, expected):
    """'%s' markers become '$n'; '%%', quoted '%' and SQL comments survive."""
    params = (1, 2, 3)
    assert convert_paramstyle("format", query, params) == (expected, params)
@pytest.mark.parametrize(
    "query",
    [
        r"""COMMENT ON TABLE test_schema.comment_test """
        r"""IS 'the test % '' " \ table comment'""",
    ],
)
def test_format_unchanged(query):
    """A statement with no '%s' markers passes through untouched."""
    params = (1, 2, 3)
    assert convert_paramstyle("format", query, params) == (query, params)
def test_py_format():
    """'%(name)s' markers map to $n by first appearance; repeats share one $n."""
    params = {"f2": 1, "f1": 2, "f3": 3}
    converted = convert_paramstyle(
        "pyformat",
        "SELECT %(f2)s, %(f1)s, \"f1_%%\", E'txt_%%' "
        "FROM t WHERE a=%(f2)s AND b='75%%'",
        params,
    )
    expected_sql = "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$1 AND b='75%%'"
    assert converted == (expected_sql, (1, 2))
def test_pyformat_format():
    """pyformat should also accept positional %s markers with a sequence."""
    params = (1, 2, 3)
    converted = convert_paramstyle(
        "pyformat",
        "SELECT %s, %s, \"f1_%%\", E'txt_%%' FROM t WHERE a=%s AND b='75%%'",
        params,
    )
    expected_sql = "SELECT $1, $2, \"f1_%%\", E'txt_%%' FROM t WHERE a=$3 AND b='75%%'"
    assert converted == (expected_sql, params)
|
from django.shortcuts import render, get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from myBlog.models import Post, Comment
from myBlog.forms import CommentForm
def view_post(request, slug):
    """Display a single blog post and handle comment submission.

    GET renders the post with an empty comment form. POST validates the
    submitted form; on success the comment is attached to the post, saved,
    and the browser is redirected back to the same URL (post/redirect/get).
    On validation failure the bound form (with errors) is re-rendered.
    """
    post = get_object_or_404(Post, slug=slug)
    if request.method == 'POST':
        comment_form = CommentForm(request.POST)
        # BUG FIX: the original tested `comment_form.is_valid` -- the bound
        # method object, which is always truthy -- instead of calling it,
        # so invalid comments were saved anyway. is_valid() must be called.
        if comment_form.is_valid():
            comment = comment_form.save(commit=False)
            comment.post = post
            comment.save()
            return redirect(request.path)
    else:
        comment_form = CommentForm()
    # comment_form.initial['name'] = 'Your name'
    return render_to_response('myBlog/blog_post.html',
                              {'post': post, 'comment_form': comment_form, },
                              context_instance=RequestContext(request))
def blog_home(request):
    """Render the blog index with the five most recent posts, newest first."""
    # Slicing the queryset directly lets the database apply LIMIT 5 instead
    # of materializing every post just to compute len() first; slicing past
    # the end of a queryset is safe and simply returns fewer rows, so the
    # min(n_blogs, 5) dance of the original is unnecessary.
    blog_list = Post.objects.all().order_by('-id')[:5]
    return render_to_response('myBlog/blog_home.html',
                              {'blog_list': blog_list, },
                              context_instance=RequestContext(request))
|
import time
from time import sleep
import random
import sys
from config import *
from camera_smoketest_config import *
############################################################
# Python 2 monkeyrunner smoke test: rapidly take pictures at every
# capture resolution, on both cameras.
# NOTE(review): device, wait, RestoreDefaults, OpenCameraSettings, DoBack,
# SwitchCamera and the *Key coordinates come from the config imports above
# -- behaviour of those helpers is assumed, not visible here.
print '*******************************************************************'
print 'Take pictures quickly'
print '*******************************************************************'
device.startActivity(component=runComponent)
wait(open_wait)
# One pass per camera (front/back, switched at the end of each pass).
for s in range(2):
    RestoreDefaults()
    OpenCameraSettings()
    # Step the capture resolution down to the smallest setting first.
    for i in range(PictureSizeNum):
        device.touch( CaptureResolutionDownKey.x, CaptureResolutionDownKey.y, 'DOWN_AND_UP')
        time.sleep(1)
    DoBack()
    wait(2)
    # For each resolution: burst TestIterTimes shots, then step resolution up.
    for j in range(PictureSizeNum):
        for i in range(TestIterTimes):
            print 'takePicture No.%d'%i
            device.touch( CaptureKey.x, CaptureKey.y, 'DOWN_AND_UP')
            time.sleep(0.5)
        time.sleep(3)
        OpenCameraSettings()
        device.touch( CaptureResolutionUpKey.x, CaptureResolutionUpKey.y, 'DOWN_AND_UP')
        time.sleep(1)
        DoBack()
        time.sleep(1)
    SwitchCamera()
    wait(2)
exit()
|
import unittest
from terratalk.bitbucket_server import BitbucketServer
class TestMain(unittest.TestCase):
    """Checks BitbucketServer construction and pr() pull-request wiring."""

    def test_pr(self):
        credentials = {
            'base_url': 'https://foobar.com',
            'username': 'user',
            'password': 'pass',
        }
        bs = BitbucketServer(**credentials)
        bs.pr(
            project_key='FOOBAR',
            repository_slug='barfoo',
            pull_request_id=11,
        )
        # Construction should rewrite base_url to the REST 1.0 endpoint
        # while keeping the credentials as given.
        self.assertEqual(bs.base_url, credentials['base_url'] + '/rest/api/1.0')
        self.assertEqual(bs.username, credentials['username'])
        self.assertEqual(bs.password, credentials['password'])
        # pr() should store the pull-request coordinates verbatim.
        self.assertEqual(bs.project_key, 'FOOBAR')
        self.assertEqual(bs.repository_slug, 'barfoo')
        self.assertEqual(bs.pull_request_id, 11)
|
#learning generators in python
from itertools import *
def main():
    """Demo: print clock times from 10:10:10 up to 13:50:15 in 0:15:12 steps."""
    start, stop, step = (10, 10, 10), (13, 50, 15), (0, 15, 12)
    for stamp in trange(start, stop, step):
        print(stamp)
def trange(start, end, inc):
    """Yield (h, m, s) tuples from start (inclusive) to end (exclusive),
    advancing by inc each step.

    start, end, inc: 3-sequences of ints (hours, minutes, seconds);
    each component of inc is assumed to be < 60.
    """
    curr = list(start)
    end = list(end)
    while curr < end:
        yield tuple(curr)
        curr = [a + b for a, b in zip(curr, inc)]
        # BUG FIX: the original used `> 60`, which let the invalid value 60
        # survive in the minutes/seconds slots, and normalized minutes before
        # seconds so a cascading carry (e.g. 59m 60s) was not resolved within
        # a single step. Normalize seconds first, with >= 60.
        for i in (2, 1):
            if curr[i] >= 60:
                curr[i] -= 60
                curr[i - 1] += 1
def firstn(g, n):
    """Yield the first n items produced by iterator g."""
    taken = 0
    while taken < n:
        yield next(g)
        taken += 1
#fibonacci example
def nFibonacci(n):
    """Yield the first n Fibonacci numbers, starting 0, 1, 1, 2, ..."""
    a, b = 0, 1
    for _ in range(n):
        yield a
        a, b = b, a + b
def fibonacci():
    """Yield Fibonacci numbers indefinitely: 0, 1, 1, 2, 3, 5, ..."""
    prev, curr = 0, 1
    while True:
        yield prev
        prev, curr = curr, prev + curr
def permutations(items):
    """Recursively yield every ordering of items, each as a list.

    Note: shadows itertools.permutations from the star import above,
    as in the original.
    """
    if not items:
        yield []
        return
    for idx in range(len(items)):
        remaining = items[:idx] + items[idx + 1:]
        for tail in permutations(remaining):
            yield [items[idx]] + tail
# Run the demo only when executed as a script, not on import (standard
# entry-point guard; the original called main() unconditionally).
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import __future__
import sys
sys.stdin = open("./stdin.txt", 'r')
def print_full_name(a, b):
    """Print the greeting for first name *a* and last name *b*."""
    print("Hello %s %s! You just delved into python." % (a, b))
# Script entry: reads two lines (first and last name) from the redirected
# stdin. NOTE(review): raw_input is Python 2 only.
print_full_name(raw_input(),raw_input())
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import util.py2exe_helper
import time
from objects_canvas.strategy import Strategy
from objects_canvas.move_strategy import MoveAndSelectStrategy
from commands.simulation_command import SimulationCommand
import traceback
import json
from petri import petri
import wx
import wx.aui
import wx.grid
import wx.lib.buttons
import itertools
import petrigui.add_object_strategy
import petrigui.petri_objects
import petrigui.petri_panel
import petri.net_properties as net_properties
import stategraph.graph_frame
from petri.net_properties import Tristate
class SimulateStrategy(Strategy):
    """Mouse strategy that fires an enabled transition when it is clicked."""

    def __init__(self, panel):
        super(SimulateStrategy, self).__init__(panel=panel)
        self.allow_undo_redo = True

    def on_left_down(self, event):
        super(SimulateStrategy, self).on_left_down(event)
        clicked = self.panel.get_object_at(self.mouse_x, self.mouse_y)
        if not isinstance(clicked, petri.Transition):
            return
        if not clicked.is_enabled:
            return
        # Record the firing as an undoable command, then fire and repaint.
        self.panel.append_command(SimulationCommand(self.panel, clicked))
        clicked.fire()
        self.panel.Refresh()
        self.panel.on_petri_changed()
class Buffer(object):
    """Clipboard-style holder for copied places and transitions."""

    def __init__(self):
        self.places = None
        self.transitions = None

    def set_content(self, places, transitions):
        """Store a snapshot of copied objects."""
        self.places, self.transitions = places, transitions

    def reset_content(self):
        """Drop whatever is stored."""
        self.places = self.transitions = None

    def get_content(self):
        """Return the stored (places, transitions) pair."""
        return (self.places, self.transitions)

    def is_empty(self):
        """True when nothing has been stored."""
        return self.places is None and self.transitions is None
class ImportExportFormat(object):
    """Base class describing one on-disk format for petri nets."""
    @classmethod
    def get_formats(self):
        # Returns (label, wildcard) pairs; subclasses prepend their own.
        return [('All files', '*.*')]
    @classmethod
    def get_wildcard(self):
        # Builds the wx.FileDialog wildcard string from get_formats().
        return '|'.join('%s (%s)|%s'%(label, wc, wc) for (label, wc) in self.get_formats())
    @classmethod
    def export_net(cls, petri):
        # Serialize `petri` to a string. Subclasses must override.
        raise NotImplementedError
    @classmethod
    def import_net(cls, s):
        # Parse a net from string `s`. Subclasses must override.
        raise NotImplementedError
    # Default: not a lossless storage format; persistent subclasses override.
    _persistent = False
    # NOTE(review): stacking @classmethod over @property does not produce a
    # working class-level property on Python 2 (nor on Python < 3.9);
    # accessing `persistent` likely does not return cls._persistent here --
    # confirm how (or whether) callers actually use this attribute.
    @classmethod
    @property
    def persistent(cls):
        """ True if petri net can be stored in this format without (major) loss. False if it's just an export/import format """
        return cls._persistent
class TxtFormat(ImportExportFormat):
    """Plain-text import/export of petri nets (not a lossless save format)."""
    name = 'TXT file'
    description = 'TXT file'

    @classmethod
    def get_formats(cls):
        base = super(TxtFormat, cls).get_formats()
        return [('TXT files', '*.txt')] + base

    @classmethod
    def export_net(cls, petri):
        """Serialize the net via its own textual representation."""
        return petri.to_string()

    @classmethod
    def import_net(cls, s):
        """Parse a net from text; positions are recomputed automatically."""
        parsed = petrigui.petri_objects.GUIPetriNet.from_string(s)
        parsed.automatic_layout()
        return parsed
class JSONFormat(ImportExportFormat):
    """JSON import/export; lossless, so usable as the native save format."""
    name = 'JSON file'
    description = 'JSON file'
    _persistent = True

    @classmethod
    def get_formats(cls):
        base = super(JSONFormat, cls).get_formats()
        return [('JSON files', '*.json')] + base

    @classmethod
    def export_net(cls, petri):
        """Dump the net's JSON structure to a string."""
        return json.dumps(petri.to_json_struct())

    @classmethod
    def import_net(cls, s):
        """Rebuild a GUI net from its JSON structure."""
        return petrigui.petri_objects.GUIPetriNet.from_json_struct(json.loads(s))
# TODO: Well, probably we will have to create some unified interface to all those properties.
# And also,
class GUIProperty(object):
    """Binds one attribute of a properties object to a UI widget.

    Subclasses implement create_element (build the widget) and
    show_to_ui (push a value into it).
    """

    def __init__(self, field, properties, **kwargs):
        self.field = field
        self.properties = properties
        self.parent = None
        self.proportion = 0  # sizer proportion; subclasses may override

    def init_ui(self, parent):
        """Create the widget tree, remember the parent, return the widget."""
        self.element = self.create_element(parent)
        self.parent = parent
        return self.element

    def create_element(self, parent):
        raise NotImplementedError

    def get_value(self):
        """Read the bound attribute from the properties object."""
        return getattr(self.properties, self.field)

    def update(self):
        """Re-read the bound attribute and push it to the UI."""
        self.show_to_ui(self.get_value())

    def show_to_ui(self, value):
        raise NotImplementedError()
class LabeledProperty(GUIProperty):
    """Property rendered as a horizontal 'label: value' pair."""

    def __init__(self, field, properties, label, **kwargs):
        super(LabeledProperty, self).__init__(field, properties, **kwargs)
        self.label = label + ': '

    def create_element(self, parent):
        row = wx.BoxSizer(wx.HORIZONTAL)
        caption = wx.StaticText(parent, label=self.label)
        value_widget = self.create_element_for_label(parent)
        row.Add(caption, flag=wx.CENTER)
        row.Add(value_widget, flag=wx.CENTER, proportion=1)
        return row

    def create_element_for_label(self, parent):
        raise NotImplementedError()
class ValueProperty(LabeledProperty):
    """Shows the property value as read-only text."""

    def create_element_for_label(self, parent):
        self.__text = wx.TextCtrl(parent)
        self.__text.SetEditable(False)
        return self.__text

    def value_to_string(self, value):
        """Convert a raw value (possibly a Tristate) to display text."""
        if isinstance(value, Tristate):
            value = value.value
        return 'Unknown' if value is None else str(value)

    def show_to_ui(self, value):
        self.__text.SetValue(self.value_to_string(value))
class ValueListProperty(ValueProperty):
    """Shows an iterable of strings as a comma-separated list."""

    def value_to_string(self, value):
        return ', '.join(value)
class MatrixProperty(LabeledProperty):
    """Displays a matrix-valued property in a read-only wx.grid.Grid.

    Row/column labels are supplied lazily by callables so they reflect the
    current net each time the grid is refreshed; optional handlers react to
    double-clicks on row/column header labels.
    """
    def __init__(self, field, properties, label, row_label_getter, col_label_getter, row_dclick_handler=None, col_dclick_handler=None, **kwargs):
        super(MatrixProperty, self).__init__(field, properties, label, **kwargs)
        self.row_label_getter = row_label_getter      # callable -> iterable of row labels
        self.col_label_getter = col_label_getter      # callable -> iterable of column labels
        self.row_dclick_handler = row_dclick_handler  # optional, called with the grid event
        self.col_dclick_handler = col_dclick_handler  # optional, called with no arguments
        self.proportion = 1  # let the grid expand inside its sizer
    def create_element(self, parent):
        # Unlike LabeledProperty, the label sits above the grid (vertical box).
        result = wx.BoxSizer(wx.VERTICAL)
        label_elem = wx.StaticText(parent, label=self.label)
        self.__labeled_value_elem = self.create_element_for_label(parent)
        result.Add(label_elem, flag=wx.LEFT)
        result.Add(self.__labeled_value_elem, flag=wx.EXPAND, proportion=1)
        return result
    def create_element_for_label(self, parent):
        # Read-only grid; user may resize rows/columns but not edit cells.
        self.__grid = wx.grid.Grid(parent)
        self.__grid.EnableDragColSize(True)
        self.__grid.EnableDragRowSize(True)
        self.__grid.EnableEditing(False)
        self.__grid.CreateGrid(0,0)
        self.__grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_DCLICK, self.OnCellDClicked)
        return self.__grid
    def OnCellDClicked(self, event):
        # Label clicks report -1 for the axis that was not hit: col == -1
        # means a row label was clicked, row == -1 a column label.
        row,col = event.GetRow(), event.GetCol()
        if col==-1:
            self.OnRowDClicked(event)
        elif row==-1:
            self.OnColDClicked(event)
    def OnRowDClicked(self, event):
        if self.row_dclick_handler is not None:
            self.row_dclick_handler(event)
    def OnColDClicked(self, event):
        if self.col_dclick_handler is not None:
            self.col_dclick_handler()
    def to_list_matrix(self, value):
        # Dicts are shown as a two-column (key, value) matrix.
        # NOTE(review): relies on dict.items() being indexable -- true on
        # Python 2 (returns a list), not on Python 3.
        if isinstance(value, dict):
            value = value.items()
        return value
    def get_grid(self):
        return self.__grid
    def show_to_ui(self, value):
        """Resize the grid to match the matrix, then fill labels and cells."""
        matrix = self.to_list_matrix(value)
        r = len(matrix)
        try:
            c = len(matrix[0])
        except IndexError:
            c = 0  # empty matrix -> zero columns
        # Grow/shrink the grid in place rather than recreating it.
        rows, cols = self.__grid.GetNumberRows(), self.__grid.GetNumberCols()
        diff_r = r-rows
        diff_c = c-cols
        if diff_r>0:
            self.__grid.AppendRows(diff_r)
        elif diff_r<0:
            self.__grid.DeleteRows(pos=0, numRows=abs(diff_r))
        if diff_c>0:
            self.__grid.AppendCols(diff_c)
        elif diff_c<0:
            self.__grid.DeleteCols(pos=0, numCols=abs(diff_c))
        # zip() truncates to the shorter sequence, so extra labels are ignored.
        for i, name in zip(xrange(c), self.col_label_getter()):
            self.__grid.SetColLabelValue(i, str(name))
        for i,name in zip(xrange(r), self.row_label_getter()):
            self.__grid.SetRowLabelValue(i, str(name))
        # Reset every cell's colour too, since subclasses highlight rows.
        for i in xrange(r):
            for j in xrange(c):
                val = matrix[i][j]
                self.__grid.SetCellValue(i, j, str(val))
                self.__grid.SetCellBackgroundColour(i, j, wx.WHITE)
        #self.__grid.SetRowLabelSize(wx.grid.GRID_AUTOSIZE)
        #self.__grid.SetColLabelSize(wx.grid.GRID_AUTOSIZE)
def set_row_color(grid, row, color):
    """Paint the background of every cell in `row` of `grid` with `color`."""
    # range() instead of xrange(): iteration behaviour is identical on
    # Python 2 and keeps this helper portable to Python 3.
    for col in range(grid.GetNumberCols()):
        grid.SetCellBackgroundColour(row, col, color)
class TrapsMatrixProperty(MatrixProperty):
    """Traps matrix; rows for marked traps are highlighted green."""

    def show_to_ui(self, value):
        super(TrapsMatrixProperty, self).show_to_ui(value)
        grid = self.get_grid()
        for row, trap in enumerate(value):
            if trap.is_marked_trap:
                set_row_color(grid, row, wx.GREEN)
class DeadlocksMatrixProperty(MatrixProperty):
    """Deadlocks matrix; green rows contain a marked trap, olive rows a trap."""

    def show_to_ui(self, value):
        super(DeadlocksMatrixProperty, self).show_to_ui(value)
        grid = self.get_grid()
        for row, deadlock in enumerate(value):
            if deadlock.has_marked_trap:
                set_row_color(grid, row, wx.GREEN)
            elif deadlock.has_trap:
                set_row_color(grid, row, wx.Colour(128,128,0))
import wx.lib.scrolledpanel
class PropertiesTabPanelMixin(object):
    """Shared behaviour for a properties tab: an "Update properties" button
    followed by the tab's GUIProperty widgets, laid out vertically."""
    def __init__(self, parent, petri_panel, properties, properties_lst, **kwargs):
        # 'scrolled' is consumed here so it is not forwarded to the wx base.
        scrolled = kwargs.pop('scrolled', False)
        super(PropertiesTabPanelMixin, self).__init__(parent, **kwargs)
        self.petri_panel = petri_panel
        self.properties = properties
        self.properties_lst = properties_lst
        sizer = wx.BoxSizer(wx.VERTICAL)
        update_button = wx.Button(self, id=wx.ID_ANY, label="Update properties")
        update_button.Bind(wx.EVT_BUTTON, self.OnUpdate)
        sizer.Add(update_button)
        # Scrolled tabs keep the button fixed and scroll only the widgets,
        # so those go into a nested sizer.
        if scrolled:
            additional_sizer = wx.BoxSizer(wx.VERTICAL)
        else:
            additional_sizer = sizer
        for prop in self.properties_lst:
            element = prop.init_ui(self)
            additional_sizer.Add(element, flag=wx.EXPAND | wx.ALL, proportion=prop.proportion, border=3)
        if scrolled:
            sizer.Add(additional_sizer, flag=wx.EXPAND)
        self.SetSizer(sizer)
        try:
            self.SetupScrolling()
        except:
            # Only the ScrolledPanel subclass provides SetupScrolling; plain
            # wx.Panel tabs land here, and that is expected (best-effort).
            pass
    def OnUpdate(self, event):
        self.update_properties()
    def update_properties(self):
        """Recompute the net's properties and refresh every widget on this tab."""
        #print "HERE ME CALLING"
        #print traceback.print_stack()
        self.properties._reset(self.petri_panel.petri)
        for prop in self.properties_lst:
            prop.update()
        #self.Layout()
        #self.Fit()
        self.Refresh()
class ScrolledPropertiesTabPanel(PropertiesTabPanelMixin, wx.lib.scrolledpanel.ScrolledPanel):
    """Scrollable variant of the properties tab."""

    def __init__(self, *args, **kwargs):
        # Force the mixin into its scrolled layout mode.
        kwargs['scrolled'] = True
        super(ScrolledPropertiesTabPanel, self).__init__(*args, **kwargs)
class UsualPropertiesTabPanel(PropertiesTabPanelMixin, wx.Panel):
    """Non-scrolling properties tab (no SetupScrolling; the mixin's
    try/except tolerates its absence)."""
    pass
class PropertiesPanel(wx.Panel):
    """Notebook of analysis tabs (classification, incidence matrix,
    T/S invariants, deadlocks & traps, reachability graph) for one net."""
    def __init__(self, parent, petri_panel, **kwargs):
        super(PropertiesPanel, self).__init__(parent, **kwargs)
        self.petri_panel = petri_panel
        self.properties = net_properties.PetriProperties(self.petri_panel.petri)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.tabs = wx.aui.AuiNotebook(self)
        self.tabs.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, lambda event:event.Veto()) # Forbid closing
        sizer.Add(self.tabs, flag=wx.EXPAND, proportion=1)
        self.SetSizer(sizer)
        # Lazy label providers for the matrix grids; default arguments bind
        # the panel now so the lambdas stay valid later.
        transition_lambda = lambda petri_panel=self.petri_panel:(transition.unique_id for transition in
                                                                 petri_panel.petri.get_sorted_transitions())
        place_lambda = lambda petri_panel=self.petri_panel:(place.unique_id for place in
                                                                 petri_panel.petri.get_sorted_places())
        ordinal_lambda = lambda:itertools.count(1)
        empty_lambda = lambda:itertools.cycle([''])
        clsf_properties = [('State machine', 'state_machine'),
                           ('Marked graph','marked_graph'),
                           ('Free choice net','free_choice'),
                           ('Extended free choice net','extended_free_choice'),
                           ('Simple','simple'),
                           ('Asymmetric','asymmetric')]
        clsf_properties = [ValueProperty(field, self.properties, label=label) for label,field in clsf_properties]
        #classification_properties
        self.tabs.AddPage(ScrolledPropertiesTabPanel(self, self.petri_panel, self.properties, clsf_properties), caption="Classification")
        # incidence matrix
        im_property = MatrixProperty('incidence_matrix', self.properties, label='Incidence matrix',
                                     row_label_getter=place_lambda, col_label_getter=transition_lambda)
        #liveness = ValueProperty('liveness', self.properties, label='Liveness')
        incidence_properties = [im_property]
        self.tabs.AddPage(UsualPropertiesTabPanel(self, self.petri_panel, self.properties, incidence_properties), caption="Incidence matrix")
        # t-invariants
        t_invariants_prop = MatrixProperty('t_invariants', self.properties, label='T invariants',
                                           row_label_getter=ordinal_lambda, col_label_getter=transition_lambda,
                                           row_dclick_handler=self.transition_selector)
        uncovered_by_t = ValueListProperty('t_uncovered', self.properties, label='Transitions not covered by T invariants')
        consistency = ValueProperty('consistency', self.properties, label='Consistency')
        A_rank = ValueProperty('A_rank', self.properties, label='Incidence matrix rank')
        Ax_ineq_prop = MatrixProperty('Ax_ineq_sol', self.properties, label='Ax>0 inequation solutions:',
                                      row_label_getter=ordinal_lambda, col_label_getter=transition_lambda,
                                      row_dclick_handler=self.transition_selector)
        repeatable = ValueProperty('repeatable', self.properties, label='Repeatable')
        regulated = ValueProperty('regulated', self.properties, label='Regulated')
        t_inv_properties = [t_invariants_prop, uncovered_by_t, consistency, A_rank, Ax_ineq_prop, repeatable, regulated]
        self.tabs.AddPage(UsualPropertiesTabPanel(self, self.petri_panel, self.properties, t_inv_properties), caption='T invariants')
        # s-invariants
        s_invariants_prop = MatrixProperty('s_invariants', self.properties, label='S invariants',
                                           row_label_getter=ordinal_lambda, col_label_getter=place_lambda)
        uncovered_by_s = ValueListProperty('s_uncovered', self.properties, label='Places not covered by S invariants')
        place_limits = MatrixProperty('place_limits', self.properties, label='Token limits',
                                      row_label_getter=empty_lambda, col_label_getter=lambda:['Place', 'Limit'])
        bounded_by_s = ValueProperty('bounded_by_s', self.properties, label='Bounded by S')
        structural_boundness = ValueProperty('structural_boundedness', self.properties, label='Structural boundness')
        s_inv_properties = [s_invariants_prop, uncovered_by_s, place_limits, bounded_by_s, structural_boundness]
        self.tabs.AddPage(UsualPropertiesTabPanel(self, self.petri_panel, self.properties, s_inv_properties), caption='S invariants')
        # deadlocks and traps
        deadlocks_prop = DeadlocksMatrixProperty('deadlocks', self.properties, label='Deadlocks (green deadlocks have marked trap, olive have trap)',
                                                 row_label_getter=ordinal_lambda, col_label_getter=place_lambda,
                                                 row_dclick_handler=self.place_selector)
        traps_prop = TrapsMatrixProperty('traps', self.properties, label='Traps (green are marked traps)',
                                         row_label_getter=ordinal_lambda, col_label_getter=place_lambda,
                                         row_dclick_handler=self.place_selector)
        liveness = ValueProperty('liveness', self.properties, label='Liveness')
        structural_liveness = ValueProperty('structural_liveness', self.properties, label='Structural liveness')
        dl_trap_properties = [deadlocks_prop, traps_prop, liveness, structural_liveness]
        self.tabs.AddPage(UsualPropertiesTabPanel(self, self.petri_panel, self.properties, dl_trap_properties), caption='Deadlocks & traps')
        self.tabs.AddPage(stategraph.graph_frame.GraphAndAnalysisPanel(self, self.petri_panel), caption='Reachability graph')
        self.tabs.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnPageChanged)
        #self.update_properties()
    def OnPageChanged(self, event):
        # Propagate the selected tab index up so it survives tab switches.
        self.Parent.OnPageChanged(self.tabs.GetSelection())
    def object_selector(self, event, objects):
        """Highlight the objects whose cell in the double-clicked row is non-zero."""
        row = event.GetRow()
        grid = event.GetEventObject()
        cols = grid.GetNumberCols()
        result = {}
        for i,obj in enumerate(objects):
            val = grid.GetCellValue(row, i)
            if int(val):
                result[obj] = val
        self.petri_panel.highlight_objects(result)
    def transition_selector(self, event):
        self.object_selector(event, self.petri_panel.petri.get_sorted_transitions())
    def place_selector(self, event):
        self.object_selector(event, self.petri_panel.petri.get_sorted_places())
    def update_properties(self):
        # Only refresh the currently visible tab; others update when selected.
        page = self.tabs.GetPage(self.tabs.GetSelection())
        page.update_properties()
class PetriAndProperties(wx.SplitterWindow):
    """One notebook tab: a splitter holding the properties panel and the
    petri-net drawing panel, with a sash position shared across tabs."""
    def __init__(self, parent, frame, clip_buffer, tab_splitter_position, **kwargs):
        super(PetriAndProperties, self).__init__(parent, **kwargs)
        self.frame = frame
        self.tab_splitter_position = tab_splitter_position
        self.petri_panel = petrigui.petri_panel.PetriPanel(self, frame=frame, clip_buffer=clip_buffer)
        self.properties_panel = PropertiesPanel(self, self.petri_panel)
        #self.SplitHorizontally(self.properties_panel, self.petri_panel, sashPosition=100)
        #self.Unsplit()
        self.SetMinimumPaneSize(self.properties_panel.tabs.GetTabCtrlHeight()*2)
        self.SplitVertically(self.properties_panel, self.petri_panel, sashPosition=100)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        # The first sash event is fired by layout, not the user, so it is
        # ignored to avoid clobbering the shared position.
        self.first_sash_event = False
        self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.OnSashPosChanged)
    def update_splitter_position(self, pos):
        self.tab_splitter_position = pos
        self.SetSashPosition(self.tab_splitter_position)
    def OnSize(self, event):
        # Keep the sash at the shared position when the window resizes.
        self.update_splitter_position(self.tab_splitter_position)
        event.Skip()
    def OnSashPosChanged(self, event):
        if not self.first_sash_event:
            self.first_sash_event = True
        else:
            # User drag: remember locally and publish to the frame so all
            # tabs share the same sash position.
            self.tab_splitter_position = self.GetSashPosition()
            self.frame.set_tab_splitter_position(self.GetSashPosition())
        event.Skip()
    def OnPageChanged(self, page_number):
        self.frame.tab_page_number = page_number
    def set_tab_page_number(self, page_number):
        self.properties_panel.tabs.SetSelection(page_number)
    def update_properties(self):
        self.properties_panel.update_properties()
class MainFrame(wx.Frame):
def __init__(self, parent, title):
super(MainFrame, self).__init__(parent, title=title,
size=(1400, 800))
ib=wx.IconBundle()
ib.AddIconFromFile("assets/icon.ico",wx.BITMAP_TYPE_ANY)
self.SetIcons(ib)
self.splitter_orientation = wx.SPLIT_VERTICAL
self.tab_page_number = 0
self.clip_buffer = Buffer()
vert_sizer = wx.BoxSizer(wx.VERTICAL)
buttons_sizer = wx.BoxSizer(wx.HORIZONTAL)
formats = [TxtFormat,]
menubar = wx.MenuBar()
fileMenu = wx.Menu()
new_page_item = fileMenu.Append(wx.ID_NEW, '&New\tCtrl+N', 'New Petri net')
open_item = fileMenu.Append(wx.ID_OPEN, '&Open\tCtrl+O', 'Open petri net')
self.close_item = fileMenu.Append(wx.ID_CLOSE, '&Close\tCtrl+W', 'Close current net')
fileMenu.AppendSeparator()
self.save_item = fileMenu.Append(wx.ID_SAVE, '&Save\tCtrl+S', 'Save petri net')
self.save_as_item = fileMenu.Append(wx.ID_SAVEAS, 'S&ave as\tCtrl+Shift+S', 'Save petri net as')
fileMenu.AppendSeparator()
import_menu = wx.Menu()
export_menu = wx.Menu()
for format in formats:
def import_handler(event):
self.open(format)
def export_handler(event):
self.save_as(format)
import_item = import_menu.Append(wx.NewId(), format.name, format.description)
self.Bind(wx.EVT_MENU, import_handler, import_item)
export_item = export_menu.Append(wx.NewId(), format.name, format.description)
self.Bind(wx.EVT_MENU, export_handler, export_item)
fileMenu.AppendMenu(wx.NewId(), '&Import from', import_menu)
fileMenu.AppendMenu(wx.NewId(), '&Export to', export_menu)
fileMenu.AppendSeparator()
quit_item = fileMenu.Append(wx.ID_EXIT, '&Quit\tCtrl+Q', 'Quit application')
menubar.Append(fileMenu, '&File')
editMenu = wx.Menu()
self.undo_item = editMenu.Append(wx.ID_UNDO, '&Undo\tCtrl+Z', 'Undo last action')
self.redo_item = editMenu.Append(wx.ID_REDO, '&Redo\tCtrl+Y', 'Redo last action')
editMenu.AppendSeparator()
self.cut_item = editMenu.Append(wx.ID_CUT, '&Cut\tCtrl+X', 'Cut elements')
self.copy_item = editMenu.Append(wx.ID_COPY, 'C&opy\tCtrl+C', 'Copy elements')
self.paste_item = editMenu.Append(wx.ID_PASTE, '&Paste\tCtrl+V', 'Paste elements')
self.delete_item = editMenu.Append(wx.ID_DELETE, '&Delete\tDelete', 'Delete elements')
editMenu.AppendSeparator()
self.select_all_item = editMenu.Append(wx.ID_SELECTALL, '&Select all\tCtrl+A', 'Select all elements')
menubar.Append(editMenu, '&Edit')
viewMenu = wx.Menu()
self.zoom_in_item = viewMenu.Append(wx.NewId(), 'Zoom &in\tCtrl++', 'Zoom in')
self.zoom_out_item = viewMenu.Append(wx.NewId(), 'Zoom &out\tCtrl+-', 'Zoom out')
self.zoom_restore_item = viewMenu.Append(wx.NewId(), '&Zoom restore\tCtrl+R', 'Zoom restore')
viewMenu.AppendSeparator()
change_splitter_orientation = viewMenu.Append(wx.NewId(), 'Change splitter orientation', 'Change splitter orientation')
menubar.Append(viewMenu, '&View')
layoutMenu = wx.Menu()
self.automatic_layout_item = layoutMenu.Append(wx.NewId(), '&Automatic layout', 'Automatic layout of current net')
menubar.Append(layoutMenu, '&Layout')
self.CreateStatusBar()
self.tab_splitter_position = 700
self.SetMenuBar(menubar)
# Menu bindings
# File
self.Bind(wx.EVT_MENU, self.OnNew, new_page_item)
self.Bind(wx.EVT_MENU, self.OnOpen, open_item)
self.Bind(wx.EVT_MENU, self.OnClose, self.close_item)
# --- separator ---
self.Bind(wx.EVT_MENU, self.OnSave, self.save_item)
self.Bind(wx.EVT_MENU, self.OnSaveAs, self.save_as_item)
# --- separator ---
self.Bind(wx.EVT_MENU, self.OnQuit, quit_item)
# Edit
self.Bind(wx.EVT_MENU, self.OnUndo, self.undo_item)
self.Bind(wx.EVT_MENU, self.OnRedo, self.redo_item)
# --- separator ---
self.Bind(wx.EVT_MENU, self.OnCut, self.cut_item)
self.Bind(wx.EVT_MENU, self.OnCopy, self.copy_item)
self.Bind(wx.EVT_MENU, self.OnPaste, self.paste_item)
self.Bind(wx.EVT_MENU, self.OnDelete, self.delete_item)
# --- separator ---
self.Bind(wx.EVT_MENU, self.OnSelectAll, self.select_all_item)
# View
self.Bind(wx.EVT_MENU, self.OnZoomIn, self.zoom_in_item)
self.Bind(wx.EVT_MENU, self.OnZoomOut, self.zoom_out_item)
self.Bind(wx.EVT_MENU, self.OnZoomRestore, self.zoom_restore_item)
# --- separator ---
self.Bind(wx.EVT_MENU, self.OnChangeSplitterOrientation, change_splitter_orientation)
# Layout
self.Bind(wx.EVT_MENU, self.OnKKLayout, self.automatic_layout_item)
# Bind close
self.Bind(wx.EVT_CLOSE, self.OnQuit)
# Button bitmaps
bmp_mouse = wx.Bitmap("assets/icons/arrow.png", wx.BITMAP_TYPE_ANY)
bmp_animate = wx.Bitmap("assets/icons/animate.png", wx.BITMAP_TYPE_ANY)
bmp_newplace = wx.Bitmap("assets/icons/addplace.png", wx.BITMAP_TYPE_ANY)
bmp_newtransition = wx.Bitmap("assets/icons/addtransition.png", wx.BITMAP_TYPE_ANY)
bmp_newarc = wx.Bitmap("assets/icons/addarc.png", wx.BITMAP_TYPE_ANY)
mouse_button = wx.lib.buttons.GenBitmapToggleButton(self, bitmap=bmp_mouse)
self.buttons = [(mouse_button, MoveAndSelectStrategy(self.panel_getter)),
(wx.lib.buttons.GenBitmapToggleButton(self, bitmap=bmp_animate), SimulateStrategy(self.panel_getter)),
(wx.lib.buttons.GenBitmapToggleButton(self, bitmap=bmp_newplace), petrigui.add_object_strategy.AddPlaceStrategy(self.panel_getter)),
(wx.lib.buttons.GenBitmapToggleButton(self, bitmap=bmp_newtransition), petrigui.add_object_strategy.AddTransitionStrategy(self.panel_getter)),
(wx.lib.buttons.GenBitmapToggleButton(self, bitmap=bmp_newarc), petrigui.add_object_strategy.AddArcStrategy(self.panel_getter)),
]
for button,_ in self.buttons:
buttons_sizer.Add(button)
self.Bind(wx.EVT_BUTTON, self.on_toggle_button)
self.buttons = dict(self.buttons)
self.strategy = self.buttons[mouse_button]
self.toggle_button(mouse_button)
vert_sizer.Add(buttons_sizer)
self.tabs = wx.aui.AuiNotebook(self)
self.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnPageChanged, self.tabs)
self.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnPageClose, self.tabs)
self._unnamed_count = 0
self.add_new_page()
vert_sizer.Add(self.tabs, proportion=1, flag=wx.EXPAND|wx.TOP, border=1)
self.SetSizer(vert_sizer)
self.Centre()
self.Show()
def OnChangeSplitterOrientation(self, event):
self.splitter_orientation = wx.SPLIT_HORIZONTAL if self.splitter_orientation == wx.SPLIT_VERTICAL \
else wx.SPLIT_VERTICAL
self.update_splitter_orientation()
def set_tab_splitter_position(self, position):
self.tab_splitter_position = position
def OnZoomIn(self, event):
self.petri_panel.zoom_in()
def OnZoomOut(self, event):
self.petri_panel.zoom_out()
def OnZoomRestore(self, event):
self.petri_panel.zoom_restore()
def create_new_panel(self):
return PetriAndProperties(self.tabs, frame=self, clip_buffer=self.clip_buffer, tab_splitter_position=self.tab_splitter_position)
#return petrigui.petri_panel.PetriPanel(self.tabs, frame=self, clip_buffer=self.clip_buffer)
def on_state_changed(self):
self.SetStatusText(str(self.petri_panel.petri.get_state()))
def add_new_page(self):
petri_panel = self.create_new_panel()
title = petri_panel.GetName()
self._unnamed_count += 1
title = '%s %s'%(title, (str(self._unnamed_count) if self._unnamed_count else ''))
petri_panel.SetName(title)
self.tabs.AddPage(petri_panel, petri_panel.petri_panel.get_name(), select=True)
self.update_menu()
def panel_getter(self):
return self.current_tab.petri_panel
@property
def current_tab(self):
return self.tabs.GetPage(self.tabs.Selection)
petri_panel = property(panel_getter)
def toggle_button(self, button_on):
button_on.SetValue(True)
for button in self.buttons:
if button!=button_on:
button.SetValue(False)
def on_toggle_button(self, event):
button_on = event.GetEventObject()
if not button_on.GetValue(): #can't untoggle
button_on.SetValue(True)
return
for button in self.buttons:
if button != button_on:
button.SetValue(False)
self.strategy.on_switched_strategy()
self.strategy = self.buttons[button_on]
self.update_menu()
self.petri_panel.SetFocus()
def OnKKLayout(self, event):
petri = self.petri_panel.petri
new_petri = petri.__class__.from_json_struct(petri.to_json_struct())
new_petri.remove_arc_points()
new_petri.automatic_layout()
self.add_new_page()
self.petri_panel.petri = new_petri
self.petri_panel.update_bounds()
self.petri_panel.Refresh()
def OnUndo(self, event):
self.petri_panel.undo()
self.update_menu()
def OnCut(self, event):
self.petri_panel.cut()
self.update_menu()
def OnCopy(self, event):
self.petri_panel.copy()
self.update_menu()
def OnPaste(self, event):
self.petri_panel.paste()
self.update_menu()
def OnDelete(self, event):
self.petri_panel.delete()
def OnNew(self, event):
self.add_new_page()
def OnSelectAll(self, event):
self.petri_panel.select_all()
def update_menu(self):
self.refresh_undo_redo()
if self.tabs.GetPageCount():
tab_name = self.petri_panel.get_name()
self.tabs.SetPageText(self.tabs.Selection, tab_name)
def refresh_undo_redo(self):
enable = True
if self.tabs.GetPageCount()==0:
enable = False
self.redo_item.Enable(enable and self.petri_panel.can_redo())
self.undo_item.Enable(enable and self.petri_panel.can_undo())
self.paste_item.Enable(enable and not self.clip_buffer.is_empty() and self.petri_panel.can_paste())
self.cut_item.Enable(enable and self.petri_panel.can_cut() )
self.copy_item.Enable(enable and self.petri_panel.can_copy() )
self.delete_item.Enable(enable and self.petri_panel.can_delete())
self.select_all_item.Enable(enable and self.petri_panel.can_select())
self.automatic_layout_item.Enable(enable)
self.close_item.Enable(enable)
self.save_as_item.Enable(enable)
self.save_item.Enable(enable and (self.petri_panel.has_unsaved_changes or ( self.petri_panel.filepath is None)))
def OnPageClose(self, event):
if not self.close_tab(event.Selection):
event.Veto()
else:
wx.CallAfter(self.update_menu)
def OnOpen(self, event):
self.open(JSONFormat)
def open(self, format):
dlg = wx.FileDialog(
self, message="Open file",
defaultFile="", wildcard=format.get_wildcard(), style=wx.OPEN
)
if dlg.ShowModal() != wx.ID_OK:
return
filepath = dlg.GetPath()
panel = self.create_new_panel()
try:
a = time.time()
panel.petri_panel.load_from_file(filepath, format=format)
print 'time to load',time.time()-a
except Exception, e:
self.DisplayError('Error while loading petri net:\n%s'%traceback.format_exc(), title='Error while opening file')
else:
#panel.update_properties()
self.tabs.AddPage(panel, panel.petri_panel.get_name(), select=True)
def close_tab(self, pos):
""" Returns false if user decided not to close the tab """
tab = self.tabs.GetPage(pos)
if not tab.petri_panel.has_unsaved_changes:
return True
dlg = wx.MessageDialog(self, message='There are unsaved changes in "%s". Save?'%tab.petri_panel.GetName(), style=wx.YES_NO|wx.CANCEL|wx.CENTER)
result = dlg.ShowModal()
if result == wx.ID_YES:
tab.save()
elif result == wx.ID_CANCEL:
return False
return True
    def OnClose(self, event):
        """Close the current tab, offering to save unsaved changes first."""
        if self.close_tab(self.tabs.Selection):
            self.tabs.DeletePage(self.tabs.Selection)
            self.update_menu()
    def DisplayError(self, message, title='Error'):
        """Show *message* in a modal error box titled *title*."""
        wx.MessageBox(message=message, caption=title, style=wx.ICON_ERROR)
    def save_as(self, format):
        """Save the active panel under a new name using *format*.

        Errors are reported in a dialog instead of propagating.
        """
        try:
            self.petri_panel.save_as(format)
        except Exception, e:
            self.DisplayError('Error while saving petri net:\n%s'%traceback.format_exc(), title='Error while saving file')
        self.update_menu()
    def OnSave(self, event):
        """Menu handler: save the active panel in the JSON format."""
        try:
            self.petri_panel.save(JSONFormat)
        except Exception, e:
            self.DisplayError('Error while saving petri net:\n%s'%traceback.format_exc(), title='Error while saving file')
        self.update_menu()
    def OnSaveAs(self, event):
        """Menu handler: save the active panel under a new name (JSON)."""
        self.save_as(JSONFormat)
    def on_command_append(self):
        """Called after a new undoable command is recorded; refresh menus."""
        self.update_menu()
    def OnPageChanged(self, event):
        """Handle tab switches: refresh menus, focus the canvas and sync
        the splitter layout/page number to the newly selected tab."""
        self.update_menu()
        self.on_state_changed()
        self.petri_panel.SetFocus()
        self.update_splitter_orientation()
        self.current_tab.set_tab_page_number(self.tab_page_number)
    def update_splitter_orientation(self):
        """Apply the frame-wide splitter orientation and position to the
        current tab, re-laying it out only when the mode changed."""
        tab = self.current_tab
        if tab.GetSplitMode() != self.splitter_orientation:
            tab.SetSplitMode(self.splitter_orientation)
            tab.SendSizeEvent()
        tab.update_splitter_position(self.tab_splitter_position)
        self.Refresh()
    def OnRedo(self, event):
        """Menu handler: redo the last undone command on the active panel."""
        self.petri_panel.redo()
        self.update_menu()
    def quit(self):
        """Offer to save every dirty tab; return False if the user cancels.

        Each dirty tab is selected first so the user can see which net
        the save prompt refers to.
        """
        page_count = self.tabs.GetPageCount()
        for i in xrange(page_count):
            tab = self.tabs.GetPage(i)
            if not tab.petri_panel.has_unsaved_changes:
                continue
            self.tabs.Selection = i
            if not self.close_tab(i):
                return False
        return True
    def OnQuit(self, event):
        """Menu handler: quit, unless the user cancels during save prompts."""
        if self.quit():
            self.Destroy()
if __name__ == '__main__':
    # NOTE(review): the frame is constructed before wx_app is imported;
    # this relies on an earlier import having created the wx.App
    # instance — confirm against util.wx_app.
    MainFrame(None, 'Petri net editor')
    from util import wx_app
    wx_app.app.MainLoop()
|
from __future__ import print_function
# -*- coding:utf-8 -*-
__author__ = "ganbin"
import httplib2
import os
import re
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import Gmail
import settings
import logging
# To retrieve new credentials interactively, uncomment the argparse block below.
# try:
# import argparse
# flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
# except ImportError:
# flags = None
class SheetAPI(object):
    """Builds the weekly new-hire report from Google Sheets data.

    Reads the roster sheet configured in ``settings``, cross-references
    daily-report mails (via a ``Gmail`` helper), and writes/renders the
    weekly summary sheet and mail body.
    """

    def __init__(self):
        # OAuth scopes: Drive plus Sheets read/write access.
        self._scopes = [
            'https://www.googleapis.com/auth/drive',
            'https://www.googleapis.com/auth/drive.file',
            'https://www.googleapis.com/auth/spreadsheets',
        ]
        self._client_secret_file = 'client_sheet_secret.json'
        self._application_name = 'Google Sheets API Python Quickstart'
        self.service = self.get_service()
        # Roster rows are fetched once, at construction time.
        self.values = self._get_commer_values()
        logging.basicConfig(filename='AutoScripts.log', level='DEBUG')
        self.logger = logging.getLogger(__name__)

    def _get_credentials(self):
        """Return stored user credentials, running the OAuth flow if the
        cached token is missing or invalid."""
        home_dir = os.path.expanduser('~')
        credential_dir = os.path.join(home_dir, '.credentials')
        if not os.path.exists(credential_dir):
            os.makedirs(credential_dir)
        credential_path = os.path.join(credential_dir,
                                       'sheets.googleapis.com-python-quickstart.json')
        store = Storage(credential_path)
        credentials = store.get()
        if not credentials or credentials.invalid:
            flow = client.flow_from_clientsecrets(self._client_secret_file, self._scopes)
            flow.user_agent = self._application_name
            # Bug fix: ``flags`` is only defined when the argparse block at
            # the top of this module is uncommented; fall back to None
            # instead of raising NameError when it is not.
            flags = globals().get('flags')
            if flags:
                credentials = tools.run_flow(flow, store, flags)
            else:  # Needed only for compatibility with Python 2.6
                credentials = tools.run(flow, store)
            print('Storing credentials to ' + credential_path)
        return credentials

    def get_service(self):
        """Build and return an authorized Sheets v4 service object."""
        credentials = self._get_credentials()
        http = credentials.authorize(httplib2.Http())
        discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                        'version=v4')
        service = discovery.build('sheets', 'v4', http=http,
                                  discoveryServiceUrl=discoveryUrl)
        return service

    def _get_commer_values(self):
        """Fetch the roster rows configured via settings.DATA_*."""
        values_range_name = settings.DATA_RANGE_NAME
        spreadsheetId = settings.DATA_SHEET_ID
        values = self._list_value_with_range_sheetId(values_range_name, spreadsheetId)
        return values

    def get_weekly_values_with_gmail(self, gmail):
        """Assemble the weekly report rows from the roster and Gmail.

        For every roster row with an e-mail address this collects the
        daily-report marks for the week, the probation target and the
        weekly score.  Returns ``(weekly_values, report_lack_list,
        score_lack_list)`` where the two lists track missing reports per
        new hire and missing scores per manager.
        """
        # Roster columns: 0 new hire name, 1 new-hire email, 2 probation
        # target sheet URL, 3 manager name, 4 manager email, 5 mentor
        # name, 6 entry date, 7 confirmed, 8 left, 9 status.
        values = self.values
        weekly_values = []
        target_address = 2
        new_name = 0
        new_email = 1
        mgr_name = 3
        mgr_email = 4
        mentor_name = 5
        entry_date = 6
        report_lack_list = []
        score_lack_list = []
        score_lack_dict = {}
        for value in values:
            daily_value = []
            report_lack_times = 0
            daily_value.append(value[new_name])
            daily_value.append(value[mgr_name])
            daily_value.append(value[mentor_name])
            daily_value.append(value[entry_date])
            if value[new_email]:
                daily_count = []
                # One (date, found) pair per day of the report week.
                weekly_count = gmail.count_daily_with_email(value[new_email])
                score_date = weekly_count[0][0]
                for d in weekly_count:
                    if d[1]:
                        daily_count.append(settings.FINDED_MARK)
                    else:
                        daily_count.append(settings.LACK_MARK)
                        report_lack_times += 1
            else:
                # Rows without an e-mail address are skipped entirely.
                continue
            # Bug fix: ``target``/``score`` were previously only assigned
            # inside the ``if re_sheetid`` branch, raising NameError for
            # rows whose target URL does not contain a sheet id.
            target = ''
            score = ''
            re_sheetid = re.search('d/(.+)?/', value[target_address])
            if re_sheetid:
                sheetid = re_sheetid.group(1)
                target_range_name = settings.TARGET_RANGE_NAME
                score_range_name = settings.SCORE_RANGE_NAME
                score = self.get_score(score_range_name, sheetid, score_date)
                if not score:
                    if score_lack_dict.get(value[mgr_name]):
                        score_lack_dict[value[mgr_name]] += 1
                    else:
                        score_lack_dict[value[mgr_name]] = 1
                target = self._list_value_with_range_sheetId(target_range_name, sheetid)
                if target:
                    target = target[0][0]
                else:
                    target = ''
            daily_value.append(target)
            daily_value.extend(daily_count)
            daily_value.append(score)
            report_lack_list.append((value[new_name], report_lack_times))
            weekly_values.append(daily_value)
        for mgr in score_lack_dict:
            score_lack_list.append((mgr, score_lack_dict[mgr]))
        return weekly_values, report_lack_list, score_lack_list

    def _list_value_with_range_sheetId(self, range_name, spreadsheetId, majorDimension='ROWS'):
        """Read *range_name* from *spreadsheetId*; return [] when empty."""
        values = []
        result = self.service.spreadsheets().values().get(spreadsheetId=spreadsheetId,
                                                          range=range_name,
                                                          majorDimension=majorDimension).execute()
        if result.get('values'):
            values = result['values']
        return values

    def update_sheet_title(self, match_date):
        """Write the header row (daily dates + weekly-score column) to the
        result sheet and return it as a one-row list."""
        # Bug fix: copy the settings list — extending it in place mutated
        # settings.RESULT_TITLE_1 and grew the header on every call.
        sheet_title = list(settings.RESULT_TITLE_1)
        daily_report_title = []
        week_score_title = settings.RESULT_TITLE_2[1] % (match_date[0], match_date[-1])
        for d in match_date:
            daily_report_date = settings.RESULT_TITLE_2[0] % d
            daily_report_title.append(daily_report_date)
        sheet_title.extend(daily_report_title)
        sheet_title.append(week_score_title)
        range_name = settings.RESULT_RANGE_NAME
        body = {
            'values': [sheet_title],
        }
        self._write_weekly_values(range_name, body)
        return [sheet_title]

    def _write_weekly_values(self, range_name, body):
        """Write *body* into the result sheet at *range_name* (USER_ENTERED)."""
        spreadsheet_id = settings.RESULT_SHEET_ID
        result = self.service.spreadsheets().values().update(
            spreadsheetId=spreadsheet_id,
            range=range_name,
            valueInputOption='USER_ENTERED',
            body=body).execute()
        return result

    def update_weekly_report(self, weekly_values):
        """Write the assembled weekly rows into the result sheet."""
        range_name = settings.RESULT_RANGE_NAME
        body = {
            'values': weekly_values
        }
        self._write_weekly_values(range_name, body)

    def get_score(self, range_name, spreadsheetId, date):
        """Return the value in the cell directly below the first cell
        containing *date* (searching column-wise), or '' when absent.

        Bug fix: ``score`` is pre-initialized, so a sheet with no
        matching date returns '' instead of raising UnboundLocalError.
        """
        score = ''
        values = self._list_value_with_range_sheetId(range_name, spreadsheetId, 'COLUMNS')
        for v in values:
            for d in v:
                if date in d:
                    index_date = v.index(d)
                    try:
                        score = v[index_date + 1]
                    except IndexError:
                        score = ''
                    finally:
                        break
        return score

    def make_table_html(self, weekly_values):
        """Render *weekly_values* (rows of string cells) as an
        inline-styled HTML table for the summary mail."""
        result = '<table cellspacing="0" cellpadding="0" dir="ltr" border="1" style="table-layout:fixed;font-size:13px;font-family:arial,sans,sans-serif;border-collapse:collapse;border:none">'
        result += '<colgroup><col width="209"><col width="209"><col width="80"><col width="86"><col width="86"><col width="93"><col width="197"><col width="197"><col width="93"></colgroup>'
        result += '<tbody>'
        for value in weekly_values:
            result += '<tr style="height:21px">'
            for i in value:
                result += '<td style="padding:2px 3px;background-color:rgb(255, 255, 255);border-color:rgb(0,0,0);font-family:arial;font-weight:bold;word-wrap:break-word;vertical-align:top;text-align:center" rowspan="1" colspan="2">' + i + '</td>'
            # Bug fix: a stray "</td>" used to be emitted after each row
            # here, producing malformed HTML.
            result += "</tr>"
        result += '</tbody>'
        result += "</table>"
        return result

    def make_message_text(self, report_lack_list, score_lack_list, html):
        """Build the mail body: lack summaries followed by the HTML table."""
        text = settings.RESULT_TEXT
        report_lacked = False
        score_lacked = False
        for r in report_lack_list:
            if r[1] == 0:
                continue
            else:
                report_lacked = True
                text += settings.NEW_LACK % (r)
        for s in score_lack_list:
            if s[1] == 0:
                continue
            else:
                score_lacked = True
                text += settings.MGR_LACK % (s)
        if not report_lacked and not score_lacked:
            text += settings.NO_ONE_LACK
        elif not report_lacked:
            text += settings.NO_NEW_LACK
        elif not score_lacked:
            text += settings.NO_MGR_LACK
        text += html
        return text
def main():
    """Build this week's report sheet and leave a summary mail draft."""
    gmail = Gmail.GamilAPI()
    match_date = gmail.get_match_date()
    sheet = SheetAPI()
    weekly_values, report_lack_list, score_lack_list = sheet.get_weekly_values_with_gmail(gmail)
    sheet.update_weekly_report(weekly_values)
    sheet_title = sheet.update_sheet_title(match_date)
    subject = "New nondar Daily Report & Weekly Score Update"
    # Prepend the header row so the HTML table includes it.
    sheet_title.extend(weekly_values)
    html = sheet.make_table_html(sheet_title)
    message_text = sheet.make_message_text(report_lack_list, score_lack_list, html)
    message = gmail.create_message('xiaoxi@nonda.us', 'ganbinwen@nonda.me', subject, message_text)
    gmail.create_draft(message)
if __name__ == '__main__':
    main()
|
def rm_mtpls(base, nums):
    """Mark every multiple of *base* (from 2*base upward) as non-prime.

    *nums* is a boolean sieve list indexed by integer; entries at the
    multiples of *base* are set to False in place.

    Bug fix: the loop bound now uses ``len(nums)`` — the original read
    the module-level global ``num``, which breaks (IndexError or silent
    under-sieving) for any list of a different length.
    """
    for m in range(base*2, len(nums), base):
        nums[m] = False
# Project Euler #10: sum every prime below two million using a
# sieve of Eratosthenes driven by rm_mtpls().
num = [True] * 2000000
# 0 and 1 are not prime.
num[0] = False
num[1] = False
listofprimes = []
for candidate in range(len(num)):
    if num[candidate]:
        # candidate survived the sieve, so it is prime; cross off its
        # multiples before moving on.
        rm_mtpls(candidate, num)
        listofprimes.append(candidate)
ans = sum(listofprimes)
print(ans)
#coding:utf-8
'''
背景:2018-01-24,电脑出现故障,启动蓝屏,导致无法开机启动,只好重置电脑,重置后chrome的书签全部没有了,感觉很可惜,
之前添加了许多很有用的网页全部都没有了
目的:在计算机后台运行,定时检查chorme书签是否有更新,并及时备份到非系统盘,以防再次发生意外
所用包:os
注意:代码文件不要删除,第二,备份文件地址之前写在与代码文件同级目录下,结果发现文件执行目录不是py文件存放目录,无法使用相对目录,当然也可以存放在相同目录下然后再查找一次也可以
'''
import os
import sys
import time
import win32api
import win32event
import win32service
import win32serviceutil
import servicemanager
import traceback
import os
import re
import shutil
import datetime
# Absolute paths for the service's log file and the backup target
# directory.  NOTE(review): hard-coded to an F: drive; adjust per machine.
log_path = r'F:\备忘\浏览器书签备份\脚本\log.txt'
backup_file_path = r'F:\备忘\浏览器书签备份\脚本\备份数据'
def log(message):
    """Append *message* (followed by a newline) to the fixed log file."""
    with open(log_path, 'a', encoding='utf-8') as handle:
        print(message, file=handle)
class ChromeBookmarksBackup(win32serviceutil.ServiceFramework):
    """Windows service that periodically backs up Chrome bookmark files.

    Each run walks C:\\ for files named "Bookmarks" and copies them
    (under a path-derived name) into the backup directory.
    """
    # Service identifiers shown in the Windows services list
    # (runtime strings — left untranslated on purpose).
    _svc_name_ = '浏览器书签备份'
    _svc_display_name_ = '浏览器书签备份'
    _svc_description_ = 'bookmarks backup'
    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.stop_event = win32event.CreateEvent(None,0,0,None)
        log('init')
    def findfile(self,backup_dir):
        """Locate every Chrome "Bookmarks" file on C:\\ and back it up
        into *backup_dir*."""
        log('findfile begin')
        # Collect the full paths of all bookmark files.
        mark_list = []
        for root,dirs,files in os.walk('c:\\'):
            if 'Bookmarks' in files:
                for f in files:
                    if 'Bookmarks' in f:
                        mark_list.append(os.path.join(root,f))
        # Back up each file found above.
        for pt in mark_list:
            if not os.path.isfile(pt): # skip anything that is not a regular file
                continue
            if not os.path.exists(backup_dir): # create the backup directory on demand
                os.mkdir(backup_dir)
            size = os.path.getsize(pt) # compared against the existing backup below
            print(size)
            # Derive the backup file name by replacing separators/special
            # characters in the source path with '_'.
            file_new_name = re.sub('[\\\.:\s]','_',pt)
            print(file_new_name)
            # NOTE(review): this local shadows the module-level
            # ``backup_file_path`` constant.
            backup_file_path = os.path.join(backup_dir,file_new_name)
            if not os.path.exists(backup_file_path): # first backup of this file: plain copy
                shutil.copyfile(pt,backup_file_path)
                continue
            backup_size = os.path.getsize(backup_file_path)
            types = isinstance(backup_size,int)
            print('*'*10)
            print(types)
            if size < backup_size: # source shrank: keep a timestamped copy of the old backup first
                print('#'*10)
                time_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
                print(''.join([backup_file_path,str(time_str)]))
                shutil.copyfile(backup_file_path,''.join([backup_file_path,str(time_str)]))
                shutil.copyfile(pt,backup_file_path)
            else:
                shutil.copyfile(pt,backup_file_path)
        log('findfile end')
    def start(self):
        """Run a single backup pass into the configured directory."""
        self.findfile(backup_file_path)
        log('start end')
    def SvcDoRun(self):
        """Service main loop: back up every 5 hours (18000 s).

        NOTE(review): the loop never waits on self.stop_event, so the
        service cannot stop cleanly during the sleep — confirm intended.
        """
        while 1:
            log('SvcDoRun begin')
            self.start()
            time.sleep(18000)
    def SvcStop(self):
        """Signal the stop event and report the pending stop to Windows."""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.stop_event)
if __name__ == '__main__':
    log('##### begin')
    if len(sys.argv) == 1:
        # Started by the Windows service manager (no CLI args): host the
        # service class directly.
        try:
            log('***** begin')
            evtsrc_dll = os.path.abspath(servicemanager.__file__)
            servicemanager.PrepareToHostSingle(ChromeBookmarksBackup)
            servicemanager.Initialize('ChromeBookmarksBackup',evtsrc_dll)
            servicemanager.StartServiceCtrlDispatcher()
        except win32service.error as e:
            print(e)
            log(str(e))
            traceback.print_exc()
    else:
        # Called with arguments: delegate install/start/stop/remove handling.
        win32serviceutil.HandleCommandLine(ChromeBookmarksBackup)
|
import os
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
from helpers.detect import detect_image_class
# Initializing flask application
app = Flask(__name__)
cors = CORS(app)  # allow cross-origin requests from the front-end
@app.route('/')
def home():
    """Serve the image-upload form page."""
    return render_template('form.html', title='Home')
@app.route("/predict", methods=["POST"])
def processReq():
    """Accept an uploaded image ('img' form field), classify it and
    render the result page.

    NOTE(review): every request writes to the same static/temp/test.png
    path — concurrent uploads will clobber each other; confirm whether a
    per-request temp file is needed.
    """
    data = request.files["img"]
    save_path = os.path.join(os.path.dirname(
        __file__), 'static/temp/test.png')
    data.save(save_path)
    result = detect_image_class(save_path)
    return render_template("result.html", result_image=result)
|
"""
LeetCode: Valid Parentheses (#20)
https://leetcode.com/problems/valid-parentheses/
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
An input string is valid if:
> Open brackets must be closed by the same type of brackets.
> Open brackets must be closed in the correct order.
Note that an empty string is also considered valid.
"""
def is_valid_stack(s):
    """Return True iff every bracket in *s* is closed by the matching
    bracket type in the correct order (an empty string is valid)."""
    if s == "":
        return True
    openers = ('(', '{', '[')
    valid_pairs = ('()', '{}', '[]')
    stack = []
    for ch in s:
        if ch in openers:
            stack.append(ch)
        elif not stack or stack.pop() + ch not in valid_pairs:
            # Closer with no opener, or a mismatched pair.
            return False
    # Any leftover opener means an unclosed bracket.
    return not stack
def main():
    """Exercise is_valid_stack on a few sample strings."""
    for sample in ("()", "(}", "([])", "[{({[[[()]]]})}]"):
        print("{} > {}".format(sample, is_valid_stack(sample)))
if __name__ == "__main__":
    main()
|
# Generated by Django 3.1 on 2020-08-10 02:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter myapp.Word.wordcount to a TextField."""
    dependencies = [
        ('myapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='word',
            name='wordcount',
            field=models.TextField(),
        ),
    ]
|
import base64
import json
from flask import render_template, flash, redirect, session, url_for, request, g
from flask_login import login_user, logout_user, current_user, login_required
from app import app, db, lm, facebook
from models import *
from forms import *
@lm.user_loader
def load_user(id):
    """Flask-Login hook: look up a User by its (string) session id."""
    user_id = int(id)
    return User.query.get(user_id)
@app.before_request
def before_request():
    """Expose the logged-in user (or anonymous proxy) as g.user."""
    g.user = current_user
@app.route('/')
@app.route('/index')
def index():
    """Landing page: list every book record with a trimmed description."""
    user = g.user
    bookrecords = BookRecord.query.all()
    books = []
    for record in bookrecords:
        # Cap long descriptions at 200 characters with an ellipsis.
        description = record.description
        if len(description) >= 200:
            description = description[:200] + '...'
        books.append({
            'uuid': record.uuid,
            'image': record.get_image(),
            'name': record.name,
            'author': record.author,
            'description': description,
            'service_type': record.service_type,
            'language': record.language,
        })
    return render_template('index.html',
                           title='bookeverflow',
                           books=books)
@app.route('/login')
def login():
    """Kick off the Facebook OAuth flow (or go home if already logged in).

    The callback URL carries a 'next' target taken from the query string
    or the referrer.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    return facebook.authorize(
        callback=url_for('facebook_authorized',
                         next=request.args.get(
                             'next') or request.referrer or None,
                         _external=True))
@app.route('/login/authorized')
@facebook.authorized_handler
def facebook_authorized(resp):
    """OAuth callback: store the token, create the user on first login,
    then log them in and redirect home."""
    if resp is None:
        # User denied access or the provider returned an error.
        return 'Access denied: reason=%s error=%s' % (
            request.args['error_reason'],
            request.args['error_description']
        )
    session['oauth_token'] = (resp['access_token'], '')
    me = facebook.get('/me')
    user = User.query.filter_by(social_id=me.data['id']).first()
    if user is None:
        # First login: create a local account keyed by the Facebook id.
        user = User()
        user.social_id = me.data['id']
        user.nickname = me.data['name']
        db.session.add(user)
        db.session.commit()
    login_user(user)
    return redirect(url_for('index'))
@facebook.tokengetter
def get_facebook_oauth_token():
    """Supply the stored OAuth token to the Facebook client."""
    return session.get('oauth_token')
@app.route('/logout')
@login_required
def logout():
    """End the session and return to the index page."""
    logout_user()
    session.pop('user_id', None)
    return redirect(url_for('index'))
@app.route('/addbook', methods=['GET', 'POST'])
@login_required
def addbook():
    """Show the add-book form; on valid POST create the record (plus an
    optional cover image stored base64-encoded) and redirect home."""
    form = AddBookForm(request.form)
    if request.method == 'POST' and form.validate():
        newbook = BookRecord()
        newbook.name = form.name.data
        newbook.create_user = current_user.id
        newbook.author = form.author.data
        newbook.book_type = form.book_type.data
        newbook.description = form.description.data
        newbook.language = form.language.data
        newbook.target_place = form.target_place.data
        newbook.price = form.price.data
        newbook.service_type = request.form['service_type']
        newbook.is_exchanged = False
        db.session.add(newbook)
        # Commit first so newbook.id exists for the image row below.
        db.session.commit()
        if request.files['image'].filename != '':
            imagedata = request.files['image'].read()
            base64code = base64.b64encode(imagedata)
            recordimg = RecordImage()
            recordimg.record = newbook.id
            recordimg.image = base64code
            db.session.add(recordimg)
            db.session.commit()
        return redirect(url_for('index'))
    return render_template('addbook.html',
                           title='Add a book',
                           form=form)
@app.route('/bookdetail/<record_uuid>', methods=['GET', 'POST'])
def bookdetail(record_uuid):
    """Detail page for one book; also tells the template whether the
    current user already has a pending deal request for it."""
    book = BookRecord.query.filter_by(uuid=record_uuid).first()
    current_owner = User.query.filter_by(id=book.create_user).first()
    # Anonymous visitors are treated as "already requested".
    requested = True
    if current_user.is_authenticated:
        existing = DealRequest.query\
            .filter_by(requester=current_user.id)\
            .filter_by(record=book.id)\
            .first()
        requested = existing is not None
    return render_template('bookdetail.html',
                           title=book.name,
                           book=book,
                           current_owner=current_owner,
                           requested=requested)
@app.route('/makedeal/<record_uuid>', methods=['POST'])
@login_required
def deal_request(record_uuid):
    """Create a deal request from the current user for *record_uuid*.

    Responds with 400 JSON when the record is unknown or a request
    already exists; 200 JSON on success.
    """
    bookrecord = BookRecord.query.filter_by(uuid=record_uuid).first()
    if not bookrecord:
        return json.dumps({'success': False}), 400, {'ContentType':'application/json'}
    # Reject duplicate requests from the same user for the same book.
    existed_request = DealRequest.query\
        .filter_by(requester=current_user.id)\
        .filter_by(record=bookrecord.id)\
        .first()
    if existed_request:
        return json.dumps({'success': False}), 400, {'ContentType':'application/json'}
    newrequest = DealRequest()
    newrequest.requester = current_user.id
    newrequest.dealer = bookrecord.create_user
    newrequest.record = bookrecord.id
    db.session.add(newrequest)
    db.session.commit()
    return json.dumps({'success': True}), 200, {'ContentType':'application/json'}
@app.route('/check_request')
@login_required
def check_request():
    """List the current user's outgoing deal requests with their status."""
    datas = []
    for req in DealRequest.query.filter_by(requester=current_user.id).all():
        if not req.processed:
            progress = 'Not Yet'
        elif req.accepted:
            progress = 'Accepted'
        else:
            progress = 'Rejected'
        datas.append({
            'progress': progress,
            'b': req.get_book()
        })
    return render_template('check_request.html',
                           title='Request List',
                           datas=datas)
@app.route('/check_deal')
@login_required
def check_deal():
    """List incoming deal requests for books the current user owns.

    Fix: added ``@login_required`` for consistency with check_request —
    without it an anonymous visitor hit ``current_user.id`` and crashed
    instead of being redirected to log in.
    """
    allrequest = DealRequest.query.filter_by(dealer=current_user.id).all()
    datas = []
    for r in allrequest:
        # result stays None while the request is unprocessed.
        result = None
        if r.processed:
            result = 'Accepted' if r.accepted else 'Rejected'
        datas.append({
            'result': result,
            'processed': r.processed,
            'requester': User.query.filter_by(id=r.requester).first(),
            'b': r.get_book()
        })
    return render_template('check_deal.html',
                           title='Deal List',
                           datas=datas)
@app.route('/makefinaldeal/<record_uuid>', methods=['POST'])
@login_required
def deal_decision(record_uuid):
    """Record an accept/reject decision on a deal request for *record_uuid*.

    Expects an 'accept' form value of 'true'/'false'; responds with a
    JSON success flag (400 on unknown record/request or missing value).
    """
    bookrecord = BookRecord.query.filter_by(uuid=record_uuid).first()
    if not bookrecord:
        return json.dumps({'success': False}), 400, {'ContentType':'application/json'}
    # NOTE(review): this looks up the request by requester=current_user,
    # yet the decision is presumably made by the dealer (book owner) —
    # confirm whether this should filter on dealer=current_user.id.
    thisrequest = DealRequest.query\
        .filter_by(requester=current_user.id)\
        .filter_by(record=bookrecord.id)\
        .first()
    if not thisrequest:
        return json.dumps({'success': False}), 400, {'ContentType':'application/json'}
    decision = request.values.get('accept', None)
    if decision is None:
        return json.dumps({'success': False}), 400, {'ContentType':'application/json'}
    thisrequest.processed = True
    thisrequest.accepted = decision == 'true'
    db.session.commit()
    return json.dumps({'success': True}), 200, {'ContentType':'application/json'}
@app.route('/wishlist', methods=['GET', 'POST'])
@login_required
def wishlist():
    """Show the current user's wish list; on POST with a name, add one."""
    form = UserWishForm(request.form)
    if request.method == 'POST' and form.name.data:
        newwishlist = UserWantList()
        newwishlist.user = current_user.id
        newwishlist.name = form.name.data
        db.session.add(newwishlist)
        db.session.commit()
    # Re-query so a just-added entry appears in the rendered list.
    wishlist = UserWantList.query.filter_by(user=current_user.id).all()
    return render_template('wishlist.html',
                           title='Wish List',
                           form=form,
                           wishlist=wishlist)
@app.route('/deletewish/<id>', methods=['POST'])
@login_required
def deletewish(id):
    """Delete one of the current user's wish-list entries.

    Security fix: the query now also filters on the owning user, so a
    logged-in user can no longer delete another user's wish simply by
    guessing its id.
    """
    wish = UserWantList.query.filter_by(id=id, user=current_user.id).first()
    if not wish:
        return json.dumps({'success': False}), 400, {'ContentType':'application/json'}
    db.session.delete(wish)
    db.session.commit()
    return json.dumps({'success': True}), 200, {'ContentType':'application/json'}
|
# Convert a Celsius temperature read from the user to Fahrenheit.
tempC = float(input('Temperatura em graus Celsius: '))
tempF = tempC * 9 / 5 + 32
print(f'Temperatura em graus Fahrenheit: {tempF}°F')
#! /usr/bin/env python
# Revised by YungshanSu, in November
"""Action server example
In this code, it demonstrates how to initialize action server, and defines
execute callback function.
"""
import rospy
import actionlib
import action_example.msg
class DoDishesAction (object):
    """Action server for doing dishes.

    Wraps a SimpleActionServer that "cleans" one dish per half second,
    publishing feedback after each dish and honouring preemption.
    """
    def __init__(self, name):
        self._feedback = action_example.msg.DoDishesFeedback()
        self._result = action_example.msg.DoDishesResult()
        self.goal_dishes = 0  # target dish count of the active goal
        self._action_name = name
        #Initialize action server
        self._as = actionlib.SimpleActionServer(
            self._action_name,
            action_example.msg.DoDishesAction,
            execute_cb=self.execute_cb,
            auto_start = False)
        rospy.loginfo("Server %s is initialized." % self._action_name)
        #Start action server
        self._as.start()
        rospy.loginfo("Server %s can be used now." % self._action_name)
    def execute_cb(self, goal):
        """Clean ``goal.dishwasher_goal`` dishes, one per 0.5 s tick.

        Publishes feedback after each dish; stops early (set_preempted)
        if the client requests preemption, otherwise reports success.
        """
        # Start executing the action
        self.goal_dishes = goal.dishwasher_goal
        rospy.loginfo("Start to do %d dishes" % self.goal_dishes)
        count = 0
        for i in range (0, self.goal_dishes):
            # check that preempt has not been requested by the client
            if self._as.is_preempt_requested():
                rospy.loginfo('%s: Preempted' % self._action_name)
                self._result.total_dishes_cleaned = count
                self._as.set_preempted()
                break
            else:
                count += 1
                self._feedback.total_dishes_cleaned = count
                self._as.publish_feedback(self._feedback)
                rospy.sleep(0.5)
        # Only report success when the full goal was reached (i.e. no
        # preemption happened part-way through).
        if count == self.goal_dishes:
            self._result.total_dishes_cleaned = count
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._as.set_succeeded(self._result)
def main():
    """Start the do_dishes action server node and spin until shutdown."""
    rospy.init_node('do_dishes_server')
    server = DoDishesAction(rospy.get_name())
    rospy.spin()
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
from rest_framework import serializers
from ..models import User
class UserSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the basic identity fields of User."""
    class Meta:
        model = User
        fields = ['id', 'username', 'email', 'first_name', 'last_name']
    def __str__(self):
        # NOTE(review): serializers have no ``username`` attribute, so
        # this raises AttributeError if ever called; it likely belongs on
        # the User model instead — confirm and relocate/remove.
        return self.username
|
# This file contains the utlity functions for our Chess game
from piecesetup import *
import copy
import re
# our board is an 8 x 8 2D list
# TODO: 1.1 - Using list comprehension initialize our default board and all of
# the possible directions for each piece except the pawn.
# Label them in the following format
# 'POSSIBLE_DIRECTIONS_{piece name}
# Global game state: an 8x8 grid where "" marks an empty square and any
# other entry is a piece object (see piecesetup).
board = [[""] * 8 for i in range(8)] # initialize our default board
# Movement direction vectors, as (row_delta, col_delta), per piece type.
POSSIBLE_DIRECTIONS_KING = [(-1,0), (1,0), (0,1), (0, -1), (1, 1), (-1, -1),
                            (1, -1), (-1, 1)]
POSSIBLE_DIRECTIONS_QUEEN = [(-1,0), (1,0), (0,1), (0, -1), (1, 1), (-1, -1),
                             (1, -1), (-1, 1)]
POSSIBLE_DIRECTIONS_ROOK = [(-1,0), (1,0), (0,1), (0, -1)]
POSSIBLE_DIRECTIONS_BISHOP = [(1, 1), (-1, -1), (1, -1), (-1, 1)]
POSSIBLE_DIRECTIONS_KNIGHT = [(1,2),(1,-2),(-1,2),(-1,-2),(2,1),(2,-1),(-2,-1),
                              (-2,1)]
# Pawns: straight advance, the two diagonal captures, and the two-square
# first move (black moves down the board, white moves up).
POSSIBLE_DIRECTIONS_PAWN_BLACK = [(1,0),(1,-1),(1,1),(2,0)]
POSSIBLE_DIRECTIONS_PAWN_WHITE = [(-1,0),(-1,1),(-1,-1),(-2,0)]
# Coordinate labels used when printing the board / parsing commands.
ROW_LABELS = ['1','2','3','4','5','6','7','8']
COLUMN_LABELS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
def mk_field(val, str_len=None, is_first=False):
    """Render *val* centred in a table cell of width *str_len*.

    A leading '|' is emitted when *is_first* is True; every cell carries
    a trailing ' |'.  When *str_len* is falsy, the value's own length is
    used instead.

    Fix: removed a leftover debug ``print("test")`` from the
    default-width branch.
    """
    prefix = ''
    if is_first:
        prefix += '|'
    if not str_len:
        str_len = len(val)
    return f'{prefix}{str(val).center(str_len)} |'
# TODO: define a function that lists possible moves that can be made when a
# player is in_check. This will also be the key helper for our checkmate function
def in_checkmate(king_is_white):
    """Return True when the given side has no legal move left."""
    return len(list_possible_moves(king_is_white)) == 0
def list_possible_moves(king_is_white):
    """Return the moves that leave *king_is_white*'s side out of check.

    Tries every (piece, destination) pair by temporarily making the move
    on the global board, testing for check, then undoing it.
    NOTE(review): each entry is [piece, from_row, from_col] — the
    candidate destination (row2, col2) is not recorded; confirm callers
    only need origin/emptiness information.
    """
    possible_moves = []
    for row in range(len(board)):
        for col in range(len(board[0])):
            curr_piece = board[row][col]
            if curr_piece != "" and curr_piece.is_white == king_is_white:
                for row2 in range(len(board)):
                    for col2 in range(len(board[0])):
                        if is_valid_position(curr_piece.position, [row2,col2], curr_piece.moves, curr_piece.moves_limit):
                            # Make the move provisionally...
                            board[row][col] = ""
                            old_piece = board[row2][col2]
                            board[row2][col2] = curr_piece
                            if not is_this_king_in_check(curr_piece.is_white):
                                possible_moves.append([curr_piece, row, col])
                            # ...then restore the board exactly.
                            board[row2][col2] = old_piece
                            board[row][col] = curr_piece
    return possible_moves
# TODO: define a function that checks whether the given side's king is in check
# by finding the king on the board and then running in_check on it
def is_this_king_in_check(king_is_white):
    """Find the given side's king on the board and report whether it is
    in check (returns None — falsy — if no such king exists)."""
    for rank_idx, rank in enumerate(board):
        for file_idx, piece in enumerate(rank):
            if piece == "":
                continue
            if "King" in type(piece).__name__ and piece.is_white == king_is_white:
                return in_check([rank_idx, file_idx], king_is_white)
# TODO: define a function in_check that checks whether a given side's king
# is in check. iterate through every square, see if it is a piece, see if it
# is on the other side, and see if it is a valid position to get to the current
# king's square
def in_check(king_position, king_is_white):
    """Return True when any enemy piece can reach *king_position*.

    Enemy kings are skipped (is_valid_position re-enters in_check for
    kings, so including them would recurse). Returns None — falsy —
    when no attacker is found.
    """
    for rank_idx, rank in enumerate(board):
        for file_idx, piece in enumerate(rank):
            if piece == "":
                continue
            if piece.is_white == king_is_white:
                continue
            if type(piece).__name__ == "King":
                continue
            if is_valid_position(piece.position, king_position, piece.moves, piece.moves_limit):
                return True
# TODO: creating the generic is_valid_position function to pass in possible
# moves and the length of movement
def is_valid_position(current_position, new_position, possible_directions, moves_limit = 99):
    """Return True if the piece at *current_position* can legally reach
    *new_position* using *possible_directions*.

    Walks each direction vector up to *moves_limit* steps, stopping at
    board edges, blocking pieces, and the pawn-specific restrictions.
    Kings are additionally forbidden from moving into check.
    """
    curr_piece = board[current_position[0]][current_position[1]]
    # if our piece is a king and in check, stop it regardless
    if 'King' in type(curr_piece).__name__ and in_check(new_position, curr_piece.is_white):
        return False
    # iterate through every possible direction
    for direction in possible_directions:
        # preventing aliasing - students will likely trip up on this
        temp_position = [0,0]
        temp_position[0] = current_position[0]
        temp_position[1] = current_position[1]
        # kings, pawns, and knights can only make 'one move'
        moves_made = 0
        temp_position[0] += direction[0]
        temp_position[1] += direction[1]
        # continue in that direction until we are out of the board's range
        while(temp_position[0] >= 0 and temp_position[0] < 8 and
              temp_position[1] >= 0 and temp_position[1] < 8 and
              moves_made < moves_limit):
            # if we hit a piece of the same color, don't allow the move
            temp_piece = board[temp_position[0]][temp_position[1]]
            # pieces cannot cross through pieces
            if temp_piece != "" and not (temp_position[0] == new_position[0] and temp_position[1] == new_position[1]):
                # print("You cannot move through another piece.")
                break
            if temp_piece != "" and temp_piece.is_white == curr_piece.is_white:
                break
            # pawns cannot capture what is in front of them
            if 'Pawn' in type(curr_piece).__name__ and temp_piece != "" and direction[1] == 0:
                # print("Pawns cannot capture pieces in front of them.")
                break
            # pawns cannot go diagonally without capturing
            if 'Pawn' in type(curr_piece).__name__ and temp_piece == "" and direction[1] != 0:
                # print("Pawns cannot move diagonally without capturing a piece.")
                break
            # pawns can only go 2 forward when in their original row
            if 'Pawn' in type(curr_piece).__name__ and curr_piece.is_white and current_position[0] != 6 and abs(direction[0]) == 2:
                # print("White Pawns can only move two squares when in row 2")
                break
            if 'Pawn' in type(curr_piece).__name__ and not curr_piece.is_white and current_position[0] != 1 and abs(direction[0]) == 2:
                # print("Black Pawns can only move two squares when in row 7")
                break
            # if we hit our new_position, return true
            if temp_position[0] == new_position[0] and temp_position[1] == new_position[1]:
                # print("Valid new position, moving piece!")
                return True
            # increment our position
            temp_position[0] += direction[0]
            temp_position[1] += direction[1]
            moves_made += 1
    # print("No possible way to reach piece.")
    return False
# TODO: write the function parse_command such that it will parse and return the
# current position and old position in (#,#) format
def parse_command(command):
    """Translate a move like 'e2e4' into ((row, col), (row, col)) board
    indices, or return False when the text does not start with two
    valid squares."""
    if not re.match(r"[a-h][1-8][a-h][1-8]", command):
        return False
    # Files a-h map to columns 0-7; ranks 1-8 map to rows 7-0 (row 0 is
    # the top of the printed board).
    source = (8 - int(command[1]), ord(command[0]) - ord('a'))
    destination = (8 - int(command[3]), ord(command[2]) - ord('a'))
    return source, destination
# TODO: complete the function for printing the board
def print_board():
    """Clear the terminal and render the board with file/rank labels."""
    # these commands clear the terminal screen, leave them as is
    print(chr(27)+'[2j')
    print('\033c')
    print('\x1bc')
    COLUMN_WIDTH = 11
    # Empty corner cell above the rank labels.
    print(mk_field("", COLUMN_WIDTH, True), end = "")
    # print each of our columns
    for i in range(len(COLUMN_LABELS) - 1):
        print(mk_field(COLUMN_LABELS[i], COLUMN_WIDTH), end = "")
    print(mk_field('h', COLUMN_WIDTH))
    for j in range(len(ROW_LABELS)):
        # Rank label — ranks are printed top-down, from 8 to 1.
        print(mk_field(ROW_LABELS[len(ROW_LABELS) - j - 1], COLUMN_WIDTH, True), end = "")
        # go through each row of the
        for k in range(len(board[0]) - 1):
            print(mk_field(board[j][k], COLUMN_WIDTH, False), end = "")
        print(mk_field(board[j][k+1], COLUMN_WIDTH, False))
|
from dajax.core import Dajax
from django.utils import simplejson
from dajaxice.decorators import dajaxice_register
from django.template.loader import render_to_string
from pamplesneak.models import GameWord, Player
from pamplesneak.forms import MessageSender
import random
word_file = "/usr/share/dict/words"
# Load the dictionary once at import time; the context manager closes
# the file handle instead of leaking it until garbage collection (the
# original `open(...).read()` never closed the file).
with open(word_file) as _words_fh:
    WORDS = _words_fh.read().splitlines()
@dajaxice_register
def refresh_words(request, word_bank_size):
    """Fill the word box with *word_bank_size* randomly chosen words."""
    word_list = [random.choice(WORDS) for _ in range(word_bank_size)]
    dajax = Dajax()
    markup = render_to_string('pamplesneak/wordbox.html', {'word_list': word_list})
    dajax.assign('#words_box', 'innerHTML', markup)
    return dajax.json()
@dajaxice_register
def refreshWord(request, game_id, player_id):
    """Push the player's oldest pending word (and its sender, if any)
    into the #player_word element.

    Fix: removed the unused ``player_words`` local.
    """
    game_words = GameWord.objects.filter(game=game_id).filter(player=player_id).order_by('created')
    sender = ""
    if not game_words:
        player_word = ""
    else:
        player_word = game_words[0].word
        if game_words[0].created_by:
            sender = game_words[0].created_by
    dajax = Dajax()
    render = render_to_string('pamplesneak/playerword.html', {'player_word': player_word, 'sender':sender})
    dajax.assign('#player_word', 'innerHTML', render)
    return dajax.json()
@dajaxice_register
def randomizeWord(request):
    """Return one random dictionary word as JSON."""
    chosen = random.choice(WORDS)
    return simplejson.dumps({'random_word': chosen})
@dajaxice_register
def wordSuccess(request, game_id, player_id):
    """Mark the player's current word as successfully sneaked.

    Detaches the oldest pending word, bumps the player's success count,
    then re-renders the next pending word (if any) into the page.
    NOTE(review): raises IndexError when the player has no pending words.
    """
    game_words = GameWord.objects.filter(game=game_id).filter(player=player_id).order_by('created')[0]
    game_words.player=None
    game_words.save()
    player = Player.objects.get(id=player_id)
    player.succesful_sneaks += 1
    player.save()
    # Re-query to fetch the next pending word for this player.
    game_words = GameWord.objects.filter(game=game_id).filter(player=player_id).order_by('created')
    sender = ""
    if not game_words:
        player_word = ""
    else:
        player_word = game_words[0].word
        if game_words[0].created_by:
            sender = game_words[0].created_by
    dajax = Dajax()
    render = render_to_string('pamplesneak/playerword.html', {'player_word': player_word, 'sender': sender})
    dajax.assign('#player_word', 'innerHTML', render)
    return dajax.json()
@dajaxice_register
def wordFail(request, game_id, player_id):
    """Mark the player's current word as a failed sneak.

    Mirrors wordSuccess but increments ``failed_sneaks`` instead.
    Fix: removed the unused ``player_words`` local.
    NOTE(review): this duplicates most of wordSuccess — consider a
    shared helper; raises IndexError when no pending words exist.
    """
    game_words = GameWord.objects.filter(game=game_id).filter(player=player_id).order_by('created')[0]
    game_words.player=None
    game_words.save()
    player = Player.objects.get(id=player_id)
    player.failed_sneaks += 1
    player.save()
    # Re-query to fetch the next pending word for this player.
    game_words = GameWord.objects.filter(game=game_id).filter(player=player_id).order_by('created')
    sender = ""
    if not game_words:
        player_word = ""
    else:
        player_word = game_words[0].word
        if game_words[0].created_by:
            sender = game_words[0].created_by
    dajax = Dajax()
    render = render_to_string('pamplesneak/playerword.html', {'player_word': player_word, 'sender': sender})
    dajax.assign('#player_word', 'innerHTML', render)
    return dajax.json()
@dajaxice_register
def refreshInGameStats(request, game_id, player_id):
    """Refresh the in-game scoreboard, best sneakers first.

    ``player_id`` is unused but kept for the client-side call signature.
    """
    roster = Player.objects.filter(game=game_id).order_by('-succesful_sneaks')
    dajax = Dajax()
    markup = render_to_string('pamplesneak/ingamestats.html', {'players': roster})
    dajax.assign('#ingame_stats', 'innerHTML', markup)
    return dajax.json()
@dajaxice_register
def refreshForm(request, game_id, player_id, csrfmiddlewaretoken):
    """Rebuild the word-sending form, offering every player except the sender."""
    opponents = Player.objects.filter(game=game_id).exclude(id=player_id)
    form = MessageSender({p.id: p.name for p in opponents})
    dajax = Dajax()
    markup = render_to_string('pamplesneak/messageform.html', {'form': form, 'csrfmiddlewaretoken': csrfmiddlewaretoken})
    dajax.assign('#messageform', 'innerHTML', markup)
    return dajax.json()
|
# XOR gate : 다층 퍼셉트론
# - 다층 퍼셉트론의 동작 설명
# 1. 0층의 두 뉴런이 입력 신호를 받아 1층의 뉴런으로 신호를 보낸다.
# 2. 1층의 뉴런이 2층의 뉴런으로 신호를 보내고, 2층의 뉴런은
# 이 입력신호를 바탕으로 y를 출력한다.
# 3. 단층 퍼셉트론으로는 표현하지 못한 것을 층을 하나 늘려 구현
# 할 수 있었다.
# - 퍼셉트론은 층을 쌓아(깊게하여) 더 다양한 것을 표현할 수 있다.
import numpy as np
# AND gate: a single perceptron with weights (0.5, 0.5) and bias -0.7,
# so the weighted sum only goes positive when both inputs are 1.
def AND(x1, x2):
    """Return 1 if both x1 and x2 are 1, else 0."""
    signal = np.array([x1, x2])
    weight = np.array([0.5, 0.5])
    bias = -0.7
    activation = np.sum(weight * signal) + bias
    return 1 if activation > 0 else 0
# NAND gate: AND with weights and bias negated, so only (1, 1) yields 0.
def NAND(x1, x2):
    """Return 0 if both inputs are 1, else 1."""
    signal = np.array([x1, x2])
    weight = np.array([-0.5, -0.5])
    bias = 0.7
    activation = np.sum(weight * signal) + bias
    return 0 if activation <= 0 else 1
# OR gate: bias -0.2 lets a single active input push the sum positive.
def OR(x1, x2):
    """Return 1 if at least one input is 1, else 0."""
    activation = np.dot(np.array([0.5, 0.5]), np.array([x1, x2])) - 0.2
    return 1 if activation > 0 else 0
def XOR(x1, x2):
    """XOR as a two-layer perceptron: AND(NAND(x1, x2), OR(x1, x2)).

    Layer 0 feeds both inputs to NAND and OR; layer 1 combines their
    outputs with AND — a function a single-layer perceptron cannot express.
    """
    hidden1 = NAND(x1, x2)
    hidden2 = OR(x1, x2)
    return AND(hidden1, hidden2)
if __name__ == "__main__":
    # Truth-table demo: XOR prints 1 exactly when the inputs differ
    # (expected output: 0, 1, 1, 0).
    for a, b in ((0, 0), (0, 1), (1, 0), (1, 1)):
        print(XOR(a, b))
|
import numpy as np
from scipy.optimize import linprog
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import itertools
import time
from mayavi import mlab
def test_optimization():
    # Demo (Python 2): sample 4 random unit normals, then use linprog to
    # look for a direction x with A.x <= 0 and -A.x <= 0.1 (stacked as
    # D x < e), i.e. a vector approximately inside the dual cone.
    # The constraint half-planes are then shaded with matplotlib.
    points = np.random.uniform(-1,1,(4,2))
    norms = np.apply_along_axis(np.linalg.norm, 1, points)
    # normalise each row to unit length
    points = np.transpose(points.T/norms)
    print points
    eps = 1e-4  # NOTE(review): unused in this function
    A = np.array(points)
    (M,N) = A.shape
    b = np.zeros((M,1))
    c = np.ones(N)
    # stack A x <= 0 and -A x <= 0.1 into one inequality system D x <= e
    D = np.concatenate((A,-A),axis = 0)
    e = np.concatenate((b,b+0.1),axis=0)
    t = time.time()
    print M,N
    print "min ", c,"* x"
    print D, "* x <", e
    res = linprog(c, A_ub=D, b_ub=e,bounds=None)
    print res
    print time.time()-t
    sol_x = res['x']  # NOTE(review): extracted but never used below
    t = np.array([-1000,1000])  # NOTE(review): unused; also shadows the timer above
    plt.plot(points[:,0],points[:,1],'ro')
    axes = plt.gca()
    # shade the feasible side of every half-plane constraint
    for p,y in zip(D,e):
        r = y/(p[0]**2+p[1]**2)
        x = np.array([-p[1],p[0]])  # direction vector along the boundary line
        pts = np.array([p*r-1000*x,p*r+1000*x])
        if np.dot(p,[0,100])>y:
            axes.fill_between(pts[:,0], pts[:,1], 100, facecolor='green',alpha=0.7)
        else:
            axes.fill_between(pts[:,0], pts[:,1], -100, facecolor='green',alpha=0.7)
    # draw the boundary lines (green if offset from origin, blue through it)
    for p,y in zip(D,e):
        r = y/(p[0]**2+p[1]**2)
        x = np.array([-p[1],p[0]])
        pts = np.array([p*r-1000*x,p*r+1000*x])
        if y>1.0e-3:
            plt.plot(pts[:,0],pts[:,1],'g')
        else:
            plt.plot(pts[:,0],pts[:,1],'b')
            plt.plot([p[0],0],[p[1],0],'b--')
    plt.axis('equal')
    axes.set_xlim([-1,1])
    axes.set_ylim([-1,1])
    # def onclick(event):
    #     p = np.array([event.xdata, event.ydata])
    #     print np.array([np.dot(D,p)]).T-e
    #     print np.all(np.array([np.dot(D,p)]).T-e<0)
    # plt.gcf().canvas.mpl_connect('button_press_event', onclick)
    # plt.show()
# Deuxieme algorithm listant toute les solutions
# complexite O(Combinaison(dimension de l'espace-1 parmis nombre de points))
# dim 3 avec 30 points -> 30*29 droites possibles
# attention, j'ai ajoute de la tolerence, l'inegalite stricte n'est pas respectee
def test_2d():
    # Enumeration approach in 2-D: for every (n-1)-subset of the unit
    # normals (here single rows), pad to a square matrix with a zero row
    # and take the eigenvector of the (near-)zero eigenvalue as a
    # candidate boundary ray; keep the rays satisfying A s <= eps.
    points = np.random.uniform(-1,1,(4,2))
    norms = np.apply_along_axis(np.linalg.norm, 1, points)
    points = np.transpose(points.T/norms)  # normalise rows to unit length
    print points
    A = np.array(points)
    eps = 1e-16
    t = time.time()
    F = []
    for a in list(A):
        F.append(np.array([a[0],a[1]]))
    print F
    solutions = []
    n = 2
    for h in itertools.combinations(F,n-1):
        # pad with a zero row so the matrix is square
        l = h+(0.0*h[0],)
        a = np.concatenate(tuple([np.array([j]) for j in l]),axis=0)
        #print a
        w,v = np.linalg.eig(a)
        #print "M=",a
        #print "eig_val=",w
        #print "eig_vect=",v
        null_eig_val = np.abs(w)<eps
        n_zero_eig = np.count_nonzero(null_eig_val)
        #print n_zero_eig
        if n_zero_eig==0:
            raise Exception("We should find at least one eigen value equal to zero")
        if n_zero_eig>1:
            #print "More than one eigenvalue equal to zero"
            pass
        idv = np.argmax(null_eig_val)
        line_kernel = np.array([v[:,idv]])
        #print "k= ",line_kernel
        #print np.dot(A,line_kernel.T)
        # each kernel line yields two opposite candidate rays
        solutions.append(line_kernel[0])
        solutions.append(-line_kernel[0])
    in_cone = []
    for s in solutions:
        if np.all(np.dot(A,s.T)<=eps):
            #print "Found one: ",line_kernel
            in_cone.append(s)
    print "Time = ",time.time()-t
    if in_cone:
        pts = np.array(in_cone)
        # draw each cone ray as a long magenta segment from the origin
        for p in pts:
            m = np.array([[0,0],100*p])
            plt.plot(m[:,0],m[:,1],"m",linewidth=1.5)
    plt.savefig('foo.png')
    print list(itertools.combinations(F,1))
def get_cone(controls,verbose=0):
    # Return the boundary rays of the cone {s : A s <= 0}, where the rows
    # of *controls* are the constraint normals. Every (N-1)-subset of rows
    # is padded with a zero row; the eigenvector of the (near-)zero
    # eigenvalue gives a candidate ray, and the ray and its opposite are
    # kept when they satisfy all constraints up to the tolerance eps.
    A = np.array(controls)
    M,N = A.shape
    eps = 1e-10  # numerical tolerance: the strict inequality is relaxed
    solutions = []
    in_cone = []
    for h in itertools.combinations(A,N-1):
        l = h+(0.0*h[0],)
        a = np.concatenate(tuple([np.array([j]) for j in l]),axis=0)
        w,v = np.linalg.eig(a)
        if verbose:
            print "M=",a
            print "eig_val=",w
            print "eig_vect=",v
        null_eig_val = np.abs(w)<eps
        n_zero_eig = np.count_nonzero(null_eig_val)
        id_null = np.where(np.abs(w)<eps)
        # rank of the null eigenvectors: >1 signals a degenerate subset
        n_indep_null_eig = np.linalg.matrix_rank(v[:,id_null[0]])
        if n_zero_eig==0:
            raise Exception("We should find at least one eigen value equal to zero")
        if n_indep_null_eig>1:
            print "More than one eigenvalue equal to zero"
            print "Matrice of the plans:",a
            print "Eigen values:",w
            print "Eigen vectors:",v
            continue
        idv = id_null[0][0]
        line_kernel = np.array([v[:,idv]])
        ker = line_kernel[0]
        if not np.all(ker.imag==0):
            raise Exception("imaginary part not null!",ker)
        s = ker.real
        solutions.append(s)
        solutions.append(-s)
        sol = [s,-s]
        for c in sol:
            if verbose:
                print "is vector",c,"in the cone?",np.dot(A,c.T)
            if np.all(np.dot(A,c.T)<=eps):
                in_cone.append(c)
                if verbose:
                    print "Found cone vectrice:",c
    return in_cone
def is_system_fair(controls):
    # Same edge enumeration as get_cone, but short-circuits: returns True
    # as soon as one candidate ray lies inside the cone, False otherwise.
    # NOTE(review): duplicates get_cone's loop body almost verbatim.
    A = np.array(controls)
    M,N = A.shape
    eps = 1e-10
    solutions = []
    in_cone = []  # NOTE(review): never populated in this variant
    for h in itertools.combinations(A,N-1):
        l = h+(0.0*h[0],)
        a = np.concatenate(tuple([np.array([j]) for j in l]),axis=0)
        w,v = np.linalg.eig(a)
        null_eig_val = np.abs(w)<eps
        n_zero_eig = np.count_nonzero(null_eig_val)
        id_null = np.where(np.abs(w)<eps)
        n_indep_null_eig = np.linalg.matrix_rank(v[:,id_null[0]])
        if n_zero_eig==0:
            raise Exception("We should find at least one eigen value equal to zero")
        if n_indep_null_eig>1:
            print "More than one eigenvalue equal to zero"
            print "Matrice of the plans:",a
            print "Eigen values:",w
            print "Eigen vectors:",v
            continue
        idv = id_null[0][0]
        line_kernel = np.array([v[:,idv]])
        ker = line_kernel[0]
        if not np.all(ker.imag==0):
            raise Exception("imaginary part not null!",ker)
        s = ker.real
        solutions.append(s)
        solutions.append(-s)
        sol = [s,-s]
        for c in sol:
            if np.all(np.dot(A,c.T)<=eps):
                return True
    return False
def draw_plan(ax, normal, point):
    """Plot, on the 3-D axes *ax*, the plane through *point* with normal *normal*.

    The plane is sampled on a 20x20 grid over [-1, 1]^2 in (x, y); z is
    solved from normal . (x, y, z) + d = 0, so normal[2] must be nonzero.
    """
    d = -np.sum(point * normal)
    grid = np.linspace(-1, 1, 20)
    xx, yy = np.meshgrid(grid, grid)
    zz = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]
    ax.plot_surface(xx, yy, zz)
def check_results(controls,cone,verbose=1):
A = np.array(controls)
eps = 1e-10
for c in cone:
p = np.dot(A,c)
if verbose:
print np.all(p<=eps),p,np.where(p>eps)
def test_3d():
    # 3-D demo on the corners of the cube [0.5, 1]^3: compute the cone,
    # draw control vectors (green), cone rays (red) and one plane per
    # control, then fake an equal aspect ratio with a bounding box.
    a = list(itertools.product(([0.5,1]),repeat=3))
    points = [np.array(list(b)) for b in a]
    print "Points:",points
    cone = get_cone(points,verbose=1)
    print "Cone:",cone
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.set_aspect('equal')
    # segments from the origin to every control vector
    tmp = [(np.array([0,0,0]),p) for p in points]
    pts = np.array([a for e in tmp for a in e])
    ax.plot(pts[:,0],pts[:,1],pts[:,2],'g')
    # segments from the origin to every cone ray
    tmp = [(np.array([0,0,0]),p) for p in cone]
    pts = np.array([a for e in tmp for a in e])
    ax.plot(pts[:,0],pts[:,1],pts[:,2],c='r')
    for p in points:
        print p
        draw_plan(ax,p,np.array([0,0,0]))
    # Create cubic bounding box to simulate equal aspect ratio
    max_range = 10
    Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten()
    Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten()
    Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten()
    # Comment or uncomment following both lines to test the fake bounding box:
    for xb, yb, zb in zip(Xb, Yb, Zb):
        ax.plot([xb], [yb], [zb], 'w')
    check_results(points,cone)
    plt.show()
def massive_test(N):
    # Stress test: N random 10x4 systems; collect the point sets for
    # which check_results reports a failure, printing progress every 5%.
    # NOTE(review): this relies on check_results returning a boolean —
    # if it returns None, `None == False` is False and errors are never
    # collected; confirm its return value.
    t = time.time()
    errors = []
    for i in range(N):
        points = np.random.uniform(-1,1,(10,4))
        cone = get_cone(points,verbose=0)
        if check_results(points,cone,verbose=0)==False:
            errors.append(points)
        if (100.0*i/N)%5==0:
            print 100.0*i/N
    print "Time per iteration:",(time.time()-t)/N
    print "Errors:",len(errors)
    print errors
def plan3d(normal, point):
    """Render, via mayavi, the plane through *point* whose normal is *normal*.

    Samples a 20x20 grid over [-1, 1]^2 in (x, y) and solves for z, so
    normal[2] must be nonzero.
    """
    grid = np.linspace(-1, 1, 20)
    xx, yy = np.meshgrid(grid, grid)
    offset = -np.sum(point * normal)
    zz = (-normal[0] * xx - normal[1] * yy - offset) * 1. / normal[2]
    mlab.mesh(xx, yy, zz, color=(1.0, 0.4, 0.2))
def show3d():
    # Mayavi demo: 5 random unit control normals (the cube set built
    # first is immediately overwritten), their planes, and the cone rays.
    # cube
    a = list(itertools.product(([0.5,1]),repeat=3))
    controls = [np.array(list(b)) for b in a]
    # random (overwrites the cube controls above)
    controls = np.random.uniform(0.0,1.0,(5,3))
    norms = np.apply_along_axis(np.linalg.norm, 1, controls)
    controls = np.transpose(controls.T/norms)  # normalise rows
    cone = get_cone(controls,verbose=1)
    print "Controls:",controls
    print "Cone:",cone
    for c in controls:
        plan3d(c,np.array([0]*3))
    mlab.points3d(controls[:,0],controls[:,1],controls[:,2])
    # draw each cone ray as a unit segment from the origin
    for c in cone:
        l = np.array([c*0.0] + [c*1.0])
        x = l[:,0]
        y = l[:,1]
        z = l[:,2]
        mlab.plot3d(x,y,z,line_width=1.0)
    mlab.show()
# Script entry: run the interactive mayavi demo on execution.
show3d()
#massive_test(100)
# Degenerate 2-D sanity check: two opposite normals leave only the origin.
print get_cone([np.array([1,1]) , np.array([-1,-1]) ])
#!/usr/bin/env python
# _*_ coding:utf-8_*_
# author:jinxiu89@163.com
# create by thomas on 18-4-1.
import os
from app.admin import admin
from utils import change_filename
from flask import jsonify, request, current_app, url_for
from app.decorate import admin_login
@admin.route("/upload", methods=["POST"])
@admin_login
def upload():
image = request.files['imgFile']
if not os.path.exists(current_app.config.get('IMG_DIR')):
os.makedirs(current_app.config.get('IMG_DIR'))
os.chmod(current_app.config.get('IMG_DIR'), rw)
img = change_filename(image.filename)
image.save(current_app.config.get('IMG_DIR') + img)
return jsonify({"error": 0, "url": url_for("static", filename="uploads/images/" + img)})
@admin.route("/markdown", methods=["POST"])
@admin_login
def markdown():
image = request.files['editormd-image-file']
if not os.path.exists(current_app.config.get('IMG_DIR')):
os.makedirs(current_app.config.get('IMG_DIR'))
os.chmod(current_app.config.get('IMG_DIR'), rw)
img = change_filename(image.filename)
image.save(current_app.config.get('IMG_DIR') + img)
return jsonify({"success": 1, "url": url_for("static", filename="uploads/images/" + img)})
|
try:
import service
import uix
import uiutil
import mathUtil
import blue
import uthread
import xtriui
import form
import triui
import trinity
import util
import draw
import sys
import types
import uicls
import uiconst
import time
import stackless
import functools
import listentry
import base
import math
import geo2
import vivoxConstants
import re
import chat
from itertools import izip, imap
from math import pi, cos, sin, sqrt
from foo import Vector3
from mapcommon import SYSTEMMAP_SCALE
from traceback import format_exception
import state
import random
import spaceObject
import blue
import timecurves
import copy
    # Clear any stale channel references left on the Scanner window class
    # by a previous injection of this script.
    try:
        form.Scanner.localChannel = None
        form.Scanner.reportChannel = None
    except:
        msg('bad syntax')  # NOTE(review): msg is defined further down this module; if this fires first, the outer try swallows the NameError
def safetycheck(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
try:
print "exception in " + func.__name__
(exc, e, tb,) = sys.exc_info()
result2 = (''.join(format_exception(exc, e, tb)) + '\n').replace('\n', '<br>')
sm.GetService('gameui').MessageBox(result2, "ProbeHelper Exception")
except:
pass
return wrapper
    @safetycheck
    def msg(m = 'bad input'):
        """Best-effort: show string *m* in the client's notification area."""
        try:
            sm.GetService('gameui').Say(m)
        except:
            pass
    @safetycheck
    def SetReport(*args):
        """Use the currently focused chat channel as the intel report target."""
        form.Scanner.reportChannel = sm.GetService('focus').GetFocusChannel()
        msg('Report channel set!')
    @safetycheck
    def randomPause(fromtime = 0, totime = 1000):
        """Random integer in [fromtime, totime), used below as a millisecond delay."""
        return random.randrange(fromtime, totime)
    @safetycheck
    def ReportLocal(*args):
        """Post an intel summary of the local chat channel to the report channel.

        Classifies every pilot in local as blue/neutral/orange/red from
        standings (capped at 50 hostiles), writes a one-line count summary
        linked to the solar system, then pastes up to 15 hostiles (three
        per line, with corp/alliance name) into the previously configured
        form.Scanner.reportChannel, pausing randomly between lines.
        """
        try:
            msg('Reporting local channel...')
            # find the 'local' channel: the one keyed by solarsystemid2
            for channelID in sm.GetService('LSC').channels.keys():
                channel = sm.GetService('LSC').channels[channelID]
                if (channel.window and ((type(channel.channelID) is tuple) and (channel.channelID[0][0] == 'solarsystemid2'))):
                    form.Scanner.localChannel = channel
            solarSystemID = form.Scanner.localChannel.channelID[0][1]
            solarSystemName = cfg.evelocations.Get(solarSystemID).name
            link = ('showinfo:5' + '//' + str(solarSystemID))
            form.Scanner.reportChannel.input.AddLink(solarSystemName, link)
            entries = form.Scanner.localChannel.window.userlist.GetNodes()
            redCount = 0
            blueCount = 0
            orangeCount = 0
            neutCount = 0
            hostileList = list()
            count = 0
            for entry in entries:
                charID = entry.charID
                corpCharInfo = sm.GetService('corp').GetInfoWindowDataForChar(charID, 1)
                corpID = corpCharInfo.corpID
                allianceID = corpCharInfo.allianceID
                # skip ourselves
                if (eve.session.charid == charID):
                    continue
                # standings are looked up from our alliance if we have one, else our corp
                myStandingID = None
                if (eve.session.allianceid):
                    myStandingID = eve.session.allianceid
                else:
                    myStandingID = eve.session.corpid
                standing = sm.GetService('standing').GetStanding(myStandingID, charID)
                # >= 0.5, own corp or own alliance counts as blue
                if ((standing >= 0.5) or (eve.session.corpid == corpID) or ((eve.session.allianceid != None) and (eve.session.allianceid == allianceID))):
                    blueCount += 1
                elif (standing < 0.5 and standing >= 0.0):
                    neutCount += 1
                    hostileList.append(entry)
                    count += 1
                    msg('hostiles detected %d' % count)
                elif (standing < 0.0 and standing > -1.0):
                    orangeCount += 1
                    hostileList.append(entry)
                    count += 1
                    msg('hostiles detected %d' % count)
                else:
                    redCount = redCount + 1
                    hostileList.append(entry)
                    count += 1
                    msg('hostiles detected %d' % count)
                # stop classifying after 50 hostiles
                if count >= 50:
                    break
            blueText = '%d blues' % blueCount
            neutText = '%d neuts' % neutCount
            redText = '%d reds' % (redCount + orangeCount)
            solarSystemText = ': %s, %s, %s' % (blueText, neutText, redText)
            form.Scanner.reportChannel.input.InsertText('%s\r' % solarSystemText)
            form.Scanner.reportChannel.InputKeyUp()
            if len(hostileList) > 0:
                blue.pyos.synchro.Sleep(randomPause(1000,1500))
                iterNum = 0
                reportLimit = 15  # paste at most this many hostiles
                for entry in hostileList:
                    iterNum += 1
                    corpCharInfo = sm.GetService('corp').GetInfoWindowDataForChar(entry.charID, 1)
                    corpID = corpCharInfo.corpID
                    allianceID = corpCharInfo.allianceID
                    # start a fresh line every three entries
                    if (iterNum % 3) == 1:
                        form.Scanner.reportChannel.input.InsertText('|\r')
                    link = ((('showinfo:' + str(entry.info.typeID)) + '//') + str(entry.charID))
                    form.Scanner.reportChannel.input.AddLink(entry.info.name, link)
                    ids = ''
                    if allianceID:
                        ids = cfg.eveowners.Get(allianceID).name
                    else:
                        ids = cfg.eveowners.Get(corpID).name
                    form.Scanner.reportChannel.input.InsertText('-%s\r' % ids)
                    # final entry or report limit reached: append a (done/total) marker
                    if (iterNum == len(hostileList)) or (iterNum >= reportLimit):
                        form.Scanner.reportChannel.input.InsertText('-(%d/%d)-'%(iterNum, len(hostileList)))
                        form.Scanner.reportChannel.InputKeyUp()
                        break
                    # flush every full line of three, with a human-like pause
                    if ((iterNum % 3) == 0):
                        form.Scanner.reportChannel.InputKeyUp()
                        blue.pyos.synchro.Sleep(randomPause(800,1100))
            msg('Local report done!')
        except:
            msg('error')
    @safetycheck
    def DisableLog(*args):
        """Point the service manager's and every service's log hooks at a no-op."""
        try:
            sm.LogMethodCall = DudLogger
            count = 0
            for each in sm.services.keys():
                KillLogInfo(sm.services[each])
                count += 1
            msg('disabled logging in %d services' % count)
        except:
            msg('error in DisableLog')
    @safetycheck
    def KillLogInfo(service):
        """Silence one service by replacing its log methods with DudLogger."""
        try:
            service.LogInfo = DudLogger
            service.LogMethodCall = DudLogger
        except:
            msg('error in KillLogInfo')
    @safetycheck
    def DudLogger(self, *args, **keywords):
        """No-op stand-in for the client's logging callbacks."""
        return
    @safetycheck
    def MapMoveButton(*args):
        """Hook the command service's movement handler to MyMove."""
        try:
            msg('mapping...')
            sm.GetService('cmd')._UpdateMovement = MyMove
        except:
            msg('map error')
    @safetycheck
    def UnmapMoveButton(*args):
        """Restore the original movement handler.

        NOTE(review): old_UpdateMovement is only assigned in a
        commented-out line of the bootstrap at the bottom of this file,
        so as shipped this raises NameError and reports 'unmap error'.
        """
        try:
            msg('unmapping...')
            sm.GetService('cmd')._UpdateMovement = old_UpdateMovement
        except:
            msg('unmap error')
    @safetycheck
    def MyMove(direction):
        """Replacement movement handler: announce which MOVDIR_* key was pressed.

        A direction vector *d* is built per constant, but the code that
        would apply it to the ship is commented out, so *d* is currently
        unused and this only emits a message.
        """
        try:
            #bp = sm.GetService('michelle').GetBallpark()
            #rbp = sm.GetService('michelle').GetRemotePark()
            #if bp is None or rbp is None:
            #    return
            #ownBall = bp.GetBall(eve.session.shipid)
            if direction == const.MOVDIR_FORWARD:
                msg('up')
                d = trinity.TriVector(0.0, -1.0, 1.0)
            elif direction == const.MOVDIR_BACKWARD:
                msg('down')
                d = trinity.TriVector(0.0, 1.0, 1.0)
            elif direction == const.MOVDIR_LEFT:
                msg('left')
                d = trinity.TriVector(-1.0, 0.0, 1.0)
            elif direction == const.MOVDIR_RIGHT:
                msg('right')
                d = trinity.TriVector(1.0, 0.0, 1.0)
            #currentDirection = ownBall.GetQuaternionAt(blue.os.GetTime())
            #direction.TransformQuaternion(currentDirection)
            #rbp.GotoDirection(direction.x, direction.y, direction.z)
        except:
            msg('MyMove error')
    class MyService(service.Service):
        """'LocalWatch': a client service that tracks hostiles in the local channel.

        Polls local chat once a second, counts pilots with standing below
        0.5 (caching lookups per character), and shows the count in a
        small top-center overlay, optionally flashing the screen.
        """
        __guid__ = 'svc.MyService'
        __servicename__ = 'MyService'
        __displayname__ = 'My Service'
        def __init__(self):
            service.Service.__init__(self)
            sm.GetService('gameui').Say('LocalWatch Service started')
            self.pane = None          # LocalDash overlay, created on demand
            self.cache = {}           # charID -> standing, avoids repeated lookups
            self.hostilecount = -1    # -1 means 'not yet computed'
            self.busy = False         # guards GetHostileCount against re-entry
            self.localChannel = None
            self.warning = False      # when True, flash the screen on hostiles
            # search for local channel window and save it
            # WARNING: currently we do not update this reference on session change, add it in later!
            for channelID in sm.GetService('LSC').channels.keys():
                channel = sm.GetService('LSC').channels[channelID]
                if (channel.window and ((type(channel.channelID) is tuple) and (channel.channelID[0][0] == 'solarsystemid2'))):
                    self.localChannel = channel
            # 1000 ms repeating timer driving Update()
            self.alive = base.AutoTimer(1000, self.Update)
            ## self.A_down = False
            ## self.S_down= False
            ## self.D_down= False
            ## self.W_down= False
        def initPane(self):
            """Create the LocalDash overlay and render the current hostile count."""
            self.pane = LocalDash(parent=uicore.layer.abovemain, name='LocalDash')
            self.pane.SetOpacity(0.5)
            #self.pane.ShowMsg("<color=0xff0037ff>BLUES</color> "+" <color=0xffff3700>REDS</color> "+" <color=0xffffffff>TOTAL</color>" + "<br>",
            #                  "<color=0xff0037ff>0</color> "+" <color=0xffff3700>0</color> "+" <color=0xffffffff>0</color>")
            if self.hostilecount == -1 or self.busy:
                self.pane.ShowMsg("<color=0xffffffff>HOSTILES: " + "<color=0xffff3700>updating...</color>")
            else:
                self.pane.ShowMsg("<color=0xffffffff>HOSTILES: " + "<color=0xffff3700>%d</color>" % self.hostilecount)
        def Update(self):
            """Timer callback: refresh the count, redraw the pane, maybe flash."""
            if not self.busy:
                # safe to update hostile count since we're not busy
                self.hostilecount = self.GetHostileCount()
            # update pane only if we have it open (recreated each tick)
            if self.pane:
                self.pane.Close()
                self.pane = LocalDash(parent=uicore.layer.abovemain, name='LocalDash')
                self.pane.SetOpacity(0.5)
                if self.hostilecount == -1 or self.busy:
                    self.pane.ShowMsg("<color=0xffffffff>HOSTILES: " + "<color=0xffff3700>updating...</color>")
                else:
                    self.pane.ShowMsg("<color=0xffffffff>HOSTILES: " + "<color=0xffff3700>%d</color>" % self.hostilecount)
            if self.warning and (self.hostilecount > 0):
                flash = WarningFlash(parent=uicore.layer.abovemain, name='WarningFlash')
                flash.Flash()
        def GetHostileCount(self):
            """Count pilots in local with standing < 0.5, caching standings per charID."""
            # set service status to busy to avoid lock
            self.busy = True
            if self.hostilecount == -1:
                self.hostilecount = 0
            count = 0
            mlist = copy.deepcopy(self.localChannel.memberList)
            for charID in mlist:
                # if it's myself, skip it
                if charID == session.charid:
                    continue
                else:
                    # check cache first
                    if charID in self.cache.keys():
                        if self.cache[charID] < 0.5:
                            # increment counter if less than friendly
                            count += 1
                            #msg('hostiles: %d' % count)
                    # charID not in cache, we need to find out the standing the hard way
                    else:
                        # figure out what ID to use for myself
                        myStandingID = None
                        if (eve.session.allianceid):
                            myStandingID = eve.session.allianceid
                        else:
                            myStandingID = eve.session.corpid
                        standing = sm.GetService('standing').GetStanding(myStandingID, charID)
                        if (standing == 0.0):
                            # neutral standing could mean charID is in our alliance or corp
                            corpCharInfo = sm.GetService('corp').GetInfoWindowDataForChar(charID, 1)
                            corpID = corpCharInfo.corpID
                            allianceID = corpCharInfo.allianceID
                            if (eve.session.corpid == corpID) or ((eve.session.allianceid != None) and (eve.session.allianceid == allianceID)):
                                # charID is in our alliance or corp, mark friendly in cache and skip
                                self.cache[charID] = 1.0
                                continue
                            else:
                                # charID is not in our alliance or corp, better mark hostile in cache and increment count
                                self.cache[charID] = standing
                                count += 1
                                #msg('hostiles: %d' % count)
                        elif (standing < 0.5):
                            # we found a new hostile, store in cache and increment counter
                            self.cache[charID] = standing
                            count += 1
                            #msg('hostiles: %d' % count)
                        else:
                            # charID is in a friendly alliance/corp or tagged friendly by alliance, store in cache and skip
                            self.cache[charID] = standing
                            continue
            # we're done, no longer busy
            self.busy = False
            return count
        def Open(self):
            """Show the overlay (no-op if it is already open)."""
            if self.pane:
                return
            self.initPane()
        def Close(self):
            """Hide the overlay."""
            self.Reset()
        def Reset(self):
            """Destroy and forget the overlay pane, if any."""
            if self.pane:
                self.pane.Close()
                del self.pane
                self.pane = None
        def WarningOn(self):
            """Enable the full-screen flash while hostiles are present."""
            self.warning = True
        def WarningOff(self):
            """Disable the full-screen hostile flash."""
            self.warning = False
        def CleanUp(self):
            """Stop the refresh timer and tear down the UI; call before discarding the service."""
            del self.alive
            self.alive = None
            self.Reset()
    class WarningFlash(uicls.Container):
        """Full-screen translucent red overlay flashed briefly as a hostile alert."""
        __guid__ = 'uicls.Warning'
        def ApplyAttributes(self, attributes):
            self.scope = 'station_inflight'
            self.frame = None
            uicls.Container.ApplyAttributes(self, attributes)
        def Flash(self, duration = 500):
            """Cover the desktop with a red frame for *duration* ms, then close."""
            self.SetAlign(uiconst.CENTERTOP)
            self.SetSize(uicore.desktop.width, uicore.desktop.height)
            self.frame = uicls.Frame(parent=self, color=(1.0, 0.25, 0.25, 0.25), frameConst=uiconst.FRAME_FILLED_CORNER1, state=uiconst.UI_DISABLED)
            self.SetPosition(0, 0)
            self.state = uiconst.UI_DISABLED
            uiutil.SetOrder(self, 0)
            # NOTE(review): this sleep blocks the calling tasklet for the flash duration
            blue.pyos.synchro.Sleep(duration)
            self.Close()
    class LocalDash(uicls.Container):
        """Small top-center banner used to display the current hostile count."""
        __guid__ = 'uicls.LocalDash'
        def ApplyAttributes(self, attributes):
            self.scope = 'station_inflight'
            self.message = None
            #self.message2 = None
            uicls.Container.ApplyAttributes(self, attributes)
        def Prepare_Text_(self):
            # label is created lazily on the first ShowMsg call
            self.message = uicls.Label(text='', parent=self, left=0, top=4, autowidth=False, width=100, fontsize=12, state=uiconst.UI_DISABLED)
            #self.message2 = uicls.Label(text='', parent=self, left=0, top=16, autowidth=False, width=200, fontsize=12, state=uiconst.UI_DISABLED)
        def Prepare_Underlay_(self):
            # white border plus dark translucent fill behind the label
            border = uicls.Frame(parent=self, frameConst=uiconst.FRAME_BORDER1_CORNER1, state=uiconst.UI_DISABLED, color=(1.0, 1.0, 1.0, 0.25))
            frame = uicls.Frame(parent=self, color=(0.0, 0.0, 0.0, 0.75), frameConst=uiconst.FRAME_FILLED_CORNER1, state=uiconst.UI_DISABLED)
        def ShowMsg(self, text1):
            """Display *text1* (client markup) centered at the top of the screen."""
            if self.message is None:
                self.Prepare_Text_()
                self.Prepare_Underlay_()
            self.message.text = '<center>' + text1
            #self.message2.text = '<center>' + text2
            self.SetAlign(uiconst.CENTERTOP)
            self.SetSize(100, 20)
            offset = sm.GetService('window').GetCameraLeftOffset(self.width, align=uiconst.CENTERTOP, left=0)
            self.SetPosition(offset, 5)
            self.state = uiconst.UI_DISABLED
            uiutil.SetOrder(self, 0)
    @safetycheck
    def CreateIt(*args):
        """Start the LocalWatch service (stored on the neocom's 'bottomline' slot)."""
        #create an instance of something
        bottomline = sm.GetService('neocom').bottomline
        if bottomline and hasattr(bottomline, "alive") and bottomline.alive:
            msg('LocalWatch Service already running!')
        else:
            sm.GetService('neocom').bottomline = MyService()
    @safetycheck
    def DestroyIt(*args):
        """Stop and discard the LocalWatch service, if one is running."""
        #destroy an instance of something
        if sm.GetService('neocom').bottomline == None:
            msg('LocalWatch Service not running!')
            return
        if hasattr(sm.GetService('neocom').bottomline, 'alive'):
            sm.GetService('neocom').bottomline.CleanUp()
        del sm.GetService('neocom').bottomline
        sm.GetService('neocom').bottomline = None
        msg('LocalWatch Service killed!')
    @safetycheck
    def ToggleIt(*args):
        """Toggle the LocalDash overlay of a running LocalWatch service."""
        bottomline = sm.GetService('neocom').bottomline
        if bottomline and hasattr(bottomline, 'alive') and bottomline.alive:
            if bottomline.pane:
                bottomline.Close()
            else:
                bottomline.Open()
    @safetycheck
    def WarnIt(*args):
        """Toggle the full-screen flash warning of a running LocalWatch service."""
        bottomline = sm.GetService('neocom').bottomline
        if bottomline and hasattr(bottomline, 'alive') and bottomline.alive:
            if bottomline.warning:
                bottomline.WarningOff()
                msg('Warning toggled OFF!')
            else:
                bottomline.WarningOn()
                msg('Warning toggled ON!')
    @safetycheck
    def TryGetInvItem(itemID):
        """Return the item with *itemID* from the active ship's inventory, or None."""
        if eve.session.shipid is None:
            return
        ship = eve.GetInventoryFromId(eve.session.shipid)
        if ship:
            for invItem in ship.List():
                if invItem.itemID == itemID:
                    return invItem
    @safetycheck
    def GetItem(itemID):
        """Look up *itemID* in the ballpark first, then fall back to ship inventory."""
        item = uix.GetBallparkRecord(itemID)
        if not item:
            item = TryGetInvItem(itemID)
        return item
    @safetycheck
    def ReportInfo(*args):
        """Report how many personal bookmarks exist in the current solar system."""
        absvc = sm.GetService('addressbook')
        bookmarks = absvc.GetBookmarks()
        bmsInSystem = list()
        for each in bookmarks.itervalues():
            if each.locationID == session.solarsystemid:
                bmsInSystem.append(each)
        msg('bmsInSystem length is: %d' % (len(bmsInSystem)))
    @safetycheck
    def TryWarpToSafe(*args):
        """Warp the active ship to the in-system bookmark whose memo is 'safePoS'."""
        absvc = sm.GetService('addressbook')
        bookmarks = absvc.GetBookmarks()
        bms = list()
        for each in bookmarks.itervalues():
            if each.locationID == session.solarsystemid:
                bms.append(each)
        safeBM = None
        # last matching bookmark wins if several share the memo
        for bm in bms:
            memo = absvc.UnzipMemo(bm.memo)[0]
            if memo == 'safePoS':
                safeBM = bm
        if safeBM == None:
            msg('error finding safePoS')
            return
        if session.solarsystemid and session.shipid:
            bp = sm.GetService('michelle').GetRemotePark()
            if bp:
                bp.WarpToStuff('bookmark', safeBM.bookmarkID, minRange=0.0, fleet=False)
                sm.StartService('space').WarpDestination(None, safeBM.bookmarkID, None)
    # Injection bootstrap: add four buttons to the neocom for starting,
    # killing, toggling and warning-toggling the LocalWatch service.
    try:
        """
        #DisableLog()
        #sm.GetService('cmd').OpenUIDebugger()
        #old_UpdateMovement = sm.GetService('cmd')._UpdateMovement
        #sm.GetService('mouseInput').OnDoubleClick = MyOnDoubleClick
        """
        neocomwnd = sm.GetService('neocom').main
        btn = uix.GetBigButton(32, neocomwnd, top=800)
        btn.OnClick = CreateIt
        btn.hint = "Start LocalWatch service"
        btn.sr.icon.LoadIcon('11_01')
        createBtn = btn
        btn = uix.GetBigButton(32, neocomwnd, top=833)
        btn.OnClick = DestroyIt
        btn.hint = "Kill LocalWatch service"
        btn.sr.icon.LoadIcon('11_02')
        destroyBtn = btn
        btn = uix.GetBigButton(32, neocomwnd, top=866)
        btn.OnClick = ToggleIt
        btn.hint = "Show LocalDash"
        btn.sr.icon.LoadIcon('11_03')
        killBtn = btn
        btn = uix.GetBigButton(32, neocomwnd, top=899)
        btn.OnClick = WarnIt
        btn.hint = "Toggle flash warning"
        btn.sr.icon.LoadIcon('11_04')
        warnBtn = btn
    except:
        msg('bad inject')
except:
pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.